| repo_id | size | file_path | content |
|---|---|---|---|
| AirFortressIlikara/LS2K0300-linux-4.19 | 1,219 | arch/hexagon/kernel/trampoline.S |
/*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* Trampoline sequences to be copied onto user stack.
* This consumes a little more space than hand-assembling
* immediate constants for use in C, but is more portable
* to future tweaks to the Hexagon instruction set.
*/
#include <asm/unistd.h>
/* Sig trampolines - call sys_sigreturn or sys_rt_sigreturn as appropriate */
/* plain sigreturn is gone. */
.globl __rt_sigtramp_template
__rt_sigtramp_template:
r6 = #__NR_rt_sigreturn;
trap0(#1);
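/*
 * Illustrative C equivalent of the template above (a hedged sketch):
 *
 *	syscall(__NR_rt_sigreturn);
 *
 * i.e. load the syscall number into r6 and trap into the kernel, so a
 * signal handler's return lands in sys_rt_sigreturn.
 */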
| AirFortressIlikara/LS2K0300-linux-4.19 | 1,957 | arch/hexagon/kernel/vmlinux.lds.S |
/*
* Linker script for Hexagon kernel
*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h> /* Most of the kernel defines are here */
#include <asm/mem-layout.h> /* except for page_offset */
#include <asm/cache.h> /* and now we're pulling cache line size */
#include <asm/thread_info.h> /* and we need THREAD_SIZE too */
OUTPUT_ARCH(hexagon)
ENTRY(stext)
jiffies = jiffies_64;
/*
See asm-generic/vmlinux.lds.h for expansion of some of these macros.
See asm-generic/sections.h for seemingly required labels.
*/
#define PAGE_SIZE _PAGE_SIZE
SECTIONS
{
. = PAGE_OFFSET;
__init_begin = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
PERCPU_SECTION(L1_CACHE_BYTES)
__init_end = .;
. = ALIGN(_PAGE_SIZE);
_stext = .;
.text : AT(ADDR(.text)) {
_text = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
}
_etext = .;
INIT_DATA_SECTION(PAGE_SIZE)
_sdata = .;
RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE)
RO_DATA_SECTION(PAGE_SIZE)
_edata = .;
EXCEPTION_TABLE(16)
NOTES
BSS_SECTION(_PAGE_SIZE, _PAGE_SIZE, _PAGE_SIZE)
_end = .;
/DISCARD/ : {
EXIT_TEXT
EXIT_DATA
EXIT_CALL
}
STABS_DEBUG
DWARF_DEBUG
}
| AirFortressIlikara/LS2K0300-linux-4.19 | 3,023 | arch/hexagon/kernel/vm_switch.S |
/*
* Context switch support for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/asm-offsets.h>
.text
/*
* The register used as a fast-path thread information pointer
* is determined as a kernel configuration option. If it happens
* to be a callee-save register, we're going to be saving and
* restoring it twice here.
*
* This code anticipates a revised ABI where R20-23 are added
* to the set of callee-save registers, but this should be
* backward compatible to legacy tools.
*/
/*
* void switch_to(struct task_struct *prev,
* struct task_struct *next, struct task_struct *last);
*/
.p2align 2
.globl __switch_to
.type __switch_to, @function
/*
* When we exit the wormhole, we need to store the previous task
* in the new R0's pointer. Technically it should be R2, but they should
* be the same; seems like a legacy thing. In short, don't butcher
* R0, let it go back out unmolested.
*/
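/*
 * Hedged sketch of the callee-save frame built below; the struct and
 * field names are illustrative only (the real offsets come from
 * asm-offsets.h), but the contents match the stores that follow:
 *
 *	struct switch_stack_sketch {
 *		u64 r1716, r1918, r2120, r2322, r2524, r2726;
 *		u64 fp_lr;	// FP/LR pair allocated by allocframe
 *	};
 */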
__switch_to:
/*
* Push callee-saves onto "prev" stack.
* Here, we're sneaky because the LR and FP
* storage of the thread_stack structure
* is automagically allocated by allocframe,
* so we pass struct size less 8.
*/
allocframe(#(_SWITCH_STACK_SIZE - 8));
memd(R29+#(_SWITCH_R2726))=R27:26;
memd(R29+#(_SWITCH_R2524))=R25:24;
memd(R29+#(_SWITCH_R2322))=R23:22;
memd(R29+#(_SWITCH_R2120))=R21:20;
memd(R29+#(_SWITCH_R1918))=R19:18;
memd(R29+#(_SWITCH_R1716))=R17:16;
/* Stash thread_info pointer in task_struct */
memw(R0+#_TASK_THREAD_INFO) = THREADINFO_REG;
memw(R0 +#(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP)) = R29;
/* Switch to "next" stack and restore callee saves from there */
R29 = memw(R1 + #(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP));
{
R27:26 = memd(R29+#(_SWITCH_R2726));
R25:24 = memd(R29+#(_SWITCH_R2524));
}
{
R23:22 = memd(R29+#(_SWITCH_R2322));
R21:20 = memd(R29+#(_SWITCH_R2120));
}
{
R19:18 = memd(R29+#(_SWITCH_R1918));
R17:16 = memd(R29+#(_SWITCH_R1716));
}
{
/* THREADINFO_REG is currently one of the callee-saved regs
* above, and so be sure to re-load it last.
*/
THREADINFO_REG = memw(R1 + #_TASK_THREAD_INFO);
R31:30 = memd(R29+#_SWITCH_FP);
}
{
R29 = add(R29,#_SWITCH_STACK_SIZE);
jumpr R31;
}
.size __switch_to, .-__switch_to
| AirFortressIlikara/LS2K0300-linux-4.19 | 16,011 | arch/hexagon/lib/memcpy.S |
/*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* Description
*
* library function for memcpy where length bytes are copied from
* ptr_in to ptr_out. ptr_out is returned unchanged.
* Allows any combination of alignment on input and output pointers
* and length from 0 to 2^32-1
*
* Restrictions
* The arrays should not overlap, the program will produce undefined output
* if they do.
* For blocks less than 16 bytes a byte by byte copy is performed. For
* 8byte alignments, and length multiples, a dword copy is performed up to
* 96bytes
* History
*
* DJH 5/15/09 Initial version 1.0
* DJH 6/ 1/09 Version 1.1 modified ABI to include R16-R19
* DJH 7/12/09 Version 1.2 optimized codesize down to 760 was 840
* DJH 10/14/09 Version 1.3 added special loop for aligned case, was
* overreading bloated codesize back up to 892
* DJH 4/20/10 Version 1.4 fixed Ldword_loop_epilog loop to prevent loads
* occurring if only 1 left outstanding, fixes bug
* # 3888, corrected for all alignments. Peeled off
* 1 32byte chunk from kernel loop and extended 8byte
* loop at end to solve all combinations and prevent
* over read. Fixed Ldword_loop_prolog to prevent
* overread for blocks less than 48bytes. Reduced
* codesize to 752 bytes
* DJH 4/21/10 version 1.5 1.4 fix broke code for input block ends not
* aligned to dword boundaries,underwriting by 1
* byte, added detection for this and fixed. A
* little bloat.
* DJH 4/23/10 version 1.6 corrected stack error, R20 was not being restored
* always, fixed the error of R20 being modified
* before it was being saved
* Natural C model
* ===============
* void * memcpy(char * ptr_out, char * ptr_in, int length) {
* int i;
* if(length) for(i=0; i < length; i++) { ptr_out[i] = ptr_in[i]; }
* return(ptr_out);
* }
*
* Optimized memcpy function
* =========================
* void * memcpy(char * ptr_out, char * ptr_in, int len) {
* int i, prolog, kernel, epilog, mask;
* u8 offset;
* s64 data0, dataF8, data70;
*
* s64 * ptr8_in;
* s64 * ptr8_out;
* s32 * ptr4;
* s16 * ptr2;
*
* offset = ((int) ptr_in) & 7;
* ptr8_in = (s64 *) &ptr_in[-offset]; //read in the aligned pointers
*
* data70 = *ptr8_in++;
* dataF8 = *ptr8_in++;
*
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
*
* prolog = 32 - ((int) ptr_out);
* mask = 0x7fffffff >> HEXAGON_R_cl0_R(len);
* prolog = prolog & mask;
* kernel = len - prolog;
* epilog = kernel & 0x1F;
* kernel = kernel>>5;
*
* if (prolog & 1) { ptr_out[0] = (u8) data0; data0 >>= 8; ptr_out += 1;}
* ptr2 = (s16 *) &ptr_out[0];
* if (prolog & 2) { ptr2[0] = (u16) data0; data0 >>= 16; ptr_out += 2;}
* ptr4 = (s32 *) &ptr_out[0];
* if (prolog & 4) { ptr4[0] = (u32) data0; data0 >>= 32; ptr_out += 4;}
*
* offset = offset + (prolog & 7);
* if (offset >= 8) {
* data70 = dataF8;
* dataF8 = *ptr8_in++;
* }
* offset = offset & 0x7;
*
* prolog = prolog >> 3;
* if (prolog) for (i=0; i < prolog; i++) {
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* data70 = dataF8;
* dataF8 = *ptr8_in++;
* }
* if(kernel) { kernel -= 1; epilog += 32; }
* if(kernel) for(i=0; i < kernel; i++) {
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* data70 = *ptr8_in++;
*
* data0 = HEXAGON_P_valignb_PPp(data70, dataF8, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* dataF8 = *ptr8_in++;
*
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* data70 = *ptr8_in++;
*
* data0 = HEXAGON_P_valignb_PPp(data70, dataF8, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* dataF8 = *ptr8_in++;
* }
* epilogdws = epilog >> 3;
* if (epilogdws) for (i=0; i < epilogdws; i++) {
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
* ptr8_out = (s64 *) &ptr_out[0]; *ptr8_out = data0; ptr_out += 8;
* data70 = dataF8;
* dataF8 = *ptr8_in++;
* }
* data0 = HEXAGON_P_valignb_PPp(dataF8, data70, offset);
*
* ptr4 = (s32 *) &ptr_out[0];
* if (epilog & 4) { ptr4[0] = (u32) data0; data0 >>= 32; ptr_out += 4;}
* ptr2 = (s16 *) &ptr_out[0];
* if (epilog & 2) { ptr2[0] = (u16) data0; data0 >>= 16; ptr_out += 2;}
* if (epilog & 1) { *ptr_out++ = (u8) data0; }
*
* return(ptr_out - len);
* }
*
* Codesize : 784 bytes
*/
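/*
 * Worked example of the prolog/kernel/epilog split above, with
 * illustrative values ptr_out = 0x1003 and len = 200:
 *
 *	mask   = 0x7fffffff >> cl0(200)  = 0x7f
 *	prolog = (32 - 0x1003) & 0x7f    = 29   (bytes to the next 32B line)
 *	kernel = (200 - 29) >> 5         = 5    (32-byte chunks)
 *	epilog = (200 - 29) & 0x1f       = 11   (trailing bytes)
 *
 * and indeed 29 + 5*32 + 11 == 200, with ptr_out + 29 == 0x1020 aligned.
 */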
#define ptr_out R0 /* destination pointer */
#define ptr_in R1 /* source pointer */
#define len R2 /* length of copy in bytes */
#define data70 R13:12 /* lo 8 bytes of non-aligned transfer */
#define dataF8 R11:10 /* hi 8 bytes of non-aligned transfer */
#define ldata0 R7:6 /* even 8 bytes chunks */
#define ldata1 R25:24 /* odd 8 bytes chunks */
#define data1 R7 /* lower 8 bytes of ldata1 */
#define data0 R6 /* lower 8 bytes of ldata0 */
#define ifbyte p0 /* if transfer has bytes in epilog/prolog */
#define ifhword p0 /* if transfer has shorts in epilog/prolog */
#define ifword p0 /* if transfer has words in epilog/prolog */
#define noprolog p0 /* no prolog, xfer starts at 32byte */
#define nokernel p1 /* no 32byte multiple block in the transfer */
#define noepilog p0 /* no epilog, xfer ends on 32byte boundary */
#define align p2 /* alignment of input rel to 8byte boundary */
#define kernel1 p0 /* kernel count == 1 */
#define dalign R25 /* rel alignment of input to output data */
#define star3 R16 /* number bytes in prolog - dwords */
#define rest R8 /* length - prolog bytes */
#define back R7 /* nr bytes > dword boundary in src block */
#define epilog R3 /* bytes in epilog */
#define inc R15:14 /* inc kernel by -1 and defetch ptr by 32 */
#define kernel R4 /* number of 32byte chunks in kernel */
#define ptr_in_p_128 R5 /* pointer for prefetch of input data */
#define mask R8 /* mask used to determine prolog size */
#define shift R8 /* used to work a shifter to extract bytes */
#define shift2 R5 /* used in epilog to work the shifter to extract bytes */
#define prolog R15 /* bytes in prolog */
#define epilogdws R15 /* number dwords in epilog */
#define shiftb R14 /* used to extract bytes */
#define offset R9 /* same as align in reg */
#define ptr_out_p_32 R17 /* pointer to output dczero */
#define align888 R14 /* if simple dword loop can be used */
#define len8 R9 /* number of dwords in length */
#define over R20 /* nr of bytes > last inp buf dword boundary */
#define ptr_in_p_128kernel R5:4 /* packed fetch pointer & kernel cnt */
.section .text
.p2align 4
.global memcpy
.type memcpy, @function
memcpy:
{
p2 = cmp.eq(len, #0); /* =0 */
align888 = or(ptr_in, ptr_out); /* %8 < 97 */
p0 = cmp.gtu(len, #23); /* %1, <24 */
p1 = cmp.eq(ptr_in, ptr_out); /* attempt to overwrite self */
}
{
p1 = or(p2, p1);
p3 = cmp.gtu(len, #95); /* %8 < 97 */
align888 = or(align888, len); /* %8 < 97 */
len8 = lsr(len, #3); /* %8 < 97 */
}
{
dcfetch(ptr_in); /* zero/ptrin=ptrout causes fetch */
p2 = bitsclr(align888, #7); /* %8 < 97 */
if(p1) jumpr r31; /* =0 */
}
{
p2 = and(p2,!p3); /* %8 < 97 */
if (p2.new) len = add(len, #-8); /* %8 < 97 */
if (p2.new) jump:NT .Ldwordaligned; /* %8 < 97 */
}
{
if(!p0) jump .Lbytes23orless; /* %1, <24 */
mask.l = #LO(0x7fffffff);
/* all bytes before the next 32-byte line boundary */
prolog = sub(#0, ptr_out);
}
{
/* save r31 on stack, decrement sp by 16 */
allocframe(#24);
mask.h = #HI(0x7fffffff);
ptr_in_p_128 = add(ptr_in, #32);
back = cl0(len);
}
{
memd(sp+#0) = R17:16; /* save r16,r17 on stack */
r31.l = #LO(.Lmemcpy_return); /* set up final return pointer */
prolog &= lsr(mask, back);
offset = and(ptr_in, #7);
}
{
memd(sp+#8) = R25:24; /* save r25,r24 on stack */
dalign = sub(ptr_out, ptr_in);
r31.h = #HI(.Lmemcpy_return); /* set up final return pointer */
}
{
/* see if the input buffer end is aligned */
over = add(len, ptr_in);
back = add(len, offset);
memd(sp+#16) = R21:20; /* save r20,r21 on stack */
}
{
noprolog = bitsclr(prolog, #7);
prolog = and(prolog, #31);
dcfetch(ptr_in_p_128);
ptr_in_p_128 = add(ptr_in_p_128, #32);
}
{
kernel = sub(len, prolog);
shift = asl(prolog, #3);
star3 = and(prolog, #7);
ptr_in = and(ptr_in, #-8);
}
{
prolog = lsr(prolog, #3);
epilog = and(kernel, #31);
ptr_out_p_32 = add(ptr_out, prolog);
over = and(over, #7);
}
{
p3 = cmp.gtu(back, #8);
kernel = lsr(kernel, #5);
dcfetch(ptr_in_p_128);
ptr_in_p_128 = add(ptr_in_p_128, #32);
}
{
p1 = cmp.eq(prolog, #0);
if(!p1.new) prolog = add(prolog, #1);
dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
ptr_in_p_128 = add(ptr_in_p_128, #32);
}
{
nokernel = cmp.eq(kernel,#0);
dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
ptr_in_p_128 = add(ptr_in_p_128, #32);
shiftb = and(shift, #8);
}
{
dcfetch(ptr_in_p_128); /* reserve the line 64bytes on */
ptr_in_p_128 = add(ptr_in_p_128, #32);
if(nokernel) jump .Lskip64;
p2 = cmp.eq(kernel, #1); /* skip over if kernel == 0 */
}
{
dczeroa(ptr_out_p_32);
/* don't advance pointer */
if(!p2) ptr_out_p_32 = add(ptr_out_p_32, #32);
}
{
dalign = and(dalign, #31);
dczeroa(ptr_out_p_32);
}
.Lskip64:
{
data70 = memd(ptr_in++#16);
if(p3) dataF8 = memd(ptr_in+#8);
if(noprolog) jump .Lnoprolog32;
align = offset;
}
/* up to initial 7 bytes */
{
ldata0 = valignb(dataF8, data70, align);
ifbyte = tstbit(shift,#3);
offset = add(offset, star3);
}
{
if(ifbyte) memb(ptr_out++#1) = data0;
ldata0 = lsr(ldata0, shiftb);
shiftb = and(shift, #16);
ifhword = tstbit(shift,#4);
}
{
if(ifhword) memh(ptr_out++#2) = data0;
ldata0 = lsr(ldata0, shiftb);
ifword = tstbit(shift,#5);
p2 = cmp.gtu(offset, #7);
}
{
if(ifword) memw(ptr_out++#4) = data0;
if(p2) data70 = dataF8;
if(p2) dataF8 = memd(ptr_in++#8); /* another 8 bytes */
align = offset;
}
.Lnoprolog32:
{
p3 = sp1loop0(.Ldword_loop_prolog, prolog)
rest = sub(len, star3); /* what's left after the loop */
p0 = cmp.gt(over, #0);
}
if(p0) rest = add(rest, #16);
.Ldword_loop_prolog:
{
if(p3) memd(ptr_out++#8) = ldata0;
ldata0 = valignb(dataF8, data70, align);
p0 = cmp.gt(rest, #16);
}
{
data70 = dataF8;
if(p0) dataF8 = memd(ptr_in++#8);
rest = add(rest, #-8);
}:endloop0
.Lkernel:
{
/* kernel is at least 32bytes */
p3 = cmp.gtu(kernel, #0);
/* last iteration: remove edge effects */
if(p3.new) kernel = add(kernel, #-1);
/* dealt with in last dword loop */
if(p3.new) epilog = add(epilog, #32);
}
{
nokernel = cmp.eq(kernel, #0); /* after adjustment, recheck */
if(nokernel.new) jump:NT .Lepilog; /* likely not taken */
inc = combine(#32, #-1);
p3 = cmp.gtu(dalign, #24);
}
{
if(p3) jump .Lodd_alignment;
}
{
loop0(.Loword_loop_25to31, kernel);
kernel1 = cmp.gtu(kernel, #1);
rest = kernel;
}
.falign
.Loword_loop_25to31:
{
dcfetch(ptr_in_p_128); /* prefetch 4 lines ahead */
if(kernel1) ptr_out_p_32 = add(ptr_out_p_32, #32);
}
{
dczeroa(ptr_out_p_32); /* reserve the next 32bytes in cache */
p3 = cmp.eq(kernel, rest);
}
{
/* kernel -= 1 */
ptr_in_p_128kernel = vaddw(ptr_in_p_128kernel, inc);
/* kill write on first iteration */
if(!p3) memd(ptr_out++#8) = ldata1;
ldata1 = valignb(dataF8, data70, align);
data70 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(data70, dataF8, align);
dataF8 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata1;
ldata1 = valignb(dataF8, data70, align);
data70 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(data70, dataF8, align);
dataF8 = memd(ptr_in++#8);
kernel1 = cmp.gtu(kernel, #1);
}:endloop0
{
memd(ptr_out++#8) = ldata1;
jump .Lepilog;
}
.Lodd_alignment:
{
loop0(.Loword_loop_00to24, kernel);
kernel1 = cmp.gtu(kernel, #1);
rest = add(kernel, #-1);
}
.falign
.Loword_loop_00to24:
{
dcfetch(ptr_in_p_128); /* prefetch 4 lines ahead */
ptr_in_p_128kernel = vaddw(ptr_in_p_128kernel, inc);
if(kernel1) ptr_out_p_32 = add(ptr_out_p_32, #32);
}
{
dczeroa(ptr_out_p_32); /* reserve the next 32bytes in cache */
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(dataF8, data70, align);
data70 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(data70, dataF8, align);
dataF8 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(dataF8, data70, align);
data70 = memd(ptr_in++#8);
}
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(data70, dataF8, align);
dataF8 = memd(ptr_in++#8);
kernel1 = cmp.gtu(kernel, #1);
}:endloop0
.Lepilog:
{
noepilog = cmp.eq(epilog,#0);
epilogdws = lsr(epilog, #3);
kernel = and(epilog, #7);
}
{
if(noepilog) jumpr r31;
if(noepilog) ptr_out = sub(ptr_out, len);
p3 = cmp.eq(epilogdws, #0);
shift2 = asl(epilog, #3);
}
{
shiftb = and(shift2, #32);
ifword = tstbit(epilog,#2);
if(p3) jump .Lepilog60;
if(!p3) epilog = add(epilog, #-16);
}
{
loop0(.Ldword_loop_epilog, epilogdws);
/* stop criterion is the LSBs; if they are 0 then it's 8 */
p3 = cmp.eq(kernel, #0);
if(p3.new) kernel= #8;
p1 = cmp.gt(over, #0);
}
/* if not aligned to end of buffer execute 1 more iteration */
if(p1) kernel= #0;
.Ldword_loop_epilog:
{
memd(ptr_out++#8) = ldata0;
ldata0 = valignb(dataF8, data70, align);
p3 = cmp.gt(epilog, kernel);
}
{
data70 = dataF8;
if(p3) dataF8 = memd(ptr_in++#8);
epilog = add(epilog, #-8);
}:endloop0
/* copy last 7 bytes */
.Lepilog60:
{
if(ifword) memw(ptr_out++#4) = data0;
ldata0 = lsr(ldata0, shiftb);
ifhword = tstbit(epilog,#1);
shiftb = and(shift2, #16);
}
{
if(ifhword) memh(ptr_out++#2) = data0;
ldata0 = lsr(ldata0, shiftb);
ifbyte = tstbit(epilog,#0);
if(ifbyte.new) len = add(len, #-1);
}
{
if(ifbyte) memb(ptr_out) = data0;
ptr_out = sub(ptr_out, len); /* return dest pointer */
jumpr r31;
}
/* do byte copy for small n */
.Lbytes23orless:
{
p3 = sp1loop0(.Lbyte_copy, len);
len = add(len, #-1);
}
.Lbyte_copy:
{
data0 = memb(ptr_in++#1);
if(p3) memb(ptr_out++#1) = data0;
}:endloop0
{
memb(ptr_out) = data0;
ptr_out = sub(ptr_out, len);
jumpr r31;
}
/* do dword copies for aligned in, out and length */
.Ldwordaligned:
{
p3 = sp1loop0(.Ldword_copy, len8);
}
.Ldword_copy:
{
if(p3) memd(ptr_out++#8) = ldata0;
ldata0 = memd(ptr_in++#8);
}:endloop0
{
memd(ptr_out) = ldata0;
ptr_out = sub(ptr_out, len);
jumpr r31; /* return to function caller */
}
.Lmemcpy_return:
r21:20 = memd(sp+#16); /* restore r20+r21 */
{
r25:24 = memd(sp+#8); /* restore r24+r25 */
r17:16 = memd(sp+#0); /* restore r16+r17 */
}
deallocframe; /* restore r31 and increment stack by 16 */
jumpr r31
| AirFortressIlikara/LS2K0300-linux-4.19 | 5,293 | arch/hexagon/lib/memset.S |
/*
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/* HEXAGON assembly optimized memset */
/* Replaces the standard library function memset */
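/* Rough C sketch of the strategy both versions below implement
 * (illustrative only, not the reference implementation):
 *
 *	void *memset(void *s, int c, size_t n)
 *	{
 *		u64 pat = (u8)c * 0x0101010101010101ULL; // vsplatb equivalent
 *		// 1. byte/half/word stores until the pointer is 8-byte aligned
 *		// 2. 8-byte stores, using dczeroa to claim whole 32-byte lines
 *		// 3. word/half/byte stores for the remaining tail
 *		return s;
 *	}
 */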
.macro HEXAGON_OPT_FUNC_BEGIN name
.text
.p2align 4
.globl \name
.type \name, @function
\name:
.endm
.macro HEXAGON_OPT_FUNC_FINISH name
.size \name, . - \name
.endm
/* FUNCTION: memset (v2 version) */
#if __HEXAGON_ARCH__ < 3
HEXAGON_OPT_FUNC_BEGIN memset
{
r6 = #8
r7 = extractu(r0, #3 , #0)
p0 = cmp.eq(r2, #0)
p1 = cmp.gtu(r2, #7)
}
{
r4 = vsplatb(r1)
r8 = r0 /* leave r0 intact for return val */
r9 = sub(r6, r7) /* bytes until double alignment */
if p0 jumpr r31 /* count == 0, so return */
}
{
r3 = #0
r7 = #0
p0 = tstbit(r9, #0)
if p1 jump 2f /* skip byte loop */
}
/* less than 8 bytes to set, so just set a byte at a time and return */
loop0(1f, r2) /* byte loop */
.falign
1: /* byte loop */
{
memb(r8++#1) = r4
}:endloop0
jumpr r31
.falign
2: /* skip byte loop */
{
r6 = #1
p0 = tstbit(r9, #1)
p1 = cmp.eq(r2, #1)
if !p0 jump 3f /* skip initial byte store */
}
{
memb(r8++#1) = r4
r3:2 = sub(r3:2, r7:6)
if p1 jumpr r31
}
.falign
3: /* skip initial byte store */
{
r6 = #2
p0 = tstbit(r9, #2)
p1 = cmp.eq(r2, #2)
if !p0 jump 4f /* skip initial half store */
}
{
memh(r8++#2) = r4
r3:2 = sub(r3:2, r7:6)
if p1 jumpr r31
}
.falign
4: /* skip initial half store */
{
r6 = #4
p0 = cmp.gtu(r2, #7)
p1 = cmp.eq(r2, #4)
if !p0 jump 5f /* skip initial word store */
}
{
memw(r8++#4) = r4
r3:2 = sub(r3:2, r7:6)
p0 = cmp.gtu(r2, #11)
if p1 jumpr r31
}
.falign
5: /* skip initial word store */
{
r10 = lsr(r2, #3)
p1 = cmp.eq(r3, #1)
if !p0 jump 7f /* skip double loop */
}
{
r5 = r4
r6 = #8
loop0(6f, r10) /* double loop */
}
/* set bytes a double word at a time */
.falign
6: /* double loop */
{
memd(r8++#8) = r5:4
r3:2 = sub(r3:2, r7:6)
p1 = cmp.eq(r2, #8)
}:endloop0
.falign
7: /* skip double loop */
{
p0 = tstbit(r2, #2)
if p1 jumpr r31
}
{
r6 = #4
p0 = tstbit(r2, #1)
p1 = cmp.eq(r2, #4)
if !p0 jump 8f /* skip final word store */
}
{
memw(r8++#4) = r4
r3:2 = sub(r3:2, r7:6)
if p1 jumpr r31
}
.falign
8: /* skip final word store */
{
p1 = cmp.eq(r2, #2)
if !p0 jump 9f /* skip final half store */
}
{
memh(r8++#2) = r4
if p1 jumpr r31
}
.falign
9: /* skip final half store */
{
memb(r8++#1) = r4
jumpr r31
}
HEXAGON_OPT_FUNC_FINISH memset
#endif
/* FUNCTION: memset (v3 and higher version) */
#if __HEXAGON_ARCH__ >= 3
HEXAGON_OPT_FUNC_BEGIN memset
{
r7=vsplatb(r1)
r6 = r0
if (r2==#0) jump:nt .L1
}
{
r5:4=combine(r7,r7)
p0 = cmp.gtu(r2,#8)
if (p0.new) jump:nt .L3
}
{
r3 = r0
loop0(.L47,r2)
}
.falign
.L47:
{
memb(r3++#1) = r1
}:endloop0 /* start=.L47 */
jumpr r31
.L3:
{
p0 = tstbit(r0,#0)
if (!p0.new) jump:nt .L8
p1 = cmp.eq(r2, #1)
}
{
r6 = add(r0, #1)
r2 = add(r2,#-1)
memb(r0) = r1
if (p1) jump .L1
}
.L8:
{
p0 = tstbit(r6,#1)
if (!p0.new) jump:nt .L10
}
{
r2 = add(r2,#-2)
memh(r6++#2) = r7
p0 = cmp.eq(r2, #2)
if (p0.new) jump:nt .L1
}
.L10:
{
p0 = tstbit(r6,#2)
if (!p0.new) jump:nt .L12
}
{
r2 = add(r2,#-4)
memw(r6++#4) = r7
p0 = cmp.eq(r2, #4)
if (p0.new) jump:nt .L1
}
.L12:
{
p0 = cmp.gtu(r2,#127)
if (!p0.new) jump:nt .L14
}
r3 = and(r6,#31)
if (r3==#0) jump:nt .L17
{
memd(r6++#8) = r5:4
r2 = add(r2,#-8)
}
r3 = and(r6,#31)
if (r3==#0) jump:nt .L17
{
memd(r6++#8) = r5:4
r2 = add(r2,#-8)
}
r3 = and(r6,#31)
if (r3==#0) jump:nt .L17
{
memd(r6++#8) = r5:4
r2 = add(r2,#-8)
}
.L17:
{
r3 = lsr(r2,#5)
if (r1!=#0) jump:nt .L18
}
{
r8 = r3
r3 = r6
loop0(.L46,r3)
}
.falign
.L46:
{
dczeroa(r6)
r6 = add(r6,#32)
r2 = add(r2,#-32)
}:endloop0 /* start=.L46 */
.L14:
{
p0 = cmp.gtu(r2,#7)
if (!p0.new) jump:nt .L28
r8 = lsr(r2,#3)
}
loop0(.L44,r8)
.falign
.L44:
{
memd(r6++#8) = r5:4
r2 = add(r2,#-8)
}:endloop0 /* start=.L44 */
.L28:
{
p0 = tstbit(r2,#2)
if (!p0.new) jump:nt .L33
}
{
r2 = add(r2,#-4)
memw(r6++#4) = r7
}
.L33:
{
p0 = tstbit(r2,#1)
if (!p0.new) jump:nt .L35
}
{
r2 = add(r2,#-2)
memh(r6++#2) = r7
}
.L35:
p0 = cmp.eq(r2,#1)
if (p0) memb(r6) = r1
.L1:
jumpr r31
.L18:
loop0(.L45,r3)
.falign
.L45:
dczeroa(r6)
{
memd(r6++#8) = r5:4
r2 = add(r2,#-32)
}
memd(r6++#8) = r5:4
memd(r6++#8) = r5:4
{
memd(r6++#8) = r5:4
}:endloop0 /* start=.L45 */
jump .L14
HEXAGON_OPT_FUNC_FINISH memset
#endif
| AirFortressIlikara/LS2K0300-linux-4.19 | 2,185 | arch/hexagon/mm/copy_to_user.S |
/*
* User memory copying routines for the Hexagon Kernel
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/* The right way to do this involves valignb.
 * The easy way, implemented here, only speeds up the case where src and
 * dest have similar alignment.
 */
/*
* Copy to/from user are the same, except that for packets with a load and
* a store, I don't know how to tell which kind of exception we got.
* Therefore, we duplicate the function, and handle faulting addresses
* differently for each function
*/
/*
* copy to user: stores can fault
*/
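/*
 * Caller-side sketch: the fixup code below makes the routine return the
 * number of bytes that could NOT be copied (0 on success), so a typical
 * caller looks like:
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */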
#define src_sav r13
#define dst_sav r12
#define src_dst_sav r13:12
#define d_dbuf r15:14
#define w_dbuf r15
#define dst r0
#define src r1
#define bytes r2
#define loopcount r5
#define FUNCNAME raw_copy_to_user
#include "copy_user_template.S"
/* STORE FAULTS from COPY_TO_USER */
.falign
1109:
2109:
4109:
/* Alignment loop. r2 has been updated. Return it. */
{
r0 = r2
jumpr r31
}
/* Normal copy loops. Use dst-dst_sav to compute distance */
/* dst holds best write, no need to unwind any loops */
/* X - (A - B) == X + B - A */
.falign
8189:
8199:
4189:
4199:
2189:
2199:
1189:
1199:
{
r2 += sub(dst_sav,dst)
}
{
r0 = r2
jumpr r31
}
/* COPY TO USER: only stores can fail */
.section __ex_table,"a"
.long 1100b,1109b
.long 2100b,2109b
.long 4100b,4109b
.long 8180b,8189b
.long 8190b,8199b
.long 4180b,4189b
.long 4190b,4199b
.long 2180b,2189b
.long 2190b,2199b
.long 1180b,1189b
.long 1190b,1199b
.previous
| AirFortressIlikara/LS2K0300-linux-4.19 | 2,623 | arch/hexagon/mm/strnlen_user.S |
/*
* User string length functions for kernel
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#define isrc r0
#define max r1 /* Do not change! */
#define end r2
#define tmp1 r3
#define obo r6 /* off-by-one */
#define start r7
#define mod8 r8
#define dbuf r15:14
#define dcmp r13:12
/*
* The vector mask version of this turned out *really* badly.
* The hardware loop version also turned out *really* badly.
* Seems straight pointer arithmetic basically wins here.
*/
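/*
 * Rough C sketch of the dword loop below (the intrinsic names are loose
 * illustrations of the vcmpb.eq and ct0 instructions):
 *
 *	u64 w = *(const u64 *)start;	// dbuf = memd(start)
 *	u32 hits = vcmpb_eq(w, 0);	// one predicate bit per zero byte
 *	u32 i = ct0(hits);		// index of first zero byte; 32 if none
 */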
#define fname __strnlen_user
.text
.global fname
.type fname, @function
.p2align 5 /* why? */
fname:
{
mod8 = and(isrc,#7);
end = add(isrc,max);
start = isrc;
}
{
P0 = cmp.eq(mod8,#0);
mod8 = and(end,#7);
dcmp = #0;
if (P0.new) jump:t dw_loop; /* fire up the oven */
}
alignment_loop:
fail_1: {
tmp1 = memb(start++#1);
}
{
P0 = cmp.eq(tmp1,#0);
if (P0.new) jump:nt exit_found;
P1 = cmp.gtu(end,start);
mod8 = and(start,#7);
}
{
if (!P1) jump exit_error; /* hit the end */
P0 = cmp.eq(mod8,#0);
}
{
if (!P0) jump alignment_loop;
}
dw_loop:
fail_2: {
dbuf = memd(start);
obo = add(start,#1);
}
{
P0 = vcmpb.eq(dbuf,dcmp);
}
{
tmp1 = P0;
P0 = cmp.gtu(end,start);
}
{
tmp1 = ct0(tmp1);
mod8 = and(end,#7);
if (!P0) jump end_check;
}
{
P0 = cmp.eq(tmp1,#32);
if (!P0.new) jump:nt exit_found;
if (!P0.new) start = add(obo,tmp1);
}
{
start = add(start,#8);
jump dw_loop;
} /* might be nice to combine these jumps... */
end_check:
{
P0 = cmp.gt(tmp1,mod8);
if (P0.new) jump:nt exit_error; /* never found! */
start = add(obo,tmp1);
}
exit_found:
{
R0 = sub(start,isrc);
jumpr R31;
}
exit_error:
{
R0 = add(max,#1);
jumpr R31;
}
/* Uh, what does the "fixup" return here? */
.falign
fix_1:
{
R0 = #0;
jumpr R31;
}
.size fname,.-fname
.section __ex_table,"a"
.long fail_1,fix_1
.long fail_2,fix_1
.previous
| AirFortressIlikara/LS2K0300-linux-4.19 | 3,301 | arch/hexagon/mm/copy_user_template.S |
/*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/* Numerology:
* WXYZ
* W: width in bytes
* X: Load=0, Store=1
* Y: Location 0=preamble,8=loop,9=epilog
* Z: Location=0,handler=9
*/
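/* Example decode: label 8180 is the 8-byte-wide (W=8) store (X=1) in the
 * main loop (Y=8), and its fault handler carries Z=9, i.e. 8189. */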
.text
.global FUNCNAME
.type FUNCNAME, @function
.p2align 5
FUNCNAME:
{
p0 = cmp.gtu(bytes,#0)
if (!p0.new) jump:nt .Ldone
r3 = or(dst,src)
r4 = xor(dst,src)
}
{
p1 = cmp.gtu(bytes,#15)
p0 = bitsclr(r3,#7)
if (!p0.new) jump:nt .Loop_not_aligned_8
src_dst_sav = combine(src,dst)
}
{
loopcount = lsr(bytes,#3)
if (!p1) jump .Lsmall
}
p3=sp1loop0(.Loop8,loopcount)
.Loop8:
8080:
8180:
{
if (p3) memd(dst++#8) = d_dbuf
d_dbuf = memd(src++#8)
}:endloop0
8190:
{
memd(dst++#8) = d_dbuf
bytes -= asl(loopcount,#3)
jump .Lsmall
}
.Loop_not_aligned_8:
{
p0 = bitsclr(r4,#7)
if (p0.new) jump:nt .Lalign
}
{
p0 = bitsclr(r3,#3)
if (!p0.new) jump:nt .Loop_not_aligned_4
p1 = cmp.gtu(bytes,#7)
}
{
if (!p1) jump .Lsmall
loopcount = lsr(bytes,#2)
}
p3=sp1loop0(.Loop4,loopcount)
.Loop4:
4080:
4180:
{
if (p3) memw(dst++#4) = w_dbuf
w_dbuf = memw(src++#4)
}:endloop0
4190:
{
memw(dst++#4) = w_dbuf
bytes -= asl(loopcount,#2)
jump .Lsmall
}
.Loop_not_aligned_4:
{
p0 = bitsclr(r3,#1)
if (!p0.new) jump:nt .Loop_not_aligned
p1 = cmp.gtu(bytes,#3)
}
{
if (!p1) jump .Lsmall
loopcount = lsr(bytes,#1)
}
p3=sp1loop0(.Loop2,loopcount)
.Loop2:
2080:
2180:
{
if (p3) memh(dst++#2) = w_dbuf
w_dbuf = memuh(src++#2)
}:endloop0
2190:
{
memh(dst++#2) = w_dbuf
bytes -= asl(loopcount,#1)
jump .Lsmall
}
.Loop_not_aligned: /* Works for as small as one byte */
p3=sp1loop0(.Loop1,bytes)
.Loop1:
1080:
1180:
{
if (p3) memb(dst++#1) = w_dbuf
w_dbuf = memub(src++#1)
}:endloop0
/* Done */
1190:
{
memb(dst) = w_dbuf
jumpr r31
r0 = #0
}
.Lsmall:
{
p0 = cmp.gtu(bytes,#0)
if (p0.new) jump:nt .Loop_not_aligned
}
.Ldone:
{
r0 = #0
jumpr r31
}
.falign
.Lalign:
1000:
{
if (p0.new) w_dbuf = memub(src)
p0 = tstbit(src,#0)
if (!p1) jump .Lsmall
}
1100:
{
if (p0) memb(dst++#1) = w_dbuf
if (p0) bytes = add(bytes,#-1)
if (p0) src = add(src,#1)
}
2000:
{
if (p0.new) w_dbuf = memuh(src)
p0 = tstbit(src,#1)
if (!p1) jump .Lsmall
}
2100:
{
if (p0) memh(dst++#2) = w_dbuf
if (p0) bytes = add(bytes,#-2)
if (p0) src = add(src,#2)
}
4000:
{
if (p0.new) w_dbuf = memw(src)
p0 = tstbit(src,#2)
if (!p1) jump .Lsmall
}
4100:
{
if (p0) memw(dst++#4) = w_dbuf
if (p0) bytes = add(bytes,#-4)
if (p0) src = add(src,#4)
jump FUNCNAME
}
.size FUNCNAME,.-FUNCNAME
| AirFortressIlikara/LS2K0300-linux-4.19 | 2,321 | arch/hexagon/mm/copy_from_user.S |
/*
* User memory copy functions for kernel
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* The right way to do this involves valignb.
 * The easy way, implemented here, only speeds up the case where src and
 * dest have similar alignment.
*/
/*
* Copy to/from user are the same, except that for packets with a load and
* a store, I don't know how to tell which kind of exception we got.
* Therefore, we duplicate the function, and handle faulting addresses
* differently for each function
*/
/*
* copy from user: loads can fault
*/
#define src_sav r13
#define dst_sav r12
#define src_dst_sav r13:12
#define d_dbuf r15:14
#define w_dbuf r15
#define dst r0
#define src r1
#define bytes r2
#define loopcount r5
#define FUNCNAME raw_copy_from_user
#include "copy_user_template.S"
/* LOAD FAULTS from COPY_FROM_USER */
/* Alignment loop. r2 has been updated. Return it. */
.falign
1009:
2009:
4009:
{
r0 = r2
jumpr r31
}
/* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
/* X - (A - B) == X + B - A */
.falign
8089:
{
memd(dst) = d_dbuf
r2 += sub(src_sav,src)
}
{
r0 = r2
jumpr r31
}
.falign
4089:
{
memw(dst) = w_dbuf
r2 += sub(src_sav,src)
}
{
r0 = r2
jumpr r31
}
.falign
2089:
{
memh(dst) = w_dbuf
r2 += sub(src_sav,src)
}
{
r0 = r2
jumpr r31
}
.falign
1089:
{
memb(dst) = w_dbuf
r2 += sub(src_sav,src)
}
{
r0 = r2
jumpr r31
}
/* COPY FROM USER: only loads can fail */
.section __ex_table,"a"
.long 1000b,1009b
.long 2000b,2009b
.long 4000b,4009b
.long 8080b,8089b
.long 4080b,4089b
.long 2080b,2089b
.long 1080b,1089b
.previous
| AirFortressIlikara/LS2K0300-linux-4.19 | 9,578 | arch/parisc/kernel/head.S |
/* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
* Copyright 1999 SuSE GmbH (Philipp Rumpf)
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
* Copyright (C) 2001 Grant Grundler (Hewlett Packard)
* Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
*
* Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
*/
#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <linux/linkage.h>
#include <linux/init.h>
.level PA_ASM_LEVEL
__INITDATA
ENTRY(boot_args)
.word 0 /* arg0 */
.word 0 /* arg1 */
.word 0 /* arg2 */
.word 0 /* arg3 */
END(boot_args)
__HEAD
.align 4
.import init_thread_union,data
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
.proc
.callinfo
/* Make sure sr4-sr7 are set to zero for the kernel address space */
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Clear BSS (shouldn't the boot loader do this?) */
.import __bss_start,data
.import __bss_stop,data
load32 PA(__bss_start),%r3
load32 PA(__bss_stop),%r4
$bss_loop:
cmpb,<<,n %r3,%r4,$bss_loop
stw,ma %r0,4(%r3)
/* Save away the arguments the boot loader passed in (32 bit args) */
load32 PA(boot_args),%r1
stw,ma %arg0,4(%r1)
stw,ma %arg1,4(%r1)
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
/* Initialize startup VM. Just map first 16/32 MB of memory */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
#if CONFIG_PGTABLE_LEVELS == 3
/* Set pmd in pgd */
load32 PA(pmd0),%r5
shrd %r5,PxD_VALUE_SHIFT,%r3
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
/* 2-level page table, so pmd == pgd */
ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
/* Fill in pmd with enough pte directories */
load32 PA(pg0),%r1
SHRREG %r1,PxD_VALUE_SHIFT,%r3
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
ldi ASM_PT_INITIAL,%r1
1:
stw %r3,0(%r4)
ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
addib,> -1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
/* Now initialize the PTEs themselves. We use RWX for
* everything ... it will get remapped correctly later */
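/* Hedged C sketch of the fill loop below (variable names illustrative):
 *
 *	pte_t *pte = (pte_t *)PA(pg0);
 *	for (i = 0; i < (1 << (KERNEL_INITIAL_ORDER - PAGE_SHIFT)); i++)
 *		*pte++ = _PAGE_KERNEL_RWX | (i << PFN_PTE_SHIFT);
 */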
ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
load32 PA(pg0),%r1
$pgt_fill_loop:
STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
addib,> -1,%r11,$pgt_fill_loop
nop
/* Load the return address...er...crash 'n burn */
copy %r0,%r2
/* And the RFI Target address too */
load32 start_parisc,%r11
/* And the initial task pointer */
load32 init_thread_union,%r6
mtctl %r6,%cr30
/* And the stack pointer too */
ldo THREAD_SZ_ALGN(%r6),%sp
#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
.import _mcount,data
/* initialize mcount FPTR */
/* Get the global data pointer */
loadgp
load32 PA(_mcount), %r10
std %dp,0x18(%r10)
#endif
#ifdef CONFIG_64BIT
/* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r10
depd %r10, 31, 32, %r3 /* move to upper word */
#endif
#ifdef CONFIG_SMP
/* Set the smp rendezvous address into page zero.
** It would be safer to do this in init_smp_config() but
** it's just way easier to deal with here because
** of 64-bit function ptrs and the address is local to this file.
*/
load32 PA(smp_slave_stext),%r10
stw %r10,0x10(%r0) /* MEM_RENDEZ */
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
/* FALLTHROUGH */
.procend
/*
** Code Common to both Monarch and Slave processors.
** Entry:
**
** 1.1:
** %r11 must contain RFI target address.
** %r25/%r26 args to pass to target function
** %r2 in case rfi target decides it didn't like something
**
** 2.0w:
** %r3 PDCE_PROC address
** %r11 RFI target address
**
** Caller must init: SR4-7, %sp, %r10, %cr24/25,
*/
common_stext:
.proc
.callinfo
#else
/* Clear PDC entry point - we won't use it */
stw %r0,0x10(%r0) /* MEM_RENDEZ */
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/
#ifdef CONFIG_64BIT
tophys_r1 %sp
/* Save the rfi target address */
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
std %r11, TASK_PT_GR11(%r10)
/* Switch to wide mode; Superdome doesn't support narrow PDC
** calls.
*/
1: mfia %rp /* clear upper part of pcoq */
ldo 2f-1b(%rp),%rp
depdi 0,31,32,%rp
bv (%rp)
ssm PSW_SM_W,%r0
/* Set Wide mode as the "Default" (eg for traps)
** First trap occurs *right* after (or part of) rfi for slave CPUs.
** Someday, palo might not do this for the Monarch either.
*/
2:
mfctl %cr30,%r6 /* PCX-W2 firmware bug */
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
load32 PA(stext_pdc_ret), %rp
bv (%r3)
copy %r0,%arg3
stext_pdc_ret:
mtctl %r6,%cr30 /* restore task thread info */
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
ldd TASK_PT_GR11(%r10), %r11
tovirt_r1 %sp
#endif
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1
mtsp %r0,%sr2
mtsp %r0,%sr3
/* Initialize Protection Registers */
mtctl %r0,%cr8
mtctl %r0,%cr9
mtctl %r0,%cr12
mtctl %r0,%cr13
/* Initialize the global data pointer */
loadgp
/* Set up our interrupt table. HPMCs might not work after this!
*
* We need to install the correct iva for PA1.1 or PA2.0. The
* following short sequence of instructions can determine this
* (without being illegal on a PA1.1 machine).
*/
#ifndef CONFIG_64BIT
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0
mfctl,w %cr11,%r10
.level 1.1
comib,<>,n 0,%r10,$is_pa20
ldil L%PA(fault_vector_11),%r10
b $install_iva
ldo R%PA(fault_vector_11)(%r10),%r10
$is_pa20:
.level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
load32 PA(fault_vector_20),%r10
$install_iva:
mtctl %r10,%cr14
b aligned_rfi /* Prepare to RFI! Man all the cannons! */
nop
.align 128
aligned_rfi:
pcxt_ssm_bug
copy %r3, %arg0 /* PDCE_PROC for smp_callin() */
rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */
/* Don't need NOPs, have 8 compliant insn before rfi */
mtctl %r0,%cr17 /* Clear IIASQ tail */
mtctl %r0,%cr17 /* Clear IIASQ head */
/* Load RFI target into PC queue */
mtctl %r11,%cr18 /* IIAOQ head */
ldo 4(%r11),%r11
mtctl %r11,%cr18 /* IIAOQ tail */
load32 KERNEL_PSW,%r10
mtctl %r10,%ipsw
/* Jump through hyperspace to Virt Mode */
rfi
nop
.procend
#ifdef CONFIG_SMP
.import smp_init_current_idle_task,data
.import smp_callin,code
#ifndef CONFIG_64BIT
smp_callin_rtn:
.proc
.callinfo
break 1,1 /* Break if returned from start_secondary */
nop
nop
.procend
#endif /*!CONFIG_64BIT*/
/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, registers values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
.proc
.callinfo
/*
** Initialize Space registers
*/
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Initialize the SP - monarch sets up smp_init_current_idle_task */
load32 PA(smp_init_current_idle_task),%sp
LDREG 0(%sp),%sp /* load task address */
tophys_r1 %sp
LDREG TASK_THREAD_INFO(%sp),%sp
mtctl %sp,%cr30 /* store in cr30 */
ldo THREAD_SZ_ALGN(%sp),%sp
/* point CPU to kernel page tables */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
#ifdef CONFIG_64BIT
/* Setup PDCE_PROC entry */
copy %arg0,%r3
#else
/* Load RFI *return* address in case smp_callin bails */
load32 smp_callin_rtn,%r2
#endif
/* Load RFI target address. */
load32 smp_callin,%r11
/* ok...common code can handle the rest */
b common_stext
nop
.procend
#endif /* CONFIG_SMP */
ENDPROC(parisc_kernel_start)
#ifndef CONFIG_64BIT
.section .data..read_mostly
.align 4
.export $global$,data
.type $global$,@object
.size $global$,4
$global$:
.word 0
#endif /*!CONFIG_64BIT*/
| AirFortressIlikara/LS2K0300-linux-4.19 | 14,124 | arch/parisc/kernel/syscall_table.S |
/* System Call Table
*
* Copyright (C) 1999-2004 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2005-2006 Kyle McMartin <kyle at parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
* narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
* implementation is required on wide palinux. Use ENTRY_COMP where
* the compatibility layer has a useful 32-bit implementation.
*/
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys32_##_name_
#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
#define ENTRY_OURS(_name_) .dword parisc_##_name_
#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys_##_name_
#define ENTRY_UHOH(_name_) .dword sys_##_name_
#define ENTRY_OURS(_name_) .dword sys_##_name_
#define ENTRY_COMP(_name_) .dword sys_##_name_
#else
#define ENTRY_SAME(_name_) .word sys_##_name_
#define ENTRY_DIFF(_name_) .word sys_##_name_
#define ENTRY_UHOH(_name_) .word sys_##_name_
#define ENTRY_OURS(_name_) .word parisc_##_name_
#define ENTRY_COMP(_name_) .word sys_##_name_
#endif
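/* Example expansion: on a 64-bit kernel assembling the compat (narrow)
 * table, ENTRY_COMP(open) emits ".dword compat_sys_open"; on a plain
 * 32-bit kernel the same line emits ".word sys_open". */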
90: ENTRY_SAME(restart_syscall) /* 0 */
91: ENTRY_SAME(exit)
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
ENTRY_COMP(open) /* 5 */
ENTRY_SAME(close)
ENTRY_SAME(waitpid)
ENTRY_SAME(creat)
ENTRY_SAME(link)
ENTRY_SAME(unlink) /* 10 */
ENTRY_COMP(execve)
ENTRY_SAME(chdir)
/* See comments in kernel/time.c!!! Maybe we don't need this? */
ENTRY_COMP(time)
ENTRY_SAME(mknod)
ENTRY_SAME(chmod) /* 15 */
ENTRY_SAME(lchown)
ENTRY_SAME(socket)
/* struct stat is MAYBE identical wide and narrow ?? */
ENTRY_COMP(newstat)
ENTRY_COMP(lseek)
ENTRY_SAME(getpid) /* 20 */
/* the 'void * data' parameter may need re-packing in wide */
ENTRY_COMP(mount)
/* concerned about struct sockaddr in wide/narrow */
/* ---> I think sockaddr is OK unless the compiler packs the struct */
/* differently to align the char array */
ENTRY_SAME(bind)
ENTRY_SAME(setuid)
ENTRY_SAME(getuid)
ENTRY_COMP(stime) /* 25 */
ENTRY_COMP(ptrace)
ENTRY_SAME(alarm)
/* see stat comment */
ENTRY_COMP(newfstat)
ENTRY_SAME(pause)
/* struct utimbuf uses time_t which might vary */
ENTRY_COMP(utime) /* 30 */
/* struct sockaddr... */
ENTRY_SAME(connect)
ENTRY_SAME(listen)
ENTRY_SAME(access)
ENTRY_SAME(nice)
/* struct sockaddr... */
ENTRY_SAME(accept) /* 35 */
ENTRY_SAME(sync)
ENTRY_SAME(kill)
ENTRY_SAME(rename)
ENTRY_SAME(mkdir)
ENTRY_SAME(rmdir) /* 40 */
ENTRY_SAME(dup)
ENTRY_SAME(pipe)
ENTRY_COMP(times)
/* struct sockaddr... */
ENTRY_SAME(getsockname)
/* it seems possible brk() could return a >4G pointer... */
ENTRY_SAME(brk) /* 45 */
ENTRY_SAME(setgid)
ENTRY_SAME(getgid)
ENTRY_SAME(signal)
ENTRY_SAME(geteuid)
ENTRY_SAME(getegid) /* 50 */
ENTRY_SAME(acct)
ENTRY_SAME(umount)
/* struct sockaddr... */
ENTRY_SAME(getpeername)
ENTRY_COMP(ioctl)
ENTRY_COMP(fcntl) /* 55 */
ENTRY_SAME(socketpair)
ENTRY_SAME(setpgid)
ENTRY_SAME(send)
ENTRY_SAME(newuname)
ENTRY_SAME(umask) /* 60 */
ENTRY_SAME(chroot)
ENTRY_COMP(ustat)
ENTRY_SAME(dup2)
ENTRY_SAME(getppid)
ENTRY_SAME(getpgrp) /* 65 */
ENTRY_SAME(setsid)
ENTRY_SAME(pivot_root)
/* I don't like this */
ENTRY_UHOH(sgetmask)
ENTRY_UHOH(ssetmask)
ENTRY_SAME(setreuid) /* 70 */
ENTRY_SAME(setregid)
ENTRY_SAME(mincore)
ENTRY_COMP(sigpending)
ENTRY_SAME(sethostname)
/* Following 3 have linux-common-code structs containing longs :-( */
ENTRY_COMP(setrlimit) /* 75 */
ENTRY_COMP(getrlimit)
ENTRY_COMP(getrusage)
/* struct timeval and timezone are maybe?? consistent wide and narrow */
ENTRY_COMP(gettimeofday)
ENTRY_COMP(settimeofday)
ENTRY_SAME(getgroups) /* 80 */
ENTRY_SAME(setgroups)
/* struct socketaddr... */
ENTRY_SAME(sendto)
ENTRY_SAME(symlink)
/* see stat comment */
ENTRY_COMP(newlstat)
ENTRY_SAME(readlink) /* 85 */
ENTRY_SAME(ni_syscall) /* was uselib */
ENTRY_SAME(swapon)
ENTRY_SAME(reboot)
ENTRY_SAME(mmap2)
ENTRY_SAME(mmap) /* 90 */
ENTRY_SAME(munmap)
ENTRY_COMP(truncate)
ENTRY_COMP(ftruncate)
ENTRY_SAME(fchmod)
ENTRY_SAME(fchown) /* 95 */
ENTRY_SAME(getpriority)
ENTRY_SAME(setpriority)
ENTRY_SAME(recv)
ENTRY_COMP(statfs)
ENTRY_COMP(fstatfs) /* 100 */
ENTRY_SAME(stat64)
ENTRY_SAME(ni_syscall) /* was socketcall */
ENTRY_SAME(syslog)
/* even though manpage says struct timeval contains longs, ours has
* time_t and suseconds_t -- both of which are safe wide/narrow */
ENTRY_COMP(setitimer)
ENTRY_COMP(getitimer) /* 105 */
ENTRY_SAME(capget)
ENTRY_SAME(capset)
ENTRY_OURS(pread64)
ENTRY_OURS(pwrite64)
ENTRY_SAME(getcwd) /* 110 */
ENTRY_SAME(vhangup)
ENTRY_SAME(fstat64)
ENTRY_SAME(vfork_wrapper)
/* struct rusage contains longs... */
ENTRY_COMP(wait4)
ENTRY_SAME(swapoff) /* 115 */
ENTRY_COMP(sysinfo)
ENTRY_SAME(shutdown)
ENTRY_SAME(fsync)
ENTRY_SAME(madvise)
ENTRY_SAME(clone_wrapper) /* 120 */
ENTRY_SAME(setdomainname)
ENTRY_COMP(sendfile)
/* struct sockaddr... */
ENTRY_SAME(recvfrom)
/* struct timex contains longs */
ENTRY_COMP(adjtimex)
ENTRY_SAME(mprotect) /* 125 */
/* old_sigset_t forced to 32 bits. Beware glibc sigset_t */
ENTRY_COMP(sigprocmask)
ENTRY_SAME(ni_syscall) /* create_module */
ENTRY_SAME(init_module)
ENTRY_SAME(delete_module)
ENTRY_SAME(ni_syscall) /* 130: get_kernel_syms */
/* time_t inside struct dqblk */
ENTRY_SAME(quotactl)
ENTRY_SAME(getpgid)
ENTRY_SAME(fchdir)
ENTRY_SAME(bdflush)
ENTRY_SAME(sysfs) /* 135 */
ENTRY_OURS(personality)
ENTRY_SAME(ni_syscall) /* for afs_syscall */
ENTRY_SAME(setfsuid)
ENTRY_SAME(setfsgid)
/* I think this might work */
ENTRY_SAME(llseek) /* 140 */
ENTRY_COMP(getdents)
/* it is POSSIBLE that select will be OK because even though fd_set
* contains longs, the macros and sizes are clever. */
ENTRY_COMP(select)
ENTRY_SAME(flock)
ENTRY_SAME(msync)
/* struct iovec contains pointers */
ENTRY_COMP(readv) /* 145 */
ENTRY_COMP(writev)
ENTRY_SAME(getsid)
ENTRY_SAME(fdatasync)
/* struct __sysctl_args is a mess */
ENTRY_COMP(sysctl)
ENTRY_SAME(mlock) /* 150 */
ENTRY_SAME(munlock)
ENTRY_SAME(mlockall)
ENTRY_SAME(munlockall)
/* struct sched_param is ok for now */
ENTRY_SAME(sched_setparam)
ENTRY_SAME(sched_getparam) /* 155 */
ENTRY_SAME(sched_setscheduler)
ENTRY_SAME(sched_getscheduler)
ENTRY_SAME(sched_yield)
ENTRY_SAME(sched_get_priority_max)
ENTRY_SAME(sched_get_priority_min) /* 160 */
ENTRY_COMP(sched_rr_get_interval)
ENTRY_COMP(nanosleep)
ENTRY_SAME(mremap)
ENTRY_SAME(setresuid)
ENTRY_SAME(getresuid) /* 165 */
ENTRY_COMP(sigaltstack)
ENTRY_SAME(ni_syscall) /* query_module */
ENTRY_SAME(poll)
/* structs contain pointers and an in_addr... */
ENTRY_SAME(ni_syscall) /* was nfsservctl */
ENTRY_SAME(setresgid) /* 170 */
ENTRY_SAME(getresgid)
ENTRY_SAME(prctl)
/* signals need a careful review */
ENTRY_SAME(rt_sigreturn_wrapper)
ENTRY_COMP(rt_sigaction)
ENTRY_COMP(rt_sigprocmask) /* 175 */
ENTRY_COMP(rt_sigpending)
ENTRY_COMP(rt_sigtimedwait)
/* even though the struct siginfo_t is different, it appears like
* all the paths use values which should be same wide and narrow.
* Also the struct is padded to 128 bytes which means we don't have
* to worry about faulting trying to copy in a larger 64-bit
* struct from a 32-bit user-space app.
*/
ENTRY_COMP(rt_sigqueueinfo)
ENTRY_COMP(rt_sigsuspend)
ENTRY_SAME(chown) /* 180 */
/* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
ENTRY_COMP(setsockopt)
ENTRY_COMP(getsockopt)
ENTRY_COMP(sendmsg)
ENTRY_COMP(recvmsg)
ENTRY_SAME(semop) /* 185 */
ENTRY_SAME(semget)
ENTRY_COMP(semctl)
ENTRY_COMP(msgsnd)
ENTRY_COMP(msgrcv)
ENTRY_SAME(msgget) /* 190 */
ENTRY_COMP(msgctl)
ENTRY_COMP(shmat)
ENTRY_SAME(shmdt)
ENTRY_SAME(shmget)
ENTRY_COMP(shmctl) /* 195 */
ENTRY_SAME(ni_syscall) /* streams1 */
ENTRY_SAME(ni_syscall) /* streams2 */
ENTRY_SAME(lstat64)
ENTRY_OURS(truncate64)
ENTRY_OURS(ftruncate64) /* 200 */
ENTRY_SAME(getdents64)
ENTRY_COMP(fcntl64)
ENTRY_SAME(ni_syscall) /* attrctl -- dead */
ENTRY_SAME(ni_syscall) /* acl_get -- dead */
ENTRY_SAME(ni_syscall) /* 205 (acl_set -- dead) */
ENTRY_SAME(gettid)
ENTRY_OURS(readahead)
ENTRY_SAME(tkill)
ENTRY_COMP(sendfile64)
ENTRY_COMP(futex) /* 210 */
ENTRY_COMP(sched_setaffinity)
ENTRY_COMP(sched_getaffinity)
ENTRY_SAME(ni_syscall) /* set_thread_area */
ENTRY_SAME(ni_syscall) /* get_thread_area */
ENTRY_COMP(io_setup) /* 215 */
ENTRY_SAME(io_destroy)
ENTRY_COMP(io_getevents)
ENTRY_COMP(io_submit)
ENTRY_SAME(io_cancel)
ENTRY_SAME(ni_syscall) /* 220: was alloc_hugepages */
ENTRY_SAME(ni_syscall) /* was free_hugepages */
ENTRY_SAME(exit_group)
ENTRY_COMP(lookup_dcookie)
ENTRY_SAME(epoll_create)
ENTRY_SAME(epoll_ctl) /* 225 */
ENTRY_SAME(epoll_wait)
ENTRY_SAME(remap_file_pages)
ENTRY_COMP(semtimedop)
ENTRY_COMP(mq_open)
ENTRY_SAME(mq_unlink) /* 230 */
ENTRY_COMP(mq_timedsend)
ENTRY_COMP(mq_timedreceive)
ENTRY_COMP(mq_notify)
ENTRY_COMP(mq_getsetattr)
ENTRY_COMP(waitid) /* 235 */
ENTRY_OURS(fadvise64_64)
ENTRY_SAME(set_tid_address)
ENTRY_SAME(setxattr)
ENTRY_SAME(lsetxattr)
ENTRY_SAME(fsetxattr) /* 240 */
ENTRY_SAME(getxattr)
ENTRY_SAME(lgetxattr)
ENTRY_SAME(fgetxattr)
ENTRY_SAME(listxattr)
ENTRY_SAME(llistxattr) /* 245 */
ENTRY_SAME(flistxattr)
ENTRY_SAME(removexattr)
ENTRY_SAME(lremovexattr)
ENTRY_SAME(fremovexattr)
ENTRY_COMP(timer_create) /* 250 */
ENTRY_COMP(timer_settime)
ENTRY_COMP(timer_gettime)
ENTRY_SAME(timer_getoverrun)
ENTRY_SAME(timer_delete)
ENTRY_COMP(clock_settime) /* 255 */
ENTRY_COMP(clock_gettime)
ENTRY_COMP(clock_getres)
ENTRY_COMP(clock_nanosleep)
ENTRY_SAME(tgkill)
ENTRY_COMP(mbind) /* 260 */
ENTRY_COMP(get_mempolicy)
ENTRY_COMP(set_mempolicy)
ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
ENTRY_COMP(keyctl)
ENTRY_SAME(ioprio_set)
ENTRY_SAME(ioprio_get)
ENTRY_SAME(inotify_init)
ENTRY_SAME(inotify_add_watch) /* 270 */
ENTRY_SAME(inotify_rm_watch)
ENTRY_SAME(migrate_pages)
ENTRY_COMP(pselect6)
ENTRY_COMP(ppoll)
ENTRY_COMP(openat) /* 275 */
ENTRY_SAME(mkdirat)
ENTRY_SAME(mknodat)
ENTRY_SAME(fchownat)
ENTRY_COMP(futimesat)
ENTRY_SAME(fstatat64) /* 280 */
ENTRY_SAME(unlinkat)
ENTRY_SAME(renameat)
ENTRY_SAME(linkat)
ENTRY_SAME(symlinkat)
ENTRY_SAME(readlinkat) /* 285 */
ENTRY_SAME(fchmodat)
ENTRY_SAME(faccessat)
ENTRY_SAME(unshare)
ENTRY_COMP(set_robust_list)
ENTRY_COMP(get_robust_list) /* 290 */
ENTRY_SAME(splice)
ENTRY_OURS(sync_file_range)
ENTRY_SAME(tee)
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
ENTRY_COMP(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
ENTRY_COMP(kexec_load) /* 300 */
ENTRY_COMP(utimensat)
ENTRY_COMP(signalfd)
ENTRY_SAME(ni_syscall) /* was timerfd */
ENTRY_SAME(eventfd)
ENTRY_OURS(fallocate) /* 305 */
ENTRY_SAME(timerfd_create)
ENTRY_COMP(timerfd_settime)
ENTRY_COMP(timerfd_gettime)
ENTRY_COMP(signalfd4)
ENTRY_SAME(eventfd2) /* 310 */
ENTRY_SAME(epoll_create1)
ENTRY_SAME(dup3)
ENTRY_SAME(pipe2)
ENTRY_SAME(inotify_init1)
ENTRY_COMP(preadv) /* 315 */
ENTRY_COMP(pwritev)
ENTRY_COMP(rt_tgsigqueueinfo)
ENTRY_SAME(perf_event_open)
ENTRY_COMP(recvmmsg)
ENTRY_SAME(accept4) /* 320 */
ENTRY_SAME(prlimit64)
ENTRY_SAME(fanotify_init)
ENTRY_DIFF(fanotify_mark)
ENTRY_COMP(clock_adjtime)
ENTRY_SAME(name_to_handle_at) /* 325 */
ENTRY_COMP(open_by_handle_at)
ENTRY_SAME(syncfs)
ENTRY_SAME(setns)
ENTRY_COMP(sendmmsg)
ENTRY_COMP(process_vm_readv) /* 330 */
ENTRY_COMP(process_vm_writev)
ENTRY_SAME(kcmp)
ENTRY_SAME(finit_module)
ENTRY_SAME(sched_setattr)
ENTRY_SAME(sched_getattr) /* 335 */
ENTRY_COMP(utimes)
ENTRY_SAME(renameat2)
ENTRY_SAME(seccomp)
ENTRY_SAME(getrandom)
ENTRY_SAME(memfd_create) /* 340 */
ENTRY_SAME(bpf)
ENTRY_COMP(execveat)
ENTRY_SAME(membarrier)
ENTRY_SAME(userfaultfd)
ENTRY_SAME(mlock2) /* 345 */
ENTRY_SAME(copy_file_range)
ENTRY_COMP(preadv2)
ENTRY_COMP(pwritev2)
ENTRY_SAME(statx)
ENTRY_COMP(io_pgetevents) /* 350 */
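	/* Sanity check: assuming the 90: and 91: local labels mark the
	 * first two entries of this table (defined before this excerpt),
	 * (. - 90b) is the table size in bytes and (91b - 90b) the size
	 * of one entry, so the table must hold exactly
	 * __NR_Linux_syscalls entries. */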
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
.error "size of syscall table does not fit value of __NR_Linux_syscalls"
.endif
#undef ENTRY_SAME
#undef ENTRY_DIFF
#undef ENTRY_UHOH
#undef ENTRY_COMP
#undef ENTRY_OURS
AirFortressIlikara/LS2K0300-linux-4.19 | 54,893 | arch/parisc/kernel/entry.S |
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* kernel entry points (interruptions, system call wrappers)
* Copyright (C) 1999,2000 Philipp Rumpf
* Copyright (C) 1999 SuSE GmbH Nuernberg
* Copyright (C) 2000 Hewlett-Packard (John Marvin)
* Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <asm/asm-offsets.h>
/* we have the following possibilities to act on an interruption:
* - handle in assembly and use shadowed registers only
* - save registers to kernel stack and handle in assembly or C */
#include <asm/psw.h>
#include <asm/cache.h> /* for L1_CACHE_SHIFT */
#include <asm/assembly.h> /* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <linux/linkage.h>
#ifdef CONFIG_64BIT
.level 2.0w
#else
.level 2.0
#endif
.import pa_tlb_lock,data
.macro load_pa_tlb_lock reg
#if __PA_LDCW_ALIGNMENT > 4
load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
#else
load32 PA(pa_tlb_lock), \reg
#endif
.endm
/* space_to_prot macro creates a prot id from a space id */
#if (SPACEID_SHIFT) == 0
.macro space_to_prot spc prot
depd,z \spc,62,31,\prot
.endm
#else
.macro space_to_prot spc prot
extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
.endm
#endif
/* Switch to virtual mapping, trashing only %r1 */
.macro virt_map
/* pcxt_ssm_bug */
rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation" */
mtsp %r0, %sr4
mtsp %r0, %sr5
mtsp %r0, %sr6
tovirt_r1 %r29
load32 KERNEL_PSW, %r1
rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %ipsw
load32 4f, %r1
mtctl %r1, %cr18 /* Set IIAOQ tail */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* Set IIAOQ head */
rfir
nop
4:
.endm
/*
* The "get_stack" macros are responsible for determining the
* kernel stack value.
*
* If sr7 == 0
* Already using a kernel stack, so call the
* get_stack_use_r30 macro to push a pt_regs structure
* on the stack, and store registers there.
* else
* Need to set up a kernel stack, so call the
* get_stack_use_cr30 macro to set up a pointer
* to the pt_regs structure contained within the
* task pointer pointed to by cr30. Set the stack
* pointer to point to the end of the task structure.
*
* Note that we use shadowed registers for temps until
* we can save %r26 and %r29. %r26 is used to preserve
* %r8 (a shadowed register) which temporarily contained
* either the fault type ("code") or the eirr. We need
* to use a non-shadowed register to carry the value over
* the rfir in virt_map. We use %r26 since this value winds
* up being passed as the argument to either do_cpu_irq_mask
* or handle_interruption. %r29 is used to hold a pointer
* to the register save area, and once again, it needs to
* be a non-shadowed register so that it survives the rfir.
*
* N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
*/
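/* A minimal C-style sketch of the decision described above
 * (illustrative only):
 *
 *	if (mfsp(sr7) == 0)		// already on a kernel stack
 *		get_stack_use_r30();	// push pt_regs on current stack
 *	else
 *		get_stack_use_cr30();	// regs area lives in the task struct
 */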
.macro get_stack_use_cr30
/* we save the registers in the task struct */
copy %r30, %r17
mfctl %cr30, %r1
ldo THREAD_SZ_ALGN(%r1), %r30
mtsp %r0,%sr7
mtsp %r16,%sr3
tophys %r1,%r9
LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
tophys %r1,%r9
ldo TASK_REGS(%r9),%r9
STREG %r17,PT_GR30(%r9)
STREG %r29,PT_GR29(%r9)
STREG %r26,PT_GR26(%r9)
STREG %r16,PT_SR7(%r9)
copy %r9,%r29
.endm
.macro get_stack_use_r30
/* we put a struct pt_regs on the stack and save the registers there */
tophys %r30,%r9
copy %r30,%r1
ldo PT_SZ_ALGN(%r30),%r30
STREG %r1,PT_GR30(%r9)
STREG %r29,PT_GR29(%r9)
STREG %r26,PT_GR26(%r9)
STREG %r16,PT_SR7(%r9)
copy %r9,%r29
.endm
.macro rest_stack
LDREG PT_GR1(%r29), %r1
LDREG PT_GR30(%r29),%r30
LDREG PT_GR29(%r29),%r29
.endm
/* default interruption handler
* (calls traps.c:handle_interruption) */
.macro def code
b intr_save
ldi \code, %r8
.align 32
.endm
/* Interrupt interruption handler
* (calls irq.c:do_cpu_irq_mask) */
.macro extint code
b intr_extint
mfsp %sr7,%r16
.align 32
.endm
.import os_hpmc, code
/* HPMC handler */
.macro hpmc code
nop /* must be a NOP, will be patched later */
load32 PA(os_hpmc), %r3
bv,n 0(%r3)
nop
.word 0 /* checksum (will be patched) */
.word 0 /* address of handler */
.word 0 /* length of handler */
.endm
/*
* Performance Note: Instructions will be moved up into
* this part of the code later on, once we are sure
* that the tlb miss handlers are close to final form.
*/
/* Register definitions for tlb miss handler macros */
va = r8 /* virtual address for which the trap occurred */
spc = r24 /* space for which the trap occurred */
#ifndef CONFIG_64BIT
/*
* itlb miss interruption handler (parisc 1.1 - 32 bit)
*/
.macro itlb_11 code
mfctl %pcsq, spc
b itlb_miss_11
mfctl %pcoq, va
.align 32
.endm
#endif
/*
* itlb miss interruption handler (parisc 2.0)
*/
.macro itlb_20 code
mfctl %pcsq, spc
#ifdef CONFIG_64BIT
b itlb_miss_20w
#else
b itlb_miss_20
#endif
mfctl %pcoq, va
.align 32
.endm
#ifndef CONFIG_64BIT
/*
* naitlb miss interruption handler (parisc 1.1 - 32 bit)
*/
.macro naitlb_11 code
mfctl %isr,spc
b naitlb_miss_11
mfctl %ior,va
.align 32
.endm
#endif
/*
* naitlb miss interruption handler (parisc 2.0)
*/
.macro naitlb_20 code
mfctl %isr,spc
#ifdef CONFIG_64BIT
b naitlb_miss_20w
#else
b naitlb_miss_20
#endif
mfctl %ior,va
.align 32
.endm
#ifndef CONFIG_64BIT
/*
* dtlb miss interruption handler (parisc 1.1 - 32 bit)
*/
.macro dtlb_11 code
mfctl %isr, spc
b dtlb_miss_11
mfctl %ior, va
.align 32
.endm
#endif
/*
* dtlb miss interruption handler (parisc 2.0)
*/
.macro dtlb_20 code
mfctl %isr, spc
#ifdef CONFIG_64BIT
b dtlb_miss_20w
#else
b dtlb_miss_20
#endif
mfctl %ior, va
.align 32
.endm
#ifndef CONFIG_64BIT
/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
.macro nadtlb_11 code
mfctl %isr,spc
b nadtlb_miss_11
mfctl %ior,va
.align 32
.endm
#endif
/* nadtlb miss interruption handler (parisc 2.0) */
.macro nadtlb_20 code
mfctl %isr,spc
#ifdef CONFIG_64BIT
b nadtlb_miss_20w
#else
b nadtlb_miss_20
#endif
mfctl %ior,va
.align 32
.endm
#ifndef CONFIG_64BIT
/*
* dirty bit trap interruption handler (parisc 1.1 - 32 bit)
*/
.macro dbit_11 code
mfctl %isr,spc
b dbit_trap_11
mfctl %ior,va
.align 32
.endm
#endif
/*
* dirty bit trap interruption handler (parisc 2.0)
*/
.macro dbit_20 code
mfctl %isr,spc
#ifdef CONFIG_64BIT
b dbit_trap_20w
#else
b dbit_trap_20
#endif
mfctl %ior,va
.align 32
.endm
/* In LP64, the space contains part of the upper 32 bits of the
* fault. We have to extract this and place it in the va,
* zeroing the corresponding bits in the space register */
.macro space_adjust spc,va,tmp
#ifdef CONFIG_64BIT
extrd,u \spc,63,SPACEID_SHIFT,\tmp
depd %r0,63,SPACEID_SHIFT,\spc
depd \tmp,31,SPACEID_SHIFT,\va
#endif
.endm
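/* Rough C equivalent of the 64-bit case (sketch; bit positions follow
 * the left-to-right PA numbering used above):
 *
 *	tmp = spc & ((1UL << SPACEID_SHIFT) - 1);	// upper va bits
 *	spc &= ~((1UL << SPACEID_SHIFT) - 1);		// zero them in spc
 *	va  |= tmp << 32;				// merge above low word
 */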
.import swapper_pg_dir,code
/* Get the pgd. For faults on space zero (kernel space), this
* is simply swapper_pg_dir. For user space faults, the
* pgd is stored in %cr25 */
.macro get_pgd spc,reg
ldil L%PA(swapper_pg_dir),\reg
ldo R%PA(swapper_pg_dir)(\reg),\reg
or,COND(=) %r0,\spc,%r0
mfctl %cr25,\reg
.endm
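/* In C terms (sketch):  pgd = (spc == 0) ? PA(swapper_pg_dir)
 *					  : mfctl(cr25);
 * the or,COND(=) nullifies the mfctl when the space is zero. */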
/*
space_check(spc,tmp,fault)
spc - The space we saw the fault with.
tmp - The place to store the current space.
fault - Function to call on failure.
Only allow faults on different spaces from the
currently active one if we're the kernel
*/
.macro space_check spc,tmp,fault
mfsp %sr7,\tmp
or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
* as kernel, so defeat the space
* check if it is */
copy \spc,\tmp
or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
cmpb,COND(<>),n \tmp,\spc,\fault
.endm
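/* C sketch of the check above (illustrative):
 *
 *	tmp = mfsp(sr7);
 *	if (spc == 0)			// kernel-space fault: no check
 *		tmp = 0;
 *	if (tmp != 0 && tmp != spc)	// user fault outside its own space
 *		goto fault;
 *
 * tmp == 0 also covers user code executing the gateway page as kernel. */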
/* Look up a PTE in a 2-Level scheme (faulting at each
* level if the entry isn't present
*
* NOTE: we use ldw even for LP64, since the short pointers
* can address up to 1TB
*/
.macro L2_ptep pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#else
# if PAGE_SIZE > 4096
extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
# else
extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# endif
# endif
#endif
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
copy %r0,\pte
ldw,s \index(\pmd),\pmd
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
copy \pmd,%r9
SHLREG %r9,PxD_VALUE_SHIFT,\pmd
extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
LDREG %r0(\pmd),\pte
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
.endm
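/* Rough C equivalent of the two-level walk (helper names illustrative,
 * exact bit fields elided):
 *
 *	pmd = pmd_base[pmd_index(va)];			// ldw even on LP64
 *	if (!(pmd & _PxD_PRESENT))
 *		goto fault;
 *	pmd &= ~((1 << PxD_FLAG_SHIFT) - 1);		// strip flags
 *	pmd <<= PxD_VALUE_SHIFT;			// to physical addr
 *	pte = ((pte_t *)pmd)[pte_index(va)];
 *	if (!(pte & _PAGE_PRESENT))
 *		goto fault;
 */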
/* Look up PTE in a 3-Level scheme.
*
* Here we implement a Hybrid L2/L3 scheme: we allocate the
* first pmd adjacent to the pgd. This means that we can
* subtract a constant offset to get to it. The pmd and pgd
* sizes are arranged so that a single pmd covers 4GB (giving
* a full LP64 process access to 8TB) so our lookups are
* effectively L2 for the first 4GB of the kernel (i.e. for
* all ILP32 processes and all the kernel for machines with
* under 4GB of memory) */
.macro L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
copy %r0,\pte
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldw,s \index(\pgd),\pgd
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
shld \pgd,PxD_VALUE_SHIFT,\index
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
copy \index,\pgd
extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
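/* Sketch of the hybrid step (illustrative): below 4GB the pmd sits at
 * a constant offset from the pgd, so the extra level folds away:
 *
 *	if (pgd_index(va) != 0) {		// above the first 4GB
 *		pgd = pgd_base[pgd_index(va)];	// real third-level step
 *		if (!(pgd & _PxD_PRESENT))
 *			goto fault;
 *		pgd = deflag_and_shift(pgd);
 *	} else
 *		pgd += ASM_PGD_PMD_OFFSET;	// use the adjacent pmd
 *	then fall into the L2_ptep walk above
 */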
/* Acquire pa_tlb_lock lock and recheck page is still present. */
.macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
cmpib,COND(=),n 0,\spc,2f
load_pa_tlb_lock \tmp
1: LDCW 0(\tmp),\tmp1
cmpib,COND(=) 0,\tmp1,1b
nop
LDREG 0(\ptp),\pte
bb,<,n \pte,_PAGE_PRESENT_BIT,2f
b \fault
stw \spc,0(\tmp)
2:
#endif
.endm
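/* C sketch of tlb_lock (SMP only, names illustrative): spin on the
 * ldcw semaphore, then re-read the pte under the lock; if it went
 * non-present meanwhile, release and take the fault path:
 *
 *	if (spc != 0) {				// user space only
 *		while (ldcw(&pa_tlb_lock) == 0)	// 0 == already held
 *			;
 *		pte = *ptp;
 *		if (!(pte & _PAGE_PRESENT)) {
 *			pa_tlb_lock = spc;	// unlock (nonzero store)
 *			goto fault;
 *		}
 *	}
 */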
/* Release pa_tlb_lock lock without reloading lock address. */
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
sync
or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
#endif
.endm
/* Release pa_tlb_lock lock. */
.macro tlb_unlock1 spc,tmp
#ifdef CONFIG_SMP
load_pa_tlb_lock \tmp
tlb_unlock0 \spc,\tmp
#endif
.endm
/* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
* don't needlessly dirty the cache line if it was already set */
.macro update_accessed ptp,pte,tmp,tmp1
ldi _PAGE_ACCESSED,\tmp1
or \tmp1,\pte,\tmp
and,COND(<>) \tmp1,\pte,%r0
STREG \tmp,0(\ptp)
.endm
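/* C sketch: the and,COND(<>) nullifies the store when the bit is
 * already set, so the cache line is not dirtied needlessly:
 *
 *	tmp = pte | _PAGE_ACCESSED;
 *	if (!(pte & _PAGE_ACCESSED))
 *		*ptp = tmp;
 */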
/* Set the dirty bit (and accessed bit). No need to be
* clever, this is only used from the dirty fault */
.macro update_dirty ptp,pte,tmp
ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
or \tmp,\pte,\pte
STREG \pte,0(\ptp)
.endm
/* We have (depending on the page size):
* - 38 to 52-bit Physical Page Number
* - 12 to 26-bit page offset
*/
/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
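/* Worked example: with 16kB kernel pages, PAGE_SHIFT is 14, so
 * PAGE_ADD_SHIFT is 2 and a kernel PFN is shifted up by two bits to
 * express the same page address in the CPU TLB's 4k units. */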
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
.macro convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
copy \pte,\tmp
extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
#endif
.endm
/* Convert the pte and prot to tlb insertion values. How
* this happens is quite subtle, read below */
.macro make_insert_tlb spc,pte,prot,tmp
space_to_prot \spc \prot /* create prot id from space */
/* The following is the real subtlety. This is depositing
* T <-> _PAGE_REFTRAP
* D <-> _PAGE_DIRTY
* B <-> _PAGE_DMB (memory break)
*
* Then incredible subtlety: The access rights are
* _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
* See 3-14 of the parisc 2.0 manual
*
* Finally, _PAGE_READ goes in the top bit of PL1 (so we
* trigger an access rights trap in user space if the user
* tries to read an unreadable page */
depd \pte,8,7,\prot
/* PAGE_USER indicates the page can be read with user privileges,
* so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
* contains _PAGE_READ) */
extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
depdi 7,11,3,\prot
/* If we're a gateway page, drop PL2 back to zero for promotion
* to kernel privilege (so we can execute the page as kernel).
* Any privilege promotion page always denies read and write */
extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
/* Enforce uncacheable pages.
* This should ONLY be used for MMIO on PA 2.0 machines.
* Memory/DMA is cache coherent on all PA2.0 machines we support
* (that means T-class is NOT supported) and the memory controllers
* on most of those machines only handle cache transactions.
*/
extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
depdi 1,12,1,\prot
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
convert_for_tlb_insert20 \pte \tmp
.endm
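/* Condensed field sketch of the prot word assembled above (PA 2.0,
 * pseudocode only; see the comments in the macro for the details):
 *
 *	prot  = space_to_prot(spc);		// protection id
 *	prot |= T/D/B bits taken from pte;	// depd pte,8,7
 *	if (pte & _PAGE_USER)
 *		prot |= PL1|PL2 user bits;	// user-readable page
 *	if (pte & _PAGE_GATEWAY)
 *		prot &= ~PL2;			// allow priv promotion
 *	if (pte & _PAGE_NO_CACHE)
 *		prot |= U;			// uncacheable (MMIO)
 */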
/* Identical macro to make_insert_tlb above, except it
* makes the tlb entry for the differently formatted pa11
* insertion instructions */
.macro make_insert_tlb_11 spc,pte,prot
zdep \spc,30,15,\prot
dep \pte,8,7,\prot
extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
depi 1,12,1,\prot
extru,= \pte,_PAGE_USER_BIT,1,%r0
depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
/* Get rid of prot bits and convert to page addr for iitlba */
depi 0,31,ASM_PFN_PTE_SHIFT,\pte
SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
.endm
/* This is for ILP32 PA2.0 only. The TLB insertion needs
* to extend into I/O space if the address is 0xfXXXXXXX
* so we extend the f's into the top word of the pte in
* this case */
.macro f_extend pte,tmp
extrd,s \pte,42,4,\tmp
addi,<> 1,\tmp,%r0
extrd,s \pte,63,25,\pte
.endm
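/* C-ish sketch (illustrative): when the sign-extracted top nibble of
 * the 32-bit page address is all ones (0xfXXXXXXX), sign-extend so the
 * f's fill the upper word and the TLB entry reaches I/O space:
 *
 *	if (sign_extract(pte, top_nibble) == -1)
 *		pte = sign_extend_32_to_64(pte);
 */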
/* The alias region is an 8MB aligned 16MB to do clear and
* copy user pages at addresses congruent with the user
* virtual address.
*
* To use the alias page, you set %r26 up with the "to" TLB
* entry (identifying the physical page) and %r23 up with
* the "from" TLB entry (or nothing if only a "to" entry---for
* clear_user_page_asm) */
.macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
cmpib,COND(<>),n 0,\spc,\fault
ldil L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
/* on LP64, ldi will sign extend into the upper 32 bits,
* which is behaviour we don't want */
depdi 0,31,32,\tmp
#endif
copy \va,\tmp1
depi 0,31,23,\tmp1
cmpb,COND(<>),n \tmp,\tmp1,\fault
mfctl %cr19,\tmp /* iir */
/* get the opcode (first six bits) into \tmp */
extrw,u \tmp,5,6,\tmp
/*
* Only setting the T bit prevents data cache movein
* Setting access rights to zero prevents instruction cache movein
*
* Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
* to type field and _PAGE_READ goes to top bit of PL1
*/
ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
/*
* so if the opcode is one (i.e. this is a memory management
* instruction) nullify the next load so \prot is only T.
* Otherwise this is a normal data operation
*/
cmpiclr,= 0x01,\tmp,%r0
ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
depd,z \prot,8,7,\prot
.else
.ifc \patype,11
depw,z \prot,8,7,\prot
.else
.error "undefined PA type to do_alias"
.endif
.endif
/*
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.
*/
#ifdef CONFIG_64BIT
extrd,u,*= \va,41,1,%r0
#else
extrw,u,= \va,9,1,%r0
#endif
or,COND(tr) %r23,%r0,\pte
or %r26,%r0,\pte
.endm
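/* Condensed C sketch of do_alias (illustrative):
 *
 *	if (spc != 0 || (va & ~0x7fffffUL) != TMPALIAS_MAP_START)
 *		goto fault;
 *	opcode = iir >> 26;
 *	prot = (opcode == 1) ? T_only		 // mm insn: block movein
 *			     : DIRTY|READ|WRITE; // normal data access
 *	pte = va_selects_from ? from_entry(r23) : to_entry(r26);
 */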
/*
* Fault_vectors are architecturally required to be aligned on a 2K
* boundary
*/
.section .text.hot
.align 2048
ENTRY(fault_vector_20)
/* First vector is invalid (0) */
.ascii "cows can fly"
.byte 0
.align 32
hpmc 1
def 2
def 3
extint 4
def 5
itlb_20 PARISC_ITLB_TRAP
def 7
def 8
def 9
def 10
def 11
def 12
def 13
def 14
dtlb_20 15
naitlb_20 16
nadtlb_20 17
def 18
def 19
dbit_20 20
def 21
def 22
def 23
def 24
def 25
def 26
def 27
def 28
def 29
def 30
def 31
END(fault_vector_20)
#ifndef CONFIG_64BIT
.align 2048
ENTRY(fault_vector_11)
/* First vector is invalid (0) */
.ascii "cows can fly"
.byte 0
.align 32
hpmc 1
def 2
def 3
extint 4
def 5
itlb_11 PARISC_ITLB_TRAP
def 7
def 8
def 9
def 10
def 11
def 12
def 13
def 14
dtlb_11 15
naitlb_11 16
nadtlb_11 17
def 18
def 19
dbit_11 20
def 21
def 22
def 23
def 24
def 25
def 26
def 27
def 28
def 29
def 30
def 31
END(fault_vector_11)
#endif
/* Fault vector is separately protected and *must* be on its own page */
.align PAGE_SIZE
.import handle_interruption,code
.import do_cpu_irq_mask,code
/*
* Child Returns here
*
* copy_thread moved args into task save area.
*/
ENTRY(ret_from_kernel_thread)
/* Call schedule_tail first though */
BL schedule_tail, %r2
nop
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
LDREG TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
LDREG TASK_PT_GR27(%r1), %r27
#endif
LDREG TASK_PT_GR26(%r1), %r1
ble 0(%sr7, %r1)
copy %r31, %r2
b finish_child_return
nop
END(ret_from_kernel_thread)
/*
* struct task_struct *_switch_to(struct task_struct *prev,
* struct task_struct *next)
*
* switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
STREG %r2, -RP_OFFSET(%r30)
callee_save_float
callee_save
load32 _switch_to_ret, %r2
STREG %r2, TASK_PT_KPC(%r26)
LDREG TASK_PT_KPC(%r25), %r2
STREG %r30, TASK_PT_KSP(%r26)
LDREG TASK_PT_KSP(%r25), %r30
LDREG TASK_THREAD_INFO(%r25), %r25
bv %r0(%r2)
mtctl %r25,%cr30
ENTRY(_switch_to_ret)
mtctl %r0, %cr0 /* Needed for single stepping */
callee_rest
callee_rest_float
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
ENDPROC_CFI(_switch_to)
/*
* Common rfi return path for interruptions, kernel execve, and
* sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
* return via this path if the signal was received when the process
* was running; if the process was blocked on a syscall then the
* normal syscall_exit path is used. All syscalls for traced
* processes exit via intr_restore.
*
* XXX If any syscalls that change a process's space id ever exit
* this way, then we will need to copy %sr3 in to PT_SR[3..7], and
* adjust IASQ[0..1].
*
*/
.align PAGE_SIZE
ENTRY_CFI(syscall_exit_rfi)
mfctl %cr30,%r16
LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
ldo TASK_REGS(%r16),%r16
/* Force iaoq to userspace, as the user has had access to our current
* context via sigcontext. Also Filter the PSW for the same reason.
*/
LDREG PT_IAOQ0(%r16),%r19
depi 3,31,2,%r19
STREG %r19,PT_IAOQ0(%r16)
LDREG PT_IAOQ1(%r16),%r19
depi 3,31,2,%r19
STREG %r19,PT_IAOQ1(%r16)
LDREG PT_PSW(%r16),%r19
load32 USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
load32 USER_PSW_HI_MASK,%r20
depd %r20,31,32,%r1
#endif
and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
load32 USER_PSW,%r1
or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
STREG %r19,PT_PSW(%r16)
/*
* If we aren't being traced, we never saved space registers
* (we don't store them in the sigcontext), so set them
* to "proper" values now (otherwise we'll wind up restoring
* whatever was last stored in the task structure, which might
* be inconsistent if an interrupt occurred while on the gateway
* page). Note that we may be "trashing" values the user put in
* them, but we don't support the user changing them.
*/
STREG %r0,PT_SR2(%r16)
mfsp %sr3,%r19
STREG %r19,PT_SR0(%r16)
STREG %r19,PT_SR1(%r16)
STREG %r19,PT_SR3(%r16)
STREG %r19,PT_SR4(%r16)
STREG %r19,PT_SR5(%r16)
STREG %r19,PT_SR6(%r16)
STREG %r19,PT_SR7(%r16)
ENTRY(intr_return)
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
.import do_notify_resume,code
intr_check_sig:
/* As above */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19
ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
and,COND(<>) %r19, %r20, %r0
b,n intr_restore /* skip past if we've nothing to do */
/* This check is critical to having LWS
* working. The IASQ is zero on the gateway
* page and we cannot deliver any signals until
* we get off the gateway page.
*
* Only do signals if we are returning to user space
*/
LDREG PT_IASQ0(%r16), %r20
cmpib,COND(=),n 0,%r20,intr_restore /* backward */
LDREG PT_IASQ1(%r16), %r20
cmpib,COND(=),n 0,%r20,intr_restore /* backward */
/* NOTE: We need to enable interrupts if we have to deliver
* signals. We used to do this earlier but it caused kernel
* stack overflows. */
ssm PSW_SM_I, %r0
copy %r0, %r25 /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_notify_resume,%r2
copy %r16, %r26 /* struct pt_regs *regs */
b,n intr_check_sig
intr_restore:
copy %r16,%r29
ldo PT_FR31(%r29),%r1
rest_fp %r1
rest_general %r29
/* inverse of virt_map */
pcxt_ssm_bug
rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
tophys_r1 %r29
/* Restore space id's and special cr's from PT_REGS
* structure pointed to by r29
*/
rest_specials %r29
/* IMPORTANT: rest_stack restores r29 last (we are using it)!
* It also restores r1 and r30.
*/
rest_stack
rfi
nop
#ifndef CONFIG_PREEMPT
# define intr_do_preempt intr_restore
#endif /* !CONFIG_PREEMPT */
.import schedule,code
intr_do_resched:
/* Only call schedule on return to userspace. If we're returning
* to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
* we jump back to intr_restore.
*/
LDREG PT_IASQ0(%r16), %r20
cmpib,COND(=) 0, %r20, intr_do_preempt
nop
LDREG PT_IASQ1(%r16), %r20
cmpib,COND(=) 0, %r20, intr_do_preempt
nop
/* NOTE: We need to enable interrupts if we schedule. We used
* to do this earlier but it caused kernel stack overflows. */
ssm PSW_SM_I, %r0
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldil L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
b schedule
#else
load32 schedule, %r20
bv %r0(%r20)
#endif
ldo R%intr_check_sig(%r2), %r2
/* preempt the current task on returning to kernel
* mode from an interrupt, iff need_resched is set,
* and preempt_count is 0. otherwise, we continue on
* our merry way back to the current running task.
*/
#ifdef CONFIG_PREEMPT
.import preempt_schedule_irq,code
intr_do_preempt:
rsm PSW_SM_I, %r0 /* disable interrupts */
/* current_thread_info()->preempt_count */
mfctl %cr30, %r1
LDREG TI_PRE_COUNT(%r1), %r19
cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
nop /* prev insn branched backwards */
/* check if we interrupted a critical path */
LDREG PT_PSW(%r16), %r20
bb,<,n %r20, 31 - PSW_SM_I, intr_restore
nop
BL preempt_schedule_irq, %r2
nop
b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */
/*
* External interrupts.
*/
intr_extint:
cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
b,n 2f
1:
get_stack_use_r30
2:
save_specials %r29
virt_map
save_general %r29
ldo PT_FR0(%r29), %r24
save_fp %r24
loadgp
copy %r29, %r26 /* arg0 is pt_regs */
copy %r29, %r16 /* save pt_regs */
ldil L%intr_return, %r2
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
ENTRY_CFI(intr_save) /* for os_hpmc */
mfsp %sr7,%r16
cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
b 2f
copy %r8,%r26
1:
get_stack_use_r30
copy %r8,%r26
2:
save_specials %r29
/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
mfctl %isr, %r16
nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
mfctl %ior, %r17
#ifdef CONFIG_64BIT
/*
* If the interrupted code was running with W bit off (32 bit),
* clear the b bits (bits 0 & 1) in the ior.
* save_specials left ipsw value in r8 for us to test.
*/
extrd,u,*<> %r8,PSW_W_BIT,1,%r0
depdi 0,1,2,%r17
/* adjust isr/ior: get high bits from isr and deposit in ior */
space_adjust %r16,%r17,%r1
#endif
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
#if 0 && defined(CONFIG_64BIT)
/* Revisit when we have 64-bit code above 4Gb */
b,n intr_save2
skip_save_ior:
/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
* need to adjust iasq/iaoq here in the same way we adjusted isr/ior
* above.
*/
extrd,u,* %r8,PSW_W_BIT,1,%r1
cmpib,COND(=),n 1,%r1,intr_save2
LDREG PT_IASQ0(%r29), %r16
LDREG PT_IAOQ0(%r29), %r17
/* adjust iasq/iaoq */
space_adjust %r16,%r17,%r1
STREG %r16, PT_IASQ0(%r29)
STREG %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif
intr_save2:
virt_map
save_general %r29
ldo PT_FR0(%r29), %r25
save_fp %r25
loadgp
copy %r29, %r25 /* arg1 is pt_regs */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldil L%intr_check_sig, %r2
copy %r25, %r16 /* save pt_regs */
b handle_interruption
ldo R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)
/*
* Note for all tlb miss handlers:
*
* cr24 contains a pointer to the kernel address space
* page directory.
*
* cr25 contains a pointer to the current user address
* space page directory.
*
* sr3 will contain the space id of the user address space
* of the current running thread while that thread is
* running in the kernel.
*/
/*
* register number allocations. Note that these are all
* in the shadowed registers
*/
t0 = r1 /* temporary register 0 */
va = r8 /* virtual address for which the trap occurred */
t1 = r9 /* temporary register 1 */
pte = r16 /* pte/phys page # */
prot = r17 /* prot bits */
spc = r24 /* space for which the trap occurred */
ptp = r25 /* page directory/page table pointer */
#ifdef CONFIG_64BIT
dtlb_miss_20w:
space_adjust spc,va,t0
get_pgd spc,ptp
space_check spc,t0,dtlb_fault
L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
tlb_unlock1 spc,t0
rfir
nop
dtlb_check_alias_20w:
do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
idtlbt pte,prot
rfir
nop
nadtlb_miss_20w:
space_adjust spc,va,t0
get_pgd spc,ptp
space_check spc,t0,nadtlb_fault
L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
tlb_unlock1 spc,t0
rfir
nop
nadtlb_check_alias_20w:
do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
idtlbt pte,prot
rfir
nop
#else
dtlb_miss_11:
get_pgd spc,ptp
space_check spc,t0,dtlb_fault
L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
idtlba pte,(%sr1,va)
idtlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0
rfir
nop
dtlb_check_alias_11:
do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
idtlba pte,(va)
idtlbp prot,(va)
rfir
nop
nadtlb_miss_11:
get_pgd spc,ptp
space_check spc,t0,nadtlb_fault
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
idtlba pte,(%sr1,va)
idtlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0
rfir
nop
nadtlb_check_alias_11:
do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
idtlba pte,(va)
idtlbp prot,(va)
rfir
nop
dtlb_miss_20:
space_adjust spc,va,t0
get_pgd spc,ptp
space_check spc,t0,dtlb_fault
L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
idtlbt pte,prot
tlb_unlock1 spc,t0
rfir
nop
dtlb_check_alias_20:
do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
idtlbt pte,prot
rfir
nop
nadtlb_miss_20:
get_pgd spc,ptp
space_check spc,t0,nadtlb_fault
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
idtlbt pte,prot
tlb_unlock1 spc,t0
rfir
nop
nadtlb_check_alias_20:
do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
idtlbt pte,prot
rfir
nop
#endif
nadtlb_emulate:
/*
* Non access misses can be caused by fdc,fic,pdc,lpa,probe and
* probei instructions. We don't want to fault for these
* instructions (not only does it not make sense, it can cause
* deadlocks, since some flushes are done with the mmap
* semaphore held). If the translation doesn't exist, we can't
* insert a translation, so have to emulate the side effects
* of the instruction. Since we don't insert a translation
* we can get a lot of faults during a flush loop, so it makes
* sense to try to do it here with minimum overhead. We only
* emulate fdc,fic,pdc,probew,prober instructions whose base
* and index registers are not shadowed. We defer everything
* else to the "slow" path.
*/
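/* Decision sketch for the fast-path emulation below (illustrative):
 *
 *	iir = mfctl(cr19);
 *	if (opcode_group(iir) == 0x280) {	// fdc,fdce,pdc,"fic,4f"
 *		if (m_bit(iir)) {		// base modification form
 *			x = get_register(index_reg(iir));
 *			b = get_register(base_reg(iir));
 *			if (x == -1 || b == -1)	// shadowed: slow path
 *				goto nadtlb_fault;
 *			set_register(base_reg(iir), b + x);
 *		}
 *		nullify_and_rfir();
 *	} else if (is_probe(iir)) {		// probe,[rw]
 *		if (register_is_shadowed(target_reg(iir)))
 *			goto nadtlb_fault;
 *		set_register(target_reg(iir), 0); // report "no access"
 *		nullify_and_rfir();
 *	} else
 *		goto nadtlb_fault;
 */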
mfctl %cr19,%r9 /* Get iir */
/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew,probeiw */
/* Checks for fdc,fdce,pdc,"fic,4f" only */
ldi 0x280,%r16
and %r9,%r16,%r17
cmpb,<>,n %r16,%r17,nadtlb_probe_check
bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
BL get_register,%r25
extrw,u %r9,15,5,%r8 /* Get index register # */
cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
copy %r1,%r24
BL get_register,%r25
extrw,u %r9,10,5,%r8 /* Get base register # */
cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
BL set_register,%r25
add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
nadtlb_nullify:
mfctl %ipsw,%r8
ldil L%PSW_N,%r9
or %r8,%r9,%r8 /* Set PSW_N */
mtctl %r8,%ipsw
rfir
nop
/*
When there is no translation for the probe address then we
must nullify the insn and return zero in the target register.
This will indicate to the calling code that it does not have
write/read privileges to this address.
This should technically work for prober and probew in PA 1.1,
and also probe,r and probe,w in PA 2.0
WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
*/
nadtlb_probe_check:
ldi 0x80,%r16
and %r9,%r16,%r17
cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
BL get_register,%r25 /* Find the target register */
extrw,u %r9,31,5,%r8 /* Get target register */
cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
BL set_register,%r25
copy %r0,%r1 /* Write zero to target register */
b nadtlb_nullify /* Nullify return insn */
nop
#ifdef CONFIG_64BIT
itlb_miss_20w:
/*
* An I-side miss is a little different, since we allow users to fault
* on the gateway page which is in the kernel address space.
*/
space_adjust spc,va,t0
get_pgd spc,ptp
space_check spc,t0,itlb_fault
L3_ptep ptp,pte,t0,va,itlb_fault
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
iitlbt pte,prot
tlb_unlock1 spc,t0
rfir
nop
naitlb_miss_20w:
/*
* An I-side miss is a little different, since we allow users to fault
* on the gateway page which is in the kernel address space.
*/
space_adjust spc,va,t0
get_pgd spc,ptp
space_check spc,t0,naitlb_fault
L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
iitlbt pte,prot
tlb_unlock1 spc,t0
rfir
nop
naitlb_check_alias_20w:
do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
iitlbt pte,prot
rfir
nop
#else
itlb_miss_11:
get_pgd spc,ptp
space_check spc,t0,itlb_fault
L2_ptep ptp,pte,t0,va,itlb_fault
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
iitlba pte,(%sr1,va)
iitlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0
rfir
nop
naitlb_miss_11:
get_pgd spc,ptp
space_check spc,t0,naitlb_fault
L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
iitlba pte,(%sr1,va)
iitlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0
rfir
nop
naitlb_check_alias_11:
do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
iitlba pte,(%sr0, va)
iitlbp prot,(%sr0, va)
rfir
nop
itlb_miss_20:
get_pgd spc,ptp
space_check spc,t0,itlb_fault
L2_ptep ptp,pte,t0,va,itlb_fault
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
iitlbt pte,prot
tlb_unlock1 spc,t0
rfir
nop
naitlb_miss_20:
get_pgd spc,ptp
space_check spc,t0,naitlb_fault
L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
iitlbt pte,prot
tlb_unlock1 spc,t0
rfir
nop
naitlb_check_alias_20:
do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
iitlbt pte,prot
rfir
nop
#endif
#ifdef CONFIG_64BIT
dbit_trap_20w:
space_adjust spc,va,t0
get_pgd spc,ptp
space_check spc,t0,dbit_fault
L3_ptep ptp,pte,t0,va,dbit_fault
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
tlb_unlock0 spc,t0
rfir
nop
#else
dbit_trap_11:
get_pgd spc,ptp
space_check spc,t0,dbit_fault
L2_ptep ptp,pte,t0,va,dbit_fault
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
idtlba pte,(%sr1,va)
idtlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock0 spc,t0
rfir
nop
dbit_trap_20:
get_pgd spc,ptp
space_check spc,t0,dbit_fault
L2_ptep ptp,pte,t0,va,dbit_fault
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
idtlbt pte,prot
tlb_unlock0 spc,t0
rfir
nop
#endif
.import handle_interruption,code
kernel_bad_space:
b intr_save
ldi 31,%r8 /* Use an unused code */
dbit_fault:
b intr_save
ldi 20,%r8
itlb_fault:
b intr_save
ldi 6,%r8
nadtlb_fault:
b intr_save
ldi 17,%r8
naitlb_fault:
b intr_save
ldi 16,%r8
dtlb_fault:
b intr_save
ldi 15,%r8
/* Register saving semantics for system calls:
%r1 clobbered by system call macro in userspace
%r2 saved in PT_REGS by gateway page
%r3 - %r18 preserved by C code (saved by signal code)
%r19 - %r20 saved in PT_REGS by gateway page
%r21 - %r22 non-standard syscall args
stored in kernel stack by gateway page
%r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
%r27 - %r30 saved in PT_REGS by gateway page
%r31 syscall return pointer
*/
/* Floating point registers (FIXME: what do we do with these?)
%fr0 - %fr3 status/exception, not preserved
%fr4 - %fr7 arguments
%fr8 - %fr11 not preserved by C code
%fr12 - %fr21 preserved by C code
%fr22 - %fr31 not preserved by C code
*/
.macro reg_save regs
STREG %r3, PT_GR3(\regs)
STREG %r4, PT_GR4(\regs)
STREG %r5, PT_GR5(\regs)
STREG %r6, PT_GR6(\regs)
STREG %r7, PT_GR7(\regs)
STREG %r8, PT_GR8(\regs)
STREG %r9, PT_GR9(\regs)
STREG %r10,PT_GR10(\regs)
STREG %r11,PT_GR11(\regs)
STREG %r12,PT_GR12(\regs)
STREG %r13,PT_GR13(\regs)
STREG %r14,PT_GR14(\regs)
STREG %r15,PT_GR15(\regs)
STREG %r16,PT_GR16(\regs)
STREG %r17,PT_GR17(\regs)
STREG %r18,PT_GR18(\regs)
.endm
.macro reg_restore regs
LDREG PT_GR3(\regs), %r3
LDREG PT_GR4(\regs), %r4
LDREG PT_GR5(\regs), %r5
LDREG PT_GR6(\regs), %r6
LDREG PT_GR7(\regs), %r7
LDREG PT_GR8(\regs), %r8
LDREG PT_GR9(\regs), %r9
LDREG PT_GR10(\regs),%r10
LDREG PT_GR11(\regs),%r11
LDREG PT_GR12(\regs),%r12
LDREG PT_GR13(\regs),%r13
LDREG PT_GR14(\regs),%r14
LDREG PT_GR15(\regs),%r15
LDREG PT_GR16(\regs),%r16
LDREG PT_GR17(\regs),%r17
LDREG PT_GR18(\regs),%r18
.endm
.macro fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r1
reg_save %r1
mfctl %cr27, %r28
ldil L%sys_\name, %r31
be R%sys_\name(%sr4,%r31)
STREG %r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
.endm
fork_like clone
fork_like fork
fork_like vfork
/* Set the return value for the child */
ENTRY(child_return)
BL schedule_tail, %r2
nop
finish_child_return:
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r1 /* get pt regs */
LDREG PT_CR27(%r1), %r3
mtctl %r3, %cr27
reg_restore %r1
b syscall_exit
copy %r0,%r28
END(child_return)
ENTRY_CFI(sys_rt_sigreturn_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
ldo TASK_REGS(%r26),%r26 /* get pt regs */
/* Don't save regs, we are going to restore them from sigcontext. */
STREG %r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
BL sys_rt_sigreturn,%r2
ldo -16(%r30),%r29 /* Reference param save area */
#else
BL sys_rt_sigreturn,%r2
ldo FRAME_SIZE(%r30), %r30
#endif
ldo -FRAME_SIZE(%r30), %r30
LDREG -RP_OFFSET(%r30), %r2
/* FIXME: I think we need to restore a few more things here. */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1),%r1 /* get pt regs */
reg_restore %r1
/* If the signal was received while the process was blocked on a
* syscall, then r2 will take us to syscall_exit; otherwise r2 will
* take us to syscall_exit_rfi and on to intr_return.
*/
bv %r0(%r2)
LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)
ENTRY(syscall_exit)
/* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
* via syscall_exit_rfi if the signal was received while the process
* was running.
*/
/* save return value now */
mfctl %cr30, %r1
LDREG TI_TASK(%r1),%r1
STREG %r28,TASK_PT_GR28(%r1)
/* Seems to me that dp could be wrong here, if the syscall involved
* calling a module, and nothing got round to restoring dp on return.
*/
loadgp
syscall_check_resched:
/* check for reschedule */
LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
.import do_signal,code
syscall_check_sig:
LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
and,COND(<>) %r19, %r26, %r0
b,n syscall_restore /* skip past if we've nothing to do */
syscall_do_signal:
/* Save callee-save registers (for sigcontext).
* FIXME: After this point the process structure should be
* consistent with all the relevant state of the process
* before the syscall. We need to verify this.
*/
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
reg_save %r26
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_notify_resume,%r2
ldi 1, %r25 /* long in_syscall = 1 */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
reg_restore %r20
b,n syscall_check_sig
syscall_restore:
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
/* Are we being ptraced? */
ldw TASK_FLAGS(%r1),%r19
ldi _TIF_SYSCALL_TRACE_MASK,%r2
and,COND(=) %r19,%r2,%r0
b,n syscall_restore_rfi
ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
rest_fp %r19
LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
mtsar %r19
LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
LDREG TASK_PT_GR19(%r1),%r19
LDREG TASK_PT_GR20(%r1),%r20
LDREG TASK_PT_GR21(%r1),%r21
LDREG TASK_PT_GR22(%r1),%r22
LDREG TASK_PT_GR23(%r1),%r23
LDREG TASK_PT_GR24(%r1),%r24
LDREG TASK_PT_GR25(%r1),%r25
LDREG TASK_PT_GR26(%r1),%r26
LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
LDREG TASK_PT_GR29(%r1),%r29
LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
/* NOTE: We use rsm/ssm pair to make this operation atomic */
LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
rsm PSW_SM_I, %r0
copy %r1,%r30 /* Restore user sp */
mfsp %sr3,%r1 /* Get user space id */
mtsp %r1,%sr7 /* Restore sr7 */
ssm PSW_SM_I, %r0
/* Set sr2 to zero for userspace syscalls to work. */
mtsp %r0,%sr2
mtsp %r1,%sr4 /* Restore sr4 */
mtsp %r1,%sr5 /* Restore sr5 */
mtsp %r1,%sr6 /* Restore sr6 */
depi 3,31,2,%r31 /* ensure return to user mode. */
#ifdef CONFIG_64BIT
/* decide whether to reset the wide mode bit
*
* For a syscall, the W bit is stored in the lowest bit
* of sp. Extract it and reset W if it is zero */
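	/* C sketch of the W-bit handling below:
	 *
	 *	w = sp & 1;		// W bit stashed in lowest bit of sp
	 *	if (w == 0)
	 *		rsm(PSW_SM_W);	// drop back to narrow mode
	 *	sp ^= w;		// clear the stashed bit
	 */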
extrd,u,*<> %r30,63,1,%r1
rsm PSW_SM_W, %r0
/* now reset the lowest bit of sp if it was set */
xor %r30,%r1,%r30
#endif
be,n 0(%sr3,%r31) /* return to user space */
/* We have to return via an RFI, so that PSW T and R bits can be set
* appropriately.
* This sets up pt_regs so we can return via intr_restore, which is not
* the most efficient way of doing things, but it works.
*/
syscall_restore_rfi:
ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
mtctl %r2,%cr0 /* for immediate trap */
LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
ldi 0x0b,%r20 /* Create new PSW */
depi -1,13,1,%r20 /* C, Q, D, and I bits */
/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
* set in thread_info.h and converted to PA bitmap
* numbers in asm-offsets.c */
/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
depi -1,27,1,%r20 /* R bit */
/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
depi -1,7,1,%r20 /* T bit */
STREG %r20,TASK_PT_PSW(%r1)
/* Always store space registers, since sr3 can be changed (e.g. fork) */
mfsp %sr3,%r25
STREG %r25,TASK_PT_SR3(%r1)
STREG %r25,TASK_PT_SR4(%r1)
STREG %r25,TASK_PT_SR5(%r1)
STREG %r25,TASK_PT_SR6(%r1)
STREG %r25,TASK_PT_SR7(%r1)
STREG %r25,TASK_PT_IASQ0(%r1)
STREG %r25,TASK_PT_IASQ1(%r1)
/* XXX W bit??? */
/* Now if old D bit is clear, it means we didn't save all registers
* on syscall entry, so do that now. This only happens on TRACEME
* calls, or if someone attached to us while we were on a syscall.
* We could make this more efficient by not saving r3-r18, but
* then we wouldn't be able to use the common intr_restore path.
* It is only for traced processes anyway, so performance is not
* an issue.
*/
bb,< %r2,30,pt_regs_ok /* Branch if D set */
ldo TASK_REGS(%r1),%r25
reg_save %r25 /* Save r3 to r18 */
/* Save the current sr */
mfsp %sr0,%r2
STREG %r2,TASK_PT_SR0(%r1)
/* Save the scratch sr */
mfsp %sr1,%r2
STREG %r2,TASK_PT_SR1(%r1)
/* sr2 should be set to zero for userspace syscalls */
STREG %r0,TASK_PT_SR2(%r1)
LDREG TASK_PT_GR31(%r1),%r2
depi 3,31,2,%r2 /* ensure return to user mode. */
STREG %r2,TASK_PT_IAOQ0(%r1)
ldo 4(%r2),%r2
STREG %r2,TASK_PT_IAOQ1(%r1)
b intr_restore
copy %r25,%r16
pt_regs_ok:
LDREG TASK_PT_IAOQ0(%r1),%r2
depi 3,31,2,%r2 /* ensure return to user mode. */
STREG %r2,TASK_PT_IAOQ0(%r1)
LDREG TASK_PT_IAOQ1(%r1),%r2
depi 3,31,2,%r2
STREG %r2,TASK_PT_IAOQ1(%r1)
b intr_restore
copy %r25,%r16
syscall_do_resched:
load32 syscall_check_resched,%r2 /* if resched, we start over again */
load32 schedule,%r19
bv %r0(%r19) /* jumps to schedule() */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#else
nop
#endif
END(syscall_exit)
#ifdef CONFIG_FUNCTION_TRACER
.import ftrace_function_trampoline,code
.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
.export _mcount,data
/*
* The 64bit mcount() function pointer needs 4 dwords, of which the
* first two are free. We optimize it here and put 2 instructions for
* calling mcount(), and 2 instructions for ftrace_stub(). That way we
* have it all in one L1 cacheline.
*/
b ftrace_function_trampoline
copy %r3, %arg2 /* caller original %sp */
ftrace_stub:
.globl ftrace_stub
.type ftrace_stub, @function
#ifdef CONFIG_64BIT
bve (%rp)
#else
bv %r0(%rp)
#endif
nop
#ifdef CONFIG_64BIT
.dword mcount
.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
.export parisc_return_to_handler,data
parisc_return_to_handler:
copy %r3,%r1
STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */
copy %sp,%r3
STREGM %r1,FRAME_SIZE(%sp)
STREG %ret0,8(%r3)
STREG %ret1,16(%r3)
#ifdef CONFIG_64BIT
loadgp
#endif
/* call ftrace_return_to_handler(0) */
.import ftrace_return_to_handler,code
load32 ftrace_return_to_handler,%ret0
load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
ldo -16(%sp),%ret1 /* Reference param save area */
bve (%ret0)
#else
bv %r0(%ret0)
#endif
ldi 0,%r26
.Lftrace_ret:
copy %ret0,%rp
/* restore original return values */
LDREG 8(%r3),%ret0
LDREG 16(%r3),%ret1
/* return from function */
#ifdef CONFIG_64BIT
bve (%rp)
#else
bv %r0(%rp)
#endif
LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
copy %sp, %r1
/* Regarding the HPPA calling conventions for function pointers,
we assume the PIC register is not changed across the call. For
CONFIG_64BIT, the argument pointer is left to point at the
argument region allocated for the call to call_on_stack. */
/* Switch to new stack. We allocate two frames. */
ldo 2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
/* Save previous stack pointer and return pointer in frame marker */
STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls always use function descriptor */
LDREG 16(%arg1), %arg1
bve,l (%arg1), %rp
STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bve (%rp)
LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# else
/* Save previous stack pointer and return pointer in frame marker */
STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
/* Calls use function descriptor if PLABEL bit is set */
bb,>=,n %arg1, 30, 1f
depwi 0,31,2, %arg1
LDREG 0(%arg1), %arg1
1:
be,l 0(%sr4,%arg1), %sr0, %r31
copy %r31, %rp
LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
bv (%rp)
LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
ENTRY_CFI(get_register)
/*
* get_register is used by the non access tlb miss handlers to
* copy the value of the general register specified in r8 into
* r1. This routine can't be used for shadowed registers, since
* the rfir will restore the original value. So, for the shadowed
* registers we put a -1 into r1 to indicate that the register
* should not be used (the register being copied could also have
* a -1 in it, but that is OK, it just means that we will have
* to use the slow path instead).
*/
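	/* In C terms (sketch):  r1 = is_shadowed(n) ? -1 : gr[n];
	 * realized below as a blr-indexed jump table, two insns per
	 * register. */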
blr %r8,%r0
nop
bv %r0(%r25) /* r0 */
copy %r0,%r1
bv %r0(%r25) /* r1 - shadowed */
ldi -1,%r1
bv %r0(%r25) /* r2 */
copy %r2,%r1
bv %r0(%r25) /* r3 */
copy %r3,%r1
bv %r0(%r25) /* r4 */
copy %r4,%r1
bv %r0(%r25) /* r5 */
copy %r5,%r1
bv %r0(%r25) /* r6 */
copy %r6,%r1
bv %r0(%r25) /* r7 */
copy %r7,%r1
bv %r0(%r25) /* r8 - shadowed */
ldi -1,%r1
bv %r0(%r25) /* r9 - shadowed */
ldi -1,%r1
bv %r0(%r25) /* r10 */
copy %r10,%r1
bv %r0(%r25) /* r11 */
copy %r11,%r1
bv %r0(%r25) /* r12 */
copy %r12,%r1
bv %r0(%r25) /* r13 */
copy %r13,%r1
bv %r0(%r25) /* r14 */
copy %r14,%r1
bv %r0(%r25) /* r15 */
copy %r15,%r1
bv %r0(%r25) /* r16 - shadowed */
ldi -1,%r1
bv %r0(%r25) /* r17 - shadowed */
ldi -1,%r1
bv %r0(%r25) /* r18 */
copy %r18,%r1
bv %r0(%r25) /* r19 */
copy %r19,%r1
bv %r0(%r25) /* r20 */
copy %r20,%r1
bv %r0(%r25) /* r21 */
copy %r21,%r1
bv %r0(%r25) /* r22 */
copy %r22,%r1
bv %r0(%r25) /* r23 */
copy %r23,%r1
bv %r0(%r25) /* r24 - shadowed */
ldi -1,%r1
bv %r0(%r25) /* r25 - shadowed */
ldi -1,%r1
bv %r0(%r25) /* r26 */
copy %r26,%r1
bv %r0(%r25) /* r27 */
copy %r27,%r1
bv %r0(%r25) /* r28 */
copy %r28,%r1
bv %r0(%r25) /* r29 */
copy %r29,%r1
bv %r0(%r25) /* r30 */
copy %r30,%r1
bv %r0(%r25) /* r31 */
copy %r31,%r1
ENDPROC_CFI(get_register)
ENTRY_CFI(set_register)
/*
* set_register is used by the non access tlb miss handlers to
* copy the value of r1 into the general register specified in
* r8.
*/
blr %r8,%r0
nop
bv %r0(%r25) /* r0 (silly, but it is a place holder) */
copy %r1,%r0
bv %r0(%r25) /* r1 */
copy %r1,%r1
bv %r0(%r25) /* r2 */
copy %r1,%r2
bv %r0(%r25) /* r3 */
copy %r1,%r3
bv %r0(%r25) /* r4 */
copy %r1,%r4
bv %r0(%r25) /* r5 */
copy %r1,%r5
bv %r0(%r25) /* r6 */
copy %r1,%r6
bv %r0(%r25) /* r7 */
copy %r1,%r7
bv %r0(%r25) /* r8 */
copy %r1,%r8
bv %r0(%r25) /* r9 */
copy %r1,%r9
bv %r0(%r25) /* r10 */
copy %r1,%r10
bv %r0(%r25) /* r11 */
copy %r1,%r11
bv %r0(%r25) /* r12 */
copy %r1,%r12
bv %r0(%r25) /* r13 */
copy %r1,%r13
bv %r0(%r25) /* r14 */
copy %r1,%r14
bv %r0(%r25) /* r15 */
copy %r1,%r15
bv %r0(%r25) /* r16 */
copy %r1,%r16
bv %r0(%r25) /* r17 */
copy %r1,%r17
bv %r0(%r25) /* r18 */
copy %r1,%r18
bv %r0(%r25) /* r19 */
copy %r1,%r19
bv %r0(%r25) /* r20 */
copy %r1,%r20
bv %r0(%r25) /* r21 */
copy %r1,%r21
bv %r0(%r25) /* r22 */
copy %r1,%r22
bv %r0(%r25) /* r23 */
copy %r1,%r23
bv %r0(%r25) /* r24 */
copy %r1,%r24
bv %r0(%r25) /* r25 */
copy %r1,%r25
bv %r0(%r25) /* r26 */
copy %r1,%r26
bv %r0(%r25) /* r27 */
copy %r1,%r27
bv %r0(%r25) /* r28 */
copy %r1,%r28
bv %r0(%r25) /* r29 */
copy %r1,%r29
bv %r0(%r25) /* r30 */
copy %r1,%r30
bv %r0(%r25) /* r31 */
copy %r1,%r31
ENDPROC_CFI(set_register)
AirFortressIlikara/LS2K0300-linux-4.19 | 29,854 | arch/parisc/kernel/pacache.S |
/*
* PARISC TLB and cache flushing support
* Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
* Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
* Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* NOTE: fdc,fic, and pdc instructions that use base register modification
* should only use index and base registers that are not shadowed,
* so that the fast path emulation in the non access miss handler
* can be used.
*/
#ifdef CONFIG_64BIT
.level 2.0w
#else
.level 2.0
#endif
#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ldcw.h>
#include <linux/linkage.h>
#include <linux/init.h>
.section .text.hot
.align 16
ENTRY_CFI(flush_tlb_all_local)
/*
* The pitlbe and pdtlbe instructions should only be used to
* flush the entire tlb. Also, there needs to be no intervening
* tlb operations, e.g. tlb misses, so the operation needs
* to happen in real mode with all interruptions disabled.
*/
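	/* Loop-structure sketch of the purges below (C-ish, illustrative;
	 * the same shape is repeated for the D-TLB):
	 *
	 *	for (i = 0, sid = SID_BASE; i < SID_COUNT;
	 *	     i++, sid += SID_STRIDE) {
	 *		off = OFF_BASE;
	 *		for (j = 0; j < OFF_COUNT; j++)
	 *			for (k = 0; k < LOOP; k++, off += OFF_STRIDE)
	 *				pitlbe(sid, off);
	 *	}
	 */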
/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
rsm PSW_SM_I, %r19 /* save I-bit state */
load32 PA(1f), %r1
nop
nop
nop
nop
nop
rsm PSW_SM_Q, %r0 /* prep to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 REAL_MODE_PSW, %r1
mtctl %r1, %ipsw
rfi
nop
1: load32 PA(cache_info), %r1
/* Flush Instruction Tlb */
LDREG ITLB_SID_BASE(%r1), %r20
LDREG ITLB_SID_STRIDE(%r1), %r21
LDREG ITLB_SID_COUNT(%r1), %r22
LDREG ITLB_OFF_BASE(%r1), %arg0
LDREG ITLB_OFF_STRIDE(%r1), %arg1
LDREG ITLB_OFF_COUNT(%r1), %arg2
LDREG ITLB_LOOP(%r1), %arg3
addib,COND(=) -1, %arg3, fitoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */
copy %arg0, %r28 /* Init base addr */
fitmanyloop: /* Loop if LOOP >= 2 */
mtsp %r20, %sr1
add %r21, %r20, %r20 /* increment space */
copy %arg2, %r29 /* Init middle loop count */
fitmanymiddle: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
pitlbe %r0(%sr1, %r28)
pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */
addib,COND(<=),n -1, %r22, fitdone /* Outer loop count decr */
fitoneloop: /* Loop if LOOP = 1 */
mtsp %r20, %sr1
copy %arg0, %r28 /* init base addr */
copy %arg2, %r29 /* init middle loop count */
fitonemiddle: /* Loop if LOOP = 1 */
addib,COND(>) -1, %r29, fitonemiddle /* Middle loop count decr */
pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */
addib,COND(>) -1, %r22, fitoneloop /* Outer loop count decr */
add %r21, %r20, %r20 /* increment space */
fitdone:
/* Flush Data Tlb */
LDREG DTLB_SID_BASE(%r1), %r20
LDREG DTLB_SID_STRIDE(%r1), %r21
LDREG DTLB_SID_COUNT(%r1), %r22
LDREG DTLB_OFF_BASE(%r1), %arg0
LDREG DTLB_OFF_STRIDE(%r1), %arg1
LDREG DTLB_OFF_COUNT(%r1), %arg2
LDREG DTLB_LOOP(%r1), %arg3
addib,COND(=) -1, %arg3, fdtoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */
copy %arg0, %r28 /* Init base addr */
fdtmanyloop: /* Loop if LOOP >= 2 */
mtsp %r20, %sr1
add %r21, %r20, %r20 /* increment space */
copy %arg2, %r29 /* Init middle loop count */
fdtmanymiddle: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
pdtlbe %r0(%sr1, %r28)
pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */
addib,COND(<=),n -1, %r22,fdtdone /* Outer loop count decr */
fdtoneloop: /* Loop if LOOP = 1 */
mtsp %r20, %sr1
copy %arg0, %r28 /* init base addr */
copy %arg2, %r29 /* init middle loop count */
fdtonemiddle: /* Loop if LOOP = 1 */
addib,COND(>) -1, %r29, fdtonemiddle /* Middle loop count decr */
pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */
addib,COND(>) -1, %r22, fdtoneloop /* Outer loop count decr */
add %r21, %r20, %r20 /* increment space */
fdtdone:
/*
* Switch back to virtual mode
*/
/* pcxt_ssm_bug */
rsm PSW_SM_I, %r0
load32 2f, %r1
nop
nop
nop
nop
nop
rsm PSW_SM_Q, %r0 /* prep to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 KERNEL_PSW, %r1
or %r1, %r19, %r1 /* I-bit to state on entry */
mtctl %r1, %ipsw /* restore I-bit (entire PSW) */
rfi
nop
2: bv %r0(%r2)
nop
ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
load32 cache_info, %r1
/* Flush Instruction Cache */
LDREG ICACHE_BASE(%r1), %arg0
LDREG ICACHE_STRIDE(%r1), %arg1
LDREG ICACHE_COUNT(%r1), %arg2
LDREG ICACHE_LOOP(%r1), %arg3
rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
mtsp %r0, %sr1
addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
fimanyloop: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fimanyloop /* Adjusted inner loop decr */
fice %r0(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */
movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */
addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */
fioneloop: /* Loop if LOOP = 1 */
/* Some implementations may flush with a single fice instruction */
cmpib,COND(>>=),n 15, %arg2, fioneloop2
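/* 15 or fewer iterations left: skip the 16-way unrolled loop
 * below and use the single-step loop at fioneloop2 instead. */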
fioneloop1:
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0)
addib,COND(>) -16, %arg2, fioneloop1
fice,m %arg1(%sr1, %arg0)
/* Check if done */
cmpb,COND(=),n %arg2, %r0, fisync /* Predict branch taken */
fioneloop2:
addib,COND(>) -1, %arg2, fioneloop2 /* Outer loop count decr */
fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
fisync:
sync
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
load32 cache_info, %r1
/* Flush Data Cache */
LDREG DCACHE_BASE(%r1), %arg0
LDREG DCACHE_STRIDE(%r1), %arg1
LDREG DCACHE_COUNT(%r1), %arg2
LDREG DCACHE_LOOP(%r1), %arg3
rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
mtsp %r0, %sr1
addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
fdmanyloop: /* Loop if LOOP >= 2 */
addib,COND(>) -1, %r31, fdmanyloop /* Adjusted inner loop decr */
fdce %r0(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */
movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */
addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */
fdoneloop: /* Loop if LOOP = 1 */
/* Some implementations may flush with a single fdce instruction */
cmpib,COND(>>=),n 15, %arg2, fdoneloop2
fdoneloop1:
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0)
addib,COND(>) -16, %arg2, fdoneloop1
fdce,m %arg1(%sr1, %arg0)
/* Check if done */
cmpb,COND(=),n %arg2, %r0, fdsync /* Predict branch taken */
fdoneloop2:
addib,COND(>) -1, %arg2, fdoneloop2 /* Outer loop count decr */
fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
fdsync:
syncdma
sync
mtsm %r22 /* restore I-bit */
bv %r0(%r2)
nop
ENDPROC_CFI(flush_data_cache_local)
/* Macros to serialize TLB purge operations on SMP. */
.macro tlb_lock la,flags,tmp
#ifdef CONFIG_SMP
#if __PA_LDCW_ALIGNMENT > 4
load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
#else
load32 pa_tlb_lock, \la
#endif
rsm PSW_SM_I,\flags
1: LDCW 0(\la),\tmp
cmpib,<>,n 0,\tmp,3f
2: ldw 0(\la),\tmp
cmpb,<> %r0,\tmp,1b
nop
b,n 2b
3:
#endif
.endm
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
ldi 1,\tmp
sync
stw \tmp,0(\la)
mtsm \flags
#endif
.endm
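/* In rough C terms (our reading of the macros above), this is a classic
 * ldcw spinlock: a zero word means "locked", and unlocking stores a
 * non-zero value back:
 *
 *	local_irq_save(flags);
 *	while (__ldcw(la) == 0)		// ldcw atomically loads and zeroes
 *		while (*la == 0)	// spin on plain loads rather than
 *			cpu_relax();	// hammering the bus with ldcw
 *	// ... TLB purge ...
 *	*la = 1;			// tlb_unlock: mark the lock free
 *	local_irq_restore(flags);
 */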
/* Clear page using kernel mapping. */
ENTRY_CFI(clear_page_asm)
#ifdef CONFIG_64BIT
/* Unroll the loop. */
ldi (PAGE_SIZE / 128), %r1
1:
std %r0, 0(%r26)
std %r0, 8(%r26)
std %r0, 16(%r26)
std %r0, 24(%r26)
std %r0, 32(%r26)
std %r0, 40(%r26)
std %r0, 48(%r26)
std %r0, 56(%r26)
std %r0, 64(%r26)
std %r0, 72(%r26)
std %r0, 80(%r26)
std %r0, 88(%r26)
std %r0, 96(%r26)
std %r0, 104(%r26)
std %r0, 112(%r26)
std %r0, 120(%r26)
/* Note reverse branch hint for addib is taken. */
addib,COND(>),n -1, %r1, 1b
ldo 128(%r26), %r26
#else
/*
* Note that until (if) we start saving the full 64-bit register
* values on interrupt, we can't use std on a 32 bit kernel.
*/
ldi (PAGE_SIZE / 64), %r1
1:
stw %r0, 0(%r26)
stw %r0, 4(%r26)
stw %r0, 8(%r26)
stw %r0, 12(%r26)
stw %r0, 16(%r26)
stw %r0, 20(%r26)
stw %r0, 24(%r26)
stw %r0, 28(%r26)
stw %r0, 32(%r26)
stw %r0, 36(%r26)
stw %r0, 40(%r26)
stw %r0, 44(%r26)
stw %r0, 48(%r26)
stw %r0, 52(%r26)
stw %r0, 56(%r26)
stw %r0, 60(%r26)
addib,COND(>),n -1, %r1, 1b
ldo 64(%r26), %r26
#endif
bv %r0(%r2)
nop
ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping. */
ENTRY_CFI(copy_page_asm)
#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
* Prefetch doesn't improve performance on rp3440.
* GCC probably can do this just as well...
*/
ldi (PAGE_SIZE / 128), %r1
1: ldd 0(%r25), %r19
ldd 8(%r25), %r20
ldd 16(%r25), %r21
ldd 24(%r25), %r22
std %r19, 0(%r26)
std %r20, 8(%r26)
ldd 32(%r25), %r19
ldd 40(%r25), %r20
std %r21, 16(%r26)
std %r22, 24(%r26)
ldd 48(%r25), %r21
ldd 56(%r25), %r22
std %r19, 32(%r26)
std %r20, 40(%r26)
ldd 64(%r25), %r19
ldd 72(%r25), %r20
std %r21, 48(%r26)
std %r22, 56(%r26)
ldd 80(%r25), %r21
ldd 88(%r25), %r22
std %r19, 64(%r26)
std %r20, 72(%r26)
ldd 96(%r25), %r19
ldd 104(%r25), %r20
std %r21, 80(%r26)
std %r22, 88(%r26)
ldd 112(%r25), %r21
ldd 120(%r25), %r22
ldo 128(%r25), %r25
std %r19, 96(%r26)
std %r20, 104(%r26)
std %r21, 112(%r26)
std %r22, 120(%r26)
/* Note reverse branch hint for addib is taken. */
addib,COND(>),n -1, %r1, 1b
ldo 128(%r26), %r26
#else
/*
* This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
* bundles (very restricted rules for bundling).
* Note that until (if) we start saving
* the full 64 bit register values on interrupt, we can't
* use ldd/std on a 32 bit kernel.
*/
ldw 0(%r25), %r19
ldi (PAGE_SIZE / 64), %r1
1:
ldw 4(%r25), %r20
ldw 8(%r25), %r21
ldw 12(%r25), %r22
stw %r19, 0(%r26)
stw %r20, 4(%r26)
stw %r21, 8(%r26)
stw %r22, 12(%r26)
ldw 16(%r25), %r19
ldw 20(%r25), %r20
ldw 24(%r25), %r21
ldw 28(%r25), %r22
stw %r19, 16(%r26)
stw %r20, 20(%r26)
stw %r21, 24(%r26)
stw %r22, 28(%r26)
ldw 32(%r25), %r19
ldw 36(%r25), %r20
ldw 40(%r25), %r21
ldw 44(%r25), %r22
stw %r19, 32(%r26)
stw %r20, 36(%r26)
stw %r21, 40(%r26)
stw %r22, 44(%r26)
ldw 48(%r25), %r19
ldw 52(%r25), %r20
ldw 56(%r25), %r21
ldw 60(%r25), %r22
stw %r19, 48(%r26)
stw %r20, 52(%r26)
ldo 64(%r25), %r25
stw %r21, 56(%r26)
stw %r22, 60(%r26)
ldo 64(%r26), %r26
addib,COND(>),n -1, %r1, 1b
ldw 0(%r25), %r19
#endif
bv %r0(%r2)
nop
ENDPROC_CFI(copy_page_asm)
/*
* NOTE: Code in clear_user_page has a hard coded dependency on the
* maximum alias boundary being 4 MB. We've been assured by the
* parisc chip designers that there will not ever be a parisc
* chip with a larger alias boundary (Never say never :-) ).
*
* Subtle: the dtlb miss handlers support the temp alias region by
* "knowing" that if a dtlb miss happens within the temp alias
* region it must have occurred while in clear_user_page. Since
* this routine makes use of processor local translations, we
* don't want to insert them into the kernel page table. Instead,
* we load up some general registers (they need to be registers
* which aren't shadowed) with the physical page numbers (preshifted
* for tlb insertion) needed to insert the translations. When we
* miss on the translation, the dtlb miss handler inserts the
* translation into the tlb using these values:
*
* %r26 physical page (shifted for tlb insert) of "to" translation
* %r23 physical page (shifted for tlb insert) of "from" translation
*/
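/* Our summary of the address math done below: with 4 kB pages the
 * temp alias address works out to
 *	tmpalias = TMPALIAS_MAP_START | (user_vaddr & 0x3ff000);
 * i.e. the page's offset within the 4 MB alias window, so the alias
 * lands on the same cache color as the original user mapping.
 */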
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
.macro convert_phys_for_tlb_insert20 phys
extrd,u \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
depdi _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
.endm
/*
* copy_user_page_asm() performs a page copy using mappings
* equivalent to the user page mappings. It can be used to
* implement copy_user_page() but unfortunately both the `from'
* and `to' pages need to be flushed through mappings equivalent
* to the user mappings after the copy because the kernel accesses
* the `from' page through the kmap kernel mapping and the `to'
* page needs to be flushed since code can be copied. As a
* result, this implementation is less efficient than the simpler
* copy using the kernel mapping. It only needs the `from' page
* to be flushed via the user mapping. The kunmap routines handle
* the flushes needed for the kernel mapping.
*
* I'm still keeping this around because it may be possible to
* use it if more information is passed into copy_user_page().
* Have to do some measurements to see if it is worthwhile to
* lobby for such a change.
*
*/
ENTRY_CFI(copy_user_page_asm)
/* Convert virtual `to' and `from' addresses to physical addresses.
Move `from' physical address to non shadowed register. */
ldil L%(__PAGE_OFFSET), %r1
sub %r26, %r1, %r26
sub %r25, %r1, %r23
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
#endif
convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */
depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
copy %r28, %r29
depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
copy %r28, %r29
depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
#endif
/* Purge any old translations */
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
pdtlb,l %r0(%r29)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
pdtlb %r0(%r29)
tlb_unlock %r20,%r21,%r22
#endif
#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
* GCC probably can do this just as well.
*/
ldd 0(%r29), %r19
ldi (PAGE_SIZE / 128), %r1
1: ldd 8(%r29), %r20
ldd 16(%r29), %r21
ldd 24(%r29), %r22
std %r19, 0(%r28)
std %r20, 8(%r28)
ldd 32(%r29), %r19
ldd 40(%r29), %r20
std %r21, 16(%r28)
std %r22, 24(%r28)
ldd 48(%r29), %r21
ldd 56(%r29), %r22
std %r19, 32(%r28)
std %r20, 40(%r28)
ldd 64(%r29), %r19
ldd 72(%r29), %r20
std %r21, 48(%r28)
std %r22, 56(%r28)
ldd 80(%r29), %r21
ldd 88(%r29), %r22
std %r19, 64(%r28)
std %r20, 72(%r28)
ldd 96(%r29), %r19
ldd 104(%r29), %r20
std %r21, 80(%r28)
std %r22, 88(%r28)
ldd 112(%r29), %r21
ldd 120(%r29), %r22
std %r19, 96(%r28)
std %r20, 104(%r28)
ldo 128(%r29), %r29
std %r21, 112(%r28)
std %r22, 120(%r28)
ldo 128(%r28), %r28
/* conditional branches nullify on forward taken branch, and on
* non-taken backward branch. Note that .+4 is a backwards branch.
* The ldd should only get executed if the branch is taken.
*/
addib,COND(>),n -1, %r1, 1b /* bundle 10 */
ldd 0(%r29), %r19 /* start next loads */
#else
ldi (PAGE_SIZE / 64), %r1
/*
* This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
* bundles (very restricted rules for bundling). It probably
* does OK on PCXU and better, but we could do better with
* ldd/std instructions. Note that until (if) we start saving
* the full 64 bit register values on interrupt, we can't
* use ldd/std on a 32 bit kernel.
*/
1: ldw 0(%r29), %r19
ldw 4(%r29), %r20
ldw 8(%r29), %r21
ldw 12(%r29), %r22
stw %r19, 0(%r28)
stw %r20, 4(%r28)
stw %r21, 8(%r28)
stw %r22, 12(%r28)
ldw 16(%r29), %r19
ldw 20(%r29), %r20
ldw 24(%r29), %r21
ldw 28(%r29), %r22
stw %r19, 16(%r28)
stw %r20, 20(%r28)
stw %r21, 24(%r28)
stw %r22, 28(%r28)
ldw 32(%r29), %r19
ldw 36(%r29), %r20
ldw 40(%r29), %r21
ldw 44(%r29), %r22
stw %r19, 32(%r28)
stw %r20, 36(%r28)
stw %r21, 40(%r28)
stw %r22, 44(%r28)
ldw 48(%r29), %r19
ldw 52(%r29), %r20
ldw 56(%r29), %r21
ldw 60(%r29), %r22
stw %r19, 48(%r28)
stw %r20, 52(%r28)
stw %r21, 56(%r28)
stw %r22, 60(%r28)
ldo 64(%r28), %r28
addib,COND(>) -1, %r1,1b
ldo 64(%r29), %r29
#endif
bv %r0(%r2)
nop
ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
#endif
convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
#endif
#ifdef CONFIG_64BIT
ldi (PAGE_SIZE / 128), %r1
/* PREFETCH (Write) has not (yet) been proven to help here */
/* #define PREFETCHW_OP ldd 256(%0), %r0 */
1: std %r0, 0(%r28)
std %r0, 8(%r28)
std %r0, 16(%r28)
std %r0, 24(%r28)
std %r0, 32(%r28)
std %r0, 40(%r28)
std %r0, 48(%r28)
std %r0, 56(%r28)
std %r0, 64(%r28)
std %r0, 72(%r28)
std %r0, 80(%r28)
std %r0, 88(%r28)
std %r0, 96(%r28)
std %r0, 104(%r28)
std %r0, 112(%r28)
std %r0, 120(%r28)
addib,COND(>) -1, %r1, 1b
ldo 128(%r28), %r28
#else /* ! CONFIG_64BIT */
ldi (PAGE_SIZE / 64), %r1
1: stw %r0, 0(%r28)
stw %r0, 4(%r28)
stw %r0, 8(%r28)
stw %r0, 12(%r28)
stw %r0, 16(%r28)
stw %r0, 20(%r28)
stw %r0, 24(%r28)
stw %r0, 28(%r28)
stw %r0, 32(%r28)
stw %r0, 36(%r28)
stw %r0, 40(%r28)
stw %r0, 44(%r28)
stw %r0, 48(%r28)
stw %r0, 52(%r28)
stw %r0, 56(%r28)
stw %r0, 60(%r28)
addib,COND(>) -1, %r1, 1b
ldo 64(%r28), %r28
#endif /* CONFIG_64BIT */
bv %r0(%r2)
nop
ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
#endif
convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
#endif
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r31
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r28, %r25, %r25
sub %r25, %r31, %r25
1: fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
fdc,m %r31(%r28)
cmpb,COND(<<) %r28, %r25,1b
fdc,m %r31(%r28)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_dcache_page_asm)
ENTRY_CFI(flush_icache_page_asm)
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
#endif
convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation. Note that the FIC instruction
* may use either the instruction or data TLB. Given that we
* have a flat address space, it's not clear which TLB will be
* used. So, we purge both entries. */
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
pitlb,l %r0(%sr4,%r28)
#else
tlb_lock %r20,%r21,%r22
pdtlb %r0(%r28)
pitlb %r0(%sr4,%r28)
tlb_unlock %r20,%r21,%r22
#endif
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r31
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r28, %r25, %r25
sub %r25, %r31, %r25
/* fic only has the type 26 form on PA1.1, requiring an
* explicit space specification, so use %sr4 */
1: fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
cmpb,COND(<<) %r28, %r25,1b
fic,m %r31(%sr4,%r28)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r26, %r25, %r25
sub %r25, %r23, %r25
1: fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
cmpb,COND(<<) %r26, %r25,1b
fdc,m %r23(%r26)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r26, %r25, %r25
sub %r25, %r23, %r25
1: pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
cmpb,COND(<<) %r26, %r25, 1b
pdc,m %r23(%r26)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25, 1b
fdc,m %r23(%sr3, %r26)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_user_dcache_range_asm)
ENTRY_CFI(flush_kernel_dcache_range_asm)
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25,1b
fdc,m %r23(%r26)
sync
syncdma
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25,1b
pdc,m %r23(%r26)
sync
syncdma
bv %r0(%r2)
nop
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25,1b
fic,m %r23(%sr3, %r26)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r26, %r25, %r25
sub %r25, %r23, %r25
1: fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
cmpb,COND(<<) %r26, %r25, 1b
fic,m %r23(%sr4, %r26)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: cmpb,COND(<<),n %r26, %r25, 1b
fic,m %r23(%sr4, %r26)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_icache_range_asm)
__INIT
/* align should cover use of rfi in disable_sr_hashing_asm and
* srdis_done.
*/
.align 256
ENTRY_CFI(disable_sr_hashing_asm)
/*
* Switch to real mode
*/
/* pcxt_ssm_bug */
rsm PSW_SM_I, %r0
load32 PA(1f), %r1
nop
nop
nop
nop
nop
rsm PSW_SM_Q, %r0 /* prep to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 REAL_MODE_PSW, %r1
mtctl %r1, %ipsw
rfi
nop
1: cmpib,=,n SRHASH_PCXST, %r26,srdis_pcxs
cmpib,=,n SRHASH_PCXL, %r26,srdis_pcxl
cmpib,=,n SRHASH_PA20, %r26,srdis_pa20
b,n srdis_done
srdis_pcxs:
/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
.word 0x141c1a00 /* mfdiag %dr0, %r28 */
.word 0x141c1a00 /* must issue twice */
depwi 0,18,1, %r28 /* Clear DHE (dcache hash enable) */
depwi 0,20,1, %r28 /* Clear IHE (icache hash enable) */
.word 0x141c1600 /* mtdiag %r28, %dr0 */
.word 0x141c1600 /* must issue twice */
b,n srdis_done
srdis_pcxl:
/* Disable Space Register Hashing for PCXL */
.word 0x141c0600 /* mfdiag %dr0, %r28 */
depwi 0,28,2, %r28 /* Clear DHASH_EN & IHASH_EN */
.word 0x141c0240 /* mtdiag %r28, %dr0 */
b,n srdis_done
srdis_pa20:
/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */
.word 0x144008bc /* mfdiag %dr2, %r28 */
depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */
.word 0x145c1840 /* mtdiag %r28, %dr2 */
srdis_done:
/* Switch back to virtual mode */
rsm PSW_SM_I, %r0 /* prep to load iia queue */
load32 2f, %r1
nop
nop
nop
nop
nop
rsm PSW_SM_Q, %r0 /* prep to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 KERNEL_PSW, %r1
mtctl %r1, %ipsw
rfi
nop
2: bv %r0(%r2)
nop
ENDPROC_CFI(disable_sr_hashing_asm)
.end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 26,851
|
arch/parisc/kernel/syscall.S
|
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* System call entry code / Linux gateway page
* Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
* Licensed under the GNU GPL.
* thanks to Philipp Rumpf, Mike Shaver and various others
* sorry about the wall, puffin..
*/
/*
How does the Linux gateway page on PA-RISC work?
------------------------------------------------
The Linux gateway page on PA-RISC is "special".
It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
terminology it's Execute, promote to PL0) in the page map. So anything
executing on this page executes with kernel level privilege (there's more to it
than that: to have this happen, you also have to use a branch with a ,gate
completer to activate the privilege promotion). The upshot is that everything
that runs on the gateway page runs at kernel privilege but with the current
user process address space (although you have access to kernel space via %sr2).
For the 0x100 syscall entry, we redo the space registers to point to the kernel
address space (preserving the user address space in %sr3), move to wide mode if
required, save the user registers and branch into the kernel syscall entry
point. For all the other functions, we execute at kernel privilege but don't
flip address spaces. The basic upshot of this is that these code snippets are
executed atomically (because the kernel can't be pre-empted) and they may
perform architecturally forbidden (to PL3) operations (like setting control
registers).
*/
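/*
 * Illustration only (a sketch of the userspace side, modeled on how
 * glibc enters this page; not part of the kernel code below): a syscall
 * branches to offset 0x100 of the gateway page through %sr2, loading
 * the syscall number into %r20 in the branch delay slot, with arguments
 * already in %r26-%r23; the privilege promotion itself is performed by
 * the gate instruction at the branch target:
 *
 *	ble	0x100(%sr2, %r0)
 *	ldi	__NR_getpid, %r20	; delay slot: syscall number
 *	; return value arrives in %r28
 */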
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
#include <asm/assembly.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <linux/linkage.h>
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
* userspace application.
*/
#define KILL_INSN break 0,0
.level PA_ASM_LEVEL
.text
.import syscall_exit,code
.import syscall_exit_rfi,code
/* Linux gateway page is aliased to virtual page 0 in the kernel
* address space. Since it is a gateway page it cannot be
* dereferenced, so null pointers will still fault. We start
* the actual entry point at 0x100. We put break instructions
* at the beginning of the page to trap null indirect function
* pointers.
*/
.align PAGE_SIZE
ENTRY(linux_gateway_page)
/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
.rept 44
KILL_INSN
.endr
/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
/* Light-weight-syscall entry must always be located at 0xb0 */
/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (3)
lws_entry:
gate lws_start, %r0 /* increase privilege */
depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
/* Fill from 0xb8 to 0xe0 */
.rept 10
KILL_INSN
.endr
/* This function MUST be located at 0xe0 for glibc's threading
mechanism to work. DO NOT MOVE THIS CODE EVER! */
set_thread_pointer:
gate .+8, %r0 /* increase privilege */
depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
be 0(%sr7,%r31) /* return to user space */
mtctl %r26, %cr27 /* move arg0 to the control register */
/* Increase the chance of trapping if random jumps occur to this
address, fill from 0xf0 to 0x100 */
.rept 4
KILL_INSN
.endr
/* This address must remain fixed at 0x100 for glibc's syscalls to work */
.align LINUX_GATEWAY_ADDR
linux_gateway_entry:
gate .+8, %r0 /* become privileged */
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
#ifdef CONFIG_64BIT
/* Store W bit on entry to the syscall in case it's a wide userland
* process. */
ssm PSW_SM_W, %r1
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
or,ev %r1,%r30,%r30
b,n 1f
/* The top halves of argument registers must be cleared on syscall
* entry from narrow executable.
*/
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
depdi 0, 31, 32, %r23
depdi 0, 31, 32, %r22
depdi 0, 31, 32, %r21
1:
#endif
/* We use a rsm/ssm pair to prevent sr3 from being clobbered
* by external interrupts.
*/
mfsp %sr7,%r1 /* save user sr7 */
rsm PSW_SM_I, %r0 /* disable interrupts */
mtsp %r1,%sr3 /* and store it in sr3 */
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
xor %r1,%r30,%r30
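/* After the three XORs, %r30 holds the thread_info pointer that was in
 * %cr30 and %r1 holds the user stack pointer -- the two registers are
 * swapped without needing a third scratch register. */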
ldo THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */
/* N.B.: It is critical that we don't set sr7 to 0 until r30
* contains a valid kernel stack pointer. It is also
* critical that we don't start using the kernel stack
* until after sr7 has been set to 0.
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
/* Save some registers for sigcontext and potential task
switch (see entry.S for the details of which ones are
saved/restored). TASK_PT_PSW is zeroed so we can see whether
a process is on a syscall or not. For an interrupt the real
PSW value is stored. This is needed for gdb and sys_ptrace. */
STREG %r0, TASK_PT_PSW(%r1)
STREG %r2, TASK_PT_GR2(%r1) /* preserve rp */
STREG %r19, TASK_PT_GR19(%r1)
LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
#ifdef CONFIG_64BIT
extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
#if 0
xor %r19,%r2,%r2 /* clear bottom bit */
depd,z %r19,1,1,%r19
std %r19,TASK_PT_PSW(%r1)
#endif
#endif
STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
STREG %r21, TASK_PT_GR21(%r1)
STREG %r22, TASK_PT_GR22(%r1)
STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
STREG %r24, TASK_PT_GR24(%r1) /* 3rd argument */
STREG %r25, TASK_PT_GR25(%r1) /* 2nd argument */
STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
STREG %r27, TASK_PT_GR27(%r1) /* user dp */
STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
STREG %r0, TASK_PT_ORIG_R28(%r1) /* don't prohibit restarts */
STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */
STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
ldo TASK_PT_FR0(%r1), %r27 /* save fpregs from the kernel */
save_fp %r27 /* or potential task switch */
mfctl %cr11, %r27 /* i.e. SAR */
STREG %r27, TASK_PT_SAR(%r1)
loadgp
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
copy %r19,%r2 /* W bit back to r2 */
#else
/* no need to save these on stack in wide mode because the first 8
* args are passed in registers */
stw %r22, -52(%r30) /* 5th argument */
stw %r21, -56(%r30) /* 6th argument */
#endif
/* Are we being ptraced? */
mfctl %cr30, %r1
LDREG TI_FLAGS(%r1),%r1
ldi _TIF_SYSCALL_TRACE_MASK, %r19
and,COND(=) %r1, %r19, %r0
b,n .Ltracesys
/* Note! We cannot use the syscall table that is mapped
nearby since the gateway page is mapped execute-only. */
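/* The ,= completer nullifies the following instruction when the result
 * is zero, so for a narrow process (W bit in %r2 clear) both 64-bit
 * table adjustments below are skipped and %r19 is left pointing at
 * sys_call_table. */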
#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
ldo R%sys_call_table(%r1), %r19
or,= %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
#endif
comiclr,>> __NR_Linux_syscalls, %r20, %r0
b,n .Lsyscall_nosys
LDREGX %r20(%r19), %r19
/* If this is a sys_rt_sigreturn call, and the signal was received
* when not in_syscall, then we want to return via syscall_exit_rfi,
* not syscall_exit. Signal no. in r20, in_syscall in r25 (see
* trampoline code in signal.c).
*/
ldi __NR_rt_sigreturn,%r2
comb,= %r2,%r20,.Lrt_sigreturn
.Lin_syscall:
ldil L%syscall_exit,%r2
be 0(%sr7,%r19)
ldo R%syscall_exit(%r2),%r2
.Lrt_sigreturn:
comib,<> 0,%r25,.Lin_syscall
ldil L%syscall_exit_rfi,%r2
be 0(%sr7,%r19)
ldo R%syscall_exit_rfi(%r2),%r2
/* Note! Because we are not running where we were linked, any
calls to functions external to this file must be indirect. To
be safe, we apply the opposite rule to functions within this
file, with local labels given to them to ensure correctness. */
.Lsyscall_nosys:
syscall_nosys:
ldil L%syscall_exit,%r1
be R%syscall_exit(%sr7,%r1)
ldo -ENOSYS(%r0),%r28 /* set errno */
/* Warning! This trace code is a virtual duplicate of the code above so be
* sure to maintain both! */
.Ltracesys:
tracesys:
/* Need to save more registers so the debugger can see where we
* are. This saves only the lower 8 bits of PSW, so that the C
* bit is still clear on syscalls, and the D bit is set if this
* full register save path has been executed. We check the D
* bit on syscall_return_rfi to determine which registers to
* restore. An interrupt results in a full PSW saved with the
* C bit set, a non-traced syscall entry results in C and D clear
* in the saved PSW.
*/
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
ssm 0,%r2
STREG %r2,TASK_PT_PSW(%r1) /* Lower 8 bits only!! */
mfsp %sr0,%r2
STREG %r2,TASK_PT_SR0(%r1)
mfsp %sr1,%r2
STREG %r2,TASK_PT_SR1(%r1)
mfsp %sr2,%r2
STREG %r2,TASK_PT_SR2(%r1)
mfsp %sr3,%r2
STREG %r2,TASK_PT_SR3(%r1)
STREG %r2,TASK_PT_SR4(%r1)
STREG %r2,TASK_PT_SR5(%r1)
STREG %r2,TASK_PT_SR6(%r1)
STREG %r2,TASK_PT_SR7(%r1)
STREG %r2,TASK_PT_IASQ0(%r1)
STREG %r2,TASK_PT_IASQ1(%r1)
LDREG TASK_PT_GR31(%r1),%r2
STREG %r2,TASK_PT_IAOQ0(%r1)
ldo 4(%r2),%r2
STREG %r2,TASK_PT_IAOQ1(%r1)
ldo TASK_REGS(%r1),%r2
/* reg_save %r2 */
STREG %r3,PT_GR3(%r2)
STREG %r4,PT_GR4(%r2)
STREG %r5,PT_GR5(%r2)
STREG %r6,PT_GR6(%r2)
STREG %r7,PT_GR7(%r2)
STREG %r8,PT_GR8(%r2)
STREG %r9,PT_GR9(%r2)
STREG %r10,PT_GR10(%r2)
STREG %r11,PT_GR11(%r2)
STREG %r12,PT_GR12(%r2)
STREG %r13,PT_GR13(%r2)
STREG %r14,PT_GR14(%r2)
STREG %r15,PT_GR15(%r2)
STREG %r16,PT_GR16(%r2)
STREG %r17,PT_GR17(%r2)
STREG %r18,PT_GR18(%r2)
/* Finished saving things for the debugger */
copy %r2,%r26
ldil L%do_syscall_trace_enter,%r1
ldil L%tracesys_next,%r2
be R%do_syscall_trace_enter(%sr7,%r1)
ldo R%tracesys_next(%r2),%r2
tracesys_next:
/* do_syscall_trace_enter either returned the syscallno, or -1L,
* so we skip restoring the PT_GR20 below, since we pulled it from
* task->thread.regs.gr[20] above.
*/
copy %ret0,%r20
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
LDREG TASK_PT_GR28(%r1), %r28 /* Restore return value */
LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
LDREG TASK_PT_GR25(%r1), %r25
LDREG TASK_PT_GR24(%r1), %r24
LDREG TASK_PT_GR23(%r1), %r23
LDREG TASK_PT_GR22(%r1), %r22
LDREG TASK_PT_GR21(%r1), %r21
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#else
stw %r22, -52(%r30) /* 5th argument */
stw %r21, -56(%r30) /* 6th argument */
#endif
cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
comiclr,>> __NR_Linux_syscalls, %r20, %r0
b,n .Ltracesys_nosys
/* Note! We cannot use the syscall table that is mapped
nearby since the gateway page is mapped execute-only. */
#ifdef CONFIG_64BIT
LDREG TASK_PT_GR30(%r1), %r19 /* get users sp back */
extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */
ldil L%sys_call_table, %r1
or,= %r2,%r2,%r2
addil L%(sys_call_table64-sys_call_table), %r1
ldo R%sys_call_table(%r1), %r19
or,= %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
#endif
LDREGX %r20(%r19), %r19
/* If this is a sys_rt_sigreturn call, and the signal was received
* when not in_syscall, then we want to return via syscall_exit_rfi,
* not syscall_exit. Signal no. in r20, in_syscall in r25 (see
* trampoline code in signal.c).
*/
ldi __NR_rt_sigreturn,%r2
comb,= %r2,%r20,.Ltrace_rt_sigreturn
.Ltrace_in_syscall:
ldil L%tracesys_exit,%r2
be 0(%sr7,%r19)
ldo R%tracesys_exit(%r2),%r2
.Ltracesys_nosys:
ldo -ENOSYS(%r0),%r28 /* set errno */
/* Do *not* call this function on the gateway page, because it
makes a direct call to syscall_trace. */
tracesys_exit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldo TASK_REGS(%r1),%r26
BL do_syscall_trace_exit,%r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
LDREG TASK_PT_GR28(%r1), %r28 /* Restore return val. */
ldil L%syscall_exit,%r1
be,n R%syscall_exit(%sr7,%r1)
.Ltrace_rt_sigreturn:
comib,<> 0,%r25,.Ltrace_in_syscall
ldil L%tracesys_sigexit,%r2
be 0(%sr7,%r19)
ldo R%tracesys_sigexit(%r2),%r2
tracesys_sigexit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_syscall_trace_exit,%r2
ldo TASK_REGS(%r1),%r26
ldil L%syscall_exit_rfi,%r1
be,n R%syscall_exit_rfi(%sr7,%r1)
/*********************************************************
32/64-bit Light-Weight-Syscall ABI
* - Indicates a hint for userspace inline asm
implementations.
Syscall number (caller-saves)
- %r20
* In asm clobber.
Argument registers (caller-saves)
- %r26, %r25, %r24, %r23, %r22
* In asm input.
Return registers (caller-saves)
- %r28 (return), %r21 (errno)
* In asm output.
Caller-saves registers
- %r1, %r27, %r29
- %r2 (return pointer)
- %r31 (ble link register)
* In asm clobber.
Callee-saves registers
- %r3-%r18
- %r30 (stack pointer)
* Not in asm clobber.
If userspace is 32-bit:
Callee-saves registers
- %r19 (32-bit PIC register)
Differences from 32-bit calling convention:
- Syscall number in %r20
- Additional argument register %r22 (arg4)
- Callee-saves %r19.
If userspace is 64-bit:
Callee-saves registers
- %r27 (64-bit PIC register)
Differences from 64-bit calling convention:
- Syscall number in %r20
- Additional argument register %r22 (arg4)
- Callee-saves %r27.
Error codes returned by entry path:
ENOSYS - r20 was an invalid LWS number.
*********************************************************/
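/* Illustration only -- a sketch of a userspace caller for LWS entry 0
 * (ELF32 atomic CAS), loosely modeled on glibc's hppa atomics; the
 * register bindings and clobbers follow the ABI table above:
 *
 *	register unsigned long lws_mem asm("r26") = (unsigned long) addr;
 *	register unsigned long lws_old asm("r25") = oldval;
 *	register unsigned long lws_new asm("r24") = newval;
 *	register unsigned long lws_ret asm("r28");
 *	register long          lws_err asm("r21");
 *	asm volatile ("ble	0xb0(%%sr2, %%r0)\n\t"
 *		      "ldi	0, %%r20"	// delay slot: LWS number
 *		      : "=r" (lws_ret), "=r" (lws_err)
 *		      : "r" (lws_mem), "r" (lws_old), "r" (lws_new)
 *		      : "r1", "r2", "r20", "r27", "r29", "r31", "memory");
 */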
lws_start:
#ifdef CONFIG_64BIT
ssm PSW_SM_W, %r1
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
or,ev %r1,%r30,%r30
/* Clip LWS number to a 32-bit value for 32-bit processes */
depdi 0, 31, 32, %r20
#endif
/* Is the lws entry number valid? */
comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
/* Load table start */
ldil L%lws_table, %r1
ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
LDREGX %r20(%sr2,%r28), %r21 /* Scratch use of r21 */
/* Jump to lws, lws table pointers already relocated */
be,n 0(%sr2,%r21)
lws_exit_nosys:
ldo -ENOSYS(%r0),%r21 /* set errno */
/* Fall through: Return to userspace */
lws_exit:
#ifdef CONFIG_64BIT
/* decide whether to reset the wide mode bit
*
* For a syscall, the W bit is stored in the lowest bit
* of sp. Extract it and reset W if it is zero */
extrd,u,*<> %r30,63,1,%r1
rsm PSW_SM_W, %r0
/* now reset the lowest bit of sp if it was set */
xor %r30,%r1,%r30
#endif
be,n 0(%sr7, %r31)
/***************************************************
Implementing 32bit CAS as an atomic operation:
%r26 - Address to examine
%r25 - Old value to check (old)
%r24 - New value to set (new)
%r28 - Return prev through this register.
%r21 - Kernel error code
If debugging is DISabled:
%r21 has the following meanings:
EAGAIN - CAS is busy, ldcw failed, try again.
EFAULT - Read or write failed.
If debugging is enabled:
EDEADLOCK - CAS called recursively.
EAGAIN && r28 == 1 - CAS is busy. Lock contended.
EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
EFAULT - Read or write failed.
Scratch: r20, r28, r1
****************************************************/
/* Do not enable LWS debugging */
#define ENABLE_LWS_DEBUG 0
/* ELF64 Process entry path */
lws_compare_and_swap64:
#ifdef CONFIG_64BIT
b,n lws_compare_and_swap
#else
/* If we are not a 64-bit kernel, then we don't
* have 64-bit input registers, and calling
* the 64-bit LWS CAS returns ENOSYS.
*/
b,n lws_exit_nosys
#endif
/* ELF32 Process entry path */
lws_compare_and_swap32:
#ifdef CONFIG_64BIT
/* Clip all the input registers */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
#endif
lws_compare_and_swap:
/* Load start of lock table */
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
/* Extract four bits from r26 and hash lock (Bits 4-7) */
extru %r26, 27, 4, %r20
/* Find the lock to use: the hash is one of 0 to 15,
   multiplied by 16 (keeping each lock 16-byte aligned)
   and added to the lock table base. */
shlw %r20, 4, %r20
add %r20, %r28, %r20
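/* Equivalently: lock = lws_lock_start + (((unsigned long) addr >> 4) & 0xf) * 16 */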
# if ENABLE_LWS_DEBUG
/*
DEBUG, check for deadlock!
If the thread register values are the same
then we were the one that locked it last and
this is a recursive call that will deadlock.
We *must* give up this call and fail.
*/
ldw 4(%sr2,%r20), %r28 /* Load thread register */
/* WARNING: If cr27 cycles to the same value we have problems */
mfctl %cr27, %r21 /* Get current thread register */
cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
b lws_exit /* Return error! */
ldo -EDEADLOCK(%r0), %r21
cas_lock:
cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */
ldo 1(%r0), %r28 /* 1st case */
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */
rsm PSW_SM_I, %r0 /* Disable interrupts */
/* COW breaks can cause contention on UP systems */
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
cas_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
/*
prev = *addr;
if ( prev == old )
*addr = new;
return prev;
*/
/* NOTES:
This all works because intr_do_signal
and schedule both check the return iasq
and see that we are on the kernel page,
so this process is never scheduled off
nor ever sent any signal of any sort;
thus it is wholly atomic from userspace's
perspective.
*/
cas_action:
#if defined CONFIG_SMP && ENABLE_LWS_DEBUG
/* DEBUG */
mfctl %cr27, %r1
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
1: ldw 0(%r26), %r28
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%r26)
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
#endif
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
3:
/* Error occurred on load or store */
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
ssm PSW_SM_I, %r0
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
nop
nop
nop
/* Two exception table entries, one for the load,
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
/***************************************************
New CAS implementation which uses pointers and variable size
information. The values pointed to by old and new MUST NOT change
while the CAS is performed. The lock only protects the value at %r26.
%r26 - Address to examine
%r25 - Pointer to the value to check (old)
%r24 - Pointer to the value to set (new)
%r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
%r28 - Return non-zero on failure
%r21 - Kernel error code
%r21 has the following meanings:
EAGAIN - CAS is busy, ldcw failed, try again.
EFAULT - Read or write failed.
Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
****************************************************/
/* ELF32 Process entry path */
lws_compare_and_swap_2:
#ifdef CONFIG_64BIT
/* Clip the input registers. We don't need to clip %r23 as we
only use it for word operations */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
#endif
/* Check the validity of the size argument */
subi,>>= 3, %r23, %r0
b,n lws_exit_nosys
/* Jump to the functions which will load the old and new values into
registers depending on their size */
shlw %r23, 2, %r29
blr %r29, %r0
nop
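/* Each size variant below occupies a fixed 8-instruction (32-byte)
 * slot: blr branches to PC + 8 + (%r29 << 3) and %r29 is 4 * size,
 * hence the nop padding at the end of the shorter variants. */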
/* 8bit load */
4: ldb 0(%r25), %r25
b cas2_lock_start
5: ldb 0(%r24), %r24
nop
nop
nop
nop
nop
/* 16bit load */
6: ldh 0(%r25), %r25
b cas2_lock_start
7: ldh 0(%r24), %r24
nop
nop
nop
nop
nop
/* 32bit load */
8: ldw 0(%r25), %r25
b cas2_lock_start
9: ldw 0(%r24), %r24
nop
nop
nop
nop
nop
/* 64bit load */
#ifdef CONFIG_64BIT
10: ldd 0(%r25), %r25
11: ldd 0(%r24), %r24
#else
/* Load old value into r22/r23 - high/low */
10: ldw 0(%r25), %r22
11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
12: flddx 0(%r24), %fr4
#endif
cas2_lock_start:
/* Load start of lock table */
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
/* Extract four bits from r26 and hash lock (Bits 4-7) */
extru %r26, 27, 4, %r20
/* Find the lock to use: the hash is one of 0 to 15,
   multiplied by 16 (keeping each lock 16-byte aligned)
   and added to the lock table base. */
shlw %r20, 4, %r20
add %r20, %r28, %r20
rsm PSW_SM_I, %r0 /* Disable interrupts */
/* COW breaks can cause contention on UP systems */
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */
cas2_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
/*
prev = *addr;
if ( prev == old )
*addr = new;
return prev;
*/
/* NOTES:
This all works because intr_do_signal
and schedule both check the return iasq
and see that we are on the kernel page,
so this process is never scheduled off
nor ever sent any signal of any sort;
thus it is wholly atomic from userspace's
perspective.
*/
cas2_action:
/* Jump to the correct function */
blr %r29, %r0
/* Set %r28 as non-zero for now */
ldo 1(%r0),%r28
/* 8bit CAS */
13: ldb 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
14: stb %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
15: ldh 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
16: sth %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
17: ldw 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
18: stw %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 64bit CAS */
#ifdef CONFIG_64BIT
19: ldd 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
20: std %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
19: ldw 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
20: ldw 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
21: fstdx %fr4, 0(%r26)
copy %r0, %r28
#endif
cas2_end:
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
22:
/* Error occurred on load or store */
/* Free lock */
sync
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
nop
nop
/* Exception table entries, for the load and store, return EFAULT.
Each of the entries must be relocated. */
ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
#ifndef CONFIG_64BIT
ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
#endif
/* Make sure nothing else is placed on this page */
.align PAGE_SIZE
END(linux_gateway_page)
ENTRY(end_linux_gateway_page)
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
#define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)
.section .rodata,"a"
.align 8
/* Light-weight-syscall table */
/* Start of lws table. */
ENTRY(lws_table)
LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */
LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */
END(lws_table)
/* End of lws table */
.align 8
ENTRY(sys_call_table)
.export sys_call_table,data
#include "syscall_table.S"
END(sys_call_table)
#ifdef CONFIG_64BIT
.align 8
ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
END(sys_call_table64)
#endif
/*
All light-weight-syscall atomic operations
will use this set of locks
NOTE: The lws_lock_start symbol must be
at least 16-byte aligned for safe use
with ldcw.
*/
.section .data
.align L1_CACHE_BYTES
ENTRY(lws_lock_start)
/* lws locks */
.rept 16
/* Keep locks aligned at 16-bytes */
.word 1
.word 0
.word 0
.word 0
.endr
END(lws_lock_start)
.previous
.end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,021
|
arch/parisc/kernel/hpmc.S
|
/*
* HPMC (High Priority Machine Check) handler.
*
* Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
* Copyright (C) 2000 Hewlett-Packard (John Marvin)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* This HPMC handler retrieves the HPMC pim data, resets IO and
* returns to the default trap handler with code set to 1 (HPMC).
* The default trap handler calls handle interruption, which
* does a stack and register dump. This at least allows kernel
* developers to get back to C code in virtual mode, where they
* have the option to examine and print values from memory that
* would help in debugging an HPMC caused by a software bug.
*
* There is more to do here:
*
* 1) On MP systems we need to synchronize processors
* before calling pdc/iodc.
* 2) We should be checking the system state and not
* returning to the fault handler if things are really
* bad.
*
*/
.level 1.1
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/psw.h>
#include <linux/linkage.h>
#include <linux/init.h>
/*
* stack for os_hpmc, the HPMC handler.
* buffer for IODC procedures (for the HPMC handler).
*
* IODC requires a 7K byte stack. The 16K block below leaves ample room for that plus os_hpmc.
*/
__PAGE_ALIGNED_BSS
.align 4096
hpmc_stack:
.block 16384
#define HPMC_IODC_BUF_SIZE 0x8000
__PAGE_ALIGNED_BSS
.align 4096
hpmc_iodc_buf:
.block HPMC_IODC_BUF_SIZE
.section .bss
.align 8
hpmc_raddr:
.block 128
#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
.section .bss
.align 8
ENTRY(hpmc_pim_data)
.block HPMC_PIM_DATA_SIZE
END(hpmc_pim_data)
.text
.import intr_save, code
.align 16
ENTRY(os_hpmc)
.os_hpmc:
/*
* registers modified:
*
* Using callee saves registers without saving them. The
* original values are in the pim dump if we need them.
*
* r2 (rp) return pointer
* r3 address of PDCE_PROC
* r4 scratch
* r5 scratch
* r23 (arg3) procedure arg
* r24 (arg2) procedure arg
* r25 (arg1) procedure arg
* r26 (arg0) procedure arg
* r30 (sp) stack pointer
*
* registers read:
*
* r26 contains address of PDCE_PROC on entry
* r28 (ret0) return value from procedure
*/
copy arg0, %r3 /* save address of PDCE_PROC */
/*
* disable nested HPMCs
*
* Increment os_hpmc checksum to invalidate it.
* Do this before turning the PSW M bit off.
*/
mfctl %cr14, %r4
ldw 52(%r4),%r5
addi 1,%r5,%r5
stw %r5,52(%r4)
/* MP_FIXME: synchronize all processors. */
/* Setup stack pointer. */
load32 PA(hpmc_stack),sp
ldo 128(sp),sp /* leave room for arguments */
/*
* Most PDC routines require that the M bit be off.
* So turn on the Q bit and turn off the M bit.
*/
ldi PSW_SM_Q,%r4 /* PSW Q on, PSW M off */
mtctl %r4,ipsw
mtctl %r0,pcsq
mtctl %r0,pcsq
load32 PA(os_hpmc_1),%r4
mtctl %r4,pcoq
ldo 4(%r4),%r4
mtctl %r4,pcoq
rfi
nop
os_hpmc_1:
/* Call PDC_PIM to get HPMC pim info */
/*
* Note that on some newer boxes, PDC_PIM must be called
* before PDC_IO if you want IO to be reset. PDC_PIM sets
* a flag that PDC_IO examines.
*/
ldo PDC_PIM(%r0), arg0
ldo PDC_PIM_HPMC(%r0),arg1 /* Transfer HPMC data */
load32 PA(hpmc_raddr),arg2
load32 PA(hpmc_pim_data),arg3
load32 HPMC_PIM_DATA_SIZE,%r4
stw %r4,-52(sp)
ldil L%PA(os_hpmc_2), rp
bv (r3) /* call pdce_proc */
ldo R%PA(os_hpmc_2)(rp), rp
os_hpmc_2:
comib,<> 0,ret0, os_hpmc_fail
/* Reset IO by calling the hversion dependent PDC_IO routine */
ldo PDC_IO(%r0),arg0
ldo 0(%r0),arg1 /* log IO errors */
ldo 0(%r0),arg2 /* reserved */
ldo 0(%r0),arg3 /* reserved */
stw %r0,-52(sp) /* reserved */
ldil L%PA(os_hpmc_3),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_3)(rp),rp
os_hpmc_3:
/* FIXME? Check for errors from PDC_IO (-1 might be OK) */
/*
* Initialize the IODC console device (HPA, SPA, path, etc.
* are stored on page 0).
*/
/*
* Load IODC into hpmc_iodc_buf by calling PDC_IODC.
* Note that PDC_IODC handles flushing the appropriate
* data and instruction cache lines.
*/
ldo PDC_IODC(%r0),arg0
ldo PDC_IODC_READ(%r0),arg1
load32 PA(hpmc_raddr),arg2
ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg3 /* console hpa */
ldo PDC_IODC_RI_INIT(%r0),%r4
stw %r4,-52(sp)
load32 PA(hpmc_iodc_buf),%r4
stw %r4,-56(sp)
load32 HPMC_IODC_BUF_SIZE,%r4
stw %r4,-60(sp)
ldil L%PA(os_hpmc_4),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_4)(rp),rp
os_hpmc_4:
comib,<> 0,ret0,os_hpmc_fail
/* Call the entry init (just loaded by PDC_IODC) */
ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg0 /* console hpa */
ldo ENTRY_INIT_MOD_DEV(%r0), arg1
ldw BOOT_CONSOLE_SPA_OFFSET(%r0),arg2 /* console spa */
depi 0,31,11,arg2 /* clear bits 21-31 */
ldo BOOT_CONSOLE_PATH_OFFSET(%r0),arg3 /* console path */
load32 PA(hpmc_raddr),%r4
stw %r4, -52(sp)
stw %r0, -56(sp) /* HV */
stw %r0, -60(sp) /* HV */
stw %r0, -64(sp) /* HV */
stw %r0, -68(sp) /* lang, must be zero */
load32 PA(hpmc_iodc_buf),%r5
ldil L%PA(os_hpmc_5),rp
bv (%r5)
ldo R%PA(os_hpmc_5)(rp),rp
os_hpmc_5:
comib,<> 0,ret0,os_hpmc_fail
/* Prepare to call intr_save */
/*
* Load kernel page directory (load into user also, since
* we don't intend to ever return to user land anyway)
*/
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
/* Clear sr4-sr7 */
mtsp %r0, %sr4
mtsp %r0, %sr5
mtsp %r0, %sr6
mtsp %r0, %sr7
tovirt_r1 %r30 /* make sp virtual */
rsm PSW_SM_Q,%r0 /* Clear Q bit */
ldi 1,%r8 /* Set trap code to "1" for HPMC */
load32 PA(intr_save),%r1
be 0(%sr7,%r1)
nop
os_hpmc_fail:
/*
* Reset the system
*
* Some systems may lockup from a broadcast reset, so try the
* hversion PDC_BROADCAST_RESET() first.
* MP_FIXME: reset all processors if more than one central bus.
*/
/* PDC_BROADCAST_RESET() */
ldo PDC_BROADCAST_RESET(%r0),arg0
ldo 0(%r0),arg1 /* do reset */
ldil L%PA(os_hpmc_6),rp
bv (%r3) /* call pdce_proc */
ldo R%PA(os_hpmc_6)(rp),rp
os_hpmc_6:
/*
* possible return values:
* -1 non-existent procedure
* -2 non-existent option
* -16 unaligned stack
*
* If call returned, do a broadcast reset.
*/
ldil L%0xfffc0000,%r4 /* IO_BROADCAST */
ldo 5(%r0),%r5
stw %r5,48(%r4) /* CMD_RESET to IO_COMMAND offset */
b .
nop
.align 16 /* make function length multiple of 16 bytes */
.os_hpmc_end:
__INITRODATA
.globl os_hpmc_size
.align 4
.type os_hpmc_size, @object
.size os_hpmc_size, 4
os_hpmc_size:
.word .os_hpmc_end-.os_hpmc
|
AirFortressIlikara/LS2K0300-linux-4.19
| 26,159
|
arch/parisc/kernel/perf_asm.S
|
/* low-level asm for "intrigue" (PA8500-8700 CPU perf counters)
*
* Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org>
* Copyright (C) 2001 Hewlett-Packard (Grant Grundler)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/assembly.h>
#include <linux/init.h>
#include <linux/linkage.h>
#ifdef CONFIG_64BIT
.level 2.0w
#endif /* CONFIG_64BIT */
#define MTDIAG_1(gr) .word 0x14201840 + gr*0x10000
#define MTDIAG_2(gr) .word 0x14401840 + gr*0x10000
#define MFDIAG_1(gr) .word 0x142008A0 + gr
#define MFDIAG_2(gr) .word 0x144008A0 + gr
#define STDIAG(dr) .word 0x14000AA0 + dr*0x200000
#define SFDIAG(dr) .word 0x14000BA0 + dr*0x200000
#define DR2_SLOW_RET 53
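;
; The MTDIAG/MFDIAG/STDIAG/SFDIAG macros hand-assemble the diagnose
; register moves, which the assembler does not know. Worked examples
; (plain arithmetic on the encodings above):
;   MFDIAG_2(24) = 0x144008a0 + 24         = 0x144008b8  ; mfdiag %dr2, %r24
;   MTDIAG_2(29) = 0x14401840 + 29*0x10000 = 0x145d1840  ; mtdiag %r29, %dr2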
;
; Enable the performance counters
;
; The coprocessor only needs to be enabled when
; starting/stopping the performance monitor with the pmenb/pmdis instructions.
;
.text
ENTRY(perf_intrigue_enable_perf_counters)
.proc
.callinfo frame=0,NO_CALLS
.entry
ldi 0x20,%r25 ; load up perfmon bit
mfctl ccr,%r26 ; get coprocessor register
or %r25,%r26,%r26 ; set bit
mtctl %r26,ccr ; turn on performance coprocessor
pmenb ; enable performance monitor
ssm 0,0 ; dummy op to ensure completion
sync ; follow ERS
andcm %r26,%r25,%r26 ; clear bit now
mtctl %r26,ccr ; turn off performance coprocessor
nop ; NOPs as specified in ERS
nop
nop
nop
nop
nop
nop
bve (%r2)
nop
.exit
.procend
ENDPROC(perf_intrigue_enable_perf_counters)
ENTRY(perf_intrigue_disable_perf_counters)
.proc
.callinfo frame=0,NO_CALLS
.entry
ldi 0x20,%r25 ; load up perfmon bit
mfctl ccr,%r26 ; get coprocessor register
or %r25,%r26,%r26 ; set bit
mtctl %r26,ccr ; turn on performance coprocessor
pmdis ; disable performance monitor
ssm 0,0 ; dummy op to ensure completion
andcm %r26,%r25,%r26 ; clear bit now
bve (%r2)
mtctl %r26,ccr ; turn off performance coprocessor
.exit
.procend
ENDPROC(perf_intrigue_disable_perf_counters)
;***********************************************************************
;*
;* Name: perf_rdr_shift_in_W
;*
;* Description:
;* This routine shifts data in from the RDR in arg0 and returns
;* the result in ret0. If the RDR is <= 64 bits in length, it
;* is shifted back in immediately. This is to compensate
;* for RDR10 which has bits that preclude PDC stack operations
;* when they are in the wrong state.
;*
;* Arguments:
;* arg0 : rdr to be read
;* arg1 : bit length of rdr
;*
;* Returns:
;* ret0 = next 64 bits of RDR data from the staging register
;* (right justified)
;*
;* Register usage:
;* arg0 : rdr to be read
;* arg1 : bit length of rdr
;* %r24 - original DR2 value
;* %r29 - DR2 | DR2_SLOW_RET
;* %r1 - scratch
;*
;***********************************************************************
ENTRY(perf_rdr_shift_in_W)
.proc
.callinfo frame=0,NO_CALLS
.entry
;
; read(shift in) the RDR.
;
; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
; shifting is done, from or to, remote diagnose registers.
;
depdi,z 1,DR2_SLOW_RET,1,%r29
MFDIAG_2 (24)
or %r24,%r29,%r29
MTDIAG_2 (29) ; set DR2_SLOW_RET
nop
nop
nop
nop
;
; Cacheline start (32-byte cacheline)
;
nop
nop
nop
extrd,u arg1,63,6,%r1 ; setup shift amount by bits to move
mtsar %r1
shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
blr %r1,%r0 ; branch to 8-instruction sequence
nop
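;
; Dispatch note: %r1 = 4 * RDR number, and blr scales its index by
; 8 bytes, so every RDR below owns a 32-byte (8-instruction, one
; cacheline) handler slot.  Each sequence is therefore padded with
; nops to exactly 8 instructions.
;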
;
; Cacheline start (32-byte cacheline)
;
;
; RDR 0 sequence
;
SFDIAG (0)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1) ; mtdiag %dr1, %r1
STDIAG (0)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 1 sequence
;
sync
ssm 0,0
SFDIAG (1)
ssm 0,0
MFDIAG_1 (28)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
nop
;
; RDR 2 read sequence
;
SFDIAG (2)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (2)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 3 read sequence
;
b,n perf_rdr_shift_in_W_leave
nop
nop
nop
nop
nop
nop
nop
;
; RDR 4 read sequence
;
sync
ssm 0,0
SFDIAG (4)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 5 read sequence
;
sync
ssm 0,0
SFDIAG (5)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 6 read sequence
;
sync
ssm 0,0
SFDIAG (6)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 7 read sequence
;
b,n perf_rdr_shift_in_W_leave
nop
nop
nop
nop
nop
nop
nop
;
; RDR 8 read sequence
;
b,n perf_rdr_shift_in_W_leave
nop
nop
nop
nop
nop
nop
nop
;
; RDR 9 read sequence
;
b,n perf_rdr_shift_in_W_leave
nop
nop
nop
nop
nop
nop
nop
;
; RDR 10 read sequence
;
SFDIAG (10)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (10)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 11 read sequence
;
SFDIAG (11)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (11)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 12 read sequence
;
b,n perf_rdr_shift_in_W_leave
nop
nop
nop
nop
nop
nop
nop
;
; RDR 13 read sequence
;
sync
ssm 0,0
SFDIAG (13)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 14 read sequence
;
SFDIAG (14)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (14)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 15 read sequence
;
sync
ssm 0,0
SFDIAG (15)
ssm 0,0
MFDIAG_1 (28)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
nop
;
; RDR 16 read sequence
;
sync
ssm 0,0
SFDIAG (16)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 17 read sequence
;
SFDIAG (17)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (17)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 18 read sequence
;
SFDIAG (18)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (18)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 19 read sequence
;
b,n perf_rdr_shift_in_W_leave
nop
nop
nop
nop
nop
nop
nop
;
; RDR 20 read sequence
;
sync
ssm 0,0
SFDIAG (20)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 21 read sequence
;
sync
ssm 0,0
SFDIAG (21)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 22 read sequence
;
sync
ssm 0,0
SFDIAG (22)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 23 read sequence
;
sync
ssm 0,0
SFDIAG (23)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 24 read sequence
;
sync
ssm 0,0
SFDIAG (24)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 25 read sequence
;
sync
ssm 0,0
SFDIAG (25)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 26 read sequence
;
SFDIAG (26)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (26)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 27 read sequence
;
SFDIAG (27)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (27)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 28 read sequence
;
sync
ssm 0,0
SFDIAG (28)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 29 read sequence
;
sync
ssm 0,0
SFDIAG (29)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_W_leave
ssm 0,0
nop
;
; RDR 30 read sequence
;
SFDIAG (30)
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (30)
ssm 0,0
b,n perf_rdr_shift_in_W_leave
;
; RDR 31 read sequence
;
sync
ssm 0,0
SFDIAG (31)
ssm 0,0
MFDIAG_1 (28)
nop
ssm 0,0
nop
;
; Fallthrough
;
perf_rdr_shift_in_W_leave:
bve (%r2)
.exit
MTDIAG_2 (24) ; restore DR2
.procend
ENDPROC(perf_rdr_shift_in_W)
;***********************************************************************
;*
;* Name: perf_rdr_shift_out_W
;*
;* Description:
;* This routine moves data to the RDRs. The 64-bit value in arg1
;* is moved into the staging register, and the STDIAG instruction
;* for the RDR # in arg0 then moves it into the RDR.
;*
;* Arguments:
;* arg0 = rdr number
;* arg1 = 64-bit value to write
;*
;* Returns:
;* None
;*
;* Register usage:
;* %r24 - DR2 | DR2_SLOW_RET
;* %r23 - original DR2 value
;*
;***********************************************************************
ENTRY(perf_rdr_shift_out_W)
.proc
.callinfo frame=0,NO_CALLS
.entry
;
; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
; shifting is done, from or to, the remote diagnose registers.
;
depdi,z 1,DR2_SLOW_RET,1,%r24
MFDIAG_2 (23)
or %r24,%r23,%r24
MTDIAG_2 (24) ; set DR2_SLOW_RET
MTDIAG_1 (25) ; data to the staging register
shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
blr %r1,%r0 ; branch to 8-instruction sequence
nop
;
; RDR 0 write sequence
;
sync ; RDR 0 write sequence
ssm 0,0
STDIAG (0)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 1 write sequence
;
sync
ssm 0,0
STDIAG (1)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 2 write sequence
;
sync
ssm 0,0
STDIAG (2)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 3 write sequence
;
sync
ssm 0,0
STDIAG (3)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 4 write sequence
;
sync
ssm 0,0
STDIAG (4)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 5 write sequence
;
sync
ssm 0,0
STDIAG (5)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 6 write sequence
;
sync
ssm 0,0
STDIAG (6)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 7 write sequence
;
sync
ssm 0,0
STDIAG (7)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 8 write sequence
;
sync
ssm 0,0
STDIAG (8)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 9 write sequence
;
sync
ssm 0,0
STDIAG (9)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 10 write sequence
;
sync
ssm 0,0
STDIAG (10)
STDIAG (26)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
ssm 0,0
nop
;
; RDR 11 write sequence
;
sync
ssm 0,0
STDIAG (11)
STDIAG (27)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
ssm 0,0
nop
;
; RDR 12 write sequence
;
sync
ssm 0,0
STDIAG (12)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 13 write sequence
;
sync
ssm 0,0
STDIAG (13)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 14 write sequence
;
sync
ssm 0,0
STDIAG (14)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 15 write sequence
;
sync
ssm 0,0
STDIAG (15)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 16 write sequence
;
sync
ssm 0,0
STDIAG (16)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 17 write sequence
;
sync
ssm 0,0
STDIAG (17)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 18 write sequence
;
sync
ssm 0,0
STDIAG (18)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 19 write sequence
;
sync
ssm 0,0
STDIAG (19)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 20 write sequence
;
sync
ssm 0,0
STDIAG (20)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 21 write sequence
;
sync
ssm 0,0
STDIAG (21)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 22 write sequence
;
sync
ssm 0,0
STDIAG (22)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 23 write sequence
;
sync
ssm 0,0
STDIAG (23)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 24 write sequence
;
sync
ssm 0,0
STDIAG (24)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 25 write sequence
;
sync
ssm 0,0
STDIAG (25)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 26 write sequence
;
sync
ssm 0,0
STDIAG (10)
STDIAG (26)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
ssm 0,0
nop
;
; RDR 27 write sequence
;
sync
ssm 0,0
STDIAG (11)
STDIAG (27)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
ssm 0,0
nop
;
; RDR 28 write sequence
;
sync
ssm 0,0
STDIAG (28)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 29 write sequence
;
sync
ssm 0,0
STDIAG (29)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 30 write sequence
;
sync
ssm 0,0
STDIAG (30)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
;
; RDR 31 write sequence
;
sync
ssm 0,0
STDIAG (31)
ssm 0,0
b,n perf_rdr_shift_out_W_leave
nop
ssm 0,0
nop
perf_rdr_shift_out_W_leave:
bve (%r2)
.exit
MTDIAG_2 (23) ; restore DR2
.procend
ENDPROC(perf_rdr_shift_out_W)
;***********************************************************************
;*
;* Name: perf_rdr_shift_in_U
;*
;* Description:
;* This routine shifts data in from the RDR in arg0 and returns
;* the result in ret0. If the RDR is <= 64 bits in length, it
;* is shifted back in immediately. This is to compensate
;* for RDR10 which has bits that preclude PDC stack operations
;* when they are in the wrong state.
;*
;* Arguments:
;* arg0 : rdr to be read
;* arg1 : bit length of rdr
;*
;* Returns:
;* ret0 = next 64 bits of rdr data from staging register
;*
;* Register usage:
;* arg0 : rdr to be read
;* arg1 : bit length of rdr
;* %r24 - original DR2 value
;* %r29 - DR2 | DR2_SLOW_RET
;* %r1 - scratch
;*
;***********************************************************************
ENTRY(perf_rdr_shift_in_U)
.proc
.callinfo frame=0,NO_CALLS
.entry
; read(shift in) the RDR.
;
; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
; shifting is done, from or to, remote diagnose registers.
depdi,z 1,DR2_SLOW_RET,1,%r29
MFDIAG_2 (24)
or %r24,%r29,%r29
MTDIAG_2 (29) ; set DR2_SLOW_RET
nop
nop
nop
nop
;
; Start of next 32-byte cacheline
;
nop
nop
nop
extrd,u arg1,63,6,%r1
mtsar %r1
shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
blr %r1,%r0 ; branch to 8-instruction sequence
nop
;
; Start of next 32-byte cacheline
;
SFDIAG (0) ; RDR 0 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (0)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (1) ; RDR 1 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (1)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
sync ; RDR 2 read sequence
ssm 0,0
SFDIAG (4)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 3 read sequence
ssm 0,0
SFDIAG (3)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 4 read sequence
ssm 0,0
SFDIAG (4)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 5 read sequence
ssm 0,0
SFDIAG (5)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 6 read sequence
ssm 0,0
SFDIAG (6)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 7 read sequence
ssm 0,0
SFDIAG (7)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
b,n perf_rdr_shift_in_U_leave
nop
nop
nop
nop
nop
nop
nop
SFDIAG (9) ; RDR 9 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (9)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (10) ; RDR 10 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (10)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (11) ; RDR 11 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (11)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (12) ; RDR 12 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (12)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (13) ; RDR 13 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (13)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (14) ; RDR 14 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (14)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (15) ; RDR 15 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (15)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
sync ; RDR 16 read sequence
ssm 0,0
SFDIAG (16)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
SFDIAG (17) ; RDR 17 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (17)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (18) ; RDR 18 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (18)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
b,n perf_rdr_shift_in_U_leave
nop
nop
nop
nop
nop
nop
nop
sync ; RDR 20 read sequence
ssm 0,0
SFDIAG (20)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 21 read sequence
ssm 0,0
SFDIAG (21)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 22 read sequence
ssm 0,0
SFDIAG (22)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 23 read sequence
ssm 0,0
SFDIAG (23)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 24 read sequence
ssm 0,0
SFDIAG (24)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
sync ; RDR 25 read sequence
ssm 0,0
SFDIAG (25)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
SFDIAG (26) ; RDR 26 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (26)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (27) ; RDR 27 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (27)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
sync ; RDR 28 read sequence
ssm 0,0
SFDIAG (28)
ssm 0,0
MFDIAG_1 (28)
b,n perf_rdr_shift_in_U_leave
ssm 0,0
nop
b,n perf_rdr_shift_in_U_leave
nop
nop
nop
nop
nop
nop
nop
SFDIAG (30) ; RDR 30 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (30)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
SFDIAG (31) ; RDR 31 read sequence
ssm 0,0
MFDIAG_1 (28)
shrpd ret0,%r0,%sar,%r1
MTDIAG_1 (1)
STDIAG (31)
ssm 0,0
b,n perf_rdr_shift_in_U_leave
nop
perf_rdr_shift_in_U_leave:
bve (%r2)
.exit
MTDIAG_2 (24) ; restore DR2
.procend
ENDPROC(perf_rdr_shift_in_U)
;***********************************************************************
;*
;* Name: perf_rdr_shift_out_U
;*
;* Description:
;* This routine moves data to the RDRs. The 64-bit value in arg1
;* is moved into the staging register, and the STDIAG instruction
;* for the RDR # in arg0 then moves it into the RDR.
;*
;* Arguments:
;* arg0 = rdr target
;* arg1 = 64-bit value to write
;*
;* Returns:
;* None
;*
;* Register usage:
;* arg0 = rdr target
;* arg1 = 64-bit value to write
;* %r24 - DR2 | DR2_SLOW_RET
;* %r23 - original DR2 value
;*
;***********************************************************************
ENTRY(perf_rdr_shift_out_U)
.proc
.callinfo frame=0,NO_CALLS
.entry
;
; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
; shifting is done, from or to, the remote diagnose registers.
;
depdi,z 1,DR2_SLOW_RET,1,%r24
MFDIAG_2 (23)
or %r24,%r23,%r24
MTDIAG_2 (24) ; set DR2_SLOW_RET
MTDIAG_1 (25) ; data to the staging register
shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
blr %r1,%r0 ; branch to 8-instruction sequence
nop
;
; 32-byte cacheline aligned
;
sync ; RDR 0 write sequence
ssm 0,0
STDIAG (0)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 1 write sequence
ssm 0,0
STDIAG (1)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 2 write sequence
ssm 0,0
STDIAG (2)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 3 write sequence
ssm 0,0
STDIAG (3)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 4 write sequence
ssm 0,0
STDIAG (4)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 5 write sequence
ssm 0,0
STDIAG (5)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 6 write sequence
ssm 0,0
STDIAG (6)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 7 write sequence
ssm 0,0
STDIAG (7)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 8 write sequence
ssm 0,0
STDIAG (8)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 9 write sequence
ssm 0,0
STDIAG (9)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 10 write sequence
ssm 0,0
STDIAG (10)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 11 write sequence
ssm 0,0
STDIAG (11)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 12 write sequence
ssm 0,0
STDIAG (12)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 13 write sequence
ssm 0,0
STDIAG (13)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 14 write sequence
ssm 0,0
STDIAG (14)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 15 write sequence
ssm 0,0
STDIAG (15)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 16 write sequence
ssm 0,0
STDIAG (16)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 17 write sequence
ssm 0,0
STDIAG (17)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 18 write sequence
ssm 0,0
STDIAG (18)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 19 write sequence
ssm 0,0
STDIAG (19)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 20 write sequence
ssm 0,0
STDIAG (20)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 21 write sequence
ssm 0,0
STDIAG (21)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 22 write sequence
ssm 0,0
STDIAG (22)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 23 write sequence
ssm 0,0
STDIAG (23)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 24 write sequence
ssm 0,0
STDIAG (24)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 25 write sequence
ssm 0,0
STDIAG (25)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 26 write sequence
ssm 0,0
STDIAG (26)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 27 write sequence
ssm 0,0
STDIAG (27)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 28 write sequence
ssm 0,0
STDIAG (28)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 29 write sequence
ssm 0,0
STDIAG (29)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 30 write sequence
ssm 0,0
STDIAG (30)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
sync ; RDR 31 write sequence
ssm 0,0
STDIAG (31)
ssm 0,0
b,n perf_rdr_shift_out_U_leave
nop
ssm 0,0
nop
perf_rdr_shift_out_U_leave:
bve (%r2)
.exit
MTDIAG_2 (23) ; restore DR2
.procend
ENDPROC(perf_rdr_shift_out_U)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,546
|
arch/parisc/kernel/vmlinux.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Kernel link layout for various "sections"
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
* Copyright (C) 2006-2013 Helge Deller <deller@gmx.de>
*/
/*
* Put page table entries (swapper_pg_dir) as the first thing in .bss. This
* will ensure that it has .bss alignment (PAGE_SIZE).
*/
#define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \
*(.data..vm0.pgd) \
*(.data..vm0.pte)
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif
ENTRY(parisc_kernel_start)
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
SECTIONS
{
. = KERNEL_BINARY_TEXT_START;
__init_begin = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(8)
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(PAGE_SIZE)
/* we have to discard exit text and such at runtime, not link time */
.exit.text :
{
EXIT_TEXT
}
.exit.data :
{
EXIT_DATA
}
PERCPU_SECTION(8)
. = ALIGN(HUGEPAGE_SIZE);
__init_end = .;
/* freed after init ends here */
_text = .; /* Text and read-only data */
_stext = .;
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.text.do_softirq)
*(.text.sys_exit)
*(.text.do_sigaltstack)
*(.text.do_fork)
*(.text.div)
*($$*) /* millicode routines */
*(.text.*)
*(.fixup)
*(.lock.text) /* out-of-line lock text */
*(.gnu.warning)
}
. = ALIGN(PAGE_SIZE);
_etext = .;
/* End of text section */
/* Start of data section */
_sdata = .;
/* Architecturally we need to keep __gp below 0x1000000 and thus
* in front of RO_DATA_SECTION() which stores lots of tracepoint
* and ftrace symbols. */
#ifdef CONFIG_64BIT
. = ALIGN(16);
/* Linkage tables */
.opd : {
__start_opd = .;
*(.opd)
__end_opd = .;
} PROVIDE (__gp = .);
.plt : {
*(.plt)
}
.dlt : {
*(.dlt)
}
#endif
RO_DATA_SECTION(8)
/* RO because of BUILDTIME_EXTABLE_SORT */
EXCEPTION_TABLE(8)
NOTES
/* unwind info */
.PARISC.unwind : {
__start___unwind = .;
*(.PARISC.unwind)
__stop___unwind = .;
}
/* writeable */
/* Make sure this is page aligned so
* that we can properly leave these
* as writable
*/
. = ALIGN(HUGEPAGE_SIZE);
data_start = .;
/* Data */
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
/* PA-RISC locks require 16-byte alignment */
. = ALIGN(16);
.data..lock_aligned : {
*(.data..lock_aligned)
}
/* End of data section */
_edata = .;
/* BSS */
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
. = ALIGN(HUGEPAGE_SIZE);
_end = . ;
STABS_DEBUG
.note 0 : { *(.note) }
/* Sections to be discarded */
DISCARDS
/DISCARD/ : {
#ifdef CONFIG_64BIT
/* temporary hack until binutils is fixed to not emit these
* for static binaries
*/
*(.interp)
*(.dynsym)
*(.dynstr)
*(.dynamic)
*(.hash)
*(.gnu.hash)
#endif
}
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,525
|
arch/parisc/kernel/real2.S
|
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
*
*/
#include <asm/pdc.h>
#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <linux/linkage.h>
.section .bss
.export pdc_result
.export pdc_result2
.align 8
pdc_result:
.block ASM_PDC_RESULT_SIZE
pdc_result2:
.block ASM_PDC_RESULT_SIZE
.export real_stack
.export real32_stack
.export real64_stack
.align 64
real_stack:
real32_stack:
real64_stack:
.block 8192
#define N_SAVED_REGS 9
save_cr_space:
.block REG_SZ * N_SAVED_REGS
save_cr_end:
/************************ 32-bit real-mode calls ***********************/
/* This can be called in both narrow and wide kernels */
.text
/* unsigned long real32_call_asm(unsigned int *sp,
* unsigned int *arg0p,
* unsigned int iodc_fn)
* sp is value of stack pointer to adopt before calling PDC (virt)
* arg0p points to where saved arg values may be found
* iodc_fn is the IODC function to call
*/
ENTRY_CFI(real32_call_asm)
STREG %rp, -RP_OFFSET(%sp) /* save RP */
#ifdef CONFIG_64BIT
callee_save
ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
STREG %r27, -1*REG_SZ(%sp)
STREG %r29, -2*REG_SZ(%sp)
#endif
STREG %sp, -REG_SZ(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
/* save iodc_fn */
copy %arg2, %r31
/* load up the arg registers from the saved arg area */
/* 32-bit calling convention passes first 4 args in registers */
ldw 0(%arg1), %arg0 /* note overwriting arg0 */
ldw -8(%arg1), %arg2
ldw -12(%arg1), %arg3
ldw -4(%arg1), %arg1 /* obviously must do this one last! */
tophys_r1 %sp
b,l rfi_virt2real,%r2
nop
b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
nop
#ifdef CONFIG_64BIT
rsm PSW_SM_W, %r0 /* go narrow */
#endif
load32 PA(ric_ret), %r2
bv 0(%r31)
nop
ric_ret:
#ifdef CONFIG_64BIT
ssm PSW_SM_W, %r0 /* go wide */
#endif
/* restore CRs before going virtual in case we page fault */
b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
nop
b,l rfi_real2virt,%r2
nop
tovirt_r1 %sp
LDREG -REG_SZ(%sp), %sp /* restore SP */
#ifdef CONFIG_64BIT
LDREG -1*REG_SZ(%sp), %r27
LDREG -2*REG_SZ(%sp), %r29
ldo -2*REG_SZ(%sp), %sp
callee_rest
#endif
LDREG -RP_OFFSET(%sp), %rp /* restore RP */
bv 0(%rp)
nop
ENDPROC_CFI(real32_call_asm)
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
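/* The "!" separates two instructions on one line.  PUSH_CR stores
 * with post-increment (",ma" and a positive displacement), POP_CR
 * loads with pre-decrement (",mb" and a negative one), so
 * save_cr_space is effectively used as a stack: the registers pushed
 * cr24..cr15 below are popped in reverse order by
 * restore_control_regs.
 */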
.text
ENTRY_CFI(save_control_regs)
load32 PA(save_cr_space), %r28
PUSH_CR(%cr24, %r28)
PUSH_CR(%cr25, %r28)
PUSH_CR(%cr26, %r28)
PUSH_CR(%cr27, %r28)
PUSH_CR(%cr28, %r28)
PUSH_CR(%cr29, %r28)
PUSH_CR(%cr30, %r28)
PUSH_CR(%cr31, %r28)
PUSH_CR(%cr15, %r28)
bv 0(%r2)
nop
ENDPROC_CFI(save_control_regs)
ENTRY_CFI(restore_control_regs)
load32 PA(save_cr_end), %r26
POP_CR(%cr15, %r26)
POP_CR(%cr31, %r26)
POP_CR(%cr30, %r26)
POP_CR(%cr29, %r26)
POP_CR(%cr28, %r26)
POP_CR(%cr27, %r26)
POP_CR(%cr26, %r26)
POP_CR(%cr25, %r26)
POP_CR(%cr24, %r26)
bv 0(%r2)
nop
ENDPROC_CFI(restore_control_regs)
/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
* more general-purpose use by the several places which need RFIs
*/
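/* Sketch of the mechanism: with the PSW Q bit off, the interruption
 * address queues can be written by hand -- %cr17 twice for the IIASQ
 * head and tail, %cr18 twice for the IIAOQ (tail = head + 4) -- and
 * the target PSW goes into %cr22.  The rfi then "returns" to the
 * queued address with the new PSW, completing the mode switch.
 */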
.text
.align 128
ENTRY_CFI(rfi_virt2real)
#if !defined(BOOTLOADER)
/* switch to real mode... */
rsm PSW_SM_I,%r0
load32 PA(rfi_v2r_1), %r1
nop
nop
nop
nop
nop
rsm PSW_SM_Q,%r0 /* disable Q & I bits to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 REAL_MODE_PSW, %r1
mtctl %r1, %cr22
rfi
nop
nop
nop
nop
nop
nop
nop
nop
rfi_v2r_1:
tophys_r1 %r2
#endif /* defined(BOOTLOADER) */
bv 0(%r2)
nop
ENDPROC_CFI(rfi_virt2real)
.text
.align 128
ENTRY_CFI(rfi_real2virt)
#if !defined(BOOTLOADER)
rsm PSW_SM_I,%r0
load32 (rfi_r2v_1), %r1
nop
nop
nop
nop
nop
rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %cr18 /* IIAOQ head */
ldo 4(%r1), %r1
mtctl %r1, %cr18 /* IIAOQ tail */
load32 KERNEL_PSW, %r1
mtctl %r1, %cr22
rfi
nop
nop
nop
nop
nop
nop
nop
nop
rfi_r2v_1:
tovirt_r1 %r2
#endif /* defined(BOOTLOADER) */
bv 0(%r2)
nop
ENDPROC_CFI(rfi_real2virt)
#ifdef CONFIG_64BIT
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
.text
/* unsigned long real64_call_asm(unsigned long *sp,
* unsigned long *arg0p,
* unsigned long fn)
* sp is value of stack pointer to adopt before calling PDC (virt)
* arg0p points to where saved arg values may be found
* fn is the function to call
*/
ENTRY_CFI(real64_call_asm)
std %rp, -0x10(%sp) /* save RP */
std %sp, -8(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
/* save fn */
copy %arg2, %r31
/* set up the new ap */
ldo 64(%arg1), %r29
/* load up the arg registers from the saved arg area */
/* 64-bit calling convention passes first 8 args in registers */
ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
ldd 2*REG_SZ(%arg1), %arg2
ldd 3*REG_SZ(%arg1), %arg3
ldd 4*REG_SZ(%arg1), %r22
ldd 5*REG_SZ(%arg1), %r21
ldd 6*REG_SZ(%arg1), %r20
ldd 7*REG_SZ(%arg1), %r19
ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
tophys_r1 %sp
b,l rfi_virt2real,%r2
nop
b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
nop
load32 PA(r64_ret), %r2
bv 0(%r31)
nop
r64_ret:
/* restore CRs before going virtual in case we page fault */
b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
nop
b,l rfi_real2virt,%r2
nop
tovirt_r1 %sp
ldd -8(%sp), %sp /* restore SP */
ldd -0x10(%sp), %rp /* restore RP */
bv 0(%rp)
nop
ENDPROC_CFI(real64_call_asm)
#endif
.text
/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.
*/
ENTRY_CFI(__canonicalize_funcptr_for_compare)
#ifdef CONFIG_64BIT
bve (%r2)
#else
bv %r0(%r2)
#endif
copy %r26,%r28
ENDPROC_CFI(__canonicalize_funcptr_for_compare)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,798
|
arch/parisc/lib/lusercopy.S
|
/*
* User Space Access Routines
*
* Copyright (C) 2000-2002 Hewlett-Packard (John Marvin)
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
* Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2017 Helge Deller <deller@gmx.de>
* Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* These routines still have plenty of room for optimization
* (word & doubleword load/store, dual issue, store hints, etc.).
*/
/*
* The following routines assume that space register 3 (sr3) contains
* the space id associated with the current user's address space.
*/
.text
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
/*
* get_sr gets the appropriate space value into
* sr1 for kernel/user space access, depending
* on the flag stored in the task structure.
*/
.macro get_sr
mfctl %cr30,%r1
ldw TI_SEGMENT(%r1),%r22
mfsp %sr3,%r1
or,<> %r22,%r0,%r0
copy %r0,%r1
mtsp %r1,%sr1
.endm
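/*
 * The "or,<>" above nullifies the following copy when the segment
 * value in %r22 is non-zero (user segment), so %sr1 keeps the user
 * space id read from %sr3; for a zero (kernel) segment the copy
 * executes and %sr1 is set to space 0 for kernel-space accesses.
 */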
/*
* unsigned long lclear_user(void *to, unsigned long n)
*
* Returns 0 for success.
* otherwise, returns number of bytes not transferred.
*/
ENTRY_CFI(lclear_user)
comib,=,n 0,%r25,$lclu_done
get_sr
$lclu_loop:
addib,<> -1,%r25,$lclu_loop
1: stbs,ma %r0,1(%sr1,%r26)
$lclu_done:
bv %r0(%r2)
copy %r25,%r28
2: b $lclu_done
ldo 1(%r25),%r25
ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
ENDPROC_CFI(lclear_user)
/*
* long lstrnlen_user(char *s, long n)
*
* Returns 0 if exception before zero byte or reaching N,
* N+1 if N would be exceeded,
* else strlen + 1 (i.e. includes zero byte).
*/
ENTRY_CFI(lstrnlen_user)
comib,= 0,%r25,$lslen_nzero
copy %r26,%r24
get_sr
1: ldbs,ma 1(%sr1,%r26),%r1
$lslen_loop:
comib,=,n 0,%r1,$lslen_done
addib,<> -1,%r25,$lslen_loop
2: ldbs,ma 1(%sr1,%r26),%r1
$lslen_done:
bv %r0(%r2)
sub %r26,%r24,%r28
$lslen_nzero:
b $lslen_done
ldo 1(%r26),%r26 /* special case for N == 0 */
3: b $lslen_done
copy %r24,%r26 /* reset r26 so 0 is returned on fault */
ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
ENDPROC_CFI(lstrnlen_user)
/*
* unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
*
* Inputs:
* - sr1 already contains space of source region
* - sr2 already contains space of destination region
*
* Returns:
* - number of bytes that could not be copied.
* On success, this will be zero.
*
* This code is based on a C-implementation of a copy routine written by
* Randolph Chung, which in turn was derived from the glibc.
*
* Several strategies are tried to get the best performance for various
* conditions. In the optimal case, we copy with loops that move 32 or 16 bytes
* at a time using general registers. Unaligned copies are handled either by
* aligning the destination and then using shift-and-write method, or in a few
* cases by falling back to a byte-at-a-time copy.
*
* Testing with various alignments and buffer sizes shows that this code is
* often >10x faster than a simple byte-at-a-time copy, even for strangely
* aligned operands. It is interesting to note that the glibc version of memcpy
* (written in C) is actually quite fast already. This routine is able to beat
* it by 30-40% for aligned copies because of the loop unrolling, but in some
* cases the glibc version is still slightly faster. This lends credibility
* to the idea that gcc can generate very good code as long as we are careful.
*
* Possible optimizations:
* - add cache prefetching
* - try not to use the post-increment address modifiers; they may create
* additional interlocks. The assumption is that those were only efficient
* on old machines (pre-PA8000 processors).
*/
dst = arg0
src = arg1
len = arg2
end = arg3
t1 = r19
t2 = r20
t3 = r21
t4 = r22
srcspc = sr1
dstspc = sr2
t0 = r1
a1 = t1
a2 = t2
a3 = t3
a0 = t4
save_src = ret0
save_dst = ret1
save_len = r31
ENTRY_CFI(pa_memcpy)
/* Last destination address */
add dst,len,end
/* short copy with less than 16 bytes? */
cmpib,COND(>>=),n 15,len,.Lbyte_loop
/* same alignment? */
xor src,dst,t0
extru t0,31,2,t1
cmpib,<>,n 0,t1,.Lunaligned_copy
#ifdef CONFIG_64BIT
/* only do 64-bit copies if we can get aligned. */
extru t0,31,3,t1
cmpib,<>,n 0,t1,.Lalign_loop32
/* loop until we are 64-bit aligned */
.Lalign_loop64:
extru dst,31,3,t1
cmpib,=,n 0,t1,.Lcopy_loop_16_start
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop64
ldo -1(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_loop_16_start:
ldi 31,t0
.Lcopy_loop_16:
cmpb,COND(>>=),n t0,len,.Lword_loop
10: ldd 0(srcspc,src),t1
11: ldd 8(srcspc,src),t2
ldo 16(src),src
12: std,ma t1,8(dstspc,dst)
13: std,ma t2,8(dstspc,dst)
14: ldd 0(srcspc,src),t1
15: ldd 8(srcspc,src),t2
ldo 16(src),src
16: std,ma t1,8(dstspc,dst)
17: std,ma t2,8(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
b .Lcopy_loop_16
ldo -32(len),len
.Lword_loop:
cmpib,COND(>>=),n 3,len,.Lbyte_loop
20: ldw,ma 4(srcspc,src),t1
21: stw,ma t1,4(dstspc,dst)
b .Lword_loop
ldo -4(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
#endif /* CONFIG_64BIT */
/* loop until we are 32-bit aligned */
.Lalign_loop32:
extru dst,31,2,t1
cmpib,=,n 0,t1,.Lcopy_loop_8
20: ldb,ma 1(srcspc,src),t1
21: stb,ma t1,1(dstspc,dst)
b .Lalign_loop32
ldo -1(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_loop_8:
cmpib,COND(>>=),n 15,len,.Lbyte_loop
10: ldw 0(srcspc,src),t1
11: ldw 4(srcspc,src),t2
12: stw,ma t1,4(dstspc,dst)
13: stw,ma t2,4(dstspc,dst)
14: ldw 8(srcspc,src),t1
15: ldw 12(srcspc,src),t2
ldo 16(src),src
16: stw,ma t1,4(dstspc,dst)
17: stw,ma t2,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
b .Lcopy_loop_8
ldo -16(len),len
.Lbyte_loop:
cmpclr,COND(<>) len,%r0,%r0
b,n .Lcopy_done
20: ldb 0(srcspc,src),t1
ldo 1(src),src
21: stb,ma t1,1(dstspc,dst)
b .Lbyte_loop
ldo -1(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_done:
bv %r0(%r2)
sub end,dst,ret0
/* src and dst are not aligned the same way. */
/* need to go the hard way */
.Lunaligned_copy:
/* align until dst is 32bit-word-aligned */
extru dst,31,2,t1
cmpib,=,n 0,t1,.Lcopy_dstaligned
20: ldb 0(srcspc,src),t1
ldo 1(src),src
21: stb,ma t1,1(dstspc,dst)
b .Lunaligned_copy
ldo -1(len),len
ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
.Lcopy_dstaligned:
/* store src, dst and len in safe place */
copy src,save_src
copy dst,save_dst
copy len,save_len
/* len must now give the number of words to copy */
SHRREG len,2,len
/*
* Copy from an unaligned src to an aligned dst using shifts.
* Handles 4 words per loop.
*/
depw,z src,28,2,t0
subi 32,t0,t0
mtsar t0
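/* %sar = 32 - 8*(src & 3), i.e. 32 minus the misalignment in bits;
 * each shrpw below concatenates two consecutive source words and
 * shifts them through %sar to yield one aligned destination word.
 */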
extru len,31,2,t0
cmpib,= 2,t0,.Lcase2
/* Make src aligned by rounding it down. */
depi 0,31,2,src
cmpiclr,<> 3,t0,%r0
b,n .Lcase3
cmpiclr,<> 1,t0,%r0
b,n .Lcase1
.Lcase0:
cmpb,COND(=) %r0,len,.Lcda_finish
nop
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
b,n .Ldo3
.Lcase1:
1: ldw,ma 4(srcspc,src), a2
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
ldo -1(len),len
cmpb,COND(=),n %r0,len,.Ldo0
.Ldo4:
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
shrpw a2, a3, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo3:
1: ldw,ma 4(srcspc,src), a1
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
shrpw a3, a0, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo2:
1: ldw,ma 4(srcspc,src), a2
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
shrpw a0, a1, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Ldo1:
1: ldw,ma 4(srcspc,src), a3
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
shrpw a1, a2, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
ldo -4(len),len
cmpb,COND(<>) %r0,len,.Ldo4
nop
.Ldo0:
shrpw a2, a3, %sar, t0
1: stw,ma t0, 4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
.Lcda_rdfault:
.Lcda_finish:
/* calculate new src, dst and len and jump to byte-copy loop */
sub dst,save_dst,t0
add save_src,t0,src
b .Lbyte_loop
sub save_len,t0,len
.Lcase3:
1: ldw,ma 4(srcspc,src), a0
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: ldw,ma 4(srcspc,src), a1
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
b .Ldo2
ldo 1(len),len
.Lcase2:
1: ldw,ma 4(srcspc,src), a1
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
1: ldw,ma 4(srcspc,src), a2
ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
b .Ldo1
ldo 2(len),len
/* fault exception fixup handlers: */
#ifdef CONFIG_64BIT
.Lcopy16_fault:
b .Lcopy_done
10: std,ma t1,8(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
#endif
.Lcopy8_fault:
b .Lcopy_done
10: stw,ma t1,4(dstspc,dst)
ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
ENDPROC_CFI(pa_memcpy)
.end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,690
|
arch/parisc/boot/compressed/head.S
|
/*
* Startup glue code to uncompress the kernel
*
* (C) 2017 Helge Deller <deller@gmx.de>
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/psw.h>
#include <asm/pdc.h>
#include <asm/assembly.h>
#include "sizes.h"
#define BOOTADDR(x) (x)
#ifndef CONFIG_64BIT
.import $global$ /* forward declaration */
#endif /*!CONFIG_64BIT*/
__HEAD
ENTRY(startup)
.level PA_ASM_LEVEL
#define PSW_W_SM 0x200
#define PSW_W_BIT 36
;! nuke the W bit, saving original value
.level 2.0
rsm PSW_W_SM, %r1
.level 1.1
extrw,u %r1, PSW_W_BIT-32, 1, %r1
copy %r1, %arg0
/* Make sure sr4-sr7 are set to zero for the kernel address space */
mtsp %r0,%sr4
mtsp %r0,%sr5
mtsp %r0,%sr6
mtsp %r0,%sr7
/* Clear BSS */
.import _bss,data
.import _ebss,data
load32 BOOTADDR(_bss),%r3
load32 BOOTADDR(_ebss),%r4
ldo FRAME_SIZE(%r4),%sp /* stack at end of bss */
$bss_loop:
cmpb,<<,n %r3,%r4,$bss_loop
stw,ma %r0,4(%r3)
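/* The stw,ma in the delay slot stores a zero word and post-
 * increments %r3 by 4; the ",n" on the compare-and-branch nullifies
 * that store on the final, not-taken iteration, so exactly
 * [_bss, _ebss) is cleared.
 */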
/* Initialize the global data pointer */
loadgp
/* arg0..arg4 were set by palo. */
copy %arg1, %r6 /* command line */
copy %arg2, %r7 /* rd-start */
copy %arg3, %r8 /* rd-end */
load32 BOOTADDR(decompress_kernel),%r3
#ifdef CONFIG_64BIT
.level PA_ASM_LEVEL
ssm PSW_W_SM, %r0 /* set W-bit */
depdi 0, 31, 32, %r3
#endif
load32 BOOTADDR(startup_continue), %r2
bv,n 0(%r3)
startup_continue:
#ifdef CONFIG_64BIT
.level PA_ASM_LEVEL
rsm PSW_W_SM, %r0 /* clear W-bit */
#endif
load32 KERNEL_BINARY_TEXT_START, %arg0 /* free mem */
copy %r6, %arg1 /* command line */
copy %r7, %arg2 /* rd-start */
copy %r8, %arg3 /* rd-end */
bv,n 0(%ret0)
END(startup)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,691
|
arch/parisc/boot/compressed/vmlinux.lds.S
|
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include "sizes.h"
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif
ENTRY(startup)
SECTIONS
{
/* palo loads at 0x60000 */
/* loaded kernel will move to 0x10000 */
. = 0xe0000; /* should not overwrite palo code */
.head.text : {
_head = . ;
HEAD_TEXT
_ehead = . ;
}
/* keep __gp below 0x1000000 */
#ifdef CONFIG_64BIT
. = ALIGN(16);
/* Linkage tables */
.opd : {
__start_opd = .;
*(.opd)
__end_opd = .;
} PROVIDE (__gp = .);
.plt : {
*(.plt)
}
.dlt : {
*(.dlt)
}
#endif
_startcode_end = .;
/* bootloader code and data start at or beyond the area of the extracted kernel */
. = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
/* align on next page boundary */
. = ALIGN(4096);
.text : {
_text = .; /* Text */
*(.text)
*(.text.*)
_etext = . ;
}
. = ALIGN(8);
.data : {
_data = . ;
*(.data)
*(.data.*)
_edata = . ;
}
. = ALIGN(8);
.rodata : {
_rodata = . ;
*(.rodata) /* read-only data */
*(.rodata.*)
_erodata = . ;
}
. = ALIGN(8);
.rodata.compressed : {
*(.rodata.compressed)
}
. = ALIGN(8);
.bss : {
_bss = . ;
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(4096);
_ebss = .;
}
STABS_DEBUG
.note 0 : { *(.note) }
/* Sections to be discarded */
DISCARDS
/DISCARD/ : {
#ifdef CONFIG_64BIT
/* temporary hack until binutils is fixed to not emit these
* for static binaries
*/
*(.PARISC.unwind) /* no unwind data */
*(.interp)
*(.dynsym)
*(.dynstr)
*(.dynamic)
*(.hash)
*(.gnu.hash)
#endif
}
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,351
|
arch/sparc/power/hibernate_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* hibernate_asm.S: Hibernation support specific to sparc64.
*
* Copyright (C) 2013 Kirill V Tkhai (tkhai@yandex.ru)
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/cpudata.h>
#include <asm/page.h>
ENTRY(swsusp_arch_suspend)
save %sp, -128, %sp
save %sp, -128, %sp
flushw
setuw saved_context, %g3
/* Save window regs */
rdpr %cwp, %g2
stx %g2, [%g3 + SC_REG_CWP]
rdpr %wstate, %g2
stx %g2, [%g3 + SC_REG_WSTATE]
stx %fp, [%g3 + SC_REG_FP]
/* Save state regs */
rdpr %tick, %g2
stx %g2, [%g3 + SC_REG_TICK]
rdpr %pstate, %g2
stx %g2, [%g3 + SC_REG_PSTATE]
/* Save global regs */
stx %g4, [%g3 + SC_REG_G4]
stx %g5, [%g3 + SC_REG_G5]
stx %g6, [%g3 + SC_REG_G6]
call swsusp_save
nop
mov %o0, %i0
restore
mov %o0, %i0
ret
restore
ENTRY(swsusp_arch_resume)
/* Write restore_pblist to %l0 */
sethi %hi(restore_pblist), %l0
ldx [%l0 + %lo(restore_pblist)], %l0
call __flush_tlb_all
nop
/* Write PAGE_OFFSET to %g7 */
sethi %hi(PAGE_OFFSET), %g7
ldx [%g7 + %lo(PAGE_OFFSET)], %g7
setuw (PAGE_SIZE-8), %g3
/* Use MMU Bypass */
rd %asi, %g1
wr %g0, ASI_PHYS_USE_EC, %asi
ba fill_itlb
nop
pbe_loop:
cmp %l0, %g0
be restore_ctx
sub %l0, %g7, %l0
ldxa [%l0 ] %asi, %l1 /* address */
ldxa [%l0 + 8] %asi, %l2 /* orig_address */
/* phys addr */
sub %l1, %g7, %l1
sub %l2, %g7, %l2
mov %g3, %l3 /* PAGE_SIZE-8 */
copy_loop:
ldxa [%l1 + %l3] ASI_PHYS_USE_EC, %g2
stxa %g2, [%l2 + %l3] ASI_PHYS_USE_EC
cmp %l3, %g0
bne copy_loop
sub %l3, 8, %l3
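/* One page is copied physically, 8 bytes at a time, with %l3
 * walking down from PAGE_SIZE-8 to 0; ASI_PHYS_USE_EC bypasses
 * the MMU, so neither page needs a mapping at this point.
 */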
/* next pbe */
ba pbe_loop
ldxa [%l0 + 16] %asi, %l0
restore_ctx:
setuw saved_context, %g3
/* Restore window regs */
wrpr %g0, 0, %canrestore
wrpr %g0, 0, %otherwin
wrpr %g0, 6, %cansave
wrpr %g0, 0, %cleanwin
ldxa [%g3 + SC_REG_CWP] %asi, %g2
wrpr %g2, %cwp
ldxa [%g3 + SC_REG_WSTATE] %asi, %g2
wrpr %g2, %wstate
ldxa [%g3 + SC_REG_FP] %asi, %fp
/* Restore state regs */
ldxa [%g3 + SC_REG_PSTATE] %asi, %g2
wrpr %g2, %pstate
ldxa [%g3 + SC_REG_TICK] %asi, %g2
wrpr %g2, %tick
/* Restore global regs */
ldxa [%g3 + SC_REG_G4] %asi, %g4
ldxa [%g3 + SC_REG_G5] %asi, %g5
ldxa [%g3 + SC_REG_G6] %asi, %g6
wr %g1, %g0, %asi
restore
restore
wrpr %g0, 14, %pil
retl
mov %g0, %o0
fill_itlb:
ba pbe_loop
wrpr %g0, 15, %pil
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,351
|
arch/sparc/vdso/vdso-layout.lds.S
|
/*
* Linker script for vDSO. This is an ELF shared object prelinked to
* its virtual address, and with only one read-only segment.
* This script controls its layout.
*/
#if defined(BUILD_VDSO64)
# define SHDR_SIZE 64
#elif defined(BUILD_VDSO32)
# define SHDR_SIZE 40
#else
# error unknown VDSO target
#endif
#define NUM_FAKE_SHDRS 7
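/*
 * The room reserved for the fake section table below works out to
 * NUM_FAKE_SHDRS * SHDR_SIZE bytes: 7 * 64 = 448 for the 64-bit
 * vDSO and 7 * 40 = 280 for the 32-bit one.
 */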
SECTIONS
{
/*
* User/kernel shared data is before the vDSO. This may be a little
* uglier than putting it after the vDSO, but it avoids issues with
* non-allocatable things that dangle past the end of the PT_LOAD
* segment. The page size is 8192 for both 64-bit and 32-bit vdso binaries.
*/
vvar_start = . -8192;
vvar_data = vvar_start;
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.dynamic : { *(.dynamic) } :text :dynamic
.rodata : {
*(.rodata*)
*(.data*)
*(.sdata*)
*(.got.plt) *(.got)
*(.gnu.linkonce.d.*)
*(.bss*)
*(.dynbss*)
*(.gnu.linkonce.b.*)
/*
* Ideally this would live in a C file: kept in here for
* compatibility with x86-64.
*/
VDSO_FAKE_SECTION_TABLE_START = .;
. = . + NUM_FAKE_SHDRS * SHDR_SIZE;
VDSO_FAKE_SECTION_TABLE_END = .;
} :text
.fake_shstrtab : { *(.fake_shstrtab) } :text
.note : { *(.note.*) } :text :note
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
/*
* Text is well-separated from actual data: there's plenty of
* stuff that isn't used at runtime in between.
*/
.text : { *(.text*) } :text =0x90909090,
.vread_tick_patch : {
vread_tick_patch_start = .;
*(.vread_tick_patch)
vread_tick_patch_end = .;
}
/DISCARD/ : {
*(.discard)
*(.discard.*)
*(__bug_table)
}
}
/*
* Very old versions of ld do not recognize this name token; use the constant.
*/
#define PT_GNU_EH_FRAME 0x6474e550
/*
* We must supply the ELF program headers explicitly to get just one
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
*/
PHDRS
{
text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,964
|
arch/sparc/kernel/ktlb.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
*
* Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
* Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
.text
.align 32
kvmap_itlb:
/* g6: TAG TARGET */
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_IMMU, %g4
/* The kernel executes in context zero, therefore we do not
* need to clear the context ID bits out of %g4 here.
*/
/* sun4v_itlb_miss branches here with the missing virtual
* address already loaded into %g4
*/
kvmap_itlb_4v:
/* Catch kernel NULL pointer calls. */
sethi %hi(PAGE_SIZE), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_itlb_longpath
nop
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
kvmap_itlb_tsb_miss:
sethi %hi(LOW_OBP_ADDRESS), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_itlb_vmalloc_addr
mov 0x1, %g5
sllx %g5, 32, %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_itlb_obp
nop
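/* %g5 = 1 << 32: only addresses in [LOW_OBP_ADDRESS, 4GB) are
 * handed to the OBP translation lookup; anything else that missed
 * the TSB falls through to the kernel page table walk below.
 */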
kvmap_itlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
TSB_LOCK_TAG(%g1, %g2, %g7)
TSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
kvmap_itlb_load:
661: stxa %g5, [%g0] ASI_ITLB_DATA_IN
retry
.section .sun4v_2insn_patch, "ax"
.word 661b
nop
nop
.previous
/* For sun4v the ASI_ITLB_DATA_IN store and the retry
* instruction get nop'd out and we get here to branch
* to the sun4v tlb load code. The registers are setup
* as follows:
*
* %g4: vaddr
* %g5: PTE
* %g6: TAG
*
* The sun4v TLB load wants the PTE in %g3 so we fix that
* up here.
*/
ba,pt %xcc, sun4v_itlb_load
mov %g5, %g3
kvmap_itlb_longpath:
661: rdpr %pstate, %g5
wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
.section .sun4v_2insn_patch, "ax"
.word 661b
SET_GL(1)
nop
.previous
rdpr %tpc, %g5
ba,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_ITLB, %g4
kvmap_itlb_obp:
OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
TSB_LOCK_TAG(%g1, %g2, %g7)
TSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_itlb_load
nop
kvmap_dtlb_obp:
OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
TSB_LOCK_TAG(%g1, %g2, %g7)
TSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_dtlb_load
nop
kvmap_linear_early:
sethi %hi(kern_linear_pte_xor), %g7
ldx [%g7 + %lo(kern_linear_pte_xor)], %g2
ba,pt %xcc, kvmap_dtlb_tsb4m_load
xor %g2, %g4, %g5
.align 32
kvmap_dtlb_tsb4m_load:
TSB_LOCK_TAG(%g1, %g2, %g7)
TSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_dtlb_load
nop
kvmap_dtlb:
/* %g6: TAG TARGET */
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g4
/* The kernel executes in context zero, therefore we do not
* need to clear the context ID bits out of %g4 here.
*/
/* sun4v_dtlb_miss branches here with the missing virtual
* address already loaded into %g4
*/
kvmap_dtlb_4v:
brgez,pn %g4, kvmap_dtlb_nonlinear
nop
#ifdef CONFIG_DEBUG_PAGEALLOC
/* Index through the base page size TSB even for linear
* mappings when using page allocation debugging.
*/
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
/* Linear mapping TSB lookup failed. Fallthrough to kernel
* page table based lookup.
*/
.globl kvmap_linear_patch
kvmap_linear_patch:
ba,a,pt %xcc, kvmap_linear_early
kvmap_dtlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
TSB_LOCK_TAG(%g1, %g2, %g7)
TSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
kvmap_dtlb_load:
661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
retry
.section .sun4v_2insn_patch, "ax"
.word 661b
nop
nop
.previous
/* For sun4v the ASI_DTLB_DATA_IN store and the retry
* instruction get nop'd out and we get here to branch
* to the sun4v tlb load code. The registers are setup
* as follows:
*
* %g4: vaddr
* %g5: PTE
* %g6: TAG
*
* The sun4v TLB load wants the PTE in %g3 so we fix that
* up here.
*/
ba,pt %xcc, sun4v_dtlb_load
mov %g5, %g3
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
ba,a,pt %xcc, kvmap_dtlb_load
#endif
kvmap_dtlb_nonlinear:
/* Catch kernel NULL pointer derefs. */
sethi %hi(PAGE_SIZE), %g5
cmp %g4, %g5
bleu,pn %xcc, kvmap_dtlb_longpath
nop
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Do not use the TSB for vmemmap. */
sethi %hi(VMEMMAP_BASE), %g5
ldx [%g5 + %lo(VMEMMAP_BASE)], %g5
cmp %g4,%g5
bgeu,pn %xcc, kvmap_vmemmap
nop
#endif
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
kvmap_dtlb_tsbmiss:
sethi %hi(MODULES_VADDR), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_dtlb_longpath
sethi %hi(VMALLOC_END), %g5
ldx [%g5 + %lo(VMALLOC_END)], %g5
cmp %g4, %g5
bgeu,pn %xcc, kvmap_dtlb_longpath
nop
kvmap_check_obp:
sethi %hi(LOW_OBP_ADDRESS), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_dtlb_vmalloc_addr
mov 0x1, %g5
sllx %g5, 32, %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_dtlb_obp
nop
ba,pt %xcc, kvmap_dtlb_vmalloc_addr
nop
kvmap_dtlb_longpath:
661: rdpr %pstate, %g5
wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
.section .sun4v_2insn_patch, "ax"
.word 661b
SET_GL(1)
ldxa [%g0] ASI_SCRATCHPAD, %g5
.previous
rdpr %tl, %g3
cmp %g3, 1
661: mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g5
.section .sun4v_2insn_patch, "ax"
.word 661b
ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
nop
.previous
/* The kernel executes in context zero, therefore we do not
* need to clear the context ID bits out of %g5 here.
*/
be,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_DTLB, %g4
ba,pt %xcc, winfix_trampoline
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,348
|
arch/sparc/kernel/etrap_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* etrap.S: Sparc trap window preparation for entry into the
* Linux kernel.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>
/* Registers to not touch at all. */
#define t_psr l0 /* Set by caller */
#define t_pc l1 /* Set by caller */
#define t_npc l2 /* Set by caller */
#define t_wim l3 /* Set by caller */
#define t_twinmask l4 /* Set at beginning of this entry routine. */
#define t_kstack l5 /* Set right before pt_regs frame is built */
#define t_retpc l6 /* If you change this, change winmacro.h header file */
#define t_systable l7 /* Never touch this, could be the syscall table ptr. */
#define curptr g6 /* Set after pt_regs frame is built */
.text
.align 4
/* SEVEN WINDOW PATCH INSTRUCTIONS */
.globl tsetup_7win_patch1, tsetup_7win_patch2
.globl tsetup_7win_patch3, tsetup_7win_patch4
.globl tsetup_7win_patch5, tsetup_7win_patch6
tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
tsetup_7win_patch2: and %g2, 0x7f, %g2
tsetup_7win_patch3: and %g2, 0x7f, %g2
tsetup_7win_patch4: and %g1, 0x7f, %g1
tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
tsetup_7win_patch6: and %g2, 0x7f, %g2
/* END OF PATCH INSTRUCTIONS */
/* At trap time, interrupts and all generic traps do the
* following:
*
* rd %psr, %l0
* b some_handler
* rd %wim, %l3
* nop
*
* Then 'some_handler' if it needs a trap frame (ie. it has
* to call c-code and the trap cannot be handled in-window)
* then it does the SAVE_ALL macro in entry.S which does
*
* sethi %hi(trap_setup), %l4
* jmpl %l4 + %lo(trap_setup), %l6
* nop
*/
/* 2 3 4 window number
* -----
* O T S mnemonic
*
* O == Current window before trap
* T == Window entered when trap occurred
* S == Window we will need to save if (1<<T) == %wim
*
* Before execution gets here, it must be guaranteed that
* %l0 contains trap time %psr, %l1 and %l2 contain the
* trap pc and npc, and %l3 contains the trap time %wim.
*/
.globl trap_setup, tsetup_patch1, tsetup_patch2
.globl tsetup_patch3, tsetup_patch4
.globl tsetup_patch5, tsetup_patch6
trap_setup:
/* Calculate mask of trap window. See if from user
* or kernel and branch conditionally.
*/
mov 1, %t_twinmask
andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
be trap_setup_from_user ! nope, from user mode
sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
/* From kernel, allocate more kernel stack and
* build a pt_regs trap frame.
*/
sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
/* See if we are in the trap window. */
andcc %t_twinmask, %t_wim, %g0
bne trap_setup_kernel_spill ! in trap window, clean up
nop
/* Trap from kernel with a window available.
* Just do it...
*/
jmpl %t_retpc + 0x8, %g0 ! return to caller
mov %t_kstack, %sp ! jump onto new stack
trap_setup_kernel_spill:
ld [%curptr + TI_UWINMASK], %g1
orcc %g0, %g1, %g0
bne trap_setup_user_spill ! there are some user windows, yuck
/* Spill from kernel, but only kernel windows, adjust
* %wim and go.
*/
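/* In C-like terms the instruction sequence below computes (values
 * for an 8-window machine; 7-window Sparcs get the patched variants):
 *
 *	new_wim = ((wim >> 1) | (wim << 7)) & 0xff;
 *
 * e.g. a hypothetical wim of 0x01 rotates to 0x80.
 */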
srl %t_wim, 0x1, %g2 ! begin computation of new %wim
tsetup_patch1:
sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
or %t_wim, %g2, %g2
tsetup_patch2:
and %g2, 0xff, %g2 ! patched on 7 window Sparcs
save %g0, %g0, %g0
/* Set new %wim value */
wr %g2, 0x0, %wim
/* Save the kernel window onto the corresponding stack. */
STORE_WINDOW(sp)
restore %g0, %g0, %g0
jmpl %t_retpc + 0x8, %g0 ! return to caller
mov %t_kstack, %sp ! and onto new kernel stack
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
trap_setup_from_user:
/* We can't use %curptr yet. */
LOAD_CURRENT(t_kstack, t_twinmask)
sethi %hi(STACK_OFFSET), %t_twinmask
or %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
add %t_kstack, %t_twinmask, %t_kstack
mov 1, %t_twinmask
sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
/* Build pt_regs frame. */
STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
#if 0
/* If we're sure every task_struct is THREAD_SIZE aligned,
we can speed this up. */
sethi %hi(STACK_OFFSET), %curptr
or %curptr, %lo(STACK_OFFSET), %curptr
sub %t_kstack, %curptr, %curptr
#else
sethi %hi(~(THREAD_SIZE - 1)), %curptr
and %t_kstack, %curptr, %curptr
#endif
/* Clear current_thread_info->w_saved */
st %g0, [%curptr + TI_W_SAVED]
/* See if we are in the trap window. */
andcc %t_twinmask, %t_wim, %g0
bne trap_setup_user_spill ! yep we are
orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
/* Trap from user, but not into the invalid window.
* Calculate new umask. The way this works is,
* any window from the %wim at trap time until
* the window right before the one we are in now,
* is a user window. A diagram:
*
* 7 6 5 4 3 2 1 0 window number
* ---------------
* I L T mnemonic
*
* Window 'I' is the invalid window in our example,
* window 'L' is the window the user was in when
* the trap occurred, window T is the trap window
* we are in now. So therefore, windows 5, 4 and
* 3 are user windows. The following sequence
* computes the user winmask to represent this.
*/
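/* C-like sketch of the computation below, using hypothetical
 * 8-window values %wim == (1 << 6) and trap window T == 2:
 *
 *	g2 = (1 << 6) - (1 << 2);	! 0x3c, windows 5..2
 *	g2 &= ~(1 << 2);		! drop the trap window -> 0x38
 *
 * which marks windows 5, 4 and 3 as user windows, as in the
 * example above.  The bneg path below handles the case where
 * the subtraction wraps around.
 */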
subcc %t_wim, %t_twinmask, %g2
bneg,a 1f
sub %g2, 0x1, %g2
1:
andn %g2, %t_twinmask, %g2
tsetup_patch3:
and %g2, 0xff, %g2 ! patched on 7win Sparcs
st %g2, [%curptr + TI_UWINMASK] ! store new umask
jmpl %t_retpc + 0x8, %g0 ! return to caller
mov %t_kstack, %sp ! and onto kernel stack
trap_setup_user_spill:
/* A spill occurred from either kernel or user mode
* and there exist some user windows to deal with.
* A mask of the currently valid user windows
* is in %g1 upon entry to here.
*/
tsetup_patch4:
and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
srl %t_wim, 0x1, %g2 ! compute new %wim
tsetup_patch5:
sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
or %t_wim, %g2, %g2 ! %g2 is new %wim
tsetup_patch6:
and %g2, 0xff, %g2 ! patched on 7win Sparcs
andn %g1, %g2, %g1 ! clear this bit in %g1
st %g1, [%curptr + TI_UWINMASK]
save %g0, %g0, %g0
wr %g2, 0x0, %wim
/* Call MMU-architecture dependent stack checking
* routine.
*/
b tsetup_srmmu_stackchk
andcc %sp, 0x7, %g0
/* Architecture specific stack checking routines. When either
* of these routines is called, the globals are free to use
* as they have been safely stashed on the new kernel stack
* pointer. Thus the definition below for simplicity.
*/
#define glob_tmp g1
.globl tsetup_srmmu_stackchk
tsetup_srmmu_stackchk:
/* Check results of callers andcc %sp, 0x7, %g0 */
bne trap_setup_user_stack_is_bolixed
sethi %hi(PAGE_OFFSET), %glob_tmp
cmp %glob_tmp, %sp
bleu,a 1f
LEON_PI( lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
SUN_PI_( lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
trap_setup_user_stack_is_bolixed:
/* From user/kernel into invalid window w/bad user
* stack. Save bad user stack, and return to caller.
*/
SAVE_BOLIXED_USER_STACK(curptr, g3)
restore %g0, %g0, %g0
jmpl %t_retpc + 0x8, %g0
mov %t_kstack, %sp
1:
/* Clear the fault status and turn on the no_fault bit. */
or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
/* Dump the registers and cross fingers. */
STORE_WINDOW(sp)
/* Clear the no_fault bit and check the status. */
andn %glob_tmp, 0x2, %glob_tmp
LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
mov AC_M_SFAR, %glob_tmp
LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
mov AC_M_SFSR, %glob_tmp
LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) ! save away status of winstore
andcc %glob_tmp, 0x2, %g0 ! did we fault?
bne trap_setup_user_stack_is_bolixed ! failure
nop
restore %g0, %g0, %g0
jmpl %t_retpc + 0x8, %g0
mov %t_kstack, %sp
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,341
|
arch/sparc/kernel/sys32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* sys32.S: I-cache tricks for 32-bit compatibility layer simple
* conversions.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <asm/errno.h>
/* NOTE: call as jump breaks return stack, we have to avoid that */
.text
.globl sys32_mmap2
sys32_mmap2:
sethi %hi(sys_mmap), %g1
jmpl %g1 + %lo(sys_mmap), %g0
sllx %o5, 12, %o5
.align 32
.globl sys32_socketcall
sys32_socketcall: /* %o0=call, %o1=args */
cmp %o0, 1
bl,pn %xcc, do_einval
cmp %o0, 18
bg,pn %xcc, do_einval
sub %o0, 1, %o0
sllx %o0, 5, %o0
sethi %hi(__socketcall_table_begin), %g2
or %g2, %lo(__socketcall_table_begin), %g2
jmpl %g2 + %o0, %g0
nop
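/* Dispatch arithmetic, for illustration: every stub in the table
 * below is exactly 32 bytes (8 instructions), so for a hypothetical
 * call number of 3 (connect) the offset is
 *
 *	(3 - 1) << 5 == 64
 *
 * which lands on do_sys_connect, the third entry.
 */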
do_einval:
retl
mov -EINVAL, %o0
.align 32
__socketcall_table_begin:
/* Each entry is exactly 32 bytes. */
do_sys_socket: /* sys_socket(int, int, int) */
1: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socket), %g1
2: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_socket), %g0
3: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
4: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_bind), %g1
5: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_bind), %g0
6: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
7: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_connect), %g1
8: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_connect), %g0
9: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_listen: /* sys_listen(int, int) */
10: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_listen), %g1
jmpl %g1 + %lo(sys_listen), %g0
11: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
12: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept), %g1
13: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_accept), %g0
14: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
15: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getsockname), %g1
16: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getsockname), %g0
17: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
18: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getpeername), %g1
19: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getpeername), %g0
20: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
21: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socketpair), %g1
22: ldswa [%o1 + 0x8] %asi, %o2
23: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_socketpair), %g0
24: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
25: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_send), %g1
26: lduwa [%o1 + 0x8] %asi, %o2
27: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_send), %g0
28: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
29: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recv), %g1
30: lduwa [%o1 + 0x8] %asi, %o2
31: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_recv), %g0
32: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
33: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_sendto), %g1
34: lduwa [%o1 + 0x8] %asi, %o2
35: lduwa [%o1 + 0xc] %asi, %o3
36: lduwa [%o1 + 0x10] %asi, %o4
37: ldswa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_sendto), %g0
38: lduwa [%o1 + 0x4] %asi, %o1
do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
39: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recvfrom), %g1
40: lduwa [%o1 + 0x8] %asi, %o2
41: lduwa [%o1 + 0xc] %asi, %o3
42: lduwa [%o1 + 0x10] %asi, %o4
43: lduwa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_recvfrom), %g0
44: lduwa [%o1 + 0x4] %asi, %o1
do_sys_shutdown: /* sys_shutdown(int, int) */
45: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_shutdown), %g1
jmpl %g1 + %lo(sys_shutdown), %g0
46: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
47: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_setsockopt), %g1
48: ldswa [%o1 + 0x8] %asi, %o2
49: lduwa [%o1 + 0xc] %asi, %o3
50: ldswa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(compat_sys_setsockopt), %g0
51: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
52: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_getsockopt), %g1
53: ldswa [%o1 + 0x8] %asi, %o2
54: lduwa [%o1 + 0xc] %asi, %o3
55: lduwa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(compat_sys_getsockopt), %g0
56: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
57: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_sendmsg), %g1
58: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_sendmsg), %g0
59: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
60: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_recvmsg), %g1
61: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_recvmsg), %g0
62: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
63: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept4), %g1
64: lduwa [%o1 + 0x8] %asi, %o2
65: ldswa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_accept4), %g0
66: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
.section __ex_table,"a"
.align 4
.word 1b, __retl_efault, 2b, __retl_efault
.word 3b, __retl_efault, 4b, __retl_efault
.word 5b, __retl_efault, 6b, __retl_efault
.word 7b, __retl_efault, 8b, __retl_efault
.word 9b, __retl_efault, 10b, __retl_efault
.word 11b, __retl_efault, 12b, __retl_efault
.word 13b, __retl_efault, 14b, __retl_efault
.word 15b, __retl_efault, 16b, __retl_efault
.word 17b, __retl_efault, 18b, __retl_efault
.word 19b, __retl_efault, 20b, __retl_efault
.word 21b, __retl_efault, 22b, __retl_efault
.word 23b, __retl_efault, 24b, __retl_efault
.word 25b, __retl_efault, 26b, __retl_efault
.word 27b, __retl_efault, 28b, __retl_efault
.word 29b, __retl_efault, 30b, __retl_efault
.word 31b, __retl_efault, 32b, __retl_efault
.word 33b, __retl_efault, 34b, __retl_efault
.word 35b, __retl_efault, 36b, __retl_efault
.word 37b, __retl_efault, 38b, __retl_efault
.word 39b, __retl_efault, 40b, __retl_efault
.word 41b, __retl_efault, 42b, __retl_efault
.word 43b, __retl_efault, 44b, __retl_efault
.word 45b, __retl_efault, 46b, __retl_efault
.word 47b, __retl_efault, 48b, __retl_efault
.word 49b, __retl_efault, 50b, __retl_efault
.word 51b, __retl_efault, 52b, __retl_efault
.word 53b, __retl_efault, 54b, __retl_efault
.word 55b, __retl_efault, 56b, __retl_efault
.word 57b, __retl_efault, 58b, __retl_efault
.word 59b, __retl_efault, 60b, __retl_efault
.word 61b, __retl_efault, 62b, __retl_efault
.word 63b, __retl_efault, 64b, __retl_efault
.word 65b, __retl_efault, 66b, __retl_efault
.previous
|
AirFortressIlikara/LS2K0300-linux-4.19
| 15,258
|
arch/sparc/kernel/cherrs.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* These get patched into the trap table at boot time
* once we know we have a cheetah processor.
*/
.globl cheetah_fecc_trap_vector
.type cheetah_fecc_trap_vector,#function
cheetah_fecc_trap_vector:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
andn %g1, DCU_DC | DCU_IC, %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
sethi %hi(cheetah_fast_ecc), %g2
jmpl %g2 + %lo(cheetah_fast_ecc), %g0
mov 0, %g1
.size cheetah_fecc_trap_vector,.-cheetah_fecc_trap_vector
.globl cheetah_fecc_trap_vector_tl1
.type cheetah_fecc_trap_vector_tl1,#function
cheetah_fecc_trap_vector_tl1:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
andn %g1, DCU_DC | DCU_IC, %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
sethi %hi(cheetah_fast_ecc), %g2
jmpl %g2 + %lo(cheetah_fast_ecc), %g0
mov 1, %g1
.size cheetah_fecc_trap_vector_tl1,.-cheetah_fecc_trap_vector_tl1
.globl cheetah_cee_trap_vector
.type cheetah_cee_trap_vector,#function
cheetah_cee_trap_vector:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
andn %g1, DCU_IC, %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
sethi %hi(cheetah_cee), %g2
jmpl %g2 + %lo(cheetah_cee), %g0
mov 0, %g1
.size cheetah_cee_trap_vector,.-cheetah_cee_trap_vector
.globl cheetah_cee_trap_vector_tl1
.type cheetah_cee_trap_vector_tl1,#function
cheetah_cee_trap_vector_tl1:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
andn %g1, DCU_IC, %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
sethi %hi(cheetah_cee), %g2
jmpl %g2 + %lo(cheetah_cee), %g0
mov 1, %g1
.size cheetah_cee_trap_vector_tl1,.-cheetah_cee_trap_vector_tl1
.globl cheetah_deferred_trap_vector
.type cheetah_deferred_trap_vector,#function
cheetah_deferred_trap_vector:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1;
andn %g1, DCU_DC | DCU_IC, %g1;
stxa %g1, [%g0] ASI_DCU_CONTROL_REG;
membar #Sync;
sethi %hi(cheetah_deferred_trap), %g2
jmpl %g2 + %lo(cheetah_deferred_trap), %g0
mov 0, %g1
.size cheetah_deferred_trap_vector,.-cheetah_deferred_trap_vector
.globl cheetah_deferred_trap_vector_tl1
.type cheetah_deferred_trap_vector_tl1,#function
cheetah_deferred_trap_vector_tl1:
membar #Sync;
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1;
andn %g1, DCU_DC | DCU_IC, %g1;
stxa %g1, [%g0] ASI_DCU_CONTROL_REG;
membar #Sync;
sethi %hi(cheetah_deferred_trap), %g2
jmpl %g2 + %lo(cheetah_deferred_trap), %g0
mov 1, %g1
.size cheetah_deferred_trap_vector_tl1,.-cheetah_deferred_trap_vector_tl1
/* Cheetah+ specific traps. These are for the new I/D cache parity
* error traps. The first argument to cheetah_plus_parity_handler
* is encoded as follows:
*
* Bit0: 0=dcache,1=icache
* Bit1: 0=recoverable,1=unrecoverable
*/
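/* For example, the "mov 0x1, %o0" in do_cheetah_plus_insn_parity
 * below encodes a recoverable I-cache error, while the 0x2 and 0x3
 * arguments in the do_*_tl1_fatal paths encode unrecoverable
 * D-cache and I-cache errors respectively.
 */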
.globl cheetah_plus_dcpe_trap_vector
.type cheetah_plus_dcpe_trap_vector,#function
cheetah_plus_dcpe_trap_vector:
membar #Sync
sethi %hi(do_cheetah_plus_data_parity), %g7
jmpl %g7 + %lo(do_cheetah_plus_data_parity), %g0
nop
nop
nop
nop
nop
.size cheetah_plus_dcpe_trap_vector,.-cheetah_plus_dcpe_trap_vector
.type do_cheetah_plus_data_parity,#function
do_cheetah_plus_data_parity:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov 0x0, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
ba,a,pt %xcc, rtrap_irq
.size do_cheetah_plus_data_parity,.-do_cheetah_plus_data_parity
.globl cheetah_plus_dcpe_trap_vector_tl1
.type cheetah_plus_dcpe_trap_vector_tl1,#function
cheetah_plus_dcpe_trap_vector_tl1:
membar #Sync
wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
sethi %hi(do_dcpe_tl1), %g3
jmpl %g3 + %lo(do_dcpe_tl1), %g0
nop
nop
nop
nop
.size cheetah_plus_dcpe_trap_vector_tl1,.-cheetah_plus_dcpe_trap_vector_tl1
.globl cheetah_plus_icpe_trap_vector
.type cheetah_plus_icpe_trap_vector,#function
cheetah_plus_icpe_trap_vector:
membar #Sync
sethi %hi(do_cheetah_plus_insn_parity), %g7
jmpl %g7 + %lo(do_cheetah_plus_insn_parity), %g0
nop
nop
nop
nop
nop
.size cheetah_plus_icpe_trap_vector,.-cheetah_plus_icpe_trap_vector
.type do_cheetah_plus_insn_parity,#function
do_cheetah_plus_insn_parity:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov 0x1, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
ba,a,pt %xcc, rtrap_irq
.size do_cheetah_plus_insn_parity,.-do_cheetah_plus_insn_parity
.globl cheetah_plus_icpe_trap_vector_tl1
.type cheetah_plus_icpe_trap_vector_tl1,#function
cheetah_plus_icpe_trap_vector_tl1:
membar #Sync
wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
sethi %hi(do_icpe_tl1), %g3
jmpl %g3 + %lo(do_icpe_tl1), %g0
nop
nop
nop
nop
.size cheetah_plus_icpe_trap_vector_tl1,.-cheetah_plus_icpe_trap_vector_tl1
/* If we take one of these traps when tl >= 1, then we
* jump to interrupt globals. If some trap level above us
* was also using interrupt globals, we cannot recover.
* We may use all interrupt global registers except %g6.
*/
.globl do_dcpe_tl1
.type do_dcpe_tl1,#function
do_dcpe_tl1:
rdpr %tl, %g1 ! Save original trap level
mov 1, %g2 ! Setup TSTATE checking loop
sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit
1: wrpr %g2, %tl ! Set trap level to check
rdpr %tstate, %g4 ! Read TSTATE for this level
andcc %g4, %g3, %g0 ! Interrupt globals in use?
bne,a,pn %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable
wrpr %g1, %tl ! Restore original trap level
add %g2, 1, %g2 ! Next trap level
cmp %g2, %g1 ! Hit them all yet?
ble,pt %icc, 1b ! Not yet
nop
wrpr %g1, %tl ! Restore original trap level
do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
sethi %hi(dcache_parity_tl1_occurred), %g2
lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
add %g1, 1, %g1
stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
/* Reset D-cache parity */
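/* C-like sketch of the two loops below (write_utag() and
 * dcache_data[] are illustrative names, not real kernel APIs):
 *
 *	for (addr = (64 * 1024) - 32; addr >= 0; addr -= 32) {
 *		write_utag(addr, addr >> 14);
 *		for (off = 32 - 8; off >= 0; off -= 8)
 *			dcache_data[addr + off] = 0;
 *	}
 */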
sethi %hi(1 << 16), %g1 ! D-cache size
mov (1 << 5), %g2 ! D-cache line size
sub %g1, %g2, %g1 ! Move down 1 cacheline
1: srl %g1, 14, %g3 ! Compute UTAG
membar #Sync
stxa %g3, [%g1] ASI_DCACHE_UTAG
membar #Sync
sub %g2, 8, %g3 ! 64-bit data word within line
2: membar #Sync
stxa %g0, [%g1 + %g3] ASI_DCACHE_DATA
membar #Sync
subcc %g3, 8, %g3 ! Next 64-bit data word
bge,pt %icc, 2b
nop
subcc %g1, %g2, %g1 ! Next cacheline
bge,pt %icc, 1b
nop
ba,a,pt %xcc, dcpe_icpe_tl1_common
do_dcpe_tl1_fatal:
sethi %hi(1f), %g7
ba,pt %xcc, etraptl1
1: or %g7, %lo(1b), %g7
mov 0x2, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
ba,a,pt %xcc, rtrap
.size do_dcpe_tl1,.-do_dcpe_tl1
.globl do_icpe_tl1
.type do_icpe_tl1,#function
do_icpe_tl1:
rdpr %tl, %g1 ! Save original trap level
mov 1, %g2 ! Setup TSTATE checking loop
sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit
1: wrpr %g2, %tl ! Set trap level to check
rdpr %tstate, %g4 ! Read TSTATE for this level
andcc %g4, %g3, %g0 ! Interrupt globals in use?
bne,a,pn %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable
wrpr %g1, %tl ! Restore original trap level
add %g2, 1, %g2 ! Next trap level
cmp %g2, %g1 ! Hit them all yet?
ble,pt %icc, 1b ! Not yet
nop
wrpr %g1, %tl ! Restore original trap level
do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
sethi %hi(icache_parity_tl1_occurred), %g2
lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
add %g1, 1, %g1
stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
/* Flush I-cache */
sethi %hi(1 << 15), %g1 ! I-cache size
mov (1 << 5), %g2 ! I-cache line size
sub %g1, %g2, %g1
1: or %g1, (2 << 3), %g3
stxa %g0, [%g3] ASI_IC_TAG
membar #Sync
subcc %g1, %g2, %g1
bge,pt %icc, 1b
nop
ba,a,pt %xcc, dcpe_icpe_tl1_common
do_icpe_tl1_fatal:
sethi %hi(1f), %g7
ba,pt %xcc, etraptl1
1: or %g7, %lo(1b), %g7
mov 0x3, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
ba,a,pt %xcc, rtrap
.size do_icpe_tl1,.-do_icpe_tl1
.type dcpe_icpe_tl1_common,#function
dcpe_icpe_tl1_common:
/* Flush D-cache, re-enable D/I caches in DCU and finally
* retry the trapping instruction.
*/
sethi %hi(1 << 16), %g1 ! D-cache size
mov (1 << 5), %g2 ! D-cache line size
sub %g1, %g2, %g1
1: stxa %g0, [%g1] ASI_DCACHE_TAG
membar #Sync
subcc %g1, %g2, %g1
bge,pt %icc, 1b
nop
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
or %g1, (DCU_DC | DCU_IC), %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
retry
.size dcpe_icpe_tl1_common,.-dcpe_icpe_tl1_common
/* Capture I/D/E-cache state into per-cpu error scoreboard.
*
* %g1: (TL >= 1) ? 1 : 0
* %g2: scratch
* %g3: scratch
* %g4: AFSR
* %g5: AFAR
* %g6: unused, will have current thread ptr after etrap
* %g7: scratch
*/
.type __cheetah_log_error,#function
__cheetah_log_error:
/* Put "TL1" software bit into AFSR. */
and %g1, 0x1, %g1
sllx %g1, 63, %g2
or %g4, %g2, %g4
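/* i.e., in C-like terms:  afsr |= (u64)(tl1 & 1) << 63; */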
/* Get log entry pointer for this cpu at this trap level. */
BRANCH_IF_JALAPENO(g2,g3,50f)
ldxa [%g0] ASI_SAFARI_CONFIG, %g2
srlx %g2, 17, %g2
ba,pt %xcc, 60f
and %g2, 0x3ff, %g2
50: ldxa [%g0] ASI_JBUS_CONFIG, %g2
srlx %g2, 17, %g2
and %g2, 0x1f, %g2
60: sllx %g2, 9, %g2
sethi %hi(cheetah_error_log), %g3
ldx [%g3 + %lo(cheetah_error_log)], %g3
brz,pn %g3, 80f
nop
add %g3, %g2, %g3
sllx %g1, 8, %g1
add %g3, %g1, %g1
/* %g1 holds pointer to the top of the logging scoreboard */
ldx [%g1 + 0x0], %g7
cmp %g7, -1
bne,pn %xcc, 80f
nop
stx %g4, [%g1 + 0x0]
stx %g5, [%g1 + 0x8]
add %g1, 0x10, %g1
/* %g1 now points to D-cache logging area */
set 0x3ff8, %g2 /* DC_addr mask */
and %g5, %g2, %g2 /* DC_addr bits of AFAR */
srlx %g5, 12, %g3
or %g3, 1, %g3 /* PHYS tag + valid */
10: ldxa [%g2] ASI_DCACHE_TAG, %g7
cmp %g3, %g7 /* TAG match? */
bne,pt %xcc, 13f
nop
/* Yep, what we want, capture state. */
stx %g2, [%g1 + 0x20]
stx %g7, [%g1 + 0x28]
/* A membar Sync is required before and after utag access. */
membar #Sync
ldxa [%g2] ASI_DCACHE_UTAG, %g7
membar #Sync
stx %g7, [%g1 + 0x30]
ldxa [%g2] ASI_DCACHE_SNOOP_TAG, %g7
stx %g7, [%g1 + 0x38]
clr %g3
12: ldxa [%g2 + %g3] ASI_DCACHE_DATA, %g7
stx %g7, [%g1]
add %g3, (1 << 5), %g3
cmp %g3, (4 << 5)
bl,pt %xcc, 12b
add %g1, 0x8, %g1
ba,pt %xcc, 20f
add %g1, 0x20, %g1
13: sethi %hi(1 << 14), %g7
add %g2, %g7, %g2
srlx %g2, 14, %g7
cmp %g7, 4
bl,pt %xcc, 10b
nop
add %g1, 0x40, %g1
/* %g1 now points to I-cache logging area */
20: set 0x1fe0, %g2 /* IC_addr mask */
and %g5, %g2, %g2 /* IC_addr bits of AFAR */
sllx %g2, 1, %g2 /* IC_addr[13:6]==VA[12:5] */
srlx %g5, (13 - 8), %g3 /* Make PTAG */
andn %g3, 0xff, %g3 /* Mask off undefined bits */
21: ldxa [%g2] ASI_IC_TAG, %g7
andn %g7, 0xff, %g7
cmp %g3, %g7
bne,pt %xcc, 23f
nop
/* Yep, what we want, capture state. */
stx %g2, [%g1 + 0x40]
stx %g7, [%g1 + 0x48]
add %g2, (1 << 3), %g2
ldxa [%g2] ASI_IC_TAG, %g7
add %g2, (1 << 3), %g2
stx %g7, [%g1 + 0x50]
ldxa [%g2] ASI_IC_TAG, %g7
add %g2, (1 << 3), %g2
stx %g7, [%g1 + 0x60]
ldxa [%g2] ASI_IC_TAG, %g7
stx %g7, [%g1 + 0x68]
sub %g2, (3 << 3), %g2
ldxa [%g2] ASI_IC_STAG, %g7
stx %g7, [%g1 + 0x58]
clr %g3
srlx %g2, 2, %g2
22: ldxa [%g2 + %g3] ASI_IC_INSTR, %g7
stx %g7, [%g1]
add %g3, (1 << 3), %g3
cmp %g3, (8 << 3)
bl,pt %xcc, 22b
add %g1, 0x8, %g1
ba,pt %xcc, 30f
add %g1, 0x30, %g1
23: sethi %hi(1 << 14), %g7
add %g2, %g7, %g2
srlx %g2, 14, %g7
cmp %g7, 4
bl,pt %xcc, 21b
nop
add %g1, 0x70, %g1
/* %g1 now points to E-cache logging area */
30: andn %g5, (32 - 1), %g2
stx %g2, [%g1 + 0x20]
ldxa [%g2] ASI_EC_TAG_DATA, %g7
stx %g7, [%g1 + 0x28]
ldxa [%g2] ASI_EC_R, %g0
clr %g3
31: ldxa [%g3] ASI_EC_DATA, %g7
stx %g7, [%g1 + %g3]
add %g3, 0x8, %g3
cmp %g3, 0x20
bl,pt %xcc, 31b
nop
80:
rdpr %tt, %g2
cmp %g2, 0x70
be c_fast_ecc
cmp %g2, 0x63
be c_cee
nop
ba,a,pt %xcc, c_deferred
.size __cheetah_log_error,.-__cheetah_log_error
/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
* in the trap table. That code has done a memory barrier
* and has disabled both the I-cache and D-cache in the DCU
* control register. The I-cache is disabled so that we may
* capture the corrupted cache line, and the D-cache is disabled
* because corrupt data may have been placed there and we don't
* want to reference it.
*
* %g1 is one if this trap occurred at %tl >= 1.
*
* Next, we turn off error reporting so that we don't recurse.
*/
.globl cheetah_fast_ecc
.type cheetah_fast_ecc,#function
cheetah_fast_ecc:
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Fetch and clear AFSR/AFAR */
ldxa [%g0] ASI_AFSR, %g4
ldxa [%g0] ASI_AFAR, %g5
stxa %g4, [%g0] ASI_AFSR
membar #Sync
ba,pt %xcc, __cheetah_log_error
nop
.size cheetah_fast_ecc,.-cheetah_fast_ecc
.type c_fast_ecc,#function
c_fast_ecc:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov %l4, %o1
mov %l5, %o2
call cheetah_fecc_handler
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap_irq
.size c_fast_ecc,.-c_fast_ecc
/* Our caller has disabled I-cache and performed membar Sync. */
.globl cheetah_cee
.type cheetah_cee,#function
cheetah_cee:
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
andn %g2, ESTATE_ERROR_CEEN, %g2
stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Fetch and clear AFSR/AFAR */
ldxa [%g0] ASI_AFSR, %g4
ldxa [%g0] ASI_AFAR, %g5
stxa %g4, [%g0] ASI_AFSR
membar #Sync
ba,pt %xcc, __cheetah_log_error
nop
.size cheetah_cee,.-cheetah_cee
.type c_cee,#function
c_cee:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov %l4, %o1
mov %l5, %o2
call cheetah_cee_handler
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap_irq
.size c_cee,.-c_cee
/* Our caller has disabled I-cache+D-cache and performed membar Sync. */
.globl cheetah_deferred_trap
.type cheetah_deferred_trap,#function
cheetah_deferred_trap:
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Fetch and clear AFSR/AFAR */
ldxa [%g0] ASI_AFSR, %g4
ldxa [%g0] ASI_AFAR, %g5
stxa %g4, [%g0] ASI_AFSR
membar #Sync
ba,pt %xcc, __cheetah_log_error
nop
.size cheetah_deferred_trap,.-cheetah_deferred_trap
.type c_deferred,#function
c_deferred:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov %l4, %o1
mov %l5, %o2
call cheetah_deferred_handler
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap_irq
.size c_deferred,.-c_deferred
|
AirFortressIlikara/LS2K0300-linux-4.19
| 12,094
|
arch/sparc/kernel/wof.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* wof.S: Sparc window overflow handler.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/asi.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>
/* WARNING: This routine is hairy and _very_ complicated, but it
* must be as fast as possible as it handles the allocation
* of register windows to the user and kernel. If you touch
* this code be _very_ careful as many other pieces of the
* kernel depend upon how this code behaves. You have been
* duly warned...
*/
/* We define macros for registers which have a fixed
* meaning throughout this entire routine. The 'T' in
* the comments means that the register can only be
* accessed when in the 'trap' window, 'G' means
* accessible in any window. Do not change these registers
* after they have been set, until you are ready to return
* from the trap.
*/
#define t_psr l0 /* %psr at trap time T */
#define t_pc l1 /* PC for trap return T */
#define t_npc l2 /* NPC for trap return T */
#define t_wim l3 /* %wim at trap time T */
#define saved_g5 l5 /* Global save register T */
#define saved_g6 l6 /* Global save register T */
#define curptr g6 /* Gets set to 'current' then stays G */
/* Now registers whose values can change within the handler. */
#define twin_tmp l4 /* Temp reg, only usable in trap window T */
#define glob_tmp g5 /* Global temporary reg, usable anywhere G */
.text
.align 4
/* BEGINNING OF PATCH INSTRUCTIONS */
/* On a 7-window Sparc the boot code patches spnwin_*
* instructions with the following ones.
*/
.globl spnwin_patch1_7win, spnwin_patch2_7win, spnwin_patch3_7win
spnwin_patch1_7win: sll %t_wim, 6, %glob_tmp
spnwin_patch2_7win: and %glob_tmp, 0x7f, %glob_tmp
spnwin_patch3_7win: and %twin_tmp, 0x7f, %twin_tmp
/* END OF PATCH INSTRUCTIONS */
/* The trap entry point has done the following:
*
* rd %psr, %l0
* rd %wim, %l3
* b spill_window_entry
* andcc %l0, PSR_PS, %g0
*/
/* The datum current_thread_info->uwinmask contains at all times a bitmask
* in which, if any user windows are active, at least one bit will
* be set in the mask. If no user windows are active, the bitmask
* will be all zeroes.
*/
.globl spill_window_entry
.globl spnwin_patch1, spnwin_patch2, spnwin_patch3
spill_window_entry:
/* LOCATION: Trap Window */
mov %g5, %saved_g5 ! save away global temp register
mov %g6, %saved_g6 ! save away 'current' ptr register
/* Compute what the new %wim will be if we save the
* window properly in this trap handler.
*
* newwim = ((%wim>>1) | (%wim<<(nwindows - 1)));
*/
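/* Worked example on a hypothetical 8-window machine: if
 * %wim == 0x08, then newwim == ((0x08 >> 1) | (0x08 << 7)) & 0xff
 * == 0x04, i.e. the invalid-window mark rotates down by one
 * window, in the direction a save moves the CWP.
 */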
srl %t_wim, 0x1, %twin_tmp
spnwin_patch1: sll %t_wim, 7, %glob_tmp
or %glob_tmp, %twin_tmp, %glob_tmp
spnwin_patch2: and %glob_tmp, 0xff, %glob_tmp
/* The trap entry point has set the condition codes
* up for us to see if this is from user or kernel.
* Get the load of 'curptr' out of the way.
*/
LOAD_CURRENT(curptr, twin_tmp)
andcc %t_psr, PSR_PS, %g0
be,a spwin_fromuser ! all user wins, branch
save %g0, %g0, %g0 ! Go where saving will occur
/* See if any user windows are active in the set. */
ld [%curptr + TI_UWINMASK], %twin_tmp ! grab win mask
orcc %g0, %twin_tmp, %g0 ! check for set bits
bne spwin_exist_uwins ! yep, there are some
andn %twin_tmp, %glob_tmp, %twin_tmp ! compute new uwinmask
/* Save into the window which must be saved and do it.
* Basically if we are here, this means that we trapped
* from kernel mode with only kernel windows in the register
* file.
*/
save %g0, %g0, %g0 ! save into the window to stash away
wr %glob_tmp, 0x0, %wim ! set new %wim, this is safe now
spwin_no_userwins_from_kernel:
/* LOCATION: Window to be saved */
STORE_WINDOW(sp) ! stash the window
restore %g0, %g0, %g0 ! go back into trap window
/* LOCATION: Trap window */
mov %saved_g5, %g5 ! restore %glob_tmp
mov %saved_g6, %g6 ! restore %curptr
wr %t_psr, 0x0, %psr ! restore condition codes in %psr
WRITE_PAUSE ! waste some time
jmp %t_pc ! Return from trap
rett %t_npc ! we are done
spwin_exist_uwins:
/* LOCATION: Trap window */
/* Wow, user windows have to be dealt with, this is dirty
* and messy as all hell. And difficult to follow if you
* are approaching the infamous register window trap handling
* problem for the first time. DON'T LOOK!
*
* Note that how the execution path works out, the new %wim
* will be left for us in the global temporary register,
* %glob_tmp. We cannot set the new %wim first because we
* need to save into the appropriate window without inducing
* a trap (traps are off, we'd get a watchdog wheee)...
* But first, store the new user window mask calculated
* above.
*/
st %twin_tmp, [%curptr + TI_UWINMASK]
save %g0, %g0, %g0 ! Go to where the saving will occur
spwin_fromuser:
/* LOCATION: Window to be saved */
wr %glob_tmp, 0x0, %wim ! Now it is safe to set new %wim
/* LOCATION: Window to be saved */
/* This instruction branches to a routine which will check
* the validity of the user's stack pointer by whatever means
* are necessary. This means that this is architecture
* specific and thus this branch instruction will need to
* be patched at boot time once the machine type is known.
* This routine _shall not_ touch %curptr under any
* circumstances whatsoever! It will branch back to the
* label 'spwin_good_ustack' if the stack is ok but still
* needs to be dumped (SRMMU for instance will not need to
* do this) or 'spwin_finish_up' if the stack is ok and the
* registers have already been saved. If the stack is found
* to be bogus for some reason the routine shall branch to
* the label 'spwin_user_stack_is_bolixed' which will take
* care of things at that point.
*/
b spwin_srmmu_stackchk
andcc %sp, 0x7, %g0
spwin_good_ustack:
/* LOCATION: Window to be saved */
/* The user's stack is ok and we can safely save it at
* %sp.
*/
STORE_WINDOW(sp)
spwin_finish_up:
restore %g0, %g0, %g0 /* Back to trap window. */
/* LOCATION: Trap window */
/* We have spilled successfully, and we have properly stored
* the appropriate window onto the stack.
*/
/* Restore saved globals */
mov %saved_g5, %g5
mov %saved_g6, %g6
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
spwin_user_stack_is_bolixed:
/* LOCATION: Window to be saved */
/* Wheee, user has trashed his/her stack. We have to decide
* how to proceed based upon whether we came from kernel mode
* or not. If we came from kernel mode, toss the window into
* a special buffer and proceed, the kernel _needs_ a window
* and we could be in an interrupt handler so timing is crucial.
* If we came from user land we build a full stack frame and call
* c-code to gun down the process.
*/
rd %psr, %glob_tmp
andcc %glob_tmp, PSR_PS, %g0
bne spwin_bad_ustack_from_kernel
nop
/* Oh well, throw this one window into the per-task window
* buffer, the first one.
*/
st %sp, [%curptr + TI_RWIN_SPTRS]
STORE_WINDOW(curptr + TI_REG_WINDOW)
restore %g0, %g0, %g0
/* LOCATION: Trap Window */
/* Back in the trap window, update winbuffer save count. */
mov 1, %twin_tmp
st %twin_tmp, [%curptr + TI_W_SAVED]
/* Compute new user window mask. What we are basically
* doing is taking two windows, the invalid one at trap
* time and the one we attempted to throw onto the users
* stack, and saying that everything else is an ok user
* window. umask = ((~(%t_wim | %wim)) & valid_wim_bits)
*/
rd %wim, %twin_tmp
or %twin_tmp, %t_wim, %twin_tmp
not %twin_tmp
spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
st %twin_tmp, [%curptr + TI_UWINMASK]
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
sethi %hi(STACK_OFFSET), %sp
or %sp, %lo(STACK_OFFSET), %sp
add %curptr, %sp, %sp
/* Restore the saved globals and build a pt_regs frame. */
mov %saved_g5, %g5
mov %saved_g6, %g6
STORE_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
sethi %hi(STACK_OFFSET), %g6
or %g6, %lo(STACK_OFFSET), %g6
sub %sp, %g6, %g6 ! curptr
/* Turn on traps and call c-code to deal with it. */
wr %t_psr, PSR_ET, %psr
nop
call window_overflow_fault
nop
/* Return from trap if C-code actually fixes things, if it
* doesn't then we never get this far as the process will
* be given the look of death from Commander Peanut.
*/
b ret_trap_entry
clr %l6
spwin_bad_ustack_from_kernel:
/* LOCATION: Window to be saved */
/* The kernel provoked a spill window trap, but the window we
* need to save is a user one and the process has trashed its
* stack pointer. We need to be quick, so we throw it into
* a per-process window buffer until we can properly handle
* this later on.
*/
SAVE_BOLIXED_USER_STACK(curptr, glob_tmp)
restore %g0, %g0, %g0
/* LOCATION: Trap window */
/* Restore globals, condition codes in the %psr and
* return from trap. Note, restoring %g6 when returning
* to kernel mode is not necessary these days. ;-)
*/
mov %saved_g5, %g5
mov %saved_g6, %g6
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
/* Undefine the register macros which would only cause trouble
* if used below. This helps find 'stupid' coding errors that
* produce 'odd' behavior. The routines below are allowed to
* make usage of glob_tmp and t_psr so we leave them defined.
*/
#undef twin_tmp
#undef curptr
#undef t_pc
#undef t_npc
#undef t_wim
#undef saved_g5
#undef saved_g6
/* Now come the per-architecture window overflow stack checking routines.
* As noted above %curptr cannot be touched by this routine at all.
*/
/* This is a generic SRMMU routine. As far as I know this
* works for all current v8/srmmu implementations, we'll
* see...
*/
.globl spwin_srmmu_stackchk
spwin_srmmu_stackchk:
/* LOCATION: Window to be saved on the stack */
/* Because of SMP concerns and speed we play a trick.
* We disable fault traps in the MMU control register,
* Execute the stores, then check the fault registers
* to see what happens. I can hear Linus now
* "disgusting... broken hardware...".
*
* But first, check to see if the user's stack has ended
* up in kernel vma, then we would succeed for the 'wrong'
* reason... ;( Note that the 'sethi' below assumes the
* kernel is page aligned, which should always be the case.
*/
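/* In effect the no-fault dance below is (read_mmu()/write_mmu()
 * are illustrative stand-ins for the lda/sta MMU register
 * accesses, not real kernel helpers):
 *
 *	(void) read_mmu(AC_M_SFSR);		! eat stale fault status
 *	ctrl = read_mmu(0);
 *	write_mmu(0, ctrl | 0x2);		! no_fault on
 *	STORE_WINDOW(sp);
 *	write_mmu(0, ctrl & ~0x2);		! no_fault off
 *	(void) read_mmu(AC_M_SFAR);
 *	if (read_mmu(AC_M_SFSR) & 0x2)		! did the store fault?
 *		goto spwin_user_stack_is_bolixed;
 */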
/* Check results of callers andcc %sp, 0x7, %g0 */
bne spwin_user_stack_is_bolixed
sethi %hi(PAGE_OFFSET), %glob_tmp
cmp %glob_tmp, %sp
bleu spwin_user_stack_is_bolixed
mov AC_M_SFSR, %glob_tmp
/* Clear the fault status and turn on the no_fault bit. */
LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0) ! eat SFSR
SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0) ! eat SFSR
LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
SUN_PI_(lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
/* Dump the registers and cross fingers. */
STORE_WINDOW(sp)
/* Clear the no_fault bit and check the status. */
andn %glob_tmp, 0x2, %glob_tmp
LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
mov AC_M_SFAR, %glob_tmp
LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
mov AC_M_SFSR, %glob_tmp
LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)
SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)
andcc %glob_tmp, 0x2, %g0 ! did we fault?
be,a spwin_finish_up + 0x4 ! cool beans, success
restore %g0, %g0, %g0
rd %psr, %glob_tmp
b spwin_user_stack_is_bolixed + 0x4 ! we faulted, ugh
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,122
|
arch/sparc/kernel/una_asm_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* una_asm.S: Kernel unaligned trap assembler helpers.
*
* Copyright (C) 1996,2005 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
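/* Both helpers below emulate an unaligned access one byte at a time.
 * C-like sketch of __do_int_store for the 8-byte case (big-endian,
 * most significant byte first):
 *
 *	for (i = 0; i < 8; i++)
 *		dst[i] = (value >> (56 - 8 * i)) & 0xff;
 *
 * with every byte access listed in __ex_table so that a fault makes
 * the helper return -EFAULT instead of oopsing.
 */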
.text
.globl __do_int_store
__do_int_store:
rd %asi, %o4
wr %o3, 0, %asi
mov %o2, %g3
cmp %o1, 2
be,pn %icc, 2f
cmp %o1, 4
be,pt %icc, 1f
srlx %g3, 24, %g2
srlx %g3, 56, %g1
srlx %g3, 48, %g7
4: stba %g1, [%o0] %asi
srlx %g3, 40, %g1
5: stba %g7, [%o0 + 1] %asi
srlx %g3, 32, %g7
6: stba %g1, [%o0 + 2] %asi
7: stba %g7, [%o0 + 3] %asi
srlx %g3, 16, %g1
8: stba %g2, [%o0 + 4] %asi
srlx %g3, 8, %g7
9: stba %g1, [%o0 + 5] %asi
10: stba %g7, [%o0 + 6] %asi
ba,pt %xcc, 0f
11: stba %g3, [%o0 + 7] %asi
1: srl %g3, 16, %g7
12: stba %g2, [%o0] %asi
srl %g3, 8, %g2
13: stba %g7, [%o0 + 1] %asi
14: stba %g2, [%o0 + 2] %asi
ba,pt %xcc, 0f
15: stba %g3, [%o0 + 3] %asi
2: srl %g3, 8, %g2
16: stba %g2, [%o0] %asi
17: stba %g3, [%o0 + 1] %asi
0:
wr %o4, 0x0, %asi
retl
mov 0, %o0
.size __do_int_store, .-__do_int_store
.section __ex_table,"a"
.word 4b, __retl_efault
.word 5b, __retl_efault
.word 6b, __retl_efault
.word 7b, __retl_efault
.word 8b, __retl_efault
.word 9b, __retl_efault
.word 10b, __retl_efault
.word 11b, __retl_efault
.word 12b, __retl_efault
.word 13b, __retl_efault
.word 14b, __retl_efault
.word 15b, __retl_efault
.word 16b, __retl_efault
.word 17b, __retl_efault
.previous
.globl do_int_load
do_int_load:
rd %asi, %o5
wr %o4, 0, %asi
cmp %o1, 8
bge,pn %icc, 9f
cmp %o1, 4
be,pt %icc, 6f
4: lduba [%o2] %asi, %g2
5: lduba [%o2 + 1] %asi, %g3
sll %g2, 8, %g2
brz,pt %o3, 3f
add %g2, %g3, %g2
sllx %g2, 48, %g2
srax %g2, 48, %g2
3: ba,pt %xcc, 0f
stx %g2, [%o0]
6: lduba [%o2 + 1] %asi, %g3
sll %g2, 24, %g2
7: lduba [%o2 + 2] %asi, %g7
sll %g3, 16, %g3
8: lduba [%o2 + 3] %asi, %g1
sll %g7, 8, %g7
or %g2, %g3, %g2
or %g7, %g1, %g7
or %g2, %g7, %g2
brnz,a,pt %o3, 3f
sra %g2, 0, %g2
3: ba,pt %xcc, 0f
stx %g2, [%o0]
9: lduba [%o2] %asi, %g2
10: lduba [%o2 + 1] %asi, %g3
sllx %g2, 56, %g2
11: lduba [%o2 + 2] %asi, %g7
sllx %g3, 48, %g3
12: lduba [%o2 + 3] %asi, %g1
sllx %g7, 40, %g7
sllx %g1, 32, %g1
or %g2, %g3, %g2
or %g7, %g1, %g7
13: lduba [%o2 + 4] %asi, %g3
or %g2, %g7, %g7
14: lduba [%o2 + 5] %asi, %g1
sllx %g3, 24, %g3
15: lduba [%o2 + 6] %asi, %g2
sllx %g1, 16, %g1
or %g7, %g3, %g7
16: lduba [%o2 + 7] %asi, %g3
sllx %g2, 8, %g2
or %g7, %g1, %g7
or %g2, %g3, %g2
or %g7, %g2, %g7
cmp %o1, 8
be,a,pt %icc, 0f
stx %g7, [%o0]
srlx %g7, 32, %g2
sra %g7, 0, %g7
stx %g2, [%o0]
stx %g7, [%o0 + 8]
0:
wr %o5, 0x0, %asi
retl
mov 0, %o0
.size do_int_load, .-do_int_load
.section __ex_table,"a"
.word 4b, __retl_efault
.word 5b, __retl_efault
.word 6b, __retl_efault
.word 7b, __retl_efault
.word 8b, __retl_efault
.word 9b, __retl_efault
.word 10b, __retl_efault
.word 11b, __retl_efault
.word 12b, __retl_efault
.word 13b, __retl_efault
.word 14b, __retl_efault
.word 15b, __retl_efault
.word 16b, __retl_efault
.previous
|
AirFortressIlikara/LS2K0300-linux-4.19
| 16,338
|
arch/sparc/kernel/hvcalls.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* %o0: devhandle
* %o1: devino
*
* returns %o0: sysino
*/
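/* Seen from C, the stubs in this file share one shape: load the
 * fast-trap function number into %o5, trap, return a result
 * register.  A sketch of the first one (prototype assumed to match
 * the declaration in the kernel's hypervisor header):
 *
 *	unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
 *					     unsigned long devino);
 */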
ENTRY(sun4v_devino_to_sysino)
mov HV_FAST_INTR_DEVINO2SYSINO, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(sun4v_devino_to_sysino)
/* %o0: sysino
*
* returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
*/
ENTRY(sun4v_intr_getenabled)
mov HV_FAST_INTR_GETENABLED, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(sun4v_intr_getenabled)
/* %o0: sysino
* %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
*/
ENTRY(sun4v_intr_setenabled)
mov HV_FAST_INTR_SETENABLED, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_intr_setenabled)
/* %o0: sysino
*
* returns %o0: intr_state (HV_INTR_STATE_*)
*/
ENTRY(sun4v_intr_getstate)
mov HV_FAST_INTR_GETSTATE, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(sun4v_intr_getstate)
/* %o0: sysino
* %o1: intr_state (HV_INTR_STATE_*)
*/
ENTRY(sun4v_intr_setstate)
mov HV_FAST_INTR_SETSTATE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_intr_setstate)
/* %o0: sysino
*
* returns %o0: cpuid
*/
ENTRY(sun4v_intr_gettarget)
mov HV_FAST_INTR_GETTARGET, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(sun4v_intr_gettarget)
/* %o0: sysino
* %o1: cpuid
*/
ENTRY(sun4v_intr_settarget)
mov HV_FAST_INTR_SETTARGET, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_intr_settarget)
/* %o0: cpuid
* %o1: pc
* %o2: rtba
* %o3: arg0
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_start)
mov HV_FAST_CPU_START, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_start)
/* %o0: cpuid
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_stop)
mov HV_FAST_CPU_STOP, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_stop)
/* returns %o0: status */
ENTRY(sun4v_cpu_yield)
mov HV_FAST_CPU_YIELD, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_yield)
/* %o0: cpuid
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_poke)
mov HV_FAST_CPU_POKE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_poke)
/* %o0: type
* %o1: queue paddr
* %o2: num queue entries
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_qconf)
mov HV_FAST_CPU_QCONF, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_qconf)
/* %o0: num cpus in cpu list
* %o1: cpu list paddr
* %o2: mondo block paddr
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_mondo_send)
mov HV_FAST_CPU_MONDO_SEND, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_mondo_send)
/* %o0: CPU ID
*
* returns %o0: -status if status non-zero, else
* %o0: cpu state as HV_CPU_STATE_*
*/
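/* i.e., roughly:  return status ? -status : (long) cpu_state; */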
ENTRY(sun4v_cpu_state)
mov HV_FAST_CPU_STATE, %o5
ta HV_FAST_TRAP
brnz,pn %o0, 1f
sub %g0, %o0, %o0
mov %o1, %o0
1: retl
nop
ENDPROC(sun4v_cpu_state)
/* %o0: virtual address
* %o1: must be zero
* %o2: TTE
* %o3: HV_MMU_* flags
*
* returns %o0: status
*/
ENTRY(sun4v_mmu_map_perm_addr)
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mmu_map_perm_addr)
/* %o0: number of TSB descriptions
* %o1: TSB descriptions real address
*
* returns %o0: status
*/
ENTRY(sun4v_mmu_tsb_ctx0)
mov HV_FAST_MMU_TSB_CTX0, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mmu_tsb_ctx0)
/* %o0: API group number
* %o1: pointer to unsigned long major number storage
* %o2: pointer to unsigned long minor number storage
*
* returns %o0: status
*/
ENTRY(sun4v_get_version)
mov HV_CORE_GET_VER, %o5
mov %o1, %o3
mov %o2, %o4
ta HV_CORE_TRAP
stx %o1, [%o3]
retl
stx %o2, [%o4]
ENDPROC(sun4v_get_version)
/* %o0: API group number
* %o1: desired major number
* %o2: desired minor number
* %o3: pointer to unsigned long actual minor number storage
*
* returns %o0: status
*/
ENTRY(sun4v_set_version)
mov HV_CORE_SET_VER, %o5
mov %o3, %o4
ta HV_CORE_TRAP
retl
stx %o1, [%o4]
ENDPROC(sun4v_set_version)
/* %o0: pointer to unsigned long time
*
* returns %o0: status
*/
ENTRY(sun4v_tod_get)
mov %o0, %o4
mov HV_FAST_TOD_GET, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_tod_get)
/* %o0: time
*
* returns %o0: status
*/
ENTRY(sun4v_tod_set)
mov HV_FAST_TOD_SET, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_tod_set)
/* %o0: pointer to unsigned long status
*
* returns %o0: signed character
*/
ENTRY(sun4v_con_getchar)
mov %o0, %o4
mov HV_FAST_CONS_GETCHAR, %o5
clr %o0
clr %o1
ta HV_FAST_TRAP
stx %o0, [%o4]
retl
sra %o1, 0, %o0
ENDPROC(sun4v_con_getchar)
/* %o0: signed long character
*
* returns %o0: status
*/
ENTRY(sun4v_con_putchar)
mov HV_FAST_CONS_PUTCHAR, %o5
ta HV_FAST_TRAP
retl
sra %o0, 0, %o0
ENDPROC(sun4v_con_putchar)
/* %o0: buffer real address
* %o1: buffer size
* %o2: pointer to unsigned long bytes_read
*
* returns %o0: status
*/
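/* C-like sketch of the special cases handled below:
 *
 *	if (status != 0)  return status;
 *	if (bytes == -1)  return -1;	! break
 *	if (bytes == -2)  return -2;	! hangup
 *	*bytes_read = bytes;
 *	return 0;
 */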
ENTRY(sun4v_con_read)
mov %o2, %o4
mov HV_FAST_CONS_READ, %o5
ta HV_FAST_TRAP
brnz %o0, 1f
cmp %o1, -1 /* break */
be,a,pn %icc, 1f
mov %o1, %o0
cmp %o1, -2 /* hup */
be,a,pn %icc, 1f
mov %o1, %o0
stx %o1, [%o4]
1: retl
nop
ENDPROC(sun4v_con_read)
/* %o0: buffer real address
* %o1: buffer size
* %o2: pointer to unsigned long bytes_written
*
* returns %o0: status
*/
ENTRY(sun4v_con_write)
mov %o2, %o4
mov HV_FAST_CONS_WRITE, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_con_write)
/* %o0: soft state
* %o1: address of description string
*
* returns %o0: status
*/
ENTRY(sun4v_mach_set_soft_state)
mov HV_FAST_MACH_SET_SOFT_STATE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mach_set_soft_state)
/* %o0: exit code
*
* Does not return.
*/
ENTRY(sun4v_mach_exit)
mov HV_FAST_MACH_EXIT, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mach_exit)
/* %o0: buffer real address
* %o1: buffer length
* %o2: pointer to unsigned long real_buf_len
*
* returns %o0: status
*/
ENTRY(sun4v_mach_desc)
mov %o2, %o4
mov HV_FAST_MACH_DESC, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mach_desc)
/* %o0: new timeout in milliseconds
* %o1: pointer to unsigned long orig_timeout
*
* returns %o0: status
*/
ENTRY(sun4v_mach_set_watchdog)
mov %o1, %o4
mov HV_FAST_MACH_SET_WATCHDOG, %o5
ta HV_FAST_TRAP
brnz,a,pn %o4, 0f
stx %o1, [%o4]
0: retl
nop
ENDPROC(sun4v_mach_set_watchdog)
EXPORT_SYMBOL(sun4v_mach_set_watchdog)
/* No inputs and does not return. */
ENTRY(sun4v_mach_sir)
mov %o1, %o4
mov HV_FAST_MACH_SIR, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mach_sir)
/* %o0: channel
* %o1: ra
* %o2: num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_tx_qconf)
mov HV_FAST_LDC_TX_QCONF, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_tx_qconf)
/* %o0: channel
* %o1: pointer to unsigned long ra
* %o2: pointer to unsigned long num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_tx_qinfo)
mov %o1, %g1
mov %o2, %g2
mov HV_FAST_LDC_TX_QINFO, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
retl
nop
ENDPROC(sun4v_ldc_tx_qinfo)
/* %o0: channel
* %o1: pointer to unsigned long head_off
* %o2: pointer to unsigned long tail_off
* %o3: pointer to unsigned long chan_state
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_tx_get_state)
mov %o1, %g1
mov %o2, %g2
mov %o3, %g3
mov HV_FAST_LDC_TX_GET_STATE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
stx %o3, [%g3]
retl
nop
ENDPROC(sun4v_ldc_tx_get_state)
/* %o0: channel
* %o1: tail_off
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_tx_set_qtail)
mov HV_FAST_LDC_TX_SET_QTAIL, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_tx_set_qtail)
/* %o0: channel
* %o1: ra
* %o2: num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_rx_qconf)
mov HV_FAST_LDC_RX_QCONF, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_rx_qconf)
/* %o0: channel
* %o1: pointer to unsigned long ra
* %o2: pointer to unsigned long num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_rx_qinfo)
mov %o1, %g1
mov %o2, %g2
mov HV_FAST_LDC_RX_QINFO, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
retl
nop
ENDPROC(sun4v_ldc_rx_qinfo)
/* %o0: channel
* %o1: pointer to unsigned long head_off
* %o2: pointer to unsigned long tail_off
* %o3: pointer to unsigned long chan_state
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_rx_get_state)
mov %o1, %g1
mov %o2, %g2
mov %o3, %g3
mov HV_FAST_LDC_RX_GET_STATE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
stx %o3, [%g3]
retl
nop
ENDPROC(sun4v_ldc_rx_get_state)
/* %o0: channel
* %o1: head_off
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_rx_set_qhead)
mov HV_FAST_LDC_RX_SET_QHEAD, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_rx_set_qhead)
/* %o0: channel
* %o1: ra
* %o2: num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_set_map_table)
mov HV_FAST_LDC_SET_MAP_TABLE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_set_map_table)
/* %o0: channel
* %o1: pointer to unsigned long ra
* %o2: pointer to unsigned long num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_get_map_table)
mov %o1, %g1
mov %o2, %g2
mov HV_FAST_LDC_GET_MAP_TABLE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
retl
nop
ENDPROC(sun4v_ldc_get_map_table)
/* %o0: channel
* %o1: dir_code
* %o2: tgt_raddr
* %o3: lcl_raddr
* %o4: len
* %o5: pointer to unsigned long actual_len
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_copy)
mov %o5, %g1
mov HV_FAST_LDC_COPY, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_ldc_copy)
/* %o0: channel
* %o1: cookie
* %o2: pointer to unsigned long ra
* %o3: pointer to unsigned long perm
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_mapin)
mov %o2, %g1
mov %o3, %g2
mov HV_FAST_LDC_MAPIN, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
retl
nop
ENDPROC(sun4v_ldc_mapin)
/* %o0: ra
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_unmap)
mov HV_FAST_LDC_UNMAP, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_unmap)
/* %o0: channel
* %o1: cookie
* %o2: mte_cookie
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_revoke)
mov HV_FAST_LDC_REVOKE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_revoke)
/* %o0: device handle
* %o1: device INO
* %o2: pointer to unsigned long cookie
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_get_cookie)
mov %o2, %g1
mov HV_FAST_VINTR_GET_COOKIE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_vintr_get_cookie)
/* %o0: device handle
* %o1: device INO
* %o2: cookie
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_set_cookie)
mov HV_FAST_VINTR_SET_COOKIE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vintr_set_cookie)
/* %o0: device handle
* %o1: device INO
* %o2: pointer to unsigned long valid_state
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_get_valid)
mov %o2, %g1
mov HV_FAST_VINTR_GET_VALID, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_vintr_get_valid)
/* %o0: device handle
* %o1: device INO
* %o2: valid_state
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_set_valid)
mov HV_FAST_VINTR_SET_VALID, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vintr_set_valid)
/* %o0: device handle
* %o1: device INO
* %o2: pointer to unsigned long state
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_get_state)
mov %o2, %g1
mov HV_FAST_VINTR_GET_STATE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_vintr_get_state)
/* %o0: device handle
* %o1: device INO
* %o2: state
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_set_state)
mov HV_FAST_VINTR_SET_STATE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vintr_set_state)
/* %o0: device handle
* %o1: device INO
* %o2: pointer to unsigned long cpuid
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_get_target)
mov %o2, %g1
mov HV_FAST_VINTR_GET_TARGET, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_vintr_get_target)
/* %o0: device handle
* %o1: device INO
* %o2: cpuid
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_set_target)
mov HV_FAST_VINTR_SET_TARGET, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vintr_set_target)
/* %o0: NCS sub-function
* %o1: sub-function arg real-address
* %o2: sub-function arg size
*
* returns %o0: status
*/
ENTRY(sun4v_ncs_request)
mov HV_FAST_NCS_REQUEST, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ncs_request)
ENTRY(sun4v_svc_send)
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
mov %i2, %o2
mov HV_FAST_SVC_SEND, %o5
ta HV_FAST_TRAP
stx %o1, [%i3]
ret
restore
ENDPROC(sun4v_svc_send)
ENTRY(sun4v_svc_recv)
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
mov %i2, %o2
mov HV_FAST_SVC_RECV, %o5
ta HV_FAST_TRAP
stx %o1, [%i3]
ret
restore
ENDPROC(sun4v_svc_recv)
ENTRY(sun4v_svc_getstatus)
mov HV_FAST_SVC_GETSTATUS, %o5
mov %o1, %o4
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_svc_getstatus)
ENTRY(sun4v_svc_setstatus)
mov HV_FAST_SVC_SETSTATUS, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_svc_setstatus)
ENTRY(sun4v_svc_clrstatus)
mov HV_FAST_SVC_CLRSTATUS, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_svc_clrstatus)
ENTRY(sun4v_mmustat_conf)
mov %o1, %o4
mov HV_FAST_MMUSTAT_CONF, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mmustat_conf)
ENTRY(sun4v_mmustat_info)
mov %o0, %o4
mov HV_FAST_MMUSTAT_INFO, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mmustat_info)
ENTRY(sun4v_mmu_demap_all)
clr %o0
clr %o1
mov HV_MMU_ALL, %o2
mov HV_FAST_MMU_DEMAP_ALL, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mmu_demap_all)
ENTRY(sun4v_niagara_getperf)
mov %o0, %o4
mov HV_FAST_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_niagara_getperf)
EXPORT_SYMBOL(sun4v_niagara_getperf)
ENTRY(sun4v_niagara_setperf)
mov HV_FAST_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_niagara_setperf)
EXPORT_SYMBOL(sun4v_niagara_setperf)
ENTRY(sun4v_niagara2_getperf)
mov %o0, %o4
mov HV_FAST_N2_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_niagara2_getperf)
EXPORT_SYMBOL(sun4v_niagara2_getperf)
ENTRY(sun4v_niagara2_setperf)
mov HV_FAST_N2_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_niagara2_setperf)
EXPORT_SYMBOL(sun4v_niagara2_setperf)
ENTRY(sun4v_reboot_data_set)
mov HV_FAST_REBOOT_DATA_SET, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_reboot_data_set)
ENTRY(sun4v_vt_get_perfreg)
mov %o1, %o4
mov HV_FAST_VT_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_vt_get_perfreg)
ENTRY(sun4v_vt_set_perfreg)
mov HV_FAST_VT_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vt_set_perfreg)
ENTRY(sun4v_t5_get_perfreg)
mov %o1, %o4
mov HV_FAST_T5_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_t5_get_perfreg)
ENTRY(sun4v_t5_set_perfreg)
mov HV_FAST_T5_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_t5_set_perfreg)
ENTRY(sun4v_m7_get_perfreg)
mov %o1, %o4
mov HV_FAST_M7_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_m7_get_perfreg)
ENTRY(sun4v_m7_set_perfreg)
mov HV_FAST_M7_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_m7_set_perfreg)
/* %o0: address of CCB array
* %o1: size (in bytes) of CCB array
* %o2: flags
* %o3: reserved
*
* returns:
* %o0: status
* %o1: size (in bytes) of the CCB array that was accepted
* %o2: status data
* %o3: reserved
*/
ENTRY(sun4v_ccb_submit)
mov %o5, %g1
mov HV_CCB_SUBMIT, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
stx %o2, [%g1]
ENDPROC(sun4v_ccb_submit)
EXPORT_SYMBOL(sun4v_ccb_submit)
/* %o0: completion area ra for the ccb to get info
*
* returns:
* %o0: status
* %o1: CCB state
* %o2: position
* %o3: dax unit
* %o4: queue
*/
ENTRY(sun4v_ccb_info)
mov %o1, %g1
mov HV_CCB_INFO, %o5
ta HV_FAST_TRAP
sth %o1, [%g1 + CCB_INFO_OFFSET_CCB_STATE]
sth %o2, [%g1 + CCB_INFO_OFFSET_QUEUE_POS]
sth %o3, [%g1 + CCB_INFO_OFFSET_DAX_UNIT]
retl
sth %o4, [%g1 + CCB_INFO_OFFSET_QUEUE_NUM]
ENDPROC(sun4v_ccb_info)
EXPORT_SYMBOL(sun4v_ccb_info)
/* %o0: completion area ra for the ccb to kill
*
* returns:
* %o0: status
* %o1: result of the kill
*/
ENTRY(sun4v_ccb_kill)
mov %o1, %g1
mov HV_CCB_KILL, %o5
ta HV_FAST_TRAP
retl
sth %o1, [%g1]
ENDPROC(sun4v_ccb_kill)
EXPORT_SYMBOL(sun4v_ccb_kill)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 21,032
|
arch/sparc/kernel/ttable_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* The Sparc trap table; the bootloader gives us control at _start. */
__HEAD
.globl _start
_start:
.globl _stext
_stext:
.globl trapbase
trapbase:
#ifdef CONFIG_SMP
trapbase_cpu0:
#endif
/* We get control passed to us here at t_zero. */
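/* Each entry below occupies one 16-byte (four instruction) trap
 * table slot; the CPU vectors to %tbr + (trap_type << 4), which is
 * why t_zero pads its branch with three nops.
 */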
t_zero: b gokernel; nop; nop; nop;
t_tflt: SRMMU_TFAULT /* Inst. Access Exception */
t_bins: TRAP_ENTRY(0x2, bad_instruction) /* Illegal Instruction */
t_pins: TRAP_ENTRY(0x3, priv_instruction) /* Privileged Instruction */
t_fpd: TRAP_ENTRY(0x4, fpd_trap_handler) /* Floating Point Disabled */
t_wovf: WINDOW_SPILL /* Window Overflow */
t_wunf: WINDOW_FILL /* Window Underflow */
t_mna: TRAP_ENTRY(0x7, mna_handler) /* Memory Address Not Aligned */
t_fpe: TRAP_ENTRY(0x8, fpe_trap_handler) /* Floating Point Exception */
t_dflt: SRMMU_DFAULT /* Data Miss Exception */
t_tio: TRAP_ENTRY(0xa, do_tag_overflow) /* Tagged Instruction Ovrflw */
t_wpt: TRAP_ENTRY(0xb, do_watchpoint) /* Watchpoint Detected */
t_badc: BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
t_irq1: TRAP_ENTRY_INTERRUPT(1) /* IRQ Software/SBUS Level 1 */
t_irq2: TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */
t_irq3: TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */
t_irq4: TRAP_ENTRY_INTERRUPT(4) /* IRQ Software Level 4 */
t_irq5: TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */
t_irq6: TRAP_ENTRY_INTERRUPT(6) /* IRQ Software Level 6 */
t_irq7: TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */
t_irq8: TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */
t_irq9: TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */
t_irq10:TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 (one we use) */
t_irq11:TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */
t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */
t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */
t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */
.globl t_nmi
t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */
t_iacce:BAD_TRAP(0x21) /* Instr Access Error */
t_bad22:BAD_TRAP(0x22)
BAD_TRAP(0x23)
t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */
t_uflsh:SKIP_TRAP(0x25, unimp_flush) /* Unimplemented FLUSH inst. */
t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */
t_dacce:SRMMU_DFAULT /* Data Access Error */
t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... */
t_dserr:BAD_TRAP(0x2b) /* Data Store Error */
t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */
t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
t_iaccm:BAD_TRAP(0x3c) /* Instr Access MMU-Miss */
t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41)
t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46)
t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b)
t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50)
t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f)
t_bad80:BAD_TRAP(0x80) /* SunOS System Call */
t_sbkpt:BREAKPOINT_TRAP /* Software Breakpoint/KGDB */
t_divz: TRAP_ENTRY(0x82, do_hw_divzero) /* Divide by zero trap */
t_flwin:TRAP_ENTRY(0x83, do_flush_windows) /* Flush Windows Trap */
t_clwin:BAD_TRAP(0x84) /* Clean Windows Trap */
t_rchk: BAD_TRAP(0x85) /* Range Check */
t_funal:BAD_TRAP(0x86) /* Fix Unaligned Access Trap */
t_iovf: BAD_TRAP(0x87) /* Integer Overflow Trap */
t_bad88:BAD_TRAP(0x88) /* Slowaris System Call */
t_bad89:BAD_TRAP(0x89) /* Net-B.S. System Call */
t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e)
t_bad8f:BAD_TRAP(0x8f)
t_linux:LINUX_SYSCALL_TRAP /* Linux System Call */
t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95)
t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a)
t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f)
t_getcc:GETCC_TRAP /* Get Condition Codes */
t_setcc:SETCC_TRAP /* Set Condition Codes */
t_getpsr:GETPSR_TRAP /* Get PSR Register */
t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
t_bada7:BAD_TRAP(0xa7)
t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
t_badfc:BAD_TRAP(0xfc)
t_kgdb: KGDB_TRAP(0xfd)
dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */
dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */
.globl end_traptable
end_traptable:
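/* Each entry in the table above is one 16-byte (four instruction)
 * trap vector, so the hardware dispatches trap type tt to
 * (a sketch of the sparc32 dispatch rule):
 *
 *	handler = trapbase + (tt << 4);
 *
 * which is why every macro used above must expand to exactly four
 * instructions.
 */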
#ifdef CONFIG_SMP
/* Trap tables for the other cpus. */
.globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3
trapbase_cpu1:
BAD_TRAP(0x0)
SRMMU_TFAULT
TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction)
TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL
WINDOW_FILL
TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler)
SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow)
TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access)
BAD_TRAP(0x21)
BAD_TRAP(0x22)
BAD_TRAP(0x23)
TRAP_ENTRY(0x24, do_cp_disabled)
SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26)
BAD_TRAP(0x27)
TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT
TRAP_ENTRY(0x2a, do_hw_divzero)
BAD_TRAP(0x2b)
BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows)
BAD_TRAP(0x84) BAD_TRAP(0x85) BAD_TRAP(0x86)
BAD_TRAP(0x87) BAD_TRAP(0x88) BAD_TRAP(0x89)
BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP BAD_TRAP(0x91)
BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f)
GETCC_TRAP
SETCC_TRAP
GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc)
KGDB_TRAP(0xfd)
BAD_TRAP(0xfe)
BAD_TRAP(0xff)
trapbase_cpu2:
BAD_TRAP(0x0)
SRMMU_TFAULT
TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction)
TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL
WINDOW_FILL
TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler)
SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow)
TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1)
TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3)
TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5)
TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7)
TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9)
TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11)
TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13)
TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access)
BAD_TRAP(0x21)
BAD_TRAP(0x22)
BAD_TRAP(0x23)
TRAP_ENTRY(0x24, do_cp_disabled)
SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26)
BAD_TRAP(0x27)
TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT
TRAP_ENTRY(0x2a, do_hw_divzero)
BAD_TRAP(0x2b)
BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows)
BAD_TRAP(0x84)
BAD_TRAP(0x85)
BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP BAD_TRAP(0x91)
BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f)
GETCC_TRAP
SETCC_TRAP
GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc)
KGDB_TRAP(0xfd)
BAD_TRAP(0xfe)
BAD_TRAP(0xff)
trapbase_cpu3:
BAD_TRAP(0x0)
SRMMU_TFAULT
TRAP_ENTRY(0x2, bad_instruction)
TRAP_ENTRY(0x3, priv_instruction)
TRAP_ENTRY(0x4, fpd_trap_handler)
WINDOW_SPILL
WINDOW_FILL
TRAP_ENTRY(0x7, mna_handler)
TRAP_ENTRY(0x8, fpe_trap_handler)
SRMMU_DFAULT
TRAP_ENTRY(0xa, do_tag_overflow)
TRAP_ENTRY(0xb, do_watchpoint)
BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
TRAP_ENTRY_INTERRUPT(1)
TRAP_ENTRY_INTERRUPT(2)
TRAP_ENTRY_INTERRUPT(3)
TRAP_ENTRY_INTERRUPT(4)
TRAP_ENTRY_INTERRUPT(5)
TRAP_ENTRY_INTERRUPT(6)
TRAP_ENTRY_INTERRUPT(7)
TRAP_ENTRY_INTERRUPT(8)
TRAP_ENTRY_INTERRUPT(9)
TRAP_ENTRY_INTERRUPT(10)
TRAP_ENTRY_INTERRUPT(11)
TRAP_ENTRY_INTERRUPT(12)
TRAP_ENTRY_INTERRUPT(13)
TRAP_ENTRY_INTERRUPT(14)
TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
TRAP_ENTRY(0x20, do_reg_access)
BAD_TRAP(0x21)
BAD_TRAP(0x22)
BAD_TRAP(0x23)
TRAP_ENTRY(0x24, do_cp_disabled)
SKIP_TRAP(0x25, unimp_flush)
BAD_TRAP(0x26)
BAD_TRAP(0x27)
TRAP_ENTRY(0x28, do_cp_exception)
SRMMU_DFAULT
TRAP_ENTRY(0x2a, do_hw_divzero)
BAD_TRAP(0x2b) BAD_TRAP(0x2c)
BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
BAD_TRAP(0x50)
BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
BAD_TRAP(0x7e) BAD_TRAP(0x7f)
BAD_TRAP(0x80)
BREAKPOINT_TRAP
TRAP_ENTRY(0x82, do_hw_divzero)
TRAP_ENTRY(0x83, do_flush_windows)
BAD_TRAP(0x84) BAD_TRAP(0x85)
BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
LINUX_SYSCALL_TRAP
BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
BAD_TRAP(0x9f)
GETCC_TRAP
SETCC_TRAP
GETPSR_TRAP
BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
BAD_TRAP(0xfc)
KGDB_TRAP(0xfd)
BAD_TRAP(0xfe)
BAD_TRAP(0xff)
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,360
|
arch/sparc/kernel/trampoline_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* trampoline.S: Jump start slave processors on sparc64.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>
.data
.align 8
call_method:
.asciz "call-method"
.align 8
itlb_load:
.asciz "SUNW,itlb-load"
.align 8
dtlb_load:
.asciz "SUNW,dtlb-load"
#define TRAMP_STACK_SIZE 1024
.align 16
tramp_stack:
.skip TRAMP_STACK_SIZE
.align 8
.globl sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
BRANCH_IF_SUN4V(g1, niagara_startup)
BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)
ba,pt %xcc, spitfire_startup
nop
cheetah_plus_startup:
/* Preserve OBP chosen DCU and DCR register settings. */
ba,pt %xcc, cheetah_generic_startup
nop
cheetah_startup:
mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
wr %g1, %asr18
sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
or %g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
sllx %g5, 32, %g5
or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
stxa %g5, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
/* fallthru */
cheetah_generic_startup:
mov TSB_EXTENSION_P, %g3
stxa %g0, [%g3] ASI_DMMU
stxa %g0, [%g3] ASI_IMMU
membar #Sync
mov TSB_EXTENSION_S, %g3
stxa %g0, [%g3] ASI_DMMU
membar #Sync
mov TSB_EXTENSION_N, %g3
stxa %g0, [%g3] ASI_DMMU
stxa %g0, [%g3] ASI_IMMU
membar #Sync
/* fallthru */
niagara_startup:
/* Disable STICK_INT interrupts. */
sethi %hi(0x80000000), %g5
sllx %g5, 32, %g5
wr %g5, %asr25
ba,pt %xcc, startup_continue
nop
spitfire_startup:
mov (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
stxa %g1, [%g0] ASI_LSU_CONTROL
membar #Sync
startup_continue:
mov %o0, %l0
BRANCH_IF_SUN4V(g1, niagara_lock_tlb)
sethi %hi(0x80000000), %g2
sllx %g2, 32, %g2
wr %g2, 0, %tick_cmpr
/* Call OBP by hand to lock KERNBASE into i/d tlbs.
 * We lock 'num_kernel_image_mappings' consecutive entries.
*/
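/* The stores below build a p1275 "call-method" argument frame on the
 * temporary stack. Roughly (a sketch, read straight off the offsets
 * used by the stx instructions):
 *
 *	+0x00 service name	"call-method"
 *	+0x08 num args		5
 *	+0x10 num returns	1
 *	+0x18 method name	"SUNW,itlb-load" / "SUNW,dtlb-load"
 *	+0x20 MMU ihandle	prom_mmu_ihandle_cache
 *	+0x28 virtual address
 *	+0x30 TTE data
 *	+0x38 TLB entry index
 *
 * all at %sp + 2047 (STACK_BIAS) + 128, just above the register save
 * area of the frame.
 */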
sethi %hi(prom_entry_lock), %g2
1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
brnz,pn %g1, 1b
nop
/* Get onto temporary stack which will be in the locked
* kernel image.
*/
sethi %hi(tramp_stack), %g1
or %g1, %lo(tramp_stack), %g1
add %g1, TRAMP_STACK_SIZE, %g1
sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
flushw
/* Setup the loop variables:
* %l3: VADDR base
* %l4: TTE base
 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings' - 1
* %l6: Number of TTE entries to map
* %l7: Highest TTE entry number, we count down
*/
sethi %hi(KERNBASE), %l3
sethi %hi(kern_locked_tte_data), %l4
ldx [%l4 + %lo(kern_locked_tte_data)], %l4
clr %l5
sethi %hi(num_kernel_image_mappings), %l6
lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
mov 15, %l7
BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
mov 63, %l7
2:
3:
/* Lock into I-MMU */
sethi %hi(call_method), %g2
or %g2, %lo(call_method), %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
mov 5, %g2
stx %g2, [%sp + 2047 + 128 + 0x08]
mov 1, %g2
stx %g2, [%sp + 2047 + 128 + 0x10]
sethi %hi(itlb_load), %g2
or %g2, %lo(itlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
sethi %hi(prom_mmu_ihandle_cache), %g2
lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
/* Each TTE maps 4MB, convert index to offset. */
sllx %l5, 22, %g1
add %l3, %g1, %g2
stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR
add %l4, %g1, %g2
stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE
/* TTE index is highest minus loop index. */
sub %l7, %l5, %g2
stx %g2, [%sp + 2047 + 128 + 0x38]
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
/* Lock into D-MMU */
sethi %hi(call_method), %g2
or %g2, %lo(call_method), %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
mov 5, %g2
stx %g2, [%sp + 2047 + 128 + 0x08]
mov 1, %g2
stx %g2, [%sp + 2047 + 128 + 0x10]
sethi %hi(dtlb_load), %g2
or %g2, %lo(dtlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
sethi %hi(prom_mmu_ihandle_cache), %g2
lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
/* Each TTE maps 4MB, convert index to offset. */
sllx %l5, 22, %g1
add %l3, %g1, %g2
stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR
add %l4, %g1, %g2
stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE
/* TTE index is highest minus loop index. */
sub %l7, %l5, %g2
stx %g2, [%sp + 2047 + 128 + 0x38]
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
add %l5, 1, %l5
cmp %l5, %l6
bne,pt %xcc, 3b
nop
sethi %hi(prom_entry_lock), %g2
stb %g0, [%g2 + %lo(prom_entry_lock)]
ba,pt %xcc, after_lock_tlb
nop
niagara_lock_tlb:
sethi %hi(KERNBASE), %l3
sethi %hi(kern_locked_tte_data), %l4
ldx [%l4 + %lo(kern_locked_tte_data)], %l4
clr %l5
sethi %hi(num_kernel_image_mappings), %l6
lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
1:
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
sllx %l5, 22, %g2
add %l3, %g2, %o0
clr %o1
add %l4, %g2, %o2
mov HV_MMU_IMMU, %o3
ta HV_FAST_TRAP
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
sllx %l5, 22, %g2
add %l3, %g2, %o0
clr %o1
add %l4, %g2, %o2
mov HV_MMU_DMMU, %o3
ta HV_FAST_TRAP
add %l5, 1, %l5
cmp %l5, %l6
bne,pt %xcc, 1b
nop
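/* In C terms each pass of the loop above makes two hypervisor calls
 * (a sketch; the function name is illustrative):
 *
 *	hv_mmu_map_perm_addr(KERNBASE + (i << 22), 0,
 *			     tte_data + (i << 22), HV_MMU_IMMU);
 *	hv_mmu_map_perm_addr(KERNBASE + (i << 22), 0,
 *			     tte_data + (i << 22), HV_MMU_DMMU);
 *
 * locking one 4MB kernel image mapping per iteration into each TLB.
 */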
after_lock_tlb:
wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
wr %g0, 0, %fprs
wr %g0, ASI_P, %asi
mov PRIMARY_CONTEXT, %g7
661: stxa %g0, [%g7] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g0, [%g7] ASI_MMU
.previous
membar #Sync
mov SECONDARY_CONTEXT, %g7
661: stxa %g0, [%g7] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g0, [%g7] ASI_MMU
.previous
membar #Sync
/* Everything we do here, until we properly take over the
* trap table, must be done with extreme care. We cannot
* make any references to %g6 (current thread pointer),
* %g4 (current task pointer), or %g5 (base of current cpu's
* per-cpu area) until we properly take over the trap table
* from the firmware and hypervisor.
*
* Get onto temporary stack which is in the locked kernel image.
*/
sethi %hi(tramp_stack), %g1
or %g1, %lo(tramp_stack), %g1
add %g1, TRAMP_STACK_SIZE, %g1
sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
mov 0, %fp
/* Put garbage in these registers to trap any access to them. */
set 0xdeadbeef, %g4
set 0xdeadbeef, %g5
set 0xdeadbeef, %g6
call init_irqwork_curcpu
nop
sethi %hi(tlb_type), %g3
lduw [%g3 + %lo(tlb_type)], %g2
cmp %g2, 3
bne,pt %icc, 1f
nop
call hard_smp_processor_id
nop
call sun4v_register_mondo_queues
nop
1: call init_cur_cpu_trap
ldx [%l0], %o0
/* Start using proper page size encodings in ctx register. */
sethi %hi(sparc64_kern_pri_context), %g3
ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
661: stxa %g2, [%g1] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g1] ASI_MMU
.previous
membar #Sync
wrpr %g0, 0, %wstate
sethi %hi(prom_entry_lock), %g2
1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
brnz,pn %g1, 1b
nop
/* As a hack, put &init_thread_union into %g6.
* prom_world() loads from here to restore the %asi
* register.
*/
sethi %hi(init_thread_union), %g6
or %g6, %lo(init_thread_union), %g6
sethi %hi(is_sun4v), %o0
lduw [%o0 + %lo(is_sun4v)], %o0
brz,pt %o0, 2f
nop
TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
add %g2, TRAP_PER_CPU_FAULT_INFO, %g2
stxa %g2, [%g0] ASI_SCRATCHPAD
/* Compute physical address:
*
* paddr = kern_base + (mmfsa_vaddr - KERNBASE)
*/
sethi %hi(KERNBASE), %g3
sub %g2, %g3, %g2
sethi %hi(kern_base), %g3
ldx [%g3 + %lo(kern_base)], %g3
add %g2, %g3, %o1
sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
mov 2, %g2
stx %g2, [%sp + 2047 + 128 + 0x08]
mov 0, %g2
stx %g2, [%sp + 2047 + 128 + 0x10]
stx %o0, [%sp + 2047 + 128 + 0x18]
stx %o1, [%sp + 2047 + 128 + 0x20]
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
ba,pt %xcc, 3f
nop
2: sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
mov 1, %g2
stx %g2, [%sp + 2047 + 128 + 0x08]
mov 0, %g2
stx %g2, [%sp + 2047 + 128 + 0x10]
stx %o0, [%sp + 2047 + 128 + 0x18]
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
3: sethi %hi(prom_entry_lock), %g2
stb %g0, [%g2 + %lo(prom_entry_lock)]
ldx [%l0], %g6
ldx [%g6 + TI_TASK], %g4
mov 1, %g5
sllx %g5, THREAD_SHIFT, %g5
sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
add %g6, %g5, %sp
rdpr %pstate, %o1
or %o1, PSTATE_IE, %o1
wrpr %o1, 0, %pstate
call smp_callin
nop
call cpu_panic
nop
1: b,a,pt %xcc, 1b
.align 8
sparc64_cpu_startup_end:
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,593
|
arch/sparc/kernel/pci_sun4v_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* pci_sun4v_asm: Hypervisor calls for PCI support.
*
* Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
/* %o0: devhandle
* %o1: tsbid
* %o2: num ttes
* %o3: io_attributes
* %o4: io_page_list phys address
*
* returns %o0: -status if status was non-zero, else
* %o0: num pages mapped
*/
ENTRY(pci_sun4v_iommu_map)
mov %o5, %g1
mov HV_FAST_PCI_IOMMU_MAP, %o5
ta HV_FAST_TRAP
brnz,pn %o0, 1f
sub %g0, %o0, %o0
mov %o1, %o0
1: retl
nop
ENDPROC(pci_sun4v_iommu_map)
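/* A C-side sketch of how this wrapper reads (parameter names are
 * illustrative):
 *
 *	long pci_sun4v_iommu_map(devhandle, tsbid, num_ttes,
 *				 io_attrs, io_page_list_pa);
 *
 * On failure the hypervisor status in %o0 is negated so the caller
 * sees a conventional negative error; on success the number of pages
 * actually mapped (returned by the hypervisor in %o1) is moved into
 * %o0 instead.
 */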
/* %o0: devhandle
* %o1: tsbid
* %o2: num ttes
*
* returns %o0: num ttes demapped
*/
ENTRY(pci_sun4v_iommu_demap)
mov HV_FAST_PCI_IOMMU_DEMAP, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(pci_sun4v_iommu_demap)
/* %o0: devhandle
* %o1: tsbid
* %o2: &io_attributes
* %o3: &real_address
*
* returns %o0: status
*/
ENTRY(pci_sun4v_iommu_getmap)
mov %o2, %o4
mov HV_FAST_PCI_IOMMU_GETMAP, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
stx %o2, [%o3]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_iommu_getmap)
/* %o0: devhandle
* %o1: pci_device
* %o2: pci_config_offset
* %o3: size
*
* returns %o0: data
*
* If there is an error, the data will be returned
* as all 1's.
*/
ENTRY(pci_sun4v_config_get)
mov HV_FAST_PCI_CONFIG_GET, %o5
ta HV_FAST_TRAP
brnz,a,pn %o1, 1f
mov -1, %o2
1: retl
mov %o2, %o0
ENDPROC(pci_sun4v_config_get)
/* %o0: devhandle
* %o1: pci_device
* %o2: pci_config_offset
* %o3: size
* %o4: data
*
* returns %o0: status
*
* status will be zero if the operation completed
* successfully, else -1 if not
*/
ENTRY(pci_sun4v_config_put)
mov HV_FAST_PCI_CONFIG_PUT, %o5
ta HV_FAST_TRAP
brnz,a,pn %o1, 1f
mov -1, %o1
1: retl
mov %o1, %o0
ENDPROC(pci_sun4v_config_put)
/* %o0: devhandle
* %o1: msiqid
* %o2: msiq phys address
* %o3: num entries
*
* returns %o0: status
*
* status will be zero if the operation completed
* successfully, else -1 if not
*/
ENTRY(pci_sun4v_msiq_conf)
mov HV_FAST_PCI_MSIQ_CONF, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_conf)
/* %o0: devhandle
* %o1: msiqid
* %o2: &msiq_phys_addr
* %o3: &msiq_num_entries
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_info)
mov %o2, %o4
mov HV_FAST_PCI_MSIQ_INFO, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
stx %o2, [%o3]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_info)
/* %o0: devhandle
* %o1: msiqid
* %o2: &valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_getvalid)
mov HV_FAST_PCI_MSIQ_GETVALID, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_getvalid)
/* %o0: devhandle
* %o1: msiqid
* %o2: valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_setvalid)
mov HV_FAST_PCI_MSIQ_SETVALID, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_setvalid)
/* %o0: devhandle
* %o1: msiqid
* %o2: &state
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_getstate)
mov HV_FAST_PCI_MSIQ_GETSTATE, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_getstate)
/* %o0: devhandle
* %o1: msiqid
* %o2: state
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_setstate)
mov HV_FAST_PCI_MSIQ_SETSTATE, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_setstate)
/* %o0: devhandle
* %o1: msiqid
* %o2: &head
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_gethead)
mov HV_FAST_PCI_MSIQ_GETHEAD, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_gethead)
/* %o0: devhandle
* %o1: msiqid
* %o2: head
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_sethead)
mov HV_FAST_PCI_MSIQ_SETHEAD, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_sethead)
/* %o0: devhandle
* %o1: msiqid
* %o2: &tail
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_gettail)
mov HV_FAST_PCI_MSIQ_GETTAIL, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_gettail)
/* %o0: devhandle
* %o1: msinum
* %o2: &valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_getvalid)
mov HV_FAST_PCI_MSI_GETVALID, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_getvalid)
/* %o0: devhandle
* %o1: msinum
* %o2: valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_setvalid)
mov HV_FAST_PCI_MSI_SETVALID, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_setvalid)
/* %o0: devhandle
* %o1: msinum
* %o2: &msiq
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_getmsiq)
mov HV_FAST_PCI_MSI_GETMSIQ, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_getmsiq)
/* %o0: devhandle
* %o1: msinum
* %o2: msitype
* %o3: msiq
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_setmsiq)
mov HV_FAST_PCI_MSI_SETMSIQ, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_setmsiq)
/* %o0: devhandle
* %o1: msinum
* %o2: &state
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_getstate)
mov HV_FAST_PCI_MSI_GETSTATE, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_getstate)
/* %o0: devhandle
* %o1: msinum
* %o2: state
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_setstate)
mov HV_FAST_PCI_MSI_SETSTATE, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_setstate)
/* %o0: devhandle
* %o1: msinum
* %o2: &msiq
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msg_getmsiq)
mov HV_FAST_PCI_MSG_GETMSIQ, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msg_getmsiq)
/* %o0: devhandle
* %o1: msinum
* %o2: msiq
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msg_setmsiq)
mov HV_FAST_PCI_MSG_SETMSIQ, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setmsiq)
/* %o0: devhandle
* %o1: msinum
* %o2: &valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msg_getvalid)
mov HV_FAST_PCI_MSG_GETVALID, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msg_getvalid)
/* %o0: devhandle
* %o1: msinum
* %o2: valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msg_setvalid)
mov HV_FAST_PCI_MSG_SETVALID, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setvalid)
/*
* %o0: devhandle
* %o1: r_addr
* %o2: size
* %o3: pagesize
* %o4: virt
* %o5: &iotsb_num/&iotsb_handle
*
* returns %o0: status
* %o1: iotsb_num/iotsb_handle
*/
ENTRY(pci_sun4v_iotsb_conf)
mov %o5, %g1
mov HV_FAST_PCI_IOTSB_CONF, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%g1]
ENDPROC(pci_sun4v_iotsb_conf)
/*
* %o0: devhandle
* %o1: iotsb_num/iotsb_handle
* %o2: pci_device
*
* returns %o0: status
*/
ENTRY(pci_sun4v_iotsb_bind)
mov HV_FAST_PCI_IOTSB_BIND, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(pci_sun4v_iotsb_bind)
/*
* %o0: devhandle
* %o1: iotsb_num/iotsb_handle
* %o2: index_count
* %o3: iotte_attributes
* %o4: io_page_list_p
* %o5: &mapped
*
* returns %o0: status
* %o1: #mapped
*/
ENTRY(pci_sun4v_iotsb_map)
mov %o5, %g1
mov HV_FAST_PCI_IOTSB_MAP, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%g1]
ENDPROC(pci_sun4v_iotsb_map)
/*
* %o0: devhandle
* %o1: iotsb_num/iotsb_handle
* %o2: iotsb_index
* %o3: #iottes
* %o4: &demapped
*
* returns %o0: status
* %o1: #demapped
*/
ENTRY(pci_sun4v_iotsb_demap)
mov HV_FAST_PCI_IOTSB_DEMAP, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4]
ENDPROC(pci_sun4v_iotsb_demap)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 29,079
|
arch/sparc/kernel/entry.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
*/
#include <linux/linkage.h>
#include <linux/errno.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
#include <asm/mxcc.h>
#include <asm/thread_info.h>
#include <asm/param.h>
#include <asm/unistd.h>
#include <asm/asmmacro.h>
#include <asm/export.h>
#define curptr g6
/* These are just handy. */
#define _SV save %sp, -STACKFRAME_SZ, %sp
#define _RS restore
#define FLUSH_ALL_KERNEL_WINDOWS \
_SV; _SV; _SV; _SV; _SV; _SV; _SV; \
_RS; _RS; _RS; _RS; _RS; _RS; _RS;
.text
#ifdef CONFIG_KGDB
.align 4
.globl arch_kgdb_breakpoint
.type arch_kgdb_breakpoint,#function
arch_kgdb_breakpoint:
ta 0x7d
retl
nop
.size arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
#endif
#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
.align 4
.globl floppy_hardint
floppy_hardint:
/*
 * This code cannot touch registers %l0, %l1 and %l2
* because SAVE_ALL depends on their values. It depends
* on %l3 also, but we regenerate it before a call.
* Other registers are:
* %l3 -- base address of fdc registers
* %l4 -- pdma_vaddr
* %l5 -- scratch for ld/st address
* %l6 -- pdma_size
* %l7 -- scratch [floppy byte, ld/st address, aux. data]
*/
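/* In C terms the fast path below is roughly (a sketch, not the
 * authoritative driver logic; the raw pointer derefs stand in for
 * the actual register accesses):
 *
 *	while (pdma_size) {
 *		u8 st = *fdc_status;
 *		if (!(st & 0x80)) goto fifo_emptied;	// FIFO drained
 *		if (!(st & 0x20)) goto overrun;		// left non-DMA mode
 *		if (st & 0x40)
 *			*pdma_vaddr++ = fdc_status[1];	// read a byte
 *		else
 *			fdc_status[1] = *pdma_vaddr++;	// write a byte
 *		pdma_size--;
 *	}
 */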
/* Do we have work to do? */
sethi %hi(doing_pdma), %l7
ld [%l7 + %lo(doing_pdma)], %l7
cmp %l7, 0
be floppy_dosoftint
nop
/* Load fdc register base */
sethi %hi(fdc_status), %l3
ld [%l3 + %lo(fdc_status)], %l3
/* Setup register addresses */
sethi %hi(pdma_vaddr), %l5 ! transfer buffer
ld [%l5 + %lo(pdma_vaddr)], %l4
sethi %hi(pdma_size), %l5 ! bytes to go
ld [%l5 + %lo(pdma_size)], %l6
next_byte:
ldub [%l3], %l7
andcc %l7, 0x80, %g0 ! Does fifo still have data
bz floppy_fifo_emptied ! fifo has been emptied...
andcc %l7, 0x20, %g0 ! in non-dma mode still?
bz floppy_overrun ! nope, overrun
andcc %l7, 0x40, %g0 ! 0=write 1=read
bz floppy_write
sub %l6, 0x1, %l6
/* Ok, actually read this byte */
ldub [%l3 + 1], %l7
orcc %g0, %l6, %g0
stb %l7, [%l4]
bne next_byte
add %l4, 0x1, %l4
b floppy_tdone
nop
floppy_write:
/* Ok, actually write this byte */
ldub [%l4], %l7
orcc %g0, %l6, %g0
stb %l7, [%l3 + 1]
bne next_byte
add %l4, 0x1, %l4
/* fall through... */
floppy_tdone:
sethi %hi(pdma_vaddr), %l5
st %l4, [%l5 + %lo(pdma_vaddr)]
sethi %hi(pdma_size), %l5
st %l6, [%l5 + %lo(pdma_size)]
/* Flip terminal count pin */
set auxio_register, %l7
ld [%l7], %l7
ldub [%l7], %l5
or %l5, 0xc2, %l5
stb %l5, [%l7]
andn %l5, 0x02, %l5
2:
/* Kill some time so the bits set */
WRITE_PAUSE
WRITE_PAUSE
stb %l5, [%l7]
/* Prevent recursion */
sethi %hi(doing_pdma), %l7
b floppy_dosoftint
st %g0, [%l7 + %lo(doing_pdma)]
/* We emptied the FIFO, but we haven't read everything
 * yet. Store the current transfer address and the bytes
 * left to read so we can continue when the next fast
 * IRQ comes in.
 */
floppy_fifo_emptied:
sethi %hi(pdma_vaddr), %l5
st %l4, [%l5 + %lo(pdma_vaddr)]
sethi %hi(pdma_size), %l7
st %l6, [%l7 + %lo(pdma_size)]
/* Restore condition codes */
wr %l0, 0x0, %psr
WRITE_PAUSE
jmp %l1
rett %l2
floppy_overrun:
sethi %hi(pdma_vaddr), %l5
st %l4, [%l5 + %lo(pdma_vaddr)]
sethi %hi(pdma_size), %l5
st %l6, [%l5 + %lo(pdma_size)]
/* Prevent recursion */
sethi %hi(doing_pdma), %l7
st %g0, [%l7 + %lo(doing_pdma)]
/* fall through... */
floppy_dosoftint:
rd %wim, %l3
SAVE_ALL
/* Set all IRQs off. */
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
mov 11, %o0 ! floppy irq level (unused anyway)
mov %g0, %o1 ! devid is not used in fast interrupts
call sparc_floppy_irq
add %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
RESTORE_ALL
#endif /* (CONFIG_BLK_DEV_FD) */
/* Bad trap handler */
.globl bad_trap_handler
bad_trap_handler:
SAVE_ALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0 ! pt_regs
call do_hw_interrupt
mov %l7, %o1 ! trap number
RESTORE_ALL
/* For now all IRQs not registered get sent here. handler_irq() will
 * see if a routine is registered to handle this interrupt and, if
 * not, it will say so on the console.
 */
.align 4
.globl real_irq_entry, patch_handler_irq
real_irq_entry:
SAVE_ALL
#ifdef CONFIG_SMP
.globl patchme_maybe_smp_msg
cmp %l7, 11
patchme_maybe_smp_msg:
bgu maybe_smp4m_msg
nop
#endif
real_irq_continue:
or %l0, PSR_PIL, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
wr %g2, PSR_ET, %psr
WRITE_PAUSE
mov %l7, %o0 ! irq level
patch_handler_irq:
call handler_irq
add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
or %l0, PSR_PIL, %g2 ! restore PIL after handler_irq
wr %g2, PSR_ET, %psr ! keep ET up
WRITE_PAUSE
RESTORE_ALL
#ifdef CONFIG_SMP
/* SMP per-cpu ticker interrupts are handled specially. */
smp4m_ticker:
bne real_irq_continue+4
or %l0, PSR_PIL, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
wr %g2, PSR_ET, %psr
WRITE_PAUSE
call smp4m_percpu_timer_interrupt
add %sp, STACKFRAME_SZ, %o0
wr %l0, PSR_ET, %psr
WRITE_PAUSE
RESTORE_ALL
#define GET_PROCESSOR4M_ID(reg) \
rd %tbr, %reg; \
srl %reg, 12, %reg; \
and %reg, 3, %reg;
/* Here is where we check for possible SMP IPI passed to us
* on some level other than 15 which is the NMI and only used
* for cross calls. That has a separate entry point below.
*
* IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
*/
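/* After the "srl %o3, 28" below, the three soft-IRQ pending bits
 * decode as (a sketch):
 *
 *	if (pending & 1) smp_call_function_single_interrupt();
 *	if (pending & 2) smp_call_function_interrupt();
 *	if (pending & 4) smp_resched_interrupt();
 */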
maybe_smp4m_msg:
GET_PROCESSOR4M_ID(o3)
sethi %hi(sun4m_irq_percpu), %l5
sll %o3, 2, %o3
or %l5, %lo(sun4m_irq_percpu), %o5
sethi %hi(0x70000000), %o2 ! Check all soft-IRQs
ld [%o5 + %o3], %o1
ld [%o1 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending
andcc %o3, %o2, %g0
be,a smp4m_ticker
cmp %l7, 14
/* Soft-IRQ IPI */
st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x70000000
WRITE_PAUSE
ld [%o1 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending
WRITE_PAUSE
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
srl %o3, 28, %o2 ! shift for simpler checks below
maybe_smp4m_msg_check_single:
andcc %o2, 0x1, %g0
beq,a maybe_smp4m_msg_check_mask
andcc %o2, 0x2, %g0
call smp_call_function_single_interrupt
nop
andcc %o2, 0x2, %g0
maybe_smp4m_msg_check_mask:
beq,a maybe_smp4m_msg_check_resched
andcc %o2, 0x4, %g0
call smp_call_function_interrupt
nop
andcc %o2, 0x4, %g0
maybe_smp4m_msg_check_resched:
/* rescheduling is done in RESTORE_ALL regardless, but increment the stats */
beq,a maybe_smp4m_msg_out
nop
call smp_resched_interrupt
nop
maybe_smp4m_msg_out:
RESTORE_ALL
.align 4
.globl linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
SAVE_ALL
sethi %hi(0x80000000), %o2
GET_PROCESSOR4M_ID(o0)
sethi %hi(sun4m_irq_percpu), %l5
or %l5, %lo(sun4m_irq_percpu), %o5
sll %o0, 2, %o0
ld [%o5 + %o0], %o5
ld [%o5 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending
andcc %o3, %o2, %g0
be sun4m_nmi_error ! Must be an NMI async memory error
st %o2, [%o5 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x80000000
WRITE_PAUSE
ld [%o5 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending
WRITE_PAUSE
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call smp4m_cross_call_irq
nop
b ret_trap_lockless_ipi
clr %l6
.globl smp4d_ticker
/* SMP per-cpu ticker interrupts are handled specially. */
smp4d_ticker:
SAVE_ALL
or %l0, PSR_PIL, %g2
sethi %hi(CC_ICLR), %o0
sethi %hi(1 << 14), %o1
or %o0, %lo(CC_ICLR), %o0
stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */
wr %g2, 0x0, %psr
WRITE_PAUSE
wr %g2, PSR_ET, %psr
WRITE_PAUSE
call smp4d_percpu_timer_interrupt
add %sp, STACKFRAME_SZ, %o0
wr %l0, PSR_ET, %psr
WRITE_PAUSE
RESTORE_ALL
.align 4
.globl linux_trap_ipi15_sun4d
linux_trap_ipi15_sun4d:
SAVE_ALL
sethi %hi(CC_BASE), %o4
sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
or %o4, (CC_EREG - CC_BASE), %o0
ldda [%o0] ASI_M_MXCC, %o0
andcc %o0, %o2, %g0
bne 1f
sethi %hi(BB_STAT2), %o2
lduba [%o2] ASI_M_CTL, %o2
andcc %o2, BB_STAT2_MASK, %g0
bne 2f
or %o4, (CC_ICLR - CC_BASE), %o0
sethi %hi(1 << 15), %o1
stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call smp4d_cross_call_irq
nop
b ret_trap_lockless_ipi
clr %l6
1: /* MXCC error */
2: /* BB error */
/* Disable PIL 15 */
set CC_IMSK, %l4
lduha [%l4] ASI_M_MXCC, %l5
sethi %hi(1 << 15), %l7
or %l5, %l7, %l5
stha %l5, [%l4] ASI_M_MXCC
/* FIXME */
1: b,a 1b
.globl smpleon_ipi
.extern leon_ipi_interrupt
/* SMP per-cpu IPI interrupts are handled specially. */
smpleon_ipi:
SAVE_ALL
or %l0, PSR_PIL, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
wr %g2, PSR_ET, %psr
WRITE_PAUSE
call leonsmp_ipi_interrupt
add %sp, STACKFRAME_SZ, %o1 ! pt_regs
wr %l0, PSR_ET, %psr
WRITE_PAUSE
RESTORE_ALL
.align 4
.globl linux_trap_ipi15_leon
linux_trap_ipi15_leon:
SAVE_ALL
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call leon_cross_call_irq
nop
b ret_trap_lockless_ipi
clr %l6
#endif /* CONFIG_SMP */
/* This routine handles illegal instructions and privileged
* instruction attempts from user code.
*/
.align 4
.globl bad_instruction
bad_instruction:
sethi %hi(0xc1f80000), %l4
ld [%l1], %l5
sethi %hi(0x81d80000), %l7
and %l5, %l4, %l5
cmp %l5, %l7
be 1f
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call do_illegal_instruction
mov %l0, %o3
RESTORE_ALL
1: /* unimplemented flush - just skip */
jmpl %l2, %g0
rett %l2 + 4
.align 4
.globl priv_instruction
priv_instruction:
SAVE_ALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call do_priv_instruction
mov %l0, %o3
RESTORE_ALL
/* This routine handles unaligned data accesses. */
.align 4
.globl mna_handler
mna_handler:
andcc %l0, PSR_PS, %g0
be mna_fromuser
nop
SAVE_ALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
ld [%l1], %o1
call kernel_unaligned_trap
add %sp, STACKFRAME_SZ, %o0
RESTORE_ALL
mna_fromuser:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
ld [%l1], %o1
call user_unaligned_trap
add %sp, STACKFRAME_SZ, %o0
RESTORE_ALL
/* This routine handles floating point disabled traps. */
.align 4
.globl fpd_trap_handler
fpd_trap_handler:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call do_fpd_trap
mov %l0, %o3
RESTORE_ALL
/* This routine handles Floating Point Exceptions. */
.align 4
.globl fpe_trap_handler
fpe_trap_handler:
set fpsave_magic, %l5
cmp %l1, %l5
be 1f
sethi %hi(fpsave), %l5
or %l5, %lo(fpsave), %l5
cmp %l1, %l5
bne 2f
sethi %hi(fpsave_catch2), %l5
or %l5, %lo(fpsave_catch2), %l5
wr %l0, 0x0, %psr
WRITE_PAUSE
jmp %l5
rett %l5 + 4
1:
sethi %hi(fpsave_catch), %l5
or %l5, %lo(fpsave_catch), %l5
wr %l0, 0x0, %psr
WRITE_PAUSE
jmp %l5
rett %l5 + 4
2:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call do_fpe_trap
mov %l0, %o3
RESTORE_ALL
/* This routine handles Tag Overflow Exceptions. */
.align 4
.globl do_tag_overflow
do_tag_overflow:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call handle_tag_overflow
mov %l0, %o3
RESTORE_ALL
/* This routine handles Watchpoint Exceptions. */
.align 4
.globl do_watchpoint
do_watchpoint:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call handle_watchpoint
mov %l0, %o3
RESTORE_ALL
/* This routine handles Register Access Exceptions. */
.align 4
.globl do_reg_access
do_reg_access:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call handle_reg_access
mov %l0, %o3
RESTORE_ALL
/* This routine handles Co-Processor Disabled Exceptions. */
.align 4
.globl do_cp_disabled
do_cp_disabled:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call handle_cp_disabled
mov %l0, %o3
RESTORE_ALL
/* This routine handles Co-Processor Exceptions. */
.align 4
.globl do_cp_exception
do_cp_exception:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call handle_cp_exception
mov %l0, %o3
RESTORE_ALL
/* This routine handles Hardware Divide By Zero Exceptions. */
.align 4
.globl do_hw_divzero
do_hw_divzero:
SAVE_ALL
wr %l0, PSR_ET, %psr ! re-enable traps
WRITE_PAUSE
add %sp, STACKFRAME_SZ, %o0
mov %l1, %o1
mov %l2, %o2
call handle_hw_divzero
mov %l0, %o3
RESTORE_ALL
.align 4
.globl do_flush_windows
do_flush_windows:
SAVE_ALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
andcc %l0, PSR_PS, %g0
bne dfw_kernel
nop
call flush_user_windows
nop
/* Advance over the trap instruction. */
ld [%sp + STACKFRAME_SZ + PT_NPC], %l1
add %l1, 0x4, %l2
st %l1, [%sp + STACKFRAME_SZ + PT_PC]
st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
RESTORE_ALL
.globl flush_patch_one
/* We get these for debugging routines using __builtin_return_address() */
dfw_kernel:
flush_patch_one:
FLUSH_ALL_KERNEL_WINDOWS
/* Advance over the trap instruction. */
ld [%sp + STACKFRAME_SZ + PT_NPC], %l1
add %l1, 0x4, %l2
st %l1, [%sp + STACKFRAME_SZ + PT_PC]
st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
RESTORE_ALL
/* The getcc software trap. The user wants the condition codes from
* the %psr in register %g1.
*/
.align 4
.globl getcc_trap_handler
getcc_trap_handler:
srl %l0, 20, %g1 ! give user
and %g1, 0xf, %g1 ! only ICC bits in %psr
jmp %l2 ! advance over trap instruction
rett %l2 + 0x4 ! like this...
/* The setcc software trap. The user has condition codes in %g1
* that it would like placed in the %psr. Be careful not to flip
* any unintentional bits!
*/
.align 4
.globl setcc_trap_handler
setcc_trap_handler:
sll %g1, 0x14, %l4
set PSR_ICC, %l5
andn %l0, %l5, %l0 ! clear ICC bits in %psr
and %l4, %l5, %l4 ! clear non-ICC bits in user value
or %l4, %l0, %l4 ! or them in... mix mix mix
wr %l4, 0x0, %psr ! set new %psr
WRITE_PAUSE ! TI scumbags...
jmp %l2 ! advance over trap instruction
rett %l2 + 0x4 ! like this...
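/* From user space these two handlers are reached via software traps
 * ("ta 0x20" / "ta 0x21" land at trap table entries 0xa0 / 0xa1).
 * A sketch of a getcc user -- the local register binding is an
 * assumption about how a libc might issue it:
 *
 *	register unsigned long g1 asm("g1");
 *	asm volatile("ta 0x20" : "=r" (g1));	// ICC bits now in g1
 */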
sun4m_nmi_error:
/* NMI async memory error handling. */
sethi %hi(0x80000000), %l4
sethi %hi(sun4m_irq_global), %o5
ld [%o5 + %lo(sun4m_irq_global)], %l5
st %l4, [%l5 + 0x0c] ! sun4m_irq_global->mask_set=0x80000000
WRITE_PAUSE
ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending
WRITE_PAUSE
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call sun4m_nmi
nop
st %l4, [%l5 + 0x08] ! sun4m_irq_global->mask_clear=0x80000000
WRITE_PAUSE
ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending
WRITE_PAUSE
RESTORE_ALL
#ifndef CONFIG_SMP
.align 4
.globl linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
SAVE_ALL
ba sun4m_nmi_error
nop
#endif /* CONFIG_SMP */
.align 4
.globl srmmu_fault
srmmu_fault:
mov 0x400, %l5
mov 0x300, %l4
LEON_PI(lda [%l5] ASI_LEON_MMUREGS, %l6) ! read sfar first
SUN_PI_(lda [%l5] ASI_M_MMUREGS, %l6) ! read sfar first
LEON_PI(lda [%l4] ASI_LEON_MMUREGS, %l5) ! read sfsr last
SUN_PI_(lda [%l4] ASI_M_MMUREGS, %l5) ! read sfsr last
andn %l6, 0xfff, %l6
srl %l5, 6, %l5 ! and encode all info into l7
and %l5, 2, %l5
or %l5, %l6, %l6
or %l6, %l7, %l7 ! l7 = [addr,write,txtfault]
SAVE_ALL
mov %l7, %o1
mov %l7, %o2
and %o1, 1, %o1 ! arg2 = text_faultp
mov %l7, %o3
and %o2, 2, %o2 ! arg3 = writep
andn %o3, 0xfff, %o3 ! arg4 = faulting address
wr %l0, PSR_ET, %psr
WRITE_PAUSE
call do_sparc_fault
add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
RESTORE_ALL
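/* The word assembled into %l7 in srmmu_fault above packs all three
 * fault arguments (a sketch of the encoding):
 *
 *	l7 = (sfar & ~0xfff)	// page-aligned fault address
 *	   | ((sfsr >> 6) & 2)	// write bit from the fault status reg
 *	   | txtfault;		// bit 0 set when entered via the
 *				// instruction-fault vector
 */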
.align 4
sunos_execv:
.globl sunos_execv
b sys_execve
clr %i2
.align 4
.globl sys_sigstack
sys_sigstack:
mov %o7, %l5
mov %fp, %o2
call do_sys_sigstack
mov %l5, %o7
.align 4
.globl sys_sigreturn
sys_sigreturn:
call do_sigreturn
add %sp, STACKFRAME_SZ, %o0
ld [%curptr + TI_FLAGS], %l5
andcc %l5, _TIF_SYSCALL_TRACE, %g0
be 1f
nop
call syscall_trace
mov 1, %o1
1:
/* We don't want to muck with user registers like a
* normal syscall, just return.
*/
RESTORE_ALL
.align 4
.globl sys_rt_sigreturn
sys_rt_sigreturn:
call do_rt_sigreturn
add %sp, STACKFRAME_SZ, %o0
ld [%curptr + TI_FLAGS], %l5
andcc %l5, _TIF_SYSCALL_TRACE, %g0
be 1f
nop
add %sp, STACKFRAME_SZ, %o0
call syscall_trace
mov 1, %o1
1:
/* We are returning to a signal handler. */
RESTORE_ALL
/* Now that we have a real sys_clone, sys_fork() is
* implemented in terms of it. Our _real_ implementation
* of SunOS vfork() will use sys_vfork().
*
* XXX These three should be consolidated into mostly shared
* XXX code just like on sparc64... -DaveM
*/
.align 4
.globl sys_fork, flush_patch_two
sys_fork:
mov %o7, %l5
flush_patch_two:
FLUSH_ALL_KERNEL_WINDOWS;
ld [%curptr + TI_TASK], %o4
rd %psr, %g4
WRITE_PAUSE
mov SIGCHLD, %o0 ! arg0: clone flags
rd %wim, %g5
WRITE_PAUSE
mov %fp, %o1 ! arg1: usp
std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
mov 0, %o3
call sparc_do_fork
mov %l5, %o7
/* Whee, kernel threads! */
.globl sys_clone, flush_patch_three
sys_clone:
mov %o7, %l5
flush_patch_three:
FLUSH_ALL_KERNEL_WINDOWS;
ld [%curptr + TI_TASK], %o4
rd %psr, %g4
WRITE_PAUSE
/* arg0,1: flags,usp -- loaded already */
cmp %o1, 0x0 ! Is new_usp NULL?
rd %wim, %g5
WRITE_PAUSE
be,a 1f
mov %fp, %o1 ! yes, use callers usp
andn %o1, 7, %o1 ! no, align to 8 bytes
1:
std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
mov 0, %o3
call sparc_do_fork
mov %l5, %o7
/* Whee, real vfork! */
.globl sys_vfork, flush_patch_four
sys_vfork:
flush_patch_four:
FLUSH_ALL_KERNEL_WINDOWS;
ld [%curptr + TI_TASK], %o4
rd %psr, %g4
WRITE_PAUSE
rd %wim, %g5
WRITE_PAUSE
std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
mov %fp, %o1
or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
sethi %hi(sparc_do_fork), %l1
mov 0, %o3
jmpl %l1 + %lo(sparc_do_fork), %g0
add %sp, STACKFRAME_SZ, %o2
.align 4
linux_sparc_ni_syscall:
sethi %hi(sys_ni_syscall), %l7
b do_syscall
or %l7, %lo(sys_ni_syscall), %l7
linux_syscall_trace:
add %sp, STACKFRAME_SZ, %o0
call syscall_trace
mov 0, %o1
cmp %o0, 0
bne 3f
mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ld [%sp + STACKFRAME_SZ + PT_G1], %g1
sethi %hi(sys_call_table), %l7
ld [%sp + STACKFRAME_SZ + PT_I0], %i0
or %l7, %lo(sys_call_table), %l7
ld [%sp + STACKFRAME_SZ + PT_I1], %i1
ld [%sp + STACKFRAME_SZ + PT_I2], %i2
ld [%sp + STACKFRAME_SZ + PT_I3], %i3
ld [%sp + STACKFRAME_SZ + PT_I4], %i4
ld [%sp + STACKFRAME_SZ + PT_I5], %i5
cmp %g1, NR_syscalls
bgeu 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
mov %i0, %o0
ld [%l7 + %l4], %l7
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3
b 2f
mov %i4, %o4
.globl ret_from_fork
ret_from_fork:
call schedule_tail
ld [%g3 + TI_TASK], %o0
b ret_sys_call
ld [%sp + STACKFRAME_SZ + PT_I0], %o0
.globl ret_from_kernel_thread
ret_from_kernel_thread:
call schedule_tail
ld [%g3 + TI_TASK], %o0
ld [%sp + STACKFRAME_SZ + PT_G1], %l0
call %l0
ld [%sp + STACKFRAME_SZ + PT_G2], %o0
rd %psr, %l1
ld [%sp + STACKFRAME_SZ + PT_PSR], %l0
andn %l0, PSR_CWP, %l0
nop
and %l1, PSR_CWP, %l1
or %l0, %l1, %l0
st %l0, [%sp + STACKFRAME_SZ + PT_PSR]
b ret_sys_call
mov 0, %o0
/* Linux native system calls enter here... */
.align 4
.globl linux_sparc_syscall
linux_sparc_syscall:
sethi %hi(PSR_SYSCALL), %l4
or %l0, %l4, %l0
/* Direct access to user regs, much faster. */
cmp %g1, NR_syscalls
bgeu linux_sparc_ni_syscall
sll %g1, 2, %l4
ld [%l7 + %l4], %l7
do_syscall:
SAVE_ALL_HEAD
rd %wim, %l3
wr %l0, PSR_ET, %psr
mov %i0, %o0
mov %i1, %o1
mov %i2, %o2
ld [%curptr + TI_FLAGS], %l5
mov %i3, %o3
andcc %l5, _TIF_SYSCALL_TRACE, %g0
mov %i4, %o4
bne linux_syscall_trace
mov %i0, %l5
2:
call %l7
mov %i5, %o5
3:
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
ret_sys_call:
ld [%curptr + TI_FLAGS], %l6
cmp %o0, -ERESTART_RESTARTBLOCK
ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
set PSR_C, %g2
bgeu 1f
andcc %l6, _TIF_SYSCALL_TRACE, %g0
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
clr %l6
st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
bne linux_syscall_trace2
ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
add %l1, 0x4, %l2 /* npc = npc+4 */
st %l1, [%sp + STACKFRAME_SZ + PT_PC]
b ret_trap_entry
st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1:
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
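/* What userland sees, sketched in C: the kernel hands back abs(errno)
 * in %o0 with the carry bit set, and libc turns that into the usual
 * convention:
 *
 *	res = do_syscall(...);
 *	if (psr_carry_set) { errno = res; res = -1; }
 */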
sub %g0, %o0, %o0
or %g3, %g2, %g3
st %o0, [%sp + STACKFRAME_SZ + PT_I0]
mov 1, %l6
st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
bne linux_syscall_trace2
ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
add %l1, 0x4, %l2 /* npc = npc+4 */
st %l1, [%sp + STACKFRAME_SZ + PT_PC]
b ret_trap_entry
st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
linux_syscall_trace2:
add %sp, STACKFRAME_SZ, %o0
mov 1, %o1
call syscall_trace
add %l1, 0x4, %l2 /* npc = npc+4 */
st %l1, [%sp + STACKFRAME_SZ + PT_PC]
b ret_trap_entry
st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
/* Saving and restoring the FPU state is best done from lowlevel code.
*
* void fpsave(unsigned long *fpregs, unsigned long *fsr,
* void *fpqueue, unsigned long *fpqdepth)
*/
.globl fpsave
fpsave:
st %fsr, [%o1] ! this can trap on us if fpu is in bogon state
ld [%o1], %g1
set 0x2000, %g4
andcc %g1, %g4, %g0
be 2f
mov 0, %g2
/* We have an fpqueue to save. */
1:
std %fq, [%o2]
fpsave_magic:
st %fsr, [%o1]
ld [%o1], %g3
andcc %g3, %g4, %g0
add %g2, 1, %g2
bne 1b
add %o2, 8, %o2
2:
st %g2, [%o3]
std %f0, [%o0 + 0x00]
std %f2, [%o0 + 0x08]
std %f4, [%o0 + 0x10]
std %f6, [%o0 + 0x18]
std %f8, [%o0 + 0x20]
std %f10, [%o0 + 0x28]
std %f12, [%o0 + 0x30]
std %f14, [%o0 + 0x38]
std %f16, [%o0 + 0x40]
std %f18, [%o0 + 0x48]
std %f20, [%o0 + 0x50]
std %f22, [%o0 + 0x58]
std %f24, [%o0 + 0x60]
std %f26, [%o0 + 0x68]
std %f28, [%o0 + 0x70]
retl
std %f30, [%o0 + 0x78]
/* Thanks to Theo de Raadt and the authors of the Sprite/NetBSD/OpenBSD
 * code for pointing out this possible deadlock: while we save state
 * above, we could trap on the %fsr store, so our low-level FPU trap
 * code has to know how to deal with this.
 */
fpsave_catch:
b fpsave_magic + 4
st %fsr, [%o1]
fpsave_catch2:
b fpsave + 4
st %fsr, [%o1]
/* void fpload(unsigned long *fpregs, unsigned long *fsr); */
.globl fpload
fpload:
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
ld [%o1], %fsr
retl
nop
/* __ndelay and __udelay take two arguments:
* 0 - nsecs or usecs to delay
* 1 - per_cpu udelay_val (loops per jiffy)
*
* Note that ndelay gives HZ times higher resolution but has a 10ms
* limit. udelay can handle up to 1s.
*/
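/* Roughly, in C terms (a sketch assuming HZ=100, which makes
 * 0x1ae ~= 2**32 * HZ / 10**9):
 *
 *	frac  = (ns * 0x1ae) & 0xffffffff;	jiffies as a 0.32 fraction
 *	loops = ((u64)frac * udelay_val) >> 32;
 *
 * after which delay_continue simply burns 'loops' iterations.
 */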
.globl __ndelay
__ndelay:
save %sp, -STACKFRAME_SZ, %sp
mov %i0, %o0 ! round multiplier up so large ns ok
mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ)
umul %o0, %o1, %o0
rd %y, %o1
mov %i1, %o1 ! udelay_val
umul %o0, %o1, %o0
rd %y, %o1
ba delay_continue
mov %o1, %o0 ! >>32 later for better resolution
.globl __udelay
__udelay:
save %sp, -STACKFRAME_SZ, %sp
mov %i0, %o0
sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok
or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000
umul %o0, %o1, %o0
rd %y, %o1
mov %i1, %o1 ! udelay_val
umul %o0, %o1, %o0
rd %y, %o1
sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32,
or %l0, %lo(0x028f4b62), %l0 ! (or into the sethi result)
addcc %o0, %l0, %o0 ! 2**32 * 0.009 999
bcs,a 3f
add %o1, 0x01, %o1
3:
mov HZ, %o0 ! >>32 earlier for wider range
umul %o0, %o1, %o0
rd %y, %o1
delay_continue:
cmp %o0, 0x0
1:
bne 1b
subcc %o0, 1, %o0
ret
restore
EXPORT_SYMBOL(__udelay)
EXPORT_SYMBOL(__ndelay)
/* Handle a software breakpoint */
/* We have to inform parent that child has stopped */
.align 4
.globl breakpoint_trap
breakpoint_trap:
rd %wim,%l3
SAVE_ALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
call sparc_breakpoint
add %sp, STACKFRAME_SZ, %o0
RESTORE_ALL
#ifdef CONFIG_KGDB
ENTRY(kgdb_trap_low)
rd %wim,%l3
SAVE_ALL
wr %l0, PSR_ET, %psr
WRITE_PAUSE
mov %l7, %o0 ! trap_level
call kgdb_trap
add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
RESTORE_ALL
ENDPROC(kgdb_trap_low)
#endif
.align 4
.globl flush_patch_exception
flush_patch_exception:
FLUSH_ALL_KERNEL_WINDOWS;
ldd [%o0], %o6
jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h
mov 1, %g1 ! signal EFAULT condition
.align 4
.globl kill_user_windows, kuw_patch1_7win
.globl kuw_patch1
kuw_patch1_7win: sll %o3, 6, %o3
/* No matter how much overhead this routine has in the worst
* case scenario, it is several times better than taking the
* traps with the old method of just doing flush_user_windows().
*/
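/* Roughly, the loop below does something like:
 *
 *	while (uwinmask) {
 *		wim = rotate_right(wim, 1);	simulate a save
 *		uwinmask &= ~wim;		that window is gone
 *	}
 *
 * with PIL raised so an interrupt cannot change the mask under us.
 */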
kill_user_windows:
ld [%g6 + TI_UWINMASK], %o0 ! get current umask
orcc %g0, %o0, %g0 ! if no bits set, we are done
be 3f ! nothing to do
rd %psr, %o5 ! must clear interrupts
or %o5, PSR_PIL, %o4 ! or else that could change
wr %o4, 0x0, %psr ! the uwinmask state
WRITE_PAUSE ! burn them cycles
1:
ld [%g6 + TI_UWINMASK], %o0 ! get consistent state
orcc %g0, %o0, %g0 ! did an interrupt come in?
be 4f ! yep, we are done
rd %wim, %o3 ! get current wim
srl %o3, 1, %o4 ! simulate a save
kuw_patch1:
sll %o3, 7, %o3 ! compute next wim
or %o4, %o3, %o3 ! result
andncc %o0, %o3, %o0 ! clean this bit in umask
bne kuw_patch1 ! not done yet
srl %o3, 1, %o4 ! begin another save simulation
wr %o3, 0x0, %wim ! set the new wim
st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask
4:
wr %o5, 0x0, %psr ! re-enable interrupts
WRITE_PAUSE ! burn baby burn
3:
retl ! return
st %g0, [%g6 + TI_W_SAVED] ! no windows saved
.align 4
.globl restore_current
restore_current:
LOAD_CURRENT(g6, o0)
retl
nop
#ifdef CONFIG_PCIC_PCI
#include <asm/pcic.h>
.align 4
.globl linux_trap_ipi15_pcic
linux_trap_ipi15_pcic:
rd %wim, %l3
SAVE_ALL
/*
* First deactivate the NMI,
* or we cannot drop ET and cannot get window spill traps.
* The busy loop is necessary because the PIO error
* sometimes does not go away quickly and we trap again.
*/
sethi %hi(pcic_regs), %o1
ld [%o1 + %lo(pcic_regs)], %o2
! Get pending status for printouts later.
ld [%o2 + PCI_SYS_INT_PENDING], %o0
mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1:
ld [%o2 + PCI_SYS_INT_PENDING], %o1
andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
bne 1b
nop
or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
WRITE_PAUSE
wr %l4, PSR_ET, %psr
WRITE_PAUSE
call pcic_nmi
add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
RESTORE_ALL
.globl pcic_nmi_trap_patch
pcic_nmi_trap_patch:
sethi %hi(linux_trap_ipi15_pcic), %l3
jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0
rd %psr, %l0
.word 0
#endif /* CONFIG_PCIC_PCI */
.globl flushw_all
flushw_all:
save %sp, -0x40, %sp
save %sp, -0x40, %sp
save %sp, -0x40, %sp
save %sp, -0x40, %sp
save %sp, -0x40, %sp
save %sp, -0x40, %sp
save %sp, -0x40, %sp
restore
restore
restore
restore
restore
restore
ret
restore
#ifdef CONFIG_SMP
ENTRY(hard_smp_processor_id)
661: rd %tbr, %g1
srl %g1, 12, %o0
and %o0, 3, %o0
.section .cpuid_patch, "ax"
/* Instruction location. */
.word 661b
/* SUN4D implementation. */
lda [%g0] ASI_M_VIKING_TMP1, %o0
nop
nop
/* LEON implementation. */
rd %asr17, %o0
srl %o0, 0x1c, %o0
nop
.previous
retl
nop
ENDPROC(hard_smp_processor_id)
#endif
/* End of entry.S */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 11,128
|
arch/sparc/kernel/ttable_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions.
*
* Copyright (C) 1996, 2001, 2006 David S. Miller (davem@davemloft.net)
*/
.globl sparc64_ttable_tl0, sparc64_ttable_tl1
.globl tl0_icpe, tl1_icpe
.globl tl0_dcpe, tl1_dcpe
.globl tl0_fecc, tl1_fecc
.globl tl0_cee, tl1_cee
.globl tl0_iae, tl1_iae
.globl tl0_dae, tl1_dae
sparc64_ttable_tl0:
tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
tl0_iax: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
tl0_itsb_4v: SUN4V_ITSB_MISS
tl0_iae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
tl0_ill: membar #Sync
TRAP_7INSNS(do_illegal_instruction)
tl0_privop: TRAP(do_privop)
tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17)
tl0_resv018: BTRAP(0x18) BTRAP(0x19)
tl0_mcd: SUN4V_MCD_PRECISE
tl0_resv01b: BTRAP(0x1b)
tl0_resv01c: BTRAP(0x1c) BTRAP(0x1d) BTRAP(0x1e) BTRAP(0x1f)
tl0_fpdis: TRAP_NOSAVE(do_fpdis)
tl0_fpieee: TRAP_SAVEFPU(do_fpieee)
tl0_fpother: TRAP_NOSAVE(do_fpother_check_fitos)
tl0_tof: TRAP(do_tof)
tl0_cwin: CLEAN_WINDOW
tl0_div0: TRAP(do_div0)
tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
tl0_resv02f: BTRAP(0x2f)
tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception)
tl0_dtsb_4v: SUN4V_DTSB_MISS
tl0_dae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv033: BTRAP(0x33)
tl0_mna: TRAP_NOSAVE(do_mna)
tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
tl0_stdfmna: TRAP_NOSAVE(do_stdfmna)
tl0_privact: TRAP_NOSAVE(__do_privact)
tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d)
tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
#ifdef CONFIG_SMP
tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
tl0_irq4: BTRAP(0x44)
#else
tl0_irq1: BTRAP(0x41)
tl0_irq2: BTRAP(0x42)
tl0_irq3: BTRAP(0x43)
tl0_irq4: BTRAP(0x44)
#endif
tl0_irq5: TRAP_IRQ(handler_irq, 5)
#ifdef CONFIG_SMP
tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6)
#else
tl0_irq6: BTRAP(0x46)
#endif
tl0_irq7: TRAP_IRQ(deferred_pcr_work_irq, 7)
#if defined(CONFIG_KGDB) && defined(CONFIG_SMP)
tl0_irq8: TRAP_IRQ(smp_kgdb_capture_client, 8)
#else
tl0_irq8: BTRAP(0x48)
#endif
tl0_irq9: BTRAP(0x49)
tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
tl0_irq14: TRAP_IRQ(timer_interrupt, 14)
tl0_irq15: TRAP_NMI_IRQ(perfctr_irq, 15)
tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
tl0_ivec: TRAP_IVEC
tl0_paw: TRAP(do_paw)
tl0_vaw: TRAP(do_vaw)
tl0_cee: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
tl0_iamiss:
#include "itlb_miss.S"
tl0_damiss:
#include "dtlb_miss.S"
tl0_daprot:
#include "dtlb_prot.S"
tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */
tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */
tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */
tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo)
tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo)
tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo)
tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo)
tl0_s0n: SPILL_0_NORMAL
tl0_s1n: SPILL_1_NORMAL
tl0_s2n: SPILL_2_NORMAL
tl0_s3n: SPILL_0_NORMAL_ETRAP
tl0_s4n: SPILL_1_GENERIC_ETRAP
tl0_s5n: SPILL_1_GENERIC_ETRAP_FIXUP
tl0_s6n: SPILL_2_GENERIC_ETRAP
tl0_s7n: SPILL_2_GENERIC_ETRAP_FIXUP
tl0_s0o: SPILL_0_OTHER
tl0_s1o: SPILL_1_OTHER
tl0_s2o: SPILL_2_OTHER
tl0_s3o: SPILL_3_OTHER
tl0_s4o: SPILL_4_OTHER
tl0_s5o: SPILL_5_OTHER
tl0_s6o: SPILL_6_OTHER
tl0_s7o: SPILL_7_OTHER
tl0_f0n: FILL_0_NORMAL
tl0_f1n: FILL_1_NORMAL
tl0_f2n: FILL_2_NORMAL
tl0_f3n: FILL_3_NORMAL
tl0_f4n: FILL_4_NORMAL
tl0_f5n: FILL_0_NORMAL_RTRAP
tl0_f6n: FILL_1_GENERIC_RTRAP
tl0_f7n: FILL_2_GENERIC_RTRAP
tl0_f0o: FILL_0_OTHER
tl0_f1o: FILL_1_OTHER
tl0_f2o: FILL_2_OTHER
tl0_f3o: FILL_3_OTHER
tl0_f4o: FILL_4_OTHER
tl0_f5o: FILL_5_OTHER
tl0_f6o: FILL_6_OTHER
tl0_f7o: FILL_7_OTHER
tl0_resv100: BTRAP(0x100)
tl0_bkpt: BREAKPOINT_TRAP
tl0_divz: TRAP(do_div0)
tl0_flushw: FLUSH_WINDOW_TRAP
tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107) BTRAP(0x108)
tl0_resv109: BTRAP(0x109) BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d)
tl0_resv10e: BTRAP(0x10e) BTRAP(0x10f)
tl0_linux32: LINUX_32BIT_SYSCALL_TRAP
tl0_oldlinux64: LINUX_64BIT_SYSCALL_TRAP
tl0_resv112: TRAP_UTRAP(UT_TRAP_INSTRUCTION_18,0x112) TRAP_UTRAP(UT_TRAP_INSTRUCTION_19,0x113)
tl0_resv114: TRAP_UTRAP(UT_TRAP_INSTRUCTION_20,0x114) TRAP_UTRAP(UT_TRAP_INSTRUCTION_21,0x115)
tl0_resv116: TRAP_UTRAP(UT_TRAP_INSTRUCTION_22,0x116) TRAP_UTRAP(UT_TRAP_INSTRUCTION_23,0x117)
tl0_resv118: TRAP_UTRAP(UT_TRAP_INSTRUCTION_24,0x118) TRAP_UTRAP(UT_TRAP_INSTRUCTION_25,0x119)
tl0_resv11a: TRAP_UTRAP(UT_TRAP_INSTRUCTION_26,0x11a) TRAP_UTRAP(UT_TRAP_INSTRUCTION_27,0x11b)
tl0_resv11c: TRAP_UTRAP(UT_TRAP_INSTRUCTION_28,0x11c) TRAP_UTRAP(UT_TRAP_INSTRUCTION_29,0x11d)
tl0_resv11e: TRAP_UTRAP(UT_TRAP_INSTRUCTION_30,0x11e) TRAP_UTRAP(UT_TRAP_INSTRUCTION_31,0x11f)
tl0_getcc: GETCC_TRAP
tl0_setcc: SETCC_TRAP
tl0_getpsr: TRAP(do_getpsr)
tl0_resv123: BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126) BTRAP(0x127)
tl0_resv128: BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c)
tl0_resv12d: BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131)
tl0_resv132: BTRAP(0x132) BTRAP(0x133) BTRAP(0x134) BTRAP(0x135) BTRAP(0x136)
tl0_resv137: BTRAP(0x137) BTRAP(0x138) BTRAP(0x139) BTRAP(0x13a) BTRAP(0x13b)
tl0_resv13c: BTRAP(0x13c) BTRAP(0x13d) BTRAP(0x13e) BTRAP(0x13f) BTRAP(0x140)
tl0_resv141: BTRAP(0x141) BTRAP(0x142) BTRAP(0x143) BTRAP(0x144) BTRAP(0x145)
tl0_resv146: BTRAP(0x146) BTRAP(0x147) BTRAP(0x148) BTRAP(0x149) BTRAP(0x14a)
tl0_resv14b: BTRAP(0x14b) BTRAP(0x14c) BTRAP(0x14d) BTRAP(0x14e) BTRAP(0x14f)
tl0_resv150: BTRAP(0x150) BTRAP(0x151) BTRAP(0x152) BTRAP(0x153) BTRAP(0x154)
tl0_resv155: BTRAP(0x155) BTRAP(0x156) BTRAP(0x157) BTRAP(0x158) BTRAP(0x159)
tl0_resv15a: BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e)
tl0_resv15f: BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163)
tl0_resv164: BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168)
tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c)
tl0_linux64: LINUX_64BIT_SYSCALL_TRAP
tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context)
tl0_resv170: KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) KGDB_TRAP(0x172)
tl0_resv173: UPROBES_TRAP(0x173) UPROBES_TRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7)
tl0_resv180: BTRAPS(0x180) BTRAPS(0x188)
tl0_resv190: BTRAPS(0x190) BTRAPS(0x198)
tl0_resv1a0: BTRAPS(0x1a0) BTRAPS(0x1a8)
tl0_resv1b0: BTRAPS(0x1b0) BTRAPS(0x1b8)
tl0_resv1c0: BTRAPS(0x1c0) BTRAPS(0x1c8)
tl0_resv1d0: BTRAPS(0x1d0) BTRAPS(0x1d8)
tl0_resv1e0: BTRAPS(0x1e0) BTRAPS(0x1e8)
tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
sparc64_ttable_tl1:
tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
tl1_itsb_4v: SUN4V_ITSB_MISS
tl1_iae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
tl1_ill: TRAPTL1(do_ill_tl1)
tl1_privop: BTRAPTL1(0x11)
tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15)
tl1_resv016: BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19)
tl1_resv01a: BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d)
tl1_resv01e: BTRAPTL1(0x1e) BTRAPTL1(0x1f)
tl1_fpdis: TRAP_NOSAVE(do_fpdis)
tl1_fpieee: TRAPTL1(do_fpieee_tl1)
tl1_fpother: TRAPTL1(do_fpother_tl1)
tl1_tof: TRAPTL1(do_tof_tl1)
tl1_cwin: CLEAN_WINDOW
tl1_div0: TRAPTL1(do_div0_tl1)
tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
tl1_dtsb_4v: SUN4V_DTSB_MISS
tl1_dae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv033: BTRAPTL1(0x33)
tl1_mna: TRAP_NOSAVE(do_mna)
tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
tl1_stdfmna: TRAPTL1(do_stdfmna_tl1)
tl1_privact: BTRAPTL1(0x37)
tl1_resv038: BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b)
tl1_resv03c: BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f)
tl1_resv040: BTRAPTL1(0x40)
tl1_irq1: TRAP_IRQ(do_irq_tl1, 1) TRAP_IRQ(do_irq_tl1, 2) TRAP_IRQ(do_irq_tl1, 3)
tl1_irq4: TRAP_IRQ(do_irq_tl1, 4) TRAP_IRQ(do_irq_tl1, 5) TRAP_IRQ(do_irq_tl1, 6)
tl1_irq7: TRAP_IRQ(do_irq_tl1, 7) TRAP_IRQ(do_irq_tl1, 8) TRAP_IRQ(do_irq_tl1, 9)
tl1_irq10: TRAP_IRQ(do_irq_tl1, 10) TRAP_IRQ(do_irq_tl1, 11)
tl1_irq12: TRAP_IRQ(do_irq_tl1, 12) TRAP_IRQ(do_irq_tl1, 13)
tl1_irq14: TRAP_IRQ(do_irq_tl1, 14) TRAP_IRQ(do_irq_tl1, 15)
tl1_resv050: BTRAPTL1(0x50) BTRAPTL1(0x51) BTRAPTL1(0x52) BTRAPTL1(0x53)
tl1_resv054: BTRAPTL1(0x54) BTRAPTL1(0x55) BTRAPTL1(0x56) BTRAPTL1(0x57)
tl1_resv058: BTRAPTL1(0x58) BTRAPTL1(0x59) BTRAPTL1(0x5a) BTRAPTL1(0x5b)
tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f)
tl1_ivec: TRAP_IVEC
tl1_paw: TRAPTL1(do_paw_tl1)
tl1_vaw: TRAPTL1(do_vaw_tl1)
tl1_cee: BTRAPTL1(0x63)
tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67)
tl1_damiss:
#include "dtlb_miss.S"
tl1_daprot:
#include "dtlb_prot.S"
tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */
tl1_dcpe: BTRAPTL1(0x71) /* D-cache Parity Error on Cheetah+ */
tl1_icpe: BTRAPTL1(0x72) /* I-cache Parity Error on Cheetah+ */
tl1_resv073: BTRAPTL1(0x73)
tl1_resv074: BTRAPTL1(0x74) BTRAPTL1(0x75) BTRAPTL1(0x76) BTRAPTL1(0x77)
tl1_resv078: BTRAPTL1(0x78) BTRAPTL1(0x79) BTRAPTL1(0x7a) BTRAPTL1(0x7b)
tl1_resv07c: BTRAPTL1(0x7c) BTRAPTL1(0x7d) BTRAPTL1(0x7e) BTRAPTL1(0x7f)
tl1_s0n: SPILL_0_NORMAL
tl1_s1n: SPILL_1_NORMAL
tl1_s2n: SPILL_2_NORMAL
tl1_s3n: SPILL_3_NORMAL
tl1_s4n: SPILL_4_NORMAL
tl1_s5n: SPILL_5_NORMAL
tl1_s6n: SPILL_6_NORMAL
tl1_s7n: SPILL_7_NORMAL
tl1_s0o: SPILL_0_OTHER
tl1_s1o: SPILL_1_OTHER
tl1_s2o: SPILL_2_OTHER
tl1_s3o: SPILL_3_OTHER
tl1_s4o: SPILL_4_OTHER
tl1_s5o: SPILL_5_OTHER
tl1_s6o: SPILL_6_OTHER
tl1_s7o: SPILL_7_OTHER
tl1_f0n: FILL_0_NORMAL
tl1_f1n: FILL_1_NORMAL
tl1_f2n: FILL_2_NORMAL
tl1_f3n: FILL_3_NORMAL
tl1_f4n: FILL_4_NORMAL
tl1_f5n: FILL_5_NORMAL
tl1_f6n: FILL_6_NORMAL
tl1_f7n: FILL_7_NORMAL
tl1_f0o: FILL_0_OTHER
tl1_f1o: FILL_1_OTHER
tl1_f2o: FILL_2_OTHER
tl1_f3o: FILL_3_OTHER
tl1_f4o: FILL_4_OTHER
tl1_f5o: FILL_5_OTHER
tl1_f6o: FILL_6_OTHER
tl1_f7o: FILL_7_OTHER
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,767
|
arch/sparc/kernel/rtrap_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* rtrap.S: Return from Sparc trap low-level code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>
#define t_psr l0
#define t_pc l1
#define t_npc l2
#define t_wim l3
#define twin_tmp1 l4
#define glob_tmp g4
#define curptr g6
/* 7 WINDOW SPARC PATCH INSTRUCTIONS */
.globl rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
.globl rtrap_7win_patch4, rtrap_7win_patch5
rtrap_7win_patch1: srl %t_wim, 0x6, %glob_tmp
rtrap_7win_patch2: and %glob_tmp, 0x7f, %glob_tmp
rtrap_7win_patch3: srl %g1, 7, %g2
rtrap_7win_patch4: srl %g2, 6, %g2
rtrap_7win_patch5: and %g1, 0x7f, %g1
/* END OF PATCH INSTRUCTIONS */
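/* When the chip has only 7 register windows, the five instructions
 * above are patched over rtrap_patch1..rtrap_patch5 below: each
 * rotate shift drops by one and the window masks become 0x7f
 * instead of 0xff.
 */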
/* We need to check for a few things, which are:
 * 1) The need to call schedule() because this
 * process's quantum is up.
 * 2) Pending signals for this process; if any
 * exist we need to call do_signal() to handle
 * them.
 *
 * Otherwise we just check whether the rett would land us
 * in an invalid window; if so, we need to grab
 * it off the user/kernel stack first.
 */
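/* In rough pseudo-code, the return-to-user path is:
 *
 *	if (TIF_NEED_RESCHED)
 *		schedule();
 *	while (TIF_DO_NOTIFY_RESUME work is pending)
 *		do_notify_resume();
 *	pull a register window from the stack if needed, then rett;
 */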
.globl ret_trap_entry, rtrap_patch1, rtrap_patch2
.globl rtrap_patch3, rtrap_patch4, rtrap_patch5
.globl ret_trap_lockless_ipi
ret_trap_entry:
ret_trap_lockless_ipi:
andcc %t_psr, PSR_PS, %g0
sethi %hi(PSR_SYSCALL), %g1
be 1f
andn %t_psr, %g1, %t_psr
wr %t_psr, 0x0, %psr
b ret_trap_kernel
nop
1:
ld [%curptr + TI_FLAGS], %g2
andcc %g2, (_TIF_NEED_RESCHED), %g0
be signal_p
nop
call schedule
nop
ld [%curptr + TI_FLAGS], %g2
signal_p:
andcc %g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0
bz,a ret_trap_continue
ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
mov %g2, %o2
mov %l5, %o1
call do_notify_resume
add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
b signal_p
ld [%curptr + TI_FLAGS], %g2
ret_trap_continue:
sethi %hi(PSR_SYSCALL), %g1
andn %t_psr, %g1, %t_psr
wr %t_psr, 0x0, %psr
WRITE_PAUSE
ld [%curptr + TI_W_SAVED], %twin_tmp1
orcc %g0, %twin_tmp1, %g0
be ret_trap_nobufwins
nop
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
mov 1, %o1
call try_to_clear_window_buffer
add %sp, STACKFRAME_SZ, %o0
b signal_p
ld [%curptr + TI_FLAGS], %g2
ret_trap_nobufwins:
/* Load up the user's out registers so we can pull
* a window from the stack, if necessary.
*/
LOAD_PT_INS(sp)
/* If there are already live user windows in the
* set we can return from trap safely.
*/
ld [%curptr + TI_UWINMASK], %twin_tmp1
orcc %g0, %twin_tmp1, %g0
bne ret_trap_userwins_ok
nop
/* Calculate the new %wim; we have to pull a register
 * window from the user's stack.
 */
ret_trap_pull_one_window:
rd %wim, %t_wim
sll %t_wim, 0x1, %twin_tmp1
rtrap_patch1: srl %t_wim, 0x7, %glob_tmp
or %glob_tmp, %twin_tmp1, %glob_tmp
rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
wr %glob_tmp, 0x0, %wim
/* Here comes the architecture specific
* branch to the user stack checking routine
* for return from traps.
*/
b srmmu_rett_stackchk
andcc %fp, 0x7, %g0
ret_trap_userwins_ok:
LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
or %t_pc, %t_npc, %g2
andcc %g2, 0x3, %g0
sethi %hi(PSR_SYSCALL), %g2
be 1f
andn %t_psr, %g2, %t_psr
b ret_trap_unaligned_pc
add %sp, STACKFRAME_SZ, %o0
1:
LOAD_PT_YREG(sp, g1)
LOAD_PT_GLOBALS(sp)
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
ret_trap_unaligned_pc:
ld [%sp + STACKFRAME_SZ + PT_PC], %o1
ld [%sp + STACKFRAME_SZ + PT_NPC], %o2
ld [%sp + STACKFRAME_SZ + PT_PSR], %o3
wr %t_wim, 0x0, %wim ! or else...
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
call do_memaccess_unaligned
nop
b signal_p
ld [%curptr + TI_FLAGS], %g2
ret_trap_kernel:
/* Will the rett land us in the invalid window? */
mov 2, %g1
sll %g1, %t_psr, %g1
rtrap_patch3: srl %g1, 8, %g2
or %g1, %g2, %g1
rd %wim, %g2
andcc %g2, %g1, %g0
be 1f ! Nope, just return from the trap
sll %g2, 0x1, %g1
/* We have to grab a window before returning. */
rtrap_patch4: srl %g2, 7, %g2
or %g1, %g2, %g1
rtrap_patch5: and %g1, 0xff, %g1
wr %g1, 0x0, %wim
/* Grrr, make sure we load from the right %sp... */
LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
restore %g0, %g0, %g0
LOAD_WINDOW(sp)
b 2f
save %g0, %g0, %g0
/* Reload the entire frame in case this is from a
* kernel system call or whatever...
*/
1:
LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
2:
sethi %hi(PSR_SYSCALL), %twin_tmp1
andn %t_psr, %twin_tmp1, %t_psr
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
ret_trap_user_stack_is_bolixed:
wr %t_wim, 0x0, %wim
wr %t_psr, PSR_ET, %psr
WRITE_PAUSE
call window_ret_fault
add %sp, STACKFRAME_SZ, %o0
b signal_p
ld [%curptr + TI_FLAGS], %g2
.globl srmmu_rett_stackchk
srmmu_rett_stackchk:
bne ret_trap_user_stack_is_bolixed
sethi %hi(PAGE_OFFSET), %g1
cmp %g1, %fp
bleu ret_trap_user_stack_is_bolixed
mov AC_M_SFSR, %g1
LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g0)
SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g0)
LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %g1)
SUN_PI_(lda [%g0] ASI_M_MMUREGS, %g1)
or %g1, 0x2, %g1
LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
restore %g0, %g0, %g0
LOAD_WINDOW(sp)
save %g0, %g0, %g0
andn %g1, 0x2, %g1
LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
mov AC_M_SFAR, %g2
LEON_PI(lda [%g2] ASI_LEON_MMUREGS, %g2)
SUN_PI_(lda [%g2] ASI_M_MMUREGS, %g2)
mov AC_M_SFSR, %g1
LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g1)
SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g1)
andcc %g1, 0x2, %g0
be ret_trap_userwins_ok
nop
b,a ret_trap_user_stack_is_bolixed
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,954
|
arch/sparc/kernel/trampoline_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* trampoline.S: SMP cpu boot-up trampoline code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/head.h>
#include <asm/psr.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/vaddrs.h>
#include <asm/contregs.h>
#include <asm/thread_info.h>
.globl sun4m_cpu_startup
.globl sun4d_cpu_startup
.align 4
/* When we start up a cpu for the first time it enters this routine.
* This initializes the chip from whatever state the prom left it
* in and sets PIL in %psr to 15, no irqs.
*/
sun4m_cpu_startup:
cpu1_startup:
sethi %hi(trapbase_cpu1), %g3
b 1f
or %g3, %lo(trapbase_cpu1), %g3
cpu2_startup:
sethi %hi(trapbase_cpu2), %g3
b 1f
or %g3, %lo(trapbase_cpu2), %g3
cpu3_startup:
sethi %hi(trapbase_cpu3), %g3
b 1f
or %g3, %lo(trapbase_cpu3), %g3
1:
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
set (PSR_PIL | PSR_S | PSR_PS), %g1
wr %g1, 0x0, %psr ! traps off though
WRITE_PAUSE
/* Our %wim is one behind CWP */
mov 2, %g1
wr %g1, 0x0, %wim
WRITE_PAUSE
/* This identifies "this cpu". */
wr %g3, 0x0, %tbr
WRITE_PAUSE
/* Give ourselves a stack and curptr. */
set current_set, %g5
srl %g3, 10, %g4
and %g4, 0xc, %g4
ld [%g5 + %g4], %g6
sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
add %g6, %sp, %sp
/* Turn on traps (PSR_ET). */
rd %psr, %g1
wr %g1, PSR_ET, %psr ! traps on
WRITE_PAUSE
/* Init our caches, etc. */
set poke_srmmu, %g5
ld [%g5], %g5
call %g5
nop
/* Start this processor. */
call smp_callin
nop
b,a smp_panic
.text
.align 4
smp_panic:
call cpu_panic
nop
/* CPUID in bootbus can be found at PA 0xff0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000
.align 4
sun4d_cpu_startup:
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
set (PSR_PIL | PSR_S | PSR_PS), %g1
wr %g1, 0x0, %psr ! traps off though
WRITE_PAUSE
/* Our %wim is one behind CWP */
mov 2, %g1
wr %g1, 0x0, %wim
WRITE_PAUSE
/* Set tbr - we use just one trap table. */
set trapbase, %g1
wr %g1, 0x0, %tbr
WRITE_PAUSE
/* Get our CPU id out of bootbus */
set SUN4D_BOOTBUS_CPUID, %g3
lduba [%g3] ASI_M_CTL, %g3
and %g3, 0xf8, %g3
srl %g3, 3, %g1
sta %g1, [%g0] ASI_M_VIKING_TMP1
/* Give ourselves a stack and curptr. */
set current_set, %g5
srl %g3, 1, %g4
ld [%g5 + %g4], %g6
sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
add %g6, %sp, %sp
/* Turn on traps (PSR_ET). */
rd %psr, %g1
wr %g1, PSR_ET, %psr ! traps on
WRITE_PAUSE
/* Init our caches, etc. */
set poke_srmmu, %g5
ld [%g5], %g5
call %g5
nop
/* Start this processor. */
call smp_callin
nop
b,a smp_panic
.align 4
.global leon_smp_cpu_startup, smp_penguin_ctable
leon_smp_cpu_startup:
set smp_penguin_ctable,%g1
ld [%g1+4],%g1
srl %g1,4,%g1
set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */
sta %g1, [%g5] ASI_LEON_MMUREGS
/* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
set (PSR_PIL | PSR_S | PSR_PS), %g1
wr %g1, 0x0, %psr ! traps off though
WRITE_PAUSE
/* Our %wim is one behind CWP */
mov 2, %g1
wr %g1, 0x0, %wim
WRITE_PAUSE
/* Set tbr - we use just one trap table. */
set trapbase, %g1
wr %g1, 0x0, %tbr
WRITE_PAUSE
/* Get our CPU id */
rd %asr17,%g3
/* Give ourselves a stack and curptr. */
set current_set, %g5
srl %g3, 28, %g4
sll %g4, 2, %g4
ld [%g5 + %g4], %g6
sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp
or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp
add %g6, %sp, %sp
/* Turn on traps (PSR_ET). */
rd %psr, %g1
wr %g1, PSR_ET, %psr ! traps on
WRITE_PAUSE
/* Init our caches, etc. */
set poke_srmmu, %g5
ld [%g5], %g5
call %g5
nop
/* Start this processor. */
call smp_callin
nop
b,a smp_panic
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,836
|
arch/sparc/kernel/sun4v_tlb_miss.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* sun4v_tlb_miss.S: Sun4v TLB miss handlers.
*
* Copyright (C) 2006 <davem@davemloft.net>
*/
.text
.align 32
/* Load ITLB fault information into VADDR and CTX, using BASE. */
#define LOAD_ITLB_INFO(BASE, VADDR, CTX) \
ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \
ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX;
/* Load DTLB fault information into VADDR and CTX, using BASE. */
#define LOAD_DTLB_INFO(BASE, VADDR, CTX) \
ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \
ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX;
/* DEST = (VADDR >> 22)
*
* Branch to ZERO_CTX_LABEL if context is zero.
*/
#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \
srlx VADDR, 22, DEST; \
brz,pn CTX, ZERO_CTX_LABEL; \
nop;
/* Create TSB pointer. This is something like:
*
* index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
* tsb_base = tsb_reg & ~0x7UL;
* tsb_index = ((vaddr >> HASH_SHIFT) & index_mask);
* tsb_ptr = tsb_base + (tsb_index * 16);
*/
#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
and TSB_PTR, 0x7, TMP1; \
mov 512, TMP2; \
andn TSB_PTR, 0x7, TSB_PTR; \
sllx TMP2, TMP1, TMP2; \
srlx VADDR, HASH_SHIFT, TMP1; \
sub TMP2, 1, TMP2; \
and TMP1, TMP2, TMP1; \
sllx TMP1, 4, TMP1; \
add TSB_PTR, TMP1, TSB_PTR;
sun4v_itlb_miss:
/* Load MMU Miss base into %g2. */
ldxa [%g0] ASI_SCRATCHPAD, %g2
/* Load UTSB reg into %g1. */
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1] ASI_SCRATCHPAD, %g1
LOAD_ITLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
cmp %g2, %g6
bne,a,pn %xcc, tsb_miss_page_table_walk
mov FAULT_CODE_ITLB, %g3
andcc %g3, _PAGE_EXEC_4V, %g0
be,a,pn %xcc, tsb_do_fault
mov FAULT_CODE_ITLB, %g3
/* We have a valid entry, make hypervisor call to load
* I-TLB and return from trap.
*
* %g3: PTE
* %g4: vaddr
*/
sun4v_itlb_load:
ldxa [%g0] ASI_SCRATCHPAD, %g6
mov %o0, %g1 ! save %o0
mov %o1, %g2 ! save %o1
mov %o2, %g5 ! save %o2
mov %o3, %g7 ! save %o3
mov %g4, %o0 ! vaddr
ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 ! ctx
mov %g3, %o2 ! PTE
mov HV_MMU_IMMU, %o3 ! flags
ta HV_MMU_MAP_ADDR_TRAP
brnz,pn %o0, sun4v_itlb_error
mov %g2, %o1 ! restore %o1
mov %g1, %o0 ! restore %o0
mov %g5, %o2 ! restore %o2
mov %g7, %o3 ! restore %o3
retry
sun4v_dtlb_miss:
/* Load MMU Miss base into %g2. */
ldxa [%g0] ASI_SCRATCHPAD, %g2
/* Load UTSB reg into %g1. */
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1] ASI_SCRATCHPAD, %g1
LOAD_DTLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
cmp %g2, %g6
bne,a,pn %xcc, tsb_miss_page_table_walk
mov FAULT_CODE_DTLB, %g3
/* We have a valid entry, make hypervisor call to load
* D-TLB and return from trap.
*
* %g3: PTE
* %g4: vaddr
*/
sun4v_dtlb_load:
ldxa [%g0] ASI_SCRATCHPAD, %g6
mov %o0, %g1 ! save %o0
mov %o1, %g2 ! save %o1
mov %o2, %g5 ! save %o2
mov %o3, %g7 ! save %o3
mov %g4, %o0 ! vaddr
ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 ! ctx
mov %g3, %o2 ! PTE
mov HV_MMU_DMMU, %o3 ! flags
ta HV_MMU_MAP_ADDR_TRAP
brnz,pn %o0, sun4v_dtlb_error
mov %g2, %o1 ! restore %o1
mov %g1, %o0 ! restore %o0
mov %g5, %o2 ! restore %o2
mov %g7, %o3 ! restore %o3
retry
sun4v_dtlb_prot:
SET_GL(1)
/* Load MMU Miss base into %g5. */
ldxa [%g0] ASI_SCRATCHPAD, %g5
ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
rdpr %tl, %g1
cmp %g1, 1
bgu,pn %xcc, winfix_trampoline
mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
ba,pt %xcc, sparc64_realfault_common
nop
/* Called from trap table:
* %g4: vaddr
* %g5: context
* %g6: TAG TARGET
*/
sun4v_itsb_miss:
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1] ASI_SCRATCHPAD, %g1
brz,pn %g5, kvmap_itlb_4v
mov FAULT_CODE_ITLB, %g3
ba,a,pt %xcc, sun4v_tsb_miss_common
/* Called from trap table:
* %g4: vaddr
* %g5: context
* %g6: TAG TARGET
*/
sun4v_dtsb_miss:
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1] ASI_SCRATCHPAD, %g1
brz,pn %g5, kvmap_dtlb_4v
mov FAULT_CODE_DTLB, %g3
/* fallthrough */
sun4v_tsb_miss_common:
COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
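/* %g2 still holds the fault status area pointer loaded by the
 * SUN4V_{I,D}TSB_MISS trap table entries; back it up to
 * &trap_block[smp_processor_id()].
 */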
sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
mov SCRATCHPAD_UTSBREG2, %g5
ldxa [%g5] ASI_SCRATCHPAD, %g5
cmp %g5, -1
be,pt %xcc, 80f
nop
COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7)
/* That clobbered %g2, reload it. */
ldxa [%g0] ASI_SCRATCHPAD, %g2
sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
80: stx %g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
#endif
ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath
ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
sun4v_itlb_error:
rdpr %tl, %g1
cmp %g1, 1
ble,pt %icc, sun4v_bad_ra
or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1
sethi %hi(sun4v_err_itlb_vaddr), %g1
stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
sethi %hi(sun4v_err_itlb_ctx), %g1
ldxa [%g0] ASI_SCRATCHPAD, %g6
ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1
stx %o1, [%g1 + %lo(sun4v_err_itlb_ctx)]
sethi %hi(sun4v_err_itlb_pte), %g1
stx %g3, [%g1 + %lo(sun4v_err_itlb_pte)]
sethi %hi(sun4v_err_itlb_error), %g1
stx %o0, [%g1 + %lo(sun4v_err_itlb_error)]
sethi %hi(1f), %g7
rdpr %tl, %g4
ba,pt %xcc, etraptl1
1: or %g7, %lo(1f), %g7
mov %l4, %o1
call sun4v_itlb_error_report
add %sp, PTREGS_OFF, %o0
/* NOTREACHED */
sun4v_dtlb_error:
rdpr %tl, %g1
cmp %g1, 1
ble,pt %icc, sun4v_bad_ra
or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1
sethi %hi(sun4v_err_dtlb_vaddr), %g1
stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
sethi %hi(sun4v_err_dtlb_ctx), %g1
ldxa [%g0] ASI_SCRATCHPAD, %g6
ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1
stx %o1, [%g1 + %lo(sun4v_err_dtlb_ctx)]
sethi %hi(sun4v_err_dtlb_pte), %g1
stx %g3, [%g1 + %lo(sun4v_err_dtlb_pte)]
sethi %hi(sun4v_err_dtlb_error), %g1
stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)]
sethi %hi(1f), %g7
rdpr %tl, %g4
ba,pt %xcc, etraptl1
1: or %g7, %lo(1f), %g7
mov %l4, %o1
call sun4v_dtlb_error_report
add %sp, PTREGS_OFF, %o0
/* NOTREACHED */
sun4v_bad_ra:
or %g0, %g4, %g5
ba,pt %xcc, sparc64_realfault_common
or %g1, %g0, %g4
/* NOTREACHED */
/* Instruction Access Exception, tl0. */
sun4v_iacc:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_insn_access_exception
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Instruction Access Exception, tl1. */
sun4v_iacc_tl1:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etraptl1
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Data Access Exception, tl0. */
sun4v_dacc:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Data Access Exception, tl1. */
sun4v_dacc_tl1:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etraptl1
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Memory Address Unaligned. */
sun4v_mna:
/* Window fixup? */
rdpr %tl, %g2
cmp %g2, 1
ble,pt %icc, 1f
nop
SET_GL(1)
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
mov HV_FAULT_TYPE_UNALIGNED, %g3
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g4
sllx %g3, 16, %g3
or %g4, %g3, %g4
ba,pt %xcc, winfix_mna
rdpr %tpc, %g3
/* not reached */
1: ldxa [%g0] ASI_SCRATCHPAD, %g2
mov HV_FAULT_TYPE_UNALIGNED, %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_do_mna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
nop
/* Privileged Action. */
sun4v_privact:
ba,pt %xcc, etrap
rd %pc, %g7
call do_privact
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Unaligned ldd float, tl0. */
sun4v_lddfmna:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call handle_lddfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Unaligned std float, tl0. */
sun4v_stdfmna:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define SUN4V_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
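/* The sll/srl pair converts the byte offset to an instruction-word
 * offset truncated to the 19-bit displacement field of the branch;
 * 0x10680000 encodes "ba,pt %xcc", and the NOP stored at OLD + 0x4
 * fills its delay slot.
 */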
.globl sun4v_patch_tlb_handlers
.type sun4v_patch_tlb_handlers,#function
sun4v_patch_tlb_handlers:
SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss)
SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss)
SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss)
SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss)
SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot)
SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot)
SUN4V_DO_PATCH(tl0_iax, sun4v_iacc)
SUN4V_DO_PATCH(tl1_iax, sun4v_iacc_tl1)
SUN4V_DO_PATCH(tl0_dax, sun4v_dacc)
SUN4V_DO_PATCH(tl1_dax, sun4v_dacc_tl1)
SUN4V_DO_PATCH(tl0_mna, sun4v_mna)
SUN4V_DO_PATCH(tl1_mna, sun4v_mna)
SUN4V_DO_PATCH(tl0_lddfmna, sun4v_lddfmna)
SUN4V_DO_PATCH(tl0_stdfmna, sun4v_stdfmna)
SUN4V_DO_PATCH(tl0_privact, sun4v_privact)
retl
nop
.size sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,022
|
arch/sparc/kernel/urtt_fill.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/thread_info.h>
#include <asm/trap_block.h>
#include <asm/spitfire.h>
#include <asm/ptrace.h>
#include <asm/head.h>
.text
.align 8
.globl user_rtt_fill_fixup_common
user_rtt_fill_fixup_common:
rdpr %cwp, %g1
add %g1, 1, %g1
wrpr %g1, 0x0, %cwp
rdpr %wstate, %g2
sll %g2, 3, %g2
wrpr %g2, 0x0, %wstate
/* We know %canrestore and %otherwin are both zero. */
sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
661: stxa %g2, [%g1] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g1] ASI_MMU
.previous
sethi %hi(KERNBASE), %g1
flush %g1
mov %g4, %l4
mov %g5, %l5
brnz,pn %g3, 1f
mov %g3, %l3
or %g4, FAULT_CODE_WINFIXUP, %g4
stb %g4, [%g6 + TI_FAULT_CODE]
stx %g5, [%g6 + TI_FAULT_ADDR]
1:
mov %g6, %l1
wrpr %g0, 0x0, %tl
661: nop
.section .sun4v_1insn_patch, "ax"
.word 661b
SET_GL(0)
.previous
661: wrpr %g0, RTRAP_PSTATE, %pstate
.section .sun_m7_1insn_patch, "ax"
.word 661b
/* Re-enable PSTATE.mcde to maintain ADI security */
wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
.previous
mov %l1, %g6
ldx [%g6 + TI_TASK], %g4
LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
brnz,pn %l3, 1f
nop
call do_sparc64_fault
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
1: cmp %g3, 2
bne,pn %xcc, 2f
nop
sethi %hi(tlb_type), %g1
lduw [%g1 + %lo(tlb_type)], %g1
cmp %g1, 3
bne,pt %icc, 1f
add %sp, PTREGS_OFF, %o0
mov %l4, %o2
call sun4v_do_mna
mov %l5, %o1
ba,a,pt %xcc, rtrap
1: mov %l4, %o1
mov %l5, %o2
call mem_address_unaligned
nop
ba,a,pt %xcc, rtrap
2: sethi %hi(tlb_type), %g1
mov %l4, %o1
lduw [%g1 + %lo(tlb_type)], %g1
mov %l5, %o2
cmp %g1, 3
bne,pt %icc, 1f
add %sp, PTREGS_OFF, %o0
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
nop
1: call spitfire_data_access_exception
nop
ba,a,pt %xcc, rtrap
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,073
|
arch/sparc/kernel/sun4v_ivec.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* sun4v_ivec.S: Sun4v interrupt vector handling.
*
* Copyright (C) 2006 <davem@davemloft.net>
*/
#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>
.text
.align 32
sun4v_cpu_mondo:
/* Head offset in %g2, tail offset in %g4.
* If they are the same, no work.
*/
mov INTRQ_CPU_MONDO_HEAD, %g2
ldxa [%g2] ASI_QUEUE, %g2
mov INTRQ_CPU_MONDO_TAIL, %g4
ldxa [%g4] ASI_QUEUE, %g4
cmp %g2, %g4
be,pn %xcc, sun4v_cpu_mondo_queue_empty
nop
/* Get &trap_block[smp_processor_id()] into %g4. */
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
/* Get smp_processor_id() into %g3 */
sethi %hi(trap_block), %g5
or %g5, %lo(trap_block), %g5
sub %g4, %g5, %g3
srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
/* Increment cpu_mondo_counter[smp_processor_id()] */
sethi %hi(cpu_mondo_counter), %g5
or %g5, %lo(cpu_mondo_counter), %g5
sllx %g3, 3, %g3
add %g5, %g3, %g5
ldx [%g5], %g3
add %g3, 1, %g3
stx %g3, [%g5]
/* Get CPU mondo queue base phys address into %g7. */
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
/* Now get the cross-call arguments and handler PC, same
* layout as sun4u:
*
* 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
* high half is context arg to MMU flushes, into %g5
* 2nd 64-bit word: 64-bit arg, load into %g1
* 3rd 64-bit word: 64-bit arg, load into %g7
*/
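/* As a C sketch (big-endian layout, for illustration only), each
 * 64-byte entry begins:
 *
 *	struct {
 *		u32 ctx;	high half of word 0, into %g5
 *		u32 pc;		low half of word 0, into %g3
 *		u64 arg1;	into %g1
 *		u64 arg2;	into %g7
 *	};
 */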
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
add %g2, 0x8, %g2
srlx %g3, 32, %g5
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
add %g2, 0x8, %g2
srl %g3, 0, %g3
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
add %g2, 0x40 - 0x8 - 0x8, %g2
/* Update queue head pointer. */
lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
and %g2, %g4, %g2
mov INTRQ_CPU_MONDO_HEAD, %g4
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
jmpl %g3, %g0
nop
sun4v_cpu_mondo_queue_empty:
retry
sun4v_dev_mondo:
/* Head offset in %g2, tail offset in %g4. */
mov INTRQ_DEVICE_MONDO_HEAD, %g2
ldxa [%g2] ASI_QUEUE, %g2
mov INTRQ_DEVICE_MONDO_TAIL, %g4
ldxa [%g4] ASI_QUEUE, %g4
cmp %g2, %g4
be,pn %xcc, sun4v_dev_mondo_queue_empty
nop
/* Get &trap_block[smp_processor_id()] into %g4. */
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
/* Get DEV mondo queue base phys address into %g5. */
ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
/* Load IVEC into %g3. */
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
add %g2, 0x40, %g2
/* XXX There can be a full 64-byte block of data here.
* XXX This is how we can get at MSI vector data.
* XXX Currently we do not capture this, but when we do we'll
* XXX need to add a 64-byte storage area in the struct ino_bucket
* XXX or the struct irq_desc.
*/
/* Update queue head pointer, this frees up some registers. */
lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
and %g2, %g4, %g2
mov INTRQ_DEVICE_MONDO_HEAD, %g4
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
/* For VIRQs, cookie is encoded as ~bucket_phys_addr */
brlz,pt %g3, 1f
xnor %g3, %g0, %g4
/* Get __pa(&ivector_table[IVEC]) into %g4. */
sethi %hi(ivector_table_pa), %g4
ldx [%g4 + %lo(ivector_table_pa)], %g4
sllx %g3, 4, %g3
add %g4, %g3, %g4
1: ldx [%g1], %g2
stxa %g2, [%g4] ASI_PHYS_USE_EC
stx %g4, [%g1]
/* Signal the interrupt by setting (1 << pil) in %softint. */
wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
sun4v_dev_mondo_queue_empty:
retry
sun4v_res_mondo:
/* Head offset in %g2, tail offset in %g4. */
mov INTRQ_RESUM_MONDO_HEAD, %g2
ldxa [%g2] ASI_QUEUE, %g2
mov INTRQ_RESUM_MONDO_TAIL, %g4
ldxa [%g4] ASI_QUEUE, %g4
cmp %g2, %g4
be,pn %xcc, sun4v_res_mondo_queue_empty
nop
/* Get &trap_block[smp_processor_id()] into %g3. */
ldxa [%g0] ASI_SCRATCHPAD, %g3
sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
/* Get RES mondo queue base phys address into %g5. */
ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
/* Get RES kernel buffer base phys address into %g7. */
ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
/* If the first word is non-zero, queue is full. */
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
brnz,pn %g1, sun4v_res_mondo_queue_full
nop
lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
/* Remember this entry's offset in %g1. */
mov %g2, %g1
/* Copy 64-byte queue entry into kernel buffer. */
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
/* Update queue head pointer. */
and %g2, %g4, %g2
mov INTRQ_RESUM_MONDO_HEAD, %g4
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
/* Disable interrupts and save register state so we can call
* C code. The etrap handling will leave %g4 in %l4 for us
* when it's done.
*/
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
mov %g1, %g4
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
/* Log the event. */
add %sp, PTREGS_OFF, %o0
call sun4v_resum_error
mov %l4, %o1
/* Return from trap. */
ba,pt %xcc, rtrap_irq
nop
sun4v_res_mondo_queue_empty:
retry
sun4v_res_mondo_queue_full:
/* The queue is full, consolidate our damage by setting
* the head equal to the tail. We'll just trap again otherwise.
* Call C code to log the event.
*/
mov INTRQ_RESUM_MONDO_HEAD, %g2
stxa %g4, [%g2] ASI_QUEUE
membar #Sync
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
call sun4v_resum_overflow
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap_irq
nop
sun4v_nonres_mondo:
/* Head offset in %g2, tail offset in %g4. */
mov INTRQ_NONRESUM_MONDO_HEAD, %g2
ldxa [%g2] ASI_QUEUE, %g2
mov INTRQ_NONRESUM_MONDO_TAIL, %g4
ldxa [%g4] ASI_QUEUE, %g4
cmp %g2, %g4
be,pn %xcc, sun4v_nonres_mondo_queue_empty
nop
/* Get &trap_block[smp_processor_id()] into %g3. */
ldxa [%g0] ASI_SCRATCHPAD, %g3
sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
/* Get NONRES mondo queue base phys address into %g5. */
ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
/* Get NONRES kernel buffer base phys address into %g7. */
ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
/* If the first word is non-zero, queue is full. */
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
brnz,pn %g1, sun4v_nonres_mondo_queue_full
nop
lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
/* Remember this entry's offset in %g1. */
mov %g2, %g1
/* Copy 64-byte queue entry into kernel buffer. */
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
/* Update queue head pointer. */
and %g2, %g4, %g2
mov INTRQ_NONRESUM_MONDO_HEAD, %g4
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
/* Disable interrupts and save register state so we can call
* C code. The etrap handling will leave %g4 in %l4 for us
* when it's done.
*/
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
mov %g1, %g4
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
/* Log the event. */
add %sp, PTREGS_OFF, %o0
call sun4v_nonresum_error
mov %l4, %o1
/* Return from trap. */
ba,pt %xcc, rtrap_irq
nop
sun4v_nonres_mondo_queue_empty:
retry
sun4v_nonres_mondo_queue_full:
/* The queue is full, consolidate our damage by setting
* the head equal to the tail. We'll just trap again otherwise.
* Call C code to log the event.
*/
mov INTRQ_NONRESUM_MONDO_HEAD, %g2
stxa %g4, [%g2] ASI_QUEUE
membar #Sync
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
call sun4v_nonresum_overflow
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap_irq
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 13,575
|
arch/sparc/kernel/tsb.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* tsb.S: Sparc64 TSB table handling.
*
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>
.text
.align 32
/* Invoked from TLB miss handler, we are in the
* MMU global registers and they are setup like
* this:
*
* %g1: TSB entry pointer
* %g2: available temporary
* %g3: FAULT_CODE_{D,I}TLB
* %g4: available temporary
* %g5: available temporary
* %g6: TAG TARGET
* %g7: available temporary, will be loaded by us with
* the physical address base of the linux page
* tables for the current address space
*/
tsb_miss_dtlb:
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g4
srlx %g4, PAGE_SHIFT, %g4
ba,pt %xcc, tsb_miss_page_table_walk
sllx %g4, PAGE_SHIFT, %g4
tsb_miss_itlb:
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_IMMU, %g4
srlx %g4, PAGE_SHIFT, %g4
ba,pt %xcc, tsb_miss_page_table_walk
sllx %g4, PAGE_SHIFT, %g4
/* At this point we have:
* %g1 -- PAGE_SIZE TSB entry address
* %g3 -- FAULT_CODE_{D,I}TLB
* %g4 -- missing virtual address
* %g6 -- TAG TARGET (vaddr >> 22)
*/
tsb_miss_page_table_walk:
TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
/* Before committing to a full page table walk,
* check the huge page TSB.
*/
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
661: ldx [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
nop
.section .sun4v_2insn_patch, "ax"
.word 661b
mov SCRATCHPAD_UTSBREG2, %g5
ldxa [%g5] ASI_SCRATCHPAD, %g5
.previous
cmp %g5, -1
be,pt %xcc, 80f
nop
/* We need an aligned pair of registers containing 2 values
* which can be easily rematerialized. %g6 and %g7 fit the
* bill nicely. We'll save %g6 away into %g2 for the
* huge page TSB TAG comparison.
*
* Perform a huge page TSB lookup.
*/
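/* The sequence below open-codes the TSB pointer computation
 * (compare COMPUTE_TSB_PTR in sun4v_tlb_miss.S) with
 * REAL_HPAGE_SHIFT as the hash shift.
 */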
mov %g6, %g2
and %g5, 0x7, %g6
mov 512, %g7
andn %g5, 0x7, %g5
sllx %g7, %g6, %g7
srlx %g4, REAL_HPAGE_SHIFT, %g6
sub %g7, 1, %g7
and %g6, %g7, %g6
sllx %g6, 4, %g6
add %g5, %g6, %g5
TSB_LOAD_QUAD(%g5, %g6)
cmp %g6, %g2
be,a,pt %xcc, tsb_tlb_reload
mov %g7, %g5
/* No match, remember the huge page TSB entry address,
* and restore %g6 and %g7.
*/
TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
srlx %g4, 22, %g6
80: stx %g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
#endif
ldx [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
/* At this point we have:
* %g1 -- TSB entry address
* %g3 -- FAULT_CODE_{D,I}TLB
* %g4 -- missing virtual address
* %g6 -- TAG TARGET (vaddr >> 22)
* %g7 -- page table physical address
*
* We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
* TSB lack a matching entry.
*/
tsb_miss_page_table_walk_sun4v_fastpath:
USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
/* Valid PTE is now in %g5. */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7
sllx %g7, 32, %g7
andcc %g5, %g7, %g0
be,pt %xcc, 60f
nop
/* It is a huge page, use huge page TSB entry address we
* calculated above. If the huge page TSB has not been
* allocated, setup a trap stack and call hugetlb_setup()
* to do so, then return from the trap to replay the TLB
* miss.
*
* This is necessary to handle the case of transparent huge
* pages where we don't really have a non-atomic context
* in which to allocate the hugepage TSB hash table. When
* the 'mm' faults in the hugepage for the first time, we
* thus handle it here. This also makes sure that we can
* allocate the TSB hash table on the correct NUMA node.
*/
TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
cmp %g1, -1
bne,pt %xcc, 60f
nop
661: rdpr %pstate, %g5
wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
.section .sun4v_2insn_patch, "ax"
.word 661b
SET_GL(1)
nop
.previous
rdpr %tl, %g7
cmp %g7, 1
bne,pn %xcc, winfix_trampoline
mov %g3, %g4
ba,pt %xcc, etrap
rd %pc, %g7
call hugetlb_setup
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
60:
#endif
/* At this point we have:
* %g1 -- TSB entry address
* %g3 -- FAULT_CODE_{D,I}TLB
* %g5 -- valid PTE
* %g6 -- TAG TARGET (vaddr >> 22)
*/
tsb_reload:
TSB_LOCK_TAG(%g1, %g2, %g7)
TSB_WRITE(%g1, %g5, %g6)
/* Finally, load TLB and return from trap. */
tsb_tlb_reload:
cmp %g3, FAULT_CODE_DTLB
bne,pn %xcc, tsb_itlb_load
nop
tsb_dtlb_load:
661: stxa %g5, [%g0] ASI_DTLB_DATA_IN
retry
.section .sun4v_2insn_patch, "ax"
.word 661b
nop
nop
.previous
/* For sun4v the ASI_DTLB_DATA_IN store and the retry
* instruction get nop'd out and we get here to branch
* to the sun4v tlb load code. The registers are setup
* as follows:
*
* %g4: vaddr
* %g5: PTE
* %g6: TAG
*
* The sun4v TLB load wants the PTE in %g3 so we fix that
* up here.
*/
ba,pt %xcc, sun4v_dtlb_load
mov %g5, %g3
tsb_itlb_load:
/* Executable bit must be set. */
661: sethi %hi(_PAGE_EXEC_4U), %g4
andcc %g5, %g4, %g0
.section .sun4v_2insn_patch, "ax"
.word 661b
andcc %g5, _PAGE_EXEC_4V, %g0
nop
.previous
be,pn %xcc, tsb_do_fault
nop
661: stxa %g5, [%g0] ASI_ITLB_DATA_IN
retry
.section .sun4v_2insn_patch, "ax"
.word 661b
nop
nop
.previous
/* For sun4v the ASI_ITLB_DATA_IN store and the retry
* instruction get nop'd out and we get here to branch
* to the sun4v tlb load code. The registers are setup
* as follows:
*
* %g4: vaddr
* %g5: PTE
* %g6: TAG
*
* The sun4v TLB load wants the PTE in %g3 so we fix that
* up here.
*/
ba,pt %xcc, sun4v_itlb_load
mov %g5, %g3
/* No valid entry in the page tables, do full fault
* processing.
*/
.globl tsb_do_fault
tsb_do_fault:
cmp %g3, FAULT_CODE_DTLB
661: rdpr %pstate, %g5
wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
.section .sun4v_2insn_patch, "ax"
.word 661b
SET_GL(1)
ldxa [%g0] ASI_SCRATCHPAD, %g4
.previous
bne,pn %xcc, tsb_do_itlb_fault
nop
tsb_do_dtlb_fault:
rdpr %tl, %g3
cmp %g3, 1
661: mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g5
.section .sun4v_2insn_patch, "ax"
.word 661b
ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
nop
.previous
/* Clear context ID bits. */
srlx %g5, PAGE_SHIFT, %g5
sllx %g5, PAGE_SHIFT, %g5
be,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_DTLB, %g4
ba,pt %xcc, winfix_trampoline
nop
tsb_do_itlb_fault:
rdpr %tpc, %g5
ba,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_ITLB, %g4
.globl sparc64_realfault_common
sparc64_realfault_common:
/* fault code in %g4, fault address in %g5, etrap will
* preserve these two values in %l4 and %l5 respectively
*/
ba,pt %xcc, etrap ! Save trap state
1: rd %pc, %g7 ! ...
stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code
stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address
call do_sparc64_fault ! Call fault handler
add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg
ba,pt %xcc, rtrap ! Restore cpu state
nop ! Delay slot (fill me)
winfix_trampoline:
rdpr %tpc, %g3 ! Prepare winfixup TNPC
or %g3, 0x7c, %g3 ! Compute branch offset
wrpr %g3, %tnpc ! Write it into TNPC
done ! Trap return
/* Insert an entry into the TSB.
*
* %o0: TSB entry pointer (virt or phys address)
* %o1: tag
* %o2: pte
*/
.align 32
.globl __tsb_insert
__tsb_insert:
rdpr %pstate, %o5
wrpr %o5, PSTATE_IE, %pstate
TSB_LOCK_TAG(%o0, %g2, %g3)
TSB_WRITE(%o0, %o2, %o1)
wrpr %o5, %pstate
retl
nop
.size __tsb_insert, .-__tsb_insert
/* Flush the given TSB entry if it has the matching
* tag.
*
* %o0: TSB entry pointer (virt or phys address)
* %o1: tag
*/
.align 32
.globl tsb_flush
.type tsb_flush,#function
tsb_flush:
sethi %hi(TSB_TAG_LOCK_HIGH), %g2
1: TSB_LOAD_TAG(%o0, %g1)
srlx %g1, 32, %o3
andcc %o3, %g2, %g0
bne,pn %icc, 1b
nop
cmp %g1, %o1
mov 1, %o3
bne,pt %xcc, 2f
sllx %o3, TSB_TAG_INVALID_BIT, %o3
TSB_CAS_TAG(%o0, %g1, %o3)
cmp %g1, %o3
bne,pn %xcc, 1b
nop
2: retl
nop
.size tsb_flush, .-tsb_flush
/* Reload MMU related context switch state at
* schedule() time.
*
* %o0: page table physical address
* %o1: TSB base config pointer
* %o2: TSB huge config pointer, or NULL if none
* %o3: Hypervisor TSB descriptor physical address
* %o4: Secondary context to load, if non-zero
*
* We have to run this whole thing with interrupts
* disabled so that the current cpu doesn't change
* due to preemption.
*/
.align 32
.globl __tsb_context_switch
.type __tsb_context_switch,#function
__tsb_context_switch:
rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
brz,pn %o4, 1f
mov SECONDARY_CONTEXT, %o5
661: stxa %o4, [%o5] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %o4, [%o5] ASI_MMU
.previous
flush %g6
1:
TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
ldx [%o1 + TSB_CONFIG_REG_VAL], %o0
brz,pt %o2, 1f
mov -1, %g3
ldx [%o2 + TSB_CONFIG_REG_VAL], %g3
1: stx %g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
sethi %hi(tlb_type), %g2
lduw [%g2 + %lo(tlb_type)], %g2
cmp %g2, 3
bne,pt %icc, 50f
nop
/* Hypervisor TSB switch. */
mov SCRATCHPAD_UTSBREG1, %o5
stxa %o0, [%o5] ASI_SCRATCHPAD
mov SCRATCHPAD_UTSBREG2, %o5
stxa %g3, [%o5] ASI_SCRATCHPAD
mov 2, %o0
cmp %g3, -1
move %xcc, 1, %o0
mov HV_FAST_MMU_TSB_CTXNON0, %o5
mov %o3, %o1
ta HV_FAST_TRAP
/* Finish up. */
ba,pt %xcc, 9f
nop
/* SUN4U TSB switch. */
50: mov TSB_REG, %o5
stxa %o0, [%o5] ASI_DMMU
membar #Sync
stxa %o0, [%o5] ASI_IMMU
membar #Sync
2: ldx [%o1 + TSB_CONFIG_MAP_VADDR], %o4
brz %o4, 9f
ldx [%o1 + TSB_CONFIG_MAP_PTE], %o5
sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2
mov TLB_TAG_ACCESS, %g3
lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
stxa %o4, [%g3] ASI_DMMU
membar #Sync
sllx %g2, 3, %g2
stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS
membar #Sync
brz,pt %o2, 9f
nop
ldx [%o2 + TSB_CONFIG_MAP_VADDR], %o4
ldx [%o2 + TSB_CONFIG_MAP_PTE], %o5
mov TLB_TAG_ACCESS, %g3
stxa %o4, [%g3] ASI_DMMU
membar #Sync
sub %g2, (1 << 3), %g2
stxa %o5, [%g2] ASI_DTLB_DATA_ACCESS
membar #Sync
9:
wrpr %g1, %pstate
retl
nop
.size __tsb_context_switch, .-__tsb_context_switch
#define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \
(1 << TSB_TAG_INVALID_BIT))
.align 32
.globl copy_tsb
.type copy_tsb,#function
copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
* %o2=new_tsb_base, %o3=new_tsb_size
* %o4=page_size_shift
*/
sethi %uhi(TSB_PASS_BITS), %g7
srlx %o3, 4, %o3
add %o0, %o1, %o1 /* end of old tsb */
sllx %g7, 32, %g7
sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
mov %o4, %g1 /* page_size_shift */
661: prefetcha [%o0] ASI_N, #one_read
.section .tsb_phys_patch, "ax"
.word 661b
prefetcha [%o0] ASI_PHYS_USE_EC, #one_read
.previous
90: andcc %o0, (64 - 1), %g0
bne 1f
add %o0, 64, %o5
661: prefetcha [%o5] ASI_N, #one_read
.section .tsb_phys_patch, "ax"
.word 661b
prefetcha [%o5] ASI_PHYS_USE_EC, #one_read
.previous
1: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */
andcc %g2, %g7, %g0 /* LOCK or INVALID set? */
bne,pn %xcc, 80f /* Skip it */
sllx %g2, 22, %o4 /* TAG --> VADDR */
/* This can definitely be computed faster... */
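	/* A rough sketch of the rebuild below, assuming the usual
	 * 16-byte tag+TTE entries (the loop advances %o0 by 16):
	 * old_index = (entry_ptr >> 4) & 511, vaddr = (tag << 22) |
	 * (old_index << page_size_shift), and the new slot offset is
	 * ((vaddr >> page_size_shift) & new_hash_mask) << 4.
	 */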
srlx %o0, 4, %o5 /* Build index */
and %o5, 511, %o5 /* Mask index */
sllx %o5, %g1, %o5 /* Put into vaddr position */
or %o4, %o5, %o4 /* Full VADDR. */
srlx %o4, %g1, %o4 /* Shift down to create index */
and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
add %o4, 0x8, %o4 /* Advance to TTE */
TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
80: add %o0, 16, %o0
cmp %o0, %o1
bne,pt %xcc, 90b
nop
retl
nop
.size copy_tsb, .-copy_tsb
/* Set the invalid bit in all TSB entries. */
.align 32
.globl tsb_init
.type tsb_init,#function
tsb_init: /* %o0 = TSB vaddr, %o1 = size in bytes */
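	/* Each 16-byte TSB entry begins with its tag word, so the
	 * 0x10-stride stores below mark every entry invalid; one loop
	 * pass covers 0x100 bytes (16 entries), with the prefetches
	 * running a few cache lines ahead of the stores.
	 */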
prefetch [%o0 + 0x000], #n_writes
mov 1, %g1
prefetch [%o0 + 0x040], #n_writes
sllx %g1, TSB_TAG_INVALID_BIT, %g1
prefetch [%o0 + 0x080], #n_writes
1: prefetch [%o0 + 0x0c0], #n_writes
stx %g1, [%o0 + 0x00]
stx %g1, [%o0 + 0x10]
stx %g1, [%o0 + 0x20]
stx %g1, [%o0 + 0x30]
prefetch [%o0 + 0x100], #n_writes
stx %g1, [%o0 + 0x40]
stx %g1, [%o0 + 0x50]
stx %g1, [%o0 + 0x60]
stx %g1, [%o0 + 0x70]
prefetch [%o0 + 0x140], #n_writes
stx %g1, [%o0 + 0x80]
stx %g1, [%o0 + 0x90]
stx %g1, [%o0 + 0xa0]
stx %g1, [%o0 + 0xb0]
prefetch [%o0 + 0x180], #n_writes
stx %g1, [%o0 + 0xc0]
stx %g1, [%o0 + 0xd0]
stx %g1, [%o0 + 0xe0]
stx %g1, [%o0 + 0xf0]
subcc %o1, 0x100, %o1
bne,pt %xcc, 1b
add %o0, 0x100, %o0
retl
nop
nop
nop
.size tsb_init, .-tsb_init
.globl NGtsb_init
.type NGtsb_init,#function
NGtsb_init:
rd %asi, %g2
mov 1, %g1
wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
sllx %g1, TSB_TAG_INVALID_BIT, %g1
1: stxa %g1, [%o0 + 0x00] %asi
stxa %g1, [%o0 + 0x10] %asi
stxa %g1, [%o0 + 0x20] %asi
stxa %g1, [%o0 + 0x30] %asi
stxa %g1, [%o0 + 0x40] %asi
stxa %g1, [%o0 + 0x50] %asi
stxa %g1, [%o0 + 0x60] %asi
stxa %g1, [%o0 + 0x70] %asi
stxa %g1, [%o0 + 0x80] %asi
stxa %g1, [%o0 + 0x90] %asi
stxa %g1, [%o0 + 0xa0] %asi
stxa %g1, [%o0 + 0xb0] %asi
stxa %g1, [%o0 + 0xc0] %asi
stxa %g1, [%o0 + 0xd0] %asi
stxa %g1, [%o0 + 0xe0] %asi
stxa %g1, [%o0 + 0xf0] %asi
subcc %o1, 0x100, %o1
bne,pt %xcc, 1b
add %o0, 0x100, %o0
membar #Sync
retl
wr %g2, 0x0, %asi
.size NGtsb_init, .-NGtsb_init
| AirFortressIlikara/LS2K0300-linux-4.19 | 7,179 | arch/sparc/kernel/spiterrs.S |
/* SPDX-License-Identifier: GPL-2.0 */
/* We need to carefully read the error status, ACK the errors,
* prevent recursive traps, and pass the information on to C
* code for logging.
*
* We pass the AFAR in as-is, and we encode the status
* information as described in asm-sparc64/sfafsr.h
*/
.type __spitfire_access_error,#function
__spitfire_access_error:
/* Disable ESTATE error reporting so that we do not take
* recursive traps and RED state the processor.
*/
stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
mov UDBE_UE, %g1
ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
/* __spitfire_cee_trap branches here with AFSR in %g4 and
* UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the ESTATE
* Error Enable register.
*/
__spitfire_cee_trap_continue:
ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
rdpr %tt, %g3
and %g3, 0x1ff, %g3 ! Paranoia
sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
or %g4, %g3, %g4
rdpr %tl, %g3
cmp %g3, 1
mov 1, %g3
bleu %xcc, 1f
sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
or %g4, %g3, %g4
/* Read in the UDB error register state, clearing the sticky
* error bits as-needed. We only clear them if the UE bit is
* set. Likewise, __spitfire_cee_trap below will only do so
* if the CE bit is set.
*
* NOTE: UltraSparc-I/II have high and low UDB error
* registers, corresponding to the two UDB units
* present on those chips. UltraSparc-IIi only
* has a single UDB, called "SDB" in the manual.
* For IIi the upper UDB register always reads
* as zero so for our purposes things will just
* work with the checks below.
*/
1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3
and %g3, 0x3ff, %g7 ! Paranoia
sllx %g7, SFSTAT_UDBH_SHIFT, %g7
or %g4, %g7, %g4
andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
be,pn %xcc, 1f
nop
stxa %g3, [%g0] ASI_UDB_ERROR_W
membar #Sync
1: mov 0x18, %g3
ldxa [%g3] ASI_UDBL_ERROR_R, %g3
and %g3, 0x3ff, %g7 ! Paranoia
sllx %g7, SFSTAT_UDBL_SHIFT, %g7
or %g4, %g7, %g4
andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
be,pn %xcc, 1f
nop
mov 0x18, %g7
stxa %g3, [%g7] ASI_UDB_ERROR_W
membar #Sync
1: /* Ok, now that we've latched the error state, clear the
* sticky bits in the AFSR.
*/
stxa %g4, [%g0] ASI_AFSR
membar #Sync
rdpr %tl, %g2
cmp %g2, 1
rdpr %pil, %g2
bleu,pt %xcc, 1f
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etraptl1
rd %pc, %g7
ba,a,pt %xcc, 2f
nop
1: ba,pt %xcc, etrap_irq
rd %pc, %g7
2:
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov %l4, %o1
mov %l5, %o2
call spitfire_access_error
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size __spitfire_access_error,.-__spitfire_access_error
/* This is the trap handler entry point for ECC correctable
* errors. They are corrected, but we listen for the trap so
* that the event can be logged.
*
* Disrupting errors are either:
* 1) single-bit ECC errors during UDB reads to system
* memory
* 2) data parity errors during write-back events
*
* As far as I can make out from the manual, the CEE trap is
* only for correctable errors during memory read accesses by
* the front-end of the processor.
*
* The code below is only for trap level 1 CEE events, as it
* is the only situation where we can safely record and log.
* For trap level >1 we just clear the CE bit in the AFSR and
* return.
*
 * This is just like __spitfire_access_error above, but it
* specifically handles correctable errors. If an
* uncorrectable error is indicated in the AFSR we will branch
* directly above to __spitfire_access_error to handle it
* instead. Uncorrectable therefore takes priority over
* correctable, and the error logging C code will notice this
* case by inspecting the trap type.
*/
.type __spitfire_cee_trap,#function
__spitfire_cee_trap:
ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
mov 1, %g3
sllx %g3, SFAFSR_UE_SHIFT, %g3
andcc %g4, %g3, %g0 ! Check for UE
bne,pn %xcc, __spitfire_access_error
nop
/* Ok, in this case we only have a correctable error.
* Indicate we only wish to capture that state in register
* %g1, and we only disable CE error reporting unlike UE
* handling which disables all errors.
*/
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
andn %g3, ESTATE_ERR_CE, %g3
stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
ba,pt %xcc, __spitfire_cee_trap_continue
mov UDBE_CE, %g1
.size __spitfire_cee_trap,.-__spitfire_cee_trap
.type __spitfire_data_access_exception_tl1,#function
__spitfire_data_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
mov DMMU_SFAR, %g5
ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
rdpr %tt, %g3
cmp %g3, 0x80 ! first win spill/fill trap
blu,pn %xcc, 1f
cmp %g3, 0xff ! last win spill/fill trap
bgu,pn %xcc, 1f
nop
ba,pt %xcc, winfix_dax
rdpr %tpc, %g3
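	/* etrap/etraptl1 return through the address left in %g7; the
	 * sethi/or pair below builds the 109: label address for that,
	 * with the 'or' sitting in the branch's delay slot.
	 */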
1: sethi %hi(109f), %g7
ba,pt %xcc, etraptl1
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
.type __spitfire_data_access_exception,#function
__spitfire_data_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
mov DMMU_SFAR, %g5
ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size __spitfire_data_access_exception,.-__spitfire_data_access_exception
.type __spitfire_insn_access_exception_tl1,#function
__spitfire_insn_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etraptl1
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
.type __spitfire_insn_access_exception,#function
__spitfire_insn_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
| AirFortressIlikara/LS2K0300-linux-4.19 | 19,378 | arch/sparc/kernel/head_32.S |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* head.S: The initial boot code for the Sparc port of Linux.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995,1999 Pete Zaitcev (zaitcev@yahoo.com)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
*
* CompactPCI platform by Eric Brower, 1999.
*/
#include <linux/version.h>
#include <linux/init.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/contregs.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/page.h>
#include <asm/kdebug.h>
#include <asm/winmacro.h>
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
#include <asm/export.h>
.data
/* The following are used with the prom_vector node-ops to figure out
* the cpu-type
*/
.align 4
.globl cputypval
cputypval:
.asciz "sun4m"
.ascii " "
/* Tested on SS-5, SS-10 */
.align 4
cputypvar:
.asciz "compatible"
.align 4
notsup:
.asciz "Sparc-Linux sun4/sun4c or MMU-less not supported\n\n"
.align 4
sun4e_notsup:
.asciz "Sparc-Linux sun4e support does not exist\n\n"
.align 4
/* The trap-table - located in the __HEAD section */
#include "ttable_32.S"
.align PAGE_SIZE
/* This was the only reasonable way I could think of to properly align
* these page-table data structures.
*/
.globl empty_zero_page
empty_zero_page: .skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
.global root_flags
.global ram_flags
.global root_dev
.global sparc_ramdisk_image
.global sparc_ramdisk_size
/* This stuff has to be in sync with SILO and other potential boot loaders
* Fields should be kept upward compatible and whenever any change is made,
* HdrS version should be incremented.
*/
.ascii "HdrS"
.word LINUX_VERSION_CODE
.half 0x0203 /* HdrS version */
root_flags:
.half 1
root_dev:
.half 0
ram_flags:
.half 0
sparc_ramdisk_image:
.word 0
sparc_ramdisk_size:
.word 0
.word reboot_command
.word 0, 0, 0
.word _end
/* Cool, here we go. Pick up the romvec pointer in %o0 and stash it in
* %g7 and at prom_vector_p. And also quickly check whether we are on
* a v0, v2, or v3 prom.
*/
gokernel:
/* Ok, it's nice to know, as early as possible, if we
* are already mapped where we expect to be in virtual
* memory. The Solaris /boot elf format bootloader
* will peek into our elf header and load us where
* we want to be, otherwise we have to re-map.
*
* Some boot loaders don't place the jmp'rs address
* in %o7, so we do a pc-relative call to a local
* label, then see what %o7 has.
*/
mov %o7, %g4 ! Save %o7
/* Jump to it, and pray... */
current_pc:
call 1f
nop
1:
mov %o7, %g3
tst %o0
be no_sun4u_here
mov %g4, %o7 /* Previous %o7. */
mov %o0, %l0 ! stash away romvec
mov %o0, %g7 ! put it here too
mov %o1, %l1 ! stash away debug_vec too
/* Ok, let's check out our run time program counter. */
set current_pc, %g5
cmp %g3, %g5
be already_mapped
nop
/* %l6 will hold the offset we have to subtract
* from absolute symbols in order to access areas
* in our own image. If already mapped this is
* just plain zero, else it is KERNBASE.
*/
set KERNBASE, %l6
b copy_prom_lvl14
nop
already_mapped:
mov 0, %l6
/* Copy over the Prom's level 14 clock handler. */
copy_prom_lvl14:
#if 1
/* DJHR
* preserve our linked/calculated instructions
*/
set lvl14_save, %g1
set t_irq14, %g3
sub %g1, %l6, %g1 ! translate to physical
sub %g3, %l6, %g3 ! translate to physical
ldd [%g3], %g4
std %g4, [%g1]
ldd [%g3+8], %g4
std %g4, [%g1+8]
#endif
rd %tbr, %g1
andn %g1, 0xfff, %g1 ! proms trap table base
or %g0, (0x1e<<4), %g2 ! offset to lvl14 intr
or %g1, %g2, %g2
set t_irq14, %g3
sub %g3, %l6, %g3
ldd [%g2], %g4
std %g4, [%g3]
ldd [%g2 + 0x8], %g4
std %g4, [%g3 + 0x8] ! Copy proms handler
/* DON'T TOUCH %l0 thru %l5 in these remapping routines,
* we need their values afterwards!
*/
/* Now check whether we are already mapped, if we
* are we can skip all this garbage coming up.
*/
copy_prom_done:
cmp %l6, 0
be go_to_highmem ! this will be a nop then
nop
/* Validate that we are in fact running on an
* SRMMU based cpu.
*/
set 0x4000, %g6
cmp %g7, %g6
bne not_a_sun4
nop
halt_notsup:
ld [%g7 + 0x68], %o1
set notsup, %o0
sub %o0, %l6, %o0
call %o1
nop
ba halt_me
nop
not_a_sun4:
/* It looks like this is a machine we support.
* Now find out what MMU we are dealing with
* LEON - identified by the psr.impl field
* Viking - identified by the psr.impl field
* In all other cases a sun4m srmmu.
* We check that the MMU is enabled in all cases.
*/
/* Check if this is a LEON CPU */
rd %psr, %g3
srl %g3, PSR_IMPL_SHIFT, %g3
and %g3, PSR_IMPL_SHIFTED_MASK, %g3
cmp %g3, PSR_IMPL_LEON
be leon_remap /* It is a LEON - jump */
nop
/* Sanity-check, is MMU enabled */
lda [%g0] ASI_M_MMUREGS, %g1
andcc %g1, 1, %g0
be halt_notsup
nop
/* Check for a viking (TI) module. */
cmp %g3, PSR_IMPL_TI
bne srmmu_not_viking
nop
/* Figure out what kind of viking we are on.
* We need to know if we have to play with the
* AC bit and disable traps or not.
*/
/* I've only seen MicroSparc's on SparcClassics with this
* bit set.
*/
set 0x800, %g2
lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg
and %g2, %g3, %g3
subcc %g3, 0x0, %g0
bnz srmmu_not_viking ! is in mbus mode
nop
rd %psr, %g3 ! DO NOT TOUCH %g3
andn %g3, PSR_ET, %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
/* Get context table pointer, then convert to
* a physical address, which is 36 bits.
*/
set AC_M_CTPR, %g4
lda [%g4] ASI_M_MMUREGS, %g4
sll %g4, 0x4, %g4 ! We use this below
! DO NOT TOUCH %g4
/* Set the AC bit in the Viking's MMU control reg. */
lda [%g0] ASI_M_MMUREGS, %g5 ! DO NOT TOUCH %g5
set 0x8000, %g6 ! AC bit mask
or %g5, %g6, %g6 ! Or it in...
sta %g6, [%g0] ASI_M_MMUREGS ! Close your eyes...
/* Grrr, why does it seem like every other load/store
* on the sun4m is in some ASI space...
* Fine with me, let's get the pointer to the level 1
* page table directory and fetch its entry.
*/
lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr
srl %o1, 0x4, %o1 ! Clear low 4 bits
sll %o1, 0x8, %o1 ! Make physical
/* Ok, pull in the PTD. */
lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
/* Calculate to KERNBASE entry. */
add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
/* Poke the entry into the calculated address. */
sta %o2, [%o3] ASI_M_BYPASS
/* I don't get it Sun, if you engineered all these
* boot loaders and the PROM (thank you for the debugging
* features btw) why did you not have them load kernel
* images up in high address space, since this is necessary
* for ABI compliance anyways? Does this low-mapping provide
* enhanced interoperability?
*
* "The PROM is the computer."
*/
/* Ok, restore the MMU control register we saved in %g5 */
sta %g5, [%g0] ASI_M_MMUREGS ! POW... ouch
/* Turn traps back on. We saved it in %g3 earlier. */
wr %g3, 0x0, %psr ! tick tock, tick tock
/* Now we burn precious CPU cycles due to bad engineering. */
WRITE_PAUSE
/* Wow, all that just to move a 32-bit value from one
* place to another... Jump to high memory.
*/
b go_to_highmem
nop
srmmu_not_viking:
/* This works on viking's in Mbus mode and all
* other MBUS modules. It is virtually the same as
* the above madness sans turning traps off and flipping
* the AC bit.
*/
set AC_M_CTPR, %g1
lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr
sll %g1, 0x4, %g1 ! make physical addr
lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table
srl %g1, 0x4, %g1
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
leon_remap:
/* Sanity-check, is MMU enabled */
lda [%g0] ASI_LEON_MMUREGS, %g1
andcc %g1, 1, %g0
be halt_notsup
nop
/* Same code as in the srmmu_not_viking case,
* with the LEON ASI for mmuregs
*/
set AC_M_CTPR, %g1
lda [%g1] ASI_LEON_MMUREGS, %g1 ! get ctx table ptr
sll %g1, 0x4, %g1 ! make physical addr
lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table
srl %g1, 0x4, %g1
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
/* Now do a non-relative jump so that PC is in high-memory */
go_to_highmem:
set execute_in_high_mem, %g1
jmpl %g1, %g0
nop
/* The code above must stay at the beginning of the image, and we have
 * to be careful with short jumps, since branching from .text into the
 * .init.text section is usually impossible. */
__INIT
/* Acquire boot time privileged register values, this will help debugging.
* I figure out and store nwindows and nwindowsm1 later on.
*/
execute_in_high_mem:
mov %l0, %o0 ! put back romvec
mov %l1, %o1 ! and debug_vec
sethi %hi(prom_vector_p), %g1
st %o0, [%g1 + %lo(prom_vector_p)]
sethi %hi(linux_dbvec), %g1
st %o1, [%g1 + %lo(linux_dbvec)]
/* Get the machine type via the romvec
* getprops node operation
*/
add %g7, 0x1c, %l1
ld [%l1], %l0
ld [%l0], %l0
call %l0
or %g0, %g0, %o0 ! next_node(0) = first_node
or %o0, %g0, %g6
sethi %hi(cputypvar), %o1 ! First node has cpu-arch
or %o1, %lo(cputypvar), %o1
sethi %hi(cputypval), %o2 ! information, the string
or %o2, %lo(cputypval), %o2
ld [%l1], %l0 ! 'compatible' tells
ld [%l0 + 0xc], %l0 ! that we want 'sun4x' where
call %l0 ! x is one of 'm', 'd' or 'e'.
nop ! %o2 holds pointer
! to a buf where above string
! will get stored by the prom.
/* Check value of "compatible" property.
* "value" => "model"
* leon => sparc_leon
* sun4m => sun4m
* sun4s => sun4m
* sun4d => sun4d
* sun4e => "no_sun4e_here"
* '*' => "no_sun4u_here"
* Check single letters only
*/
set cputypval, %o2
/* If cputypval[0] == 'l' (lower case letter L) this is leon */
ldub [%o2], %l1
cmp %l1, 'l'
be leon_init
nop
/* Check cputypval[4] to find the sun model */
ldub [%o2 + 0x4], %l1
cmp %l1, 'm'
be sun4m_init
cmp %l1, 's'
be sun4m_init
cmp %l1, 'd'
be sun4d_init
cmp %l1, 'e'
be no_sun4e_here ! Could be a sun4e.
nop
b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
nop
leon_init:
/* LEON CPU - set boot_cpu_id */
sethi %hi(boot_cpu_id), %g2 ! boot-cpu index
#ifdef CONFIG_SMP
ldub [%g2 + %lo(boot_cpu_id)], %g1
cmp %g1, 0xff ! unset means first CPU
bne leon_smp_cpu_startup ! continue only with master
nop
#endif
/* Get CPU-ID from most significant 4-bit of ASR17 */
rd %asr17, %g1
srl %g1, 28, %g1
/* Update boot_cpu_id only on boot cpu */
stub %g1, [%g2 + %lo(boot_cpu_id)]
ba continue_boot
nop
/* CPUID in bootbus can be found at PA 0xf0140000 */
#define SUN4D_BOOTBUS_CPUID 0xf0140000
sun4d_init:
/* Need to patch call to handler_irq */
set patch_handler_irq, %g4
set sun4d_handler_irq, %g5
sethi %hi(0x40000000), %g3 ! call
sub %g5, %g4, %g5
srl %g5, 2, %g5
or %g5, %g3, %g5
st %g5, [%g4]
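	/* The sequence above hand-assembles a SPARC 'call' instruction:
	 * op bits 01 in the top two bits (the 0x40000000 constant)
	 * plus a 30-bit word displacement, (target - site) >> 2.
	 */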
#ifdef CONFIG_SMP
/* Get our CPU id out of bootbus */
set SUN4D_BOOTBUS_CPUID, %g3
lduba [%g3] ASI_M_CTL, %g3
and %g3, 0xf8, %g3
srl %g3, 3, %g4
sta %g4, [%g0] ASI_M_VIKING_TMP1
sethi %hi(boot_cpu_id), %g5
stb %g4, [%g5 + %lo(boot_cpu_id)]
#endif
/* Fall through to sun4m_init */
sun4m_init:
/* Ok, the PROM could have done funny things and apple cider could still
* be sitting in the fault status/address registers. Read them all to
* clear them so we don't get magic faults later on.
*/
/* This sucks, apparently this makes Vikings call prom panic, will fix later */
2:
rd %psr, %o1
srl %o1, PSR_IMPL_SHIFT, %o1 ! Get a type of the CPU
subcc %o1, PSR_IMPL_TI, %g0 ! TI: Viking or MicroSPARC
be continue_boot
nop
set AC_M_SFSR, %o0
lda [%o0] ASI_M_MMUREGS, %g0
set AC_M_SFAR, %o0
lda [%o0] ASI_M_MMUREGS, %g0
/* Fujitsu MicroSPARC-II has no asynchronous flavors of FARs */
subcc %o1, 0, %g0
be continue_boot
nop
set AC_M_AFSR, %o0
lda [%o0] ASI_M_MMUREGS, %g0
set AC_M_AFAR, %o0
lda [%o0] ASI_M_MMUREGS, %g0
nop
continue_boot:
/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
* show-time!
*/
/* Turn on Supervisor, EnableFloating, and all the PIL bits.
* Also puts us in register window zero with traps off.
*/
set (PSR_PS | PSR_S | PSR_PIL | PSR_EF), %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
/* I want a kernel stack NOW! */
set init_thread_union, %g1
set (THREAD_SIZE - STACKFRAME_SZ), %g2
add %g1, %g2, %sp
mov 0, %fp /* And for good luck */
/* Zero out our BSS section. */
set __bss_start , %o0 ! First address of BSS
set _end , %o1 ! Last address of BSS
add %o0, 0x1, %o0
1:
stb %g0, [%o0]
subcc %o0, %o1, %g0
bl 1b
add %o0, 0x1, %o0
/* If boot_cpu_id has not been setup by machine specific
* init-code above we default it to zero.
*/
sethi %hi(boot_cpu_id), %g2
ldub [%g2 + %lo(boot_cpu_id)], %g3
cmp %g3, 0xff
bne 1f
nop
mov %g0, %g3
stub %g3, [%g2 + %lo(boot_cpu_id)]
1: sll %g3, 2, %g3
/* Initialize the uwinmask value for init task just in case.
* But first make current_set[boot_cpu_id] point to something useful.
*/
set init_thread_union, %g6
set current_set, %g2
#ifdef CONFIG_SMP
st %g6, [%g2]
add %g2, %g3, %g2
#endif
st %g6, [%g2]
st %g0, [%g6 + TI_UWINMASK]
/* Compute NWINDOWS and stash it away. Now uses %wim trick explained
* in the V8 manual. Ok, this method seems to work, Sparc is cool...
* No, it doesn't work, have to play the save/readCWP/restore trick.
*/
wr %g0, 0x0, %wim ! so we do not get a trap
WRITE_PAUSE
save
rd %psr, %g3
restore
and %g3, 0x1f, %g3
add %g3, 0x1, %g3
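	/* With %wim clear no window trap can fire, and we entered in
	 * window zero, so the 'save' wrapped CWP around to
	 * NWINDOWS - 1; CWP + 1 (computed above) is the window count.
	 */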
mov 2, %g1
wr %g1, 0x0, %wim ! make window 1 invalid
WRITE_PAUSE
cmp %g3, 0x7
bne 2f
nop
/* Adjust our window handling routines to
* do things correctly on 7 window Sparcs.
*/
#define PATCH_INSN(src, dest) \
set src, %g5; \
set dest, %g2; \
ld [%g5], %g4; \
st %g4, [%g2];
/* Patch for window spills... */
PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
PATCH_INSN(spnwin_patch3_7win, spnwin_patch3)
/* Patch for window fills... */
PATCH_INSN(fnwin_patch1_7win, fnwin_patch1)
PATCH_INSN(fnwin_patch2_7win, fnwin_patch2)
/* Patch for trap entry setup... */
PATCH_INSN(tsetup_7win_patch1, tsetup_patch1)
PATCH_INSN(tsetup_7win_patch2, tsetup_patch2)
PATCH_INSN(tsetup_7win_patch3, tsetup_patch3)
PATCH_INSN(tsetup_7win_patch4, tsetup_patch4)
PATCH_INSN(tsetup_7win_patch5, tsetup_patch5)
PATCH_INSN(tsetup_7win_patch6, tsetup_patch6)
/* Patch for returning from traps... */
PATCH_INSN(rtrap_7win_patch1, rtrap_patch1)
PATCH_INSN(rtrap_7win_patch2, rtrap_patch2)
PATCH_INSN(rtrap_7win_patch3, rtrap_patch3)
PATCH_INSN(rtrap_7win_patch4, rtrap_patch4)
PATCH_INSN(rtrap_7win_patch5, rtrap_patch5)
/* Patch for killing user windows from the register file. */
PATCH_INSN(kuw_patch1_7win, kuw_patch1)
/* Now patch the kernel window flush sequences.
* This saves 2 traps on every switch and fork.
*/
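	/* 0x01000000 encodes 'nop' (sethi %hi(0), %g0); storing it at
	 * offsets 0x18 and 0x1c of each flush_patch_* site presumably
	 * nops out the two window-flush trap instructions there.
	 */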
set 0x01000000, %g4
set flush_patch_one, %g5
st %g4, [%g5 + 0x18]
st %g4, [%g5 + 0x1c]
set flush_patch_two, %g5
st %g4, [%g5 + 0x18]
st %g4, [%g5 + 0x1c]
set flush_patch_three, %g5
st %g4, [%g5 + 0x18]
st %g4, [%g5 + 0x1c]
set flush_patch_four, %g5
st %g4, [%g5 + 0x18]
st %g4, [%g5 + 0x1c]
set flush_patch_exception, %g5
st %g4, [%g5 + 0x18]
st %g4, [%g5 + 0x1c]
set flush_patch_switch, %g5
st %g4, [%g5 + 0x18]
st %g4, [%g5 + 0x1c]
2:
sethi %hi(nwindows), %g4
st %g3, [%g4 + %lo(nwindows)] ! store final value
sub %g3, 0x1, %g3
sethi %hi(nwindowsm1), %g4
st %g3, [%g4 + %lo(nwindowsm1)]
/* Here we go, start using Linux's trap table... */
set trapbase, %g3
wr %g3, 0x0, %tbr
WRITE_PAUSE
/* Finally, turn on traps so that we can call c-code. */
rd %psr, %g3
wr %g3, 0x0, %psr
WRITE_PAUSE
wr %g3, PSR_ET, %psr
WRITE_PAUSE
/* Call sparc32_start_kernel(struct linux_romvec *rp) */
sethi %hi(prom_vector_p), %g5
ld [%g5 + %lo(prom_vector_p)], %o0
call sparc32_start_kernel
nop
/* We should not get here. */
call halt_me
nop
no_sun4e_here:
ld [%g7 + 0x68], %o1
set sun4e_notsup, %o0
call %o1
nop
b halt_me
nop
__INITDATA
sun4u_1:
.asciz "finddevice"
.align 4
sun4u_2:
.asciz "/chosen"
.align 4
sun4u_3:
.asciz "getprop"
.align 4
sun4u_4:
.asciz "stdout"
.align 4
sun4u_5:
.asciz "write"
.align 4
sun4u_6:
.asciz "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r"
sun4u_6e:
.align 4
sun4u_7:
.asciz "exit"
.align 8
sun4u_a1:
.word 0, sun4u_1, 0, 1, 0, 1, 0, sun4u_2, 0
sun4u_r1:
.word 0
sun4u_a2:
.word 0, sun4u_3, 0, 4, 0, 1, 0
sun4u_i2:
.word 0, 0, sun4u_4, 0, sun4u_1, 0, 8, 0
sun4u_r2:
.word 0
sun4u_a3:
.word 0, sun4u_5, 0, 3, 0, 1, 0
sun4u_i3:
.word 0, 0, sun4u_6, 0, sun4u_6e - sun4u_6 - 1, 0
sun4u_r3:
.word 0
sun4u_a4:
.word 0, sun4u_7, 0, 0, 0, 0
sun4u_r4:
__INIT
no_sun4u_here:
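	/* Printing must work even if we were loaded away from our link
	 * address: %l6 becomes the load offset and the 2:/3: loop
	 * relocates every pointer word in the sun4u_a* argument blocks
	 * before the prom calls are made (%o4 presumably holds the
	 * sun4u prom's client-interface entry point).
	 */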
set sun4u_a1, %o0
set current_pc, %l2
cmp %l2, %g3
be 1f
mov %o4, %l0
sub %g3, %l2, %l6
add %o0, %l6, %o0
mov %o0, %l4
mov sun4u_r4 - sun4u_a1, %l3
ld [%l4], %l5
2:
add %l4, 4, %l4
cmp %l5, %l2
add %l5, %l6, %l5
bgeu,a 3f
st %l5, [%l4 - 4]
3:
subcc %l3, 4, %l3
bne 2b
ld [%l4], %l5
1:
call %l0
mov %o0, %l1
ld [%l1 + (sun4u_r1 - sun4u_a1)], %o1
add %l1, (sun4u_a2 - sun4u_a1), %o0
call %l0
st %o1, [%o0 + (sun4u_i2 - sun4u_a2)]
ld [%l1 + (sun4u_1 - sun4u_a1)], %o1
add %l1, (sun4u_a3 - sun4u_a1), %o0
call %l0
st %o1, [%o0 + (sun4u_i3 - sun4u_a3)]
call %l0
add %l1, (sun4u_a4 - sun4u_a1), %o0
/* Not reached */
halt_me:
ld [%g7 + 0x74], %o0
call %o0 ! Get us out of here...
nop ! Apparently Solaris is better.
/* Ok, now we continue in the .data/.text sections */
.data
.align 4
/*
 * Fill up the prom vector; note in particular the 'kind' first element,
* no joke. I don't need all of them in here as the entire prom vector
* gets initialized in c-code so all routines can use it.
*/
prom_vector_p:
.word 0
/* We calculate the following at boot time, window fills/spills and trap entry
* code uses these to keep track of the register windows.
*/
.align 4
.globl nwindows
.globl nwindowsm1
nwindows:
.word 8
nwindowsm1:
.word 7
/* Boot time debugger vector value. We need this later on. */
.align 4
.globl linux_dbvec
linux_dbvec:
.word 0
.word 0
.align 8
.globl lvl14_save
lvl14_save:
.word 0
.word 0
.word 0
.word 0
.word t_irq14
| AirFortressIlikara/LS2K0300-linux-4.19 | 1,433 | arch/sparc/kernel/helpers.S |
.align 32
.globl __flushw_user
.type __flushw_user,#function
__flushw_user:
rdpr %otherwin, %g1
brz,pn %g1, 2f
clr %g2
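	/* Each 'save' below rotates into a window owned by the user
	 * (%otherwin of them), provoking spill traps that push those
	 * windows to the stack; %g2 counts the saves so the second
	 * loop can 'restore' back to the starting window.
	 */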
1: save %sp, -128, %sp
rdpr %otherwin, %g1
brnz,pt %g1, 1b
add %g2, 1, %g2
1: sub %g2, 1, %g2
brnz,pt %g2, 1b
restore %g0, %g0, %g0
2: retl
nop
.size __flushw_user,.-__flushw_user
EXPORT_SYMBOL(__flushw_user)
/* Flush %fp and %i7 to the stack for all register
* windows active inside of the cpu. This allows
* show_stack_trace() to avoid using an expensive
* 'flushw'.
*/
.globl stack_trace_flush
.type stack_trace_flush,#function
stack_trace_flush:
rdpr %pstate, %o0
wrpr %o0, PSTATE_IE, %pstate
rdpr %cwp, %g1
rdpr %canrestore, %g2
sub %g1, 1, %g3
1: brz,pn %g2, 2f
sub %g2, 1, %g2
wrpr %g3, %cwp
stx %fp, [%sp + STACK_BIAS + RW_V9_I6]
stx %i7, [%sp + STACK_BIAS + RW_V9_I7]
ba,pt %xcc, 1b
sub %g3, 1, %g3
2: wrpr %g1, %cwp
wrpr %o0, %pstate
retl
nop
.size stack_trace_flush,.-stack_trace_flush
#ifdef CONFIG_SMP
.globl hard_smp_processor_id
.type hard_smp_processor_id,#function
hard_smp_processor_id:
#endif
.globl real_hard_smp_processor_id
.type real_hard_smp_processor_id,#function
real_hard_smp_processor_id:
__GET_CPUID(%o0)
retl
nop
#ifdef CONFIG_SMP
.size hard_smp_processor_id,.-hard_smp_processor_id
#endif
.size real_hard_smp_processor_id,.-real_hard_smp_processor_id
EXPORT_SYMBOL_GPL(real_hard_smp_processor_id)
| AirFortressIlikara/LS2K0300-linux-4.19 | 2,983 | arch/sparc/kernel/una_asm_32.S |
/* SPDX-License-Identifier: GPL-2.0 */
/* una_asm.S: Kernel unaligned trap assembler helpers.
*
* Copyright (C) 1996,2005,2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/errno.h>
.text
retl_efault:
retl
mov -EFAULT, %o0
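/* Every numbered user access below is paired with a __ex_table entry
 * mapping its address to retl_efault, so a faulting load or store
 * unwinds straight back to the caller with -EFAULT.
 */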
/* int __do_int_store(unsigned long *dst_addr, int size,
* unsigned long *src_val)
*
* %o0 = dest_addr
* %o1 = size
* %o2 = src_val
*
* Return '0' on success, -EFAULT on failure.
*/
.globl __do_int_store
__do_int_store:
ld [%o2], %g1
cmp %o1, 2
be 2f
cmp %o1, 4
be 1f
srl %g1, 24, %g2
srl %g1, 16, %g7
4: stb %g2, [%o0]
srl %g1, 8, %g2
5: stb %g7, [%o0 + 1]
ld [%o2 + 4], %g7
6: stb %g2, [%o0 + 2]
srl %g7, 24, %g2
7: stb %g1, [%o0 + 3]
srl %g7, 16, %g1
8: stb %g2, [%o0 + 4]
srl %g7, 8, %g2
9: stb %g1, [%o0 + 5]
10: stb %g2, [%o0 + 6]
b 0f
11: stb %g7, [%o0 + 7]
1: srl %g1, 16, %g7
12: stb %g2, [%o0]
srl %g1, 8, %g2
13: stb %g7, [%o0 + 1]
14: stb %g2, [%o0 + 2]
b 0f
15: stb %g1, [%o0 + 3]
2: srl %g1, 8, %g2
16: stb %g2, [%o0]
17: stb %g1, [%o0 + 1]
0: retl
mov 0, %o0
.section __ex_table,#alloc
.word 4b, retl_efault
.word 5b, retl_efault
.word 6b, retl_efault
.word 7b, retl_efault
.word 8b, retl_efault
.word 9b, retl_efault
.word 10b, retl_efault
.word 11b, retl_efault
.word 12b, retl_efault
.word 13b, retl_efault
.word 14b, retl_efault
.word 15b, retl_efault
.word 16b, retl_efault
.word 17b, retl_efault
.previous
/* int do_int_load(unsigned long *dest_reg, int size,
* unsigned long *saddr, int is_signed)
*
* %o0 = dest_reg
* %o1 = size
* %o2 = saddr
* %o3 = is_signed
*
* Return '0' on success, -EFAULT on failure.
*/
.globl do_int_load
do_int_load:
cmp %o1, 8
be 9f
cmp %o1, 4
be 6f
4: ldub [%o2], %g1
5: ldub [%o2 + 1], %g2
sll %g1, 8, %g1
tst %o3
be 3f
or %g1, %g2, %g1
sll %g1, 16, %g1
sra %g1, 16, %g1
3: b 0f
st %g1, [%o0]
6: ldub [%o2 + 1], %g2
sll %g1, 24, %g1
7: ldub [%o2 + 2], %g7
sll %g2, 16, %g2
8: ldub [%o2 + 3], %g3
sll %g7, 8, %g7
or %g3, %g2, %g3
or %g7, %g3, %g7
or %g1, %g7, %g1
b 0f
st %g1, [%o0]
9: ldub [%o2], %g1
10: ldub [%o2 + 1], %g2
sll %g1, 24, %g1
11: ldub [%o2 + 2], %g7
sll %g2, 16, %g2
12: ldub [%o2 + 3], %g3
sll %g7, 8, %g7
or %g1, %g2, %g1
or %g7, %g3, %g7
or %g1, %g7, %g7
13: ldub [%o2 + 4], %g1
st %g7, [%o0]
14: ldub [%o2 + 5], %g2
sll %g1, 24, %g1
15: ldub [%o2 + 6], %g7
sll %g2, 16, %g2
16: ldub [%o2 + 7], %g3
sll %g7, 8, %g7
or %g1, %g2, %g1
or %g7, %g3, %g7
or %g1, %g7, %g7
st %g7, [%o0 + 4]
0: retl
mov 0, %o0
.section __ex_table,#alloc
.word 4b, retl_efault
.word 5b, retl_efault
.word 6b, retl_efault
.word 7b, retl_efault
.word 8b, retl_efault
.word 9b, retl_efault
.word 10b, retl_efault
.word 11b, retl_efault
.word 12b, retl_efault
.word 13b, retl_efault
.word 14b, retl_efault
.word 15b, retl_efault
.word 16b, retl_efault
.previous
| AirFortressIlikara/LS2K0300-linux-4.19 | 1,377 | arch/sparc/kernel/dtlb_prot.S |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* dtlb_prot.S: DTLB protection trap strategy.
* This is included directly into the trap table.
*
* Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
/* Ways we can get here:
*
* [TL == 0] 1) User stores to readonly pages.
* [TL == 0] 2) Nucleus stores to user readonly pages.
* [TL > 0] 3) Nucleus stores to user readonly stack frame.
*/
/* PROT ** ICACHE line 1: User DTLB protection trap */
mov TLB_SFSR, %g1
stxa %g0, [%g1] ASI_DMMU ! Clear FaultValid bit
membar #Sync ! Synchronize stores
rdpr %pstate, %g5 ! Move into alt-globals
wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate
rdpr %tl, %g1 ! Need a winfixup?
cmp %g1, 1 ! Trap level >1?
mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr
/* PROT ** ICACHE line 2: More real fault processing */
ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
srlx %g5, PAGE_SHIFT, %g5
sllx %g5, PAGE_SHIFT, %g5 ! Clear context ID bits
bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
nop
nop
/* PROT ** ICACHE line 3: Unused... */
nop
nop
nop
nop
nop
nop
nop
nop
/* PROT ** ICACHE line 4: Unused... */
nop
nop
nop
nop
nop
nop
nop
nop
| AirFortressIlikara/LS2K0300-linux-4.19 | 3,986 | arch/sparc/kernel/winfixup.S |
/* SPDX-License-Identifier: GPL-2.0 */
/* winfixup.S: Handle cases where user stack pointer is found to be bogus.
*
* Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net)
*/
#include <asm/asi.h>
#include <asm/head.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/spitfire.h>
#include <asm/thread_info.h>
.text
/* It used to be the case that these register window fault
* handlers could run via the save and restore instructions
* done by the trap entry and exit code. They now do the
* window spill/fill by hand, so that case no longer can occur.
*/
.align 32
fill_fixup:
TRAP_LOAD_THREAD_REG(%g6, %g1)
rdpr %tstate, %g1
and %g1, TSTATE_CWP, %g1
or %g4, FAULT_CODE_WINFIXUP, %g4
stb %g4, [%g6 + TI_FAULT_CODE]
stx %g5, [%g6 + TI_FAULT_ADDR]
wrpr %g1, %cwp
ba,pt %xcc, etrap
rd %pc, %g7
call do_sparc64_fault
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Be very careful about usage of the trap globals here.
* You cannot touch %g5 as that has the fault information.
*/
spill_fixup:
spill_fixup_mna:
spill_fixup_dax:
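	/* The user stack is unusable, so stash this window in the
	 * thread_info save area instead: TI_WSAVED indexes the slot,
	 * and the %sp bit-0 test separates biased 64-bit frames (the
	 * stx path) from 32-bit frames (the stw path at 1:).
	 */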
TRAP_LOAD_THREAD_REG(%g6, %g1)
ldx [%g6 + TI_FLAGS], %g1
andcc %sp, 0x1, %g0
movne %icc, 0, %g1
andcc %g1, _TIF_32BIT, %g0
ldub [%g6 + TI_WSAVED], %g1
sll %g1, 3, %g3
add %g6, %g3, %g3
stx %sp, [%g3 + TI_RWIN_SPTRS]
sll %g1, 7, %g3
bne,pt %xcc, 1f
add %g6, %g3, %g3
stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
ba,pt %xcc, 2f
stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00]
stw %l1, [%g3 + TI_REG_WINDOW + 0x04]
stw %l2, [%g3 + TI_REG_WINDOW + 0x08]
stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]
stw %l4, [%g3 + TI_REG_WINDOW + 0x10]
stw %l5, [%g3 + TI_REG_WINDOW + 0x14]
stw %l6, [%g3 + TI_REG_WINDOW + 0x18]
stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]
stw %i0, [%g3 + TI_REG_WINDOW + 0x20]
stw %i1, [%g3 + TI_REG_WINDOW + 0x24]
stw %i2, [%g3 + TI_REG_WINDOW + 0x28]
stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]
stw %i4, [%g3 + TI_REG_WINDOW + 0x30]
stw %i5, [%g3 + TI_REG_WINDOW + 0x34]
stw %i6, [%g3 + TI_REG_WINDOW + 0x38]
stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]
2: add %g1, 1, %g1
stb %g1, [%g6 + TI_WSAVED]
rdpr %tstate, %g1
andcc %g1, TSTATE_PRIV, %g0
saved
be,pn %xcc, 1f
and %g1, TSTATE_CWP, %g1
retry
1: mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
stb %g4, [%g6 + TI_FAULT_CODE]
stx %g5, [%g6 + TI_FAULT_ADDR]
wrpr %g1, %cwp
ba,pt %xcc, etrap
rd %pc, %g7
call do_sparc64_fault
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
winfix_mna:
andn %g3, 0x7f, %g3
add %g3, 0x78, %g3
wrpr %g3, %tnpc
done
fill_fixup_mna:
rdpr %tstate, %g1
and %g1, TSTATE_CWP, %g1
wrpr %g1, %cwp
ba,pt %xcc, etrap
rd %pc, %g7
sethi %hi(tlb_type), %g1
lduw [%g1 + %lo(tlb_type)], %g1
cmp %g1, 3
bne,pt %icc, 1f
add %sp, PTREGS_OFF, %o0
mov %l4, %o2
call sun4v_do_mna
mov %l5, %o1
ba,a,pt %xcc, rtrap
1: mov %l4, %o1
mov %l5, %o2
call mem_address_unaligned
nop
ba,a,pt %xcc, rtrap
winfix_dax:
andn %g3, 0x7f, %g3
add %g3, 0x74, %g3
wrpr %g3, %tnpc
done
fill_fixup_dax:
rdpr %tstate, %g1
and %g1, TSTATE_CWP, %g1
wrpr %g1, %cwp
ba,pt %xcc, etrap
rd %pc, %g7
sethi %hi(tlb_type), %g1
mov %l4, %o1
lduw [%g1 + %lo(tlb_type)], %g1
mov %l5, %o2
cmp %g1, 3
bne,pt %icc, 1f
add %sp, PTREGS_OFF, %o0
call sun4v_data_access_exception
nop
ba,a,pt %xcc, rtrap
nop
1: call spitfire_data_access_exception
nop
ba,a,pt %xcc, rtrap
nop
| AirFortressIlikara/LS2K0300-linux-4.19 | 10,374 | arch/sparc/kernel/rtrap_64.S |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* rtrap.S: Preparing for return from trap on Sparc V9.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>
#ifdef CONFIG_CONTEXT_TRACKING
# define SCHEDULE_USER schedule_user
#else
# define SCHEDULE_USER schedule
#endif
.text
.align 32
__handle_preemption:
call SCHEDULE_USER
661: wrpr %g0, RTRAP_PSTATE, %pstate
/* If userspace is using ADI, it could potentially pass
* a pointer with version tag embedded in it. To maintain
* the ADI security, we must re-enable PSTATE.mcde before
* we continue execution in the kernel for another thread.
*/
.section .sun_m7_1insn_patch, "ax"
.word 661b
wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
.previous
ba,pt %xcc, __handle_preemption_continue
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_user_windows:
add %sp, PTREGS_OFF, %o0
call fault_in_user_windows
661: wrpr %g0, RTRAP_PSTATE, %pstate
/* If userspace is using ADI, it could potentially pass
* a pointer with version tag embedded in it. To maintain
* the ADI security, we must re-enable PSTATE.mcde before
* we continue execution in the kernel for another thread.
*/
.section .sun_m7_1insn_patch, "ax"
.word 661b
wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
.previous
ba,pt %xcc, __handle_preemption_continue
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_userfpu:
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
sethi %hi(TSTATE_PEF), %o0
be,a,pn %icc, __handle_userfpu_continue
andn %l1, %o0, %l1
ba,a,pt %xcc, __handle_userfpu_continue
__handle_signal:
mov %l5, %o1
add %sp, PTREGS_OFF, %o0
mov %l0, %o2
call do_notify_resume
661: wrpr %g0, RTRAP_PSTATE, %pstate
/* If userspace is using ADI, it could potentially pass
* a pointer with version tag embedded in it. To maintain
* the ADI security, we must re-enable PSTATE.mcde before
* we continue execution in the kernel for another thread.
*/
.section .sun_m7_1insn_patch, "ax"
.word 661b
wrpr %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
.previous
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Signal delivery can modify pt_regs tstate, so we must
* reload it.
*/
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
andn %l1, %l4, %l1
ba,pt %xcc, __handle_preemption_continue
srl %l4, 20, %l4
/* When returning from a NMI (%pil==15) interrupt we want to
* avoid running softirqs, doing IRQ tracing, preempting, etc.
*/
.globl rtrap_nmi
rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
andn %l1, %l4, %l1
srl %l4, 20, %l4
ba,pt %xcc, rtrap_no_irq_enable
nop
/* Do not actually set the %pil here. We will do that
* below after we clear PSTATE_IE in the %pstate register.
* If we re-enable interrupts here, we can recurse down
* the hardirq stack potentially endlessly, causing a
* stack overflow.
*/
.align 64
.globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
rtrap_xcall:
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
andn %l1, %l4, %l1
srl %l4, 20, %l4
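	/* %l1 now holds tstate with the PIL field stripped, and %l4
	 * the PIL to restore on exit (TSTATE.PIL occupies bits 20-23,
	 * hence the 0xf << 20 mask).
	 */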
#ifdef CONFIG_TRACE_IRQFLAGS
brnz,pn %l4, rtrap_no_irq_enable
nop
call trace_hardirqs_on
nop
/* Do not actually set the %pil here. We will do that
* below after we clear PSTATE_IE in the %pstate register.
* If we re-enable interrupts here, we can recurse down
* the hardirq stack potentially endlessly, causing a
* stack overflow.
*
* It is tempting to put this test and trace_hardirqs_on
* call at the 'rt_continue' label, but that will not work
* as that path hits unconditionally and we do not want to
* execute this in NMI return paths, for example.
*/
#endif
rtrap_no_irq_enable:
andcc %l1, TSTATE_PRIV, %l3
bne,pn %icc, to_kernel
nop
/* We must hold IRQs off and atomically test schedule+signal
* state, then hold them off all the way back to userspace.
* If we are returning to kernel, none of this matters. Note
* that we are disabling interrupts via PSTATE_IE, not using
* %pil.
*
* If we do not do this, there is a window where we would do
* the tests, later the signal/resched event arrives but we do
* not process it since we are still in kernel mode. It would
* take until the next local IRQ before the signal/resched
* event would be handled.
*
* This also means that if we have to deal with user
* windows, we have to redo all of these sched+signal checks
* with IRQs disabled.
*/
to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
wrpr 0, %pil
__handle_preemption_continue:
ldx [%g6 + TI_FLAGS], %l0
sethi %hi(_TIF_USER_WORK_MASK), %o0
or %o0, %lo(_TIF_USER_WORK_MASK), %o0
andcc %l0, %o0, %g0
sethi %hi(TSTATE_PEF), %o0
be,pt %xcc, user_nowork
andcc %l1, %o0, %g0
andcc %l0, _TIF_NEED_RESCHED, %g0
bne,pn %xcc, __handle_preemption
andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
bne,pn %xcc, __handle_signal
ldub [%g6 + TI_WSAVED], %o2
brnz,pn %o2, __handle_user_windows
nop
sethi %hi(TSTATE_PEF), %o0
andcc %l1, %o0, %g0
/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
bne,pn %xcc, __handle_userfpu
stb %g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:
rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
ldx [%sp + PTREGS_OFF + PT_V9_G2], %g2
ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3
ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4
ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5
brz,pt %l3, 1f
mov %g6, %l2
/* Must do this before thread reg is clobbered below. */
LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6
ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7
/* Normal globals are restored, go to trap globals. */
661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
nop
.section .sun4v_2insn_patch, "ax"
.word 661b
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
SET_GL(1)
.previous
mov %l2, %g6
ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
ldx [%sp + PTREGS_OFF + PT_V9_I6], %i6
ldx [%sp + PTREGS_OFF + PT_V9_I7], %i7
ldx [%sp + PTREGS_OFF + PT_V9_TPC], %l2
ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %o2
ld [%sp + PTREGS_OFF + PT_V9_Y], %o3
wr %o3, %g0, %y
wrpr %l4, 0x0, %pil
wrpr %g0, 0x1, %tl
andn %l1, TSTATE_SYSCALL, %l1
wrpr %l1, %g0, %tstate
wrpr %l2, %g0, %tpc
wrpr %o2, %g0, %tnpc
brnz,pn %l3, kern_rtt
mov PRIMARY_CONTEXT, %l7
661: ldxa [%l7 + %l7] ASI_DMMU, %l0
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%l7 + %l7] ASI_MMU, %l0
.previous
sethi %hi(sparc64_kern_pri_nuc_bits), %l1
ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
or %l0, %l1, %l0
661: stxa %l0, [%l7] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %l0, [%l7] ASI_MMU
.previous
sethi %hi(KERNBASE), %l7
flush %l7
rdpr %wstate, %l1
rdpr %otherwin, %l2
srl %l1, 3, %l1
661: wrpr %l2, %g0, %canrestore
.section .fast_win_ctrl_1insn_patch, "ax"
.word 661b
.word 0x89880000 ! normalw
.previous
wrpr %l1, %g0, %wstate
brnz,pt %l2, user_rtt_restore
661: wrpr %g0, %g0, %otherwin
.section .fast_win_ctrl_1insn_patch, "ax"
.word 661b
nop
.previous
ldx [%g6 + TI_FLAGS], %g3
wr %g0, ASI_AIUP, %asi
rdpr %cwp, %g1
andcc %g3, _TIF_32BIT, %g0
sub %g1, 1, %g1
bne,pt %xcc, user_rtt_fill_32bit
wrpr %g1, %cwp
ba,a,pt %xcc, user_rtt_fill_64bit
nop
user_rtt_fill_fixup_dax:
ba,pt %xcc, user_rtt_fill_fixup_common
mov 1, %g3
user_rtt_fill_fixup_mna:
ba,pt %xcc, user_rtt_fill_fixup_common
mov 2, %g3
user_rtt_fill_fixup:
ba,pt %xcc, user_rtt_fill_fixup_common
clr %g3
user_rtt_pre_restore:
add %g1, 1, %g1
wrpr %g1, 0x0, %cwp
user_rtt_restore:
restore
rdpr %canrestore, %g1
wrpr %g1, 0x0, %cleanwin
retry
nop
kern_rtt: rdpr %canrestore, %g1
brz,pn %g1, kern_rtt_fill
nop
kern_rtt_restore:
stw %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
restore
retry
to_kernel:
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %l5
brnz %l5, kern_fpucheck
ldx [%g6 + TI_FLAGS], %l5
andcc %l5, _TIF_NEED_RESCHED, %g0
be,pt %xcc, kern_fpucheck
nop
cmp %l4, 0
bne,pn %xcc, kern_fpucheck
nop
call preempt_schedule_irq
nop
ba,pt %xcc, rtrap
#endif
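	/* Lazily restore FPU state saved by a kernel FPU user:
	 * TI_FPDEPTH indexes the per-level FPRS flags in TI_FPSAVED,
	 * and the DL/DU (dirty lower/upper half) bits select which
	 * banks of %f0-%f63 get reloaded below.
	 */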
kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
brz,pt %l5, rt_continue
srl %l5, 1, %o0
add %g6, TI_FPSAVED, %l6
ldub [%l6 + %o0], %l2
sub %l5, 2, %l5
add %g6, TI_GSR, %o1
andcc %l2, (FPRS_FEF|FPRS_DU), %g0
be,pt %icc, 2f
and %l2, FPRS_DL, %l6
andcc %l2, FPRS_FEF, %g0
be,pn %icc, 5f
sll %o0, 3, %o5
rd %fprs, %g1
wr %g1, FPRS_FEF, %fprs
ldx [%o1 + %o5], %g1
add %g6, TI_XFSR, %o1
sll %o0, 8, %o2
add %g6, TI_FPREGS, %o3
brz,pn %l6, 1f
add %g6, TI_FPREGS+0x40, %o4
membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f0
ldda [%o4 + %o2] ASI_BLK_P, %f16
membar #Sync
1: andcc %l2, FPRS_DU, %g0
be,pn %icc, 1f
wr %g1, 0, %gsr
add %o2, 0x80, %o2
membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
1: membar #Sync
ldx [%o1 + %o5], %fsr
2: stb %l5, [%g6 + TI_FPDEPTH]
ba,pt %xcc, rt_continue
nop
5: wr %g0, FPRS_FEF, %fprs
sll %o0, 8, %o2
add %g6, TI_FPREGS+0x80, %o3
add %g6, TI_FPREGS+0xc0, %o4
membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
membar #Sync
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
stb %l5, [%g6 + TI_FPDEPTH]
| AirFortressIlikara/LS2K0300-linux-4.19 | 1,208 | arch/sparc/kernel/ivec.S |
/* SPDX-License-Identifier: GPL-2.0 */
/* The registers for cross calls will be:
*
* DATA 0: [low 32-bits] Address of function to call, jmp to this
* [high 32-bits] MMU Context Argument 0, place in %g5
* DATA 1: Address Argument 1, place in %g1
* DATA 2: Address Argument 2, place in %g7
*
* With this method we can do most of the cross-call tlb/cache
* flushing very quickly.
*/
.align 32
.globl do_ivec
.type do_ivec,#function
do_ivec:
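	/* DATA 0 is read from ASI_INTR_R offset 0x40 (0x50 and 0x60
	 * hold DATA 1 and 2, read in do_ivec_xcall below); a value at
	 * or above KERNBASE is treated as a cross-call function
	 * address rather than a device vector.
	 */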
mov 0x40, %g3
ldxa [%g3 + %g0] ASI_INTR_R, %g3
sethi %hi(KERNBASE), %g4
cmp %g3, %g4
bgeu,pn %xcc, do_ivec_xcall
srlx %g3, 32, %g5
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
sethi %hi(ivector_table_pa), %g2
ldx [%g2 + %lo(ivector_table_pa)], %g2
sllx %g3, 4, %g3
add %g2, %g3, %g3
TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
ldx [%g6], %g5
stxa %g5, [%g3] ASI_PHYS_USE_EC
stx %g3, [%g6]
wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
retry
do_ivec_xcall:
mov 0x50, %g1
ldxa [%g1 + %g0] ASI_INTR_R, %g1
srl %g3, 0, %g3
mov 0x60, %g7
ldxa [%g7 + %g0] ASI_INTR_R, %g7
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
ba,pt %xcc, 1f
nop
.align 32
1: jmpl %g3, %g0
nop
.size do_ivec,.-do_ivec
| AirFortressIlikara/LS2K0300-linux-4.19 | 3,625 | arch/sparc/kernel/vmlinux.lds.S |
/* SPDX-License-Identifier: GPL-2.0 */
/* ld script for sparc32/sparc64 kernel */
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#ifdef CONFIG_SPARC32
#define INITIAL_ADDRESS 0x10000 + SIZEOF_HEADERS
#define TEXTSTART 0xf0004000
#define SMP_CACHE_BYTES_SHIFT 5
#else
#define SMP_CACHE_BYTES_SHIFT 6
#define INITIAL_ADDRESS 0x4000
#define TEXTSTART 0x0000000000404000
#endif
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)
#ifdef CONFIG_SPARC32
OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc")
OUTPUT_ARCH(sparc)
ENTRY(_start)
jiffies = jiffies_64 + 4;
#else
/* sparc64 */
OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
OUTPUT_ARCH(sparc:v9a)
ENTRY(_start)
jiffies = jiffies_64;
#endif
#ifdef CONFIG_SPARC64
ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
#endif
SECTIONS
{
#ifdef CONFIG_SPARC64
swapper_pg_dir = 0x0000000000402000;
#endif
. = INITIAL_ADDRESS;
.text TEXTSTART :
{
_text = .;
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.gnu.warning)
} = 0
_etext = .;
RO_DATA(PAGE_SIZE)
/* Start of data section */
_sdata = .;
.data1 : {
*(.data1)
}
RW_DATA_SECTION(SMP_CACHE_BYTES, 0, THREAD_SIZE)
/* End of data section */
_edata = .;
.fixup : {
__start___fixup = .;
*(.fixup)
__stop___fixup = .;
}
EXCEPTION_TABLE(16)
NOTES
. = ALIGN(PAGE_SIZE);
__init_begin = ALIGN(PAGE_SIZE);
INIT_TEXT_SECTION(PAGE_SIZE)
__init_text_end = .;
INIT_DATA_SECTION(16)
. = ALIGN(4);
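	/* Boot-time instruction patch tables: each entry is emitted by
	 * a '661:' construct in the assembler sources (a .word naming
	 * the patch site, followed by the replacement instructions)
	 * and is presumably applied during early boot once the cpu and
	 * mmu type are known.
	 */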
.tsb_ldquad_phys_patch : {
__tsb_ldquad_phys_patch = .;
*(.tsb_ldquad_phys_patch)
__tsb_ldquad_phys_patch_end = .;
}
.tsb_phys_patch : {
__tsb_phys_patch = .;
*(.tsb_phys_patch)
__tsb_phys_patch_end = .;
}
.cpuid_patch : {
__cpuid_patch = .;
*(.cpuid_patch)
__cpuid_patch_end = .;
}
.sun4v_1insn_patch : {
__sun4v_1insn_patch = .;
*(.sun4v_1insn_patch)
__sun4v_1insn_patch_end = .;
}
.sun4v_2insn_patch : {
__sun4v_2insn_patch = .;
*(.sun4v_2insn_patch)
__sun4v_2insn_patch_end = .;
}
.leon_1insn_patch : {
__leon_1insn_patch = .;
*(.leon_1insn_patch)
__leon_1insn_patch_end = .;
}
.swapper_tsb_phys_patch : {
__swapper_tsb_phys_patch = .;
*(.swapper_tsb_phys_patch)
__swapper_tsb_phys_patch_end = .;
}
.swapper_4m_tsb_phys_patch : {
__swapper_4m_tsb_phys_patch = .;
*(.swapper_4m_tsb_phys_patch)
__swapper_4m_tsb_phys_patch_end = .;
}
.popc_3insn_patch : {
__popc_3insn_patch = .;
*(.popc_3insn_patch)
__popc_3insn_patch_end = .;
}
.popc_6insn_patch : {
__popc_6insn_patch = .;
*(.popc_6insn_patch)
__popc_6insn_patch_end = .;
}
.pause_3insn_patch : {
__pause_3insn_patch = .;
*(.pause_3insn_patch)
__pause_3insn_patch_end = .;
}
.sun_m7_1insn_patch : {
__sun_m7_1insn_patch = .;
*(.sun_m7_1insn_patch)
__sun_m7_1insn_patch_end = .;
}
.sun_m7_2insn_patch : {
__sun_m7_2insn_patch = .;
*(.sun_m7_2insn_patch)
__sun_m7_2insn_patch_end = .;
}
.get_tick_patch : {
__get_tick_patch = .;
*(.get_tick_patch)
__get_tick_patch_end = .;
}
.pud_huge_patch : {
__pud_huge_patch = .;
*(.pud_huge_patch)
__pud_huge_patch_end = .;
}
.fast_win_ctrl_1insn_patch : {
__fast_win_ctrl_1insn_patch = .;
*(.fast_win_ctrl_1insn_patch)
__fast_win_ctrl_1insn_patch_end = .;
}
PERCPU_SECTION(SMP_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
.exit.text : {
EXIT_TEXT
}
.exit.data : {
EXIT_DATA
}
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, 0, 0)
_end = . ;
STABS_DEBUG
DWARF_DEBUG
DISCARDS
}
| AirFortressIlikara/LS2K0300-linux-4.19 | 13,921 | arch/sparc/kernel/systbls_64.S |
/* SPDX-License-Identifier: GPL-2.0 */
/* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
* Copyright (C) 1995, 1996, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*
* Based upon preliminary work which is:
*
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
.text
.align 4
#ifdef CONFIG_COMPAT
/* First, the 32-bit Linux native syscall table. */
.globl sys_call_table32
sys_call_table32:
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word compat_sys_open, sys_close, compat_sys_wait4, sys_creat, sys_link
/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .word compat_sys_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
.word sys_chown, sys_sync, sys_kill, compat_sys_newstat, compat_sys_sendfile
/*40*/ .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid
.word sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
/*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
.word sys_reboot, sys32_mmap2, sys_symlink, sys_readlink, sys32_execve
/*60*/ .word sys_umask, sys_chroot, compat_sys_newfstat, compat_sys_fstat64, sys_getpagesize
.word sys_msync, sys_vfork, compat_sys_pread64, compat_sys_pwrite64, sys_geteuid
/*70*/ .word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
.word sys_madvise, sys_vhangup, compat_sys_truncate64, sys_mincore, sys_getgroups16
/*80*/ .word sys_setgroups16, sys_getpgrp, sys_setgroups, compat_sys_setitimer, compat_sys_ftruncate64
.word sys_swapon, compat_sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, compat_sys_select, sys_setfsgid
.word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending
.word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, compat_sys_recvmsg, compat_sys_sendmsg
.word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, compat_sys_getsockopt, sys_getcwd
/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
.word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
.word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit
.word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
.word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
.word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr
/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
.word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
.word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname
/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
.word sys_epoll_wait, sys_ioprio_set, sys_getppid, compat_sys_sparc_sigaction, sys_sgetmask
/*200*/ .word sys_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
.word compat_sys_readahead, sys32_socketcall, sys_syslog, compat_sys_lookup_dcookie, compat_sys_fadvise64
/*210*/ .word compat_sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, compat_sys_sysinfo
.word compat_sys_ipc, sys32_sigreturn, sys_clone, sys_ioprio_get, compat_sys_adjtimex
/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
.word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
/*230*/ .word compat_sys_select, compat_sys_time, sys_splice, compat_sys_stime, compat_sys_statfs64
.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
.word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, compat_sys_sched_rr_get_interval, compat_sys_nanosleep
/*250*/ .word sys_mremap, compat_sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
.word compat_sys_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, compat_sys_clock_nanosleep
/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, compat_sys_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
/*270*/ .word compat_sys_io_submit, sys_io_cancel, compat_sys_io_getevents, compat_sys_mq_open, sys_mq_unlink
.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
/*280*/ .word sys_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat
.word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64
/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
.word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
/*360*/ .word sys_statx, compat_sys_io_pgetevents
#endif /* CONFIG_COMPAT */
/* Now the 64-bit native Linux syscall table. */
.align 4
.globl sys_call_table64, sys_call_table
sys_call_table64:
sys_call_table:
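/* Note that each entry is a 32-bit .word even though this is the
 * 64-bit table: the dispatch path (syscalls.S) scales the syscall
 * number by 4 and fetches the handler with a 32-bit load, which
 * works because the kernel text is linked in the low 4GB.
 */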
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_nis_syscall, sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
.word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
/*40*/ .word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall
.word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
/*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
.word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys64_execve
/*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
.word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
/*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_64_munmap, sys_mprotect
.word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
/*80*/ .word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
.word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
/*90*/ .word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
.word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
/*100*/ .word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
.word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/ .word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg
.word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
/*120*/ .word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
.word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown
.word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
/*140*/ .word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
.word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
.word sys_nis_syscall, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
/*160*/ .word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install
.word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
.word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
.word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname
/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
.word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask
/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
.word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
/*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
.word sys_sparc_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex
/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
.word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
/*230*/ .word sys_select, sys_nis_syscall, sys_splice, sys_stime, sys_statfs64
.word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
.word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
/*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
.word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
/*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
.word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
.word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
/*360*/ .word sys_statx, sys_io_pgetevents
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,614
|
arch/sparc/kernel/wuf.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* wuf.S: Window underflow trap handler for the Sparc.
*
* Copyright (C) 1995 David S. Miller
*/
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/asi.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>
/* Just like the overflow handler we define macros for registers
* with fixed meanings in this routine.
*/
#define t_psr l0
#define t_pc l1
#define t_npc l2
#define t_wim l3
/* Don't touch the above registers or else you die horribly... */
/* Now macros for the available scratch registers in this routine. */
#define twin_tmp1 l4
#define twin_tmp2 l5
#define curptr g6
.text
.align 4
/* The trap entry point has executed the following:
*
* rd %psr, %l0
* rd %wim, %l3
* b fill_window_entry
* andcc %l0, PSR_PS, %g0
*/
/* Datum current_thread_info->uwinmask contains at all times a bitmask
* where if any user windows are active, at least one bit will
 * be set in the mask. If no user windows are active, the bitmask
* will be all zeroes.
*/
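/* As a rough C sketch of the invariant (uwinmask lives in
 * struct thread_info):
 *
 *	user_windows_active = (current_thread_info()->uwinmask != 0);
 *	window_is_live_user = (uwinmask >> window) & 1;
 */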
/* To get an idea of what has just happened to cause this
* trap take a look at this diagram:
*
* 1 2 3 4 <-- Window number
* ----------
* T O W I <-- Symbolic name
*
* O == the window that execution was in when
* the restore was attempted
*
* T == the trap itself has save'd us into this
* window
*
* W == this window is the one which is now invalid
* and must be made valid plus loaded from the
* stack
*
* I == this window will be the invalid one when we
* are done and return from trap if successful
*/
/* BEGINNING OF PATCH INSTRUCTIONS */
/* On 7-window Sparc the boot code patches fnwin_patch1 and
 * fnwin_patch2 with the following instructions.
 */
.globl fnwin_patch1_7win, fnwin_patch2_7win
fnwin_patch1_7win: srl %t_wim, 6, %twin_tmp2
fnwin_patch2_7win: and %twin_tmp1, 0x7f, %twin_tmp1
/* END OF PATCH INSTRUCTIONS */
.globl fill_window_entry, fnwin_patch1, fnwin_patch2
fill_window_entry:
/* LOCATION: Window 'T' */
/* Compute what the new %wim is going to be if we retrieve
* the proper window off of the stack.
*/
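/* In C terms this is a one-bit left rotate of the invalid window
 * mask (a sketch; the srl 7/0xff pair below is patched to
 * srl 6/0x7f on 7-window parts):
 *
 *	new_wim = ((wim << 1) | (wim >> (NWINDOWS - 1)))
 *		  & ((1 << NWINDOWS) - 1);
 */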
sll %t_wim, 1, %twin_tmp1
fnwin_patch1: srl %t_wim, 7, %twin_tmp2
or %twin_tmp1, %twin_tmp2, %twin_tmp1
fnwin_patch2: and %twin_tmp1, 0xff, %twin_tmp1
wr %twin_tmp1, 0x0, %wim /* Make window 'I' invalid */
andcc %t_psr, PSR_PS, %g0
be fwin_from_user
restore %g0, %g0, %g0 /* Restore to window 'O' */
/* Trapped from kernel, we trust that the kernel does not
 * 'over restore', so to speak, and just grab the window
 * from the stack and return. Easy enough.
*/
fwin_from_kernel:
/* LOCATION: Window 'O' */
restore %g0, %g0, %g0
/* LOCATION: Window 'W' */
LOAD_WINDOW(sp) /* Load it up */
/* Spin the wheel... */
save %g0, %g0, %g0
save %g0, %g0, %g0
/* I'd like to buy a vowel please... */
/* LOCATION: Window 'T' */
/* Now preserve the condition codes in %psr, pause, and
* return from trap. This is the simplest case of all.
*/
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
fwin_from_user:
/* LOCATION: Window 'O' */
restore %g0, %g0, %g0 /* Restore to window 'W' */
/* LOCATION: Window 'W' */
/* Branch to the stack validation routine */
b srmmu_fwin_stackchk
andcc %sp, 0x7, %g0
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
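/* STACK_OFFSET locates the pt_regs area near the top of the kernel
 * stack: THREAD_SIZE bytes total, with a struct pt_regs (TRACEREG_SZ)
 * and one register window save area (STACKFRAME_SZ) carved off the end.
 */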
fwin_user_stack_is_bolixed:
/* LOCATION: Window 'W' */
/* Place a pt_regs frame on the kernel stack, save back
* to the trap window and call c-code to deal with this.
*/
LOAD_CURRENT(l4, l5)
sethi %hi(STACK_OFFSET), %l5
or %l5, %lo(STACK_OFFSET), %l5
add %l4, %l5, %l5
/* Store globals into pt_regs frame. */
STORE_PT_GLOBALS(l5)
STORE_PT_YREG(l5, g3)
/* Save current in a global while we change windows. */
mov %l4, %curptr
save %g0, %g0, %g0
/* LOCATION: Window 'O' */
rd %psr, %g3 /* Read %psr in live user window */
mov %fp, %g4 /* Save bogus frame pointer. */
save %g0, %g0, %g0
/* LOCATION: Window 'T' */
sethi %hi(STACK_OFFSET), %l5
or %l5, %lo(STACK_OFFSET), %l5
add %curptr, %l5, %sp
/* Build rest of pt_regs. */
STORE_PT_INS(sp)
STORE_PT_PRIV(sp, t_psr, t_pc, t_npc)
/* re-set trap time %wim value */
wr %t_wim, 0x0, %wim
/* Fix users window mask and buffer save count. */
mov 0x1, %g5
sll %g5, %g3, %g5
st %g5, [%curptr + TI_UWINMASK] ! one live user window still
st %g0, [%curptr + TI_W_SAVED] ! no windows in the buffer
wr %t_psr, PSR_ET, %psr ! enable traps
nop
call window_underflow_fault
mov %g4, %o0
b ret_trap_entry
clr %l6
fwin_user_stack_is_ok:
/* LOCATION: Window 'W' */
/* The users stack area is kosher and mapped, load the
* window and fall through to the finish up routine.
*/
LOAD_WINDOW(sp)
/* Round and round she goes... */
save %g0, %g0, %g0 /* Save to window 'O' */
save %g0, %g0, %g0 /* Save to window 'T' */
/* Where she'll trap nobody knows... */
/* LOCATION: Window 'T' */
fwin_user_finish_up:
/* LOCATION: Window 'T' */
wr %t_psr, 0x0, %psr
WRITE_PAUSE
jmp %t_pc
rett %t_npc
/* Here come the architecture specific checks for stack
 * mappings. Note that unlike the window overflow handler
* we only need to check whether the user can read from
* the appropriate addresses. Also note that we are in
* an invalid window which will be loaded, and this means
* that until we actually load the window up we are free
* to use any of the local registers contained within.
*
 * On success these routines branch to fwin_user_stack_is_ok
 * if the area at %sp is user readable and the window still
 * needs to be loaded, or to fwin_user_finish_up if the
 * routine has done the loading itself. On failure (bogus
 * user stack) the routine branches to the label called
 * fwin_user_stack_is_bolixed.
*
* Contrary to the arch-specific window overflow stack
* check routines in wof.S, these routines are free to use
* any of the local registers they want to as this window
* does not belong to anyone at this point, however the
 * outs and ins are still verboten as they are possibly part
 * of 'someone else's' window.
*/
.globl srmmu_fwin_stackchk
srmmu_fwin_stackchk:
/* LOCATION: Window 'W' */
/* Caller did 'andcc %sp, 0x7, %g0' */
bne fwin_user_stack_is_bolixed
sethi %hi(PAGE_OFFSET), %l5
/* If the user's stack were in the kernel vma, the trial
 * and error technique below would succeed for the 'wrong'
 * reason, so rule that out first.
*/
mov AC_M_SFSR, %l4
cmp %l5, %sp
bleu fwin_user_stack_is_bolixed
LEON_PI( lda [%l4] ASI_LEON_MMUREGS, %g0) ! clear fault status
SUN_PI_( lda [%l4] ASI_M_MMUREGS, %g0) ! clear fault status
/* The technique is, turn off faults on this processor,
* just let the load rip, then check the sfsr to see if
* a fault did occur. Then we turn on fault traps again
* and branch conditionally based upon what happened.
*/
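/* A rough C sketch of the sequence below (bit 0x2 of the MMU
 * control register is the SRMMU no-fault bit, bit 0x2 of the
 * SFSR indicates a latched fault):
 *
 *	mmu_ctrl |= 0x2;	// loads no longer trap
 *	LOAD_WINDOW(sp);	// may silently fault
 *	mmu_ctrl &= ~0x2;
 *	(void) SFAR;		// read to unlatch fault address
 *	if (SFSR & 0x2)		// did a fault occur?
 *		goto fwin_user_stack_is_bolixed;
 */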
LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg
SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg
or %l5, 0x2, %l5 ! turn on no-fault bit
LEON_PI(sta %l5, [%g0] ASI_LEON_MMUREGS) ! store it
SUN_PI_(sta %l5, [%g0] ASI_M_MMUREGS) ! store it
/* Cross fingers and go for it. */
LOAD_WINDOW(sp)
/* A penny 'saved'... */
save %g0, %g0, %g0
save %g0, %g0, %g0
/* Is a BADTRAP earned... */
/* LOCATION: Window 'T' */
LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit
LEON_PI(sta %twin_tmp1, [%g0] ASI_LEON_MMUREGS) ! store it
SUN_PI_(sta %twin_tmp1, [%g0] ASI_M_MMUREGS) ! store it
mov AC_M_SFAR, %twin_tmp2
LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %g0) ! read fault address
SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %g0) ! read fault address
mov AC_M_SFSR, %twin_tmp2
LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2) ! read fault status
SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2) ! read fault status
andcc %twin_tmp2, 0x2, %g0 ! did fault occur?
bne 1f ! yep, cleanup
nop
wr %t_psr, 0x0, %psr
nop
b fwin_user_finish_up + 0x4
nop
/* Did I ever tell you about my window lobotomy?
* anyways... fwin_user_stack_is_bolixed expects
* to be in window 'W' so make it happy or else
* we watchdog badly.
*/
1:
restore %g0, %g0, %g0
b fwin_user_stack_is_bolixed ! oh well
restore %g0, %g0, %g0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,845
|
arch/sparc/kernel/systbls_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
* Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
*
* Based upon preliminary work which is:
*
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
.data
.align 4
/* First, the Linux native syscall table. */
.globl sys_call_table
sys_call_table:
/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
/*35*/ .long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile
/*40*/ .long sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_getuid
/*45*/ .long sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
/*50*/ .long sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, sys_ioctl
/*55*/ .long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve
/*60*/ .long sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
/*65*/ .long sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_geteuid
/*70*/ .long sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
/*75*/ .long sys_madvise, sys_vhangup, sys_truncate64, sys_mincore, sys_getgroups16
/*80*/ .long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64
/*85*/ .long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
/*90*/ .long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid
/*95*/ .long sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
/*100*/ .long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
/*105*/ .long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_recvmsg, sys_sendmsg
/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
/*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod
/*125*/ .long sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown
/*135*/ .long sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
/*140*/ .long sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .long sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
/*155*/ .long sys_fcntl64, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
/*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
/*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
/*170*/ .long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
/*175*/ .long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_ni_syscall
/*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
/*190*/ .long sys_init_module, sys_personality, sys_sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl
/*195*/ .long sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_sparc_sigaction, sys_sgetmask
/*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_old_readdir
/*205*/ .long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
/*210*/ .long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
/*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex
/*220*/ .long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
/*225*/ .long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
/*230*/ .long sys_select, sys_time, sys_splice, sys_stime, sys_statfs64
/* "We are the Knights of the Forest of Ni!!" */
/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall
/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
/*280*/ .long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
/*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
/*360*/ .long sys_statx, sys_io_pgetevents
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,358
|
arch/sparc/kernel/fpu_traps.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* This is trivial with the new code... */
.globl do_fpdis
.type do_fpdis,#function
do_fpdis:
sethi %hi(TSTATE_PEF), %g4
rdpr %tstate, %g5
andcc %g5, %g4, %g0
be,pt %xcc, 1f
nop
rd %fprs, %g5
andcc %g5, FPRS_FEF, %g0
be,pt %xcc, 1f
nop
/* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
add %g0, %g0, %g0
ba,a,pt %xcc, rtrap
1: TRAP_LOAD_THREAD_REG(%g6, %g1)
ldub [%g6 + TI_FPSAVED], %g5
wr %g0, FPRS_FEF, %fprs
andcc %g5, FPRS_FEF, %g0
be,a,pt %icc, 1f
clr %g7
ldx [%g6 + TI_GSR], %g7
1: andcc %g5, FPRS_DL, %g0
bne,pn %icc, 2f
fzero %f0
andcc %g5, FPRS_DU, %g0
bne,pn %icc, 1f
fzero %f2
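/* Neither half of the FP register file has a saved image, so
 * initialize all of it: %f0/%f2 are zeroed above and the
 * faddd/fmuld chain below fans the zeros out to every
 * double-precision register.
 */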
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
faddd %f0, %f2, %f16
fmuld %f0, %f2, %f18
faddd %f0, %f2, %f20
fmuld %f0, %f2, %f22
faddd %f0, %f2, %f24
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
faddd %f0, %f2, %f32
fmuld %f0, %f2, %f34
faddd %f0, %f2, %f36
fmuld %f0, %f2, %f38
faddd %f0, %f2, %f40
fmuld %f0, %f2, %f42
faddd %f0, %f2, %f44
fmuld %f0, %f2, %f46
faddd %f0, %f2, %f48
fmuld %f0, %f2, %f50
faddd %f0, %f2, %f52
fmuld %f0, %f2, %f54
faddd %f0, %f2, %f56
fmuld %f0, %f2, %f58
b,pt %xcc, fpdis_exit2
faddd %f0, %f2, %f60
1: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS + 0x80, %g1
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
661: ldxa [%g3] ASI_DMMU, %g5
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%g3] ASI_MMU, %g5
.previous
sethi %hi(sparc64_kern_sec_context), %g2
ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
661: stxa %g2, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g3] ASI_MMU
.previous
membar #Sync
add %g6, TI_FPREGS + 0xc0, %g2
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
membar #Sync
ldda [%g1] ASI_BLK_S, %f32
ldda [%g2] ASI_BLK_S, %f48
membar #Sync
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
faddd %f0, %f2, %f16
fmuld %f0, %f2, %f18
faddd %f0, %f2, %f20
fmuld %f0, %f2, %f22
faddd %f0, %f2, %f24
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
ba,a,pt %xcc, fpdis_exit
2: andcc %g5, FPRS_DU, %g0
bne,pt %icc, 3f
fzero %f32
mov SECONDARY_CONTEXT, %g3
fzero %f34
661: ldxa [%g3] ASI_DMMU, %g5
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%g3] ASI_MMU, %g5
.previous
add %g6, TI_FPREGS, %g1
sethi %hi(sparc64_kern_sec_context), %g2
ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
661: stxa %g2, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g3] ASI_MMU
.previous
membar #Sync
add %g6, TI_FPREGS + 0x40, %g2
faddd %f32, %f34, %f36
fmuld %f32, %f34, %f38
membar #Sync
ldda [%g1] ASI_BLK_S, %f0
ldda [%g2] ASI_BLK_S, %f16
membar #Sync
faddd %f32, %f34, %f40
fmuld %f32, %f34, %f42
faddd %f32, %f34, %f44
fmuld %f32, %f34, %f46
faddd %f32, %f34, %f48
fmuld %f32, %f34, %f50
faddd %f32, %f34, %f52
fmuld %f32, %f34, %f54
faddd %f32, %f34, %f56
fmuld %f32, %f34, %f58
faddd %f32, %f34, %f60
fmuld %f32, %f34, %f62
ba,a,pt %xcc, fpdis_exit
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
661: ldxa [%g3] ASI_DMMU, %g5
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%g3] ASI_MMU, %g5
.previous
sethi %hi(sparc64_kern_sec_context), %g2
ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
661: stxa %g2, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g3] ASI_MMU
.previous
membar #Sync
mov 0x40, %g2
membar #Sync
ldda [%g1] ASI_BLK_S, %f0
ldda [%g1 + %g2] ASI_BLK_S, %f16
add %g1, 0x80, %g1
ldda [%g1] ASI_BLK_S, %f32
ldda [%g1 + %g2] ASI_BLK_S, %f48
membar #Sync
fpdis_exit:
661: stxa %g5, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g5, [%g3] ASI_MMU
.previous
membar #Sync
fpdis_exit2:
wr %g7, 0, %gsr
ldx [%g6 + TI_XFSR], %fsr
rdpr %tstate, %g3
or %g3, %g4, %g3 ! anal...
wrpr %g3, %tstate
wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits
retry
.size do_fpdis,.-do_fpdis
.align 32
.type fp_other_bounce,#function
fp_other_bounce:
call do_fpother
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size fp_other_bounce,.-fp_other_bounce
.align 32
.globl do_fpother_check_fitos
.type do_fpother_check_fitos,#function
do_fpother_check_fitos:
TRAP_LOAD_THREAD_REG(%g6, %g1)
sethi %hi(fp_other_bounce - 4), %g7
or %g7, %lo(fp_other_bounce - 4), %g7
/* NOTE: Need to preserve %g7 until we fully commit
* to the fitos fixup.
*/
stx %fsr, [%g6 + TI_XFSR]
rdpr %tstate, %g3
andcc %g3, TSTATE_PRIV, %g0
bne,pn %xcc, do_fptrap_after_fsr
nop
ldx [%g6 + TI_XFSR], %g3
srlx %g3, 14, %g1
and %g1, 7, %g1
cmp %g1, 2 ! Unfinished FP-OP
bne,pn %xcc, do_fptrap_after_fsr
sethi %hi(1 << 23), %g1 ! Inexact
andcc %g3, %g1, %g0
bne,pn %xcc, do_fptrap_after_fsr
rdpr %tpc, %g1
lduwa [%g1] ASI_AIUP, %g3 ! This cannot ever fail
#define FITOS_MASK 0xc1f83fe0
#define FITOS_COMPARE 0x81a01880
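/* An instruction is fitos iff (insn & FITOS_MASK) == FITOS_COMPARE;
 * the mask keeps the op, op3 and opf fields and ignores the register
 * fields. The rs2 (bits 4:0) and rd (bits 29:25) fields are decoded
 * below to index the two jump tables.
 */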
sethi %hi(FITOS_MASK), %g1
or %g1, %lo(FITOS_MASK), %g1
and %g3, %g1, %g1
sethi %hi(FITOS_COMPARE), %g2
or %g2, %lo(FITOS_COMPARE), %g2
cmp %g1, %g2
bne,pn %xcc, do_fptrap_after_fsr
nop
std %f62, [%g6 + TI_FPREGS + (62 * 4)]
sethi %hi(fitos_table_1), %g1
and %g3, 0x1f, %g2
or %g1, %lo(fitos_table_1), %g1
sllx %g2, 2, %g2
jmpl %g1 + %g2, %g0
ba,pt %xcc, fitos_emul_continue
fitos_table_1:
fitod %f0, %f62
fitod %f1, %f62
fitod %f2, %f62
fitod %f3, %f62
fitod %f4, %f62
fitod %f5, %f62
fitod %f6, %f62
fitod %f7, %f62
fitod %f8, %f62
fitod %f9, %f62
fitod %f10, %f62
fitod %f11, %f62
fitod %f12, %f62
fitod %f13, %f62
fitod %f14, %f62
fitod %f15, %f62
fitod %f16, %f62
fitod %f17, %f62
fitod %f18, %f62
fitod %f19, %f62
fitod %f20, %f62
fitod %f21, %f62
fitod %f22, %f62
fitod %f23, %f62
fitod %f24, %f62
fitod %f25, %f62
fitod %f26, %f62
fitod %f27, %f62
fitod %f28, %f62
fitod %f29, %f62
fitod %f30, %f62
fitod %f31, %f62
fitos_emul_continue:
sethi %hi(fitos_table_2), %g1
srl %g3, 25, %g2
or %g1, %lo(fitos_table_2), %g1
and %g2, 0x1f, %g2
sllx %g2, 2, %g2
jmpl %g1 + %g2, %g0
ba,pt %xcc, fitos_emul_fini
fitos_table_2:
fdtos %f62, %f0
fdtos %f62, %f1
fdtos %f62, %f2
fdtos %f62, %f3
fdtos %f62, %f4
fdtos %f62, %f5
fdtos %f62, %f6
fdtos %f62, %f7
fdtos %f62, %f8
fdtos %f62, %f9
fdtos %f62, %f10
fdtos %f62, %f11
fdtos %f62, %f12
fdtos %f62, %f13
fdtos %f62, %f14
fdtos %f62, %f15
fdtos %f62, %f16
fdtos %f62, %f17
fdtos %f62, %f18
fdtos %f62, %f19
fdtos %f62, %f20
fdtos %f62, %f21
fdtos %f62, %f22
fdtos %f62, %f23
fdtos %f62, %f24
fdtos %f62, %f25
fdtos %f62, %f26
fdtos %f62, %f27
fdtos %f62, %f28
fdtos %f62, %f29
fdtos %f62, %f30
fdtos %f62, %f31
fitos_emul_fini:
ldd [%g6 + TI_FPREGS + (62 * 4)], %f62
done
.size do_fpother_check_fitos,.-do_fpother_check_fitos
.align 32
.globl do_fptrap
.type do_fptrap,#function
do_fptrap:
TRAP_LOAD_THREAD_REG(%g6, %g1)
stx %fsr, [%g6 + TI_XFSR]
do_fptrap_after_fsr:
ldub [%g6 + TI_FPSAVED], %g3
rd %fprs, %g1
or %g3, %g1, %g3
stb %g3, [%g6 + TI_FPSAVED]
rd %gsr, %g3
stx %g3, [%g6 + TI_GSR]
mov SECONDARY_CONTEXT, %g3
661: ldxa [%g3] ASI_DMMU, %g5
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%g3] ASI_MMU, %g5
.previous
sethi %hi(sparc64_kern_sec_context), %g2
ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
661: stxa %g2, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g3] ASI_MMU
.previous
membar #Sync
add %g6, TI_FPREGS, %g2
andcc %g1, FPRS_DL, %g0
be,pn %icc, 4f
mov 0x40, %g3
stda %f0, [%g2] ASI_BLK_S
stda %f16, [%g2 + %g3] ASI_BLK_S
andcc %g1, FPRS_DU, %g0
be,pn %icc, 5f
4: add %g2, 128, %g2
stda %f32, [%g2] ASI_BLK_S
stda %f48, [%g2 + %g3] ASI_BLK_S
5: mov SECONDARY_CONTEXT, %g1
membar #Sync
661: stxa %g5, [%g1] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g5, [%g1] ASI_MMU
.previous
membar #Sync
ba,pt %xcc, etrap
wr %g0, 0, %fprs
.size do_fptrap,.-do_fptrap
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,705
|
arch/sparc/kernel/hvtramp.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* hvtramp.S: Hypervisor start-cpu trampoline code.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <asm/thread_info.h>
#include <asm/hypervisor.h>
#include <asm/scratchpad.h>
#include <asm/spitfire.h>
#include <asm/hvtramp.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/pil.h>
.align 8
.globl hv_cpu_startup, hv_cpu_startup_end
/* This code executes directly out of the hypervisor
* with physical addressing (va==pa). %o0 contains
 * our client argument, which for Linux points to
 * a descriptor data structure defining the
 * MMU entries we need to load up.
*
* After we set things up we enable the MMU and call
* into the kernel.
*
* First setup basic privileged cpu state.
*/
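/* The HVTRAMP_DESCR_* offsets used below correspond to a
 * descriptor laid out roughly as follows (see asm/hvtramp.h
 * for the authoritative definition):
 *
 *	struct hvtramp_descr {
 *		u32			cpu;
 *		u32			num_mappings;
 *		u64			fault_info_va;
 *		u64			fault_info_pa;
 *		u64			thread_reg;
 *		struct hvtramp_mapping	maps[];
 *	};
 *	struct hvtramp_mapping { u64 vaddr; u64 tte; };
 */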
hv_cpu_startup:
SET_GL(0)
wrpr %g0, PIL_NORMAL_MAX, %pil
wrpr %g0, 0, %canrestore
wrpr %g0, 0, %otherwin
wrpr %g0, 6, %cansave
wrpr %g0, 6, %cleanwin
wrpr %g0, 0, %cwp
wrpr %g0, 0, %wstate
wrpr %g0, 0, %tl
sethi %hi(sparc64_ttable_tl0), %g1
wrpr %g1, %tba
mov %o0, %l0
lduw [%l0 + HVTRAMP_DESCR_CPU], %g1
mov SCRATCHPAD_CPUID, %g2
stxa %g1, [%g2] ASI_SCRATCHPAD
ldx [%l0 + HVTRAMP_DESCR_FAULT_INFO_VA], %g2
stxa %g2, [%g0] ASI_SCRATCHPAD
mov 0, %l1
lduw [%l0 + HVTRAMP_DESCR_NUM_MAPPINGS], %l2
add %l0, HVTRAMP_DESCR_MAPS, %l3
1: ldx [%l3 + HVTRAMP_MAPPING_VADDR], %o0
clr %o1
ldx [%l3 + HVTRAMP_MAPPING_TTE], %o2
mov HV_MMU_IMMU | HV_MMU_DMMU, %o3
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
ta HV_FAST_TRAP
brnz,pn %o0, 80f
nop
add %l1, 1, %l1
cmp %l1, %l2
blt,a,pt %xcc, 1b
add %l3, HVTRAMP_MAPPING_SIZE, %l3
ldx [%l0 + HVTRAMP_DESCR_FAULT_INFO_PA], %o0
mov HV_FAST_MMU_FAULT_AREA_CONF, %o5
ta HV_FAST_TRAP
brnz,pn %o0, 80f
nop
wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
ldx [%l0 + HVTRAMP_DESCR_THREAD_REG], %l6
mov 1, %o0
set 1f, %o1
mov HV_FAST_MMU_ENABLE, %o5
ta HV_FAST_TRAP
ba,pt %xcc, 80f
nop
1:
wr %g0, 0, %fprs
wr %g0, ASI_P, %asi
mov PRIMARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
mov %l6, %g6
ldx [%g6 + TI_TASK], %g4
mov 1, %g5
sllx %g5, THREAD_SHIFT, %g5
sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
add %g6, %g5, %sp
call init_irqwork_curcpu
nop
call hard_smp_processor_id
nop
call sun4v_register_mondo_queues
nop
call init_cur_cpu_trap
mov %g6, %o0
wrpr %g0, (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE), %pstate
call smp_callin
nop
call cpu_panic
nop
80: ba,pt %xcc, 80b
nop
.align 8
hv_cpu_startup_end:
|
AirFortressIlikara/LS2K0300-linux-4.19
| 23,501
|
arch/sparc/kernel/head_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
* Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
*/
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/processor.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/head.h>
#include <asm/ttable.h>
#include <asm/mmu.h>
#include <asm/cpudata.h>
#include <asm/pil.h>
#include <asm/estate.h>
#include <asm/sfafsr.h>
#include <asm/unistd.h>
#include <asm/export.h>
/* This section from _start to sparc64_boot_end should fit into
* 0x0000000000404000 to 0x0000000000408000.
*/
.text
.globl start, _start, stext, _stext
_start:
start:
_stext:
stext:
! 0x0000000000404000
b sparc64_boot
flushw /* Flush register file. */
/* This stuff has to be in sync with SILO and other potential boot loaders.
 * Fields should be kept upward compatible, and whenever any change is made,
* HdrS version should be incremented.
*/
.global root_flags, ram_flags, root_dev
.global sparc_ramdisk_image, sparc_ramdisk_size
.global sparc_ramdisk_image64
.ascii "HdrS"
.word LINUX_VERSION_CODE
/* History:
*
* 0x0300 : Supports being located at other than 0x4000
* 0x0202 : Supports kernel params string
* 0x0201 : Supports reboot_command
*/
.half 0x0301 /* HdrS version */
root_flags:
.half 1
root_dev:
.half 0
ram_flags:
.half 0
sparc_ramdisk_image:
.word 0
sparc_ramdisk_size:
.word 0
.xword reboot_command
.xword bootstr_info
sparc_ramdisk_image64:
.xword 0
.word _end
/* PROM cif handler code address is in %o4. */
sparc64_boot:
mov %o4, %l7
/* We need to remap the kernel. Use position independent
* code to remap us to KERNBASE.
*
* SILO can invoke us with 32-bit address masking enabled,
* so make sure that's clear.
*/
rdpr %pstate, %g1
andn %g1, PSTATE_AM, %g1
wrpr %g1, 0x0, %pstate
ba,a,pt %xcc, 1f
nop
.globl prom_finddev_name, prom_chosen_path, prom_root_node
.globl prom_getprop_name, prom_mmu_name, prom_peer_name
.globl prom_callmethod_name, prom_translate_name, prom_root_compatible
.globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
.globl prom_boot_mapped_pc, prom_boot_mapping_mode
.globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
.globl prom_compatible_name, prom_cpu_path, prom_cpu_compatible
.globl is_sun4v, sun4v_chip_type, prom_set_trap_table_name
prom_peer_name:
.asciz "peer"
prom_compatible_name:
.asciz "compatible"
prom_finddev_name:
.asciz "finddevice"
prom_chosen_path:
.asciz "/chosen"
prom_cpu_path:
.asciz "/cpu"
prom_getprop_name:
.asciz "getprop"
prom_mmu_name:
.asciz "mmu"
prom_callmethod_name:
.asciz "call-method"
prom_translate_name:
.asciz "translate"
prom_map_name:
.asciz "map"
prom_unmap_name:
.asciz "unmap"
prom_set_trap_table_name:
.asciz "SUNW,set-trap-table"
prom_sun4v_name:
.asciz "sun4v"
prom_niagara_prefix:
.asciz "SUNW,UltraSPARC-T"
prom_sparc_prefix:
.asciz "SPARC-"
prom_sparc64x_prefix:
.asciz "SPARC64-X"
.align 4
prom_root_compatible:
.skip 64
prom_cpu_compatible:
.skip 64
prom_root_node:
.word 0
EXPORT_SYMBOL(prom_root_node)
prom_mmu_ihandle_cache:
.word 0
prom_boot_mapped_pc:
.word 0
prom_boot_mapping_mode:
.word 0
.align 8
prom_boot_mapping_phys_high:
.xword 0
prom_boot_mapping_phys_low:
.xword 0
is_sun4v:
.word 0
sun4v_chip_type:
.word SUN4V_CHIP_INVALID
EXPORT_SYMBOL(sun4v_chip_type)
1:
rd %pc, %l0
mov (1b - prom_peer_name), %l1
sub %l0, %l1, %l1
mov 0, %l2
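/* All the PROM calls below use the IEEE 1275 client interface
 * convention: an array of 64-bit cells at %sp + 2047 + 128
 * (past the stack bias and the register window save area),
 * laid out as
 *
 *	[0x00] service name string
 *	[0x08] number of input arguments
 *	[0x10] number of return values
 *	[0x18...] input arguments, then return value slots
 */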
/* prom_root_node = prom_peer(0) */
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer"
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0
stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node
mov (1b - prom_root_node), %l1
sub %l0, %l1, %l1
stw %l4, [%l1]
mov (1b - prom_getprop_name), %l1
mov (1b - prom_compatible_name), %l2
mov (1b - prom_root_compatible), %l5
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l5, %l5
/* prom_getproperty(prom_root_node, "compatible",
* &prom_root_compatible, 64)
*/
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node
stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible"
stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible
mov 64, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size
stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
mov (1b - prom_finddev_name), %l1
mov (1b - prom_chosen_path), %l2
mov (1b - prom_boot_mapped_pc), %l3
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l3, %l3
stw %l0, [%l3]
sub %sp, (192 + 128), %sp
/* chosen_node = prom_finddevice("/chosen") */
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/chosen"
stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node
mov (1b - prom_getprop_name), %l1
mov (1b - prom_mmu_name), %l2
mov (1b - prom_mmu_ihandle_cache), %l5
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l5, %l5
/* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node
stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu"
stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3)
stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
mov (1b - prom_callmethod_name), %l1
mov (1b - prom_translate_name), %l2
sub %l0, %l1, %l1
sub %l0, %l2, %l2
lduw [%l5], %l5 ! prom_mmu_ihandle_cache
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method"
mov 3, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3
mov 5, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate"
stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache
/* PAGE align */
srlx %l0, 13, %l3
sllx %l3, 13, %l3
stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC
stx %g0, [%sp + 2047 + 128 + 0x30] ! res1
stx %g0, [%sp + 2047 + 128 + 0x38] ! res2
stx %g0, [%sp + 2047 + 128 + 0x40] ! res3
stx %g0, [%sp + 2047 + 128 + 0x48] ! res4
stx %g0, [%sp + 2047 + 128 + 0x50] ! res5
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode
mov (1b - prom_boot_mapping_mode), %l4
sub %l0, %l4, %l4
stw %l1, [%l4]
mov (1b - prom_boot_mapping_phys_high), %l4
sub %l0, %l4, %l4
ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high
stx %l2, [%l4 + 0x0]
ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
/* 4MB align */
srlx %l3, ILOG2_4MB, %l3
sllx %l3, ILOG2_4MB, %l3
stx %l3, [%l4 + 0x8]
/* Leave service as-is, "call-method" */
mov 7, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
mov (1b - prom_map_name), %l3
sub %l0, %l3, %l3
stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map"
/* Leave arg2 as-is, prom_mmu_ihandle_cache */
mov -1, %l3
stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default)
/* 4MB align the kernel image size. */
set (_end - KERNBASE), %l3
set ((4 * 1024 * 1024) - 1), %l4
add %l3, %l4, %l3
andn %l3, %l4, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB)
sethi %hi(KERNBASE), %l3
stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE)
stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
mov (1b - prom_boot_mapping_phys_low), %l3
sub %l0, %l3, %l3
ldx [%l3], %l3
stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr
call %l7
add %sp, (2047 + 128), %o0 ! argument array
add %sp, (192 + 128), %sp
sethi %hi(prom_root_compatible), %g1
or %g1, %lo(prom_root_compatible), %g1
sethi %hi(prom_sun4v_name), %g7
or %g7, %lo(prom_sun4v_name), %g7
mov 5, %g3
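/* Compare the first 5 bytes of the root node's "compatible"
 * property against "sun4v"; fall through on a full match and
 * set is_sun4v to 1, otherwise branch to 80f leaving it zero.
 */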
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 80f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 90b
add %g1, 1, %g1
sethi %hi(is_sun4v), %g1
or %g1, %lo(is_sun4v), %g1
mov 1, %g7
stw %g7, [%g1]
/* cpu_node = prom_finddevice("/cpu") */
mov (1b - prom_finddev_name), %l1
mov (1b - prom_cpu_path), %l2
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %sp, (192 + 128), %sp
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/cpu"
stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x20], %l4 ! cpu device node
mov (1b - prom_getprop_name), %l1
mov (1b - prom_compatible_name), %l2
mov (1b - prom_cpu_compatible), %l5
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l5, %l5
/* prom_getproperty(cpu_node, "compatible",
* &prom_cpu_compatible, 64)
*/
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, cpu_node
stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible"
stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_cpu_compatible
mov 64, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size
stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
add %sp, (192 + 128), %sp
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
sethi %hi(prom_niagara_prefix), %g7
or %g7, %lo(prom_niagara_prefix), %g7
mov 17, %g3
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 89f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 90b
add %g1, 1, %g1
ba,pt %xcc, 91f
nop
89: sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
sethi %hi(prom_sparc_prefix), %g7
or %g7, %lo(prom_sparc_prefix), %g7
mov 6, %g3
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 4f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 90b
add %g1, 1, %g1
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
ldub [%g1 + 6], %g2
cmp %g2, 'T'
be,pt %xcc, 70f
cmp %g2, 'M'
be,pt %xcc, 70f
cmp %g2, 'S'
bne,pn %xcc, 49f
nop
70: ldub [%g1 + 7], %g2
cmp %g2, CPU_ID_NIAGARA3
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA3, %g4
cmp %g2, CPU_ID_NIAGARA4
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA4, %g4
cmp %g2, CPU_ID_NIAGARA5
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA5, %g4
cmp %g2, CPU_ID_M6
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_M6, %g4
cmp %g2, CPU_ID_M7
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_M7, %g4
cmp %g2, CPU_ID_M8
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_M8, %g4
cmp %g2, CPU_ID_SONOMA1
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_SN, %g4
ba,pt %xcc, 49f
nop
91: sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
ldub [%g1 + 17], %g2
cmp %g2, CPU_ID_NIAGARA1
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA1, %g4
cmp %g2, CPU_ID_NIAGARA2
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA2, %g4
4:
/* Athena */
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
sethi %hi(prom_sparc64x_prefix), %g7
or %g7, %lo(prom_sparc64x_prefix), %g7
mov 9, %g3
41: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 49f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 41b
add %g1, 1, %g1
ba,pt %xcc, 5f
mov SUN4V_CHIP_SPARC64X, %g4
49:
mov SUN4V_CHIP_UNKNOWN, %g4
5: sethi %hi(sun4v_chip_type), %g2
or %g2, %lo(sun4v_chip_type), %g2
stw %g4, [%g2]
80:
BRANCH_IF_SUN4V(g1, jump_to_sun4u_init)
BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
ba,pt %xcc, spitfire_boot
nop
cheetah_plus_boot:
/* Preserve OBP chosen DCU and DCR register settings. */
ba,pt %xcc, cheetah_generic_boot
nop
cheetah_boot:
mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
wr %g1, %asr18
sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
or %g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
sllx %g7, 32, %g7
or %g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
stxa %g7, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
cheetah_generic_boot:
mov TSB_EXTENSION_P, %g3
stxa %g0, [%g3] ASI_DMMU
stxa %g0, [%g3] ASI_IMMU
membar #Sync
mov TSB_EXTENSION_S, %g3
stxa %g0, [%g3] ASI_DMMU
membar #Sync
mov TSB_EXTENSION_N, %g3
stxa %g0, [%g3] ASI_DMMU
stxa %g0, [%g3] ASI_IMMU
membar #Sync
ba,a,pt %xcc, jump_to_sun4u_init
spitfire_boot:
/* Typically PROM has already enabled both MMUs and both on-chip
 * caches, but we do it here anyway just to be paranoid.
*/
mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
stxa %g1, [%g0] ASI_LSU_CONTROL
membar #Sync
jump_to_sun4u_init:
/*
 * Make sure we are in privileged mode with address masking
 * disabled, using the ordinary globals, and with floating
 * point enabled.
*
* Again, typically PROM has left %pil at 13 or similar, and
* (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
*/
wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
wr %g0, 0, %fprs
set sun4u_init, %g2
jmpl %g2 + %g0, %g0
nop
__REF
sun4u_init:
BRANCH_IF_SUN4V(g1, sun4v_init)
/* Set ctx 0 */
mov PRIMARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_DMMU
membar #Sync
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_DMMU
membar #Sync
ba,a,pt %xcc, sun4u_continue
sun4v_init:
/* Set ctx 0 */
mov PRIMARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
ba,a,pt %xcc, niagara_tlb_fixup
sun4u_continue:
BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
ba,a,pt %xcc, spitfire_tlb_fixup
niagara_tlb_fixup:
mov 3, %g2 /* Set TLB type to hypervisor. */
sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
/* Patch copy/clear ops. */
sethi %hi(sun4v_chip_type), %g1
lduw [%g1 + %lo(sun4v_chip_type)], %g1
cmp %g1, SUN4V_CHIP_NIAGARA1
be,pt %xcc, niagara_patch
cmp %g1, SUN4V_CHIP_NIAGARA2
be,pt %xcc, niagara2_patch
nop
cmp %g1, SUN4V_CHIP_NIAGARA3
be,pt %xcc, niagara2_patch
nop
cmp %g1, SUN4V_CHIP_NIAGARA4
be,pt %xcc, niagara4_patch
nop
cmp %g1, SUN4V_CHIP_NIAGARA5
be,pt %xcc, niagara4_patch
nop
cmp %g1, SUN4V_CHIP_SPARC_M6
be,pt %xcc, niagara4_patch
nop
cmp %g1, SUN4V_CHIP_SPARC_M7
be,pt %xcc, sparc_m7_patch
nop
cmp %g1, SUN4V_CHIP_SPARC_M8
be,pt %xcc, sparc_m7_patch
nop
cmp %g1, SUN4V_CHIP_SPARC_SN
be,pt %xcc, niagara4_patch
nop
call generic_patch_copyops
nop
call generic_patch_bzero
nop
call generic_patch_pageops
nop
ba,a,pt %xcc, 80f
nop
sparc_m7_patch:
call m7_patch_copyops
nop
call m7_patch_bzero
nop
call m7_patch_pageops
nop
ba,a,pt %xcc, 80f
nop
niagara4_patch:
call niagara4_patch_copyops
nop
call niagara4_patch_bzero
nop
call niagara4_patch_pageops
nop
call niagara4_patch_fls
nop
ba,a,pt %xcc, 80f
nop
niagara2_patch:
call niagara2_patch_copyops
nop
call niagara_patch_bzero
nop
call niagara_patch_pageops
nop
ba,a,pt %xcc, 80f
nop
niagara_patch:
call niagara_patch_copyops
nop
call niagara_patch_bzero
nop
call niagara_patch_pageops
nop
80:
/* Patch TLB/cache ops. */
call hypervisor_patch_cachetlbops
nop
ba,a,pt %xcc, tlb_fixup_done
cheetah_tlb_fixup:
mov 2, %g2 /* Set TLB type to cheetah+. */
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
mov 1, %g2 /* Set TLB type to cheetah. */
1: sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
/* Patch copy/page operations to cheetah optimized versions. */
call cheetah_patch_copyops
nop
call cheetah_patch_copy_page
nop
call cheetah_patch_cachetlbops
nop
ba,a,pt %xcc, tlb_fixup_done
spitfire_tlb_fixup:
/* Set TLB type to spitfire. */
mov 0, %g2
sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
tlb_fixup_done:
sethi %hi(init_thread_union), %g6
or %g6, %lo(init_thread_union), %g6
ldx [%g6 + TI_TASK], %g4
wr %g0, ASI_P, %asi
mov 1, %g1
sllx %g1, THREAD_SHIFT, %g1
sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
add %g6, %g1, %sp
/* Set the per-cpu pointer initially to zero; this makes
 * the boot-cpu use the in-kernel-image per-cpu areas
* before setup_per_cpu_area() is invoked.
*/
clr %g5
wrpr %g0, 0, %wstate
wrpr %g0, 0x0, %tl
/* Clear the bss */
sethi %hi(__bss_start), %o0
or %o0, %lo(__bss_start), %o0
sethi %hi(_end), %o1
or %o1, %lo(_end), %o1
call __bzero
sub %o1, %o0, %o1
call prom_init
mov %l7, %o0 ! OpenPROM cif handler
/* To create a one-register-window buffer between the kernel's
* initial stack and the last stack frame we use from the firmware,
* do the rest of the boot from a C helper function.
*/
call start_early_boot
nop
/* Not reached... */
.previous
/* This is meant to allow the sharing of this code between
* boot processor invocation (via setup_tba() below) and
* secondary processor startup (via trampoline.S). The
 * former does use this code; the latter does not yet, due
 * to some complexities. That should be fixed up at some
* point.
*
* There used to be enormous complexity wrt. transferring
* over from the firmware's trap table to the Linux kernel's.
* For example, there was a chicken & egg problem wrt. building
* the OBP page tables, yet needing to be on the Linux kernel
* trap table (to translate PAGE_OFFSET addresses) in order to
* do that.
*
* We now handle OBP tlb misses differently, via linear lookups
* into the prom_trans[] array. So that specific problem no
* longer exists. Yet, unfortunately there are still some issues
* preventing trampoline.S from using this code... ho hum.
*/
.globl setup_trap_table
setup_trap_table:
save %sp, -192, %sp
/* Force interrupts to be disabled. */
rdpr %pstate, %l0
andn %l0, PSTATE_IE, %o1
wrpr %o1, 0x0, %pstate
rdpr %pil, %l1
wrpr %g0, PIL_NORMAL_MAX, %pil
/* Make the firmware call to jump over to the Linux trap table. */
sethi %hi(is_sun4v), %o0
lduw [%o0 + %lo(is_sun4v)], %o0
brz,pt %o0, 1f
nop
TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
add %g2, TRAP_PER_CPU_FAULT_INFO, %g2
stxa %g2, [%g0] ASI_SCRATCHPAD
/* Compute physical address:
*
* paddr = kern_base + (mmfsa_vaddr - KERNBASE)
*/
sethi %hi(KERNBASE), %g3
sub %g2, %g3, %g2
sethi %hi(kern_base), %g3
ldx [%g3 + %lo(kern_base)], %g3
add %g2, %g3, %o1
sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
mov 2, %g2
stx %g2, [%sp + 2047 + 128 + 0x08]
mov 0, %g2
stx %g2, [%sp + 2047 + 128 + 0x10]
stx %o0, [%sp + 2047 + 128 + 0x18]
stx %o1, [%sp + 2047 + 128 + 0x20]
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
ba,a,pt %xcc, 2f
1: sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
mov 1, %g2
stx %g2, [%sp + 2047 + 128 + 0x08]
mov 0, %g2
stx %g2, [%sp + 2047 + 128 + 0x10]
stx %o0, [%sp + 2047 + 128 + 0x18]
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
/* Start using proper page size encodings in ctx register. */
2: sethi %hi(sparc64_kern_pri_context), %g3
ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
661: stxa %g2, [%g1] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g1] ASI_MMU
.previous
membar #Sync
BRANCH_IF_SUN4V(o2, 1f)
/* Kill PROM timer */
sethi %hi(0x80000000), %o2
sllx %o2, 32, %o2
wr %o2, 0, %tick_cmpr
BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
ba,a,pt %xcc, 2f
/* Disable STICK_INT interrupts. */
1:
sethi %hi(0x80000000), %o2
sllx %o2, 32, %o2
wr %o2, %asr25
2:
wrpr %g0, %g0, %wstate
call init_irqwork_curcpu
nop
/* Now we can restore interrupt state. */
wrpr %l0, 0, %pstate
wrpr %l1, 0x0, %pil
ret
restore
.globl setup_tba
setup_tba:
save %sp, -192, %sp
/* The boot processor is the only cpu which invokes this
 * routine; the other cpus set things up via trampoline.S.
* So save the OBP trap table address here.
*/
rdpr %tba, %g7
sethi %hi(prom_tba), %o1
or %o1, %lo(prom_tba), %o1
stx %g7, [%o1]
call setup_trap_table
nop
ret
restore
sparc64_boot_end:
#include "etrap_64.S"
#include "rtrap_64.S"
#include "winfixup.S"
#include "fpu_traps.S"
#include "ivec.S"
#include "getsetcc.S"
#include "utrap.S"
#include "spiterrs.S"
#include "cherrs.S"
#include "misctrap.S"
#include "syscalls.S"
#include "helpers.S"
#include "sun4v_tlb_miss.S"
#include "sun4v_mcd.S"
#include "sun4v_ivec.S"
#include "ktlb.S"
#include "tsb.S"
/*
* The following skip makes sure the trap table in ttable.S is aligned
 * on a 32K boundary as required by the v9 specs for the TBA register.
*
* We align to a 32K boundary, then we have the 32K kernel TSB,
* the 64K kernel 4MB TSB, and then the 32K aligned trap table.
*/
1:
.skip 0x4000 + _start - 1b
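/* The skip amount is (_start + 0x4000) - 1b, i.e. pad from the
 * current location up to the absolute address 0x0000000000408000
 * annotated below.
 */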
! 0x0000000000408000
.globl swapper_tsb
swapper_tsb:
.skip (32 * 1024)
.globl swapper_4m_tsb
swapper_4m_tsb:
.skip (64 * 1024)
! 0x0000000000420000
/* Some care needs to be exercised if you try to move the
* location of the trap table relative to other things. For
* one thing there are br* instructions in some of the
 * trap table entries which branch back to code in ktlb.S.
* Those instructions can only handle a signed 16-bit
* displacement.
*
* There is a binutils bug (bugzilla #4558) which causes
* the relocation overflow checks for such instructions to
 * not be done correctly. So binutils will not notice the
* error and will instead write junk into the relocation and
* you'll have an unbootable kernel.
*/
#include "ttable_64.S"
! 0x0000000000428000
#include "hvcalls.S"
#include "systbls_64.S"
.data
.align 8
.globl prom_tba, tlb_type
prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
ENTRY(__retl_efault)
retl
mov -EFAULT, %o0
ENDPROC(__retl_efault)
ENTRY(__retl_o1)
retl
mov %o1, %o0
ENDPROC(__retl_o1)
ENTRY(__retl_o1_asi)
wr %o5, 0x0, %asi
retl
mov %o1, %o0
ENDPROC(__retl_o1_asi)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,235
|
arch/sparc/kernel/etrap_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
* Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
#define ETRAP_PSTATE1 (PSTATE_TSO | PSTATE_PRIV)
#define ETRAP_PSTATE2 \
(PSTATE_TSO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
/*
* On entry, %g7 is return address - 0x4.
 * %g4 and %g5 will be preserved in %l4 and %l5 respectively.
*/
.text
.align 64
.globl etrap_syscall, etrap, etrap_irq, etraptl1
etrap: rdpr %pil, %g2
etrap_irq: clr %g3
etrap_syscall: TRAP_LOAD_THREAD_REG(%g6, %g1)
rdpr %tstate, %g1
or %g1, %g3, %g1
sllx %g2, 20, %g3
andcc %g1, TSTATE_PRIV, %g0
or %g1, %g3, %g1
bne,pn %xcc, 1f
sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
661: wrpr %g0, 7, %cleanwin
.section .fast_win_ctrl_1insn_patch, "ax"
.word 661b
.word 0x85880000 ! allclean
.previous
sethi %hi(TASK_REGOFF), %g2
sethi %hi(TSTATE_PEF), %g3
or %g2, %lo(TASK_REGOFF), %g2
and %g1, %g3, %g3
brnz,pn %g3, 1f
add %g6, %g2, %g2
wr %g0, 0, %fprs
1: rdpr %tpc, %g3
stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
rdpr %tnpc, %g1
stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
rd %y, %g3
stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
rdpr %tt, %g1
st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
sethi %hi(PT_REGS_MAGIC), %g3
or %g3, %g1, %g1
st %g1, [%g2 + STACKFRAME_SZ + PT_V9_MAGIC]
rdpr %cansave, %g1
brnz,pt %g1, etrap_save
nop
rdpr %cwp, %g1
add %g1, 2, %g1
wrpr %g1, %cwp
be,pt %xcc, etrap_user_spill
mov ASI_AIUP, %g3
rdpr %otherwin, %g3
brz %g3, etrap_kernel_spill
mov ASI_AIUS, %g3
etrap_user_spill:
wr %g3, 0x0, %asi
ldx [%g6 + TI_FLAGS], %g3
and %g3, _TIF_32BIT, %g3
brnz,pt %g3, etrap_user_spill_32bit
nop
ba,a,pt %xcc, etrap_user_spill_64bit
etrap_save: save %g2, -STACK_BIAS, %sp
mov %g6, %l6
bne,pn %xcc, 3f
mov PRIMARY_CONTEXT, %l4
661: rdpr %canrestore, %g3
.section .fast_win_ctrl_1insn_patch, "ax"
.word 661b
nop
.previous
rdpr %wstate, %g2
661: wrpr %g0, 0, %canrestore
.section .fast_win_ctrl_1insn_patch, "ax"
.word 661b
nop
.previous
sll %g2, 3, %g2
/* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */
mov 1, %l5
sth %l5, [%l6 + TI_SYS_NOERROR]
661: wrpr %g3, 0, %otherwin
.section .fast_win_ctrl_1insn_patch, "ax"
.word 661b
.word 0x87880000 ! otherw
.previous
wrpr %g2, 0, %wstate
sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
661: stxa %g3, [%l4] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g3, [%l4] ASI_MMU
.previous
sethi %hi(KERNBASE), %l4
flush %l4
mov ASI_AIUS, %l7
2: mov %g4, %l4
mov %g5, %l5
add %g7, 4, %l2
/* Go to trap time globals so we can save them. */
661: wrpr %g0, ETRAP_PSTATE1, %pstate
.section .sun4v_1insn_patch, "ax"
.word 661b
SET_GL(0)
.previous
stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
sllx %l7, 24, %l7
stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
rdpr %cwp, %l0
stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
or %l7, %l0, %l7
661: sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0
/* If userspace is using ADI, it could potentially pass
* a pointer with version tag embedded in it. To maintain
* the ADI security, we must enable PSTATE.mcde. Userspace
* would have already set TTE.mcd in an earlier call to
* kernel and set the version tag for the address being
* dereferenced. Setting PSTATE.mcde would ensure any
* access to userspace data through a system call honors
* ADI and does not allow a rogue app to bypass ADI by
* using system calls. Setting PSTATE.mcde only affects
* accesses to virtual addresses that have TTE.mcd set.
* Set PMCDPER to ensure any exceptions caused by ADI
* version tag mismatch are exposed before system call
* returns to userspace. Setting PMCDPER affects only
* writes to virtual addresses that have TTE.mcd set and
* have a version tag set as well.
*/
.section .sun_m7_1insn_patch, "ax"
.word 661b
sethi %hi(TSTATE_TSO | TSTATE_PEF | TSTATE_MCDE), %l0
.previous
661: nop
.section .sun_m7_1insn_patch, "ax"
.word 661b
.word 0xaf902001 /* wrpr %g0, 1, %pmcdper */
.previous
or %l7, %l0, %l7
wrpr %l2, %tnpc
wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
mov %l6, %g6
stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1)
ldx [%g6 + TI_TASK], %g4
done
3: mov ASI_P, %l7
ldub [%l6 + TI_FPDEPTH], %l5
add %l6, TI_FPSAVED + 1, %l4
srl %l5, 1, %l3
add %l5, 2, %l5
/* Set TI_SYS_FPDEPTH to %l5 and clear TI_SYS_NOERROR. */
sth %l5, [%l6 + TI_SYS_NOERROR]
ba,pt %xcc, 2b
stb %g0, [%l4 + %l3]
nop
etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
* We place this right after pt_regs on the trap stack.
* The layout is:
* 0x00 TL1's TSTATE
* 0x08 TL1's TPC
* 0x10 TL1's TNPC
* 0x18 TL1's TT
* ...
* 0x78 TL4's TT
* 0x80 TL
*/
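/* The frame reserved below is ((4 * 8) * 4) + 8 = 136 bytes: four
 * trap levels times four saved registers (tstate/tpc/tnpc/tt) of
 * eight bytes each, plus eight bytes for the saved TL value stored
 * at offset 0x80.
 */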
TRAP_LOAD_THREAD_REG(%g6, %g1)
sub %sp, ((4 * 8) * 4) + 8, %g2
rdpr %tl, %g1
wrpr %g0, 1, %tl
rdpr %tstate, %g3
stx %g3, [%g2 + STACK_BIAS + 0x00]
rdpr %tpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x08]
rdpr %tnpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x10]
rdpr %tt, %g3
stx %g3, [%g2 + STACK_BIAS + 0x18]
wrpr %g0, 2, %tl
rdpr %tstate, %g3
stx %g3, [%g2 + STACK_BIAS + 0x20]
rdpr %tpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x28]
rdpr %tnpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x30]
rdpr %tt, %g3
stx %g3, [%g2 + STACK_BIAS + 0x38]
sethi %hi(is_sun4v), %g3
lduw [%g3 + %lo(is_sun4v)], %g3
brnz,pn %g3, finish_tl1_capture
nop
wrpr %g0, 3, %tl
rdpr %tstate, %g3
stx %g3, [%g2 + STACK_BIAS + 0x40]
rdpr %tpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x48]
rdpr %tnpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x50]
rdpr %tt, %g3
stx %g3, [%g2 + STACK_BIAS + 0x58]
wrpr %g0, 4, %tl
rdpr %tstate, %g3
stx %g3, [%g2 + STACK_BIAS + 0x60]
rdpr %tpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x68]
rdpr %tnpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x70]
rdpr %tt, %g3
stx %g3, [%g2 + STACK_BIAS + 0x78]
stx %g1, [%g2 + STACK_BIAS + 0x80]
finish_tl1_capture:
wrpr %g0, 1, %tl
661: nop
.section .sun4v_1insn_patch, "ax"
.word 661b
SET_GL(1)
.previous
rdpr %tstate, %g1
sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
ba,pt %xcc, 1b
andcc %g1, TSTATE_PRIV, %g0
#undef TASK_REGOFF
#undef ETRAP_PSTATE1
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,080
|
arch/sparc/kernel/misctrap.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_KGDB
.globl arch_kgdb_breakpoint
.type arch_kgdb_breakpoint,#function
arch_kgdb_breakpoint:
ta 0x72
retl
nop
.size arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
#endif
.type __do_privact,#function
__do_privact:
mov TLB_SFSR, %g3
stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
call do_privact
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size __do_privact,.-__do_privact
.type do_mna,#function
do_mna:
rdpr %tl, %g3
cmp %g3, 1
/* Setup %g4/%g5 now as they are used in the
* winfixup code.
*/
mov TLB_SFSR, %g3
mov DMMU_SFAR, %g4
ldxa [%g4] ASI_DMMU, %g4
ldxa [%g3] ASI_DMMU, %g5
stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
membar #Sync
bgu,pn %icc, winfix_mna
rdpr %tpc, %g3
1: sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call mem_address_unaligned
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size do_mna,.-do_mna
.type do_lddfmna,#function
do_lddfmna:
sethi %hi(109f), %g7
mov TLB_SFSR, %g4
ldxa [%g4] ASI_DMMU, %g5
stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
membar #Sync
mov DMMU_SFAR, %g4
ldxa [%g4] ASI_DMMU, %g4
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call handle_lddfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
.size do_lddfmna,.-do_lddfmna
.type do_stdfmna,#function
do_stdfmna:
sethi %hi(109f), %g7
mov TLB_SFSR, %g4
ldxa [%g4] ASI_DMMU, %g5
stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
membar #Sync
mov DMMU_SFAR, %g4
ldxa [%g4] ASI_DMMU, %g4
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
nop
.size do_stdfmna,.-do_stdfmna
.type breakpoint_trap,#function
breakpoint_trap:
call sparc_breakpoint
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size breakpoint_trap,.-breakpoint_trap
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,772
|
arch/sparc/kernel/syscalls.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* SunOS's execv() call only specifies the argv argument; the
* environment settings are the same as the calling process's.
*/
sys64_execve:
set sys_execve, %g1
jmpl %g1, %g0
flushw
sys64_execveat:
set sys_execveat, %g1
jmpl %g1, %g0
flushw
#ifdef CONFIG_COMPAT
sunos_execv:
mov %g0, %o2
sys32_execve:
set compat_sys_execve, %g1
jmpl %g1, %g0
flushw
sys32_execveat:
set compat_sys_execveat, %g1
jmpl %g1, %g0
flushw
#endif
.align 32
#ifdef CONFIG_COMPAT
sys32_sigstack:
ba,pt %xcc, do_sys32_sigstack
mov %i6, %o2
#endif
.align 32
#ifdef CONFIG_COMPAT
sys32_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_sigreturn32
add %o7, 1f-.-4, %o7
nop
#endif
sys_rt_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_rt_sigreturn
add %o7, 1f-.-4, %o7
nop
#ifdef CONFIG_COMPAT
sys32_rt_sigreturn:
add %sp, PTREGS_OFF, %o0
call do_rt_sigreturn32
add %o7, 1f-.-4, %o7
nop
#endif
.align 32
1: ldx [%g6 + TI_FLAGS], %l5
andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
be,pt %icc, rtrap
nop
call syscall_trace_leave
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
/* This is how fork() was meant to be done, 8 instruction entry.
*
* I questioned the following code briefly; let me clear things
* up so you don't have to reason through it the way I did.
*
* Know the fork_kpsr etc. we use in the sparc32 port? We don't
* need it here because the only piece of window state we copy to
* the child is the CWP register. Even if the parent sleeps,
* we are safe because we stuck it into pt_regs of the parent
* so it will not change.
*
* XXX This raises the question whether we can do the same on
* XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim. The
* XXX answer is yes. We stick fork_kpsr in UREG_G0 and
* XXX fork_kwim in UREG_G1 (global registers are considered
* XXX volatile across a system call in the sparc ABI I think
* XXX if it isn't we can use regs->y instead, anyone who depends
* XXX upon the Y register being preserved across a fork deserves
* XXX to lose).
*
* In fact we should take advantage of that fact for other things
* during system calls...
*/
.align 32
sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */
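/* The open-coded 0x4000 | 0x0100 below is CLONE_VFORK | CLONE_VM,
 * matching the CLONE_* values from the uapi sched headers, which
 * are not directly usable from this assembly file.
 */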
sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
ba,pt %xcc, sys_clone
sys_fork:
clr %o1
mov SIGCHLD, %o0
sys_clone:
flushw
movrz %o1, %fp, %o1
mov 0, %o3
ba,pt %xcc, sparc_do_fork
add %sp, PTREGS_OFF, %o2
.globl ret_from_fork
ret_from_fork:
/* Clear current_thread_info()->new_child. */
stb %g0, [%g6 + TI_NEW_CHILD]
call schedule_tail
mov %g7, %o0
ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0
brnz,pt %o0, ret_sys_call
ldx [%g6 + TI_FLAGS], %l0
ldx [%sp + PTREGS_OFF + PT_V9_G1], %l1
call %l1
ldx [%sp + PTREGS_OFF + PT_V9_G2], %o0
ba,pt %xcc, ret_sys_call
mov 0, %o0
.globl sparc_exit_group
.type sparc_exit_group,#function
sparc_exit_group:
sethi %hi(sys_exit_group), %g7
ba,pt %xcc, 1f
or %g7, %lo(sys_exit_group), %g7
.size sparc_exit_group,.-sparc_exit_group
.globl sparc_exit
.type sparc_exit,#function
sparc_exit:
sethi %hi(sys_exit), %g7
or %g7, %lo(sys_exit), %g7
1: rdpr %pstate, %g2
wrpr %g2, PSTATE_IE, %pstate
rdpr %otherwin, %g1
rdpr %cansave, %g3
add %g3, %g1, %g3
wrpr %g3, 0x0, %cansave
wrpr %g0, 0x0, %otherwin
wrpr %g2, 0x0, %pstate
jmpl %g7, %g0
stb %g0, [%g6 + TI_WSAVED]
.size sparc_exit,.-sparc_exit
linux_sparc_ni_syscall:
sethi %hi(sys_ni_syscall), %l7
ba,pt %xcc, 4f
or %l7, %lo(sys_ni_syscall), %l7
linux_syscall_trace32:
call syscall_trace_enter
add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f
mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
sethi %hi(sys_call_table32), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
or %l7, %lo(sys_call_table32), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
cmp %g1, NR_syscalls
bgeu,pn %xcc, 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
srl %i0, 0, %o0
lduw [%l7 + %l4], %l7
srl %i4, 0, %o4
srl %i1, 0, %o1
srl %i2, 0, %o2
ba,pt %xcc, 5f
srl %i3, 0, %o3
linux_syscall_trace:
call syscall_trace_enter
add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f
mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
sethi %hi(sys_call_table64), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
or %l7, %lo(sys_call_table64), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
cmp %g1, NR_syscalls
bgeu,pn %xcc, 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
mov %i0, %o0
lduw [%l7 + %l4], %l7
mov %i1, %o1
mov %i2, %o2
mov %i3, %o3
b,pt %xcc, 2f
mov %i4, %o4
/* Linux 32-bit system calls enter here... */
.align 32
.globl linux_sparc_syscall32
linux_sparc_syscall32:
/* Direct access to user regs, much faster. */
cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
srl %i0, 0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
srl %i4, 0, %o4 ! IEU1
lduw [%l7 + %l4], %l7 ! Load
srl %i1, 0, %o1 ! IEU0 Group
ldx [%g6 + TI_FLAGS], %l0 ! Load
srl %i3, 0, %o3 ! IEU0
srl %i2, 0, %o2 ! IEU0 Group
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
5: call %l7 ! CTI Group brk forced
srl %i5, 0, %o5 ! IEU1
ba,pt %xcc, 3f
sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
.globl linux_sparc_syscall
linux_sparc_syscall:
/* Direct access to user regs, much faster. */
cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
mov %i0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
mov %i1, %o1 ! IEU1
lduw [%l7 + %l4], %l7 ! Load
4: mov %i2, %o2 ! IEU0 Group
ldx [%g6 + TI_FLAGS], %l0 ! Load
mov %i3, %o3 ! IEU1
mov %i4, %o4 ! IEU0 Group
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
bne,pn %icc, linux_syscall_trace ! CTI Group
mov %i0, %l5 ! IEU0
2: call %l7 ! CTI Group brk forced
mov %i5, %o5 ! IEU0
nop
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
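/* sparc64 syscall return convention: success clears the carry bits
 * (TSTATE_XCARRY | TSTATE_ICARRY) in %tstate and returns %o0 as-is;
 * failure sets carry and returns the positive errno.  The unsigned
 * compare against -ERESTART_RESTARTBLOCK below classifies returns in
 * [-ERESTART_RESTARTBLOCK, -1] as errors, unless
 * force_successful_syscall_return() set TI_SYS_NOERROR.
 */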
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
2:
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
3:
stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
bne,pn %icc, linux_syscall_trace2
add %l1, 0x4, %l2 ! npc = npc+4
stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
ba,pt %xcc, rtrap
stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1:
/* Check if force_successful_syscall_return()
* was invoked.
*/
ldub [%g6 + TI_SYS_NOERROR], %l2
brnz,pn %l2, 2b
ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
sub %g0, %o0, %o0
stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ba,pt %xcc, 3b
or %g3, %g2, %g3
linux_syscall_trace2:
call syscall_trace_leave
add %sp, PTREGS_OFF, %o0
stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
ba,pt %xcc, rtrap
stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
|
AirFortressIlikara/LS2K0300-linux-4.19
| 39,199
|
arch/sparc/crypto/aes_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
#define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \
AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
AES_EROUND23(KEY_BASE + 6, T0, T1, I1)
#define ENCRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
AES_EROUND01(KEY_BASE + 0, I2, I3, T2) \
AES_EROUND23(KEY_BASE + 2, I2, I3, T3) \
AES_EROUND01(KEY_BASE + 4, T0, T1, I0) \
AES_EROUND23(KEY_BASE + 6, T0, T1, I1) \
AES_EROUND01(KEY_BASE + 4, T2, T3, I2) \
AES_EROUND23(KEY_BASE + 6, T2, T3, I3)
#define ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE, I0, I1, T0, T1) \
AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
AES_EROUND01_L(KEY_BASE + 4, T0, T1, I0) \
AES_EROUND23_L(KEY_BASE + 6, T0, T1, I1)
#define ENCRYPT_TWO_ROUNDS_LAST_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \
AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \
AES_EROUND01(KEY_BASE + 0, I2, I3, T2) \
AES_EROUND23(KEY_BASE + 2, I2, I3, T3) \
AES_EROUND01_L(KEY_BASE + 4, T0, T1, I0) \
AES_EROUND23_L(KEY_BASE + 6, T0, T1, I1) \
AES_EROUND01_L(KEY_BASE + 4, T2, T3, I2) \
AES_EROUND23_L(KEY_BASE + 6, T2, T3, I3)
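/* Each AES_EROUND01/AES_EROUND23 pair applies one full AES round to
 * a 128-bit block held in two 64-bit FP registers, consuming one
 * 128-bit round key (a pair of double regs starting at KEY_BASE).
 * The macros above do two rounds at a time, ping-ponging between the
 * I and T register pairs so no extra moves are needed; the _LAST
 * variants use the *_L opcodes for the final (MixColumns-free)
 * round.
 */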
/* 10 rounds */
#define ENCRYPT_128(KEY_BASE, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 0, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 8, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE + 32, I0, I1, T0, T1)
#define ENCRYPT_128_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 0, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 8, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_LAST_2(KEY_BASE + 32, I0, I1, I2, I3, T0, T1, T2, T3)
/* 12 rounds */
#define ENCRYPT_192(KEY_BASE, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 0, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 8, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 32, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE + 40, I0, I1, T0, T1)
#define ENCRYPT_192_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 0, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 8, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE + 32, I0, I1, I2, I3, T0, T1, T2, T3) \
ENCRYPT_TWO_ROUNDS_LAST_2(KEY_BASE + 40, I0, I1, I2, I3, T0, T1, T2, T3)
/* 14 rounds */
#define ENCRYPT_256(KEY_BASE, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 0, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 8, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 32, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS(KEY_BASE + 40, I0, I1, T0, T1) \
ENCRYPT_TWO_ROUNDS_LAST(KEY_BASE + 48, I0, I1, T0, T1)
#define ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, TMP_BASE) \
ENCRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, \
TMP_BASE + 0, TMP_BASE + 2, TMP_BASE + 4, TMP_BASE + 6)
#define ENCRYPT_256_2(KEY_BASE, I0, I1, I2, I3) \
ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 0, I0, I1, I2, I3, KEY_BASE + 48) \
ldd [%o0 + 0xd0], %f56; \
ldd [%o0 + 0xd8], %f58; \
ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 8, I0, I1, I2, I3, KEY_BASE + 0) \
ldd [%o0 + 0xe0], %f60; \
ldd [%o0 + 0xe8], %f62; \
ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, KEY_BASE + 0) \
ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, KEY_BASE + 0) \
ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 32, I0, I1, I2, I3, KEY_BASE + 0) \
ENCRYPT_256_TWO_ROUNDS_2(KEY_BASE + 40, I0, I1, I2, I3, KEY_BASE + 0) \
AES_EROUND01(KEY_BASE + 48, I0, I1, KEY_BASE + 0) \
AES_EROUND23(KEY_BASE + 50, I0, I1, KEY_BASE + 2) \
AES_EROUND01(KEY_BASE + 48, I2, I3, KEY_BASE + 4) \
AES_EROUND23(KEY_BASE + 50, I2, I3, KEY_BASE + 6) \
AES_EROUND01_L(KEY_BASE + 52, KEY_BASE + 0, KEY_BASE + 2, I0) \
AES_EROUND23_L(KEY_BASE + 54, KEY_BASE + 0, KEY_BASE + 2, I1) \
ldd [%o0 + 0x10], %f8; \
ldd [%o0 + 0x18], %f10; \
AES_EROUND01_L(KEY_BASE + 52, KEY_BASE + 4, KEY_BASE + 6, I2) \
AES_EROUND23_L(KEY_BASE + 54, KEY_BASE + 4, KEY_BASE + 6, I3) \
ldd [%o0 + 0x20], %f12; \
ldd [%o0 + 0x28], %f14;
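/* The 256-bit schedule is 15 round keys (240 bytes), too much to
 * keep resident in the FP register file alongside the data blocks,
 * so the _2 variant above borrows round-key registers as round
 * temporaries and reloads the clobbered keys from the key pointer
 * in %o0 interleaved with the computation.
 */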
#define DECRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \
AES_DROUND23(KEY_BASE + 0, I0, I1, T1) \
AES_DROUND01(KEY_BASE + 2, I0, I1, T0) \
AES_DROUND23(KEY_BASE + 4, T0, T1, I1) \
AES_DROUND01(KEY_BASE + 6, T0, T1, I0)
#define DECRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
AES_DROUND23(KEY_BASE + 0, I0, I1, T1) \
AES_DROUND01(KEY_BASE + 2, I0, I1, T0) \
AES_DROUND23(KEY_BASE + 0, I2, I3, T3) \
AES_DROUND01(KEY_BASE + 2, I2, I3, T2) \
AES_DROUND23(KEY_BASE + 4, T0, T1, I1) \
AES_DROUND01(KEY_BASE + 6, T0, T1, I0) \
AES_DROUND23(KEY_BASE + 4, T2, T3, I3) \
AES_DROUND01(KEY_BASE + 6, T2, T3, I2)
#define DECRYPT_TWO_ROUNDS_LAST(KEY_BASE, I0, I1, T0, T1) \
AES_DROUND23(KEY_BASE + 0, I0, I1, T1) \
AES_DROUND01(KEY_BASE + 2, I0, I1, T0) \
AES_DROUND23_L(KEY_BASE + 4, T0, T1, I1) \
AES_DROUND01_L(KEY_BASE + 6, T0, T1, I0)
#define DECRYPT_TWO_ROUNDS_LAST_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
AES_DROUND23(KEY_BASE + 0, I0, I1, T1) \
AES_DROUND01(KEY_BASE + 2, I0, I1, T0) \
AES_DROUND23(KEY_BASE + 0, I2, I3, T3) \
AES_DROUND01(KEY_BASE + 2, I2, I3, T2) \
AES_DROUND23_L(KEY_BASE + 4, T0, T1, I1) \
AES_DROUND01_L(KEY_BASE + 6, T0, T1, I0) \
AES_DROUND23_L(KEY_BASE + 4, T2, T3, I3) \
AES_DROUND01_L(KEY_BASE + 6, T2, T3, I2)
/* 10 rounds */
#define DECRYPT_128(KEY_BASE, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 0, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 8, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS_LAST(KEY_BASE + 32, I0, I1, T0, T1)
#define DECRYPT_128_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 0, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 8, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_LAST_2(KEY_BASE + 32, I0, I1, I2, I3, T0, T1, T2, T3)
/* 12 rounds */
#define DECRYPT_192(KEY_BASE, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 0, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 8, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 32, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS_LAST(KEY_BASE + 40, I0, I1, T0, T1)
#define DECRYPT_192_2(KEY_BASE, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 0, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 8, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE + 32, I0, I1, I2, I3, T0, T1, T2, T3) \
DECRYPT_TWO_ROUNDS_LAST_2(KEY_BASE + 40, I0, I1, I2, I3, T0, T1, T2, T3)
/* 14 rounds */
#define DECRYPT_256(KEY_BASE, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 0, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 8, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 16, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 24, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 32, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS(KEY_BASE + 40, I0, I1, T0, T1) \
DECRYPT_TWO_ROUNDS_LAST(KEY_BASE + 48, I0, I1, T0, T1)
#define DECRYPT_256_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, TMP_BASE) \
DECRYPT_TWO_ROUNDS_2(KEY_BASE, I0, I1, I2, I3, \
TMP_BASE + 0, TMP_BASE + 2, TMP_BASE + 4, TMP_BASE + 6)
#define DECRYPT_256_2(KEY_BASE, I0, I1, I2, I3) \
DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 0, I0, I1, I2, I3, KEY_BASE + 48) \
ldd [%o0 + 0x18], %f56; \
ldd [%o0 + 0x10], %f58; \
DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 8, I0, I1, I2, I3, KEY_BASE + 0) \
ldd [%o0 + 0x08], %f60; \
ldd [%o0 + 0x00], %f62; \
DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 16, I0, I1, I2, I3, KEY_BASE + 0) \
DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 24, I0, I1, I2, I3, KEY_BASE + 0) \
DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 32, I0, I1, I2, I3, KEY_BASE + 0) \
DECRYPT_256_TWO_ROUNDS_2(KEY_BASE + 40, I0, I1, I2, I3, KEY_BASE + 0) \
AES_DROUND23(KEY_BASE + 48, I0, I1, KEY_BASE + 2) \
AES_DROUND01(KEY_BASE + 50, I0, I1, KEY_BASE + 0) \
AES_DROUND23(KEY_BASE + 48, I2, I3, KEY_BASE + 6) \
AES_DROUND01(KEY_BASE + 50, I2, I3, KEY_BASE + 4) \
AES_DROUND23_L(KEY_BASE + 52, KEY_BASE + 0, KEY_BASE + 2, I1) \
AES_DROUND01_L(KEY_BASE + 54, KEY_BASE + 0, KEY_BASE + 2, I0) \
ldd [%o0 + 0xd8], %f8; \
ldd [%o0 + 0xd0], %f10; \
AES_DROUND23_L(KEY_BASE + 52, KEY_BASE + 4, KEY_BASE + 6, I3) \
AES_DROUND01_L(KEY_BASE + 54, KEY_BASE + 4, KEY_BASE + 6, I2) \
ldd [%o0 + 0xc8], %f12; \
ldd [%o0 + 0xc0], %f14;
.align 32
ENTRY(aes_sparc64_key_expand)
/* %o0=input_key, %o1=output_key, %o2=key_len */
VISEntry
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
ld [%o0 + 0x08], %f2
ld [%o0 + 0x0c], %f3
std %f0, [%o1 + 0x00]
std %f2, [%o1 + 0x08]
add %o1, 0x10, %o1
cmp %o2, 24
bl 2f
nop
be 1f
nop
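/* Dispatch on key length: %o2 < 24 (i.e. 16) -> 128-bit expansion
 * at 2:, %o2 == 24 -> 192-bit at 1:, otherwise (32) fall through to
 * the 256-bit expansion below.
 */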
/* 256-bit key expansion */
ld [%o0 + 0x10], %f4
ld [%o0 + 0x14], %f5
ld [%o0 + 0x18], %f6
ld [%o0 + 0x1c], %f7
std %f4, [%o1 + 0x00]
std %f6, [%o1 + 0x08]
add %o1, 0x10, %o1
AES_KEXPAND1(0, 6, 0x0, 8)
AES_KEXPAND2(2, 8, 10)
AES_KEXPAND0(4, 10, 12)
AES_KEXPAND2(6, 12, 14)
AES_KEXPAND1(8, 14, 0x1, 16)
AES_KEXPAND2(10, 16, 18)
AES_KEXPAND0(12, 18, 20)
AES_KEXPAND2(14, 20, 22)
AES_KEXPAND1(16, 22, 0x2, 24)
AES_KEXPAND2(18, 24, 26)
AES_KEXPAND0(20, 26, 28)
AES_KEXPAND2(22, 28, 30)
AES_KEXPAND1(24, 30, 0x3, 32)
AES_KEXPAND2(26, 32, 34)
AES_KEXPAND0(28, 34, 36)
AES_KEXPAND2(30, 36, 38)
AES_KEXPAND1(32, 38, 0x4, 40)
AES_KEXPAND2(34, 40, 42)
AES_KEXPAND0(36, 42, 44)
AES_KEXPAND2(38, 44, 46)
AES_KEXPAND1(40, 46, 0x5, 48)
AES_KEXPAND2(42, 48, 50)
AES_KEXPAND0(44, 50, 52)
AES_KEXPAND2(46, 52, 54)
AES_KEXPAND1(48, 54, 0x6, 56)
AES_KEXPAND2(50, 56, 58)
std %f8, [%o1 + 0x00]
std %f10, [%o1 + 0x08]
std %f12, [%o1 + 0x10]
std %f14, [%o1 + 0x18]
std %f16, [%o1 + 0x20]
std %f18, [%o1 + 0x28]
std %f20, [%o1 + 0x30]
std %f22, [%o1 + 0x38]
std %f24, [%o1 + 0x40]
std %f26, [%o1 + 0x48]
std %f28, [%o1 + 0x50]
std %f30, [%o1 + 0x58]
std %f32, [%o1 + 0x60]
std %f34, [%o1 + 0x68]
std %f36, [%o1 + 0x70]
std %f38, [%o1 + 0x78]
std %f40, [%o1 + 0x80]
std %f42, [%o1 + 0x88]
std %f44, [%o1 + 0x90]
std %f46, [%o1 + 0x98]
std %f48, [%o1 + 0xa0]
std %f50, [%o1 + 0xa8]
std %f52, [%o1 + 0xb0]
std %f54, [%o1 + 0xb8]
std %f56, [%o1 + 0xc0]
ba,pt %xcc, 80f
std %f58, [%o1 + 0xc8]
1:
/* 192-bit key expansion */
ld [%o0 + 0x10], %f4
ld [%o0 + 0x14], %f5
std %f4, [%o1 + 0x00]
add %o1, 0x08, %o1
AES_KEXPAND1(0, 4, 0x0, 6)
AES_KEXPAND2(2, 6, 8)
AES_KEXPAND2(4, 8, 10)
AES_KEXPAND1(6, 10, 0x1, 12)
AES_KEXPAND2(8, 12, 14)
AES_KEXPAND2(10, 14, 16)
AES_KEXPAND1(12, 16, 0x2, 18)
AES_KEXPAND2(14, 18, 20)
AES_KEXPAND2(16, 20, 22)
AES_KEXPAND1(18, 22, 0x3, 24)
AES_KEXPAND2(20, 24, 26)
AES_KEXPAND2(22, 26, 28)
AES_KEXPAND1(24, 28, 0x4, 30)
AES_KEXPAND2(26, 30, 32)
AES_KEXPAND2(28, 32, 34)
AES_KEXPAND1(30, 34, 0x5, 36)
AES_KEXPAND2(32, 36, 38)
AES_KEXPAND2(34, 38, 40)
AES_KEXPAND1(36, 40, 0x6, 42)
AES_KEXPAND2(38, 42, 44)
AES_KEXPAND2(40, 44, 46)
AES_KEXPAND1(42, 46, 0x7, 48)
AES_KEXPAND2(44, 48, 50)
std %f6, [%o1 + 0x00]
std %f8, [%o1 + 0x08]
std %f10, [%o1 + 0x10]
std %f12, [%o1 + 0x18]
std %f14, [%o1 + 0x20]
std %f16, [%o1 + 0x28]
std %f18, [%o1 + 0x30]
std %f20, [%o1 + 0x38]
std %f22, [%o1 + 0x40]
std %f24, [%o1 + 0x48]
std %f26, [%o1 + 0x50]
std %f28, [%o1 + 0x58]
std %f30, [%o1 + 0x60]
std %f32, [%o1 + 0x68]
std %f34, [%o1 + 0x70]
std %f36, [%o1 + 0x78]
std %f38, [%o1 + 0x80]
std %f40, [%o1 + 0x88]
std %f42, [%o1 + 0x90]
std %f44, [%o1 + 0x98]
std %f46, [%o1 + 0xa0]
std %f48, [%o1 + 0xa8]
ba,pt %xcc, 80f
std %f50, [%o1 + 0xb0]
2:
/* 128-bit key expansion */
AES_KEXPAND1(0, 2, 0x0, 4)
AES_KEXPAND2(2, 4, 6)
AES_KEXPAND1(4, 6, 0x1, 8)
AES_KEXPAND2(6, 8, 10)
AES_KEXPAND1(8, 10, 0x2, 12)
AES_KEXPAND2(10, 12, 14)
AES_KEXPAND1(12, 14, 0x3, 16)
AES_KEXPAND2(14, 16, 18)
AES_KEXPAND1(16, 18, 0x4, 20)
AES_KEXPAND2(18, 20, 22)
AES_KEXPAND1(20, 22, 0x5, 24)
AES_KEXPAND2(22, 24, 26)
AES_KEXPAND1(24, 26, 0x6, 28)
AES_KEXPAND2(26, 28, 30)
AES_KEXPAND1(28, 30, 0x7, 32)
AES_KEXPAND2(30, 32, 34)
AES_KEXPAND1(32, 34, 0x8, 36)
AES_KEXPAND2(34, 36, 38)
AES_KEXPAND1(36, 38, 0x9, 40)
AES_KEXPAND2(38, 40, 42)
std %f4, [%o1 + 0x00]
std %f6, [%o1 + 0x08]
std %f8, [%o1 + 0x10]
std %f10, [%o1 + 0x18]
std %f12, [%o1 + 0x20]
std %f14, [%o1 + 0x28]
std %f16, [%o1 + 0x30]
std %f18, [%o1 + 0x38]
std %f20, [%o1 + 0x40]
std %f22, [%o1 + 0x48]
std %f24, [%o1 + 0x50]
std %f26, [%o1 + 0x58]
std %f28, [%o1 + 0x60]
std %f30, [%o1 + 0x68]
std %f32, [%o1 + 0x70]
std %f34, [%o1 + 0x78]
std %f36, [%o1 + 0x80]
std %f38, [%o1 + 0x88]
std %f40, [%o1 + 0x90]
std %f42, [%o1 + 0x98]
80:
retl
VISExit
ENDPROC(aes_sparc64_key_expand)
.align 32
ENTRY(aes_sparc64_encrypt_128)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ld [%o1 + 0x00], %f4
ld [%o1 + 0x04], %f5
ld [%o1 + 0x08], %f6
ld [%o1 + 0x0c], %f7
ldd [%o0 + 0x00], %f8
ldd [%o0 + 0x08], %f10
ldd [%o0 + 0x10], %f12
ldd [%o0 + 0x18], %f14
ldd [%o0 + 0x20], %f16
ldd [%o0 + 0x28], %f18
ldd [%o0 + 0x30], %f20
ldd [%o0 + 0x38], %f22
ldd [%o0 + 0x40], %f24
ldd [%o0 + 0x48], %f26
ldd [%o0 + 0x50], %f28
ldd [%o0 + 0x58], %f30
ldd [%o0 + 0x60], %f32
ldd [%o0 + 0x68], %f34
ldd [%o0 + 0x70], %f36
ldd [%o0 + 0x78], %f38
ldd [%o0 + 0x80], %f40
ldd [%o0 + 0x88], %f42
ldd [%o0 + 0x90], %f44
ldd [%o0 + 0x98], %f46
ldd [%o0 + 0xa0], %f48
ldd [%o0 + 0xa8], %f50
fxor %f8, %f4, %f4
fxor %f10, %f6, %f6
ENCRYPT_128(12, 4, 6, 0, 2)
st %f4, [%o2 + 0x00]
st %f5, [%o2 + 0x04]
st %f6, [%o2 + 0x08]
st %f7, [%o2 + 0x0c]
retl
VISExit
ENDPROC(aes_sparc64_encrypt_128)
.align 32
ENTRY(aes_sparc64_encrypt_192)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ld [%o1 + 0x00], %f4
ld [%o1 + 0x04], %f5
ld [%o1 + 0x08], %f6
ld [%o1 + 0x0c], %f7
ldd [%o0 + 0x00], %f8
ldd [%o0 + 0x08], %f10
fxor %f8, %f4, %f4
fxor %f10, %f6, %f6
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
add %o0, 0x20, %o0
ENCRYPT_TWO_ROUNDS(8, 4, 6, 0, 2)
ldd [%o0 + 0x10], %f12
ldd [%o0 + 0x18], %f14
ldd [%o0 + 0x20], %f16
ldd [%o0 + 0x28], %f18
ldd [%o0 + 0x30], %f20
ldd [%o0 + 0x38], %f22
ldd [%o0 + 0x40], %f24
ldd [%o0 + 0x48], %f26
ldd [%o0 + 0x50], %f28
ldd [%o0 + 0x58], %f30
ldd [%o0 + 0x60], %f32
ldd [%o0 + 0x68], %f34
ldd [%o0 + 0x70], %f36
ldd [%o0 + 0x78], %f38
ldd [%o0 + 0x80], %f40
ldd [%o0 + 0x88], %f42
ldd [%o0 + 0x90], %f44
ldd [%o0 + 0x98], %f46
ldd [%o0 + 0xa0], %f48
ldd [%o0 + 0xa8], %f50
ENCRYPT_128(12, 4, 6, 0, 2)
st %f4, [%o2 + 0x00]
st %f5, [%o2 + 0x04]
st %f6, [%o2 + 0x08]
st %f7, [%o2 + 0x0c]
retl
VISExit
ENDPROC(aes_sparc64_encrypt_192)
.align 32
ENTRY(aes_sparc64_encrypt_256)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ld [%o1 + 0x00], %f4
ld [%o1 + 0x04], %f5
ld [%o1 + 0x08], %f6
ld [%o1 + 0x0c], %f7
ldd [%o0 + 0x00], %f8
ldd [%o0 + 0x08], %f10
fxor %f8, %f4, %f4
fxor %f10, %f6, %f6
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
add %o0, 0x20, %o0
ENCRYPT_TWO_ROUNDS(8, 4, 6, 0, 2)
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
add %o0, 0x20, %o0
ENCRYPT_TWO_ROUNDS(8, 4, 6, 0, 2)
ldd [%o0 + 0x10], %f12
ldd [%o0 + 0x18], %f14
ldd [%o0 + 0x20], %f16
ldd [%o0 + 0x28], %f18
ldd [%o0 + 0x30], %f20
ldd [%o0 + 0x38], %f22
ldd [%o0 + 0x40], %f24
ldd [%o0 + 0x48], %f26
ldd [%o0 + 0x50], %f28
ldd [%o0 + 0x58], %f30
ldd [%o0 + 0x60], %f32
ldd [%o0 + 0x68], %f34
ldd [%o0 + 0x70], %f36
ldd [%o0 + 0x78], %f38
ldd [%o0 + 0x80], %f40
ldd [%o0 + 0x88], %f42
ldd [%o0 + 0x90], %f44
ldd [%o0 + 0x98], %f46
ldd [%o0 + 0xa0], %f48
ldd [%o0 + 0xa8], %f50
ENCRYPT_128(12, 4, 6, 0, 2)
st %f4, [%o2 + 0x00]
st %f5, [%o2 + 0x04]
st %f6, [%o2 + 0x08]
st %f7, [%o2 + 0x0c]
retl
VISExit
ENDPROC(aes_sparc64_encrypt_256)
.align 32
ENTRY(aes_sparc64_decrypt_128)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ld [%o1 + 0x00], %f4
ld [%o1 + 0x04], %f5
ld [%o1 + 0x08], %f6
ld [%o1 + 0x0c], %f7
ldd [%o0 + 0xa0], %f8
ldd [%o0 + 0xa8], %f10
ldd [%o0 + 0x98], %f12
ldd [%o0 + 0x90], %f14
ldd [%o0 + 0x88], %f16
ldd [%o0 + 0x80], %f18
ldd [%o0 + 0x78], %f20
ldd [%o0 + 0x70], %f22
ldd [%o0 + 0x68], %f24
ldd [%o0 + 0x60], %f26
ldd [%o0 + 0x58], %f28
ldd [%o0 + 0x50], %f30
ldd [%o0 + 0x48], %f32
ldd [%o0 + 0x40], %f34
ldd [%o0 + 0x38], %f36
ldd [%o0 + 0x30], %f38
ldd [%o0 + 0x28], %f40
ldd [%o0 + 0x20], %f42
ldd [%o0 + 0x18], %f44
ldd [%o0 + 0x10], %f46
ldd [%o0 + 0x08], %f48
ldd [%o0 + 0x00], %f50
fxor %f8, %f4, %f4
fxor %f10, %f6, %f6
DECRYPT_128(12, 4, 6, 0, 2)
st %f4, [%o2 + 0x00]
st %f5, [%o2 + 0x04]
st %f6, [%o2 + 0x08]
st %f7, [%o2 + 0x0c]
retl
VISExit
ENDPROC(aes_sparc64_decrypt_128)
.align 32
ENTRY(aes_sparc64_decrypt_192)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ld [%o1 + 0x00], %f4
ld [%o1 + 0x04], %f5
ld [%o1 + 0x08], %f6
ld [%o1 + 0x0c], %f7
ldd [%o0 + 0xc0], %f8
ldd [%o0 + 0xc8], %f10
ldd [%o0 + 0xb8], %f12
ldd [%o0 + 0xb0], %f14
ldd [%o0 + 0xa8], %f16
ldd [%o0 + 0xa0], %f18
fxor %f8, %f4, %f4
fxor %f10, %f6, %f6
ldd [%o0 + 0x98], %f20
ldd [%o0 + 0x90], %f22
ldd [%o0 + 0x88], %f24
ldd [%o0 + 0x80], %f26
DECRYPT_TWO_ROUNDS(12, 4, 6, 0, 2)
ldd [%o0 + 0x78], %f28
ldd [%o0 + 0x70], %f30
ldd [%o0 + 0x68], %f32
ldd [%o0 + 0x60], %f34
ldd [%o0 + 0x58], %f36
ldd [%o0 + 0x50], %f38
ldd [%o0 + 0x48], %f40
ldd [%o0 + 0x40], %f42
ldd [%o0 + 0x38], %f44
ldd [%o0 + 0x30], %f46
ldd [%o0 + 0x28], %f48
ldd [%o0 + 0x20], %f50
ldd [%o0 + 0x18], %f52
ldd [%o0 + 0x10], %f54
ldd [%o0 + 0x08], %f56
ldd [%o0 + 0x00], %f58
DECRYPT_128(20, 4, 6, 0, 2)
st %f4, [%o2 + 0x00]
st %f5, [%o2 + 0x04]
st %f6, [%o2 + 0x08]
st %f7, [%o2 + 0x0c]
retl
VISExit
ENDPROC(aes_sparc64_decrypt_192)
.align 32
ENTRY(aes_sparc64_decrypt_256)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ld [%o1 + 0x00], %f4
ld [%o1 + 0x04], %f5
ld [%o1 + 0x08], %f6
ld [%o1 + 0x0c], %f7
ldd [%o0 + 0xe0], %f8
ldd [%o0 + 0xe8], %f10
ldd [%o0 + 0xd8], %f12
ldd [%o0 + 0xd0], %f14
ldd [%o0 + 0xc8], %f16
fxor %f8, %f4, %f4
ldd [%o0 + 0xc0], %f18
fxor %f10, %f6, %f6
ldd [%o0 + 0xb8], %f20
AES_DROUND23(12, 4, 6, 2)
ldd [%o0 + 0xb0], %f22
AES_DROUND01(14, 4, 6, 0)
ldd [%o0 + 0xa8], %f24
AES_DROUND23(16, 0, 2, 6)
ldd [%o0 + 0xa0], %f26
AES_DROUND01(18, 0, 2, 4)
ldd [%o0 + 0x98], %f12
AES_DROUND23(20, 4, 6, 2)
ldd [%o0 + 0x90], %f14
AES_DROUND01(22, 4, 6, 0)
ldd [%o0 + 0x88], %f16
AES_DROUND23(24, 0, 2, 6)
ldd [%o0 + 0x80], %f18
AES_DROUND01(26, 0, 2, 4)
ldd [%o0 + 0x78], %f20
AES_DROUND23(12, 4, 6, 2)
ldd [%o0 + 0x70], %f22
AES_DROUND01(14, 4, 6, 0)
ldd [%o0 + 0x68], %f24
AES_DROUND23(16, 0, 2, 6)
ldd [%o0 + 0x60], %f26
AES_DROUND01(18, 0, 2, 4)
ldd [%o0 + 0x58], %f28
AES_DROUND23(20, 4, 6, 2)
ldd [%o0 + 0x50], %f30
AES_DROUND01(22, 4, 6, 0)
ldd [%o0 + 0x48], %f32
AES_DROUND23(24, 0, 2, 6)
ldd [%o0 + 0x40], %f34
AES_DROUND01(26, 0, 2, 4)
ldd [%o0 + 0x38], %f36
AES_DROUND23(28, 4, 6, 2)
ldd [%o0 + 0x30], %f38
AES_DROUND01(30, 4, 6, 0)
ldd [%o0 + 0x28], %f40
AES_DROUND23(32, 0, 2, 6)
ldd [%o0 + 0x20], %f42
AES_DROUND01(34, 0, 2, 4)
ldd [%o0 + 0x18], %f44
AES_DROUND23(36, 4, 6, 2)
ldd [%o0 + 0x10], %f46
AES_DROUND01(38, 4, 6, 0)
ldd [%o0 + 0x08], %f48
AES_DROUND23(40, 0, 2, 6)
ldd [%o0 + 0x00], %f50
AES_DROUND01(42, 0, 2, 4)
AES_DROUND23(44, 4, 6, 2)
AES_DROUND01(46, 4, 6, 0)
AES_DROUND23_L(48, 0, 2, 6)
AES_DROUND01_L(50, 0, 2, 4)
st %f4, [%o2 + 0x00]
st %f5, [%o2 + 0x04]
st %f6, [%o2 + 0x08]
st %f7, [%o2 + 0x0c]
retl
VISExit
ENDPROC(aes_sparc64_decrypt_256)
.align 32
ENTRY(aes_sparc64_load_encrypt_keys_128)
/* %o0=key */
VISEntry
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
ldd [%o0 + 0x30], %f16
ldd [%o0 + 0x38], %f18
ldd [%o0 + 0x40], %f20
ldd [%o0 + 0x48], %f22
ldd [%o0 + 0x50], %f24
ldd [%o0 + 0x58], %f26
ldd [%o0 + 0x60], %f28
ldd [%o0 + 0x68], %f30
ldd [%o0 + 0x70], %f32
ldd [%o0 + 0x78], %f34
ldd [%o0 + 0x80], %f36
ldd [%o0 + 0x88], %f38
ldd [%o0 + 0x90], %f40
ldd [%o0 + 0x98], %f42
ldd [%o0 + 0xa0], %f44
retl
ldd [%o0 + 0xa8], %f46
ENDPROC(aes_sparc64_load_encrypt_keys_128)
.align 32
ENTRY(aes_sparc64_load_encrypt_keys_192)
/* %o0=key */
VISEntry
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
ldd [%o0 + 0x30], %f16
ldd [%o0 + 0x38], %f18
ldd [%o0 + 0x40], %f20
ldd [%o0 + 0x48], %f22
ldd [%o0 + 0x50], %f24
ldd [%o0 + 0x58], %f26
ldd [%o0 + 0x60], %f28
ldd [%o0 + 0x68], %f30
ldd [%o0 + 0x70], %f32
ldd [%o0 + 0x78], %f34
ldd [%o0 + 0x80], %f36
ldd [%o0 + 0x88], %f38
ldd [%o0 + 0x90], %f40
ldd [%o0 + 0x98], %f42
ldd [%o0 + 0xa0], %f44
ldd [%o0 + 0xa8], %f46
ldd [%o0 + 0xb0], %f48
ldd [%o0 + 0xb8], %f50
ldd [%o0 + 0xc0], %f52
retl
ldd [%o0 + 0xc8], %f54
ENDPROC(aes_sparc64_load_encrypt_keys_192)
.align 32
ENTRY(aes_sparc64_load_encrypt_keys_256)
/* %o0=key */
VISEntry
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
ldd [%o0 + 0x30], %f16
ldd [%o0 + 0x38], %f18
ldd [%o0 + 0x40], %f20
ldd [%o0 + 0x48], %f22
ldd [%o0 + 0x50], %f24
ldd [%o0 + 0x58], %f26
ldd [%o0 + 0x60], %f28
ldd [%o0 + 0x68], %f30
ldd [%o0 + 0x70], %f32
ldd [%o0 + 0x78], %f34
ldd [%o0 + 0x80], %f36
ldd [%o0 + 0x88], %f38
ldd [%o0 + 0x90], %f40
ldd [%o0 + 0x98], %f42
ldd [%o0 + 0xa0], %f44
ldd [%o0 + 0xa8], %f46
ldd [%o0 + 0xb0], %f48
ldd [%o0 + 0xb8], %f50
ldd [%o0 + 0xc0], %f52
ldd [%o0 + 0xc8], %f54
ldd [%o0 + 0xd0], %f56
ldd [%o0 + 0xd8], %f58
ldd [%o0 + 0xe0], %f60
retl
ldd [%o0 + 0xe8], %f62
ENDPROC(aes_sparc64_load_encrypt_keys_256)
.align 32
ENTRY(aes_sparc64_load_decrypt_keys_128)
/* %o0=key */
VISEntry
ldd [%o0 + 0x98], %f8
ldd [%o0 + 0x90], %f10
ldd [%o0 + 0x88], %f12
ldd [%o0 + 0x80], %f14
ldd [%o0 + 0x78], %f16
ldd [%o0 + 0x70], %f18
ldd [%o0 + 0x68], %f20
ldd [%o0 + 0x60], %f22
ldd [%o0 + 0x58], %f24
ldd [%o0 + 0x50], %f26
ldd [%o0 + 0x48], %f28
ldd [%o0 + 0x40], %f30
ldd [%o0 + 0x38], %f32
ldd [%o0 + 0x30], %f34
ldd [%o0 + 0x28], %f36
ldd [%o0 + 0x20], %f38
ldd [%o0 + 0x18], %f40
ldd [%o0 + 0x10], %f42
ldd [%o0 + 0x08], %f44
retl
ldd [%o0 + 0x00], %f46
ENDPROC(aes_sparc64_load_decrypt_keys_128)
.align 32
ENTRY(aes_sparc64_load_decrypt_keys_192)
/* %o0=key */
VISEntry
ldd [%o0 + 0xb8], %f8
ldd [%o0 + 0xb0], %f10
ldd [%o0 + 0xa8], %f12
ldd [%o0 + 0xa0], %f14
ldd [%o0 + 0x98], %f16
ldd [%o0 + 0x90], %f18
ldd [%o0 + 0x88], %f20
ldd [%o0 + 0x80], %f22
ldd [%o0 + 0x78], %f24
ldd [%o0 + 0x70], %f26
ldd [%o0 + 0x68], %f28
ldd [%o0 + 0x60], %f30
ldd [%o0 + 0x58], %f32
ldd [%o0 + 0x50], %f34
ldd [%o0 + 0x48], %f36
ldd [%o0 + 0x40], %f38
ldd [%o0 + 0x38], %f40
ldd [%o0 + 0x30], %f42
ldd [%o0 + 0x28], %f44
ldd [%o0 + 0x20], %f46
ldd [%o0 + 0x18], %f48
ldd [%o0 + 0x10], %f50
ldd [%o0 + 0x08], %f52
retl
ldd [%o0 + 0x00], %f54
ENDPROC(aes_sparc64_load_decrypt_keys_192)
.align 32
ENTRY(aes_sparc64_load_decrypt_keys_256)
/* %o0=key */
VISEntry
ldd [%o0 + 0xd8], %f8
ldd [%o0 + 0xd0], %f10
ldd [%o0 + 0xc8], %f12
ldd [%o0 + 0xc0], %f14
ldd [%o0 + 0xb8], %f16
ldd [%o0 + 0xb0], %f18
ldd [%o0 + 0xa8], %f20
ldd [%o0 + 0xa0], %f22
ldd [%o0 + 0x98], %f24
ldd [%o0 + 0x90], %f26
ldd [%o0 + 0x88], %f28
ldd [%o0 + 0x80], %f30
ldd [%o0 + 0x78], %f32
ldd [%o0 + 0x70], %f34
ldd [%o0 + 0x68], %f36
ldd [%o0 + 0x60], %f38
ldd [%o0 + 0x58], %f40
ldd [%o0 + 0x50], %f42
ldd [%o0 + 0x48], %f44
ldd [%o0 + 0x40], %f46
ldd [%o0 + 0x38], %f48
ldd [%o0 + 0x30], %f50
ldd [%o0 + 0x28], %f52
ldd [%o0 + 0x20], %f54
ldd [%o0 + 0x18], %f56
ldd [%o0 + 0x10], %f58
ldd [%o0 + 0x08], %f60
retl
ldd [%o0 + 0x00], %f62
ENDPROC(aes_sparc64_load_decrypt_keys_256)
.align 32
ENTRY(aes_sparc64_ecb_encrypt_128)
/* %o0=key, %o1=input, %o2=output, %o3=len */
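/* This and the sibling ECB/CTR routines work on two 16-byte blocks
 * per loop iteration: if the length is exactly one block we branch
 * straight to the single-block tail at 10:, and an odd trailing
 * block after the main loop falls through to the same tail (the
 * brlz skips it when nothing is left).
 */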
ldx [%o0 + 0x00], %g1
subcc %o3, 0x10, %o3
be 10f
ldx [%o0 + 0x08], %g2
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
xor %g1, %o4, %g3
xor %g2, %o5, %g7
MOVXTOD_G3_F60
MOVXTOD_G7_F62
ENCRYPT_128_2(8, 4, 6, 60, 62, 0, 2, 56, 58)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
std %f60, [%o2 + 0x10]
std %f62, [%o2 + 0x18]
sub %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
ENCRYPT_128(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: retl
nop
ENDPROC(aes_sparc64_ecb_encrypt_128)
.align 32
ENTRY(aes_sparc64_ecb_encrypt_192)
/* %o0=key, %o1=input, %o2=output, %o3=len */
ldx [%o0 + 0x00], %g1
subcc %o3, 0x10, %o3
be 10f
ldx [%o0 + 0x08], %g2
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
xor %g1, %o4, %g3
xor %g2, %o5, %g7
MOVXTOD_G3_F60
MOVXTOD_G7_F62
ENCRYPT_192_2(8, 4, 6, 60, 62, 0, 2, 56, 58)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
std %f60, [%o2 + 0x10]
std %f62, [%o2 + 0x18]
sub %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
ENCRYPT_192(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: retl
nop
ENDPROC(aes_sparc64_ecb_encrypt_192)
.align 32
ENTRY(aes_sparc64_ecb_encrypt_256)
/* %o0=key, %o1=input, %o2=output, %o3=len */
ldx [%o0 + 0x00], %g1
subcc %o3, 0x10, %o3
be 10f
ldx [%o0 + 0x08], %g2
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
xor %g1, %o4, %g3
xor %g2, %o5, %g7
MOVXTOD_G3_F0
MOVXTOD_G7_F2
ENCRYPT_256_2(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
std %f0, [%o2 + 0x10]
std %f2, [%o2 + 0x18]
sub %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: ldd [%o0 + 0xd0], %f56
ldd [%o0 + 0xd8], %f58
ldd [%o0 + 0xe0], %f60
ldd [%o0 + 0xe8], %f62
ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
ENCRYPT_256(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: retl
nop
ENDPROC(aes_sparc64_ecb_encrypt_256)
.align 32
ENTRY(aes_sparc64_ecb_decrypt_128)
/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
ldx [%o0 - 0x10], %g1
subcc %o3, 0x10, %o3
be 10f
ldx [%o0 - 0x08], %g2
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
xor %g1, %o4, %g3
xor %g2, %o5, %g7
MOVXTOD_G3_F60
MOVXTOD_G7_F62
DECRYPT_128_2(8, 4, 6, 60, 62, 0, 2, 56, 58)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
std %f60, [%o2 + 0x10]
std %f62, [%o2 + 0x18]
sub %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz,pt %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
DECRYPT_128(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: retl
nop
ENDPROC(aes_sparc64_ecb_decrypt_128)
.align 32
ENTRY(aes_sparc64_ecb_decrypt_192)
/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
ldx [%o0 - 0x10], %g1
subcc %o3, 0x10, %o3
be 10f
ldx [%o0 - 0x08], %g2
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
xor %g1, %o4, %g3
xor %g2, %o5, %g7
MOVXTOD_G3_F60
MOVXTOD_G7_F62
DECRYPT_192_2(8, 4, 6, 60, 62, 0, 2, 56, 58)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
std %f60, [%o2 + 0x10]
std %f62, [%o2 + 0x18]
sub %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz,pt %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
DECRYPT_192(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: retl
nop
ENDPROC(aes_sparc64_ecb_decrypt_192)
.align 32
ENTRY(aes_sparc64_ecb_decrypt_256)
/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */
ldx [%o0 - 0x10], %g1
subcc %o3, 0x10, %o3
ldx [%o0 - 0x08], %g2
be 10f
sub %o0, 0xf0, %o0
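/* The caller passes &key[key_len], i.e. the end of the 240-byte
 * expanded schedule; backing %o0 up by 0xf0 (executed in the branch
 * delay slot above, so on both paths) lets the round-key reloads
 * and the single-block tail at 10: use small positive offsets.
 */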
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
xor %g1, %o4, %g3
xor %g2, %o5, %g7
MOVXTOD_G3_F0
MOVXTOD_G7_F2
DECRYPT_256_2(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
std %f0, [%o2 + 0x10]
std %f2, [%o2 + 0x18]
sub %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz,pt %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: ldd [%o0 + 0x18], %f56
ldd [%o0 + 0x10], %f58
ldd [%o0 + 0x08], %f60
ldd [%o0 + 0x00], %f62
ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
DECRYPT_256(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: retl
nop
ENDPROC(aes_sparc64_ecb_decrypt_256)
.align 32
ENTRY(aes_sparc64_cbc_encrypt_128)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f4
ldd [%o4 + 0x08], %f6
ldx [%o0 + 0x00], %g1
ldx [%o0 + 0x08], %g2
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
add %o1, 0x10, %o1
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F0
MOVXTOD_G7_F2
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
ENCRYPT_128(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
subcc %o3, 0x10, %o3
bne,pt %xcc, 1b
add %o2, 0x10, %o2
std %f4, [%o4 + 0x00]
std %f6, [%o4 + 0x08]
retl
nop
ENDPROC(aes_sparc64_cbc_encrypt_128)
.align 32
ENTRY(aes_sparc64_cbc_encrypt_192)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f4
ldd [%o4 + 0x08], %f6
ldx [%o0 + 0x00], %g1
ldx [%o0 + 0x08], %g2
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
add %o1, 0x10, %o1
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F0
MOVXTOD_G7_F2
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
ENCRYPT_192(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
subcc %o3, 0x10, %o3
bne,pt %xcc, 1b
add %o2, 0x10, %o2
std %f4, [%o4 + 0x00]
std %f6, [%o4 + 0x08]
retl
nop
ENDPROC(aes_sparc64_cbc_encrypt_192)
.align 32
ENTRY(aes_sparc64_cbc_encrypt_256)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f4
ldd [%o4 + 0x08], %f6
ldx [%o0 + 0x00], %g1
ldx [%o0 + 0x08], %g2
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
add %o1, 0x10, %o1
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F0
MOVXTOD_G7_F2
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
ENCRYPT_256(8, 4, 6, 0, 2)
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
subcc %o3, 0x10, %o3
bne,pt %xcc, 1b
add %o2, 0x10, %o2
std %f4, [%o4 + 0x00]
std %f6, [%o4 + 0x08]
retl
nop
ENDPROC(aes_sparc64_cbc_encrypt_256)
.align 32
ENTRY(aes_sparc64_cbc_decrypt_128)
/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len, %o4=iv */
ldx [%o0 - 0x10], %g1
ldx [%o0 - 0x08], %g2
ldx [%o4 + 0x00], %o0
ldx [%o4 + 0x08], %o5
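/* %o0 is dead as a key pointer once %g1/%g2 are loaded, so it is
 * reused (together with %o5) to carry the IV across the loop.
 * Inside the loop, xor'ing %g1/%g2 back out of %g3/%g7 recovers the
 * raw ciphertext words, which become the next IV.
 */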
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
add %o1, 0x10, %o1
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
DECRYPT_128(8, 4, 6, 0, 2)
MOVXTOD_O0_F0
MOVXTOD_O5_F2
xor %g1, %g3, %o0
xor %g2, %g7, %o5
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
subcc %o3, 0x10, %o3
bne,pt %xcc, 1b
add %o2, 0x10, %o2
stx %o0, [%o4 + 0x00]
stx %o5, [%o4 + 0x08]
retl
nop
ENDPROC(aes_sparc64_cbc_decrypt_128)
.align 32
ENTRY(aes_sparc64_cbc_decrypt_192)
/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len, %o4=iv */
ldx [%o0 - 0x10], %g1
ldx [%o0 - 0x08], %g2
ldx [%o4 + 0x00], %o0
ldx [%o4 + 0x08], %o5
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
add %o1, 0x10, %o1
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
DECRYPT_192(8, 4, 6, 0, 2)
MOVXTOD_O0_F0
MOVXTOD_O5_F2
xor %g1, %g3, %o0
xor %g2, %g7, %o5
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
subcc %o3, 0x10, %o3
bne,pt %xcc, 1b
add %o2, 0x10, %o2
stx %o0, [%o4 + 0x00]
stx %o5, [%o4 + 0x08]
retl
nop
ENDPROC(aes_sparc64_cbc_decrypt_192)
.align 32
ENTRY(aes_sparc64_cbc_decrypt_256)
/* %o0=&key[key_len], %o1=input, %o2=output, %o3=len, %o4=iv */
ldx [%o0 - 0x10], %g1
ldx [%o0 - 0x08], %g2
ldx [%o4 + 0x00], %o0
ldx [%o4 + 0x08], %o5
1: ldx [%o1 + 0x00], %g3
ldx [%o1 + 0x08], %g7
add %o1, 0x10, %o1
xor %g1, %g3, %g3
xor %g2, %g7, %g7
MOVXTOD_G3_F4
MOVXTOD_G7_F6
DECRYPT_256(8, 4, 6, 0, 2)
MOVXTOD_O0_F0
MOVXTOD_O5_F2
xor %g1, %g3, %o0
xor %g2, %g7, %o5
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
subcc %o3, 0x10, %o3
bne,pt %xcc, 1b
add %o2, 0x10, %o2
stx %o0, [%o4 + 0x00]
stx %o5, [%o4 + 0x08]
retl
nop
ENDPROC(aes_sparc64_cbc_decrypt_256)
.align 32
ENTRY(aes_sparc64_ctr_crypt_128)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldx [%o4 + 0x00], %g3
ldx [%o4 + 0x08], %g7
subcc %o3, 0x10, %o3
ldx [%o0 + 0x00], %g1
be 10f
ldx [%o0 + 0x08], %g2
1: xor %g1, %g3, %o5
MOVXTOD_O5_F0
xor %g2, %g7, %o5
MOVXTOD_O5_F2
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
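/* 128-bit big-endian counter increment: bump the low word in %g7,
 * and if it wrapped to zero, commit the carried high word (%o5)
 * into %g3 via movrz.
 */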
xor %g1, %g3, %o5
MOVXTOD_O5_F4
xor %g2, %g7, %o5
MOVXTOD_O5_F6
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
ENCRYPT_128_2(8, 0, 2, 4, 6, 56, 58, 60, 62)
ldd [%o1 + 0x00], %f56
ldd [%o1 + 0x08], %f58
ldd [%o1 + 0x10], %f60
ldd [%o1 + 0x18], %f62
fxor %f56, %f0, %f56
fxor %f58, %f2, %f58
fxor %f60, %f4, %f60
fxor %f62, %f6, %f62
std %f56, [%o2 + 0x00]
std %f58, [%o2 + 0x08]
std %f60, [%o2 + 0x10]
std %f62, [%o2 + 0x18]
subcc %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: xor %g1, %g3, %o5
MOVXTOD_O5_F0
xor %g2, %g7, %o5
MOVXTOD_O5_F2
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
ENCRYPT_128(8, 0, 2, 4, 6)
ldd [%o1 + 0x00], %f4
ldd [%o1 + 0x08], %f6
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: stx %g3, [%o4 + 0x00]
retl
stx %g7, [%o4 + 0x08]
ENDPROC(aes_sparc64_ctr_crypt_128)
.align 32
ENTRY(aes_sparc64_ctr_crypt_192)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldx [%o4 + 0x00], %g3
ldx [%o4 + 0x08], %g7
subcc %o3, 0x10, %o3
ldx [%o0 + 0x00], %g1
be 10f
ldx [%o0 + 0x08], %g2
1: xor %g1, %g3, %o5
MOVXTOD_O5_F0
xor %g2, %g7, %o5
MOVXTOD_O5_F2
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
xor %g1, %g3, %o5
MOVXTOD_O5_F4
xor %g2, %g7, %o5
MOVXTOD_O5_F6
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
ENCRYPT_192_2(8, 0, 2, 4, 6, 56, 58, 60, 62)
ldd [%o1 + 0x00], %f56
ldd [%o1 + 0x08], %f58
ldd [%o1 + 0x10], %f60
ldd [%o1 + 0x18], %f62
fxor %f56, %f0, %f56
fxor %f58, %f2, %f58
fxor %f60, %f4, %f60
fxor %f62, %f6, %f62
std %f56, [%o2 + 0x00]
std %f58, [%o2 + 0x08]
std %f60, [%o2 + 0x10]
std %f62, [%o2 + 0x18]
subcc %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: xor %g1, %g3, %o5
MOVXTOD_O5_F0
xor %g2, %g7, %o5
MOVXTOD_O5_F2
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
ENCRYPT_192(8, 0, 2, 4, 6)
ldd [%o1 + 0x00], %f4
ldd [%o1 + 0x08], %f6
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: stx %g3, [%o4 + 0x00]
retl
stx %g7, [%o4 + 0x08]
ENDPROC(aes_sparc64_ctr_crypt_192)
.align 32
ENTRY(aes_sparc64_ctr_crypt_256)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldx [%o4 + 0x00], %g3
ldx [%o4 + 0x08], %g7
subcc %o3, 0x10, %o3
ldx [%o0 + 0x00], %g1
be 10f
ldx [%o0 + 0x08], %g2
1: xor %g1, %g3, %o5
MOVXTOD_O5_F0
xor %g2, %g7, %o5
MOVXTOD_O5_F2
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
xor %g1, %g3, %o5
MOVXTOD_O5_F4
xor %g2, %g7, %o5
MOVXTOD_O5_F6
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
ENCRYPT_256_2(8, 0, 2, 4, 6)
ldd [%o1 + 0x00], %f56
ldd [%o1 + 0x08], %f58
ldd [%o1 + 0x10], %f60
ldd [%o1 + 0x18], %f62
fxor %f56, %f0, %f56
fxor %f58, %f2, %f58
fxor %f60, %f4, %f60
fxor %f62, %f6, %f62
std %f56, [%o2 + 0x00]
std %f58, [%o2 + 0x08]
std %f60, [%o2 + 0x10]
std %f62, [%o2 + 0x18]
subcc %o3, 0x20, %o3
add %o1, 0x20, %o1
brgz %o3, 1b
add %o2, 0x20, %o2
brlz,pt %o3, 11f
nop
10: ldd [%o0 + 0xd0], %f56
ldd [%o0 + 0xd8], %f58
ldd [%o0 + 0xe0], %f60
ldd [%o0 + 0xe8], %f62
xor %g1, %g3, %o5
MOVXTOD_O5_F0
xor %g2, %g7, %o5
MOVXTOD_O5_F2
add %g7, 1, %g7
add %g3, 1, %o5
movrz %g7, %o5, %g3
ENCRYPT_256(8, 0, 2, 4, 6)
ldd [%o1 + 0x00], %f4
ldd [%o1 + 0x08], %f6
fxor %f4, %f0, %f4
fxor %f6, %f2, %f6
std %f4, [%o2 + 0x00]
std %f6, [%o2 + 0x08]
11: stx %g3, [%o4 + 0x00]
retl
stx %g7, [%o4 + 0x08]
ENDPROC(aes_sparc64_ctr_crypt_256)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,645
|
arch/sparc/crypto/des_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
.align 32
ENTRY(des_sparc64_key_expand)
/* %o0=input_key, %o1=output_key */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
DES_KEXPAND(0, 0, 0)
DES_KEXPAND(0, 1, 2)
DES_KEXPAND(2, 3, 6)
DES_KEXPAND(2, 2, 4)
DES_KEXPAND(6, 3, 10)
DES_KEXPAND(6, 2, 8)
DES_KEXPAND(10, 3, 14)
DES_KEXPAND(10, 2, 12)
DES_KEXPAND(14, 1, 16)
DES_KEXPAND(16, 3, 20)
DES_KEXPAND(16, 2, 18)
DES_KEXPAND(20, 3, 24)
DES_KEXPAND(20, 2, 22)
DES_KEXPAND(24, 3, 28)
DES_KEXPAND(24, 2, 26)
DES_KEXPAND(28, 1, 30)
std %f0, [%o1 + 0x00]
std %f2, [%o1 + 0x08]
std %f4, [%o1 + 0x10]
std %f6, [%o1 + 0x18]
std %f8, [%o1 + 0x20]
std %f10, [%o1 + 0x28]
std %f12, [%o1 + 0x30]
std %f14, [%o1 + 0x38]
std %f16, [%o1 + 0x40]
std %f18, [%o1 + 0x48]
std %f20, [%o1 + 0x50]
std %f22, [%o1 + 0x58]
std %f24, [%o1 + 0x60]
std %f26, [%o1 + 0x68]
std %f28, [%o1 + 0x70]
std %f30, [%o1 + 0x78]
retl
VISExitHalf
ENDPROC(des_sparc64_key_expand)
.align 32
ENTRY(des_sparc64_crypt)
/* %o0=key, %o1=input, %o2=output */
VISEntry
ldd [%o1 + 0x00], %f32
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o2 + 0x00]
retl
VISExit
ENDPROC(des_sparc64_crypt)
.align 32
ENTRY(des_sparc64_load_keys)
/* %o0=key */
VISEntry
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
retl
ldd [%o0 + 0x78], %f30
ENDPROC(des_sparc64_load_keys)
.align 32
ENTRY(des_sparc64_ecb_crypt)
/* %o0=input, %o1=output, %o2=len */
1: ldd [%o0 + 0x00], %f32
add %o0, 0x08, %o0
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o1 + 0x00]
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
nop
ENDPROC(des_sparc64_ecb_crypt)
.align 32
ENTRY(des_sparc64_cbc_encrypt)
/* %o0=input, %o1=output, %o2=len, %o3=IV */
ldd [%o3 + 0x00], %f32
1: ldd [%o0 + 0x00], %f34
fxor %f32, %f34, %f32
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o1 + 0x00]
add %o0, 0x08, %o0
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
std %f32, [%o3 + 0x00]
ENDPROC(des_sparc64_cbc_encrypt)
.align 32
ENTRY(des_sparc64_cbc_decrypt)
/* %o0=input, %o1=output, %o2=len, %o3=IV */
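/* %f34 carries the IV: each decrypted block is xor'ed with it, and
 * fsrc2 then copies the just-consumed ciphertext block (%f36) in as
 * the IV for the next iteration.
 */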
ldd [%o3 + 0x00], %f34
1: ldd [%o0 + 0x00], %f36
DES_IP(36, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
fxor %f32, %f34, %f32
fsrc2 %f36, %f34
std %f32, [%o1 + 0x00]
add %o0, 0x08, %o0
subcc %o2, 0x08, %o2
bne,pt %icc, 1b
add %o1, 0x08, %o1
retl
std %f36, [%o3 + 0x00]
ENDPROC(des_sparc64_cbc_decrypt)
.align 32
ENTRY(des3_ede_sparc64_crypt)
/* %o0=key, %o1=input, %o2=output */
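/* Triple DES (EDE): three 16-round DES passes run back to back on
 * the block in %f32.  The round keys for the second and third
 * passes (key offsets 0x80 and 0x100) are streamed into the FP
 * registers while the preceding pass is still executing.
 */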
VISEntry
ldd [%o1 + 0x00], %f32
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
ldd [%o0 + 0x80], %f0
ldd [%o0 + 0x88], %f2
DES_ROUND(4, 6, 32, 32)
ldd [%o0 + 0x90], %f4
ldd [%o0 + 0x98], %f6
DES_ROUND(8, 10, 32, 32)
ldd [%o0 + 0xa0], %f8
ldd [%o0 + 0xa8], %f10
DES_ROUND(12, 14, 32, 32)
ldd [%o0 + 0xb0], %f12
ldd [%o0 + 0xb8], %f14
DES_ROUND(16, 18, 32, 32)
ldd [%o0 + 0xc0], %f16
ldd [%o0 + 0xc8], %f18
DES_ROUND(20, 22, 32, 32)
ldd [%o0 + 0xd0], %f20
ldd [%o0 + 0xd8], %f22
DES_ROUND(24, 26, 32, 32)
ldd [%o0 + 0xe0], %f24
ldd [%o0 + 0xe8], %f26
DES_ROUND(28, 30, 32, 32)
ldd [%o0 + 0xf0], %f28
ldd [%o0 + 0xf8], %f30
DES_IIP(32, 32)
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
ldd [%o0 + 0x100], %f0
ldd [%o0 + 0x108], %f2
DES_ROUND(4, 6, 32, 32)
ldd [%o0 + 0x110], %f4
ldd [%o0 + 0x118], %f6
DES_ROUND(8, 10, 32, 32)
ldd [%o0 + 0x120], %f8
ldd [%o0 + 0x128], %f10
DES_ROUND(12, 14, 32, 32)
ldd [%o0 + 0x130], %f12
ldd [%o0 + 0x138], %f14
DES_ROUND(16, 18, 32, 32)
ldd [%o0 + 0x140], %f16
ldd [%o0 + 0x148], %f18
DES_ROUND(20, 22, 32, 32)
ldd [%o0 + 0x150], %f20
ldd [%o0 + 0x158], %f22
DES_ROUND(24, 26, 32, 32)
ldd [%o0 + 0x160], %f24
ldd [%o0 + 0x168], %f26
DES_ROUND(28, 30, 32, 32)
ldd [%o0 + 0x170], %f28
ldd [%o0 + 0x178], %f30
DES_IIP(32, 32)
DES_IP(32, 32)
DES_ROUND(0, 2, 32, 32)
DES_ROUND(4, 6, 32, 32)
DES_ROUND(8, 10, 32, 32)
DES_ROUND(12, 14, 32, 32)
DES_ROUND(16, 18, 32, 32)
DES_ROUND(20, 22, 32, 32)
DES_ROUND(24, 26, 32, 32)
DES_ROUND(28, 30, 32, 32)
DES_IIP(32, 32)
std %f32, [%o2 + 0x00]
retl
VISExit
ENDPROC(des3_ede_sparc64_crypt)
.align 32
ENTRY(des3_ede_sparc64_load_keys)
/* %o0=key */
VISEntry
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
ldd [%o0 + 0x30], %f12
ldd [%o0 + 0x38], %f14
ldd [%o0 + 0x40], %f16
ldd [%o0 + 0x48], %f18
ldd [%o0 + 0x50], %f20
ldd [%o0 + 0x58], %f22
ldd [%o0 + 0x60], %f24
ldd [%o0 + 0x68], %f26
ldd [%o0 + 0x70], %f28
ldd [%o0 + 0x78], %f30
ldd [%o0 + 0x80], %f32
ldd [%o0 + 0x88], %f34
ldd [%o0 + 0x90], %f36
ldd [%o0 + 0x98], %f38
ldd [%o0 + 0xa0], %f40
ldd [%o0 + 0xa8], %f42
ldd [%o0 + 0xb0], %f44
ldd [%o0 + 0xb8], %f46
ldd [%o0 + 0xc0], %f48
ldd [%o0 + 0xc8], %f50
ldd [%o0 + 0xd0], %f52
ldd [%o0 + 0xd8], %f54
ldd [%o0 + 0xe0], %f56
retl
ldd [%o0 + 0xe8], %f58
ENDPROC(des3_ede_sparc64_load_keys)
#define DES3_LOOP_BODY(X) \
DES_IP(X, X) \
DES_ROUND(0, 2, X, X) \
DES_ROUND(4, 6, X, X) \
DES_ROUND(8, 10, X, X) \
DES_ROUND(12, 14, X, X) \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0xf0], %f16; \
ldd [%o0 + 0xf8], %f18; \
DES_ROUND(20, 22, X, X) \
ldd [%o0 + 0x100], %f20; \
ldd [%o0 + 0x108], %f22; \
DES_ROUND(24, 26, X, X) \
ldd [%o0 + 0x110], %f24; \
ldd [%o0 + 0x118], %f26; \
DES_ROUND(28, 30, X, X) \
ldd [%o0 + 0x120], %f28; \
ldd [%o0 + 0x128], %f30; \
DES_IIP(X, X) \
DES_IP(X, X) \
DES_ROUND(32, 34, X, X) \
ldd [%o0 + 0x130], %f0; \
ldd [%o0 + 0x138], %f2; \
DES_ROUND(36, 38, X, X) \
ldd [%o0 + 0x140], %f4; \
ldd [%o0 + 0x148], %f6; \
DES_ROUND(40, 42, X, X) \
ldd [%o0 + 0x150], %f8; \
ldd [%o0 + 0x158], %f10; \
DES_ROUND(44, 46, X, X) \
ldd [%o0 + 0x160], %f12; \
ldd [%o0 + 0x168], %f14; \
DES_ROUND(48, 50, X, X) \
DES_ROUND(52, 54, X, X) \
DES_ROUND(56, 58, X, X) \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0x170], %f16; \
ldd [%o0 + 0x178], %f18; \
DES_IIP(X, X) \
DES_IP(X, X) \
DES_ROUND(20, 22, X, X) \
ldd [%o0 + 0x50], %f20; \
ldd [%o0 + 0x58], %f22; \
DES_ROUND(24, 26, X, X) \
ldd [%o0 + 0x60], %f24; \
ldd [%o0 + 0x68], %f26; \
DES_ROUND(28, 30, X, X) \
ldd [%o0 + 0x70], %f28; \
ldd [%o0 + 0x78], %f30; \
DES_ROUND(0, 2, X, X) \
ldd [%o0 + 0x00], %f0; \
ldd [%o0 + 0x08], %f2; \
DES_ROUND(4, 6, X, X) \
ldd [%o0 + 0x10], %f4; \
ldd [%o0 + 0x18], %f6; \
DES_ROUND(8, 10, X, X) \
ldd [%o0 + 0x20], %f8; \
ldd [%o0 + 0x28], %f10; \
DES_ROUND(12, 14, X, X) \
ldd [%o0 + 0x30], %f12; \
ldd [%o0 + 0x38], %f14; \
DES_ROUND(16, 18, X, X) \
ldd [%o0 + 0x40], %f16; \
ldd [%o0 + 0x48], %f18; \
DES_IIP(X, X)
.align 32
ENTRY(des3_ede_sparc64_ecb_crypt)
/* %o0=key, %o1=input, %o2=output, %o3=len */
1: ldd [%o1 + 0x00], %f60
DES3_LOOP_BODY(60)
std %f60, [%o2 + 0x00]
add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
nop
ENDPROC(des3_ede_sparc64_ecb_crypt)
.align 32
ENTRY(des3_ede_sparc64_cbc_encrypt)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f60
1: ldd [%o1 + 0x00], %f62
fxor %f60, %f62, %f60
DES3_LOOP_BODY(60)
std %f60, [%o2 + 0x00]
add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
std %f60, [%o4 + 0x00]
ENDPROC(des3_ede_sparc64_cbc_encrypt)
.align 32
ENTRY(des3_ede_sparc64_cbc_decrypt)
/* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
ldd [%o4 + 0x00], %f62
1: ldx [%o1 + 0x00], %g1
MOVXTOD_G1_F60
DES3_LOOP_BODY(60)
fxor %f62, %f60, %f60
MOVXTOD_G1_F62
std %f60, [%o2 + 0x00]
add %o1, 0x08, %o1
subcc %o3, 0x08, %o3
bne,pt %icc, 1b
add %o2, 0x08, %o2
retl
stx %g1, [%o4 + 0x00]
ENDPROC(des3_ede_sparc64_cbc_decrypt)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,134
|
arch/sparc/crypto/sha512_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
ENTRY(sha512_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntry
ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
ldd [%o0 + 0x10], %f4
ldd [%o0 + 0x18], %f6
ldd [%o0 + 0x20], %f8
ldd [%o0 + 0x28], %f10
andcc %o1, 0x7, %g0
ldd [%o0 + 0x30], %f12
bne,pn %xcc, 10f
ldd [%o0 + 0x38], %f14
1:
ldd [%o1 + 0x00], %f16
ldd [%o1 + 0x08], %f18
ldd [%o1 + 0x10], %f20
ldd [%o1 + 0x18], %f22
ldd [%o1 + 0x20], %f24
ldd [%o1 + 0x28], %f26
ldd [%o1 + 0x30], %f28
ldd [%o1 + 0x38], %f30
ldd [%o1 + 0x40], %f32
ldd [%o1 + 0x48], %f34
ldd [%o1 + 0x50], %f36
ldd [%o1 + 0x58], %f38
ldd [%o1 + 0x60], %f40
ldd [%o1 + 0x68], %f42
ldd [%o1 + 0x70], %f44
ldd [%o1 + 0x78], %f46
SHA512
subcc %o2, 1, %o2
bne,pt %xcc, 1b
add %o1, 0x80, %o1
5:
std %f0, [%o0 + 0x00]
std %f2, [%o0 + 0x08]
std %f4, [%o0 + 0x10]
std %f6, [%o0 + 0x18]
std %f8, [%o0 + 0x20]
std %f10, [%o0 + 0x28]
std %f12, [%o0 + 0x30]
std %f14, [%o0 + 0x38]
retl
VISExit
10:
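/* Unaligned input: alignaddr rounds %o1 down to an 8-byte boundary
 * and records the byte offset in the GSR; each faligndata below then
 * extracts one aligned 8-byte window from a pair of consecutive
 * doublewords. The sha256, md5 and sha1 transforms further down use
 * the same scheme.
 */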
alignaddr %o1, %g0, %o1
ldd [%o1 + 0x00], %f18
1:
ldd [%o1 + 0x08], %f20
ldd [%o1 + 0x10], %f22
ldd [%o1 + 0x18], %f24
ldd [%o1 + 0x20], %f26
ldd [%o1 + 0x28], %f28
ldd [%o1 + 0x30], %f30
ldd [%o1 + 0x38], %f32
ldd [%o1 + 0x40], %f34
ldd [%o1 + 0x48], %f36
ldd [%o1 + 0x50], %f38
ldd [%o1 + 0x58], %f40
ldd [%o1 + 0x60], %f42
ldd [%o1 + 0x68], %f44
ldd [%o1 + 0x70], %f46
ldd [%o1 + 0x78], %f48
ldd [%o1 + 0x80], %f50
faligndata %f18, %f20, %f16
faligndata %f20, %f22, %f18
faligndata %f22, %f24, %f20
faligndata %f24, %f26, %f22
faligndata %f26, %f28, %f24
faligndata %f28, %f30, %f26
faligndata %f30, %f32, %f28
faligndata %f32, %f34, %f30
faligndata %f34, %f36, %f32
faligndata %f36, %f38, %f34
faligndata %f38, %f40, %f36
faligndata %f40, %f42, %f38
faligndata %f42, %f44, %f40
faligndata %f44, %f46, %f42
faligndata %f46, %f48, %f44
faligndata %f48, %f50, %f46
SHA512
subcc %o2, 1, %o2
fsrc2 %f50, %f18
bne,pt %xcc, 1b
add %o1, 0x80, %o1
ba,a,pt %xcc, 5b
ENDPROC(sha512_sparc64_transform)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,502
|
arch/sparc/crypto/sha256_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
ENTRY(sha256_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
ld [%o0 + 0x08], %f2
ld [%o0 + 0x0c], %f3
ld [%o0 + 0x10], %f4
ld [%o0 + 0x14], %f5
andcc %o1, 0x7, %g0
ld [%o0 + 0x18], %f6
bne,pn %xcc, 10f
ld [%o0 + 0x1c], %f7
1:
ldd [%o1 + 0x00], %f8
ldd [%o1 + 0x08], %f10
ldd [%o1 + 0x10], %f12
ldd [%o1 + 0x18], %f14
ldd [%o1 + 0x20], %f16
ldd [%o1 + 0x28], %f18
ldd [%o1 + 0x30], %f20
ldd [%o1 + 0x38], %f22
SHA256
subcc %o2, 1, %o2
bne,pt %xcc, 1b
add %o1, 0x40, %o1
5:
st %f0, [%o0 + 0x00]
st %f1, [%o0 + 0x04]
st %f2, [%o0 + 0x08]
st %f3, [%o0 + 0x0c]
st %f4, [%o0 + 0x10]
st %f5, [%o0 + 0x14]
st %f6, [%o0 + 0x18]
st %f7, [%o0 + 0x1c]
retl
VISExitHalf
10:
alignaddr %o1, %g0, %o1
ldd [%o1 + 0x00], %f10
1:
ldd [%o1 + 0x08], %f12
ldd [%o1 + 0x10], %f14
ldd [%o1 + 0x18], %f16
ldd [%o1 + 0x20], %f18
ldd [%o1 + 0x28], %f20
ldd [%o1 + 0x30], %f22
ldd [%o1 + 0x38], %f24
ldd [%o1 + 0x40], %f26
faligndata %f10, %f12, %f8
faligndata %f12, %f14, %f10
faligndata %f14, %f16, %f12
faligndata %f16, %f18, %f14
faligndata %f18, %f20, %f16
faligndata %f20, %f22, %f18
faligndata %f22, %f24, %f20
faligndata %f24, %f26, %f22
SHA256
subcc %o2, 1, %o2
fsrc2 %f26, %f10
bne,pt %xcc, 1b
add %o1, 0x40, %o1
ba,a,pt %xcc, 5b
ENDPROC(sha256_sparc64_transform)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,314
|
arch/sparc/crypto/md5_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
ENTRY(md5_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
andcc %o1, 0x7, %g0
ld [%o0 + 0x08], %f2
bne,pn %xcc, 10f
ld [%o0 + 0x0c], %f3
1:
ldd [%o1 + 0x00], %f8
ldd [%o1 + 0x08], %f10
ldd [%o1 + 0x10], %f12
ldd [%o1 + 0x18], %f14
ldd [%o1 + 0x20], %f16
ldd [%o1 + 0x28], %f18
ldd [%o1 + 0x30], %f20
ldd [%o1 + 0x38], %f22
MD5
subcc %o2, 1, %o2
bne,pt %xcc, 1b
add %o1, 0x40, %o1
5:
st %f0, [%o0 + 0x00]
st %f1, [%o0 + 0x04]
st %f2, [%o0 + 0x08]
st %f3, [%o0 + 0x0c]
retl
VISExitHalf
10:
alignaddr %o1, %g0, %o1
ldd [%o1 + 0x00], %f10
1:
ldd [%o1 + 0x08], %f12
ldd [%o1 + 0x10], %f14
ldd [%o1 + 0x18], %f16
ldd [%o1 + 0x20], %f18
ldd [%o1 + 0x28], %f20
ldd [%o1 + 0x30], %f22
ldd [%o1 + 0x38], %f24
ldd [%o1 + 0x40], %f26
faligndata %f10, %f12, %f8
faligndata %f12, %f14, %f10
faligndata %f14, %f16, %f12
faligndata %f16, %f18, %f14
faligndata %f18, %f20, %f16
faligndata %f20, %f22, %f18
faligndata %f22, %f24, %f20
faligndata %f24, %f26, %f22
MD5
subcc %o2, 1, %o2
fsrc2 %f26, %f10
bne,pt %xcc, 1b
add %o1, 0x40, %o1
ba,a,pt %xcc, 5b
ENDPROC(md5_sparc64_transform)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,362
|
arch/sparc/crypto/sha1_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
ENTRY(sha1_sparc64_transform)
/* %o0 = digest, %o1 = data, %o2 = rounds */
VISEntryHalf
ld [%o0 + 0x00], %f0
ld [%o0 + 0x04], %f1
ld [%o0 + 0x08], %f2
andcc %o1, 0x7, %g0
ld [%o0 + 0x0c], %f3
bne,pn %xcc, 10f
ld [%o0 + 0x10], %f4
1:
ldd [%o1 + 0x00], %f8
ldd [%o1 + 0x08], %f10
ldd [%o1 + 0x10], %f12
ldd [%o1 + 0x18], %f14
ldd [%o1 + 0x20], %f16
ldd [%o1 + 0x28], %f18
ldd [%o1 + 0x30], %f20
ldd [%o1 + 0x38], %f22
SHA1
subcc %o2, 1, %o2
bne,pt %xcc, 1b
add %o1, 0x40, %o1
5:
st %f0, [%o0 + 0x00]
st %f1, [%o0 + 0x04]
st %f2, [%o0 + 0x08]
st %f3, [%o0 + 0x0c]
st %f4, [%o0 + 0x10]
retl
VISExitHalf
10:
alignaddr %o1, %g0, %o1
ldd [%o1 + 0x00], %f10
1:
ldd [%o1 + 0x08], %f12
ldd [%o1 + 0x10], %f14
ldd [%o1 + 0x18], %f16
ldd [%o1 + 0x20], %f18
ldd [%o1 + 0x28], %f20
ldd [%o1 + 0x30], %f22
ldd [%o1 + 0x38], %f24
ldd [%o1 + 0x40], %f26
faligndata %f10, %f12, %f8
faligndata %f12, %f14, %f10
faligndata %f14, %f16, %f12
faligndata %f16, %f18, %f14
faligndata %f18, %f20, %f16
faligndata %f20, %f22, %f18
faligndata %f22, %f24, %f20
faligndata %f24, %f26, %f22
SHA1
subcc %o2, 1, %o2
fsrc2 %f26, %f10
bne,pt %xcc, 1b
add %o1, 0x40, %o1
ba,a,pt %xcc, 5b
ENDPROC(sha1_sparc64_transform)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 13,850
|
arch/sparc/crypto/camellia_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/visasm.h>
#include "opcodes.h"
#define CAMELLIA_6ROUNDS(KEY_BASE, I0, I1) \
CAMELLIA_F(KEY_BASE + 0, I1, I0, I1) \
CAMELLIA_F(KEY_BASE + 2, I0, I1, I0) \
CAMELLIA_F(KEY_BASE + 4, I1, I0, I1) \
CAMELLIA_F(KEY_BASE + 6, I0, I1, I0) \
CAMELLIA_F(KEY_BASE + 8, I1, I0, I1) \
CAMELLIA_F(KEY_BASE + 10, I0, I1, I0)
#define CAMELLIA_6ROUNDS_FL_FLI(KEY_BASE, I0, I1) \
CAMELLIA_6ROUNDS(KEY_BASE, I0, I1) \
CAMELLIA_FL(KEY_BASE + 12, I0, I0) \
CAMELLIA_FLI(KEY_BASE + 14, I1, I1)
.data
.align 8
SIGMA: .xword 0xA09E667F3BCC908B
.xword 0xB67AE8584CAA73B2
.xword 0xC6EF372FE94F82BE
.xword 0x54FF53A5F1D36F1C
.xword 0x10E527FADE682D1D
.xword 0xB05688C2B3E6C1FD
.text
.align 32
ENTRY(camellia_sparc64_key_expand)
/* %o0=in_key, %o1=encrypt_key, %o2=key_len, %o3=decrypt_key */
VISEntry
ld [%o0 + 0x00], %f0 ! i0, k[0]
ld [%o0 + 0x04], %f1 ! i1, k[1]
ld [%o0 + 0x08], %f2 ! i2, k[2]
ld [%o0 + 0x0c], %f3 ! i3, k[3]
std %f0, [%o1 + 0x00] ! k[0, 1]
fsrc2 %f0, %f28
std %f2, [%o1 + 0x08] ! k[2, 3]
cmp %o2, 16
be 10f
fsrc2 %f2, %f30
ld [%o0 + 0x10], %f0
ld [%o0 + 0x14], %f1
std %f0, [%o1 + 0x20] ! k[8, 9]
cmp %o2, 24
fone %f10
be,a 1f
fxor %f10, %f0, %f2
ld [%o0 + 0x18], %f2
ld [%o0 + 0x1c], %f3
1:
std %f2, [%o1 + 0x28] ! k[10, 11]
fxor %f28, %f0, %f0
fxor %f30, %f2, %f2
10:
sethi %hi(SIGMA), %g3
or %g3, %lo(SIGMA), %g3
ldd [%g3 + 0x00], %f16
ldd [%g3 + 0x08], %f18
ldd [%g3 + 0x10], %f20
ldd [%g3 + 0x18], %f22
ldd [%g3 + 0x20], %f24
ldd [%g3 + 0x28], %f26
CAMELLIA_F(16, 2, 0, 2)
CAMELLIA_F(18, 0, 2, 0)
fxor %f28, %f0, %f0
fxor %f30, %f2, %f2
CAMELLIA_F(20, 2, 0, 2)
CAMELLIA_F(22, 0, 2, 0)
#define ROTL128(S01, S23, TMP1, TMP2, N) \
srlx S01, (64 - N), TMP1; \
sllx S01, N, S01; \
srlx S23, (64 - N), TMP2; \
sllx S23, N, S23; \
or S01, TMP2, S01; \
or S23, TMP1, S23
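/* ROTL128 is a 128-bit left-rotate by N over the S01:S23 register
 * pair. A rough C sketch of the same operation (variable names are
 * illustrative):
 *
 *	hi = (s01 << n) | (s23 >> (64 - n));
 *	lo = (s23 << n) | (s01 >> (64 - n));
 *	s01 = hi;
 *	s23 = lo;
 */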
cmp %o2, 16
bne 1f
nop
/* 128-bit key */
std %f0, [%o1 + 0x10] ! k[ 4, 5]
std %f2, [%o1 + 0x18] ! k[ 6, 7]
MOVDTOX_F0_O4
MOVDTOX_F2_O5
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x30] ! k[12, 13]
stx %o5, [%o1 + 0x38] ! k[14, 15]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x40] ! k[16, 17]
stx %o5, [%o1 + 0x48] ! k[18, 19]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x60] ! k[24, 25]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x70] ! k[28, 29]
stx %o5, [%o1 + 0x78] ! k[30, 31]
ROTL128(%o4, %o5, %g2, %g3, 34)
stx %o4, [%o1 + 0xa0] ! k[40, 41]
stx %o5, [%o1 + 0xa8] ! k[42, 43]
ROTL128(%o4, %o5, %g2, %g3, 17)
stx %o4, [%o1 + 0xc0] ! k[48, 49]
stx %o5, [%o1 + 0xc8] ! k[50, 51]
ldx [%o1 + 0x00], %o4 ! k[ 0, 1]
ldx [%o1 + 0x08], %o5 ! k[ 2, 3]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x20] ! k[ 8, 9]
stx %o5, [%o1 + 0x28] ! k[10, 11]
ROTL128(%o4, %o5, %g2, %g3, 30)
stx %o4, [%o1 + 0x50] ! k[20, 21]
stx %o5, [%o1 + 0x58] ! k[22, 23]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o5, [%o1 + 0x68] ! k[26, 27]
ROTL128(%o4, %o5, %g2, %g3, 17)
stx %o4, [%o1 + 0x80] ! k[32, 33]
stx %o5, [%o1 + 0x88] ! k[34, 35]
ROTL128(%o4, %o5, %g2, %g3, 17)
stx %o4, [%o1 + 0x90] ! k[36, 37]
stx %o5, [%o1 + 0x98] ! k[38, 39]
ROTL128(%o4, %o5, %g2, %g3, 17)
stx %o4, [%o1 + 0xb0] ! k[44, 45]
stx %o5, [%o1 + 0xb8] ! k[46, 47]
ba,pt %xcc, 2f
mov (3 * 16 * 4), %o0
1:
/* 192-bit or 256-bit key */
std %f0, [%o1 + 0x30] ! k[12, 13]
std %f2, [%o1 + 0x38] ! k[14, 15]
ldd [%o1 + 0x20], %f4 ! k[ 8, 9]
ldd [%o1 + 0x28], %f6 ! k[10, 11]
fxor %f0, %f4, %f0
fxor %f2, %f6, %f2
CAMELLIA_F(24, 2, 0, 2)
CAMELLIA_F(26, 0, 2, 0)
std %f0, [%o1 + 0x10] ! k[ 4, 5]
std %f2, [%o1 + 0x18] ! k[ 6, 7]
MOVDTOX_F0_O4
MOVDTOX_F2_O5
ROTL128(%o4, %o5, %g2, %g3, 30)
stx %o4, [%o1 + 0x50] ! k[20, 21]
stx %o5, [%o1 + 0x58] ! k[22, 23]
ROTL128(%o4, %o5, %g2, %g3, 30)
stx %o4, [%o1 + 0xa0] ! k[40, 41]
stx %o5, [%o1 + 0xa8] ! k[42, 43]
ROTL128(%o4, %o5, %g2, %g3, 51)
stx %o4, [%o1 + 0x100] ! k[64, 65]
stx %o5, [%o1 + 0x108] ! k[66, 67]
ldx [%o1 + 0x20], %o4 ! k[ 8, 9]
ldx [%o1 + 0x28], %o5 ! k[10, 11]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x20] ! k[ 8, 9]
stx %o5, [%o1 + 0x28] ! k[10, 11]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x40] ! k[16, 17]
stx %o5, [%o1 + 0x48] ! k[18, 19]
ROTL128(%o4, %o5, %g2, %g3, 30)
stx %o4, [%o1 + 0x90] ! k[36, 37]
stx %o5, [%o1 + 0x98] ! k[38, 39]
ROTL128(%o4, %o5, %g2, %g3, 34)
stx %o4, [%o1 + 0xd0] ! k[52, 53]
stx %o5, [%o1 + 0xd8] ! k[54, 55]
ldx [%o1 + 0x30], %o4 ! k[12, 13]
ldx [%o1 + 0x38], %o5 ! k[14, 15]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x30] ! k[12, 13]
stx %o5, [%o1 + 0x38] ! k[14, 15]
ROTL128(%o4, %o5, %g2, %g3, 30)
stx %o4, [%o1 + 0x70] ! k[28, 29]
stx %o5, [%o1 + 0x78] ! k[30, 31]
srlx %o4, 32, %g2
srlx %o5, 32, %g3
stw %o4, [%o1 + 0xc0] ! k[48]
stw %g3, [%o1 + 0xc4] ! k[49]
stw %o5, [%o1 + 0xc8] ! k[50]
stw %g2, [%o1 + 0xcc] ! k[51]
ROTL128(%o4, %o5, %g2, %g3, 49)
stx %o4, [%o1 + 0xe0] ! k[56, 57]
stx %o5, [%o1 + 0xe8] ! k[58, 59]
ldx [%o1 + 0x00], %o4 ! k[ 0, 1]
ldx [%o1 + 0x08], %o5 ! k[ 2, 3]
ROTL128(%o4, %o5, %g2, %g3, 45)
stx %o4, [%o1 + 0x60] ! k[24, 25]
stx %o5, [%o1 + 0x68] ! k[26, 27]
ROTL128(%o4, %o5, %g2, %g3, 15)
stx %o4, [%o1 + 0x80] ! k[32, 33]
stx %o5, [%o1 + 0x88] ! k[34, 35]
ROTL128(%o4, %o5, %g2, %g3, 17)
stx %o4, [%o1 + 0xb0] ! k[44, 45]
stx %o5, [%o1 + 0xb8] ! k[46, 47]
ROTL128(%o4, %o5, %g2, %g3, 34)
stx %o4, [%o1 + 0xf0] ! k[60, 61]
stx %o5, [%o1 + 0xf8] ! k[62, 63]
mov (4 * 16 * 4), %o0
2:
add %o1, %o0, %o1
ldd [%o1 + 0x00], %f0
ldd [%o1 + 0x08], %f2
std %f0, [%o3 + 0x00]
std %f2, [%o3 + 0x08]
add %o3, 0x10, %o3
1:
sub %o1, (16 * 4), %o1
ldd [%o1 + 0x38], %f0
ldd [%o1 + 0x30], %f2
ldd [%o1 + 0x28], %f4
ldd [%o1 + 0x20], %f6
ldd [%o1 + 0x18], %f8
ldd [%o1 + 0x10], %f10
std %f0, [%o3 + 0x00]
std %f2, [%o3 + 0x08]
std %f4, [%o3 + 0x10]
std %f6, [%o3 + 0x18]
std %f8, [%o3 + 0x20]
std %f10, [%o3 + 0x28]
ldd [%o1 + 0x08], %f0
ldd [%o1 + 0x00], %f2
std %f0, [%o3 + 0x30]
std %f2, [%o3 + 0x38]
subcc %o0, (16 * 4), %o0
bne,pt %icc, 1b
add %o3, (16 * 4), %o3
std %f2, [%o3 - 0x10]
std %f0, [%o3 - 0x08]
retl
VISExit
ENDPROC(camellia_sparc64_key_expand)
.align 32
ENTRY(camellia_sparc64_crypt)
/* %o0=key, %o1=input, %o2=output, %o3=key_len */
VISEntry
ld [%o1 + 0x00], %f0
ld [%o1 + 0x04], %f1
ld [%o1 + 0x08], %f2
ld [%o1 + 0x0c], %f3
ldd [%o0 + 0x00], %f4
ldd [%o0 + 0x08], %f6
cmp %o3, 16
fxor %f4, %f0, %f0
be 1f
fxor %f6, %f2, %f2
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
ldd [%o0 + 0x30], %f16
ldd [%o0 + 0x38], %f18
ldd [%o0 + 0x40], %f20
ldd [%o0 + 0x48], %f22
add %o0, 0x40, %o0
CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
1:
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
ldd [%o0 + 0x30], %f16
ldd [%o0 + 0x38], %f18
ldd [%o0 + 0x40], %f20
ldd [%o0 + 0x48], %f22
ldd [%o0 + 0x50], %f24
ldd [%o0 + 0x58], %f26
ldd [%o0 + 0x60], %f28
ldd [%o0 + 0x68], %f30
ldd [%o0 + 0x70], %f32
ldd [%o0 + 0x78], %f34
ldd [%o0 + 0x80], %f36
ldd [%o0 + 0x88], %f38
ldd [%o0 + 0x90], %f40
ldd [%o0 + 0x98], %f42
ldd [%o0 + 0xa0], %f44
ldd [%o0 + 0xa8], %f46
ldd [%o0 + 0xb0], %f48
ldd [%o0 + 0xb8], %f50
ldd [%o0 + 0xc0], %f52
ldd [%o0 + 0xc8], %f54
CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
CAMELLIA_6ROUNDS(40, 0, 2)
fxor %f52, %f2, %f2
fxor %f54, %f0, %f0
st %f2, [%o2 + 0x00]
st %f3, [%o2 + 0x04]
st %f0, [%o2 + 0x08]
st %f1, [%o2 + 0x0c]
retl
VISExit
ENDPROC(camellia_sparc64_crypt)
.align 32
ENTRY(camellia_sparc64_load_keys)
/* %o0=key, %o1=key_len */
VISEntry
ldd [%o0 + 0x00], %f4
ldd [%o0 + 0x08], %f6
ldd [%o0 + 0x10], %f8
ldd [%o0 + 0x18], %f10
ldd [%o0 + 0x20], %f12
ldd [%o0 + 0x28], %f14
ldd [%o0 + 0x30], %f16
ldd [%o0 + 0x38], %f18
ldd [%o0 + 0x40], %f20
ldd [%o0 + 0x48], %f22
ldd [%o0 + 0x50], %f24
ldd [%o0 + 0x58], %f26
ldd [%o0 + 0x60], %f28
ldd [%o0 + 0x68], %f30
ldd [%o0 + 0x70], %f32
ldd [%o0 + 0x78], %f34
ldd [%o0 + 0x80], %f36
ldd [%o0 + 0x88], %f38
ldd [%o0 + 0x90], %f40
ldd [%o0 + 0x98], %f42
ldd [%o0 + 0xa0], %f44
ldd [%o0 + 0xa8], %f46
ldd [%o0 + 0xb0], %f48
ldd [%o0 + 0xb8], %f50
ldd [%o0 + 0xc0], %f52
retl
ldd [%o0 + 0xc8], %f54
ENDPROC(camellia_sparc64_load_keys)
.align 32
ENTRY(camellia_sparc64_ecb_crypt_3_grand_rounds)
/* %o0=input, %o1=output, %o2=len, %o3=key */
1: ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
add %o0, 0x10, %o0
fxor %f4, %f0, %f0
fxor %f6, %f2, %f2
CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
CAMELLIA_6ROUNDS(40, 0, 2)
fxor %f52, %f2, %f2
fxor %f54, %f0, %f0
std %f2, [%o1 + 0x00]
std %f0, [%o1 + 0x08]
subcc %o2, 0x10, %o2
bne,pt %icc, 1b
add %o1, 0x10, %o1
retl
nop
ENDPROC(camellia_sparc64_ecb_crypt_3_grand_rounds)
.align 32
ENTRY(camellia_sparc64_ecb_crypt_4_grand_rounds)
/* %o0=input, %o1=output, %o2=len, %o3=key */
1: ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
add %o0, 0x10, %o0
fxor %f4, %f0, %f0
fxor %f6, %f2, %f2
CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
ldd [%o3 + 0xd0], %f8
ldd [%o3 + 0xd8], %f10
ldd [%o3 + 0xe0], %f12
ldd [%o3 + 0xe8], %f14
ldd [%o3 + 0xf0], %f16
ldd [%o3 + 0xf8], %f18
ldd [%o3 + 0x100], %f20
ldd [%o3 + 0x108], %f22
CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
CAMELLIA_6ROUNDS_FL_FLI(40, 0, 2)
CAMELLIA_F(8, 2, 0, 2)
CAMELLIA_F(10, 0, 2, 0)
ldd [%o3 + 0x10], %f8
ldd [%o3 + 0x18], %f10
CAMELLIA_F(12, 2, 0, 2)
CAMELLIA_F(14, 0, 2, 0)
ldd [%o3 + 0x20], %f12
ldd [%o3 + 0x28], %f14
CAMELLIA_F(16, 2, 0, 2)
CAMELLIA_F(18, 0, 2, 0)
ldd [%o3 + 0x30], %f16
ldd [%o3 + 0x38], %f18
fxor %f20, %f2, %f2
fxor %f22, %f0, %f0
ldd [%o3 + 0x40], %f20
ldd [%o3 + 0x48], %f22
std %f2, [%o1 + 0x00]
std %f0, [%o1 + 0x08]
subcc %o2, 0x10, %o2
bne,pt %icc, 1b
add %o1, 0x10, %o1
retl
nop
ENDPROC(camellia_sparc64_ecb_crypt_4_grand_rounds)
.align 32
ENTRY(camellia_sparc64_cbc_encrypt_3_grand_rounds)
/* %o0=input, %o1=output, %o2=len, %o3=key, %o4=IV */
ldd [%o4 + 0x00], %f60
ldd [%o4 + 0x08], %f62
1: ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
add %o0, 0x10, %o0
fxor %f60, %f0, %f0
fxor %f62, %f2, %f2
fxor %f4, %f0, %f0
fxor %f6, %f2, %f2
CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
CAMELLIA_6ROUNDS(40, 0, 2)
fxor %f52, %f2, %f60
fxor %f54, %f0, %f62
std %f60, [%o1 + 0x00]
std %f62, [%o1 + 0x08]
subcc %o2, 0x10, %o2
bne,pt %icc, 1b
add %o1, 0x10, %o1
std %f60, [%o4 + 0x00]
retl
std %f62, [%o4 + 0x08]
ENDPROC(camellia_sparc64_cbc_encrypt_3_grand_rounds)
.align 32
ENTRY(camellia_sparc64_cbc_encrypt_4_grand_rounds)
/* %o0=input, %o1=output, %o2=len, %o3=key, %o4=IV */
ldd [%o4 + 0x00], %f60
ldd [%o4 + 0x08], %f62
1: ldd [%o0 + 0x00], %f0
ldd [%o0 + 0x08], %f2
add %o0, 0x10, %o0
fxor %f60, %f0, %f0
fxor %f62, %f2, %f2
fxor %f4, %f0, %f0
fxor %f6, %f2, %f2
CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
ldd [%o3 + 0xd0], %f8
ldd [%o3 + 0xd8], %f10
ldd [%o3 + 0xe0], %f12
ldd [%o3 + 0xe8], %f14
ldd [%o3 + 0xf0], %f16
ldd [%o3 + 0xf8], %f18
ldd [%o3 + 0x100], %f20
ldd [%o3 + 0x108], %f22
CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
CAMELLIA_6ROUNDS_FL_FLI(40, 0, 2)
CAMELLIA_F(8, 2, 0, 2)
CAMELLIA_F(10, 0, 2, 0)
ldd [%o3 + 0x10], %f8
ldd [%o3 + 0x18], %f10
CAMELLIA_F(12, 2, 0, 2)
CAMELLIA_F(14, 0, 2, 0)
ldd [%o3 + 0x20], %f12
ldd [%o3 + 0x28], %f14
CAMELLIA_F(16, 2, 0, 2)
CAMELLIA_F(18, 0, 2, 0)
ldd [%o3 + 0x30], %f16
ldd [%o3 + 0x38], %f18
fxor %f20, %f2, %f60
fxor %f22, %f0, %f62
ldd [%o3 + 0x40], %f20
ldd [%o3 + 0x48], %f22
std %f60, [%o1 + 0x00]
std %f62, [%o1 + 0x08]
subcc %o2, 0x10, %o2
bne,pt %icc, 1b
add %o1, 0x10, %o1
std %f60, [%o4 + 0x00]
retl
std %f62, [%o4 + 0x08]
ENDPROC(camellia_sparc64_cbc_encrypt_4_grand_rounds)
.align 32
ENTRY(camellia_sparc64_cbc_decrypt_3_grand_rounds)
/* %o0=input, %o1=output, %o2=len, %o3=key, %o4=IV */
ldd [%o4 + 0x00], %f60
ldd [%o4 + 0x08], %f62
1: ldd [%o0 + 0x00], %f56
ldd [%o0 + 0x08], %f58
add %o0, 0x10, %o0
fxor %f4, %f56, %f0
fxor %f6, %f58, %f2
CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
CAMELLIA_6ROUNDS(40, 0, 2)
fxor %f52, %f2, %f2
fxor %f54, %f0, %f0
fxor %f60, %f2, %f2
fxor %f62, %f0, %f0
fsrc2 %f56, %f60
fsrc2 %f58, %f62
std %f2, [%o1 + 0x00]
std %f0, [%o1 + 0x08]
subcc %o2, 0x10, %o2
bne,pt %icc, 1b
add %o1, 0x10, %o1
std %f60, [%o4 + 0x00]
retl
std %f62, [%o4 + 0x08]
ENDPROC(camellia_sparc64_cbc_decrypt_3_grand_rounds)
.align 32
ENTRY(camellia_sparc64_cbc_decrypt_4_grand_rounds)
/* %o0=input, %o1=output, %o2=len, %o3=key, %o4=IV */
ldd [%o4 + 0x00], %f60
ldd [%o4 + 0x08], %f62
1: ldd [%o0 + 0x00], %f56
ldd [%o0 + 0x08], %f58
add %o0, 0x10, %o0
fxor %f4, %f56, %f0
fxor %f6, %f58, %f2
CAMELLIA_6ROUNDS_FL_FLI( 8, 0, 2)
ldd [%o3 + 0xd0], %f8
ldd [%o3 + 0xd8], %f10
ldd [%o3 + 0xe0], %f12
ldd [%o3 + 0xe8], %f14
ldd [%o3 + 0xf0], %f16
ldd [%o3 + 0xf8], %f18
ldd [%o3 + 0x100], %f20
ldd [%o3 + 0x108], %f22
CAMELLIA_6ROUNDS_FL_FLI(24, 0, 2)
CAMELLIA_6ROUNDS_FL_FLI(40, 0, 2)
CAMELLIA_F(8, 2, 0, 2)
CAMELLIA_F(10, 0, 2, 0)
ldd [%o3 + 0x10], %f8
ldd [%o3 + 0x18], %f10
CAMELLIA_F(12, 2, 0, 2)
CAMELLIA_F(14, 0, 2, 0)
ldd [%o3 + 0x20], %f12
ldd [%o3 + 0x28], %f14
CAMELLIA_F(16, 2, 0, 2)
CAMELLIA_F(18, 0, 2, 0)
ldd [%o3 + 0x30], %f16
ldd [%o3 + 0x38], %f18
fxor %f20, %f2, %f2
fxor %f22, %f0, %f0
ldd [%o3 + 0x40], %f20
ldd [%o3 + 0x48], %f22
fxor %f60, %f2, %f2
fxor %f62, %f0, %f0
fsrc2 %f56, %f60
fsrc2 %f58, %f62
std %f2, [%o1 + 0x00]
std %f0, [%o1 + 0x08]
subcc %o2, 0x10, %o2
bne,pt %icc, 1b
add %o1, 0x10, %o1
std %f60, [%o4 + 0x00]
retl
std %f62, [%o4 + 0x08]
ENDPROC(camellia_sparc64_cbc_decrypt_4_grand_rounds)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,967
|
arch/sparc/lib/memscan_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* memscan.S: Optimized memscan for the Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/export.h>
/* In essence, this is just a fancy strlen. */
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
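/* A word may contain a zero byte iff ((w - LO_MAGIC) & HI_MAGIC) is
 * nonzero. A rough C sketch of the word loop below (illustrative
 * only -- the test can also fire on nonzero bytes with the high bit
 * set, which is why mzero_byte_zero re-checks each byte;
 * check_each_byte() is a hypothetical helper):
 *
 *	while (size > 0) {
 *		unsigned int w = *(unsigned int *)p;
 *		if ((w - LO_MAGIC) & HI_MAGIC)
 *			check_each_byte(p);
 *		p += 4;
 *		size -= 4;
 *	}
 */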
.text
.align 4
.globl __memscan_zero, __memscan_generic
.globl memscan
EXPORT_SYMBOL(__memscan_zero)
EXPORT_SYMBOL(__memscan_generic)
__memscan_zero:
/* %o0 = addr, %o1 = size */
cmp %o1, 0
bne,a 1f
andcc %o0, 3, %g0
retl
nop
1:
be mzero_scan_word
sethi %hi(HI_MAGIC), %g2
ldsb [%o0], %g3
mzero_still_not_word_aligned:
cmp %g3, 0
bne 1f
add %o0, 1, %o0
retl
sub %o0, 1, %o0
1:
subcc %o1, 1, %o1
bne,a 1f
andcc %o0, 3, %g0
retl
nop
1:
bne,a mzero_still_not_word_aligned
ldsb [%o0], %g3
sethi %hi(HI_MAGIC), %g2
mzero_scan_word:
or %g2, %lo(HI_MAGIC), %o3
sethi %hi(LO_MAGIC), %g3
or %g3, %lo(LO_MAGIC), %o2
mzero_next_word:
ld [%o0], %g2
mzero_next_word_preloaded:
sub %g2, %o2, %g2
mzero_next_word_preloaded_next:
andcc %g2, %o3, %g0
bne mzero_byte_zero
add %o0, 4, %o0
mzero_check_out_of_fuel:
subcc %o1, 4, %o1
bg,a 1f
ld [%o0], %g2
retl
nop
1:
b mzero_next_word_preloaded_next
sub %g2, %o2, %g2
/* Check every byte. */
mzero_byte_zero:
ldsb [%o0 - 4], %g2
cmp %g2, 0
bne mzero_byte_one
sub %o0, 4, %g3
retl
mov %g3, %o0
mzero_byte_one:
ldsb [%o0 - 3], %g2
cmp %g2, 0
bne,a mzero_byte_two_and_three
ldsb [%o0 - 2], %g2
retl
sub %o0, 3, %o0
mzero_byte_two_and_three:
cmp %g2, 0
bne,a 1f
ldsb [%o0 - 1], %g2
retl
sub %o0, 2, %o0
1:
cmp %g2, 0
bne,a mzero_next_word_preloaded
ld [%o0], %g2
retl
sub %o0, 1, %o0
mzero_found_it:
retl
sub %o0, 2, %o0
memscan:
__memscan_generic:
/* %o0 = addr, %o1 = c, %o2 = size */
cmp %o2, 0
bne,a 0f
ldub [%o0], %g2
b,a 2f
1:
ldub [%o0], %g2
0:
cmp %g2, %o1
be 2f
addcc %o2, -1, %o2
bne 1b
add %o0, 1, %o0
2:
retl
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,179
|
arch/sparc/lib/hweight.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/export.h>
.text
.align 32
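/* Each .popc_3insn_patch entry below records a function address
 * followed by three replacement instructions; on CPUs that implement
 * the popc instruction, boot-time patching overwrites the start of
 * the function with them, so the __sw_hweight* fallback is never
 * reached.
 */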
ENTRY(__arch_hweight8)
sethi %hi(__sw_hweight8), %g1
jmpl %g1 + %lo(__sw_hweight8), %g0
nop
ENDPROC(__arch_hweight8)
EXPORT_SYMBOL(__arch_hweight8)
.section .popc_3insn_patch, "ax"
.word __arch_hweight8
sllx %o0, 64-8, %g1
retl
popc %g1, %o0
.previous
ENTRY(__arch_hweight16)
sethi %hi(__sw_hweight16), %g1
jmpl %g1 + %lo(__sw_hweight16), %g0
nop
ENDPROC(__arch_hweight16)
EXPORT_SYMBOL(__arch_hweight16)
.section .popc_3insn_patch, "ax"
.word __arch_hweight16
sllx %o0, 64-16, %g1
retl
popc %g1, %o0
.previous
ENTRY(__arch_hweight32)
sethi %hi(__sw_hweight32), %g1
jmpl %g1 + %lo(__sw_hweight32), %g0
nop
ENDPROC(__arch_hweight32)
EXPORT_SYMBOL(__arch_hweight32)
.section .popc_3insn_patch, "ax"
.word __arch_hweight32
sllx %o0, 64-32, %g1
retl
popc %g1, %o0
.previous
ENTRY(__arch_hweight64)
sethi %hi(__sw_hweight64), %g1
jmpl %g1 + %lo(__sw_hweight64), %g0
nop
ENDPROC(__arch_hweight64)
EXPORT_SYMBOL(__arch_hweight64)
.section .popc_3insn_patch, "ax"
.word __arch_hweight64
retl
popc %o0, %o0
nop
.previous
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,178
|
arch/sparc/lib/M7memset.S
|
/*
* M7memset.S: SPARC M7 optimized memset.
*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
*/
/*
* M7memset.S: M7 optimized memset.
*
* char *memset(sp, c, n)
*
* Set an array of n chars starting at sp to the character c.
* Return sp.
*
* Fast assembler language version of the following C-program for memset
* which represents the `standard' for the C-library.
*
* void *
* memset(void *sp1, int c, size_t n)
* {
* if (n != 0) {
* char *sp = sp1;
* do {
* *sp++ = (char)c;
* } while (--n != 0);
* }
* return (sp1);
* }
*
* The algorithm is as follows :
*
* For small stores of 6 or fewer bytes, individual bytes are stored.
*
* For stores of fewer than 32 bytes, align the address on a 4-byte boundary.
* Then store as many 4-byte chunks as possible, followed by the trailing bytes.
*
* For sizes greater than 32 bytes, align the address on an 8-byte boundary.
* if (count >= 64) {
* store 8-byte chunks to align the address on a 64-byte boundary
* if (value to be set is zero && count >= MIN_ZERO) {
* Using BIS stores, set the first long word of each
* 64-byte cache line to zero which will also clear the
* other seven long words of the cache line.
* }
* else if (count >= MIN_LOOP) {
* Using BIS stores, set the first long word of each of
* ST_CHUNK cache lines (64 bytes each) before the main
* loop is entered.
* In the main loop, continue pre-setting the first long
* word of each cache line ST_CHUNK lines in advance while
* setting the other seven long words (56 bytes) of each
* cache line until fewer than ST_CHUNK*64 bytes remain.
* Then set the remaining seven long words of each cache
* line that has already had its first long word set.
* }
* store remaining data in 64-byte chunks until less than
* 64 bytes remain.
* }
* Store as many 8-byte chunks as possible, followed by the trailing bytes.
*
* BIS = Block Init Store
* Doing the advance store of the first element of the cache line
* initiates the displacement of a cache line while only using a single
* instruction in the pipeline. That avoids various pipeline delays,
* such as filling the miss buffer. The performance effect is
* similar to prefetching for normal stores.
* The special case for zero fills runs faster and uses fewer instruction
* cycles than the normal memset loop.
*
* We only use BIS for memsets of more than MIN_LOOP bytes because a sequence
* of BIS stores must be followed by a membar #StoreStore. The benefit of
* the BIS stores must be balanced against the cost of the membar operation.
*/
/*
* ASI_STBI_P marks the cache line as "least recently used"
* which means if many threads are active, it has a high chance
* of being pushed out of the cache between the first initializing
* store and the final stores.
* Thus, we use ASI_STBIMRU_P which marks the cache line as
* "most recently used" for all but the last store to the cache line.
*/
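/* A rough C-level sketch of the zero-fill fast path (illustrative
 * only; bis_store() stands in for the "stxa ... ASI_STBI_P" block
 * init store, which zeroes the rest of the 64-byte cache line as a
 * side effect):
 *
 *	while (count >= 64) {
 *		bis_store(line, 0);	// whole line becomes zero
 *		line += 64;
 *		count -= 64;
 *	}
 *	membar_storestore();		// mandatory after BIS stores
 */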
#include <asm/asi.h>
#include <asm/page.h>
#define ASI_STBI_P ASI_BLK_INIT_QUAD_LDD_P
#define ASI_STBIMRU_P ASI_ST_BLKINIT_MRU_P
#define ST_CHUNK 24 /* multiple of 4 due to loop unrolling */
#define MIN_LOOP 16320
#define MIN_ZERO 512
.section ".text"
.align 32
/*
* Define clear_page(dest) as memset(dest, 0, PAGE_SIZE)
* (can create a more optimized version later.)
*/
.globl M7clear_page
.globl M7clear_user_page
M7clear_page: /* clear_page(dest) */
M7clear_user_page:
set PAGE_SIZE, %o1
/* fall through into bzero code */
.size M7clear_page,.-M7clear_page
.size M7clear_user_page,.-M7clear_user_page
/*
* Define bzero(dest, n) as memset(dest, 0, n)
* (can create a more optimized version later.)
*/
.globl M7bzero
M7bzero: /* bzero(dest, size) */
mov %o1, %o2
mov 0, %o1
/* fall through into memset code */
.size M7bzero,.-M7bzero
.global M7memset
.type M7memset, #function
.register %g3, #scratch
M7memset:
mov %o0, %o5 ! copy sp1 before using it
cmp %o2, 7 ! if small counts, just write bytes
bleu,pn %xcc, .wrchar
and %o1, 0xff, %o1 ! o1 is (char)c
sll %o1, 8, %o3
or %o1, %o3, %o1 ! now o1 has 2 bytes of c
sll %o1, 16, %o3
cmp %o2, 32
blu,pn %xcc, .wdalign
or %o1, %o3, %o1 ! now o1 has 4 bytes of c
sllx %o1, 32, %o3
or %o1, %o3, %o1 ! now o1 has 8 bytes of c
.dbalign:
andcc %o5, 7, %o3 ! is sp1 aligned on a 8 byte bound?
bz,pt %xcc, .blkalign ! already long word aligned
sub %o3, 8, %o3 ! -(bytes till long word aligned)
add %o2, %o3, %o2 ! update o2 with new count
! Set -(%o3) bytes till sp1 long word aligned
1: stb %o1, [%o5] ! there is at least 1 byte to set
inccc %o3 ! byte clearing loop
bl,pt %xcc, 1b
inc %o5
! Now sp1 is long word aligned (sp1 is found in %o5)
.blkalign:
cmp %o2, 64 ! check if there are 64 bytes to set
blu,pn %xcc, .wrshort
mov %o2, %o3
andcc %o5, 63, %o3 ! is sp1 block aligned?
bz,pt %xcc, .blkwr ! now block aligned
sub %o3, 64, %o3 ! o3 is -(bytes till block aligned)
add %o2, %o3, %o2 ! o2 is the remainder
! Store -(%o3) bytes till dst is block (64 byte) aligned.
! Use long word stores.
! Recall that dst is already long word aligned
1:
addcc %o3, 8, %o3
stx %o1, [%o5]
bl,pt %xcc, 1b
add %o5, 8, %o5
! Now sp1 is block aligned
.blkwr:
andn %o2, 63, %o4 ! calculate size of blocks in bytes
brz,pn %o1, .wrzero ! special case if c == 0
and %o2, 63, %o3 ! %o3 = bytes left after blk stores.
set MIN_LOOP, %g1
cmp %o4, %g1 ! check there are enough bytes to set
blu,pn %xcc, .short_set ! to justify cost of membar
! must be > pre-cleared lines
nop
! initial cache-clearing stores
! get store pipeline moving
rd %asi, %g3 ! save %asi to be restored later
wr %g0, ASI_STBIMRU_P, %asi
! Primary memset loop for large memsets
.wr_loop:
sub %o5, 8, %o5 ! adjust %o5 for ASI store alignment
mov ST_CHUNK, %g1
.wr_loop_start:
stxa %o1, [%o5+8]%asi
subcc %g1, 4, %g1
stxa %o1, [%o5+8+64]%asi
add %o5, 256, %o5
stxa %o1, [%o5+8-128]%asi
bgu %xcc, .wr_loop_start
stxa %o1, [%o5+8-64]%asi
sub %o5, ST_CHUNK*64, %o5 ! reset %o5
mov ST_CHUNK, %g1
.wr_loop_rest:
stxa %o1, [%o5+8+8]%asi
sub %o4, 64, %o4
stxa %o1, [%o5+16+8]%asi
subcc %g1, 1, %g1
stxa %o1, [%o5+24+8]%asi
stxa %o1, [%o5+32+8]%asi
stxa %o1, [%o5+40+8]%asi
add %o5, 64, %o5
stxa %o1, [%o5-8]%asi
bgu %xcc, .wr_loop_rest
stxa %o1, [%o5]ASI_STBI_P
! If more than ST_CHUNK*64 bytes remain to set, continue
! setting the first long word of each cache line in advance
! to keep the store pipeline moving.
cmp %o4, ST_CHUNK*64
bge,pt %xcc, .wr_loop_start
mov ST_CHUNK, %g1
brz,a,pn %o4, .asi_done
add %o5, 8, %o5 ! restore %o5 offset
.wr_loop_small:
stxa %o1, [%o5+8]%asi
stxa %o1, [%o5+8+8]%asi
stxa %o1, [%o5+16+8]%asi
stxa %o1, [%o5+24+8]%asi
stxa %o1, [%o5+32+8]%asi
subcc %o4, 64, %o4
stxa %o1, [%o5+40+8]%asi
add %o5, 64, %o5
stxa %o1, [%o5-8]%asi
bgu,pt %xcc, .wr_loop_small
stxa %o1, [%o5]ASI_STBI_P
ba .asi_done
add %o5, 8, %o5 ! restore %o5 offset
! Special case loop for zero fill memsets
! For each 64 byte cache line, single STBI to first element
! clears line
.wrzero:
cmp %o4, MIN_ZERO ! check if enough bytes to set
! to pay %asi + membar cost
blu %xcc, .short_set
nop
sub %o4, 256, %o4
.wrzero_loop:
mov 64, %g3
stxa %o1, [%o5]ASI_STBI_P
subcc %o4, 256, %o4
stxa %o1, [%o5+%g3]ASI_STBI_P
add %o5, 256, %o5
sub %g3, 192, %g3
stxa %o1, [%o5+%g3]ASI_STBI_P
add %g3, 64, %g3
bge,pt %xcc, .wrzero_loop
stxa %o1, [%o5+%g3]ASI_STBI_P
add %o4, 256, %o4
brz,pn %o4, .bsi_done
nop
.wrzero_small:
stxa %o1, [%o5]ASI_STBI_P
subcc %o4, 64, %o4
bgu,pt %xcc, .wrzero_small
add %o5, 64, %o5
ba,a .bsi_done
.asi_done:
wr %g3, 0x0, %asi ! restore saved %asi
.bsi_done:
membar #StoreStore ! required by use of Block Store Init
.short_set:
cmp %o4, 64 ! check if 64 bytes to set
blu %xcc, 5f
nop
4: ! set final blocks of 64 bytes
stx %o1, [%o5]
stx %o1, [%o5+8]
stx %o1, [%o5+16]
stx %o1, [%o5+24]
subcc %o4, 64, %o4
stx %o1, [%o5+32]
stx %o1, [%o5+40]
add %o5, 64, %o5
stx %o1, [%o5-16]
bgu,pt %xcc, 4b
stx %o1, [%o5-8]
5:
! Set the remaining long words
.wrshort:
subcc %o3, 8, %o3 ! Can we store any long words?
blu,pn %xcc, .wrchars
and %o2, 7, %o2 ! calc bytes left after long words
6:
subcc %o3, 8, %o3
stx %o1, [%o5] ! store the long words
bgeu,pt %xcc, 6b
add %o5, 8, %o5
.wrchars: ! check for extra chars
brnz %o2, .wrfin
nop
retl
nop
.wdalign:
andcc %o5, 3, %o3 ! is sp1 aligned on a word boundary
bz,pn %xcc, .wrword
andn %o2, 3, %o3 ! create word sized count in %o3
dec %o2 ! decrement count
stb %o1, [%o5] ! clear a byte
b .wdalign
inc %o5 ! next byte
.wrword:
subcc %o3, 4, %o3
st %o1, [%o5] ! 4-byte writing loop
bnz,pt %xcc, .wrword
add %o5, 4, %o5
and %o2, 3, %o2 ! leftover count, if any
.wrchar:
! Set the remaining bytes, if any
brz %o2, .exit
nop
.wrfin:
deccc %o2
stb %o1, [%o5]
bgu,pt %xcc, .wrfin
inc %o5
.exit:
retl ! %o0 was preserved
nop
.size M7memset,.-M7memset
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,179
|
arch/sparc/lib/fls.S
|
/* fls.S: SPARC default fls definition.
*
* SPARC default fls definition, which follows the same algorithm as
* in generic fls(). This function will be boot time patched on T4
* and onward.
*/
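/* For reference, the generic C algorithm this mirrors is:
 *
 *	int fls(unsigned int x)
 *	{
 *		int r = 32;
 *
 *		if (!x)
 *			return 0;
 *		if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
 *		if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
 *		if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
 *		if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
 *		if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
 *		return r;
 *	}
 */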
#include <linux/linkage.h>
#include <asm/export.h>
.text
.register %g2, #scratch
.register %g3, #scratch
ENTRY(fls)
brz,pn %o0, 6f
mov 0, %o1
sethi %hi(0xffff0000), %g3
mov %o0, %g2
andcc %o0, %g3, %g0
be,pt %icc, 8f
mov 32, %o1
sethi %hi(0xff000000), %g3
andcc %g2, %g3, %g0
bne,pt %icc, 3f
sethi %hi(0xf0000000), %g3
sll %o0, 8, %o0
1:
add %o1, -8, %o1
sra %o0, 0, %o0
mov %o0, %g2
2:
sethi %hi(0xf0000000), %g3
3:
andcc %g2, %g3, %g0
bne,pt %icc, 4f
sethi %hi(0xc0000000), %g3
sll %o0, 4, %o0
add %o1, -4, %o1
sra %o0, 0, %o0
mov %o0, %g2
4:
andcc %g2, %g3, %g0
be,a,pt %icc, 7f
sll %o0, 2, %o0
5:
xnor %g0, %o0, %o0
srl %o0, 31, %o0
sub %o1, %o0, %o1
6:
jmp %o7 + 8
sra %o1, 0, %o0
7:
add %o1, -2, %o1
ba,pt %xcc, 5b
sra %o0, 0, %o0
8:
sll %o0, 16, %o0
sethi %hi(0xff000000), %g3
sra %o0, 0, %o0
mov %o0, %g2
andcc %g2, %g3, %g0
bne,pt %icc, 2b
mov 16, %o1
ba,pt %xcc, 1b
sll %o0, 8, %o0
ENDPROC(fls)
EXPORT_SYMBOL(fls)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,952
|
arch/sparc/lib/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* memcpy.S: Sparc optimized memcpy and memmove code
* Hand optimized from GNU libc's memcpy and memmove
* Copyright (C) 1991,1996 Free Software Foundation
* Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/export.h>
#define FUNC(x) \
.globl x; \
.type x,@function; \
.align 4; \
x:
/* Both these macros have to start with exactly the same insn */
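/* (The "be 82f + 4" branch below enters the aligned copy loop past
 * its first instruction, with that first load already issued in the
 * branch delay slot -- hence the requirement that both macros begin
 * with the identical instruction.)
 */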
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
ldd [%src + (offset) + 0x00], %t0; \
ldd [%src + (offset) + 0x08], %t2; \
ldd [%src + (offset) + 0x10], %t4; \
ldd [%src + (offset) + 0x18], %t6; \
st %t0, [%dst + (offset) + 0x00]; \
st %t1, [%dst + (offset) + 0x04]; \
st %t2, [%dst + (offset) + 0x08]; \
st %t3, [%dst + (offset) + 0x0c]; \
st %t4, [%dst + (offset) + 0x10]; \
st %t5, [%dst + (offset) + 0x14]; \
st %t6, [%dst + (offset) + 0x18]; \
st %t7, [%dst + (offset) + 0x1c];
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
ldd [%src + (offset) + 0x00], %t0; \
ldd [%src + (offset) + 0x08], %t2; \
ldd [%src + (offset) + 0x10], %t4; \
ldd [%src + (offset) + 0x18], %t6; \
std %t0, [%dst + (offset) + 0x00]; \
std %t2, [%dst + (offset) + 0x08]; \
std %t4, [%dst + (offset) + 0x10]; \
std %t6, [%dst + (offset) + 0x18];
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
ldd [%src - (offset) - 0x10], %t0; \
ldd [%src - (offset) - 0x08], %t2; \
st %t0, [%dst - (offset) - 0x10]; \
st %t1, [%dst - (offset) - 0x0c]; \
st %t2, [%dst - (offset) - 0x08]; \
st %t3, [%dst - (offset) - 0x04];
#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
ldd [%src - (offset) - 0x10], %t0; \
ldd [%src - (offset) - 0x08], %t2; \
std %t0, [%dst - (offset) - 0x10]; \
std %t2, [%dst - (offset) - 0x08];
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
ldub [%src - (offset) - 0x02], %t0; \
ldub [%src - (offset) - 0x01], %t1; \
stb %t0, [%dst - (offset) - 0x02]; \
stb %t1, [%dst - (offset) - 0x01];
.text
.align 4
FUNC(memmove)
EXPORT_SYMBOL(memmove)
cmp %o0, %o1
mov %o0, %g7
bleu 9f
sub %o0, %o1, %o4
add %o1, %o2, %o3
cmp %o3, %o0
bleu 0f
andcc %o4, 3, %o5
add %o1, %o2, %o1
add %o0, %o2, %o0
sub %o1, 1, %o1
sub %o0, 1, %o0
1: /* reverse_bytes */
ldub [%o1], %o4
subcc %o2, 1, %o2
stb %o4, [%o0]
sub %o1, 1, %o1
bne 1b
sub %o0, 1, %o0
retl
mov %g7, %o0
/* NOTE: This code is executed just for the cases
where %src (= %o1) & 3 != 0.
We need to align it to 4. So, for (%src & 3)
1 we need to do ldub,lduh
2 lduh
3 just ldub
so even if it looks weird, the branches
are correct here. -jj
*/
78: /* dword_align */
andcc %o1, 1, %g0
be 4f
andcc %o1, 2, %g0
ldub [%o1], %g2
add %o1, 1, %o1
stb %g2, [%o0]
sub %o2, 1, %o2
bne 3f
add %o0, 1, %o0
4:
lduh [%o1], %g2
add %o1, 2, %o1
sth %g2, [%o0]
sub %o2, 2, %o2
b 3f
add %o0, 2, %o0
FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
EXPORT_SYMBOL(memcpy)
sub %o0, %o1, %o4
mov %o0, %g7
9:
andcc %o4, 3, %o5
0:
bne 86f
cmp %o2, 15
bleu 90f
andcc %o1, 3, %g0
bne 78b
3:
andcc %o1, 4, %g0
be 2f
mov %o2, %g1
ld [%o1], %o4
sub %g1, 4, %g1
st %o4, [%o0]
add %o1, 4, %o1
add %o0, 4, %o0
2:
andcc %g1, 0xffffff80, %g0
be 3f
andcc %o0, 4, %g0
be 82f + 4
5:
MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
sub %g1, 128, %g1
add %o1, 128, %o1
cmp %g1, 128
bge 5b
add %o0, 128, %o0
3:
andcc %g1, 0x70, %g4
be 80f
andcc %g1, 8, %g0
sethi %hi(80f), %o5
srl %g4, 1, %o4
add %g4, %o4, %o4
add %o1, %g4, %o1
sub %o5, %o4, %o5
jmpl %o5 + %lo(80f), %g0
add %o0, %g4, %o0
79: /* memcpy_table */
MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
80: /* memcpy_table_end */
be 81f
andcc %g1, 4, %g0
ldd [%o1], %g2
add %o0, 8, %o0
st %g2, [%o0 - 0x08]
add %o1, 8, %o1
st %g3, [%o0 - 0x04]
81: /* memcpy_last7 */
be 1f
andcc %g1, 2, %g0
ld [%o1], %g2
add %o1, 4, %o1
st %g2, [%o0]
add %o0, 4, %o0
1:
be 1f
andcc %g1, 1, %g0
lduh [%o1], %g2
add %o1, 2, %o1
sth %g2, [%o0]
add %o0, 2, %o0
1:
be 1f
nop
ldub [%o1], %g2
stb %g2, [%o0]
1:
retl
mov %g7, %o0
82: /* ldd_std */
MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
subcc %g1, 128, %g1
add %o1, 128, %o1
cmp %g1, 128
bge 82b
add %o0, 128, %o0
andcc %g1, 0x70, %g4
be 84f
andcc %g1, 8, %g0
sethi %hi(84f), %o5
add %o1, %g4, %o1
sub %o5, %g4, %o5
jmpl %o5 + %lo(84f), %g0
add %o0, %g4, %o0
83: /* amemcpy_table */
MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
84: /* amemcpy_table_end */
be 85f
andcc %g1, 4, %g0
ldd [%o1], %g2
add %o0, 8, %o0
std %g2, [%o0 - 0x08]
add %o1, 8, %o1
85: /* amemcpy_last7 */
be 1f
andcc %g1, 2, %g0
ld [%o1], %g2
add %o1, 4, %o1
st %g2, [%o0]
add %o0, 4, %o0
1:
be 1f
andcc %g1, 1, %g0
lduh [%o1], %g2
add %o1, 2, %o1
sth %g2, [%o0]
add %o0, 2, %o0
1:
be 1f
nop
ldub [%o1], %g2
stb %g2, [%o0]
1:
retl
mov %g7, %o0
86: /* non_aligned */
cmp %o2, 6
bleu 88f
nop
save %sp, -96, %sp
andcc %i0, 3, %g0
be 61f
andcc %i0, 1, %g0
be 60f
andcc %i0, 2, %g0
ldub [%i1], %g5
add %i1, 1, %i1
stb %g5, [%i0]
sub %i2, 1, %i2
bne 61f
add %i0, 1, %i0
60:
ldub [%i1], %g3
add %i1, 2, %i1
stb %g3, [%i0]
sub %i2, 2, %i2
ldub [%i1 - 1], %g3
add %i0, 2, %i0
stb %g3, [%i0 - 1]
61:
and %i1, 3, %g2
and %i2, 0xc, %g3
and %i1, -4, %i1
cmp %g3, 4
sll %g2, 3, %g4
mov 32, %g2
be 4f
sub %g2, %g4, %l0
blu 3f
cmp %g3, 0x8
be 2f
srl %i2, 2, %g3
ld [%i1], %i3
add %i0, -8, %i0
ld [%i1 + 4], %i4
b 8f
add %g3, 1, %g3
2:
ld [%i1], %i4
add %i0, -12, %i0
ld [%i1 + 4], %i5
add %g3, 2, %g3
b 9f
add %i1, -4, %i1
3:
ld [%i1], %g1
add %i0, -4, %i0
ld [%i1 + 4], %i3
srl %i2, 2, %g3
b 7f
add %i1, 4, %i1
4:
ld [%i1], %i5
cmp %i2, 7
ld [%i1 + 4], %g1
srl %i2, 2, %g3
bleu 10f
add %i1, 8, %i1
ld [%i1], %i3
add %g3, -1, %g3
5:
sll %i5, %g4, %g2
srl %g1, %l0, %g5
or %g2, %g5, %g2
st %g2, [%i0]
7:
ld [%i1 + 4], %i4
sll %g1, %g4, %g2
srl %i3, %l0, %g5
or %g2, %g5, %g2
st %g2, [%i0 + 4]
8:
ld [%i1 + 8], %i5
sll %i3, %g4, %g2
srl %i4, %l0, %g5
or %g2, %g5, %g2
st %g2, [%i0 + 8]
9:
ld [%i1 + 12], %g1
sll %i4, %g4, %g2
srl %i5, %l0, %g5
addcc %g3, -4, %g3
or %g2, %g5, %g2
add %i1, 16, %i1
st %g2, [%i0 + 12]
add %i0, 16, %i0
bne,a 5b
ld [%i1], %i3
10:
sll %i5, %g4, %g2
srl %g1, %l0, %g5
srl %l0, 3, %g3
or %g2, %g5, %g2
sub %i1, %g3, %i1
andcc %i2, 2, %g0
st %g2, [%i0]
be 1f
andcc %i2, 1, %g0
ldub [%i1], %g2
add %i1, 2, %i1
stb %g2, [%i0 + 4]
add %i0, 2, %i0
ldub [%i1 - 1], %g2
stb %g2, [%i0 + 3]
1:
be 1f
nop
ldub [%i1], %g2
stb %g2, [%i0 + 4]
1:
ret
restore %g7, %g0, %o0
88: /* short_end */
and %o2, 0xe, %o3
20:
sethi %hi(89f), %o5
sll %o3, 3, %o4
add %o0, %o3, %o0
sub %o5, %o4, %o5
add %o1, %o3, %o1
jmpl %o5 + %lo(89f), %g0
andcc %o2, 1, %g0
MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
89: /* short_table_end */
be 1f
nop
ldub [%o1], %g2
stb %g2, [%o0]
1:
retl
mov %g7, %o0
90: /* short_aligned_end */
bne 88b
andcc %o2, 8, %g0
be 1f
andcc %o2, 4, %g0
ld [%o1 + 0x00], %g2
ld [%o1 + 0x04], %g3
add %o1, 8, %o1
st %g2, [%o0 + 0x00]
st %g3, [%o0 + 0x04]
add %o0, 8, %o0
1:
b 81b
mov %o2, %g1
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,351
|
arch/sparc/lib/strncmp_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* strncmp.S: Hand optimized Sparc assembly of GCC output from GNU libc
* generic strncmp routine.
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
ENTRY(strncmp)
mov %o0, %g3
mov 0, %o3
cmp %o2, 3
ble 7f
mov 0, %g2
sra %o2, 2, %o4
ldub [%g3], %o3
0:
ldub [%o1], %g2
add %g3, 1, %g3
and %o3, 0xff, %o0
cmp %o0, 0
be 8f
add %o1, 1, %o1
cmp %o0, %g2
be,a 1f
ldub [%g3], %o3
retl
sub %o0, %g2, %o0
1:
ldub [%o1], %g2
add %g3, 1, %g3
and %o3, 0xff, %o0
cmp %o0, 0
be 8f
add %o1, 1, %o1
cmp %o0, %g2
be,a 1f
ldub [%g3], %o3
retl
sub %o0, %g2, %o0
1:
ldub [%o1], %g2
add %g3, 1, %g3
and %o3, 0xff, %o0
cmp %o0, 0
be 8f
add %o1, 1, %o1
cmp %o0, %g2
be,a 1f
ldub [%g3], %o3
retl
sub %o0, %g2, %o0
1:
ldub [%o1], %g2
add %g3, 1, %g3
and %o3, 0xff, %o0
cmp %o0, 0
be 8f
add %o1, 1, %o1
cmp %o0, %g2
be 1f
add %o4, -1, %o4
retl
sub %o0, %g2, %o0
1:
cmp %o4, 0
bg,a 0b
ldub [%g3], %o3
b 7f
and %o2, 3, %o2
9:
ldub [%o1], %g2
add %g3, 1, %g3
and %o3, 0xff, %o0
cmp %o0, 0
be 8f
add %o1, 1, %o1
cmp %o0, %g2
be 7f
add %o2, -1, %o2
8:
retl
sub %o0, %g2, %o0
7:
cmp %o2, 0
bg,a 9b
ldub [%g3], %o3
and %g2, 0xff, %o0
retl
sub %o3, %o0, %o0
ENDPROC(strncmp)
EXPORT_SYMBOL(strncmp)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,491
|
arch/sparc/lib/memscan_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* memscan.S: Optimized memscan for Sparc64.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*/
#include <asm/export.h>
#define HI_MAGIC 0x8080808080808080
#define LO_MAGIC 0x0101010101010101
#define ASI_PL 0x88
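/* The ldxa-with-ASI_PL loads are little-endian, so the lowest
 * addressed byte lands in the least significant byte of the register
 * and check_bytes can walk the word in address order by shifting
 * right. ((w - LO_MAGIC) ^ w) & HI_MAGIC is a variant of the classic
 * zero-byte test; candidate hits are still verified byte by byte.
 */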
.text
.align 32
.globl __memscan_zero, __memscan_generic
.type __memscan_zero,#function
.type __memscan_generic,#function
.globl memscan
EXPORT_SYMBOL(__memscan_zero)
EXPORT_SYMBOL(__memscan_generic)
__memscan_zero:
/* %o0 = bufp, %o1 = size */
brlez,pn %o1, szzero
andcc %o0, 7, %g0
be,pt %icc, we_are_aligned
sethi %hi(HI_MAGIC), %o4
ldub [%o0], %o5
1: subcc %o1, 1, %o1
brz,pn %o5, 10f
add %o0, 1, %o0
be,pn %xcc, szzero
andcc %o0, 7, %g0
bne,a,pn %icc, 1b
ldub [%o0], %o5
we_are_aligned:
ldxa [%o0] ASI_PL, %o5
or %o4, %lo(HI_MAGIC), %o3
sllx %o3, 32, %o4
or %o4, %o3, %o3
srlx %o3, 7, %o2
msloop:
sub %o1, 8, %o1
add %o0, 8, %o0
sub %o5, %o2, %o4
xor %o4, %o5, %o4
andcc %o4, %o3, %g3
bne,pn %xcc, check_bytes
srlx %o4, 32, %g3
brgz,a,pt %o1, msloop
ldxa [%o0] ASI_PL, %o5
check_bytes:
bne,a,pn %icc, 2f
andcc %o5, 0xff, %g0
add %o0, -5, %g2
ba,pt %xcc, 3f
srlx %o5, 32, %g7
2: srlx %o5, 8, %g7
be,pn %icc, 1f
add %o0, -8, %g2
andcc %g7, 0xff, %g0
srlx %g7, 8, %g7
be,pn %icc, 1f
inc %g2
andcc %g7, 0xff, %g0
srlx %g7, 8, %g7
be,pn %icc, 1f
inc %g2
andcc %g7, 0xff, %g0
srlx %g7, 8, %g7
be,pn %icc, 1f
inc %g2
andcc %g3, %o3, %g0
be,a,pn %icc, 2f
mov %o0, %g2
3: andcc %g7, 0xff, %g0
srlx %g7, 8, %g7
be,pn %icc, 1f
inc %g2
andcc %g7, 0xff, %g0
srlx %g7, 8, %g7
be,pn %icc, 1f
inc %g2
andcc %g7, 0xff, %g0
srlx %g7, 8, %g7
be,pn %icc, 1f
inc %g2
andcc %g7, 0xff, %g0
srlx %g7, 8, %g7
be,pn %icc, 1f
inc %g2
2: brgz,a,pt %o1, msloop
ldxa [%o0] ASI_PL, %o5
inc %g2
1: add %o0, %o1, %o0
cmp %g2, %o0
retl
movle %xcc, %g2, %o0
10: retl
sub %o0, 1, %o0
szzero: retl
nop
memscan:
__memscan_generic:
/* %o0 = addr, %o1 = c, %o2 = size */
brz,pn %o2, 3f
add %o0, %o2, %o3
ldub [%o0], %o5
sub %g0, %o2, %o4
1:
cmp %o5, %o1
be,pn %icc, 2f
addcc %o4, 1, %o4
bne,a,pt %xcc, 1b
ldub [%o3 + %o4], %o5
retl
/* The delay slot is the same as the next insn, this is just to make it look more awful */
2:
add %o3, %o4, %o0
retl
sub %o0, 1, %o0
3:
retl
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,728
|
arch/sparc/lib/blockops.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* blockops.S: Common block zero optimized routines.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/export.h>
/* Zero out 64 bytes of memory at (buf + offset).
* Assumes %g1 contains zero.
*/
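/* (On sparc32, std of an even-numbered register stores the full
 * even/odd register pair, so "std %g0" writes %g0:%g1 as one 64-bit
 * doubleword -- which is why %g1 must be zeroed first.)
 */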
#define BLAST_BLOCK(buf, offset) \
std %g0, [buf + offset + 0x38]; \
std %g0, [buf + offset + 0x30]; \
std %g0, [buf + offset + 0x28]; \
std %g0, [buf + offset + 0x20]; \
std %g0, [buf + offset + 0x18]; \
std %g0, [buf + offset + 0x10]; \
std %g0, [buf + offset + 0x08]; \
std %g0, [buf + offset + 0x00];
/* Copy 32 bytes of memory at (src + offset) to
* (dst + offset).
*/
#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
ldd [src + offset + 0x18], t0; \
ldd [src + offset + 0x10], t2; \
ldd [src + offset + 0x08], t4; \
ldd [src + offset + 0x00], t6; \
std t0, [dst + offset + 0x18]; \
std t2, [dst + offset + 0x10]; \
std t4, [dst + offset + 0x08]; \
std t6, [dst + offset + 0x00];
/* Profiling evidence indicates that memset() is
* commonly called for blocks of size PAGE_SIZE
* or (2 * PAGE_SIZE) (the latter for kernel stacks),
* and with a second arg of zero. We assume in
* all of these cases that the buffer is aligned
* on at least an 8 byte boundary.
*
* Therefore we special case them to make them
* as fast as possible.
*/
.text
ENTRY(bzero_1page)
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = buf */
or %g0, %g0, %g1
or %o0, %g0, %o1
or %g0, (PAGE_SIZE >> 8), %g2
1:
BLAST_BLOCK(%o0, 0x00)
BLAST_BLOCK(%o0, 0x40)
BLAST_BLOCK(%o0, 0x80)
BLAST_BLOCK(%o0, 0xc0)
subcc %g2, 1, %g2
bne 1b
add %o0, 0x100, %o0
retl
nop
ENDPROC(bzero_1page)
EXPORT_SYMBOL(bzero_1page)
ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = dst, %o1 = src */
or %g0, (PAGE_SIZE >> 8), %g1
1:
MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
subcc %g1, 1, %g1
add %o0, 0x100, %o0
bne 1b
add %o1, 0x100, %o1
retl
nop
ENDPROC(__copy_1page)
EXPORT_SYMBOL(__copy_1page)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,668
|
arch/sparc/lib/bitops.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* bitops.S: Sparc64 atomic bit operations.
*
* Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
#include <asm/export.h>
.text
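/* All of the routines below share one lock-free pattern: load the
 * 64-bit word, compute the new value, and try to install it with
 * casx, retrying with exponential backoff on contention. A rough C
 * sketch using GCC's atomic builtin in place of the raw casx:
 *
 *	do {
 *		old = *addr;
 *		new = old | mask;	// or old & ~mask, old ^ mask
 *	} while (!__sync_bool_compare_and_swap(addr, old, new));
 */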
ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
and %o0, 63, %g2
sllx %o2, %g2, %o2
add %o1, %g3, %o1
1: ldx [%o1], %g7
or %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_set_bit)
EXPORT_SYMBOL(test_and_set_bit)
ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
and %o0, 63, %g2
sllx %o2, %g2, %o2
add %o1, %g3, %o1
1: ldx [%o1], %g7
andn %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_clear_bit)
EXPORT_SYMBOL(test_and_clear_bit)
ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
and %o0, 63, %g2
sllx %o2, %g2, %o2
add %o1, %g3, %o1
1: ldx [%o1], %g7
xor %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
and %g7, %o2, %g2
clr %o0
movrne %g2, 1, %o0
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_change_bit)
EXPORT_SYMBOL(test_and_change_bit)
ENTRY(set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
and %o0, 63, %g2
sllx %o2, %g2, %o2
add %o1, %g3, %o1
1: ldx [%o1], %g7
or %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(set_bit)
EXPORT_SYMBOL(set_bit)
ENTRY(clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
and %o0, 63, %g2
sllx %o2, %g2, %o2
add %o1, %g3, %o1
1: ldx [%o1], %g7
andn %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(clear_bit)
EXPORT_SYMBOL(clear_bit)
ENTRY(change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
sllx %g1, 3, %g3
and %o0, 63, %g2
sllx %o2, %g2, %o2
add %o1, %g3, %o1
1: ldx [%o1], %g7
xor %g7, %o2, %g1
casx [%o1], %g7, %g1
cmp %g7, %g1
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(change_bit)
EXPORT_SYMBOL(change_bit)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,561
|
arch/sparc/lib/clear_page.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* clear_page.S: UltraSparc optimized clear page.
*
* Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
*/
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
* restore the original TLB entry. This was great for
* disturbing the TLB as little as possible, but it meant
* we had to keep interrupts disabled for a long time.
*
* Now, we simply use the normal TLB loading mechanism,
* and this makes the cpu choose a slot all by itself.
* Then we do a normal TLB flush on exit. We need only
* disable preemption during the clear.
*/
.text
.globl _clear_page
EXPORT_SYMBOL(_clear_page)
_clear_page: /* %o0=dest */
ba,pt %xcc, clear_page_common
clr %o4
/* This thing is pretty important; it shows up
* in the profiles via do_anonymous_page().
*/
.align 32
.globl clear_user_page
EXPORT_SYMBOL(clear_user_page)
clear_user_page: /* %o0=dest, %o1=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o2
sethi %hi(PAGE_OFFSET), %g2
sethi %hi(PAGE_SIZE), %o4
ldx [%g2 + %lo(PAGE_OFFSET)], %g2
sethi %hi(PAGE_KERNEL_LOCKED), %g3
ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
sub %o0, %g2, %g1 ! paddr
and %o1, %o4, %o0 ! vaddr D-cache alias bit
or %g1, %g3, %g1 ! TTE data
sethi %hi(TLBTEMP_BASE), %o3
add %o2, 1, %o4
add %o0, %o3, %o0 ! TTE vaddr
/* Disable preemption. */
mov TLB_TAG_ACCESS, %g3
stw %o4, [%g6 + TI_PRE_COUNT]
/* Load TLB entry. */
rdpr %pstate, %o4
wrpr %o4, PSTATE_IE, %pstate
stxa %o0, [%g3] ASI_DMMU
stxa %g1, [%g0] ASI_DTLB_DATA_IN
sethi %hi(KERNBASE), %g1
flush %g1
wrpr %o4, 0x0, %pstate
mov 1, %o4
clear_page_common:
VISEntryHalf
membar #StoreLoad | #StoreStore | #LoadStore
fzero %f0
sethi %hi(PAGE_SIZE/64), %o1
mov %o0, %g1 ! remember vaddr for tlbflush
fzero %f2
or %o1, %lo(PAGE_SIZE/64), %o1
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
1: stda %f0, [%o0 + %g0] ASI_BLK_P
subcc %o1, 1, %o1
bne,pt %icc, 1b
add %o0, 0x40, %o0
membar #Sync
VISExitHalf
brz,pn %o4, out
nop
stxa %g0, [%g1] ASI_DMMU_DEMAP
membar #Sync
stw %o2, [%g6 + TI_PRE_COUNT]
out: retl
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,446
|
arch/sparc/lib/udivdi3.S
|
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
.text
.align 4
.globl __udivdi3
__udivdi3:
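! 64-bit unsigned division on 32-bit sparc: dividend in %i0:%i1,
! divisor in %i2:%i3, quotient returned in %i0:%i1.  The quotient is
! assembled from the inlined shift-and-subtract udiv_qrnnd steps below;
! when the high word of the divisor is nonzero, __clz_tab is used to
! normalize it first.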
save %sp,-104,%sp
mov %i3,%o3
cmp %i2,0
bne .LL40
mov %i1,%i3
cmp %o3,%i0
bleu .LL41
mov %i3,%o1
! Inlined udiv_qrnnd
mov 32,%g1
subcc %i0,%o3,%g0
1: bcs 5f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
sub %i0,%o3,%i0 ! this kills msb of n
addx %i0,%i0,%i0 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %i0,%o3,%g0
bcs 3f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
b 3f
sub %i0,%o3,%i0 ! this kills msb of n
4: sub %i0,%o3,%i0
5: addxcc %i0,%i0,%i0
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
sub %i0,%o3,%i0
3: xnor %o1,0,%o1
! End of inline udiv_qrnnd
b .LL45
mov 0,%o2
.LL41:
cmp %o3,0
bne .LL77
mov %i0,%o2
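! the high word of the divisor is zero here; if the low word is zero
! as well, execute 1/0 to raise the division-by-zero trap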
mov 1,%o0
mov 0,%o1
wr %g0, 0, %y
udiv %o0, %o1, %o0
mov %o0,%o3
mov %i0,%o2
.LL77:
mov 0,%o4
! Inlined udiv_qrnnd
mov 32,%g1
subcc %o4,%o3,%g0
1: bcs 5f
addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
sub %o4,%o3,%o4 ! this kills msb of n
addx %o4,%o4,%o4 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %o4,%o3,%g0
bcs 3f
addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
b 3f
sub %o4,%o3,%o4 ! this kills msb of n
4: sub %o4,%o3,%o4
5: addxcc %o4,%o4,%o4
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o2,%o2,%o2 ! shift n1n0 and a 0-bit in lsb
sub %o4,%o3,%o4
3: xnor %o2,0,%o2
! End of inline udiv_qrnnd
mov %o4,%i0
mov %i3,%o1
! Inlined udiv_qrnnd
mov 32,%g1
subcc %i0,%o3,%g0
1: bcs 5f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
sub %i0,%o3,%i0 ! this kills msb of n
addx %i0,%i0,%i0 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %i0,%o3,%g0
bcs 3f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
b 3f
sub %i0,%o3,%i0 ! this kills msb of n
4: sub %i0,%o3,%i0
5: addxcc %i0,%i0,%i0
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
sub %i0,%o3,%i0
3: xnor %o1,0,%o1
! End of inline udiv_qrnnd
b .LL78
mov %o1,%l1
.LL40:
cmp %i2,%i0
bleu .LL46
sethi %hi(65535),%o0
b .LL73
mov 0,%o1
.LL46:
or %o0,%lo(65535),%o0
cmp %i2,%o0
bgu .LL53
mov %i2,%o1
cmp %i2,256
addx %g0,-1,%o0
b .LL59
and %o0,8,%o2
.LL53:
sethi %hi(16777215),%o0
or %o0,%lo(16777215),%o0
cmp %o1,%o0
bgu .LL59
mov 24,%o2
mov 16,%o2
.LL59:
srl %o1,%o2,%o1
sethi %hi(__clz_tab),%o0
or %o0,%lo(__clz_tab),%o0
ldub [%o1+%o0],%o0
add %o0,%o2,%o0
mov 32,%o1
subcc %o1,%o0,%o2
bne,a .LL67
mov 32,%o0
cmp %i0,%i2
bgu .LL69
cmp %i3,%o3
blu .LL73
mov 0,%o1
.LL69:
b .LL73
mov 1,%o1
.LL67:
sub %o0,%o2,%o0
sll %i2,%o2,%i2
srl %o3,%o0,%o1
or %i2,%o1,%i2
sll %o3,%o2,%o3
srl %i0,%o0,%o1
sll %i0,%o2,%i0
srl %i3,%o0,%o0
or %i0,%o0,%i0
sll %i3,%o2,%i3
mov %i0,%o5
mov %o1,%o4
! Inlined udiv_qrnnd
mov 32,%g1
subcc %o4,%i2,%g0
1: bcs 5f
addxcc %o5,%o5,%o5 ! shift n1n0 and a q-bit in lsb
sub %o4,%i2,%o4 ! this kills msb of n
addx %o4,%o4,%o4 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %o4,%i2,%g0
bcs 3f
addxcc %o5,%o5,%o5 ! shift n1n0 and a q-bit in lsb
b 3f
sub %o4,%i2,%o4 ! this kills msb of n
4: sub %o4,%i2,%o4
5: addxcc %o4,%o4,%o4
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o5,%o5,%o5 ! shift n1n0 and a 0-bit in lsb
sub %o4,%i2,%o4
3: xnor %o5,0,%o5
! End of inline udiv_qrnnd
mov %o4,%i0
mov %o5,%o1
! Inlined umul_ppmm
wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr
sra %o3,31,%g2 ! Do not move this insn
and %o1,%g2,%g2 ! Do not move this insn
andcc %g0,0,%g1 ! Do not move this insn
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,%o3,%g1
mulscc %g1,0,%g1
add %g1,%g2,%o0
rd %y,%o2
cmp %o0,%i0
bgu,a .LL73
add %o1,-1,%o1
bne,a .LL45
mov 0,%o2
cmp %o2,%i3
bleu .LL45
mov 0,%o2
add %o1,-1,%o1
.LL73:
mov 0,%o2
.LL45:
mov %o1,%l1
.LL78:
mov %o2,%l0
mov %l0,%i0
mov %l1,%i1
ret
restore
|
AirFortressIlikara/LS2K0300-linux-4.19
| 15,610
|
arch/sparc/lib/NG2memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* NG2memcpy.S: Niagara-2 optimized memcpy.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
#else
#define ASI_PNF 0x82
#define ASI_BLK_P 0xf0
#define ASI_BLK_INIT_QUAD_LDD_P 0xe2
#define FPRS_FEF 0x04
#ifdef MEMCPY_DEBUG
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
clr %g1; clr %g2; clr %g3; clr %g5; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#define GLOBAL_SPARE %g5
#endif
#ifndef STORE_ASI
#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P
#else
#define STORE_ASI 0x80 /* ASI_P */
#endif
#endif
#ifndef EX_LD
#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_P, dest
#endif
#ifndef STORE
#ifndef MEMCPY_DEBUG
#define STORE(type,src,addr) type src, [addr]
#else
#define STORE(type,src,addr) type##a src, [addr] 0x80
#endif
#endif
#ifndef STORE_BLK
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
#endif
#ifndef STORE_INIT
#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI
#endif
#ifndef FUNC_NAME
#define FUNC_NAME NG2memcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC
#define XCC xcc
#endif
#define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
faligndata %x0, %x1, %f0; \
faligndata %x1, %x2, %f2; \
faligndata %x2, %x3, %f4; \
faligndata %x3, %x4, %f6; \
faligndata %x4, %x5, %f8; \
faligndata %x5, %x6, %f10; \
faligndata %x6, %x7, %f12; \
faligndata %x7, %x8, %f14;
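/* FREG_FROB: eight faligndata ops across nine input registers produce
 * one destination-aligned 64-byte store block in %f0-%f14.
 */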
#define FREG_MOVE_1(x0) \
fsrc2 %x0, %f0;
#define FREG_MOVE_2(x0, x1) \
fsrc2 %x0, %f0; \
fsrc2 %x1, %f2;
#define FREG_MOVE_3(x0, x1, x2) \
fsrc2 %x0, %f0; \
fsrc2 %x1, %f2; \
fsrc2 %x2, %f4;
#define FREG_MOVE_4(x0, x1, x2, x3) \
fsrc2 %x0, %f0; \
fsrc2 %x1, %f2; \
fsrc2 %x2, %f4; \
fsrc2 %x3, %f6;
#define FREG_MOVE_5(x0, x1, x2, x3, x4) \
fsrc2 %x0, %f0; \
fsrc2 %x1, %f2; \
fsrc2 %x2, %f4; \
fsrc2 %x3, %f6; \
fsrc2 %x4, %f8;
#define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \
fsrc2 %x0, %f0; \
fsrc2 %x1, %f2; \
fsrc2 %x2, %f4; \
fsrc2 %x3, %f6; \
fsrc2 %x4, %f8; \
fsrc2 %x5, %f10;
#define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \
fsrc2 %x0, %f0; \
fsrc2 %x1, %f2; \
fsrc2 %x2, %f4; \
fsrc2 %x3, %f6; \
fsrc2 %x4, %f8; \
fsrc2 %x5, %f10; \
fsrc2 %x6, %f12;
#define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \
fsrc2 %x0, %f0; \
fsrc2 %x1, %f2; \
fsrc2 %x2, %f4; \
fsrc2 %x3, %f6; \
fsrc2 %x4, %f8; \
fsrc2 %x5, %f10; \
fsrc2 %x6, %f12; \
fsrc2 %x7, %f14;
#define FREG_LOAD_1(base, x0) \
EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1)
#define FREG_LOAD_2(base, x0, x1) \
EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1);
#define FREG_LOAD_3(base, x0, x1, x2) \
EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1);
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1);
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1);
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1);
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \
EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1);
.register %g2,#scratch
.register %g3,#scratch
.text
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
__restore_fp:
VISExitHalf
__restore_asi:
retl
wr %g0, ASI_AIUS, %asi
ENTRY(NG2_retl_o2)
ba,pt %xcc, __restore_asi
mov %o2, %o0
ENDPROC(NG2_retl_o2)
ENTRY(NG2_retl_o2_plus_1)
ba,pt %xcc, __restore_asi
add %o2, 1, %o0
ENDPROC(NG2_retl_o2_plus_1)
ENTRY(NG2_retl_o2_plus_4)
ba,pt %xcc, __restore_asi
add %o2, 4, %o0
ENDPROC(NG2_retl_o2_plus_4)
ENTRY(NG2_retl_o2_plus_8)
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(NG2_retl_o2_plus_8)
ENTRY(NG2_retl_o2_plus_o4_plus_1)
add %o4, 1, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG2_retl_o2_plus_o4_plus_1)
ENTRY(NG2_retl_o2_plus_o4_plus_8)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG2_retl_o2_plus_o4_plus_8)
ENTRY(NG2_retl_o2_plus_o4_plus_16)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG2_retl_o2_plus_o4_plus_16)
ENTRY(NG2_retl_o2_plus_g1_fp)
ba,pt %xcc, __restore_fp
add %o2, %g1, %o0
ENDPROC(NG2_retl_o2_plus_g1_fp)
ENTRY(NG2_retl_o2_plus_g1_plus_64_fp)
add %g1, 64, %g1
ba,pt %xcc, __restore_fp
add %o2, %g1, %o0
ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp)
ENTRY(NG2_retl_o2_plus_g1_plus_1)
add %g1, 1, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(NG2_retl_o2_plus_g1_plus_1)
ENTRY(NG2_retl_o2_and_7_plus_o4)
and %o2, 7, %o2
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG2_retl_o2_and_7_plus_o4)
ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8)
and %o2, 7, %o2
add %o4, 8, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8)
#endif
.align 64
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
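	/* A length with bit 31 or above set is almost certainly a
	 * negative int passed by mistake; trap early instead.
	 */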
srlx %o2, 31, %g2
cmp %g2, 0
tne %xcc, 5
PREAMBLE
mov %o0, %o3
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, GLOBAL_SPARE
cmp %o2, 16
blu,a,pn %XCC, 80f
or GLOBAL_SPARE, %o2, GLOBAL_SPARE
/* 2 blocks (128 bytes) is the minimum we can do the block
* copy with. We need to ensure that we'll iterate at least
* once in the block copy loop. At worst we'll need to align
* the destination to a 64-byte boundary which can chew up
* to (64 - 1) bytes from the length before we perform the
* block copy loop.
*
* However, the cut-off point, performance wise, is around
* 4 64-byte blocks.
*/
cmp %o2, (4 * 64)
blu,pt %XCC, 75f
andcc GLOBAL_SPARE, 0x7, %g0
/* %o0: dst
* %o1: src
* %o2: len (known to be >= 128)
*
* The block copy loops can use %o4, %g2, %g3 as
* temporaries while copying the data. %o5 must
* be preserved between VISEntryHalf and VISExitHalf
*/
LOAD(prefetch, %o1 + 0x000, #one_read)
LOAD(prefetch, %o1 + 0x040, #one_read)
LOAD(prefetch, %o1 + 0x080, #one_read)
/* Align destination on 64-byte boundary. */
andcc %o0, (64 - 1), %o4
be,pt %XCC, 2f
sub %o4, 64, %o4
sub %g0, %o4, %o4 ! bytes to align dst
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1)
EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
2:
/* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve
* o5 from here until we hit VISExitHalf.
*/
VISEntryHalf
membar #Sync
alignaddr %o1, %g0, %g0
add %o1, (64 - 1), %o4
andn %o4, (64 - 1), %o4
andn %o2, (64 - 1), %g1
sub %o2, %g1, %o2
and %o1, (64 - 1), %g2
add %o1, %g1, %o1
sub %o0, %o4, %g3
brz,pt %g2, 190f
cmp %g2, 32
blu,a 5f
cmp %g2, 16
cmp %g2, 48
blu,a 4f
cmp %g2, 40
cmp %g2, 56
blu 170f
nop
ba,a,pt %xcc, 180f
nop
4: /* 32 <= low bits < 48 */
blu 150f
nop
ba,a,pt %xcc, 160f
nop
5: /* 0 < low bits < 32 */
blu,a 6f
cmp %g2, 8
cmp %g2, 24
blu 130f
nop
ba,a,pt %xcc, 140f
nop
6: /* 0 < low bits < 16 */
bgeu 120f
nop
/* fall through for 0 < low bits < 8 */
110: sub %o4, 64, %g2
EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1)
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
ba,pt %xcc, 195f
nop
120: sub %o4, 56, %g2
FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
ba,pt %xcc, 195f
nop
130: sub %o4, 48, %g2
FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
ba,pt %xcc, 195f
nop
140: sub %o4, 40, %g2
FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_5(f22, f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
ba,pt %xcc, 195f
nop
150: sub %o4, 32, %g2
FREG_LOAD_4(%g2, f0, f2, f4, f6)
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_4(f24, f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
ba,pt %xcc, 195f
nop
160: sub %o4, 24, %g2
FREG_LOAD_3(%g2, f0, f2, f4)
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_3(f26, f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
ba,pt %xcc, 195f
nop
170: sub %o4, 16, %g2
FREG_LOAD_2(%g2, f0, f2)
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_2(f28, f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
ba,pt %xcc, 195f
nop
180: sub %o4, 8, %g2
FREG_LOAD_1(%g2, f0)
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
FREG_MOVE_1(f30)
subcc %g1, 64, %g1
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
ba,pt %xcc, 195f
nop
190:
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
subcc %g1, 64, %g1
EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64)
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64)
add %o4, 64, %o4
bne,pt %xcc, 1b
LOAD(prefetch, %o4 + 64, #one_read)
195:
add %o4, %g3, %o0
membar #Sync
VISExitHalf
/* %o2 contains any final bytes still needed to be copied
* over. If anything is left, we copy it one byte at a time.
*/
brz,pt %o2, 85f
sub %o0, %o1, GLOBAL_SPARE
ba,a,pt %XCC, 90f
nop
.align 64
75: /* 16 < len <= 64 */
bne,pn %XCC, 75f
sub %o0, %o1, GLOBAL_SPARE
72:
andn %o2, 0xf, %o4
and %o2, 0xf, %o2
1: subcc %o4, 0x10, %o4
EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x08, %o1
EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16)
sub %o1, 0x08, %o1
EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16)
add %o1, 0x8, %o1
EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8)
EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4)
EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
nop
75:
andcc %o0, 0x7, %g1
sub %g1, 0x8, %g1
be,pn %icc, 2f
sub %g0, %g1, %g1
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1)
EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
2: add %o1, GLOBAL_SPARE, %o0
andcc %o1, 0x7, %g1
bne,pt %icc, 8f
sll %g1, 3, %g1
cmp %o2, 16
bgeu,pt %icc, 72b
nop
ba,a,pt %xcc, 73b
8: mov 64, GLOBAL_SPARE
andn %o1, 0x7, %o1
EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2)
sub GLOBAL_SPARE, %g1, GLOBAL_SPARE
andn %o2, 0x7, %o4
sllx %g2, %g1, %g2
1: add %o1, 0x8, %o1
EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4)
subcc %o4, 0x8, %o4
srlx %g3, GLOBAL_SPARE, %o5
or %o5, %g2, %o5
EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
srl %g1, 3, %g1
andcc %o2, 0x7, %o2
be,pn %icc, 85f
add %o1, %g1, %o1
ba,pt %xcc, 90f
sub %o0, %o1, GLOBAL_SPARE
.align 64
80: /* 0 < len <= 16 */
andcc GLOBAL_SPARE, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, GLOBAL_SPARE
1:
subcc %o2, 4, %o2
EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4)
EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
85: retl
mov EX_RETVAL(%o3), %o0
.align 32
90:
subcc %o2, 1, %o2
EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1)
EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
mov EX_RETVAL(%o3), %o0
.size FUNC_NAME, .-FUNC_NAME
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,144
|
arch/sparc/lib/locks.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* locks.S: SMP low-level lock primitives on Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Anton Blanchard (anton@progsoc.uts.edu.au)
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/spinlock.h>
#include <asm/export.h>
.text
.align 4
/* Read/writer locks, as usual this is overly clever to make it
* as fast as possible.
*/
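/* Word layout: the byte at [lock + 3] (the LSB on big-endian sparc) is
 * the writer/spin byte taken with ldstub; the reader count lives in the
 * bytes above it.  While the spin byte is held it reads as 0xff, so the
 * "add 1" in the reader-enter path both carries the byte back to zero
 * (releasing it) and bumps the count by 0x100, and the "sub 0x1ff" on
 * exit drops one reader and releases the byte in a single store.
 */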
/* caches... */
___rw_read_enter_spin_on_wlock:
orcc %g2, 0x0, %g0
be,a ___rw_read_enter
ldstub [%g1 + 3], %g2
b ___rw_read_enter_spin_on_wlock
ldub [%g1 + 3], %g2
___rw_read_try_spin_on_wlock:
andcc %g2, 0xff, %g0
be,a ___rw_read_try
ldstub [%g1 + 3], %g2
xnorcc %g2, 0x0, %o0 /* if g2 is ~0, set o0 to 0 and bugger off */
bne,a ___rw_read_enter_spin_on_wlock
ld [%g1], %g2
retl
mov %g4, %o7
___rw_read_exit_spin_on_wlock:
orcc %g2, 0x0, %g0
be,a ___rw_read_exit
ldstub [%g1 + 3], %g2
b ___rw_read_exit_spin_on_wlock
ldub [%g1 + 3], %g2
___rw_write_enter_spin_on_wlock:
orcc %g2, 0x0, %g0
be,a ___rw_write_enter
ldstub [%g1 + 3], %g2
b ___rw_write_enter_spin_on_wlock
ld [%g1], %g2
.globl ___rw_read_enter
EXPORT_SYMBOL(___rw_read_enter)
___rw_read_enter:
orcc %g2, 0x0, %g0
bne,a ___rw_read_enter_spin_on_wlock
ldub [%g1 + 3], %g2
ld [%g1], %g2
add %g2, 1, %g2
st %g2, [%g1]
retl
mov %g4, %o7
.globl ___rw_read_exit
EXPORT_SYMBOL(___rw_read_exit)
___rw_read_exit:
orcc %g2, 0x0, %g0
bne,a ___rw_read_exit_spin_on_wlock
ldub [%g1 + 3], %g2
ld [%g1], %g2
sub %g2, 0x1ff, %g2
st %g2, [%g1]
retl
mov %g4, %o7
.globl ___rw_read_try
EXPORT_SYMBOL(___rw_read_try)
___rw_read_try:
orcc %g2, 0x0, %g0
bne ___rw_read_try_spin_on_wlock
ld [%g1], %g2
add %g2, 1, %g2
st %g2, [%g1]
set 1, %o1
retl
mov %g4, %o7
.globl ___rw_write_enter
EXPORT_SYMBOL(___rw_write_enter)
___rw_write_enter:
orcc %g2, 0x0, %g0
bne ___rw_write_enter_spin_on_wlock
ld [%g1], %g2
andncc %g2, 0xff, %g0
bne,a ___rw_write_enter_spin_on_wlock
stb %g0, [%g1 + 3]
retl
mov %g4, %o7
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,450
|
arch/sparc/lib/GENmemcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* GENmemcpy.S: Generic sparc64 memcpy.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#ifdef __KERNEL__
#include <linux/linkage.h>
#define GLOBAL_SPARE %g7
#else
#define GLOBAL_SPARE %g5
#endif
#ifndef EX_LD
#define EX_LD(x,y) x
#endif
#ifndef EX_ST
#define EX_ST(x,y) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef STORE
#define STORE(type,src,addr) type src, [addr]
#endif
#ifndef FUNC_NAME
#define FUNC_NAME GENmemcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC
#define XCC xcc
#endif
.register %g2,#scratch
.register %g3,#scratch
.text
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
ENTRY(GEN_retl_o4_1)
add %o4, %o2, %o4
retl
add %o4, 1, %o0
ENDPROC(GEN_retl_o4_1)
ENTRY(GEN_retl_g1_8)
add %g1, %o2, %g1
retl
add %g1, 8, %o0
ENDPROC(GEN_retl_g1_8)
ENTRY(GEN_retl_o2_4)
retl
add %o2, 4, %o0
ENDPROC(GEN_retl_o2_4)
ENTRY(GEN_retl_o2_1)
retl
add %o2, 1, %o0
ENDPROC(GEN_retl_o2_1)
#endif
.align 64
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
srlx %o2, 31, %g2
cmp %g2, 0
tne %XCC, 5
PREAMBLE
mov %o0, GLOBAL_SPARE
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
cmp %o2, 16
blu,a,pn %XCC, 80f
or %o3, %o2, %o3
xor %o0, %o1, %o4
andcc %o4, 0x7, %g0
bne,a,pn %XCC, 90f
sub %o0, %o1, %o3
and %o0, 0x7, %o4
sub %o4, 0x8, %o4
sub %g0, %o4, %o4
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1)
EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1)
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
andn %o2, 0x7, %g1
sub %o2, %g1, %o2
1: subcc %g1, 0x8, %g1
EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8)
EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8)
add %o1, 0x8, %o1
bne,pt %XCC, 1b
add %o0, 0x8, %o0
brz,pt %o2, 85f
sub %o0, %o1, %o3
ba,a,pt %XCC, 90f
.align 64
80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, %o3
1:
subcc %o2, 4, %o2
EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4)
EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
85: retl
mov EX_RETVAL(GLOBAL_SPARE), %o0
.align 32
90:
subcc %o2, 1, %o2
EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1)
EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
mov EX_RETVAL(GLOBAL_SPARE), %o0
.size FUNC_NAME, .-FUNC_NAME
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,298
|
arch/sparc/lib/ffs.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/export.h>
.register %g2,#scratch
.text
.align 32
ENTRY(ffs)
brnz,pt %o0, 1f
mov 1, %o1
retl
clr %o0
nop
nop
ENTRY(__ffs)
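	/* Binary search for the lowest set bit: at each step test whether
	 * the low half (32, then 16, 8, 4, 2 bits) is all zero, and if so
	 * shift it out and accumulate the skipped width into the result.
	 */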
sllx %o0, 32, %g1 /* 1 */
srlx %o0, 32, %g2
clr %o1 /* 2 */
movrz %g1, %g2, %o0
movrz %g1, 32, %o1 /* 3 */
1: clr %o2
sllx %o0, (64 - 16), %g1 /* 4 */
srlx %o0, 16, %g2
movrz %g1, %g2, %o0 /* 5 */
clr %o3
movrz %g1, 16, %o2 /* 6 */
clr %o4
and %o0, 0xff, %g1 /* 7 */
srlx %o0, 8, %g2
movrz %g1, %g2, %o0 /* 8 */
clr %o5
movrz %g1, 8, %o3 /* 9 */
add %o2, %o1, %o2
and %o0, 0xf, %g1 /* 10 */
srlx %o0, 4, %g2
movrz %g1, %g2, %o0 /* 11 */
add %o2, %o3, %o2
movrz %g1, 4, %o4 /* 12 */
and %o0, 0x3, %g1 /* 13 */
srlx %o0, 2, %g2
movrz %g1, %g2, %o0 /* 14 */
add %o2, %o4, %o2
movrz %g1, 2, %o5 /* 15 */
and %o0, 0x1, %g1 /* 16 */
add %o2, %o5, %o2 /* 17 */
xor %g1, 0x1, %g1
retl /* 18 */
add %o2, %g1, %o0
ENDPROC(ffs)
ENDPROC(__ffs)
EXPORT_SYMBOL(__ffs)
EXPORT_SYMBOL(ffs)
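/* On cpus with a hardware popc instruction, the patch section below
 * replaces these routines with the classic trick: ~(x ^ -x) has ones in
 * bit 0 through the lowest set bit of x, so popc of that mask yields
 * ffs(x) directly, and ffs(x) - 1 gives __ffs(x).
 */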
.section .popc_6insn_patch, "ax"
.word ffs
brz,pn %o0, 98f
neg %o0, %g1
xnor %o0, %g1, %o1
popc %o1, %o0
98: retl
nop
.word __ffs
neg %o0, %g1
xnor %o0, %g1, %o1
popc %o1, %o0
retl
sub %o0, 1, %o0
nop
.previous
|
AirFortressIlikara/LS2K0300-linux-4.19
| 13,406
|
arch/sparc/lib/NGmemcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* NGmemcpy.S: Niagara optimized memcpy.
*
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/thread_info.h>
#define GLOBAL_SPARE %g7
#define RESTORE_ASI(TMP) \
ldub [%g6 + TI_CURRENT_DS], TMP; \
wr TMP, 0x0, %asi;
#else
#define GLOBAL_SPARE %g5
#define RESTORE_ASI(TMP) \
wr %g0, ASI_PNF, %asi
#endif
#ifdef __sparc_v9__
#define SAVE_AMOUNT 128
#else
#define SAVE_AMOUNT 64
#endif
#ifndef STORE_ASI
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P
#endif
#ifndef EX_LD
#define EX_LD(x,y) x
#endif
#ifndef EX_ST
#define EX_ST(x,y) x
#endif
#ifndef LOAD
#ifndef MEMCPY_DEBUG
#define LOAD(type,addr,dest) type [addr], dest
#else
#define LOAD(type,addr,dest) type##a [addr] 0x80, dest
#endif
#endif
#ifndef LOAD_TWIN
#define LOAD_TWIN(addr_reg,dest0,dest1) \
ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_P, dest0
#endif
#ifndef STORE
#define STORE(type,src,addr) type src, [addr]
#endif
#ifndef STORE_INIT
#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_INIT(src,addr) stxa src, [addr] %asi
#else
#define STORE_INIT(src,addr) stx src, [addr + 0x00]
#endif
#endif
#ifndef FUNC_NAME
#define FUNC_NAME NGmemcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC
#define XCC xcc
#endif
.register %g2,#scratch
.register %g3,#scratch
.text
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
__restore_asi:
ret
wr %g0, ASI_AIUS, %asi
restore
ENTRY(NG_ret_i2_plus_i4_plus_1)
ba,pt %xcc, __restore_asi
add %i2, %i5, %i0
ENDPROC(NG_ret_i2_plus_i4_plus_1)
ENTRY(NG_ret_i2_plus_g1)
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1)
ENTRY(NG_ret_i2_plus_g1_minus_8)
sub %g1, 8, %g1
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_minus_8)
ENTRY(NG_ret_i2_plus_g1_minus_16)
sub %g1, 16, %g1
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_minus_16)
ENTRY(NG_ret_i2_plus_g1_minus_24)
sub %g1, 24, %g1
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_minus_24)
ENTRY(NG_ret_i2_plus_g1_minus_32)
sub %g1, 32, %g1
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_minus_32)
ENTRY(NG_ret_i2_plus_g1_minus_40)
sub %g1, 40, %g1
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_minus_40)
ENTRY(NG_ret_i2_plus_g1_minus_48)
sub %g1, 48, %g1
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_minus_48)
ENTRY(NG_ret_i2_plus_g1_minus_56)
sub %g1, 56, %g1
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_minus_56)
ENTRY(NG_ret_i2_plus_i4)
ba,pt %xcc, __restore_asi
add %i2, %i4, %i0
ENDPROC(NG_ret_i2_plus_i4)
ENTRY(NG_ret_i2_plus_i4_minus_8)
sub %i4, 8, %i4
ba,pt %xcc, __restore_asi
add %i2, %i4, %i0
ENDPROC(NG_ret_i2_plus_i4_minus_8)
ENTRY(NG_ret_i2_plus_8)
ba,pt %xcc, __restore_asi
add %i2, 8, %i0
ENDPROC(NG_ret_i2_plus_8)
ENTRY(NG_ret_i2_plus_4)
ba,pt %xcc, __restore_asi
add %i2, 4, %i0
ENDPROC(NG_ret_i2_plus_4)
ENTRY(NG_ret_i2_plus_1)
ba,pt %xcc, __restore_asi
add %i2, 1, %i0
ENDPROC(NG_ret_i2_plus_1)
ENTRY(NG_ret_i2_plus_g1_plus_1)
add %g1, 1, %g1
ba,pt %xcc, __restore_asi
add %i2, %g1, %i0
ENDPROC(NG_ret_i2_plus_g1_plus_1)
ENTRY(NG_ret_i2)
ba,pt %xcc, __restore_asi
mov %i2, %i0
ENDPROC(NG_ret_i2)
ENTRY(NG_ret_i2_and_7_plus_i4)
and %i2, 7, %i2
ba,pt %xcc, __restore_asi
add %i2, %i4, %i0
ENDPROC(NG_ret_i2_and_7_plus_i4)
#endif
.align 64
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
PREAMBLE
save %sp, -SAVE_AMOUNT, %sp
srlx %i2, 31, %g2
cmp %g2, 0
tne %xcc, 5
mov %i0, %o0
cmp %i2, 0
be,pn %XCC, 85f
or %o0, %i1, %i3
cmp %i2, 16
blu,a,pn %XCC, 80f
or %i3, %i2, %i3
/* 2 blocks (128 bytes) is the minimum we can do the block
* copy with. We need to ensure that we'll iterate at least
* once in the block copy loop. At worst we'll need to align
* the destination to a 64-byte boundary which can chew up
* to (64 - 1) bytes from the length before we perform the
* block copy loop.
*/
cmp %i2, (2 * 64)
blu,pt %XCC, 70f
andcc %i3, 0x7, %g0
/* %o0: dst
* %i1: src
* %i2: len (known to be >= 128)
*
* The block copy loops will use %i4/%i5,%g2/%g3 as
* temporaries while copying the data.
*/
LOAD(prefetch, %i1, #one_read)
wr %g0, STORE_ASI, %asi
/* Align destination on 64-byte boundary. */
andcc %o0, (64 - 1), %i4
be,pt %XCC, 2f
sub %i4, 64, %i4
sub %g0, %i4, %i4 ! bytes to align dst
sub %i2, %i4, %i2
1: subcc %i4, 1, %i4
EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1)
EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1)
add %i1, 1, %i1
bne,pt %XCC, 1b
add %o0, 1, %o0
/* If the source is on a 16-byte boundary we can do
* the direct block copy loop. If it is 8-byte aligned
* we can do the 16-byte loads offset by -8 bytes and the
* init stores offset by one register.
*
* If the source is not even 8-byte aligned, we need to do
* shifting and masking (basically integer faligndata).
*
* The careful bit with init stores is that if we store
* to any part of the cache line we have to store the whole
* cacheline else we can end up with corrupt L2 cache line
* contents. Since the loop works on 64-bytes of 64-byte
* aligned store data at a time, this is easy to ensure.
*/
2:
andcc %i1, (16 - 1), %i4
andn %i2, (64 - 1), %g1 ! block copy loop iterator
be,pt %XCC, 50f
sub %i2, %g1, %i2 ! final sub-block copy bytes
cmp %i4, 8
be,pt %XCC, 10f
sub %i1, %i4, %i1
/* Neither 8-byte nor 16-byte aligned, shift and mask. */
and %i4, 0x7, GLOBAL_SPARE
sll GLOBAL_SPARE, 3, GLOBAL_SPARE
mov 64, %i5
EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1)
sub %i5, GLOBAL_SPARE, %i5
mov 16, %o4
mov 32, %o5
mov 48, %o7
mov 64, %i3
bg,pn %XCC, 9f
nop
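/* MIX_THREE_WORDS: an integer faligndata -- merge three consecutive
 * 8-byte source words, shifted by the complementary bit counts
 * PRE_SHIFT / POST_SHIFT, into two destination-aligned output words.
 */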
#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \
sllx WORD1, POST_SHIFT, WORD1; \
srlx WORD2, PRE_SHIFT, TMP; \
sllx WORD2, POST_SHIFT, WORD2; \
or WORD1, TMP, WORD1; \
srlx WORD3, PRE_SHIFT, TMP; \
or WORD2, TMP, WORD2;
8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1)
EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 8b
add %o0, 64, %o0
ba,pt %XCC, 60f
add %i1, %i4, %i1
9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
LOAD(prefetch, %i1 + %i3, #one_read)
EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1)
EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16)
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 9b
add %o0, 64, %o0
ba,pt %XCC, 60f
add %i1, %i4, %i1
10: /* Destination is 64-byte aligned, source was only 8-byte
* aligned but it has been subtracted by 8 and we perform
* one twin load ahead, then add 8 back into source when
* we finish the loop.
*/
EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1)
mov 16, %o7
mov 32, %g2
mov 48, %g3
mov 64, %o1
1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48)
add %i1, 64, %i1
EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
ba,pt %XCC, 60f
add %i1, 0x8, %i1
50: /* Destination is 64-byte aligned, and source is 16-byte
* aligned.
*/
mov 16, %o7
mov 32, %g2
mov 48, %g3
mov 64, %o1
1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1)
EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1)
LOAD(prefetch, %i1 + %o1, #one_read)
EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line
EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8)
EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16)
EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16)
EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24)
EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32)
add %i1, 64, %i1
EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32)
EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40)
EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48)
EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56)
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
/* fall through */
60:
membar #Sync
/* %i2 contains any final bytes still needed to be copied
* over. If anything is left, we copy it one byte at a time.
*/
RESTORE_ASI(%i3)
brz,pt %i2, 85f
sub %o0, %i1, %i3
ba,a,pt %XCC, 90f
nop
.align 64
70: /* 16 < len <= 64 */
bne,pn %XCC, 75f
sub %o0, %i1, %i3
72:
andn %i2, 0xf, %i4
and %i2, 0xf, %i2
1: subcc %i4, 0x10, %i4
EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
add %i1, 0x08, %i1
EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
sub %i1, 0x08, %i1
EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
add %i1, 0x8, %i1
EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
bgu,pt %XCC, 1b
add %i1, 0x8, %i1
73: andcc %i2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x8, %i2
EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8)
EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8)
add %i1, 0x8, %i1
1: andcc %i2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %i2, 0x4, %i2
EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4)
EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4)
add %i1, 0x4, %i1
1: cmp %i2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
nop
75:
andcc %o0, 0x7, %g1
sub %g1, 0x8, %g1
be,pn %icc, 2f
sub %g0, %g1, %g1
sub %i2, %g1, %i2
1: subcc %g1, 1, %g1
EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1)
EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %i1, 1, %i1
2: add %i1, %i3, %o0
andcc %i1, 0x7, %g1
bne,pt %icc, 8f
sll %g1, 3, %g1
cmp %i2, 16
bgeu,pt %icc, 72b
nop
ba,a,pt %xcc, 73b
8: mov 64, %i3
andn %i1, 0x7, %i1
EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2)
sub %i3, %g1, %i3
andn %i2, 0x7, %i4
sllx %g2, %g1, %g2
1: add %i1, 0x8, %i1
EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4)
subcc %i4, 0x8, %i4
srlx %g3, %i3, %i5
or %i5, %g2, %i5
EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
srl %g1, 3, %g1
andcc %i2, 0x7, %i2
be,pn %icc, 85f
add %i1, %g1, %i1
ba,pt %xcc, 90f
sub %o0, %i1, %i3
.align 64
80: /* 0 < len <= 16 */
andcc %i3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %i1, %i3
1:
subcc %i2, 4, %i2
EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4)
EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4)
bgu,pt %XCC, 1b
add %i1, 4, %i1
85: ret
restore EX_RETVAL(%i0), %g0, %o0
.align 32
90:
subcc %i2, 1, %i2
EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1)
EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1)
bgu,pt %XCC, 90b
add %i1, 1, %i1
ret
restore EX_RETVAL(%i0), %g0, %o0
.size FUNC_NAME, .-FUNC_NAME
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,664
|
arch/sparc/lib/NG4patch.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* NG4patch.S: Patch Ultra-I routines with Niagara-4 variant.
*
* Copyright (C) 2012 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
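/* NG_DO_PATCH: overwrite the first two instructions of OLD with a
 * branch-always to NEW plus a nop in the delay slot.  The word
 * displacement (NEW - OLD) >> 2 is truncated to the branch's 19-bit
 * field by the sll/srl pair, OR'd into the BRANCH_ALWAYS opcode, and
 * the flush keeps the I-cache coherent with the patched text.
 */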
#define NG_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl niagara4_patch_copyops
.type niagara4_patch_copyops,#function
niagara4_patch_copyops:
NG_DO_PATCH(memcpy, NG4memcpy)
NG_DO_PATCH(raw_copy_from_user, NG4copy_from_user)
NG_DO_PATCH(raw_copy_to_user, NG4copy_to_user)
retl
nop
.size niagara4_patch_copyops,.-niagara4_patch_copyops
.globl niagara4_patch_bzero
.type niagara4_patch_bzero,#function
niagara4_patch_bzero:
NG_DO_PATCH(memset, NG4memset)
NG_DO_PATCH(__bzero, NG4bzero)
NG_DO_PATCH(__clear_user, NGclear_user)
NG_DO_PATCH(tsb_init, NGtsb_init)
retl
nop
.size niagara4_patch_bzero,.-niagara4_patch_bzero
.globl niagara4_patch_pageops
.type niagara4_patch_pageops,#function
niagara4_patch_pageops:
NG_DO_PATCH(copy_user_page, NG4copy_user_page)
NG_DO_PATCH(_clear_page, NG4clear_page)
NG_DO_PATCH(clear_user_page, NG4clear_user_page)
retl
nop
.size niagara4_patch_pageops,.-niagara4_patch_pageops
ENTRY(niagara4_patch_fls)
NG_DO_PATCH(fls, NG4fls)
NG_DO_PATCH(__fls, __NG4fls)
retl
nop
ENDPROC(niagara4_patch_fls)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,287
|
arch/sparc/lib/memmove.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* memmove.S: Simple memmove implementation.
*
* Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
brz,pn %o2, 99f
mov %o0, %g1
cmp %o0, %o1
bleu,pt %xcc, 2f
add %o1, %o2, %g7
cmp %g7, %o0
bleu,pt %xcc, memcpy
add %o0, %o2, %o5
sub %g7, 1, %o1
sub %o5, 1, %o0
1: ldub [%o1], %g7
subcc %o2, 1, %o2
sub %o1, 1, %o1
stb %g7, [%o0]
bne,pt %icc, 1b
sub %o0, 1, %o0
99:
retl
mov %g1, %o0
/* We can't just call memcpy for these memmove cases. On some
* chips the memcpy uses cache initializing stores and when dst
* and src are close enough, those can clobber the source data
* before we've loaded it in.
*/
2: or %o0, %o1, %g7
or %o2, %g7, %g7
andcc %g7, 0x7, %g0
bne,pn %xcc, 4f
nop
3: ldx [%o1], %g7
add %o1, 8, %o1
subcc %o2, 8, %o2
add %o0, 8, %o0
bne,pt %icc, 3b
stx %g7, [%o0 - 0x8]
ba,a,pt %xcc, 99b
4: ldub [%o1], %g7
add %o1, 1, %o1
subcc %o2, 1, %o2
add %o0, 1, %o0
bne,pt %icc, 4b
stb %g7, [%o0 - 0x1]
ba,a,pt %xcc, 99b
ENDPROC(memmove)
EXPORT_SYMBOL(memmove)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,465
|
arch/sparc/lib/NGbzero.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* NGbzero.S: Niagara optimized memset/clear_user.
*
* Copyright (C) 2006 David S. Miller (davem@davemloft.net)
*/
#include <asm/asi.h>
#define EX_ST(x,y) \
98: x,y; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_o1_asi;\
.text; \
.align 4;
.text
.globl NGmemset
.type NGmemset, #function
NGmemset: /* %o0=buf, %o1=pat, %o2=len */
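/* Replicate the low byte of the pattern across all 64 bits
 * (pat * 0x0101010101010101) via three shift-and-or doublings, so the
 * block stores below can write it eight bytes at a time.
 */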
and %o1, 0xff, %o3
mov %o2, %o1
sllx %o3, 8, %g1
or %g1, %o3, %o2
sllx %o2, 16, %g1
or %g1, %o2, %o2
sllx %o2, 32, %g1
ba,pt %xcc, 1f
or %g1, %o2, %o2
.globl NGbzero
.type NGbzero, #function
NGbzero:
clr %o2
1: brz,pn %o1, NGbzero_return
mov %o0, %o3
/* %o5: saved %asi, restored at NGbzero_done
* %g7: store-init %asi to use
* %o4: non-store-init %asi to use
*/
rd %asi, %o5
mov ASI_BLK_INIT_QUAD_LDD_P, %g7
mov ASI_P, %o4
wr %o4, 0x0, %asi
NGbzero_from_clear_user:
cmp %o1, 15
bl,pn %icc, NGbzero_tiny
andcc %o0, 0x7, %g1
be,pt %xcc, 2f
mov 8, %g2
sub %g2, %g1, %g1
sub %o1, %g1, %o1
1: EX_ST(stba %o2, [%o0 + 0x00] %asi)
subcc %g1, 1, %g1
bne,pt %xcc, 1b
add %o0, 1, %o0
2: cmp %o1, 128
bl,pn %icc, NGbzero_medium
andcc %o0, (64 - 1), %g1
be,pt %xcc, NGbzero_pre_loop
mov 64, %g2
sub %g2, %g1, %g1
sub %o1, %g1, %o1
1: EX_ST(stxa %o2, [%o0 + 0x00] %asi)
subcc %g1, 8, %g1
bne,pt %xcc, 1b
add %o0, 8, %o0
NGbzero_pre_loop:
wr %g7, 0x0, %asi
andn %o1, (64 - 1), %g1
sub %o1, %g1, %o1
NGbzero_loop:
EX_ST(stxa %o2, [%o0 + 0x00] %asi)
EX_ST(stxa %o2, [%o0 + 0x08] %asi)
EX_ST(stxa %o2, [%o0 + 0x10] %asi)
EX_ST(stxa %o2, [%o0 + 0x18] %asi)
EX_ST(stxa %o2, [%o0 + 0x20] %asi)
EX_ST(stxa %o2, [%o0 + 0x28] %asi)
EX_ST(stxa %o2, [%o0 + 0x30] %asi)
EX_ST(stxa %o2, [%o0 + 0x38] %asi)
subcc %g1, 64, %g1
bne,pt %xcc, NGbzero_loop
add %o0, 64, %o0
membar #Sync
wr %o4, 0x0, %asi
brz,pn %o1, NGbzero_done
NGbzero_medium:
andncc %o1, 0x7, %g1
be,pn %xcc, 2f
sub %o1, %g1, %o1
1: EX_ST(stxa %o2, [%o0 + 0x00] %asi)
subcc %g1, 8, %g1
bne,pt %xcc, 1b
add %o0, 8, %o0
2: brz,pt %o1, NGbzero_done
nop
NGbzero_tiny:
1: EX_ST(stba %o2, [%o0 + 0x00] %asi)
subcc %o1, 1, %o1
bne,pt %icc, 1b
add %o0, 1, %o0
/* fallthrough */
NGbzero_done:
wr %o5, 0x0, %asi
NGbzero_return:
retl
mov %o3, %o0
.size NGbzero, .-NGbzero
.size NGmemset, .-NGmemset
.globl NGclear_user
.type NGclear_user, #function
NGclear_user: /* %o0=buf, %o1=len */
rd %asi, %o5
brz,pn %o1, NGbzero_done
clr %o3
cmp %o5, ASI_AIUS
bne,pn %icc, NGbzero
clr %o2
mov ASI_BLK_INIT_QUAD_LDD_AIUS, %g7
ba,pt %xcc, NGbzero_from_clear_user
mov ASI_AIUS, %o4
.size NGclear_user, .-NGclear_user
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define NG_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl niagara_patch_bzero
.type niagara_patch_bzero,#function
niagara_patch_bzero:
NG_DO_PATCH(memset, NGmemset)
NG_DO_PATCH(__bzero, NGbzero)
NG_DO_PATCH(__clear_user, NGclear_user)
NG_DO_PATCH(tsb_init, NGtsb_init)
retl
nop
.size niagara_patch_bzero,.-niagara_patch_bzero
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,346
|
arch/sparc/lib/GENbzero.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* GENbzero.S: Generic sparc64 memset/clear_user.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <asm/asi.h>
#define EX_ST(x,y) \
98: x,y; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_o1_asi;\
.text; \
.align 4;
.align 32
.text
.globl GENmemset
.type GENmemset, #function
GENmemset: /* %o0=buf, %o1=pat, %o2=len */
and %o1, 0xff, %o3
mov %o2, %o1
sllx %o3, 8, %g1
or %g1, %o3, %o2
sllx %o2, 16, %g1
or %g1, %o2, %o2
sllx %o2, 32, %g1
ba,pt %xcc, 1f
or %g1, %o2, %o2
.globl GENbzero
.type GENbzero, #function
GENbzero:
clr %o2
1: brz,pn %o1, GENbzero_return
mov %o0, %o3
/* %o5: saved %asi, restored at GENbzero_done
* %o4: store %asi to use
*/
rd %asi, %o5
mov ASI_P, %o4
wr %o4, 0x0, %asi
GENbzero_from_clear_user:
cmp %o1, 15
bl,pn %icc, GENbzero_tiny
andcc %o0, 0x7, %g1
be,pt %xcc, 2f
mov 8, %g2
sub %g2, %g1, %g1
sub %o1, %g1, %o1
1: EX_ST(stba %o2, [%o0 + 0x00] %asi)
subcc %g1, 1, %g1
bne,pt %xcc, 1b
add %o0, 1, %o0
2: cmp %o1, 128
bl,pn %icc, GENbzero_medium
andcc %o0, (64 - 1), %g1
be,pt %xcc, GENbzero_pre_loop
mov 64, %g2
sub %g2, %g1, %g1
sub %o1, %g1, %o1
1: EX_ST(stxa %o2, [%o0 + 0x00] %asi)
subcc %g1, 8, %g1
bne,pt %xcc, 1b
add %o0, 8, %o0
GENbzero_pre_loop:
andn %o1, (64 - 1), %g1
sub %o1, %g1, %o1
GENbzero_loop:
EX_ST(stxa %o2, [%o0 + 0x00] %asi)
EX_ST(stxa %o2, [%o0 + 0x08] %asi)
EX_ST(stxa %o2, [%o0 + 0x10] %asi)
EX_ST(stxa %o2, [%o0 + 0x18] %asi)
EX_ST(stxa %o2, [%o0 + 0x20] %asi)
EX_ST(stxa %o2, [%o0 + 0x28] %asi)
EX_ST(stxa %o2, [%o0 + 0x30] %asi)
EX_ST(stxa %o2, [%o0 + 0x38] %asi)
subcc %g1, 64, %g1
bne,pt %xcc, GENbzero_loop
add %o0, 64, %o0
membar #Sync
wr %o4, 0x0, %asi
brz,pn %o1, GENbzero_done
GENbzero_medium:
andncc %o1, 0x7, %g1
be,pn %xcc, 2f
sub %o1, %g1, %o1
1: EX_ST(stxa %o2, [%o0 + 0x00] %asi)
subcc %g1, 8, %g1
bne,pt %xcc, 1b
add %o0, 8, %o0
2: brz,pt %o1, GENbzero_done
nop
GENbzero_tiny:
1: EX_ST(stba %o2, [%o0 + 0x00] %asi)
subcc %o1, 1, %o1
bne,pt %icc, 1b
add %o0, 1, %o0
/* fallthrough */
GENbzero_done:
wr %o5, 0x0, %asi
GENbzero_return:
retl
mov %o3, %o0
.size GENbzero, .-GENbzero
.size GENmemset, .-GENmemset
.globl GENclear_user
.type GENclear_user, #function
GENclear_user: /* %o0=buf, %o1=len */
rd %asi, %o5
brz,pn %o1, GENbzero_done
clr %o3
cmp %o5, ASI_AIUS
bne,pn %icc, GENbzero
clr %o2
ba,pt %xcc, GENbzero_from_clear_user
mov ASI_AIUS, %o4
.size GENclear_user, .-GENclear_user
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define GEN_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl generic_patch_bzero
.type generic_patch_bzero,#function
generic_patch_bzero:
GEN_DO_PATCH(memset, GENmemset)
GEN_DO_PATCH(__bzero, GENbzero)
GEN_DO_PATCH(__clear_user, GENclear_user)
retl
nop
.size generic_patch_bzero,.-generic_patch_bzero
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,598
|
arch/sparc/lib/NGpage.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* NGpage.S: Niagara optimize clear and copy page.
*
* Copyright (C) 2006 (davem@davemloft.net)
*/
#include <asm/asi.h>
#include <asm/page.h>
.text
.align 32
/* This is heavily simplified from the sun4u variants
* because Niagara does not have any D-cache aliasing issues
* and also we don't need to use the FPU in order to implement
* an optimal page copy/clear.
*/
NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
save %sp, -192, %sp
rd %asi, %g3
wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
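/* With the block-init quad-load ASI, each ldda below pulls 16 bytes
 * into an even/odd register pair and each stxa is a cache-initializing
 * store, so destination lines need not be fetched from memory first.
 */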
set PAGE_SIZE, %g7
prefetch [%i1 + 0x00], #one_read
prefetch [%i1 + 0x40], #one_read
1: prefetch [%i1 + 0x80], #one_read
prefetch [%i1 + 0xc0], #one_read
ldda [%i1 + 0x00] %asi, %o2
ldda [%i1 + 0x10] %asi, %o4
ldda [%i1 + 0x20] %asi, %l2
ldda [%i1 + 0x30] %asi, %l4
stxa %o2, [%i0 + 0x00] %asi
stxa %o3, [%i0 + 0x08] %asi
stxa %o4, [%i0 + 0x10] %asi
stxa %o5, [%i0 + 0x18] %asi
stxa %l2, [%i0 + 0x20] %asi
stxa %l3, [%i0 + 0x28] %asi
stxa %l4, [%i0 + 0x30] %asi
stxa %l5, [%i0 + 0x38] %asi
ldda [%i1 + 0x40] %asi, %o2
ldda [%i1 + 0x50] %asi, %o4
ldda [%i1 + 0x60] %asi, %l2
ldda [%i1 + 0x70] %asi, %l4
stxa %o2, [%i0 + 0x40] %asi
stxa %o3, [%i0 + 0x48] %asi
stxa %o4, [%i0 + 0x50] %asi
stxa %o5, [%i0 + 0x58] %asi
stxa %l2, [%i0 + 0x60] %asi
stxa %l3, [%i0 + 0x68] %asi
stxa %l4, [%i0 + 0x70] %asi
stxa %l5, [%i0 + 0x78] %asi
add %i1, 128, %i1
subcc %g7, 128, %g7
bne,pt %xcc, 1b
add %i0, 128, %i0
wr %g3, 0x0, %asi
membar #Sync
ret
restore
.align 32
.globl NGclear_page
.globl NGclear_user_page
NGclear_page: /* %o0=dest */
NGclear_user_page: /* %o0=dest, %o1=vaddr */
rd %asi, %g3
wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
set PAGE_SIZE, %g7
1: stxa %g0, [%o0 + 0x00] %asi
stxa %g0, [%o0 + 0x08] %asi
stxa %g0, [%o0 + 0x10] %asi
stxa %g0, [%o0 + 0x18] %asi
stxa %g0, [%o0 + 0x20] %asi
stxa %g0, [%o0 + 0x28] %asi
stxa %g0, [%o0 + 0x30] %asi
stxa %g0, [%o0 + 0x38] %asi
stxa %g0, [%o0 + 0x40] %asi
stxa %g0, [%o0 + 0x48] %asi
stxa %g0, [%o0 + 0x50] %asi
stxa %g0, [%o0 + 0x58] %asi
stxa %g0, [%o0 + 0x60] %asi
stxa %g0, [%o0 + 0x68] %asi
stxa %g0, [%o0 + 0x70] %asi
stxa %g0, [%o0 + 0x78] %asi
stxa %g0, [%o0 + 0x80] %asi
stxa %g0, [%o0 + 0x88] %asi
stxa %g0, [%o0 + 0x90] %asi
stxa %g0, [%o0 + 0x98] %asi
stxa %g0, [%o0 + 0xa0] %asi
stxa %g0, [%o0 + 0xa8] %asi
stxa %g0, [%o0 + 0xb0] %asi
stxa %g0, [%o0 + 0xb8] %asi
stxa %g0, [%o0 + 0xc0] %asi
stxa %g0, [%o0 + 0xc8] %asi
stxa %g0, [%o0 + 0xd0] %asi
stxa %g0, [%o0 + 0xd8] %asi
stxa %g0, [%o0 + 0xe0] %asi
stxa %g0, [%o0 + 0xe8] %asi
stxa %g0, [%o0 + 0xf0] %asi
stxa %g0, [%o0 + 0xf8] %asi
subcc %g7, 256, %g7
bne,pt %xcc, 1b
add %o0, 256, %o0
wr %g3, 0x0, %asi
membar #Sync
retl
nop
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define NG_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl niagara_patch_pageops
.type niagara_patch_pageops,#function
niagara_patch_pageops:
NG_DO_PATCH(copy_user_page, NGcopy_user_page)
NG_DO_PATCH(_clear_page, NGclear_page)
NG_DO_PATCH(clear_user_page, NGclear_user_page)
retl
nop
.size niagara_patch_pageops,.-niagara_patch_pageops
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,474
|
arch/sparc/lib/bzero.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* bzero.S: Simple prefetching memset, bzero, and clear_user
* implementations.
*
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
ENTRY(memset) /* %o0=buf, %o1=pat, %o2=len */
and %o1, 0xff, %o3
mov %o2, %o1
sllx %o3, 8, %g1
or %g1, %o3, %o2
sllx %o2, 16, %g1
or %g1, %o2, %o2
sllx %o2, 32, %g1
ba,pt %xcc, 1f
or %g1, %o2, %o2
ENTRY(__bzero) /* %o0=buf, %o1=len */
clr %o2
1: mov %o0, %o3
brz,pn %o1, __bzero_done
cmp %o1, 16
bl,pn %icc, __bzero_tiny
prefetch [%o0 + 0x000], #n_writes
andcc %o0, 0x3, %g0
be,pt %icc, 2f
1: stb %o2, [%o0 + 0x00]
add %o0, 1, %o0
andcc %o0, 0x3, %g0
bne,pn %icc, 1b
sub %o1, 1, %o1
2: andcc %o0, 0x7, %g0
be,pt %icc, 3f
stw %o2, [%o0 + 0x00]
sub %o1, 4, %o1
add %o0, 4, %o0
3: and %o1, 0x38, %g1
cmp %o1, 0x40
andn %o1, 0x3f, %o4
bl,pn %icc, 5f
and %o1, 0x7, %o1
prefetch [%o0 + 0x040], #n_writes
prefetch [%o0 + 0x080], #n_writes
prefetch [%o0 + 0x0c0], #n_writes
prefetch [%o0 + 0x100], #n_writes
prefetch [%o0 + 0x140], #n_writes
4: prefetch [%o0 + 0x180], #n_writes
stx %o2, [%o0 + 0x00]
stx %o2, [%o0 + 0x08]
stx %o2, [%o0 + 0x10]
stx %o2, [%o0 + 0x18]
stx %o2, [%o0 + 0x20]
stx %o2, [%o0 + 0x28]
stx %o2, [%o0 + 0x30]
stx %o2, [%o0 + 0x38]
subcc %o4, 0x40, %o4
bne,pt %icc, 4b
add %o0, 0x40, %o0
brz,pn %g1, 6f
nop
5: stx %o2, [%o0 + 0x00]
subcc %g1, 8, %g1
bne,pt %icc, 5b
add %o0, 0x8, %o0
6: brz,pt %o1, __bzero_done
nop
__bzero_tiny:
1: stb %o2, [%o0 + 0x00]
subcc %o1, 1, %o1
bne,pt %icc, 1b
add %o0, 1, %o0
__bzero_done:
retl
mov %o3, %o0
ENDPROC(__bzero)
ENDPROC(memset)
EXPORT_SYMBOL(__bzero)
EXPORT_SYMBOL(memset)
#define EX_ST(x,y) \
98: x,y; \
.section __ex_table,"a";\
.align 4; \
.word 98b, __retl_o1; \
.text; \
.align 4;
ENTRY(__clear_user) /* %o0=buf, %o1=len */
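/* Same store pattern as __bzero above, but every access goes through
 * %asi (user or kernel, per the caller's address space) and is listed
 * in the exception table so a fault returns the remaining length.
 */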
brz,pn %o1, __clear_user_done
cmp %o1, 16
bl,pn %icc, __clear_user_tiny
EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes)
andcc %o0, 0x3, %g0
be,pt %icc, 2f
1: EX_ST(stba %g0, [%o0 + 0x00] %asi)
add %o0, 1, %o0
andcc %o0, 0x3, %g0
bne,pn %icc, 1b
sub %o1, 1, %o1
2: andcc %o0, 0x7, %g0
be,pt %icc, 3f
EX_ST(stwa %g0, [%o0 + 0x00] %asi)
sub %o1, 4, %o1
add %o0, 4, %o0
3: and %o1, 0x38, %g1
cmp %o1, 0x40
andn %o1, 0x3f, %o4
bl,pn %icc, 5f
and %o1, 0x7, %o1
EX_ST(prefetcha [%o0 + 0x040] %asi, #n_writes)
EX_ST(prefetcha [%o0 + 0x080] %asi, #n_writes)
EX_ST(prefetcha [%o0 + 0x0c0] %asi, #n_writes)
EX_ST(prefetcha [%o0 + 0x100] %asi, #n_writes)
EX_ST(prefetcha [%o0 + 0x140] %asi, #n_writes)
4: EX_ST(prefetcha [%o0 + 0x180] %asi, #n_writes)
EX_ST(stxa %g0, [%o0 + 0x00] %asi)
EX_ST(stxa %g0, [%o0 + 0x08] %asi)
EX_ST(stxa %g0, [%o0 + 0x10] %asi)
EX_ST(stxa %g0, [%o0 + 0x18] %asi)
EX_ST(stxa %g0, [%o0 + 0x20] %asi)
EX_ST(stxa %g0, [%o0 + 0x28] %asi)
EX_ST(stxa %g0, [%o0 + 0x30] %asi)
EX_ST(stxa %g0, [%o0 + 0x38] %asi)
subcc %o4, 0x40, %o4
bne,pt %icc, 4b
add %o0, 0x40, %o0
brz,pn %g1, 6f
nop
5: EX_ST(stxa %g0, [%o0 + 0x00] %asi)
subcc %g1, 8, %g1
bne,pt %icc, 5b
add %o0, 0x8, %o0
6: brz,pt %o1, __clear_user_done
nop
__clear_user_tiny:
1: EX_ST(stba %g0, [%o0 + 0x00] %asi)
subcc %o1, 1, %o1
bne,pt %icc, 1b
add %o0, 1, %o0
__clear_user_done:
retl
clr %o0
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,724
|
arch/sparc/lib/GENpage.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* GENpage.S: Generic clear and copy page.
*
* Copyright (C) 2007 (davem@davemloft.net)
*/
#include <asm/page.h>
.text
.align 32
GENcopy_user_page:
set PAGE_SIZE, %g7
1: ldx [%o1 + 0x00], %o2
ldx [%o1 + 0x08], %o3
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
stx %o2, [%o0 + 0x00]
stx %o3, [%o0 + 0x08]
stx %o4, [%o0 + 0x10]
stx %o5, [%o0 + 0x18]
ldx [%o1 + 0x20], %o2
ldx [%o1 + 0x28], %o3
ldx [%o1 + 0x30], %o4
ldx [%o1 + 0x38], %o5
stx %o2, [%o0 + 0x20]
stx %o3, [%o0 + 0x28]
stx %o4, [%o0 + 0x30]
stx %o5, [%o0 + 0x38]
subcc %g7, 64, %g7
add %o1, 64, %o1
bne,pt %xcc, 1b
add %o0, 64, %o0
retl
nop
GENclear_page:
GENclear_user_page:
set PAGE_SIZE, %g7
1: stx %g0, [%o0 + 0x00]
stx %g0, [%o0 + 0x08]
stx %g0, [%o0 + 0x10]
stx %g0, [%o0 + 0x18]
stx %g0, [%o0 + 0x20]
stx %g0, [%o0 + 0x28]
stx %g0, [%o0 + 0x30]
stx %g0, [%o0 + 0x38]
subcc %g7, 64, %g7
bne,pt %xcc, 1b
add %o0, 64, %o0
retl
nop
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define GEN_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl generic_patch_pageops
.type generic_patch_pageops,#function
generic_patch_pageops:
GEN_DO_PATCH(copy_user_page, GENcopy_user_page)
GEN_DO_PATCH(_clear_page, GENclear_page)
GEN_DO_PATCH(clear_user_page, GENclear_user_page)
retl
nop
.size generic_patch_pageops,.-generic_patch_pageops
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,697
|
arch/sparc/lib/divdi3.S
|
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <asm/export.h>
.text
.align 4
.globl __divdi3
__divdi3:
save %sp,-104,%sp
cmp %i0,0
bge .LL40
mov 0,%l4
mov -1,%l4
sub %g0,%i1,%o0
mov %o0,%o5
subcc %g0,%o0,%g0
sub %g0,%i0,%o0
subx %o0,0,%o4
mov %o4,%i0
mov %o5,%i1
.LL40:
cmp %i2,0
bge .LL84
mov %i3,%o4
xnor %g0,%l4,%l4
sub %g0,%i3,%o0
mov %o0,%o3
subcc %g0,%o0,%g0
sub %g0,%i2,%o0
subx %o0,0,%o2
mov %o2,%i2
mov %o3,%i3
mov %i3,%o4
.LL84:
cmp %i2,0
bne .LL45
mov %i1,%i3
cmp %o4,%i0
bleu .LL46
mov %i3,%o1
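! Classic 32-step shift/subtract division: each step shifts the
! remainder/quotient pair left one bit and conditionally subtracts
! the divisor. Quotient bits accumulate inverted in %o1 and are
! fixed up by the final xnor.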
mov 32,%g1
subcc %i0,%o4,%g0
1: bcs 5f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
sub %i0,%o4,%i0 ! this kills msb of n
addx %i0,%i0,%i0 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %i0,%o4,%g0
bcs 3f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
b 3f
sub %i0,%o4,%i0 ! this kills msb of n
4: sub %i0,%o4,%i0
5: addxcc %i0,%i0,%i0
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
sub %i0,%o4,%i0
3: xnor %o1,0,%o1
b .LL50
mov 0,%o2
.LL46:
cmp %o4,0
bne .LL85
mov %i0,%o2
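! Divisor is entirely zero here: do a 1/0 udiv so the hardware
! raises the divide-by-zero trap instead of computing garbage.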
mov 1,%o0
mov 0,%o1
wr %g0, 0, %y
udiv %o0, %o1, %o0
mov %o0,%o4
mov %i0,%o2
.LL85:
mov 0,%g3
mov 32,%g1
subcc %g3,%o4,%g0
1: bcs 5f
addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
sub %g3,%o4,%g3 ! this kills msb of n
addx %g3,%g3,%g3 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %g3,%o4,%g0
bcs 3f
addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
b 3f
sub %g3,%o4,%g3 ! this kills msb of n
4: sub %g3,%o4,%g3
5: addxcc %g3,%g3,%g3
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o2,%o2,%o2 ! shift n1n0 and a 0-bit in lsb
sub %g3,%o4,%g3
3: xnor %o2,0,%o2
mov %g3,%i0
mov %i3,%o1
mov 32,%g1
subcc %i0,%o4,%g0
1: bcs 5f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
sub %i0,%o4,%i0 ! this kills msb of n
addx %i0,%i0,%i0 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %i0,%o4,%g0
bcs 3f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
b 3f
sub %i0,%o4,%i0 ! this kills msb of n
4: sub %i0,%o4,%i0
5: addxcc %i0,%i0,%i0
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
sub %i0,%o4,%i0
3: xnor %o1,0,%o1
b .LL86
mov %o1,%l1
.LL45:
cmp %i2,%i0
bleu .LL51
sethi %hi(65535),%o0
b .LL78
mov 0,%o1
.LL51:
or %o0,%lo(65535),%o0
cmp %i2,%o0
bgu .LL58
mov %i2,%o1
cmp %i2,256
addx %g0,-1,%o0
b .LL64
and %o0,8,%o2
.LL58:
sethi %hi(16777215),%o0
or %o0,%lo(16777215),%o0
cmp %i2,%o0
bgu .LL64
mov 24,%o2
mov 16,%o2
.LL64:
srl %o1,%o2,%o0
sethi %hi(__clz_tab),%o1
or %o1,%lo(__clz_tab),%o1
ldub [%o0+%o1],%o0
add %o0,%o2,%o0
mov 32,%o1
subcc %o1,%o0,%o3
bne,a .LL72
sub %o1,%o3,%o1
cmp %i0,%i2
bgu .LL74
cmp %i3,%o4
blu .LL78
mov 0,%o1
.LL74:
b .LL78
mov 1,%o1
.LL72:
sll %i2,%o3,%o2
srl %o4,%o1,%o0
or %o2,%o0,%i2
sll %o4,%o3,%o4
srl %i0,%o1,%o2
sll %i0,%o3,%o0
srl %i3,%o1,%o1
or %o0,%o1,%i0
sll %i3,%o3,%i3
mov %i0,%o1
mov 32,%g1
subcc %o2,%i2,%g0
1: bcs 5f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
sub %o2,%i2,%o2 ! this kills msb of n
addx %o2,%o2,%o2 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %o2,%i2,%g0
bcs 3f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
b 3f
sub %o2,%i2,%o2 ! this kills msb of n
4: sub %o2,%i2,%o2
5: addxcc %o2,%o2,%o2
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
sub %o2,%i2,%o2
3: xnor %o1,0,%o1
mov %o2,%i0
wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr
sra %o4,31,%g2 ! Do not move this insn
and %o1,%g2,%g2 ! Do not move this insn
andcc %g0,0,%g1 ! Do not move this insn
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,0,%g1
add %g1,%g2,%o0
rd %y,%o2
cmp %o0,%i0
bgu,a .LL78
add %o1,-1,%o1
bne,a .LL50
mov 0,%o2
cmp %o2,%i3
bleu .LL50
mov 0,%o2
add %o1,-1,%o1
.LL78:
mov 0,%o2
.LL50:
mov %o1,%l1
.LL86:
mov %o2,%l0
mov %l0,%i0
mov %l1,%i1
cmp %l4,0
be .LL81
sub %g0,%i1,%o0
mov %o0,%l3
subcc %g0,%o0,%g0
sub %g0,%i0,%o0
subx %o0,0,%l2
mov %l2,%i0
mov %l3,%i1
.LL81:
ret
restore
EXPORT_SYMBOL(__divdi3)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 17,018
|
arch/sparc/lib/U1memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
*
* Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/export.h>
#define GLOBAL_SPARE g7
#else
#define GLOBAL_SPARE g5
#define ASI_BLK_P 0xf0
#define FPRS_FEF 0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif
#ifndef EX_LD
#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_P, dest
#endif
#ifndef STORE
#define STORE(type,src,addr) type src, [addr]
#endif
#ifndef STORE_BLK
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
#endif
#ifndef FUNC_NAME
#define FUNC_NAME memcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC
#define XCC xcc
#endif
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9) \
faligndata %f1, %f2, %f48; \
faligndata %f2, %f3, %f50; \
faligndata %f3, %f4, %f52; \
faligndata %f4, %f5, %f54; \
faligndata %f5, %f6, %f56; \
faligndata %f6, %f7, %f58; \
faligndata %f7, %f8, %f60; \
faligndata %f8, %f9, %f62;
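/* FREG_FROB realigns one 64-byte chunk: f1-f8 hold the current
 * chunk and f9 is the first double of the next one; faligndata
 * shifts each adjacent pair into %f48-%f62, ready to drain with a
 * single 64-byte block store.
 */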
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \
EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \
EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
add %src, 0x40, %src; \
subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \
be,pn %xcc, jmptgt; \
add %dest, 0x40, %dest;
#define LOOP_CHUNK1(src, dest, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest)
#define LOOP_CHUNK2(src, dest, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
#define LOOP_CHUNK3(src, dest, branch_dest) \
MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)
#define DO_SYNC membar #Sync;
#define STORE_SYNC(dest, fsrc) \
EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
add %dest, 0x40, %dest; \
DO_SYNC
#define STORE_JUMP(dest, fsrc, target) \
EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \
add %dest, 0x40, %dest; \
ba,pt %xcc, target; \
nop;
#define FINISH_VISCHUNK(dest, f0, f1) \
subcc %g3, 8, %g3; \
bl,pn %xcc, 95f; \
faligndata %f0, %f1, %f48; \
EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \
add %dest, 8, %dest;
#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
subcc %g3, 8, %g3; \
bl,pn %xcc, 95f; \
fsrc2 %f0, %f1;
#define UNEVEN_VISCHUNK(dest, f0, f1) \
UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
ba,a,pt %xcc, 93f;
.register %g2,#scratch
.register %g3,#scratch
.text
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
ENTRY(U1_g1_1_fp)
VISExitHalf
add %g1, 1, %g1
add %g1, %g2, %g1
retl
add %g1, %o2, %o0
ENDPROC(U1_g1_1_fp)
ENTRY(U1_g2_0_fp)
VISExitHalf
retl
add %g2, %o2, %o0
ENDPROC(U1_g2_0_fp)
ENTRY(U1_g2_8_fp)
VISExitHalf
add %g2, 8, %g2
retl
add %g2, %o2, %o0
ENDPROC(U1_g2_8_fp)
ENTRY(U1_gs_0_fp)
VISExitHalf
add %GLOBAL_SPARE, %g3, %o0
retl
add %o0, %o2, %o0
ENDPROC(U1_gs_0_fp)
ENTRY(U1_gs_80_fp)
VISExitHalf
add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
add %GLOBAL_SPARE, %g3, %o0
retl
add %o0, %o2, %o0
ENDPROC(U1_gs_80_fp)
ENTRY(U1_gs_40_fp)
VISExitHalf
add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
add %GLOBAL_SPARE, %g3, %o0
retl
add %o0, %o2, %o0
ENDPROC(U1_gs_40_fp)
ENTRY(U1_g3_0_fp)
VISExitHalf
retl
add %g3, %o2, %o0
ENDPROC(U1_g3_0_fp)
ENTRY(U1_g3_8_fp)
VISExitHalf
add %g3, 8, %g3
retl
add %g3, %o2, %o0
ENDPROC(U1_g3_8_fp)
ENTRY(U1_o2_0_fp)
VISExitHalf
retl
mov %o2, %o0
ENDPROC(U1_o2_0_fp)
ENTRY(U1_o2_1_fp)
VISExitHalf
retl
add %o2, 1, %o0
ENDPROC(U1_o2_1_fp)
ENTRY(U1_gs_0)
VISExitHalf
retl
add %GLOBAL_SPARE, %o2, %o0
ENDPROC(U1_gs_0)
ENTRY(U1_gs_8)
VISExitHalf
add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
retl
add %GLOBAL_SPARE, 0x8, %o0
ENDPROC(U1_gs_8)
ENTRY(U1_gs_10)
VISExitHalf
add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
retl
add %GLOBAL_SPARE, 0x10, %o0
ENDPROC(U1_gs_10)
ENTRY(U1_o2_0)
retl
mov %o2, %o0
ENDPROC(U1_o2_0)
ENTRY(U1_o2_8)
retl
add %o2, 8, %o0
ENDPROC(U1_o2_8)
ENTRY(U1_o2_4)
retl
add %o2, 4, %o0
ENDPROC(U1_o2_4)
ENTRY(U1_o2_1)
retl
add %o2, 1, %o0
ENDPROC(U1_o2_1)
ENTRY(U1_g1_0)
retl
add %g1, %o2, %o0
ENDPROC(U1_g1_0)
ENTRY(U1_g1_1)
add %g1, 1, %g1
retl
add %g1, %o2, %o0
ENDPROC(U1_g1_1)
ENTRY(U1_gs_0_o2_adj)
and %o2, 7, %o2
retl
add %GLOBAL_SPARE, %o2, %o0
ENDPROC(U1_gs_0_o2_adj)
ENTRY(U1_gs_8_o2_adj)
and %o2, 7, %o2
add %GLOBAL_SPARE, 8, %GLOBAL_SPARE
retl
add %GLOBAL_SPARE, %o2, %o0
ENDPROC(U1_gs_8_o2_adj)
#endif
.align 64
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
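/* software trap 5 "Range Check" if len >= 0x80000000 */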
srlx %o2, 31, %g2
cmp %g2, 0
tne %xcc, 5
PREAMBLE
mov %o0, %o4
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
cmp %o2, 16
blu,a,pn %XCC, 80f
or %o3, %o2, %o3
cmp %o2, (5 * 64)
blu,pt %XCC, 70f
andcc %o3, 0x7, %g0
/* Clobbers o5/g1/g2/g3/g7/icc/xcc. */
VISEntry
/* Is 'dst' already aligned on an 64-byte boundary? */
andcc %o0, 0x3f, %g2
be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
sub %o0, %o1, %GLOBAL_SPARE
sub %g2, 0x40, %g2
sub %g0, %g2, %g2
sub %o2, %g2, %o2
andcc %g2, 0x7, %g1
be,pt %icc, 2f
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
add %o1, %GLOBAL_SPARE, %o0
2: cmp %g2, 0x0
and %o1, 0x7, %g1
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
be,pn %icc, 3f
add %o0, 0x8, %o0
EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f0
EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
bne,pt %icc, 1b
add %o0, 0x8, %o0
/* Destination is 64-byte aligned. */
3:
membar #LoadStore | #StoreStore | #StoreLoad
subcc %o2, 0x40, %GLOBAL_SPARE
add %o1, %g1, %g1
andncc %GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
srl %g1, 3, %g2
sub %o2, %GLOBAL_SPARE, %g3
andn %o1, (0x40 - 1), %o1
and %g2, 7, %g2
andncc %g3, 0x7, %g3
fsrc2 %f0, %f2
sub %g3, 0x8, %g3
sub %o2, %GLOBAL_SPARE, %o2
add %g1, %GLOBAL_SPARE, %g1
subcc %o2, %g3, %o2
EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
add %o1, 0x40, %o1
add %g1, %g3, %g1
EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
add %o1, 0x40, %o1
sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
add %o1, 0x40, %o1
/* There are 8 instances of the unrolled loop,
* one for each possible alignment of the
* source buffer. Each loop instance is 452
* bytes.
*/
sll %g2, 3, %o3
sub %o3, %g2, %o3
sllx %o3, 4, %o3
add %o3, %g2, %o3
sllx %o3, 2, %g2
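/* The shift/add sequence above multiplies the alignment index by
 * 452 without a mul: ((g2*8 - g2)*16 + g2)*4 = 452*g2, which is the
 * byte offset of the matching unrolled loop instance below.
 */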
1: rd %pc, %o3
add %o3, %lo(1f - 1b), %o3
jmpl %o3 + %g2, %g0
nop
.align 64
1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f0, %f2, %f48
1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
STORE_SYNC(o0, f48)
FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
STORE_JUMP(o0, f48, 40f)
2: FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
STORE_SYNC(o0, f48)
FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
STORE_JUMP(o0, f48, 48f)
3: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
STORE_SYNC(o0, f48)
FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
STORE_JUMP(o0, f48, 56f)
1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f2, %f4, %f48
1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
STORE_SYNC(o0, f48)
FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
STORE_JUMP(o0, f48, 41f)
2: FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
STORE_SYNC(o0, f48)
FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
STORE_JUMP(o0, f48, 49f)
3: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
STORE_SYNC(o0, f48)
FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
STORE_JUMP(o0, f48, 57f)
1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f4, %f6, %f48
1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
STORE_SYNC(o0, f48)
FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
STORE_JUMP(o0, f48, 42f)
2: FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
STORE_SYNC(o0, f48)
FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
STORE_JUMP(o0, f48, 50f)
3: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
STORE_SYNC(o0, f48)
FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
STORE_JUMP(o0, f48, 58f)
1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f6, %f8, %f48
1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
STORE_SYNC(o0, f48)
FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
STORE_JUMP(o0, f48, 43f)
2: FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
STORE_SYNC(o0, f48)
FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
STORE_JUMP(o0, f48, 51f)
3: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
STORE_SYNC(o0, f48)
FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
STORE_JUMP(o0, f48, 59f)
1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f8, %f10, %f48
1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
STORE_SYNC(o0, f48)
FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
STORE_JUMP(o0, f48, 44f)
2: FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
STORE_SYNC(o0, f48)
FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
STORE_JUMP(o0, f48, 52f)
3: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
STORE_SYNC(o0, f48)
FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
STORE_JUMP(o0, f48, 60f)
1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f10, %f12, %f48
1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
STORE_SYNC(o0, f48)
FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
STORE_JUMP(o0, f48, 45f)
2: FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
STORE_SYNC(o0, f48)
FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
STORE_JUMP(o0, f48, 53f)
3: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
STORE_SYNC(o0, f48)
FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
STORE_JUMP(o0, f48, 61f)
1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f12, %f14, %f48
1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
STORE_SYNC(o0, f48)
FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
STORE_JUMP(o0, f48, 46f)
2: FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
STORE_SYNC(o0, f48)
FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
STORE_JUMP(o0, f48, 54f)
3: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
STORE_SYNC(o0, f48)
FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
STORE_JUMP(o0, f48, 62f)
1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
LOOP_CHUNK1(o1, o0, 1f)
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
LOOP_CHUNK2(o1, o0, 2f)
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
LOOP_CHUNK3(o1, o0, 3f)
ba,pt %xcc, 1b+4
faligndata %f14, %f16, %f48
1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
STORE_SYNC(o0, f48)
FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
STORE_JUMP(o0, f48, 47f)
2: FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
STORE_SYNC(o0, f48)
FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
STORE_JUMP(o0, f48, 55f)
3: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
STORE_SYNC(o0, f48)
FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
STORE_JUMP(o0, f48, 63f)
40: FINISH_VISCHUNK(o0, f0, f2)
41: FINISH_VISCHUNK(o0, f2, f4)
42: FINISH_VISCHUNK(o0, f4, f6)
43: FINISH_VISCHUNK(o0, f6, f8)
44: FINISH_VISCHUNK(o0, f8, f10)
45: FINISH_VISCHUNK(o0, f10, f12)
46: FINISH_VISCHUNK(o0, f12, f14)
47: UNEVEN_VISCHUNK(o0, f14, f0)
48: FINISH_VISCHUNK(o0, f16, f18)
49: FINISH_VISCHUNK(o0, f18, f20)
50: FINISH_VISCHUNK(o0, f20, f22)
51: FINISH_VISCHUNK(o0, f22, f24)
52: FINISH_VISCHUNK(o0, f24, f26)
53: FINISH_VISCHUNK(o0, f26, f28)
54: FINISH_VISCHUNK(o0, f28, f30)
55: UNEVEN_VISCHUNK(o0, f30, f0)
56: FINISH_VISCHUNK(o0, f32, f34)
57: FINISH_VISCHUNK(o0, f34, f36)
58: FINISH_VISCHUNK(o0, f36, f38)
59: FINISH_VISCHUNK(o0, f38, f40)
60: FINISH_VISCHUNK(o0, f40, f42)
61: FINISH_VISCHUNK(o0, f42, f44)
62: FINISH_VISCHUNK(o0, f44, f46)
63: UNEVEN_VISCHUNK_LAST(o0, f46, f0)
93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f0, %f2, %f8
EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bl,pn %xcc, 95f
add %o0, 8, %o0
EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
add %o1, 8, %o1
subcc %g3, 8, %g3
faligndata %f2, %f0, %f8
EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
bge,pt %xcc, 93b
add %o0, 8, %o0
95: brz,pt %o2, 2f
mov %g1, %o1
1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
add %o1, 1, %o1
subcc %o2, 1, %o2
EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)
bne,pt %xcc, 1b
add %o0, 1, %o0
2: membar #StoreLoad | #StoreStore
VISExit
retl
mov EX_RETVAL(%o4), %o0
.align 64
70: /* 16 < len <= (5 * 64) */
bne,pn %XCC, 75f
sub %o0, %o1, %o3
72: andn %o2, 0xf, %GLOBAL_SPARE
and %o2, 0xf, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
add %o1, 0x8, %o1
EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
sub %o2, 0x8, %o2
EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
sub %o2, 0x4, %o2
EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
nop
75: andcc %o0, 0x7, %g1
sub %g1, 0x8, %g1
be,pn %icc, 2f
sub %g0, %g1, %g1
sub %o2, %g1, %o2
1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
subcc %g1, 1, %g1
EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
2: add %o1, %o3, %o0
andcc %o1, 0x7, %g1
bne,pt %icc, 8f
sll %g1, 3, %g1
cmp %o2, 16
bgeu,pt %icc, 72b
nop
ba,a,pt %xcc, 73b
8: mov 64, %o3
andn %o1, 0x7, %o1
EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
sub %o3, %g1, %o3
andn %o2, 0x7, %GLOBAL_SPARE
sllx %g2, %g1, %g2
1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
srl %g1, 3, %g1
andcc %o2, 0x7, %o2
be,pn %icc, 85f
add %o1, %g1, %o1
ba,pt %xcc, 90f
sub %o0, %o1, %o3
.align 64
80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, %o3
1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
subcc %o2, 4, %o2
EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
85: retl
mov EX_RETVAL(%o4), %o0
.align 32
90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
subcc %o2, 1, %o2
EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
mov EX_RETVAL(%o4), %o0
.size FUNC_NAME, .-FUNC_NAME
EXPORT_SYMBOL(FUNC_NAME)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,022
|
arch/sparc/lib/copy_page.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* copy_page.S: UltraSparc optimized copy page.
*
* Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
*/
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
* restore the original TLB entry. This was great for
* disturbing the TLB as little as possible, but it meant
* we had to keep interrupts disabled for a long time.
*
* Now, we simply use the normal TLB loading mechanism,
* and this makes the cpu choose a slot all by itself.
* Then we do a normal TLB flush on exit. We need only
* disable preemption during the clear.
*/
#define DCACHE_SIZE (PAGE_SIZE * 2)
#if (PAGE_SHIFT == 13)
#define PAGE_SIZE_REM 0x80
#elif (PAGE_SHIFT == 16)
#define PAGE_SIZE_REM 0x100
#else
#error Wrong PAGE_SHIFT specified
#endif
#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
fsrc2 %reg0, %f48; fsrc2 %reg1, %f50; \
fsrc2 %reg2, %f52; fsrc2 %reg3, %f54; \
fsrc2 %reg4, %f56; fsrc2 %reg5, %f58; \
fsrc2 %reg6, %f60; fsrc2 %reg7, %f62;
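/* TOUCH stages eight source doubles into %f48-%f62 so the
 * following 64-byte block store can drain them in one go.
 */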
.text
.align 32
.globl copy_user_page
.type copy_user_page,#function
EXPORT_SYMBOL(copy_user_page)
copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o4
sethi %hi(PAGE_OFFSET), %g2
sethi %hi(PAGE_SIZE), %o3
ldx [%g2 + %lo(PAGE_OFFSET)], %g2
sethi %hi(PAGE_KERNEL_LOCKED), %g3
ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
sub %o0, %g2, %g1 ! dest paddr
sub %o1, %g2, %g2 ! src paddr
and %o2, %o3, %o0 ! vaddr D-cache alias bit
or %g1, %g3, %g1 ! dest TTE data
or %g2, %g3, %g2 ! src TTE data
sethi %hi(TLBTEMP_BASE), %o3
sethi %hi(DCACHE_SIZE), %o1
add %o0, %o3, %o0 ! dest TTE vaddr
add %o4, 1, %o2
add %o0, %o1, %o1 ! src TTE vaddr
/* Disable preemption. */
mov TLB_TAG_ACCESS, %g3
stw %o2, [%g6 + TI_PRE_COUNT]
/* Load TLB entries. */
rdpr %pstate, %o2
wrpr %o2, PSTATE_IE, %pstate
stxa %o0, [%g3] ASI_DMMU
stxa %g1, [%g0] ASI_DTLB_DATA_IN
membar #Sync
stxa %o1, [%g3] ASI_DMMU
stxa %g2, [%g0] ASI_DTLB_DATA_IN
membar #Sync
wrpr %o2, 0x0, %pstate
cheetah_copy_page_insn:
ba,pt %xcc, 9f
nop
1:
VISEntryHalf
membar #StoreLoad | #StoreStore | #LoadStore
sethi %hi((PAGE_SIZE/64)-2), %o2
mov %o0, %g1
prefetch [%o1 + 0x000], #one_read
or %o2, %lo((PAGE_SIZE/64)-2), %o2
prefetch [%o1 + 0x040], #one_read
prefetch [%o1 + 0x080], #one_read
prefetch [%o1 + 0x0c0], #one_read
ldd [%o1 + 0x000], %f0
prefetch [%o1 + 0x100], #one_read
ldd [%o1 + 0x008], %f2
prefetch [%o1 + 0x140], #one_read
ldd [%o1 + 0x010], %f4
prefetch [%o1 + 0x180], #one_read
fsrc2 %f0, %f16
ldd [%o1 + 0x018], %f6
fsrc2 %f2, %f18
ldd [%o1 + 0x020], %f8
fsrc2 %f4, %f20
ldd [%o1 + 0x028], %f10
fsrc2 %f6, %f22
ldd [%o1 + 0x030], %f12
fsrc2 %f8, %f24
ldd [%o1 + 0x038], %f14
fsrc2 %f10, %f26
ldd [%o1 + 0x040], %f0
1: ldd [%o1 + 0x048], %f2
fsrc2 %f12, %f28
ldd [%o1 + 0x050], %f4
fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P
ldd [%o1 + 0x058], %f6
fsrc2 %f0, %f16
ldd [%o1 + 0x060], %f8
fsrc2 %f2, %f18
ldd [%o1 + 0x068], %f10
fsrc2 %f4, %f20
ldd [%o1 + 0x070], %f12
fsrc2 %f6, %f22
ldd [%o1 + 0x078], %f14
fsrc2 %f8, %f24
ldd [%o1 + 0x080], %f0
prefetch [%o1 + 0x180], #one_read
fsrc2 %f10, %f26
subcc %o2, 1, %o2
add %o0, 0x40, %o0
bne,pt %xcc, 1b
add %o1, 0x40, %o1
ldd [%o1 + 0x048], %f2
fsrc2 %f12, %f28
ldd [%o1 + 0x050], %f4
fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P
ldd [%o1 + 0x058], %f6
fsrc2 %f0, %f16
ldd [%o1 + 0x060], %f8
fsrc2 %f2, %f18
ldd [%o1 + 0x068], %f10
fsrc2 %f4, %f20
ldd [%o1 + 0x070], %f12
fsrc2 %f6, %f22
add %o0, 0x40, %o0
ldd [%o1 + 0x078], %f14
fsrc2 %f8, %f24
fsrc2 %f10, %f26
fsrc2 %f12, %f28
fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P
membar #Sync
VISExitHalf
ba,pt %xcc, 5f
nop
9:
VISEntry
ldub [%g6 + TI_FAULT_CODE], %g3
mov %o0, %g1
cmp %g3, 0
rd %asi, %g3
be,a,pt %icc, 1f
wr %g0, ASI_BLK_P, %asi
wr %g0, ASI_BLK_COMMIT_P, %asi
1: ldda [%o1] ASI_BLK_P, %f0
add %o1, 0x40, %o1
ldda [%o1] ASI_BLK_P, %f16
add %o1, 0x40, %o1
sethi %hi(PAGE_SIZE), %o2
1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
ldda [%o1] ASI_BLK_P, %f32
stda %f48, [%o0] %asi
add %o1, 0x40, %o1
sub %o2, 0x40, %o2
add %o0, 0x40, %o0
TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
ldda [%o1] ASI_BLK_P, %f0
stda %f48, [%o0] %asi
add %o1, 0x40, %o1
sub %o2, 0x40, %o2
add %o0, 0x40, %o0
TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
ldda [%o1] ASI_BLK_P, %f16
stda %f48, [%o0] %asi
sub %o2, 0x40, %o2
add %o1, 0x40, %o1
cmp %o2, PAGE_SIZE_REM
bne,pt %xcc, 1b
add %o0, 0x40, %o0
#if (PAGE_SHIFT == 16)
TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
ldda [%o1] ASI_BLK_P, %f32
stda %f48, [%o0] %asi
add %o1, 0x40, %o1
sub %o2, 0x40, %o2
add %o0, 0x40, %o0
TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
ldda [%o1] ASI_BLK_P, %f0
stda %f48, [%o0] %asi
add %o1, 0x40, %o1
sub %o2, 0x40, %o2
add %o0, 0x40, %o0
membar #Sync
stda %f32, [%o0] %asi
add %o0, 0x40, %o0
stda %f0, [%o0] %asi
#else
membar #Sync
stda %f0, [%o0] %asi
add %o0, 0x40, %o0
stda %f16, [%o0] %asi
#endif
membar #Sync
wr %g3, 0x0, %asi
VISExit
5:
stxa %g0, [%g1] ASI_DMMU_DEMAP
membar #Sync
sethi %hi(DCACHE_SIZE), %g2
stxa %g0, [%g1 + %g2] ASI_DMMU_DEMAP
membar #Sync
retl
stw %o4, [%g6 + TI_PRE_COUNT]
.size copy_user_page, .-copy_user_page
.globl cheetah_patch_copy_page
cheetah_patch_copy_page:
sethi %hi(0x01000000), %o1 ! NOP
sethi %hi(cheetah_copy_page_insn), %o0
or %o0, %lo(cheetah_copy_page_insn), %o0
stw %o1, [%o0]
membar #StoreStore
flush %o0
retl
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,068
|
arch/sparc/lib/copy_in_user.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* copy_in_user.S: Copy from userspace to userspace.
*
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/export.h>
#define XCC xcc
#define EX(x,y,z) \
98: x,y; \
.section __ex_table,"a";\
.align 4; \
.word 98b, z; \
.text; \
.align 4;
#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8)
#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4)
#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1)
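/* The third EX() argument is the fixup the fault handler resumes
 * at; each one returns the count of bytes not yet copied in %o0.
 */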
.register %g2,#scratch
.register %g3,#scratch
.text
__retl_o4_plus_8:
add %o4, %o2, %o4
retl
add %o4, 8, %o0
__retl_o2_plus_4:
retl
add %o2, 4, %o0
__retl_o2_plus_1:
retl
add %o2, 1, %o0
.align 32
/* Don't try to get too fancy here, just nice and
* simple. This is predominantly used for well aligned
* small copies in the compat layer. It is also used
* to copy register windows around during thread cloning.
*/
ENTRY(raw_copy_in_user) /* %o0=dst, %o1=src, %o2=len */
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
cmp %o2, 16
bleu,a,pn %XCC, 80f
or %o3, %o2, %o3
/* 16 < len <= 64 */
andcc %o3, 0x7, %g0
bne,pn %XCC, 90f
nop
andn %o2, 0x7, %o4
and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4
EX_O4(ldxa [%o1] %asi, %o5)
EX_O4(stxa %o5, [%o0] %asi)
add %o1, 0x8, %o1
bgu,pt %XCC, 1b
add %o0, 0x8, %o0
andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
EX_O2_4(lduwa [%o1] %asi, %o5)
EX_O2_4(stwa %o5, [%o0] %asi)
add %o1, 0x4, %o1
add %o0, 0x4, %o0
1: cmp %o2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
nop
80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
nop
82:
subcc %o2, 4, %o2
EX_O2_4(lduwa [%o1] %asi, %g1)
EX_O2_4(stwa %g1, [%o0] %asi)
add %o1, 4, %o1
bgu,pt %XCC, 82b
add %o0, 4, %o0
85: retl
clr %o0
.align 32
90:
subcc %o2, 1, %o2
EX_O2_1(lduba [%o1] %asi, %g1)
EX_O2_1(stba %g1, [%o0] %asi)
add %o1, 1, %o1
bgu,pt %XCC, 90b
add %o0, 1, %o0
retl
clr %o0
ENDPROC(raw_copy_in_user)
EXPORT_SYMBOL(raw_copy_in_user)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,552
|
arch/sparc/lib/NG4copy_page.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* NG4copy_page.S: Niagara-4 optimized copy page.
*
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
#include <asm/asi.h>
#include <asm/page.h>
.text
.align 32
.register %g2, #scratch
.register %g3, #scratch
.globl NG4copy_user_page
NG4copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
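/* Block-init stores (ASI_ST_BLKINIT_MRU_P) avoid the
 * read-for-ownership of each destination cache line; the MRU
 * variant hints that the lines should stay cache-resident. The
 * trailing membar orders them against later accesses.
 */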
prefetch [%o1 + 0x000], #n_reads_strong
prefetch [%o1 + 0x040], #n_reads_strong
prefetch [%o1 + 0x080], #n_reads_strong
prefetch [%o1 + 0x0c0], #n_reads_strong
set PAGE_SIZE, %g7
prefetch [%o1 + 0x100], #n_reads_strong
prefetch [%o1 + 0x140], #n_reads_strong
prefetch [%o1 + 0x180], #n_reads_strong
prefetch [%o1 + 0x1c0], #n_reads_strong
1:
ldx [%o1 + 0x00], %o2
subcc %g7, 0x40, %g7
ldx [%o1 + 0x08], %o3
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
ldx [%o1 + 0x20], %g1
stxa %o2, [%o0] ASI_ST_BLKINIT_MRU_P
add %o0, 0x08, %o0
ldx [%o1 + 0x28], %g2
stxa %o3, [%o0] ASI_ST_BLKINIT_MRU_P
add %o0, 0x08, %o0
ldx [%o1 + 0x30], %g3
stxa %o4, [%o0] ASI_ST_BLKINIT_MRU_P
add %o0, 0x08, %o0
ldx [%o1 + 0x38], %o2
add %o1, 0x40, %o1
stxa %o5, [%o0] ASI_ST_BLKINIT_MRU_P
add %o0, 0x08, %o0
stxa %g1, [%o0] ASI_ST_BLKINIT_MRU_P
add %o0, 0x08, %o0
stxa %g2, [%o0] ASI_ST_BLKINIT_MRU_P
add %o0, 0x08, %o0
stxa %g3, [%o0] ASI_ST_BLKINIT_MRU_P
add %o0, 0x08, %o0
stxa %o2, [%o0] ASI_ST_BLKINIT_MRU_P
add %o0, 0x08, %o0
bne,pt %icc, 1b
prefetch [%o1 + 0x200], #n_reads_strong
retl
membar #StoreLoad | #StoreStore
.size NG4copy_user_page,.-NG4copy_user_page
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,972
|
arch/sparc/lib/muldi3.S
|
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <asm/export.h>
.text
.align 4
.globl __muldi3
__muldi3:
save %sp, -104, %sp
wr %g0, %i1, %y
sra %i3, 0x1f, %g2
and %i1, %g2, %g2
andcc %g0, 0, %g1
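! The 32 mulscc steps multiply %i1 (loaded into %y) by %i3 one bit
! at a time: the high word lands in %g1 (with %g2 correcting it when
! %i3 is negative) and the low word is read back from %y. The umuls
! further down add in the two 32x32 cross products to complete the
! 64x64 multiply.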
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, %i3, %g1
mulscc %g1, 0, %g1
add %g1, %g2, %l2
rd %y, %o1
mov %o1, %l3
mov %i1, %o0
mov %i2, %o1
umul %o0, %o1, %o0
mov %o0, %l0
mov %i0, %o0
mov %i3, %o1
umul %o0, %o1, %o0
add %l0, %o0, %l0
mov %l2, %i0
add %l2, %l0, %i0
ret
restore %g0, %l3, %o1
EXPORT_SYMBOL(__muldi3)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,445
|
arch/sparc/lib/Memcpy_utils.S
|
#ifndef __ASM_MEMCPY_UTILS
#define __ASM_MEMCPY_UTILS
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/visasm.h>
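/* Exception fixup stubs shared by the sparc64 memcpy variants.
 * Each name encodes the value returned on a fault: the count of
 * bytes left to copy (%o2 plus the named register/constant).
 * __restore_asi resets %asi to ASI_AIUS for user copies; the _fp
 * variants also unwind the VIS/FPU state first.
 */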
ENTRY(__restore_asi_fp)
VISExitHalf
retl
wr %g0, ASI_AIUS, %asi
ENDPROC(__restore_asi_fp)
ENTRY(__restore_asi)
retl
wr %g0, ASI_AIUS, %asi
ENDPROC(__restore_asi)
ENTRY(memcpy_retl_o2)
ba,pt %xcc, __restore_asi
mov %o2, %o0
ENDPROC(memcpy_retl_o2)
ENTRY(memcpy_retl_o2_plus_1)
ba,pt %xcc, __restore_asi
add %o2, 1, %o0
ENDPROC(memcpy_retl_o2_plus_1)
ENTRY(memcpy_retl_o2_plus_3)
ba,pt %xcc, __restore_asi
add %o2, 3, %o0
ENDPROC(memcpy_retl_o2_plus_3)
ENTRY(memcpy_retl_o2_plus_4)
ba,pt %xcc, __restore_asi
add %o2, 4, %o0
ENDPROC(memcpy_retl_o2_plus_4)
ENTRY(memcpy_retl_o2_plus_5)
ba,pt %xcc, __restore_asi
add %o2, 5, %o0
ENDPROC(memcpy_retl_o2_plus_5)
ENTRY(memcpy_retl_o2_plus_6)
ba,pt %xcc, __restore_asi
add %o2, 6, %o0
ENDPROC(memcpy_retl_o2_plus_6)
ENTRY(memcpy_retl_o2_plus_7)
ba,pt %xcc, __restore_asi
add %o2, 7, %o0
ENDPROC(memcpy_retl_o2_plus_7)
ENTRY(memcpy_retl_o2_plus_8)
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_8)
ENTRY(memcpy_retl_o2_plus_15)
ba,pt %xcc, __restore_asi
add %o2, 15, %o0
ENDPROC(memcpy_retl_o2_plus_15)
ENTRY(memcpy_retl_o2_plus_15_8)
add %o2, 15, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_15_8)
ENTRY(memcpy_retl_o2_plus_16)
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_16)
ENTRY(memcpy_retl_o2_plus_24)
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_24)
ENTRY(memcpy_retl_o2_plus_31)
ba,pt %xcc, __restore_asi
add %o2, 31, %o0
ENDPROC(memcpy_retl_o2_plus_31)
ENTRY(memcpy_retl_o2_plus_32)
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_32)
ENTRY(memcpy_retl_o2_plus_31_32)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_31_32)
ENTRY(memcpy_retl_o2_plus_31_24)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_31_24)
ENTRY(memcpy_retl_o2_plus_31_16)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_31_16)
ENTRY(memcpy_retl_o2_plus_31_8)
add %o2, 31, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_31_8)
ENTRY(memcpy_retl_o2_plus_63)
ba,pt %xcc, __restore_asi
add %o2, 63, %o0
ENDPROC(memcpy_retl_o2_plus_63)
ENTRY(memcpy_retl_o2_plus_63_64)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 64, %o0
ENDPROC(memcpy_retl_o2_plus_63_64)
ENTRY(memcpy_retl_o2_plus_63_56)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 56, %o0
ENDPROC(memcpy_retl_o2_plus_63_56)
ENTRY(memcpy_retl_o2_plus_63_48)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 48, %o0
ENDPROC(memcpy_retl_o2_plus_63_48)
ENTRY(memcpy_retl_o2_plus_63_40)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 40, %o0
ENDPROC(memcpy_retl_o2_plus_63_40)
ENTRY(memcpy_retl_o2_plus_63_32)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 32, %o0
ENDPROC(memcpy_retl_o2_plus_63_32)
ENTRY(memcpy_retl_o2_plus_63_24)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 24, %o0
ENDPROC(memcpy_retl_o2_plus_63_24)
ENTRY(memcpy_retl_o2_plus_63_16)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 16, %o0
ENDPROC(memcpy_retl_o2_plus_63_16)
ENTRY(memcpy_retl_o2_plus_63_8)
add %o2, 63, %o2
ba,pt %xcc, __restore_asi
add %o2, 8, %o0
ENDPROC(memcpy_retl_o2_plus_63_8)
ENTRY(memcpy_retl_o2_plus_o5)
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5)
ENTRY(memcpy_retl_o2_plus_o5_plus_1)
add %o5, 1, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_1)
ENTRY(memcpy_retl_o2_plus_o5_plus_4)
add %o5, 4, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_4)
ENTRY(memcpy_retl_o2_plus_o5_plus_8)
add %o5, 8, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_8)
ENTRY(memcpy_retl_o2_plus_o5_plus_16)
add %o5, 16, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_16)
ENTRY(memcpy_retl_o2_plus_o5_plus_24)
add %o5, 24, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_24)
ENTRY(memcpy_retl_o2_plus_o5_plus_32)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_32)
ENTRY(memcpy_retl_o2_plus_o5_64)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_64)
ENTRY(memcpy_retl_o2_plus_g1)
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1)
ENTRY(memcpy_retl_o2_plus_g1_plus_1)
add %g1, 1, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1_plus_1)
ENTRY(memcpy_retl_o2_plus_g1_plus_8)
add %g1, 8, %g1
ba,pt %xcc, __restore_asi
add %o2, %g1, %o0
ENDPROC(memcpy_retl_o2_plus_g1_plus_8)
ENTRY(memcpy_retl_o2_plus_o4)
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4)
ENTRY(memcpy_retl_o2_plus_o4_plus_8)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_8)
ENTRY(memcpy_retl_o2_plus_o4_plus_16)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_16)
ENTRY(memcpy_retl_o2_plus_o4_plus_24)
add %o4, 24, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_24)
ENTRY(memcpy_retl_o2_plus_o4_plus_32)
add %o4, 32, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_32)
ENTRY(memcpy_retl_o2_plus_o4_plus_40)
add %o4, 40, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_40)
ENTRY(memcpy_retl_o2_plus_o4_plus_48)
add %o4, 48, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_48)
ENTRY(memcpy_retl_o2_plus_o4_plus_56)
add %o4, 56, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_56)
ENTRY(memcpy_retl_o2_plus_o4_plus_64)
add %o4, 64, %o4
ba,pt %xcc, __restore_asi
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_64)
ENTRY(memcpy_retl_o2_plus_o5_plus_64)
add %o5, 64, %o5
ba,pt %xcc, __restore_asi
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_64)
ENTRY(memcpy_retl_o2_plus_o3_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_fp)
ENTRY(memcpy_retl_o2_plus_o3_plus_1_fp)
add %o3, 1, %o3
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_plus_1_fp)
ENTRY(memcpy_retl_o2_plus_o3_plus_4_fp)
add %o3, 4, %o3
ba,pt %xcc, __restore_asi_fp
add %o2, %o3, %o0
ENDPROC(memcpy_retl_o2_plus_o3_plus_4_fp)
ENTRY(memcpy_retl_o2_plus_o4_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_8_fp)
add %o4, 8, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_8_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_16_fp)
add %o4, 16, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_16_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_24_fp)
add %o4, 24, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_24_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_32_fp)
add %o4, 32, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_32_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_40_fp)
add %o4, 40, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_40_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_48_fp)
add %o4, 48, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_48_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_56_fp)
add %o4, 56, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_56_fp)
ENTRY(memcpy_retl_o2_plus_o4_plus_64_fp)
add %o4, 64, %o4
ba,pt %xcc, __restore_asi_fp
add %o2, %o4, %o0
ENDPROC(memcpy_retl_o2_plus_o4_plus_64_fp)
ENTRY(memcpy_retl_o2_plus_o5_fp)
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_64_fp)
add %o5, 64, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_64_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_56_fp)
add %o5, 56, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_56_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_48_fp)
add %o5, 48, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_48_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_40_fp)
add %o5, 40, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_40_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_32_fp)
add %o5, 32, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_32_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_24_fp)
add %o5, 24, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_24_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_16_fp)
add %o5, 16, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_16_fp)
ENTRY(memcpy_retl_o2_plus_o5_plus_8_fp)
add %o5, 8, %o5
ba,pt %xcc, __restore_asi_fp
add %o2, %o5, %o0
ENDPROC(memcpy_retl_o2_plus_o5_plus_8_fp)
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 13,345
|
arch/sparc/lib/U3memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* U3memcpy.S: UltraSparc-III optimized memcpy.
*
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF 0x04
#ifdef MEMCPY_DEBUG
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#define GLOBAL_SPARE %g5
#endif
#ifndef EX_LD
#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef STORE
#define STORE(type,src,addr) type src, [addr]
#endif
#ifndef STORE_BLK
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
#endif
#ifndef FUNC_NAME
#define FUNC_NAME U3memcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC
#define XCC xcc
#endif
.register %g2,#scratch
.register %g3,#scratch
/* Special/non-trivial issues of this code:
*
* 1) %o5 is preserved from VISEntryHalf to VISExitHalf
* 2) Only low 32 FPU registers are used so that only the
* lower half of the FPU register set is dirtied by this
* code. This is especially important in the kernel.
* 3) This code never prefetches cachelines past the end
* of the source buffer.
*/
.text
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
__restore_fp:
VISExitHalf
retl
nop
ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
add %g1, 1, %g1
add %g2, %g1, %g2
ba,pt %xcc, __restore_fp
add %o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
ENTRY(U3_retl_o2_plus_g2_fp)
ba,pt %xcc, __restore_fp
add %o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_fp)
ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
add %g2, 8, %g2
ba,pt %xcc, __restore_fp
add %o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
ENTRY(U3_retl_o2)
retl
mov %o2, %o0
ENDPROC(U3_retl_o2)
ENTRY(U3_retl_o2_plus_1)
retl
add %o2, 1, %o0
ENDPROC(U3_retl_o2_plus_1)
ENTRY(U3_retl_o2_plus_4)
retl
add %o2, 4, %o0
ENDPROC(U3_retl_o2_plus_4)
ENTRY(U3_retl_o2_plus_8)
retl
add %o2, 8, %o0
ENDPROC(U3_retl_o2_plus_8)
ENTRY(U3_retl_o2_plus_g1_plus_1)
add %g1, 1, %g1
retl
add %o2, %g1, %o0
ENDPROC(U3_retl_o2_plus_g1_plus_1)
ENTRY(U3_retl_o2_fp)
ba,pt %xcc, __restore_fp
mov %o2, %o0
ENDPROC(U3_retl_o2_fp)
ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
sll %o3, 6, %o3
add %o3, 0x80, %o3
ba,pt %xcc, __restore_fp
add %o2, %o3, %o0
ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
sll %o3, 6, %o3
add %o3, 0x40, %o3
ba,pt %xcc, __restore_fp
add %o2, %o3, %o0
ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
ENTRY(U3_retl_o2_plus_GS_plus_0x10)
add GLOBAL_SPARE, 0x10, GLOBAL_SPARE
retl
add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
ENTRY(U3_retl_o2_plus_GS_plus_0x08)
add GLOBAL_SPARE, 0x08, GLOBAL_SPARE
retl
add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
ENTRY(U3_retl_o2_and_7_plus_GS)
and %o2, 7, %o2
retl
add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS)
ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
add GLOBAL_SPARE, 8, GLOBAL_SPARE
and %o2, 7, %o2
retl
add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
#endif
.align 64
/* The cheetah's flexible spine, oversized liver, enlarged heart,
* slender muscular body, and claws make it the swiftest hunter
* in Africa and the fastest animal on land. Can reach speeds
* of up to 2.4GB per second.
*/
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
srlx %o2, 31, %g2
cmp %g2, 0
/* software trap 5 "Range Check" if len >= 0x80000000 */
tne %xcc, 5
PREAMBLE
mov %o0, %o4
/* if len == 0 */
cmp %o2, 0
be,pn %XCC, end_return
or %o0, %o1, %o3
/* if len < 16 */
cmp %o2, 16
blu,a,pn %XCC, less_than_16
or %o3, %o2, %o3
/* if len < 192 */
cmp %o2, (3 * 64)
blu,pt %XCC, less_than_192
andcc %o3, 0x7, %g0
/* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve
* o5 from here until we hit VISExitHalf.
*/
VISEntryHalf
/* Is 'dst' already aligned on an 64-byte boundary? */
andcc %o0, 0x3f, %g2
be,pt %XCC, 2f
/* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
* of bytes to copy to make 'dst' 64-byte aligned. We pre-
* subtract this from 'len'.
*/
sub %o0, %o1, GLOBAL_SPARE
sub %g2, 0x40, %g2
sub %g0, %g2, %g2
sub %o2, %g2, %o2
andcc %g2, 0x7, %g1
be,pt %icc, 2f
and %g2, 0x38, %g2
1: subcc %g1, 0x1, %g1
EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1)
EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1)
bgu,pt %XCC, 1b
add %o1, 0x1, %o1
add %o1, GLOBAL_SPARE, %o0
2: cmp %g2, 0x0
and %o1, 0x7, %g1
be,pt %icc, 3f
alignaddr %o1, %g0, %o1
EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2)
1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %icc, 3f
add %o0, 0x8, %o0
EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f2
EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pt %icc, 1b
add %o0, 0x8, %o0
3: LOAD(prefetch, %o1 + 0x000, #one_read)
LOAD(prefetch, %o1 + 0x040, #one_read)
andn %o2, (0x40 - 1), GLOBAL_SPARE
LOAD(prefetch, %o1 + 0x080, #one_read)
LOAD(prefetch, %o1 + 0x0c0, #one_read)
LOAD(prefetch, %o1 + 0x100, #one_read)
EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2)
LOAD(prefetch, %o1 + 0x140, #one_read)
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2)
LOAD(prefetch, %o1 + 0x180, #one_read)
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f0, %f2, %f16
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2)
faligndata %f2, %f4, %f18
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2)
faligndata %f4, %f6, %f20
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2)
faligndata %f6, %f8, %f22
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2)
faligndata %f8, %f10, %f24
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2)
faligndata %f10, %f12, %f26
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)
subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
add %o1, 0x40, %o1
bgu,pt %XCC, 1f
srl GLOBAL_SPARE, 6, %o3
ba,pt %xcc, 2f
nop
.align 64
1:
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
add %o0, 0x40, %o0
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
subcc %o3, 0x01, %o3
faligndata %f6, %f8, %f22
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f8, %f10, %f24
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f10, %f12, %f26
bg,pt %XCC, 1b
add %o1, 0x40, %o1
/* Finally we copy the last full 64-byte block. */
2:
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f12, %f14, %f28
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80)
faligndata %f14, %f0, %f30
EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80)
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f0, %f2, %f16
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f2, %f4, %f18
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f4, %f6, %f20
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f6, %f8, %f22
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40)
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40)
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
/* Now we copy the (len modulo 64) bytes at the end.
* Note how we borrow the %f0 loaded above.
*
* Also notice how this code is careful not to perform a
* load past the end of the src buffer.
*/
and %o2, 0x3f, %o2
andcc %o2, 0x38, %g2
be,pn %XCC, 2f
subcc %g2, 0x8, %g2
be,pn %XCC, 2f
cmp %g1, 0
sub %o2, %g2, %o2
be,a,pt %XCC, 1f
EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2)
1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
be,pn %XCC, 2f
add %o0, 0x8, %o0
EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2)
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8)
bne,pn %XCC, 1b
add %o0, 0x8, %o0
/* If anything is left, we copy it one byte at a time.
* Note that %g1 is (src & 0x3) saved above before the
* alignaddr was performed.
*/
2:
cmp %o2, 0
add %o1, %g1, %o1
VISExitHalf
be,pn %XCC, end_return
sub %o0, %o1, %o3
andcc %g1, 0x7, %g0
bne,pn %icc, 90f
andcc %o2, 0x8, %g0
be,pt %icc, 1f
nop
EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x8, %o1
sub %o2, 8, %o2
1: andcc %o2, 0x4, %g0
be,pt %icc, 1f
nop
EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x4, %o1
sub %o2, 4, %o2
1: andcc %o2, 0x2, %g0
be,pt %icc, 1f
nop
EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
add %o1, 0x2, %o1
sub %o2, 2, %o2
1: andcc %o2, 0x1, %g0
be,pt %icc, end_return
nop
EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
ba,pt %xcc, end_return
EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)
.align 64
/* 16 <= len < 192 */
less_than_192:
bne,pn %XCC, 75f
sub %o0, %o1, %o3
72:
andn %o2, 0xf, GLOBAL_SPARE
and %o2, 0xf, %o2
1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE
EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
add %o1, 0x8, %o1
EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
bgu,pt %XCC, 1b
add %o1, 0x8, %o1
73: andcc %o2, 0x8, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x8, %o2
EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
add %o1, 0x8, %o1
1: andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
add %o1, 0x4, %o1
1: cmp %o2, 0
be,pt %XCC, end_return
nop
ba,pt %xcc, 90f
nop
75:
andcc %o0, 0x7, %g1
sub %g1, 0x8, %g1
be,pn %icc, 2f
sub %g0, %g1, %g1
sub %o2, %g1, %o2
1: subcc %g1, 1, %g1
EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
bgu,pt %icc, 1b
add %o1, 1, %o1
2: add %o1, %o3, %o0
andcc %o1, 0x7, %g1
bne,pt %icc, 8f
sll %g1, 3, %g1
cmp %o2, 16
bgeu,pt %icc, 72b
nop
ba,a,pt %xcc, 73b
8: mov 64, %o3
andn %o1, 0x7, %o1
EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
sub %o3, %g1, %o3
andn %o2, 0x7, GLOBAL_SPARE
sllx %g2, %g1, %g2
1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE
add %o1, 0x8, %o1
srlx %g3, %o3, %o5
or %o5, %g2, %o5
EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
srl %g1, 3, %g1
andcc %o2, 0x7, %o2
be,pn %icc, end_return
add %o1, %g1, %o1
ba,pt %xcc, 90f
sub %o0, %o1, %o3
.align 64
/* 0 < len < 16 */
less_than_16:
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, %o3
1:
subcc %o2, 4, %o2
EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
bgu,pt %XCC, 1b
add %o1, 4, %o1
end_return:
retl
mov EX_RETVAL(%o4), %o0
.align 32
90:
subcc %o2, 1, %o2
EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
mov EX_RETVAL(%o4), %o0
.size FUNC_NAME, .-FUNC_NAME
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,618
|
arch/sparc/lib/NG4memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* NG4memcpy.S: Niagara-4 optimized memcpy.
*
* Copyright (C) 2012 David S. Miller (davem@davemloft.net)
*/
#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE %g7
#else
#define ASI_BLK_INIT_QUAD_LDD_P 0xe2
#define FPRS_FEF 0x04
/* On T4 it is very expensive to access ASRs like %fprs and
* %asi, avoiding a read or a write can save ~50 cycles.
*/
#define FPU_ENTER \
rd %fprs, %o5; \
andcc %o5, FPRS_FEF, %g0; \
be,a,pn %icc, 999f; \
wr %g0, FPRS_FEF, %fprs; \
999:
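/* be,a: when FEF is already set the branch falls through and the
 * annul bit cancels the wr; when FEF is clear the taken branch
 * executes the wr in its delay slot. Either way FEF is set at 999:,
 * and the costly %fprs write happens only when actually needed.
 */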
#ifdef MEMCPY_DEBUG
#define VISEntryHalf FPU_ENTER; \
clr %g1; clr %g2; clr %g3; clr %g5; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf FPU_ENTER
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#define GLOBAL_SPARE %g5
#endif
#ifndef STORE_ASI
#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P
#else
#define STORE_ASI 0x80 /* ASI_P */
#endif
#endif
#if !defined(EX_LD) && !defined(EX_ST)
#define NON_USER_COPY
#endif
#ifndef EX_LD
#define EX_LD(x,y) x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y) x
#endif
#ifndef EX_ST
#define EX_ST(x,y) x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef STORE
#ifndef MEMCPY_DEBUG
#define STORE(type,src,addr) type src, [addr]
#else
#define STORE(type,src,addr) type##a src, [addr] %asi
#endif
#endif
#ifndef STORE_INIT
#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI
#endif
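/* STORE_INIT uses an initializing-store ASI: the first store to a
 * 64-byte line allocates it zero-filled without reading memory.
 * This requires a 64-byte aligned destination and a closing membar.
 */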
#ifndef FUNC_NAME
#define FUNC_NAME NG4memcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC
#define XCC xcc
#endif
.register %g2,#scratch
.register %g3,#scratch
.text
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
#endif
.align 64
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
#ifdef MEMCPY_DEBUG
wr %g0, 0x80, %asi
#endif
srlx %o2, 31, %g2
cmp %g2, 0
tne %XCC, 5
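/* Sanity trap: srlx by 31 is nonzero iff any bit above bit 30 of the
 * length is set, i.e. a negative or absurdly large size was passed.
 */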
PREAMBLE
mov %o0, %o3
brz,pn %o2, .Lexit
cmp %o2, 3
ble,pn %icc, .Ltiny
cmp %o2, 19
ble,pn %icc, .Lsmall
or %o0, %o1, %g2
cmp %o2, 128
bl,pn %icc, .Lmedium
nop
.Llarge:/* len >= 0x80 */
/* First get dest 8 byte aligned. */
sub %g0, %o0, %g1
and %g1, 0x7, %g1
brz,pt %g1, 51f
sub %o2, %g1, %o2
1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong)
LOAD(prefetch, %o1 + 0x080, #n_reads_strong)
LOAD(prefetch, %o1 + 0x0c0, #n_reads_strong)
LOAD(prefetch, %o1 + 0x100, #n_reads_strong)
LOAD(prefetch, %o1 + 0x140, #n_reads_strong)
LOAD(prefetch, %o1 + 0x180, #n_reads_strong)
LOAD(prefetch, %o1 + 0x1c0, #n_reads_strong)
LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
/* Check if we can use the straight fully aligned
* loop, or we require the alignaddr/faligndata variant.
*/
andcc %o1, 0x7, %o5
bne,pn %icc, .Llarge_src_unaligned
sub %g0, %o0, %g1
/* Legitimize the use of initializing stores by getting dest
* to be 64-byte aligned.
*/
and %g1, 0x3f, %g1
brz,pt %g1, .Llarge_aligned
sub %o2, %g1, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
add %o1, 8, %o1
subcc %g1, 8, %g1
add %o0, 8, %o0
bne,pt %icc, 1b
EX_ST(STORE(stx, %g2, %o0 - 0x08), memcpy_retl_o2_plus_g1_plus_8)
.Llarge_aligned:
/* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */
andn %o2, 0x3f, %o4
sub %o2, %o4, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o4)
add %o1, 0x40, %o1
EX_LD(LOAD(ldx, %o1 - 0x38, %g2), memcpy_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
EX_LD(LOAD(ldx, %o1 - 0x30, %g3), memcpy_retl_o2_plus_o4_plus_64)
EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_64)
EX_LD(LOAD(ldx, %o1 - 0x20, %o5), memcpy_retl_o2_plus_o4_plus_64)
EX_ST(STORE_INIT(%g1, %o0), memcpy_retl_o2_plus_o4_plus_64)
add %o0, 0x08, %o0
EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_56)
add %o0, 0x08, %o0
EX_LD(LOAD(ldx, %o1 - 0x18, %g2), memcpy_retl_o2_plus_o4_plus_48)
EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_48)
add %o0, 0x08, %o0
EX_LD(LOAD(ldx, %o1 - 0x10, %g3), memcpy_retl_o2_plus_o4_plus_40)
EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_40)
add %o0, 0x08, %o0
EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_32)
EX_ST(STORE_INIT(%o5, %o0), memcpy_retl_o2_plus_o4_plus_32)
add %o0, 0x08, %o0
EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_24)
add %o0, 0x08, %o0
EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_16)
add %o0, 0x08, %o0
EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
LOAD(prefetch, %o1 + 0x200, #n_reads_strong)
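/* The block-init stores above are only weakly ordered; the membar
 * below is presumably what guarantees they are globally visible
 * before the copied data can be consumed.
 */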
membar #StoreLoad | #StoreStore
brz,pn %o2, .Lexit
cmp %o2, 19
ble,pn %icc, .Lsmall_unaligned
nop
ba,a,pt %icc, .Lmedium_noprefetch
.Lexit: retl
mov EX_RETVAL(%o3), %o0
.Llarge_src_unaligned:
#ifdef NON_USER_COPY
VISEntryHalfFast(.Lmedium_vis_entry_fail)
#else
VISEntryHalf
#endif
andn %o2, 0x3f, %o4
sub %o2, %o4, %o2
alignaddr %o1, %g0, %g1
add %o1, %o4, %o1
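/* alignaddr rounded %o1 down to an 8-byte boundary into %g1 and
 * latched the byte offset in %gsr; each faligndata below extracts
 * one aligned destination dword from a pair of source dwords.
 */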
EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), memcpy_retl_o2_plus_o4)
1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), memcpy_retl_o2_plus_o4)
subcc %o4, 0x40, %o4
EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), memcpy_retl_o2_plus_o4_plus_64)
EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), memcpy_retl_o2_plus_o4_plus_64)
EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), memcpy_retl_o2_plus_o4_plus_64)
EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), memcpy_retl_o2_plus_o4_plus_64)
EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), memcpy_retl_o2_plus_o4_plus_64)
EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), memcpy_retl_o2_plus_o4_plus_64)
faligndata %f0, %f2, %f16
EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), memcpy_retl_o2_plus_o4_plus_64)
faligndata %f2, %f4, %f18
add %g1, 0x40, %g1
faligndata %f4, %f6, %f20
faligndata %f6, %f8, %f22
faligndata %f8, %f10, %f24
faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
EX_ST_FP(STORE(std, %f16, %o0 + 0x00), memcpy_retl_o2_plus_o4_plus_64)
EX_ST_FP(STORE(std, %f18, %o0 + 0x08), memcpy_retl_o2_plus_o4_plus_56)
EX_ST_FP(STORE(std, %f20, %o0 + 0x10), memcpy_retl_o2_plus_o4_plus_48)
EX_ST_FP(STORE(std, %f22, %o0 + 0x18), memcpy_retl_o2_plus_o4_plus_40)
EX_ST_FP(STORE(std, %f24, %o0 + 0x20), memcpy_retl_o2_plus_o4_plus_32)
EX_ST_FP(STORE(std, %f26, %o0 + 0x28), memcpy_retl_o2_plus_o4_plus_24)
EX_ST_FP(STORE(std, %f28, %o0 + 0x30), memcpy_retl_o2_plus_o4_plus_16)
EX_ST_FP(STORE(std, %f30, %o0 + 0x38), memcpy_retl_o2_plus_o4_plus_8)
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
#ifdef NON_USER_COPY
VISExitHalfFast
#else
VISExitHalf
#endif
brz,pn %o2, .Lexit
cmp %o2, 19
ble,pn %icc, .Lsmall_unaligned
nop
ba,a,pt %icc, .Lmedium_unaligned
#ifdef NON_USER_COPY
.Lmedium_vis_entry_fail:
or %o0, %o1, %g2
#endif
.Lmedium:
LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
andcc %g2, 0x7, %g0
bne,pn %icc, .Lmedium_unaligned
nop
.Lmedium_noprefetch:
andncc %o2, 0x20 - 1, %o5
be,pn %icc, 2f
sub %o2, %o5, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), memcpy_retl_o2_plus_o5)
EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5)
add %o1, 0x20, %o1
subcc %o5, 0x20, %o5
EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
bne,pt %icc, 1b
add %o0, 0x20, %o0
2: andcc %o2, 0x18, %o5
be,pt %icc, 3f
sub %o2, %o5, %o2
1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5)
add %o1, 0x08, %o1
add %o0, 0x08, %o0
subcc %o5, 0x08, %o5
bne,pt %icc, 1b
EX_ST(STORE(stx, %g1, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8)
3: brz,pt %o2, .Lexit
cmp %o2, 0x04
bl,pn %icc, .Ltiny
nop
EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2)
add %o1, 0x04, %o1
add %o0, 0x04, %o0
subcc %o2, 0x04, %o2
bne,pn %icc, .Ltiny
EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_4)
ba,a,pt %icc, .Lexit
.Lmedium_unaligned:
/* First get dest 8 byte aligned. */
sub %g0, %o0, %g1
and %g1, 0x7, %g1
brz,pt %g1, 2f
sub %o2, %g1, %o2
1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
add %o1, 1, %o1
subcc %g1, 1, %g1
add %o0, 1, %o0
bne,pt %icc, 1b
EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
2:
and %o1, 0x7, %g1
brz,pn %g1, .Lmedium_noprefetch
sll %g1, 3, %g1
mov 64, %g2
sub %g2, %g1, %g2
andn %o1, 0x7, %o1
EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2)
sllx %o4, %g1, %o4
andn %o2, 0x08 - 1, %o5
sub %o2, %o5, %o2
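/* Shift-and-merge loop: %o4 carries the previous aligned dword
 * shifted left by the misalignment (%g1 bits); each new dword
 * contributes its high bits via srlx by %g2 = 64 - %g1.
 */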
1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5)
add %o1, 0x08, %o1
subcc %o5, 0x08, %o5
srlx %g3, %g2, GLOBAL_SPARE
or GLOBAL_SPARE, %o4, GLOBAL_SPARE
EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8)
add %o0, 0x08, %o0
bne,pt %icc, 1b
sllx %g3, %g1, %o4
srl %g1, 3, %g1
add %o1, %g1, %o1
brz,pn %o2, .Lexit
nop
ba,pt %icc, .Lsmall_unaligned
.Ltiny:
EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
EX_ST(STORE(stb, %g1, %o0 + 0x00), memcpy_retl_o2_plus_1)
EX_LD(LOAD(ldub, %o1 + 0x01, %g1), memcpy_retl_o2)
subcc %o2, 1, %o2
be,pn %icc, .Lexit
EX_ST(STORE(stb, %g1, %o0 + 0x01), memcpy_retl_o2_plus_1)
EX_LD(LOAD(ldub, %o1 + 0x02, %g1), memcpy_retl_o2)
ba,pt %icc, .Lexit
EX_ST(STORE(stb, %g1, %o0 + 0x02), memcpy_retl_o2)
.Lsmall:
andcc %g2, 0x3, %g0
bne,pn %icc, .Lsmall_unaligned
andn %o2, 0x4 - 1, %o5
sub %o2, %o5, %o2
1:
EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5)
add %o1, 0x04, %o1
subcc %o5, 0x04, %o5
add %o0, 0x04, %o0
bne,pt %icc, 1b
EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4)
brz,pt %o2, .Lexit
nop
ba,a,pt %icc, .Ltiny
.Lsmall_unaligned:
1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2)
add %o1, 1, %o1
add %o0, 1, %o0
subcc %o2, 1, %o2
bne,pt %icc, 1b
EX_ST(STORE(stb, %g1, %o0 - 0x01), memcpy_retl_o2_plus_1)
ba,a,pt %icc, .Lexit
nop
.size FUNC_NAME, .-FUNC_NAME
|
AirFortressIlikara/LS2K0300-linux-4.19
| 17,353
|
arch/sparc/lib/checksum_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* checksum.S: Sparc optimized checksum code.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Linux/Alpha checksum c-code
* Linux/ix86 inline checksum assembly
* RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code)
* David Mosberger-Tang for optimized reference c-code
* BSD4.4 portable checksum routine
*/
#include <asm/errno.h>
#include <asm/export.h>
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
ldd [buf + offset + 0x00], t0; \
ldd [buf + offset + 0x08], t2; \
addxcc t0, sum, sum; \
addxcc t1, sum, sum; \
ldd [buf + offset + 0x10], t4; \
addxcc t2, sum, sum; \
addxcc t3, sum, sum; \
ldd [buf + offset + 0x18], t0; \
addxcc t4, sum, sum; \
addxcc t5, sum, sum; \
addxcc t0, sum, sum; \
addxcc t1, sum, sum;
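/* Each CSUM_BIGCHUNK folds 0x20 bytes into sum as eight 32-bit
 * words; addxcc chains the carry from word to word and the caller
 * sinks the final carry with a trailing addx.
 */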
#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3) \
ldd [buf - offset - 0x08], t0; \
ldd [buf - offset - 0x00], t2; \
addxcc t0, sum, sum; \
addxcc t1, sum, sum; \
addxcc t2, sum, sum; \
addxcc t3, sum, sum;
/* Do end cruft out of band to get better cache patterns. */
csum_partial_end_cruft:
be 1f ! caller asks %o1 & 0x8
andcc %o1, 4, %g0 ! nope, check for word remaining
ldd [%o0], %g2 ! load two
addcc %g2, %o2, %o2 ! add first word to sum
addxcc %g3, %o2, %o2 ! add second word as well
add %o0, 8, %o0 ! advance buf ptr
addx %g0, %o2, %o2 ! add in final carry
andcc %o1, 4, %g0 ! check again for word remaining
1: be 1f ! nope, skip this code
andcc %o1, 3, %o1 ! check for trailing bytes
ld [%o0], %g2 ! load it
addcc %g2, %o2, %o2 ! add to sum
add %o0, 4, %o0 ! advance buf ptr
addx %g0, %o2, %o2 ! add in final carry
andcc %o1, 3, %g0 ! check again for trailing bytes
1: be 1f ! no trailing bytes, return
addcc %o1, -1, %g0 ! only one byte remains?
bne 2f ! at least two bytes more
subcc %o1, 2, %o1 ! only two bytes more?
b 4f ! only one byte remains
or %g0, %g0, %o4 ! clear fake hword value
2: lduh [%o0], %o4 ! get hword
be 6f ! jmp if only hword remains
add %o0, 2, %o0 ! advance buf ptr either way
sll %o4, 16, %o4 ! create upper hword
4: ldub [%o0], %o5 ! get final byte
sll %o5, 8, %o5 ! put into place
or %o5, %o4, %o4 ! coalesce with hword (if any)
6: addcc %o4, %o2, %o2 ! add to sum
1: retl ! get outta here
addx %g0, %o2, %o0 ! add final carry into retval
/* Also do alignment out of band to get better cache patterns. */
csum_partial_fix_alignment:
cmp %o1, 6
bl cpte - 0x4
andcc %o0, 0x2, %g0
be 1f
andcc %o0, 0x4, %g0
lduh [%o0 + 0x00], %g2
sub %o1, 2, %o1
add %o0, 2, %o0
sll %g2, 16, %g2
addcc %g2, %o2, %o2
srl %o2, 16, %g3
addx %g0, %g3, %g2
sll %o2, 16, %o2
sll %g2, 16, %g3
srl %o2, 16, %o2
andcc %o0, 0x4, %g0
or %g3, %o2, %o2
1: be cpa
andcc %o1, 0xffffff80, %o3
ld [%o0 + 0x00], %g2
sub %o1, 4, %o1
addcc %g2, %o2, %o2
add %o0, 4, %o0
addx %g0, %o2, %o2
b cpa
andcc %o1, 0xffffff80, %o3
/* The common case is to get called with a nicely aligned
* buffer of size 0x20. Follow the code path for that case.
*/
.globl csum_partial
EXPORT_SYMBOL(csum_partial)
csum_partial: /* %o0=buf, %o1=len, %o2=sum */
andcc %o0, 0x7, %g0 ! alignment problems?
bne csum_partial_fix_alignment ! yep, handle it
sethi %hi(cpte - 8), %g7 ! prepare table jmp ptr
andcc %o1, 0xffffff80, %o3 ! num loop iterations
cpa: be 3f ! none to do
andcc %o1, 0x70, %g1 ! clears carry flag too
5: CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
addx %g0, %o2, %o2 ! sink in final carry
subcc %o3, 128, %o3 ! detract from loop iters
bne 5b ! more to do
add %o0, 128, %o0 ! advance buf ptr
andcc %o1, 0x70, %g1 ! clears carry flag too
3: be cpte ! nope
andcc %o1, 0xf, %g0 ! anything left at all?
srl %g1, 1, %o4 ! compute offset
sub %g7, %g1, %g7 ! adjust jmp ptr
sub %g7, %o4, %g7 ! final jmp ptr adjust
jmp %g7 + %lo(cpte - 8) ! enter the table
add %o0, %g1, %o0 ! advance buf ptr
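/* Each CSUM_LASTCHUNK below is 6 instructions (24 bytes) per 0x10
 * bytes of data, so the entry point is cpte - 8 - (g1/16)*24, i.e.
 * the g1 + g1/2 subtracted from the table pointer above.
 */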
cptbl: CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
addx %g0, %o2, %o2 ! fetch final carry
andcc %o1, 0xf, %g0 ! anything left at all?
cpte: bne csum_partial_end_cruft ! yep, handle it
andcc %o1, 8, %g0 ! check how much
cpout: retl ! get outta here
mov %o2, %o0 ! return computed csum
.globl __csum_partial_copy_start, __csum_partial_copy_end
__csum_partial_copy_start:
/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
99: ba 30f; \
a, b, %o3; \
.section __ex_table,ALLOC; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4
#define EX2(x,y) \
98: x,y; \
.section __ex_table,ALLOC; \
.align 4; \
.word 98b, 30f; \
.text; \
.align 4
#define EX3(x,y) \
98: x,y; \
.section __ex_table,ALLOC; \
.align 4; \
.word 98b, 96f; \
.text; \
.align 4
#define EXT(start,end,handler) \
.section __ex_table,ALLOC; \
.align 4; \
.word start, 0, end, handler; \
.text; \
.align 4
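/* EX wraps a single access: on a fault, the fixup computes the bytes
 * still outstanding into %o3 and branches to 30f.  EXT instead
 * records a (start, 0, end, handler) range entry so one handler can
 * serve a whole unrolled block; the handlers below apparently
 * receive the faulting instruction's offset within the block in %g2.
 */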
/* This aligned version typically executes in 8.5 superscalar cycles;
 * this is the best I can do.  I say 8.5 because the final add will
 * pair with the next ldd in the main unrolled loop.  Thus the pipe
 * is always full.
 * If you change these macros (including order of instructions),
 * please check the fixup code below as well.
 */
#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
ldd [src + off + 0x00], t0; \
ldd [src + off + 0x08], t2; \
addxcc t0, sum, sum; \
ldd [src + off + 0x10], t4; \
addxcc t1, sum, sum; \
ldd [src + off + 0x18], t6; \
addxcc t2, sum, sum; \
std t0, [dst + off + 0x00]; \
addxcc t3, sum, sum; \
std t2, [dst + off + 0x08]; \
addxcc t4, sum, sum; \
std t4, [dst + off + 0x10]; \
addxcc t5, sum, sum; \
std t6, [dst + off + 0x18]; \
addxcc t6, sum, sum; \
addxcc t7, sum, sum;
/* 12 superscalar cycles seems to be the limit for this case, so we
 * do all the ldd's together to get Viking MXCC into streaming mode.
 * Ho hum...
 */
#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
ldd [src + off + 0x00], t0; \
ldd [src + off + 0x08], t2; \
ldd [src + off + 0x10], t4; \
ldd [src + off + 0x18], t6; \
st t0, [dst + off + 0x00]; \
addxcc t0, sum, sum; \
st t1, [dst + off + 0x04]; \
addxcc t1, sum, sum; \
st t2, [dst + off + 0x08]; \
addxcc t2, sum, sum; \
st t3, [dst + off + 0x0c]; \
addxcc t3, sum, sum; \
st t4, [dst + off + 0x10]; \
addxcc t4, sum, sum; \
st t5, [dst + off + 0x14]; \
addxcc t5, sum, sum; \
st t6, [dst + off + 0x18]; \
addxcc t6, sum, sum; \
st t7, [dst + off + 0x1c]; \
addxcc t7, sum, sum;
/* Yuck, 6 superscalar cycles... */
#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
ldd [src - off - 0x08], t0; \
ldd [src - off - 0x00], t2; \
addxcc t0, sum, sum; \
st t0, [dst - off - 0x08]; \
addxcc t1, sum, sum; \
st t1, [dst - off - 0x04]; \
addxcc t2, sum, sum; \
st t2, [dst - off - 0x00]; \
addxcc t3, sum, sum; \
st t3, [dst - off + 0x04];
/* Handle the end cruft code out of band for better cache patterns. */
cc_end_cruft:
be 1f
andcc %o3, 4, %g0
EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf)
add %o1, 8, %o1
addcc %g2, %g7, %g7
add %o0, 8, %o0
addxcc %g3, %g7, %g7
EX2(st %g2, [%o1 - 0x08])
addx %g0, %g7, %g7
andcc %o3, 4, %g0
EX2(st %g3, [%o1 - 0x04])
1: be 1f
andcc %o3, 3, %o3
EX(ld [%o0 + 0x00], %g2, add %o3, 4)
add %o1, 4, %o1
addcc %g2, %g7, %g7
EX2(st %g2, [%o1 - 0x04])
addx %g0, %g7, %g7
andcc %o3, 3, %g0
add %o0, 4, %o0
1: be 1f
addcc %o3, -1, %g0
bne 2f
subcc %o3, 2, %o3
b 4f
or %g0, %g0, %o4
2: EX(lduh [%o0 + 0x00], %o4, add %o3, 2)
add %o0, 2, %o0
EX2(sth %o4, [%o1 + 0x00])
be 6f
add %o1, 2, %o1
sll %o4, 16, %o4
4: EX(ldub [%o0 + 0x00], %o5, add %g0, 1)
EX2(stb %o5, [%o1 + 0x00])
sll %o5, 8, %o5
or %o5, %o4, %o4
6: addcc %o4, %g7, %g7
1: retl
addx %g0, %g7, %o0
/* Also, handle the alignment code out of band. */
cc_dword_align:
cmp %g1, 16
bge 1f
srl %g1, 1, %o3
2: cmp %o3, 0
be,a ccte
andcc %g1, 0xf, %o3
andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits)
be,a 2b
srl %o3, 1, %o3
1: andcc %o0, 0x1, %g0
bne ccslow
andcc %o0, 0x2, %g0
be 1f
andcc %o0, 0x4, %g0
EX(lduh [%o0 + 0x00], %g4, add %g1, 0)
sub %g1, 2, %g1
EX2(sth %g4, [%o1 + 0x00])
add %o0, 2, %o0
sll %g4, 16, %g4
addcc %g4, %g7, %g7
add %o1, 2, %o1
srl %g7, 16, %g3
addx %g0, %g3, %g4
sll %g7, 16, %g7
sll %g4, 16, %g3
srl %g7, 16, %g7
andcc %o0, 0x4, %g0
or %g3, %g7, %g7
1: be 3f
andcc %g1, 0xffffff80, %g0
EX(ld [%o0 + 0x00], %g4, add %g1, 0)
sub %g1, 4, %g1
EX2(st %g4, [%o1 + 0x00])
add %o0, 4, %o0
addcc %g4, %g7, %g7
add %o1, 4, %o1
addx %g0, %g7, %g7
b 3f
andcc %g1, 0xffffff80, %g0
/* Sun, you just can't beat me, you just can't. Stop trying,
* give up. I'm serious, I am going to kick the living shit
* out of you, game over, lights out.
*/
.align 8
.globl __csum_partial_copy_sparc_generic
EXPORT_SYMBOL(__csum_partial_copy_sparc_generic)
__csum_partial_copy_sparc_generic:
/* %o0=src, %o1=dest, %g1=len, %g7=sum */
xor %o0, %o1, %o4 ! get changing bits
andcc %o4, 3, %g0 ! check for mismatched alignment
bne ccslow ! better this than unaligned/fixups
andcc %o0, 7, %g0 ! need to align things?
bne cc_dword_align ! yes, we check for short lengths there
andcc %g1, 0xffffff80, %g0 ! can we use unrolled loop?
3: be 3f ! nope, less than one loop remains
andcc %o1, 4, %g0 ! dest aligned on 4 or 8 byte boundary?
be ccdbl + 4 ! 8 byte aligned, kick ass
5: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
10: EXT(5b, 10b, 20f) ! note for exception handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
add %o0, 128, %o0 ! advance src ptr
bne 5b ! we did not go negative, continue looping
add %o1, 128, %o1 ! advance dest ptr
3: andcc %g1, 0x70, %o2 ! can use table?
ccmerge:be ccte ! nope, go and check for end cruft
andcc %g1, 0xf, %o3 ! get low bits of length (clears carry btw)
srl %o2, 1, %o4 ! begin negative offset computation
sethi %hi(12f), %o5 ! set up table ptr end
add %o0, %o2, %o0 ! advance src ptr
sub %o5, %o4, %o5 ! continue table calculation
sll %o2, 1, %g2 ! constant multiplies are fun...
sub %o5, %g2, %o5 ! some more adjustments
jmp %o5 + %lo(12f) ! jump into it, duff style, wheee...
add %o1, %o2, %o1 ! advance dest ptr (carry is clear btw)
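/* Each CSUMCOPY_LASTCHUNK below is 10 instructions (40 bytes) per
 * 0x10 bytes of data, so the o2/2 + o2*2 backed off above equals
 * (o2/16)*40.
 */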
cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
12: EXT(cctbl, 12b, 22f) ! note for exception table handling
addx %g0, %g7, %g7
andcc %o3, 0xf, %g0 ! check for low bits set
ccte: bne cc_end_cruft ! something left, handle it out of band
andcc %o3, 8, %g0 ! begin checks for that code
retl ! return
mov %g7, %o0 ! give em the computed checksum
ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
11: EXT(ccdbl, 11b, 21f) ! note for exception table handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
add %o0, 128, %o0 ! advance src ptr
bne ccdbl ! we did not go negative, continue looping
add %o1, 128, %o1 ! advance dest ptr
b ccmerge ! finish it off, above
andcc %g1, 0x70, %o2 ! can use table? (clears carry btw)
ccslow: cmp %g1, 0
mov 0, %g5
bleu 4f
andcc %o0, 1, %o5
be,a 1f
srl %g1, 1, %g4
sub %g1, 1, %g1
EX(ldub [%o0], %g5, add %g1, 1)
add %o0, 1, %o0
EX2(stb %g5, [%o1])
srl %g1, 1, %g4
add %o1, 1, %o1
1: cmp %g4, 0
be,a 3f
andcc %g1, 1, %g0
andcc %o0, 2, %g0
be,a 1f
srl %g4, 1, %g4
EX(lduh [%o0], %o4, add %g1, 0)
sub %g1, 2, %g1
srl %o4, 8, %g2
sub %g4, 1, %g4
EX2(stb %g2, [%o1])
add %o4, %g5, %g5
EX2(stb %o4, [%o1 + 1])
add %o0, 2, %o0
srl %g4, 1, %g4
add %o1, 2, %o1
1: cmp %g4, 0
be,a 2f
andcc %g1, 2, %g0
EX3(ld [%o0], %o4)
5: srl %o4, 24, %g2
srl %o4, 16, %g3
EX2(stb %g2, [%o1])
srl %o4, 8, %g2
EX2(stb %g3, [%o1 + 1])
add %o0, 4, %o0
EX2(stb %g2, [%o1 + 2])
addcc %o4, %g5, %g5
EX2(stb %o4, [%o1 + 3])
addx %g5, %g0, %g5 ! I am now too lazy to optimize this (question if it
add %o1, 4, %o1 ! is worthwhile). Maybe some day - with the sll/srl
subcc %g4, 1, %g4 ! tricks
bne,a 5b
EX3(ld [%o0], %o4)
sll %g5, 16, %g2
srl %g5, 16, %g5
srl %g2, 16, %g2
andcc %g1, 2, %g0
add %g2, %g5, %g5
2: be,a 3f
andcc %g1, 1, %g0
EX(lduh [%o0], %o4, and %g1, 3)
andcc %g1, 1, %g0
srl %o4, 8, %g2
add %o0, 2, %o0
EX2(stb %g2, [%o1])
add %g5, %o4, %g5
EX2(stb %o4, [%o1 + 1])
add %o1, 2, %o1
3: be,a 1f
sll %g5, 16, %o4
EX(ldub [%o0], %g2, add %g0, 1)
sll %g2, 8, %o4
EX2(stb %g2, [%o1])
add %g5, %o4, %g5
sll %g5, 16, %o4
1: addcc %o4, %g5, %g5
srl %g5, 16, %o4
addx %g0, %o4, %g5
orcc %o5, %g0, %g0
be 4f
srl %g5, 8, %o4
and %g5, 0xff, %g2
and %o4, 0xff, %o4
sll %g2, 8, %g2
or %g2, %o4, %g5
4: addcc %g7, %g5, %g7
retl
addx %g0, %g7, %o0
__csum_partial_copy_end:
/* We do these strange calculations for the csum_*_from_user case only, i.e.
 * we only bother with faults on loads... */
/* o2 = ((g2%20)&3)*8
* o3 = g1 - (g2/20)*32 - o2 */
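/* %g2 here is evidently the faulting instruction's offset within the
 * 5b..10b block: each CSUMCOPY_BIGCHUNK is 20 instructions covering
 * 0x20 bytes, hence the subtract-20 / subtract-32 loop below.
 */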
20:
cmp %g2, 20
blu,a 1f
and %g2, 3, %o2
sub %g1, 32, %g1
b 20b
sub %g2, 20, %g2
1:
sll %o2, 3, %o2
b 31f
sub %g1, %o2, %o3
/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
* o3 = g1 - (g2/16)*32 - o2 */
21:
andcc %g2, 15, %o3
srl %g2, 4, %g2
be,a 1f
clr %o2
add %o3, 1, %o3
and %o3, 14, %o3
sll %o3, 3, %o2
1:
sll %g2, 5, %g2
sub %g1, %g2, %o3
b 31f
sub %o3, %o2, %o3
/* o0 += (g2/10)*16 - 0x70
 * o1 += (g2/10)*16 - 0x70
* o2 = (g2 % 10) ? 8 : 0
* o3 += 0x70 - (g2/10)*16 - o2 */
22:
cmp %g2, 10
blu,a 1f
sub %o0, 0x70, %o0
add %o0, 16, %o0
add %o1, 16, %o1
sub %o3, 16, %o3
b 22b
sub %g2, 10, %g2
1:
sub %o1, 0x70, %o1
add %o3, 0x70, %o3
clr %o2
tst %g2
bne,a 1f
mov 8, %o2
1:
b 31f
sub %o3, %o2, %o3
96:
and %g1, 3, %g1
sll %g4, 2, %g4
add %g1, %g4, %o3
30:
/* %o1 is dst
* %o3 is # bytes to zero out
* %o4 is faulting address
* %o5 is %pc where fault occurred */
clr %o2
31:
/* %o0 is src
* %o1 is dst
* %o2 is # of bytes to copy from src to dst
* %o3 is # bytes to zero out
* %o4 is faulting address
* %o5 is %pc where fault occurred */
save %sp, -104, %sp
mov %i5, %o0
mov %i7, %o1
mov %i4, %o2
call lookup_fault
mov %g7, %i4
cmp %o0, 2
bne 1f
add %g0, -EFAULT, %i5
tst %i2
be 2f
mov %i0, %o1
mov %i1, %o0
5:
call memcpy
mov %i2, %o2
tst %o0
bne,a 2f
add %i3, %i2, %i3
add %i1, %i2, %i1
2:
mov %i1, %o0
6:
call __bzero
mov %i3, %o1
1:
ld [%sp + 168], %o2 ! struct_ptr of parent
st %i5, [%o2]
ret
restore
.section __ex_table,#alloc
.align 4
.word 5b,2
.word 6b,2
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,066
|
arch/sparc/lib/memset.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* linux/arch/sparc/lib/memset.S: Sparc optimized memset, bzero and clear_user code
* Copyright (C) 1991,1996 Free Software Foundation
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*
 * Calls to memset return the initial %o0.  Calls to bzero return 0 on
 * success, or the number of bytes not yet set if an exception occurs
 * and we were called as clear_user.
*/
#include <asm/ptrace.h>
#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
99: ba 30f; \
a, b, %o0; \
.section __ex_table,ALLOC; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4
#define EXT(start,end,handler) \
.section __ex_table,ALLOC; \
.align 4; \
.word start, 0, end, handler; \
.text; \
.align 4
/* Please don't change these macros, unless you change the logic
* in the .fixup section below as well.
* Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
#define ZERO_BIG_BLOCK(base, offset, source) \
std source, [base + offset + 0x00]; \
std source, [base + offset + 0x08]; \
std source, [base + offset + 0x10]; \
std source, [base + offset + 0x18]; \
std source, [base + offset + 0x20]; \
std source, [base + offset + 0x28]; \
std source, [base + offset + 0x30]; \
std source, [base + offset + 0x38];
#define ZERO_LAST_BLOCKS(base, offset, source) \
std source, [base - offset - 0x38]; \
std source, [base - offset - 0x30]; \
std source, [base - offset - 0x28]; \
std source, [base - offset - 0x20]; \
std source, [base - offset - 0x18]; \
std source, [base - offset - 0x10]; \
std source, [base - offset - 0x08]; \
std source, [base - offset - 0x00];
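/* Each macro above clears 64 bytes with eight 8-byte std's; the main
 * loop at 10: below runs two of them per 128-byte iteration.
 */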
.text
.align 4
.globl __bzero_begin
__bzero_begin:
.globl __bzero
.type __bzero,#function
.globl memset
EXPORT_SYMBOL(__bzero)
EXPORT_SYMBOL(memset)
.globl __memset_start, __memset_end
__memset_start:
memset:
mov %o0, %g1
mov 1, %g4
and %o1, 0xff, %g3
sll %g3, 8, %g2
or %g3, %g2, %g3
sll %g3, 16, %g2
or %g3, %g2, %g3
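! %g3 now holds the fill byte replicated into all four byte lanes,
! e.g. %o1 = 0xAB gives %g3 = 0xABABABAB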
b 1f
mov %o2, %o1
3:
cmp %o2, 3
be 2f
EX(stb %g3, [%o0], sub %o1, 0)
cmp %o2, 2
be 2f
EX(stb %g3, [%o0 + 0x01], sub %o1, 1)
EX(stb %g3, [%o0 + 0x02], sub %o1, 2)
2:
sub %o2, 4, %o2
add %o1, %o2, %o1
b 4f
sub %o0, %o2, %o0
__bzero:
clr %g4
mov %g0, %g3
1:
cmp %o1, 7
bleu 7f
andcc %o0, 3, %o2
bne 3b
4:
andcc %o0, 4, %g0
be 2f
mov %g3, %g2
EX(st %g3, [%o0], sub %o1, 0)
sub %o1, 4, %o1
add %o0, 4, %o0
2:
andcc %o1, 0xffffff80, %o3 ! Now everything is 8-byte aligned and %o1 is the length to run
be 9f
andcc %o1, 0x78, %o2
10:
ZERO_BIG_BLOCK(%o0, 0x00, %g2)
subcc %o3, 128, %o3
ZERO_BIG_BLOCK(%o0, 0x40, %g2)
11:
EXT(10b, 11b, 20f)
bne 10b
add %o0, 128, %o0
orcc %o2, %g0, %g0
9:
be 13f
andcc %o1, 7, %o1
srl %o2, 1, %o3
set 13f, %o4
sub %o4, %o3, %o4
jmp %o4
add %o0, %o2, %o0
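/* Each std in ZERO_LAST_BLOCKS is 4 bytes of code clearing 8 bytes
 * of data, so backing off %o2/2 bytes from 13f enters the table at
 * exactly the right store.
 */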
12:
ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
EXT(12b, 13b, 21f)
be 8f
andcc %o1, 4, %g0
be 1f
andcc %o1, 2, %g0
EX(st %g3, [%o0], and %o1, 7)
add %o0, 4, %o0
1:
be 1f
andcc %o1, 1, %g0
EX(sth %g3, [%o0], and %o1, 3)
add %o0, 2, %o0
1:
bne,a 8f
EX(stb %g3, [%o0], and %o1, 1)
8:
b 0f
nop
7:
be 13b
orcc %o1, 0, %g0
be 0f
8:
add %o0, 1, %o0
subcc %o1, 1, %o1
bne 8b
EX(stb %g3, [%o0 - 1], add %o1, 1)
0:
andcc %g4, 1, %g0
be 5f
nop
retl
mov %g1, %o0
5:
retl
clr %o0
__memset_end:
.section .fixup,#alloc,#execinstr
.align 4
20:
cmp %g2, 8
bleu 1f
and %o1, 0x7f, %o1
sub %g2, 9, %g2
add %o3, 64, %o3
1:
sll %g2, 3, %g2
add %o3, %o1, %o0
b 30f
sub %o0, %g2, %o0
21:
mov 8, %o0
and %o1, 7, %o1
sub %o0, %g2, %o0
sll %o0, 3, %o0
b 30f
add %o0, %o1, %o0
30:
/* %o4 is faulting address, %o5 is %pc where fault occurred */
save %sp, -104, %sp
mov %i5, %o0
mov %i7, %o1
call lookup_fault
mov %i4, %o2
ret
restore
.globl __bzero_end
__bzero_end:
|