repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,261
|
arch/arc/lib/memcpy-700.S
|
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
;-----------------------------------------------------------------------
; void *memcpy(void *dst, const void *src, size_t n)
; ARC700 version.
; In:   r0 = dst, r1 = src, r2 = n
; Out:  r0 = dst (r0 is never written; r5 is the working dst pointer)
; Word path copies 8 bytes per zero-overhead-loop iteration; the byte
; path handles unaligned pointers and tiny sizes.
;-----------------------------------------------------------------------
ENTRY_CFI(memcpy)
or r3,r0,r1 ; merge dst|src to test their common alignment
asl_s r3,r3,30 ; r3 != 0 iff dst or src not 32-bit aligned
mov_s r5,r0 ; working dst in r5, keep r0 as return value
brls.d r2,r3,.Lcopy_bytewise ; unaligned (r3 huge) or n <= r3: go bytewise
sub.f r3,r2,1 ; delay slot: r3 = n-1, carry set iff n == 0
ld_s r12,[r1,0] ; prime the pipeline with first src word
asr.f lp_count,r3,3 ; lp_count = (n-1)/8 double-word iterations
bbit0.d r3,2,.Lnox4 ; bit2 of n-1 clear: no odd leading word
bmsk_s r2,r2,1 ; delay slot: r2 = n & 3 (trailing byte count)
st.ab r12,[r5,4] ; odd leading word: store, post-inc dst
ld.a r12,[r1,4] ; reload next src word
.Lnox4:
lppnz .Lendloop ; ZOL: run body lp_count times (if > 0)
ld_s r3,[r1,4]
st.ab r12,[r5,4] ; two words copied per iteration,
ld.a r12,[r1,8] ; loads scheduled ahead of their stores
st.ab r3,[r5,4]
.Lendloop:
breq r2,0,.Last_store ; no tail bytes: just store last full word
ld r3,[r5,0] ; tail merge: read current dst word
#ifdef __LITTLE_ENDIAN__
add3 r2,-1,r2 ; r2 = r2*8 - 1 = top bit index of tail bytes
; uses long immediate
xor_s r12,r12,r3 ; splice: keep src bits inside the mask,
bmsk r12,r12,r2 ; dst bits outside it
xor_s r12,r12,r3
#else /* BIG ENDIAN */
sub3 r2,31,r2 ; r2 = 31 - r2*8: mask counted from the top for BE
; uses long immediate
xor_s r3,r3,r12
bmsk r3,r3,r2
xor_s r12,r12,r3
#endif /* ENDIAN */
.Last_store:
j_s.d [blink] ; return (r0 still holds original dst)
st r12,[r5,0] ; delay slot: final (possibly merged) word
.balign 4
.Lcopy_bytewise:
jcs [blink] ; carry from n-1 above => n == 0: nothing to do
ldb_s r12,[r1,0] ; prime first byte
lsr.f lp_count,r3 ; lp_count = (n-1)/2 byte-pair iterations
bhs_s .Lnox1 ; shifted-out bit clear: no odd leading byte
stb.ab r12,[r5,1] ; odd leading byte
ldb.a r12,[r1,1]
.Lnox1:
lppnz .Lendbloop ; ZOL: two bytes per iteration
ldb_s r3,[r1,1]
stb.ab r12,[r5,1]
ldb.a r12,[r1,2]
stb.ab r3,[r5,1]
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0] ; delay slot: last byte
END_CFI(memcpy)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,551
|
arch/arc/lib/memcpy-archs.S
|
/*
* Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#ifdef __LITTLE_ENDIAN__
# define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; <<
# define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >>
# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM
# define MERGE_2(RX,RY,IMM)
# define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM
#else
# define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >>
# define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; <<
# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; <<
# define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; <<
# define EXTRACT_1(RX,RY,IMM) lsr RX, RY, IMM
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08
#endif
#ifdef CONFIG_ARC_HAS_LL64
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
# define ZOLSHFT 5
# define ZOLAND 0x1F
#else
# define LOADX(DST,RX) ld.ab DST, [RX, 4]
# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
# define ZOLSHFT 4
# define ZOLAND 0xF
#endif
;-----------------------------------------------------------------------
; void *memcpy(void *dst, const void *src, size_t n)
; ARC HS (ARCv2) version.
; In:   r0 = dst, r1 = src, r2 = n
; Out:  r0 = dst (preserved; r3 is the working dst pointer)
; Strategy: byte-copy until dst is word aligned, then dispatch on the
; residual src alignment: CASE 0 (aligned) uses wide LOADX/STOREX
; unrolled x4; CASES 1-3 load aligned words and realign them with the
; endian-aware SHIFT_/MERGE_/EXTRACT_ macros defined above.
;-----------------------------------------------------------------------
ENTRY_CFI(memcpy)
mov.f 0, r2 ; set flags from n
;;; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
;;; if size <= 8
cmp r2, 8
bls.d @.Lsmallchunk
mov.f lp_count, r2
and.f r4, r0, 0x03 ; r4 = dst misalignment (0..3)
rsub lp_count, r4, 4 ; bytes needed to reach dst word alignment
lpnz @.Laligndestination
;; LOOP BEGIN
ldb.ab r5, [r1,1]
sub r2, r2, 1
stb.ab r5, [r3,1]
.Laligndestination:
;;; Check the alignment of the source
and.f r4, r1, 0x03
bnz.d @.Lsourceunaligned
;;; CASE 0: Both source and destination are 32bit aligned
;;; Convert len to Dwords, unfold x4
lsr.f lp_count, r2, ZOLSHFT ; 32B (or 64B with LL64) per iteration
lpnz @.Lcopy32_64bytes
;; LOOP START
LOADX (r6, r1)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
STOREX (r6, r3)
STOREX (r8, r3)
STOREX (r10, r3)
STOREX (r4, r3)
.Lcopy32_64bytes:
and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes
.Lsmallchunk:
lpnz @.Lcopyremainingbytes
;; LOOP START
ldb.ab r5, [r1,1]
stb.ab r5, [r3,1]
.Lcopyremainingbytes:
j [blink]
;;; END CASE 0
.Lsourceunaligned:
cmp r4, 2
beq.d @.LunalignedOffby2
sub r2, r2, 1 ; account for the byte loaded in the delay slot
bhi.d @.LunalignedOffby3
ldb.ab r5, [r1, 1] ; delay slot: lead byte, brings src to 16-bit align
;;; CASE 1: The source is unaligned, off by 1
;; Hence I need to read 1 byte for a 16bit alignment
;; and 2bytes to reach 32bit alignment
ldh.ab r6, [r1, 2]
sub r2, r2, 2
;; Convert to words, unfold x2
lsr.f lp_count, r2, 3
MERGE_1 (r6, r6, 8) ; combine lead byte + halfword into the
MERGE_2 (r5, r5, 24) ; carry register r5 (endian-dependent)
or r5, r5, r6
;; Both src and dst are aligned
lpnz @.Lcopy8bytes_1
;; LOOP START
ld.ab r6, [r1, 4]
ld.ab r8, [r1,4]
SHIFT_1 (r7, r6, 24) ; splice carried bytes with the new word
or r7, r7, r5
SHIFT_2 (r5, r6, 8) ; r5 carries leftover bytes to next round
SHIFT_1 (r9, r8, 24)
or r9, r9, r5
SHIFT_2 (r5, r8, 8)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_1:
;; Write back the remaining 16bits
EXTRACT_1 (r6, r5, 16)
sth.ab r6, [r3, 2]
;; Write back the remaining 8bits
EXTRACT_2 (r5, r5, 16)
stb.ab r5, [r3, 1]
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_1
;; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_1:
j [blink]
.LunalignedOffby2:
;;; CASE 2: The source is unaligned, off by 2
ldh.ab r5, [r1, 2] ; one halfword brings src to 32-bit alignment
sub r2, r2, 1
;; Both src and dst are aligned
;; Convert to words, unfold x2
lsr.f lp_count, r2, 3
#ifdef __BIG_ENDIAN__
asl.nz r5, r5, 16 ; position carried halfword in the high bits
#endif
lpnz @.Lcopy8bytes_2
;; LOOP START
ld.ab r6, [r1, 4]
ld.ab r8, [r1,4]
SHIFT_1 (r7, r6, 16)
or r7, r7, r5
SHIFT_2 (r5, r6, 16)
SHIFT_1 (r9, r8, 16)
or r9, r9, r5
SHIFT_2 (r5, r8, 16)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_2:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 16
#endif
sth.ab r5, [r3, 2] ; flush carried halfword
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_2
;; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_2:
j [blink]
.LunalignedOffby3:
;;; CASE 3: The source is unaligned, off by 3
;;; Hence, I need to read 1byte for achieve the 32bit alignment
;; (that byte was already read into r5 in the delay slot above)
;; Both src and dst are aligned
;; Convert to words, unfold x2
lsr.f lp_count, r2, 3
#ifdef __BIG_ENDIAN__
asl.ne r5, r5, 24 ; position carried byte in the high bits
#endif
lpnz @.Lcopy8bytes_3
;; LOOP START
ld.ab r6, [r1, 4]
ld.ab r8, [r1,4]
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
SHIFT_2 (r5, r6, 24)
SHIFT_1 (r9, r8, 8)
or r9, r9, r5
SHIFT_2 (r5, r8, 24)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_3:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 24
#endif
stb.ab r5, [r3, 1] ; flush carried byte
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_3
;; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_3:
j [blink]
END_CFI(memcpy)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,921
|
arch/arc/lib/memset-archs.S
|
/*
* Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/cache.h>
/*
* The memset implementation below is optimized to use prefetchw and prealloc
* instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
* If you want to implement optimized memset for other possible L1 data cache
* line lengths (32B and 128B) you should rewrite code carefully checking
* we don't call any prefetchw/prealloc instruction for L1 cache lines which
* don't belong to the memset area.
*/
#if L1_CACHE_SHIFT == 6
.macro PREALLOC_INSTR reg, off
prealloc [\reg, \off]
.endm
.macro PREFETCHW_INSTR reg, off
prefetchw [\reg, \off]
.endm
#else
.macro PREALLOC_INSTR
.endm
.macro PREFETCHW_INSTR
.endm
#endif
;-----------------------------------------------------------------------
; void *memset(void *dst, int c, size_t n)
; ARC HS (ARCv2) version, tuned for a 64B L1 D$ line (prealloc/prefetchw).
; In:   r0 = dst, r1 = fill char, r2 = n
; Out:  r0 = dst (preserved; r3 is the working pointer)
; The fill byte is replicated into a full word (r4=r5); the main loop
; writes 64 bytes per zero-overhead-loop iteration, preallocating the
; next cache line, then 32-byte and byte loops mop up the remainder.
;-----------------------------------------------------------------------
ENTRY_CFI(memset)
PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
;;; if length < 8
brls.d.nt r2, 8, .Lsmallchunk
mov.f lp_count,r2
and.f r4, r0, 0x03 ; r4 = dst misalignment (0..3)
rsub lp_count, r4, 4 ; bytes needed to word-align dst
lpnz @.Laligndestination
;; LOOP BEGIN
stb.ab r1, [r3,1]
sub r2, r2, 1
.Laligndestination:
;;; Destination is aligned
and r1, r1, 0xFF ; replicate fill byte: byte -> halfword -> word
asl r4, r1, 8
or r4, r4, r1
asl r5, r4, 16
or r5, r5, r4
mov r4, r5 ; r4 = r5 = word-wide fill pattern (pair for std)
sub3 lp_count, r2, 8 ; lp_count = n - 64 (used only when n > 64)
cmp r2, 64
bmsk.hi r2, r2, 5 ; n > 64: r2 = remainder bits for the mop-up loops
mov.ls lp_count, 0 ; n <= 64: skip the 64-byte loop entirely
add3.hi r2, r2, 8
;;; Convert len to Dwords, unfold x8
lsr.f lp_count, lp_count, 6 ; 64 bytes per iteration
lpnz @.Lset64bytes
;; LOOP START
PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset64bytes:
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
;; LOOP START
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset32bytes:
and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
.Lsmallchunk:
lpnz .Lcopy3bytes
;; LOOP START
stb.ab r1, [r3, 1]
.Lcopy3bytes:
j [blink]
END_CFI(memset)
;-----------------------------------------------------------------------
; void *memzero(void *mem, size_t n) -- bzero-style wrapper.
; In:  r0 = mem, r1 = n
; Tail-calls memset(mem, 0, n): blink is left untouched so memset
; returns directly to our caller; r1 is zeroed in the branch delay slot.
;-----------------------------------------------------------------------
ENTRY_CFI(memzero)
; adjust bzero args to memset args
mov r2, r1 ; size moves from r1 to r2
b.d memset ;tail call so need to tinker with blink
mov r1, 0 ; delay slot: fill char = 0
END_CFI(memzero)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,230
|
arch/arc/lib/memset.S
|
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
;-----------------------------------------------------------------------
; void *memset(void *dst, int c, size_t n)
; ARC700 version.
; In:   r0 = dst, r1 = fill char, r2 = n
; Out:  r0 = dst (preserved; r4 is the working pointer)
; The fill byte is widened to a halfword, then a word; the head is
; aligned with overlapping byte/halfword stores, and the word loop
; stores 4 bytes per zero-overhead-loop iteration. Sizes <= SMALL go
; through the pure byte loop at .Ltiny.
;-----------------------------------------------------------------------
ENTRY_CFI(memset)
mov_s r4,r0 ; working dst; keep r0 as return value
or r12,r0,r2
bmsk.f r12,r12,1 ; Z set iff dst and n are both multiples of 4
extb_s r1,r1 ; widen fill byte: byte -> halfword
asl r3,r1,8
beq.d .Laligned ; fully aligned case: straight to word loop
or_s r1,r1,r3 ; delay slot: r1 now has byte replicated twice
brls r2,SMALL,.Ltiny
add r3,r2,r0 ; r3 = one past the end
stb r1,[r3,-1] ; pre-store tail byte / halfword (overlap is
bclr_s r3,r3,0 ; harmless since everything gets the same value)
stw r1,[r3,-2]
bmsk.f r12,r0,1
add_s r2,r2,r12 ; adjust count for the alignment stores below
sub.ne r2,r2,4
stb.ab r1,[r4,1] ; head: byte then halfword store, each followed
and r4,r4,-2 ; by rounding r4 down to the next alignment
stw.ab r1,[r4,2]
and r4,r4,-4
.Laligned: ; This code address should be aligned for speed.
asl r3,r1,16 ; widen fill pattern: halfword -> word
lsr.f lp_count,r2,2
or_s r1,r1,r3
lpne .Loop_end ; ZOL: one word store per iteration
st.ab r1,[r4,4]
.Loop_end:
j_s [blink]
.balign 4
.Ltiny:
mov.f lp_count,r2
lpne .Ltiny_end ; ZOL: one byte store per iteration
stb.ab r1,[r4,1]
.Ltiny_end:
j_s [blink]
END_CFI(memset)
; memzero: @r0 = mem, @r1 = size_t
; memset: @r0 = mem, @r1 = char, @r2 = size_t
;-----------------------------------------------------------------------
; void *memzero(void *mem, size_t n) -- bzero-style wrapper (ARC700).
; In:  r0 = mem, r1 = n
; Shuffles args into memset(mem, 0, n) form and tail-jumps; blink is
; untouched so memset returns straight to our caller.
;-----------------------------------------------------------------------
ENTRY_CFI(memzero)
; adjust bzero args to memset args
mov r2, r1 ; size moves from r1 to r2
mov r1, 0 ; fill char = 0
b memset ;tail call so need to tinker with blink
END_CFI(memzero)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,350
|
arch/arc/lib/strcmp-archs.S
|
/*
* Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
;-----------------------------------------------------------------------
; int strcmp(const char *s1, const char *s2)
; ARC HS (ARCv2) version.
; In:   r0 = s1, r1 = s2
; Out:  r0 = 0 if equal; otherwise 1 with bit 31 set (negative) when
;       s1 < s2, plain positive when s1 > s2.
; Word loop uses the classic (x - 0x01010101) & ~x & 0x80808080 trick
; (r12 = 0x01010101, r11 = ror(r12) = 0x80808080) to spot a NUL byte
; in s1 while comparing whole words. Unaligned inputs fall back to the
; byte loop at .Lcharloop.
;-----------------------------------------------------------------------
ENTRY_CFI(strcmp)
or r2, r0, r1
bmsk_s r2, r2, 1
brne r2, 0, @.Lcharloop ; either pointer unaligned: bytewise
;;; s1 and s2 are word aligned
ld.ab r2, [r0, 4]
mov_s r12, 0x01010101
ror r11, r12 ; r11 = 0x80808080
.align 4
.LwordLoop:
ld.ab r3, [r1, 4]
;; Detect NULL char in str1
sub r4, r2, r12
ld.ab r5, [r0, 4] ; prefetch next s1 word
bic r4, r4, r2
and r4, r4, r11 ; r4 != 0 iff some byte of r2 is NUL
brne.d.nt r4, 0, .LfoundNULL
;; Check if the read locations are the same
cmp r2, r3
beq.d .LwordLoop
mov.eq r2, r5 ; delay slot: advance s1 word only on match
;; A match is found, spot it out
#ifdef __LITTLE_ENDIAN__
swape r3, r3 ; byte-swap so the word compare orders
mov_s r0, 1 ; bytes in string (memory) order
swape r2, r2
#else
mov_s r0, 1
#endif
cmp_s r2, r3
j_s.d [blink]
bset.lo r0, r0, 31 ; s1 < s2: make result negative
.align 4
.LfoundNULL:
#ifdef __BIG_ENDIAN__
swape r4, r4
swape r2, r2
swape r3, r3
#endif
;; Find null byte
ffs r0, r4 ; bit index of first NUL marker
bmsk r2, r2, r0 ; mask both words down to the bytes up to
bmsk r3, r3, r0 ; and including the terminator
swape r2, r2 ; restore memory byte order for the compare
swape r3, r3
;; make the return value
sub.f r0, r2, r3 ; 0 when equal up to the NUL
mov.hi r0, 1
j_s.d [blink]
bset.lo r0, r0, 31
.align 4
.Lcharloop:
ldb.ab r2, [r0, 1]
ldb.ab r3, [r1, 1]
nop
breq r2, 0, .Lcmpend ; end of s1
breq r2, r3, .Lcharloop ; bytes equal: keep going
.align 4
.Lcmpend:
j_s.d [blink]
sub r0, r2, r3 ; delay slot: signed byte difference
END_CFI(strcmp)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,653
|
arch/arc/lib/memcmp.S
|
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#ifdef __LITTLE_ENDIAN__
#define WORD2 r2
#define SHIFT r3
#else /* BIG ENDIAN */
#define WORD2 r3
#define SHIFT r2
#endif
;-----------------------------------------------------------------------
; int memcmp(const void *p1, const void *p2, size_t n)
; In:   r0 = p1, r1 = p2, r2 = n
; Out:  r0 = 0 if equal; otherwise sign reflects the first differing
;       byte (exact construction differs per endianness, see below).
; Word loop compares two words per zero-overhead-loop iteration,
; software-pipelined (r4/r5 hold the "even" words, WORD2/r12 the "odd"
; ones). Unaligned or tiny inputs use the bytewise loop, similarly
; pipelined two bytes per iteration.
;-----------------------------------------------------------------------
ENTRY_CFI(memcmp)
or r12,r0,r1
asl_s r12,r12,30 ; non-zero (huge) iff p1 or p2 not word aligned
sub r3,r2,1 ; r3 = n-1
brls r2,r12,.Lbytewise ; unaligned or n tiny: bytewise path
ld r4,[r0,0] ; prime first word pair
ld r5,[r1,0]
lsr.f lp_count,r3,3 ; (n-1)/8 double-word iterations
#ifdef CONFIG_ISA_ARCV2
/* In ARCv2 a branch can't be the last instruction in a zero overhead
 * loop.
 * So we move the branch to the start of the loop, duplicate it
 * after the end, and set up r12 so that the branch isn't taken
 * initially.
 */
mov_s r12,WORD2
lpne .Loop_end
brne WORD2,r12,.Lodd
ld WORD2,[r0,4]
#else
lpne .Loop_end
ld_s WORD2,[r0,4]
#endif
ld_s r12,[r1,4]
brne r4,r5,.Leven ; mismatch in even word
ld.a r4,[r0,8]
ld.a r5,[r1,8]
#ifdef CONFIG_ISA_ARCV2
.Loop_end:
brne WORD2,r12,.Lodd ; duplicated odd-word check (see note above)
#else
brne WORD2,r12,.Lodd
.Loop_end:
#endif
asl_s SHIFT,SHIFT,3 ; SHIFT: leftover-byte count -> bit count
bhs_s .Last_cmp
brne r4,r5,.Leven
ld r4,[r0,4]
ld r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
nop_s
; one more load latency cycle
.Last_cmp:
xor r0,r4,r5 ; difference bits of the final partial word
bset r0,r0,SHIFT ; sentinel bit just past the valid bytes
sub_s r1,r0,1
bic_s r1,r1,r0 ; isolate lowest set difference bit
norm r1,r1
b.d .Leven_cmp
and r1,r1,24 ; round to the containing byte's shift amount
.Leven:
xor r0,r4,r5
sub_s r1,r0,1
bic_s r1,r1,r0 ; isolate lowest difference bit (first
norm r1,r1 ; differing byte in LE memory order)
; slow track insn
and r1,r1,24
.Leven_cmp:
asl r2,r4,r1 ; shift differing byte to the top of each word
asl r12,r5,r1
lsr_s r2,r2,1 ; >>1 so the subtraction below cannot overflow
lsr_s r12,r12,1
j_s.d [blink]
sub r0,r2,r12 ; delay slot: signed result
.balign 4
.Lodd: ; same scheme for a mismatch in the odd word
xor r0,WORD2,r12
sub_s r1,r0,1
bic_s r1,r1,r0
norm r1,r1
; slow track insn
and r1,r1,24
asl_s r2,r2,r1
asl_s r12,r12,r1
lsr_s r2,r2,1
lsr_s r12,r12,1
j_s.d [blink]
sub r0,r2,r12
#else /* BIG ENDIAN */
.Last_cmp:
neg_s SHIFT,SHIFT ; BE: discard the bytes beyond n by shifting
lsr r4,r4,SHIFT
lsr r5,r5,SHIFT
; slow track insn
.Leven:
sub.f r0,r4,r5 ; BE words compare in memory order directly
mov.ne r0,1
j_s.d [blink]
bset.cs r0,r0,31 ; borrow => p1 < p2: negative result
.Lodd:
cmp_s WORD2,r12
mov_s r0,1
j_s.d [blink]
bset.cs r0,r0,31
#endif /* ENDIAN */
.balign 4
.Lbytewise:
breq r2,0,.Lnil ; n == 0: equal
ldb r4,[r0,0] ; prime first byte pair
ldb r5,[r1,0]
lsr.f lp_count,r3 ; (n-1)/2 iterations, carry = (n-1)&1
#ifdef CONFIG_ISA_ARCV2
mov r12,r3 ; same branch-at-loop-top trick as above
lpne .Lbyte_end
brne r3,r12,.Lbyte_odd
#else
lpne .Lbyte_end
#endif
ldb_s r3,[r0,1]
ldb r12,[r1,1]
brne r4,r5,.Lbyte_even
ldb.a r4,[r0,2]
ldb.a r5,[r1,2]
#ifdef CONFIG_ISA_ARCV2
.Lbyte_end:
brne r3,r12,.Lbyte_odd
#else
brne r3,r12,.Lbyte_odd
.Lbyte_end:
#endif
bcc .Lbyte_even ; carry clear: even count, last pair is r4/r5
brne r4,r5,.Lbyte_even
ldb_s r3,[r0,1]
ldb_s r12,[r1,1]
.Lbyte_odd:
j_s.d [blink]
sub r0,r3,r12
.Lbyte_even:
j_s.d [blink]
sub r0,r4,r5
.Lnil:
j_s.d [blink]
mov r0,0
END_CFI(memcmp)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,471
|
arch/arc/lib/strlen.S
|
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
;-----------------------------------------------------------------------
; size_t strlen(const char *s)
; In:   r0 = s
; Out:  r0 = length (bytes before the NUL terminator)
; Reads the string a double-word at a time from the aligned 8-byte
; block containing s, using the (x - 0x01010101) & ~x & 0x80808080
; zero-byte trick (r4 = 0x01010101, r5 = ror(r4) = 0x80808080).
; The first iteration masks off bytes before s so leading garbage in
; the same aligned block cannot trigger a false NUL hit.
;-----------------------------------------------------------------------
ENTRY_CFI(strlen)
or r3,r0,7 ; r3 = last byte addr of s's aligned 8-byte block
ld r2,[r3,-7] ; first word of the block
ld.a r6,[r3,-3] ; second word; r3 now advances word-pair-wise
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
asl_s r1,r0,3 ; bit offset of s within its word
btst_s r0,2 ; does s start in the second word?
asl r7,r4,r1 ; shifted 0x01..01: ignores bytes before s
ror r5,r4 ; r5 = 0x80808080
sub r1,r2,r7
bic_s r1,r1,r2 ; NUL test on word 1 (pre-s bytes masked out)
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6 ; NUL test on word 2
or.eq r12,r12,r1 ; fold in word-1 hits only if s is in word 1
and r12,r12,r5
brne r12,0,.Learly_end
#else /* BIG ENDIAN */
ror r5,r4 ; r5 = 0x80808080
btst_s r0,2
mov_s r1,31
sub3 r7,r1,r0 ; r7 = bit index bounding the valid bytes
sub r1,r2,r4
bic_s r1,r1,r2
bmsk r1,r1,r7 ; drop hits in bytes before s
sub r12,r6,r4
bic r12,r12,r6
bmsk.ne r12,r12,r7 ; mask word 2 instead if s starts there
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#endif /* ENDIAN */
.Loop: ; steady state: scan 8 bytes per iteration
ld_s r2,[r3,4]
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4
bic_s r1,r1,r2
sub r12,r6,r4
bic r12,r12,r6
or r12,r12,r1
and r12,r12,r5 ; any 0x80 marker => some byte was NUL
breq r12,0,.Loop
.Lend:
and.f r1,r1,r5 ; was the NUL in the first word of the pair?
sub.ne r3,r3,4 ; yes: step r3 back one word
mov.eq r1,r12 ; no: use second word's markers
#ifdef __LITTLE_ENDIAN__
sub_s r2,r1,1
bic_s r2,r2,r1 ; isolate lowest marker = first NUL byte
norm r1,r2 ; leading-zero count locates that byte
sub_s r0,r0,3
lsr_s r1,r1,3
sub r0,r3,r0 ; length = word addr - s (+ byte offset)
j_s.d [blink]
sub r0,r0,r1
#else /* BIG ENDIAN */
lsr_s r1,r1,7 ; markers -> 0x01 positions
mov.eq r2,r6
bic_s r1,r1,r2 ; clear false hits from carry propagation
norm r1,r1 ; leading-zero count locates first NUL
sub r0,r3,r0
lsr_s r1,r1,3
j_s.d [blink]
add r0,r0,r1
#endif /* ENDIAN */
.Learly_end: ; NUL in the very first block
b.d .Lend
sub_s.ne r1,r1,r1 ; delay slot: r1 = 0 if s was in word 2
END_CFI(strlen)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,657
|
arch/arc/lib/strcmp.S
|
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* This is optimized primarily for the ARC700.
It would be possible to speed up the loops by one cycle / word
respective one cycle / byte by forcing double source 1 alignment, unrolling
by a factor of two, and speculatively loading the second word / byte of
source 1; however, that would increase the overhead for loop setup / finish,
and strcmp might often terminate early. */
#include <linux/linkage.h>
;-----------------------------------------------------------------------
; int strcmp(const char *s1, const char *s2)
; ARC700 version.
; In:   r0 = s1, r1 = s2
; Out:  0 if equal; otherwise positive/negative (bit 31 set) per the
;       first differing byte. Word loop uses the zero-byte trick with
;       r12 = 0x01010101 and r5 = ror(r12) = 0x80808080; unaligned
;       inputs take the byte loop at .Lcharloop.
;-----------------------------------------------------------------------
ENTRY_CFI(strcmp)
or r2,r0,r1
bmsk_s r2,r2,1
brne r2,0,.Lcharloop ; either pointer unaligned: bytewise
mov_s r12,0x01010101
ror r5,r12 ; r5 = 0x80808080
.Lwordloop:
ld.ab r2,[r0,4]
ld.ab r3,[r1,4]
nop_s
sub r4,r2,r12
bic r4,r4,r2
and r4,r4,r5 ; r4 != 0 iff some byte of the s1 word is NUL
brne r4,0,.Lfound0
breq r2,r3,.Lwordloop
#ifdef __LITTLE_ENDIAN__
xor r0,r2,r3 ; mask for difference
sub_s r1,r0,1
bic_s r0,r0,r1 ; mask for least significant difference bit
sub r1,r5,r0
xor r0,r5,r1 ; mask for least significant difference byte
and_s r2,r2,r0 ; compare only up to the first differing byte
and_s r3,r3,r0
#endif /* LITTLE ENDIAN */
cmp_s r2,r3
mov_s r0,1
j_s.d [blink]
bset.lo r0,r0,31 ; s1 < s2: negative result
.balign 4
#ifdef __LITTLE_ENDIAN__
.Lfound0:
xor r0,r2,r3 ; mask for difference
or r0,r0,r4 ; or in zero indicator
sub_s r1,r0,1
bic_s r0,r0,r1 ; mask for least significant difference bit
sub r1,r5,r0
xor r0,r5,r1 ; mask for least significant difference byte
and_s r2,r2,r0
and_s r3,r3,r0
sub.f r0,r2,r3 ; 0 when equal up to the terminator
mov.hi r0,1
j_s.d [blink]
bset.lo r0,r0,31
#else /* BIG ENDIAN */
/* The zero-detection above can mis-detect 0x01 bytes as zeroes
 because of carry-propagation from a lower significant zero byte.
 We can compensate for this by checking that bit0 is zero.
 This compensation is not necessary in the step where we
 get a low estimate for r2, because in any affected bytes
 we already have 0x00 or 0x01, which will remain unchanged
 when bit 7 is cleared. */
.balign 4
.Lfound0:
lsr r0,r4,8
lsr_s r1,r2
bic_s r2,r2,r0 ; get low estimate for r2 and get ...
bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
cmp_s r3,r2 ; ... be independent of trailing garbage
or_s r2,r2,r0 ; likewise for r3 > r2
bic_s r3,r3,r0
rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
cmp_s r2,r3
j_s.d [blink]
bset.lo r0,r0,31
#endif /* ENDIAN */
.balign 4
.Lcharloop:
ldb.ab r2,[r0,1]
ldb.ab r3,[r1,1]
nop_s
breq r2,0,.Lcmpend ; end of s1
breq r2,r3,.Lcharloop ; bytes equal: keep going
.Lcmpend:
j_s.d [blink]
sub r0,r2,r3 ; delay slot: signed byte difference
END_CFI(strcmp)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,461
|
arch/arc/plat-eznps/entry.S
|
/*******************************************************************************
EZNPS CPU startup Code
Copyright(c) 2012 EZchip Technologies.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
*******************************************************************************/
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/cache.h>
#include <plat/ctop.h>
.cpu A7
.section .init.text, "ax",@progbits
.align 1024 ; HW requirement for restart first PC
;-----------------------------------------------------------------------
; res_service -- EZNPS reset/restart entry point.
; Performs per-core early setup (thread filtering, D$ policy, logical
; cpu/cluster id programming) and then jumps to the generic kernel
; entry `stext`. Clobbers r3 only; runs before any stack exists.
;-----------------------------------------------------------------------
ENTRY(res_service)
#if defined(CONFIG_EZNPS_MTM_EXT) && defined(CONFIG_EZNPS_SHARED_AUX_REGS)
; There is no work for HW thread id != 0
lr r3, [CTOP_AUX_THREAD_ID]
cmp r3, 0
jne stext
#endif
#ifdef CONFIG_ARC_HAS_DCACHE
; With no cache coherency mechanism D$ need to be used very carefully.
; Address space:
; 0G-2G: We disable CONFIG_ARC_CACHE_PAGES.
; 2G-3G: We disable D$ by setting this bit.
; 3G-4G: D$ is disabled by architecture.
; FMT are huge pages for user application reside at 0-2G.
; Only FMT left as one who can use D$ where each such page got
; disable/enable bit for cachability.
; Programmer will use FMT pages for private data so cache coherency
; would not be a problem.
; First thing we invalidate D$
sr 1, [ARC_REG_DC_IVDC]
sr HW_COMPLY_KRN_NOT_D_CACHED, [CTOP_AUX_HW_COMPLY]
#endif
#ifdef CONFIG_SMP
; We set logical cpuid to be used by GET_CPUID
; We do not use physical cpuid since we want ids to be continuous when
; it comes to cpus on the same quad cluster.
; This is useful for applications that used shared resources of a quad
; cluster such SRAMS.
lr r3, [CTOP_AUX_CORE_ID]
sr r3, [CTOP_AUX_LOGIC_CORE_ID]
lr r3, [CTOP_AUX_CLUSTER_ID]
; Setting logical id is achieved by swap of 2 middle bits of cluster id (4 bit)
; r3 is used since we use short instruction and we need q-class reg
.short CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST
.word CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM
sr r3, [CTOP_AUX_LOGIC_CLUSTER_ID]
#endif
j stext ; hand off to the generic kernel entry point
END(res_service)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 12,540
|
arch/arc/mm/tlbex.S
|
/*
* TLB Exception Handling for ARC
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Vineetg: April 2011 :
* -MMU v1: moved out legacy code into a separate file
* -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
* helps avoid a shift when preparing PD0 from PTE
*
* Vineetg: July 2009
* -For MMU V2, we need not do heuristics at the time of committing a D-TLB
* entry, so that it doesn't knock out it's I-TLB entry
* -Some more fine tuning:
* bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
*
* Vineetg: July 2009
* -Practically rewrote the I/D TLB Miss handlers
* Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
* Hence Leaner by 1.5 K
* Used Conditional arithmetic to replace excessive branching
* Also used short instructions wherever possible
*
* Vineetg: Aug 13th 2008
* -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
* more information in case of a Fatality
*
* Vineetg: March 25th Bug #92690
* -Added Debug Code to check if sw-ASID == hw-ASID
* Rahul Trivedi, Amit Bhor: Codito Technologies 2004
*/
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
#include <asm/tlb-mmu1.h>
#ifdef CONFIG_ISA_ARCOMPACT
;-----------------------------------------------------------------
; ARC700 Exception Handling doesn't auto-switch stack and it only provides
; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
;
; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
; "global" is used to free-up FIRST core reg to be able to code the rest of
; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
; need to be saved as well by extending the "global" to be 4 words. Hence
; ".size ex_saved_reg1, 16"
; [All of this dance is to avoid stack switching for each TLB Miss, since we
; only need to save only a handful of regs, as opposed to complete reg file]
;
; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST
; core reg as it will not be SMP safe.
; Thus scratch AUX reg is used (and no longer used to cache task PGD).
; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
; Epilogue thus has to locate the "per-cpu" storage for regs.
; To avoid cache line bouncing the per-cpu global is aligned/sized per
; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
; As simple as that....
;--------------------------------------------------------------------------
; scratch memory to save [r0-r3] used to code TLB refill Handler
ARCFP_DATA ex_saved_reg1
.align 1 << L1_CACHE_SHIFT
.type ex_saved_reg1, @object
#ifdef CONFIG_SMP
.size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
ex_saved_reg1:
.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
#else
.size ex_saved_reg1, 16
ex_saved_reg1:
.zero 16
#endif
; Free up r0-r3 for the fast-path TLB miss handler (ARCompact: the
; exception does not switch stacks, so the regs are parked in the
; ex_saved_reg1 scratch area -- per-cpu on SMP, single slot on UP).
.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_SMP
sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
GET_CPU_ID r0 ; get to per cpu scratch mem,
asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
add r0, @ex_saved_reg1, r0
#else
st r0, [@ex_saved_reg1]
mov_s r0, @ex_saved_reg1
#endif
st_s r1, [r0, 4]
st_s r2, [r0, 8]
st_s r3, [r0, 12]
; VERIFY if the ASID in MMU-PID Reg is same as
; one in Linux data structures
tlb_paranoid_check_asm
.endm
; Restore r0-r3 from the scratch area saved by TLBMISS_FREEUP_REGS
; (exact mirror of the save sequence; r0 comes back last since it is
; used to address the scratch memory).
.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
GET_CPU_ID r0 ; get to per cpu scratch mem
asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
add r0, @ex_saved_reg1, r0
ld_s r3, [r0,12]
ld_s r2, [r0, 8]
ld_s r1, [r0, 4]
lr r0, [ARC_REG_SCRATCH_DATA0]
#else
mov_s r0, @ex_saved_reg1
ld_s r3, [r0,12]
ld_s r2, [r0, 8]
ld_s r1, [r0, 4]
ld_s r0, [r0]
#endif
.endm
#else /* ARCv2 */
; ARCv2 variants: a kernel stack is usable here, so saving/restoring
; the four scratch regs is a plain push/pop sequence.
.macro TLBMISS_FREEUP_REGS
PUSH r0
PUSH r1
PUSH r2
PUSH r3
.endm
; Pop in reverse order of the pushes above.
.macro TLBMISS_RESTORE_REGS
POP r3
POP r2
POP r1
POP r0
.endm
#endif
;============================================================================
; Troubleshooting Stuff
;============================================================================
; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
; we use the MMU PID Reg to get current ASID.
; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble.
; So we try to detect this in the TLB Miss handler
; Debug-only sanity check (CONFIG_ARC_DBG_TLB_PARANOIA): compare the
; ASID Linux tracks for the current task against the one in the MMU
; PID register; on user-mode mismatch, report via print_asid_mismatch.
; Clobbers r1-r3 (already saved by TLBMISS_FREEUP_REGS at this point).
.macro tlb_paranoid_check_asm
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
GET_CURR_TASK_ON_CPU r3
ld r0, [r3, TASK_ACT_MM]
ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
breq r0, 0, 55f ; Error if no ASID allocated
lr r1, [ARC_REG_PID]
and r1, r1, 0xFF
and r2, r0, 0xFF ; MMU PID bits only for comparison
breq r1, r2, 5f
55:
; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
lr r2, [erstatus]
bbit0 r2, STATUS_U_BIT, 5f
; We sure are in troubled waters, Flag the error, but to do so
; need to switch to kernel mode stack to call error routine
GET_TSK_STACK_BASE r3, sp
; Call printk to shoutout aloud
mov r2, 1
j print_asid_mismatch
5: ; ASIDs match so proceed normally
nop
#endif
.endm
;============================================================================
;TLB Miss handling Code
;============================================================================
;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
; Walk the 2-level page table for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = faulting V-address.
; Branches to do_slow_path_pf when no page table exists; with THP, a
; huge PMD short-circuits the walk at label 2 with r0/r1 at PMD level.
.macro LOAD_FAULT_PTE
lr r2, [efa]
#ifndef CONFIG_SMP
lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
#else
GET_CURR_TASK_ON_CPU r1
ld r1, [r1, TASK_ACT_MM]
ld r1, [r1, MM_PGD]
#endif
lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
ld.as r3, [r1, r0] ; PGD entry corresp to faulting addr
tst r3, r3
bz do_slow_path_pf ; if no Page Table, do page fault
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp)
add2.nz r1, r1, r0
bnz.d 2f ; YES: PGD == PMD has THP PTE: stop pgd walk
mov.nz r0, r3
#endif
and r1, r3, PAGE_MASK ; r1 = base of the PTE table
; Get the PTE entry: The idea is
; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
; (3) z = (pgtbl + y * 4)
#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_SIZE_LOG 3 /* 8 == 2 ^ 3 */
#else
#define PTE_SIZE_LOG 2 /* 4 == 2 ^ 2 */
#endif
; multiply in step (3) above avoided by shifting lesser in step (1)
lsr r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
and r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
ld.aw r0, [r1, r0] ; r0: PTE (lower word only for PAE40)
; r1: PTE ptr
2:
.endm
;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
; (for PAE40, two-words PTE, while three-word TLB Entry [PD0:PD1:PD1HI])
; IN: r0 = PTE, r1 = ptr to PTE
; Convert a Linux PTE into the MMU's TLB entry format [PD0:PD1]
; (plus PD1HI for PAE40) and program the TLBPD regs.
; IN: r0 = PTE, r1 = ptr to PTE. Clobbers r2, r3.
.macro CONV_PTE_TO_TLB
and r3, r0, PTE_BITS_RWX ; r w x
asl r2, r3, 3 ; Kr Kw Kx 0 0 0 (GLOBAL, kernel only)
and.f 0, r0, _PAGE_GLOBAL
or.z r2, r2, r3 ; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)
and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
or r3, r3, r2
sr r3, [ARC_REG_TLBPD1] ; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
#ifdef CONFIG_ARC_HAS_PAE40
ld r3, [r1, 4] ; paddr[39..32]
sr r3, [ARC_REG_TLBPD1HI]
#endif
and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid
or r3, r3, r2 ; S | vaddr | {sasid|asid}
sr r3,[ARC_REG_TLBPD0] ; rewrite PD0
.endm
;-----------------------------------------------------------------
; Commit the TLB entry into MMU
; Commit the TLB entry prepared in TLBPD0/PD1 into the MMU.
; MMU v4+ has a single TLBInsertEntry command; older MMUs pick a slot
; with TLBGetIndex then write it (TLBWriteNI when available, which
; skips the redundant invalidate).
.macro COMMIT_ENTRY_TO_MMU
#if (CONFIG_ARC_MMU_VER < 4)
#ifdef CONFIG_EZNPS_MTM_EXT
/* verify if entry for this vaddr+ASID already exists */
sr TLBProbe, [ARC_REG_TLBCOMMAND]
lr r0, [ARC_REG_TLBINDEX]
bbit0 r0, 31, 88f ; probe hit: entry already present, skip write
#endif
/* Get free TLB slot: Set = computed from vaddr, way = random */
sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
/* Commit the Write */
#if (CONFIG_ARC_MMU_VER >= 2) /* introduced in v2 */
sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
#else
sr TLBWrite, [ARC_REG_TLBCOMMAND]
#endif
#else
sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
#endif
88:
.endm
ARCFP_CODE ;Fast Path Code, candidate for ICCM
;-----------------------------------------------------------------------------
; I-TLB Miss Exception Handler
;-----------------------------------------------------------------------------
; I-TLB Miss fast path: walk page table, verify the PTE allows
; execution (plus _PAGE_GLOBAL for vmalloc-range addresses), mark it
; accessed, program the MMU, and rtie. Any failed check falls through
; to the slow path (regular page fault).
ENTRY(EV_TLBMissI)
TLBMISS_FREEUP_REGS
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
LOAD_FAULT_PTE
;----------------------------------------------------------------
; VERIFY_PTE: Check if PTE permissions approp for executing code
cmp_s r2, VMALLOC_START
mov_s r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
or.hs r2, r2, _PAGE_GLOBAL ; kernel address: also require GLOBAL
and r3, r0, r2 ; Mask out NON Flag bits from PTE
xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
bnz do_slow_path_pf
; Let Linux VM know that the page was accessed
or r0, r0, _PAGE_ACCESSED ; set Accessed Bit
st_s r0, [r1] ; Write back PTE
CONV_PTE_TO_TLB
COMMIT_ENTRY_TO_MMU
TLBMISS_RESTORE_REGS
EV_TLBMissI_fast_ret: ; additional label for VDK OS-kit instrumentation
rtie
END(EV_TLBMissI)
;-----------------------------------------------------------------------------
; D-TLB Miss Exception Handler
;-----------------------------------------------------------------------------
; D-TLB Miss fast path: like EV_TLBMissI but the required permission
; (READ and/or WRITE) is derived from the exception cause register;
; a write miss also sets the Dirty bit. Failed checks go to the
; shared slow path below, which re-enters as a normal exception.
ENTRY(EV_TLBMissD)
TLBMISS_FREEUP_REGS
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
LOAD_FAULT_PTE
;----------------------------------------------------------------
; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
cmp_s r2, VMALLOC_START
mov_s r2, _PAGE_PRESENT ; common bit for K/U PTE
or.hs r2, r2, _PAGE_GLOBAL ; kernel PTE only
; Linux PTE [RWX] bits are semantically overloaded:
; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
; -Otherwise they are user-mode permissions, and those are exactly
; same for kernel mode as well (e.g. copy_(to|from)_user)
lr r3, [ecr]
btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
; Above laddering takes care of XCHG access (both R and W)
; By now, r2 setup with all the Flags we need to check in PTE
and r3, r0, r2 ; Mask out NON Flag bits from PTE
brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)
;----------------------------------------------------------------
; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
lr r3, [ecr]
or r0, r0, _PAGE_ACCESSED ; Accessed bit always
btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
or.nz r0, r0, _PAGE_DIRTY ; if Write, set Dirty bit as well
st_s r0, [r1] ; Write back PTE
CONV_PTE_TO_TLB
#if (CONFIG_ARC_MMU_VER == 1)
; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
; memcpy where 3 parties contend for 2 ways, ensuing a livelock.
; But only for old MMU or one with Metal Fix
TLB_WRITE_HEURISTICS
#endif
COMMIT_ENTRY_TO_MMU
TLBMISS_RESTORE_REGS
EV_TLBMissD_fast_ret: ; additional label for VDK OS-kit instrumentation
rtie
;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:
; Restore the 4-scratch regs saved by fast path miss handler
TLBMISS_RESTORE_REGS
; Slow path TLB Miss handled as a regular ARC Exception
; (stack switching / save the complete reg-file).
b call_do_page_fault
END(EV_TLBMissD)
/* ==========================================================================
 * NOTE(review): dataset/file boundary residue follows. The remainder of
 * this chunk is a different source file:
 *   repo: AirFortressIlikara/LS2K0300-linux-4.19 (size 44858)
 *   path: arch/openrisc/kernel/head.S
 * ========================================================================== */
/*
* OpenRISC head.S
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/spr_defs.h>
#include <asm/asm-offsets.h>
#include <linux/of_fdt.h>
/*
 * Basic helper macros.
 * tophys(rd,rs): convert virtual kernel address rs to its physical
 * address in rd (for use while the MMU is off); clobbers rd only.
 * CLEAR_GPR: zero a register. LOAD_SYMBOL_2_GPR: load a full 32-bit
 * symbol address via a movhi/ori pair.
 */
#define tophys(rd,rs) \
l.movhi rd,hi(-KERNELBASE) ;\
l.add rd,rd,rs
#define CLEAR_GPR(gpr) \
l.movhi gpr,0x0
#define LOAD_SYMBOL_2_GPR(gpr,symbol) \
l.movhi gpr,hi(symbol) ;\
l.ori gpr,gpr,lo(symbol)
#define UART_BASE_ADD 0x90000000
/* SR while servicing exceptions: MMUs + caches on, supervisor mode. */
#define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
/* SR for syscall return path: as above plus IRQ and tick-timer enables. */
#define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
/* ============================================[ tmp store locations ]=== */
#define SPR_SHADOW_GPR(x) ((x) + SPR_GPR_BASE + 32)
/*
 * emergency_print temporary stores
 *
 * Scratch save/restore for r4-r9 used by _emergency_print: either in
 * shadow GPRs (when the CPU has them) or at fixed low physical
 * addresses 0x20-0x34 (exception handlers run with the MMU off).
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EMERGENCY_PRINT_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(14)
#define EMERGENCY_PRINT_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(14)
#define EMERGENCY_PRINT_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(15)
#define EMERGENCY_PRINT_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(15)
#define EMERGENCY_PRINT_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(16)
#define EMERGENCY_PRINT_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(16)
#define EMERGENCY_PRINT_STORE_GPR7 l.mtspr r0,r7,SPR_SHADOW_GPR(7)
#define EMERGENCY_PRINT_LOAD_GPR7 l.mfspr r7,r0,SPR_SHADOW_GPR(7)
#define EMERGENCY_PRINT_STORE_GPR8 l.mtspr r0,r8,SPR_SHADOW_GPR(8)
#define EMERGENCY_PRINT_LOAD_GPR8 l.mfspr r8,r0,SPR_SHADOW_GPR(8)
#define EMERGENCY_PRINT_STORE_GPR9 l.mtspr r0,r9,SPR_SHADOW_GPR(9)
#define EMERGENCY_PRINT_LOAD_GPR9 l.mfspr r9,r0,SPR_SHADOW_GPR(9)
#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4
#define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0)
#define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5
#define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0)
#define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6
#define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0)
#define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7
#define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0)
#define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8
#define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0)
#define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9
#define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0)
#endif
/*
 * TLB miss handlers temporary stores
 *
 * Scratch save/restore for r2-r6 used by the TLB miss handlers:
 * shadow GPRs when available, otherwise fixed low physical addresses
 * 0x64-0x74 (handlers run with the MMU off).
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_STORE_GPR2 l.mtspr r0,r2,SPR_SHADOW_GPR(2)
#define EXCEPTION_LOAD_GPR2 l.mfspr r2,r0,SPR_SHADOW_GPR(2)
#define EXCEPTION_STORE_GPR3 l.mtspr r0,r3,SPR_SHADOW_GPR(3)
#define EXCEPTION_LOAD_GPR3 l.mfspr r3,r0,SPR_SHADOW_GPR(3)
#define EXCEPTION_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(4)
#define EXCEPTION_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(4)
#define EXCEPTION_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(5)
#define EXCEPTION_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(5)
#define EXCEPTION_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(6)
#define EXCEPTION_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(6)
#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2
#define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0)
#define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3
#define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0)
#define EXCEPTION_STORE_GPR4 l.sw 0x6c(r0),r4
#define EXCEPTION_LOAD_GPR4 l.lwz r4,0x6c(r0)
#define EXCEPTION_STORE_GPR5 l.sw 0x70(r0),r5
#define EXCEPTION_LOAD_GPR5 l.lwz r5,0x70(r0)
#define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6
#define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0)
#endif
/*
 * EXCEPTION_HANDLE temporary stores
 *
 * Scratch save/restore for r30, r10 and the stack pointer (r1) used by
 * EXCEPTION_HANDLE/UNHANDLED_EXCEPTION below: shadow GPRs when
 * available, otherwise fixed low physical addresses 0x78-0x80.
 */
#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
#define EXCEPTION_T_STORE_GPR30 l.mtspr r0,r30,SPR_SHADOW_GPR(30)
#define EXCEPTION_T_LOAD_GPR30(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(30)
#define EXCEPTION_T_STORE_GPR10 l.mtspr r0,r10,SPR_SHADOW_GPR(10)
#define EXCEPTION_T_LOAD_GPR10(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(10)
#define EXCEPTION_T_STORE_SP l.mtspr r0,r1,SPR_SHADOW_GPR(1)
#define EXCEPTION_T_LOAD_SP(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(1)
#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30
#define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0)
#define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10
#define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0)
#define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1
#define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0)
#endif
/* =========================================================[ macros ]=== */
/*
 * GET_CURRENT_PGD(reg,t1): load this CPU's entry of the current_pgd
 * array into reg (physical access via tophys); t1 is a scratch reg.
 * On SMP the per-CPU slot is indexed by SPR_COREID * 4.
 */
#ifdef CONFIG_SMP
#define GET_CURRENT_PGD(reg,t1) \
LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
l.mfspr t1,r0,SPR_COREID ;\
l.slli t1,t1,2 ;\
l.add reg,reg,t1 ;\
tophys (t1,reg) ;\
l.lwz reg,0(t1)
#else
#define GET_CURRENT_PGD(reg,t1) \
LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
tophys (t1,reg) ;\
l.lwz reg,0(t1)
#endif
/* Load r10 from current_thread_info_set - clobbers r1 and r30 */
#ifdef CONFIG_SMP
#define GET_CURRENT_THREAD_INFO \
LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
tophys (r30,r1) ;\
l.mfspr r10,r0,SPR_COREID ;\
l.slli r10,r10,2 ;\
l.add r30,r30,r10 ;\
/* r10: current_thread_info */ ;\
l.lwz r10,0(r30)
#else
#define GET_CURRENT_THREAD_INFO \
LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
tophys (r30,r1) ;\
/* r10: current_thread_info */ ;\
l.lwz r10,0(r30)
#endif
/*
 * DSCR: this is a common hook for handling exceptions. it will save
 * the needed registers, set up stack and pointer to current
 * then jump to the handler while enabling MMU
 *
 * PRMS: handler - a function to jump to. it has to save the
 * remaining registers to kernel stack, call
 * appropriate arch-independent exception handler
 * and finally jump to ret_from_except
 *
 * PREQ: unchanged state from the time exception happened
 *
 * POST: SAVED the following registers original value
 * to the new created exception frame pointed to by r1
 *
 * r1 - ksp pointing to the new (exception) frame
 * r4 - EEAR exception EA
 * r10 - current pointing to current_thread_info struct
 * r12 - syscall 0, since we didn't come from syscall
 * r30 - handler address of the handler we'll jump to
 *
 * handler has to save remaining registers to the exception
 * ksp frame *before* tainting them!
 *
 * NOTE: this function is not reentrant per se. reentrancy is guaranteed
 * by processor disabling all exceptions/interrupts when exception
 * occurs.
 *
 * OPTM: no need to make it so wasteful to extract ksp when in user mode
 */
#define EXCEPTION_HANDLE(handler) \
EXCEPTION_T_STORE_GPR30 ;\
l.mfspr r30,r0,SPR_ESR_BASE ;\
l.andi r30,r30,SPR_SR_SM ;\
l.sfeqi r30,0 ;\
EXCEPTION_T_STORE_GPR10 ;\
l.bnf 2f /* kernel_mode */ ;\
EXCEPTION_T_STORE_SP /* delay slot */ ;\
1: /* user_mode: */ ;\
GET_CURRENT_THREAD_INFO ;\
tophys (r30,r10) ;\
l.lwz r1,(TI_KSP)(r30) ;\
/* fall through */ ;\
2: /* kernel_mode: */ ;\
/* create new stack frame, save only needed gprs */ ;\
/* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\
/* r12: temp, syscall indicator */ ;\
l.addi r1,r1,-(INT_FRAME_SIZE) ;\
/* r1 is KSP, r30 is __pa(KSP) */ ;\
tophys (r30,r1) ;\
l.sw PT_GPR12(r30),r12 ;\
/* r4 use for tmp before EA */ ;\
l.mfspr r12,r0,SPR_EPCR_BASE ;\
l.sw PT_PC(r30),r12 ;\
l.mfspr r12,r0,SPR_ESR_BASE ;\
l.sw PT_SR(r30),r12 ;\
/* save r30 */ ;\
EXCEPTION_T_LOAD_GPR30(r12) ;\
l.sw PT_GPR30(r30),r12 ;\
/* save r10 as was prior to exception */ ;\
EXCEPTION_T_LOAD_GPR10(r12) ;\
l.sw PT_GPR10(r30),r12 ;\
/* save PT_SP as was prior to exception */ ;\
EXCEPTION_T_LOAD_SP(r12) ;\
l.sw PT_SP(r30),r12 ;\
/* save exception r4, set r4 = EA */ ;\
l.sw PT_GPR4(r30),r4 ;\
l.mfspr r4,r0,SPR_EEAR_BASE ;\
/* r12 == 1 if we come from syscall */ ;\
CLEAR_GPR(r12) ;\
/* ----- turn on MMU ----- */ ;\
/* Carry DSX into exception SR */ ;\
l.mfspr r30,r0,SPR_SR ;\
l.andi r30,r30,SPR_SR_DSX ;\
l.ori r30,r30,(EXCEPTION_SR) ;\
l.mtspr r0,r30,SPR_ESR_BASE ;\
/* r30: EA address of handler */ ;\
LOAD_SYMBOL_2_GPR(r30,handler) ;\
l.mtspr r0,r30,SPR_EPCR_BASE ;\
l.rfe
/*
 * this doesn't work
 *
 *
 * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
 * #define UNHANDLED_EXCEPTION(handler) \
 * l.ori r3,r0,0x1 ;\
 * l.mtspr r0,r3,SPR_SR ;\
 * l.movhi r3,hi(0xf0000100) ;\
 * l.ori r3,r3,lo(0xf0000100) ;\
 * l.jr r3 ;\
 * l.nop 1
 *
 * #endif
 */
/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
 * a bit more careful (if we have a PT_SP or current pointer
 * corruption) and set them up from 'current_set'.
 * It also prints the exception vector and EPC via _emergency_print
 * before building the frame.
 */
#define UNHANDLED_EXCEPTION(handler) \
EXCEPTION_T_STORE_GPR30 ;\
EXCEPTION_T_STORE_GPR10 ;\
EXCEPTION_T_STORE_SP ;\
/* temporary store r3, r9 into r1, r10 */ ;\
l.addi r1,r3,0x0 ;\
l.addi r10,r9,0x0 ;\
/* the string referenced by r3 must be low enough */ ;\
l.jal _emergency_print ;\
l.ori r3,r0,lo(_string_unhandled_exception) ;\
l.mfspr r3,r0,SPR_NPC ;\
l.jal _emergency_print_nr ;\
l.andi r3,r3,0x1f00 ;\
/* the string referenced by r3 must be low enough */ ;\
l.jal _emergency_print ;\
l.ori r3,r0,lo(_string_epc_prefix) ;\
l.jal _emergency_print_nr ;\
l.mfspr r3,r0,SPR_EPCR_BASE ;\
l.jal _emergency_print ;\
l.ori r3,r0,lo(_string_nl) ;\
/* end of printing */ ;\
l.addi r3,r1,0x0 ;\
l.addi r9,r10,0x0 ;\
/* extract current, ksp from current_set */ ;\
LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top) ;\
LOAD_SYMBOL_2_GPR(r10,init_thread_union) ;\
/* create new stack frame, save only needed gprs */ ;\
/* r1: KSP, r10: current, r31: __pa(KSP) */ ;\
/* r12: temp, syscall indicator, r13 temp */ ;\
l.addi r1,r1,-(INT_FRAME_SIZE) ;\
/* r1 is KSP, r30 is __pa(KSP) */ ;\
tophys (r30,r1) ;\
l.sw PT_GPR12(r30),r12 ;\
l.mfspr r12,r0,SPR_EPCR_BASE ;\
l.sw PT_PC(r30),r12 ;\
l.mfspr r12,r0,SPR_ESR_BASE ;\
l.sw PT_SR(r30),r12 ;\
/* save r31 */ ;\
EXCEPTION_T_LOAD_GPR30(r12) ;\
l.sw PT_GPR30(r30),r12 ;\
/* save r10 as was prior to exception */ ;\
EXCEPTION_T_LOAD_GPR10(r12) ;\
l.sw PT_GPR10(r30),r12 ;\
/* save PT_SP as was prior to exception */ ;\
EXCEPTION_T_LOAD_SP(r12) ;\
l.sw PT_SP(r30),r12 ;\
l.sw PT_GPR13(r30),r13 ;\
/* --> */ ;\
/* save exception r4, set r4 = EA */ ;\
l.sw PT_GPR4(r30),r4 ;\
l.mfspr r4,r0,SPR_EEAR_BASE ;\
/* r12 == 1 if we come from syscall */ ;\
CLEAR_GPR(r12) ;\
/* ----- play a MMU trick ----- */ ;\
l.ori r30,r0,(EXCEPTION_SR) ;\
l.mtspr r0,r30,SPR_ESR_BASE ;\
/* r31: EA address of handler */ ;\
LOAD_SYMBOL_2_GPR(r30,handler) ;\
l.mtspr r0,r30,SPR_EPCR_BASE ;\
l.rfe
/* =====================================================[ exceptions] === */
/*
 * OpenRISC exception vector table. Each vector occupies 0x100 bytes at a
 * fixed architectural offset (.org); vectors either dispatch through
 * EXCEPTION_HANDLE to a real handler, jump to a boot-time TLB miss
 * handler, or fall into UNHANDLED_EXCEPTION for reserved vectors.
 */
/* ---[ 0x100: RESET exception ]----------------------------------------- */
.org 0x100
/* Jump to .init code at _start which lives in the .head section
 * and will be discarded after boot.
 */
LOAD_SYMBOL_2_GPR(r15, _start)
tophys (r13,r15) /* MMU disabled */
l.jr r13
l.nop
/* ---[ 0x200: BUS exception ]------------------------------------------- */
.org 0x200
_dispatch_bus_fault:
EXCEPTION_HANDLE(_bus_fault_handler)
/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
.org 0x300
_dispatch_do_dpage_fault:
// totally disable timer interrupt
// l.mtspr r0,r0,SPR_TTMR
// DEBUG_TLB_PROBE(0x300)
// EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
EXCEPTION_HANDLE(_data_page_fault_handler)
/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
.org 0x400
_dispatch_do_ipage_fault:
// totally disable timer interrupt
// l.mtspr r0,r0,SPR_TTMR
// DEBUG_TLB_PROBE(0x400)
// EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
EXCEPTION_HANDLE(_insn_page_fault_handler)
/* ---[ 0x500: Timer exception ]----------------------------------------- */
.org 0x500
EXCEPTION_HANDLE(_timer_handler)
/* ---[ 0x600: Alignment exception ]-------------------------------------- */
.org 0x600
EXCEPTION_HANDLE(_alignment_handler)
/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
.org 0x700
EXCEPTION_HANDLE(_illegal_instruction_handler)
/* ---[ 0x800: External interrupt exception ]---------------------------- */
.org 0x800
EXCEPTION_HANDLE(_external_irq_handler)
/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
.org 0x900
l.j boot_dtlb_miss_handler
l.nop
/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
.org 0xa00
l.j boot_itlb_miss_handler
l.nop
/* ---[ 0xb00: Range exception ]----------------------------------------- */
.org 0xb00
UNHANDLED_EXCEPTION(_vector_0xb00)
/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
.org 0xc00
EXCEPTION_HANDLE(_sys_call_handler)
/* ---[ 0xd00: Trap exception ]------------------------------------------ */
.org 0xd00
UNHANDLED_EXCEPTION(_vector_0xd00)
/* ---[ 0xe00: Trap exception ]------------------------------------------ */
.org 0xe00
// UNHANDLED_EXCEPTION(_vector_0xe00)
EXCEPTION_HANDLE(_trap_handler)
/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
.org 0xf00
UNHANDLED_EXCEPTION(_vector_0xf00)
/* ---[ 0x1000: Reserved exception ]------------------------------------- */
.org 0x1000
UNHANDLED_EXCEPTION(_vector_0x1000)
/* ---[ 0x1100: Reserved exception ]------------------------------------- */
.org 0x1100
UNHANDLED_EXCEPTION(_vector_0x1100)
/* ---[ 0x1200: Reserved exception ]------------------------------------- */
.org 0x1200
UNHANDLED_EXCEPTION(_vector_0x1200)
/* ---[ 0x1300: Reserved exception ]------------------------------------- */
.org 0x1300
UNHANDLED_EXCEPTION(_vector_0x1300)
/* ---[ 0x1400: Reserved exception ]------------------------------------- */
.org 0x1400
UNHANDLED_EXCEPTION(_vector_0x1400)
/* ---[ 0x1500: Reserved exception ]------------------------------------- */
.org 0x1500
UNHANDLED_EXCEPTION(_vector_0x1500)
/* ---[ 0x1600: Reserved exception ]------------------------------------- */
.org 0x1600
UNHANDLED_EXCEPTION(_vector_0x1600)
/* ---[ 0x1700: Reserved exception ]------------------------------------- */
.org 0x1700
UNHANDLED_EXCEPTION(_vector_0x1700)
/* ---[ 0x1800: Reserved exception ]------------------------------------- */
.org 0x1800
UNHANDLED_EXCEPTION(_vector_0x1800)
/* ---[ 0x1900: Reserved exception ]------------------------------------- */
.org 0x1900
UNHANDLED_EXCEPTION(_vector_0x1900)
/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
.org 0x1a00
UNHANDLED_EXCEPTION(_vector_0x1a00)
/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
.org 0x1b00
UNHANDLED_EXCEPTION(_vector_0x1b00)
/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
.org 0x1c00
UNHANDLED_EXCEPTION(_vector_0x1c00)
/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
.org 0x1d00
UNHANDLED_EXCEPTION(_vector_0x1d00)
/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
.org 0x1e00
UNHANDLED_EXCEPTION(_vector_0x1e00)
/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
.org 0x1f00
UNHANDLED_EXCEPTION(_vector_0x1f00)
.org 0x2000
/* ===================================================[ kernel start ]=== */
/* .text*/
/* This early stuff belongs in HEAD, but some of the functions below definitely
 * don't... */
__HEAD
.global _start
/*
 * Boot CPU entry point (reached from the 0x100 reset vector with the
 * MMU off). Saves the fdt pointer passed in r3, zeroes all GPRs for
 * determinism, sets up init stack/current, clears .bss, enables
 * caches and MMU, validates the fdt magic, calls or32_early_setup,
 * and finally jumps to start_kernel. On SMP, non-boot cores divert
 * to secondary_wait.
 */
_start:
/* Init r0 to zero as per spec */
CLEAR_GPR(r0)
/* save kernel parameters */
l.or r25,r0,r3 /* pointer to fdt */
/*
 * ensure a deterministic start
 */
l.ori r3,r0,0x1
l.mtspr r0,r3,SPR_SR
CLEAR_GPR(r1)
CLEAR_GPR(r2)
CLEAR_GPR(r3)
CLEAR_GPR(r4)
CLEAR_GPR(r5)
CLEAR_GPR(r6)
CLEAR_GPR(r7)
CLEAR_GPR(r8)
CLEAR_GPR(r9)
CLEAR_GPR(r10)
CLEAR_GPR(r11)
CLEAR_GPR(r12)
CLEAR_GPR(r13)
CLEAR_GPR(r14)
CLEAR_GPR(r15)
CLEAR_GPR(r16)
CLEAR_GPR(r17)
CLEAR_GPR(r18)
CLEAR_GPR(r19)
CLEAR_GPR(r20)
CLEAR_GPR(r21)
CLEAR_GPR(r22)
CLEAR_GPR(r23)
CLEAR_GPR(r24)
CLEAR_GPR(r26)
CLEAR_GPR(r27)
CLEAR_GPR(r28)
CLEAR_GPR(r29)
CLEAR_GPR(r30)
CLEAR_GPR(r31)
#ifdef CONFIG_SMP
l.mfspr r26,r0,SPR_COREID
l.sfeq r26,r0
l.bnf secondary_wait
l.nop
#endif
/*
 * set up initial ksp and current
 */
/* setup kernel stack */
LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
LOAD_SYMBOL_2_GPR(r10,init_thread_union) // setup current
tophys (r31,r10)
l.sw TI_KSP(r31), r1
l.ori r4,r0,0x0
/*
 * .data contains initialized data,
 * .bss contains uninitialized data - clear it up
 */
clear_bss:
LOAD_SYMBOL_2_GPR(r24, __bss_start)
LOAD_SYMBOL_2_GPR(r26, _end)
tophys(r28,r24)
tophys(r30,r26)
CLEAR_GPR(r24)
CLEAR_GPR(r26)
1:
l.sw (0)(r28),r0
l.sfltu r28,r30
l.bf 1b
l.addi r28,r28,4
enable_ic:
l.jal _ic_enable
l.nop
enable_dc:
l.jal _dc_enable
l.nop
flush_tlb:
l.jal _flush_tlb
l.nop
/* The MMU needs to be enabled before or32_early_setup is called */
enable_mmu:
/*
 * enable dmmu & immu
 * SR[5] = 0, SR[6] = 0, 6th and 7th bit of SR set to 0
 */
l.mfspr r30,r0,SPR_SR
l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
l.or r30,r30,r28
l.mtspr r0,r30,SPR_SR
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
// reset the simulation counters
l.nop 5
/* check fdt header magic word */
l.lwz r3,0(r25) /* load magic from fdt into r3 */
l.movhi r4,hi(OF_DT_HEADER)
l.ori r4,r4,lo(OF_DT_HEADER)
l.sfeq r3,r4
l.bf _fdt_found
l.nop
/* magic number mismatch, set fdt pointer to null */
l.or r25,r0,r0
_fdt_found:
/* pass fdt pointer to or32_early_setup in r3 */
l.or r3,r0,r25
LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
l.jalr r24
l.nop
clear_regs:
/*
 * clear all GPRS to increase determinism
 */
CLEAR_GPR(r2)
CLEAR_GPR(r3)
CLEAR_GPR(r4)
CLEAR_GPR(r5)
CLEAR_GPR(r6)
CLEAR_GPR(r7)
CLEAR_GPR(r8)
CLEAR_GPR(r9)
CLEAR_GPR(r11)
CLEAR_GPR(r12)
CLEAR_GPR(r13)
CLEAR_GPR(r14)
CLEAR_GPR(r15)
CLEAR_GPR(r16)
CLEAR_GPR(r17)
CLEAR_GPR(r18)
CLEAR_GPR(r19)
CLEAR_GPR(r20)
CLEAR_GPR(r21)
CLEAR_GPR(r22)
CLEAR_GPR(r23)
CLEAR_GPR(r24)
CLEAR_GPR(r25)
CLEAR_GPR(r26)
CLEAR_GPR(r27)
CLEAR_GPR(r28)
CLEAR_GPR(r29)
CLEAR_GPR(r30)
CLEAR_GPR(r31)
jump_start_kernel:
/*
 * jump to kernel entry (start_kernel)
 */
LOAD_SYMBOL_2_GPR(r30, start_kernel)
l.jr r30
l.nop
/*
 * _flush_tlb: invalidate all DTLB/ITLB match registers (way 0).
 * Writes zero to 128 consecutive DTLBMR/ITLBMR entries.
 * NOTE(review): 128 is the assumed maximum set count, not read from
 * DMMUCFGR/IMMUCFGR - verify against the target MMU configuration.
 * In:  nothing. Clobbers r5, r6, r7, flags. Returns via r9 (LR).
 */
_flush_tlb:
/*
 * I N V A L I D A T E T L B e n t r i e s
 */
LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
l.addi r7,r0,128 /* Maximum number of sets */
1:
l.mtspr r5,r0,0x0
l.mtspr r6,r0,0x0
l.addi r5,r5,1
l.addi r6,r6,1
l.sfeq r7,r0
l.bnf 1b
l.addi r7,r7,-1
l.jr r9
l.nop
#ifdef CONFIG_SMP
/*
 * Secondary-CPU bring-up: park the core (dozing via PMR if power
 * management is present) until secondary_release matches our core id,
 * then set up ksp/current from secondary_thread_info, enable caches,
 * flush TLBs, turn on the MMUs via an ESR/EPCR + l.rfe trampoline, and
 * enter secondary_start_kernel.
 */
secondary_wait:
/* Doze the cpu until we are asked to run */
/* If we don't have power management skip doze */
l.mfspr r25,r0,SPR_UPR
l.andi r25,r25,SPR_UPR_PMP
l.sfeq r25,r0
l.bf secondary_check_release
l.nop
/* Setup special secondary exception handler */
LOAD_SYMBOL_2_GPR(r3, _secondary_evbar)
tophys(r25,r3)
l.mtspr r0,r25,SPR_EVBAR
/* Enable Interrupts */
l.mfspr r25,r0,SPR_SR
l.ori r25,r25,SPR_SR_IEE
l.mtspr r0,r25,SPR_SR
/* Unmask interrupts */
l.mfspr r25,r0,SPR_PICMR
l.ori r25,r25,0xffff
l.mtspr r0,r25,SPR_PICMR
/* Doze */
l.mfspr r25,r0,SPR_PMR
LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME)
l.or r25,r25,r3
l.mtspr r0,r25,SPR_PMR
/* Wakeup - Restore exception handler */
l.mtspr r0,r0,SPR_EVBAR
secondary_check_release:
/*
 * Check if we actually got the release signal, if not go-back to
 * sleep.
 */
l.mfspr r25,r0,SPR_COREID
LOAD_SYMBOL_2_GPR(r3, secondary_release)
tophys(r4, r3)
l.lwz r3,0(r4)
l.sfeq r25,r3
l.bnf secondary_wait
l.nop
/* fall through to secondary_init */
secondary_init:
/*
 * set up initial ksp and current
 */
LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
tophys (r30,r10)
l.lwz r10,0(r30)
l.addi r1,r10,THREAD_SIZE
tophys (r30,r10)
l.sw TI_KSP(r30),r1
l.jal _ic_enable
l.nop
l.jal _dc_enable
l.nop
l.jal _flush_tlb
l.nop
/*
 * enable dmmu & immu
 */
l.mfspr r30,r0,SPR_SR
l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
l.or r30,r30,r28
/*
 * This is a bit tricky, we need to switch over from physical addresses
 * to virtual addresses on the fly.
 * To do that, we first set up ESR with the IME and DME bits set.
 * Then EPCR is set to secondary_start and then a l.rfe is issued to
 * "jump" to that.
 */
l.mtspr r0,r30,SPR_ESR_BASE
LOAD_SYMBOL_2_GPR(r30, secondary_start)
l.mtspr r0,r30,SPR_EPCR_BASE
l.rfe
secondary_start:
LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
l.jr r30
l.nop
#endif
/* ========================================[ cache ]=== */
/* alignment here so we don't change memory offsets with
 * memory controller defined
 */
.align 0x2000
/*
 * _ic_enable: if an instruction cache is present (UPR.ICP), disable it,
 * derive block size and set count from ICCFGR, invalidate every line
 * via ICBIR, then re-enable it (followed by nops to flush the pipeline).
 * Clobbers r5, r6, r14, r16, r24, r26, r28, r30, flags. Returns via r9.
 */
_ic_enable:
/* Check if IC present and skip enabling otherwise */
l.mfspr r24,r0,SPR_UPR
l.andi r26,r24,SPR_UPR_ICP
l.sfeq r26,r0
l.bf 9f
l.nop
/* Disable IC */
l.mfspr r6,r0,SPR_SR
l.addi r5,r0,-1
l.xori r5,r5,SPR_SR_ICE
l.and r5,r6,r5
l.mtspr r0,r5,SPR_SR
/* Establish cache block size
If BS=0, 16;
If BS=1, 32;
r14 contain block size
*/
l.mfspr r24,r0,SPR_ICCFGR
l.andi r26,r24,SPR_ICCFGR_CBS
l.srli r28,r26,7
l.ori r30,r0,16
l.sll r14,r30,r28
/* Establish number of cache sets
r16 contains number of cache sets
r28 contains log(# of cache sets)
*/
l.andi r26,r24,SPR_ICCFGR_NCS
l.srli r28,r26,3
l.ori r30,r0,1
l.sll r16,r30,r28
/* Invalidate IC */
l.addi r6,r0,0
l.sll r5,r14,r28
// l.mul r5,r14,r16
// l.trap 1
// l.addi r5,r0,IC_SIZE
1:
l.mtspr r0,r6,SPR_ICBIR
l.sfne r6,r5
l.bf 1b
l.add r6,r6,r14
// l.addi r6,r6,IC_LINE
/* Enable IC */
l.mfspr r6,r0,SPR_SR
l.ori r6,r6,SPR_SR_ICE
l.mtspr r0,r6,SPR_SR
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
l.nop
9:
l.jr r9
l.nop
/*
 * _dc_enable: data-cache counterpart of _ic_enable. If a D-cache is
 * present (UPR.DCP), disable it, size it from DCCFGR, invalidate every
 * line via DCBIR, then re-enable it.
 * Clobbers r5, r6, r14, r16, r24, r26, r28, r30, flags. Returns via r9.
 */
_dc_enable:
/* Check if DC present and skip enabling otherwise */
l.mfspr r24,r0,SPR_UPR
l.andi r26,r24,SPR_UPR_DCP
l.sfeq r26,r0
l.bf 9f
l.nop
/* Disable DC */
l.mfspr r6,r0,SPR_SR
l.addi r5,r0,-1
l.xori r5,r5,SPR_SR_DCE
l.and r5,r6,r5
l.mtspr r0,r5,SPR_SR
/* Establish cache block size
If BS=0, 16;
If BS=1, 32;
r14 contain block size
*/
l.mfspr r24,r0,SPR_DCCFGR
l.andi r26,r24,SPR_DCCFGR_CBS
l.srli r28,r26,7
l.ori r30,r0,16
l.sll r14,r30,r28
/* Establish number of cache sets
r16 contains number of cache sets
r28 contains log(# of cache sets)
*/
l.andi r26,r24,SPR_DCCFGR_NCS
l.srli r28,r26,3
l.ori r30,r0,1
l.sll r16,r30,r28
/* Invalidate DC */
l.addi r6,r0,0
l.sll r5,r14,r28
1:
l.mtspr r0,r6,SPR_DCBIR
l.sfne r6,r5
l.bf 1b
l.add r6,r6,r14
/* Enable DC */
l.mfspr r6,r0,SPR_SR
l.ori r6,r6,SPR_SR_DCE
l.mtspr r0,r6,SPR_SR
9:
l.jr r9
l.nop
/* ===============================================[ page table masks ]=== */
#define DTLB_UP_CONVERT_MASK 0x3fa
#define ITLB_UP_CONVERT_MASK 0x3a
/* for SMP we'd have (this is a bit subtle, CC must be always set
 * for SMP, but since we have _PAGE_PRESENT bit always defined
 * we can just modify the mask)
 */
#define DTLB_SMP_CONVERT_MASK 0x3fb
#define ITLB_SMP_CONVERT_MASK 0x3b
/* ---[ boot dtlb miss handler ]----------------------------------------- */
/*
 * Early (boot-time) DTLB miss handler: installs a 1:1 mapping for the
 * faulting EA, translated to physical via tophys only for EA above
 * 0xbfffffff, without consulting the page tables. Replaced once the
 * proper handlers in .text take over.
 */
boot_dtlb_miss_handler:
/* mask for DTLB_MR register: - (0) sets V (valid) bit,
 * - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_MR_MASK 0xfffff001
/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
 * - (4) sets A (access) bit,
 * - (5) sets D (dirty) bit,
 * - (8) sets SRE (superuser read) bit
 * - (9) sets SWE (superuser write) bit
 * - (31-12) sets bits belonging to VPN (31-12)
 */
#define DTLB_TR_MASK 0xfffff332
/* These are for masking out the VPN/PPN value from the MR/TR registers...
 * it's not the same as the PFN */
#define VPN_MASK 0xfffff000
#define PPN_MASK 0xfffff000
EXCEPTION_STORE_GPR6
#if 0
l.mfspr r6,r0,SPR_ESR_BASE //
l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
l.sfeqi r6,0 // r6 == 0x1 --> SM
l.bf exit_with_no_dtranslation //
l.nop
#endif
/* this could be optimized by moving storing of
 * non r6 registers here, and jumping r6 restore
 * if not in supervisor mode
 */
EXCEPTION_STORE_GPR2
EXCEPTION_STORE_GPR3
EXCEPTION_STORE_GPR4
EXCEPTION_STORE_GPR5
l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
immediate_translation:
CLEAR_GPR(r6)
l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
l.mfspr r6, r0, SPR_DMMUCFGR
l.andi r6, r6, SPR_DMMUCFGR_NTS
l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
l.ori r5, r0, 0x1
l.sll r5, r5, r6 // r5 = number DMMU sets
l.addi r6, r5, -1 // r6 = nsets mask
l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
l.or r6,r6,r4 // r6 <- r4
l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000
l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry
l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR
/* set up DTLB with no translation for EA <= 0xbfffffff */
LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA)
l.bf 1f // goto out
l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1)
tophys(r3,r4) // r3 <- PA
1:
l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000
l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry
l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR
EXCEPTION_LOAD_GPR6
EXCEPTION_LOAD_GPR5
EXCEPTION_LOAD_GPR4
EXCEPTION_LOAD_GPR3
EXCEPTION_LOAD_GPR2
l.rfe // SR <- ESR, PC <- EPC
exit_with_no_dtranslation:
/* EA out of memory or not in supervisor mode */
EXCEPTION_LOAD_GPR6
EXCEPTION_LOAD_GPR4
l.j _dispatch_bus_fault
/* ---[ boot itlb miss handler ]----------------------------------------- */
/*
 * Early (boot-time) ITLB miss handler: mirror of boot_dtlb_miss_handler
 * for instruction fetches. Installs a 1:1 mapping; EA above 0x0fffffff
 * is translated to physical via tophys.
 */
boot_itlb_miss_handler:
/* mask for ITLB_MR register: - sets V (valid) bit,
 * - sets bits belonging to VPN (15-12)
 */
#define ITLB_MR_MASK 0xfffff001
/* mask for ITLB_TR register: - sets A (access) bit,
 * - sets SXE (superuser execute) bit
 * - sets bits belonging to VPN (15-12)
 */
#define ITLB_TR_MASK 0xfffff050
/*
#define VPN_MASK 0xffffe000
#define PPN_MASK 0xffffe000
*/
EXCEPTION_STORE_GPR2
EXCEPTION_STORE_GPR3
EXCEPTION_STORE_GPR4
EXCEPTION_STORE_GPR5
EXCEPTION_STORE_GPR6
#if 0
l.mfspr r6,r0,SPR_ESR_BASE //
l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
l.sfeqi r6,0 // r6 == 0x1 --> SM
l.bf exit_with_no_itranslation
l.nop
#endif
l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
earlyearly:
CLEAR_GPR(r6)
l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
l.mfspr r6, r0, SPR_IMMUCFGR
l.andi r6, r6, SPR_IMMUCFGR_NTS
l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
l.ori r5, r0, 0x1
l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR
l.addi r6, r5, -1 // r6 = nsets mask
l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
l.or r6,r6,r4 // r6 <- r4
l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000
l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry
l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR
/*
 * set up ITLB with no translation for EA <= 0x0fffffff
 *
 * we need this for head.S mapping (EA = PA). if we move all functions
 * which run with mmu enabled into entry.S, we might be able to eliminate this.
 *
 */
LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
l.sfgeu r6,r4 // flag if r6 >= r4 (if 0x0fffffff >= EA)
l.bf 1f // goto out
l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1)
tophys(r3,r4) // r3 <- PA
1:
l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000
l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry
l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR
EXCEPTION_LOAD_GPR6
EXCEPTION_LOAD_GPR5
EXCEPTION_LOAD_GPR4
EXCEPTION_LOAD_GPR3
EXCEPTION_LOAD_GPR2
l.rfe // SR <- ESR, PC <- EPC
exit_with_no_itranslation:
EXCEPTION_LOAD_GPR4
EXCEPTION_LOAD_GPR6
l.j _dispatch_bus_fault
l.nop
/* ====================================================================== */
/*
 * Stuff below here shouldn't go into .head section... maybe this stuff
 * can be moved to entry.S ???
 */
/* ==============================================[ DTLB miss handler ]=== */
/*
 * Comments:
 * Exception handlers are entered with MMU off so the following handler
 * needs to use physical addressing
 *
 * Runtime DTLB miss handler: walks the two-level page table for the
 * faulting EA (EEAR); if the pmd and pte are present, converts the pte
 * into DTLBTR/DTLBMR entries (set index = (EA >> PAGE_SHIFT) masked by
 * DMMU set count) and returns via l.rfe. Otherwise dispatches a regular
 * page fault.
 */
.text
ENTRY(dtlb_miss_handler)
EXCEPTION_STORE_GPR2
EXCEPTION_STORE_GPR3
EXCEPTION_STORE_GPR4
/*
 * get EA of the miss
 */
l.mfspr r2,r0,SPR_EEAR_BASE
/*
 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
 */
GET_CURRENT_PGD(r3,r4) // r3 is current_pgd, r4 is temp
l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
l.slli r4,r4,0x2 // to get address << 2
l.add r3,r4,r3 // r4 is pgd_index(daddr)
/*
 * if (pmd_none(*pmd))
 * goto pmd_none:
 */
tophys (r4,r3)
l.lwz r3,0x0(r4) // get *pmd value
l.sfne r3,r0
l.bnf d_pmd_none
l.addi r3,r0,0xffffe000 // PAGE_MASK
d_pmd_good:
/*
 * pte = *pte_offset(pmd, daddr);
 */
l.lwz r4,0x0(r4) // get **pmd value
l.and r4,r4,r3 // & PAGE_MASK
l.srli r2,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
l.andi r3,r2,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
l.slli r3,r3,0x2 // to get address << 2
l.add r3,r3,r4
l.lwz r3,0x0(r3) // this is pte at last
/*
 * if (!pte_present(pte))
 */
l.andi r4,r3,0x1
l.sfne r4,r0 // is pte present
l.bnf d_pte_not_present
l.addi r4,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK
/*
 * fill DTLB TR register
 */
l.and r4,r3,r4 // apply the mask
// Determine number of DMMU sets
l.mfspr r2, r0, SPR_DMMUCFGR
l.andi r2, r2, SPR_DMMUCFGR_NTS
l.srli r2, r2, SPR_DMMUCFGR_NTS_OFF
l.ori r3, r0, 0x1
l.sll r3, r3, r2 // r3 = number DMMU sets DMMUCFGR
l.addi r2, r3, -1 // r2 = nsets mask
l.mfspr r3, r0, SPR_EEAR_BASE
l.srli r3, r3, 0xd // >> PAGE_SHIFT
l.and r2, r3, r2 // calc offset: & (NUM_TLB_ENTRIES-1)
//NUM_TLB_ENTRIES
l.mtspr r2,r4,SPR_DTLBTR_BASE(0)
/*
 * fill DTLB MR register
 */
l.slli r3, r3, 0xd /* << PAGE_SHIFT => EA & PAGE_MASK */
l.ori r4,r3,0x1 // set hardware valid bit: DTBL_MR entry
l.mtspr r2,r4,SPR_DTLBMR_BASE(0)
EXCEPTION_LOAD_GPR2
EXCEPTION_LOAD_GPR3
EXCEPTION_LOAD_GPR4
l.rfe
d_pmd_none:
d_pte_not_present:
EXCEPTION_LOAD_GPR2
EXCEPTION_LOAD_GPR3
EXCEPTION_LOAD_GPR4
EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
/* ==============================================[ ITLB miss handler ]=== */
/*
 * Runtime ITLB miss handler: same page-table walk as dtlb_miss_handler
 * but fills ITLBTR/ITLBMR. If the pte has no execute/read permission
 * bits set (mask 0x7c0), a workaround path forces UXE|SXE into the
 * ITLBTR entry (see __PHX__ note below).
 */
ENTRY(itlb_miss_handler)
EXCEPTION_STORE_GPR2
EXCEPTION_STORE_GPR3
EXCEPTION_STORE_GPR4
/*
 * get EA of the miss
 */
l.mfspr r2,r0,SPR_EEAR_BASE
/*
 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
 *
 */
GET_CURRENT_PGD(r3,r4) // r3 is current_pgd, r4 is temp
l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
l.slli r4,r4,0x2 // to get address << 2
l.add r3,r4,r3 // r4 is pgd_index(daddr)
/*
 * if (pmd_none(*pmd))
 * goto pmd_none:
 */
tophys (r4,r3)
l.lwz r3,0x0(r4) // get *pmd value
l.sfne r3,r0
l.bnf i_pmd_none
l.addi r3,r0,0xffffe000 // PAGE_MASK
i_pmd_good:
/*
 * pte = *pte_offset(pmd, iaddr);
 *
 */
l.lwz r4,0x0(r4) // get **pmd value
l.and r4,r4,r3 // & PAGE_MASK
l.srli r2,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
l.andi r3,r2,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
l.slli r3,r3,0x2 // to get address << 2
l.add r3,r3,r4
l.lwz r3,0x0(r3) // this is pte at last
/*
 * if (!pte_present(pte))
 *
 */
l.andi r4,r3,0x1
l.sfne r4,r0 // is pte present
l.bnf i_pte_not_present
l.addi r4,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK
/*
 * fill ITLB TR register
 */
l.and r4,r3,r4 // apply the mask
l.andi r3,r3,0x7c0 // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
l.sfeq r3,r0
l.bf itlb_tr_fill //_workaround
// Determine number of IMMU sets
l.mfspr r2, r0, SPR_IMMUCFGR
l.andi r2, r2, SPR_IMMUCFGR_NTS
l.srli r2, r2, SPR_IMMUCFGR_NTS_OFF
l.ori r3, r0, 0x1
l.sll r3, r3, r2 // r3 = number IMMU sets IMMUCFGR
l.addi r2, r3, -1 // r2 = nsets mask
l.mfspr r3, r0, SPR_EEAR_BASE
l.srli r3, r3, 0xd // >> PAGE_SHIFT
l.and r2, r3, r2 // calc offset: & (NUM_TLB_ENTRIES-1)
/*
 * __PHX__ :: fixme
 * we should not just blindly set executable flags,
 * but it does help with ping. the clean way would be to find out
 * (and fix it) why stack doesn't have execution permissions
 */
itlb_tr_fill_workaround:
l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | ITLBTR_SXE)
itlb_tr_fill:
l.mtspr r2,r4,SPR_ITLBTR_BASE(0)
/*
 * fill ITLB MR register
 */
l.slli r3, r3, 0xd /* << PAGE_SHIFT => EA & PAGE_MASK */
l.ori r4,r3,0x1 // set hardware valid bit: ITBL_MR entry
l.mtspr r2,r4,SPR_ITLBMR_BASE(0)
EXCEPTION_LOAD_GPR2
EXCEPTION_LOAD_GPR3
EXCEPTION_LOAD_GPR4
l.rfe
i_pmd_none:
i_pte_not_present:
EXCEPTION_LOAD_GPR2
EXCEPTION_LOAD_GPR3
EXCEPTION_LOAD_GPR4
EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
/* ==============================================[ boot tlb handlers ]=== */
/* =================================================[ debugging aids ]=== */
/*
 * Scratch buffer used by _immu_trampoline_workaround below: the faulting
 * instruction pair is copied (and patched) into these slots, and EPCR is
 * redirected here so execution can continue after an IMMU fault.
 */
.align 64
_immu_trampoline:
.space 64
_immu_trampoline_top:
/* Word offsets of the six instruction slots inside the trampoline. */
#define TRAMP_SLOT_0 (0x0)
#define TRAMP_SLOT_1 (0x4)
#define TRAMP_SLOT_2 (0x8)
#define TRAMP_SLOT_3 (0xc)
#define TRAMP_SLOT_4 (0x10)
#define TRAMP_SLOT_5 (0x14)
#define TRAMP_FRAME_SIZE (0x18)
/*
 * IMMU fault workaround: copy the two instructions at EEA-4/EEA+0 into
 * the trampoline buffer, rewrite any control-flow instruction among them
 * so its target/offset stays correct when executed from the trampoline,
 * point EPCR at the trampoline and invalidate the icache lines covering
 * it.  Returns via r9.
 *
 * In:  r2 = EEA (effective address of the fault)
 * Uses: r3-r6, r14, r21, r23 as scratch.
 */
ENTRY(_immu_trampoline_workaround)
// r2 EEA
// r6 is physical EEA
tophys(r6,r2)
LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
tophys (r3,r5) // r3 is trampoline (physical)
// 0x15000000 == l.nop; pre-fill all variable slots with nops
LOAD_SYMBOL_2_GPR(r4,0x15000000)
l.sw TRAMP_SLOT_0(r3),r4
l.sw TRAMP_SLOT_1(r3),r4
l.sw TRAMP_SLOT_4(r3),r4
l.sw TRAMP_SLOT_5(r3),r4
// EPC = EEA - 0x4
l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address)
l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data
l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address)
l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data
// dispatch on the opcode of the instruction at EEA-4
l.srli r5,r4,26 // opcode is the top 6 bits
l.sfeqi r5,0 // l.j
l.bf 0f
l.sfeqi r5,0x11 // l.jr
l.bf 1f
l.sfeqi r5,1 // l.jal
l.bf 2f
l.sfeqi r5,0x12 // l.jalr
l.bf 3f
l.sfeqi r5,3 // l.bnf
l.bf 4f
l.sfeqi r5,4 // l.bf
l.bf 5f
99:
l.nop
l.j 99b // should never happen
l.nop 1
// r2 is EEA
// r3 is trampoline address (physical)
// r4 is instruction
// r6 is physical(EEA)
//
// r5
2: // l.jal
/* 19 20 aa aa l.movhi r9,0xaaaa
 * a9 29 bb bb l.ori r9,0xbbbb
 *
 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
 *
 * materialize the link-register value that l.jal would have produced
 */
l.addi r6,r2,0x4 // this is 0xaaaabbbb
// l.movhi r9,0xaaaa
l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
l.sh (TRAMP_SLOT_0+0x0)(r3),r5
l.srli r5,r6,16
l.sh (TRAMP_SLOT_0+0x2)(r3),r5
// l.ori r9,0xbbbb
l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
l.sh (TRAMP_SLOT_1+0x0)(r3),r5
l.andi r5,r6,0xffff
l.sh (TRAMP_SLOT_1+0x2)(r3),r5
/* fallthrough, need to set up new jump offset */
0: // l.j
l.slli r6,r4,6 // original offset shifted left 6 - 2
// l.srli r6,r6,6 // original offset shifted right 2
l.slli r4,r2,4 // old jump position: EEA shifted left 4
// l.srli r4,r4,6 // old jump position: shifted right 2
l.addi r5,r3,0xc // new jump position (physical)
l.slli r5,r5,4 // new jump position: shifted left 4
// calculate new jump offset
// new_off = old_off + (old_jump - new_jump)
l.sub r5,r4,r5 // old_jump - new_jump
l.add r5,r6,r5 // orig_off + (old_jump - new_jump)
l.srli r5,r5,6 // new offset shifted right 2
// r5 is new jump offset
// l.j has opcode 0x0... so the patched word is just the offset
l.sw TRAMP_SLOT_2(r3),r5 // write it back
l.j trampoline_out
l.nop
/* ----------------------------- */
3: // l.jalr
/* 19 20 aa aa l.movhi r9,0xaaaa
 * a9 29 bb bb l.ori r9,0xbbbb
 *
 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
 *
 * same link-register setup as l.jal above
 */
l.addi r6,r2,0x4 // this is 0xaaaabbbb
// l.movhi r9,0xaaaa
l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
l.sh (TRAMP_SLOT_0+0x0)(r3),r5
l.srli r5,r6,16
l.sh (TRAMP_SLOT_0+0x2)(r3),r5
// l.ori r9,0xbbbb
l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
l.sh (TRAMP_SLOT_1+0x0)(r3),r5
l.andi r5,r6,0xffff
l.sh (TRAMP_SLOT_1+0x2)(r3),r5
// register-indirect target needs no offset fixup; just demote the
// opcode so we don't clobber r9 a second time
l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction
l.andi r5,r5,0x3ff // clear out opcode part
l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr
l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
/* fallthrough */
1: // l.jr
l.j trampoline_out
l.nop
/* ----------------------------- */
4: // l.bnf
5: // l.bf
l.slli r6,r4,6 // original offset shifted left 6 - 2
// l.srli r6,r6,6 // original offset shifted right 2
l.slli r4,r2,4 // old jump position: EEA shifted left 4
// l.srli r4,r4,6 // old jump position: shifted right 2
l.addi r5,r3,0xc // new jump position (physical)
l.slli r5,r5,4 // new jump position: shifted left 4
// calculate new jump offset
// new_off = old_off + (old_jump - new_jump)
l.add r6,r6,r4 // (orig_off + old_jump)
l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump
l.srli r6,r6,6 // new offset shifted right 2
// r6 is new jump offset
l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction
l.srli r4,r4,16
l.andi r4,r4,0xfc00 // get opcode part
l.slli r4,r4,16
l.or r6,r4,r6 // l.b(n)f new offset
l.sw TRAMP_SLOT_2(r3),r6 // write it back
/* for the not-taken path we need to add an l.j back to EEA + 0x8 */
tophys (r4,r2) // may not be needed (due to shifts down_
l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8)
// jump position = r5 + 0x8 (0x8 compensated)
l.sub r4,r4,r5 // jump offset = target - new_position + 0x8
l.slli r4,r4,4 // the amount of info in immediate of jump
l.srli r4,r4,6 // jump instruction with offset
l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot
/* fallthrough */
trampoline_out:
// set up new EPC to point to our trampoline code
LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
l.mtspr r0,r5,SPR_EPCR_BASE
// immu_trampoline is (4x) CACHE_LINE aligned
// and only 6 instructions long,
// so we need to invalidate only 2 lines
/* Establish cache block size
   If BS=0, 16;
   If BS=1, 32;
   r14 contains block size
*/
l.mfspr r21,r0,SPR_ICCFGR
l.andi r21,r21,SPR_ICCFGR_CBS
l.srli r21,r21,7
l.ori r23,r0,16
l.sll r14,r23,r21
// invalidate the two icache lines covering the trampoline
l.mtspr r0,r5,SPR_ICBIR
l.add r5,r5,r14
l.mtspr r0,r5,SPR_ICBIR
l.jr r9
l.nop
/*
* DSCR: prints a string referenced by r3.
*
* PRMS: r3 - address of the first character of null
* terminated string to be printed
*
* PREQ: UART at UART_BASE_ADD has to be initialized
*
* POST: caller should be aware that r3, r9 are changed
*/
/*
 * Print the NUL-terminated string at r3 byte-by-byte to the UART at
 * UART_BASE_ADD, busy-waiting on the line-status register before and
 * after each byte.  Returns via r9.  Clobbers r3; r4-r7 are preserved
 * through the emergency-print scratch slots.
 */
ENTRY(_emergency_print)
EMERGENCY_PRINT_STORE_GPR4
EMERGENCY_PRINT_STORE_GPR5
EMERGENCY_PRINT_STORE_GPR6
EMERGENCY_PRINT_STORE_GPR7
2:
l.lbz r7,0(r3) // fetch next character
l.sfeq r7,r0 // NUL terminator -> done
l.bf 9f
l.nop
// putc:
l.movhi r4,hi(UART_BASE_ADD)
// busy-wait until the transmitter holding register is empty
// (LSR is at offset 5 on a 16550-style UART; 0x20 == THRE)
l.addi r6,r0,0x20
1: l.lbz r5,5(r4)
l.andi r5,r5,0x20
l.sfeq r5,r6
l.bnf 1b
l.nop
l.sb 0(r4),r7 // write the byte to THR
// wait until THR empty AND transmitter idle (0x60 == THRE|TEMT)
l.addi r6,r0,0x60
1: l.lbz r5,5(r4)
l.andi r5,r5,0x60
l.sfeq r5,r6
l.bnf 1b
l.nop
/* next character */
l.j 2b
l.addi r3,r3,0x1 // advance string pointer (delay slot)
9:
EMERGENCY_PRINT_LOAD_GPR7
EMERGENCY_PRINT_LOAD_GPR6
EMERGENCY_PRINT_LOAD_GPR5
EMERGENCY_PRINT_LOAD_GPR4
l.jr r9
l.nop
/*
 * Print the 32-bit value in r3 as lowercase hex on the UART at
 * UART_BASE_ADD, skipping leading zero nibbles (but always emitting at
 * least the final nibble).  Returns via r9.  r4-r8 are preserved via
 * the emergency-print scratch slots; r8 is the nibble shift counter.
 */
ENTRY(_emergency_print_nr)
EMERGENCY_PRINT_STORE_GPR4
EMERGENCY_PRINT_STORE_GPR5
EMERGENCY_PRINT_STORE_GPR6
EMERGENCY_PRINT_STORE_GPR7
EMERGENCY_PRINT_STORE_GPR8
l.addi r8,r0,32 // shift register
1: /* remove leading zeros */
l.addi r8,r8,-0x4
l.srl r7,r3,r8
l.andi r7,r7,0xf // current nibble
/* don't skip the last zero if number == 0x0 */
l.sfeqi r8,0x4
l.bf 2f
l.nop
l.sfeq r7,r0
l.bf 1b
l.nop
2:
l.srl r7,r3,r8
l.andi r7,r7,0xf
l.sflts r8,r0 // shift went negative -> all nibbles printed
l.bf 9f
l.sfgtui r7,0x9 // digit or a..f?
l.bnf 8f
l.nop
l.addi r7,r7,0x27 // 0xa..0xf -> 'a'..'f' (0x27 + 0x30 below)
8:
l.addi r7,r7,0x30 // 0x0..0x9 -> '0'..'9'
// putc:
l.movhi r4,hi(UART_BASE_ADD)
// busy-wait for THRE (LSR offset 5, bit 0x20) before writing
l.addi r6,r0,0x20
1: l.lbz r5,5(r4)
l.andi r5,r5,0x20
l.sfeq r5,r6
l.bnf 1b
l.nop
l.sb 0(r4),r7
// wait for THRE|TEMT (0x60) so the byte is fully out
l.addi r6,r0,0x60
1: l.lbz r5,5(r4)
l.andi r5,r5,0x60
l.sfeq r5,r6
l.bnf 1b
l.nop
/* next character */
l.j 2b
l.addi r8,r8,-0x4 // next nibble (delay slot)
9:
EMERGENCY_PRINT_LOAD_GPR8
EMERGENCY_PRINT_LOAD_GPR7
EMERGENCY_PRINT_LOAD_GPR6
EMERGENCY_PRINT_LOAD_GPR5
EMERGENCY_PRINT_LOAD_GPR4
l.jr r9
l.nop
/*
* This should be used for debugging only.
* It messes up the Linux early serial output
* somehow, so use it sparingly and essentially
* only if you need to debug something that goes wrong
* before Linux gets the early serial going.
*
* Furthermore, you'll have to make sure you set the
* UART_DIVISOR correctly according to the system
* clock rate.
*
*
*/
/* System clock and console baud rate used to derive the UART divisor. */
#define SYS_CLK 20000000
//#define SYS_CLK 1843200
#define OR32_CONSOLE_BAUD 115200
#define UART_DIVISOR SYS_CLK/(16*OR32_CONSOLE_BAUD)
/*
 * Minimal 16550-style UART bring-up for the emergency print helpers:
 * enable/clear FIFOs, mask interrupts, program 8N1, then set DLAB to
 * load the baud divisor and restore the line-control register.
 * Returns via r9; clobbers r3-r5.
 */
ENTRY(_early_uart_init)
l.movhi r3,hi(UART_BASE_ADD)
l.addi r4,r0,0x7
l.sb 0x2(r3),r4 // FCR: enable + reset FIFOs
l.addi r4,r0,0x0
l.sb 0x1(r3),r4 // IER: all interrupts off
l.addi r4,r0,0x3
l.sb 0x3(r3),r4 // LCR: 8 data bits, no parity, 1 stop
l.lbz r5,3(r3) // keep LCR value for restore below
l.ori r4,r5,0x80
l.sb 0x3(r3),r4 // set DLAB to expose divisor latches
l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
l.sb UART_DLM(r3),r4
l.addi r4,r0,((UART_DIVISOR) & 0x000000ff)
l.sb UART_DLL(r3),r4
l.sb 0x3(r3),r5 // restore LCR (clears DLAB)
l.jr r9
l.nop
/*
 * Exception vector base for secondary CPUs.  The first 0x800 bytes are
 * unused vector space; the stub that follows simply forces supervisor
 * mode with interrupts disabled in ESR and returns via l.rfe.
 */
.align 0x1000
.global _secondary_evbar
_secondary_evbar:
.space 0x800
/* Just disable interrupts and Return */
l.ori r3,r0,SPR_SR_SM
l.mtspr r0,r3,SPR_ESR_BASE
l.rfe
/* Message fragments used by the unhandled-exception report code. */
.section .rodata
_string_unhandled_exception:
.string "\n\rRunarunaround: Unhandled exception 0x\0"
_string_epc_prefix:
.string ": EPC=0x\0"
_string_nl:
.string "\n\r\0"
/* ========================================[ page aligned structures ]=== */
/*
 * .data section should be page aligned
 * (look into arch/openrisc/kernel/vmlinux.lds.S)
 */
.section .data,"aw"
.align 8192
.global empty_zero_page
empty_zero_page:
.space 8192 // one zeroed page, shared read-only by the mm layer
.global swapper_pg_dir
swapper_pg_dir:
.space 8192 // kernel (init_mm) page global directory
.global _unhandled_stack
_unhandled_stack:
.space 8192 // private stack used while reporting unhandled exceptions
_unhandled_stack_top:
/* ============================================================[ EOF ]=== */
/* ============================================================[ EOF ]=== */
/* ======================================================================
 * End of arch/openrisc/kernel/head.S content.  The remainder of this
 * chunk corresponds to arch/openrisc/kernel/entry.S (33,765 bytes) from
 * repo AirFortressIlikara/LS2K0300-linux-4.19; the table-separator
 * residue from the original extraction has been folded into this note.
 * ====================================================================== */
/*
* OpenRISC entry.S
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/spr_defs.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/asm-offsets.h>
/* Mask IRQ and tick-timer exceptions in SR; t1/t2 are scratch regs. */
#define DISABLE_INTERRUPTS(t1,t2) \
l.mfspr t2,r0,SPR_SR ;\
l.movhi t1,hi(~(SPR_SR_IEE|SPR_SR_TEE)) ;\
l.ori t1,t1,lo(~(SPR_SR_IEE|SPR_SR_TEE)) ;\
l.and t2,t2,t1 ;\
l.mtspr r0,t2,SPR_SR
/* Re-enable IRQ and tick-timer exceptions in SR; t1 is scratch. */
#define ENABLE_INTERRUPTS(t1) \
l.mfspr t1,r0,SPR_SR ;\
l.ori t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE) ;\
l.mtspr r0,t1,SPR_SR
/* =========================================================[ macros ]=== */
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Trace irq on/off creating a stack frame.
 * Builds a minimal frame (fp + lr) so the C tracer can unwind, calls
 * trace_op, then restores sp/fp/lr.
 */
#define TRACE_IRQS_OP(trace_op) \
l.sw -8(r1),r2 /* store frame pointer */ ;\
l.sw -4(r1),r9 /* store return address */ ;\
l.addi r2,r1,0 /* move sp to fp */ ;\
l.jal trace_op ;\
l.addi r1,r1,-8 ;\
l.ori r1,r2,0 /* restore sp */ ;\
l.lwz r9,-4(r1) /* restore return address */ ;\
l.lwz r2,-8(r1) /* restore fp */ ;\
/*
 * Trace irq on/off and save registers we need that would otherwise be
 * clobbered.
 */
#define TRACE_IRQS_SAVE(t1,trace_op) \
l.sw -12(r1),t1 /* save extra reg */ ;\
l.sw -8(r1),r2 /* store frame pointer */ ;\
l.sw -4(r1),r9 /* store return address */ ;\
l.addi r2,r1,0 /* move sp to fp */ ;\
l.jal trace_op ;\
l.addi r1,r1,-12 ;\
l.ori r1,r2,0 /* restore sp */ ;\
l.lwz r9,-4(r1) /* restore return address */ ;\
l.lwz r2,-8(r1) /* restore fp */ ;\
l.lwz t1,-12(r1) /* restore extra reg */
#define TRACE_IRQS_OFF TRACE_IRQS_OP(trace_hardirqs_off)
#define TRACE_IRQS_ON TRACE_IRQS_OP(trace_hardirqs_on)
/* Trace irqs-on then reload the syscall argument registers the
 * tracer call clobbered. */
#define TRACE_IRQS_ON_SYSCALL \
TRACE_IRQS_SAVE(r10,trace_hardirqs_on) ;\
l.lwz r3,PT_GPR3(r1) ;\
l.lwz r4,PT_GPR4(r1) ;\
l.lwz r5,PT_GPR5(r1) ;\
l.lwz r6,PT_GPR6(r1) ;\
l.lwz r7,PT_GPR7(r1) ;\
l.lwz r8,PT_GPR8(r1) ;\
l.lwz r11,PT_GPR11(r1)
/* On exception entry, trace irqs-off unless they were already off
 * in the saved SR. */
#define TRACE_IRQS_OFF_ENTRY \
l.lwz r5,PT_SR(r1) ;\
l.andi r3,r5,(SPR_SR_IEE|SPR_SR_TEE) ;\
l.sfeq r5,r0 /* skip trace if irqs were already off */;\
l.bf 1f ;\
l.nop ;\
TRACE_IRQS_SAVE(r4,trace_hardirqs_off) ;\
1:
#else
#define TRACE_IRQS_OFF
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF_ENTRY
#define TRACE_IRQS_ON_SYSCALL
#endif
/*
 * We need to disable interrupts at beginning of RESTORE_ALL
 * since interrupt might come in after we've loaded EPC return address
 * and overwrite EPC with address somewhere in RESTORE_ALL
 * which is of course wrong!
 */
/* Reload EPCR/ESR and every GPR from pt_regs at r1, then l.rfe back
 * to the interrupted context.  r1 itself is restored last. */
#define RESTORE_ALL \
DISABLE_INTERRUPTS(r3,r4) ;\
l.lwz r3,PT_PC(r1) ;\
l.mtspr r0,r3,SPR_EPCR_BASE ;\
l.lwz r3,PT_SR(r1) ;\
l.mtspr r0,r3,SPR_ESR_BASE ;\
l.lwz r2,PT_GPR2(r1) ;\
l.lwz r3,PT_GPR3(r1) ;\
l.lwz r4,PT_GPR4(r1) ;\
l.lwz r5,PT_GPR5(r1) ;\
l.lwz r6,PT_GPR6(r1) ;\
l.lwz r7,PT_GPR7(r1) ;\
l.lwz r8,PT_GPR8(r1) ;\
l.lwz r9,PT_GPR9(r1) ;\
l.lwz r10,PT_GPR10(r1) ;\
l.lwz r11,PT_GPR11(r1) ;\
l.lwz r12,PT_GPR12(r1) ;\
l.lwz r13,PT_GPR13(r1) ;\
l.lwz r14,PT_GPR14(r1) ;\
l.lwz r15,PT_GPR15(r1) ;\
l.lwz r16,PT_GPR16(r1) ;\
l.lwz r17,PT_GPR17(r1) ;\
l.lwz r18,PT_GPR18(r1) ;\
l.lwz r19,PT_GPR19(r1) ;\
l.lwz r20,PT_GPR20(r1) ;\
l.lwz r21,PT_GPR21(r1) ;\
l.lwz r22,PT_GPR22(r1) ;\
l.lwz r23,PT_GPR23(r1) ;\
l.lwz r24,PT_GPR24(r1) ;\
l.lwz r25,PT_GPR25(r1) ;\
l.lwz r26,PT_GPR26(r1) ;\
l.lwz r27,PT_GPR27(r1) ;\
l.lwz r28,PT_GPR28(r1) ;\
l.lwz r29,PT_GPR29(r1) ;\
l.lwz r30,PT_GPR30(r1) ;\
l.lwz r31,PT_GPR31(r1) ;\
l.lwz r1,PT_SP(r1) ;\
l.rfe
/* Common exception entry: complete the pt_regs frame that the head.S
 * EXCEPTION_HANDLE code began (r1/EPCR/ESR and r4/r10/r12/r30 are
 * already stored), then mark the frame as a non-syscall. */
#define EXCEPTION_ENTRY(handler) \
.global handler ;\
handler: ;\
/* r1, EPCR, ESR already saved */ ;\
l.sw PT_GPR2(r1),r2 ;\
l.sw PT_GPR3(r1),r3 ;\
/* r4 already saved */ ;\
l.sw PT_GPR5(r1),r5 ;\
l.sw PT_GPR6(r1),r6 ;\
l.sw PT_GPR7(r1),r7 ;\
l.sw PT_GPR8(r1),r8 ;\
l.sw PT_GPR9(r1),r9 ;\
/* r10 already saved */ ;\
l.sw PT_GPR11(r1),r11 ;\
/* r12 already saved */ ;\
l.sw PT_GPR13(r1),r13 ;\
l.sw PT_GPR14(r1),r14 ;\
l.sw PT_GPR15(r1),r15 ;\
l.sw PT_GPR16(r1),r16 ;\
l.sw PT_GPR17(r1),r17 ;\
l.sw PT_GPR18(r1),r18 ;\
l.sw PT_GPR19(r1),r19 ;\
l.sw PT_GPR20(r1),r20 ;\
l.sw PT_GPR21(r1),r21 ;\
l.sw PT_GPR22(r1),r22 ;\
l.sw PT_GPR23(r1),r23 ;\
l.sw PT_GPR24(r1),r24 ;\
l.sw PT_GPR25(r1),r25 ;\
l.sw PT_GPR26(r1),r26 ;\
l.sw PT_GPR27(r1),r27 ;\
l.sw PT_GPR28(r1),r28 ;\
l.sw PT_GPR29(r1),r29 ;\
/* r30 already saved */ ;\
/* l.sw PT_GPR30(r1),r30*/ ;\
l.sw PT_GPR31(r1),r31 ;\
TRACE_IRQS_OFF_ENTRY ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
l.addi r30,r0,-1 ;\
l.sw PT_ORIG_GPR11(r1),r30
/* Entry stub for vectors with no real handler: finish the pt_regs
 * frame, then report via unhandled_exception() with the vector number
 * in r5 and return through the normal exception path. */
#define UNHANDLED_EXCEPTION(handler,vector) \
.global handler ;\
handler: ;\
/* r1, EPCR, ESR already saved */ ;\
l.sw PT_GPR2(r1),r2 ;\
l.sw PT_GPR3(r1),r3 ;\
l.sw PT_GPR5(r1),r5 ;\
l.sw PT_GPR6(r1),r6 ;\
l.sw PT_GPR7(r1),r7 ;\
l.sw PT_GPR8(r1),r8 ;\
l.sw PT_GPR9(r1),r9 ;\
/* r10 already saved */ ;\
l.sw PT_GPR11(r1),r11 ;\
/* r12 already saved */ ;\
l.sw PT_GPR13(r1),r13 ;\
l.sw PT_GPR14(r1),r14 ;\
l.sw PT_GPR15(r1),r15 ;\
l.sw PT_GPR16(r1),r16 ;\
l.sw PT_GPR17(r1),r17 ;\
l.sw PT_GPR18(r1),r18 ;\
l.sw PT_GPR19(r1),r19 ;\
l.sw PT_GPR20(r1),r20 ;\
l.sw PT_GPR21(r1),r21 ;\
l.sw PT_GPR22(r1),r22 ;\
l.sw PT_GPR23(r1),r23 ;\
l.sw PT_GPR24(r1),r24 ;\
l.sw PT_GPR25(r1),r25 ;\
l.sw PT_GPR26(r1),r26 ;\
l.sw PT_GPR27(r1),r27 ;\
l.sw PT_GPR28(r1),r28 ;\
l.sw PT_GPR29(r1),r29 ;\
/* r31 already saved */ ;\
l.sw PT_GPR30(r1),r30 ;\
/* l.sw PT_GPR31(r1),r31 */ ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
l.addi r30,r0,-1 ;\
l.sw PT_ORIG_GPR11(r1),r30 ;\
l.addi r3,r1,0 ;\
/* r4 is exception EA */ ;\
l.addi r5,r0,vector ;\
l.jal unhandled_exception ;\
l.nop ;\
l.j _ret_from_exception ;\
l.nop
/* clobbers 'reg' — zero lwa_flag so a pending l.lwa reservation is
 * invalidated across the exception */
#define CLEAR_LWA_FLAG(reg) \
l.movhi reg,hi(lwa_flag) ;\
l.ori reg,reg,lo(lwa_flag) ;\
l.sw 0(reg),r0
/*
* NOTE: one should never assume that SPR_EPC, SPR_ESR, SPR_EEAR
* contain the same values as when exception we're handling
* occured. in fact they never do. if you need them use
* values saved on stack (for SPR_EPC, SPR_ESR) or content
* of r4 (for SPR_EEAR). for details look at EXCEPTION_HANDLE()
* in 'arch/openrisc/kernel/head.S'
*/
/* =====================================================[ exceptions] === */
/* ---[ 0x100: RESET exception ]----------------------------------------- */
/* Reset vector: just jump to the boot entry point in head.S. */
EXCEPTION_ENTRY(_tng_kernel_start)
l.jal _start
l.andi r0,r0,0 // delay-slot no-op (r0 is hard-wired zero)
/* ---[ 0x200: BUS exception ]------------------------------------------- */
EXCEPTION_ENTRY(_bus_fault_handler)
CLEAR_LWA_FLAG(r3)
/* r4: EA of fault (set by EXCEPTION_HANDLE) */
l.jal do_bus_fault
l.addi r3,r1,0 /* pt_regs (arg set in delay slot) */
l.j _ret_from_exception
l.nop
/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
/* Entered from the DTLB miss fast path when no PTE was found: vector
 * argument r5 is cleared (0) to distinguish it from a protection fault. */
EXCEPTION_ENTRY(_dtlb_miss_page_fault_handler)
CLEAR_LWA_FLAG(r3)
l.and r5,r5,r0 // r5 = 0
l.j 1f
l.nop
EXCEPTION_ENTRY(_data_page_fault_handler)
CLEAR_LWA_FLAG(r3)
/* set up parameters for do_page_fault */
l.ori r5,r0,0x300 // exception vector
1:
l.addi r3,r1,0 // pt_regs
/* r4 set by EXCEPTION_HANDLE */ // effective address of fault
/*
 * Determine whether the faulting access was a write: fetch the
 * offending instruction (following the delay slot if the fault hit
 * one) and check its opcode.
 */
#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
/* No SR_DSX bit: detect delay-slot faults by looking for a jump or
 * branch opcode at the saved PC. */
l.lwz r6,PT_PC(r3) // address of an offending insn
l.lwz r6,0(r6) // instruction that caused pf
l.srli r6,r6,26 // check opcode for jump insn
l.sfeqi r6,0 // l.j
l.bf 8f
l.sfeqi r6,1 // l.jal
l.bf 8f
l.sfeqi r6,3 // l.bnf
l.bf 8f
l.sfeqi r6,4 // l.bf
l.bf 8f
l.sfeqi r6,0x11 // l.jr
l.bf 8f
l.sfeqi r6,0x12 // l.jalr
l.bf 8f
l.nop
l.j 9f
l.nop
8: // offending insn is in delay slot
l.lwz r6,PT_PC(r3) // address of an offending insn
l.addi r6,r6,4
l.lwz r6,0(r6) // instruction that caused pf
l.srli r6,r6,26 // get opcode
9: // offending instruction opcode loaded in r6
#else
l.mfspr r6,r0,SPR_SR // SR
l.andi r6,r6,SPR_SR_DSX // check for delay slot exception
l.sfne r6,r0 // exception happened in delay slot
l.bnf 7f
l.lwz r6,PT_PC(r3) // address of an offending insn
l.addi r6,r6,4 // offending insn is in delay slot
7:
l.lwz r6,0(r6) // instruction that caused pf
l.srli r6,r6,26 // check opcode for write access
#endif
/* opcodes 0x33..0x37 are the store instructions */
l.sfgeui r6,0x33 // check opcode for write access
l.bnf 1f
l.sfleui r6,0x37
l.bnf 1f
l.ori r6,r0,0x1 // write access
l.j 2f
l.nop
1: l.ori r6,r0,0x0 // !write access
2:
/* call fault.c handler in or32/mm/fault.c */
l.jal do_page_fault
l.nop
l.j _ret_from_exception
l.nop
/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
/* Entered from the ITLB miss fast path when no PTE was found: vector
 * argument r5 is cleared (0) to distinguish it from a protection fault. */
EXCEPTION_ENTRY(_itlb_miss_page_fault_handler)
CLEAR_LWA_FLAG(r3)
l.and r5,r5,r0 // r5 = 0
l.j 1f
l.nop
EXCEPTION_ENTRY(_insn_page_fault_handler)
CLEAR_LWA_FLAG(r3)
/* set up parameters for do_page_fault */
l.ori r5,r0,0x400 // exception vector
1:
l.addi r3,r1,0 // pt_regs
/* r4 set by EXCEPTION_HANDLE */ // effective address of fault
l.ori r6,r0,0x0 // instruction fetch is never a write access
/* call fault.c handler in or32/mm/fault.c */
l.jal do_page_fault
l.nop
l.j _ret_from_exception
l.nop
/* ---[ 0x500: Timer exception ]----------------------------------------- */
EXCEPTION_ENTRY(_timer_handler)
CLEAR_LWA_FLAG(r3)
l.jal timer_interrupt
l.addi r3,r1,0 /* pt_regs (arg set in delay slot) */
l.j _ret_from_intr
l.nop
/* ---[ 0x600: Alignment exception ]-------------------------------------- */
EXCEPTION_ENTRY(_alignment_handler)
CLEAR_LWA_FLAG(r3)
/* r4: EA of fault (set by EXCEPTION_HANDLE) */
l.jal do_unaligned_access
l.addi r3,r1,0 /* pt_regs (arg set in delay slot) */
l.j _ret_from_exception
l.nop
#if 0
/*
 * NOTE: this whole block is compiled out (#if 0).  It is an old
 * in-kernel software fixup for unaligned halfword/word loads and
 * stores, kept only for reference; the live path above calls
 * do_unaligned_access() in C instead.
 */
EXCEPTION_ENTRY(_alignment_handler)
// l.mfspr r2,r0,SPR_EEAR_BASE /* Load the effective address */
l.addi r2,r4,0
// l.mfspr r5,r0,SPR_EPCR_BASE /* Load the insn address */
l.lwz r5,PT_PC(r1)
l.lwz r3,0(r5) /* Load insn */
l.srli r4,r3,26 /* Shift to get the insn opcode */
/* If the faulting insn sits in a jump/branch delay slot, compute the
 * real continuation PC first. */
l.sfeqi r4,0x00 /* Check if the load/store insn is in delay slot */
l.bf jmp
l.sfeqi r4,0x01
l.bf jmp
l.sfeqi r4,0x03
l.bf jmp
l.sfeqi r4,0x04
l.bf jmp
l.sfeqi r4,0x11
l.bf jr
l.sfeqi r4,0x12
l.bf jr
l.nop
l.j 1f
l.addi r5,r5,4 /* Increment PC to get return insn address */
jmp:
l.slli r4,r3,6 /* Get the sign-extended jump length */
l.srai r4,r4,4
l.lwz r3,4(r5) /* Load the real load/store insn */
l.add r5,r5,r4 /* Calculate jump target address */
l.j 1f
l.srli r4,r3,26 /* Shift to get the insn opcode */
jr:
l.slli r4,r3,9 /* Shift to get the reg nb */
l.andi r4,r4,0x7c
l.lwz r3,4(r5) /* Load the real load/store insn */
l.add r4,r4,r1 /* Load the jump register value from the stack */
l.lwz r5,0(r4)
l.srli r4,r3,26 /* Shift to get the insn opcode */
1:
// l.mtspr r0,r5,SPR_EPCR_BASE
l.sw PT_PC(r1),r5
/* dispatch on the load/store opcode */
l.sfeqi r4,0x26
l.bf lhs
l.sfeqi r4,0x25
l.bf lhz
l.sfeqi r4,0x22
l.bf lws
l.sfeqi r4,0x21
l.bf lwz
l.sfeqi r4,0x37
l.bf sh
l.sfeqi r4,0x35
l.bf sw
l.nop
1: l.j 1b /* I don't know what to do */
l.nop
/* byte-assemble the value, then store it into the saved register slot */
lhs: l.lbs r5,0(r2)
l.slli r5,r5,8
l.lbz r6,1(r2)
l.or r5,r5,r6
l.srli r4,r3,19
l.andi r4,r4,0x7c
l.add r4,r4,r1
l.j align_end
l.sw 0(r4),r5
lhz: l.lbz r5,0(r2)
l.slli r5,r5,8
l.lbz r6,1(r2)
l.or r5,r5,r6
l.srli r4,r3,19
l.andi r4,r4,0x7c
l.add r4,r4,r1
l.j align_end
l.sw 0(r4),r5
lws: l.lbs r5,0(r2)
l.slli r5,r5,24
l.lbz r6,1(r2)
l.slli r6,r6,16
l.or r5,r5,r6
l.lbz r6,2(r2)
l.slli r6,r6,8
l.or r5,r5,r6
l.lbz r6,3(r2)
l.or r5,r5,r6
l.srli r4,r3,19
l.andi r4,r4,0x7c
l.add r4,r4,r1
l.j align_end
l.sw 0(r4),r5
lwz: l.lbz r5,0(r2)
l.slli r5,r5,24
l.lbz r6,1(r2)
l.slli r6,r6,16
l.or r5,r5,r6
l.lbz r6,2(r2)
l.slli r6,r6,8
l.or r5,r5,r6
l.lbz r6,3(r2)
l.or r5,r5,r6
l.srli r4,r3,19
l.andi r4,r4,0x7c
l.add r4,r4,r1
l.j align_end
l.sw 0(r4),r5
/* stores: fetch the saved source register and emit it byte-by-byte */
sh:
l.srli r4,r3,9
l.andi r4,r4,0x7c
l.add r4,r4,r1
l.lwz r5,0(r4)
l.sb 1(r2),r5
l.srli r5,r5,8
l.j align_end
l.sb 0(r2),r5
sw:
l.srli r4,r3,9
l.andi r4,r4,0x7c
l.add r4,r4,r1
l.lwz r5,0(r4)
l.sb 3(r2),r5
l.srli r5,r5,8
l.sb 2(r2),r5
l.srli r5,r5,8
l.sb 1(r2),r5
l.srli r5,r5,8
l.j align_end
l.sb 0(r2),r5
align_end:
l.j _ret_from_intr
l.nop
#endif
/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
EXCEPTION_ENTRY(_illegal_instruction_handler)
/* r4: EA of fault (set by EXCEPTION_HANDLE) */
l.jal do_illegal_instruction
l.addi r3,r1,0 /* pt_regs (arg set in delay slot) */
l.j _ret_from_exception
l.nop
/* ---[ 0x800: External interrupt exception ]---------------------------- */
EXCEPTION_ENTRY(_external_irq_handler)
#ifdef CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK
/* Debug check: an external IRQ should never be taken while the saved
 * SR says interrupts were disabled; report it via printk if it is. */
l.lwz r4,PT_SR(r1) // were interrupts enabled ?
l.andi r4,r4,SPR_SR_IEE
l.sfeqi r4,0
l.bnf 1f // ext irq enabled, all ok.
l.nop
l.addi r1,r1,-0x8
l.movhi r3,hi(42f)
l.ori r3,r3,lo(42f)
l.sw 0x0(r1),r3
l.jal printk
l.sw 0x4(r1),r4
l.addi r1,r1,0x8
.section .rodata, "a"
42:
.string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
.align 4
.previous
l.ori r4,r4,SPR_SR_IEE // fix the bug
// l.sw PT_SR(r1),r4
1:
#endif
CLEAR_LWA_FLAG(r3)
l.addi r3,r1,0 // arg: pt_regs
l.movhi r8,hi(do_IRQ)
l.ori r8,r8,lo(do_IRQ)
l.jalr r8
l.nop
l.j _ret_from_intr
l.nop
/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
/* (0x900/0xa00 are serviced by the TLB-miss fast paths — presumably the
 * dtlb/itlb miss handlers in head.S — so no entry stubs appear here.) */
/* ---[ 0xb00: Range exception ]----------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0xb00,0xb00)
/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
/*
* Syscalls are a special type of exception in that they are
* _explicitly_ invoked by userspace and can therefore be
* held to conform to the same ABI as normal functions with
* respect to whether registers are preserved across the call
* or not.
*/
/* Upon syscall entry we just save the callee-saved registers
* and not the call-clobbered ones.
*/
/* Format string for the #if 0 syscall-return debug print below. */
_string_syscall_return:
.string "syscall return %ld \n\r\0"
.align 4
/*
 * Syscall entry point (vector 0xc00).  Saves only the registers the
 * syscall ABI requires, optionally runs the syscall tracer, dispatches
 * through sys_call_table on r11, then returns to userspace via the fast
 * path below (or the full exception path when work is pending).
 */
ENTRY(_sys_call_handler)
/* r1, EPCR, ESR are already saved */
l.sw PT_GPR2(r1),r2
/* r3-r8 must be saved because syscall restart relies
 * on us being able to restart the syscall args... technically
 * they should be clobbered, otherwise
 */
l.sw PT_GPR3(r1),r3
/*
 * r4 already saved
 * r4 holds the EEAR address of the fault, use it as scratch reg and
 * then load the original r4
 */
CLEAR_LWA_FLAG(r4)
l.lwz r4,PT_GPR4(r1)
l.sw PT_GPR5(r1),r5
l.sw PT_GPR6(r1),r6
l.sw PT_GPR7(r1),r7
l.sw PT_GPR8(r1),r8
l.sw PT_GPR9(r1),r9
/* r10 already saved */
l.sw PT_GPR11(r1),r11
/* orig_gpr11 must be set for syscalls */
l.sw PT_ORIG_GPR11(r1),r11
/* r12,r13 already saved */
/* r14-r28 (even) aren't touched by the syscall fast path below
 * so we don't need to save them. However, the functions that return
 * to userspace via a call to switch() DO need to save these because
 * switch() effectively clobbers them... saving these registers for
 * such functions is handled in their syscall wrappers (see fork, vfork,
 * and clone, below). */
/* r30 is the only register we clobber in the fast path */
/* r30 already saved */
/* l.sw PT_GPR30(r1),r30 */
_syscall_check_trace_enter:
/* syscalls run with interrupts enabled */
TRACE_IRQS_ON_SYSCALL
ENABLE_INTERRUPTS(r29) // enable interrupts, r29 is temp
/* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */
l.lwz r30,TI_FLAGS(r10)
l.andi r30,r30,_TIF_SYSCALL_TRACE
l.sfne r30,r0
l.bf _syscall_trace_enter
l.nop
_syscall_check:
/* Ensure that the syscall number is reasonable */
l.sfgeui r11,__NR_syscalls
l.bf _syscall_badsys
l.nop
_syscall_call:
/* indirect call through sys_call_table[r11] */
l.movhi r29,hi(sys_call_table)
l.ori r29,r29,lo(sys_call_table)
l.slli r11,r11,2 // syscall nr -> table byte offset
l.add r29,r29,r11
l.lwz r29,0(r29)
l.jalr r29
l.nop
_syscall_return:
/* All syscalls return here... just pay attention to ret_from_fork
 * which does it in a round-about way.
 */
l.sw PT_GPR11(r1),r11 // save return value
#if 0
/* disabled debug print of the syscall return value */
_syscall_debug:
l.movhi r3,hi(_string_syscall_return)
l.ori r3,r3,lo(_string_syscall_return)
l.ori r27,r0,1
l.sw -4(r1),r27
l.sw -8(r1),r11
l.addi r1,r1,-8
l.movhi r27,hi(printk)
l.ori r27,r27,lo(printk)
l.jalr r27
l.nop
l.addi r1,r1,8
#endif
_syscall_check_trace_leave:
/* r30 is a callee-saved register so this should still hold the
 * _TIF_SYSCALL_TRACE flag from _syscall_check_trace_enter above...
 * _syscall_trace_leave expects syscall result to be in pt_regs->r11.
 */
l.sfne r30,r0
l.bf _syscall_trace_leave
l.nop
/* This is where the exception-return code begins... interrupts need to be
 * disabled the rest of the way here because we can't afford to miss any
 * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */
_syscall_check_work:
/* Here we need to disable interrupts */
DISABLE_INTERRUPTS(r27,r29)
TRACE_IRQS_OFF
l.lwz r30,TI_FLAGS(r10)
l.andi r30,r30,_TIF_WORK_MASK
l.sfne r30,r0
l.bnf _syscall_resume_userspace
l.nop
/* Work pending follows a different return path, so we need to
 * make sure that all the call-saved registers get into pt_regs
 * before branching...
 */
l.sw PT_GPR14(r1),r14
l.sw PT_GPR16(r1),r16
l.sw PT_GPR18(r1),r18
l.sw PT_GPR20(r1),r20
l.sw PT_GPR22(r1),r22
l.sw PT_GPR24(r1),r24
l.sw PT_GPR26(r1),r26
l.sw PT_GPR28(r1),r28
/* _work_pending needs to be called with interrupts disabled */
l.j _work_pending
l.nop
_syscall_resume_userspace:
// ENABLE_INTERRUPTS(r29)
/* This is the hot path for returning to userspace from a syscall. If there's
 * work to be done and the branch to _work_pending was taken above, then the
 * return to userspace will be done via the normal exception return path...
 * that path restores _all_ registers and will overwrite the "clobbered"
 * registers with whatever garbage is in pt_regs -- that's OK because those
 * registers are clobbered anyway and because the extra work is insignificant
 * in the context of the extra work that _work_pending is doing. */
/* Once again, syscalls are special and only guarantee to preserve the
 * same registers as a normal function call */
/* The assumption here is that the registers r14-r28 (even) are untouched and
 * don't need to be restored... be sure that that's really the case!
 */
/* This is still too much... we should only be restoring what we actually
 * clobbered... we should even be using 'scratch' (odd) regs above so that
 * we don't need to restore anything, hardly...
 */
l.lwz r2,PT_GPR2(r1)
/* Restore args */
/* r3-r8 are technically clobbered, but syscall restart needs these
 * to be restored...
 */
l.lwz r3,PT_GPR3(r1)
l.lwz r4,PT_GPR4(r1)
l.lwz r5,PT_GPR5(r1)
l.lwz r6,PT_GPR6(r1)
l.lwz r7,PT_GPR7(r1)
l.lwz r8,PT_GPR8(r1)
l.lwz r9,PT_GPR9(r1)
l.lwz r10,PT_GPR10(r1)
l.lwz r11,PT_GPR11(r1)
/* r30 is the only register we clobber in the fast path */
l.lwz r30,PT_GPR30(r1)
/* Here we use r13-r19 (odd) as scratch regs */
l.lwz r13,PT_PC(r1)
l.lwz r15,PT_SR(r1)
l.lwz r1,PT_SP(r1)
/* Interrupts need to be disabled for setting EPCR and ESR
 * so that another interrupt doesn't come in here and clobber
 * them before we can use them for our l.rfe */
DISABLE_INTERRUPTS(r17,r19)
l.mtspr r0,r13,SPR_EPCR_BASE
l.mtspr r0,r15,SPR_ESR_BASE
l.rfe
/* End of hot path!
 * Keep the below tracing and error handling out of the hot path...
 */
_syscall_trace_enter:
/* Here we pass pt_regs to do_syscall_trace_enter. Make sure
 * that function is really getting all the info it needs as
 * pt_regs isn't a complete set of userspace regs, just the
 * ones relevant to the syscall...
 *
 * Note use of delay slot for setting argument.
 */
l.jal do_syscall_trace_enter
l.addi r3,r1,0
/* Restore arguments (not preserved across do_syscall_trace_enter)
 * so that we can do the syscall for real and return to the syscall
 * hot path.
 */
l.lwz r11,PT_GPR11(r1)
l.lwz r3,PT_GPR3(r1)
l.lwz r4,PT_GPR4(r1)
l.lwz r5,PT_GPR5(r1)
l.lwz r6,PT_GPR6(r1)
l.lwz r7,PT_GPR7(r1)
l.j _syscall_check
l.lwz r8,PT_GPR8(r1)
_syscall_trace_leave:
l.jal do_syscall_trace_leave
l.addi r3,r1,0
l.j _syscall_check_work
l.nop
_syscall_badsys:
/* Here we effectively pretend to have executed an imaginary
 * syscall that returns -ENOSYS and then return to the regular
 * syscall hot path.
 * Note that "return value" is set in the delay slot...
 */
l.j _syscall_return
l.addi r11,r0,-ENOSYS
/******* END SYSCALL HANDLING *******/
/* ---[ 0xd00: Trap exception ]------------------------------------------ */
UNHANDLED_EXCEPTION(_vector_0xd00,0xd00)
/* ---[ 0xe00: Trap exception ]------------------------------------------ */
EXCEPTION_ENTRY(_trap_handler)
CLEAR_LWA_FLAG(r3)
/* r4: EA of fault (set by EXCEPTION_HANDLE) */
l.jal do_trap
l.addi r3,r1,0 /* pt_regs (arg set in delay slot) */
l.j _ret_from_exception
l.nop
/* Vectors 0xf00-0x1f00 are architecturally reserved: each gets an
 * UNHANDLED_EXCEPTION stub that reports and returns. */
/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0xf00,0xf00)
/* ---[ 0x1000: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1000,0x1000)
/* ---[ 0x1100: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1100,0x1100)
/* ---[ 0x1200: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1200,0x1200)
/* ---[ 0x1300: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1300,0x1300)
/* ---[ 0x1400: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1400,0x1400)
/* ---[ 0x1500: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1500,0x1500)
/* ---[ 0x1600: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1600,0x1600)
/* ---[ 0x1700: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1700,0x1700)
/* ---[ 0x1800: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1800,0x1800)
/* ---[ 0x1900: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1900,0x1900)
/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00)
/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00)
/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00)
/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00)
/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00)
/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
/* ========================================================[ return ] === */
/* Return-to-user path: with interrupts off, branch to _restore_all if
 * no work (reschedule/signal) is pending, else fall into _work_pending. */
_resume_userspace:
DISABLE_INTERRUPTS(r3,r4)
TRACE_IRQS_OFF
l.lwz r4,TI_FLAGS(r10) // r10 = thread_info
l.andi r13,r4,_TIF_WORK_MASK
l.sfeqi r13,0
l.bf _restore_all
l.nop
/*
 * Handle pending work before returning to userspace.  Calls
 * do_work_pending(regs, syscall_nr); a zero result means "done, restore
 * and return", a negative result means "restart via restart_syscall",
 * and a positive result means "restart the original syscall" (number
 * recovered from pt_regs->orig_gpr11).
 */
_work_pending:
l.lwz r5,PT_ORIG_GPR11(r1) // r5 = syscall nr, or -1 for non-syscall
l.sfltsi r5,0
l.bnf 1f
l.nop
l.andi r5,r5,0 // not a syscall: pass 0
1:
l.jal do_work_pending
l.ori r3,r1,0 /* pt_regs (arg set in delay slot) */
l.sfeqi r11,0
l.bf _restore_all
l.nop
l.sfltsi r11,0
l.bnf 1f
l.nop
/* negative: re-enter the syscall path as restart_syscall */
l.and r11,r11,r0
l.ori r11,r11,__NR_restart_syscall
l.j _syscall_check_trace_enter
l.nop
1:
/* positive: restart the original syscall with its original args */
l.lwz r11,PT_ORIG_GPR11(r1)
/* Restore arg registers */
l.lwz r3,PT_GPR3(r1)
l.lwz r4,PT_GPR4(r1)
l.lwz r5,PT_GPR5(r1)
l.lwz r6,PT_GPR6(r1)
l.lwz r7,PT_GPR7(r1)
l.j _syscall_check_trace_enter
l.lwz r8,PT_GPR8(r1)
/* Full register restore and l.rfe back to the interrupted context;
 * with irq tracing enabled, note irqs-on first if the saved SR will
 * re-enable them. */
_restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
l.lwz r4,PT_SR(r1)
l.andi r3,r4,(SPR_SR_IEE|SPR_SR_TEE)
l.sfeq r3,r0 /* skip trace if irqs were off */
l.bf skip_hardirqs_on
l.nop
TRACE_IRQS_ON
skip_hardirqs_on:
#endif
RESTORE_ALL
/* This returns to userspace code */
/* This returns to userspace code */
ENTRY(_ret_from_intr)
ENTRY(_ret_from_exception)
l.lwz r4,PT_SR(r1)
l.andi r3,r4,SPR_SR_SM
l.sfeqi r3,0
l.bnf _restore_all
l.nop
l.j _resume_userspace
l.nop
/* First code run by a newly forked task (kernel thread or user child).
 * r20 != 0 marks a kernel thread: call r20(r22).  Otherwise reload the
 * syscall return value and the callee-saved registers clobbered by the
 * context switch, then take the normal syscall return path. */
ENTRY(ret_from_fork)
l.jal schedule_tail
l.nop
/* Check if we are a kernel thread */
l.sfeqi r20,0
l.bf 1f
l.nop
/* ...we are a kernel thread so invoke the requested callback */
l.jalr r20
l.or r3,r22,r0 /* delay slot: arg = r22 */
1:
/* _syscall_returns expect r11 to contain return value */
l.lwz r11,PT_GPR11(r1)
/* The syscall fast path return expects call-saved registers
 * r12-r28 to be untouched, so we restore them here as they
 * will have been effectively clobbered when arriving here
 * via the call to switch()
 */
l.lwz r12,PT_GPR12(r1)
l.lwz r14,PT_GPR14(r1)
l.lwz r16,PT_GPR16(r1)
l.lwz r18,PT_GPR18(r1)
l.lwz r20,PT_GPR20(r1)
l.lwz r22,PT_GPR22(r1)
l.lwz r24,PT_GPR24(r1)
l.lwz r26,PT_GPR26(r1)
l.lwz r28,PT_GPR28(r1)
l.j _syscall_return
l.nop
/* ========================================================[ switch ] === */
/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via the 'return'.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this (or in particular, the
 * SAVE_REGS macro), you'll have to change the fork code also.
 */
/* _switch MUST never lie on a page boundary, because it runs from
 * effective addresses and being interrupted by an iTLB miss would kill it.
 * dTLB miss seems to never occur in the bad place since data accesses
 * are from task structures which are always page aligned.
 *
 * The problem happens in RESTORE_ALL_NO_R11 where we first set the EPCR
 * register, then load the previous register values and only at the end call
 * the l.rfe instruction. If we get a TLB miss in between, the EPCR register
 * gets garbled and we end up calling l.rfe with the wrong EPCR. (same
 * probably holds for ESR)
 *
 * To avoid these problems it is sufficient to align _switch to
 * some nice round number smaller than its size...
 */
/* ABI rules apply here... we either enter _switch via schedule() or via
 * an imaginary call to which we shall return at return_from_fork. Either
 * way, we are a function call and only need to preserve the callee-saved
 * registers when we return. As such, we don't need to save the registers
 * on the stack that we won't be returning as they were...
 */
.align 0x400
ENTRY(_switch)
/* We don't store SR as _switch only gets called in a context where
 * the SR will be the same going in and coming out... */
/* Set up new pt_regs struct for saving task state */
l.addi r1,r1,-(INT_FRAME_SIZE)
/* No need to store r1/PT_SP as it goes into KSP below */
l.sw PT_GPR2(r1),r2
l.sw PT_GPR9(r1),r9 /* link register -- return address after switch */
/* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
 * and expects r12 to be callee-saved... */
l.sw PT_GPR12(r1),r12
l.sw PT_GPR14(r1),r14
l.sw PT_GPR16(r1),r16
l.sw PT_GPR18(r1),r18
l.sw PT_GPR20(r1),r20
l.sw PT_GPR22(r1),r22
l.sw PT_GPR24(r1),r24
l.sw PT_GPR26(r1),r26
l.sw PT_GPR28(r1),r28
l.sw PT_GPR30(r1),r30
l.addi r11,r10,0 /* Save old 'current' to 'last' return value*/
/* We use thread_info->ksp for storing the address of the above
 * structure so that we can get back to it later... we don't want
 * to lose the value of thread_info->ksp, though, so store it as
 * pt_regs->sp so that we can easily restore it when we are made
 * live again...
 */
/* Save the old value of thread_info->ksp as pt_regs->sp */
l.lwz r29,TI_KSP(r10)
l.sw PT_SP(r1),r29
/* Swap kernel stack pointers */
l.sw TI_KSP(r10),r1 /* Save old stack pointer */
l.or r10,r4,r0 /* Set up new current_thread_info */
l.lwz r1,TI_KSP(r10) /* Load new stack pointer */
/* Restore the old value of thread_info->ksp */
l.lwz r29,PT_SP(r1)
l.sw TI_KSP(r10),r29
/* ...and restore the registers, except r11 because the return value
 * has already been set above.
 */
l.lwz r2,PT_GPR2(r1)
l.lwz r9,PT_GPR9(r1)
/* No need to restore r10 */
/* ...and do not restore r11 */
/* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
 * and expects r12 to be callee-saved... */
l.lwz r12,PT_GPR12(r1)
l.lwz r14,PT_GPR14(r1)
l.lwz r16,PT_GPR16(r1)
l.lwz r18,PT_GPR18(r1)
l.lwz r20,PT_GPR20(r1)
l.lwz r22,PT_GPR22(r1)
l.lwz r24,PT_GPR24(r1)
l.lwz r26,PT_GPR26(r1)
l.lwz r28,PT_GPR28(r1)
l.lwz r30,PT_GPR30(r1)
/* Unwind stack to pre-switch state */
l.addi r1,r1,(INT_FRAME_SIZE)
/* Return via the link-register back to where we 'came from', where
 * that may be either schedule(), ret_from_fork(), or
 * ret_from_kernel_thread(). If we are returning to a new thread,
 * we are expected to have set up the arg to schedule_tail already,
 * hence we do so here unconditionally:
 */
l.lwz r3,TI_TASK(r3) /* Load 'prev' as schedule_tail arg */
l.jr r9
l.nop
/* ==================================================================== */
/* These all use the delay slot for setting the argument register, so the
 * jump is always happening after the l.addi instruction.
 *
 * These are all just wrappers that don't touch the link-register r9, so the
 * return from the "real" syscall function will return back to the syscall
 * code that did the l.jal that brought us here.
 */
/* fork requires that we save all the callee-saved registers because they
 * are all effectively clobbered by the call to _switch. Here we store
 * all the registers that aren't touched by the syscall fast path and thus
 * weren't saved there.
 */
/* Helper: save the remaining callee-saved registers into pt_regs, then
 * tail-jump to the real syscall implementation whose address the caller
 * placed in r29.  r9 is untouched so the callee returns to syscall exit. */
_fork_save_extra_regs_and_call:
l.sw PT_GPR14(r1),r14
l.sw PT_GPR16(r1),r16
l.sw PT_GPR18(r1),r18
l.sw PT_GPR20(r1),r20
l.sw PT_GPR22(r1),r22
l.sw PT_GPR24(r1),r24
l.sw PT_GPR26(r1),r26
l.jr r29
l.sw PT_GPR28(r1),r28 /* delay slot: last register store */
/* clone(2) wrapper: load sys_clone's address into r29 and route through
 * _fork_save_extra_regs_and_call so callee-saved regs survive _switch. */
ENTRY(__sys_clone)
l.movhi r29,hi(sys_clone)
l.ori r29,r29,lo(sys_clone)
l.j _fork_save_extra_regs_and_call
l.nop
/* fork(2) wrapper: same scheme as __sys_clone, targeting sys_fork. */
ENTRY(__sys_fork)
l.movhi r29,hi(sys_fork)
l.ori r29,r29,lo(sys_fork)
l.j _fork_save_extra_regs_and_call
l.nop
/* rt_sigreturn(2): hand pt_regs to the C implementation; if it reports
 * (r30 != 0) that syscall tracing is active, notify the tracer before
 * resuming userspace. */
ENTRY(sys_rt_sigreturn)
l.jal _sys_rt_sigreturn
l.addi r3,r1,0 /* delay slot: arg = pt_regs */
l.sfne r30,r0 /* tracing active? */
l.bnf _no_syscall_trace
l.nop
l.jal do_syscall_trace_leave
l.addi r3,r1,0 /* delay slot: arg = pt_regs */
_no_syscall_trace:
l.j _resume_userspace
l.nop
/* This is a catch-all syscall for atomic instructions for the OpenRISC 1000.
 * The functions takes a variable number of parameters depending on which
 * particular flavour of atomic you want... parameter 1 is a flag identifying
 * the atomic in question. Currently, this function implements the
 * following variants:
 *
 * XCHG:
 * @flag: 1
 * @ptr1:
 * @ptr2:
 * Atomically exchange the values in pointers 1 and 2.
 *
 */
ENTRY(sys_or1k_atomic)
/* FIXME: This ignores r3 and always does an XCHG */
DISABLE_INTERRUPTS(r17,r19) /* atomicity via irq-off on this UP path */
l.lwz r29,0(r4) /* tmp1 = *ptr1 */
l.lwz r27,0(r5) /* tmp2 = *ptr2 */
l.sw 0(r4),r27 /* *ptr1 = tmp2 */
l.sw 0(r5),r29 /* *ptr2 = tmp1 */
ENABLE_INTERRUPTS(r17)
l.jr r9
l.or r11,r0,r0 /* delay slot: return 0 */
/* ============================================================[ EOF ]=== */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,785
|
arch/openrisc/kernel/vmlinux.lds.S
|
/*
* OpenRISC vmlinux.lds.S
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* ld script for OpenRISC architecture
*/
/* TODO
* - clean up __offset & stuff
* - change all 8192 alignment to PAGE !!!
* - recheck if all alignments are really needed
*/
# define LOAD_OFFSET PAGE_OFFSET
# define LOAD_BASE PAGE_OFFSET
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
#ifdef __OR1K__
#define __OUTPUT_FORMAT "elf32-or1k"
#else
#define __OUTPUT_FORMAT "elf32-or32"
#endif
OUTPUT_FORMAT(__OUTPUT_FORMAT, __OUTPUT_FORMAT, __OUTPUT_FORMAT)
/* Alias 'jiffies' to the low word of jiffies_64 (offset 4: OpenRISC is
 * big-endian, so the least-significant 32 bits live at the higher address). */
jiffies = jiffies_64 + 4;
SECTIONS
{
/* Read-only sections, merged into text segment: */
. = LOAD_BASE ;
_text = .;
/* _s_kernel_ro must be page aligned */
. = ALIGN(PAGE_SIZE);
_s_kernel_ro = .;
.text : AT(ADDR(.text) - LOAD_OFFSET)
{
_stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
*(.text.__*)
_etext = .;
}
/* TODO: Check if fixup and text.__* are really necessary
 * fixup is definitely necessary
 */
_sdata = .;
/* Page alignment required for RO_DATA_SECTION */
RO_DATA_SECTION(PAGE_SIZE)
_e_kernel_ro = .;
/* Whatever comes after _e_kernel_ro had better be page-aligned, too */
/* 32 here is cacheline size... recheck this */
RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE)
_edata = .;
EXCEPTION_TABLE(4)
NOTES
/* Init code and data */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
HEAD_TEXT_SECTION
/* Page aligned */
INIT_TEXT_SECTION(PAGE_SIZE)
/* Align __setup_start on 16 byte boundary */
INIT_DATA_SECTION(16)
PERCPU_SECTION(L1_CACHE_BYTES)
__init_end = .;
. = ALIGN(PAGE_SIZE);
/* Built-in initrd image, page aligned and zero-padded to a page. */
.initrd : AT(ADDR(.initrd) - LOAD_OFFSET)
{
__initrd_start = .;
*(.initrd)
__initrd_end = .;
FILL (0);
. = ALIGN (PAGE_SIZE);
}
__vmlinux_end = .; /* last address of the physical file */
BSS_SECTION(0, 0, 0x20)
_end = .;
/* Throw in the debugging sections */
STABS_DEBUG
DWARF_DEBUG
/* Sections to be discarded -- must be last */
DISCARDS
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,065
|
arch/openrisc/lib/string.S
|
/*
* OpenRISC string.S
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/linkage.h>
#include <asm/errno.h>
/*
* this can be optimized by doing gcc inline assemlby with
* proper constraints (no need to save args registers...)
*
*/
/*
*
* int __copy_tofrom_user(void *to, const void *from, unsigned long size);
*
* NOTE: it returns number of bytes NOT copied !!!
*
*/
/* Byte-wise user copy.  r3 = to, r4 = from, r5 = size; returns (in r11)
 * the number of bytes NOT copied.  A fault on the load (8:) or store (9:)
 * jumps via the exception table to 99:, which bails out to the cleanup at
 * 1: with r11 holding the remaining count. */
.global __copy_tofrom_user
__copy_tofrom_user:
l.addi r1,r1,-12 /* small frame to preserve r3/r4/r6 */
l.sw 0(r1),r6
l.sw 4(r1),r4
l.sw 8(r1),r3
l.addi r11,r5,0 /* r11 = bytes remaining */
2: l.sfeq r11,r0
l.bf 1f
l.addi r11,r11,-1 /* delay slot: pre-decrement (fixed up at 1:) */
8: l.lbz r6,0(r4) /* faultable load */
9: l.sb 0(r3),r6 /* faultable store */
l.addi r3,r3,1
l.j 2b
l.addi r4,r4,1 /* delay slot: advance source */
1:
l.addi r11,r11,1 // r11 holds the return value
l.lwz r6,0(r1)
l.lwz r4,4(r1)
l.lwz r3,8(r1)
l.jr r9
l.addi r1,r1,12 /* delay slot: pop frame */
.section .fixup, "ax"
99:
l.j 1b
l.nop
.previous
.section __ex_table, "a"
.long 8b, 99b // read fault
.long 9b, 99b // write fault
.previous
/*
* unsigned long clear_user(void *addr, unsigned long size) ;
*
* NOTE: it returns number of bytes NOT cleared !!!
*/
/* Zero-fill user memory.  r3 = addr, r4 = size; returns (in r11) the
 * number of bytes NOT cleared.  A faulting store at 9: jumps via the
 * exception table to 99: and out through the cleanup at 1:. */
.global __clear_user
__clear_user:
l.addi r1,r1,-8 /* small frame to preserve r3/r4 */
l.sw 0(r1),r4
l.sw 4(r1),r3
2: l.sfeq r4,r0
l.bf 1f
l.addi r4,r4,-1 /* delay slot: pre-decrement (fixed up at 1:) */
9: l.sb 0(r3),r0 /* faultable store of zero byte */
l.j 2b
l.addi r3,r3,1 /* delay slot: advance pointer */
1:
l.addi r11,r4,1 /* undo the extra decrement -> bytes left */
l.lwz r4,0(r1)
l.lwz r3,4(r1)
l.jr r9
l.addi r1,r1,8 /* delay slot: pop frame */
.section .fixup, "ax"
99:
l.j 1b
l.nop
.previous
.section __ex_table, "a"
.long 9b, 99b // write fault
.previous
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,345
|
arch/openrisc/lib/memset.S
|
/*
* OpenRISC memset.S
*
* Hand-optimized assembler version of memset for OpenRISC.
* Algorithm inspired by several other arch-specific memset routines
* in the kernel tree
*
* Copyright (C) 2015 Olof Kindgren <olof.kindgren@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/* void *memset(void *s, int c, size_t n) -- returns s (in r11).
 * Strategy: replicate c into a 32-bit word, byte-fill up to the first
 * word boundary, store whole words while >= 4 bytes remain, then finish
 * the tail byte-by-byte. */
.global memset
.type memset, @function
memset:
/* arguments:
 * r3 = *s
 * r4 = c
 * r5 = n
 * r13, r15, r17, r19 used as temp regs
 */
/* Exit if n == 0 */
l.sfeqi r5, 0
l.bf 4f
/* Truncate c to char */
l.andi r13, r4, 0xff /* delay slot: r13 = c & 0xff */
/* Skip word extension if c is 0 */
l.sfeqi r13, 0
l.bf 1f
/* Check for at least two whole words (8 bytes) */
l.sfleui r5, 7 /* delay slot: flag = (n <= 7), tested at 1:/3f */
/* Extend char c to 32-bit word cccc in r13 */
l.slli r15, r13, 16 // r13 = 000c, r15 = 0c00
l.or r13, r13, r15 // r13 = 0c0c, r15 = 0c00
l.slli r15, r13, 8 // r13 = 0c0c, r15 = c0c0
l.or r13, r13, r15 // r13 = cccc, r15 = c0c0
1: l.addi r19, r3, 0 // Set r19 = src
/* Jump to byte copy loop if less than two words */
l.bf 3f
l.or r17, r5, r0 // Set r17 = n
/* Mask out two LSBs to check alignment */
l.andi r15, r3, 0x3
/* lsb == 00, jump to word copy loop */
l.sfeqi r15, 0
l.bf 2f
l.addi r19, r3, 0 // Set r19 = src
/* lsb == 01,10 or 11 */
l.sb 0(r3), r13 // *src = c
l.addi r17, r17, -1 // Decrease n
l.sfeqi r15, 3
l.bf 2f
l.addi r19, r3, 1 // src += 1
/* lsb == 01 or 10 */
l.sb 1(r3), r13 // *(src+1) = c
l.addi r17, r17, -1 // Decrease n
l.sfeqi r15, 2
l.bf 2f
l.addi r19, r3, 2 // src += 2
/* lsb == 01 */
l.sb 2(r3), r13 // *(src+2) = c
l.addi r17, r17, -1 // Decrease n
l.addi r19, r3, 3 // src += 3
/* Word copy loop */
2: l.sw 0(r19), r13 // *src = cccc
l.addi r17, r17, -4 // Decrease n
l.sfgeui r17, 4
l.bf 2b
l.addi r19, r19, 4 // Increase src
/* When n > 0, copy the remaining bytes, otherwise jump to exit */
l.sfeqi r17, 0
l.bf 4f
/* Byte copy loop */
3: l.addi r17, r17, -1 // Decrease n
l.sb 0(r19), r13 // *src = cccc
l.sfnei r17, 0
l.bf 3b
l.addi r19, r19, 1 // Increase src
4: l.jr r9
l.ori r11, r3, 0 /* delay slot: return original pointer s */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,348
|
arch/riscv/kernel/head.S
|
/*
* Copyright (C) 2012 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/csr.h>
__INIT
/* Kernel entry point.  a0 = hart ID, a1 = DTB physical address (from the
 * bootloader/firmware).  One hart wins the lottery and performs the main
 * boot; all others park or (with SMP) wait to be released by the winner. */
ENTRY(_start)
/* Mask all interrupts */
csrw sie, zero
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
/*
 * Disable FPU to detect illegal usage of
 * floating point in kernel space
 */
li t0, SR_FS
csrc sstatus, t0
/* Pick one hart to run the main boot sequence */
la a3, hart_lottery
li a2, 1
amoadd.w a3, a2, (a3) /* first hart reads 0 and proceeds */
bnez a3, .Lsecondary_start
/* Save hart ID and DTB physical address */
mv s0, a0
mv s1, a1
/* Initialize page tables and relocate to virtual addresses */
la sp, init_thread_union + THREAD_SIZE
call setup_vm
call relocate
/* Restore C environment */
la tp, init_task
sw s0, TASK_TI_CPU(tp)
la sp, init_thread_union
li a0, ASM_THREAD_SIZE
add sp, sp, a0
/* Start the kernel */
mv a0, s0
mv a1, s1
call parse_dtb
tail start_kernel
/* Switch from physical to virtual addressing.  On entry, ra and pc are
 * physical; on exit we run at the PAGE_OFFSET mapping with the kernel
 * page tables loaded.  Clobbers a0-a2, stvec. */
relocate:
/* Relocate return address */
li a1, PAGE_OFFSET
la a0, _start
sub a1, a1, a0 /* a1 = virt - phys offset */
add ra, ra, a1
/* Point stvec to virtual address of intruction after satp write */
la a0, 1f
add a0, a0, a1
csrw stvec, a0
/* Compute satp for kernel page tables, but don't load it yet */
la a2, swapper_pg_dir
srl a2, a2, PAGE_SHIFT
li a1, SATP_MODE
or a2, a2, a1
/*
 * Load trampoline page directory, which will cause us to trap to
 * stvec if VA != PA, or simply fall through if VA == PA
 */
la a0, trampoline_pg_dir
srl a0, a0, PAGE_SHIFT
or a0, a0, a1
sfence.vma
csrw sptbr, a0 /* sptbr is the pre-1.10 name for satp */
.align 2
1:
/* Set trap vector to spin forever to help debug */
la a0, .Lsecondary_park
csrw stvec, a0
/* Reload the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
/* Switch to kernel page tables */
csrw sptbr, a2
ret
.Lsecondary_start:
#ifdef CONFIG_SMP
li a1, CONFIG_NR_CPUS
bgeu a0, a1, .Lsecondary_park /* hart ID out of range -> park */
/* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park
csrw stvec, a3
slli a3, a0, LGREG /* per-hart slot offset = hartid * reg size */
la a1, __cpu_up_stack_pointer
la a2, __cpu_up_task_pointer
add a1, a3, a1
add a2, a3, a2
/*
 * This hart didn't win the lottery, so we wait for the winning hart to
 * get far enough along the boot process that it should continue.
 */
.Lwait_for_cpu_up:
/* FIXME: We should WFI to save some energy here. */
REG_L sp, (a1)
REG_L tp, (a2)
beqz sp, .Lwait_for_cpu_up
beqz tp, .Lwait_for_cpu_up
fence
/* Enable virtual memory and relocate to virtual address */
call relocate
tail smp_callin
#endif
.align 2
.Lsecondary_park:
/* We lack SMP support or have too many harts, so park this hart */
wfi
j .Lsecondary_park
END(_start)
__PAGE_ALIGNED_BSS
/* Empty zero page */
.balign PAGE_SIZE
|
AirFortressIlikara/LS2K0300-linux-4.19
| 11,897
|
arch/riscv/kernel/entry.S
|
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
.text
.altmacro
/*
* Prepares to enter a system call or exception by saving all registers to the
* stack.
*/
.macro SAVE_ALL
LOCAL _restore_kernel_tpsp
LOCAL _save_context
/*
 * If coming from userspace, preserve the user thread pointer and load
 * the kernel thread pointer. If we came from the kernel, sscratch
 * will contain 0, and we should continue on the current TP.
 */
csrrw tp, sscratch, tp
bnez tp, _save_context
_restore_kernel_tpsp:
csrr tp, sscratch /* swap back: sscratch held the kernel tp */
REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
/* Spill every GPR except x2 (sp) and x4 (tp), which are saved via
 * s0/s5 below after being recovered from the CSRs. */
REG_S x1, PT_RA(sp)
REG_S x3, PT_GP(sp)
REG_S x5, PT_T0(sp)
REG_S x6, PT_T1(sp)
REG_S x7, PT_T2(sp)
REG_S x8, PT_S0(sp)
REG_S x9, PT_S1(sp)
REG_S x10, PT_A0(sp)
REG_S x11, PT_A1(sp)
REG_S x12, PT_A2(sp)
REG_S x13, PT_A3(sp)
REG_S x14, PT_A4(sp)
REG_S x15, PT_A5(sp)
REG_S x16, PT_A6(sp)
REG_S x17, PT_A7(sp)
REG_S x18, PT_S2(sp)
REG_S x19, PT_S3(sp)
REG_S x20, PT_S4(sp)
REG_S x21, PT_S5(sp)
REG_S x22, PT_S6(sp)
REG_S x23, PT_S7(sp)
REG_S x24, PT_S8(sp)
REG_S x25, PT_S9(sp)
REG_S x26, PT_S10(sp)
REG_S x27, PT_S11(sp)
REG_S x28, PT_T3(sp)
REG_S x29, PT_T4(sp)
REG_S x30, PT_T5(sp)
REG_S x31, PT_T6(sp)
/*
 * Disable user-mode memory access as it should only be set in the
 * actual user copy routines.
 *
 * Disable the FPU to detect illegal usage of floating point in kernel
 * space.
 */
li t0, SR_SUM | SR_FS
REG_L s0, TASK_TI_USER_SP(tp)
csrrc s1, sstatus, t0 /* s1 = sstatus before clearing SUM/FS */
csrr s2, sepc
csrr s3, sbadaddr
csrr s4, scause
csrr s5, sscratch /* original tp (user or kernel) */
REG_S s0, PT_SP(sp)
REG_S s1, PT_SSTATUS(sp)
REG_S s2, PT_SEPC(sp)
REG_S s3, PT_SBADADDR(sp)
REG_S s4, PT_SCAUSE(sp)
REG_S s5, PT_TP(sp)
.endm
/*
* Prepares to return from a system call or exception by restoring all
* registers from the stack.
*/
.macro RESTORE_ALL
/* Write the trap CSRs back first, then reload every GPR from pt_regs;
 * sp (x2) is restored last because it indexes the frame. */
REG_L a0, PT_SSTATUS(sp)
REG_L a2, PT_SEPC(sp)
csrw sstatus, a0
csrw sepc, a2
REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp)
REG_L x4, PT_TP(sp)
REG_L x5, PT_T0(sp)
REG_L x6, PT_T1(sp)
REG_L x7, PT_T2(sp)
REG_L x8, PT_S0(sp)
REG_L x9, PT_S1(sp)
REG_L x10, PT_A0(sp)
REG_L x11, PT_A1(sp)
REG_L x12, PT_A2(sp)
REG_L x13, PT_A3(sp)
REG_L x14, PT_A4(sp)
REG_L x15, PT_A5(sp)
REG_L x16, PT_A6(sp)
REG_L x17, PT_A7(sp)
REG_L x18, PT_S2(sp)
REG_L x19, PT_S3(sp)
REG_L x20, PT_S4(sp)
REG_L x21, PT_S5(sp)
REG_L x22, PT_S6(sp)
REG_L x23, PT_S7(sp)
REG_L x24, PT_S8(sp)
REG_L x25, PT_S9(sp)
REG_L x26, PT_S10(sp)
REG_L x27, PT_S11(sp)
REG_L x28, PT_T3(sp)
REG_L x29, PT_T4(sp)
REG_L x30, PT_T5(sp)
REG_L x31, PT_T6(sp)
REG_L x2, PT_SP(sp)
.endm
/* Common trap entry.  After SAVE_ALL: s1 = saved sstatus, s2 = sepc,
 * s3 = sbadaddr, s4 = scause, tp = current task.  Dispatches interrupts
 * to do_IRQ, syscalls to handle_syscall, and other exceptions through
 * excp_vect_table; all paths return via ret_from_exception. */
ENTRY(handle_exception)
SAVE_ALL
/*
 * Set sscratch register to 0, so that if a recursive exception
 * occurs, the exception vector knows it came from the kernel
 */
csrw sscratch, x0
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
la ra, ret_from_exception
/*
 * MSB of cause differentiates between
 * interrupts and exceptions
 */
bge s4, zero, 1f
/* Handle interrupts */
move a0, sp /* pt_regs */
move a1, s4 /* scause */
tail do_IRQ
1:
/* Exceptions run with interrupts enabled or disabled
depending on the state of sstatus.SR_SPIE */
andi t0, s1, SR_SPIE
beqz t0, 1f
csrs sstatus, SR_SIE
1:
/* Handle syscalls */
li t0, EXC_SYSCALL
beq s4, t0, handle_syscall
/* Handle other exceptions */
slli t0, s4, RISCV_LGPTR /* scale cause to table index */
la t1, excp_vect_table
la t2, excp_vect_table_end
move a0, sp /* pt_regs */
add t0, t1, t0
/* Check if exception code lies within bounds */
bgeu t0, t2, 1f
REG_L t0, 0(t0)
jr t0
1:
tail do_trap_unknown
handle_syscall:
/* save the initial A0 value (needed in signal handlers) */
REG_S a0, PT_ORIG_A0(sp)
/*
 * Advance SEPC to avoid executing the original
 * scall instruction on sret
 */
addi s2, s2, 0x4
REG_S s2, PT_SEPC(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
andi t0, t0, _TIF_SYSCALL_TRACE
bnez t0, handle_syscall_trace_enter
check_syscall_nr:
/* Check to make sure we don't jump to a bogus syscall number. */
li t0, __NR_syscalls
la s0, sys_ni_syscall /* default target for out-of-range numbers */
/* Syscall number held in a7 */
bgeu a7, t0, 1f
la s0, sys_call_table
slli t0, a7, RISCV_LGPTR
add s0, s0, t0
REG_L s0, 0(s0)
1:
jalr s0
ret_from_syscall:
/* Set user a0 to kernel a0 */
REG_S a0, PT_A0(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
andi t0, t0, _TIF_SYSCALL_TRACE
bnez t0, handle_syscall_trace_exit
ret_from_exception:
REG_L s0, PT_SSTATUS(sp)
csrc sstatus, SR_SIE /* irqs off for the atomic flag check below */
andi s0, s0, SR_SPP /* returning to kernel (SPP set) or user? */
bnez s0, restore_all
resume_userspace:
/* Interrupts must be disabled here so flags are checked atomically */
REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
andi s1, s0, _TIF_WORK_MASK
bnez s1, work_pending
/* Save unwound kernel stack pointer in thread_info */
addi s0, sp, PT_SIZE_ON_STACK
REG_S s0, TASK_TI_KERNEL_SP(tp)
/*
 * Save TP into sscratch, so we can find the kernel data structures
 * again.
 */
csrw sscratch, tp
restore_all:
RESTORE_ALL
sret
work_pending:
/* Enter slow path for supplementary processing */
la ra, ret_from_exception
andi s1, s0, _TIF_NEED_RESCHED
bnez s1, work_resched
work_notifysig:
/* Handle pending signals and notify-resume requests */
csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
move a0, sp /* pt_regs */
move a1, s0 /* current_thread_info->flags */
tail do_notify_resume
work_resched:
tail schedule
/* Slow paths for ptrace. */
handle_syscall_trace_enter:
move a0, sp
call do_syscall_trace_enter
/* The tracer may have modified the args; reload all of them. */
REG_L a0, PT_A0(sp)
REG_L a1, PT_A1(sp)
REG_L a2, PT_A2(sp)
REG_L a3, PT_A3(sp)
REG_L a4, PT_A4(sp)
REG_L a5, PT_A5(sp)
REG_L a6, PT_A6(sp)
REG_L a7, PT_A7(sp)
j check_syscall_nr
handle_syscall_trace_exit:
move a0, sp
call do_syscall_trace_exit
j ret_from_exception
END(handle_exception)
/* New user task entry: run schedule_tail(prev), then fall into
 * ret_from_exception (preset in ra) to return to userspace. */
ENTRY(ret_from_fork)
la ra, ret_from_exception
tail schedule_tail
ENDPROC(ret_from_fork)
/* New kernel thread entry: finish the switch with schedule_tail, then
 * jump to fn (s0) with arg (s1); if fn returns it lands in
 * ret_from_exception via ra. */
ENTRY(ret_from_kernel_thread)
call schedule_tail
/* Call fn(arg) */
la ra, ret_from_exception
move a0, s1 /* arg */
jr s0 /* fn */
ENDPROC(ret_from_kernel_thread)
/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
/* Save context into prev->thread */
li a4, TASK_THREAD_RA
add a3, a0, a4 /* a3 = &prev->thread */
add a4, a1, a4 /* a4 = &next->thread */
REG_S ra, TASK_THREAD_RA_RA(a3)
REG_S sp, TASK_THREAD_SP_RA(a3)
REG_S s0, TASK_THREAD_S0_RA(a3)
REG_S s1, TASK_THREAD_S1_RA(a3)
REG_S s2, TASK_THREAD_S2_RA(a3)
REG_S s3, TASK_THREAD_S3_RA(a3)
REG_S s4, TASK_THREAD_S4_RA(a3)
REG_S s5, TASK_THREAD_S5_RA(a3)
REG_S s6, TASK_THREAD_S6_RA(a3)
REG_S s7, TASK_THREAD_S7_RA(a3)
REG_S s8, TASK_THREAD_S8_RA(a3)
REG_S s9, TASK_THREAD_S9_RA(a3)
REG_S s10, TASK_THREAD_S10_RA(a3)
REG_S s11, TASK_THREAD_S11_RA(a3)
/* Restore context from next->thread */
REG_L ra, TASK_THREAD_RA_RA(a4)
REG_L sp, TASK_THREAD_SP_RA(a4)
REG_L s0, TASK_THREAD_S0_RA(a4)
REG_L s1, TASK_THREAD_S1_RA(a4)
REG_L s2, TASK_THREAD_S2_RA(a4)
REG_L s3, TASK_THREAD_S3_RA(a4)
REG_L s4, TASK_THREAD_S4_RA(a4)
REG_L s5, TASK_THREAD_S5_RA(a4)
REG_L s6, TASK_THREAD_S6_RA(a4)
REG_L s7, TASK_THREAD_S7_RA(a4)
REG_L s8, TASK_THREAD_S8_RA(a4)
REG_L s9, TASK_THREAD_S9_RA(a4)
REG_L s10, TASK_THREAD_S10_RA(a4)
REG_L s11, TASK_THREAD_S11_RA(a4)
/* Swap the CPU entry around. */
lw a3, TASK_TI_CPU(a0)
lw a4, TASK_TI_CPU(a1)
sw a3, TASK_TI_CPU(a1)
sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
addi tp, a1, TASK_TI
#else
move tp, a1 /* tp = next task (current) */
#endif
ret
ENDPROC(__switch_to)
/* Save the FP register file and fcsr into a0->thread.fstate.  Temporarily
 * sets sstatus.FS so the FP instructions are legal, and clears it again
 * before returning. */
ENTRY(__fstate_save)
li a2, TASK_THREAD_F0
add a0, a0, a2 /* a0 = &task->thread.fstate */
li t1, SR_FS
csrs sstatus, t1 /* enable FP access */
frcsr t0
/* Store f0-f31, then the captured fcsr. */
fsd f0, TASK_THREAD_F0_F0(a0)
fsd f1, TASK_THREAD_F1_F0(a0)
fsd f2, TASK_THREAD_F2_F0(a0)
fsd f3, TASK_THREAD_F3_F0(a0)
fsd f4, TASK_THREAD_F4_F0(a0)
fsd f5, TASK_THREAD_F5_F0(a0)
fsd f6, TASK_THREAD_F6_F0(a0)
fsd f7, TASK_THREAD_F7_F0(a0)
fsd f8, TASK_THREAD_F8_F0(a0)
fsd f9, TASK_THREAD_F9_F0(a0)
fsd f10, TASK_THREAD_F10_F0(a0)
fsd f11, TASK_THREAD_F11_F0(a0)
fsd f12, TASK_THREAD_F12_F0(a0)
fsd f13, TASK_THREAD_F13_F0(a0)
fsd f14, TASK_THREAD_F14_F0(a0)
fsd f15, TASK_THREAD_F15_F0(a0)
fsd f16, TASK_THREAD_F16_F0(a0)
fsd f17, TASK_THREAD_F17_F0(a0)
fsd f18, TASK_THREAD_F18_F0(a0)
fsd f19, TASK_THREAD_F19_F0(a0)
fsd f20, TASK_THREAD_F20_F0(a0)
fsd f21, TASK_THREAD_F21_F0(a0)
fsd f22, TASK_THREAD_F22_F0(a0)
fsd f23, TASK_THREAD_F23_F0(a0)
fsd f24, TASK_THREAD_F24_F0(a0)
fsd f25, TASK_THREAD_F25_F0(a0)
fsd f26, TASK_THREAD_F26_F0(a0)
fsd f27, TASK_THREAD_F27_F0(a0)
fsd f28, TASK_THREAD_F28_F0(a0)
fsd f29, TASK_THREAD_F29_F0(a0)
fsd f30, TASK_THREAD_F30_F0(a0)
fsd f31, TASK_THREAD_F31_F0(a0)
sw t0, TASK_THREAD_FCSR_F0(a0)
csrc sstatus, t1 /* disable FP access again */
ret
ENDPROC(__fstate_save)
/* Restore the FP register file and fcsr from a0->thread.fstate, mirroring
 * __fstate_save (FS enabled around the FP instructions, cleared after). */
ENTRY(__fstate_restore)
li a2, TASK_THREAD_F0
add a0, a0, a2 /* a0 = &task->thread.fstate */
li t1, SR_FS
lw t0, TASK_THREAD_FCSR_F0(a0)
csrs sstatus, t1 /* enable FP access */
/* Load f0-f31, then write fcsr. */
fld f0, TASK_THREAD_F0_F0(a0)
fld f1, TASK_THREAD_F1_F0(a0)
fld f2, TASK_THREAD_F2_F0(a0)
fld f3, TASK_THREAD_F3_F0(a0)
fld f4, TASK_THREAD_F4_F0(a0)
fld f5, TASK_THREAD_F5_F0(a0)
fld f6, TASK_THREAD_F6_F0(a0)
fld f7, TASK_THREAD_F7_F0(a0)
fld f8, TASK_THREAD_F8_F0(a0)
fld f9, TASK_THREAD_F9_F0(a0)
fld f10, TASK_THREAD_F10_F0(a0)
fld f11, TASK_THREAD_F11_F0(a0)
fld f12, TASK_THREAD_F12_F0(a0)
fld f13, TASK_THREAD_F13_F0(a0)
fld f14, TASK_THREAD_F14_F0(a0)
fld f15, TASK_THREAD_F15_F0(a0)
fld f16, TASK_THREAD_F16_F0(a0)
fld f17, TASK_THREAD_F17_F0(a0)
fld f18, TASK_THREAD_F18_F0(a0)
fld f19, TASK_THREAD_F19_F0(a0)
fld f20, TASK_THREAD_F20_F0(a0)
fld f21, TASK_THREAD_F21_F0(a0)
fld f22, TASK_THREAD_F22_F0(a0)
fld f23, TASK_THREAD_F23_F0(a0)
fld f24, TASK_THREAD_F24_F0(a0)
fld f25, TASK_THREAD_F25_F0(a0)
fld f26, TASK_THREAD_F26_F0(a0)
fld f27, TASK_THREAD_F27_F0(a0)
fld f28, TASK_THREAD_F28_F0(a0)
fld f29, TASK_THREAD_F29_F0(a0)
fld f30, TASK_THREAD_F30_F0(a0)
fld f31, TASK_THREAD_F31_F0(a0)
fscsr t0
csrc sstatus, t1 /* disable FP access again */
ret
ENDPROC(__fstate_restore)
.section ".rodata"
.align LGREG
/* Exception vector table, indexed by scause exception code (entries are
 * in architectural cause-code order, 0..15); consulted from
 * handle_exception after the bounds check against excp_vect_table_end. */
ENTRY(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
RISCV_PTR do_trap_insn_fault
RISCV_PTR do_trap_insn_illegal
RISCV_PTR do_trap_break
RISCV_PTR do_trap_load_misaligned
RISCV_PTR do_trap_load_fault
RISCV_PTR do_trap_store_misaligned
RISCV_PTR do_trap_store_fault
RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
RISCV_PTR do_trap_ecall_s
RISCV_PTR do_trap_unknown
RISCV_PTR do_trap_ecall_m
RISCV_PTR do_page_fault /* instruction page fault */
RISCV_PTR do_page_fault /* load page fault */
RISCV_PTR do_trap_unknown
RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,737
|
arch/riscv/kernel/mcount-dyn.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm-generic/export.h>
#include <asm/ftrace.h>
.text
/* Build a minimal frame for ftrace_caller.  With the graph tracer enabled
 * the frame is larger so t0-t2 (the graph args staged by the caller) can
 * be preserved across the traced call. */
.macro SAVE_ABI_STATE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
addi sp, sp, -48
sd s0, 32(sp)
sd ra, 40(sp)
addi s0, sp, 48 /* re-establish frame pointer */
sd t0, 24(sp)
sd t1, 16(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
sd t2, 8(sp)
#endif
#else
addi sp, sp, -16
sd s0, 0(sp)
sd ra, 8(sp)
addi s0, sp, 16 /* re-establish frame pointer */
#endif
.endm
/* Tear down the frame built by SAVE_ABI_STATE (sizes must match its
 * CONFIG_FUNCTION_GRAPH_TRACER variants). */
.macro RESTORE_ABI_STATE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ld s0, 32(sp)
ld ra, 40(sp)
addi sp, sp, 48
#else
ld ra, 8(sp)
ld s0, 0(sp)
addi sp, sp, 16
#endif
.endm
/* Reload the graph-tracer arguments (stashed by SAVE_ABI_STATE) into
 * a0-a2 before calling ftrace_graph_caller. */
.macro RESTORE_GRAPH_ARGS
ld a0, 24(sp)
ld a1, 16(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld a2, 8(sp)
#endif
.endm
/* Patchable trampoline for the function-graph tracer; the call below is
 * rewritten at runtime (see ftrace_enable/disable_ftrace_graph_caller). */
ENTRY(ftrace_graph_caller)
addi sp, sp, -16
sd s0, 0(sp)
sd ra, 8(sp)
addi s0, sp, 16
ftrace_graph_call:
.global ftrace_graph_call
/*
 * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite the
 * call below. Check ftrace_modify_all_code for details.
 */
call ftrace_stub
ld ra, 8(sp)
ld s0, 0(sp)
addi sp, sp, 16
ret
ENDPROC(ftrace_graph_caller)
ENTRY(ftrace_caller)
/*
 * a0: the address in the caller when calling ftrace_caller
 * a1: the caller's return address
 * a2: the address of global variable function_trace_op
 */
ld a1, -8(s0) /* caller's ra, saved just below its frame pointer */
addi a0, ra, -MCOUNT_INSN_SIZE /* back up to the traced function's entry */
la t5, function_trace_op
ld a2, 0(t5)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * the graph tracer (specifically, prepare_ftrace_return) needs these
 * arguments but for now the function tracer occupies the regs, so we
 * save them in temporary regs to recover later.
 */
addi t0, s0, -8 /* &caller's saved ra (parent ret-addr slot) */
mv t1, a0
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld t2, -16(s0) /* caller's frame pointer, for the FP sanity test */
#endif
#endif
SAVE_ABI_STATE
ftrace_call:
.global ftrace_call
/*
 * For the dynamic ftrace to work, here we should reserve at least
 * 8 bytes for a functional auipc-jalr pair. The following call
 * serves this purpose.
 *
 * Calling ftrace_update_ftrace_func would overwrite the nops below.
 * Check ftrace_modify_all_code for details.
 */
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
RESTORE_GRAPH_ARGS
call ftrace_graph_caller
#endif
RESTORE_ABI_STATE
ret
ENDPROC(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* Save a full pt_regs (all 31 GPRs) plus a 16-byte s0/ra frame on top,
 * for the DYNAMIC_FTRACE_WITH_REGS trampoline. */
.macro SAVE_ALL
addi sp, sp, -(PT_SIZE_ON_STACK+16)
sd s0, (PT_SIZE_ON_STACK)(sp)
sd ra, (PT_SIZE_ON_STACK+8)(sp)
addi s0, sp, (PT_SIZE_ON_STACK+16) /* re-establish frame pointer */
sd x1, PT_RA(sp)
sd x2, PT_SP(sp)
sd x3, PT_GP(sp)
sd x4, PT_TP(sp)
sd x5, PT_T0(sp)
sd x6, PT_T1(sp)
sd x7, PT_T2(sp)
sd x8, PT_S0(sp)
sd x9, PT_S1(sp)
sd x10, PT_A0(sp)
sd x11, PT_A1(sp)
sd x12, PT_A2(sp)
sd x13, PT_A3(sp)
sd x14, PT_A4(sp)
sd x15, PT_A5(sp)
sd x16, PT_A6(sp)
sd x17, PT_A7(sp)
sd x18, PT_S2(sp)
sd x19, PT_S3(sp)
sd x20, PT_S4(sp)
sd x21, PT_S5(sp)
sd x22, PT_S6(sp)
sd x23, PT_S7(sp)
sd x24, PT_S8(sp)
sd x25, PT_S9(sp)
sd x26, PT_S10(sp)
sd x27, PT_S11(sp)
sd x28, PT_T3(sp)
sd x29, PT_T4(sp)
sd x30, PT_T5(sp)
sd x31, PT_T6(sp)
.endm
/* Restore everything stored by the SAVE_ALL macro above and pop both the
 * pt_regs area and the s0/ra frame. */
.macro RESTORE_ALL
ld x1, PT_RA(sp)
ld x2, PT_SP(sp)
ld x3, PT_GP(sp)
ld x4, PT_TP(sp)
ld x5, PT_T0(sp)
ld x6, PT_T1(sp)
ld x7, PT_T2(sp)
ld x8, PT_S0(sp)
ld x9, PT_S1(sp)
ld x10, PT_A0(sp)
ld x11, PT_A1(sp)
ld x12, PT_A2(sp)
ld x13, PT_A3(sp)
ld x14, PT_A4(sp)
ld x15, PT_A5(sp)
ld x16, PT_A6(sp)
ld x17, PT_A7(sp)
ld x18, PT_S2(sp)
ld x19, PT_S3(sp)
ld x20, PT_S4(sp)
ld x21, PT_S5(sp)
ld x22, PT_S6(sp)
ld x23, PT_S7(sp)
ld x24, PT_S8(sp)
ld x25, PT_S9(sp)
ld x26, PT_S10(sp)
ld x27, PT_S11(sp)
ld x28, PT_T3(sp)
ld x29, PT_T4(sp)
ld x30, PT_T5(sp)
ld x31, PT_T6(sp)
ld s0, (PT_SIZE_ON_STACK)(sp)
ld ra, (PT_SIZE_ON_STACK+8)(sp)
addi sp, sp, (PT_SIZE_ON_STACK+16)
.endm
/* Graph-arg reload for the regs trampoline: t0-t2 were captured into the
 * pt_regs slots by SAVE_ALL, so pull them from there into a0-a2. */
.macro RESTORE_GRAPH_REG_ARGS
ld a0, PT_T0(sp)
ld a1, PT_T1(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld a2, PT_T2(sp)
#endif
.endm
/*
 * Mostly identical to ftrace_caller, but hands the tracer a full
 * struct pt_regs (the DYNAMIC_FTRACE_WITH_REGS entry point).
 *
 * On entry (compiler-generated mcount ABI):
 *   ra = return address into the traced function
 *   s0 = traced function's frame pointer; -8(s0) holds the address
 *        the traced function will eventually return to.
 */
ENTRY(ftrace_regs_caller)
/*
 * a3: the address of all registers in the stack
 */
ld a1, -8(s0) /* a1 = parent return address */
addi a0, ra, -MCOUNT_INSN_SIZE /* a0 = ip of the traced function */
la t5, function_trace_op
ld a2, 0(t5) /* a2 = ftrace_ops argument */
addi a3, sp, -(PT_SIZE_ON_STACK+16) /* base of the pt_regs SAVE_ALL is about to build */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Stash the graph-caller arguments in t0-t2 so SAVE_ALL captures them */
addi t0, s0, -8
mv t1, a0
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld t2, -16(s0)
#endif
#endif
SAVE_ALL
ftrace_regs_call:
.global ftrace_regs_call
call ftrace_stub /* call site patched at runtime by ftrace_update_ftrace_func() */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
RESTORE_GRAPH_REG_ARGS
call ftrace_graph_caller
#endif
RESTORE_ALL
ret
ENDPROC(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,654
|
arch/riscv/kernel/mcount.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm-generic/export.h>
#include <asm/ftrace.h>
.text
/*
 * Minimal call frame for the mcount stubs: save the caller's s0/ra
 * and establish a new frame pointer so C tracer code can be called.
 * Paired with RESTORE_ABI_STATE.
 */
.macro SAVE_ABI_STATE
addi sp, sp, -16
sd s0, 0(sp)
sd ra, 8(sp)
addi s0, sp, 16
.endm
/*
 * The call to ftrace_return_to_handler would overwrite the return
 * register if a0 was not saved.
 *
 * Like SAVE_ABI_STATE but with room for a0 (the traced function's
 * return value) as well.  Paired with RESTORE_RET_ABI_STATE.
 */
.macro SAVE_RET_ABI_STATE
addi sp, sp, -32
sd s0, 16(sp)
sd ra, 24(sp)
sd a0, 8(sp)
addi s0, sp, 32
.endm
/* Tear down the frame built by SAVE_ABI_STATE. */
.macro RESTORE_ABI_STATE
ld ra, 8(sp)
ld s0, 0(sp)
addi sp, sp, 16
.endm
/* Tear down the frame built by SAVE_RET_ABI_STATE, restoring a0 too. */
.macro RESTORE_RET_ABI_STATE
ld ra, 24(sp)
ld s0, 16(sp)
ld a0, 8(sp)
addi sp, sp, 32
.endm
/*
 * Default no-op tracer target.  With DYNAMIC_FTRACE the compiler's
 * _mcount calls are aliased to this stub and patched in/out at runtime.
 */
ENTRY(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
.global _mcount
.set _mcount, ftrace_stub
#endif
ret
ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(return_to_handler)
/*
 * Trampoline that the graph tracer substitutes for a function's real
 * return address; it recovers the real address from the C side and
 * jumps there, preserving the function's return value in a0.
 *
 * On implementing the frame point test, the ideal way is to compare the
 * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
 * However, the psABI of variable-length-argument functions does not allow this.
 *
 * So alternatively we check the *old* frame pointer position, that is, the
 * value stored in -16(s0) on entry, and the s0 on return.
 */
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv t6, s0 /* carry the frame pointer across SAVE_RET_ABI_STATE */
#endif
SAVE_RET_ABI_STATE /* also preserves a0, the traced function's return value */
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a0, t6
#endif
call ftrace_return_to_handler /* returns the real return address in a0 */
mv a1, a0
RESTORE_RET_ABI_STATE /* restores the original a0 */
jalr a1 /* jump back to the real caller */
ENDPROC(return_to_handler)
EXPORT_SYMBOL(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
/*
 * Non-dynamic _mcount entry, called at the start of every traced
 * function.  Dispatch to the graph tracer and/or the plain function
 * tracer only when the corresponding hook differs from its stub;
 * otherwise return immediately.  t4 holds ftrace_stub as the
 * comparison baseline.
 */
ENTRY(_mcount)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
ld t1, 0(t0)
bne t1, t4, do_ftrace_graph_caller
la t3, ftrace_graph_entry
ld t2, 0(t3)
la t6, ftrace_graph_entry_stub
bne t2, t6, do_ftrace_graph_caller
#endif
la t3, ftrace_trace_function
ld t5, 0(t3)
bne t5, t4, do_trace
ret
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * A pseudo representation for the function graph tracer:
 * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
 */
do_ftrace_graph_caller:
addi a0, s0, -8
mv a1, ra
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld a2, -16(s0)
#endif
SAVE_ABI_STATE
call prepare_ftrace_return
RESTORE_ABI_STATE
ret
#endif
/*
 * A pseudo representation for the function tracer:
 * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
 */
do_trace:
ld a1, -8(s0)
mv a0, ra
SAVE_ABI_STATE
jalr t5
RESTORE_ABI_STATE
ret
ENDPROC(_mcount)
#endif
EXPORT_SYMBOL(_mcount)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,689
|
arch/riscv/kernel/vmlinux.lds.S
|
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define LOAD_OFFSET PAGE_OFFSET
#include <asm/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
OUTPUT_ARCH(riscv)
ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
/* Beginning of code and text segment */
. = LOAD_OFFSET;
_start = .;
__init_begin = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
/* we have to discard exit text and such at runtime, not link time */
.exit.text :
{
EXIT_TEXT
}
.exit.data :
{
EXIT_DATA
}
PERCPU_SECTION(L1_CACHE_BYTES)
__init_end = .;
.text : {
_text = .;
_stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
ENTRY_TEXT
IRQENTRY_TEXT
*(.fixup)
_etext = .;
}
/* Start of data section */
_sdata = .;
RO_DATA_SECTION(L1_CACHE_BYTES)
.srodata : {
*(.srodata*)
}
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
.sdata : {
__global_pointer$ = . + 0x800;
*(.sdata*)
/* End of data section */
_edata = .;
*(.sbss*)
}
BSS_SECTION(0, 0, 0)
EXCEPTION_TABLE(0x10)
NOTES
.rel.dyn : {
*(.rel.dyn*)
}
_end = .;
STABS_DEBUG
DWARF_DEBUG
DISCARDS
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,540
|
arch/riscv/lib/memcpy.S
|
/*
* Copyright (C) 2013 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/linkage.h>
#include <asm/asm.h>
/* void *memcpy(void *, const void *, size_t) */
ENTRY(memcpy)
/*
 * a0 = dest (returned unchanged), a1 = src, a2 = count.
 * t6 walks the destination so a0 survives as the return value.
 * Strategy: byte copy for < 128 bytes or mismatched alignment;
 * otherwise align up to SZREG, bulk-copy 16*SZREG bytes per
 * iteration, then mop up the tail word-wise or byte-wise.
 */
move t6, a0 /* Preserve return value */
/* Defer to byte-oriented copy for small sizes */
sltiu a3, a2, 128
bnez a3, 4f
/* Use word-oriented copy only if low-order bits match */
andi a3, t6, SZREG-1
andi a4, a1, SZREG-1
bne a3, a4, 4f
beqz a3, 2f /* Skip if already aligned */
/*
 * Round to nearest double word-aligned address
 * greater than or equal to start address
 */
andi a3, a1, ~(SZREG-1)
addi a3, a3, SZREG
/* Handle initial misalignment */
sub a4, a3, a1
1:
lb a5, 0(a1)
addi a1, a1, 1
sb a5, 0(t6)
addi t6, t6, 1
bltu a1, a3, 1b
sub a2, a2, a4 /* Update count */
2:
/* a4 = count rounded down to a whole number of 16*SZREG chunks */
andi a4, a2, ~((16*SZREG)-1)
beqz a4, 4f
add a3, a1, a4 /* a3 = end of the bulk-copy region */
3:
/* 16 register-width loads/stores per iteration, split into two waves */
REG_L a4, 0(a1)
REG_L a5, SZREG(a1)
REG_L a6, 2*SZREG(a1)
REG_L a7, 3*SZREG(a1)
REG_L t0, 4*SZREG(a1)
REG_L t1, 5*SZREG(a1)
REG_L t2, 6*SZREG(a1)
REG_L t3, 7*SZREG(a1)
REG_L t4, 8*SZREG(a1)
REG_L t5, 9*SZREG(a1)
REG_S a4, 0(t6)
REG_S a5, SZREG(t6)
REG_S a6, 2*SZREG(t6)
REG_S a7, 3*SZREG(t6)
REG_S t0, 4*SZREG(t6)
REG_S t1, 5*SZREG(t6)
REG_S t2, 6*SZREG(t6)
REG_S t3, 7*SZREG(t6)
REG_S t4, 8*SZREG(t6)
REG_S t5, 9*SZREG(t6)
REG_L a4, 10*SZREG(a1)
REG_L a5, 11*SZREG(a1)
REG_L a6, 12*SZREG(a1)
REG_L a7, 13*SZREG(a1)
REG_L t0, 14*SZREG(a1)
REG_L t1, 15*SZREG(a1)
addi a1, a1, 16*SZREG
REG_S a4, 10*SZREG(t6)
REG_S a5, 11*SZREG(t6)
REG_S a6, 12*SZREG(t6)
REG_S a7, 13*SZREG(t6)
REG_S t0, 14*SZREG(t6)
REG_S t1, 15*SZREG(t6)
addi t6, t6, 16*SZREG
bltu a1, a3, 3b
andi a2, a2, (16*SZREG)-1 /* Update count */
4:
/* Handle trailing misalignment */
beqz a2, 6f
add a3, a1, a2
/* Use word-oriented copy if co-aligned to word boundary */
or a5, a1, t6
or a5, a5, a3
andi a5, a5, 3
bnez a5, 5f
7:
lw a4, 0(a1)
addi a1, a1, 4
sw a4, 0(t6)
addi t6, t6, 4
bltu a1, a3, 7b
ret
5:
lb a4, 0(a1)
addi a1, a1, 1
sb a4, 0(t6)
addi t6, t6, 1
bltu a1, a3, 5b
6:
ret
END(memcpy)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,268
|
arch/riscv/lib/uaccess.S
|
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
.altmacro
/*
 * Emit one memory-access instruction (\op \reg, \addr) together with an
 * __ex_table entry mapping its address to \lbl, so a fault taken at
 * this instruction is redirected to the \lbl fixup code.
 */
.macro fixup op reg addr lbl
LOCAL _epc
_epc:
\op \reg, \addr
.section __ex_table,"a"
.balign RISCV_SZPTR
RISCV_PTR _epc, \lbl
.previous
.endm
/*
 * Shared body for __asm_copy_to_user / __asm_copy_from_user:
 *   a0 = dest, a1 = src, a2 = count.  Returns 0 in a0 on success;
 *   on a fault the __ex_table fixup (label 10 in .fixup) runs instead.
 * Every user-memory access goes through the "fixup" macro above.
 * t6 holds SR_SUM for enabling/disabling user-memory access.
 */
ENTRY(__asm_copy_to_user)
ENTRY(__asm_copy_from_user)
/* Enable access to user memory */
li t6, SR_SUM
csrs sstatus, t6
add a3, a1, a2
/* Use word-oriented copy only if low-order bits match */
andi t0, a0, SZREG-1
andi t1, a1, SZREG-1
bne t0, t1, 2f
addi t0, a1, SZREG-1
andi t1, a3, ~(SZREG-1)
andi t0, t0, ~(SZREG-1)
/*
 * a3: terminal address of source region
 * t0: lowest XLEN-aligned address in source
 * t1: highest XLEN-aligned address in source
 */
bgeu t0, t1, 2f /* no whole aligned words to copy */
bltu a1, t0, 4f /* leading bytes before the aligned region */
1:
/* Word-at-a-time main loop */
fixup REG_L, t2, (a1), 10f
fixup REG_S, t2, (a0), 10f
addi a1, a1, SZREG
addi a0, a0, SZREG
bltu a1, t1, 1b
2:
bltu a1, a3, 5f
3:
/* Disable access to user memory */
csrc sstatus, t6
li a0, 0
ret
4: /* Edge case: unalignment */
fixup lbu, t2, (a1), 10f
fixup sb, t2, (a0), 10f
addi a1, a1, 1
addi a0, a0, 1
bltu a1, t0, 4b
j 1b
5: /* Edge case: remainder */
fixup lbu, t2, (a1), 10f
fixup sb, t2, (a0), 10f
addi a1, a1, 1
addi a0, a0, 1
bltu a1, a3, 5b
j 3b
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
/*
 * __clear_user: zero a0..a0+a1 bytes of user memory.
 * Returns 0 in a0 on success; a fault is redirected to the .fixup
 * label 11, which reports via a1.  Same alignment strategy as the
 * copy routine above, storing the zero register instead of loading.
 */
ENTRY(__clear_user)
/* Enable access to user memory */
li t6, SR_SUM
csrs sstatus, t6
add a3, a0, a1
addi t0, a0, SZREG-1
andi t1, a3, ~(SZREG-1)
andi t0, t0, ~(SZREG-1)
/*
 * a3: terminal address of target region
 * t0: lowest doubleword-aligned address in target region
 * t1: highest doubleword-aligned address in target region
 */
bgeu t0, t1, 2f /* no whole aligned words to clear */
bltu a0, t0, 4f /* leading unaligned bytes first */
1:
/* Word-at-a-time main loop */
fixup REG_S, zero, (a0), 11f
addi a0, a0, SZREG
bltu a0, t1, 1b
2:
bltu a0, a3, 5f
3:
/* Disable access to user memory */
csrc sstatus, t6
li a0, 0
ret
4: /* Edge case: unalignment */
fixup sb, zero, (a0), 11f
addi a0, a0, 1
bltu a0, t0, 4b
j 1b
5: /* Edge case: remainder */
fixup sb, zero, (a0), 11f
addi a0, a0, 1
bltu a0, a3, 5b
j 3b
ENDPROC(__clear_user)
.section .fixup,"ax"
.balign 4
/* Fixup code for __copy_user(10) and __clear_user(11) */
10:
/*
 * Fault during a user copy: disable access to user memory again and
 * report the count from a2.  The original code used "csrs" here,
 * which *sets* SR_SUM and would have left user-memory access enabled
 * on the error path; it must be "csrc" (clear), matching the
 * success-path teardown in __asm_copy_*_user.
 */
csrc sstatus, t6
mv a0, a2
ret
11:
/* Fault during __clear_user: same SR_SUM fix; report count from a1. */
csrc sstatus, t6
mv a0, a1
ret
.previous
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,756
|
arch/riscv/lib/memset.S
|
/*
* Copyright (C) 2013 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/linkage.h>
#include <asm/asm.h>
/* void *memset(void *, int, size_t) */
ENTRY(memset)
/*
 * a0 = dest (returned unchanged), a1 = fill byte, a2 = count.
 * t0 walks the destination so a0 survives as the return value.
 * Small fills go byte-by-byte; larger ones align to SZREG, broadcast
 * the byte across a register, and run a Duff's-device block of 32
 * register-width stores, entered at a computed offset.
 */
move t0, a0 /* Preserve return value */
/* Defer to byte-oriented fill for small sizes */
sltiu a3, a2, 16
bnez a3, 4f
/*
 * Round to nearest XLEN-aligned address
 * greater than or equal to start address
 */
addi a3, t0, SZREG-1
andi a3, a3, ~(SZREG-1)
beq a3, t0, 2f /* Skip if already aligned */
/* Handle initial misalignment */
sub a4, a3, t0
1:
sb a1, 0(t0)
addi t0, t0, 1
bltu t0, a3, 1b
sub a2, a2, a4 /* Update count */
2: /* Duff's device with 32 XLEN stores per iteration */
/* Broadcast value into all bytes */
andi a1, a1, 0xff
slli a3, a1, 8
or a1, a3, a1
slli a3, a1, 16
or a1, a3, a1
#ifdef CONFIG_64BIT
slli a3, a1, 32
or a1, a3, a1
#endif
/* Calculate end address */
andi a4, a2, ~(SZREG-1)
add a3, t0, a4
andi a4, a4, 31*SZREG /* Calculate remainder */
beqz a4, 3f /* Shortcut if no remainder */
neg a4, a4
addi a4, a4, 32*SZREG /* Calculate initial offset */
/* Adjust start address with offset */
sub t0, t0, a4
/* Jump into loop body */
/* Assumes 32-bit instruction lengths */
la a5, 3f
#ifdef CONFIG_64BIT
/* Convert byte offset to instruction offset: 8-byte stores, 4-byte insns */
srli a4, a4, 1
#endif
add a5, a5, a4
jr a5
3:
REG_S a1, 0(t0)
REG_S a1, SZREG(t0)
REG_S a1, 2*SZREG(t0)
REG_S a1, 3*SZREG(t0)
REG_S a1, 4*SZREG(t0)
REG_S a1, 5*SZREG(t0)
REG_S a1, 6*SZREG(t0)
REG_S a1, 7*SZREG(t0)
REG_S a1, 8*SZREG(t0)
REG_S a1, 9*SZREG(t0)
REG_S a1, 10*SZREG(t0)
REG_S a1, 11*SZREG(t0)
REG_S a1, 12*SZREG(t0)
REG_S a1, 13*SZREG(t0)
REG_S a1, 14*SZREG(t0)
REG_S a1, 15*SZREG(t0)
REG_S a1, 16*SZREG(t0)
REG_S a1, 17*SZREG(t0)
REG_S a1, 18*SZREG(t0)
REG_S a1, 19*SZREG(t0)
REG_S a1, 20*SZREG(t0)
REG_S a1, 21*SZREG(t0)
REG_S a1, 22*SZREG(t0)
REG_S a1, 23*SZREG(t0)
REG_S a1, 24*SZREG(t0)
REG_S a1, 25*SZREG(t0)
REG_S a1, 26*SZREG(t0)
REG_S a1, 27*SZREG(t0)
REG_S a1, 28*SZREG(t0)
REG_S a1, 29*SZREG(t0)
REG_S a1, 30*SZREG(t0)
REG_S a1, 31*SZREG(t0)
addi t0, t0, 32*SZREG
bltu t0, a3, 3b
andi a2, a2, SZREG-1 /* Update count */
4:
/* Handle trailing misalignment */
beqz a2, 6f
add a3, t0, a2
5:
sb a1, 0(t0)
addi t0, t0, 1
bltu t0, a3, 5b
6:
ret
END(memset)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,066
|
arch/riscv/kernel/vdso/vdso.lds.S
|
/*
* Copyright (C) 2012 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
OUTPUT_ARCH(riscv)
SECTIONS
{
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.note : { *(.note.*) } :text :note
.dynamic : { *(.dynamic) } :text :dynamic
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
/*
* This linker script is used both with -r and with -shared.
* For the layouts to match, we need to skip more than enough
* space for the dynamic symbol table, etc. If this amount is
* insufficient, ld -shared will error; simply increase it here.
*/
. = 0x800;
.text : { *(.text .text.*) } :text
.data : {
*(.got.plt) *(.got)
*(.data .data.* .gnu.linkonce.d.*)
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
}
}
/*
* We must supply the ELF program headers explicitly to get just one
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
*/
PHDRS
{
text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
* This controls what symbols we export from the DSO.
*/
VERSION
{
LINUX_4.15 {
global:
__vdso_rt_sigreturn;
__vdso_gettimeofday;
__vdso_clock_gettime;
__vdso_clock_getres;
__vdso_getcpu;
__vdso_flush_icache;
local: *;
};
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,723
|
arch/sh/kernel/syscalls_64.S
|
/*
* arch/sh/kernel/syscalls_64.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 - 2007 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
.section .data, "aw"
.balign 32
/*
* System calls jump table
*/
.globl sys_call_table
sys_call_table:
.long sys_restart_syscall /* 0 - old "setup()" system call */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sh64_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys( */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* sys_olduname */
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_ni_syscall /* sys_oldselect */
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm */
.long sys_socketcall /* Obsolete implementation of socket syscall */
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_uname
.long sys_ni_syscall /* 110 */ /* iopl */
.long sys_vhangup
.long sys_ni_syscall /* idle */
.long sys_ni_syscall /* vm86old */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc /* Obsolete ipc syscall implementation */
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130: old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
.long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* getpmsg */
.long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_lchown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_chown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
/* Broken-out socket family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_socket /* 220 */
.long sys_bind
.long sys_connect
.long sys_listen
.long sys_accept
.long sys_getsockname /* 225 */
.long sys_getpeername
.long sys_socketpair
.long sys_send
.long sys_sendto
.long sys_recv /* 230*/
.long sys_recvfrom
.long sys_shutdown
.long sys_setsockopt
.long sys_getsockopt
.long sys_sendmsg /* 235 */
.long sys_recvmsg
/* Broken-out IPC family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_semop
.long sys_semget
.long sys_semctl
.long sys_msgsnd /* 240 */
.long sys_msgrcv
.long sys_msgget
.long sys_msgctl
.long sys_shmat
.long sys_shmdt /* 245 */
.long sys_shmget
.long sys_shmctl
/* Rest of syscalls listed in 2.4 i386 unistd.h */
.long sys_getdents64
.long sys_fcntl64
.long sys_ni_syscall /* 250 reserved for TUX */
.long sys_ni_syscall /* Reserved for Security */
.long sys_gettid
.long sys_readahead
.long sys_setxattr
.long sys_lsetxattr /* 255 */
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr /* 260 */
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr /* 265 */
.long sys_tkill
.long sys_sendfile64
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity /* 270 */
.long sys_ni_syscall /* reserved for set_thread_area */
.long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents /* 275 */
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64
.long sys_ni_syscall
.long sys_exit_group /* 280 */
/* Rest of new 2.6 syscalls */
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl
.long sys_epoll_wait
.long sys_remap_file_pages /* 285 */
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime
.long sys_timer_gettime
.long sys_timer_getoverrun /* 290 */
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime
.long sys_clock_getres
.long sys_clock_nanosleep /* 295 */
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill
.long sys_utimes
.long sys_fadvise64_64 /* 300 */
.long sys_ni_syscall /* Reserved for vserver */
.long sys_ni_syscall /* Reserved for mbind */
.long sys_ni_syscall /* get_mempolicy */
.long sys_ni_syscall /* set_mempolicy */
.long sys_mq_open /* 305 */
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive
.long sys_mq_notify
.long sys_mq_getsetattr /* 310 */
.long sys_ni_syscall /* Reserved for kexec */
.long sys_waitid
.long sys_add_key
.long sys_request_key
.long sys_keyctl /* 315 */
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch /* 320 */
.long sys_ni_syscall
.long sys_migrate_pages
.long sys_openat
.long sys_mkdirat
.long sys_mknodat /* 325 */
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat /* 330 */
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat /* 335 */
.long sys_pselect6
.long sys_ppoll
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list /* 340 */
.long sys_splice
.long sys_sync_file_range
.long sys_tee
.long sys_vmsplice
.long sys_move_pages /* 345 */
.long sys_getcpu
.long sys_epoll_pwait
.long sys_utimensat
.long sys_signalfd
.long sys_timerfd_create /* 350 */
.long sys_eventfd
.long sys_fallocate
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_signalfd4 /* 355 */
.long sys_eventfd2
.long sys_epoll_create1
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1 /* 360 */
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_recvmmsg /* 365 */
.long sys_accept4
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
.long sys_name_to_handle_at /* 370 */
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
.long sys_setns /* 375 */
.long sys_process_vm_readv
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
.long sys_sched_getattr /* 380 */
.long sys_sched_setattr
.long sys_renameat2
.long sys_seccomp
.long sys_getrandom
.long sys_memfd_create /* 385 */
.long sys_bpf
.long sys_execveat
.long sys_userfaultfd
.long sys_membarrier
.long sys_mlock2 /* 390 */
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,179
|
arch/sh/kernel/relocate_kernel.S
|
/*
* relocate_kernel.S - put the kernel image in place to boot
* 2005.9.17 kogiidena@eggplant.ddo.jp
*
* LANDISK/sh4 is supported. Maybe, SH archtecture works well.
*
* 2009-03-18 Magnus Damm - Added Kexec Jump support
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/linkage.h>
#include <asm/addrspace.h>
#include <asm/page.h>
.globl relocate_new_kernel
relocate_new_kernel:
/* r4 = indirection_page */
/* r5 = reboot_code_buffer */
/* r6 = start_address */
/*
 * Save the complete CPU context (both register banks, MAC/控制 regs --
 * see individual comments) on a private stack at the top of the control
 * page, so kexec-jump can come back and restore it after the new
 * kernel returns via jsr @r6.
 */
mov.l 10f, r0 /* PAGE_SIZE */
add r5, r0 /* setup new stack at end of control page */
/* save r15->r8 to new stack */
mov.l r15, @-r0
mov r0, r15
mov.l r14, @-r15
mov.l r13, @-r15
mov.l r12, @-r15
mov.l r11, @-r15
mov.l r10, @-r15
mov.l r9, @-r15
mov.l r8, @-r15
/* save other random registers */
sts.l macl, @-r15
sts.l mach, @-r15
stc.l gbr, @-r15
stc.l ssr, @-r15
stc.l sr, @-r15
sts.l pr, @-r15
stc.l spc, @-r15
/* switch to bank1 and save r7->r0 */
mov.l 12f, r9 /* 12f = SR.RB bit mask */
stc sr, r8
or r9, r8
ldc r8, sr
mov.l r7, @-r15
mov.l r6, @-r15
mov.l r5, @-r15
mov.l r4, @-r15
mov.l r3, @-r15
mov.l r2, @-r15
mov.l r1, @-r15
mov.l r0, @-r15
/* switch to bank0 and save r7->r0 */
mov.l 12f, r9
not r9, r9
stc sr, r8
and r9, r8
ldc r8, sr
mov.l r7, @-r15
mov.l r6, @-r15
mov.l r5, @-r15
mov.l r4, @-r15
mov.l r3, @-r15
mov.l r2, @-r15
mov.l r1, @-r15
mov.l r0, @-r15
mov.l r4, @-r15 /* save indirection page again */
bsr swap_pages /* swap pages before jumping to new kernel */
nop
mova 11f, r0
mov.l r15, @r0 /* save pointer to stack */
jsr @r6 /* hand over control to new kernel */
nop
/* Execution resumes here only on kexec-jump return */
mov.l 11f, r15 /* get pointer to stack */
mov.l @r15+, r4 /* restore r4 to get indirection page */
bsr swap_pages /* swap pages back to previous state */
nop
/* make sure bank0 is active and restore r0->r7 */
mov.l 12f, r9
not r9, r9
stc sr, r8
and r9, r8
ldc r8, sr
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
mov.l @r15+, r3
mov.l @r15+, r4
mov.l @r15+, r5
mov.l @r15+, r6
mov.l @r15+, r7
/* switch to bank1 and restore r0->r7 */
mov.l 12f, r9
stc sr, r8
or r9, r8
ldc r8, sr
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
mov.l @r15+, r3
mov.l @r15+, r4
mov.l @r15+, r5
mov.l @r15+, r6
mov.l @r15+, r7
/* switch back to bank0 */
mov.l 12f, r9
not r9, r9
stc sr, r8
and r9, r8
ldc r8, sr
/* restore other random registers */
ldc.l @r15+, spc
lds.l @r15+, pr
ldc.l @r15+, sr
ldc.l @r15+, ssr
ldc.l @r15+, gbr
lds.l @r15+, mach
lds.l @r15+, macl
/* restore r8->r15 */
mov.l @r15+, r8
mov.l @r15+, r9
mov.l @r15+, r10
mov.l @r15+, r11
mov.l @r15+, r12
mov.l @r15+, r13
mov.l @r15+, r14
mov.l @r15+, r15
rts
nop
/*
 * swap_pages: walk the kexec indirection list in r4, exchanging
 * source and destination page contents.  Swapping (rather than plain
 * copying) lets the same routine serve both regular kexec and
 * kexec-jump, which must be able to restore the original pages.
 *   r0 = current cmd word, r2 = addr field (cmd & ~0xf),
 *   r4 = indirection pointer, r5 = current destination page.
 * Clobbers r0-r3, r5, r8.
 */
swap_pages:
bra 1f
mov r4,r0 /* cmd = indirection_page */
0:
mov.l @r4+,r0 /* cmd = *ind++ */
1: /* addr = cmd & 0xfffffff0 */
mov r0,r2
mov #-16,r1
and r1,r2
/* if(cmd & IND_DESTINATION) dst = addr */
tst #1,r0
bt 2f
bra 0b
mov r2,r5
2: /* else if(cmd & IND_INDIRECTION) ind = addr */
tst #2,r0
bt 3f
bra 0b
mov r2,r4
3: /* else if(cmd & IND_DONE) return */
tst #4,r0
bt 4f
rts
nop
4: /* else if(cmd & IND_SOURCE) memcpy(dst,addr,PAGE_SIZE) */
tst #8,r0
bt 0b
mov.l 10f,r3 /* PAGE_SIZE */
shlr2 r3
shlr2 r3 /* r3 = PAGE_SIZE/16 iterations of 16 bytes each */
5:
dt r3
/* regular kexec just overwrites the destination page
 * with the contents of the source page.
 * for the kexec jump case we need to swap the contents
 * of the pages.
 * to keep it simple swap the contents for both cases.
 */
mov.l @(0, r2), r8
mov.l @(0, r5), r1
mov.l r8, @(0, r5)
mov.l r1, @(0, r2)
mov.l @(4, r2), r8
mov.l @(4, r5), r1
mov.l r8, @(4, r5)
mov.l r1, @(4, r2)
mov.l @(8, r2), r8
mov.l @(8, r5), r1
mov.l r8, @(8, r5)
mov.l r1, @(8, r2)
mov.l @(12, r2), r8
mov.l @(12, r5), r1
mov.l r8, @(12, r5)
mov.l r1, @(12, r2)
add #16,r5
add #16,r2
bf 5b
bra 0b
nop
/* Literal pool shared by relocate_new_kernel and swap_pages */
.align 2
10:
.long PAGE_SIZE
11:
.long 0 /* stack pointer saved across the kexec jump */
12:
.long 0x20000000 ! RB=1
relocate_new_kernel_end:
/* Byte size of the relocation stub, copied to the control page by kexec */
.globl relocate_new_kernel_size
relocate_new_kernel_size:
.long relocate_new_kernel_end - relocate_new_kernel
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,645
|
arch/sh/kernel/head_32.S
|
/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
*
* arch/sh/kernel/head.S
*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2010 Matt Fleming
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Head.S contains the SH exception handlers and startup code.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>
#ifdef CONFIG_CPU_SH4A
#define SYNCO() synco
#define PREFI(label, reg) \
mov.l label, reg; \
prefi @reg
#else
#define SYNCO()
#define PREFI(label, reg)
#endif
.section .empty_zero_page, "aw"
ENTRY(empty_zero_page)
.long 1 /* MOUNT_ROOT_RDONLY */
.long 0 /* RAMDISK_FLAGS */
.long 0x0200 /* ORIG_ROOT_DEV */
.long 1 /* LOADER_TYPE */
.long 0x00000000 /* INITRD_START */
.long 0x00000000 /* INITRD_SIZE */
#ifdef CONFIG_32BIT
.long 0x53453f00 + 32 /* "SE?" = 32 bit */
#else
.long 0x53453f00 + 29 /* "SE?" = 29 bit */
#endif
1:
.skip PAGE_SIZE - empty_zero_page - 1b
__HEAD
/*
* Condition at the entry of _stext:
*
* BSC has already been initialized.
* INTC may or may not be initialized.
* VBR may or may not be initialized.
* MMU may or may not be initialized.
* Cache may or may not be initialized.
* Hardware (including on-chip modules) may or may not be initialized.
*
*/
/*
 * _stext: 32-bit SH kernel entry point.
 * Sets up SR, the initial stack/thread_info, (optionally) the PMB
 * mappings, clears BSS, runs cpu_init and jumps to start_kernel.
 * Literal-pool references 1f..8f are resolved in the pool that follows
 * this routine.
 */
ENTRY(_stext)
! Initialize Status Register
mov.l 1f, r0 ! MD=1, RB=0, BL=0, IMASK=0xF
ldc r0, sr
! Initialize global interrupt mask
#ifdef CONFIG_CPU_HAS_SR_RB
mov #0, r0
ldc r0, r6_bank
#endif
#ifdef CONFIG_OF_FLATTREE
mov r4, r12 ! Store device tree blob pointer in r12
#endif
/*
 * Prefetch if possible to reduce cache miss penalty.
 *
 * We do this early on for SH-4A as a micro-optimization,
 * as later on we will have speculative execution enabled
 * and this will become less of an issue.
 */
PREFI(5f, r0) ! prefetch start_kernel pointer (SH-4A only, no-op otherwise)
PREFI(6f, r0) ! prefetch cpu_init pointer
!
mov.l 2f, r0
mov r0, r15 ! Set initial r15 (stack pointer)
#ifdef CONFIG_CPU_HAS_SR_RB
mov.l 7f, r0
ldc r0, r7_bank ! ... and initial thread_info
#endif
#ifdef CONFIG_PMB
/*
 * Reconfigure the initial PMB mappings setup by the hardware.
 *
 * When we boot in 32-bit MMU mode there are 2 PMB entries already
 * setup for us.
 *
 * Entry VPN PPN V SZ C UB WT
 * ---------------------------------------------------------------
 * 0 0x80000000 0x00000000 1 512MB 1 0 1
 * 1 0xA0000000 0x00000000 1 512MB 0 0 0
 *
 * But we reprogram them here because we want complete control over
 * our address space and the initial mappings may not map PAGE_OFFSET
 * to __MEMORY_START (or even map all of our RAM).
 *
 * Once we've setup cached and uncached mappings we clear the rest of the
 * PMB entries. This clearing also deals with the fact that PMB entries
 * can persist across reboots. The PMB could have been left in any state
 * when the reboot occurred, so to be safe we clear all entries and start
 * with a clean slate.
 *
 * The uncached mapping is constructed using the smallest possible
 * mapping with a single unbufferable page. Only the kernel text needs to
 * be covered via the uncached mapping so that certain functions can be
 * run uncached.
 *
 * Drivers and the like that have previously abused the 1:1 identity
 * mapping are unsupported in 32-bit mode and must specify their caching
 * preference when page tables are constructed.
 *
 * This frees up the P2 space for more nefarious purposes.
 *
 * Register utilization is as follows:
 *
 * r0 = PMB_DATA data field
 * r1 = PMB_DATA address field
 * r2 = PMB_ADDR data field
 * r3 = PMB_ADDR address field
 * r4 = PMB_E_SHIFT
 * r5 = remaining amount of RAM to map
 * r6 = PMB mapping size we're trying to use
 * r7 = cached_to_uncached
 * r8 = scratch register
 * r9 = scratch register
 * r10 = number of PMB entries we've setup
 * r11 = scratch register
 */
mov.l .LMMUCR, r1 /* Flush the TLB */
mov.l @r1, r0
or #MMUCR_TI, r0
mov.l r0, @r1
mov.l .LMEMORY_SIZE, r5
mov #PMB_E_SHIFT, r0
mov #0x1, r4
shld r0, r4 /* r4 = 1 << PMB_E_SHIFT = stride between PMB entry regs */
mov.l .LFIRST_DATA_ENTRY, r0
mov.l .LPMB_DATA, r1
mov.l .LFIRST_ADDR_ENTRY, r2
mov.l .LPMB_ADDR, r3
/*
 * First we need to walk the PMB and figure out if there are any
 * existing mappings that match the initial mappings VPN/PPN.
 * If these have already been established by the bootloader, we
 * don't bother setting up new entries here, and let the late PMB
 * initialization take care of things instead.
 *
 * Note that we may need to coalesce and merge entries in order
 * to reclaim more available PMB slots, which is much more than
 * we want to do at this early stage.
 */
mov #0, r10
mov #NR_PMB_ENTRIES, r9
mov r1, r7 /* temporary PMB_DATA iter */
.Lvalidate_existing_mappings:
mov.l .LPMB_DATA_MASK, r11
mov.l @r7, r8
and r11, r8 /* keep only PFN + valid bit for the compare */
cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */
bt .Lpmb_done
add #1, r10 /* Increment the loop counter */
cmp/eq r9, r10
bf/s .Lvalidate_existing_mappings
add r4, r7 /* Increment to the next PMB_DATA entry (delay slot) */
/*
 * If we've fallen through, continue with setting up the initial
 * mappings.
 */
mov r5, r7 /* cached_to_uncached */
mov #0, r10
#ifdef CONFIG_UNCACHED_MAPPING
/*
 * Uncached mapping
 */
mov #(PMB_SZ_16M >> 2), r9
shll2 r9
mov #(PMB_UB >> 8), r8
shll8 r8
or r0, r8
or r9, r8
mov.l r8, @r1
mov r2, r8
add r7, r8
mov.l r8, @r3
add r4, r1
add r4, r3
add #1, r10
#endif
/*
 * Iterate over all of the available sizes from largest to
 * smallest for constructing the cached mapping.
 */
#define __PMB_ITER_BY_SIZE(size) \
.L##size: \
mov #(size >> 4), r6; \
shll16 r6; \
shll8 r6; \
\
cmp/hi r5, r6; \
bt 9999f; \
\
mov #(PMB_SZ_##size##M >> 2), r9; \
shll2 r9; \
\
/* \
* Cached mapping \
*/ \
mov #PMB_C, r8; \
or r0, r8; \
or r9, r8; \
mov.l r8, @r1; \
mov.l r2, @r3; \
\
/* Increment to the next PMB_DATA entry */ \
add r4, r1; \
/* Increment to the next PMB_ADDR entry */ \
add r4, r3; \
/* Increment number of PMB entries */ \
add #1, r10; \
\
sub r6, r5; \
add r6, r0; \
add r6, r2; \
\
bra .L##size; \
9999:
__PMB_ITER_BY_SIZE(512)
__PMB_ITER_BY_SIZE(128)
__PMB_ITER_BY_SIZE(64)
__PMB_ITER_BY_SIZE(16)
#ifdef CONFIG_UNCACHED_MAPPING
/*
 * Now that we can access it, update cached_to_uncached and
 * uncached_size.
 */
mov.l .Lcached_to_uncached, r0
mov.l r7, @r0
mov.l .Luncached_size, r0
mov #1, r7
shll16 r7
shll8 r7 /* r7 = 1 << 24 = 16MB uncached size */
mov.l r7, @r0
#endif
/*
 * Clear the remaining PMB entries.
 *
 * r3 = entry to begin clearing from
 * r10 = number of entries we've setup so far
 */
mov #0, r1
mov #NR_PMB_ENTRIES, r0
.Lagain:
mov.l r1, @r3 /* Clear PMB_ADDR entry */
add #1, r10 /* Increment the loop counter */
cmp/eq r0, r10
bf/s .Lagain
add r4, r3 /* Increment to the next PMB_ADDR entry */
mov.l 6f, r0
icbi @r0 /* flush icache line so new mappings take effect for cpu_init */
.Lpmb_done:
#endif /* CONFIG_PMB */
#ifndef CONFIG_SH_NO_BSS_INIT
/*
 * Don't clear BSS if running on slow platforms such as an RTL simulation,
 * remote memory via SHdebug link, etc. For these the memory can be guaranteed
 * to be all zero on boot anyway.
 */
! Clear BSS area
#ifdef CONFIG_SMP
mov.l 3f, r0
cmp/eq #0, r0 ! skip clear if set to zero
bt 10f
#endif
mov.l 3f, r1
add #4, r1 ! skip first word (cleared in delay slot via pre-decrement)
mov.l 4f, r2
mov #0, r0
9: cmp/hs r2, r1
bf/s 9b ! while (r1 < r2)
mov.l r0,@-r2 ! clear downwards from _end (delay slot)
10:
#endif
#ifdef CONFIG_OF_FLATTREE
mov.l 8f, r0 ! Make flat device tree available early.
jsr @r0
mov r12, r4 ! pass DT blob pointer saved earlier (delay slot)
#endif
! Additional CPU initialization
mov.l 6f, r0
jsr @r0
nop
SYNCO() ! Wait for pending instructions..
! Start kernel
mov.l 5f, r0
jmp @r0
nop
/*
 * Literal pool for _stext: initial SR value, stack/thread_info
 * pointers, BSS bounds, entry points and (with PMB) the PMB register
 * addresses and first-entry templates.
 */
.balign 4
#if defined(CONFIG_CPU_SH2)
1: .long 0x000000F0 ! IMASK=0xF
#else
1: .long 0x500080F0 ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2: .long init_thread_union+THREAD_SIZE
3: .long __bss_start
4: .long _end
5: .long start_kernel
6: .long cpu_init
7: .long init_thread_union
#if defined(CONFIG_OF_FLATTREE)
8: .long sh_fdt_init
#endif
#ifdef CONFIG_PMB
.LPMB_ADDR: .long PMB_ADDR
.LPMB_DATA: .long PMB_DATA
.LPMB_DATA_MASK: .long PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
.LMMUCR: .long MMUCR
.LMEMORY_SIZE: .long __MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached: .long cached_to_uncached
.Luncached_size: .long uncached_size
#endif
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,158
|
arch/sh/kernel/debugtraps.S
|
/*
* arch/sh/kernel/debugtraps.S
*
* Debug trap jump tables for SuperH
*
* Copyright (C) 2006 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#if !defined(CONFIG_KGDB)
#define singlestep_trap_handler debug_trap_handler
#endif
#if !defined(CONFIG_SH_STANDARD_BIOS)
#define sh_bios_handler debug_trap_handler
#endif
/*
 * Jump table for debug traps 0x30-0x3f, indexed by (TRA & 0x3c) from
 * debug_trap in entry-common.S. Slots 0x3c-0x3f dispatch to the
 * breakpoint/singlestep/bug/sh_bios handlers; the singlestep and
 * sh_bios slots fall back to debug_trap_handler via the #defines
 * above when KGDB / standard-BIOS support is not configured.
 */
.data
ENTRY(debug_trap_table)
.long debug_trap_handler /* 0x30 */
.long debug_trap_handler /* 0x31 */
.long debug_trap_handler /* 0x32 */
.long debug_trap_handler /* 0x33 */
.long debug_trap_handler /* 0x34 */
.long debug_trap_handler /* 0x35 */
.long debug_trap_handler /* 0x36 */
.long debug_trap_handler /* 0x37 */
.long debug_trap_handler /* 0x38 */
.long debug_trap_handler /* 0x39 */
.long debug_trap_handler /* 0x3a */
.long debug_trap_handler /* 0x3b */
.long breakpoint_trap_handler /* 0x3c */
.long singlestep_trap_handler /* 0x3d */
.long bug_trap_handler /* 0x3e */
.long sh_bios_handler /* 0x3f */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,892
|
arch/sh/kernel/entry-common.S
|
/*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2003 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
! to be jumped is too far, but it causes illegal slot exception.
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
* NOTE: This code uses a convention that instructions in the delay slot
* of a transfer-control instruction are indented by an extra space, thus:
*
* jmp @k0 ! control-transfer instruction
* ldc k1, ssr ! delay slot
*
* Stack layout in 'ret_from_syscall':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
* updated in ptrace.c and ptrace.h
*
* r0
* ...
* r15 = stack pointer
* spc
* pr
* ssr
* gbr
* mach
* macl
* syscall #
*
*/
#include <asm/dwarf.h>
#if defined(CONFIG_PREEMPT)
# define preempt_stop() cli ; TRACE_IRQS_OFF
#else
# define preempt_stop()
# define resume_kernel __restore_all
#endif
/*
 * exception_error: tail handler for unrecognized exceptions; re-enables
 * interrupts and tail-jumps to the C routine do_exception_error.
 */
.align 2
ENTRY(exception_error)
!
TRACE_IRQS_ON
sti
mov.l 1f, r0
jmp @r0
nop
.align 2
1: .long do_exception_error
/*
 * Common exception/IRQ return path. Decides between the kernel-return
 * path (resume_kernel, with optional kernel preemption) and the
 * user-return path (resume_userspace) based on the saved SR on the
 * stack. The CFI annotations describe the saved register frame for
 * unwinders.
 */
.align 2
ret_from_exception:
CFI_STARTPROC simple
CFI_DEF_CFA r14, 0
CFI_REL_OFFSET 17, 64
CFI_REL_OFFSET 15, 60
CFI_REL_OFFSET 14, 56
CFI_REL_OFFSET 13, 52
CFI_REL_OFFSET 12, 48
CFI_REL_OFFSET 11, 44
CFI_REL_OFFSET 10, 40
CFI_REL_OFFSET 9, 36
CFI_REL_OFFSET 8, 32
preempt_stop()
ENTRY(ret_from_irq)
!
mov #OFF_SR, r0
mov.l @(r0,r15), r0 ! get status register
shll r0
shll r0 ! kernel space? (second shift moves SR.MD into T)
get_current_thread_info r8, r0
bt resume_kernel ! Yes, it's from kernel, go back soon
#ifdef CONFIG_PREEMPT
bra resume_userspace
nop
! Preemptible-kernel return: reschedule if preempt_count == 0,
! TIF_NEED_RESCHED is set and interrupts were enabled in the
! interrupted context.
ENTRY(resume_kernel)
cli
TRACE_IRQS_OFF
mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
tst r0, r0
bf noresched
need_resched:
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
tst #_TIF_NEED_RESCHED, r0 ! need_resched set?
bt noresched
mov #OFF_SR, r0
mov.l @(r0,r15), r0 ! get status register
shlr r0
and #(0xf0>>1), r0 ! interrupts off (exception path)?
cmp/eq #(0xf0>>1), r0
bt noresched
mov.l 1f, r0
jsr @r0 ! call preempt_schedule_irq
nop
bra need_resched ! loop: more work may have become pending
nop
noresched:
bra __restore_all
nop
.align 2
1: .long preempt_schedule_irq
#endif
/*
 * Return-to-user work loop: with interrupts disabled, check
 * thread_info->flags and either restore user context directly, call
 * schedule(), or deliver signals / notify-resume work via
 * do_notify_resume, re-checking flags after each action.
 */
ENTRY(resume_userspace)
! r8: current_thread_info
cli
TRACE_IRQS_OFF
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
tst #(_TIF_WORK_MASK & 0xff), r0
bt/s __restore_all ! no work pending -> straight out
tst #_TIF_NEED_RESCHED, r0 ! (delay slot) precompute T for work_pending
.align 2
work_pending:
! r0: current_thread_info->flags
! r8: current_thread_info
! t: result of "tst #_TIF_NEED_RESCHED, r0"
bf/s work_resched
tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0
work_notifysig:
bt/s __restore_all
mov r15, r4 ! (delay slot) arg0 = pt_regs
mov r12, r5 ! set arg1(save_r0)
mov r0, r6
sti
mov.l 2f, r1
mov.l 3f, r0
jmp @r1 ! tail-call do_notify_resume ...
lds r0, pr ! ... which "returns" to resume_userspace
work_resched:
mov.l 1f, r1
jsr @r1 ! schedule
nop
cli
TRACE_IRQS_OFF
!
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
tst #(_TIF_WORK_MASK & 0xff), r0
bt __restore_all
bra work_pending
tst #_TIF_NEED_RESCHED, r0
.align 2
1: .long schedule
2: .long do_notify_resume
3: .long resume_userspace
/*
 * Slow syscall-exit path: if any syscall-tracing flag is set, call
 * do_syscall_trace_leave before falling into the normal return-to-user
 * work handling; otherwise hand off to work_pending directly.
 */
.align 2
syscall_exit_work:
! r0: current_thread_info->flags
! r8: current_thread_info
tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0
bt/s work_pending
tst #_TIF_NEED_RESCHED, r0 ! (delay slot) T consumed by work_pending
TRACE_IRQS_ON
sti
mov r15, r4 ! arg0 = pt_regs
mov.l 8f, r0 ! do_syscall_trace_leave
jsr @r0
nop
bra resume_userspace
nop
/*
 * Traced-syscall entry: notify the tracer, then reload the (possibly
 * tracer-modified) syscall number and arguments from the saved frame
 * and rejoin the normal dispatch at syscall_call.
 */
.align 2
syscall_trace_entry:
! Yes it is traced.
mov r15, r4 ! arg0 = pt_regs
mov.l 7f, r11 ! Call do_syscall_trace_enter which notifies
jsr @r11 ! superior (will chomp R[0-7])
nop
mov.l r0, @(OFF_R0,r15) ! Save return value
! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
! ptrace(POKEUSR). (Note that R0-R2 are
! reloaded from the kernel stack by syscall_call
! below, so don't need to be reloaded here.)
! This allows the parent to rewrite system calls
! and args on the fly.
mov.l @(OFF_R4,r15), r4 ! arg0
mov.l @(OFF_R5,r15), r5
mov.l @(OFF_R6,r15), r6
mov.l @(OFF_R7,r15), r7 ! arg3
mov.l @(OFF_R3,r15), r3 ! syscall_nr
!
mov.l 6f, r10 ! Number of syscalls
cmp/hs r10, r3 ! re-validate number: tracer may have changed it
bf syscall_call
mov #-ENOSYS, r0
bra syscall_exit
mov.l r0, @(OFF_R0,r15) ! Return value (delay slot)
/*
 * __restore_all: emit the correct irq-tracing event for the SR.IMASK
 * about to be restored, then jump to the low-level restore_all which
 * reloads the full register frame and executes rte.
 */
__restore_all:
mov #OFF_SR, r0
mov.l @(r0,r15), r0 ! get status register
shlr2 r0
and #0x3c, r0 ! isolate IMASK field
cmp/eq #0x3c, r0 ! fully masked -> returning with irqs off
bt 1f
TRACE_IRQS_ON
bra 2f
nop
1:
TRACE_IRQS_OFF
2:
mov.l 3f, r0
jmp @r0
nop
.align 2
3: .long restore_all
/* Out-of-range syscall number: fail with -ENOSYS and return to user. */
.align 2
syscall_badsys: ! Bad syscall number
get_current_thread_info r8, r0
mov #-ENOSYS, r0
bra resume_userspace
mov.l r0, @(OFF_R0,r15) ! Return value (delay slot)
/*
 * The main debug trap handler.
 *
 * r8=TRA (not the trap number!)
 *
 * Note: This assumes that the trapa value is left in its original
 * form (without the shlr2 shift) so the calculation for the jump
 * call table offset remains a simple in place mask.
 */
debug_trap:
mov r8, r0
and #(0xf << 2), r0 ! word-sized index into debug_trap_table
mov.l 1f, r8
add r0, r8
mov.l @r8, r8 ! fetch handler pointer
jsr @r8
nop
bra ret_from_exception
nop
CFI_ENDPROC
.align 2
1: .long debug_trap_table
/*
* Syscall interface:
*
* Syscall #: R3
* Arguments #0 to #3: R4--R7
* Arguments #4 to #6: R0, R1, R2
* TRA: See following table.
*
* (TRA>>2) Purpose
* -------- -------
* 0x00-0x0f original SH-3/4 syscall ABI (not in general use).
* 0x10-0x1f general SH-3/4 syscall ABI.
* 0x1f unified SH-2/3/4 syscall ABI (preferred).
* 0x20-0x2f original SH-2 syscall ABI.
* 0x30-0x3f debug traps used by the kernel.
* 0x40-0xff Not supported by all parts, so left unhandled.
*
* For making system calls, any trap number in the range for the
* given cpu model may be used, but the unified trap number 0x1f is
* preferred for compatibility with all models.
*
* The low bits of the trap number were once documented as matching
* the number of arguments, but they were never actually used as such
* by the kernel. SH-2 originally used its own separate trap range
* because several hardware exceptions fell in the range used for the
* SH-3/4 syscall ABI.
*
* This code also handles delegating other traps to the BIOS/gdb stub.
*
* Note: When we're first called, the TRA value must be shifted
* right 2 bits in order to get the value that was used as the "trapa"
* argument.
*/
/*
 * First-schedule entry points for new tasks: both run schedule_tail
 * with the previous task (in r0) as its argument; kernel threads then
 * invoke fn(arg) from the saved frame before taking the syscall exit
 * path.
 */
.align 2
.globl ret_from_fork
ret_from_fork:
mov.l 1f, r8
jsr @r8 ! schedule_tail(prev)
mov r0, r4 ! (delay slot) arg0 = prev task
bra syscall_exit
nop
.align 2
.globl ret_from_kernel_thread
ret_from_kernel_thread:
mov.l 1f, r8
jsr @r8 ! schedule_tail(prev)
mov r0, r4
mov.l @(OFF_R5,r15), r5 ! fn
jsr @r5 ! fn(arg)
mov.l @(OFF_R4,r15), r4 ! arg (delay slot)
bra syscall_exit
nop
.align 2
1: .long schedule_tail
/*
 * The poorly named main trapa decode and dispatch routine, for
 * system calls and debug traps through their respective jump tables.
 *
 * On entry: r3 = syscall number, r4-r7 / r0-r2 = arguments,
 * r8 = TRA value (read here except on SH-2, where it is presumably
 * provided by the caller — see the #if below).
 */
ENTRY(system_call)
setup_frame_reg
#if !defined(CONFIG_CPU_SH2)
mov.l 1f, r9
mov.l @r9, r8 ! Read from TRA (Trap Address) Register
#endif
mov #OFF_TRA, r10
add r15, r10
mov.l r8, @r10 ! set TRA value to tra
/*
 * Check the trap type
 */
mov #((0x20 << 2) - 1), r9
cmp/hi r9, r8 ! TRA above the syscall ranges?
bt/s debug_trap ! it's a debug trap..
nop
TRACE_IRQS_ON
sti
!
get_current_thread_info r8, r10
mov.l @(TI_FLAGS,r8), r8
mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10
mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9
tst r10, r8 ! test low byte of the tracing mask
shll8 r9
bf syscall_trace_entry
tst r9, r8 ! ... and the high byte
bf syscall_trace_entry
!
mov.l 6f, r8 ! Number of syscalls
cmp/hs r8, r3
bt syscall_badsys
!
syscall_call:
shll2 r3 ! x4
mov.l 3f, r8 ! Load the address of sys_call_table
add r8, r3
mov.l @r3, r8
mov.l @(OFF_R2,r15), r2
mov.l @(OFF_R1,r15), r1
mov.l @(OFF_R0,r15), r0
! Push args 4-6 (r0-r2) as stack arguments for the handler
mov.l r2, @-r15
mov.l r1, @-r15
mov.l r0, @-r15
jsr @r8 ! jump to specific syscall handler
nop
add #12, r15 ! pop the three stack args
mov.l @(OFF_R0,r15), r12 ! save r0
mov.l r0, @(OFF_R0,r15) ! save the return value
!
syscall_exit:
cli
TRACE_IRQS_OFF
!
get_current_thread_info r8, r0
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
tst #(_TIF_ALLWORK_MASK & 0xff), r0
mov #(_TIF_ALLWORK_MASK >> 8), r1
bf syscall_exit_work
shlr8 r0
tst r0, r1 ! check the high byte of the work mask too
bf syscall_exit_work
bra __restore_all
nop
.align 2
#if !defined(CONFIG_CPU_SH2)
1: .long TRA
#endif
6: .long NR_syscalls
3: .long sys_call_table
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,729
|
arch/sh/kernel/vmlinux.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* ld script to make SuperH Linux kernel
* Written by Niibe Yutaka and Paul Mundt
*/
#ifdef CONFIG_SUPERH64
#define LOAD_OFFSET PAGE_OFFSET
OUTPUT_ARCH(sh:sh5)
#else
#define LOAD_OFFSET 0
OUTPUT_ARCH(sh)
#endif
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
#ifdef CONFIG_PMB
#define MEMORY_OFFSET 0
#else
#define MEMORY_OFFSET __MEMORY_START
#endif
/*
 * Kernel image layout: zero/boot-param page first, then text, read-only
 * and read-write data, the discarded-after-boot init sections, and BSS.
 * LOAD_OFFSET/MEMORY_OFFSET are selected above for sh64/PMB variants.
 */
ENTRY(_start)
SECTIONS
{
. = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
_text = .; /* Text and read-only data */
.empty_zero_page : AT(ADDR(.empty_zero_page) - LOAD_OFFSET) {
*(.empty_zero_page)
} = 0
.text : AT(ADDR(.text) - LOAD_OFFSET) {
HEAD_TEXT
TEXT_TEXT
EXTRA_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
_etext = .; /* End of text section */
} = 0x0009 /* fill gaps with 0x0009 (presumably a nop pattern — verify) */
EXCEPTION_TABLE(16)
NOTES
_sdata = .;
RO_DATA(PAGE_SIZE)
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
DWARF_EH_FRAME
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
. = ALIGN(4);
/* Machine-vector table, walked between __machvec_start/__machvec_end */
.machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
__machvec_start = .;
*(.machvec.init)
__machvec_end = .;
}
PERCPU_SECTION(L1_CACHE_BYTES)
/*
 * .exit.text is discarded at runtime, not link time, to deal with
 * references from __bug_table
 */
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, PAGE_SIZE, 4) /* quad-aligned end: head_64.S clears BSS in 8-byte steps */
_end = . ;
STABS_DEBUG
DWARF_DEBUG
DISCARDS
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,396
|
arch/sh/kernel/head_64.S
|
/*
* arch/sh/kernel/head_64.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/registers.h>
#include <cpu/mmu_context.h>
#include <asm/thread_info.h>
/*
* MMU defines: TLB boundaries.
*/
#define MMUIR_FIRST ITLB_FIXED
#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP TLB_STEP
#define MMUDR_FIRST DTLB_FIXED
#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP TLB_STEP
/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
#endif
/*
* MMU defines: Fixed TLBs.
*/
/* Deal safely with the case where the base of RAM is not 512Mb aligned */
#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
#ifdef CONFIG_CACHE_OFF
#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
#else
#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
#endif
#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
#if defined (CONFIG_CACHE_OFF)
#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
#elif defined (CONFIG_CACHE_WRITETHROUGH)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
/* WT, invalidate */
#elif defined (CONFIG_CACHE_WRITEBACK)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
/* WB, invalidate */
#else
#error preprocessor flag CONFIG_CACHE_... not recognized!
#endif
#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
/*
 * sh64 zero/boot-parameter page, plus page-aligned data objects used by
 * early boot: mmu_pdtp_cache and the fpu_in_use flag written by _stext.
 */
.section .empty_zero_page, "aw"
.global empty_zero_page
empty_zero_page:
.long 1 /* MOUNT_ROOT_RDONLY */
.long 0 /* RAMDISK_FLAGS */
.long 0x0200 /* ORIG_ROOT_DEV */
.long 1 /* LOADER_TYPE */
.long 0x00800000 /* INITRD_START */
.long 0x00800000 /* INITRD_SIZE */
.long 0
.text
.balign 4096,0,4096
.section .data, "aw"
.balign PAGE_SIZE
/* NOTE(review): .section/.balign pair repeated — looks redundant, verify */
.section .data, "aw"
.balign PAGE_SIZE
.global mmu_pdtp_cache
mmu_pdtp_cache:
.space PAGE_SIZE, 0
.global fpu_in_use
fpu_in_use: .quad 0
__HEAD
.balign L1_CACHE_BYTES
/*
* Condition at the entry of __stext:
* . Reset state:
* . SR.FD = 1 (FPU disabled)
* . SR.BL = 1 (Exceptions disabled)
* . SR.MD = 1 (Privileged Mode)
* . SR.MMU = 0 (MMU Disabled)
* . SR.CD = 0 (CTC User Visible)
* . SR.IMASK = Undefined (Interrupt Mask)
*
* Operations supposed to be performed by __stext:
* . prevent speculative fetch onto device memory while MMU is off
* . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
* . first, save CPU state and set it to something harmless
* . any CPU detection and/or endianness settings (?)
* . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
* . set initial TLB entries for cached and uncached regions
* (no fine granularity paging)
* . set initial cache state
* . enable MMU and caches
* . set CPU to a consistent state
* . registers (including stack pointer and current/KCR0)
* . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
* at this stage. This is all to later Linux initialization steps.
* . initialize FPU
* . clear BSS
* . jump into start_kernel()
* . be prepared to hopeless start_kernel() returns.
*
*/
/*
 * _stext: SH-5 (sh64) kernel entry point. See the block comment above
 * for the entry conditions and the overall sequence (TLB/cache setup,
 * MMU enable via rte, stack/FPU init, BSS clear, jump to start_kernel).
 */
.global _stext
_stext:
/*
 * Prevent speculative fetch on device memory due to
 * uninitialized target registers.
 */
ptabs/u ZERO, tr0
ptabs/u ZERO, tr1
ptabs/u ZERO, tr2
ptabs/u ZERO, tr3
ptabs/u ZERO, tr4
ptabs/u ZERO, tr5
ptabs/u ZERO, tr6
ptabs/u ZERO, tr7
synci
/*
 * Read/Set CPU state. After this block:
 * r29 = Initial SR
 */
getcon SR, r29
movi SR_HARMLESS, r20
putcon r20, SR
/*
 * Initialize EMI/LMI. To Be Done.
 */
/*
 * CPU detection and/or endianness settings (?). To Be Done.
 * Pure PIC code here, please ! Just save state into r30.
 * After this block:
 * r30 = CPU type/Platform Endianness
 */
/*
 * Set initial TLB entries for cached and uncached regions.
 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
 */
/* Clear ITLBs */
pta clear_ITLB, tr1
movi MMUIR_FIRST, r21
movi MMUIR_END, r22
clear_ITLB:
putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
addi r21, MMUIR_STEP, r21
bne r21, r22, tr1
/* Clear DTLBs */
pta clear_DTLB, tr1
movi MMUDR_FIRST, r21
movi MMUDR_END, r22
clear_DTLB:
putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
addi r21, MMUDR_STEP, r21
bne r21, r22, tr1
/* Map one big (512Mb) page for ITLB */
movi MMUIR_FIRST, r21
movi MMUIR_TEXT_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
movi MMUIR_TEXT_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
/* Map one big CACHED (512Mb) page for DTLB */
movi MMUDR_FIRST, r21
movi MMUDR_CACHED_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
movi MMUDR_CACHED_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
/*
 * Setup a DTLB translation for SCIF phys.
 */
addi r21, MMUDR_STEP, r21
movi 0x0a03, r22 /* SCIF phys */
shori 0x0148, r22
putcfg r21, 1, r22 /* PTEL first */
movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
shori 0x0003, r22
putcfg r21, 0, r22 /* PTEH last */
/*
 * Set cache behaviours.
 */
/* ICache */
movi ICCR_BASE, r21
movi ICCR0_INIT_VAL, r22
movi ICCR1_INIT_VAL, r23
putcfg r21, ICCR_REG0, r22
putcfg r21, ICCR_REG1, r23
/* OCache */
movi OCCR_BASE, r21
movi OCCR0_INIT_VAL, r22
movi OCCR1_INIT_VAL, r23
putcfg r21, OCCR_REG0, r22
putcfg r21, OCCR_REG1, r23
/*
 * Enable Caches and MMU. Do the first non-PIC jump.
 * Now head.S global variables, constants and externs
 * can be used.
 */
getcon SR, r21
movi SR_ENABLE_MMU, r22
or r21, r22, r21
putcon r21, SSR /* MMU-enabled SR takes effect at rte */
movi hyperspace, r22
ori r22, 1, r22 /* Make it SHmedia, not required but..*/
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
hyperspace: /* ... that's the next instruction ! */
/*
 * Set CPU to a consistent state.
 * r31 = FPU support flag
 * tr0/tr7 in use. Others give a chance to loop somewhere safe
 */
movi start_kernel, r32
ori r32, 1, r32
ptabs r32, tr0 /* r32 = _start_kernel address */
pta/u hopeless, tr1
pta/u hopeless, tr2
pta/u hopeless, tr3
pta/u hopeless, tr4
pta/u hopeless, tr5
pta/u hopeless, tr6
pta/u hopeless, tr7
gettr tr1, r28 /* r28 = hopeless address */
/* Set initial stack pointer */
movi init_thread_union, SP
putcon SP, KCR0 /* Set current to init_task */
movi THREAD_SIZE, r22 /* Point to the end */
add SP, r22, SP
/*
 * Initialize FPU.
 * Keep FPU flag in r31. After this block:
 * r31 = FPU flag
 */
movi fpu_in_use, r31 /* Temporary */
#ifdef CONFIG_SH_FPU
getcon SR, r21
movi SR_ENABLE_FPU, r22
and r21, r22, r22
putcon r22, SR /* Try to enable */
getcon SR, r22
xor r21, r22, r21 /* did the FD bit actually change? */
shlri r21, 15, r21 /* Supposedly 0/1 */
st.q r31, 0 , r21 /* Set fpu_in_use */
#else
movi 0, r21
st.q r31, 0 , r21 /* Set fpu_in_use */
#endif
or r21, ZERO, r31 /* Set FPU flag at last */
#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
remote memory via SHdebug link, etc. For these the memory can be guaranteed
to be all zero on boot anyway. */
/*
 * Clear bss
 */
pta clear_quad, tr1
movi __bss_start, r22
movi _end, r23
clear_quad:
st.q r22, 0, ZERO
addi r22, 8, r22
bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
#endif
pta/u hopeless, tr1
/* Say bye to head.S but be prepared to wrongly get back ... */
blink tr0, LINK
/* If we ever get back here through LINK/tr1-tr7 */
pta/u hopeless, tr7
hopeless:
/*
 * Something's badly wrong here. Loop endlessly,
 * there's nothing more we can do about it.
 *
 * Note on hopeless: it can be jumped into invariably
 * before or after jumping into hyperspace. The only
 * requirement is to be PIC called (PTA) before and
 * any way (PTA/PTABS) after. According to Virtual
 * to Physical mapping a simulator/emulator can easily
 * tell where we came here from just looking at hopeless
 * (PC) address.
 *
 * For debugging purposes:
 * (r28) hopeless/loop address
 * (r29) Original SR
 * (r30) CPU type/Platform endianness
 * (r31) FPU Support
 * (r32) _start_kernel address
 */
blink tr7, ZERO
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,033
|
arch/sh/kernel/syscalls_32.S
|
/*
* arch/sh/kernel/syscalls.S
*
* System call table for SuperH
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2003 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#include <linux/sys.h>
#include <linux/linkage.h>
.data
ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 - old "setup()" system call*/
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_sh_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* sys_olduname */
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_ni_syscall /* sys_oldselect */
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm */
.long sys_socketcall
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_uname
.long sys_ni_syscall /* 110 */ /* iopl */
.long sys_vhangup
.long sys_ni_syscall /* idle */
.long sys_ni_syscall /* vm86old */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130: old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
.long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread_wrapper /* 180 */
.long sys_pwrite_wrapper
.long sys_chown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* getpmsg */
.long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_lchown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_chown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
.long sys_getdents64 /* 220 */
.long sys_fcntl64
.long sys_ni_syscall /* reserved for TUX */
.long sys_ni_syscall /* Reserved for Security */
.long sys_gettid
.long sys_readahead /* 225 */
.long sys_setxattr
.long sys_lsetxattr
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr /* 230 */
.long sys_fgetxattr
.long sys_listxattr
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr /* 235 */
.long sys_lremovexattr
.long sys_fremovexattr
.long sys_tkill
.long sys_sendfile64
.long sys_futex /* 240 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_ni_syscall /* reserved for set_thread_area */
.long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup /* 245 */
.long sys_io_destroy
.long sys_io_getevents
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64 /* 250 */
.long sys_ni_syscall
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl /* 255 */
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime /* 260 */
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime /* 265 */
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill /* 270 */
.long sys_utimes
.long sys_fadvise64_64_wrapper
.long sys_ni_syscall /* Reserved for vserver */
.long sys_mbind
.long sys_get_mempolicy /* 275 */
.long sys_set_mempolicy
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive /* 280 */
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_kexec_load
.long sys_waitid
.long sys_add_key /* 285 */
.long sys_request_key
.long sys_keyctl
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init /* 290 */
.long sys_inotify_add_watch
.long sys_inotify_rm_watch
.long sys_ni_syscall
.long sys_migrate_pages
.long sys_openat /* 295 */
.long sys_mkdirat
.long sys_mknodat
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64 /* 300 */
.long sys_unlinkat
.long sys_renameat
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat /* 305 */
.long sys_fchmodat
.long sys_faccessat
.long sys_pselect6
.long sys_ppoll
.long sys_unshare /* 310 */
.long sys_set_robust_list
.long sys_get_robust_list
.long sys_splice
.long sys_sync_file_range
.long sys_tee /* 315 */
.long sys_vmsplice
.long sys_move_pages
.long sys_getcpu
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_signalfd
.long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate
.long sys_timerfd_settime /* 325 */
.long sys_timerfd_gettime
.long sys_signalfd4
.long sys_eventfd2
.long sys_epoll_create1
.long sys_dup3 /* 330 */
.long sys_pipe2
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
.long sys_perf_event_open
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
/* Broken-out socket family */
.long sys_socket /* 340 */
.long sys_bind
.long sys_connect
.long sys_listen
.long sys_accept
.long sys_getsockname /* 345 */
.long sys_getpeername
.long sys_socketpair
.long sys_send
.long sys_sendto
.long sys_recv /* 350 */
.long sys_recvfrom
.long sys_shutdown
.long sys_setsockopt
.long sys_getsockopt
.long sys_sendmsg /* 355 */
.long sys_recvmsg
.long sys_recvmmsg
.long sys_accept4
.long sys_name_to_handle_at
.long sys_open_by_handle_at /* 360 */
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
.long sys_setns
.long sys_process_vm_readv /* 365 */
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
.long sys_sched_getattr
.long sys_sched_setattr /* 370 */
.long sys_renameat2
.long sys_seccomp
.long sys_getrandom
.long sys_memfd_create
.long sys_bpf /* 375 */
.long sys_execveat
.long sys_userfaultfd
.long sys_membarrier
.long sys_mlock2
.long sys_copy_file_range /* 380 */
.long sys_preadv2
.long sys_pwritev2
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,233
|
arch/sh/lib/udivsi3_i4i.S
|
/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
/* This code used shld, thus is not suitable for SH1 / SH2. */
/* Signed / unsigned division without use of FPU, optimized for SH4.
Uses a lookup table for divisors in the range -128 .. +128, and
div1 with case distinction for larger divisors in three more ranges.
The code is lumped together with the table to allow the use of mova. */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define L_LSB 0
#define L_LSWMSB 1
#define L_MSWLSB 2
#else
#define L_LSB 3
#define L_LSWMSB 2
#define L_MSWLSB 1
#endif
/*
 * unsigned int __udivsi3_i4i(unsigned int n, unsigned int d)
 * int          __sdivsi3_i4i(int n, int d)
 *
 * SH-4 software division without the FPU.  Per the SH GCC ABI the
 * dividend arrives in r4, the divisor in r5, and the quotient is
 * returned in r0.  Divisors in 1..128 go through a reciprocal lookup
 * (div_table_ix / div_table_inv / div_table_clz, defined below);
 * larger divisors use the 1-bit-per-step div1 instruction, with a
 * case split on the divisor/quotient magnitude.
 *
 * NOTE: branches written with a /s suffix (and bra/rts/bsr) are
 * delayed branches - the instruction on the following line executes
 * in the delay slot, i.e. before the branch takes effect.
 */
	.balign 4
	.global	__udivsi3_i4i
	.global	__udivsi3_i4
	.set	__udivsi3_i4, __udivsi3_i4i
	.type	__udivsi3_i4i, @function
__udivsi3_i4i:
	! r1 = 128; classify the divisor.  r0 = n >> 8 is prepared for
	! the quotient-size test used by the larger-divisor paths.
	mov.w c128_w, r1
	div0u
	mov r4,r0
	shlr8 r0
	cmp/hi r1,r5			! divisor > 128 ?
	extu.w r5,r1
	bf udiv_le128			! no: table-driven path
	cmp/eq r5,r1			! divisor fits in 16 bits ?
	bf udiv_ge64k			! no: divisor >= 64K
	! here: 128 < divisor < 64K; start the div1 step sequence
	shlr r0
	mov r5,r1
	shll16 r5
	mov.l r4,@-r15
	div1 r5,r0
	mov.l r1,@-r15
	div1 r5,r0
	div1 r5,r0
	bra udiv_25
	div1 r5,r0			! delay slot
div_le128:
	! divisor in 1..128, entered from the signed path (r4/r5 already
	! saved there): index the normalized-inverse table.
	mova div_table_ix,r0
	bra div_le128_2
	mov.b @(r0,r5),r1		! delay slot: r1 = table index
udiv_le128:
	! divisor in 1..128, unsigned entry: save regs, then fall through.
	mov.l r4,@-r15
	mova div_table_ix,r0
	mov.b @(r0,r5),r1
	mov.l r5,@-r15
div_le128_2:
	! r1 = normalized inverse of the divisor; multiply and shift.
	mova div_table_inv,r0
	mov.l @(r0,r1),r1
	mov r5,r0
	tst #0xfe,r0			! divisor == 1 (or 0) ?
	mova div_table_clz,r0
	dmulu.l r1,r4
	mov.b @(r0,r5),r1		! r1 = final shift count for shld
	bt/s div_by_1
	mov r4,r0			! delay slot
	mov.l @r15+,r5
	sts mach,r0			! high part of n * inverse
	/* clrt */
	addc r4,r0
	mov.l @r15+,r4
	rotcr r0
	rts
	shld r1,r0			! delay slot: negative count => right shift
div_by_1_neg:
	neg r4,r0			! quotient = -n for divisor 1, negative result
div_by_1:
	mov.l @r15+,r5
	rts
	mov.l @r15+,r4			! delay slot
div_ge64k:
	! signed path, divisor >= 64K.  T set here means quotient < 256.
	bt/s div_r8
	div0u				! delay slot
	shll8 r5
	bra div_ge64k_2
	div1 r5,r0			! delay slot
udiv_ge64k:
	! unsigned path, divisor >= 64K.
	cmp/hi r0,r5			! divisor > (n >> 8) => quotient fits 8 bits
	mov r5,r1
	bt udiv_r8
	shll8 r5
	mov.l r4,@-r15
	div1 r5,r0
	mov.l r1,@-r15
div_ge64k_2:
	div1 r5,r0
	mov.l zero_l,r1
	.rept 4
	div1 r5,r0
	.endr
	mov.l r1,@-r15
	div1 r5,r0
	mov.w m256_w,r1
	div1 r5,r0
	mov.b r0,@(L_LSWMSB,r15)	! stash partial quotient byte on the stack
	xor r4,r0
	and r1,r0
	bra div_ge64k_end
	xor r4,r0			! delay slot
div_r8:
	! signed path, 8-bit quotient: position dividend for rotcl/div1 loop.
	shll16 r4
	bra div_r8_2
	shll8 r4			! delay slot
udiv_r8:
	! unsigned path, 8-bit quotient.
	mov.l r4,@-r15
	shll16 r4
	clrt
	shll8 r4
	mov.l r5,@-r15
div_r8_2:
	rotcl r4
	mov r0,r1
	div1 r5,r1
	mov r4,r0
	rotcl r0
	mov r5,r4
	div1 r5,r1
	.rept 5
	rotcl r0; div1 r5,r1
	.endr
	rotcl r0
	mov.l @r15+,r5
	div1 r4,r1
	mov.l @r15+,r4
	rts
	rotcl r0			! delay slot: shift in last quotient bit
	.global	__sdivsi3_i4i
	.global	__sdivsi3_i4
	.global	__sdivsi3
	.set	__sdivsi3_i4, __sdivsi3_i4i
	.set	__sdivsi3, __sdivsi3_i4i
	.type	__sdivsi3_i4i, @function
	/* This is link-compatible with a __sdivsi3 call,
	   but we effectively clobber only r1.  */
__sdivsi3_i4i:
	! Signed division: reduce to the unsigned machinery, negating the
	! result when exactly one operand is negative.
	mov.l r4,@-r15
	cmp/pz r5			! divisor >= 0 ?
	mov.w c128_w, r1
	bt/s pos_divisor
	cmp/pz r4			! delay slot: dividend >= 0 ?
	mov.l r5,@-r15
	neg r5,r5			! make divisor positive
	bt/s neg_result
	cmp/hi r1,r5			! delay slot
	neg r4,r4			! both negative => positive result
pos_result:
	extu.w r5,r0
	bf div_le128
	cmp/eq r5,r0
	mov r4,r0
	shlr8 r0
	bf/s div_ge64k
	cmp/hi r0,r5			! delay slot
	div0u
	shll16 r5
	div1 r5,r0
	div1 r5,r0
	div1 r5,r0
udiv_25:
	! common tail for the 128 < d < 64K range (25 div1 steps total).
	mov.l zero_l,r1
	div1 r5,r0
	div1 r5,r0
	mov.l r1,@-r15
	.rept 3
	div1 r5,r0
	.endr
	mov.b r0,@(L_MSWLSB,r15)
	xtrct r4,r0
	swap.w r0,r0
	.rept 8
	div1 r5,r0
	.endr
	mov.b r0,@(L_LSWMSB,r15)
div_ge64k_end:
	.rept 8
	div1 r5,r0
	.endr
	mov.l @r15+,r4 ! zero-extension and swap using LS unit.
	extu.b r0,r0
	mov.l @r15+,r5
	or r4,r0
	mov.l @r15+,r4
	rts
	rotcl r0			! delay slot
div_le128_neg:
	! divisor in 1..128, negative result: as div_le128 but negate at end.
	tst #0xfe,r0
	mova div_table_ix,r0
	mov.b @(r0,r5),r1
	mova div_table_inv,r0
	bt/s div_by_1_neg
	mov.l @(r0,r1),r1		! delay slot
	mova div_table_clz,r0
	dmulu.l r1,r4
	mov.b @(r0,r5),r1
	mov.l @r15+,r5
	sts mach,r0
	/* clrt */
	addc r4,r0
	mov.l @r15+,r4
	rotcr r0
	shld r1,r0
	rts
	neg r0,r0			! delay slot: negate quotient
pos_divisor:
	mov.l r5,@-r15
	bt/s pos_result			! dividend also >= 0 ?
	cmp/hi r1,r5			! delay slot
	neg r4,r4			! dividend < 0 => negative result
neg_result:
	extu.w r5,r0
	bf div_le128_neg
	cmp/eq r5,r0
	mov r4,r0
	shlr8 r0
	bf/s div_ge64k_neg
	cmp/hi r0,r5			! delay slot
	div0u
	mov.l zero_l,r1
	shll16 r5
	div1 r5,r0
	mov.l r1,@-r15
	.rept 7
	div1 r5,r0
	.endr
	mov.b r0,@(L_MSWLSB,r15)
	xtrct r4,r0
	swap.w r0,r0
	.rept 8
	div1 r5,r0
	.endr
	mov.b r0,@(L_LSWMSB,r15)
div_ge64k_neg_end:
	.rept 8
	div1 r5,r0
	.endr
	mov.l @r15+,r4 ! zero-extension and swap using LS unit.
	extu.b r0,r1
	mov.l @r15+,r5
	or r4,r1
div_r8_neg_end:
	mov.l @r15+,r4
	rotcl r1
	rts
	neg r1,r0			! delay slot: negate quotient
div_ge64k_neg:
	bt/s div_r8_neg
	div0u				! delay slot
	shll8 r5
	mov.l zero_l,r1
	.rept 6
	div1 r5,r0
	.endr
	mov.l r1,@-r15
	div1 r5,r0
	mov.w m256_w,r1
	div1 r5,r0
	mov.b r0,@(L_LSWMSB,r15)
	xor r4,r0
	and r1,r0
	bra div_ge64k_neg_end
	xor r4,r0			! delay slot
c128_w:
	.word 128			! boundary of the table-driven path
div_r8_neg:
	! negative result, 8-bit quotient.
	clrt
	shll16 r4
	mov r4,r1
	shll8 r1
	mov r5,r4
	.rept 7
	rotcl r1; div1 r5,r0
	.endr
	mov.l @r15+,r5
	rotcl r1
	bra div_r8_neg_end
	div1 r4,r0			! delay slot
m256_w:
	.word 0xff00			! mask for the high quotient byte
/* This table has been generated by divtab-sh4.c. */
/*
 * div_table_clz: one byte per divisor value 0..127.  The code above
 * loads this via mova/mov.b indexed by the divisor and feeds it to
 * shld as the final shift count after the reciprocal multiply
 * (negative count = right shift for shld).  Do not edit by hand -
 * regenerate with divtab-sh4.c.
 */
	.balign 4
div_table_clz:
	.byte	0
	.byte	1
	.byte	0
	.byte	-1
	.byte	-1
	.byte	-2
	.byte	-2
	.byte	-2
	.byte	-2
	.byte	-3
	.byte	-3
	.byte	-3
	.byte	-3
	.byte	-3
	.byte	-3
	.byte	-3
	.byte	-3
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-4
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-5
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
	.byte	-6
/* Lookup table translating positive divisor to index into table of
   normalized inverse. N.B. the '0' entry is also the last entry of the
   previous table, and causes an unaligned access for division by zero. */
div_table_ix:
	.byte	-6
	.byte	-128
	.byte	-128
	.byte	0
	.byte	-128
	.byte	-64
	.byte	0
	.byte	64
	.byte	-128
	.byte	-96
	.byte	-64
	.byte	-32
	.byte	0
	.byte	32
	.byte	64
	.byte	96
	.byte	-128
	.byte	-112
	.byte	-96
	.byte	-80
	.byte	-64
	.byte	-48
	.byte	-32
	.byte	-16
	.byte	0
	.byte	16
	.byte	32
	.byte	48
	.byte	64
	.byte	80
	.byte	96
	.byte	112
	.byte	-128
	.byte	-120
	.byte	-112
	.byte	-104
	.byte	-96
	.byte	-88
	.byte	-80
	.byte	-72
	.byte	-64
	.byte	-56
	.byte	-48
	.byte	-40
	.byte	-32
	.byte	-24
	.byte	-16
	.byte	-8
	.byte	0
	.byte	8
	.byte	16
	.byte	24
	.byte	32
	.byte	40
	.byte	48
	.byte	56
	.byte	64
	.byte	72
	.byte	80
	.byte	88
	.byte	96
	.byte	104
	.byte	112
	.byte	120
	.byte	-128
	.byte	-124
	.byte	-120
	.byte	-116
	.byte	-112
	.byte	-108
	.byte	-104
	.byte	-100
	.byte	-96
	.byte	-92
	.byte	-88
	.byte	-84
	.byte	-80
	.byte	-76
	.byte	-72
	.byte	-68
	.byte	-64
	.byte	-60
	.byte	-56
	.byte	-52
	.byte	-48
	.byte	-44
	.byte	-40
	.byte	-36
	.byte	-32
	.byte	-28
	.byte	-24
	.byte	-20
	.byte	-16
	.byte	-12
	.byte	-8
	.byte	-4
	.byte	0
	.byte	4
	.byte	8
	.byte	12
	.byte	16
	.byte	20
	.byte	24
	.byte	28
	.byte	32
	.byte	36
	.byte	40
	.byte	44
	.byte	48
	.byte	52
	.byte	56
	.byte	60
	.byte	64
	.byte	68
	.byte	72
	.byte	76
	.byte	80
	.byte	84
	.byte	88
	.byte	92
	.byte	96
	.byte	100
	.byte	104
	.byte	108
	.byte	112
	.byte	116
	.byte	120
	.byte	124
	.byte	-128
/* 1/64 .. 1/127, normalized. There is an implicit leading 1 in bit 32. */
	.balign 4
zero_l:
	! The first entry (0x0) doubles as the zero constant loaded with
	! "mov.l zero_l,r1" in the code above; the signed byte from
	! div_table_ix indexes into this table relative to div_table_inv.
	.long	0x0
	.long	0xF81F81F9
	.long	0xF07C1F08
	.long	0xE9131AC0
	.long	0xE1E1E1E2
	.long	0xDAE6076C
	.long	0xD41D41D5
	.long	0xCD856891
	.long	0xC71C71C8
	.long	0xC0E07039
	.long	0xBACF914D
	.long	0xB4E81B4F
	.long	0xAF286BCB
	.long	0xA98EF607
	.long	0xA41A41A5
	.long	0x9EC8E952
	.long	0x9999999A
	.long	0x948B0FCE
	.long	0x8F9C18FA
	.long	0x8ACB90F7
	.long	0x86186187
	.long	0x81818182
	.long	0x7D05F418
	.long	0x78A4C818
	.long	0x745D1746
	.long	0x702E05C1
	.long	0x6C16C16D
	.long	0x68168169
	.long	0x642C8591
	.long	0x60581606
	.long	0x5C9882BA
	.long	0x58ED2309
div_table_inv:
	.long	0x55555556
	.long	0x51D07EAF
	.long	0x4E5E0A73
	.long	0x4AFD6A06
	.long	0x47AE147B
	.long	0x446F8657
	.long	0x41414142
	.long	0x3E22CBCF
	.long	0x3B13B13C
	.long	0x38138139
	.long	0x3521CFB3
	.long	0x323E34A3
	.long	0x2F684BDB
	.long	0x2C9FB4D9
	.long	0x29E4129F
	.long	0x27350B89
	.long	0x24924925
	.long	0x21FB7813
	.long	0x1F7047DD
	.long	0x1CF06ADB
	.long	0x1A7B9612
	.long	0x18118119
	.long	0x15B1E5F8
	.long	0x135C8114
	.long	0x11111112
	.long	0xECF56BF
	.long	0xC9714FC
	.long	0xA6810A7
	.long	0x8421085
	.long	0x624DD30
	.long	0x4104105
	.long	0x2040811
	/* maximum error: 0.987342 scaled: 0.921875*/
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,918
|
arch/sh/lib/ashrsi3.S
|
/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
!
! __ashrsi3
!
! Entry:
!
! r4: Value to shift
! r5: Shifts
!
! Exit:
!
! r0: Result
!
! Destroys:
!
! (none)
!
! __ashrsi3_r0
!
! Entry:
!
! r4: Value to shift
! r0: Shifts
!
! Exit:
!
! r0: Result
!
! Destroys:
!
! (none)
	! Arithmetic shift right (see the Entry/Exit block above:
	! r4 = value, shift count in r5 for __ashrsi3 or r0 for
	! __ashrsi3_r0; result in r0).  The shift count is taken mod 32,
	! then used to index a byte table of label offsets; execution
	! jumps into a chain of single-bit "shar" steps so that a shift
	! by N falls through exactly N shar instructions (with 16- and
	! 24-bit shortcuts using shlr16/shlr8 + sign extension).
	.global	__ashrsi3
	.global	__ashrsi3_r0
	.align	2
__ashrsi3:
	mov	r5,r0
	.align	2
__ashrsi3_r0:
	and	#31,r0			! shift count mod 32
	mov.l	r4,@-r15
	mov	r0,r4
	mova	ashrsi3_table,r0	! r0 = &table (mova is PC-relative)
	mov.b	@(r0,r4),r4		! r4 = offset of handler for this count
	add	r4,r0
	jmp	@r0
	mov.l	@r15+,r0		! delay slot: r0 = value to shift
	.align	2
ashrsi3_table:
	! Byte offsets from the table base to each per-count entry point.
	.byte		ashrsi3_0-ashrsi3_table
	.byte		ashrsi3_1-ashrsi3_table
	.byte		ashrsi3_2-ashrsi3_table
	.byte		ashrsi3_3-ashrsi3_table
	.byte		ashrsi3_4-ashrsi3_table
	.byte		ashrsi3_5-ashrsi3_table
	.byte		ashrsi3_6-ashrsi3_table
	.byte		ashrsi3_7-ashrsi3_table
	.byte		ashrsi3_8-ashrsi3_table
	.byte		ashrsi3_9-ashrsi3_table
	.byte		ashrsi3_10-ashrsi3_table
	.byte		ashrsi3_11-ashrsi3_table
	.byte		ashrsi3_12-ashrsi3_table
	.byte		ashrsi3_13-ashrsi3_table
	.byte		ashrsi3_14-ashrsi3_table
	.byte		ashrsi3_15-ashrsi3_table
	.byte		ashrsi3_16-ashrsi3_table
	.byte		ashrsi3_17-ashrsi3_table
	.byte		ashrsi3_18-ashrsi3_table
	.byte		ashrsi3_19-ashrsi3_table
	.byte		ashrsi3_20-ashrsi3_table
	.byte		ashrsi3_21-ashrsi3_table
	.byte		ashrsi3_22-ashrsi3_table
	.byte		ashrsi3_23-ashrsi3_table
	.byte		ashrsi3_24-ashrsi3_table
	.byte		ashrsi3_25-ashrsi3_table
	.byte		ashrsi3_26-ashrsi3_table
	.byte		ashrsi3_27-ashrsi3_table
	.byte		ashrsi3_28-ashrsi3_table
	.byte		ashrsi3_29-ashrsi3_table
	.byte		ashrsi3_30-ashrsi3_table
	.byte		ashrsi3_31-ashrsi3_table
ashrsi3_31:
	! >>31: result is 0 or -1.  rotcl moves the sign bit into T,
	! subc r0,r0 then computes 0 - T across the whole register.
	rotcl	r0
	rts
	subc	r0,r0
ashrsi3_30:
	shar	r0
ashrsi3_29:
	shar	r0
ashrsi3_28:
	shar	r0
ashrsi3_27:
	shar	r0
ashrsi3_26:
	shar	r0
ashrsi3_25:
	shar	r0
ashrsi3_24:
	! >>24: byte-shift right then sign-extend the low byte.
	shlr16	r0
	shlr8	r0
	rts
	exts.b	r0,r0
ashrsi3_23:
	shar	r0
ashrsi3_22:
	shar	r0
ashrsi3_21:
	shar	r0
ashrsi3_20:
	shar	r0
ashrsi3_19:
	shar	r0
ashrsi3_18:
	shar	r0
ashrsi3_17:
	shar	r0
ashrsi3_16:
	! >>16: word-shift right then sign-extend the low word.
	shlr16	r0
	rts
	exts.w	r0,r0
ashrsi3_15:
	shar	r0
ashrsi3_14:
	shar	r0
ashrsi3_13:
	shar	r0
ashrsi3_12:
	shar	r0
ashrsi3_11:
	shar	r0
ashrsi3_10:
	shar	r0
ashrsi3_9:
	shar	r0
ashrsi3_8:
	shar	r0
ashrsi3_7:
	shar	r0
ashrsi3_6:
	shar	r0
ashrsi3_5:
	shar	r0
ashrsi3_4:
	shar	r0
ashrsi3_3:
	shar	r0
ashrsi3_2:
	shar	r0
ashrsi3_1:
	rts
	shar	r0			! delay slot: final shift
ashrsi3_0:
	rts
	nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,969
|
arch/sh/lib/ashlsi3.S
|
/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
!
! __ashlsi3
!
! Entry:
!
! r4: Value to shift
! r5: Shifts
!
! Exit:
!
! r0: Result
!
! Destroys:
!
! (none)
!
! __ashlsi3_r0
!
! Entry:
!
! r4: Value to shift
! r0: Shifts
!
! Exit:
!
! r0: Result
!
! Destroys:
!
! (none)
	! Logical/arithmetic shift left (see the Entry/Exit block above:
	! r4 = value, shift count in r5 for __ashlsi3 or r0 for
	! __ashlsi3_r0; result in r0).  The count is taken mod 32 and
	! dispatched through a byte table of label offsets; each target
	! composes the shift from shll (1), shll2 (2), shll8 (8) and
	! shll16 (16) steps, so e.g. shift-by-13 is shll2+shll2+shll+shll8.
	.global	__ashlsi3
	.global	__ashlsi3_r0
	.align	2
__ashlsi3:
	mov	r5,r0
	.align	2
__ashlsi3_r0:
	and	#31,r0			! shift count mod 32
	mov.l	r4,@-r15
	mov	r0,r4
	mova	ashlsi3_table,r0	! r0 = &table (mova is PC-relative)
	mov.b	@(r0,r4),r4		! r4 = offset of handler for this count
	add	r4,r0
	jmp	@r0
	mov.l	@r15+,r0		! delay slot: r0 = value to shift
	.align	2
ashlsi3_table:
	! Byte offsets from the table base to each per-count entry point.
	.byte		ashlsi3_0-ashlsi3_table
	.byte		ashlsi3_1-ashlsi3_table
	.byte		ashlsi3_2-ashlsi3_table
	.byte		ashlsi3_3-ashlsi3_table
	.byte		ashlsi3_4-ashlsi3_table
	.byte		ashlsi3_5-ashlsi3_table
	.byte		ashlsi3_6-ashlsi3_table
	.byte		ashlsi3_7-ashlsi3_table
	.byte		ashlsi3_8-ashlsi3_table
	.byte		ashlsi3_9-ashlsi3_table
	.byte		ashlsi3_10-ashlsi3_table
	.byte		ashlsi3_11-ashlsi3_table
	.byte		ashlsi3_12-ashlsi3_table
	.byte		ashlsi3_13-ashlsi3_table
	.byte		ashlsi3_14-ashlsi3_table
	.byte		ashlsi3_15-ashlsi3_table
	.byte		ashlsi3_16-ashlsi3_table
	.byte		ashlsi3_17-ashlsi3_table
	.byte		ashlsi3_18-ashlsi3_table
	.byte		ashlsi3_19-ashlsi3_table
	.byte		ashlsi3_20-ashlsi3_table
	.byte		ashlsi3_21-ashlsi3_table
	.byte		ashlsi3_22-ashlsi3_table
	.byte		ashlsi3_23-ashlsi3_table
	.byte		ashlsi3_24-ashlsi3_table
	.byte		ashlsi3_25-ashlsi3_table
	.byte		ashlsi3_26-ashlsi3_table
	.byte		ashlsi3_27-ashlsi3_table
	.byte		ashlsi3_28-ashlsi3_table
	.byte		ashlsi3_29-ashlsi3_table
	.byte		ashlsi3_30-ashlsi3_table
	.byte		ashlsi3_31-ashlsi3_table
	! Even counts 2/4/6 fall through accumulating shll2 steps.
ashlsi3_6:
	shll2	r0
ashlsi3_4:
	shll2	r0
ashlsi3_2:
	rts
	shll2	r0
	! Odd counts 1/3/5/7: shll2 chain plus one final shll.
ashlsi3_7:
	shll2	r0
ashlsi3_5:
	shll2	r0
ashlsi3_3:
	shll2	r0
ashlsi3_1:
	rts
	shll	r0
	! Counts 8/10/12/14: shll2 chain plus shll8.
ashlsi3_14:
	shll2	r0
ashlsi3_12:
	shll2	r0
ashlsi3_10:
	shll2	r0
ashlsi3_8:
	rts
	shll8	r0
	! Counts 9/11/13/15: shll2 chain plus shll8 plus shll.
ashlsi3_15:
	shll2	r0
ashlsi3_13:
	shll2	r0
ashlsi3_11:
	shll2	r0
ashlsi3_9:
	shll8	r0
	rts
	shll	r0
	! Counts 16/18/20/22: shll2 chain plus shll16.
ashlsi3_22:
	shll2	r0
ashlsi3_20:
	shll2	r0
ashlsi3_18:
	shll2	r0
ashlsi3_16:
	rts
	shll16	r0
	! Counts 17/19/21/23: shll2 chain plus shll16 plus shll.
ashlsi3_23:
	shll2	r0
ashlsi3_21:
	shll2	r0
ashlsi3_19:
	shll2	r0
ashlsi3_17:
	shll16	r0
	rts
	shll	r0
	! Counts 24/26/28/30: shll2 chain plus shll16+shll8.
ashlsi3_30:
	shll2	r0
ashlsi3_28:
	shll2	r0
ashlsi3_26:
	shll2	r0
ashlsi3_24:
	shll16	r0
	rts
	shll8	r0
	! Counts 25/27/29/31: shll2 chain plus shll16+shll8+shll.
ashlsi3_31:
	shll2	r0
ashlsi3_29:
	shll2	r0
ashlsi3_27:
	shll2	r0
ashlsi3_25:
	shll16	r0
	shll8	r0
	rts
	shll	r0
ashlsi3_0:
	rts
	nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,689
|
arch/sh/lib/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: memcpy.S,v 1.3 2001/07/27 11:50:52 gniibe Exp $
*
* "memcpy" implementation of SuperH
*
* Copyright (C) 1999 Niibe Yutaka
*
*/
/*
* void *memcpy(void *dst, const void *src, size_t n);
* No overlap between the memory of DST and of SRC are assumed.
*/
#include <linux/linkage.h>
/*
 * memcpy for SuperH: r4 = dst, r5 = src, r6 = n (SH ABI), returns
 * dst in r0.  The copy runs BACKWARDS: r0 starts at dst+n and is
 * pre-decremented down to dst.  r5 is rebased to hold (src - dst),
 * so @(r0,r5) always addresses the source byte matching @r0.
 * Copies of more than 12 bytes are dispatched on (src - dst) & 3 to
 * one of four alignment cases; smaller copies go byte-by-byte.
 * Delayed branches (bt/s, bf/s, bra, rts) execute the following
 * instruction in the delay slot.
 */
ENTRY(memcpy)
	tst	r6,r6
	bt/s	9f		! if n=0, do nothing
	mov	r4,r0
	sub	r4,r5		! From here, r5 has the distance to r0
	add	r6,r0		! From here, r0 points the end of copying point
	mov	#12,r1
	cmp/gt	r6,r1
	bt/s	7f		! if it's too small, copy a byte at once
	add	#-1,r5		! delay slot: bias r5 for the byte loop
	add	#1,r5		! not taken: undo the bias
	! From here, r6 is free
	!
	!      r4   -->  [ ...  ] DST             [ ...  ] SRC
	!	         [ ...  ]                 [ ...  ]
	!	           :                        :
	!      r0   -->  [ ...  ]    r0+r5 -->  [ ...  ]
	!
	!
	! Dispatch on (src - dst) & 3 through the jump table below.
	mov	r5,r1
	mov	#3,r2
	and	r2,r1
	shll2	r1
	mov	r0,r3		! Save the value on R0 to R3
	mova	jmptable,r0
	add	r1,r0
	mov.l	@r0,r1
	jmp	@r1
	mov	r3,r0		! and back to R0
	.balign	4
jmptable:
	.long	case0
	.long	case1
	.long	case2
	.long	case3
	! copy a byte at once
7:	mov	r4,r2
	add	#1,r2		! r2 = dst + 1 = loop stop address
8:
	cmp/hi	r2,r0
	mov.b	@(r0,r5),r1
	bt/s	8b		! while (r0>r2)
	mov.b	r1,@-r0		! delay slot: store and pre-decrement
9:
	rts
	nop
case0:
	!
	!	GHIJ KLMN OPQR -->  GHIJ KLMN OPQR
	!
	! src and dst share the same alignment mod 4.
	! First, align to long word boundary
	mov	r0,r3
	and	r2,r3
	tst	r3,r3
	bt/s	2f
	add	#-4,r5		! delay slot: r5 bias for long-word loads
	add	#3,r5		! byte loop uses byte bias instead
1:	dt	r3
	mov.b	@(r0,r5),r1
	bf/s	1b
	mov.b	r1,@-r0
	!
	add	#-3,r5		! back to long-word bias
2:	! Second, copy a long word at once
	mov	r4,r2
	add	#7,r2		! stop when fewer than 4 bytes remain
3:	mov.l	@(r0,r5),r1
	cmp/hi	r2,r0
	bt/s	3b
	mov.l	r1,@-r0
	!
	! Third, copy a byte at once, if necessary
	cmp/eq	r4,r0
	bt/s	9b
	add	#3,r5		! delay slot: byte bias for tail loop
	bra	8b
	add	#-6,r2		! delay slot: r2 back to dst + 1
case1:
	!
	!	GHIJ KLMN OPQR -->  ...G HIJK LMNO PQR.
	!
	! (src - dst) % 4 == 1: combine two misaligned long words per store.
	! First, align to long word boundary
	mov	r0,r3
	and	r2,r3
	tst	r3,r3
	bt/s	2f
	add	#-1,r5		! delay slot: byte bias
1:	dt	r3
	mov.b	@(r0,r5),r1
	bf/s	1b
	mov.b	r1,@-r0
	!
2:	! Second, read a long word and write a long word at once
	mov.l	@(r0,r5),r1
	add	#-4,r5		! switch to long-word bias
	mov	r4,r2
	add	#7,r2		! stop when fewer than 4 bytes remain
	!
#ifdef __LITTLE_ENDIAN__
3:	mov	r1,r3		! RQPO
	shll16	r3
	shll8	r3		! Oxxx
	mov.l	@(r0,r5),r1	! NMLK
	mov	r1,r6
	shlr8	r6		! xNML
	or	r6,r3		! ONML
	cmp/hi	r2,r0
	bt/s	3b
	mov.l	r3,@-r0
#else
3:	mov	r1,r3		! OPQR
	shlr16	r3
	shlr8	r3		! xxxO
	mov.l	@(r0,r5),r1	! KLMN
	mov	r1,r6
	shll8	r6		! LMNx
	or	r6,r3		! LMNO
	cmp/hi	r2,r0
	bt/s	3b
	mov.l	r3,@-r0
#endif
	!
	! Third, copy a byte at once, if necessary
	cmp/eq	r4,r0
	bt/s	9b
	add	#4,r5		! delay slot: back to byte bias
	bra	8b
	add	#-6,r2		! delay slot: r2 back to dst + 1
case2:
	!
	!	GHIJ KLMN OPQR -->  ..GH IJKL MNOP QR..
	!
	! (src - dst) % 4 == 2: copy 16-bit words.
	! First, align to word boundary
	tst	#1,r0
	bt/s	2f
	add	#-1,r5		! delay slot: byte bias
	mov.b	@(r0,r5),r1
	mov.b	r1,@-r0
	!
2:	! Second, read a word and write a word at once
	add	#-1,r5		! word bias
	mov	r4,r2
	add	#3,r2		! stop when fewer than 2 bytes remain
	!
3:	mov.w	@(r0,r5),r1
	cmp/hi	r2,r0
	bt/s	3b
	mov.w	r1,@-r0
	!
	! Third, copy a byte at once, if necessary
	cmp/eq	r4,r0
	bt/s	9b
	add	#1,r5		! delay slot: back to byte bias
	mov.b	@(r0,r5),r1
	rts
	mov.b	r1,@-r0		! delay slot: final byte
case3:
	!
	!	GHIJ KLMN OPQR -->  .GHI JKLM NOPQ R...
	!
	! (src - dst) % 4 == 3: combine two misaligned long words per store.
	! First, align to long word boundary
	mov	r0,r3
	and	r2,r3
	tst	r3,r3
	bt/s	2f
	add	#-1,r5		! delay slot: byte bias
1:	dt	r3
	mov.b	@(r0,r5),r1
	bf/s	1b
	mov.b	r1,@-r0
	!
2:	! Second, read a long word and write a long word at once
	add	#-2,r5
	mov.l	@(r0,r5),r1
	add	#-4,r5		! long-word bias
	mov	r4,r2
	add	#7,r2		! stop when fewer than 4 bytes remain
	!
#ifdef __LITTLE_ENDIAN__
3:	mov	r1,r3		! RQPO
	shll8	r3		! QPOx
	mov.l	@(r0,r5),r1	! NMLK
	mov	r1,r6
	shlr16	r6
	shlr8	r6		! xxxN
	or	r6,r3		! QPON
	cmp/hi	r2,r0
	bt/s	3b
	mov.l	r3,@-r0
#else
3:	mov	r1,r3		! OPQR
	shlr8	r3		! xOPQ
	mov.l	@(r0,r5),r1	! KLMN
	mov	r1,r6
	shll16	r6
	shll8	r6		! Nxxx
	or	r6,r3		! NOPQ
	cmp/hi	r2,r0
	bt/s	3b
	mov.l	r3,@-r0
#endif
	!
	! Third, copy a byte at once, if necessary
	cmp/eq	r4,r0
	bt/s	9b
	add	#6,r5		! delay slot: back to byte bias
	bra	8b
	add	#-6,r2		! delay slot: r2 back to dst + 1
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,117
|
arch/sh/lib/udivsi3.S
|
/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
/*
 * unsigned int __udivsi3(unsigned int n, unsigned int d)
 *
 * Plain shift-and-subtract division via the SH div0u/div1 step
 * instructions.  SH ABI: dividend in r4, divisor in r5, quotient
 * returned in r0.  div8/div7/divx4 are internal subroutines called
 * with bsr (hence pr is saved on entry); bsr/rts are delayed
 * branches, so the instruction after each executes in the delay slot.
 */
	.balign 4
	.global	__udivsi3
	.type	__udivsi3, @function
div8:
	! Perform 8 div1 steps (falls through into div7 after the first).
	div1 r5,r4
div7:
	! Perform 7 div1 steps; the 7th runs in the rts delay slot.
	div1 r5,r4; div1 r5,r4; div1 r5,r4
	div1 r5,r4; div1 r5,r4; div1 r5,r4; rts; div1 r5,r4
divx4:
	! 4 div1 steps, shifting each quotient bit into r0 via rotcl;
	! the caller's "rotcl r0" in the bsr delay slot completes the 4th.
	div1 r5,r4; rotcl r0
	div1 r5,r4; rotcl r0
	div1 r5,r4; rotcl r0
	rts; div1 r5,r4
__udivsi3:
	sts.l pr,@-r15			! bsr below clobbers pr
	extu.w r5,r0
	cmp/eq r5,r0			! divisor fits in 16 bits ?
	bf/s large_divisor
	div0u				! delay slot: init divider state
	! 16-bit divisor: 16+16 quotient bits via the div8/div7 chains.
	swap.w r4,r0
	shlr16 r4
	bsr div8
	shll16 r5			! delay slot
	bsr div7
	div1 r5,r4			! delay slot
	xtrct r4,r0
	xtrct r0,r4
	bsr div8
	swap.w r4,r4			! delay slot
	bsr div7
	div1 r5,r4			! delay slot
	lds.l @r15+,pr
	xtrct r4,r0
	swap.w r0,r0
	rotcl r0
	rts
	shlr16 r5			! delay slot: restore divisor register
large_divisor:
	! Divisor >= 64K: 32 rotcl/div1 steps in four divx4 groups.
	mov #0,r0
	xtrct r4,r0
	xtrct r0,r4
	bsr divx4
	rotcl r0			! delay slot
	bsr divx4
	rotcl r0			! delay slot
	bsr divx4
	rotcl r0			! delay slot
	bsr divx4
	rotcl r0			! delay slot
	lds.l @r15+,pr
	rts
	rotcl r0			! delay slot: last quotient bit
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,514
|
arch/sh/lib/checksum.S
|
/* $Id: checksum.S,v 1.10 2001/07/06 13:11:32 gniibe Exp $
*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Pentium Pro/II routines:
* Alexander Kjeldaas <astor@guardian.no>
* Finn Arne Gangstad <finnag@guardian.no>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
* handling.
* Andi Kleen, add zeroing on error
* converted to pure assembler
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/errno.h>
#include <linux/linkage.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
/*
* asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
*/
.text
/*
 * __wsum csum_partial(const void *buf, int len, __wsum sum)
 * SH ABI: r4 = buf, r5 = len, r6 = running sum; result in r0.
 * One's-complement 32-bit sum: carries are folded back in with
 * addc / "addc #0" sequences, and an odd starting address is
 * compensated by byte-rotating the sum before and after.
 */
ENTRY(csum_partial)
	  /*
	   * Experiments with Ethernet and SLIP connections show that buff
	   * is aligned on either a 2-byte or 4-byte boundary.  We get at
	   * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
	   * Fortunately, it is easy to convert 2-byte alignment to 4-byte
	   * alignment for the unrolled loop.
	   */
	mov	r4, r0
	tst	#3, r0		! Check alignment.
	bt/s	2f		! Jump if alignment is ok.
	mov	r4, r7		! Keep a copy to check for alignment
	!
	tst	#1, r0		! Check alignment.
	bt	21f		! Jump if alignment is boundary of 2bytes.
	! buf is odd
	tst	r5, r5
	add	#-1, r5
	bt	9f		! len == 0: nothing to do
	mov.b	@r4+, r0
	extu.b	r0, r0
	addc	r0, r6		! t=0 from previous tst
	mov	r6, r0
	shll8	r6
	shlr16	r0
	shlr8	r0
	or	r0, r6		! byte-rotate sum to account for odd start
	mov	r4, r0
	tst	#2, r0
	bt	2f
21:
	! buf is 2 byte aligned (len could be 0)
	add	#-2, r5		! Alignment uses up two bytes.
	cmp/pz	r5		!
	bt/s	1f		! Jump if we had at least two bytes.
	clrt
	bra	6f
	add	#2, r5		! r5 was < 2.	Deal with it.
1:
	mov.w	@r4+, r0
	extu.w	r0, r0
	addc	r0, r6
	bf	2f
	add	#1, r6		! fold carry from the addc
2:
	! buf is 4 byte aligned (len could be 0)
	mov	r5, r1
	mov	#-5, r0
	shld	r0, r1		! r1 = len / 32 (count of unrolled iterations)
	tst	r1, r1
	bt/s	4f		! if it's =0, go to 4f
	clrt
	.align	2
3:
	! 32 bytes per iteration; addc chains accumulate with carry,
	! movt/cmp/eq re-seed T across the loop-control instructions.
	mov.l	@r4+, r0
	mov.l	@r4+, r2
	mov.l	@r4+, r3
	addc	r0, r6
	mov.l	@r4+, r0
	addc	r2, r6
	mov.l	@r4+, r2
	addc	r3, r6
	mov.l	@r4+, r3
	addc	r0, r6
	mov.l	@r4+, r0
	addc	r2, r6
	mov.l	@r4+, r2
	addc	r3, r6
	addc	r0, r6
	addc	r2, r6
	movt	r0		! preserve carry across dt
	dt	r1
	bf/s	3b
	cmp/eq	#1, r0		! delay slot: restore carry into T
	! here, we know r1==0
	addc	r1, r6		! add carry to r6
4:
	mov	r5, r0
	and	#0x1c, r0	! remaining long words (bytes 4..28)
	tst	r0, r0
	bt	6f
	! 4 bytes or more remaining
	mov	r0, r1
	shlr2	r1
	mov	#0, r2
5:
	addc	r2, r6
	mov.l	@r4+, r2
	movt	r0
	dt	r1
	bf/s	5b
	cmp/eq	#1, r0		! delay slot: restore carry into T
	addc	r2, r6
	addc	r1, r6		! r1==0 here, so it means add carry-bit
6:
	! 3 bytes or less remaining
	mov	#3, r0
	and	r0, r5
	tst	r5, r5
	bt	9f		! if it's =0 go to 9f
	mov	#2, r1
	cmp/hs	r1, r5
	bf	7f		! only one byte left
	mov.w	@r4+, r0
	extu.w	r0, r0
	cmp/eq	r1, r5
	bt/s	8f
	clrt
	shll16	r0
	addc	r0, r6
7:
	mov.b	@r4+, r0
	extu.b	r0, r0
#ifndef	__LITTLE_ENDIAN__
	shll8	r0
#endif
8:
	addc	r0, r6
	mov	#0, r0
	addc	r0, r6		! fold final carry
9:
	! Check if the buffer was misaligned, if so realign sum
	mov	r7, r0
	tst	#1, r0
	bt	10f
	mov	r6, r0
	shll8	r6
	shlr16	r0
	shlr8	r0
	or	r0, r6		! undo the byte rotation done at entry
10:
	rts
	mov	r6, r0		! delay slot: return sum
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
int sum, int *src_err_ptr, int *dst_err_ptr)
*/
/*
* Copy from ds while checksumming, otherwise like csum_partial
*
* The macros SRC and DST specify the type of access for the instruction.
* thus we can call a custom exception handler for all access types.
*
* FIXME: could someone double-check whether I haven't mixed up some SRC and
* DST definitions? It's damn hard to trigger all cases. I hope I got
* them all but there's no guarantee.
*/
/*
 * SRC()/DST() wrap each faulting load/store with an __ex_table entry
 * so that a fault during the copy jumps to the matching fixup label
 * (6001f for source faults, 6002f for destination faults) instead of
 * oopsing.
 */
#define SRC(...)			\
	9999: __VA_ARGS__ ;		\
	.section __ex_table, "a";	\
	.long 9999b, 6001f	;	\
	.previous
#define DST(...)			\
	9999: __VA_ARGS__ ;		\
	.section __ex_table, "a";	\
	.long 9999b, 6002f	;	\
	.previous
/*
 * csum_partial_copy_generic: copy LEN bytes from SRC (r4) to DST (r5)
 * while accumulating the one's-complement sum into SUM (r7); returns
 * the sum in r0.  The error pointers live on the caller's stack (see
 * the register/stack layout comment above).  r5 and r6 are pushed on
 * entry so the fixup code can recover dst and len; they are popped by
 * the "add #8,r15" before the final rts.
 */
ENTRY(csum_partial_copy_generic)
	mov.l	r5,@-r15
	mov.l	r6,@-r15
	mov	#3,r0		! Check src and dest are equally aligned
	mov	r4,r1
	and	r0,r1
	and	r5,r0
	cmp/eq	r1,r0
	bf	3f		! Different alignments, use slow version
	tst	#1,r0		! Check dest word aligned
	bf	3f		! If not, do it the slow way
	mov	#2,r0
	tst	r0,r5		! Check dest alignment.
	bt	2f		! Jump if alignment is ok.
	add	#-2,r6		! Alignment uses up two bytes.
	cmp/pz	r6		! Jump if we had at least two bytes.
	bt/s	1f
	clrt
	add	#2,r6		! r6 was < 2.	Deal with it.
	bra	4f
	mov	r6,r2
3:	! Handle different src and dest alignments.
	! This is not common, so simple byte by byte copy will do.
	mov	r6,r2
	shlr	r6		! r6 = byte pairs to copy
	tst	r6,r6
	bt	4f
	clrt
	.align	2
5:
SRC(	mov.b	@r4+,r1 	)
SRC(	mov.b	@r4+,r0	)
	extu.b	r1,r1
DST(	mov.b	r1,@r5	)
DST(	mov.b	r0,@(1,r5)	)
	extu.b	r0,r0
	add	#2,r5
	! merge the two bytes into one 16-bit quantity for the sum,
	! in memory (network) order
#ifdef	__LITTLE_ENDIAN__
	shll8	r0
#else
	shll8	r1
#endif
	or	r1,r0
	addc	r0,r7
	movt	r0		! preserve carry across dt
	dt	r6
	bf/s	5b
	cmp/eq	#1,r0		! delay slot: restore carry into T
	mov	#0,r0
	addc	r0, r7		! fold final carry
	mov	r2, r0
	tst	#1, r0
	bt	7f		! even length: done
	bra	5f		! odd length: one trailing byte
	clrt
	! src and dest equally aligned, but to a two byte boundary.
	! Handle first two bytes as a special case
	.align	2
1:
SRC(	mov.w	@r4+,r0	)
DST(	mov.w	r0,@r5	)
	add	#2,r5
	extu.w	r0,r0
	addc	r0,r7
	mov	#0,r0
	addc	r0,r7		! fold carry
2:
	mov	r6,r2
	mov	#-5,r0
	shld	r0,r6		! r6 = len / 32 (unrolled iterations)
	tst	r6,r6
	bt/s	2f
	clrt
	.align	2
1:
	! 32 bytes per iteration: checksum and copy interleaved.
SRC(	mov.l	@r4+,r0	)
SRC(	mov.l	@r4+,r1	)
	addc	r0,r7
DST(	mov.l	r0,@r5	)
DST(	mov.l	r1,@(4,r5)	)
	addc	r1,r7
SRC(	mov.l	@r4+,r0	)
SRC(	mov.l	@r4+,r1	)
	addc	r0,r7
DST(	mov.l	r0,@(8,r5)	)
DST(	mov.l	r1,@(12,r5)	)
	addc	r1,r7
SRC(	mov.l	@r4+,r0 	)
SRC(	mov.l	@r4+,r1	)
	addc	r0,r7
DST(	mov.l	r0,@(16,r5)	)
DST(	mov.l	r1,@(20,r5)	)
	addc	r1,r7
SRC(	mov.l	@r4+,r0	)
SRC(	mov.l	@r4+,r1	)
	addc	r0,r7
DST(	mov.l	r0,@(24,r5)	)
DST(	mov.l	r1,@(28,r5)	)
	addc	r1,r7
	add	#32,r5
	movt	r0		! preserve carry across dt
	dt	r6
	bf/s	1b
	cmp/eq	#1,r0		! delay slot: restore carry into T
	mov	#0,r0
	addc	r0,r7		! fold carry
2:	mov	r2,r6
	mov	#0x1c,r0
	and	r0,r6		! remaining long words (bytes)
	cmp/pl	r6
	bf/s	4f
	clrt
	shlr2	r6
3:
SRC(	mov.l	@r4+,r0	)
	addc	r0,r7
DST(	mov.l	r0,@r5	)
	add	#4,r5
	movt	r0		! preserve carry across dt
	dt	r6
	bf/s	3b
	cmp/eq	#1,r0		! delay slot: restore carry into T
	mov	#0,r0
	addc	r0,r7		! fold carry
4:	mov	r2,r6
	mov	#3,r0
	and	r0,r6		! 0..3 tail bytes
	cmp/pl	r6
	bf	7f
	mov	#2,r1
	cmp/hs	r1,r6
	bf	5f		! single byte left
SRC(	mov.w	@r4+,r0	)
DST(	mov.w	r0,@r5	)
	extu.w	r0,r0
	add	#2,r5
	cmp/eq	r1,r6
	bt/s	6f
	clrt
	shll16	r0
	addc	r0,r7
5:
SRC(	mov.b	@r4+,r0	)
DST(	mov.b	r0,@r5	)
	extu.b	r0,r0
#ifndef	__LITTLE_ENDIAN__
	shll8	r0
#endif
6:	addc	r0,r7
	mov	#0,r0
	addc	r0,r7		! fold final carry
7:
5000:
# Exception handler:
.section .fixup, "ax"
6001:
	! Source fault: report -EFAULT through *src_err_ptr and zero the
	! whole destination rather than track how far we got.
	mov.l	@(8,r15),r0		! src_err_ptr
	mov	#-EFAULT,r1
	mov.l	r1,@r0
	! zero the complete destination - computing the rest
	! is too much work
	mov.l	@(4,r15),r5	! dst
	mov.l	@r15,r6		! len
	mov	#0,r7
1:	mov.b	r7,@r5
	dt	r6
	bf/s	1b
	add	#1,r5
	mov.l	8000f,r0
	jmp	@r0			! resume at 5000 (common exit)
	nop
	.align	2
8000:	.long	5000b
6002:
	! Destination fault: report -EFAULT through *dst_err_ptr.
	mov.l	@(12,r15),r0		! dst_err_ptr
	mov	#-EFAULT,r1
	mov.l	r1,@r0
	mov.l	8001f,r0
	jmp	@r0			! resume at 5000 (common exit)
	nop
	.align	2
8001:	.long	5000b
.previous
	add	#8,r15			! drop saved r5/r6
	rts
	mov	r7,r0			! delay slot: return checksum
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,969
|
arch/sh/lib/lshrsi3.S
|
/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
!
! __lshrsi3
!
! Entry:
!
! r4: Value to shift
! r5: Shifts
!
! Exit:
!
! r0: Result
!
! Destroys:
!
! (none)
!
! __lshrsi3_r0
!
! Entry:
!
! r0: Value to shift
! r5: Shifts
!
! Exit:
!
! r0: Result
!
! Destroys:
!
! (none)
!
	.global	__lshrsi3
	.global	__lshrsi3_r0
	.align	2
! Logical right shift: dispatch through a byte offset table indexed by
! the shift count (masked to 0..31), landing in a fall-through chain of
! shlr2/shlr8/shlr16 instructions that performs exactly that many shifts.
__lshrsi3:
	mov	r5,r0		! entry with value in r4, count in r5
	.align	2
__lshrsi3_r0:
	and	#31,r0		! shift count mod 32
	mov.l	r4,@-r15	! r4 is used as scratch; preserve it
	mov	r0,r4
	mova	lshrsi3_table,r0
	mov.b	@(r0,r4),r4	! fetch the entry point's byte offset
	add	r4,r0
	jmp	@r0		! computed jump into the shift chain
	mov.l	@r15+,r0	! delay slot: r0 = value, restore r4
	.align	2
lshrsi3_table:
	.byte		lshrsi3_0-lshrsi3_table
	.byte		lshrsi3_1-lshrsi3_table
	.byte		lshrsi3_2-lshrsi3_table
	.byte		lshrsi3_3-lshrsi3_table
	.byte		lshrsi3_4-lshrsi3_table
	.byte		lshrsi3_5-lshrsi3_table
	.byte		lshrsi3_6-lshrsi3_table
	.byte		lshrsi3_7-lshrsi3_table
	.byte		lshrsi3_8-lshrsi3_table
	.byte		lshrsi3_9-lshrsi3_table
	.byte		lshrsi3_10-lshrsi3_table
	.byte		lshrsi3_11-lshrsi3_table
	.byte		lshrsi3_12-lshrsi3_table
	.byte		lshrsi3_13-lshrsi3_table
	.byte		lshrsi3_14-lshrsi3_table
	.byte		lshrsi3_15-lshrsi3_table
	.byte		lshrsi3_16-lshrsi3_table
	.byte		lshrsi3_17-lshrsi3_table
	.byte		lshrsi3_18-lshrsi3_table
	.byte		lshrsi3_19-lshrsi3_table
	.byte		lshrsi3_20-lshrsi3_table
	.byte		lshrsi3_21-lshrsi3_table
	.byte		lshrsi3_22-lshrsi3_table
	.byte		lshrsi3_23-lshrsi3_table
	.byte		lshrsi3_24-lshrsi3_table
	.byte		lshrsi3_25-lshrsi3_table
	.byte		lshrsi3_26-lshrsi3_table
	.byte		lshrsi3_27-lshrsi3_table
	.byte		lshrsi3_28-lshrsi3_table
	.byte		lshrsi3_29-lshrsi3_table
	.byte		lshrsi3_30-lshrsi3_table
	.byte		lshrsi3_31-lshrsi3_table
! Shifts 2/4/6: fall through, two bits per shlr2
lshrsi3_6:
	shlr2	r0
lshrsi3_4:
	shlr2	r0
lshrsi3_2:
	rts
	shlr2	r0
! Shifts 1/3/5/7: odd counts end with a single shlr in the delay slot
lshrsi3_7:
	shlr2	r0
lshrsi3_5:
	shlr2	r0
lshrsi3_3:
	shlr2	r0
lshrsi3_1:
	rts
	shlr	r0
! Shifts 8/10/12/14: end with shlr8
lshrsi3_14:
	shlr2	r0
lshrsi3_12:
	shlr2	r0
lshrsi3_10:
	shlr2	r0
lshrsi3_8:
	rts
	shlr8	r0
! Shifts 9/11/13/15: shlr8 plus a final shlr
lshrsi3_15:
	shlr2	r0
lshrsi3_13:
	shlr2	r0
lshrsi3_11:
	shlr2	r0
lshrsi3_9:
	shlr8	r0
	rts
	shlr	r0
! Shifts 16/18/20/22: end with shlr16
lshrsi3_22:
	shlr2	r0
lshrsi3_20:
	shlr2	r0
lshrsi3_18:
	shlr2	r0
lshrsi3_16:
	rts
	shlr16	r0
! Shifts 17/19/21/23: shlr16 plus a final shlr
lshrsi3_23:
	shlr2	r0
lshrsi3_21:
	shlr2	r0
lshrsi3_19:
	shlr2	r0
lshrsi3_17:
	shlr16	r0
	rts
	shlr	r0
! Shifts 24/26/28/30: shlr16 then shlr8
lshrsi3_30:
	shlr2	r0
lshrsi3_28:
	shlr2	r0
lshrsi3_26:
	shlr2	r0
lshrsi3_24:
	shlr16	r0
	rts
	shlr8	r0
! Shifts 25/27/29/31: shlr16, shlr8, then a final shlr
lshrsi3_31:
	shlr2	r0
lshrsi3_29:
	shlr2	r0
lshrsi3_27:
	shlr2	r0
lshrsi3_25:
	shlr16	r0
	shlr8	r0
	rts
	shlr	r0
! Shift 0: return the value unchanged
lshrsi3_0:
	rts
	nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,213
|
arch/sh/lib/udiv_qrnnd.S
|
/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
/* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */
/* n1 < d, but n1 might be larger than d1. */
	.global	__udiv_qrnnd_16
	.balign	8
! 16-bit step of a double-word unsigned divide (GMP-style udiv_qrnnd
! helper).  Per the register notes above: r0 = n1, r4 = n0, r5 = d,
! r6 = d1; n1 < d on entry, but n1 may exceed d1 (handled at .Lots).
! Uses 16 div1 steps plus a mulu.w-based correction.
__udiv_qrnnd_16:
	div0u			! clear M/Q/T for the div1 sequence
	cmp/hi	r6,r0
	bt	.Lots		! n1 > d1: take the overflow-adjust path
	.rept	16
	div1	r6,r0		! 16 restoring-division steps
	.endr
	extu.w	r0,r1
	bt	0f
	add	r6,r0		! undo the last non-restoring subtract
0:	rotcl	r1		! shift the final quotient bit into r1
	mulu.w	r1,r5		! q * d, to correct against the low word
	xtrct	r4,r0
	swap.w	r0,r0
	sts	macl,r2
	cmp/hs	r2,r0
	sub	r2,r0		! remainder -= q*d (may underflow; T says so)
	bt	0f
	addc	r5,r0		! remainder too small: add d back, q -= 1
	add	#-1,r1
	bt	0f
1:	add	#-1,r1		! second correction step
	rts
	add	r5,r0
	.balign	8
.Lots:
	! n1 > d1: pre-subtract d and rejoin the correction path at 1b
	sub	r5,r0
	swap.w	r4,r1
	xtrct	r0,r1
	clrt
	mov	r1,r0
	addc	r5,r0
	mov	#-1,r1
	bf/s	1b
	shlr16	r1
0:	rts
	nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,048
|
arch/sh/lib/memmove.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: memmove.S,v 1.2 2001/07/27 11:51:09 gniibe Exp $
*
* "memmove" implementation of SuperH
*
* Copyright (C) 1999 Niibe Yutaka
*
*/
/*
* void *memmove(void *dst, const void *src, size_t n);
* The memory areas may overlap.
*/
#include <linux/linkage.h>
! void *memmove(void *dst /* r4 */, const void *src /* r5 */, size_t n /* r6 */)
! Forward (increasing-address) copy used when dst <= src; when dst > src
! it tail-jumps to memcpy, which copies in decreasing order, so the two
! directions together make overlapping moves safe.
ENTRY(memmove)
	! if dest > src, call memcpy (it copies in decreasing order)
	cmp/hi	r5,r4
	bf	1f
	mov.l	2f,r0
	jmp	@r0
	nop
	.balign 4
2:	.long	memcpy
1:
	sub	r5,r4		! From here, r4 has the distance to r0
	tst	r6,r6
	bt/s	9f		! if n=0, do nothing
	mov	r5,r0		! r0 walks the source; dest is always r0+r4
	add	r6,r5		! r5 = one past the end of the source
	mov	#12,r1
	cmp/gt	r6,r1
	bt/s	8f		! if it's too small, copy a byte at once
	add	#-1,r4
	add	#1,r4
	!
	! [ ... ] DST [ ... ] SRC
	! [ ... ] [ ... ]
	! : :
	! r0+r4--> [ ... ] r0 --> [ ... ]
	! : :
	! [ ... ] [ ... ]
	! r5 -->
	!
	! Dispatch on (dst - src) mod 4: each case knows the relative
	! misalignment of the two buffers and shifts accordingly.
	mov	r4,r1
	mov	#3,r2
	and	r2,r1
	shll2	r1
	mov	r0,r3		! Save the value on R0 to R3
	mova	jmptable,r0
	add	r1,r0
	mov.l	@r0,r1
	jmp	@r1
	mov	r3,r0		! and back to R0
	.balign	4
jmptable:
	.long	case0
	.long	case1
	.long	case2
	.long	case3
	! copy a byte at once
8:	mov.b	@r0+,r1
	cmp/hs	r5,r0
	bf/s	8b		! while (r0<r5)
	mov.b	r1,@(r0,r4)
	add	#1,r4
9:	! common exit: compute and return the original dst pointer
	add	r4,r0
	rts
	sub	r6,r0
case_none:
	bra	8b
	add	#-1,r4
case0:
	!
	! GHIJ KLMN OPQR --> GHIJ KLMN OPQR
	!
	! First, align to long word boundary
	mov	r0,r3
	and	r2,r3
	tst	r3,r3
	bt/s	2f
	add	#-1,r4
	mov	#4,r2
	sub	r3,r2
1:	dt	r2
	mov.b	@r0+,r1
	bf/s	1b
	mov.b	r1,@(r0,r4)
	!
2:	! Second, copy a long word at once
	add	#-3,r4
	add	#-3,r5
3:	mov.l	@r0+,r1
	cmp/hs	r5,r0
	bf/s	3b
	mov.l	r1,@(r0,r4)
	add	#3,r5
	!
	! Third, copy a byte at once, if necessary
	cmp/eq	r5,r0
	bt/s	9b
	add	#4,r4
	bra	8b
	add	#-1,r4
case3:
	!
	! GHIJ KLMN OPQR --> ...G HIJK LMNO PQR.
	!
	! First, align to long word boundary
	mov	r0,r3
	and	r2,r3
	tst	r3,r3
	bt/s	2f
	add	#-1,r4
	mov	#4,r2
	sub	r3,r2
1:	dt	r2
	mov.b	@r0+,r1
	bf/s	1b
	mov.b	r1,@(r0,r4)
	!
2:	! Second, read a long word and write a long word at once
	add	#-2,r4
	mov.l	@(r0,r4),r1	! prime r1 with the previous long word
	add	#-7,r5
	add	#-4,r4
	!
#ifdef __LITTLE_ENDIAN__
	shll8	r1
3:	mov	r1,r3		! JIHG
	shlr8	r3		! xJIH
	mov.l	@r0+,r1		! NMLK
	mov	r1,r2
	shll16	r2
	shll8	r2		! Kxxx
	or	r2,r3		! KJIH
	cmp/hs	r5,r0
	bf/s	3b
	mov.l	r3,@(r0,r4)
#else
	shlr8	r1
3:	mov	r1,r3		! GHIJ
	shll8	r3		! HIJx
	mov.l	@r0+,r1		! KLMN
	mov	r1,r2
	shlr16	r2
	shlr8	r2		! xxxK
	or	r2,r3		! HIJK
	cmp/hs	r5,r0
	bf/s	3b
	mov.l	r3,@(r0,r4)
#endif
	add	#7,r5
	!
	! Third, copy a byte at once, if necessary
	cmp/eq	r5,r0
	bt/s	9b
	add	#7,r4
	add	#-3,r0
	bra	8b
	add	#-1,r4
case2:
	!
	! GHIJ KLMN OPQR --> ..GH IJKL MNOP QR..
	!
	! First, align to word boundary
	tst	#1,r0
	bt/s	2f
	add	#-1,r4
	mov.b	@r0+,r1
	mov.b	r1,@(r0,r4)
	!
2:	! Second, read a word and write a word at once
	add	#-1,r4
	add	#-1,r5
	!
3:	mov.w	@r0+,r1
	cmp/hs	r5,r0
	bf/s	3b
	mov.w	r1,@(r0,r4)
	add	#1,r5
	!
	! Third, copy a byte at once, if necessary
	cmp/eq	r5,r0
	bt/s	9b
	add	#2,r4
	mov.b	@r0,r1
	mov.b	r1,@(r0,r4)
	bra	9b
	add	#1,r0
case1:
	!
	! GHIJ KLMN OPQR --> .GHI JKLM NOPQ R...
	!
	! First, align to long word boundary
	mov	r0,r3
	and	r2,r3
	tst	r3,r3
	bt/s	2f
	add	#-1,r4
	mov	#4,r2
	sub	r3,r2
1:	dt	r2
	mov.b	@r0+,r1
	bf/s	1b
	mov.b	r1,@(r0,r4)
	!
2:	! Second, read a long word and write a long word at once
	mov.l	@(r0,r4),r1	! prime r1 with the previous long word
	add	#-7,r5
	add	#-4,r4
	!
#ifdef __LITTLE_ENDIAN__
	shll16	r1
	shll8	r1
3:	mov	r1,r3		! JIHG
	shlr16	r3
	shlr8	r3		! xxxJ
	mov.l	@r0+,r1		! NMLK
	mov	r1,r2
	shll8	r2		! MLKx
	or	r2,r3		! MLKJ
	cmp/hs	r5,r0
	bf/s	3b
	mov.l	r3,@(r0,r4)
#else
	shlr16	r1
	shlr8	r1
3:	mov	r1,r3		! GHIJ
	shll16	r3
	shll8	r3		! Jxxx
	mov.l	@r0+,r1		! KLMN
	mov	r1,r2
	shlr8	r2		! xKLM
	or	r2,r3		! JKLM
	cmp/hs	r5,r0
	bf/s	3b		! while(r0<r5)
	mov.l	r3,@(r0,r4)
#endif
	add	#7,r5
	!
	! Third, copy a byte at once, if necessary
	cmp/eq	r5,r0
	bt/s	9b
	add	#5,r4
	add	#-3,r0
	bra	8b
	add	#-1,r4
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,414
|
arch/sh/lib/udivsi3_i4i-Os.S
|
/* Copyright (C) 2006 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
/* Moderately Space-optimized libgcc routines for the Renesas SH /
STMicroelectronics ST40 CPUs.
Contributed by J"orn Rennecke joern.rennecke@st.com. */
/* Size: 186 bytes jointly for udivsi3_i4i and sdivsi3_i4i
sh4-200 run times:
udiv small divisor: 55 cycles
udiv large divisor: 52 cycles
sdiv small divisor, positive result: 59 cycles
sdiv large divisor, positive result: 56 cycles
sdiv small divisor, negative result: 65 cycles (*)
sdiv large divisor, negative result: 62 cycles (*)
(*): r2 is restored in the rts delay slot and has a lingering latency
of two more cycles. */
	.balign 4
	.global	__udivsi3_i4i
	.global	__udivsi3_i4
	.set	__udivsi3_i4, __udivsi3_i4i
	.type	__udivsi3_i4i, @function
	.type	__sdivsi3_i4i, @function
! Unsigned 32/32 divide (r4 / r5 -> r0), space-optimized.
! Splits on whether the divisor fits in 16 bits (extu.w/cmp/eq test)
! and performs the division with shared chains of div1 steps
! (div6/div7/divx3 subroutines reached via bsr).
__udivsi3_i4i:
	sts	pr,r1		! return address kept in r1 (leaf-style)
	mov.l	r4,@-r15
	extu.w	r5,r0
	cmp/eq	r5,r0		! T set if divisor fits in 16 bits
	swap.w	r4,r0
	shlr16	r4
	bf/s	large_divisor
	div0u			! delay slot: init divide state
	mov.l	r5,@-r15
	shll16	r5
sdiv_small_divisor:
	div1	r5,r4
	bsr	div6		! run 7 more div1 steps
	div1	r5,r4
	div1	r5,r4
	bsr	div6
	div1	r5,r4
	xtrct	r4,r0
	xtrct	r0,r4
	bsr	div7		! 8 div1 steps
	swap.w	r4,r4
	div1	r5,r4
	bsr	div7
	div1	r5,r4
	xtrct	r4,r0
	mov.l	@r15+,r5
	swap.w	r0,r0
	mov.l	@r15+,r4
	jmp	@r1		! return via saved pr
	rotcl	r0		! delay slot: shift in the last quotient bit
! div7: 8 consecutive div1 steps; div6: 7 steps (fall-through entry)
div7:
	div1	r5,r4
div6:
	div1	r5,r4;	div1	r5,r4;	div1	r5,r4
	div1	r5,r4;	div1	r5,r4;	rts;	div1	r5,r4
! divx3: three div1 steps, interleaving quotient bits into r0
divx3:
	rotcl	r0
	div1	r5,r4
	rotcl	r0
	div1	r5,r4
	rotcl	r0
	rts
	div1	r5,r4
large_divisor:
	mov.l	r5,@-r15
sdiv_large_divisor:
	xor	r4,r0
	.rept	4
	rotcl	r0
	bsr	divx3		! 4 groups of 4 div1 steps = 16 bits
	div1	r5,r4
	.endr
	mov.l	@r15+,r5
	mov.l	@r15+,r4
	jmp	@r1
	rotcl	r0
	.global	__sdivsi3_i4i
	.global	__sdivsi3_i4
	.global	__sdivsi3
	.set	__sdivsi3_i4, __sdivsi3_i4i
	.set	__sdivsi3, __sdivsi3_i4i
! Signed divide: negate operands to make them non-negative, run the
! unsigned paths above, and (for a negative result) return through
! negate_result via the address stashed in r2.
__sdivsi3_i4i:
	mov.l	r4,@-r15
	cmp/pz	r5
	mov.l	r5,@-r15
	bt/s	pos_divisor
	cmp/pz	r4
	neg	r5,r5		! divisor was negative: use |r5|
	extu.w	r5,r0
	bt/s	neg_result
	cmp/eq	r5,r0
	neg	r4,r4		! both negative: result positive
pos_result:
	swap.w	r4,r0
	bra	sdiv_check_divisor
	sts	pr,r1
pos_divisor:
	extu.w	r5,r0
	bt/s	pos_result
	cmp/eq	r5,r0
	neg	r4,r4		! dividend negative: use |r4|, negate result
neg_result:
	mova	negate_result,r0
	;
	mov	r0,r1		! "return" to negate_result instead of caller
	swap.w	r4,r0
	lds	r2,macl		! stash caller's r2 in macl
	sts	pr,r2		! real return address kept in r2
sdiv_check_divisor:
	shlr16	r4
	bf/s	sdiv_large_divisor
	div0u
	bra	sdiv_small_divisor
	shll16	r5
	.balign	4
negate_result:
	neg	r0,r0		! flip the sign of the quotient
	jmp	@r2		! jump to the caller's return address
	sts	macl,r2		! delay slot: restore caller's r2
|
AirFortressIlikara/LS2K0300-linux-4.19
| 15,652
|
arch/sh/lib/memcpy-sh4.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* "memcpy" implementation of SuperH
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (c) 2002 STMicroelectronics Ltd
* Modified from memcpy.S and micro-optimised for SH4
* Stuart Menefy (stuart.menefy@st.com)
*
*/
#include <linux/linkage.h>
/*
* void *memcpy(void *dst, const void *src, size_t n);
*
* It is assumed that there is no overlap between src and dst.
* If there is an overlap, then the results are undefined.
*/
!
! GHIJ KLMN OPQR --> ...G HIJK LMNO PQR.
!
! Size is 16 or greater, and may have trailing bytes
	! .Lcase1: src/dst differ by 1 mod 4 (bit0 set, bit1 clear),
	! size >= 16.  Copies backwards a long word per iteration, merging
	! the previous load (r7) with the new one via shifts, then mops up
	! trailing bytes one at a time.
	.balign	32
.Lcase1:
	! Read a long word and write a long word at once
	! At the start of each iteration, r7 contains last long load
	add	#-1,r5		!  79 EX
	mov	r4,r2		!   5 MT (0 cycles latency)

	mov.l	@(r0,r5),r7	!  21 LS (2 cycles latency)
	add	#-4,r5		!  50 EX

	add	#7,r2		!  79 EX
	!
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	! 6 cycles, 4 bytes per iteration
3:	mov.l	@(r0,r5),r1	!  21 LS (latency=2)	! NMLK
	mov	r7, r3		!   5 MT (latency=0)	! RQPO

	cmp/hi	r2,r0		!  57 MT
	shll16	r3		! 103 EX

	mov	r1,r6		!   5 MT (latency=0)
	shll8	r3		! 102 EX		! Oxxx

	shlr8	r6		! 106 EX		! xNML
	mov	r1, r7		!   5 MT (latency=0)

	or	r6,r3		!  82 EX		! ONML
	bt/s	3b		! 109 BR

	mov.l	r3,@-r0		!  30 LS
#else
3:	mov.l	@(r0,r5),r1	!  21 LS (latency=2)	! KLMN
	mov	r7,r3		!   5 MT (latency=0)	! OPQR

	cmp/hi	r2,r0		!  57 MT
	shlr16	r3		! 107 EX

	shlr8	r3		! 106 EX		! xxxO
	mov	r1,r6		!   5 MT (latency=0)

	shll8	r6		! 102 EX		! LMNx
	mov	r1,r7		!   5 MT (latency=0)

	or	r6,r3		!  82 EX		! LMNO
	bt/s	3b		! 109 BR

	mov.l	r3,@-r0		!  30 LS
#endif
	! Finally, copy a byte at once, if necessary

	add	#4,r5		!  50 EX
	cmp/eq	r4,r0		!  54 MT

	add	#-6,r2		!  50 EX
	bt	9f		! 109 BR

8:	cmp/hi	r2,r0		!  57 MT
	mov.b	@(r0,r5),r1	!  20 LS (latency=2)

	bt/s	8b		! 109 BR
	mov.b	r1,@-r0		!  29 LS

9:	rts
	nop
!
! GHIJ KLMN OPQR --> .GHI JKLM NOPQ R...
!
! Size is 16 or greater, and may have trailing bytes
	! .Lcase3: src/dst differ by 3 mod 4 (bits 0 and 1 set),
	! size >= 16.  Same backwards long-word merge technique as .Lcase1
	! but with the complementary shift amounts.
	.balign	32
.Lcase3:
	! Read a long word and write a long word at once
	! At the start of each iteration, r7 contains last long load
	add	#-3,r5		!  79 EX
	mov	r4,r2		!   5 MT (0 cycles latency)

	mov.l	@(r0,r5),r7	!  21 LS (2 cycles latency)
	add	#-4,r5		!  50 EX

	add	#7,r2		!  79 EX
	!
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	! 6 cycles, 4 bytes per iteration
3:	mov.l	@(r0,r5),r1	!  21 LS (latency=2)	! NMLK
	mov	r7, r3		!   5 MT (latency=0)	! RQPO

	cmp/hi	r2,r0		!  57 MT
	shll8	r3		! 102 EX		! QPOx

	mov	r1,r6		!   5 MT (latency=0)
	shlr16	r6		! 107 EX

	shlr8	r6		! 106 EX		! xxxN
	mov	r1, r7		!   5 MT (latency=0)

	or	r6,r3		!  82 EX		! QPON
	bt/s	3b		! 109 BR

	mov.l	r3,@-r0		!  30 LS
#else
3:	mov	r7,r3		! OPQR
	shlr8	r3		! xOPQ
	mov.l	@(r0,r5),r7	! KLMN
	mov	r7,r6
	shll16	r6
	shll8	r6		! Nxxx
	or	r6,r3		! NOPQ
	cmp/hi	r2,r0
	bt/s	3b
	mov.l	r3,@-r0
#endif

	! Finally, copy a byte at once, if necessary

	add	#6,r5		!  50 EX
	cmp/eq	r4,r0		!  54 MT

	add	#-6,r2		!  50 EX
	bt	9f		! 109 BR

8:	cmp/hi	r2,r0		!  57 MT
	mov.b	@(r0,r5),r1	!  20 LS (latency=2)

	bt/s	8b		! 109 BR
	mov.b	r1,@-r0		!  29 LS

9:	rts
	nop
! void *memcpy(void *dst /* r4 */, const void *src /* r5 */, size_t n /* r6 */)
! Copies backwards (from the end) using the invariant r5 = src - dst, so
! each byte/word is addressed as @(r0,r5) with r0 walking down from the
! end of dst.  Dispatches to .Lcase* by relative alignment and size.
ENTRY(memcpy)

	! Calculate the invariants which will be used in the remainder
	! of the code:
	!
	!      r4   -->  [ ...  ] DST             [ ...  ] SRC
	!	         [ ...  ]                 [ ...  ]
	!	           :                        :
	!      r0   -->  [ ...  ]       r0+r5 --> [ ...  ]
	!
	!

	! Short circuit the common case of src, dst and len being 32 bit aligned
	! and test for zero length move

	mov	r6, r0		!   5 MT (0 cycle latency)
	or	r4, r0		!  82 EX

	or	r5, r0		!  82 EX
	tst	r6, r6		!  86 MT

	bt/s	99f		! 111 BR		(zero len)
	tst	#3, r0		!  87 MT

	mov	r4, r0		!   5 MT (0 cycle latency)
	add	r6, r0		!  49 EX

	mov	#16, r1		!   6 EX
	bt/s	.Lcase00	! 111 BR		(aligned)

	sub	r4, r5		!  75 EX

	! Arguments are not nicely long word aligned or zero len.
	! Check for small copies, and if so do a simple byte at a time copy.
	!
	! Deciding on an exact value of 'small' is not easy, as the point at which
	! using the optimised routines become worthwhile varies (these are the
	! cycle counts for differnet sizes using byte-at-a-time vs. optimised):
	!	size	byte-at-time	long	word	byte
	!	16	42		39-40	46-50	50-55
	!	24	58		43-44	54-58	62-67
	!	36	82		49-50	66-70	80-85
	! However the penalty for getting it 'wrong' is much higher for long word
	! aligned data (and this is more common), so use a value of 16.

	cmp/gt	r6,r1		!  56 MT

	add	#-1,r5		!  50 EX
	bf/s	6f		! 108 BR		(not small)

	mov	r5, r3		!   5 MT (latency=0)
	shlr	r6		! 104 EX

	mov.b	@(r0,r5),r1	!  20 LS (latency=2)
	bf/s	4f		! 111 BR

	add	#-1,r3		!  50 EX
	tst	r6, r6		!  86 MT

	bt/s	98f		! 110 BR
	mov.b	r1,@-r0		!  29 LS

	! 4 cycles, 2 bytes per iteration
3:	mov.b	@(r0,r5),r1	!  20 LS (latency=2)

4:	mov.b	@(r0,r3),r2	!  20 LS (latency=2)
	dt	r6		!  67 EX

	mov.b	r1,@-r0		!  29 LS
	bf/s	3b		! 111 BR

	mov.b	r2,@-r0		!  29 LS
98:
	rts
	nop

99:	rts
	mov	r4, r0		! return dst (delay slot)

	! Size is not small, so its worthwhile looking for optimisations.
	! First align destination to a long word boundary.
	!
	! r5 = normal value -1

6:	tst	#3, r0		!  87 MT
	mov	#3, r3		!   6 EX

	bt/s	2f		! 111 BR
	and	r0,r3		!  78 EX

	! 3 cycles, 1 byte per iteration
1:	dt	r3		!  67 EX
	mov.b	@(r0,r5),r1	!  19 LS (latency=2)

	add	#-1, r6		!  79 EX
	bf/s	1b		! 109 BR

	mov.b	r1,@-r0		!  28 LS

2:	add	#1, r5		!  79 EX

	! Now select the appropriate bulk transfer code based on relative
	! alignment of src and dst.

	mov	r0, r3		!   5 MT (latency=0)
	mov	r5, r0		!   5 MT (latency=0)

	tst	#1, r0		!  87 MT
	bf/s	1f		! 111 BR

	mov	#64, r7		!   6 EX

	! bit 0 clear
	cmp/ge	r7, r6		!  55 MT

	bt/s	2f		! 111 BR
	tst	#2, r0		!  87 MT

	! small
	bt/s	.Lcase0
	mov	r3, r0

	bra	.Lcase2
	nop

	! big
2:	bt/s	.Lcase0b
	mov	r3, r0

	bra	.Lcase2b
	nop

	! bit 0 set
1:	tst	#2, r0		!  87 MT

	bt/s	.Lcase1
	mov	r3, r0

	bra	.Lcase3
	nop
!
! GHIJ KLMN OPQR --> GHIJ KLMN OPQR
!
! src, dst and size are all long word aligned
! size is non-zero
	! .Lcase00: everything long-word aligned, 0 < size < 64.
	! Copies two long words per iteration through r1/r2, backwards.
	.balign	32
.Lcase00:
	mov	#64, r1		!   6 EX
	mov	r5, r3		!   5 MT (latency=0)

	cmp/gt	r6, r1		!  56 MT
	add	#-4, r5		!  50 EX

	bf	.Lcase00b	! 108 BR		(big loop)
	shlr2	r6		! 105 EX

	shlr	r6		! 104 EX
	mov.l	@(r0, r5), r1	!  21 LS (latency=2)

	bf/s	4f		! 111 BR
	add	#-8, r3		!  50 EX

	tst	r6, r6		!  86 MT
	bt/s	5f		! 110 BR

	mov.l	r1,@-r0		!  30 LS

	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	!  21 LS (latency=2)

4:	mov.l	@(r0, r3), r2	!  21 LS (latency=2)
	dt	r6		!  67 EX

	mov.l	r1, @-r0	!  30 LS
	bf/s	3b		! 109 BR

	mov.l	r2, @-r0	!  30 LS

5:	rts
	nop
! Size is 16 or greater and less than 64, but may have trailing bytes
	! .Lcase0: src/dst differ by 0 mod 4, 16 <= size < 64, possibly
	! with 0-3 trailing (i.e. lowest-addressed) bytes to finish.
	.balign	32
.Lcase0:
	add	#-4, r5		!  50 EX
	mov	r4, r7		!   5 MT (latency=0)

	mov.l	@(r0, r5), r1	!  21 LS (latency=2)
	mov	#4, r2		!   6 EX

	add	#11, r7		!  50 EX
	tst	r2, r6		!  86 MT

	mov	r5, r3		!   5 MT (latency=0)
	bt/s	4f		! 111 BR

	add	#-4, r3		!  50 EX
	mov.l	r1,@-r0		!  30 LS

	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	!  21 LS (latency=2)

4:	mov.l	@(r0, r3), r2	!  21 LS (latency=2)
	cmp/hi	r7, r0

	mov.l	r1, @-r0	!  30 LS
	bt/s	3b		! 109 BR

	mov.l	r2, @-r0	!  30 LS

	! Copy the final 0-3 bytes

	add	#3,r5		!  50 EX
	cmp/eq	r0, r4		!  54 MT

	add	#-10, r7	!  50 EX
	bt	9f		! 110 BR

	! 3 cycles, 1 byte per iteration
1:	mov.b	@(r0,r5),r1	!  19 LS
	cmp/hi	r7,r0		!  57 MT

	bt/s	1b		! 111 BR
	mov.b	r1,@-r0		!  28 LS

9:	rts
	nop
! Size is at least 64 bytes, so will be going round the big loop at least once.
!
! r2 = rounded up r4
! r3 = rounded down r0
	! .Lcase0b/.Lcase00b: aligned-difference copy, size >= 64.
	! Aligns dst to a 32-byte cache line, then streams 32 bytes per
	! iteration with movca.l, using r8-r11 as extra scratch (saved on
	! the stack) to avoid cache-line thrash; trailing long words and
	! bytes are finished afterwards.
	.balign	32
.Lcase0b:
	add	#-4, r5		!  50 EX

.Lcase00b:
	mov	r0, r3		!   5 MT (latency=0)
	mov	#(~0x1f), r1	!   6 EX

	and	r1, r3		!  78 EX
	mov	r4, r2		!   5 MT (latency=0)

	cmp/eq	r3, r0		!  54 MT
	add	#0x1f, r2	!  50 EX

	bt/s	1f		! 110 BR
	and	r1, r2		!  78 EX

	! copy initial words until cache line aligned

	mov.l	@(r0, r5), r1	!  21 LS (latency=2)
	tst	#4, r0		!  87 MT

	mov	r5, r6		!   5 MT (latency=0)
	add	#-4, r6		!  50 EX

	bt/s	4f		! 111 BR
	add	#8, r3		!  50 EX

	tst	#0x18, r0	!  87 MT

	bt/s	1f		! 109 BR
	mov.l	r1,@-r0		!  30 LS

	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r1	!  21 LS (latency=2)

4:	mov.l	@(r0, r6), r7	!  21 LS (latency=2)
	cmp/eq	r3, r0		!  54 MT

	mov.l	r1, @-r0	!  30 LS
	bf/s	3b		! 109 BR

	mov.l	r7, @-r0	!  30 LS

	! Copy the cache line aligned blocks
	!
	! In use: r0, r2, r4, r5
	! Scratch: r1, r3, r6, r7
	!
	! We could do this with the four scratch registers, but if src
	! and dest hit the same cache line, this will thrash, so make
	! use of additional registers.
	!
	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
	!   r5:	 src (was r0+r5)
	!   r1:	 dest (was r0)
	! this can be reversed at the end, so we don't need to save any extra
	! state.
	!
1:	mov.l	r8, @-r15	!  30 LS
	add	r0, r5		!  49 EX

	mov.l	r9, @-r15	!  30 LS
	mov	r0, r1		!   5 MT (latency=0)

	mov.l	r10, @-r15	!  30 LS
	add	#-0x1c, r5	!  50 EX

	mov.l	r11, @-r15	!  30 LS

	! 16 cycles, 32 bytes per iteration
2:	mov.l	@(0x00,r5),r0	! 18 LS (latency=2)
	add	#-0x20, r1	! 50 EX
	mov.l	@(0x04,r5),r3	! 18 LS (latency=2)
	mov.l	@(0x08,r5),r6	! 18 LS (latency=2)
	mov.l	@(0x0c,r5),r7	! 18 LS (latency=2)
	mov.l	@(0x10,r5),r8	! 18 LS (latency=2)
	mov.l	@(0x14,r5),r9	! 18 LS (latency=2)
	mov.l	@(0x18,r5),r10	! 18 LS (latency=2)
	mov.l	@(0x1c,r5),r11	! 18 LS (latency=2)
	movca.l	r0,@r1		! 40 LS (latency=3-7)
	mov.l	r3,@(0x04,r1)	! 33 LS
	mov.l	r6,@(0x08,r1)	! 33 LS
	mov.l	r7,@(0x0c,r1)	! 33 LS

	mov.l	r8,@(0x10,r1)	! 33 LS
	add	#-0x20, r5	! 50 EX

	mov.l	r9,@(0x14,r1)	! 33 LS
	cmp/eq	r2,r1		! 54 MT

	mov.l	r10,@(0x18,r1)	! 33 LS
	bf/s	2b		! 109 BR

	mov.l	r11,@(0x1c,r1)	! 33 LS

	mov	r1, r0		!   5 MT (latency=0)

	mov.l	@r15+, r11	!  15 LS
	sub	r1, r5		!  75 EX

	mov.l	@r15+, r10	!  15 LS
	cmp/eq	r4, r0		!  54 MT

	bf/s	1f		! 109 BR
	mov.l	 @r15+, r9	!  15 LS

	rts
1:	mov.l	@r15+, r8	!  15 LS
	sub	r4, r1		!  75 EX		(len remaining)

	! number of trailing bytes is non-zero
	!
	! invariants restored (r5 already decremented by 4)
	! also r1=num bytes remaining

	mov	#4, r2		!   6 EX
	mov	r4, r7		!   5 MT (latency=0)

	add	#0x1c, r5	!  50 EX		(back to -4)
	cmp/hs	r2, r1		!  58 MT

	bf/s	5f		! 108 BR
	add	 #11, r7	!  50 EX

	mov.l	@(r0, r5), r6	!  21 LS (latency=2)
	tst	r2, r1		!  86 MT

	mov	r5, r3		!   5 MT (latency=0)
	bt/s	4f		! 111 BR

	add	#-4, r3		!  50 EX
	cmp/hs	r2, r1		!  58 MT

	bt/s	5f		! 111 BR
	mov.l	r6,@-r0		!  30 LS

	! 4 cycles, 2 long words per iteration
3:	mov.l	@(r0, r5), r6	!  21 LS (latency=2)

4:	mov.l	@(r0, r3), r2	!  21 LS (latency=2)
	cmp/hi	r7, r0

	mov.l	r6, @-r0	!  30 LS
	bt/s	3b		! 109 BR

	mov.l	r2, @-r0	!  30 LS

	! Copy the final 0-3 bytes

5:	cmp/eq	r0, r4		!  54 MT
	add	#-10, r7	!  50 EX

	bt	9f		! 110 BR
	add	#3,r5		!  50 EX

	! 3 cycles, 1 byte per iteration
1:	mov.b	@(r0,r5),r1	!  19 LS
	cmp/hi	r7,r0		!  57 MT

	bt/s	1b		! 111 BR
	mov.b	r1,@-r0		!  28 LS

9:	rts
	nop
!
! GHIJ KLMN OPQR --> ..GH IJKL MNOP QR..
!
	! .Lcase2: src/dst differ by 2 mod 4, 16 <= size < 64.
	! Copies two 16-bit words per iteration backwards, then branches
	! to the shared word/byte tail at label 10.
	.balign	32
.Lcase2:
	! Size is 16 or greater and less then 64, but may have trailing bytes
2:	mov	r5, r6		!   5 MT (latency=0)
	add	#-2,r5		!  50 EX

	mov	r4,r2		!   5 MT (latency=0)
	add	#-4,r6		!  50 EX

	add	#7,r2		!  50 EX
3:	mov.w	@(r0,r5),r1	!  20 LS (latency=2)

	mov.w	@(r0,r6),r3	!  20 LS (latency=2)
	cmp/hi	r2,r0		!  57 MT

	mov.w	r1,@-r0		!  29 LS
	bt/s	3b		! 111 BR

	mov.w	r3,@-r0		!  29 LS

	bra	10f
	nop
	! .Lcase2b: src/dst differ by 2 mod 4, size >= 64.
	! Word-aligns dst to a cache line, then streams 32 bytes per
	! iteration with movca.l, re-packing halfword-shifted data via
	! shll16/xtrct through r8-r12 (saved on the stack).  The shared
	! tail at label 10 finishes remaining words and the last byte.
	.balign	32
.Lcase2b:
	! Size is at least 64 bytes, so will be going round the big loop at least once.
	!
	!   r2 = rounded up r4
	!   r3 = rounded down r0

	mov	r0, r3		!   5 MT (latency=0)
	mov	#(~0x1f), r1	!   6 EX

	and	r1, r3		!  78 EX
	mov	r4, r2		!   5 MT (latency=0)

	cmp/eq	r3, r0		!  54 MT
	add	#0x1f, r2	!  50 EX

	add	#-2, r5		!  50 EX
	bt/s	1f		! 110 BR

	and	r1, r2		!  78 EX

	! Copy a short word one at a time until we are cache line aligned
	!   Normal values: r0, r2, r3, r4
	!   Unused: r1, r6, r7
	!   Mod: r5 (=r5-2)
	!
	add	#2, r3		!  50 EX

2:	mov.w	@(r0,r5),r1	!  20 LS (latency=2)
	cmp/eq	r3,r0		!  54 MT

	bf/s	2b		! 111 BR

	mov.w	r1,@-r0		!  29 LS

	! Copy the cache line aligned blocks
	!
	! In use: r0, r2, r4, r5 (=r5-2)
	! Scratch: r1, r3, r6, r7
	!
	! We could do this with the four scratch registers, but if src
	! and dest hit the same cache line, this will thrash, so make
	! use of additional registers.
	!
	! We also need r0 as a temporary (for movca), so 'undo' the invariant:
	!   r5:	 src (was r0+r5)
	!   r1:	 dest (was r0)
	! this can be reversed at the end, so we don't need to save any extra
	! state.
	!
1:	mov.l	r8, @-r15	!  30 LS
	add	r0, r5		!  49 EX

	mov.l	r9, @-r15	!  30 LS
	mov	r0, r1		!   5 MT (latency=0)

	mov.l	r10, @-r15	!  30 LS
	add	#-0x1e, r5	!  50 EX

	mov.l	r11, @-r15	!  30 LS

	mov.l	r12, @-r15	!  30 LS

	! 17 cycles, 32 bytes per iteration
#ifdef CONFIG_CPU_LITTLE_ENDIAN
2:	mov.w	@r5+, r0	!  14 LS (latency=2)		..JI
	add	#-0x20, r1	!  50 EX

	mov.l	@r5+, r3	!  15 LS (latency=2)		NMLK

	mov.l	@r5+, r6	!  15 LS (latency=2)		RQPO
	shll16	r0		! 103 EX			JI..

	mov.l	@r5+, r7	!  15 LS (latency=2)
	xtrct	r3, r0		!  48 EX			LKJI

	mov.l	@r5+, r8	!  15 LS (latency=2)
	xtrct	r6, r3		!  48 EX			PONM

	mov.l	@r5+, r9	!  15 LS (latency=2)
	xtrct	r7, r6		!  48 EX

	mov.l	@r5+, r10	!  15 LS (latency=2)
	xtrct	r8, r7		!  48 EX

	mov.l	@r5+, r11	!  15 LS (latency=2)
	xtrct	r9, r8		!  48 EX

	mov.w	@r5+, r12	!  15 LS (latency=2)
	xtrct	r10, r9		!  48 EX

	movca.l	r0,@r1		!  40 LS (latency=3-7)
	xtrct	r11, r10	!  48 EX

	mov.l	r3, @(0x04,r1)	!  33 LS
	xtrct	r12, r11	!  48 EX

	mov.l	r6, @(0x08,r1)	!  33 LS

	mov.l	r7, @(0x0c,r1)	!  33 LS

	mov.l	r8, @(0x10,r1)	!  33 LS
	add	#-0x40, r5	!  50 EX

	mov.l	r9, @(0x14,r1)	!  33 LS
	cmp/eq	r2,r1		!  54 MT

	mov.l	r10, @(0x18,r1)	!  33 LS
	bf/s	2b		! 109 BR

	mov.l	r11, @(0x1c,r1)	!  33 LS
#else
2:	mov.w	@(0x1e,r5), r0	!  17 LS (latency=2)
	add	#-2, r5		!  50 EX

	mov.l	@(0x1c,r5), r3	!  18 LS (latency=2)
	add	#-4, r1		!  50 EX

	mov.l	@(0x18,r5), r6	!  18 LS (latency=2)
	shll16	r0		! 103 EX

	mov.l	@(0x14,r5), r7	!  18 LS (latency=2)
	xtrct	r3, r0		!  48 EX

	mov.l	@(0x10,r5), r8	!  18 LS (latency=2)
	xtrct	r6, r3		!  48 EX

	mov.l	@(0x0c,r5), r9	!  18 LS (latency=2)
	xtrct	r7, r6		!  48 EX

	mov.l	@(0x08,r5), r10	!  18 LS (latency=2)
	xtrct	r8, r7		!  48 EX

	mov.l	@(0x04,r5), r11	!  18 LS (latency=2)
	xtrct	r9, r8		!  48 EX

	mov.l	@(0x00,r5), r12	!  18 LS (latency=2)
	xtrct	r10, r9		!  48 EX

	movca.l	r0,@r1		!  40 LS (latency=3-7)
	add	#-0x1c, r1	!  50 EX

	mov.l	r3, @(0x18,r1)	!  33 LS
	xtrct	r11, r10	!  48 EX

	mov.l	r6, @(0x14,r1)	!  33 LS
	xtrct	r12, r11	!  48 EX

	mov.l	r7, @(0x10,r1)	!  33 LS

	mov.l	r8, @(0x0c,r1)	!  33 LS
	add	#-0x1e, r5	!  50 EX

	mov.l	r9, @(0x08,r1)	!  33 LS
	cmp/eq	r2,r1		!  54 MT

	mov.l	r10, @(0x04,r1)	!  33 LS
	bf/s	2b		! 109 BR

	mov.l	r11, @(0x00,r1)	!  33 LS
#endif

	mov.l	@r15+, r12
	mov	r1, r0		!   5 MT (latency=0)

	mov.l	@r15+, r11	!  15 LS
	sub	r1, r5		!  75 EX

	mov.l	@r15+, r10	!  15 LS
	cmp/eq	r4, r0		!  54 MT

	bf/s	1f		! 109 BR
	mov.l	@r15+, r9	!  15 LS

	rts
1:	mov.l	@r15+, r8	!  15 LS
	add	#0x1e, r5	!  50 EX

	! Finish off a short word at a time
	! r5 must be invariant - 2
10:	mov	r4,r2		!   5 MT (latency=0)
	add	#1,r2		!  50 EX

	cmp/hi	r2, r0		!  57 MT
	bf/s	1f		! 109 BR

	add	#2, r2		!  50 EX

3:	mov.w	@(r0,r5),r1	!  20 LS
	cmp/hi	r2,r0		!  57 MT

	bt/s	3b		! 109 BR

	mov.w	r1,@-r0		!  29 LS
1:

	!
	! Finally, copy the last byte if necessary
	cmp/eq	r4,r0		!  54 MT
	bt/s	9b
	add	#1,r5
	mov.b	@(r0,r5),r1
	rts
	mov.b	r1,@-r0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,226
|
arch/sh/lib/copy_page.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* copy_page, __copy_user_page, __copy_user implementation of SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Toshinobu Sugioka
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
#include <asm/page.h>
/*
* copy_page
* @to: P1 address
* @from: P1 address
*
* void copy_page(void *to, void *from)
*/
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
* r8 --- from + PAGE_SIZE
* r9 --- not used
* r10 --- to
* r11 --- from
*/
! void copy_page(void *to /* r4 */, void *from /* r5 */)
! Copies PAGE_SIZE bytes, 32 bytes (8 long words) per iteration.
! r8 = from + PAGE_SIZE (loop limit), r10 = to cursor, r11 = from cursor.
! On SH-4 the first store of each block uses movca.l (write-allocate
! without fetching the destination line).
ENTRY(copy_page)
	mov.l	r8,@-r15	! preserve callee-saved registers
	mov.l	r10,@-r15
	mov.l	r11,@-r15
	mov	r4,r10
	mov	r5,r11
	mov	r5,r8
	mov	#(PAGE_SIZE >> 10), r0	! build PAGE_SIZE without a large imm
	shll8	r0
	shll2	r0
	add	r0,r8		! r8 = from + PAGE_SIZE
	!
1:	mov.l	@r11+,r0
	mov.l	@r11+,r1
	mov.l	@r11+,r2
	mov.l	@r11+,r3
	mov.l	@r11+,r4
	mov.l	@r11+,r5
	mov.l	@r11+,r6
	mov.l	@r11+,r7
#if defined(CONFIG_CPU_SH4)
	movca.l	r0,@r10		! allocate the dst cache line, no read
#else
	mov.l	r0,@r10
#endif
	add	#32,r10
	mov.l	r7,@-r10	! store the block back-to-front
	mov.l	r6,@-r10
	mov.l	r5,@-r10
	mov.l	r4,@-r10
	mov.l	r3,@-r10
	mov.l	r2,@-r10
	mov.l	r1,@-r10
	cmp/eq	r11,r8		! done when from cursor reaches the limit
	bf/s	1b
	add	#28,r10		! delay slot: advance dst to the next block
	!
	mov.l	@r15+,r11
	mov.l	@r15+,r10
	mov.l	@r15+,r8
	rts
	nop
/*
* __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
* Return the number of bytes NOT copied
*/
/*
 * EX(insn): user-access wrapper; a fault in `insn` resumes at local
 * label 6000 (fixup that pops the saved registers).
 */
#define EX(...)			\
	9999: __VA_ARGS__ ;		\
	.section __ex_table, "a";	\
	.long 9999b, 6000f	;	\
	.previous
/*
 * EX_NO_POP(insn): same, but faults resume at local label 6005, used on
 * the small-copy path where no registers were pushed.
 */
#define EX_NO_POP(...)		\
	9999: __VA_ARGS__ ;		\
	.section __ex_table, "a";	\
	.long 9999b, 6005f	;	\
	.previous
ENTRY(__copy_user)
! Check if small number of bytes
mov #11,r0
mov r4,r3
cmp/gt r0,r6 ! r6 (len) > r0 (11)
bf/s .L_cleanup_loop_no_pop
add r6,r3 ! last destination address
! Calculate bytes needed to align to src
mov.l r11,@-r15
neg r5,r0
mov.l r10,@-r15
add #4,r0
mov.l r9,@-r15
and #3,r0
mov.l r8,@-r15
tst r0,r0
bt 2f
1:
! Copy bytes to long word align src
EX( mov.b @r5+,r1 )
dt r0
add #-1,r6
EX( mov.b r1,@r4 )
bf/s 1b
add #1,r4
! Jump to appropriate routine depending on dest
2: mov #3,r1
mov r6, r2
and r4,r1
shlr2 r2
shll2 r1
mova .L_jump_tbl,r0
mov.l @(r0,r1),r1
jmp @r1
nop
.align 2
.L_jump_tbl:
.long .L_dest00
.long .L_dest01
.long .L_dest10
.long .L_dest11
/*
* Come here if there are less than 12 bytes to copy
*
* Keep the branch target close, so the bf/s callee doesn't overflow
* and result in a more expensive branch being inserted. This is the
* fast-path for small copies, the jump via the jump table will hit the
* default slow-path cleanup. -PFM.
*/
.L_cleanup_loop_no_pop:
tst r6,r6 ! Check explicitly for zero
bt 1f
2:
EX_NO_POP( mov.b @r5+,r0 )
dt r6
EX_NO_POP( mov.b r0,@r4 )
bf/s 2b
add #1,r4
1: mov #0,r0 ! normal return
5000:
# Exception handler:
.section .fixup, "ax"
6005:
mov.l 8000f,r1
mov r3,r0
jmp @r1
sub r4,r0
.align 2
8000: .long 5000b
.previous
rts
nop
! Destination = 00
.L_dest00:
! Skip the large copy for small transfers
mov #(32+32-4), r0
cmp/gt r6, r0 ! r0 (60) > r6 (len)
bt 1f
! Align dest to a 32 byte boundary
neg r4,r0
add #0x20, r0
and #0x1f, r0
tst r0, r0
bt 2f
sub r0, r6
shlr2 r0
3:
EX( mov.l @r5+,r1 )
dt r0
EX( mov.l r1,@r4 )
bf/s 3b
add #4,r4
2:
EX( mov.l @r5+,r0 )
EX( mov.l @r5+,r1 )
EX( mov.l @r5+,r2 )
EX( mov.l @r5+,r7 )
EX( mov.l @r5+,r8 )
EX( mov.l @r5+,r9 )
EX( mov.l @r5+,r10 )
EX( mov.l @r5+,r11 )
#ifdef CONFIG_CPU_SH4
EX( movca.l r0,@r4 )
#else
EX( mov.l r0,@r4 )
#endif
add #-32, r6
EX( mov.l r1,@(4,r4) )
mov #32, r0
EX( mov.l r2,@(8,r4) )
cmp/gt r6, r0 ! r0 (32) > r6 (len)
EX( mov.l r7,@(12,r4) )
EX( mov.l r8,@(16,r4) )
EX( mov.l r9,@(20,r4) )
EX( mov.l r10,@(24,r4) )
EX( mov.l r11,@(28,r4) )
bf/s 2b
add #32,r4
1: mov r6, r0
shlr2 r0
tst r0, r0
bt .L_cleanup
1:
EX( mov.l @r5+,r1 )
dt r0
EX( mov.l r1,@r4 )
bf/s 1b
add #4,r4
bra .L_cleanup
nop
! Destination = 10
.L_dest10:
mov r2,r7
shlr2 r7
shlr r7
tst r7,r7
mov #7,r0
bt/s 1f
and r0,r2
2:
dt r7
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.l @r5+,r0 )
EX( mov.l @r5+,r1 )
EX( mov.l @r5+,r8 )
EX( mov.l @r5+,r9 )
EX( mov.l @r5+,r10 )
EX( mov.w r0,@r4 )
add #2,r4
xtrct r1,r0
xtrct r8,r1
xtrct r9,r8
xtrct r10,r9
EX( mov.l r0,@r4 )
EX( mov.l r1,@(4,r4) )
EX( mov.l r8,@(8,r4) )
EX( mov.l r9,@(12,r4) )
EX( mov.l @r5+,r1 )
EX( mov.l @r5+,r8 )
EX( mov.l @r5+,r0 )
xtrct r1,r10
xtrct r8,r1
xtrct r0,r8
shlr16 r0
EX( mov.l r10,@(16,r4) )
EX( mov.l r1,@(20,r4) )
EX( mov.l r8,@(24,r4) )
EX( mov.w r0,@(28,r4) )
bf/s 2b
add #30,r4
#else
EX( mov.l @(28,r5),r0 )
EX( mov.l @(24,r5),r8 )
EX( mov.l @(20,r5),r9 )
EX( mov.l @(16,r5),r10 )
EX( mov.w r0,@(30,r4) )
add #-2,r4
xtrct r8,r0
xtrct r9,r8
xtrct r10,r9
EX( mov.l r0,@(28,r4) )
EX( mov.l r8,@(24,r4) )
EX( mov.l r9,@(20,r4) )
EX( mov.l @(12,r5),r0 )
EX( mov.l @(8,r5),r8 )
xtrct r0,r10
EX( mov.l @(4,r5),r9 )
mov.l r10,@(16,r4)
EX( mov.l @r5,r10 )
xtrct r8,r0
xtrct r9,r8
xtrct r10,r9
EX( mov.l r0,@(12,r4) )
EX( mov.l r8,@(8,r4) )
swap.w r10,r0
EX( mov.l r9,@(4,r4) )
EX( mov.w r0,@(2,r4) )
add #32,r5
bf/s 2b
add #34,r4
#endif
tst r2,r2
bt .L_cleanup
1: ! Read longword, write two words per iteration
EX( mov.l @r5+,r0 )
dt r2
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.w r0,@r4 )
shlr16 r0
EX( mov.w r0,@(2,r4) )
#else
EX( mov.w r0,@(2,r4) )
shlr16 r0
EX( mov.w r0,@r4 )
#endif
bf/s 1b
add #4,r4
bra .L_cleanup
nop
! Destination = 01 or 11
.L_dest01:
.L_dest11:
! Read longword, write byte, word, byte per iteration
EX( mov.l @r5+,r0 )
dt r2
#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.b r0,@r4 )
shlr8 r0
add #1,r4
EX( mov.w r0,@r4 )
shlr16 r0
EX( mov.b r0,@(2,r4) )
bf/s .L_dest01
add #3,r4
#else
EX( mov.b r0,@(3,r4) )
shlr8 r0
swap.w r0,r7
EX( mov.b r7,@r4 )
add #1,r4
EX( mov.w r0,@r4 )
bf/s .L_dest01
add #3,r4
#endif
! Cleanup last few bytes
.L_cleanup:
mov r6,r0
and #3,r0
tst r0,r0
bt .L_exit
mov r0,r6
.L_cleanup_loop:
EX( mov.b @r5+,r0 )
dt r6
EX( mov.b r0,@r4 )
bf/s .L_cleanup_loop
add #1,r4
.L_exit:
mov #0,r0 ! normal return
5000:
# Exception handler:
.section .fixup, "ax"
6000:
mov.l 8000f,r1
mov r3,r0
jmp @r1
sub r4,r0
.align 2
8000: .long 5000b
.previous
mov.l @r15+,r8
mov.l @r15+,r9
mov.l @r15+,r10
rts
mov.l @r15+,r11
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,457
|
arch/sh/lib/mcount.S
|
/*
* arch/sh/lib/mcount.S
*
* Copyright (C) 2008, 2009 Paul Mundt
* Copyright (C) 2008, 2009 Matt Fleming
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
/*
 * MCOUNT_ENTER/MCOUNT_LEAVE: save/restore the argument registers
 * (r4-r7) and pr around tracer calls.  After ENTER, r4 = the caller's
 * return address (read back from the stack at offset 20) and r5 = the
 * mcount call site (pr), matching the (parent_ip, self_ip) convention
 * of ftrace handlers.
 */
#define MCOUNT_ENTER() \
mov.l r4, @-r15; \
mov.l r5, @-r15; \
mov.l r6, @-r15; \
mov.l r7, @-r15; \
sts.l pr, @-r15; \
\
mov.l @(20,r15),r4; \
sts pr, r5
#define MCOUNT_LEAVE() \
lds.l @r15+, pr; \
mov.l @r15+, r7; \
mov.l @r15+, r6; \
mov.l @r15+, r5; \
rts; \
mov.l @r15+, r4
#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after __bss_stop) and anywhere in init_thread_union (init_stack).
 */
#define STACK_CHECK() \
mov #(THREAD_SIZE >> 10), r0; \
shll8 r0; \
shll2 r0; \
\
/* r1 = sp & (THREAD_SIZE - 1) */ \
mov #-1, r1; \
add r0, r1; \
and r15, r1; \
\
mov #TI_SIZE, r3; \
mov #(STACK_WARN >> 8), r2; \
shll8 r2; \
add r3, r2; \
\
/* Is the stack overflowing? */ \
cmp/hi r2, r1; \
bf stack_panic; \
\
/* If sp > __bss_stop then we're OK. */ \
mov.l .L_ebss, r1; \
cmp/hi r1, r15; \
bt 1f; \
\
/* If sp < init_stack, we're not OK. */ \
mov.l .L_init_thread_union, r1; \
cmp/hs r1, r15; \
bf stack_panic; \
\
/* If sp > init_stack && sp < __bss_stop, not OK. */ \
add r0, r1; \
cmp/hs r1, r15; \
bt stack_panic; \
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */
/*
 * _mcount/mcount: compiler-inserted profiling hook.  Dispatches to the
 * registered ftrace function (and, if enabled, the graph tracer), or
 * falls straight through to ftrace_stub when tracing is off.
 */
.align 2
.globl _mcount
.type _mcount,@function
.globl mcount
.type mcount,@function
_mcount:
mcount:
STACK_CHECK()
#ifndef CONFIG_FUNCTION_TRACER
rts
nop
#else
MCOUNT_ENTER()
#ifdef CONFIG_DYNAMIC_FTRACE
.globl mcount_call
mcount_call:
mov.l .Lftrace_stub, r6 ! patched at runtime by dynamic ftrace
#else
mov.l .Lftrace_trace_function, r6
mov.l ftrace_stub, r7
cmp/eq r6, r7
bt skip_trace ! no tracer registered
mov.l @r6, r6
#endif
jsr @r6
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
mov.l .Lftrace_graph_return, r6
mov.l .Lftrace_stub, r7
cmp/eq r6, r7
bt 1f
mov.l .Lftrace_graph_caller, r0
jmp @r0
nop
1:
mov.l .Lftrace_graph_entry, r6
mov.l .Lftrace_graph_entry_stub, r7
cmp/eq r6, r7
bt skip_trace
mov.l .Lftrace_graph_caller, r0
jmp @r0
nop
.align 2
.Lftrace_graph_return:
.long ftrace_graph_return
.Lftrace_graph_entry:
.long ftrace_graph_entry
.Lftrace_graph_entry_stub:
.long ftrace_graph_entry_stub
.Lftrace_graph_caller:
.long ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
.globl skip_trace
skip_trace:
MCOUNT_LEAVE()
.align 2
.Lftrace_trace_function:
.long ftrace_trace_function
#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
.globl ftrace_graph_call
ftrace_graph_call:
mov.l .Lskip_trace, r0
jmp @r0
nop
.align 2
.Lskip_trace:
.long skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
.globl ftrace_caller
ftrace_caller:
MCOUNT_ENTER()
.globl ftrace_call
ftrace_call:
mov.l .Lftrace_stub, r6 ! call target patched by ftrace at runtime
jsr @r6
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
bra ftrace_graph_call
nop
#else
MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */
.align 2
/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
.long ftrace_stub
.globl ftrace_stub
ftrace_stub:
rts
nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_caller
ftrace_graph_caller:
mov.l 2f, r1
jmp @r1
nop
1:
/*
 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
 * the stack address containing our return address is
 * r15 + 20.
 */
mov #20, r0
add r15, r0
mov r0, r4 ! r4 = &return-address slot, for prepare_ftrace_return
mov.l .Lprepare_ftrace_return, r0
jsr @r0
nop
MCOUNT_LEAVE()
.align 2
2: .long skip_trace
.Lprepare_ftrace_return:
.long prepare_ftrace_return
.globl return_to_handler
return_to_handler:
/*
 * Save the return values.
 */
mov.l r0, @-r15
mov.l r1, @-r15
mov #0, r4
mov.l .Lftrace_return_to_handler, r0
jsr @r0
nop
/*
 * The return value from ftrace_return_handler has the real
 * address that we should return to.
 */
lds r0, pr
mov.l @r15+, r1
rts
mov.l @r15+, r0
.align 2
.Lftrace_return_to_handler:
.long ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_DEBUG
/* Fatal path for STACK_CHECK(): dump the stack, then panic(). */
.globl stack_panic
stack_panic:
mov.l .Ldump_stack, r0
jsr @r0
nop
mov.l .Lpanic, r0
jsr @r0
mov.l .Lpanic_s, r4 ! delay slot: panic() message argument
rts
nop
.align 2
.L_init_thread_union:
.long init_thread_union
.L_ebss:
.long __bss_stop
.Lpanic:
.long panic
.Lpanic_s:
.long .Lpanic_str
.Ldump_stack:
.long dump_stack
.section .rodata
.align 2
.Lpanic_str:
.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,827
|
arch/sh/lib/__clear_user.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* __clear_user_page, __clear_user, clear_page implementation of SuperH
*
* Copyright (C) 2001 Kaz Kojima
* Copyright (C) 2001, 2002 Niibe Yutaka
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
#include <asm/page.h>
/*
 * __clear_user(to = r4, len = r5)
 *
 * Zeroes len bytes at the (possibly user-space) destination.  Returns
 * 0 in r0 on success, or the number of bytes remaining if a store
 * faults (fixup via the __ex_table entries at the end).  The region is
 * split into an unaligned head (area 0), a 32-byte-aligned middle
 * cleared a longword at a time (area 1), and an unaligned tail (area 2).
 */
ENTRY(__clear_user)
!
mov #0, r0
mov #0xffffffe0, r1 ! r1 = ~31, mask for 32-byte alignment
!
! r4..(r4+31)&~32 -------- not aligned [ Area 0 ]
! (r4+31)&~32..(r4+r5)&~32 -------- aligned [ Area 1 ]
! (r4+r5)&~32..r4+r5 -------- not aligned [ Area 2 ]
!
! Clear area 0
mov r4, r2
!
tst r1, r5 ! length < 32
bt .Larea2 ! skip to remainder
!
add #31, r2
and r1, r2 ! r2 = dest rounded up to 32-byte boundary
cmp/eq r4, r2
bt .Larea1
mov r2, r3
sub r4, r3
mov r3, r7
mov r4, r2
!
.L0: dt r3
0: mov.b r0, @r2
bf/s .L0
add #1, r2
!
sub r7, r5
mov r2, r4
.Larea1:
mov r4, r3
add r5, r3
and r1, r3 ! r3 = aligned end of area 1
cmp/hi r2, r3
bf .Larea2
!
! Clear area 1
#if defined(CONFIG_CPU_SH4)
1: movca.l r0, @r2 ! movca.l allocates the cache line without reading it
#else
1: mov.l r0, @r2
#endif
add #4, r2
2: mov.l r0, @r2
add #4, r2
3: mov.l r0, @r2
add #4, r2
4: mov.l r0, @r2
add #4, r2
5: mov.l r0, @r2
add #4, r2
6: mov.l r0, @r2
add #4, r2
7: mov.l r0, @r2
add #4, r2
8: mov.l r0, @r2
add #4, r2
cmp/hi r2, r3
bt/s 1b
nop
!
! Clear area 2
.Larea2:
mov r4, r3
add r5, r3
cmp/hs r3, r2
bt/s .Ldone
sub r2, r3
.L2: dt r3
9: mov.b r0, @r2
bf/s .L2
add #1, r2
!
.Ldone: rts
mov #0, r0 ! return 0 as normal return
! return the number of bytes remained
.Lbad_clear_user:
mov r4, r0
add r5, r0 ! r0 = original end address
rts
sub r2, r0 ! remaining = end - current position
.section __ex_table,"a"
.align 2
.long 0b, .Lbad_clear_user
.long 1b, .Lbad_clear_user
.long 2b, .Lbad_clear_user
.long 3b, .Lbad_clear_user
.long 4b, .Lbad_clear_user
.long 5b, .Lbad_clear_user
.long 6b, .Lbad_clear_user
.long 7b, .Lbad_clear_user
.long 8b, .Lbad_clear_user
.long 9b, .Lbad_clear_user
.previous
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,468
|
arch/sh/lib/ashiftrt.S
|
/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
/*
 * libgcc arithmetic-right-shift helpers: __ashiftrt_r4_N shifts r4
 * right arithmetically by the constant N (0..32) and returns via rts.
 * Entry points for successive counts fall through one `shar` at a
 * time; the 8/16/24/31-32 cases use shorter shlr/exts/rotcl sequences.
 */
.global __ashiftrt_r4_0
.global __ashiftrt_r4_1
.global __ashiftrt_r4_2
.global __ashiftrt_r4_3
.global __ashiftrt_r4_4
.global __ashiftrt_r4_5
.global __ashiftrt_r4_6
.global __ashiftrt_r4_7
.global __ashiftrt_r4_8
.global __ashiftrt_r4_9
.global __ashiftrt_r4_10
.global __ashiftrt_r4_11
.global __ashiftrt_r4_12
.global __ashiftrt_r4_13
.global __ashiftrt_r4_14
.global __ashiftrt_r4_15
.global __ashiftrt_r4_16
.global __ashiftrt_r4_17
.global __ashiftrt_r4_18
.global __ashiftrt_r4_19
.global __ashiftrt_r4_20
.global __ashiftrt_r4_21
.global __ashiftrt_r4_22
.global __ashiftrt_r4_23
.global __ashiftrt_r4_24
.global __ashiftrt_r4_25
.global __ashiftrt_r4_26
.global __ashiftrt_r4_27
.global __ashiftrt_r4_28
.global __ashiftrt_r4_29
.global __ashiftrt_r4_30
.global __ashiftrt_r4_31
.global __ashiftrt_r4_32
.align 1
__ashiftrt_r4_32:
__ashiftrt_r4_31:
! shift >= 31: result is all sign bits (0 or -1)
rotcl r4 ! T = old sign bit
rts
subc r4,r4 ! r4 = 0 - T everywhere -> 0 or 0xffffffff
__ashiftrt_r4_30:
shar r4
__ashiftrt_r4_29:
shar r4
__ashiftrt_r4_28:
shar r4
__ashiftrt_r4_27:
shar r4
__ashiftrt_r4_26:
shar r4
__ashiftrt_r4_25:
shar r4
__ashiftrt_r4_24:
! shift by 24 = logical shift then byte sign-extension
shlr16 r4
shlr8 r4
rts
exts.b r4,r4
__ashiftrt_r4_23:
shar r4
__ashiftrt_r4_22:
shar r4
__ashiftrt_r4_21:
shar r4
__ashiftrt_r4_20:
shar r4
__ashiftrt_r4_19:
shar r4
__ashiftrt_r4_18:
shar r4
__ashiftrt_r4_17:
shar r4
__ashiftrt_r4_16:
! shift by 16 = logical shift then word sign-extension
shlr16 r4
rts
exts.w r4,r4
__ashiftrt_r4_15:
shar r4
__ashiftrt_r4_14:
shar r4
__ashiftrt_r4_13:
shar r4
__ashiftrt_r4_12:
shar r4
__ashiftrt_r4_11:
shar r4
__ashiftrt_r4_10:
shar r4
__ashiftrt_r4_9:
shar r4
__ashiftrt_r4_8:
shar r4
__ashiftrt_r4_7:
shar r4
__ashiftrt_r4_6:
shar r4
__ashiftrt_r4_5:
shar r4
__ashiftrt_r4_4:
shar r4
__ashiftrt_r4_3:
shar r4
__ashiftrt_r4_2:
shar r4
__ashiftrt_r4_1:
rts
shar r4 ! final shift in the delay slot
__ashiftrt_r4_0:
rts
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,450
|
arch/sh/lib/movmem.S
|
/* Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006
Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by J"orn Rennecke
amylaar@cygnus.com */
/*
 * libgcc block-move helpers (dest = r4, src = r5, count context below).
 * __movmemSI<N> copies exactly N bytes (N = 4..64, longword multiples)
 * by falling through decreasing entry points.  __movmem handles larger
 * counts by looping over __movmemSI52 in 64-byte strides; r6 carries
 * the pre-shifted (count*4) loop control.  __movmem_i4_* are the
 * 4-byte-aligned variants with a longword count in r6.
 */
.text
.balign 4
.global __movmem
.global __movstr
.set __movstr, __movmem
/* This would be a lot simpler if r6 contained the byte count
minus 64, and we wouldn't be called here for a byte count of 64. */
__movmem:
sts.l pr,@-r15
shll2 r6
bsr __movmemSI52+2
mov.l @(48,r5),r0
.balign 4
movmem_loop: /* Reached with rts */
mov.l @(60,r5),r0
add #-64,r6
mov.l r0,@(60,r4)
tst r6,r6
mov.l @(56,r5),r0
bt movmem_done
mov.l r0,@(56,r4)
cmp/pl r6
mov.l @(52,r5),r0
add #64,r5
mov.l r0,@(52,r4)
add #64,r4
bt __movmemSI52
! done all the large groups, do the remainder
! jump to movmem+
mova __movmemSI4+4,r0
add r6,r0 ! computed entry into the fall-through chain
jmp @r0
movmem_done: ! share slot insn, works out aligned.
lds.l @r15+,pr
mov.l r0,@(56,r4)
mov.l @(52,r5),r0
rts
mov.l r0,@(52,r4)
.balign 4
.global __movmemSI64
.global __movstrSI64
.set __movstrSI64, __movmemSI64
__movmemSI64:
mov.l @(60,r5),r0
mov.l r0,@(60,r4)
.global __movmemSI60
.global __movstrSI60
.set __movstrSI60, __movmemSI60
__movmemSI60:
mov.l @(56,r5),r0
mov.l r0,@(56,r4)
.global __movmemSI56
.global __movstrSI56
.set __movstrSI56, __movmemSI56
__movmemSI56:
mov.l @(52,r5),r0
mov.l r0,@(52,r4)
.global __movmemSI52
.global __movstrSI52
.set __movstrSI52, __movmemSI52
__movmemSI52:
mov.l @(48,r5),r0
mov.l r0,@(48,r4)
.global __movmemSI48
.global __movstrSI48
.set __movstrSI48, __movmemSI48
__movmemSI48:
mov.l @(44,r5),r0
mov.l r0,@(44,r4)
.global __movmemSI44
.global __movstrSI44
.set __movstrSI44, __movmemSI44
__movmemSI44:
mov.l @(40,r5),r0
mov.l r0,@(40,r4)
.global __movmemSI40
.global __movstrSI40
.set __movstrSI40, __movmemSI40
__movmemSI40:
mov.l @(36,r5),r0
mov.l r0,@(36,r4)
.global __movmemSI36
.global __movstrSI36
.set __movstrSI36, __movmemSI36
__movmemSI36:
mov.l @(32,r5),r0
mov.l r0,@(32,r4)
.global __movmemSI32
.global __movstrSI32
.set __movstrSI32, __movmemSI32
__movmemSI32:
mov.l @(28,r5),r0
mov.l r0,@(28,r4)
.global __movmemSI28
.global __movstrSI28
.set __movstrSI28, __movmemSI28
__movmemSI28:
mov.l @(24,r5),r0
mov.l r0,@(24,r4)
.global __movmemSI24
.global __movstrSI24
.set __movstrSI24, __movmemSI24
__movmemSI24:
mov.l @(20,r5),r0
mov.l r0,@(20,r4)
.global __movmemSI20
.global __movstrSI20
.set __movstrSI20, __movmemSI20
__movmemSI20:
mov.l @(16,r5),r0
mov.l r0,@(16,r4)
.global __movmemSI16
.global __movstrSI16
.set __movstrSI16, __movmemSI16
__movmemSI16:
mov.l @(12,r5),r0
mov.l r0,@(12,r4)
.global __movmemSI12
.global __movstrSI12
.set __movstrSI12, __movmemSI12
__movmemSI12:
mov.l @(8,r5),r0
mov.l r0,@(8,r4)
.global __movmemSI8
.global __movstrSI8
.set __movstrSI8, __movmemSI8
__movmemSI8:
mov.l @(4,r5),r0
mov.l r0,@(4,r4)
.global __movmemSI4
.global __movstrSI4
.set __movstrSI4, __movmemSI4
__movmemSI4:
mov.l @(0,r5),r0
rts
mov.l r0,@(0,r4)
.global __movmem_i4_even
.global __movstr_i4_even
.set __movstr_i4_even, __movmem_i4_even
.global __movmem_i4_odd
.global __movstr_i4_odd
.set __movstr_i4_odd, __movmem_i4_odd
.global __movmemSI12_i4
.global __movstrSI12_i4
.set __movstrSI12_i4, __movmemSI12_i4
.p2align 5
L_movmem_2mod4_end:
mov.l r0,@(16,r4)
rts
mov.l r1,@(20,r4)
.p2align 2
__movmem_i4_even:
mov.l @r5+,r0
bra L_movmem_start_even
mov.l @r5+,r1
__movmem_i4_odd:
mov.l @r5+,r1
add #-4,r4
mov.l @r5+,r2
mov.l @r5+,r3
mov.l r1,@(4,r4)
mov.l r2,@(8,r4)
L_movmem_loop:
mov.l r3,@(12,r4)
dt r6
mov.l @r5+,r0
bt/s L_movmem_2mod4_end
mov.l @r5+,r1
add #16,r4
L_movmem_start_even:
mov.l @r5+,r2
mov.l @r5+,r3
mov.l r0,@r4
dt r6
mov.l r1,@(4,r4)
bf/s L_movmem_loop
mov.l r2,@(8,r4)
rts
mov.l r3,@(12,r4)
.p2align 4
__movmemSI12_i4:
mov.l @r5,r0
mov.l @(4,r5),r1
mov.l @(8,r5),r2
mov.l r0,@r4
mov.l r1,@(4,r4)
rts
mov.l r2,@(8,r4)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,668
|
arch/sh/lib/memset-sh4.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* "memset" implementation for SH4
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (c) 2009 STMicroelectronics Limited
* Author: Stuart Menefy <stuart.menefy:st.com>
*/
/*
* void *memset(void *s, int c, size_t n);
*/
#include <linux/linkage.h>
/*
 * memset(dest = r4, value = r5, len = r6) -> r0 = dest
 *
 * SH-4 memset.  Works backwards from dest+len: aligns the end pointer,
 * replicates the byte value into a full longword, and for >= 64 bytes
 * fills 32-byte cache lines with movca.l (write-allocate without read).
 * Short runs fall through to byte/8-byte loops.
 */
ENTRY(memset)
mov #12,r0
add r6,r4 ! r4 = one past the end; stores run downwards
cmp/gt r6,r0
bt/s 40f ! if it's too small, set a byte at once
mov r4,r0
and #3,r0
cmp/eq #0,r0
bt/s 2f ! It's aligned
sub r0,r6
1:
dt r0
bf/s 1b
mov.b r5,@-r4
2: ! make VVVV
extu.b r5,r5
swap.b r5,r0 ! V0
or r0,r5 ! VV
swap.w r5,r0 ! VV00
or r0,r5 ! VVVV
! Check if enough bytes need to be copied to be worth the big loop
mov #0x40, r0 ! (MT)
cmp/gt r6,r0 ! (MT) 64 > len => slow loop
bt/s 22f
mov r6,r0
! align the dst to the cache block size if necessary
mov r4, r3
mov #~(0x1f), r1
and r3, r1
cmp/eq r3, r1
bt/s 11f ! dst is already aligned
sub r1, r3 ! r3-r1 -> r3
shlr2 r3 ! number of loops
10: mov.l r5,@-r4
dt r3
bf/s 10b
add #-4, r6
11: ! dst is 32byte aligned
mov r6,r2
mov #-5,r0
shld r0,r2 ! number of loops
add #-32, r4
mov r5, r0
12:
movca.l r0,@r4 ! allocate the cache line, no read from memory
mov.l r5,@(4, r4)
mov.l r5,@(8, r4)
mov.l r5,@(12,r4)
mov.l r5,@(16,r4)
mov.l r5,@(20,r4)
add #-0x20, r6
mov.l r5,@(24,r4)
dt r2
mov.l r5,@(28,r4)
bf/s 12b
add #-32, r4
add #32, r4
mov #8, r0
cmp/ge r0, r6
bf 40f
mov r6,r0
22:
shlr2 r0
shlr r0 ! r0 = r6 >> 3
3:
dt r0
mov.l r5,@-r4 ! set 8-byte at once
bf/s 3b
mov.l r5,@-r4
!
mov #7,r0
and r0,r6
! fill bytes (length may be zero)
40: tst r6,r6
bt 5f
4:
dt r6
bf/s 4b
mov.b r5,@-r4
5:
rts
mov r4,r0 ! return the (now fully rewound) dest pointer
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,280
|
arch/sh/lib64/copy_user_memcpy.S
|
! SPDX-License-Identifier: GPL-2.0
!
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
! SH5 code Copyright 2002 SuperH Ltd.
!
! Entry: ARG0: destination pointer
! ARG1: source pointer
! ARG2: byte count
!
! Exit: RESULT: destination pointer
! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatenate bytes
! into a longword on the SH, so this does a longword read and small
! writes.
!
! This implementation makes two assumptions about how it is called:
!
! 1.: If the byte count is nonzero, the address of the last byte to be
! copied is unsigned greater than the address of the first byte to
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to b copied, the rest of the word can be read
! without side effects.
! This could be easily changed by increasing the minimum size of
! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
! however, this would cost a few extra cyles on average.
! For SHmedia, the assumption is that any quadword can be read in its
! entirety if at least one byte is included in the copy.
/* Imported into Linux kernel by Richard Curnow. This is used to implement the
__copy_user function in the general case, so it has to be a distinct
function from intra-kernel memcpy to allow for exception fix-ups in the
event that the user pointer is bad somewhere in the copy (e.g. due to
running off the end of the vma).
Note, this algorithm will be slightly wasteful in the case where the source
and destination pointers are equally aligned, because the stlo/sthi pairs
could then be merged back into single stores. If there are a lot of cache
misses, this is probably offset by the stall lengths on the preloads.
*/
/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
* erratum. The first two prefetches are nop-ed out to avoid upsetting the
* instruction counts used in the jump address calculation.
* */
/*
 * copy_user_memcpy(dest = r2, src = r3, count = r4) -- SH-5 SHmedia.
 *
 * Same algorithm as the SHmedia memcpy below, but with the prefetches
 * nop-ed out and allocos guarded by synco for the TAKum03020 erratum,
 * so it is safe to use as the __copy_user backend (faults are fixed up
 * by the caller over the copy_user_memcpy..copy_user_memcpy_end range).
 * Small copies (< 25 bytes) dispatch via a computed ptrel branch into
 * size-specific straight-line code; large copies run the cache-line loop.
 */
.section .text..SHmedia32,"ax"
.little
.balign 32
.global copy_user_memcpy
.global copy_user_memcpy_end
copy_user_memcpy:
#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
nop ! ld.b r3,0,r63 ! TAKum03020
pta/l Large,tr0
movi 25,r0
bgeu/u r4,r0,tr0
nsb r4,r0 ! classify the small count by its leading-sign-bit count
shlli r0,5,r0
movi (L1-L0+63*32 + 1) & 0xffff,r1
sub r1, r0, r0
L0: ptrel r0,tr0 ! PC-relative jump into the size-specific code
add r2,r4,r5
ptabs r18,tr1
add r3,r4,r6
blink tr0,r63
/* Rearranged to make cut2 safe */
.balign 8
L4_7: /* 4..7 byte memcpy cntd. */
stlo.l r2, 0, r0
or r6, r7, r6
sthi.l r5, -1, r6
stlo.l r5, -4, r6
blink tr1,r63
.balign 8
L1: /* 0 byte memcpy */
nop
blink tr1,r63
nop
nop
nop
nop
L2_3: /* 2 or 3 byte memcpy cntd. */
st.b r5,-1,r6
blink tr1,r63
/* 1 byte memcpy */
ld.b r3,0,r0
st.b r2,0,r0
blink tr1,r63
L8_15: /* 8..15 byte memcpy cntd. */
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
/* 2 or 3 byte memcpy */
ld.b r3,0,r0
nop ! ld.b r2,0,r63 ! TAKum03020
ld.b r3,1,r1
st.b r2,0,r0
pta/l L2_3,tr0
ld.b r6,-1,r6
st.b r2,1,r1
blink tr0, r63
/* 4 .. 7 byte memcpy */
LDUAL (r3, 0, r0, r1)
pta L4_7, tr0
ldlo.l r6, -4, r7
or r0, r1, r0
sthi.l r2, 3, r0
ldhi.l r6, -1, r6
blink tr0, r63
/* 8 .. 15 byte memcpy */
LDUAQ (r3, 0, r0, r1)
pta L8_15, tr0
ldlo.q r6, -8, r7
or r0, r1, r0
sthi.q r2, 7, r0
ldhi.q r6, -1, r6
blink tr0, r63
/* 16 .. 24 byte memcpy */
LDUAQ (r3, 0, r0, r1)
LDUAQ (r3, 8, r8, r9)
or r0, r1, r0
sthi.q r2, 7, r0
or r8, r9, r8
sthi.q r2, 15, r8
ldlo.q r6, -8, r7
ldhi.q r6, -1, r6
stlo.q r2, 8, r8
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
Large:
! ld.b r2, 0, r63 ! TAKum03020
pta/l Loop_ua, tr1
ori r3, -8, r7
sub r2, r7, r22 ! r22 = running dest cursor biased by src alignment
sub r3, r2, r6
add r2, r4, r5
ldlo.q r3, 0, r0
addi r5, -16, r5
movi 64+8, r27 ! could subtract r7 from that.
stlo.q r2, 0, r0
sthi.q r2, 7, r0
ldx.q r22, r6, r0
bgtu/l r27, r4, tr1
addi r5, -48, r27
pta/l Loop_line, tr0
addi r6, 64, r36
addi r6, -24, r19
addi r6, -16, r20
addi r6, -8, r21
Loop_line:
! ldx.q r22, r36, r63 ! TAKum03020
alloco r22, 32 ! pre-allocate the destination cache line
synco ! erratum workaround: fence after alloco
addi r22, 32, r22
ldx.q r22, r19, r23
sthi.q r22, -25, r0
ldx.q r22, r20, r24
ldx.q r22, r21, r25
stlo.q r22, -32, r0
ldx.q r22, r6, r0
sthi.q r22, -17, r23
sthi.q r22, -9, r24
sthi.q r22, -1, r25
stlo.q r22, -24, r23
stlo.q r22, -16, r24
stlo.q r22, -8, r25
bgeu r27, r22, tr0
Loop_ua:
addi r22, 8, r22
sthi.q r22, -1, r0
stlo.q r22, -8, r0
ldx.q r22, r6, r0
bgtu/l r5, r22, tr1
add r3, r4, r7
ldlo.q r7, -8, r1
sthi.q r22, 7, r0
ldhi.q r7, -1, r7
ptabs r18,tr1
stlo.q r22, 0, r0
or r1, r7, r1
sthi.q r5, 15, r1
stlo.q r5, 8, r1
blink tr1, r63
copy_user_memcpy_end:
nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,415
|
arch/sh/lib64/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
/* Modified by SuperH, Inc. September 2003 */
!
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
! SH5 code Copyright 2002 SuperH Ltd.
!
! Entry: ARG0: destination pointer
! ARG1: source pointer
! ARG2: byte count
!
! Exit: RESULT: destination pointer
! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatenate bytes
! into a longword on the SH, so this does a longword read and small
! writes.
!
! This implementation makes two assumptions about how it is called:
!
! 1.: If the byte count is nonzero, the address of the last byte to be
! copied is unsigned greater than the address of the first byte to
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to b copied, the rest of the word can be read
! without side effects.
! This could be easily changed by increasing the minimum size of
! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
! however, this would cost a few extra cyles on average.
! For SHmedia, the assumption is that any quadword can be read in its
! entirety if at least one byte is included in the copy.
!
/*
 * memcpy(dest = r2, src = r3, count = r4) -> r2 -- SH-5 SHmedia.
 *
 * Counts below 25 bytes are dispatched through a computed ptrel branch
 * (indexed by nsb of the count) into size-specific straight-line code
 * using unaligned ldlo/ldhi + stlo/sthi pairs.  Larger copies prefetch
 * with dummy loads to r63, allocate destination cache lines with
 * alloco, and copy 32 bytes per Loop_line iteration.
 */
.section .text..SHmedia32,"ax"
.globl memcpy
.type memcpy, @function
.align 5
memcpy:
#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
ld.b r3,0,r63 ! dummy load: prefetch the first source byte
pta/l Large,tr0
movi 25,r0
bgeu/u r4,r0,tr0
nsb r4,r0 ! classify the small count by its leading-sign-bit count
shlli r0,5,r0
movi (L1-L0+63*32 + 1) & 0xffff,r1
sub r1, r0, r0
L0: ptrel r0,tr0 ! PC-relative jump into the size-specific code
add r2,r4,r5
ptabs r18,tr1
add r3,r4,r6
blink tr0,r63
/* Rearranged to make cut2 safe */
.balign 8
L4_7: /* 4..7 byte memcpy cntd. */
stlo.l r2, 0, r0
or r6, r7, r6
sthi.l r5, -1, r6
stlo.l r5, -4, r6
blink tr1,r63
.balign 8
L1: /* 0 byte memcpy */
nop
blink tr1,r63
nop
nop
nop
nop
L2_3: /* 2 or 3 byte memcpy cntd. */
st.b r5,-1,r6
blink tr1,r63
/* 1 byte memcpy */
ld.b r3,0,r0
st.b r2,0,r0
blink tr1,r63
L8_15: /* 8..15 byte memcpy cntd. */
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
/* 2 or 3 byte memcpy */
ld.b r3,0,r0
ld.b r2,0,r63 ! dummy load: touch the destination line
ld.b r3,1,r1
st.b r2,0,r0
pta/l L2_3,tr0
ld.b r6,-1,r6
st.b r2,1,r1
blink tr0, r63
/* 4 .. 7 byte memcpy */
LDUAL (r3, 0, r0, r1)
pta L4_7, tr0
ldlo.l r6, -4, r7
or r0, r1, r0
sthi.l r2, 3, r0
ldhi.l r6, -1, r6
blink tr0, r63
/* 8 .. 15 byte memcpy */
LDUAQ (r3, 0, r0, r1)
pta L8_15, tr0
ldlo.q r6, -8, r7
or r0, r1, r0
sthi.q r2, 7, r0
ldhi.q r6, -1, r6
blink tr0, r63
/* 16 .. 24 byte memcpy */
LDUAQ (r3, 0, r0, r1)
LDUAQ (r3, 8, r8, r9)
or r0, r1, r0
sthi.q r2, 7, r0
or r8, r9, r8
sthi.q r2, 15, r8
ldlo.q r6, -8, r7
ldhi.q r6, -1, r6
stlo.q r2, 8, r8
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
Large:
ld.b r2, 0, r63 ! dummy load: touch the destination line
pta/l Loop_ua, tr1
ori r3, -8, r7
sub r2, r7, r22 ! r22 = running dest cursor biased by src alignment
sub r3, r2, r6
add r2, r4, r5
ldlo.q r3, 0, r0
addi r5, -16, r5
movi 64+8, r27 // could subtract r7 from that.
stlo.q r2, 0, r0
sthi.q r2, 7, r0
ldx.q r22, r6, r0
bgtu/l r27, r4, tr1
addi r5, -48, r27
pta/l Loop_line, tr0
addi r6, 64, r36
addi r6, -24, r19
addi r6, -16, r20
addi r6, -8, r21
Loop_line:
ldx.q r22, r36, r63 ! prefetch one cache line ahead
alloco r22, 32 ! pre-allocate the destination cache line
addi r22, 32, r22
ldx.q r22, r19, r23
sthi.q r22, -25, r0
ldx.q r22, r20, r24
ldx.q r22, r21, r25
stlo.q r22, -32, r0
ldx.q r22, r6, r0
sthi.q r22, -17, r23
sthi.q r22, -9, r24
sthi.q r22, -1, r25
stlo.q r22, -24, r23
stlo.q r22, -16, r24
stlo.q r22, -8, r25
bgeu r27, r22, tr0
Loop_ua:
addi r22, 8, r22
sthi.q r22, -1, r0
stlo.q r22, -8, r0
ldx.q r22, r6, r0
bgtu/l r5, r22, tr1
add r3, r4, r7
ldlo.q r7, -8, r1
sthi.q r22, 7, r0
ldhi.q r7, -1, r7
ptabs r18,tr1
stlo.q r22, 0, r0
or r1, r7, r1
sthi.q r5, 15, r1
stlo.q r5, 8, r1
blink tr1, r63
.size memcpy,.-memcpy
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,115
|
arch/sh/lib64/udivsi3.S
|
/* SPDX-License-Identifier: GPL-2.0 */
.global __udivsi3
.section .text..SHmedia32,"ax"
.align 2
/*
 inputs: r4,r5
 clobbered: r18,r19,r20,r21,r22,r25,tr0
 result in r0.
*/
/*
 * Unsigned 32-bit divide, r0 = r4 / r5 (SHmedia).
 * Builds a fixed-point reciprocal of the divisor with the multimedia
 * multiply/mac instructions, multiplies, then applies two correction
 * steps so the final quotient is exact.
 */
__udivsi3:
addz.l r5,r63,r22 ! r22 = zero-extended divisor
nsb r22,r0 ! normalization shift from leading-bit count
shlld r22,r0,r25
shlri r25,48,r25 ! r25 = top 16 bits of normalized divisor
movi 0xffffffffffffbb0c,r20 /* shift count eqiv 76 */
sub r20,r25,r21
mmulfx.w r21,r21,r19
mshflo.w r21,r63,r21
ptabs r18,tr0 ! prepare the return branch early
mmulfx.w r25,r19,r19
sub r20,r0,r0
/* bubble */
msub.w r21,r19,r19
/*
 * It would be nice for scheduling to do this add to r21 before
 * the msub.w, but we need a different value for r19 to keep
 * errors under control.
 */
addi r19,-2,r21 ! r21 = reciprocal estimate, biased low
mulu.l r4,r21,r18
mmulfx.w r19,r19,r19
shlli r21,15,r21
shlrd r18,r0,r18 ! first quotient estimate
mulu.l r18,r22,r20
mmacnfx.wl r25,r19,r21
/* bubble */
sub r4,r20,r25 ! r25 = remainder after first estimate
mulu.l r25,r21,r19
addi r0,14,r0
/* bubble */
shlrd r19,r0,r19
mulu.l r19,r22,r20
add r18,r19,r18 ! accumulate quotient correction
/* bubble */
sub.l r25,r20,r25
mulu.l r25,r21,r19
addz.l r25,r63,r25
sub r25,r22,r25
shlrd r19,r0,r19
mulu.l r19,r22,r20
addi r25,1,r25
add r18,r19,r18
cmpgt r25,r20,r25 ! final +0/+1 rounding of the quotient
add.l r18,r25,r0
blink tr0,r63
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,275
|
arch/sh/lib64/udivdi3.S
|
/* SPDX-License-Identifier: GPL-2.0 */
.section .text..SHmedia32,"ax"
.align 2
.global __udivdi3
/*
 * Unsigned 64-bit divide, r2 = r2 / r3 (SHmedia).
 * Computes a 31-bit fixed-point reciprocal of the (normalized) divisor
 * and performs the division in staged 64:32 steps; divisors requiring
 * more than 32 significant bits take the large_divisor path.
 */
__udivdi3:
shlri r3,1,r4
nsb r4,r22 ! normalization shift for the divisor
shlld r3,r22,r6
shlri r6,49,r5 ! r5 = top bits of normalized divisor
movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */
sub r21,r5,r1
mmulfx.w r1,r1,r4
mshflo.w r1,r63,r1
sub r63,r22,r20 // r63 == 64 % 64
mmulfx.w r5,r4,r4
pta large_divisor,tr0
addi r20,32,r9
msub.w r1,r4,r1
madd.w r1,r1,r1
mmulfx.w r1,r1,r4
shlri r6,32,r7
bgt/u r9,r63,tr0 // large_divisor
mmulfx.w r5,r4,r4
shlri r2,32+14,r19
addi r22,-31,r0
msub.w r1,r4,r1
mulu.l r1,r7,r4
addi r1,-3,r5
mulu.l r5,r19,r5
sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
the case may be, %0000000000000000 000.11111111111, still */
muls.l r1,r4,r4 /* leaving at least one sign bit. */
mulu.l r5,r3,r8
mshalds.l r1,r21,r1
shari r4,26,r4
shlld r8,r0,r8
add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
sub r2,r8,r2
/* Can do second step of 64 : 32 div now, using r1 and the rest in r2. */
shlri r2,22,r21
mulu.l r21,r1,r21
shlld r5,r0,r8
addi r20,30-22,r0
shlrd r21,r0,r21
mulu.l r21,r3,r5
add r8,r21,r8
mcmpgt.l r21,r63,r21 // See Note 1
addi r20,30,r0
mshfhi.l r63,r21,r21
sub r2,r5,r2
andc r2,r21,r2 ! clamp the rest to 32 bits (see Note 1)
/* small divisor: need a third divide step */
mulu.l r2,r1,r7
ptabs r18,tr0 ! prepare the return branch
addi r2,1,r2
shlrd r7,r0,r7
mulu.l r7,r3,r5
add r8,r7,r8
sub r2,r3,r2
cmpgt r2,r5,r5 ! final +0/+1 quotient rounding
add r8,r5,r2
/* could test r3 here to check for divide by zero. */
blink tr0,r63
large_divisor:
mmulfx.w r5,r4,r4
shlrd r2,r9,r25 ! split the dividend for the wide divisor
shlri r25,32,r8
msub.w r1,r4,r1
mulu.l r1,r7,r4
addi r1,-3,r5
mulu.l r5,r8,r5
sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
the case may be, %0000000000000000 000.11111111111, still */
muls.l r1,r4,r4 /* leaving at least one sign bit. */
shlri r5,14-1,r8
mulu.l r8,r7,r5
mshalds.l r1,r21,r1
shari r4,26,r4
add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
sub r25,r5,r25
/* Can do second step of 64 : 32 div now, using r1 and the rest in r25. */
shlri r25,22,r21
mulu.l r21,r1,r21
pta no_lo_adj,tr0
addi r22,32,r0
shlri r21,40,r21
mulu.l r21,r7,r5
add r8,r21,r8
shlld r2,r0,r2
sub r25,r5,r25
bgtu/u r7,r25,tr0 // no_lo_adj
addi r8,1,r8 ! remainder still >= divisor: bump quotient
sub r25,r7,r25
no_lo_adj:
mextr4 r2,r25,r2
/* large_divisor: only needs a few adjustments. */
mulu.l r8,r6,r5
ptabs r18,tr0
/* bubble */
cmpgtu r5,r2,r5
sub r8,r5,r2
blink tr0,r63
/* Note 1: To shift the result of the second divide stage so that the result
always fits into 32 bits, yet we still reduce the rest sufficiently
would require a lot of instructions to do the shifts just right. Using
the full 64 bit shift result to multiply with the divisor would require
four extra instructions for the upper 32 bits (shift / mulu / shift / sub).
Fortunately, if the upper 32 bits of the shift result are nonzero, we
know that the rest after taking this partial result into account will
fit into 32 bits. So we just clear the upper 32 bits of the rest if the
upper 32 bits of the partial result are nonzero. */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,975
|
arch/sh/lib64/strcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
/* Modified by SuperH, Inc. September 2003 */
! SH-5 (SHmedia) strcpy: copies the NUL-terminated string at r3 to r2
! and returns the destination pointer (left untouched in r2).
!
! Strategy: work a quadword (8 bytes) at a time.  mcmpeq.b against r63
! produces a per-byte mask that is nonzero iff the quad contains a NUL
! byte; that mask (r6) steers between the unrolled quadword loop and the
! byte-by-byte "shortstring" tail.
!
! Entry: arg0: destination
! arg1: source
! Exit: result: destination
!
! SH5 code Copyright 2002 SuperH Ltd.
! SHHI/SHLO shift towards the high/low *memory-order* end of a quad, so
! one code path serves both byte orders.
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define SHHI shlld
#define SHLO shlrd
#else
#define SHHI shlrd
#define SHLO shlld
#endif
.section .text..SHmedia32,"ax"
.globl strcpy
.type strcpy, @function
.align 5
strcpy:
pta/l shortstring,tr1
ldlo.q r3,0,r4 // first (possibly misaligned) source quad
ptabs r18,tr4 // tr4 = return address
shlli r3,3,r7 // r7 = source misalignment, in bits
addi r2, 8, r0
mcmpeq.b r4,r63,r6 // NUL-byte mask for the first quad
SHHI r6,r7,r6 // discard mask bits from before the string start
bnei/u r6,0,tr1 // shortstring
pta/l no_lddst, tr2
ori r3,-8,r23
sub r2, r23, r0
sub r3, r2, r21 // r21/r20 = src-dst offsets used by ldx.q below
addi r21, 8, r20
ldx.q r0, r21, r5
pta/l loop, tr0
ori r2,-8,r22
mcmpeq.b r5, r63, r6
bgt/u r22, r23, tr2 // no_lddst
// r22 < r23 : Need to do a load from the destination.
// r22 == r23 : Doesn't actually need to load from destination,
// but still can be handled here.
ldlo.q r2, 0, r9
movi -1, r8
SHLO r8, r7, r8 // r8 = byte mask selecting the copied part
mcmv r4, r8, r9 // merge new bytes into the destination quad
stlo.q r2, 0, r9
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
blink tr1, r63 // shortstring
no_lddst:
// r22 > r23: note that for r22 == r23 the sthi.q would clobber
// bytes before the destination region.
stlo.q r2, 0, r4
SHHI r4, r7, r4
sthi.q r0, -1, r4
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
shortstring:
! Byte-at-a-time tail: r4 holds the remaining bytes of the current quad.
#if __BYTE_ORDER != __LITTLE_ENDIAN
pta/l shortstring2,tr1
byterev r4,r4 // make the next byte the lowest byte of r4
#endif
shortstring2:
st.b r0,-8,r4
andi r4,0xff,r5 // r5 = byte just stored; zero terminates
shlri r4,8,r4
addi r0,1,r0
bnei/l r5,0,tr1
blink tr4,r63 // return
.balign 8
loop:
! Main loop: two quads per iteration, stores split into stlo/sthi pairs
! so unaligned destinations are handled without faulting.
stlo.q r0, 0, r5
ldx.q r0, r20, r4
addi r0, 16, r0
sthi.q r0, -9, r5
mcmpeq.b r4, r63, r6
bnei/u r6, 0, tr1 // shortstring
ldx.q r0, r21, r5
stlo.q r0, -8, r4
sthi.q r0, -1, r4
mcmpeq.b r5, r63, r6
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
blink tr1, r63 // shortstring
.size strcpy,.-strcpy
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,176
|
arch/sh/lib64/copy_page.S
|
/*
Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
This file is subject to the terms and conditions of the GNU General Public
License. See the file "COPYING" in the main directory of this archive
for more details.
Tight version of memcpy for the case of just copying a page.
Prefetch strategy empirically optimised against RTL simulations
of SH5-101 cut2 eval chip with Cayman board DDR memory.
Parameters:
r2 : destination effective address (start of page)
r3 : source effective address (start of page)
Always copies 4096 bytes.
Points to review.
* Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
It seems like the prefetch needs to be at least 4 lines ahead to get
the data into the cache in time, and the allocos contend with outstanding
prefetches for the same cache set, so it's better to have the numbers
different.
*/
.section .text..SHmedia32,"ax"
.little
.balign 8
.global copy_page
copy_page:
/* Copy 4096 bytes worth of data from r3 to r2.
Do prefetches 4 lines ahead.
Do alloco 2 lines ahead */
pta 1f, tr1
pta 2f, tr2
pta 3f, tr3
ptabs r18, tr0 /* tr0 = return address */
#if 0
/* TAKum03020 */
ld.q r3, 0x00, r63
ld.q r3, 0x20, r63
ld.q r3, 0x40, r63
ld.q r3, 0x60, r63
#endif
alloco r2, 0x00
synco ! TAKum03020
alloco r2, 0x20
synco ! TAKum03020
/* r6/r7/r8: end-of-page thresholds used to throttle prefetch/alloco
near the end of the copy (r8 is the loop-exit bound). */
movi 3968, r6
add r2, r6, r6
addi r6, 64, r7
addi r7, 64, r8
/* r60..r23: source-minus-destination offsets (0,8,16,24) so that the
ldx.q instructions below can index the source off r2 directly. */
sub r3, r2, r60
addi r60, 8, r61
addi r61, 8, r62
addi r62, 8, r23
addi r60, 0x80, r22 /* prefetch offset: 4 cache lines ahead */
/* Minimal code size. The extra branches inside the loop don't cost much
because they overlap with the time spent waiting for prefetches to
complete. */
1:
#if 0
/* TAKum03020 */
bge/u r2, r6, tr2 ! skip prefetch for last 4 lines
ldx.q r2, r22, r63 ! prefetch 4 lines hence
#endif
2:
bge/u r2, r7, tr3 ! skip alloco for last 2 lines
alloco r2, 0x40 ! alloc destination line 2 lines ahead
synco ! TAKum03020
3:
/* Copy 32 bytes (one cache line half) per iteration. */
ldx.q r2, r60, r36
ldx.q r2, r61, r37
ldx.q r2, r62, r38
ldx.q r2, r23, r39
st.q r2, 0, r36
st.q r2, 8, r37
st.q r2, 16, r38
st.q r2, 24, r39
addi r2, 32, r2
bgt/l r8, r2, tr1
blink tr0, r63 ! return
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,991
|
arch/sh/lib64/memset.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
/* Modified by SuperH, Inc. September 2003 */
!
! Fast SH memset
!
! by Toshiyasu Morita (tm@netcom.com)
!
! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
! Copyright 2002 SuperH Ltd.
!
! SH-5 (SHmedia) memset: fill r4 bytes at r2 with byte r3; returns r2.
! The fill byte is replicated across a quadword, then stored with
! stlo.q/sthi.q pairs so unaligned head/tail quads never fault.
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define SHHI shlld
#define SHLO shlrd
#else
#define SHHI shlrd
#define SHLO shlld
#endif
.section .text..SHmedia32,"ax"
.globl memset
.type memset, @function
.align 5
memset:
pta/l multiquad, tr0
andi r2, 7, r22 ! r22 = destination misalignment in bytes
ptabs r18, tr2 ! tr2 = return address
mshflo.b r3,r3,r3
add r4, r22, r23 ! r23 = misalignment + count
mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
movi 8, r9
bgtu/u r23, r9, tr0 // multiquad
! Short case: the whole fill fits inside one quadword.
beqi/u r4, 0, tr2 // Return with size 0 - ensures no mem accesses
ldlo.q r2, 0, r7 ! load existing bytes to preserve outside the fill
shlli r4, 2, r4
movi -1, r8
! Shift in two halves (2 * 4*size bits = 8*size bits total) so the
! shift amount never has to exceed 32 in a single operation.
SHHI r8, r4, r8
SHHI r8, r4, r8
mcmv r7, r8, r3 ! merge fill bytes with preserved bytes
stlo.q r2, 0, r3
blink tr2, r63
multiquad:
pta/l lastquad, tr0
stlo.q r2, 0, r3 ! fill (possibly partial) head quad
shlri r23, 3, r24 ! r24 = number of quads touched
add r2, r4, r5 ! r5 = end address
beqi/u r24, 1, tr0 // lastquad
pta/l loop, tr1
sub r2, r22, r25 ! r25 = aligned base address
andi r5, -8, r20 // calculate end address and
addi r20, -7*8, r8 // loop end address; This might overflow, so we need
// to use a different test before we start the loop
bge/u r24, r9, tr1 // loop
! 2..7 quads: store symmetrically from both ends, halving the
! remaining quad count each step.
st.q r25, 8, r3
st.q r20, -8, r3
shlri r24, 1, r24
beqi/u r24, 1, tr0 // lastquad
st.q r25, 16, r3
st.q r20, -16, r3
beqi/u r24, 2, tr0 // lastquad
st.q r25, 24, r3
st.q r20, -24, r3
lastquad:
sthi.q r5, -1, r3 ! fill (possibly partial) tail quad
blink tr2,r63
loop:
!!! alloco r25, 32 // QQQ comment out for short-term fix to SHUK #3895.
// QQQ commenting out is logically correct, but sub-optimal
// QQQ Sean McGoogan - 4th April 2003.
st.q r25, 8, r3
st.q r25, 16, r3
st.q r25, 24, r3
st.q r25, 32, r3
addi r25, 32, r25
bgeu/l r8, r25, tr1 // loop
! Epilogue: unconditionally fill the last 5 stores' worth from the end.
st.q r20, -40, r3
st.q r20, -32, r3
st.q r20, -24, r3
st.q r20, -16, r3
st.q r20, -8, r3
sthi.q r5, -1, r3
blink tr2,r63
.size memset,.-memset
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,796
|
arch/sh/lib64/sdivsi3.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* SH-5 signed 32-bit division (r4 / r5) via table-driven reciprocal
   refinement: an 8-bit table seed is refined in fixed point, then the
   quotient is produced by multiply + shift.  The fixed-point formats
   are noted per instruction as sM.N (M integer bits, N fraction bits).
   __div_table is generated by divtab.c (see defect summary below). */
.global __sdivsi3
.global __sdivsi3_1
.global __sdivsi3_2
.section .text..SHmedia32,"ax"
.align 2
/* inputs: r4,r5 */
/* clobbered: r1,r18,r19,r20,r21,r25,tr0 */
/* result in r0 */
__sdivsi3:
__sdivsi3_1:
ptb __div_table,tr0
gettr tr0,r20
/* Alternative entry: caller has already placed &__div_table in r20. */
__sdivsi3_2:
nsb r5, r1
shlld r5, r1, r25 /* normalize; [-2 ..1, 1..2) in s2.62 */
shari r25, 58, r21 /* extract 5(6) bit index (s2.4 with hole -1..1) */
/* bubble */
ldx.ub r20, r21, r19 /* u0.8 */
shari r25, 32, r25 /* normalize to s2.30 */
shlli r21, 1, r21
muls.l r25, r19, r19 /* s2.38 */
ldx.w r20, r21, r21 /* s2.14 */
ptabs r18, tr0
shari r19, 24, r19 /* truncate to s2.14 */
sub r21, r19, r19 /* some 11 bit inverse in s1.14 */
muls.l r19, r19, r21 /* u0.28 */
sub r63, r1, r1
addi r1, 92, r1 /* r1 = final right-shift count for the quotient */
muls.l r25, r21, r18 /* s2.58 */
shlli r19, 45, r19 /* multiply by two and convert to s2.58 */
/* bubble */
sub r19, r18, r18
shari r18, 28, r18 /* some 22 bit inverse in s1.30 */
muls.l r18, r25, r0 /* s2.60 */
muls.l r18, r4, r25 /* s32.30 */
/* bubble */
shari r0, 16, r19 /* s-16.44 */
muls.l r19, r18, r19 /* s-16.74 */
shari r25, 63, r0 /* r0 = sign of the raw product, used for rounding */
shari r4, 14, r18 /* s19.-14 */
shari r19, 30, r19 /* s-16.44 */
muls.l r19, r18, r19 /* s15.30 */
xor r21, r0, r21 /* You could also use the constant 1 << 27. */
add r21, r25, r21
sub r21, r19, r21
shard r21, r1, r21
sub r21, r0, r0 /* undo the sign-based rounding bias */
blink tr0, r63
/* This table has been generated by divtab.c .
Defects for bias -330:
Max defect: 6.081536e-07 at -1.000000e+00
Min defect: 2.849516e-08 at 1.030651e+00
Max 2nd step defect: 9.606539e-12 at -1.000000e+00
Min 2nd step defect: 0.000000e+00 at 0.000000e+00
Defect at 1: 1.238659e-07
Defect at -2: 1.061708e-07 */
/* Layout: entries for negative divisors sit *below* the __div_table
   label so they are reached with negative indices; positive entries
   sit above it.  Do not reorder or re-space: the .skip padding keeps
   the byte and word sub-tables at the offsets the code indexes. */
.balign 2
.type __div_table,@object
.size __div_table,128
/* negative division constants */
.word -16638
.word -17135
.word -17737
.word -18433
.word -19103
.word -19751
.word -20583
.word -21383
.word -22343
.word -23353
.word -24407
.word -25582
.word -26863
.word -28382
.word -29965
.word -31800
/* negative division factors */
.byte 66
.byte 70
.byte 75
.byte 81
.byte 87
.byte 93
.byte 101
.byte 109
.byte 119
.byte 130
.byte 142
.byte 156
.byte 172
.byte 192
.byte 214
.byte 241
.skip 16
.global __div_table
__div_table:
.skip 16
/* positive division factors */
.byte 241
.byte 214
.byte 192
.byte 172
.byte 156
.byte 142
.byte 130
.byte 119
.byte 109
.byte 101
.byte 93
.byte 87
.byte 81
.byte 75
.byte 70
.byte 66
/* positive division constants */
.word 31801
.word 29966
.word 28383
.word 26864
.word 25583
.word 24408
.word 23354
.word 22344
.word 21384
.word 20584
.word 19752
.word 19104
.word 18434
.word 17738
.word 17136
.word 16639
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,121
|
arch/sh/kernel/vsyscall/vsyscall-trapa.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Userspace system-call entry stub exported through the SH vsyscall
   page: a bare trapa #0x10, plus DWARF CFI (in .eh_frame) so debuggers
   and unwinders can step through the syscall frame. */
.text
.globl __kernel_vsyscall
.type __kernel_vsyscall,@function
__kernel_vsyscall:
.LSTART_vsyscall:
trapa #0x10 /* enter the kernel; syscall number/args set up by caller */
nop
.LEND_vsyscall:
.size __kernel_vsyscall,.-.LSTART_vsyscall
.previous
/* Minimal CIE + one FDE describing the trapa above. */
.section .eh_frame,"a",@progbits
.LCIE:
.ualong .LCIE_end - .LCIE_start
.LCIE_start:
.ualong 0 /* CIE ID */
.byte 0x1 /* Version number */
.string "zR" /* NUL-terminated augmentation string */
.uleb128 0x1 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 0x11 /* Return address register column */
.uleb128 0x1 /* Augmentation length and data */
.byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
.byte 0xc,0xf,0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
.align 2
.LCIE_end:
.ualong .LFDE_end-.LFDE_start /* Length FDE */
.LFDE_start:
.ualong .LFDE_start-.LCIE /* CIE pointer */
.ualong .LSTART_vsyscall-. /* PC-relative start address */
.ualong .LEND_vsyscall-.LSTART_vsyscall
.uleb128 0 /* Augmentation */
.align 2
.LFDE_end:
.previous
/* Get the common code for the sigreturn entry points */
#include "vsyscall-sigreturn.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,169
|
arch/sh/kernel/vsyscall/vsyscall.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
 * object prelinked to its virtual address, and with only one read-only
 * segment (that fits in one page). This script controls its layout.
 */
#include <asm/asm-offsets.h>
#ifdef CONFIG_CPU_LITTLE_ENDIAN
OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
#else
OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
#endif
OUTPUT_ARCH(sh)
/* The ELF entry point can be used to set the AT_SYSINFO value. */
ENTRY(__kernel_vsyscall);
SECTIONS
{
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
/*
 * This linker script is used both with -r and with -shared.
 * For the layouts to match, we need to skip more than enough
 * space for the dynamic symbol table et al. If this amount
 * is insufficient, ld -shared will barf. Just increase it here.
 */
. = 0x400;
.text : { *(.text) } :text =0x90909090
.note : { *(.note.*) } :text :note
.eh_frame_hdr : { *(.eh_frame_hdr ) } :text :eh_frame_hdr
.eh_frame : {
KEEP (*(.eh_frame))
LONG (0) /* terminator so the unwinder stops scanning */
} :text
.dynamic : { *(.dynamic) } :text :dynamic
/* Sections that must not end up in the DSO; collected here so a stray
 * contribution is visible rather than silently placed. */
.useless : {
*(.got.plt) *(.got)
*(.data .data.* .gnu.linkonce.d.*)
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
} :text
}
/*
 * Very old versions of ld do not recognize this name token; use the constant.
 */
#define PT_GNU_EH_FRAME 0x6474e550
/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
 * This controls what symbols we export from the DSO.
 */
VERSION
{
LINUX_2.6 {
global:
__kernel_vsyscall;
__kernel_sigreturn;
__kernel_rt_sigreturn;
local: *;
};
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,775
|
arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Signal-return trampolines for the SH vsyscall page: each loads its
   syscall number into r3 and issues trapa #0x10.  The or r0,r0 padding
   keeps the entries a fixed size; the inline 1: .short holds the
   syscall number loaded PC-relatively by mov.w.  The CFI below lets
   unwinders recognise these as signal frames ("zRS" augmentation). */
#include <asm/unistd.h>
.text
.balign 32
.globl __kernel_sigreturn
.type __kernel_sigreturn,@function
__kernel_sigreturn:
.LSTART_sigreturn:
mov.w 1f, r3 /* r3 = __NR_sigreturn */
trapa #0x10
or r0, r0
or r0, r0
or r0, r0
or r0, r0
or r0, r0
1: .short __NR_sigreturn
.LEND_sigreturn:
.size __kernel_sigreturn,.-.LSTART_sigreturn
.balign 32
.globl __kernel_rt_sigreturn
.type __kernel_rt_sigreturn,@function
__kernel_rt_sigreturn:
.LSTART_rt_sigreturn:
mov.w 1f, r3 /* r3 = __NR_rt_sigreturn */
trapa #0x10
or r0, r0
or r0, r0
or r0, r0
or r0, r0
or r0, r0
1: .short __NR_rt_sigreturn
.LEND_rt_sigreturn:
.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
.previous
/* One CIE shared by both trampolines' FDEs. */
.section .eh_frame,"a",@progbits
.LCIE1:
.ualong .LCIE1_end - .LCIE1_start
.LCIE1_start:
.ualong 0 /* CIE ID */
.byte 0x1 /* Version number */
.string "zRS" /* NUL-terminated augmentation string */
.uleb128 0x1 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 0x11 /* Return address register column */
.uleb128 0x1 /* Augmentation length and data */
.byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
.byte 0xc, 0xf, 0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
.align 2
.LCIE1_end:
.ualong .LFDE0_end-.LFDE0_start /* Length FDE0 */
.LFDE0_start:
.ualong .LFDE0_start-.LCIE1 /* CIE pointer */
.ualong .LSTART_sigreturn-. /* PC-relative start address */
.ualong .LEND_sigreturn-.LSTART_sigreturn
.uleb128 0 /* Augmentation */
.align 2
.LFDE0_end:
.ualong .LFDE1_end-.LFDE1_start /* Length FDE1 */
.LFDE1_start:
.ualong .LFDE1_start-.LCIE1 /* CIE pointer */
.ualong .LSTART_rt_sigreturn-. /* PC-relative start address */
.ualong .LEND_rt_sigreturn-.LSTART_rt_sigreturn
.uleb128 0 /* Augmentation */
.align 2
.LFDE1_end:
.previous
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,827
|
arch/sh/kernel/cpu/sh3/ex.S
|
/*
 * arch/sh/kernel/cpu/sh3/ex.S
 *
 * The SH-3 and SH-4 exception vector table.
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2003 - 2008 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Table of handler addresses, indexed by (EXPEVT >> 5); the /* NNN */
 * comments give the corresponding EXPEVT code.  Handlers that are not
 * configured in are aliased to exception_error below.
 */
#include <linux/linkage.h>
#if !defined(CONFIG_MMU)
#define tlb_miss_load exception_error
#define tlb_miss_store exception_error
#define initial_page_write exception_error
#define tlb_protection_violation_load exception_error
#define tlb_protection_violation_store exception_error
#define address_error_load exception_error
#define address_error_store exception_error
#endif
#if !defined(CONFIG_SH_FPU)
#define fpu_error_trap_handler exception_error
#endif
#if !defined(CONFIG_KGDB)
#define kgdb_handle_exception exception_error
#endif
.align 2
.data
ENTRY(exception_handling_table)
.long exception_error /* 000 */
.long exception_error
.long tlb_miss_load /* 040 */
.long tlb_miss_store
.long initial_page_write
.long tlb_protection_violation_load
.long tlb_protection_violation_store
.long address_error_load
.long address_error_store /* 100 */
.long fpu_error_trap_handler /* 120 */
.long exception_error /* 140 */
.long system_call ! Unconditional Trap /* 160 */
.long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
.long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
.long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger
.long breakpoint_trap_handler /* 1E0 */
/*
 * Pad the remainder of the table out, exceptions residing in far
 * away offsets can be manually inserted in to their appropriate
 * location via set_exception_table_{evt,vec}().
 */
.balign 4096,0,4096
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,880
|
arch/sh/kernel/cpu/sh3/entry.S
|
/*
* arch/sh/kernel/cpu/sh3/entry.S
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2003 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
! to be jumped is too far, but it causes illegal slot exception.
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
* NOTE: This code uses a convention that instructions in the delay slot
* of a transfer-control instruction are indented by an extra space, thus:
*
* jmp @k0 ! control-transfer instruction
* ldc k1, ssr ! delay slot
*
* Stack layout in 'ret_from_syscall':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
* updated in ptrace.c and ptrace.h
*
* r0
* ...
* r15 = stack pointer
* spc
* pr
* ssr
* gbr
* mach
* macl
* syscall #
*
*/
/* Offsets to the stack */
/* Byte offsets of each saved register within the pt_regs-shaped frame
   built by save_regs below; must stay in sync with ptrace.c/ptrace.h. */
OFF_R0 = 0 /* Return value. New ABI also arg4 */
OFF_R1 = 4 /* New ABI: arg5 */
OFF_R2 = 8 /* New ABI: arg6 */
OFF_R3 = 12 /* New ABI: syscall_nr */
OFF_R4 = 16 /* New ABI: arg0 */
OFF_R5 = 20 /* New ABI: arg1 */
OFF_R6 = 24 /* New ABI: arg2 */
OFF_R7 = 28 /* New ABI: arg3 */
OFF_SP = (15*4)
OFF_PC = (16*4)
OFF_SR = (16*4+8)
OFF_TRA = (16*4+6*4)
/* Aliases for bank-1 registers used while SR.RB=1 in exception context. */
#define k0 r0
#define k1 r1
#define k2 r2
#define k3 r3
#define k4 r4
#define g_imask r6 /* r6_bank1 */
#define k_g_imask r6_bank /* r6_bank1 */
#define current r7 /* r7_bank1 */
#include <asm/entry-macros.S>
/*
* Kernel mode register usage:
* k0 scratch
* k1 scratch
* k2 scratch (Exception code)
* k3 scratch (Return address)
* k4 scratch
* k5 reserved
* k6 Global Interrupt Mask (0--15 << 4)
* k7 CURRENT_THREAD_INFO (pointer to current thread info)
*/
!
! TLB Miss / Initial Page write exception handling
! _and_
! TLB hits, but the access violate the protection.
! It can be valid access, such as stack grow and/or C-O-W.
!
!
! Find the pmd/pte entry and loadtlb
! If it's not found, cause address error (SEGV)
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends *much* on C implementation.
!
#if defined(CONFIG_MMU)
/* TLB miss / protection-violation entry stubs: each sets the fault
   code in r5 (via the branch delay slot) and funnels into one of the
   common tails below. */
.align 2
ENTRY(tlb_miss_load)
bra call_handle_tlbmiss
mov #0, r5
.align 2
ENTRY(tlb_miss_store)
bra call_handle_tlbmiss
mov #FAULT_CODE_WRITE, r5
.align 2
ENTRY(initial_page_write)
bra call_handle_tlbmiss
mov #FAULT_CODE_INITIAL, r5
.align 2
ENTRY(tlb_protection_violation_load)
bra call_do_page_fault
mov #FAULT_CODE_PROT, r5
.align 2
ENTRY(tlb_protection_violation_store)
bra call_do_page_fault
mov #(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5
/* Try the fast-path C handler first: handle_tlbmiss(regs, code, addr).
   Nonzero return means it could not resolve the fault, so fall through
   to the full do_page_fault path with the original fault code. */
call_handle_tlbmiss:
mov.l 1f, r0
mov r5, r8 /* preserve fault code across the call */
mov.l @r0, r6 /* r6 = faulting address (MMU_TEA) */
mov.l 2f, r0
sts pr, r10
jsr @r0
mov r15, r4 /* arg0 = saved regs */
!
tst r0, r0
bf/s 0f
lds r10, pr
rts
nop
0:
mov r8, r5
/* Slow path: tail-call do_page_fault(regs, code, addr), returning
   through ret_from_exception. */
call_do_page_fault:
mov.l 1f, r0
mov.l @r0, r6
mov.l 3f, r0
mov.l 4f, r1
mov r15, r4
jmp @r0
lds r1, pr
.align 2
1: .long MMU_TEA
2: .long handle_tlbmiss
3: .long do_page_fault
4: .long ret_from_exception
.align 2
ENTRY(address_error_load)
bra call_dae
mov #0,r5 ! writeaccess = 0
.align 2
ENTRY(address_error_store)
bra call_dae
mov #1,r5 ! writeaccess = 1
.align 2
/* Tail-call do_address_error(regs, writeaccess, address). */
call_dae:
mov.l 1f, r0
mov.l @r0, r6 ! address
mov.l 2f, r0
jmp @r0
mov r15, r4 ! regs
.align 2
1: .long MMU_TEA
2: .long do_address_error
#endif /* CONFIG_MMU */
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
/* Pops the exception frame via restore_regs, then jumps to the BIOS
   debug vector (gdb_vbr_vector) with SSR restored in the delay slot. */
ENTRY(sh_bios_handler)
mov.l 1f, r8 /* SR bits to set while restoring (BL/RB mask) */
bsr restore_regs
nop
lds k2, pr ! restore pr
mov k4, r15
!
mov.l 2f, k0
mov.l @k0, k0
jmp @k0
ldc k3, ssr
.align 2
1: .long 0x300000f0
2: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).
! NOTE: the pop order is the exact mirror of save_regs below; the two
! must be kept in sync with the OFF_* frame layout.
ENTRY(restore_regs)
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
mov.l @r15+, r3
mov.l @r15+, r4
mov.l @r15+, r5
mov.l @r15+, r6
mov.l @r15+, r7
!
! Flip SR (sets the bits in r8, typically RB/BL) so the remaining pops
! land in the other register bank.
stc sr, r9
or r8, r9
ldc r9, sr
!
mov.l @r15+, r8
mov.l @r15+, r9
mov.l @r15+, r10
mov.l @r15+, r11
mov.l @r15+, r12
mov.l @r15+, r13
mov.l @r15+, r14
mov.l @r15+, k4 ! original stack pointer
ldc.l @r15+, spc
mov.l @r15+, k2 ! original PR
mov.l @r15+, k3 ! original SR
ldc.l @r15+, gbr
lds.l @r15+, mach
lds.l @r15+, macl
rts
add #4, r15 ! Skip syscall number
! restore_all: common exception/interrupt exit path.  Pops the whole
! frame via restore_regs, rebuilds SSR (merging the saved IMASK with
! g_imask when the saved mask was not 0xf), and rte's back.
restore_all:
mov.l 7f, r8
bsr restore_regs
nop
lds k2, pr ! restore pr
!
! Calculate new SR value
mov k3, k2 ! original SR value
mov #0xfffffff0, k1
extu.b k1, k1
not k1, k1
and k1, k2 ! Mask original SR value
!
mov k3, k0 ! Calculate IMASK-bits
shlr2 k0
and #0x3c, k0
cmp/eq #0x3c, k0
bt/s 6f
shll2 k0
mov g_imask, k0 ! saved IMASK was 0xf: use the global mask instead
!
6: or k0, k2 ! Set the IMASK-bits
ldc k2, ssr
!
mov k4, r15
rte
nop
.align 2
5: .long 0x00001000 ! DSP
7: .long 0x30000000
! common exception handler
#include "../../entry-common.S"
! Exception Vector Base
!
! Should be aligned page boundary.
!
.balign 4096,0,4096
ENTRY(vbr_base)
.long 0
!
! 0x100: General exception vector
!
.balign 256,0,256
general_exception:
bra handle_exception
sts pr, k3 ! save original pr value in k3
! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed
prepare_stack:
#ifdef CONFIG_GUSA
! Check for roll back gRB (User and Kernel)
! A gUSA atomic sequence in progress is detected via the saved r15
! (negative, top two bits set); if interrupted, rewind SPC to the
! sequence restart point (saved r0 + r15 - 2) and restore SP from r1.
mov r15, k0
shll k0
bf/s 1f
shll k0
bf/s 1f
stc spc, k1
stc r0_bank, k0
cmp/hs k0, k1 ! test k1 (saved PC) >= k0 (saved r0)
bt/s 2f
stc r1_bank, k1
add #-2, k0
add r15, k0
ldc k0, spc ! PC = saved r0 + r15 - 2
2: mov k1, r15 ! SP = r1
1:
#endif
! Switch to kernel stack if needed
stc ssr, k0 ! Is it from kernel space?
shll k0 ! Check MD bit (bit30) by shifting it into...
shll k0 ! ...the T bit
bt/s 1f ! It's a kernel to kernel transition.
mov r15, k0 ! save original stack to k0
/* User space to kernel */
mov #(THREAD_SIZE >> 10), k1
shll8 k1 ! k1 := THREAD_SIZE
shll2 k1
add current, k1
mov k1, r15 ! change to kernel stack
!
1:
rts
nop
!
! 0x400: Instruction and Data TLB miss exception vector
!
.balign 1024,0,1024
tlb_miss:
sts pr, k3 ! save original pr value in k3
! Common exception dispatch: build the stack frame (prepare_stack +
! save_regs), read the vector from EXPEVT into k2, then index
! exception_handling_table by (vector >> 3) and jump, with the
! handler returning through ret_from_exception.
handle_exception:
mova exception_data, k0
! Setup stack and save DSP context (k0 contains original r15 on return)
bsr prepare_stack
PREF(k0)
! Save registers / Switch to bank 0
mov.l 5f, k2 ! vector register address
mov.l 1f, k4 ! SR bits to clear in k4
bsr save_regs ! needs original pr value in k3
mov.l @k2, k2 ! read out vector and keep in k2
handle_exception_special:
setup_frame_reg
! Setup return address and jump to exception handler
mov.l 7f, r9 ! fetch return address
stc r2_bank, r0 ! k2 (vector)
mov.l 6f, r10
shlr2 r0
shlr r0 ! r0 = vector >> 3 = byte offset into handler table
mov.l @(r0, r10), r10
jmp @r10
lds r9, pr ! put return address in pr
.align L1_CACHE_SHIFT
! save_regs()
! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
! - switch bank
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k0 contains original stack pointer*
! k1 trashed
! k3 passes original pr*
! k4 passes SR bitmask
! BL=1 on entry, on exit BL=0.
! NOTE: push order defines the OFF_* frame layout and must mirror
! restore_regs above.
ENTRY(save_regs)
mov #-1, r1
mov.l k1, @-r15 ! set TRA (default: -1)
sts.l macl, @-r15
sts.l mach, @-r15
stc.l gbr, @-r15
stc.l ssr, @-r15
mov.l k3, @-r15 ! original pr in k3
stc.l spc, @-r15
mov.l k0, @-r15 ! original stack pointer in k0
mov.l r14, @-r15
mov.l r13, @-r15
mov.l r12, @-r15
mov.l r11, @-r15
mov.l r10, @-r15
mov.l r9, @-r15
mov.l r8, @-r15
mov.l 0f, k3 ! SR bits to set in k3
! fall-through
! save_low_regs()
! - modify SR for bank switch
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k3 passes bits to set in SR
! k4 passes bits to clear in SR
ENTRY(save_low_regs)
stc sr, r8
or k3, r8
and k4, r8
ldc r8, sr ! bank switch: r0-r7 now refer to the interrupted context
mov.l r7, @-r15
mov.l r6, @-r15
mov.l r5, @-r15
mov.l r4, @-r15
mov.l r3, @-r15
mov.l r2, @-r15
mov.l r1, @-r15
rts
mov.l r0, @-r15
!
! 0x600: Interrupt / NMI vector
!
.balign 512,0,512
! Interrupt entry: save the frame, then either dispatch a valid hard
! IRQ to do_IRQ(vector, regs), or - for events like NMI that arrive
! through INTEVT - dispatch through exception_handling_table instead.
ENTRY(handle_interrupt)
sts pr, k3 ! save original pr value in k3
mova exception_data, k0
! Setup stack and save DSP context (k0 contains original r15 on return)
bsr prepare_stack
PREF(k0)
! Save registers / Switch to bank 0
mov.l 1f, k4 ! SR bits to clear in k4
bsr save_regs ! needs original pr value in k3
mov #-1, k2 ! default vector kept in k2
setup_frame_reg
! Tell lockdep interrupts are off, unless IMASK is already at 0xf.
stc sr, r0 ! get status register
shlr2 r0
and #0x3c, r0
cmp/eq #0x3c, r0
bf 9f
TRACE_IRQS_OFF
9:
! Setup return address and jump to do_IRQ
mov.l 4f, r9 ! fetch return address
lds r9, pr ! put return address in pr
mov.l 2f, r4
mov.l 3f, r9
mov.l @r4, r4 ! pass INTEVT vector as arg0
shlr2 r4
shlr r4
mov r4, r0 ! save vector->jmp table offset for later
shlr2 r4 ! vector to IRQ# conversion
add #-0x10, r4
cmp/pz r4 ! is it a valid IRQ?
bt 10f
/*
 * We got here as a result of taking the INTEVT path for something
 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
 * path and special case the event dispatch instead. This is the
 * expected path for the NMI (and any other brilliantly implemented
 * exception), which effectively wants regular exception dispatch
 * but is unfortunately reported through INTEVT rather than
 * EXPEVT. Grr.
 */
mov.l 6f, r9
mov.l @(r0, r9), r9
jmp @r9
mov r15, r8 ! trap handlers take saved regs in r8
10:
jmp @r9 ! Off to do_IRQ() we go.
mov r15, r5 ! pass saved registers as arg1
! Placeholder handler: simply returns.
ENTRY(exception_none)
rts
nop
! Literal pool shared by the entry paths above (addressed via mova).
.align L1_CACHE_SHIFT
exception_data:
0: .long 0x000080f0 ! FD=1, IMASK=15
1: .long 0xcfffffff ! RB=0, BL=0
2: .long INTEVT
3: .long do_IRQ
4: .long ret_from_irq
5: .long EXPEVT
6: .long exception_handling_table
7: .long ret_from_exception
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,009
|
arch/sh/kernel/cpu/sh3/swsusp.S
|
/*
 * arch/sh/kernel/cpu/sh3/swsusp.S
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Hibernation (swsusp) low-level suspend/resume for SH-3 style CPUs.
 * Registers are parked in the swsusp_arch_regs_cpu0 buffer using the
 * same save_regs/restore_regs frame format as the exception code.
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#define k0 r0
#define k1 r1
#define k2 r2
#define k3 r3
#define k4 r4
! swsusp_arch_resume()
! - copy restore_pblist pages
! - restore registers from swsusp_arch_regs_cpu0
ENTRY(swsusp_arch_resume)
mov.l 1f, r15 ! use the register-save area as our stack
mov.l 2f, r4
mov.l @r4, r4 ! r4 = head of the pbe restore list
swsusp_copy_loop:
mov r4, r0
cmp/eq #0, r0
bt swsusp_restore_regs ! end of list: go restore CPU state
mov.l @(PBE_ADDRESS, r4), r2
mov.l @(PBE_ORIG_ADDRESS, r4), r5
mov #(PAGE_SIZE >> 10), r3
shll8 r3
shlr2 r3 /* PAGE_SIZE / 16 */
! Copy one page, 16 bytes per iteration (dt decrements the counter).
swsusp_copy_page:
dt r3
mov.l @r2+,r1 /* 16n+0 */
mov.l r1,@r5
add #4,r5
mov.l @r2+,r1 /* 16n+4 */
mov.l r1,@r5
add #4,r5
mov.l @r2+,r1 /* 16n+8 */
mov.l r1,@r5
add #4,r5
mov.l @r2+,r1 /* 16n+12 */
mov.l r1,@r5
bf/s swsusp_copy_page
add #4,r5
bra swsusp_copy_loop
mov.l @(PBE_NEXT, r4), r4
swsusp_restore_regs:
! BL=0: R7->R0 is bank0
mov.l 3f, r8 ! SR bits for restore_regs (RB=1)
mov.l 4f, r5
jsr @r5 ! restore_regs pops the saved frame
nop
! BL=1: R7->R0 is bank1
lds k2, pr
ldc k3, ssr
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
mov.l @r15+, r3
mov.l @r15+, r4
mov.l @r15+, r5
mov.l @r15+, r6
mov.l @r15+, r7
rte
nop
! BL=0: R7->R0 is bank0
.align 2
1: .long swsusp_arch_regs_cpu0
2: .long restore_pblist
3: .long 0x20000000 ! RB=1
4: .long restore_regs
! swsusp_arch_suspend()
! - prepare pc for resume, return from function without swsusp_save on resume
! - save registers in swsusp_arch_regs_cpu0
! - call swsusp_save write suspend image
ENTRY(swsusp_arch_suspend)
sts pr, r0 ! save pr in r0
mov r15, r2 ! save sp in r2
mov r8, r5 ! save r8 in r5
stc sr, r1
ldc r1, ssr ! save sr in ssr
mov.l 1f, r1
ldc r1, spc ! setup pc value for resuming
mov.l 5f, r15 ! use swsusp_arch_regs_cpu0 as stack
mov.l 6f, r3
add r3, r15 ! save from top of structure
! BL=0: R7->R0 is bank0
mov.l 2f, r3 ! get new SR value for bank1
mov #0, r4
mov.l 7f, r1
jsr @r1 ! switch to bank1 and save bank1 r7->r0
not r4, r4 ! delay slot: k4 = ~0 (no SR bits cleared)
! BL=1: R7->R0 is bank1
stc r2_bank, k0 ! fetch old sp from r2_bank0
mov.l 3f, k4 ! SR bits to clear in k4
mov.l 8f, k1
jsr @k1 ! switch to bank0 and save all regs
stc r0_bank, k3 ! fetch old pr from r0_bank0
! BL=0: R7->R0 is bank0
mov r2, r15 ! restore old sp
mov r5, r8 ! restore old r8
stc ssr, r1
ldc r1, sr ! restore old sr
lds r0, pr ! restore old pr
mov.l 4f, r0
jmp @r0 ! tail-call swsusp_save to write the image
nop
! Resume path lands here (via the spc set above): restore caller state
! and return 0 from swsusp_arch_suspend without calling swsusp_save.
swsusp_call_save:
mov r2, r15 ! restore old sp
mov r5, r8 ! restore old r8
lds r0, pr ! restore old pr
rts
mov #0, r0
.align 2
1: .long swsusp_call_save
2: .long 0x20000000 ! RB=1
3: .long 0xdfffffff ! RB=0
4: .long swsusp_save
5: .long swsusp_arch_regs_cpu0
6: .long SWSUSP_ARCH_REGS_SIZE
7: .long save_low_regs
8: .long save_regs
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,160
|
arch/sh/kernel/cpu/sh2a/ex.S
|
/*
 * arch/sh/kernel/cpu/sh2a/ex.S
 *
 * The SH-2A exception vector table
 *
 * Copyright (C) 2008 Yoshinori Sato
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Each vector points at a tiny generated stub that materialises its
 * vector number in r1 and branches to a shared trampoline, which in
 * turn jumps to the common exception_handler.  Each stub is 6 bytes
 * (mov.l + bra + mov) - the "* 6" in the vector table below depends
 * on that size, so do not change the stub bodies without updating it.
 */
#include <linux/linkage.h>
!
! convert Exception Vector to Exception Number
!
! exception no 0 to 255
exception_entry0:
no = 0
.rept 256
mov.l r1,@-sp
bra exception_trampoline0
mov #no,r1
no = no + 1
.endr
exception_trampoline0:
mov.l r0,@-sp
mov.l 1f,r0
extu.b r1,r1 ! mov #no sign-extends; recover the 0..255 vector number
jmp @r0
extu.w r1,r1
.align 2
1: .long exception_handler
! exception no 256 to 511
exception_entry1:
no = 0
.rept 256
mov.l r1,@-sp
bra exception_trampoline1
mov #no,r1
no = no + 1
.endr
exception_trampoline1:
mov.l r0,@-sp
extu.b r1,r1
movi20 #0x100,r0 ! second bank of vectors: bias the number by 256
add r0,r1
mov.l 1f,r0
jmp @r0
extu.w r1,r1
.align 2
1: .long exception_handler
!
! Exception Vector Base
!
.align 2
ENTRY(vbr_base)
vector = 0
.rept 256
.long exception_entry0 + vector * 6
vector = vector + 1
.endr
vector = 0
.rept 256
.long exception_entry1 + vector * 6
vector = vector + 1
.endr
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,133
|
arch/sh/kernel/cpu/sh2a/entry.S
|
/*
* arch/sh/kernel/cpu/sh2a/entry.S
*
* The SH-2A exception entry
*
* Copyright (C) 2008 Yoshinori Sato
* Based on arch/sh/kernel/cpu/sh2/entry.S
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
/* Offsets to the stack */
/* Byte offsets within the exception frame built by exception_handler;
   must stay in sync with ptrace.c/ptrace.h.  Note OFF_SR here is
   (16*4+2*4), numerically identical to the SH-3 layout's (16*4+8). */
OFF_R0 = 0 /* Return value. New ABI also arg4 */
OFF_R1 = 4 /* New ABI: arg5 */
OFF_R2 = 8 /* New ABI: arg6 */
OFF_R3 = 12 /* New ABI: syscall_nr */
OFF_R4 = 16 /* New ABI: arg0 */
OFF_R5 = 20 /* New ABI: arg1 */
OFF_R6 = 24 /* New ABI: arg2 */
OFF_R7 = 28 /* New ABI: arg3 */
OFF_SP = (15*4)
OFF_PC = (16*4)
OFF_SR = (16*4+2*4)
OFF_TRA = (16*4+6*4)
#include <asm/entry-macros.S>
! Common SH-2A exception/interrupt entry. On entry the vector stubs have
! already pushed r1 then r0, and the CPU pushed pc and sr, so the stack
! holds: r0, r1, pc, sr (sp at r0). r1 = pseudo EXPEVT/INTEVT/TRA vector.
! Builds a full pt_regs frame (switching to the kernel stack if the CPU
! was in user mode), then dispatches to an interrupt, trap or exception
! handler based on the vector number.
ENTRY(exception_handler)
	! stack
	! r0 <- point sp
	! r1
	! pc
	! sr
	! r0 = temporary
	! r1 = vector (pseudo EXPEVT / INTEVT / TRA)
	mov.l	r2,@-sp
	cli				! block interrupts while frame is built
	mov.l	$cpu_mode,r2
	bld.b	#6,@(0,r2)		! previous SR.MD
	bst.b	#6,@(4*4,r15)		! set cpu mode to SR.MD
	bt	1f			! was already in kernel mode
	! switch to kernel mode
	bset.b	#6,@(0,r2)		! set SR.MD
	mov.l	$current_thread_info,r2
	mov.l	@r2,r2
	mov	#(THREAD_SIZE >> 8),r0
	shll8	r0
	add	r2,r0			! r0 = kernel stack tail
	mov	r15,r2			! r2 = user stack top
	mov	r0,r15			! switch kernel stack
	mov.l	r1,@-r15		! TRA
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	mov.l	@(4*4,r2),r0
	mov.l	r0,@-r15		! original SR
	sts.l	pr,@-r15
	mov.l	@(3*4,r2),r0
	mov.l	r0,@-r15		! original PC
	mov	r2,r0
	add	#(3+2)*4,r0		! rewind r0 - r3 + exception frame
	lds	r0,pr			! pr = original SP
	movmu.l	r3,@-r15		! save regs r3..r14 + pr(=old sp) in one go
	mov	r2,r8			! r8 = previous stack top
	mov	r1,r9			! r9 = interrupt vector
	! restore previous stack
	mov.l	@r8+,r2
	mov.l	@r8+,r0
	mov.l	@r8+,r1
	bra	2f
	movml.l	r2,@-r15		! delay slot: store r0..r2 into frame
1:
	! in kernel exception: reuse the current stack, leaving room for
	! the full pt_regs frame below the existing exception frame
	mov	r15,r2
	add	#-((OFF_TRA + 4) - OFF_PC) + 5*4,r15
	movmu.l	r3,@-r15
	mov	r2,r8			! r8 = previous stack top
	mov	r1,r9			! r9 = interrupt vector
	! restore exception frame & regs
	mov.l	@r8+,r2			! old R2
	mov.l	@r8+,r0			! old R0
	mov.l	@r8+,r1			! old R1
	mov.l	@r8+,r10		! old PC
	mov.l	@r8+,r11		! old SR
	movml.l	r2,@-r15
	mov.l	r10,@(OFF_PC,r15)
	mov.l	r11,@(OFF_SR,r15)
	mov.l	r8,@(OFF_SP,r15)	! save old sp
	mov	r15,r8
	add	#OFF_TRA + 4,r8
	mov.l	r9,@-r8			! TRA
	sts.l	macl,@-r8
	sts.l	mach,@-r8
	stc.l	gbr,@-r8
	add	#-4,r8			! skip SR slot (already stored above)
	sts.l	pr,@-r8
2:
	! dispatch exception / interrupt; r9 = vector, r15 = pt_regs
	mov	#64,r8
	cmp/hs	r8,r9
	bt	interrupt_entry		! vec >= 64 is interrupt
	mov	#31,r8
	cmp/hs	r8,r9
	bt	trap_entry		! 64 > vec >= 31 is trap
	mov.l	4f,r8
	mov	r9,r4			! arg0 = vector
	shll2	r9
	add	r9,r8
	mov.l	@r8,r8			! exception handler address
	tst	r8,r8
	bf	3f
	mov.l	8f,r8			! unhandled exception
3:
	mov.l	5f,r10
	jmp	@r8
	lds	r10,pr			! delay slot: return to ret_from_exception
interrupt_entry:
	mov	r9,r4			! arg0 = irq number
	mov	r15,r5			! arg1 = pt_regs
	mov.l	7f,r8
	mov.l	6f,r9
	jmp	@r8			! do_IRQ(irq, regs)
	lds	r9,pr			! delay slot: return to ret_from_irq
	.align	2
4:	.long	exception_handling_table
5:	.long	ret_from_exception
6:	.long	ret_from_irq
7:	.long	do_IRQ
8:	.long	exception_error
trap_entry:
	mov	#0x30,r8
	cmp/ge	r8,r9			! vector 0x1f-0x2f is systemcall
	bt	1f
	mov	#0x1f,r9		! convert to unified SH2/3/4 trap number
1:
	shll2	r9			! TRA
	bra	system_call		! jump common systemcall entry
	mov	r9,r8			! delay slot
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
/*
 * Rebuild a minimal exception frame on the interrupted context's own
 * stack and transfer control to the resident debugger via the vector
 * stored at gdb_vbr_vector.
 *
 * Fix: the original code read the saved SR and SP through r2
 * (@(OFF_SR,r2) / @(OFF_SP,r2)), but r2 is never loaded with the frame
 * base on this path, so stale garbage was dereferenced.  The SH-2
 * variant of this handler performs the same two loads relative to
 * r0 = r15 (the pt_regs base); do the same here.
 */
ENTRY(sh_bios_handler)
	mov	r15,r0
	add	#(22-4)*4-4,r0
	ldc.l	@r0+,gbr		! restore gbr/mach/macl from the frame
	lds.l	@r0+,mach
	lds.l	@r0+,macl
	mov	r15,r0			! r0 = pt_regs base
	mov.l	@(OFF_SP,r0),r1		! r1 = interrupted context's stack
	mov.l	@(OFF_SR,r0),r3		! push saved SR onto that stack
	mov.l	r3,@-r1
	mov.l	@(OFF_SP,r0),r3		! then the saved SP
	mov.l	r3,@-r1
	mov	r15,r0
	add	#(22-4)*4-8,r0
	mov.l	1f,r2
	mov.l	@r2,r2			! r2 = debugger entry (via gdb_vbr_vector)
	stc	sr,r3
	mov.l	r2,@r0			! overwrite frame pc with debug entry
	mov.l	r3,@(4,r0)		! and sr with the current one
	mov.l	r1,@(8,r0)
	movml.l	@r15+,r14		! restore r0..r14
	add	#8,r15
	lds.l	@r15+, pr
	mov.l	@r15+,r15
	rte				! resume at the debugger entry
	nop
	.align	2
1:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
! Address-error trap: tail-call do_address_error(regs, writeaccess, pc)
! with the C calling convention (r4..r6 = args).
ENTRY(address_error_trap_handler)
	mov	r15,r4			! arg0: regs
	mov.l	@(OFF_PC,r15),r6	! arg2: pc (faulting instruction)
	mov.l	1f,r0
	jmp	@r0
	mov	#0,r5			! delay slot; arg1: writeaccess is unknown
	.align	2
1:	.long	do_address_error
! Return path: tear down the pt_regs frame built by exception_handler,
! rebuild a 2-word CPU exception frame (pc, sr) just below the saved SP
! and rte back to the interrupted context.
restore_all:
	stc	sr,r0
	or	#0xf0,r0
	ldc	r0,sr			! all interrupt block (same BL = 1)
	! restore special register
	! overlap exception frame
	mov	r15,r0
	add	#17*4,r0
	lds.l	@r0+,pr
	add	#4,r0			! skip the SR slot for now
	ldc.l	@r0+,gbr
	lds.l	@r0+,mach
	lds.l	@r0+,macl
	mov	r15,r0
	mov.l	$cpu_mode,r2
	bld.b	#6,@(OFF_SR,r15)	! MD bit of the SR we are returning to
	bst.b	#6,@(0,r2)		! save CPU mode
	mov.l	@(OFF_SR,r0),r1
	shll2	r1
	shlr2	r1			! clear MD bit (hardware keeps it in __cpu_mode)
	mov.l	@(OFF_SP,r0),r2
	add	#-8,r2			! make room for pc + sr
	mov.l	r2,@(OFF_SP,r0)		! point exception frame top
	mov.l	r1,@(4,r2)		! set sr
	mov.l	@(OFF_PC,r0),r1
	mov.l	r1,@r2			! set pc
	get_current_thread_info r0, r1
	mov.l	$current_thread_info,r1
	mov.l	r0,@r1			! cache thread_info for the next entry
	movml.l	@r15+,r14		! restore r0..r14
	mov.l	@r15,r15		! switch to the rebuilt exception frame
	rte
	nop
	.align	2
$current_thread_info:
	.long	__current_thread_info
$cpu_mode:
	.long	__cpu_mode
! common exception handler
#include "../../entry-common.S"
	.data
! cpu operation mode
! bit30 = MD (compatible SH3/4); mirrors SR.MD since SH-2A has no
! software-visible MD bit of its own.
__cpu_mode:
	.long	0x40000000
	.section	.bss
! cached pointer to the current task's thread_info (set on return path)
__current_thread_info:
	.long	0
! per-vector handler pointers, filled in at runtime (32 entries)
ENTRY(exception_handling_table)
	.space	4*32
! ---------------------------------------------------------------------------
! Next file: arch/sh/kernel/cpu/sh2/entry.S
! (repo AirFortressIlikara/LS2K0300-linux-4.19, 6,932 bytes)
! ---------------------------------------------------------------------------
/*
* arch/sh/kernel/cpu/sh2/entry.S
*
* The SH-2 exception entry
*
* Copyright (C) 2005-2008 Yoshinori Sato
* Copyright (C) 2005 AXE,Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
/* Offsets to the stack: byte offsets into the saved register frame
 * (pt_regs layout: r0..r15, then pc, pr, sr, gbr, mach, macl, tra). */
OFF_R0  =  0		/* Return value. New ABI also arg4 */
OFF_R1  =  4     	/* New ABI: arg5 */
OFF_R2  =  8     	/* New ABI: arg6 */
OFF_R3  =  12     	/* New ABI: syscall_nr */
OFF_R4  =  16     	/* New ABI: arg0 */
OFF_R5  =  20     	/* New ABI: arg1 */
OFF_R6  =  24     	/* New ABI: arg2 */
OFF_R7  =  28     	/* New ABI: arg3 */
OFF_SP	=  (15*4)	/* saved stack pointer */
OFF_PC  =  (16*4)	/* saved program counter */
OFF_SR	=  (16*4+2*4)	/* saved status register */
OFF_TRA	=  (16*4+6*4)	/* trap number (TRA) */
#include <asm/entry-macros.S>
! Common SH-2 exception/interrupt entry. The vector stubs pushed r1 and
! the CPU pushed pc and sr; this code additionally pushes r2/r3, builds
! a full pt_regs frame (switching to the kernel stack when the previous
! mode was user, tracked in __cpu_mode since SH-2 has no SR.MD), then
! dispatches by vector number.  On SMP the mode/thread_info variables
! are per-cpu arrays indexed via $cpuid.
ENTRY(exception_handler)
	! stack
	! r0 <- point sp
	! r1
	! pc
	! sr
	! r0 = temporary
	! r1 = vector (pseudo EXPEVT / INTEVT / TRA)
	mov.l	r2,@-sp
	mov.l	r3,@-sp
	cli				! block interrupts while frame is built
	mov.l	$cpu_mode,r2
#ifdef CONFIG_SMP
	mov.l	$cpuid,r3
	mov.l	@r3,r3
	mov.l	@r3,r3
	shll2	r3
	add	r3,r2			! r2 = &__cpu_mode[cpu]
#endif
	mov.l	@r2,r0
	mov.l	@(5*4,r15),r3		! previous SR
	or	r0,r3			! set MD
	tst	r0,r0
	bf/s	1f			! previous mode check
	mov.l	r3,@(5*4,r15)		! delay slot: update SR
	! switch to kernel mode
	mov.l	__md_bit,r0
	mov.l	r0,@r2			! enter kernel mode
	mov.l	$current_thread_info,r2
#ifdef CONFIG_SMP
	mov.l	$cpuid,r0
	mov.l	@r0,r0
	mov.l	@r0,r0
	shll2	r0
	add	r0,r2			! r2 = &__current_thread_info[cpu]
#endif
	mov.l	@r2,r2
	mov	#(THREAD_SIZE >> 8),r0
	shll8	r0
	add	r2,r0			! r0 = kernel stack tail
	mov	r15,r2			! r2 = user stack top
	mov	r0,r15			! switch kernel stack
	mov.l	r1,@-r15		! TRA
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	mov.l	@(5*4,r2),r0
	mov.l	r0,@-r15		! original SR
	sts.l	pr,@-r15
	mov.l	@(4*4,r2),r0
	mov.l	r0,@-r15		! original PC
	mov	r2,r3
	add	#(4+2)*4,r3		! rewind r0 - r3 + exception frame
	mov.l	r3,@-r15		! original SP
	mov.l	r14,@-r15
	mov.l	r13,@-r15
	mov.l	r12,@-r15
	mov.l	r11,@-r15
	mov.l	r10,@-r15
	mov.l	r9,@-r15
	mov.l	r8,@-r15
	mov.l	r7,@-r15
	mov.l	r6,@-r15
	mov.l	r5,@-r15
	mov.l	r4,@-r15
	mov	r1,r9			! save TRA
	mov	r2,r8			! copy user -> kernel stack
	mov.l	@(0,r8),r3
	mov.l	r3,@-r15
	mov.l	@(4,r8),r2
	mov.l	r2,@-r15
	mov.l	@(12,r8),r1
	mov.l	r1,@-r15
	mov.l	@(8,r8),r0
	bra	2f
	mov.l	r0,@-r15		! delay slot
1:
	! in kernel exception: extend the current stack in place
	mov	#(22-4-4-1)*4+4,r0
	mov	r15,r2
	sub	r0,r15
	mov.l	@r2+,r0			! old R3
	mov.l	r0,@-r15
	mov.l	@r2+,r0			! old R2
	mov.l	r0,@-r15
	mov.l	@(4,r2),r0		! old R1
	mov.l	r0,@-r15
	mov.l	@r2,r0			! old R0
	mov.l	r0,@-r15
	add	#8,r2
	mov.l	@r2+,r3			! old PC
	mov.l	@r2+,r0			! old SR
	add	#-4,r2			! exception frame stub (sr)
	mov.l	r1,@-r2			! TRA
	sts.l	macl, @-r2
	sts.l	mach, @-r2
	stc.l	gbr, @-r2
	mov.l	r0,@-r2			! save old SR
	sts.l	pr,@-r2
	mov.l	r3,@-r2			! save old PC
	mov	r2,r0
	add	#8*4,r0
	mov.l	r0,@-r2			! save old SP
	mov.l	r14,@-r2
	mov.l	r13,@-r2
	mov.l	r12,@-r2
	mov.l	r11,@-r2
	mov.l	r10,@-r2
	mov.l	r9,@-r2
	mov.l	r8,@-r2
	mov.l	r7,@-r2
	mov.l	r6,@-r2
	mov.l	r5,@-r2
	mov.l	r4,@-r2
	mov	r1,r9			! r9 = vector
	mov.l	@(OFF_R0,r15),r0	! reload caller's r0..r3
	mov.l	@(OFF_R1,r15),r1
	mov.l	@(OFF_R2,r15),r2
	mov.l	@(OFF_R3,r15),r3
2:
	! dispatch exception / interrupt; r9 = vector, r15 = pt_regs
	mov	#64,r8
	cmp/hs	r8,r9
	bt	interrupt_entry		! vec >= 64 is interrupt
	mov	#31,r8
	cmp/hs	r8,r9
	bt	trap_entry		! 64 > vec >= 31 is trap
#ifdef CONFIG_CPU_J2
	mov	#16,r8
	cmp/hs	r8,r9
	bt	interrupt_entry		! 31 > vec >= 16 is interrupt
#endif
	mov.l	4f,r8
	mov	r9,r4			! arg0 = vector
	shll2	r9
	add	r9,r8
	mov.l	@r8,r8			! exception handler address
	tst	r8,r8
	bf	3f
	mov.l	8f,r8			! unhandled exception
3:
	mov.l	5f,r10
	jmp	@r8
	lds	r10,pr			! delay slot: return to ret_from_exception
interrupt_entry:
	mov	r9,r4			! arg0 = irq number
	mov	r15,r5			! arg1 = pt_regs
	mov.l	6f,r9
	mov.l	7f,r8
	jmp	@r8			! do_IRQ(irq, regs)
	lds	r9,pr			! delay slot: return to ret_from_irq
	.align	2
4:	.long	exception_handling_table
5:	.long	ret_from_exception
6:	.long	ret_from_irq
7:	.long	do_IRQ
8:	.long	exception_error
trap_entry:
	mov	#0x30,r8
	cmp/ge	r8,r9			! vector 0x1f-0x2f is systemcall
	bt	1f
	mov	#0x1f,r9		! convert to unified SH2/3/4 trap number
1:
	shll2	r9			! TRA
	bra	system_call		! jump common systemcall entry
	mov	r9,r8			! delay slot
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
/* Rebuild a minimal frame on the interrupted context's stack and hand
 * control to the resident debugger via the vector at gdb_vbr_vector.
 * OFF_SR/OFF_SP exceed the 4-bit displacement range of mov.l, hence the
 * @(r0,rN) indexed addressing. */
ENTRY(sh_bios_handler)
	mov	r15,r0
	add	#(22-4)*4-4,r0
	ldc.l	@r0+,gbr		! restore gbr/mach/macl from frame
	lds.l	@r0+,mach
	lds.l	@r0+,macl
	mov	r15,r0			! r0 = pt_regs base
	mov.l	@(OFF_SP,r0),r1		! r1 = interrupted context's stack
	mov	#OFF_SR,r2
	mov.l	@(r0,r2),r3		! push saved SR onto that stack
	mov.l	r3,@-r1
	mov	#OFF_SP,r2
	mov.l	@(r0,r2),r3		! then the saved SP
	mov.l	r3,@-r1
	mov	r15,r0
	add	#(22-4)*4-8,r0
	mov.l	1f,r2
	mov.l	@r2,r2			! r2 = debugger entry address
	stc	sr,r3
	mov.l	r2,@r0			! overwrite frame pc with debug entry
	mov.l	r3,@(4,r0)		! and sr with the current one
	mov.l	r1,@(8,r0)
	mov.l	@r15+, r0		! restore all general registers
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	add	#8,r15
	lds.l	@r15+, pr
	mov.l	@r15+,r15
	rte				! resume at the debugger entry
	nop
	.align	2
1:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
! Address-error trap: tail-call do_address_error(regs, writeaccess, pc).
! OFF_PC exceeds mov.l's 4-bit displacement, so use indexed addressing.
ENTRY(address_error_trap_handler)
	mov	r15,r4			! arg0: regs
	mov	#OFF_PC,r0
	mov.l	@(r0,r15),r6		! arg2: pc (faulting instruction)
	mov.l	1f,r0
	jmp	@r0
	mov	#0,r5			! delay slot; arg1: writeaccess is unknown
	.align	2
1:	.long	do_address_error
! Return path: tear down the pt_regs frame, mirror the MD bit back into
! the (per-cpu on SMP) __cpu_mode word, rebuild a 2-word CPU exception
! frame (pc, sr) below the saved SP, and rte to the interrupted context.
restore_all:
	stc	sr,r0
	or	#0xf0,r0
	ldc	r0,sr			! all interrupt block (same BL = 1)
	! restore special register
	! overlap exception frame
	mov	r15,r0
	add	#17*4,r0
	lds.l	@r0+,pr
	add	#4,r0			! skip the SR slot for now
	ldc.l	@r0+,gbr
	lds.l	@r0+,mach
	lds.l	@r0+,macl
	mov	r15,r0
	mov.l	$cpu_mode,r2
#ifdef CONFIG_SMP
	mov.l	$cpuid,r3
	mov.l	@r3,r3
	mov.l	@r3,r3
	shll2	r3
	add	r3,r2			! r2 = &__cpu_mode[cpu]
#endif
	mov	#OFF_SR,r3
	mov.l	@(r0,r3),r1
	mov.l	__md_bit,r3
	and	r1,r3			! copy MD bit
	mov.l	r3,@r2
	shll2	r1			! clear MD bit
	shlr2	r1
	mov.l	@(OFF_SP,r0),r2
	add	#-8,r2			! make room for pc + sr
	mov.l	r2,@(OFF_SP,r0)		! point exception frame top
	mov.l	r1,@(4,r2)		! set sr
	mov	#OFF_PC,r3
	mov.l	@(r0,r3),r1
	mov.l	r1,@r2			! set pc
	get_current_thread_info r0, r1
	mov.l	$current_thread_info,r1
#ifdef CONFIG_SMP
	mov.l	$cpuid,r3
	mov.l	@r3,r3
	mov.l	@r3,r3
	shll2	r3
	add	r3,r1			! r1 = &__current_thread_info[cpu]
#endif
	mov.l	r0,@r1			! cache thread_info for the next entry
	mov.l	@r15+,r0		! restore general registers
	mov.l	@r15+,r1
	mov.l	@r15+,r2
	mov.l	@r15+,r3
	mov.l	@r15+,r4
	mov.l	@r15+,r5
	mov.l	@r15+,r6
	mov.l	@r15+,r7
	mov.l	@r15+,r8
	mov.l	@r15+,r9
	mov.l	@r15+,r10
	mov.l	@r15+,r11
	mov.l	@r15+,r12
	mov.l	@r15+,r13
	mov.l	@r15+,r14
	mov.l	@r15,r15		! switch to the rebuilt exception frame
	rte
	nop
	.align	2
__md_bit:
	.long	0x40000000
$current_thread_info:
	.long	__current_thread_info
$cpu_mode:
	.long	__cpu_mode
#ifdef CONFIG_SMP
$cpuid:
	.long	sh2_cpuid_addr
#endif
! common exception handler
#include "../../entry-common.S"
#ifdef CONFIG_NR_CPUS
#define NR_CPUS CONFIG_NR_CPUS
#else
#define NR_CPUS 1
#endif
	.data
! cpu operation mode (one word per cpu)
! bit30 = MD (compatible SH3/4); SH-2 has no hardware MD bit, so the
! kernel tracks the mode in software.
__cpu_mode:
	.rept	NR_CPUS
	.long	0x40000000
	.endr
#ifdef CONFIG_SMP
! pointer to the word holding this core's cpu id (platform overrides it)
.global	sh2_cpuid_addr
sh2_cpuid_addr:
	.long	dummy_cpuid
dummy_cpuid:
	.long	0
#endif
	.section	.bss
! cached thread_info pointer, one slot per cpu (set on return path)
__current_thread_info:
	.rept	NR_CPUS
	.long	0
	.endr
! per-vector handler pointers, filled in at runtime (32 entries)
ENTRY(exception_handling_table)
	.space	4*32
! ---------------------------------------------------------------------------
! Next file: arch/sh/kernel/cpu/shmobile/sleep.S
! (repo AirFortressIlikara/LS2K0300-linux-4.19, 7,017 bytes)
! ---------------------------------------------------------------------------
/*
* arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S
*
* Sleep mode and Standby modes support for SuperH Mobile
*
* Copyright (C) 2009 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
/*
 * Kernel mode register usage, see entry.S:
 *	k0	scratch
 *	k1	scratch
 */
#define k0	r0
#define k1	r1
/* manage self-refresh and enter standby mode. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
.balign 	4
! Enter sleep/standby. r4 = SUSP_SH_* mode flags, r5 = base of the
! on-chip memory page holding this code and its data area (SH_SLEEP_*
! offsets are relative to r5).  Runs entirely from on-chip RAM because
! SDRAM may be put into self-refresh.
ENTRY(sh_mobile_sleep_enter_start)
	/* save mode flags */
	mov.l	r4, @(SH_SLEEP_MODE, r5)
	/* save original vbr */
	stc	vbr, r0
	mov.l	r0, @(SH_SLEEP_VBR, r5)
	/* point vbr to our on-chip memory page */
	ldc	r5, vbr
	/* save return address */
	sts	pr, r0
	mov.l	r0, @(SH_SLEEP_SPC, r5)
	/* save sr */
	stc	sr, r0
	mov.l	r0, @(SH_SLEEP_SR, r5)
	/* save general purpose registers to stack if needed */
	mov.l	@(SH_SLEEP_MODE, r5), r0
	tst	#SUSP_SH_REGS, r0
	bt	skip_regs_save
	sts.l	pr, @-r15
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15
	/* make sure bank0 is selected, save low registers */
	mov.l	rb_bit, r9
	not	r9, r9			! r9 = ~RB mask
	bsr	set_sr
	mov	#0, r10			! delay slot: clear RB
	bsr	save_low_regs
	nop
	/* switch to bank 1, save low registers */
	mov.l	rb_bit, r10
	bsr	set_sr
	mov	#-1, r9			! delay slot: keep all other SR bits
	bsr	save_low_regs
	nop
	/* switch back to bank 0 */
	mov.l	rb_bit, r9
	not	r9, r9
	bsr	set_sr
	mov	#0, r10			! delay slot
skip_regs_save:
	/* save sp, also set to internal ram */
	mov.l	r15, @(SH_SLEEP_SP, r5)
	mov	r5, r15
	/* save stbcr */
	bsr	save_register
	mov	#SH_SLEEP_REG_STBCR, r0	! delay slot
	/* save mmu and cache context if needed */
	mov.l	@(SH_SLEEP_MODE, r5), r0
	tst	#SUSP_SH_MMU, r0
	bt	skip_mmu_save_disable
	/* save mmu state */
	bsr	save_register
	mov	#SH_SLEEP_REG_PTEH, r0	! delay slot (likewise below)
	bsr	save_register
	mov	#SH_SLEEP_REG_PTEL, r0
	bsr	save_register
	mov	#SH_SLEEP_REG_TTB, r0
	bsr	save_register
	mov	#SH_SLEEP_REG_TEA, r0
	bsr	save_register
	mov	#SH_SLEEP_REG_MMUCR, r0
	bsr	save_register
	mov	#SH_SLEEP_REG_PTEA, r0
	bsr	save_register
	mov	#SH_SLEEP_REG_PASCR, r0
	bsr	save_register
	mov	#SH_SLEEP_REG_IRMCR, r0
	/* invalidate TLBs and disable the MMU */
	bsr	get_register
	mov	#SH_SLEEP_REG_MMUCR, r0	! delay slot
	mov	#4, r1			! MMUCR.TI: invalidate TLBs, MMU off
	mov.l	r1, @r0
	icbi	@r0			! serialize the register write
	/* save cache registers and disable caches */
	bsr	save_register
	mov	#SH_SLEEP_REG_CCR, r0	! delay slot
	bsr	save_register
	mov	#SH_SLEEP_REG_RAMCR, r0
	bsr	get_register
	mov	#SH_SLEEP_REG_CCR, r0	! delay slot
	mov	#0, r1
	mov.l	r1, @r0
	icbi	@r0
skip_mmu_save_disable:
	/* call self-refresh entering code if needed */
	mov.l	@(SH_SLEEP_MODE, r5), r0
	tst	#SUSP_SH_SF, r0
	bt	skip_set_sf
	mov.l	@(SH_SLEEP_SF_PRE, r5), r0
	jsr	@r0
	nop
skip_set_sf:
	mov.l	@(SH_SLEEP_MODE, r5), r0
	tst	#SUSP_SH_STANDBY, r0
	bt	test_rstandby
	/* set mode to "software standby mode" */
	bra	do_sleep
	mov	#0x80, r1		! delay slot: STBCR value
test_rstandby:
	tst	#SUSP_SH_RSTANDBY, r0
	bt	test_ustandby
	/* setup BAR register */
	bsr	get_register
	mov	#SH_SLEEP_REG_BAR, r0	! delay slot
	mov.l	@(SH_SLEEP_RESUME, r5), r1
	mov.l	r1, @r0			! resume entry point for r-standby
	/* set mode to "r-standby mode" */
	bra	do_sleep
	mov	#0x20, r1		! delay slot: STBCR value
test_ustandby:
	tst	#SUSP_SH_USTANDBY, r0
	bt	force_sleep
	/* set mode to "u-standby mode" */
	bra	do_sleep
	mov	#0x10, r1		! delay slot: STBCR value
force_sleep:
	/* set mode to "sleep mode" */
	mov	#0x00, r1
do_sleep:
	/* setup and enter selected standby mode */
	bsr	get_register
	mov	#SH_SLEEP_REG_STBCR, r0	! delay slot
	mov.l	r1, @r0
again:
	sleep
	bra	again			! loop in case of a spurious wakeup
	nop
! save_register: r0 = SH_SLEEP_REG_* index. Reads the hardware register
! whose address is stored in the data area and saves its value there.
! Clobbers r1; returns with r0 unchanged.
save_register:
	add	#SH_SLEEP_BASE_ADDR, r0
	mov.l	@(r0, r5), r1		! r1 = register address
	add	#-SH_SLEEP_BASE_ADDR, r0
	mov.l	@r1, r1			! r1 = register value
	add	#SH_SLEEP_BASE_DATA, r0
	mov.l	r1, @(r0, r5)		! stash value in data area
	add	#-SH_SLEEP_BASE_DATA, r0
	rts
	nop
! get_register: r0 = SH_SLEEP_REG_* index -> r0 = register address.
get_register:
	add	#SH_SLEEP_BASE_ADDR, r0
	mov.l	@(r0, r5), r0
	rts
	nop
! set_sr: sr = (sr & r9) | r10. Used to flip the register-bank (RB) bit.
set_sr:
	stc	sr, r8
	and	r9, r8
	or	r10, r8
	ldc	r8, sr
	rts
	nop
! push r0-r7 of the currently selected bank onto the stack
save_low_regs:
	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	rts
	mov.l	r0, @-r15		! delay slot
	.balign	4
rb_bit:	.long	0x20000000 ! RB=1
ENTRY(sh_mobile_sleep_enter_end)
.balign 	4
! Resume path, entered after wakeup (possibly with only k0/k1 usable
! until SR is restored). Recovers the data-area pointer from its own PC,
! restores SR/VBR/SPC/SSR, MMU and cache state, self-refresh exit, and
! the general registers saved by the enter path, then rte's back to C.
ENTRY(sh_mobile_sleep_resume_start)
	/* figure out start address */
	bsr	0f
	nop
0:
	sts	pr, k1			! k1 = address of label 0
	mov.l	1f, k0
	and	k0, k1			! round down to the on-chip page base
	/* store pointer to data area in VBR */
	ldc	k1, vbr
	/* setup sr with saved sr */
	mov.l	@(SH_SLEEP_SR, k1), k0
	ldc	k0, sr
	/* now: user register set! */
	stc	vbr, r5			! r5 = data area base again
	/* setup spc with return address to c code */
	mov.l	@(SH_SLEEP_SPC, r5), r0
	ldc	r0, spc
	/* restore vbr */
	mov.l	@(SH_SLEEP_VBR, r5), r0
	ldc	r0, vbr
	/* setup ssr with saved sr */
	mov.l	@(SH_SLEEP_SR, r5), r0
	ldc	r0, ssr
	/* restore sp */
	mov.l   @(SH_SLEEP_SP, r5), r15
	/* restore sleep mode register */
	bsr	restore_register
	mov	#SH_SLEEP_REG_STBCR, r0	! delay slot (likewise below)
	/* call self-refresh resume code if needed */
	mov.l	@(SH_SLEEP_MODE, r5), r0
	tst	#SUSP_SH_SF, r0
	bt	skip_restore_sf
	mov.l	@(SH_SLEEP_SF_POST, r5), r0
	jsr	@r0
	nop
skip_restore_sf:
	/* restore mmu and cache state if needed */
	mov.l	@(SH_SLEEP_MODE, r5), r0
	tst	#SUSP_SH_MMU, r0
	bt	skip_restore_mmu
	/* restore mmu state */
	bsr	restore_register
	mov	#SH_SLEEP_REG_PTEH, r0
	bsr	restore_register
	mov	#SH_SLEEP_REG_PTEL, r0
	bsr	restore_register
	mov	#SH_SLEEP_REG_TTB, r0
	bsr	restore_register
	mov	#SH_SLEEP_REG_TEA, r0
	bsr	restore_register
	mov	#SH_SLEEP_REG_PTEA, r0
	bsr	restore_register
	mov	#SH_SLEEP_REG_PASCR, r0
	bsr	restore_register
	mov	#SH_SLEEP_REG_IRMCR, r0
	bsr	restore_register
	mov	#SH_SLEEP_REG_MMUCR, r0
	icbi	@r0			! serialize the register write
	/* restore cache settings */
	bsr	restore_register
	mov	#SH_SLEEP_REG_RAMCR, r0
	icbi	@r0
	bsr	restore_register
	mov	#SH_SLEEP_REG_CCR, r0
	icbi	@r0
skip_restore_mmu:
	/* restore general purpose registers if needed */
	mov.l	@(SH_SLEEP_MODE, r5), r0
	tst	#SUSP_SH_REGS, r0
	bt	skip_restore_regs
	/* switch to bank 1, restore low registers */
	mov.l	_rb_bit, r10
	bsr	_set_sr
	mov	#-1, r9			! delay slot: keep other SR bits
	bsr	restore_low_regs
	nop
	/* switch to bank0, restore low registers */
	mov.l	_rb_bit, r9
	not	r9, r9
	bsr	_set_sr
	mov	#0, r10			! delay slot: clear RB
	bsr	restore_low_regs
	nop
	/* restore the rest of the registers */
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	lds.l	@r15+, pr
skip_restore_regs:
	rte				! return to C code via spc/ssr
	nop
! restore_register: r0 = SH_SLEEP_REG_* index. Writes the value saved in
! the data area back to the hardware register. Clobbers r0, r1.
restore_register:
	add	#SH_SLEEP_BASE_DATA, r0
	mov.l	@(r0, r5), r1		! r1 = saved value
	add	#-SH_SLEEP_BASE_DATA, r0
	add	#SH_SLEEP_BASE_ADDR, r0
	mov.l	@(r0, r5), r0		! r0 = register address
	mov.l	r1, @r0
	rts
	nop
! _set_sr: sr = (sr & r9) | r10 (register-bank switching)
_set_sr:
	stc	sr, r8
	and	r9, r8
	or	r10, r8
	ldc	r8, sr
	rts
	nop
! pop r0-r7 of the currently selected bank from the stack
restore_low_regs:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	rts
	mov.l	@r15+, r7		! delay slot
	.balign	4
_rb_bit:	.long	0x20000000 ! RB=1
1:	.long	~0x7ff			! mask to the 2kB on-chip page base
ENTRY(sh_mobile_sleep_resume_end)
! ---------------------------------------------------------------------------
! Next file: arch/sh/kernel/cpu/sh5/entry.S
! (repo AirFortressIlikara/LS2K0300-linux-4.19, 47,149 bytes)
! ---------------------------------------------------------------------------
/*
* arch/sh/kernel/cpu/sh5/entry.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 - 2008 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sys.h>
#include <cpu/registers.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
/*
 * SR fields.
 */
#define SR_ASID_MASK	0x00ff0000
#define SR_FD_MASK	0x00008000
#define SR_SS		0x08000000
#define SR_BL		0x10000000
#define SR_MD		0x40000000
/*
 * Event code: internal classification of the entry cause, passed to the
 * second-level handler in r4.
 */
#define	EVENT_INTERRUPT		0
#define	EVENT_FAULT_TLB		1
#define	EVENT_FAULT_NOT_TLB	2
#define	EVENT_DEBUG		3
/* EXPEVT values */
#define	RESET_CAUSE		0x20
#define DEBUGSS_CAUSE		0x980
/*
 * Frame layout. Quad index.
 */
#define	FRAME_T(x)	FRAME_TBASE+(x*8)
#define	FRAME_R(x)	FRAME_RBASE+(x*8)
#define	FRAME_S(x)	FRAME_SBASE+(x*8)
#define FSPC		0
#define FSSR		1
#define FSYSCALL_ID	2
/* Arrange the save frame to be a multiple of 32 bytes long */
#define FRAME_SBASE	0
#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */
#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */
#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
#define FP_FRAME_BASE	0
/* Slot offsets (bytes) in reg_save_area below */
#define	SAVED_R2	0*8
#define	SAVED_R3	1*8
#define	SAVED_R4	2*8
#define	SAVED_R5	3*8
#define	SAVED_R18	4*8
#define	SAVED_R6	5*8
#define	SAVED_TR0	6*8
/* These are the registers saved in the TLB path that aren't saved in the first
   level of the normal one. */
#define	TLB_SAVED_R25	7*8
#define	TLB_SAVED_TR1	8*8
#define	TLB_SAVED_TR2	9*8
#define	TLB_SAVED_TR3	10*8
#define	TLB_SAVED_TR4	11*8
/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
   breakage otherwise. */
#define	TLB_SAVED_R0	12*8
#define	TLB_SAVED_R1	13*8
/* Mask/unmask all interrupts via SR.IMASK; clobbers r6 */
#define CLI()				\
		getcon	SR, r6;		\
		ori	r6, 0xf0, r6;	\
		putcon	r6, SR;
#define STI()				\
		getcon	SR, r6;		\
		andi	r6, ~0xf0, r6;	\
		putcon	r6, SR;
#ifdef CONFIG_PREEMPT
#	define preempt_stop()	CLI()
#else
#	define preempt_stop()
#	define resume_kernel	restore_all
#endif
	.section	.data, "aw"
#define FAST_TLBMISS_STACK_CACHELINES 4
#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
/* Register back-up area for all exceptions */
	.balign	32
	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
	 * register saves etc. */
	.fill	FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
/* This is 32 byte aligned by construction */
/* Register back-up area for all exceptions: 14 quads, indexed by the
 * SAVED_* / TLB_SAVED_* offsets defined above. */
reg_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 * reentrancy. Note this area may be accessed via physical address.
 * Align so this fits a whole single cache line, for ease of purging.
 */
	.balign 32,0,32
resvec_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.balign 32,0,32
/* Jump table of 3rd level handlers, indexed by EXPEVT/INTEVT >> 5
 * (one entry per 0x20 of event code). */
trap_jtable:
	.long	do_exception_error		/* 0x000 */
	.long	do_exception_error		/* 0x020 */
#ifdef CONFIG_MMU
	.long	tlb_miss_load			/* 0x040 */
	.long	tlb_miss_store			/* 0x060 */
#else
	.long	do_exception_error
	.long	do_exception_error
#endif
	! ARTIFICIAL pseudo-EXPEVT setting
	.long	do_debug_interrupt		/* 0x080 */
#ifdef CONFIG_MMU
	.long	tlb_miss_load			/* 0x0A0 */
	.long	tlb_miss_store			/* 0x0C0 */
#else
	.long	do_exception_error
	.long	do_exception_error
#endif
	.long	do_address_error_load		/* 0x0E0 */
	.long	do_address_error_store		/* 0x100 */
#ifdef CONFIG_SH_FPU
	.long	do_fpu_error			/* 0x120 */
#else
	.long	do_exception_error		/* 0x120 */
#endif
	.long	do_exception_error		/* 0x140 */
	.long	system_call			/* 0x160 */
	.long	do_reserved_inst		/* 0x180 */
	.long	do_illegal_slot_inst		/* 0x1A0 */
	.long	do_exception_error		/* 0x1C0 - NMI */
	.long	do_exception_error		/* 0x1E0 */
	.rept 15
		.long do_IRQ			/* 0x200 - 0x3C0 */
	.endr
	.long	do_exception_error		/* 0x3E0 */
	.rept 32
		.long do_IRQ			/* 0x400 - 0x7E0 */
	.endr
	.long	fpu_error_or_IRQA		/* 0x800 */
	.long	fpu_error_or_IRQB		/* 0x820 */
	.long	do_IRQ				/* 0x840 */
	.long	do_IRQ				/* 0x860 */
	.rept 6
		.long do_exception_error	/* 0x880 - 0x920 */
	.endr
	.long	breakpoint_trap_handler		/* 0x940 */
	.long	do_exception_error		/* 0x960 */
	.long	do_single_step			/* 0x980 */
	.rept 3
		.long do_exception_error	/* 0x9A0 - 0x9E0 */
	.endr
	.long	do_IRQ				/* 0xA00 */
	.long	do_IRQ				/* 0xA20 */
#ifdef CONFIG_MMU
	.long	itlb_miss_or_IRQ		/* 0xA40 */
#else
	.long	do_IRQ
#endif
	.long	do_IRQ				/* 0xA60 */
	.long	do_IRQ				/* 0xA80 */
#ifdef CONFIG_MMU
	.long	itlb_miss_or_IRQ		/* 0xAA0 */
#else
	.long	do_IRQ
#endif
	.long	do_exception_error		/* 0xAC0 */
	.long	do_address_error_exec		/* 0xAE0 */
	.rept 8
		.long do_exception_error	/* 0xB00 - 0xBE0 */
	.endr
	.rept 18
		.long do_IRQ			/* 0xC00 - 0xE20 */
	.endr
.section .text64, "ax"
/*
* --- Exception/Interrupt/Event Handling Section
*/
/*
* VBR and RESVEC blocks.
*
* First level handler for VBR-based exceptions.
*
* To avoid waste of space, align to the maximum text block size.
* This is assumed to be at most 128 bytes or 32 instructions.
* DO NOT EXCEED 32 instructions on the first level handlers !
*
* Also note that RESVEC is contained within the VBR block
* where the room left (1KB - TEXT_SIZE) allows placing
* the RESVEC block (at most 512B + TEXT_SIZE).
*
* So first (and only) level handler for RESVEC-based exceptions.
*
* Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
* and interrupt) we are a lot tight with register space until
* saving onto the stack frame, which is done in handle_exception().
*
*/
#define TEXT_SIZE 	128
#define BLOCK_SIZE 	1664 /* Dynamic check, 13*128 */

	.balign TEXT_SIZE
! Start of the VBR block: fixed-offset first-level handlers.  Each
! handler must fit in TEXT_SIZE bytes; the .balign directives below pin
! the architectural offsets (0x100, 0x200, 0x300, ...).
LVBR_block:
	.space 256, 0			/* Power-on class handler, */
					/* not required here       */
! VBR+0x100: general (non-TLB) exception. Stash scratch registers in
! reg_save_area, then enter handle_exception with:
!   r2 = EXPEVT, r3 = return address, r4 = event class, r5 = save area
not_a_tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1
	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3
	/* Set args for Non-debug, Not a TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3		! SHmedia return address (bit0 set)
	movi	EVENT_FAULT_NOT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP		! restore original SP before handing over
	pta	handle_exception, tr0
	blink	tr0, ZERO
	.balign 256
	! VBR+0x200
	nop
	.balign 256
	! VBR+0x300
	nop
	.balign 256
/*
* Instead of the natural .balign 1024 place RESVEC here
* respecting the final 1KB alignment.
*/
.balign TEXT_SIZE
/*
* Instead of '.space 1024-TEXT_SIZE' place the RESVEC
* block making sure the final alignment is correct.
*/
#ifdef CONFIG_MMU
! VBR+0x400: TLB miss. Try the fast in-assembly fixup first
! (do_fast_page_fault); only fall back to the generic second-level
! handler if that fails. Saves extra registers (r0/r1/r25, tr1-tr4)
! because the fast path runs C code.
tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, KCR1
	movi	reg_save_area, SP
	/* SP is guaranteed 32-byte aligned. */
	st.q	SP, TLB_SAVED_R0 , r0
	st.q	SP, TLB_SAVED_R1 , r1
	st.q	SP, SAVED_R2 , r2
	st.q	SP, SAVED_R3 , r3
	st.q	SP, SAVED_R4 , r4
	st.q	SP, SAVED_R5 , r5
	st.q	SP, SAVED_R6 , r6
	st.q	SP, SAVED_R18, r18
	/* Save R25 for safety; as/ld may want to use it to achieve the call to
	 * the code in mm/tlbmiss.c */
	st.q	SP, TLB_SAVED_R25, r25
	gettr	tr0, r2
	gettr	tr1, r3
	gettr	tr2, r4
	gettr	tr3, r5
	gettr	tr4, r18
	st.q	SP, SAVED_TR0 , r2
	st.q	SP, TLB_SAVED_TR1 , r3
	st.q	SP, TLB_SAVED_TR2 , r4
	st.q	SP, TLB_SAVED_TR3 , r5
	st.q	SP, TLB_SAVED_TR4 , r18
	pt	do_fast_page_fault, tr0
	getcon	SSR, r2
	getcon	EXPEVT, r3
	getcon	TEA, r4
	shlri	r2, 30, r2
	andi	r2, 1, r2	/* r2 = SSR.MD */
	blink	tr0, LINK	! call do_fast_page_fault(MD, EXPEVT, TEA)
	pt	fixup_to_invoke_general_handler, tr1
	/* If the fast path handler fixed the fault, just drop through quickly
	   to the restore code right away to return to the excepting context.
	 */
	bnei/u	r2, 0, tr1	! nonzero return = not fixed, go slow path
fast_tlb_miss_restore:
	ld.q	SP, SAVED_TR0, r2
	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4
	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18
	ptabs	r2, tr0
	ptabs	r3, tr1
	ptabs	r4, tr2
	ptabs	r5, tr3
	ptabs	r18, tr4
	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1
	ld.q	SP, SAVED_R2, r2
	ld.q	SP, SAVED_R3, r3
	ld.q	SP, SAVED_R4, r4
	ld.q	SP, SAVED_R5, r5
	ld.q	SP, SAVED_R6, r6
	ld.q	SP, SAVED_R18, r18
	ld.q	SP, TLB_SAVED_R25, r25
	getcon	KCR1, SP
	rte
	nop /* for safety, in case the code is run on sh5-101 cut1.x */
fixup_to_invoke_general_handler:
	/* OK, new method. Restore stuff that's not expected to get saved into
	   the 'first-level' reg save area, then just fall through to setting
	   up the registers and calling the second-level handler. */
	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
	   r25,tr1-4 and save r6 to get into the right state.  */
	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4
	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18
	ld.q	SP, TLB_SAVED_R25, r25
	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1
	ptabs/u	r3, tr1
	ptabs/u	r4, tr2
	ptabs/u	r5, tr3
	ptabs/u	r18, tr4
	/* Set args for Non-debug, TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3		! SHmedia return address (bit0 set)
	movi	EVENT_FAULT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
#else /* CONFIG_MMU */
	.balign 256
#endif
/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
DOES END UP AT VBR+0x600 */
nop
nop
nop
nop
nop
nop
.balign 256
/* VBR + 0x600 */
/*
 * interrupt: first-level entry point for external interrupts.
 * Located at VBR + 0x600 by the alignment above.  Saves the minimal
 * scratch state (r2-r6, r18, tr0) into reg_save_area, stashes the
 * original SP in KCR1, then jumps to the common second-level
 * handle_exception with the interrupt-class argument set.
 */
interrupt:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1
	/* Save other original registers into reg_save_area */
	movi  reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3
	/* Set args for interrupt class handler */
	getcon	INTEVT, r2		/* r2 = interrupt event code */
	movi	ret_from_irq, r3	/* r3 = return address for the 3rd level */
	ori	r3, 1, r3		/* set bit 0: stay in SHmedia mode */
	movi	EVENT_INTERRUPT, r4
	or	SP, ZERO, r5		/* r5 = pointer to reg_save_area */
	getcon	KCR1, SP		/* restore the original SP */
	pta	handle_exception, tr0
	blink	tr0, ZERO
.balign TEXT_SIZE /* let's waste the bare minimum */
LVBR_block_end: /* Marker. Used for total checking */
.balign 256
LRESVEC_block:
/* Panic handler. Called with MMU off. Possible causes/actions:
* - Reset: Jump to program start.
* - Single Step: Turn off Single Step & return.
* - Others: Call panic handler, passing PC as arg.
* (this may need to be extended...)
*/
/*
 * reset_or_panic: RESVEC panic/reset entry, reached with the MMU off.
 * Dispatches on EXPEVT:
 *   - RESET_CAUSE   -> jump back to _stext (physical address) and restart
 *   - DEBUGSS_CAUSE -> single_step_panic (turn off single step and resume)
 *   - anything else -> panic_stash_regs (dump registers, then spin)
 * r0, r1 and tr0 are preserved in resvec_save_area; SP is parked in DCR.
 */
reset_or_panic:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, DCR
	/* First save r0-1 and tr0, as we need to use these */
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP	/* physical address: MMU is off */
	st.q	SP, 0, r0
	st.q	SP, 8, r1
	gettr	tr0, r0
	st.q	SP, 32, r0
	/* Check cause */
	getcon	EXPEVT, r0
	movi	RESET_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if reset */
	movi	_stext-CONFIG_PAGE_OFFSET, r0
	ori	r0, 1, r0		/* bit 0 set: jump in SHmedia mode */
	ptabs	r0, tr0
	beqi	r1, 0, tr0		/* Jump to start address if reset */
	getcon	EXPEVT, r0
	movi	DEBUGSS_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if single step */
	pta	single_step_panic, tr0
	beqi	r1, 0, tr0		/* jump if single step */
	/* Now jump to where we save the registers. */
	movi	panic_stash_regs-CONFIG_PAGE_OFFSET, r1
	ptabs	r1, tr0
	blink	tr0, r63
single_step_panic:
	/* We are in a handler with Single Step set. We need to resume the
	 * handler, by turning on MMU & turning off Single Step. */
	getcon	SSR, r0
	movi	SR_MMU, r1
	or	r0, r1, r0		/* SSR.MMU = 1: re-enable translation on rte */
	movi	~SR_SS, r1
	and	r0, r1, r0		/* SSR.SS = 0: stop single stepping */
	putcon	r0, SSR
	/* Restore EXPEVT, as the rte won't do this */
	getcon	PEXPEVT, r0
	putcon	r0, EXPEVT
	/* Restore regs */
	ld.q	SP, 32, r0		/* saved tr0 */
	ptabs	r0, tr0
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1
	getcon	DCR, SP			/* original SP was parked in DCR */
	synco
	rte
.balign 256
debug_exception:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/*
	 * Single step/software_break_point first level handler.
	 * Called with MMU off, so the first thing we do is enable it
	 * by doing an rte with appropriate SSR.
	 */
	putcon	SP, DCR
	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
	/* With the MMU off, we are bypassing the cache, so purge any
	 * data that will be made stale by the following stores.
	 */
	ocbp	SP, 0
	synco
	st.q	SP, 0, r0
	st.q	SP, 8, r1
	getcon	SPC, r0
	st.q	SP, 16, r0
	getcon	SSR, r0
	st.q	SP, 24, r0
	/* Enable MMU, block exceptions, set priv mode, disable single step */
	movi	SR_MMU | SR_BL | SR_MD, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Force control to debug_exception_2 when rte is executed */
	/* NOTE: the label below is spelled "exeception" (sic); kept as-is
	 * since it matches its definition. */
	movi	debug_exeception_2, r0
	ori	r0, 1, r0	/* force SHmedia, just in case */
	putcon	r0, SPC
	getcon	DCR, SP
	synco
	rte
debug_exeception_2:
	/* Back in virtual mode now: restore the temporaries, then build the
	 * standard first-level save state and fall into handle_exception. */
	/* Restore saved regs */
	putcon	SP, KCR1
	movi	resvec_save_area, SP	/* virtual address this time */
	ld.q	SP, 24, r0
	putcon	r0, SSR
	ld.q	SP, 16, r0
	putcon	r0, SPC
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1
	/* Save other original registers into reg_save_area */
	movi  reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3
	/* Set args for debug class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_DEBUG, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
.balign 256
debug_interrupt:
	/* !!! WE COME HERE IN REAL MODE !!! */
	/* Hook-up debug interrupt to allow various debugging options to be
	 * hooked into its handler. */
	/* Save original stack pointer into KCR1 */
	synco
	putcon	SP, KCR1
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
	ocbp	SP, 0			/* purge stale cache lines: stores below bypass the cache */
	ocbp	SP, 32
	synco
	/* Save other original registers into reg_save_area thru real addresses */
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3
	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
	   them back again, so that they look like the originals
	   as far as the real handler code is concerned. */
	getcon	spc, r6
	putcon	r6, pspc
	getcon	ssr, r6
	putcon	r6, pssr
	! construct useful SR for handle_exception
	movi	3, r6
	shlli	r6, 30, r6		! r6 = MD | MMU bits (top two bits of SR)
	getcon	sr, r18
	or	r18, r6, r6
	putcon	r6, ssr
	! SSR is now the current SR with the MD and MMU bits set
	! i.e. the rte will switch back to priv mode and put
	! the mmu back on
	! construct spc
	movi	handle_exception, r18
	ori	r18, 1, r18		! for safety (do we need this?)
	putcon	r18, spc
	/* Set args for Non-debug, Not a TLB miss class handler */
	! EXPEVT==0x80 is unused, so 'steal' this value to put the
	! debug interrupt handler in the vectoring table
	movi	0x80, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_NOT_TLB, r4
	or	SP, ZERO, r5
	movi	CONFIG_PAGE_OFFSET, r6	! convert r5 (physical) back to virtual
	add	r6, r5, r5
	getcon	KCR1, SP
	synco	! for safety
	rte	! -> handle_exception, switch back to priv mode again
LRESVEC_block_end: /* Marker. Unused. */
.balign TEXT_SIZE
/*
* Second level handler for VBR-based exceptions. Pre-handler.
* In common to all stack-frame sensitive handlers.
*
* Inputs:
* (KCR0) Current [current task union]
* (KCR1) Original SP
* (r2) INTEVT/EXPEVT
* (r3) appropriate return address
* (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
* (r5) Pointer to reg_save_area
* (SP) Original SP
*
* Available registers:
* (r6)
* (r18)
* (tr0)
*
*/
handle_exception:
	/* Common 2nd level handler. */
	/* First thing we need an appropriate stack pointer */
	getcon	SSR, r6
	shlri	r6, 30, r6
	andi	r6, 1, r6		/* r6 = SSR.MD (1 = was in privileged mode) */
	pta	stack_ok, tr0
	bne	r6, ZERO, tr0		/* Original stack pointer is fine */
	/* Set stack pointer for user fault */
	getcon	KCR0, SP		/* KCR0 = current thread_info */
	movi	THREAD_SIZE, r6		/* Point to the end */
	add	SP, r6, SP		/* SP = top of the kernel stack */
stack_ok:
	/* DEBUG : check for underflow/overflow of the kernel stack */
	pta	no_underflow, tr0
	getcon	KCR0, r6
	movi	1024, r18
	add	r6, r18, r6
	bge	SP, r6, tr0	! ? below 1k from bottom of stack : danger zone
/* Just panic to cause a crash. */
bad_sp:
	ld.b	r63, 0, r6	/* deliberate NULL-deref to force a crash */
	nop
no_underflow:
	pta	bad_sp, tr0
	getcon	kcr0, r6
	movi	THREAD_SIZE, r18
	add	r18, r6, r6
	bgt	SP, r6, tr0	! sp above the stack
	/* Make some room for the BASIC frame. */
	movi	-(FRAME_SIZE), r6
	add	SP, r6, SP
/* Could do this with no stalling if we had another spare register, but the
   code below will be OK. */
	/* Copy the first-level saves (r2-r6, r18, tr0) from reg_save_area
	 * (pointed to by r5) into the pt_regs frame, interleaving loads and
	 * stores to hide load latency. */
	ld.q	r5, SAVED_R2, r6
	ld.q	r5, SAVED_R3, r18
	st.q	SP, FRAME_R(2), r6
	ld.q	r5, SAVED_R4, r6
	st.q	SP, FRAME_R(3), r18
	ld.q	r5, SAVED_R5, r18
	st.q	SP, FRAME_R(4), r6
	ld.q	r5, SAVED_R6, r6
	st.q	SP, FRAME_R(5), r18
	ld.q	r5, SAVED_R18, r18
	st.q	SP, FRAME_R(6), r6
	ld.q	r5, SAVED_TR0, r6
	st.q	SP, FRAME_R(18), r18
	st.q	SP, FRAME_T(0), r6
	/* Keep old SP around */
	getcon	KCR1, r6
	/* Save the rest of the general purpose registers */
	st.q	SP, FRAME_R(0), r0
	st.q	SP, FRAME_R(1), r1
	st.q	SP, FRAME_R(7), r7
	st.q	SP, FRAME_R(8), r8
	st.q	SP, FRAME_R(9), r9
	st.q	SP, FRAME_R(10), r10
	st.q	SP, FRAME_R(11), r11
	st.q	SP, FRAME_R(12), r12
	st.q	SP, FRAME_R(13), r13
	st.q	SP, FRAME_R(14), r14
	/* SP is somewhere else */
	st.q	SP, FRAME_R(15), r6
	st.q	SP, FRAME_R(16), r16
	st.q	SP, FRAME_R(17), r17
	/* r18 is saved earlier. */
	st.q	SP, FRAME_R(19), r19
	st.q	SP, FRAME_R(20), r20
	st.q	SP, FRAME_R(21), r21
	st.q	SP, FRAME_R(22), r22
	st.q	SP, FRAME_R(23), r23
	st.q	SP, FRAME_R(24), r24
	st.q	SP, FRAME_R(25), r25
	st.q	SP, FRAME_R(26), r26
	st.q	SP, FRAME_R(27), r27
	st.q	SP, FRAME_R(28), r28
	st.q	SP, FRAME_R(29), r29
	st.q	SP, FRAME_R(30), r30
	st.q	SP, FRAME_R(31), r31
	st.q	SP, FRAME_R(32), r32
	st.q	SP, FRAME_R(33), r33
	st.q	SP, FRAME_R(34), r34
	st.q	SP, FRAME_R(35), r35
	st.q	SP, FRAME_R(36), r36
	st.q	SP, FRAME_R(37), r37
	st.q	SP, FRAME_R(38), r38
	st.q	SP, FRAME_R(39), r39
	st.q	SP, FRAME_R(40), r40
	st.q	SP, FRAME_R(41), r41
	st.q	SP, FRAME_R(42), r42
	st.q	SP, FRAME_R(43), r43
	st.q	SP, FRAME_R(44), r44
	st.q	SP, FRAME_R(45), r45
	st.q	SP, FRAME_R(46), r46
	st.q	SP, FRAME_R(47), r47
	st.q	SP, FRAME_R(48), r48
	st.q	SP, FRAME_R(49), r49
	st.q	SP, FRAME_R(50), r50
	st.q	SP, FRAME_R(51), r51
	st.q	SP, FRAME_R(52), r52
	st.q	SP, FRAME_R(53), r53
	st.q	SP, FRAME_R(54), r54
	st.q	SP, FRAME_R(55), r55
	st.q	SP, FRAME_R(56), r56
	st.q	SP, FRAME_R(57), r57
	st.q	SP, FRAME_R(58), r58
	st.q	SP, FRAME_R(59), r59
	st.q	SP, FRAME_R(60), r60
	st.q	SP, FRAME_R(61), r61
	st.q	SP, FRAME_R(62), r62
	/*
	 * Save the S* registers.
	 */
	getcon	SSR, r61
	st.q	SP, FRAME_S(FSSR), r61
	getcon	SPC, r62
	st.q	SP, FRAME_S(FSPC), r62
	movi	-1, r62			/* Reset syscall_nr */
	st.q	SP, FRAME_S(FSYSCALL_ID), r62
	/* Save the rest of the target registers */
	gettr	tr1, r6
	st.q	SP, FRAME_T(1), r6
	gettr	tr2, r6
	st.q	SP, FRAME_T(2), r6
	gettr	tr3, r6
	st.q	SP, FRAME_T(3), r6
	gettr	tr4, r6
	st.q	SP, FRAME_T(4), r6
	gettr	tr5, r6
	st.q	SP, FRAME_T(5), r6
	gettr	tr6, r6
	st.q	SP, FRAME_T(6), r6
	gettr	tr7, r6
	st.q	SP, FRAME_T(7), r6
	! setup FP so that unwinder can wind back through nested kernel mode
	! exceptions
	add	SP, ZERO, r14
	/* For syscall and debug race condition, get TRA now */
	getcon	TRA, r5
	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
	 * Also set FD, to catch FPU usage in the kernel.
	 *
	 * benedict.gaster@superh.com 29/07/2002
	 *
	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
	 * same time change BL from 1->0, as any pending interrupt of a level
	 * higher than the previous value of IMASK will leak through and be
	 * taken unexpectedly.
	 *
	 * To avoid this we raise the IMASK and then issue another PUTCON to
	 * enable interrupts.
	 */
	getcon	SR, r6
	movi	SR_IMASK | SR_FD, r7
	or	r6, r7, r6
	putcon	r6, SR			/* step 1: IMASK=0xf, FD=1 (BL still set) */
	movi	SR_UNBLOCK_EXC, r7
	and	r6, r7, r6
	putcon	r6, SR			/* step 2: clear BL */
	/* Now call the appropriate 3rd level handler */
	or	r3, ZERO, LINK		/* r3 holds the chosen return address */
	movi	trap_jtable, r3
	shlri	r2, 3, r2		/* event code -> jtable byte offset */
	ldx.l	r2, r3, r3		/* r3 = handler address from trap_jtable */
	shlri	r2, 2, r2		/* restore r2 to the entry number */
	ptabs	r3, tr0
	or	SP, ZERO, r3		/* r3 = struct pt_regs * for the handler */
	blink	tr0, ZERO
/*
* Second level handler for VBR-based exceptions. Post-handlers.
*
* Post-handlers for interrupts (ret_from_irq), exceptions
* (ret_from_exception) and common reentrance doors (restore_all
* to get back to the original context, ret_from_syscall loop to
* check kernel exiting).
*
 * ret_with_reschedule and work_notifysig are inner labels of
 * the ret_from_syscall loop.
*
* In common to all stack-frame sensitive handlers.
*
* Inputs:
* (SP) struct pt_regs *, original register's frame pointer (basic)
*
*/
	.global ret_from_irq
/*
 * ret_from_irq: post-handler for interrupts.  If we interrupted the
 * kernel (SSR.MD set) go to resume_kernel; otherwise re-enable
 * interrupts and run the user-return work loop.
 */
ret_from_irq:
	ld.q	SP, FRAME_S(FSSR), r6
	shlri	r6, 30, r6
	andi	r6, 1, r6		/* r6 = saved SSR.MD */
	pta	resume_kernel, tr0
	bne	r6, ZERO, tr0		/* no further checks */
	STI()
	pta	ret_with_reschedule, tr0
	blink	tr0, ZERO		/* Do not check softirqs */
	.global ret_from_exception
/*
 * ret_from_exception: post-handler for exceptions.  Same shape as
 * ret_from_irq; when CONFIG_PREEMPT is set, resume_kernel additionally
 * checks preempt_count/TIF_NEED_RESCHED and may call
 * preempt_schedule_irq before restoring.
 */
ret_from_exception:
	preempt_stop()
	ld.q	SP, FRAME_S(FSSR), r6
	shlri	r6, 30, r6
	andi	r6, 1, r6		/* r6 = saved SSR.MD */
	pta	resume_kernel, tr0
	bne	r6, ZERO, tr0		/* no further checks */
	/* Check softirqs */
#ifdef CONFIG_PREEMPT
	pta   ret_from_syscall, tr0
	blink   tr0, ZERO
resume_kernel:
	CLI()
	pta	restore_all, tr0
	getcon	KCR0, r6		/* r6 = current thread_info */
	ld.l	r6, TI_PRE_COUNT, r7
	beq/u	r7, ZERO, tr0		/* non-zero preempt_count: just restore */
need_resched:
	ld.l	r6, TI_FLAGS, r7
	movi	(1 << TIF_NEED_RESCHED), r8
	and	r8, r7, r8
	bne	r8, ZERO, tr0		/* note: branch taken when NOT set? kept as-is */
	getcon	SR, r7
	andi	r7, 0xf0, r7		/* IMASK field */
	bne	r7, ZERO, tr0		/* interrupts masked: do not preempt */
	movi	preempt_schedule_irq, r7
	ori	r7, 1, r7
	ptabs	r7, tr1
	blink	tr1, LINK
	pta	need_resched, tr1
	blink	tr1, ZERO
#endif
	.global ret_from_syscall
/*
 * ret_from_syscall / ret_with_reschedule: user-return work loop.
 * Loops between schedule() (TIF_NEED_RESCHED) and do_notify_resume()
 * (TIF_SIGPENDING | TIF_NOTIFY_RESUME) until no work remains, then
 * falls to restore_all.
 */
ret_from_syscall:
ret_with_reschedule:
	getcon	KCR0, r6		! r6 contains current_thread_info
	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags
	movi	_TIF_NEED_RESCHED, r8
	and	r8, r7, r8
	pta	work_resched, tr0
	bne	r8, ZERO, tr0
	pta	restore_all, tr1
	movi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
	and	r8, r7, r8
	pta	work_notifysig, tr0
	bne	r8, ZERO, tr0
	blink	tr1, ZERO		/* no work pending: restore and return */
work_resched:
	pta	ret_from_syscall, tr0
	gettr	tr0, LINK		/* schedule() will return to the loop top */
	movi	schedule, r6
	ptabs	r6, tr0
	blink	tr0, ZERO		/* Call schedule(), return on top */
work_notifysig:
	gettr	tr1, LINK		/* return straight to restore_all */
	movi	do_notify_resume, r6
	ptabs	r6, tr0
	or	SP, ZERO, r2
	or	r7, ZERO, r3
	blink	tr0, LINK	    /* Call do_notify_resume(regs, current_thread_info->flags), return here */
/*
 * restore_all: unwind the BASIC pt_regs frame and rte back to the
 * excepting context.  Target registers first (as prefetch), then GPRs;
 * SR.BL is raised before SSR/SPC are loaded to keep nesting out, and
 * the current ASID is spliced into the restored SSR.  SP itself is
 * restored last.
 */
restore_all:
	/* Do prefetches */
	ld.q	SP, FRAME_T(0), r6
	ld.q	SP, FRAME_T(1), r7
	ld.q	SP, FRAME_T(2), r8
	ld.q	SP, FRAME_T(3), r9
	ptabs	r6, tr0
	ptabs	r7, tr1
	ptabs	r8, tr2
	ptabs	r9, tr3
	ld.q	SP, FRAME_T(4), r6
	ld.q	SP, FRAME_T(5), r7
	ld.q	SP, FRAME_T(6), r8
	ld.q	SP, FRAME_T(7), r9
	ptabs	r6, tr4
	ptabs	r7, tr5
	ptabs	r8, tr6
	ptabs	r9, tr7
	ld.q	SP, FRAME_R(0), r0
	ld.q	SP, FRAME_R(1), r1
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7
	ld.q	SP, FRAME_R(8), r8
	ld.q	SP, FRAME_R(9), r9
	ld.q	SP, FRAME_R(10), r10
	ld.q	SP, FRAME_R(11), r11
	ld.q	SP, FRAME_R(12), r12
	ld.q	SP, FRAME_R(13), r13
	ld.q	SP, FRAME_R(14), r14
	ld.q	SP, FRAME_R(16), r16
	ld.q	SP, FRAME_R(17), r17
	ld.q	SP, FRAME_R(18), r18
	ld.q	SP, FRAME_R(19), r19
	ld.q	SP, FRAME_R(20), r20
	ld.q	SP, FRAME_R(21), r21
	ld.q	SP, FRAME_R(22), r22
	ld.q	SP, FRAME_R(23), r23
	ld.q	SP, FRAME_R(24), r24
	ld.q	SP, FRAME_R(25), r25
	ld.q	SP, FRAME_R(26), r26
	ld.q	SP, FRAME_R(27), r27
	ld.q	SP, FRAME_R(28), r28
	ld.q	SP, FRAME_R(29), r29
	ld.q	SP, FRAME_R(30), r30
	ld.q	SP, FRAME_R(31), r31
	ld.q	SP, FRAME_R(32), r32
	ld.q	SP, FRAME_R(33), r33
	ld.q	SP, FRAME_R(34), r34
	ld.q	SP, FRAME_R(35), r35
	ld.q	SP, FRAME_R(36), r36
	ld.q	SP, FRAME_R(37), r37
	ld.q	SP, FRAME_R(38), r38
	ld.q	SP, FRAME_R(39), r39
	ld.q	SP, FRAME_R(40), r40
	ld.q	SP, FRAME_R(41), r41
	ld.q	SP, FRAME_R(42), r42
	ld.q	SP, FRAME_R(43), r43
	ld.q	SP, FRAME_R(44), r44
	ld.q	SP, FRAME_R(45), r45
	ld.q	SP, FRAME_R(46), r46
	ld.q	SP, FRAME_R(47), r47
	ld.q	SP, FRAME_R(48), r48
	ld.q	SP, FRAME_R(49), r49
	ld.q	SP, FRAME_R(50), r50
	ld.q	SP, FRAME_R(51), r51
	ld.q	SP, FRAME_R(52), r52
	ld.q	SP, FRAME_R(53), r53
	ld.q	SP, FRAME_R(54), r54
	ld.q	SP, FRAME_R(55), r55
	ld.q	SP, FRAME_R(56), r56
	ld.q	SP, FRAME_R(57), r57
	ld.q	SP, FRAME_R(58), r58
	/* r59-r62 are still free: use them for the SR/SSR/SPC dance below */
	getcon	SR, r59
	movi	SR_BLOCK_EXC, r60
	or	r59, r60, r59
	putcon	r59, SR /* SR.BL = 1, keep nesting out */
	ld.q	SP, FRAME_S(FSSR), r61
	ld.q	SP, FRAME_S(FSPC), r62
	movi	SR_ASID_MASK, r60
	and	r59, r60, r59
	andc	r61, r60, r61		/* Clear out older ASID */
	or	r59, r61, r61		/* Retain current ASID */
	putcon	r61, SSR
	putcon	r62, SPC
	/* Ignore FSYSCALL_ID */
	ld.q	SP, FRAME_R(59), r59
	ld.q	SP, FRAME_R(60), r60
	ld.q	SP, FRAME_R(61), r61
	ld.q	SP, FRAME_R(62), r62
	/* Last touch */
	ld.q	SP, FRAME_R(15), SP
	rte
	nop
/*
* Third level handlers for VBR-based exceptions. Adapting args to
* and/or deflecting to fourth level handlers.
*
* Fourth level handlers interface.
* Most are C-coded handlers directly pointed by the trap_jtable.
* (Third = Fourth level)
* Inputs:
* (r2) fault/interrupt code, entry number (e.g. NMI = 14,
* IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
* (r3) struct pt_regs *, original register's frame pointer
* (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
* (r5) TRA control register (for syscall/debug benefit only)
* (LINK) return address
* (SP) = r3
*
* Kernel TLB fault handlers will get a slightly different interface.
* (r2) struct pt_regs *, original register's frame pointer
* (r3) page fault error code (see asm/thread_info.h)
* (r4) Effective Address of fault
* (LINK) return address
* (SP) = r2
*
* fpu_error_or_IRQ? is a helper to deflect to the right cause.
*
*/
#ifdef CONFIG_MMU
/*
 * Third-level TLB miss adapters: translate the second-level register
 * state into the kernel-fault interface (r2 = pt_regs*, r3 = fault
 * code, r4 = effective address) and jump to do_page_fault().
 */
tlb_miss_load:
	or	SP, ZERO, r2		/* r2 = struct pt_regs * */
	or	ZERO, ZERO, r3		/* Read */
	getcon	TEA, r4			/* r4 = faulting effective address */
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0		/* unconditional branch */
tlb_miss_store:
	or	SP, ZERO, r2
	movi	FAULT_CODE_WRITE, r3		/* Write */
	getcon	TEA, r4
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0
itlb_miss_or_IRQ:
	/* The ITLB-miss vector is shared with interrupts: dispatch on the
	 * event class first. */
	pta	its_IRQ, tr0
	beqi/u	r4, EVENT_INTERRUPT, tr0
	/* ITLB miss */
	or	SP, ZERO, r2
	movi	FAULT_CODE_ITLB, r3
	getcon	TEA, r4
	/* Fall through */
call_do_page_fault:
	movi	do_page_fault, r6
        ptabs	r6, tr0
        blink	tr0, ZERO
#endif /* CONFIG_MMU */
/*
 * fpu_error_or_IRQA/B: vectors shared between FPU-disabled traps and
 * interrupts.  Interrupts go to do_IRQ; otherwise deflect to the FPU
 * lazy-restore handler (or do_exception_error when no FPU support).
 */
fpu_error_or_IRQA:
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	fpu_state_restore_trap_handler, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO
fpu_error_or_IRQB:
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	fpu_state_restore_trap_handler, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO
its_IRQ:
	movi	do_IRQ, r6
	ptabs	r6, tr0
	blink	tr0, ZERO
/*
* system_call/unknown_trap third level handler:
*
* Inputs:
* (r2) fault/interrupt code, entry number (TRAP = 11)
* (r3) struct pt_regs *, original register's frame pointer
* (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
* (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
* (SP) = r3
* (LINK) return address: ret_from_exception
* (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
*
* Outputs:
* (*r3) Syscall reply (Saved r2)
* (LINK) In case of syscall only it can be scrapped.
* Common second level post handler will be ret_from_syscall.
* Common (non-trace) exit point to that is syscall_ret (saving
* result to r2). Common bad exit point is syscall_bad (returning
* ENOSYS then saved to r2).
*
*/
unknown_trap:
	/* Unknown Trap or User Trace: call the C handler with the syscall
	 * number extracted from the saved r9 slot, then exit via syscall_ret. */
	movi	do_unknown_trapa, r6
	ptabs	r6, tr0
	ld.q	r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
	andi	r2, 0x1ff, r2		/* r2 = syscall # */
	blink	tr0, LINK
	pta	syscall_ret, tr0
	blink	tr0, ZERO
	/* New syscall implementation */
system_call:
	pta	unknown_trap, tr0
	or	r5, ZERO, r4		/* TRA (=r5) -> r4 */
	shlri	r4, 20, r4
	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */
	/* It's a system call */
	st.q	r3, FRAME_S(FSYSCALL_ID), r5	/* ID (0x1yzzzz) -> stack */
	andi	r5, 0x1ff, r5			/* syscall # -> r5 */
	STI()
	pta	syscall_allowed, tr0
	movi	NR_syscalls - 1, r4		/* Last valid */
	bgeu/l	r4, r5, tr0			/* in range: dispatch */
syscall_bad:
	/* Return ENOSYS ! */
	movi	-(ENOSYS), r2		/* Fall-through */
	.global syscall_ret
syscall_ret:
	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
/* A different return path for ret_from_fork, because we now need
* to call schedule_tail with the later kernels. Because prev is
* loaded into r2 by switch_to() means we can just call it straight away
*/
.global ret_from_fork
/*
 * ret_from_fork: first code a newly-forked task runs.  Calls
 * schedule_tail(prev) (prev is already in r2 courtesy of switch_to),
 * advances the saved PC past the trapa, then joins ret_from_syscall.
 */
ret_from_fork:
	movi	schedule_tail,r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	blink	tr0, LINK
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
.global ret_from_kernel_thread
/*
 * ret_from_kernel_thread: like ret_from_fork, but after schedule_tail
 * it invokes the thread function (saved fn in FRAME_R(3), arg in
 * FRAME_R(2)) before returning to user space via ret_from_syscall.
 */
ret_from_kernel_thread:
	movi	schedule_tail,r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	blink	tr0, LINK
	ld.q	SP, FRAME_R(2), r2	/* r2 = thread argument */
	ld.q	SP, FRAME_R(3), r3	/* r3 = thread function */
	ptabs	r3, tr0
	blink	tr0, LINK		/* call fn(arg) */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
syscall_allowed:
	/* Use LINK to deflect the exit point, default is syscall_ret */
	pta	syscall_ret, tr0
	gettr	tr0, LINK
	pta	syscall_notrace, tr0
	getcon	KCR0, r2
	ld.l	r2, TI_FLAGS, r4
	movi	_TIF_WORK_SYSCALL_MASK, r6
	and	r6, r4, r6
	beq/l	r6, ZERO, tr0		/* not traced: skip the tracer calls */
	/* Trace it by calling syscall_trace before and after */
	movi	do_syscall_trace_enter, r4
	or	SP, ZERO, r2
	ptabs	r4, tr0
	blink	tr0, LINK
	/* Save the retval */
	st.q     SP, FRAME_R(2), r2
	/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
	andi	r5, 0x1ff, r5
	pta	syscall_ret_trace, tr0
	gettr	tr0, LINK		/* traced syscalls exit via syscall_ret_trace */
syscall_notrace:
	/* Now point to the appropriate 4th level syscall handler */
	movi	sys_call_table, r4
	shlli	r5, 2, r5		/* table of 32-bit entries */
	ldx.l	r4, r5, r5
	ptabs	r5, tr0
	/* Prepare original args */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7
	/* And now the trick for those syscalls requiring regs * ! */
	or	SP, ZERO, r8
	/* Call it */
	blink	tr0, ZERO	/* LINK is already properly set */
syscall_ret_trace:
	/* We get back here only if under trace */
	st.q	SP, FRAME_R(9), r2	/* Save return value */
	movi	do_syscall_trace_leave, LINK
	or	SP, ZERO, r2		/* arg: struct pt_regs * */
	ptabs	LINK, tr0
	blink	tr0, LINK
	/* This needs to be done after any syscall tracing */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO		/* Resume normal return sequence */
/*
* --- Switch to running under a particular ASID and return the previous ASID value
* --- The caller is assumed to have done a cli before calling this.
*
* Input r2 : new ASID
* Output r2 : old ASID
*/
	.global switch_and_save_asid
switch_and_save_asid:
	/* Splice the new ASID into SR via an rte (SR itself cannot be
	 * written directly for this field), returning the old ASID. */
	getcon	sr, r0
	movi	255, r4
	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
	and  	r0, r4, r3	/* r3 = shifted old ASID */
	andi 	r2, 255, r2	/* mask down new ASID */
	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
	andc 	r0, r4, r0	/* efface old ASID from SR */
	or   	r0, r2, r0	/* insert the new ASID */
	putcon	r0, ssr
	movi	1f, r0
	putcon	r0, spc
	rte
	nop
1:
	ptabs	LINK, tr0
	shlri	r3, 16, r2	/* r2 = old ASID */
	blink 	tr0, r63
	.global	route_to_panic_handler
route_to_panic_handler:
	/* Switch to real mode, goto panic_handler, don't return.  Useful for
	 * last-chance debugging, e.g. if no output wants to go to the console.
	 */
	movi	panic_handler - CONFIG_PAGE_OFFSET, r1	/* physical address of the handler */
	ptabs	r1, tr0
	pta	1f, tr1
	gettr	tr1, r0
	putcon	r0, spc
	getcon	sr, r0
	movi	1, r1
	shlli	r1, 31, r1		/* r1 = SR.MMU bit */
	andc	r0, r1, r0		/* clear MMU: rte drops to real mode */
	putcon	r0, ssr
	rte
	nop
1:	/* Now in real mode */
	blink	tr0, r63
	nop
	.global peek_real_address_q
peek_real_address_q:
	/* Two args:
	   r2 : real mode address to peek
	   r2(out) : result quadword

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to ioremap the debug
	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/
	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
	putcon	r1, ssr
	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc
	synco
	rte
	nop
.peek0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* queue up return to virtual mode with orig SR */
	putcon	r37, spc
	/* Here's the actual peek.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	ld.q	r2, 0, r2
	synco
	rte	/* Back to virtual mode */
	nop
1:
	ptabs	LINK, tr0
	blink	tr0, r63
	.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to ioremap the debug
	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/
	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
	putcon	r1, ssr
	movi	.poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc
	synco
	rte
	nop
.poke0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* queue up return to virtual mode with orig SR */
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop
1:
	ptabs	LINK, tr0
	blink	tr0, r63
#ifdef CONFIG_MMU
/*
* --- User Access Handling Section
*/
/*
* User Access support. It all moved to non inlined Assembler
* functions in here.
*
* __kernel_size_t __copy_user(void *__to, const void *__from,
* __kernel_size_t __n)
*
* Inputs:
* (r2) target address
* (r3) source address
* (r4) size in bytes
*
 * Outputs:
* (*r2) target data
* (r2) non-copied bytes
*
* If a fault occurs on the user pointer, bail out early and return the
* number of bytes not copied in r2.
* Strategy : for large blocks, call a real memcpy function which can
* move >1 byte at a time using unaligned ld/st instructions, and can
* manipulate the cache using prefetch + alloco to improve the speed
* further. If a fault occurs in that function, just revert to the
* byte-by-byte approach used for small blocks; this is rare so the
* performance hit for that case does not matter.
*
* For small blocks it's not worth the overhead of setting up and calling
* the memcpy routine; do the copy a byte at a time.
*
*/
	.global	__copy_user
/*
 * __kernel_size_t __copy_user(void *to, const void *from, size_t n)
 * Fast path: for blocks >= 16 bytes, call copy_user_memcpy with the
 * arguments saved on the stack; if it faults, __copy_user_fixup
 * restores them and retries byte-by-byte.  Returns bytes NOT copied.
 */
__copy_user:
	pta	__copy_user_byte_by_byte, tr1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, tr1
	pta	copy_user_memcpy, tr0
	addi	SP, -32, SP
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	SP, 0, r2
	st.q	SP, 8, r3
	st.q	SP, 16, r4
	st.q	SP, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	tr0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	SP, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	SP, 32, SP
	blink	tr0, r63 ! RTS

	.global __copy_user_fixup
__copy_user_fixup:
	/* Reached from the exception fixup when copy_user_memcpy faulted:
	 * restore the saved state and fall into the slow byte copier. */
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	SP, 24, r35
	ld.q	SP, 16, r4
	ld.q	SP, 8, r3
	ld.q	SP, 0, r2
	addi	SP, 32, SP
	/* Fall through to original code, in the 'same' state we entered with */
/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	pta	___copy_user_exit, tr1
	pta	___copy_user1, tr0
	beq/u	r4, r63, tr1	/* early exit for zero length copy */
	sub	r2, r3, r0	/* r0 = to - from - 1: store target = from + r0 */
	addi	r0, -1, r0
___copy_user1:
	ld.b	r3, 0, r5		/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4	/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5		/* Fault address 2 */
	bne     r4, ZERO, tr0

___copy_user_exit:
	or	r4, ZERO, r2	/* return count of bytes not copied */
	ptabs	LINK, tr0
	blink	tr0, ZERO
/*
* __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
*
* Inputs:
* (r2) target address
* (r3) size in bytes
*
 * Outputs:
* (*r2) zero-ed target data
* (r2) non-zero-ed bytes
*/
	.global	__clear_user
/*
 * __kernel_size_t __clear_user(void *addr, size_t size)
 * Zeroes user memory a byte at a time; on fault, the __ex_table fixup
 * jumps to ___clear_user_exit so r3 (bytes remaining) becomes the
 * return value.
 */
__clear_user:
	pta	___clear_user_exit, tr1
	pta	___clear_user1, tr0
	beq/u	r3, r63, tr1	/* zero length: nothing to do */

___clear_user1:
	st.b	r2, 0, ZERO	/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3	/* No real fixup required */
	bne	r3, ZERO, tr0

___clear_user_exit:
	or	r3, ZERO, r2	/* return bytes not zeroed */
	ptabs	LINK, tr0
	blink	tr0, ZERO
#endif /* CONFIG_MMU */
/*
* extern long __get_user_asm_?(void *val, long addr)
*
* Inputs:
* (r2) dest address
* (r3) source address (in User Space)
*
 * Outputs:
* (r2) -EFAULT (faulting)
* 0 (not faulting)
*/
/*
 * __get_user_asm_{b,w,l,q}: fetch one byte/word/long/quad from user
 * space (r3) into the kernel buffer (r2).  r2 is preloaded with
 * -EFAULT; if the load faults, the __ex_table fixup skips straight to
 * the exit label so -EFAULT is returned, otherwise r2 is cleared to 0.
 */
	.global	__get_user_asm_b
__get_user_asm_b:
	or	r2, ZERO, r4	/* r4 = dest pointer; r2 becomes the reply */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5	/* r5 = data */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2	/* success */

___get_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO

	.global	__get_user_asm_w
__get_user_asm_w:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5	/* r5 = data */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO

	.global	__get_user_asm_l
__get_user_asm_l:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5	/* r5 = data */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO

	.global	__get_user_asm_q
__get_user_asm_q:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5	/* r5 = data */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
/*
* extern long __put_user_asm_?(void *pval, long addr)
*
* Inputs:
* (r2) kernel pointer to value
* (r3) dest address (in User Space)
*
 * Outputs:
* (r2) -EFAULT (faulting)
* 0 (not faulting)
*/
/*
 * __put_user_asm_{b,w,l,q}: store one byte/word/long/quad from the
 * kernel buffer (*r2) to user space (r3).  Mirror image of the
 * __get_user_asm_* routines: the user-space store is the fault point
 * and -EFAULT is pre-loaded as the reply.
 */
	.global	__put_user_asm_b
__put_user_asm_b:
	ld.b	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4
	or	ZERO, ZERO, r2	/* success */

___put_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO

	.global	__put_user_asm_w
__put_user_asm_w:
	ld.w	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO

	.global	__put_user_asm_l
__put_user_asm_l:
	ld.l	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO

	.global	__put_user_asm_q
__put_user_asm_q:
	ld.q	r2, 0, r4	/* r4 = data */
	movi	-(EFAULT), r2	/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
panic_stash_regs:
	/* The idea is : when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0  is at resvec_save_area + 0
	   former r1  is at resvec_save_area + 8
	   former tr0 is at resvec_save_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1	! former r0
	st.q	r0,  0x000, r1
	ld.q	SP, 0x008, r1	! former r1
	st.q	r0,  0x008, r1
	st.q	r0,  0x010, r2
	st.q	r0,  0x018, r3
	st.q	r0,  0x020, r4
	st.q	r0,  0x028, r5
	st.q	r0,  0x030, r6
	st.q	r0,  0x038, r7
	st.q	r0,  0x040, r8
	st.q	r0,  0x048, r9
	st.q	r0,  0x050, r10
	st.q	r0,  0x058, r11
	st.q	r0,  0x060, r12
	st.q	r0,  0x068, r13
	st.q	r0,  0x070, r14
	getcon	dcr, r14	/* former SP was parked in DCR */
	st.q	r0,  0x078, r14
	st.q	r0,  0x080, r16
	st.q	r0,  0x088, r17
	st.q	r0,  0x090, r18
	st.q	r0,  0x098, r19
	st.q	r0,  0x0a0, r20
	st.q	r0,  0x0a8, r21
	st.q	r0,  0x0b0, r22
	st.q	r0,  0x0b8, r23
	st.q	r0,  0x0c0, r24
	st.q	r0,  0x0c8, r25
	st.q	r0,  0x0d0, r26
	st.q	r0,  0x0d8, r27
	st.q	r0,  0x0e0, r28
	st.q	r0,  0x0e8, r29
	st.q	r0,  0x0f0, r30
	st.q	r0,  0x0f8, r31
	st.q	r0,  0x100, r32
	st.q	r0,  0x108, r33
	st.q	r0,  0x110, r34
	st.q	r0,  0x118, r35
	st.q	r0,  0x120, r36
	st.q	r0,  0x128, r37
	st.q	r0,  0x130, r38
	st.q	r0,  0x138, r39
	st.q	r0,  0x140, r40
	st.q	r0,  0x148, r41
	st.q	r0,  0x150, r42
	st.q	r0,  0x158, r43
	st.q	r0,  0x160, r44
	st.q	r0,  0x168, r45
	st.q	r0,  0x170, r46
	st.q	r0,  0x178, r47
	st.q	r0,  0x180, r48
	st.q	r0,  0x188, r49
	st.q	r0,  0x190, r50
	st.q	r0,  0x198, r51
	st.q	r0,  0x1a0, r52
	st.q	r0,  0x1a8, r53
	st.q	r0,  0x1b0, r54
	st.q	r0,  0x1b8, r55
	st.q	r0,  0x1c0, r56
	st.q	r0,  0x1c8, r57
	st.q	r0,  0x1d0, r58
	st.q	r0,  0x1d8, r59
	st.q	r0,  0x1e0, r60
	st.q	r0,  0x1e8, r61
	st.q	r0,  0x1f0, r62
	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...

	ld.q	SP, 0x020, r1  ! former tr0
	st.q	r0,  0x200, r1
	gettr	tr1, r1
	st.q	r0,  0x208, r1
	gettr	tr2, r1
	st.q	r0,  0x210, r1
	gettr	tr3, r1
	st.q	r0,  0x218, r1
	gettr	tr4, r1
	st.q	r0,  0x220, r1
	gettr	tr5, r1
	st.q	r0,  0x228, r1
	gettr	tr6, r1
	st.q	r0,  0x230, r1
	gettr	tr7, r1
	st.q	r0,  0x238, r1

	/* Dump the control registers after the GPR/TR banks. */
	getcon	sr,  r1
	getcon	ssr,  r2
	getcon	pssr,  r3
	getcon	spc,  r4
	getcon	pspc,  r5
	getcon	intevt,  r6
	getcon	expevt,  r7
	getcon	pexpevt,  r8
	getcon	tra,  r9
	getcon	tea,  r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr,  r13
	getcon	resvec,  r14

	st.q	r0,  0x240, r1
	st.q	r0,  0x248, r2
	st.q	r0,  0x250, r3
	st.q	r0,  0x258, r4
	st.q	r0,  0x260, r5
	st.q	r0,  0x268, r6
	st.q	r0,  0x270, r7
	st.q	r0,  0x278, r8
	st.q	r0,  0x280, r9
	st.q	r0,  0x288, r10
	st.q	r0,  0x290, r11
	st.q	r0,  0x298, r12
	st.q	r0,  0x2a0, r13
	st.q	r0,  0x2a8, r14

	getcon	SPC,r2
	getcon	SSR,r3
	getcon	EXPEVT,r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
	ori	r1, 1, r1
	ptabs	r1, tr0
	getcon	DCR, SP
	blink	tr0, ZERO
	nop
	nop
	nop
	nop
/*
* --- Signal Handling Section
*/
/*
* extern long long _sa_default_rt_restorer
* extern long long _sa_default_restorer
*
* or, better,
*
* extern void _sa_default_rt_restorer(void)
* extern void _sa_default_restorer(void)
*
* Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
* from user space. Copied into user space by signal management.
* Both must be quad aligned and 2 quad long (4 instructions).
*
*/
	.balign 8
	.global sa_default_rt_restorer
/* Trampoline copied into user space by signal delivery; issues
 * sys_rt_sigreturn.  Must be quad aligned and 4 instructions long. */
sa_default_rt_restorer:
	movi	0x10, r9	/* trapa class: system call */
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop

	.balign 8
	.global sa_default_restorer
/* Same trampoline shape for the non-RT sys_sigreturn path. */
sa_default_restorer:
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
/*
* --- __ex_table Section
*/
/*
* User Access Exception Table.
*/
.section __ex_table, "a"
/* Each entry is a (faulting-insn label, fixup label) pair used by the
fault handler to resume user-access helpers after a bad user pointer. */
.global asm_uaccess_start /* Just a marker */
asm_uaccess_start:
#ifdef CONFIG_MMU
.long ___copy_user1, ___copy_user_exit
.long ___copy_user2, ___copy_user_exit
.long ___clear_user1, ___clear_user_exit
#endif
/* get_user/put_user fixups for byte/word/long/quad accesses */
.long ___get_user_asm_b1, ___get_user_asm_b_exit
.long ___get_user_asm_w1, ___get_user_asm_w_exit
.long ___get_user_asm_l1, ___get_user_asm_l_exit
.long ___get_user_asm_q1, ___get_user_asm_q_exit
.long ___put_user_asm_b1, ___put_user_asm_b_exit
.long ___put_user_asm_w1, ___put_user_asm_w_exit
.long ___put_user_asm_l1, ___put_user_asm_l_exit
.long ___put_user_asm_q1, ___put_user_asm_q_exit
.global asm_uaccess_end /* Just a marker */
asm_uaccess_end:
/*
* --- .init.text Section
*/
__INIT
/*
* void trap_init (void)
*
*/
.global trap_init
/*
* void trap_init(void)
* Installs the exception vector bases: VBR -> LVBR_block (virtual) and
* RESVEC -> LRESVEC_block (physical, MMU-off bit set), sanity-checks the
* vector block size, then clears SR.BL to unblock exceptions and returns.
* Clobbers r19-r23 and r28-r30 (the latter saved/restored on the stack).
*/
trap_init:
addi SP, -24, SP /* Room to save r28/r29/r30 */
st.q SP, 0, r28
st.q SP, 8, r29
st.q SP, 16, r30
/* Set VBR and RESVEC */
movi LVBR_block, r19
andi r19, -4, r19 /* reset MMUOFF + reserved */
/* For RESVEC exceptions we force the MMU off, which means we need the
physical address. */
movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
andi r20, -4, r20 /* reset reserved */
ori r20, 1, r20 /* set MMUOFF */
putcon r19, VBR
putcon r20, RESVEC
/* Sanity check */
movi LVBR_block_end, r21
andi r21, -4, r21
movi BLOCK_SIZE, r29 /* r29 = expected size */
or r19, ZERO, r30
add r19, r29, r19
/*
* Ugly, but better loop forever now than crash afterwards.
* We should print a message, but if we touch LVBR or
* LRESVEC blocks we should not be surprised if we get stuck
* in trap_init().
*/
pta trap_init_loop, tr1
gettr tr1, r28 /* r28 = trap_init_loop */
sub r21, r30, r30 /* r30 = actual size */
/*
* VBR/RESVEC handlers overlap by being bigger than
* allowed. Very bad. Just loop forever.
* (r28) panic/loop address
* (r29) expected size
* (r30) actual size
*/
trap_init_loop:
/* r19 = LVBR_block + BLOCK_SIZE; r21 = LVBR_block_end. If they differ,
the vector block overran its slot: spin here forever (tr1 = this label). */
bne r19, r21, tr1
/* Now that exception vectors are set up reset SR.BL */
getcon SR, r22
movi SR_UNBLOCK_EXC, r23
and r22, r23, r22
putcon r22, SR
addi SP, 24, SP
ptabs LINK, tr0
blink tr0, ZERO
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,914
|
arch/sh/kernel/cpu/sh5/switchto.S
|
/*
* arch/sh/kernel/cpu/sh5/switchto.S
*
* sh64 context switch
*
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
.section .text..SHmedia32,"ax"
.little
.balign 32
.type sh64_switch_to,@function
.global sh64_switch_to
.global __sh64_switch_to_end
sh64_switch_to:
/* Incoming args
r2 - prev
r3 - &prev->thread
r4 - next
r5 - &next->thread
Outgoing results
r2 - last (=prev) : this just stays in r2 throughout
Want to create a full (struct pt_regs) on the stack to allow backtracing
functions to work. However, we only need to populate the callee-save
register slots in this structure; since we're a function our ancestors must
have themselves preserved all caller saved state in the stack. This saves
some wasted effort since we won't need to look at the values.
In particular, all caller-save registers are immediately available for
scratch use.
*/
#define FRAME_SIZE (76*8 + 8)
movi FRAME_SIZE, r0
sub.l r15, r0, r15
! Do normal-style register save to support backtrace
st.l r15, 0, r18 ! save link reg
st.l r15, 4, r14 ! save fp
add.l r15, r63, r14 ! setup frame pointer
! hopefully this looks normal to the backtrace now.
addi.l r15, 8, r1 ! base of pt_regs
addi.l r1, 24, r0 ! base of pt_regs.regs
addi.l r0, (63*8), r8 ! base of pt_regs.trregs
/* Note : to be fixed?
struct pt_regs is really designed for holding the state on entry
to an exception, i.e. pc,sr,regs etc. However, for the context
switch state, some of this is not required. But the unwinder takes
struct pt_regs * as an arg so we have to build this structure
to allow unwinding switched tasks in show_state() */
! Only the callee-saved register slots are populated (see header comment):
! r9-r14, r16, r24-r35, r44-r59.
st.q r0, ( 9*8), r9
st.q r0, (10*8), r10
st.q r0, (11*8), r11
st.q r0, (12*8), r12
st.q r0, (13*8), r13
st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
! the point where the process is left in suspended animation, i.e. current
! fp here, not the saved one.
st.q r0, (16*8), r16
st.q r0, (24*8), r24
st.q r0, (25*8), r25
st.q r0, (26*8), r26
st.q r0, (27*8), r27
st.q r0, (28*8), r28
st.q r0, (29*8), r29
st.q r0, (30*8), r30
st.q r0, (31*8), r31
st.q r0, (32*8), r32
st.q r0, (33*8), r33
st.q r0, (34*8), r34
st.q r0, (35*8), r35
st.q r0, (44*8), r44
st.q r0, (45*8), r45
st.q r0, (46*8), r46
st.q r0, (47*8), r47
st.q r0, (48*8), r48
st.q r0, (49*8), r49
st.q r0, (50*8), r50
st.q r0, (51*8), r51
st.q r0, (52*8), r52
st.q r0, (53*8), r53
st.q r0, (54*8), r54
st.q r0, (55*8), r55
st.q r0, (56*8), r56
st.q r0, (57*8), r57
st.q r0, (58*8), r58
st.q r0, (59*8), r59
! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
! Use a local label to avoid creating a symbol that will confuse the !
! backtrace
pta .Lsave_pc, tr0
! Save the callee-saved target registers tr5-tr7 into pt_regs.trregs.
gettr tr5, r45
gettr tr6, r46
gettr tr7, r47
st.q r8, (5*8), r45
st.q r8, (6*8), r46
st.q r8, (7*8), r47
! Now switch context
gettr tr0, r9
st.l r3, 0, r15 ! prev->thread.sp
st.l r3, 8, r1 ! prev->thread.kregs
st.l r3, 4, r9 ! prev->thread.pc
st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
! Load PC for next task (init value or save_pc later)
ld.l r5, 4, r18 ! next->thread.pc
! Switch stacks
ld.l r5, 0, r15 ! next->thread.sp
ptabs r18, tr0
! Update current
ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
putcon r9, kcr0 ! current = next->thread_info
! go to save_pc for a reschedule, or the initial thread.pc for a new process
blink tr0, r63
! Restore (when we come back to a previously saved task)
.Lsave_pc:
addi.l r15, 32, r0 ! r0 = next's regs
addi.l r0, (63*8), r8 ! r8 = next's tr_regs
! Restore tr5-tr7 and the callee-saved GPRs saved above, mirroring the
! save sequence in reverse.
ld.q r8, (5*8), r45
ld.q r8, (6*8), r46
ld.q r8, (7*8), r47
ptabs r45, tr5
ptabs r46, tr6
ptabs r47, tr7
ld.q r0, ( 9*8), r9
ld.q r0, (10*8), r10
ld.q r0, (11*8), r11
ld.q r0, (12*8), r12
ld.q r0, (13*8), r13
ld.q r0, (14*8), r14
ld.q r0, (16*8), r16
ld.q r0, (24*8), r24
ld.q r0, (25*8), r25
ld.q r0, (26*8), r26
ld.q r0, (27*8), r27
ld.q r0, (28*8), r28
ld.q r0, (29*8), r29
ld.q r0, (30*8), r30
ld.q r0, (31*8), r31
ld.q r0, (32*8), r32
ld.q r0, (33*8), r33
ld.q r0, (34*8), r34
ld.q r0, (35*8), r35
ld.q r0, (44*8), r44
ld.q r0, (45*8), r45
ld.q r0, (46*8), r46
ld.q r0, (47*8), r47
ld.q r0, (48*8), r48
ld.q r0, (49*8), r49
ld.q r0, (50*8), r50
ld.q r0, (51*8), r51
ld.q r0, (52*8), r52
ld.q r0, (53*8), r53
ld.q r0, (54*8), r54
ld.q r0, (55*8), r55
ld.q r0, (56*8), r56
ld.q r0, (57*8), r57
ld.q r0, (58*8), r58
ld.q r0, (59*8), r59
! epilogue
ld.l r15, 0, r18
ld.l r15, 4, r14
ptabs r18, tr0
movi FRAME_SIZE, r0
add r15, r0, r15
blink tr0, r63
__sh64_switch_to_end:
.LFE1:
.size sh64_switch_to,.LFE1-sh64_switch_to
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,430
|
arch/sh/boot/compressed/head_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/arch/sh/boot/compressed/head.S
*
* Copyright (C) 1999 Stuart Menefy
* Copyright (C) 2003 SUGIOKA Toshinobu
*/
.text
#include <asm/page.h>
.global startup
/*
* zImage loader entry (SH-32). Relocates itself to its link address if
* loaded elsewhere (32-byte cache-line-sized copy loop, backwards from
* BSS start down to text start), clears BSS, sets up the stack, calls
* decompress_kernel(), then jumps to the decompressed kernel.
*/
startup:
/* Load initial status register */
mov.l init_sr, r1
ldc r1, sr
/* Move myself to proper location if necessary */
mova 1f, r0
mov.l 1f, r2
cmp/eq r2, r0
bt clear_bss
sub r0, r2
/* r2 = load offset (link addr - run addr); copy from high to low */
mov.l bss_start_addr, r0
mov #0xffffffe0, r1
and r1, r0 ! align cache line
mov.l text_start_addr, r3
mov r0, r1
sub r2, r1
3:
mov.l @r1, r4
mov.l @(4,r1), r5
mov.l @(8,r1), r6
mov.l @(12,r1), r7
mov.l @(16,r1), r8
mov.l @(20,r1), r9
mov.l @(24,r1), r10
mov.l @(28,r1), r11
mov.l r4, @r0
mov.l r5, @(4,r0)
mov.l r6, @(8,r0)
mov.l r7, @(12,r0)
mov.l r8, @(16,r0)
mov.l r9, @(20,r0)
mov.l r10, @(24,r0)
mov.l r11, @(28,r0)
#ifdef CONFIG_CPU_SH4
ocbwb @r0
#endif
cmp/hi r3, r0
add #-32, r0
bt/s 3b
add #-32, r1
mov.l 2f, r0
jmp @r0
nop
.align 2
1: .long 1b
2: .long clear_bss
text_start_addr:
.long startup
/* Clear BSS */
clear_bss:
mov.l end_addr, r1
mov.l bss_start_addr, r2
mov #0, r0
l1:
mov.l r0, @-r1
cmp/eq r1,r2
bf l1
/* Set the initial pointer. */
mov.l init_stack_addr, r0
mov.l @r0, r15
/* Decompress the kernel */
mov.l decompress_kernel_addr, r0
jsr @r0
nop
/* Jump to the start of the decompressed kernel */
mov.l kernel_start_addr, r0
jmp @r0
nop
.align 2
bss_start_addr:
.long __bss_start
end_addr:
.long _end
init_sr:
.long 0x500000F0 /* Privileged mode, Bank=0, Block=1, IMASK=0xF */
kexec_magic:
.long 0x400000F0 /* magic used by kexec to parse zImage format */
init_stack_addr:
.long stack_start
decompress_kernel_addr:
.long decompress_kernel
kernel_start_addr:
#ifdef CONFIG_32BIT
.long ___pa(_text+PAGE_SIZE)
#else
.long _text+PAGE_SIZE
#endif
.align 9
/* Fake bzImage-style header so x86-centric boot loaders accept the image */
fake_headers_as_bzImage:
.word 0
.ascii "HdrS" ! header signature
.word 0x0202 ! header version number (>= 0x0105)
! or else old loadlin-1.5 will fail)
.word 0 ! default_switch
.word 0 ! SETUPSEG
.word 0x1000
.word 0 ! pointing to kernel version string
.byte 0 ! = 0, old one (LILO, Loadlin,
! 0xTV: T=0 for LILO
! V = version
.byte 1 ! Load flags bzImage=1
.word 0x8000 ! size to move, when setup is not
.long 0x100000 ! 0x100000 = default for big kernel
.long 0 ! address of loaded ramdisk image
.long 0 # its size in bytes
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,021
|
arch/sh/boot/compressed/head_64.S
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/shmedia/boot/compressed/head.S
*
* Copied from
* arch/shmedia/kernel/head.S
* which carried the copyright:
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* Modification for compressed loader:
* Copyright (C) 2002 Stuart Menefy (stuart.menefy@st.com)
*/
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/mmu_context.h>
#include <cpu/registers.h>
/*
* Fixed TLB entries to identity map the beginning of RAM
*/
#define MMUIR_TEXT_H 0x0000000000000003 | CONFIG_MEMORY_START
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUIR_TEXT_L 0x000000000000009a | CONFIG_MEMORY_START
/* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. */
#define MMUDR_CACHED_H 0x0000000000000003 | CONFIG_MEMORY_START
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L 0x000000000000015a | CONFIG_MEMORY_START
/* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */
#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* OCE + OCI + WB */
#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
.text
.global startup
/*
* Compressed-loader entry (SH-5/SHmedia). Sequence: neutralise target
* registers, clear and seed the I/D TLBs with one 512MB identity mapping
* each, enable the caches, turn the MMU on via rte, clear BSS, call
* decompress_kernel(), then turn the MMU back off and jump to the
* decompressed kernel at CONFIG_MEMORY_START + 0x2000 (SHmedia mode,
* low bit of the target address set).
*/
startup:
/*
* Prevent speculative fetch on device memory due to
* uninitialized target registers.
* This must be executed before the first branch.
*/
ptabs/u r63, tr0
ptabs/u r63, tr1
ptabs/u r63, tr2
ptabs/u r63, tr3
ptabs/u r63, tr4
ptabs/u r63, tr5
ptabs/u r63, tr6
ptabs/u r63, tr7
synci
/*
* Set initial TLB entries for cached and uncached regions.
* Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
*/
/* Clear ITLBs */
pta 1f, tr1
movi ITLB_FIXED, r21
movi ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
1: putcfg r21, 0, r63 /* Clear MMUIR[n].PTEH.V */
addi r21, TLB_STEP, r21
bne r21, r22, tr1
/* Clear DTLBs */
pta 1f, tr1
movi DTLB_FIXED, r21
movi DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22
1: putcfg r21, 0, r63 /* Clear MMUDR[n].PTEH.V */
addi r21, TLB_STEP, r21
bne r21, r22, tr1
/* Map one big (512Mb) page for ITLB */
movi ITLB_FIXED, r21
movi MMUIR_TEXT_L, r22 /* PTEL first */
putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
movi MMUIR_TEXT_H, r22 /* PTEH last */
putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
/* Map one big CACHED (512Mb) page for DTLB */
movi DTLB_FIXED, r21
movi MMUDR_CACHED_L, r22 /* PTEL first */
putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
movi MMUDR_CACHED_H, r22 /* PTEH last */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
/* ICache */
movi ICCR_BASE, r21
movi ICCR0_INIT_VAL, r22
movi ICCR1_INIT_VAL, r23
putcfg r21, ICCR_REG0, r22
putcfg r21, ICCR_REG1, r23
synci
/* OCache */
movi OCCR_BASE, r21
movi OCCR0_INIT_VAL, r22
movi OCCR1_INIT_VAL, r23
putcfg r21, OCCR_REG0, r22
putcfg r21, OCCR_REG1, r23
synco
/*
* Enable the MMU.
* From here-on code can be non-PIC.
*/
movi SR_HARMLESS | SR_ENABLE_MMU, r22
putcon r22, SSR
movi 1f, r22
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
1: /* ... that's the next instruction ! */
/* Set initial stack pointer */
movi datalabel stack_start, r0
ld.l r0, 0, r15
/*
* Clear bss
*/
pt 1f, tr1
movi datalabel __bss_start, r22
movi datalabel _end, r23
1: st.l r22, 0, r63
addi r22, 4, r22
bne r22, r23, tr1
/*
* Decompress the kernel.
*/
pt decompress_kernel, tr0
blink tr0, r18
/*
* Disable the MMU.
*/
movi SR_HARMLESS, r22
putcon r22, SSR
movi 1f, r22
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
1: /* ... that's the next instruction ! */
/* Jump into the decompressed kernel */
movi datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19
ptabs r19, tr0
blink tr0, r18
/* Shouldn't return here, but just in case, loop forever */
pt 1f, tr0
1: blink tr0, r63
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,637
|
arch/sh/boot/romimage/head.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/arch/sh/boot/romimage/head.S
*
* Board specific setup code, executed before zImage loader
*/
.text
#include <asm/page.h>
.global romstart
/*
* romImage entry: runs board-specific setup, optionally loads the rest
* of the image over MMCIF, copies the empty_zero_page payload to where
* vmlinux expects it (_text), then jumps to the zImage located one page
* after the embedded zero-page data.
*/
romstart:
/* include board specific setup code */
#include <mach/romimage.h>
#ifdef CONFIG_ROMIMAGE_MMCIF
/* load the romImage to above the empty zero page */
mov.l empty_zero_page_dst, r4
mov.l empty_zero_page_dst_adj, r5
add r5, r4
mov.l bytes_to_load, r5
mov.l loader_function, r7
jsr @r7
mov r4, r15 /* delay slot: stack at top of loaded area */
mov.l empty_zero_page_dst, r4
mov.l empty_zero_page_dst_adj, r5
add r5, r4
mov.l loaded_code_offs, r5
add r5, r4
jmp @r4
nop
.balign 4
empty_zero_page_dst_adj:
.long PAGE_SIZE
bytes_to_load:
.long end_data - romstart
loader_function:
.long mmcif_loader
loaded_code_offs:
.long loaded_code - romstart
loaded_code:
#endif /* CONFIG_ROMIMAGE_MMCIF */
/* copy the empty_zero_page contents to where vmlinux expects it */
mova extra_data_pos, r0
mov.l extra_data_size, r1
add r1, r0
mov.l empty_zero_page_dst, r1
mov #(PAGE_SHIFT - 4), r4
mov #1, r3
shld r4, r3 /* r3 = PAGE_SIZE / 16 */
1:
/* 16 bytes per iteration, r3 counts down via dt */
mov.l @r0, r4
mov.l @(4, r0), r5
mov.l @(8, r0), r6
mov.l @(12, r0), r7
add #16,r0
mov.l r4, @r1
mov.l r5, @(4, r1)
mov.l r6, @(8, r1)
mov.l r7, @(12, r1)
dt r3
add #16,r1
bf 1b
/* jump to the zImage entry point located after the zero page data */
mov #PAGE_SHIFT, r4
mov #1, r1
shld r4, r1 /* r1 = PAGE_SIZE */
mova extra_data_pos, r0
add r1, r0
mov.l extra_data_size, r1
add r1, r0
jmp @r0
nop
.align 2
empty_zero_page_dst:
.long _text
extra_data_pos:
extra_data_size:
.long zero_page_pos - extra_data_pos
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,889
|
arch/sh/boards/mach-ecovec24/sdram.S
|
/*
* Ecovec24 sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
.balign 4
ENTRY(ecovec24_sdram_enter_start)
/* DBSC: put memory in self-refresh mode */
/* ED macro: write 32-bit value to register address (see romimage-macros) */
ED 0xFD000010, 0x00000000 /* DBEN */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
rts
nop
ENTRY(ecovec24_sdram_enter_end)
.balign 4
ENTRY(ecovec24_sdram_leave_start)
/* r5 = sleep-data pointer; full DBSC re-init only needed after R-standby */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_RSTANDBY, r0
bf resume_rstandby
/* DBSC: put memory in auto-refresh mode */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
WAIT 1
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
resume_rstandby:
/* DBSC: re-initialize and put in auto-refresh */
ED 0xFD000108, 0x00000181 /* DBPDCNT0 */
ED 0xFD000020, 0x015B0002 /* DBCONF */
ED 0xFD000030, 0x03071502 /* DBTR0 */
ED 0xFD000034, 0x02020102 /* DBTR1 */
ED 0xFD000038, 0x01090405 /* DBTR2 */
ED 0xFD00003C, 0x00000002 /* DBTR3 */
ED 0xFD000008, 0x00000005 /* DBKIND */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000018, 0x00000001 /* DBCKECNT */
/* busy-wait ~400ns (100 iterations) before issuing DRAM commands */
mov #100,r0
WAIT_400NS:
dt r0
bf WAIT_400NS
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000060, 0x00020000 /* DBMRCNT (EMR2) */
ED 0xFD000060, 0x00030000 /* DBMRCNT (EMR3) */
ED 0xFD000060, 0x00010004 /* DBMRCNT (EMR) */
ED 0xFD000060, 0x00000532 /* DBMRCNT (MRS) */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000060, 0x00000432 /* DBMRCNT (MRS) */
ED 0xFD000060, 0x000103c0 /* DBMRCNT (EMR) */
ED 0xFD000060, 0x00010040 /* DBMRCNT (EMR) */
mov #100,r0
WAIT_400NS_2:
dt r0
bf WAIT_400NS_2
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000044, 0x0000050f /* DBRFPDN1 */
ED 0xFD000048, 0x236800e6 /* DBRFPDN2 */
mov.l DUMMY,r0
mov.l @r0, r1 /* force single dummy read */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000108, 0x00000080 /* DBPDCNT0 */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
.balign 4
DUMMY: .long 0xac400000
ENTRY(ecovec24_sdram_leave_end)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,078
|
arch/sh/boards/mach-kfr2r09/sdram.S
|
/*
* KFR2R09 sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
.balign 4
ENTRY(kfr2r09_sdram_enter_start)
/* DBSC: put memory in self-refresh mode */
/* ED macro: write 32-bit value to register address (see romimage-macros) */
ED 0xFD000010, 0x00000000 /* DBEN */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
rts
nop
ENTRY(kfr2r09_sdram_enter_end)
.balign 4
ENTRY(kfr2r09_sdram_leave_start)
/* DBSC: put memory in auto-refresh mode */
/* r5 = sleep-data pointer; full DBSC re-init only needed after R-standby */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_RSTANDBY, r0
bf resume_rstandby
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
WAIT 1
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
resume_rstandby:
/* DBSC: re-initialize and put in auto-refresh */
ED 0xFD000108, 0x40000301 /* DBPDCNT0 */
ED 0xFD000020, 0x011B0002 /* DBCONF */
ED 0xFD000030, 0x03060E02 /* DBTR0 */
ED 0xFD000034, 0x01020102 /* DBTR1 */
ED 0xFD000038, 0x01090406 /* DBTR2 */
ED 0xFD000008, 0x00000004 /* DBKIND */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000018, 0x00000001 /* DBCKECNT */
WAIT 1
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000044, 0x000004AF /* DBRFPDN1 */
ED 0xFD000048, 0x20CF0037 /* DBRFPDN2 */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000108, 0x40000300 /* DBPDCNT0 */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
ENTRY(kfr2r09_sdram_leave_end)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,350
|
arch/sh/boards/mach-migor/sdram.S
|
/*
* Migo-R sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
.balign 4
ENTRY(migor_sdram_enter_start)
/* SBSC: disable power down and put in self-refresh mode */
/* SDCR0 = (SDCR0 | 0x00000400) & 0xffff7fff */
mov.l 1f, r4
mov.l 2f, r1
mov.l @r4, r2
or r1, r2
mov.l 3f, r3
and r3, r2
mov.l r2, @r4
rts
nop
.balign 4
1: .long 0xfe400008 /* SDCR0 */
2: .long 0x00000400
3: .long 0xffff7fff
ENTRY(migor_sdram_enter_end)
.balign 4
ENTRY(migor_sdram_leave_start)
/* SBSC: set auto-refresh mode */
/* clear self-refresh bit in SDCR0 */
mov.l 1f, r4
mov.l @r4, r0
mov.l 4f, r1
and r1, r0
mov.l r0, @r4
/* restart refresh timer: RTCNT = 0xa55a0000 | (RTCOR - 1) */
mov.l 6f, r4
mov.l 8f, r0
mov.l @r4, r1
mov #-1, r4
add r4, r1
or r1, r0
mov.l 7f, r1
mov.l r0, @r1
rts
nop
.balign 4
1: .long 0xfe400008 /* SDCR0 */
4: .long 0xfffffbff
6: .long 0xfe40001c /* RTCOR */
7: .long 0xfe400018 /* RTCNT */
8: .long 0xa55a0000
ENTRY(migor_sdram_leave_end)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,364
|
arch/sh/boards/mach-ap325rxa/sdram.S
|
/*
* AP325RXA sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
.balign 4
ENTRY(ap325rxa_sdram_enter_start)
/* SBSC: disable power down and put in self-refresh mode */
/* SDCR0 = (SDCR0 | 0x00000400) & 0xffff7fff */
mov.l 1f, r4
mov.l 2f, r1
mov.l @r4, r2
or r1, r2
mov.l 3f, r3
and r3, r2
mov.l r2, @r4
rts
nop
.balign 4
1: .long 0xfe400008 /* SDCR0 */
2: .long 0x00000400
3: .long 0xffff7fff
ENTRY(ap325rxa_sdram_enter_end)
.balign 4
ENTRY(ap325rxa_sdram_leave_start)
/* SBSC: set auto-refresh mode */
/* clear self-refresh bit in SDCR0 */
mov.l 1f, r4
mov.l @r4, r0
mov.l 4f, r1
and r1, r0
mov.l r0, @r4
/* restart refresh timer: RTCNT = 0xa55a0000 | (RTCOR - 1) */
mov.l 6f, r4
mov.l 8f, r0
mov.l @r4, r1
mov #-1, r4
add r4, r1
or r1, r0
mov.l 7f, r1
mov.l r0, @r1
rts
nop
.balign 4
1: .long 0xfe400008 /* SDCR0 */
4: .long 0xfffffbff
6: .long 0xfe40001c /* RTCOR */
7: .long 0xfe400018 /* RTCNT */
8: .long 0xa55a0000
ENTRY(ap325rxa_sdram_leave_end)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,218
|
arch/sh/boards/mach-se/7724/sdram.S
|
/*
* MS7724SE sdram self/auto-refresh setup code
*
* Copyright (C) 2009 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/suspend.h>
#include <asm/romimage-macros.h>
/* code to enter and leave self-refresh. must be self-contained.
* this code will be copied to on-chip memory and executed from there.
*/
.balign 4
ENTRY(ms7724se_sdram_enter_start)
/* DBSC: put memory in self-refresh mode */
/* ED macro: write 32-bit value to register address (see romimage-macros) */
ED 0xFD000010, 0x00000000 /* DBEN */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
rts
nop
ENTRY(ms7724se_sdram_enter_end)
.balign 4
ENTRY(ms7724se_sdram_leave_start)
/* DBSC: put memory in auto-refresh mode */
/* r5 = sleep-data pointer; full CPG+DBSC re-init only after R-standby */
mov.l @(SH_SLEEP_MODE, r5), r0
tst #SUSP_SH_RSTANDBY, r0
bf resume_rstandby
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
WAIT 1
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
resume_rstandby:
/* CPG: setup clocks before restarting external memory */
ED 0xA4150024, 0x00004000 /* PLLCR */
/* kick frequency change (FRQCRA.KICK) and wait for LSTATS bit 0 to clear */
mov.l FRQCRA,r0
mov.l @r0,r3
mov.l KICK,r1
or r1, r3
mov.l r3, @r0
mov.l LSTATS,r0
mov #1,r1
WAIT_LSTATS:
mov.l @r0,r3
tst r1,r3
bf WAIT_LSTATS
/* DBSC: re-initialize and put in auto-refresh */
ED 0xFD000108, 0x00000181 /* DBPDCNT0 */
ED 0xFD000020, 0x015B0002 /* DBCONF */
ED 0xFD000030, 0x03071502 /* DBTR0 */
ED 0xFD000034, 0x02020102 /* DBTR1 */
ED 0xFD000038, 0x01090405 /* DBTR2 */
ED 0xFD00003C, 0x00000002 /* DBTR3 */
ED 0xFD000008, 0x00000005 /* DBKIND */
ED 0xFD000040, 0x00000001 /* DBRFPDN0 */
ED 0xFD000040, 0x00000000 /* DBRFPDN0 */
ED 0xFD000018, 0x00000001 /* DBCKECNT */
/* busy-wait ~400ns (100 iterations) before issuing DRAM commands */
mov #100,r0
WAIT_400NS:
dt r0
bf WAIT_400NS
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000060, 0x00020000 /* DBMRCNT (EMR2) */
ED 0xFD000060, 0x00030000 /* DBMRCNT (EMR3) */
ED 0xFD000060, 0x00010004 /* DBMRCNT (EMR) */
ED 0xFD000060, 0x00000532 /* DBMRCNT (MRS) */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000060, 0x00000432 /* DBMRCNT (MRS) */
ED 0xFD000060, 0x000103c0 /* DBMRCNT (EMR) */
ED 0xFD000060, 0x00010040 /* DBMRCNT (EMR) */
mov #100,r0
WAIT_400NS_2:
dt r0
bf WAIT_400NS_2
ED 0xFD000010, 0x00000001 /* DBEN */
ED 0xFD000044, 0x0000050f /* DBRFPDN1 */
ED 0xFD000048, 0x236800e6 /* DBRFPDN2 */
mov.l DUMMY,r0
mov.l @r0, r1 /* force single dummy read */
ED 0xFD000014, 0x00000002 /* DBCMDCNT (PALL) */
ED 0xFD000014, 0x00000004 /* DBCMDCNT (REF) */
ED 0xFD000108, 0x00000080 /* DBPDCNT0 */
ED 0xFD000040, 0x00010000 /* DBRFPDN0 */
rts
nop
.balign 4
DUMMY: .long 0xac400000
FRQCRA: .long 0xa4150000
KICK: .long 0x80000000
LSTATS: .long 0xa4150060
ENTRY(ms7724se_sdram_leave_end)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,897
|
arch/sh/include/asm/entry-macros.S
|
! SPDX-License-Identifier: GPL-2.0
! entry.S macro define
! cli: disable interrupts by setting SR.IMASK = 0xf. Clobbers r0.
.macro cli
stc sr, r0
or #0xf0, r0
ldc r0, sr
.endm
! sti: enable interrupts by clearing SR.IMASK (restoring the saved
! per-cpu imask when SR.RB banking is available). Clobbers r10, r11.
.macro sti
! build the mask ~0xf0 in r11: 0xfffffff0 -> extu.b -> 0xf0 -> not
mov #0xfffffff0, r11
extu.b r11, r11
not r11, r11
stc sr, r10
and r11, r10
#ifdef CONFIG_CPU_HAS_SR_RB
stc k_g_imask, r11
or r11, r10
#endif
ldc r10, sr
.endm
! get_current_thread_info: \ti = current thread_info pointer, either from
! the r7 bank register or by masking the stack pointer with ~(THREAD_SIZE-1).
.macro get_current_thread_info, ti, tmp
#ifdef CONFIG_CPU_HAS_SR_RB
stc r7_bank, \ti
#else
mov #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp
shll8 \tmp
shll2 \tmp
mov r15, \ti
and \tmp, \ti
#endif
.endm
#ifdef CONFIG_TRACE_IRQFLAGS
! TRACE_IRQS_ON/OFF: save caller-save regs r0-r7, call the lockdep
! trace_hardirqs_{on,off} hook, restore. Numeric labels 7834/7835 are
! local to each macro expansion.
.macro TRACE_IRQS_ON
mov.l r0, @-r15
mov.l r1, @-r15
mov.l r2, @-r15
mov.l r3, @-r15
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
mov.l 7834f, r0
jsr @r0
nop
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
mov.l @r15+, r2
mov.l @r15+, r1
mov.l @r15+, r0
mov.l 7834f, r0
bra 7835f
nop
.balign 4
7834: .long trace_hardirqs_on
7835:
.endm
.macro TRACE_IRQS_OFF
mov.l r0, @-r15
mov.l r1, @-r15
mov.l r2, @-r15
mov.l r3, @-r15
mov.l r4, @-r15
mov.l r5, @-r15
mov.l r6, @-r15
mov.l r7, @-r15
mov.l 7834f, r0
jsr @r0
nop
mov.l @r15+, r7
mov.l @r15+, r6
mov.l @r15+, r5
mov.l @r15+, r4
mov.l @r15+, r3
mov.l @r15+, r2
mov.l @r15+, r1
mov.l @r15+, r0
mov.l 7834f, r0
bra 7835f
nop
.balign 4
7834: .long trace_hardirqs_off
7835:
.endm
#else
.macro TRACE_IRQS_ON
.endm
.macro TRACE_IRQS_OFF
.endm
#endif
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
# define PREF(x) pref @x
#else
# define PREF(x) nop
#endif
/*
* Macro for use within assembly. Because the DWARF unwinder
* needs to use the frame register to unwind the stack, we
* need to setup r14 with the value of the stack pointer as
* the return address is usually on the stack somewhere.
*/
.macro setup_frame_reg
#ifdef CONFIG_DWARF_UNWINDER
mov r15, r14
#endif
.endm
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,547
|
arch/arm/mach-davinci/sleep.S
|
/*
* (C) Copyright 2009, Texas Instruments, Inc. http://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr) (1 << (nr))
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "psc.h"
#include "ddr2.h"
#include "clock.h"
/* Arbitrary, hardware currently does not update PHYRDY correctly */
#define PHYRDY_CYCLES 0x1000
/* Assume 25 MHz speed for the cycle conversions since PLLs are bypassed */
#define PLL_BYPASS_CYCLES (PLL_BYPASS_TIME * 25)
#define PLL_RESET_CYCLES (PLL_RESET_TIME * 25)
#define PLL_LOCK_CYCLES (PLL_LOCK_TIME * 25)
#define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
.text
.arch armv5te
/*
* Move DaVinci into deep sleep state
*
* Note: This code is copied to internal SRAM by PM code. When the DaVinci
* wakes up it continues execution at the point it went to sleep.
* Register Usage:
* r0: contains virtual base for DDR2 controller
* r1: contains virtual base for DDR2 Power and Sleep controller (PSC)
* r2: contains PSC number for DDR2
* r3: contains virtual base DDR2 PLL controller
* r4: contains virtual address of the DEEPSLEEP register
*/
ENTRY(davinci_cpu_suspend)
/* See register-usage comment above: r0=DDR2 ctrl, r1=PSC, r2=PSC#,
r3=PLL ctrl, r4=DEEPSLEEP reg (all loaded from *r0 via ldmia below). */
stmfd sp!, {r0-r12, lr} @ save registers on stack
ldr ip, CACHE_FLUSH
blx ip
ldmia r0, {r0-r4}
/*
* Switch DDR to self-refresh mode.
*/
/* calculate SDRCR address */
ldr ip, [r0, #DDR2_SDRCR_OFFSET]
bic ip, ip, #DDR2_SRPD_BIT
orr ip, ip, #DDR2_LPMODEN_BIT
str ip, [r0, #DDR2_SDRCR_OFFSET]
ldr ip, [r0, #DDR2_SDRCR_OFFSET]
orr ip, ip, #DDR2_MCLKSTOPEN_BIT
str ip, [r0, #DDR2_SDRCR_OFFSET]
/* fixed delay; hardware does not report PHYRDY reliably (see top) */
mov ip, #PHYRDY_CYCLES
1: subs ip, ip, #0x1
bne 1b
/* Disable DDR2 LPSC */
mov r7, r0
mov r0, #0x2
bl davinci_ddr_psc_config
mov r0, r7
/* Disable clock to DDR PHY */
ldr ip, [r3, #PLLDIV1]
bic ip, ip, #PLLDIV_EN
str ip, [r3, #PLLDIV1]
/* Put the DDR PLL in bypass and power down */
ldr ip, [r3, #PLLCTL]
bic ip, ip, #PLLCTL_PLLENSRC
bic ip, ip, #PLLCTL_PLLEN
str ip, [r3, #PLLCTL]
/* Wait for PLL to switch to bypass */
mov ip, #PLL_BYPASS_CYCLES
2: subs ip, ip, #0x1
bne 2b
/* Power down the PLL */
ldr ip, [r3, #PLLCTL]
orr ip, ip, #PLLCTL_PLLPWRDN
str ip, [r3, #PLLCTL]
/* Go to deep sleep */
ldr ip, [r4]
orr ip, ip, #DEEPSLEEP_SLEEPENABLE_BIT
/* System goes to sleep beyond after this instruction */
str ip, [r4]
/* Wake up from sleep */
/* Clear sleep enable */
ldr ip, [r4]
bic ip, ip, #DEEPSLEEP_SLEEPENABLE_BIT
str ip, [r4]
/* initialize the DDR PLL controller */
/* Put PLL in reset */
ldr ip, [r3, #PLLCTL]
bic ip, ip, #PLLCTL_PLLRST
str ip, [r3, #PLLCTL]
/* Clear PLL power down */
ldr ip, [r3, #PLLCTL]
bic ip, ip, #PLLCTL_PLLPWRDN
str ip, [r3, #PLLCTL]
mov ip, #PLL_RESET_CYCLES
3: subs ip, ip, #0x1
bne 3b
/* Bring PLL out of reset */
ldr ip, [r3, #PLLCTL]
orr ip, ip, #PLLCTL_PLLRST
str ip, [r3, #PLLCTL]
/* Wait for PLL to lock (assume prediv = 1, 25MHz OSCIN) */
mov ip, #PLL_LOCK_CYCLES
4: subs ip, ip, #0x1
bne 4b
/* Remove PLL from bypass mode */
ldr ip, [r3, #PLLCTL]
bic ip, ip, #PLLCTL_PLLENSRC
orr ip, ip, #PLLCTL_PLLEN
str ip, [r3, #PLLCTL]
/* Start 2x clock to DDR2 */
ldr ip, [r3, #PLLDIV1]
orr ip, ip, #PLLDIV_EN
str ip, [r3, #PLLDIV1]
/* Enable VCLK */
/* Enable DDR2 LPSC */
mov r7, r0
mov r0, #0x3
bl davinci_ddr_psc_config
mov r0, r7
/* clear MCLKSTOPEN */
ldr ip, [r0, #DDR2_SDRCR_OFFSET]
bic ip, ip, #DDR2_MCLKSTOPEN_BIT
str ip, [r0, #DDR2_SDRCR_OFFSET]
ldr ip, [r0, #DDR2_SDRCR_OFFSET]
bic ip, ip, #DDR2_LPMODEN_BIT
str ip, [r0, #DDR2_SDRCR_OFFSET]
/* Restore registers and return */
ldmfd sp!, {r0-r12, pc}
ENDPROC(davinci_cpu_suspend)
/*
* Disables or Enables DDR2 LPSC
* Register Usage:
* r0: Enable or Disable LPSC r0 = 0x3 => Enable, r0 = 0x2 => Disable LPSC
* r1: contains virtual base for DDR2 Power and Sleep controller (PSC)
* r2: contains PSC number for DDR2
*/
ENTRY(davinci_ddr_psc_config)
/* See header above: r0 = next LPSC state (0x2 disable / 0x3 enable),
r1 = PSC base, r2 = DDR2 PSC number. Clobbers r6, ip, flags. */
/* Set next state in mdctl for DDR2 */
mov r6, #MDCTL
add r6, r6, r2, lsl #2
ldr ip, [r1, r6]
bic ip, ip, #MDSTAT_STATE_MASK
orr ip, ip, r0
str ip, [r1, r6]
/* Enable the Power Domain Transition Command */
ldr ip, [r1, #PTCMD]
orr ip, ip, #0x1
str ip, [r1, #PTCMD]
/* Check for Transition Complete (PTSTAT) */
ptstat_done:
ldr ip, [r1, #PTSTAT]
and ip, ip, #0x1
cmp ip, #0x0
bne ptstat_done
/* Check for DDR2 clock disable completion; */
/* poll MDSTAT until its state field equals the requested state in r0 */
mov r6, #MDSTAT
add r6, r6, r2, lsl #2
ddr2clk_stop_done:
ldr ip, [r1, r6]
and ip, ip, #MDSTAT_STATE_MASK
cmp ip, r0
bne ddr2clk_stop_done
ret lr
ENDPROC(davinci_ddr_psc_config)
/*
 * Literal pool: address of the kernel cache-flush-all routine for the
 * configured core (v6 vs. ARM926), loaded indirectly by the suspend path.
 */
CACHE_FLUSH:
#ifdef CONFIG_CPU_V6
.word v6_flush_kern_cache_all
#else
.word arm926_flush_kern_cache_all
#endif
/*
 * davinci_cpu_suspend_sz: byte size of the davinci_cpu_suspend routine,
 * exported so C code can copy the whole routine (e.g. into SRAM).
 */
ENTRY(davinci_cpu_suspend_sz)
.word . - davinci_cpu_suspend @ current location minus routine start
ENDPROC(davinci_cpu_suspend_sz)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,012
|
arch/arm/mach-sunxi/headsmp.S
|
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (c) 2018 Chen-Yu Tsai
* Copyright (c) 2018 Bootlin
*
* Chen-Yu Tsai <wens@csie.org>
* Mylène Josserand <mylene.josserand@bootlin.com>
*
* SMP support for sunxi based systems with Cortex A7/A15
*
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cputype.h>
ENTRY(sunxi_mc_smp_cluster_cache_enable)
.arch armv7-a
/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 *
 * Also enable regional clock gating and L2 data latency settings for
 * Cortex-A15. These settings are from the vendor kernel.
 *
 * Runs with the MMU off; must only use PC-relative addressing for data.
 * Clobbers r0-r2.
 */
mrc p15, 0, r1, c0, c0, 0 @ r1 = MIDR
movw r2, #(ARM_CPU_PART_MASK & 0xffff)
movt r2, #(ARM_CPU_PART_MASK >> 16)
and r1, r1, r2 @ keep only the part-number field
movw r2, #(ARM_CPU_PART_CORTEX_A15 & 0xffff)
movt r2, #(ARM_CPU_PART_CORTEX_A15 >> 16)
cmp r1, r2
bne not_a15
/* The following is Cortex-A15 specific */
/* ACTLR2: Enable CPU regional clock gates */
mrc p15, 1, r1, c15, c0, 4
orr r1, r1, #(0x1 << 31)
mcr p15, 1, r1, c15, c0, 4
/* L2ACTLR */
mrc p15, 1, r1, c15, c0, 0
/* Enable L2, GIC, and Timer regional clock gates */
orr r1, r1, #(0x1 << 26)
/* Disable clean/evict from being pushed to external */
orr r1, r1, #(0x1<<3)
mcr p15, 1, r1, c15, c0, 0
/* L2CTRL: L2 data RAM latency */
mrc p15, 1, r1, c9, c0, 2
bic r1, r1, #(0x7 << 0) @ clear latency field
orr r1, r1, #(0x3 << 0) @ vendor-recommended latency value
mcr p15, 1, r1, c9, c0, 2
/* End of Cortex-A15 specific setup */
not_a15:
/* Get value of sunxi_mc_smp_first_comer */
adr r1, first @ PC-relative: works before the MMU is on
ldr r0, [r1] @ r0 = offset of sunxi_mc_smp_first_comer from 'first'
ldr r0, [r1, r0] @ r0 = *sunxi_mc_smp_first_comer
/* Skip cci_enable_port_for_self if not first comer */
cmp r0, #0
bxeq lr
b cci_enable_port_for_self @ tail-call; CCI code returns to our caller
.align 2
first: .word sunxi_mc_smp_first_comer - . @ link-time relative offset
ENDPROC(sunxi_mc_smp_cluster_cache_enable)
/*
 * Secondary CPU boot entry: enable cluster caches/coherency, initialise
 * the secure virtual counter offset, then fall into the generic ARM
 * secondary_startup path.
 */
ENTRY(sunxi_mc_smp_secondary_startup)
bl sunxi_mc_smp_cluster_cache_enable
bl secure_cntvoff_init
b secondary_startup @ never returns here
ENDPROC(sunxi_mc_smp_secondary_startup)
/*
 * Resume entry after a cluster power-down: re-enable cluster caches and
 * coherency, then continue through the generic cpu_resume path.
 */
ENTRY(sunxi_mc_smp_resume)
bl sunxi_mc_smp_cluster_cache_enable
b cpu_resume @ tail-call into generic resume
ENDPROC(sunxi_mc_smp_resume)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,036
|
arch/arm/mach-sti/headsmp.S
|
/*
* arch/arm/mach-sti/headsmp.S
*
* Copyright (C) 2013 STMicroelectronics (R&D) Limited.
* http://www.st.com
*
* Cloned from linux/arch/arm/mach-vexpress/headsmp.S
*
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
/*
* ST specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
* ready for them to initialise.
*/
/*
 * sti_secondary_startup: holding-pen loop for ST secondary cores.
 * Spins on pen_release until the boot CPU writes this core's ID there.
 * Runs before the MMU is enabled, hence the phys/virt fixup below.
 */
ENTRY(sti_secondary_startup)
mrc p15, 0, r0, c0, c0, 5 @ r0 = MPIDR
and r0, r0, #15 @ r0 = this core's CPU id (low affinity bits)
adr r4, 1f @ r4 = runtime (physical) address of literal pool
ldmia r4, {r5, r6} @ r5 = link-time address of 1f, r6 = &pen_release
sub r4, r4, r5 @ r4 = runtime - link-time offset
add r6, r6, r4 @ relocate pen_release pointer to runtime address
pen: ldr r7, [r6] @ poll pen_release
cmp r7, r0 @ released when it matches our CPU id
bne pen
/*
 * we've been released from the holding pen: secondary_stack
 * should now contain the SVC stack for this core
 */
b secondary_startup
ENDPROC(sti_secondary_startup)
1: .long . @ link-time address of this pool (for offset calc)
.long pen_release
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,838
|
arch/arm/xen/hypercall.S
|
/******************************************************************************
* hypercall.S
*
* Xen hypercall wrappers
*
* Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
* The Xen hypercall calling convention is very similar to the ARM
* procedure calling convention: the first paramter is passed in r0, the
* second in r1, the third in r2 and the fourth in r3. Considering that
* Xen hypercalls have 5 arguments at most, the fifth paramter is passed
* in r4, differently from the procedure calling convention of using the
* stack for that case.
*
* The hypercall number is passed in r12.
*
* The return value is in r0.
*
* The hvc ISS is required to be 0xEA1, that is the Xen specific ARM
* hypercall tag.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/opcodes-virt.h>
#include <xen/interface/xen.h>
/* Xen-specific HVC immediate tag (see calling-convention comment above). */
#define XEN_IMM 0xEA1
/*
 * Wrapper for hypercalls with 0-4 arguments: args already sit in r0-r3
 * per the AAPCS, so just load the hypercall number into r12 and trap.
 */
#define HYPERCALL_SIMPLE(hypercall) \
ENTRY(HYPERVISOR_##hypercall) \
mov r12, #__HYPERVISOR_##hypercall; \
__HVC(XEN_IMM); \
ret lr; \
ENDPROC(HYPERVISOR_##hypercall)
#define HYPERCALL0 HYPERCALL_SIMPLE
#define HYPERCALL1 HYPERCALL_SIMPLE
#define HYPERCALL2 HYPERCALL_SIMPLE
#define HYPERCALL3 HYPERCALL_SIMPLE
#define HYPERCALL4 HYPERCALL_SIMPLE
/*
 * 5-argument variant: the 5th C argument arrives on the stack, but Xen
 * expects it in r4, so save/restore callee-saved r4 around the trap.
 */
#define HYPERCALL5(hypercall) \
ENTRY(HYPERVISOR_##hypercall) \
stmdb sp!, {r4} \
ldr r4, [sp, #4] \
mov r12, #__HYPERVISOR_##hypercall; \
__HVC(XEN_IMM); \
ldm sp!, {r4} \
ret lr \
ENDPROC(HYPERVISOR_##hypercall)
.text
/* Instantiate one HYPERVISOR_<name> wrapper per hypercall used by Linux. */
HYPERCALL2(xen_version);
HYPERCALL3(console_io);
HYPERCALL3(grant_table_op);
HYPERCALL2(sched_op);
HYPERCALL2(event_channel_op);
HYPERCALL2(hvm_op);
HYPERCALL2(memory_op);
HYPERCALL2(physdev_op);
HYPERCALL3(vcpu_op);
HYPERCALL1(tmem_op);
HYPERCALL1(platform_op_raw);
HYPERCALL2(multicall);
HYPERCALL2(vm_assist);
HYPERCALL3(dm_op);
/*
 * privcmd_call(call_nr, a1, a2, a3, a4, a5):
 * issue an arbitrary hypercall on behalf of a userspace privcmd ioctl.
 * Shuffles C args into the Xen convention (nr in r12, args in r0-r4)
 * and opens a user-access window around the trap.
 */
ENTRY(privcmd_call)
stmdb sp!, {r4} @ preserve callee-saved r4 (stack args shift by 4)
mov r12, r0 @ r12 = hypercall number
mov r0, r1 @ slide C args 1-3 down into r0-r2
mov r1, r2
mov r2, r3
ldr r3, [sp, #8] @ 4th hypercall arg (was 5th C arg, now at sp+8)
/*
 * Privcmd calls are issued by the userspace. We need to allow the
 * kernel to access the userspace memory before issuing the hypercall.
 */
uaccess_enable r4
/* r4 is loaded now as we use it as scratch register before */
ldr r4, [sp, #4] @ 5th hypercall arg (was 6th C arg)
__HVC(XEN_IMM)
/*
 * Disable userspace access from kernel. This is fine to do it
 * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
 * called before.
 */
uaccess_disable r4
ldm sp!, {r4} @ restore r4; hypercall result is in r0
ret lr
ENDPROC(privcmd_call);
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,269
|
arch/arm/mach-prima2/sleep.S
|
/*
* sleep mode for CSR SiRFprimaII
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
*
* Licensed under GPLv2 or later.
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/assembler.h>
#include "pm.h"
#define DENALI_CTL_22_OFF 0x58
#define DENALI_CTL_112_OFF 0x1c0
.text
/*
 * sirfsoc_finish_suspend: put DRAM into self-refresh and force the SoC
 * to sleep through the PWRC (reached via the RTC I/O bridge).
 * Does not return; execution resumes via the platform wakeup path.
 * Clobbers r0-r7.
 */
ENTRY(sirfsoc_finish_suspend)
@ r5: mem controller
ldr r0, =sirfsoc_memc_base
ldr r5, [r0]
@ r6: pwrc base offset
ldr r0, =sirfsoc_pwrc_base
ldr r6, [r0]
@ r7: rtc iobrg controller
ldr r0, =sirfsoc_rtciobrg_base
ldr r7, [r0]
@ Read the power control register and set the
@ sleep force bit.
add r0, r6, #SIRFSOC_PWRC_PDN_CTRL
bl __sirfsoc_rtc_iobrg_readl
orr r0,r0,#SIRFSOC_PWR_SLEEPFORCE
add r1, r6, #SIRFSOC_PWRC_PDN_CTRL
bl sirfsoc_rtc_iobrg_pre_writel
mov r1, #0x1 @ value later written to kick the iobridge
@ read the MEM ctl register and set the self
@ refresh bit
ldr r2, [r5, #DENALI_CTL_22_OFF]
orr r2, r2, #0x1
@ Following code has to run from cache since
@ the RAM is going to self refresh mode
.align 5
str r2, [r5, #DENALI_CTL_22_OFF] @ request self-refresh
1:
ldr r4, [r5, #DENALI_CTL_112_OFF]
tst r4, #0x1 @ wait for controller to acknowledge
bne 1b
@ write SLEEPFORCE through rtc iobridge
str r1, [r7]
@ wait rtc io bridge sync
1:
ldr r3, [r7]
tst r3, #0x01 @ busy bit; spin until the write drains
bne 1b
b . @ park here; power is cut by the PWRC
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,639
|
arch/arm/mach-ep93xx/crunch-bits.S
|
/*
* arch/arm/kernel/crunch-bits.S
* Cirrus MaverickCrunch context switching and handling
*
* Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
*
* Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
* Copyright (c) 2003-2004, MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <mach/ep93xx-regs.h>
/*
* We can't use hex constants here due to a bug in gas.
*/
#define CRUNCH_MVDX0 0
#define CRUNCH_MVDX1 8
#define CRUNCH_MVDX2 16
#define CRUNCH_MVDX3 24
#define CRUNCH_MVDX4 32
#define CRUNCH_MVDX5 40
#define CRUNCH_MVDX6 48
#define CRUNCH_MVDX7 56
#define CRUNCH_MVDX8 64
#define CRUNCH_MVDX9 72
#define CRUNCH_MVDX10 80
#define CRUNCH_MVDX11 88
#define CRUNCH_MVDX12 96
#define CRUNCH_MVDX13 104
#define CRUNCH_MVDX14 112
#define CRUNCH_MVDX15 120
#define CRUNCH_MVAX0L 128
#define CRUNCH_MVAX0M 132
#define CRUNCH_MVAX0H 136
#define CRUNCH_MVAX1L 140
#define CRUNCH_MVAX1M 144
#define CRUNCH_MVAX1H 148
#define CRUNCH_MVAX2L 152
#define CRUNCH_MVAX2M 156
#define CRUNCH_MVAX2H 160
#define CRUNCH_MVAX3L 164
#define CRUNCH_MVAX3M 168
#define CRUNCH_MVAX3H 172
#define CRUNCH_DSPSC 176
#define CRUNCH_SIZE 184
.text
/*
* Lazy switching of crunch coprocessor context
*
* r10 = struct thread_info pointer
* r9 = ret_from_exception
* lr = undefined instr exit
*
* called from prefetch exception handler with interrupts enabled
*/
/*
 * crunch_task_enable: undef-instruction handler tail that lazily switches
 * MaverickCrunch coprocessor context to the faulting task (see the header
 * comment above for the register contract: r10 = thread_info,
 * r9 = ret_from_exception, lr = undef exit).
 * Saves the previous owner's state (crunch_save) and loads the new
 * owner's (crunch_load); both labels are also branched to directly by
 * crunch_task_disable/copy/restore below.
 */
ENTRY(crunch_task_enable)
inc_preempt_count r10, r3 @ no preemption while we juggle ownership
ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
ldr r1, [r8, #0x80]
tst r1, #0x00800000 @ access to crunch enabled?
bne 2f @ if so no business here
mov r3, #0xaa @ unlock syscon swlock
str r3, [r8, #0xc0]
orr r1, r1, #0x00800000 @ enable access to crunch
str r1, [r8, #0x80]
ldr r3, =crunch_owner
add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area
ldr r2, [sp, #60] @ current task pc value
ldr r1, [r3] @ get current crunch owner
str r0, [r3] @ this task now owns crunch
sub r2, r2, #4 @ adjust pc back
str r2, [sp, #60] @ so the trapped insn is retried with access on
ldr r2, [r8, #0x80]
mov r2, r2 @ flush out enable (@@@)
teq r1, #0 @ test for last ownership
mov lr, r9 @ normal exit from exception
beq crunch_load @ no owner, skip save
@ crunch_save: spill the previous owner's full coprocessor state to the
@ save area at r1. Falls through into crunch_load (r0 = new save area,
@ or 0 when called just to save).
crunch_save:
cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers
cfstr64 mvdx1, [r1, #CRUNCH_MVDX1]
cfstr64 mvdx2, [r1, #CRUNCH_MVDX2]
cfstr64 mvdx3, [r1, #CRUNCH_MVDX3]
cfstr64 mvdx4, [r1, #CRUNCH_MVDX4]
cfstr64 mvdx5, [r1, #CRUNCH_MVDX5]
cfstr64 mvdx6, [r1, #CRUNCH_MVDX6]
cfstr64 mvdx7, [r1, #CRUNCH_MVDX7]
cfstr64 mvdx8, [r1, #CRUNCH_MVDX8]
cfstr64 mvdx9, [r1, #CRUNCH_MVDX9]
cfstr64 mvdx10, [r1, #CRUNCH_MVDX10]
cfstr64 mvdx11, [r1, #CRUNCH_MVDX11]
cfstr64 mvdx12, [r1, #CRUNCH_MVDX12]
cfstr64 mvdx13, [r1, #CRUNCH_MVDX13]
cfstr64 mvdx14, [r1, #CRUNCH_MVDX14]
cfstr64 mvdx15, [r1, #CRUNCH_MVDX15]
#ifdef __ARMEB__
#error fix me for ARMEB
#endif
cfmv32al mvfx0, mvax0 @ save 72b accumulators
cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L]
cfmv32am mvfx0, mvax0
cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M]
cfmv32ah mvfx0, mvax0
cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H]
cfmv32al mvfx0, mvax1
cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L]
cfmv32am mvfx0, mvax1
cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M]
cfmv32ah mvfx0, mvax1
cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H]
cfmv32al mvfx0, mvax2
cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L]
cfmv32am mvfx0, mvax2
cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M]
cfmv32ah mvfx0, mvax2
cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H]
cfmv32al mvfx0, mvax3
cfstr32 mvfx0, [r1, #CRUNCH_MVAX3L]
cfmv32am mvfx0, mvax3
cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M]
cfmv32ah mvfx0, mvax3
cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H]
cfmv32sc mvdx0, dspsc @ save status word
cfstr64 mvdx0, [r1, #CRUNCH_DSPSC]
teq r0, #0 @ anything to load?
cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
beq 1f
@ crunch_load: fill the coprocessor from the save area at r0.
crunch_load:
cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
cfmvsc32 dspsc, mvdx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators
cfmval32 mvax0, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M]
cfmvam32 mvax0, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H]
cfmvah32 mvax0, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L]
cfmval32 mvax1, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M]
cfmvam32 mvax1, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H]
cfmvah32 mvax1, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L]
cfmval32 mvax2, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M]
cfmvam32 mvax2, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H]
cfmvah32 mvax2, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L]
cfmval32 mvax3, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M]
cfmvam32 mvax3, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H]
cfmvah32 mvax3, mvfx0
cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers
cfldr64 mvdx1, [r0, #CRUNCH_MVDX1]
cfldr64 mvdx2, [r0, #CRUNCH_MVDX2]
cfldr64 mvdx3, [r0, #CRUNCH_MVDX3]
cfldr64 mvdx4, [r0, #CRUNCH_MVDX4]
cfldr64 mvdx5, [r0, #CRUNCH_MVDX5]
cfldr64 mvdx6, [r0, #CRUNCH_MVDX6]
cfldr64 mvdx7, [r0, #CRUNCH_MVDX7]
cfldr64 mvdx8, [r0, #CRUNCH_MVDX8]
cfldr64 mvdx9, [r0, #CRUNCH_MVDX9]
cfldr64 mvdx10, [r0, #CRUNCH_MVDX10]
cfldr64 mvdx11, [r0, #CRUNCH_MVDX11]
cfldr64 mvdx12, [r0, #CRUNCH_MVDX12]
cfldr64 mvdx13, [r0, #CRUNCH_MVDX13]
cfldr64 mvdx14, [r0, #CRUNCH_MVDX14]
cfldr64 mvdx15, [r0, #CRUNCH_MVDX15]
1:
#ifdef CONFIG_PREEMPT_COUNT
get_thread_info r10 @ r10 may be stale after the save/load above
#endif
2: dec_preempt_count r10, r3
ret lr
/*
* Back up crunch regs to save area and disable access to them
* (mainly for gdb or sleep mode usage)
*
* r0 = struct thread_info pointer of target task or NULL for any
*/
/*
 * crunch_task_disable: save crunch state to the owner's save area and shut
 * off coprocessor access (see header comment above: r0 = thread_info of
 * the target task, or NULL to act on whichever task currently owns it).
 * Runs with IRQs masked to keep ownership stable.
 */
ENTRY(crunch_task_disable)
stmfd sp!, {r4, r5, lr}
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
ldr r3, =crunch_owner
add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
ldr r1, [r3] @ get current crunch owner
teq r1, #0 @ any current owner?
beq 1f @ no: quit
teq r0, #0 @ any owner?
teqne r1, r2 @ or specified one?
bne 1f @ no: quit
ldr r5, [r4, #0x80] @ enable access to crunch
mov r2, #0xaa @ unlock the syscon software lock first
str r2, [r4, #0xc0]
orr r5, r5, #0x00800000
str r5, [r4, #0x80]
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
ldr r2, [r4, #0x80] @ flush out enable (@@@)
mov r2, r2
bl crunch_save @ spill owner's state (r1 = save area)
mov r2, #0xaa @ disable access to crunch
str r2, [r4, #0xc0]
bic r5, r5, #0x00800000
str r5, [r4, #0x80]
ldr r5, [r4, #0x80] @ flush out enable (@@@)
mov r5, r5
1: msr cpsr_c, ip @ restore interrupt mode
ldmfd sp!, {r4, r5, pc}
/*
* Copy crunch state to given memory address
*
* r0 = struct thread_info pointer of target task
* r1 = memory address where to store crunch state
*
* this is called mainly in the creation of signal stack frames
*/
/*
 * crunch_task_copy: copy a task's crunch state to memory (r0 = target
 * thread_info, r1 = destination buffer; see header comment above).
 * If the task is not the current owner its save area is already current
 * and a plain memcpy suffices; otherwise dump live registers first.
 */
ENTRY(crunch_task_copy)
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r3, =crunch_owner
add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
ldr r3, [r3] @ get current crunch owner
teq r2, r3 @ does this task own it...
beq 1f
@ current crunch values are in the task save area
msr cpsr_c, ip @ restore interrupt mode
mov r0, r1 @ memcpy(dest = r1, src = save area, CRUNCH_SIZE)
mov r1, r2
mov r2, #CRUNCH_SIZE
b memcpy @ tail-call; memcpy returns to our caller
1: @ this task owns crunch regs -- grab a copy from there
mov r0, #0 @ nothing to load
mov r3, lr @ preserve return address
bl crunch_save @ dump live registers into [r1] (the dest buffer)
msr cpsr_c, ip @ restore interrupt mode
ret r3
/*
* Restore crunch state from given memory address
*
* r0 = struct thread_info pointer of target task
* r1 = memory address where to get crunch state from
*
* this is used to restore crunch state when unwinding a signal stack frame
*/
/*
 * crunch_task_restore: restore a task's crunch state from memory
 * (r0 = target thread_info, r1 = source buffer; see header comment
 * above). Mirror image of crunch_task_copy: write to the save area if
 * the task does not own the coprocessor, else load registers directly.
 */
ENTRY(crunch_task_restore)
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r3, =crunch_owner
add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
ldr r3, [r3] @ get current crunch owner
teq r2, r3 @ does this task own it...
beq 1f
@ this task doesn't own crunch regs -- use its save area
msr cpsr_c, ip @ restore interrupt mode
mov r0, r2 @ memcpy(dest = save area, src = r1, CRUNCH_SIZE)
mov r2, #CRUNCH_SIZE
b memcpy @ tail-call; memcpy returns to our caller
1: @ this task owns crunch regs -- load them directly
mov r0, r1 @ crunch_load reads from [r0]
mov r1, #0 @ nothing to save
mov r3, lr @ preserve return address
bl crunch_load
msr cpsr_c, ip @ restore interrupt mode
ret r3
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,126
|
arch/arm/vdso/vdso.lds.S
|
/*
* Adapted from arm64 version.
*
* GNU linker script for the VDSO library.
*
* Copyright (C) 2012 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Will Deacon <will.deacon@arm.com>
* Heavily based on the vDSO linker scripts for other archs.
*/
#include <linux/const.h>
#include <asm/page.h>
#include <asm/vdso.h>
OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
OUTPUT_ARCH(arm)
SECTIONS
{
	PROVIDE(_start = .);
	. = SIZEOF_HEADERS;
	/* Dynamic-linking metadata, all mapped read-only into the :text segment */
	.hash : { *(.hash) } :text
	.gnu.hash : { *(.gnu.hash) }
	.dynsym : { *(.dynsym) }
	.dynstr : { *(.dynstr) }
	.gnu.version : { *(.gnu.version) }
	.gnu.version_d : { *(.gnu.version_d) }
	.gnu.version_r : { *(.gnu.version_r) }
	.note : { *(.note.*) } :text :note
	.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
	.eh_frame : { KEEP (*(.eh_frame)) } :text
	.dynamic : { *(.dynamic) } :text :dynamic
	.rodata : { *(.rodata*) } :text
	/* Fill gaps with 0xe7f001f2 (ARM undefined instruction) to trap strays */
	.text : { *(.text*) } :text =0xe7f001f2
	.got : { *(.got) }
	.rel.plt : { *(.rel.plt) }
	/* The vDSO may not contain any writable data */
	/DISCARD/ : {
		*(.note.GNU-stack)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
	}
}
/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
	text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
	dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
	note PT_NOTE FLAGS(4); /* PF_R */
	eh_frame_hdr PT_GNU_EH_FRAME;
}
/* Only these two symbols are exported; everything else stays local */
VERSION
{
	LINUX_2.6 {
	global:
		__vdso_clock_gettime;
		__vdso_gettimeofday;
	local: *;
	};
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,832
|
arch/arm/mach-pxa/standby.S
|
/*
* PXA27x standby mode
*
* Author: David Burrage
*
* 2005 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include <mach/pxa2xx-regs.h>
.text
#ifdef CONFIG_PXA27x
/*
 * pxa_cpu_standby: drop the PXA27x core into standby via CP14 and clear
 * the PSSR status bits on wakeup. The final instructions are aligned to
 * a cache line so they execute from cache while SDRAM is unavailable.
 */
ENTRY(pxa_cpu_standby)
ldr r0, =PSSR
mov r1, #(PSSR_PH | PSSR_STS)
mov r2, #PWRMODE_STANDBY
mov r3, #UNCACHED_PHYS_0 @ Read mem context in.
ldr ip, [r3]
b 1f @ jump so the mcr sequence starts cache-line aligned
.align 5
1: mcr p14, 0, r2, c7, c0, 0 @ put the system into Standby
str r1, [r0] @ make sure PSSR_PH/STS are clear
ret lr
#endif
#ifdef CONFIG_PXA3xx
#define PXA3_MDCNFG 0x0000
#define PXA3_MDCNFG_DMCEN (1 << 30)
#define PXA3_DDR_HCAL 0x0060
#define PXA3_DDR_HCAL_HCRNG 0x1f
#define PXA3_DDR_HCAL_HCPROG (1 << 28)
#define PXA3_DDR_HCAL_HCEN (1 << 31)
#define PXA3_DMCIER 0x0070
#define PXA3_DMCIER_EDLP (1 << 29)
#define PXA3_DMCISR 0x0078
#define PXA3_RCOMP 0x0100
#define PXA3_RCOMP_SWEVAL (1 << 31)
/*
 * pm_enter_standby_start .. pm_enter_standby_end: PXA3xx standby
 * continuation that re-initialises the dynamic memory controller (DMC)
 * after wakeup: run RCOMP, re-enable hardware calibration (HCAL) and the
 * controller itself, polling each step. The two ENTRY labels bound the
 * region (presumably so C code can copy it; TODO confirm against caller).
 */
ENTRY(pm_enter_standby_start)
mov r1, #0xf6000000 @ DMEMC_REG_BASE (PXA3_MDCNFG)
add r1, r1, #0x00100000
/*
 * Preload the TLB entry for accessing the dynamic memory
 * controller registers. Note that page table lookups will
 * fail until the dynamic memory controller has been
 * reinitialised - and that includes MMU page table walks.
 * This also means that only the dynamic memory controller
 * can be reliably accessed in the code following standby.
 */
ldr r2, [r1] @ Dummy read PXA3_MDCNFG
mcr p14, 0, r0, c7, c0, 0 @ enter the low-power mode selected in r0
.rept 8
nop
.endr
ldr r0, [r1, #PXA3_DDR_HCAL] @ Clear (and wait for) HCEN
bic r0, r0, #PXA3_DDR_HCAL_HCEN
str r0, [r1, #PXA3_DDR_HCAL]
1: ldr r0, [r1, #PXA3_DDR_HCAL]
tst r0, #PXA3_DDR_HCAL_HCEN
bne 1b
ldr r0, [r1, #PXA3_RCOMP] @ Initiate RCOMP
orr r0, r0, #PXA3_RCOMP_SWEVAL
str r0, [r1, #PXA3_RCOMP]
mov r0, #~0 @ Clear interrupts
str r0, [r1, #PXA3_DMCISR]
ldr r0, [r1, #PXA3_DMCIER] @ set DMIER[EDLP]
orr r0, r0, #PXA3_DMCIER_EDLP
str r0, [r1, #PXA3_DMCIER]
ldr r0, [r1, #PXA3_DDR_HCAL] @ clear HCRNG, set HCPROG, HCEN
bic r0, r0, #PXA3_DDR_HCAL_HCRNG
orr r0, r0, #PXA3_DDR_HCAL_HCEN | PXA3_DDR_HCAL_HCPROG
str r0, [r1, #PXA3_DDR_HCAL]
1: ldr r0, [r1, #PXA3_DMCISR] @ wait for DLL-lock interrupt status
tst r0, #PXA3_DMCIER_EDLP
beq 1b
ldr r0, [r1, #PXA3_MDCNFG] @ set PXA3_MDCNFG[DMCEN]
orr r0, r0, #PXA3_MDCNFG_DMCEN
str r0, [r1, #PXA3_MDCNFG]
1: ldr r0, [r1, #PXA3_MDCNFG] @ wait for controller enable
tst r0, #PXA3_MDCNFG_DMCEN
beq 1b
ldr r0, [r1, #PXA3_DDR_HCAL] @ set PXA3_DDR_HCAL[HCRNG]
orr r0, r0, #2 @ HCRNG
str r0, [r1, #PXA3_DDR_HCAL]
ldr r0, [r1, #PXA3_DMCIER] @ Clear the interrupt
bic r0, r0, #0x20000000
str r0, [r1, #PXA3_DMCIER]
ret lr
ENTRY(pm_enter_standby_end)
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,238
|
arch/arm/mach-pxa/sleep.S
|
/*
* Low-level PXA250/210 sleep/wakeUp support
*
* Initial SA1110 code:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
*
* Adapted for PXA by Nicolas Pitre:
* Copyright (c) 2002 Monta Vista Software, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#include <mach/smemc.h>
#include <mach/pxa2xx-regs.h>
#define MDREFR_KDIV 0x200a4000 // all banks
#define CCCR_SLEEP 0x00000107 // L=7 2N=2 A=0 PPDIS=0 CPDIS=0
.text
#ifdef CONFIG_PXA3xx
/*
* pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4)
*/
/*
 * pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4).
 * Does not return; the CPU is held in the branch-to-self until power-off.
 */
ENTRY(pxa3xx_finish_suspend)
mov r0, #0x06 @ S2D3C4 mode
mcr p14, 0, r0, c7, c0, 0 @ enter sleep
20: b 20b @ waiting for sleep
#ifdef CONFIG_PXA27x
/*
* pxa27x_finish_suspend()
*
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
*/
/*
 * pxa27x_finish_suspend: stage all values needed for sleep entry in
 * registers (r1 = PWRMODE, r2 = phys-0 alias, r4/r5 = MDREFR, r6-r8 =
 * CCCR handling, r0 = CLKCFG), then jump to the shared cache-line-aligned
 * pxa_cpu_do_suspend tail. r0 on entry selects the PWRMODE M field.
 */
ENTRY(pxa27x_finish_suspend)
@ Put the processor to sleep
@ (also workaround for sighting 28071)
@ prepare value for sleep mode
mov r1, r0 @ sleep mode
@ prepare pointer to physical address 0 (virtual mapping in generic.c)
mov r2, #UNCACHED_PHYS_0
@ prepare SDRAM refresh settings
ldr r4, =MDREFR
ldr r5, [r4]
@ enable SDRAM self-refresh mode
orr r5, r5, #MDREFR_SLFRSH
@ set SDCLKx divide-by-2 bits (this is part of a workaround for Errata 50)
ldr r6, =MDREFR_KDIV
orr r5, r5, r6
@ Intel PXA270 Specification Update notes problems sleeping
@ with core operating above 91 MHz
@ (see Errata 50, ...processor does not exit from sleep...)
ldr r6, =CCCR
ldr r8, [r6] @ keep original value for resume
ldr r7, =CCCR_SLEEP @ prepare CCCR sleep value
mov r0, #0x2 @ prepare value for CLKCFG
@ align execution to a cache line
b pxa_cpu_do_suspend
#endif
#ifdef CONFIG_PXA25x
/*
* pxa25x_finish_suspend()
*
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
*/
/*
 * pxa25x_finish_suspend: PXA25x variant of the sleep-entry staging code.
 * Computes a safe CCCR value (clamped memory clock per Errata 31), stages
 * the same register set as the PXA27x path, and falls into
 * pxa_cpu_do_suspend. r0 on entry selects the PWRMODE M field.
 */
ENTRY(pxa25x_finish_suspend)
@ prepare value for sleep mode
mov r1, r0 @ sleep mode
@ prepare pointer to physical address 0 (virtual mapping in generic.c)
mov r2, #UNCACHED_PHYS_0
@ prepare SDRAM refresh settings
ldr r4, =MDREFR
ldr r5, [r4]
@ enable SDRAM self-refresh mode
orr r5, r5, #MDREFR_SLFRSH
@ Intel PXA255 Specification Update notes problems
@ about suspending with PXBus operating above 133MHz
@ (see Errata 31, GPIO output signals, ... unpredictable in sleep
@
@ We keep the change-down close to the actual suspend on SDRAM
@ as possible to eliminate messing about with the refresh clock
@ as the system will restore with the original speed settings
@
@ Ben Dooks, 13-Sep-2004
ldr r6, =CCCR
ldr r8, [r6] @ keep original value for resume
@ ensure x1 for run and turbo mode with memory clock
bic r7, r8, #CCCR_M_MASK | CCCR_N_MASK
orr r7, r7, #(1<<5) | (2<<7)
@ check that the memory frequency is within limits
and r14, r7, #CCCR_L_MASK
teq r14, #1
bicne r7, r7, #CCCR_L_MASK
orrne r7, r7, #1 @@ 99.53MHz
@ get ready for the change
@ note, turbo is not preserved over sleep so there is no
@ point in preserving it here. we save it on the stack with the
@ other CP registers instead.
mov r0, #0
mcr p14, 0, r0, c6, c0, 0 @ read-modify CLKCFG via CP14
orr r0, r0, #2 @ initiate change bit
b pxa_cpu_do_suspend
#endif
.ltorg
/*
 * pxa_cpu_do_suspend: shared cache-line-aligned sleep-entry tail.
 * Expects everything pre-staged by the pxa2xx variants above:
 *   r0 = CLKCFG value, r1 = PWRMODE value, r2 = phys-0 alias,
 *   r4 = &MDREFR, r5 = MDREFR sleep value, r6 = &CCCR,
 *   r7 = CCCR sleep value, r8 = original CCCR (restored for resume).
 * Must run entirely from cache: SDRAM is put into self-refresh below.
 */
.align 5
pxa_cpu_do_suspend:
@ All needed values are now in registers.
@ These last instructions should be in cache
@ initiate the frequency change...
str r7, [r6]
mcr p14, 0, r0, c6, c0, 0 @ write CLKCFG
@ restore the original cpu speed value for resume
str r8, [r6]
@ need 6 13-MHz cycles before changing PWRMODE
@ just set frequency to 91-MHz... 6*91/13 = 42
mov r0, #42
10: subs r0, r0, #1
bne 10b
@ Do not reorder...
@ Intel PXA270 Specification Update notes problems performing
@ external accesses after SDRAM is put in self-refresh mode
@ (see Errata 38 ...hangs when entering self-refresh mode)
@ force address lines low by reading at physical address 0
ldr r3, [r2]
@ put SDRAM into self-refresh
str r5, [r4]
@ enter sleep mode
mcr p14, 0, r1, c7, c0, 0 @ PWRMODE
20: b 20b @ loop waiting for sleep
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,654
|
arch/arm/mach-omap2/omap-headsmp.S
|
/*
* Secondary CPU startup routine source file.
*
* Copyright (C) 2009-2014 Texas Instruments, Inc.
*
* Author:
* Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* Interface functions needed for the SMP. This file is based on arm
* realview smp platform.
* Copyright (c) 2003 ARM Limited.
*
* This program is free software,you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include "omap44xx.h"
/* Physical address needed since MMU not enabled yet on secondary core */
#define AUX_CORE_BOOT0_PA 0x48281800
#define API_HYP_ENTRY 0x102
/*
 * omap_secondary_startup: common entry for OMAP secondary CPUs.
 * With CONFIG_SMP it hands off to the generic ARM secondary_startup;
 * otherwise the core is parked in a wfi loop (it should never run).
 *
 * Fix: the closing marker was written as `#ENDPROC(...)`. Kernel .S
 * files are run through the C preprocessor, and `#ENDPROC` is an invalid
 * preprocessing directive, so the stray '#' breaks the build (and would
 * otherwise drop the ENDPROC type/size annotation). Upstream has plain
 * ENDPROC here.
 */
ENTRY(omap_secondary_startup)
#ifdef CONFIG_SMP
b secondary_startup
#else
/* Should never get here */
again: wfi
b again
#endif
ENDPROC(omap_secondary_startup)
/*
* OMAP5 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
* The primary core will update this flag using a hardware
* register AuxCoreBoot0.
*/
/*
 * omap5_secondary_startup: spin on AuxCoreBoot0 (read at its physical
 * address since the MMU is off) until the boot CPU writes this core's id
 * into bits [5+], then continue through omap_secondary_startup.
 */
ENTRY(omap5_secondary_startup)
wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
ldr r0, [r2]
mov r0, r0, lsr #5 @ released-CPU id lives above bit 4
mrc p15, 0, r4, c0, c0, 5 @ MPIDR
and r4, r4, #0x0f @ this core's CPU id
cmp r0, r4
bne wait
b omap_secondary_startup
ENDPROC(omap5_secondary_startup)
/*
* Same as omap5_secondary_startup except we call into the ROM to
* enable HYP mode first. This is called instead of
* omap5_secondary_startup if the primary CPU was put into HYP mode by
* the boot loader.
*/
/*
 * omap5_secondary_hyp_startup: same holding loop as
 * omap5_secondary_startup, but first asks the ROM (via SMC service
 * API_HYP_ENTRY) to re-enter this core in HYP mode, resuming at hyp_boot.
 * Used when the boot loader started the primary CPU in HYP mode.
 */
ENTRY(omap5_secondary_hyp_startup)
wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
ldr r0, [r2]
mov r0, r0, lsr #5 @ released-CPU id lives above bit 4
mrc p15, 0, r4, c0, c0, 5 @ MPIDR
and r4, r4, #0x0f @ this core's CPU id
cmp r0, r4
bne wait_2
ldr r12, =API_HYP_ENTRY @ ROM service: enter HYP at address in r0
badr r0, hyp_boot @ resume point (badr handles Thumb bit)
smc #0
hyp_boot:
b omap_secondary_startup
ENDPROC(omap5_secondary_hyp_startup)
/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
* The primary core will update this flag using a hardware
* register AuxCoreBoot0.
*/
/*
 * omap4_secondary_startup: OMAP4 holding loop. AuxCoreBoot0 is only
 * readable through secure ROM service 0x103 (smc), returning it in r0;
 * spin until bits [9+] match this core's id.
 */
ENTRY(omap4_secondary_startup)
hold: ldr r12,=0x103 @ secure ROM service: read AuxCoreBoot0
dsb
smc #0 @ read from AuxCoreBoot0
mov r0, r0, lsr #9 @ released-CPU id lives above bit 8
mrc p15, 0, r4, c0, c0, 5 @ MPIDR
and r4, r4, #0x0f @ this core's CPU id
cmp r0, r4
bne hold
/*
 * we've been released from the wait loop,secondary_stack
 * should now contain the SVC stack for this core
 */
b omap_secondary_startup
ENDPROC(omap4_secondary_startup)
/*
 * omap4460_secondary_startup: as omap4_secondary_startup, plus the
 * GIC distributor re-enable workaround described in the comment below
 * (needed because the ROM code restores a r1pX-layout GICD_CTLR).
 */
ENTRY(omap4460_secondary_startup)
hold_2: ldr r12,=0x103 @ secure ROM service: read AuxCoreBoot0
dsb
smc #0 @ read from AuxCoreBoot0
mov r0, r0, lsr #9 @ released-CPU id lives above bit 8
mrc p15, 0, r4, c0, c0, 5 @ MPIDR
and r4, r4, #0x0f @ this core's CPU id
cmp r0, r4
bne hold_2
/*
 * GIC distributor control register has changed between
 * CortexA9 r1pX and r2pX. The Control Register secure
 * banked version is now composed of 2 bits:
 * bit 0 == Secure Enable
 * bit 1 == Non-Secure Enable
 * The Non-Secure banked register has not changed
 * Because the ROM Code is based on the r1pX GIC, the CPU1
 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
 * The workaround must be:
 * 1) Before doing the CPU1 wakeup, CPU0 must disable
 * the GIC distributor
 * 2) CPU1 must re-enable the GIC distributor on
 * it's wakeup path.
 */
ldr r1, =OMAP44XX_GIC_DIST_BASE
ldr r0, [r1]
orr r0, #1 @ re-set the (Secure) Enable bit
str r0, [r1]
/*
 * we've been released from the wait loop,secondary_stack
 * should now contain the SVC stack for this core
 */
b omap_secondary_startup
ENDPROC(omap4460_secondary_startup)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,560
|
arch/arm/mach-omap2/omap-smc.S
|
/*
* OMAP34xx and OMAP44xx secure APIs file.
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* Copyright (C) 2012 Ivaylo Dimitrov <freemangordon@abv.bg>
* Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
*
* This program is free software,you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
/*
* This is common routine to manage secure monitor API
* used to modify the PL310 secure registers.
* 'r0' contains the value to be modified and 'r12' contains
* the monitor API number. It uses few CPU registers
* internally and hence they need be backed up including
* link register "lr".
* Function signature : void omap_smc1(u32 fn, u32 arg)
*/
/*
 * omap_smc1(fn, arg): secure monitor call with r12 = service number and
 * r0 = argument (see the routine comment above). All potentially
 * monitor-clobbered registers are saved around the smc.
 */
ENTRY(omap_smc1)
stmfd sp!, {r2-r12, lr} @ monitor may clobber these
mov r12, r0 @ r12 = monitor API number
mov r0, r1 @ r0 = value to pass
dsb @ drain memory system before entering monitor
smc #0
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_smc1)
/**
* u32 omap_smc2(u32 id, u32 falg, u32 pargs)
* Low level common routine for secure HAL and PPA APIs.
* @id: Application ID of HAL APIs
* @flag: Flag to indicate the criticality of operation
* @pargs: Physical address of parameter list starting
* with number of parametrs
*/
/*
 * omap_smc2(id, flag, pargs): secure HAL/PPA dispatch via smc #0
 * (see the kerneldoc comment above). Marshals args into the monitor's
 * expected registers: r12 = service id, r0 = app id, r1 = process id,
 * r2 = flag, r3 = phys parameter list, r6 = 0xff.
 */
ENTRY(omap_smc2)
stmfd sp!, {r4-r12, lr} @ monitor may clobber these
mov r3, r2 @ r3 = phys addr of parameter list
mov r2, r1 @ r2 = criticality flag
mov r1, #0x0 @ Process ID
mov r6, #0xff @ monitor convention - TODO confirm meaning
mov r12, #0x00 @ Secure Service ID
mov r7, #0
mcr p15, 0, r7, c7, c5, 6 @ invalidate branch predictor array
dsb
dmb
smc #0
ldmfd sp!, {r4-r12, pc}
ENDPROC(omap_smc2)
/**
* u32 omap_smc3(u32 service_id, u32 process_id, u32 flag, u32 pargs)
* Low level common routine for secure HAL and PPA APIs via smc #1
* r0 - @service_id: Secure Service ID
* r1 - @process_id: Process ID
* r2 - @flag: Flag to indicate the criticality of operation
* r3 - @pargs: Physical address of parameter list
*/
/*
 * omap_smc3(service_id, process_id, flag, pargs): PPA call via smc #1
 * (see the kerneldoc comment above). r1-r3 already hold the remaining
 * args per the AAPCS; only the service id moves to r12.
 */
ENTRY(omap_smc3)
stmfd sp!, {r4-r11, lr} @ preserve callee-saved regs across monitor
mov r12, r0 @ Copy the secure service ID
mov r6, #0xff @ Indicate new Task call
dsb @ Memory Barrier (not sure if needed, copied from omap_smc2)
smc #1 @ Call PPA service
ldmfd sp!, {r4-r11, pc}
ENDPROC(omap_smc3)
/*
 * omap_modify_auxcoreboot0: secure ROM service 0x104 - modify the
 * AuxCoreBoot0 release register; C arguments pass through in r0/r1.
 */
ENTRY(omap_modify_auxcoreboot0)
stmfd sp!, {r1-r12, lr} @ monitor may clobber these
ldr r12, =0x104 @ ROM service id
dsb
smc #0
ldmfd sp!, {r1-r12, pc}
ENDPROC(omap_modify_auxcoreboot0)
/*
 * omap_auxcoreboot_addr: secure ROM service 0x105 - set the secondary
 * CPU boot address; C argument passes through in r0.
 */
ENTRY(omap_auxcoreboot_addr)
stmfd sp!, {r2-r12, lr} @ monitor may clobber these
ldr r12, =0x105 @ ROM service id
dsb
smc #0
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_auxcoreboot_addr)
/*
 * omap_read_auxcoreboot0: secure ROM service 0x103 - read AuxCoreBoot0;
 * the monitor returns the value in r0.
 */
ENTRY(omap_read_auxcoreboot0)
stmfd sp!, {r2-r12, lr} @ monitor may clobber these
ldr r12, =0x103 @ ROM service id
dsb
smc #0
ldmfd sp!, {r2-r12, pc}
ENDPROC(omap_read_auxcoreboot0)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,675
|
arch/arm/mach-omap2/sleep33xx.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Low level suspend code for AM33XX SoCs
*
* Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach, Vaibhav Bedia
*/
#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include "iomap.h"
#include "cm33xx.h"
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE 0x0002
/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr) (1 << (nr))
.arm
.align 3
/*
 * am33xx_do_wfi - low-power WFI entry for AM33xx.
 * In:  r0 = wfi_flags bitmask (WFI_FLAG_* from pm33xx.h) selecting
 *      which steps to run: cache flush, EMIF self-refresh/save/disable,
 *      wake-M3 handshake.
 * Register use across the function:
 *      r4 = wfi_flags (live for the whole function)
 *      r9 = base of am33xx_emif_sram_table (EMIF function pointers)
 * Returns r0 = 1 only on the abort path (a late interrupt cancelled
 * the WFI); on a real suspend, execution does not return here.
 */
ENTRY(am33xx_do_wfi)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
/* Save wfi_flags arg to data space */
mov r4, r0
adr r3, am33xx_pm_ro_sram_data
ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
str r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
/* Only flush cache if we know we are losing MPU context */
tst r4, #WFI_FLAG_FLUSH_CACHE
beq cache_skip_flush
/*
 * Flush all data from the L1 and L2 data cache before disabling
 * SCTLR.C bit.
 */
ldr r1, kernel_flush
blx r1
/*
 * Clear the SCTLR.C bit to prevent further data cache
 * allocation. Clearing SCTLR.C would make all the data accesses
 * strongly ordered and would not hit the cache.
 */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/*
 * Invalidate L1 and L2 data cache.
 */
ldr r1, kernel_flush
blx r1
adr r3, am33xx_pm_ro_sram_data @ r4 was clobbered above; reload flags
ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
ldr r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
cache_skip_flush:
/* Check if we want self refresh */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_enter_sr
adr r9, am33xx_emif_sram_table
ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET] @ call EMIF enter-self-refresh hook
blx r3
emif_skip_enter_sr:
/* Only necessary if PER is losing context */
tst r4, #WFI_FLAG_SAVE_EMIF
beq emif_skip_save
ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET] @ call EMIF save-context hook
blx r3
emif_skip_save:
/* Only can disable EMIF if we have entered self refresh */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_disable
/* Disable EMIF */
ldr r1, virt_emif_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
ldr r1, virt_emif_clkctrl
wait_emif_disable:
ldr r2, [r1] @ spin until IDLEST reports module disabled
mov r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
cmp r2, r3
bne wait_emif_disable
emif_skip_disable:
tst r4, #WFI_FLAG_WAKE_M3
beq wkup_m3_skip
/*
 * For the MPU WFI to be registered as an interrupt
 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
 * to DISABLED
 */
ldr r1, virt_mpu_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
wkup_m3_skip:
/*
 * Execute an ISB instruction to ensure that all of the
 * CP15 register changes have been committed.
 */
isb
/*
 * Execute a barrier instruction to ensure that all cache,
 * TLB and branch predictor maintenance operations issued
 * have completed.
 */
dsb
dmb
/*
 * Execute a WFI instruction and wait until the
 * STANDBYWFI output is asserted to indicate that the
 * CPU is in idle and low power state. CPU can speculatively
 * prefetch the instructions so add NOPs after WFI. Thirteen
 * NOPs as per Cortex-A8 pipeline.
 */
wfi
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/* We come here in case of an abort due to a late interrupt */
/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
ldr r1, virt_mpu_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
/* Re-enable EMIF */
ldr r1, virt_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable:
ldr r3, [r1] @ spin until CLKCTRL reads back the ENABLE value
cmp r2, r3
bne wait_emif_enable
/* Only necessary if PER is losing context */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_exit_sr_abt
adr r9, am33xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET] @ abort self-refresh on the abort path
blx r1
emif_skip_exit_sr_abt:
tst r4, #WFI_FLAG_FLUSH_CACHE
beq cache_skip_restore
/*
 * Set SCTLR.C bit to allow data cache allocation
 */
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #(1 << 2) @ Enable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
cache_skip_restore:
/* Let the suspend code know about the abort */
mov r0, #1
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(am33xx_do_wfi)
.align
/*
 * Byte offset of the deep-sleep resume entry point (which follows
 * immediately) from the start of am33xx_do_wfi, for use when this
 * code is relocated to SRAM.
 */
ENTRY(am33xx_resume_offset)
.word . - am33xx_do_wfi
/*
 * am33xx_resume_from_deep_sleep - wake path after deep sleep.
 * Runs before the MMU state is restored, so the EMIF CLKCTRL is
 * accessed via its physical address. Re-enables the EMIF, restores
 * its context and exits self-refresh via the SRAM function table,
 * then jumps to the generic cpu_resume with r0 = 0 (no abort).
 */
ENTRY(am33xx_resume_from_deep_sleep)
/* Re-enable EMIF */
ldr r0, phys_emif_clkctrl @ physical address: MMU not yet set up
mov r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r1, [r0]
wait_emif_enable1:
ldr r2, [r0] @ spin until CLKCTRL reads back ENABLE
cmp r1, r2
bne wait_emif_enable1
adr r9, am33xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET] @ restore EMIF registers
blx r1
ldr r1, [r9, #EMIF_PM_EXIT_SR_OFFSET] @ take DDR out of self-refresh
blx r1
resume_to_ddr:
/* We are back. Branch to the common CPU resume routine */
mov r0, #0 @ signal "no abort" to the resume code
ldr pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)
/*
 * Local variables: literal pool of addresses used above, the EMIF
 * SRAM function-pointer table, and the descriptor (am33xx_pm_sram)
 * the pm33xx driver uses to copy this code into SRAM.
 */
.align
kernel_flush:
.word v7_flush_dcache_all @ kernel L1/L2 D-cache flush routine
virt_mpu_clkctrl:
.word AM33XX_CM_MPU_MPU_CLKCTRL @ virtual address of MPU CLKCTRL
virt_emif_clkctrl:
.word AM33XX_CM_PER_EMIF_CLKCTRL @ virtual address of EMIF CLKCTRL
phys_emif_clkctrl:
.word (AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET) @ physical EMIF CLKCTRL (pre-MMU path)
.align 3
/* DDR related defines */
am33xx_emif_sram_table:
.space EMIF_PM_FUNCTIONS_SIZE @ filled at runtime with EMIF PM entry points
ENTRY(am33xx_pm_sram)
.word am33xx_do_wfi
.word am33xx_do_wfi_sz
.word am33xx_resume_offset
.word am33xx_emif_sram_table
.word am33xx_pm_ro_sram_data
resume_addr:
.word cpu_resume - PAGE_OFFSET + 0x80000000 @ physical address of cpu_resume
.align 3
ENTRY(am33xx_pm_ro_sram_data)
.space AMX3_PM_RO_SRAM_DATA_SIZE @ per-suspend data (flags etc.) written above
ENTRY(am33xx_do_wfi_sz)
.word . - am33xx_do_wfi @ total size of the relocatable region
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,559
|
arch/arm/mach-omap2/sram243x.S
|
/*
* linux/arch/arm/mach-omap2/sram243x.S
*
* Omap2 specific functions that need to be run in internal SRAM
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
* Richard Woodruff notes that any changes to this code must be carefully
* audited and tested to ensure that they don't cause a TLB miss while
* the SDRAM is inaccessible. Such a situation will crash the system
* since it will cause the ARM MMU to attempt to walk the page tables.
* These crashes may be intermittent.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "soc.h"
#include "iomap.h"
#include "prm2xxx.h"
#include "cm2xxx.h"
#include "sdrc.h"
.text
.align 3
/*
 * omap243x_sram_ddr_init - runs from SRAM; re-locks the SDRC DLL
 * at low speed/voltage and returns the locked DLL status value.
 * In:  r0 = pointer for returning the DLL status word
 *      r1 = DLL ctrl value to restore on exit
 *      r2 = CS1 vs CS0 select (1 = CS1 base, 2422 es2.05/1)
 *      r3 = force-unlock flag (1 = exit with unlocked value)
 * Sequence: drop frequency and voltage, put DLL in lock mode, wait,
 * read locked value, restore voltage/frequency, restore DLL ctrl.
 */
ENTRY(omap243x_sram_ddr_init)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
mov r12, r2 @ capture CS1 vs CS0
mov r8, r3 @ capture force parameter
/* frequency shift down */
ldr r2, omap243x_sdi_cm_clksel2_pll @ get address of dpllout reg
mov r3, #0x1 @ value for 1x operation
str r3, [r2] @ go to L1-freq operation
/* voltage shift down */
mov r9, #0x1 @ set up for L1 voltage call
bl voltage_shift @ go drop voltage
/* dll lock mode */
ldr r11, omap243x_sdi_sdrc_dlla_ctrl @ addr of dlla ctrl
ldr r10, [r11] @ get current val
cmp r12, #0x1 @ cs1 base (2422 es2.05/1)
addeq r11, r11, #0x8 @ if cs1 base, move to DLLB
mvn r9, #0x4 @ mask to get clear bit2
and r10, r10, r9 @ clear bit2 for lock mode.
orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
orr r10, r10, #0x2 @ 90 degree phase for all below 133MHz
str r10, [r11] @ commit to DLLA_CTRL
bl i_dll_wait @ wait for dll to lock
/* get dll value */
add r11, r11, #0x4 @ get addr of status reg
ldr r10, [r11] @ get locked value
/* voltage shift up */
mov r9, #0x0 @ shift back to L0-voltage
bl voltage_shift @ go raise voltage
/* frequency shift up */
mov r3, #0x2 @ value for 2x operation
str r3, [r2] @ go to L0-freq operation
/* reset entry mode for dllctrl */
sub r11, r11, #0x4 @ move from status to ctrl
cmp r12, #0x1 @ normalize if cs1 based
subeq r11, r11, #0x8 @ possibly back to DLLA
cmp r8, #0x1 @ if forced unlock exit
orreq r1, r1, #0x4 @ make sure exit with unlocked value
str r1, [r11] @ restore DLLA_CTRL high value
add r11, r11, #0x8 @ move to DLLB_CTRL addr
str r1, [r11] @ set value DLLB_CTRL
bl i_dll_wait @ wait for possible lock
/* set up for return, DDR should be good */
str r10, [r0] @ write dll_status and return counter
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
/* ensure the DLL has relocked (busy-wait; clobbers r4) */
i_dll_wait:
mov r4, #0x800 @ delay DLL relock, min 0x400 L3 clocks
i_dll_delay:
subs r4, r4, #0x1
bne i_dll_delay
ret lr
/*
 * shift up or down voltage, use R9 as input to tell level.
 * wait for it to finish, use 32k sync counter, 1tick=31uS.
 * (clobbers r3-r7)
 */
voltage_shift:
ldr r4, omap243x_sdi_prcm_voltctrl @ get addr of volt ctrl.
ldr r5, [r4] @ get value.
ldr r6, prcm_mask_val @ get value of mask
and r5, r5, r6 @ apply mask to clear bits
orr r5, r5, r9 @ build value for L0/L1-volt operation.
str r5, [r4] @ set up for change.
mov r3, #0x4000 @ get val for force
orr r5, r5, r3 @ build value for force
str r5, [r4] @ Force transition to L1
ldr r3, omap243x_sdi_timer_32ksynct_cr @ get addr of counter
ldr r5, [r3] @ get value
add r5, r5, #0x3 @ give it at most 93uS
volt_delay:
ldr r7, [r3] @ get timer value
cmp r5, r7 @ time up?
bhi volt_delay @ not yet->branch
ret lr @ back to caller.
/* literal pool: register addresses used above */
omap243x_sdi_cm_clksel2_pll:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
omap243x_sdi_sdrc_dlla_ctrl:
.word OMAP243X_SDRC_REGADDR(SDRC_DLLA_CTRL)
omap243x_sdi_prcm_voltctrl:
.word OMAP2430_PRCM_VOLTCTRL
prcm_mask_val:
.word 0xFFFF3FFC
omap243x_sdi_timer_32ksynct_cr:
.word OMAP2_L4_IO_ADDRESS(OMAP2430_32KSYNCT_BASE + 0x010)
ENTRY(omap243x_sram_ddr_init_sz)
.word . - omap243x_sram_ddr_init @ size of SRAM-relocatable region
/*
* Reprograms memory timings.
* r0 = [PRCM_FULL | PRCM_HALF] r1 = SDRC_DLLA_CTRL value r2 = [DDR | SDR]
* PRCM_FULL = 2, PRCM_HALF = 1, DDR = 1, SDR = 0
*/
.align 3
/*
 * omap243x_sram_reprogram_sdrc - runs from SRAM; changes the memory
 * clock ratio and rescales the SDRC refresh interval to match.
 * In:  r0 = PRCM_FULL (2) or PRCM_HALF (1) target speed
 *      r1 = SDRC_DLLA_CTRL value to program (DDR only)
 *      r2 = DDR (1) or SDR (0)
 * Voltage is raised before speeding up and dropped after slowing
 * down; for DDR the DLLs are rewritten and allowed to relock.
 */
ENTRY(omap243x_sram_reprogram_sdrc)
stmfd sp!, {r0 - r10, lr} @ save registers on stack
mov r3, #0x0 @ clear for mcr call
mcr p15, 0, r3, c7, c10, 4 @ memory barrier, finish ARM SDR/DDR
nop
nop
ldr r6, omap243x_srs_sdrc_rfr_ctrl @ get addr of refresh reg
ldr r5, [r6] @ get value
mov r5, r5, lsr #8 @ isolate rfr field and drop burst
cmp r0, #0x1 @ going to half speed?
movne r9, #0x0 @ if up set flag up for pre up, hi volt
blne voltage_shift_c @ adjust voltage
cmp r0, #0x1 @ going to half speed (post branch link)
moveq r5, r5, lsr #1 @ divide by 2 if to half
movne r5, r5, lsl #1 @ mult by 2 if to full
mov r5, r5, lsl #8 @ put rfr field back into place
add r5, r5, #0x1 @ turn on burst of 1
ldr r4, omap243x_srs_cm_clksel2_pll @ get address of out reg
ldr r3, [r4] @ get curr value
orr r3, r3, #0x3
bic r3, r3, #0x3 @ clear lower bits
orr r3, r3, r0 @ new state value
str r3, [r4] @ set new state (pll/x, x=1 or 2)
nop
nop
moveq r9, #0x1 @ if speed down, post down, drop volt
bleq voltage_shift_c
mcr p15, 0, r3, c7, c10, 4 @ memory barrier
str r5, [r6] @ set new RFR_1 value
add r6, r6, #0x30 @ get RFR_2 addr
str r5, [r6] @ set RFR_2
nop
cmp r2, #0x1 @ (SDR or DDR) do we need to adjust DLL
bne freq_out @ leave if SDR, no DLL function
/* With DDR, we need to take care of the DLL for the frequency change */
ldr r2, omap243x_srs_sdrc_dlla_ctrl @ addr of dlla ctrl
str r1, [r2] @ write out new SDRC_DLLA_CTRL
add r2, r2, #0x8 @ addr to SDRC_DLLB_CTRL
str r1, [r2] @ commit to SDRC_DLLB_CTRL
mov r1, #0x2000 @ wait DLL relock, min 0x400 L3 clocks
dll_wait:
subs r1, r1, #0x1
bne dll_wait
freq_out:
ldmfd sp!, {r0 - r10, pc} @ restore regs and return
/*
 * shift up or down voltage, use R9 as input to tell level.
 * wait for it to finish, use 32k sync counter, 1tick=31uS.
 * (clobbers r7, r8, r10)
 */
voltage_shift_c:
ldr r10, omap243x_srs_prcm_voltctrl @ get addr of volt ctrl
ldr r8, [r10] @ get value
ldr r7, ddr_prcm_mask_val @ get value of mask
and r8, r8, r7 @ apply mask to clear bits
orr r8, r8, r9 @ build value for L0/L1-volt operation.
str r8, [r10] @ set up for change.
mov r7, #0x4000 @ get val for force
orr r8, r8, r7 @ build value for force
str r8, [r10] @ Force transition to L1
ldr r10, omap243x_srs_timer_32ksynct @ get addr of counter
ldr r8, [r10] @ get value
add r8, r8, #0x2 @ give it at most 62uS (min 31+)
volt_delay_c:
ldr r7, [r10] @ get timer value
cmp r8, r7 @ time up?
bhi volt_delay_c @ not yet->branch
ret lr @ back to caller
/* literal pool: register addresses used above */
omap243x_srs_cm_clksel2_pll:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
omap243x_srs_sdrc_dlla_ctrl:
.word OMAP243X_SDRC_REGADDR(SDRC_DLLA_CTRL)
omap243x_srs_sdrc_rfr_ctrl:
.word OMAP243X_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap243x_srs_prcm_voltctrl:
.word OMAP2430_PRCM_VOLTCTRL
ddr_prcm_mask_val:
.word 0xFFFF3FFC
omap243x_srs_timer_32ksynct:
.word OMAP2_L4_IO_ADDRESS(OMAP2430_32KSYNCT_BASE + 0x010)
ENTRY(omap243x_sram_reprogram_sdrc_sz)
.word . - omap243x_sram_reprogram_sdrc @ size of SRAM-relocatable region
/*
* Set dividers and pll. Also recalculate DLL value for DDR and unlock mode.
*/
.align 3
/*
 * omap243x_sram_set_prcm - runs from SRAM; reprograms the DPLL
 * dividers and relocks it, then updates memory timings and the DLLs.
 * In:  r0 = new DPLL divider value (CM_CLKSEL1)
 *      r1 = new SDRC refresh (RFR_CTRL_0) value
 *      r2 = 1 to stay in bypass (skip DPLL relock)
 * The whole routine is preloaded into the I-cache up front so no
 * instruction fetches hit SDRAM while the DPLL/SDRC are in flux.
 */
ENTRY(omap243x_sram_set_prcm)
stmfd sp!, {r0-r12, lr} @ regs to stack
adr r4, pbegin @ addr of preload start
adr r8, pend @ addr of preload end
mcrr p15, 1, r8, r4, c12 @ preload into icache
pbegin:
/* move into fast relock bypass */
ldr r8, omap243x_ssp_pll_ctl @ get addr
ldr r5, [r8] @ get val
mvn r6, #0x3 @ clear mask
and r5, r5, r6 @ clear field
orr r7, r5, #0x2 @ fast relock val
str r7, [r8] @ go to fast relock
ldr r4, omap243x_ssp_pll_stat @ addr of stat
block:
/* wait for bypass */
ldr r8, [r4] @ stat value
and r8, r8, #0x3 @ mask for stat
cmp r8, #0x1 @ there yet
bne block @ loop if not
/* set new dpll dividers _after_ in bypass */
ldr r4, omap243x_ssp_pll_div @ get addr
str r0, [r4] @ set dpll ctrl val
ldr r4, omap243x_ssp_set_config @ get addr
mov r8, #1 @ valid cfg msk
str r8, [r4] @ make dividers take
mov r4, #100 @ dead spin a bit
wait_a_bit:
subs r4, r4, #1 @ dec loop
bne wait_a_bit @ delay done?
/* check if staying in bypass */
cmp r2, #0x1 @ stay in bypass?
beq pend @ jump over dpll relock
/* relock DPLL with new vals */
ldr r5, omap243x_ssp_pll_stat @ get addr
ldr r4, omap243x_ssp_pll_ctl @ get addr
orr r8, r7, #0x3 @ val for lock dpll
str r8, [r4] @ set val
mov r0, #1000 @ dead spin a bit
wait_more:
subs r0, r0, #1 @ dec loop
bne wait_more @ delay done?
wait_lock:
ldr r8, [r5] @ get lock val
and r8, r8, #3 @ isolate field
cmp r8, #2 @ locked?
bne wait_lock @ wait if not
pend:
/* update memory timings & briefly lock dll */
ldr r4, omap243x_ssp_sdrc_rfr @ get addr
str r1, [r4] @ update refresh timing
ldr r11, omap243x_ssp_dlla_ctrl @ get addr of DLLA ctrl
ldr r10, [r11] @ get current val
mvn r9, #0x4 @ mask to get clear bit2
and r10, r10, r9 @ clear bit2 for lock mode
orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
str r10, [r11] @ commit to DLLA_CTRL
add r11, r11, #0x8 @ move to dllb
str r10, [r11] @ hit DLLB also
mov r4, #0x800 @ relock time (min 0x400 L3 clocks)
wait_dll_lock:
subs r4, r4, #0x1
bne wait_dll_lock
nop
ldmfd sp!, {r0-r12, pc} @ restore regs and return
/* literal pool: register addresses used above */
omap243x_ssp_set_config:
.word OMAP2430_PRCM_CLKCFG_CTRL
omap243x_ssp_pll_ctl:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKEN)
omap243x_ssp_pll_stat:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_IDLEST)
omap243x_ssp_pll_div:
.word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL1)
omap243x_ssp_sdrc_rfr:
.word OMAP243X_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap243x_ssp_dlla_ctrl:
.word OMAP243X_SDRC_REGADDR(SDRC_DLLA_CTRL)
ENTRY(omap243x_sram_set_prcm_sz)
.word . - omap243x_sram_set_prcm @ size of SRAM-relocatable region
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,454
|
arch/arm/mach-omap2/sleep24xx.S
|
/*
* linux/arch/arm/mach-omap2/sleep.S
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*
* (C) Copyright 2006 Nokia Corporation
* Fixed idle loop sleep
* Igor Stoppa <igor.stoppa@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "omap24xx.h"
#include "sdrc.h"
/* First address of reserved address space? apparently valid for OMAP2 & 3 */
#define A_SDRC0_V (0xC0000000)
.text
/*
* omap24xx_cpu_suspend() - Forces OMAP into deep sleep state by completing
* SDRC shutdown then ARM shutdown. Upon wake MPU is back on so just restore
* SDRC.
*
* Input:
* R0 : DLL ctrl value pre-Sleep
* R1 : SDRC_DLLA_CTRL
* R2 : SDRC_POWER
*
 * The DPLL may be going to AutoIdle. It seems like the DPLL may be back on
* when we get called, but the DLL probably isn't. We will wait a bit more in
* case the DPLL isn't quite there yet. The code will wait on DLL for DDR even
* if in unlocked mode.
*
* For less than 242x-ES2.2 upon wake from a sleep mode where the external
* oscillator was stopped, a timing bug exists where a non-stabilized 12MHz
 * clock can pass into the PRCM and cause problems at the DSP and IVA.
* To work around this the code will switch to the 32kHz source prior to sleep.
* Post sleep we will shift back to using the DPLL. Apparently,
* CM_IDLEST_CLKGEN does not reflect the full clock change so you need to wait
* 3x12MHz + 3x32kHz clocks for a full switch.
*
* The DLL load value is not kept in RETENTION or OFF. It needs to be restored
* at wake
*/
.align 3
/*
 * omap24xx_cpu_suspend - see the block comment above for the full
 * contract. Register inputs:
 *   r0 = pre-sleep DLL ctrl value (0 => SDR, nonzero => DDR)
 *   r1 = address of SDRC_DLLA_CTRL
 *   r2 = address of SDRC_POWER
 * Puts SDRAM into self-refresh-on-idle, executes WFI, then on wake
 * takes SDRAM back out of self-refresh and (for DDR) forces a DLL
 * reload before returning.
 */
ENTRY(omap24xx_cpu_suspend)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
mov r3, #0x0 @ clear for mcr call
mcr p15, 0, r3, c7, c10, 4 @ memory barrier, hope SDR/DDR finished
nop
nop
ldr r4, [r2] @ read SDRC_POWER
orr r4, r4, #0x40 @ enable self refresh on idle req
mov r5, #0x2000 @ set delay (DPLL relock + DLL relock)
str r4, [r2] @ make it so
nop
mcr p15, 0, r3, c7, c0, 4 @ wait for interrupt
nop
loop:
subs r5, r5, #0x1 @ awake, wait just a bit
bne loop
/* The DPLL has to be on before we take the DDR out of self refresh */
bic r4, r4, #0x40 @ now clear self refresh bit.
str r4, [r2] @ write to SDRC_POWER
ldr r4, A_SDRC0 @ make a clock happen
ldr r4, [r4] @ read A_SDRC0
nop @ start auto refresh only after clk ok
movs r0, r0 @ see if DDR or SDR
strne r0, [r1] @ rewrite DLLA to force DLL reload
addne r1, r1, #0x8 @ move to DLLB
strne r0, [r1] @ rewrite DLLB to force DLL reload
mov r5, #0x1000 @ spin to let the DLLs relock
loop2:
subs r5, r5, #0x1
bne loop2
/* resume*/
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
A_SDRC0:
.word A_SDRC0_V @ dummy SDRAM address used to force a clock edge
ENTRY(omap24xx_cpu_suspend_sz)
.word . - omap24xx_cpu_suspend @ size of SRAM-relocatable region
|
AirFortressIlikara/LS2K0300-linux-4.19
| 15,555
|
arch/arm/mach-omap2/sleep34xx.S
|
/*
* (C) Copyright 2007
* Texas Instruments
* Karthik Dasu <karthik-dp@ti.com>
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "omap34xx.h"
#include "iomap.h"
#include "cm3xxx.h"
#include "prm3xxx.h"
#include "sdrc.h"
#include "sram.h"
#include "control.h"
/*
* Registers access definitions
*/
#define SDRC_SCRATCHPAD_SEM_OFFS 0xc
#define SDRC_SCRATCHPAD_SEM_V OMAP343X_SCRATCHPAD_REGADDR\
(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P OMAP3430_PRM_BASE + CORE_MOD +\
OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P OMAP3_SRAM_PA
#define CONTROL_STAT OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL (OMAP343X_CTRL_BASE +\
OMAP36XX_CONTROL_MEM_RTA_CTRL)
/* Move this as correct place is available */
#define SCRATCHPAD_MEM_OFFS 0x310
#define SCRATCHPAD_BASE_P (OMAP343X_CTRL_BASE +\
OMAP343X_CONTROL_MEM_WKUP +\
SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P (OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P (OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P (OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P (OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P (OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P (OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P (OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
/*
* This file needs be built unconditionally as ARM to interoperate correctly
* with non-Thumb-2-capable firmware.
*/
.arm
/*
* API functions
*/
.text
/*
* L2 cache needs to be toggled for stable OFF mode functionality on 3630.
* This function sets up a flag that will allow for this toggling to take
* place on 3630. Hopefully some version in the future may not need this.
*/
/*
 * enable_omap3630_toggle_l2_on_restore - set the l2dis_3630 flag to 1
 * so the OFF-mode restore path will disable/re-enable the L2 cache
 * (3630 stability workaround). The flag is reached via a PC-relative
 * offset word so the code works at any load address.
 */
ENTRY(enable_omap3630_toggle_l2_on_restore)
stmfd sp!, {lr} @ save registers on stack
/* Setup so that we will disable and enable l2 */
mov r1, #0x1
adrl r3, l2dis_3630_offset @ may be too distant for plain adr
ldr r2, [r3] @ value for offset
str r1, [r2, r3] @ write to l2dis_3630
ldmfd sp!, {pc} @ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)
/*
* Function to call rom code to save secure ram context.
*
* r0 = physical address of the parameters
*/
/*
 * save_secure_ram_context - ask the secure ROM (PPA service 25) to
 * save the secure RAM context.
 * In: r0 = physical address of the parameter block (moved to r3).
 * The service ID travels in both r0 and r12; barriers drain memory
 * traffic before the SMC.
 */
ENTRY(save_secure_ram_context)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
mov r3, r0 @ physical address of parameters
mov r0, #25 @ set service ID for PPA
mov r12, r0 @ copy secure service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
nop @ padding after SMC (ROM code convention)
nop
nop
nop
ldmfd sp!, {r4 - r11, pc}
ENDPROC(save_secure_ram_context)
/*
* ======================
* == Idle entry point ==
* ======================
*/
/*
* Forces OMAP into idle state
*
* omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
* and executes the WFI instruction. Calling WFI effectively changes the
* power domains states to the desired target power states.
*
*
* Notes:
* - only the minimum set of functions gets copied to internal SRAM at boot
* and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
* pointers in SDRAM or SRAM are called depending on the desired low power
* target state.
* - when the OMAP wakes up it continues at different execution points
* depending on the low power mode (non-OFF vs OFF modes),
* cf. 'Resume path for xxx mode' comments.
*/
.align 3
/*
 * omap34xx_cpu_suspend - idle entry point (see block comment above).
 * r0 = context-loss level (0..3). For r0 == 0 it tail-jumps to the
 * SRAM copy of the WFI code; otherwise it flushes and disables the
 * L1 data cache and falls into omap3_do_wfi in SDRAM.
 * Note: the saved {r4-r11, lr} frame is popped by omap3_do_wfi's
 * exit path, not here.
 */
ENTRY(omap34xx_cpu_suspend)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
/*
 * r0 contains information about saving context:
 * 0 - No context lost
 * 1 - Only L1 and logic lost
 * 2 - Only L2 lost (Even L1 is retained we clean it along with L2)
 * 3 - Both L1 and L2 lost and logic lost
 */
/*
 * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
 * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
 */
ldr r4, omap3_do_wfi_sram_addr
ldr r5, [r4]
cmp r0, #0x0 @ If no context save required,
bxeq r5 @ jump to the WFI code in SRAM
/* Otherwise fall through to the save context code */
save_context_wfi:
/*
 * jump out to kernel flush routine
 * - reuse that code is better
 * - it executes in a cached space so is faster than refetch per-block
 * - should be faster and will change with kernel
 * - 'might' have to copy address, load and jump to it
 * Flush all data from the L1 data cache before disabling
 * SCTLR.C bit.
 */
ldr r1, kernel_flush
mov lr, pc @ manual call: set return address, then branch
bx r1
/*
 * Clear the SCTLR.C bit to prevent further data cache
 * allocation. Clearing SCTLR.C would make all the data accesses
 * strongly ordered and would not hit the cache.
 */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
/*
 * Invalidate L1 data cache. Even though only invalidate is
 * necessary exported flush API is used here. Doing clean
 * on already clean cache would be almost NOP.
 */
ldr r1, kernel_flush
blx r1
b omap3_do_wfi @ continue in the SDRAM copy of the WFI code
ENDPROC(omap34xx_cpu_suspend)
/* literal pool for the suspend entry */
omap3_do_wfi_sram_addr:
.word omap3_do_wfi_sram @ pointer to the SRAM copy of omap3_do_wfi
kernel_flush:
.word v7_flush_dcache_all @ kernel L1/L2 D-cache flush routine
/* ===================================
* == WFI instruction => Enter idle ==
* ===================================
*/
/*
* Do WFI instruction
* Includes the resume path for non-OFF modes
*
* This code gets copied to internal SRAM and is accessible
* from both SDRAM and SRAM:
* - executed from SRAM for non-off modes (omap3_do_wfi_sram),
* - executed from SDRAM for OFF mode (omap3_do_wfi).
*/
.align 3
/*
 * omap3_do_wfi - enter idle via WFI and handle the non-OFF resume
 * path, including the i581 workaround (verify DPLL3/SDRC ready and
 * the SDRC DLL locked before touching SDRAM). Copied to SRAM as
 * omap3_do_wfi_sram; omap3_do_wfi_sz below gives the copy size.
 * The final ldmfd pops the frame pushed by omap34xx_cpu_suspend.
 */
ENTRY(omap3_do_wfi)
ldr r4, sdrc_power @ read the SDRC_POWER register
ldr r5, [r4] @ read the contents of SDRC_POWER
orr r5, r5, #0x40 @ enable self refresh on idle req
str r5, [r4] @ write back to SDRC_POWER register
/* Data memory barrier and Data sync barrier */
dsb
dmb
/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
wfi @ wait for interrupt
/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
nop @ padding against speculative prefetch past WFI
nop
nop
nop
nop
nop
nop
nop
nop
nop
/*
 * This function implements the erratum ID i581 WA:
 * SDRC state restore before accessing the SDRAM
 *
 * Only used at return from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */
/* Make sure SDRC accesses are ok */
wait_sdrc_ok:
/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
ldr r4, cm_idlest_ckgen
wait_dpll3_lock:
ldr r5, [r4] @ spin until DPLL3 reports locked
tst r5, #1
beq wait_dpll3_lock
ldr r4, cm_idlest1_core
wait_sdrc_ready:
ldr r5, [r4] @ spin until SDRC idle status clears
tst r5, #0x2
bne wait_sdrc_ready
/* allow DLL powerdown upon hw idle req */
ldr r4, sdrc_power
ldr r5, [r4]
bic r5, r5, #0x40 @ clear self-refresh-on-idle again
str r5, [r4]
is_dll_in_lock_mode:
/* Is dll in lock mode? */
ldr r4, sdrc_dlla_ctrl
ldr r5, [r4]
tst r5, #0x4 @ bit2 set => unlocked mode
bne exit_nonoff_modes @ Return if locked
/* wait till dll locks */
wait_dll_lock_timed:
ldr r4, sdrc_dlla_status
/* Wait 20uS for lock */
mov r6, #8
wait_dll_lock:
subs r6, r6, #0x1
beq kick_dll @ timed out: kick the DLL and retry
ldr r5, [r4]
and r5, r5, #0x4
cmp r5, #0x4
bne wait_dll_lock
b exit_nonoff_modes @ Return when locked
/* disable/reenable DLL if not locked */
kick_dll:
ldr r4, sdrc_dlla_ctrl
ldr r5, [r4]
mov r6, r5
bic r6, #(1<<3) @ disable dll
str r6, [r4]
dsb
orr r6, r6, #(1<<3) @ enable dll
str r6, [r4]
dsb
b wait_dll_lock_timed
exit_nonoff_modes:
/* Re-enable C-bit if needed */
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
mcreq p15, 0, r0, c1, c0, 0
isb
/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(omap3_do_wfi)
/* literal pool: register addresses used above */
sdrc_power:
.word SDRC_POWER_V
cm_idlest1_core:
.word CM_IDLEST1_CORE_V
cm_idlest_ckgen:
.word CM_IDLEST_CKGEN_V
sdrc_dlla_status:
.word SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
.word SDRC_DLLA_CTRL_V
ENTRY(omap3_do_wfi_sz)
.word . - omap3_do_wfi @ size of the region copied to SRAM
/*
* ==============================
* == Resume path for OFF mode ==
* ==============================
*/
/*
* The restore_* functions are called by the ROM code
* when back from WFI in OFF mode.
* Cf. the get_*restore_pointer functions.
*
* restore_es3: applies to 34xx >= ES3.0
* restore_3630: applies to 36xx
* restore: common code for 3xxx
*
* Note: when back from CORE and MPU OFF mode we are running
* from SDRAM, without MMU, without the caches and prediction.
* Also the SRAM content has been cleared.
*/
/*
 * omap3_restore_es3 - OFF-mode restore entry for 34xx >= ES3.0.
 * If the previous CORE power state was OFF, copies the i443
 * workaround (es3_sdrc_fix) word-by-word into SRAM and runs it
 * before falling into the common omap3_restore. Runs from SDRAM
 * with MMU/caches off (see header comment above).
 */
ENTRY(omap3_restore_es3)
ldr r5, pm_prepwstst_core_p
ldr r4, [r5]
and r4, r4, #0x3
cmp r4, #0x0 @ Check if previous power state of CORE is OFF
bne omap3_restore @ Fall through to OMAP3 common code
adr r0, es3_sdrc_fix @ r0 = copy source
ldr r1, sram_base @ r1 = copy destination in SRAM
ldr r2, es3_sdrc_fix_sz
mov r2, r2, ror #2 @ byte count -> word count
copy_to_sram:
ldmia r0!, {r3} @ val = *src
stmia r1!, {r3} @ *dst = val
subs r2, r2, #0x1 @ num_words--
bne copy_to_sram
ldr r1, sram_base
blx r1 @ execute the SRAM copy of es3_sdrc_fix
b omap3_restore @ Fall through to OMAP3 common code
ENDPROC(omap3_restore_es3)
/*
 * omap3_restore_3630 - OFF-mode restore entry for 36xx.
 * If the previous CORE power state was OFF, disables RTA via
 * CONTROL_MEM_RTA_CTRL, then falls through to omap3_restore.
 */
ENTRY(omap3_restore_3630)
ldr r1, pm_prepwstst_core_p
ldr r2, [r1]
and r2, r2, #0x3
cmp r2, #0x0 @ Check if previous power state of CORE is OFF
bne omap3_restore @ Fall through to OMAP3 common code
/* Disable RTA before giving control */
ldr r1, control_mem_rta
mov r2, #OMAP36XX_RTA_DISABLE
str r2, [r1]
ENDPROC(omap3_restore_3630)
/* Fall through to common code for the remaining logic */
ENTRY(omap3_restore)
/*
 * Common OMAP3 wakeup entry, reached after an MPU reset out of
 * RET/OFF.  Decides from PM_PWSTCTRL_MPU what context was lost,
 * performs the L2 disable/invalidate/re-enable dance (via secure
 * SMC services on HS devices, plain SMIs on GP) and then falls
 * into the generic cpu_resume.
 */
/*
 * Read the pwstctrl register to check the reason for mpu reset.
 * This tells us what was lost.
 */
ldr r1, pm_pwstctrl_mpu
ldr r2, [r1]
and r2, r2, #0x3
cmp r2, #0x0 @ Check if target power state was OFF or RET
bne logic_l1_restore
/* Target state was OFF: check the saved 3630 flag to see if L2 must be disabled */
adr r1, l2dis_3630_offset @ address for offset
ldr r0, [r1] @ value for offset
ldr r0, [r1, r0] @ value at l2dis_3630
cmp r0, #0x1 @ should we disable L2 on 3630?
bne skipl2dis
mrc p15, 0, r0, c1, c0, 1
bic r0, r0, #2 @ disable L2 cache
mcr p15, 0, r0, c1, c0, 1
skipl2dis:
ldr r0, control_stat
ldr r1, [r0]
and r1, #0x700
cmp r1, #0x300
beq l2_inv_gp @ GP device: use direct SMIs below
/* HS/EMU device: invalidate L2 and set AUXCTRL through PPA secure services */
adr r0, l2_inv_api_params_offset
ldr r3, [r0]
add r3, r3, r0 @ r3 points to dummy parameters
mov r0, #40 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
/* Write to Aux control register to set some bits */
mov r0, #42 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
ldr r4, scratchpad_base
ldr r3, [r4, #0xBC] @ r3 points to parameters
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
/* Restore L2 aux control register */
@ set service ID for PPA
mov r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
mov r12, r0 @ copy service ID in r12
mov r1, #0 @ set task ID for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
ldr r4, scratchpad_base
ldr r3, [r4, #0xBC]
adds r3, r3, #8 @ r3 points to parameters
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
#endif
b logic_l1_restore
.align
l2_inv_api_params_offset:
.long l2_inv_api_params - . @ PC-relative so it works pre-MMU
l2_inv_gp:
/* Execute smi to invalidate L2 cache */
mov r12, #0x1 @ set up to invalidate L2
smc #0 @ Call SMI monitor (smieq)
/* Write to Aux control register to set some bits */
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
ldr r0, [r3,#4]
mov r12, #0x3
smc #0 @ Call SMI monitor (smieq)
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
ldr r0, [r3,#12]
mov r12, #0x2
smc #0 @ Call SMI monitor (smieq)
logic_l1_restore:
adr r0, l2dis_3630_offset @ address for offset
ldr r1, [r0] @ value for offset
ldr r1, [r0, r1] @ value at l2dis_3630
cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
bne skipl2reen
mrc p15, 0, r1, c1, c0, 1
orr r1, r1, #2 @ re-enable L2 cache
mcr p15, 0, r1, c1, c0, 1
skipl2reen:
/* Now branch to the common CPU resume function */
b cpu_resume
ENDPROC(omap3_restore)
.ltorg
/*
* Local variables
*/
/* Literal pool: physical addresses of the PRCM/SCM registers used above. */
pm_prepwstst_core_p:
.word PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
.word PM_PWSTCTRL_MPU_P
scratchpad_base:
.word SCRATCHPAD_BASE_P
sram_base:
.word SRAM_BASE_P + 0x8000
control_stat:
.word CONTROL_STAT
control_mem_rta:
.word CONTROL_MEM_RTA_CTRL
l2dis_3630_offset:
.long l2dis_3630 - . @ PC-relative offset, usable before the MMU is on
.data
.align 2
l2dis_3630:
.word 0 @ flag: nonzero => disable/re-enable L2 around OFF on 3630
.data
.align 2
l2_inv_api_params:
.word 0x1, 0x00 @ dummy parameter block passed to the PPA L2-invalidate call
/*
* Internal functions
*/
/*
* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
* Copied to and run from SRAM in order to reconfigure the SDRC parameters.
*/
.text
.align 3
ENTRY(es3_sdrc_fix)
/*
 * Erratum i443 workaround (34xx >= ES3.0), run from SRAM:
 * clear the SDRC_SYSCONFIG "part access blocked" bit if set,
 * rewrite the mode and EMR2 registers for both chip selects,
 * and kick a manual autorefresh on each CS.
 * Register usage: r4 = register address, r5 = register value.
 */
ldr r4, sdrc_syscfg @ get config addr
ldr r5, [r4] @ get value
tst r5, #0x100 @ is part access blocked
it eq
biceq r5, r5, #0x100 @ clear bit if set
str r5, [r4] @ write back change
ldr r4, sdrc_mr_0 @ get config addr
ldr r5, [r4] @ get value
str r5, [r4] @ write back change
ldr r4, sdrc_emr2_0 @ get config addr
ldr r5, [r4] @ get value
str r5, [r4] @ write back change
ldr r4, sdrc_manual_0 @ get config addr
mov r5, #0x2 @ autorefresh command
str r5, [r4] @ kick off refreshes
ldr r4, sdrc_mr_1 @ get config addr
ldr r5, [r4] @ get value
str r5, [r4] @ write back change
ldr r4, sdrc_emr2_1 @ get config addr
ldr r5, [r4] @ get value
str r5, [r4] @ write back change
ldr r4, sdrc_manual_1 @ get config addr
mov r5, #0x2 @ autorefresh command
str r5, [r4] @ kick off refreshes
bx lr
/*
 * Local variables: physical SDRC register addresses (code runs
 * from SRAM, possibly with the MMU off).
 */
.align
sdrc_syscfg:
.word SDRC_SYSCONFIG_P
sdrc_mr_0:
.word SDRC_MR_0_P
sdrc_emr2_0:
.word SDRC_EMR2_0_P
sdrc_manual_0:
.word SDRC_MANUAL_0_P
sdrc_mr_1:
.word SDRC_MR_1_P
sdrc_emr2_1:
.word SDRC_EMR2_1_P
sdrc_manual_1:
.word SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
.word . - es3_sdrc_fix @ byte size of the routine, for the SRAM copy
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,559
|
arch/arm/mach-omap2/sram242x.S
|
/*
* linux/arch/arm/mach-omap2/sram242x.S
*
* Omap2 specific functions that need to be run in internal SRAM
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
* Richard Woodruff notes that any changes to this code must be carefully
* audited and tested to ensure that they don't cause a TLB miss while
* the SDRAM is inaccessible. Such a situation will crash the system
* since it will cause the ARM MMU to attempt to walk the page tables.
* These crashes may be intermittent.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "soc.h"
#include "iomap.h"
#include "prm2xxx.h"
#include "cm2xxx.h"
#include "sdrc.h"
.text
.align 3
ENTRY(omap242x_sram_ddr_init)
/*
 * Relock the SDRC DLL at reduced frequency/voltage and hand the
 * locked DLL status value back through [r0].
 * In: r0 = pointer for returned dll status, r1 = DLL ctrl value
 *     to restore, r2 = CS1-vs-CS0 flag, r3 = force-unlock flag.
 * Runs from SRAM; SDRAM must not be touched while retiming.
 */
stmfd sp!, {r0 - r12, lr} @ save registers on stack
mov r12, r2 @ capture CS1 vs CS0
mov r8, r3 @ capture force parameter
/* frequency shift down */
ldr r2, omap242x_sdi_cm_clksel2_pll @ get address of dpllout reg
mov r3, #0x1 @ value for 1x operation
str r3, [r2] @ go to L1-freq operation
/* voltage shift down */
mov r9, #0x1 @ set up for L1 voltage call
bl voltage_shift @ go drop voltage
/* dll lock mode */
ldr r11, omap242x_sdi_sdrc_dlla_ctrl @ addr of dlla ctrl
ldr r10, [r11] @ get current val
cmp r12, #0x1 @ cs1 base (2422 es2.05/1)
addeq r11, r11, #0x8 @ if cs1 base, move to DLLB
mvn r9, #0x4 @ mask to get clear bit2
and r10, r10, r9 @ clear bit2 for lock mode.
orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
orr r10, r10, #0x2 @ 90 degree phase for all below 133MHz
str r10, [r11] @ commit to DLLA_CTRL
bl i_dll_wait @ wait for dll to lock
/* get dll value */
add r11, r11, #0x4 @ get addr of status reg
ldr r10, [r11] @ get locked value
/* voltage shift up */
mov r9, #0x0 @ shift back to L0-voltage
bl voltage_shift @ go raise voltage
/* frequency shift up */
mov r3, #0x2 @ value for 2x operation
str r3, [r2] @ go to L0-freq operation
/* reset entry mode for dllctrl */
sub r11, r11, #0x4 @ move from status to ctrl
cmp r12, #0x1 @ normalize if cs1 based
subeq r11, r11, #0x8 @ possibly back to DLLA
cmp r8, #0x1 @ if forced unlock exit
orreq r1, r1, #0x4 @ make sure exit with unlocked value
str r1, [r11] @ restore DLLA_CTRL high value
add r11, r11, #0x8 @ move to DLLB_CTRL addr
str r1, [r11] @ set value DLLB_CTRL
bl i_dll_wait @ wait for possible lock
/* set up for return, DDR should be good */
str r10, [r0] @ write dll_status and return counter
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
/* ensure the DLL has relocked */
i_dll_wait:
/* Fixed-count spin delay; clobbers r4 and flags only. */
mov r4, #0x800 @ delay DLL relock, min 0x400 L3 clocks
i_dll_delay:
subs r4, r4, #0x1
bne i_dll_delay
ret lr
/*
 * shift up or down voltage, use R9 as input to tell level.
 * wait for it to finish, use 32k sync counter, 1tick=31uS.
 */
voltage_shift:
/* Clobbers r3-r7; r9 = target level (0 = L0, 1 = L1). */
ldr r4, omap242x_sdi_prcm_voltctrl @ get addr of volt ctrl.
ldr r5, [r4] @ get value.
ldr r6, prcm_mask_val @ get value of mask
and r5, r5, r6 @ apply mask to clear bits
orr r5, r5, r9 @ build value for L0/L1-volt operation.
str r5, [r4] @ set up for change.
mov r3, #0x4000 @ get val for force
orr r5, r5, r3 @ build value for force
str r5, [r4] @ Force transition to L1
ldr r3, omap242x_sdi_timer_32ksynct_cr @ get addr of counter
ldr r5, [r3] @ get value
add r5, r5, #0x3 @ give it at most 93uS
volt_delay:
ldr r7, [r3] @ get timer value
cmp r5, r7 @ time up?
bhi volt_delay @ not yet->branch
ret lr @ back to caller.
/* Literal pool for omap242x_sram_ddr_init (register addresses + mask). */
omap242x_sdi_cm_clksel2_pll:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
omap242x_sdi_sdrc_dlla_ctrl:
.word OMAP242X_SDRC_REGADDR(SDRC_DLLA_CTRL)
omap242x_sdi_prcm_voltctrl:
.word OMAP2420_PRCM_VOLTCTRL
prcm_mask_val:
.word 0xFFFF3FFC @ clears the voltage-level and force fields
omap242x_sdi_timer_32ksynct_cr:
.word OMAP2_L4_IO_ADDRESS(OMAP2420_32KSYNCT_BASE + 0x010)
ENTRY(omap242x_sram_ddr_init_sz)
.word . - omap242x_sram_ddr_init @ byte size, for the SRAM copy
/*
* Reprograms memory timings.
* r0 = [PRCM_FULL | PRCM_HALF] r1 = SDRC_DLLA_CTRL value r2 = [DDR | SDR]
* PRCM_FULL = 2, PRCM_HALF = 1, DDR = 1, SDR = 0
*/
.align 3
ENTRY(omap242x_sram_reprogram_sdrc)
/*
 * Retime SDRAM between full and half speed (see the comment block
 * above for the r0-r2 argument encoding), scaling the refresh rate
 * and voltage to match, and relocking the DLLs when running DDR.
 * Runs from SRAM; ordering of the stores below is load-bearing.
 */
stmfd sp!, {r0 - r10, lr} @ save registers on stack
mov r3, #0x0 @ clear for mrc call
mcr p15, 0, r3, c7, c10, 4 @ memory barrier, finish ARM SDR/DDR
nop
nop
ldr r6, omap242x_srs_sdrc_rfr_ctrl @ get addr of refresh reg
ldr r5, [r6] @ get value
mov r5, r5, lsr #8 @ isolate rfr field and drop burst
cmp r0, #0x1 @ going to half speed?
movne r9, #0x0 @ if up set flag up for pre up, hi volt
blne voltage_shift_c @ adjust voltage
cmp r0, #0x1 @ going to half speed (post branch link)
moveq r5, r5, lsr #1 @ divide by 2 if to half
movne r5, r5, lsl #1 @ mult by 2 if to full
mov r5, r5, lsl #8 @ put rfr field back into place
add r5, r5, #0x1 @ turn on burst of 1
ldr r4, omap242x_srs_cm_clksel2_pll @ get address of out reg
ldr r3, [r4] @ get curr value
orr r3, r3, #0x3 @ NOTE(review): immediately undone by the bic below; kept as-is
bic r3, r3, #0x3 @ clear lower bits
orr r3, r3, r0 @ new state value
str r3, [r4] @ set new state (pll/x, x=1 or 2)
nop
nop
moveq r9, #0x1 @ if speed down, post down, drop volt
bleq voltage_shift_c
mcr p15, 0, r3, c7, c10, 4 @ memory barrier
str r5, [r6] @ set new RFR_1 value
add r6, r6, #0x30 @ get RFR_2 addr
str r5, [r6] @ set RFR_2
nop
cmp r2, #0x1 @ (SDR or DDR) do we need to adjust DLL
bne freq_out @ leave if SDR, no DLL function
/* With DDR, we need to take care of the DLL for the frequency change */
ldr r2, omap242x_srs_sdrc_dlla_ctrl @ addr of dlla ctrl
str r1, [r2] @ write out new SDRC_DLLA_CTRL
add r2, r2, #0x8 @ addr to SDRC_DLLB_CTRL
str r1, [r2] @ commit to SDRC_DLLB_CTRL
mov r1, #0x2000 @ wait DLL relock, min 0x400 L3 clocks
dll_wait:
subs r1, r1, #0x1
bne dll_wait
freq_out:
ldmfd sp!, {r0 - r10, pc} @ restore regs and return
/*
 * shift up or down voltage, use R9 as input to tell level.
 * wait for it to finish, use 32k sync counter, 1tick=31uS.
 */
voltage_shift_c:
/* Clobbers r7, r8, r10; r9 = target level (0 = L0, 1 = L1). */
ldr r10, omap242x_srs_prcm_voltctrl @ get addr of volt ctrl
ldr r8, [r10] @ get value
ldr r7, ddr_prcm_mask_val @ get value of mask
and r8, r8, r7 @ apply mask to clear bits
orr r8, r8, r9 @ build value for L0/L1-volt operation.
str r8, [r10] @ set up for change.
mov r7, #0x4000 @ get val for force
orr r8, r8, r7 @ build value for force
str r8, [r10] @ Force transition to L1
ldr r10, omap242x_srs_timer_32ksynct @ get addr of counter
ldr r8, [r10] @ get value
add r8, r8, #0x2 @ give it at most 62uS (min 31+)
volt_delay_c:
ldr r7, [r10] @ get timer value
cmp r8, r7 @ time up?
bhi volt_delay_c @ not yet->branch
ret lr @ back to caller
/* Literal pool for omap242x_sram_reprogram_sdrc. */
omap242x_srs_cm_clksel2_pll:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
omap242x_srs_sdrc_dlla_ctrl:
.word OMAP242X_SDRC_REGADDR(SDRC_DLLA_CTRL)
omap242x_srs_sdrc_rfr_ctrl:
.word OMAP242X_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap242x_srs_prcm_voltctrl:
.word OMAP2420_PRCM_VOLTCTRL
ddr_prcm_mask_val:
.word 0xFFFF3FFC @ same mask as prcm_mask_val above
omap242x_srs_timer_32ksynct:
.word OMAP2_L4_IO_ADDRESS(OMAP2420_32KSYNCT_BASE + 0x010)
ENTRY(omap242x_sram_reprogram_sdrc_sz)
.word . - omap242x_sram_reprogram_sdrc @ byte size, for the SRAM copy
/*
* Set dividers and pll. Also recalculate DLL value for DDR and unlock mode.
*/
.align 3
ENTRY(omap242x_sram_set_prcm)
/*
 * Reprogram the DPLL dividers via fast-relock bypass, optionally
 * relock the DPLL (r2 == 1 means stay in bypass), then update the
 * SDRC refresh timing (r1) and briefly lock the DLLs.
 * In: r0 = new CM_CLKSEL1 value, r1 = SDRC_RFR_CTRL value,
 *     r2 = stay-in-bypass flag.  The code is preloaded into the
 *     icache first so it runs without SDRAM fetches.
 */
stmfd sp!, {r0-r12, lr} @ regs to stack
adr r4, pbegin @ addr of preload start
adr r8, pend @ addr of preload end
mcrr p15, 1, r8, r4, c12 @ preload into icache
pbegin:
/* move into fast relock bypass */
ldr r8, omap242x_ssp_pll_ctl @ get addr
ldr r5, [r8] @ get val
mvn r6, #0x3 @ clear mask
and r5, r5, r6 @ clear field
orr r7, r5, #0x2 @ fast relock val
str r7, [r8] @ go to fast relock
ldr r4, omap242x_ssp_pll_stat @ addr of stat
block:
/* wait for bypass */
ldr r8, [r4] @ stat value
and r8, r8, #0x3 @ mask for stat
cmp r8, #0x1 @ there yet
bne block @ loop if not
/* set new dpll dividers _after_ in bypass */
ldr r4, omap242x_ssp_pll_div @ get addr
str r0, [r4] @ set dpll ctrl val
ldr r4, omap242x_ssp_set_config @ get addr
mov r8, #1 @ valid cfg msk
str r8, [r4] @ make dividers take
mov r4, #100 @ dead spin a bit
wait_a_bit:
subs r4, r4, #1 @ dec loop
bne wait_a_bit @ delay done?
/* check if staying in bypass */
cmp r2, #0x1 @ stay in bypass?
beq pend @ jump over dpll relock
/* relock DPLL with new vals */
ldr r5, omap242x_ssp_pll_stat @ get addr
ldr r4, omap242x_ssp_pll_ctl @ get addr
orr r8, r7, #0x3 @ val for lock dpll
str r8, [r4] @ set val
mov r0, #1000 @ dead spin a bit
wait_more:
subs r0, r0, #1 @ dec loop
bne wait_more @ delay done?
wait_lock:
ldr r8, [r5] @ get lock val
and r8, r8, #3 @ isolate field
cmp r8, #2 @ locked?
bne wait_lock @ wait if not
pend:
/* update memory timings & briefly lock dll */
ldr r4, omap242x_ssp_sdrc_rfr @ get addr
str r1, [r4] @ update refresh timing
ldr r11, omap242x_ssp_dlla_ctrl @ get addr of DLLA ctrl
ldr r10, [r11] @ get current val
mvn r9, #0x4 @ mask to get clear bit2
and r10, r10, r9 @ clear bit2 for lock mode
orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
str r10, [r11] @ commit to DLLA_CTRL
add r11, r11, #0x8 @ move to dllb
str r10, [r11] @ hit DLLB also
mov r4, #0x800 @ relock time (min 0x400 L3 clocks)
wait_dll_lock:
subs r4, r4, #0x1
bne wait_dll_lock
nop
ldmfd sp!, {r0-r12, pc} @ restore regs and return
/* Literal pool for omap242x_sram_set_prcm. */
omap242x_ssp_set_config:
.word OMAP2420_PRCM_CLKCFG_CTRL
omap242x_ssp_pll_ctl:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKEN)
omap242x_ssp_pll_stat:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_IDLEST)
omap242x_ssp_pll_div:
.word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL1)
omap242x_ssp_sdrc_rfr:
.word OMAP242X_SDRC_REGADDR(SDRC_RFR_CTRL_0)
omap242x_ssp_dlla_ctrl:
.word OMAP242X_SDRC_REGADDR(SDRC_DLLA_CTRL)
ENTRY(omap242x_sram_set_prcm_sz)
.word . - omap242x_sram_set_prcm @ byte size, for the SRAM copy
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,856
|
arch/arm/mach-omap2/sleep43xx.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Low level suspend code for AM43XX SoCs
*
* Copyright (C) 2013-2018 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach, Vaibhav Bedia
*/
#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>
#include <linux/platform_data/pm33xx.h>
#include <asm/assembler.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/memory.h>
#include "cm33xx.h"
#include "common.h"
#include "iomap.h"
#include "omap-secure.h"
#include "omap44xx.h"
#include "prm33xx.h"
#include "prcm43xx.h"
/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr) (1 << (nr))
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE 0x0002
#define AM43XX_EMIF_POWEROFF_ENABLE 0x1
#define AM43XX_EMIF_POWEROFF_DISABLE 0x0
#define AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP 0x1
#define AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO 0x3
#define AM43XX_CM_BASE 0x44DF0000
#define AM43XX_CM_REGADDR(inst, reg) \
AM33XX_L4_WK_IO_ADDRESS(AM43XX_CM_BASE + (inst) + (reg))
#define AM43XX_CM_MPU_CLKSTCTRL AM43XX_CM_REGADDR(AM43XX_CM_MPU_INST, \
AM43XX_CM_MPU_MPU_CDOFFS)
#define AM43XX_CM_MPU_MPU_CLKCTRL AM43XX_CM_REGADDR(AM43XX_CM_MPU_INST, \
AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET)
#define AM43XX_CM_PER_EMIF_CLKCTRL AM43XX_CM_REGADDR(AM43XX_CM_PER_INST, \
AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)
#define AM43XX_PRM_EMIF_CTRL_OFFSET 0x0030
#define RTC_SECONDS_REG 0x0
#define RTC_PMIC_REG 0x98
#define RTC_PMIC_POWER_EN BIT(16)
#define RTC_PMIC_EXT_WAKEUP_STS BIT(12)
#define RTC_PMIC_EXT_WAKEUP_POL BIT(4)
#define RTC_PMIC_EXT_WAKEUP_EN BIT(0)
.arm
.align 3
ENTRY(am43xx_do_wfi)
/*
 * AM43xx low-level suspend entry.  r0 = wfi_flags; each
 * WFI_FLAG_* bit gates one step below (cache flush, EMIF
 * self-refresh / context save / disable, RTC-only power-off,
 * WKUP-M3 handshake).  Returns 1 in r0 when the WFI aborted
 * because of a late interrupt.
 */
stmfd sp!, {r4 - r11, lr} @ save registers on stack
/* Save wfi_flags arg to data space */
mov r4, r0
adr r3, am43xx_pm_ro_sram_data
ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
str r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
#ifdef CONFIG_CACHE_L2X0
/* Retrieve l2 cache virt address BEFORE we shut off EMIF */
ldr r1, get_l2cache_base
blx r1
mov r8, r0 @ r8 = L2 controller virt base, live across the flush
#endif
/* Only flush cache if we know we are losing MPU context */
tst r4, #WFI_FLAG_FLUSH_CACHE
beq cache_skip_flush
/*
 * Flush all data from the L1 and L2 data cache before disabling
 * SCTLR.C bit.
 */
ldr r1, kernel_flush
blx r1
/*
 * Clear the SCTLR.C bit to prevent further data cache
 * allocation. Clearing SCTLR.C would make all the data accesses
 * strongly ordered and would not hit the cache.
 */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
dsb
/*
 * Invalidate L1 and L2 data cache.
 */
ldr r1, kernel_flush
blx r1
#ifdef CONFIG_CACHE_L2X0
/*
 * Clean and invalidate the L2 cache.
 */
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x03
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
mov r0, r8
adr r4, am43xx_pm_ro_sram_data
ldr r3, [r4, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
mov r2, r0
/* Stash AUX/PREFETCH ctrl so the resume path can reprogram them */
ldr r0, [r2, #L2X0_AUX_CTRL]
str r0, [r3, #AMX3_PM_L2_AUX_CTRL_VAL_OFFSET]
ldr r0, [r2, #L310_PREFETCH_CTRL]
str r0, [r3, #AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET]
ldr r0, l2_val
str r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
ldr r1, l2_val
ands r0, r0, r1
bne wait @ spin until all ways are clean+invalidated
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
l2x_sync:
mov r0, r8
mov r2, r0
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync
#endif
/* Restore wfi_flags */
adr r3, am43xx_pm_ro_sram_data
ldr r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
ldr r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
cache_skip_flush:
/*
 * If we are trying to enter RTC+DDR mode we must perform
 * a read from the rtc address space to ensure translation
 * presence in the TLB to avoid page table walk after DDR
 * is unavailable.
 */
tst r4, #WFI_FLAG_RTC_ONLY
beq skip_rtc_va_refresh
adr r3, am43xx_pm_ro_sram_data
ldr r1, [r3, #AMX3_PM_RTC_BASE_VIRT_OFFSET]
ldr r0, [r1]
skip_rtc_va_refresh:
/* Check if we want self refresh */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_enter_sr
adr r9, am43xx_emif_sram_table
ldr r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
blx r3
emif_skip_enter_sr:
/* Only necessary if PER is losing context */
tst r4, #WFI_FLAG_SAVE_EMIF
beq emif_skip_save
ldr r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
blx r3
emif_skip_save:
/* Only can disable EMIF if we have entered self refresh */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_disable
/* Disable EMIF */
ldr r1, am43xx_virt_emif_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE @ clear MODULEMODE bits
str r2, [r1]
wait_emif_disable:
ldr r2, [r1]
mov r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
cmp r2, r3
bne wait_emif_disable
emif_skip_disable:
tst r4, #WFI_FLAG_RTC_ONLY
beq skip_rtc_only
/* RTC-only: tell the PMIC to cut power via the RTC PMIC register */
adr r3, am43xx_pm_ro_sram_data
ldr r1, [r3, #AMX3_PM_RTC_BASE_VIRT_OFFSET]
ldr r0, [r1, #RTC_PMIC_REG]
orr r0, r0, #RTC_PMIC_POWER_EN
orr r0, r0, #RTC_PMIC_EXT_WAKEUP_STS
orr r0, r0, #RTC_PMIC_EXT_WAKEUP_EN
orr r0, r0, #RTC_PMIC_EXT_WAKEUP_POL
str r0, [r1, #RTC_PMIC_REG]
ldr r0, [r1, #RTC_PMIC_REG]
/* Wait for 2 seconds to lose power */
mov r3, #2
ldr r2, [r1, #RTC_SECONDS_REG]
rtc_loop:
ldr r0, [r1, #RTC_SECONDS_REG]
cmp r0, r2
beq rtc_loop
mov r2, r0
subs r3, r3, #1
bne rtc_loop
b re_enable_emif @ power never dropped: recover
skip_rtc_only:
tst r4, #WFI_FLAG_WAKE_M3
beq wkup_m3_skip
/*
 * For the MPU WFI to be registered as an interrupt
 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
 * to DISABLED
 */
ldr r1, am43xx_virt_mpu_clkctrl
ldr r2, [r1]
bic r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
str r2, [r1]
/*
 * Put MPU CLKDM to SW_SLEEP
 */
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP
str r2, [r1]
wkup_m3_skip:
/*
 * Execute a barrier instruction to ensure that all cache,
 * TLB and branch predictor maintenance operations issued
 * have completed.
 */
dsb
dmb
/*
 * Execute a WFI instruction and wait until the
 * STANDBYWFI output is asserted to indicate that the
 * CPU is in idle and low power state. CPU can speculatively
 * prefetch the instructions so add NOPs after WFI. Sixteen
 * NOPs as per Cortex-A9 pipeline.
 */
wfi
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/* We come here in case of an abort due to a late interrupt */
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO
str r2, [r1]
/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
ldr r1, am43xx_virt_mpu_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
re_enable_emif:
/* Re-enable EMIF */
ldr r1, am43xx_virt_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable:
ldr r3, [r1]
cmp r2, r3
bne wait_emif_enable
tst r4, #WFI_FLAG_FLUSH_CACHE
beq cache_skip_restore
/*
 * Set SCTLR.C bit to allow data cache allocation
 */
mrc p15, 0, r0, c1, c0, 0
orr r0, r0, #(1 << 2) @ Enable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
cache_skip_restore:
/* Only necessary if PER is losing context */
tst r4, #WFI_FLAG_SELF_REFRESH
beq emif_skip_exit_sr_abt
adr r9, am43xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
blx r1
emif_skip_exit_sr_abt:
/* Let the suspend code know about the abort */
mov r0, #1
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(am43xx_do_wfi)
.align
ENTRY(am43xx_resume_offset)
.word . - am43xx_do_wfi @ offset of the resume entry within the SRAM copy
ENTRY(am43xx_resume_from_deep_sleep)
/*
 * Resume path entered with the MMU off (physical addresses are
 * used throughout): re-enable EMIF, restore its context, exit
 * self-refresh, reprogram and re-enable the PL310 L2 via secure
 * monitor calls, then jump to the generic cpu_resume.
 */
/* Set MPU CLKSTCTRL to HW AUTO so that CPUidle works properly */
ldr r1, am43xx_virt_mpu_clkstctrl
mov r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO
str r2, [r1]
/* For AM43xx, use EMIF power down until context is restored */
ldr r2, am43xx_phys_emif_poweroff
mov r1, #AM43XX_EMIF_POWEROFF_ENABLE
str r1, [r2, #0x0]
/* Re-enable EMIF */
ldr r1, am43xx_phys_emif_clkctrl
mov r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
str r2, [r1]
wait_emif_enable1:
ldr r3, [r1]
cmp r2, r3
bne wait_emif_enable1
adr r9, am43xx_emif_sram_table
ldr r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
blx r1
ldr r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
blx r1
ldr r2, am43xx_phys_emif_poweroff
mov r1, #AM43XX_EMIF_POWEROFF_DISABLE
str r1, [r2, #0x0]
#ifdef CONFIG_CACHE_L2X0
ldr r2, l2_cache_base
ldr r0, [r2, #L2X0_CTRL]
and r0, #0x0f
cmp r0, #1
beq skip_l2en @ Skip if already enabled
/* Reprogram PREFETCH/AUX ctrl from the values saved in am43xx_do_wfi */
adr r4, am43xx_pm_ro_sram_data
ldr r3, [r4, #AMX3_PM_RO_SRAM_DATA_PHYS_OFFSET]
ldr r0, [r3, #AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET]
ldr r12, l2_smc1
dsb
smc #0
dsb
set_aux_ctrl:
ldr r0, [r3, #AMX3_PM_L2_AUX_CTRL_VAL_OFFSET]
ldr r12, l2_smc2
dsb
smc #0
dsb
/* L2 invalidate on resume */
ldr r0, l2_val
ldr r2, l2_cache_base
str r0, [r2, #L2X0_INV_WAY]
wait2:
ldr r0, [r2, #L2X0_INV_WAY]
ldr r1, l2_val
ands r0, r0, r1
bne wait2 @ spin until all ways invalidated
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
dsb
smc #0
dsb
#endif
l2x_sync2:
ldr r2, l2_cache_base
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync2:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync2
mov r0, #0x1
ldr r12, l2_smc3
dsb
smc #0 @ enable the L2 via the secure monitor
dsb
#endif
skip_l2en:
/* We are back. Branch to the common CPU resume routine */
mov r0, #0
ldr pc, resume_addr
ENDPROC(am43xx_resume_from_deep_sleep)
/*
* Local variables
*/
.align
/* Helper-function pointers and physical register addresses. */
kernel_flush:
.word v7_flush_dcache_all
ddr_start:
.word PAGE_OFFSET
am43xx_phys_emif_poweroff:
.word (AM43XX_CM_BASE + AM43XX_PRM_DEVICE_INST + \
AM43XX_PRM_EMIF_CTRL_OFFSET)
am43xx_virt_mpu_clkstctrl:
.word (AM43XX_CM_MPU_CLKSTCTRL)
am43xx_virt_mpu_clkctrl:
.word (AM43XX_CM_MPU_MPU_CLKCTRL)
am43xx_virt_emif_clkctrl:
.word (AM43XX_CM_PER_EMIF_CLKCTRL)
am43xx_phys_emif_clkctrl:
.word (AM43XX_CM_BASE + AM43XX_CM_PER_INST + \
AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)
#ifdef CONFIG_CACHE_L2X0
/* L2 cache related defines for AM437x */
get_l2cache_base:
.word omap4_get_l2cache_base
l2_cache_base:
.word OMAP44XX_L2CACHE_BASE
l2_smc1:
.word OMAP4_MON_L2X0_PREFETCH_INDEX
l2_smc2:
.word OMAP4_MON_L2X0_AUXCTRL_INDEX
l2_smc3:
.word OMAP4_MON_L2X0_CTRL_INDEX
l2_val:
.word 0xffff @ way mask used for clean/invalidate-by-way
#endif
.align 3
/* DDR related defines */
ENTRY(am43xx_emif_sram_table)
.space EMIF_PM_FUNCTIONS_SIZE @ filled at runtime with EMIF PM function ptrs
ENTRY(am43xx_pm_sram)
.word am43xx_do_wfi
.word am43xx_do_wfi_sz
.word am43xx_resume_offset
.word am43xx_emif_sram_table
.word am43xx_pm_ro_sram_data
resume_addr:
.word cpu_resume - PAGE_OFFSET + 0x80000000 @ phys address of cpu_resume (virt->phys fixup)
.align 3
ENTRY(am43xx_pm_ro_sram_data)
.space AMX3_PM_RO_SRAM_DATA_SIZE @ runtime-filled read-only data block
ENTRY(am43xx_do_wfi_sz)
.word . - am43xx_do_wfi @ total byte size, for the SRAM copy
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,310
|
arch/arm/mach-omap2/sleep44xx.S
|
/*
* OMAP44xx sleep code.
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Santosh Shilimkar <santosh.shilimkar@ti.com>
*
 * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>
#include "omap-secure.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
.macro DO_SMC
dsb
smc #0
dsb
.endm
#ifdef CONFIG_ARCH_OMAP4
/*
* =============================
* == CPU suspend finisher ==
* =============================
*
* void omap4_finish_suspend(unsigned long cpu_state)
*
* This function code saves the CPU context and performs the CPU
* power down sequence. Calling WFI effectively changes the CPU
* power domains states to the desired target power state.
*
* @cpu_state : contains context save state (r0)
* 0 - No context lost
* 1 - CPUx L1 and logic lost: MPUSS CSWR
* 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
* 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
* @return: This function never returns for CPU OFF and DORMANT power states.
* Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
* from this follows a full CPU reset path via ROM code to CPU restore code.
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
* It returns to the caller for CPU INACTIVE and ON power states or in case
* CPU failed to transition to targeted OFF/DORMANT state.
*
* omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
* stack frame and it expects the caller to take care of it. Hence the entire
* stack frame is saved to avoid possible stack corruption.
*/
ENTRY(omap4_finish_suspend)
/* See the contract in the comment block above: r0 = context-loss level. */
stmfd sp!, {r4-r12, lr}
cmp r0, #0x0
beq do_WFI @ No lowpower state, jump to WFI
/*
 * Flush all data from the L1 data cache before disabling
 * SCTLR.C bit.
 */
bl omap4_get_sar_ram_base
ldr r9, [r0, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne skip_secure_l1_clean
mov r0, #SCU_PM_NORMAL
mov r1, #0xFF @ clean secure L1
stmfd r13!, {r4-r12, r14}
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
DO_SMC
ldmfd r13!, {r4-r12, r14}
skip_secure_l1_clean:
bl v7_flush_dcache_all
/*
 * Clear the SCTLR.C bit to prevent further data cache
 * allocation. Clearing SCTLR.C would make all the data accesses
 * strongly ordered and would not hit the cache.
 */
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #(1 << 2) @ Disable the C bit
mcr p15, 0, r0, c1, c0, 0
isb
bl v7_invalidate_l1
/*
 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
 * to AsymmetricMultiprocessing (AMP) mode by programming
 * the SCU power status to DORMANT or OFF mode.
 * This enables the CPU to be taken out of coherency by
 * preventing the CPU from receiving cache, TLB, or BTB
 * maintenance operations broadcast by other CPUs in the cluster.
 */
bl omap4_get_sar_ram_base
mov r8, r0 @ r8 = SAR RAM base for the rest of this path
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne scu_gp_set
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f @ CPU id selects which saved SCU value to use
ldreq r0, [r8, #SCU_OFFSET0]
ldrne r0, [r8, #SCU_OFFSET1]
mov r1, #0x00
stmfd r13!, {r4-r12, r14}
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
DO_SMC
ldmfd r13!, {r4-r12, r14}
b skip_scu_gp_set
scu_gp_set:
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
ands r0, r0, #0x0f
ldreq r1, [r8, #SCU_OFFSET0]
ldrne r1, [r8, #SCU_OFFSET1]
bl omap4_get_scu_base
bl scu_power_mode
skip_scu_gp_set:
mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data
tst r0, #(1 << 18)
mrcne p15, 0, r0, c1, c0, 1
bicne r0, r0, #(1 << 6) @ Disable SMP bit
mcrne p15, 0, r0, c1, c0, 1
isb
dsb
#ifdef CONFIG_CACHE_L2X0
/*
 * Clean and invalidate the L2 cache.
 * Common cache-l2x0.c functions can't be used here since it
 * uses spinlocks. We are out of coherency here with data cache
 * disabled. The spinlock implementation uses exclusive load/store
 * instruction which can fail without data cache being enabled.
 * OMAP4 hardware doesn't support exclusive monitor which can
 * overcome exclusive access issue. Because of this, CPU can
 * lead to deadlock.
 */
bl omap4_get_sar_ram_base
mov r8, r0
mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR
ands r5, r5, #0x0f
ldreq r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR
ldrne r0, [r8, #L2X0_SAVE_OFFSET1] @ memory.
cmp r0, #3 @ only clean L2 when targeting MPUSS OFF
bne do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x03
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
DO_SMC
#endif
bl omap4_get_l2cache_base
mov r2, r0
ldr r0, =0xffff
str r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
ldr r1, =0xffff
ands r0, r0, r1
bne wait @ spin until all 16 ways are done
#ifdef CONFIG_PL310_ERRATA_727915
mov r0, #0x00
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
DO_SMC
#endif
l2x_sync:
bl omap4_get_l2cache_base
mov r2, r0
mov r0, #0x0
str r0, [r2, #L2X0_CACHE_SYNC]
sync:
ldr r0, [r2, #L2X0_CACHE_SYNC]
ands r0, r0, #0x1
bne sync
#endif
do_WFI:
bl omap_do_wfi
/*
 * CPU is here when it failed to enter OFF/DORMANT or
 * no low power state was attempted.
 */
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Enable the C bit
mcreq p15, 0, r0, c1, c0, 0
isb
/*
 * Ensure the CPU power state is set to NORMAL in
 * SCU power state so that CPU is back in coherency.
 * In non-coherent mode CPU can lock-up and lead to
 * system deadlock.
 */
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ Check SMP bit enabled?
orreq r0, r0, #(1 << 6)
mcreq p15, 0, r0, c1, c0, 1
isb
bl omap4_get_sar_ram_base
mov r8, r0
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Check for HS device
bne scu_gp_clear
mov r0, #SCU_PM_NORMAL
mov r1, #0x00
stmfd r13!, {r4-r12, r14}
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
DO_SMC
ldmfd r13!, {r4-r12, r14}
b skip_scu_gp_clear
scu_gp_clear:
bl omap4_get_scu_base
mov r1, #SCU_PM_NORMAL
bl scu_power_mode
skip_scu_gp_clear:
isb
dsb
ldmfd sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
/*
* ============================
* == CPU resume entry point ==
* ============================
*
* void omap4_cpu_resume(void)
*
* ROM code jumps to this function while waking up from CPU
* OFF or DORMANT state. Physical address of the function is
* stored in the SAR RAM while entering to OFF or DORMANT mode.
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
*/
ENTRY(omap4_cpu_resume)
/* Entered from ROM code on wakeup from OFF/DORMANT (see comment above). */
/*
 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
 * init and for CPU1, a secure PPA API provided. CPU0 must be ON
 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
 * OMAP443X GP devices- SMP bit isn't accessible.
 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
 */
ldr r8, =OMAP44XX_SAR_RAM_BASE
ldr r9, [r8, #OMAP_TYPE_OFFSET]
cmp r9, #0x1 @ Skip if GP device
bne skip_ns_smp_enable
mrc p15, 0, r0, c0, c0, 5
ands r0, r0, #0x0f @ only CPU1 needs the PPA call
beq skip_ns_smp_enable
ppa_actrl_retry:
mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
adr r1, ppa_zero_params_offset
ldr r3, [r1]
add r3, r3, r1 @ Pointer to ppa_zero_params
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
mov r6, #0xff
mov r12, #0x00 @ Secure Service ID
DO_SMC
cmp r0, #0x0 @ API returns 0 on success.
beq enable_smp_bit
b ppa_actrl_retry
enable_smp_bit:
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ Check SMP bit enabled?
orreq r0, r0, #(1 << 6)
mcreq p15, 0, r0, c1, c0, 1
isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
/*
 * Restore the L2 AUXCTRL and enable the L2 cache.
 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
 * register r0 contains value to be programmed.
 * L2 cache is already invalidate by ROM code as part
 * of MPUSS OFF wakeup path.
 */
ldr r2, =OMAP44XX_L2CACHE_BASE
ldr r0, [r2, #L2X0_CTRL]
and r0, #0x0f
cmp r0, #1
beq skip_l2en @ Skip if already enabled
ldr r3, =OMAP44XX_SAR_RAM_BASE
ldr r1, [r3, #OMAP_TYPE_OFFSET]
cmp r1, #0x1 @ Check for HS device
bne set_gp_por
/* HS: program prefetch (POR) through the PPA service */
ldr r0, =OMAP4_PPA_L2_POR_INDEX
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
adr r1, ppa_por_params_offset
ldr r3, [r1]
add r3, r3, r1 @ Pointer to ppa_por_params
str r4, [r3, #0x04]
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
mov r6, #0xff
mov r12, #0x00 @ Secure Service ID
DO_SMC
b set_aux_ctrl
set_gp_por:
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
ldr r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH
DO_SMC
set_aux_ctrl:
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r0, [r1, #L2X0_AUXCTRL_OFFSET]
ldr r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX @ Setup L2 AUXCTRL
DO_SMC
mov r0, #0x1
ldr r12, =OMAP4_MON_L2X0_CTRL_INDEX @ Enable L2 cache
DO_SMC
skip_l2en:
#endif
b cpu_resume @ Jump to generic resume
ppa_por_params_offset:
.long ppa_por_params - . @ PC-relative, valid before the MMU is on
ENDPROC(omap4_cpu_resume)
#endif /* CONFIG_ARCH_OMAP4 */
#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
/*
 * omap_do_wfi - drain buffers, synchronise, then execute WFI to put
 * the calling CPU into its low-power idle state.
 *
 * Saves lr on the stack and returns to the caller after wakeup.
 * Clobbers no other registers visible to the caller.
 */
ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl omap_interconnect_sync
#endif
	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb
	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb
	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can speculatively
	 * prefetch the instructions so add NOPs after WFI. Sixteen
	 * NOPs as per Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	ldmfd	sp!, {pc}
ppa_zero_params_offset:
	.long	ppa_zero_params - .	@ PC-relative offset so the physical
					@ address can be computed at run time
ENDPROC(omap_do_wfi)

	.data
	.align	2
ppa_zero_params:			@ all-zero argument block for secure
	.word	0			@ PPA (Primary Protected App) calls
ppa_por_params:				@ {count, value} pair used for the HS
	.word	1, 0			@ device L2 prefetch/POR PPA service
|
AirFortressIlikara/LS2K0300-linux-4.19
| 30,121
|
arch/arm/kernel/entry-armv.S
|
/*
* linux/arch/arm/kernel/entry-armv.S
*
* Copyright (C) 1996,1997,1998 Russell King.
* ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
* nommu support by Hyok S. Choi (hyok.choi@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Low-level vector interface routines
*
* Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
* that causes it to save wrong values... Be aware!
*/
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>
/*
* Interrupt handling.
*/
/*
 * irq_handler - dispatch an interrupt, either through the generic
 * handle_arch_irq function pointer (called with pt_regs in r0) or the
 * platform's arch_irq_handler_default entry macro.
 */
	.macro	irq_handler
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp			@ pt_regs argument for the C handler
	badr	lr, 9997f		@ return here when the handler is done
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm

/*
 * pabt_helper - call the processor-specific prefetch abort handler,
 * either indirectly via the multi-CPU processor vector or directly.
 */
	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns		@ per-CPU function table
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

/*
 * dabt_helper - call the processor-specific data abort handler.
 */
	.macro	dabt_helper
	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns		@ per-CPU function table
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm
.section .entry.text,"ax",%progbits
/*
* Invalid mode handlers
*/
/*
 * inv_entry - minimal register save used by the "invalid mode"
 * handlers below.  \reason is the BAD_* code later passed to bad_mode.
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR
	@
	@ XXX fall through to common_invalid
	@
	@
	@ common_invalid - generic code for failed exception (re-entrant version of handlers)
	@
common_invalid:
	zero_fp
	ldmia	r0, {r4 - r6}		@ r0 points at the mode stub's save area
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"
	mov	r0, sp
	b	bad_mode		@ report the bad mode and die
ENDPROC(__und_invalid)
/*
* SVC mode handlers
*/
/* SPFIX() emits code only when the EABI requires the SVC stack to be
 * realigned to 8 bytes on exception entry. */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

/*
 * svc_entry - build a struct pt_regs on the SVC stack for an exception
 * taken while in SVC mode.  On entry r0 points at the vector stub's
 * {r0, lr_<exc>, spsr_<exc>} save area.  \stack_hole reserves extra
 * stack (kprobes), \trace controls the hardirq tracing call, and
 * \uaccess controls whether userspace access is disabled on entry.
 */
	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)	@ 8-byte align the frame if needed
	stmia	sp, {r1 - r12}
	ldmia	r0, {r3 - r5}		@ r3=orig r0, r4=lr_exc, r5=spsr_exc
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)	@ r2 = original sp_svc
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack
	mov	r3, lr
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}
	get_thread_info tsk
	uaccess_entry tsk, r0, r1, r2, \uaccess
	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm
.align	5
/*
 * __dabt_svc - data abort taken while in SVC mode.
 */
__dabt_svc:
	svc_entry uaccess=0
	mov	r2, sp			@ pt_regs for the abort handler
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
/*
 * __irq_svc - IRQ taken while in SVC mode.  May preempt the current
 * task if CONFIG_PREEMPT and the preempt count allows it.
 */
__irq_svc:
	svc_entry
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
/*
 * svc_preempt - reschedule loop for kernel preemption; keeps calling
 * preempt_schedule_irq() until TIF_NEED_RESCHED is clear.
 */
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

/*
 * __und_fault - deliver an undefined-instruction fault to C code.
 * r0 = pt_regs, r1 = PC correction (4 for ARM, 2 for Thumb).
 */
__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM
	@ the PC will be pointing at the next instruction, and have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)

	.align	5
/*
 * __und_svc - undefined instruction taken while in SVC mode.  Reads
 * the faulting instruction and tries coprocessor emulation via
 * call_fpe before treating it as a real undefined instruction.
 */
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]		@ ARM instruction at lr - 4
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	badr	r9, __und_svc_finish		@ "emulated OK" return address
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	get_thread_info tsk
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
/*
 * __pabt_svc - prefetch abort taken while in SVC mode.
 */
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
/*
 * __fiq_svc - FIQ taken while in SVC mode, handled as an NMI.
 */
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

/* Literal pool: addresses reachable with PC-relative loads above. */
	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
/*
* Abort mode handlers
*/
@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
.align	5
/*
 * __fiq_abt - FIQ taken while in ABT mode.  Like __fiq_svc, but must
 * also preserve lr_abt and spsr_abt so a nested abort during the FIQ
 * does not corrupt the interrupted abort context.  Mode switches are
 * done with IRQ/FIQ masked throughout.
 */
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}	@ keep the banked ABT state on the SVC stack

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)
/*
* User mode handlers
*
* EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
*/
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
/*
 * usr_entry - build a struct pt_regs on the SVC stack for an exception
 * taken from user mode.  On entry r0 points at the vector stub's
 * {r0, lr_<exc>, spsr_<exc>} save area.  Also re-enables the alignment
 * trap in case user code disabled it, and clears fp for backtraces.
 */
	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)	@ current SCTLR
 ATRAP(	ldr	r8, .LCcralign)

	ldmia	r0, {r3 - r5}		@ r3=orig r0, r4=lr_exc, r5=spsr_exc
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

 ATRAP(	ldr	r8, [r8, #0])		@ expected SCTLR value

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	.if \uaccess
	uaccess_disable ip
	.endif

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP( mcrne	p15, 0, r8, c1, c0, 0)

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if	\trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm

/*
 * kuser_cmpxchg_check - on pre-v6K CPUs, restart the user-mode cmpxchg
 * helper if the exception interrupted its critical section (r4 holds
 * the interrupted PC; helpers live above TASK_SIZE).
 */
	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm
.align	5
/*
 * __dabt_usr - data abort taken from user mode.
 */
__dabt_usr:
	usr_entry uaccess=0
	kuser_cmpxchg_check
	mov	r2, sp			@ pt_regs for the abort handler
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
/*
 * __irq_usr - IRQ taken from user mode.
 */
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0			@ not a syscall return
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)
.ltorg
.align	5
/*
 * __und_usr - undefined instruction taken from user mode.  Fetches the
 * faulting instruction from user space (with fixups for faults on the
 * fetch itself), then attempts coprocessor/NEON emulation via call_fpe
 * before raising SIGILL through __und_fault.
 */
__und_usr:
	usr_entry uaccess=0

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]			@ may fault; fixup at 4: below
 ARM_BE8(rev	r0, r0)				@ little endian instruction

	uaccess_disable ip

	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	badr	lr, __und_usr_fault_32
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]			@ may fault; fixup at 4: below
ARM_BE8(rev16	r5, r5)				@ little endian instruction
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
3:	ldrht	r0, [r2]			@ may fault; fixup at 4: below
ARM_BE8(rev16	r0, r0)				@ little endian instruction
	uaccess_disable ip
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 * Faulting on the instruction fetch simply retries from the start.
 */
	.pushsection .text.fixup, "ax"
	.align	2
4:	str     r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
/*
* Check whether the instruction is a co-processor instruction.
* If yes, we need to call the relevant co-processor handler.
*
* Note that we don't do a full check here for the co-processor
* instructions; all instructions with bit 27 set are well
* defined. The only instructions that should fault are the
* co-processor instructions. However, we have to watch out
* for the ARM6/ARM7 SWI bug.
*
* NEON is a special case that has to be handled here. Not all
* NEON instructions are co-processor instructions, so we have
* to make a special case of checking for them. Plus, there's
* five groups of them, so we have a table of mask/opcode pairs
* to check against, and if any match then we branch off into the
* NEON handler code.
*
* Emulators may wish to make use of the following registers:
* r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
* r2 = PC value to resume execution after successful emulation
* r9 = normal "successful" return address
* r10 = this threads thread_info structure
* lr = unrecognised instruction return address
* IRQs enabled, FIQs enabled.
*/
@
@ Fall-through from Thumb-2 __und_usr
@
/*
 * call_fpe - classify a faulting (co-processor) instruction and branch
 * to the appropriate emulator/enabler via the CP#0..15 jump table
 * below.  NEON instructions are matched first against the mask/opcode
 * tables.  Returns via r9 on successful emulation, lr otherwise.
 */
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes	@ Thumb entry: use Thumb table
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr				@ not a coprocessor insn: real undef
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	@ Computed branch into the per-coprocessor table below.
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

/* {mask, opcode} pairs identifying NEON instructions; zero terminates. */
.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif
/*
 * do_fpe - hand a CP#1/CP#2 instruction to the FP emulator through the
 * fp_enter function pointer (defaults to no_fp, i.e. SIGILL).
 */
do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
	.align	2
ENTRY(fp_enter)
	.word	no_fp			@ replaced by the FP emulator on load
	.popsection

ENTRY(no_fp)
	ret	lr			@ no emulator: treat as undefined
ENDPROC(no_fp)

/*
 * __und_usr_fault_32/16 - raise an undefined-instruction fault for a
 * 32-bit (r1=4) or 16-bit (r1=2) user instruction via __und_fault.
 */
__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16_pan:
	uaccess_disable ip
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
/*
 * __pabt_usr - prefetch abort taken from user mode; falls through into
 * ret_from_exception.
 */
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
/*
 * __fiq_usr - FIQ taken from user mode, handled as an NMI.
 */
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)
/*
* Register switch for ARMv3 and ARMv4 processors
* r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
* previous and next are guaranteed not to be the same.
*/
/*
 * __switch_to - context switch.
 * r0 = previous task_struct, r1 = previous thread_info,
 * r2 = next thread_info.  Saves the callee-saved register set into the
 * outgoing thread's CPU save area, switches TLS/domain/stack-canary
 * state, notifies the thread_notify chain, then loads the incoming
 * thread's registers (including sp and pc).  Returns r0 = previous
 * task_struct in the new context.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]		@ next thread's TLS values
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]		@ load next task's stack canary
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
	.endif
	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0				@ preserve prev across the call
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]			@ publish new canary
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5				@ return value: prev task
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
__INIT
/*
* User helpers.
*
* Each segment is 32-byte aligned and will be moved to the top of the high
* vector page. New segments (if ever needed) must be added in front of
* existing ones. This mechanism should be used only for things that are
* really small and justified, and not be abused freely.
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
THUMB(	.arm	)	@ kuser helpers are always executed in ARM state

/*
 * usr_ret - return to user space via \reg, using bx where Thumb user
 * code may be calling the helper.
 */
	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

/*
 * kuser_pad - pad \sym out to \size bytes with undefined instructions
 * so each helper occupies exactly its 32-byte slot.
 */
	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1	@ guaranteed-undefined instruction
	.endr
	.endm
#ifdef CONFIG_KUSER_HELPERS
/*
 * Kernel-provided user helpers, copied to the top of the high vector
 * page at fixed addresses (see Documentation/arm/kernel_user_helpers.txt).
 * On pre-v6K CPUs atomicity is emulated: the exception handlers rewind
 * the user PC into the critical section via the *_fixup routines below.
 */
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif
THUMB( .thumb )
/*
* Vector stubs.
*
* This code is copied to 0xffff1000 so we can use branches in the
* vectors, rather than ldr's. Note that this code must not exceed
* a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
*
* SP points to a minimal amount of processor-private memory, the address
* of which is copied into r0 for the mode specific abort handler.
*/
/*
 * vector_stub - common exception vector stub.  Entered in \mode with
 * IRQs disabled; saves r0/lr/spsr into the mode-private save area
 * (pointed to by sp), prepares SVC mode, then branches through the
 * per-mode handler table that must immediately follow the macro
 * expansion, indexed by the interrupted mode (spsr & 0x0f).
 * \correction adjusts lr for the exception type's PC offset.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f		@ index by interrupted mode
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp			@ handler gets the save area in r0
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
/*
 * Vector stub section, copied to 0xffff1000.  Per-mode dispatch tables
 * follow each vector_stub expansion, indexed by the interrupted CPSR
 * mode bits (0-15).
 */
	.section .stubs, "ax", %progbits
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn	@ spin: should never be reached

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
 * systems.
 */
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.long	__fiq_svc			@  4
	.long	__fiq_svc			@  5
	.long	__fiq_svc			@  6
	.long	__fiq_abt			@  7
	.long	__fiq_svc			@  8
	.long	__fiq_svc			@  9
	.long	__fiq_svc			@  a
	.long	__fiq_svc			@  b
	.long	__fiq_svc			@  c
	.long	__fiq_svc			@  d
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq

/* The hardware vector table itself, copied to 0xffff0000; each entry
 * branches to the matching stub above (SWI loads pc from the stubs
 * page, one page higher). */
	.section .vectors, "ax", %progbits
.L__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, .L__vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

	.data
	.align	2

	.globl	cr_alignment
cr_alignment:				@ saved SCTLR value used to re-enable
	.space	4			@ the alignment trap on kernel entry
|
AirFortressIlikara/LS2K0300-linux-4.19
| 19,255
|
arch/arm/kernel/head.S
|
/*
* linux/arch/arm/kernel/head.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Kernel startup code for all 32-bit CPUs
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
* make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
* the least significant 16 bits to be 0x8000, but we could probably
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
#ifdef CONFIG_ARM_LPAE
/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE 0x5000
#define PMD_ORDER 3
#else
#define PG_DIR_SIZE 0x4000
#define PMD_ORDER 2
#endif
/* Initial page table lives PG_DIR_SIZE below the kernel image. */
	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

	/*
	 * pgtbl - compute the physical address of swapper_pg_dir into \rd
	 * from the physical RAM base in \phys.
	 */
	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm
/*
* Kernel startup entry point.
* ---------------------------
*
* This is normally called from the decompressor code. The requirements
* are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
* r1 = machine nr, r2 = atags or dtb pointer.
*
* This code is mostly position independent, so if you link the kernel at
* 0xc0008000, you call this at __pa(0xc0008000).
*
* See linux/arch/arm/tools/mach-types for the complete list of machine
* numbers for r1.
*
* We're trying to keep crap to a minimum; DO NOT add any machine specific
* crap here - that's what the boot loader (or in extreme, well justified
* circumstances, zImage) is for.
*/
.arm
__HEAD
ENTRY(stext)
ARM_BE8(setend be ) @ ensure we are in BE8 mode
THUMB( badr r9, 1f ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install
#endif
@ ensure svc mode and all interrupts masked
safe_svcmode_maskall r9
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p @ yes, error 'p'
#ifdef CONFIG_ARM_LPAE
mrc p15, 0, r3, c0, c1, 4 @ read ID_MMFR0
and r3, r3, #0xf @ extract VMSA support
cmp r3, #5 @ long-descriptor translation table format?
THUMB( it lo ) @ force fixup-able long branch encoding
blo __error_lpae @ only classic page table format
#endif
#ifndef CONFIG_XIP_KERNEL
adr r3, 2f
ldmia r3, {r4, r8}
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
#endif
/*
* r1 = machine no, r2 = atags or dtb,
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*/
bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
bl __fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
bl __fixup_pv_table
#endif
bl __create_page_tables
/*
* The following calls CPU specific code in a position independent
* manner. See arch/arm/mm/proc-*.S for details. r10 = base of
* xxx_proc_info structure selected by __lookup_processor_type
* above.
*
* The processor init function will be called with:
* r1 - machine type
* r2 - boot data (atags/dt) pointer
* r4 - translation table base (low word)
* r5 - translation table base (high word, if LPAE)
* r8 - translation table base 1 (pfn if LPAE)
* r9 - cpuid
* r13 - virtual address for __enable_mmu -> __turn_mmu_on
*
* On return, the CPU will be ready for the MMU to be turned on,
* r0 will hold the CPU control register value, r1, r2, r4, and
* r9 will be preserved. r5 will also be preserved if LPAE.
*/
ldr r13, =__mmap_switched @ address to jump to after
@ mmu has been enabled
badr lr, 1f @ return (PIC) address
#ifdef CONFIG_ARM_LPAE
mov r5, #0 @ high TTBR0
mov r8, r4, lsr #12 @ TTBR1 is swapper_pg_dir pfn
#else
mov r8, r4 @ set TTBR1 to swapper_pg_dir
#endif
ldr r12, [r10, #PROCINFO_INITFUNC]
add r12, r12, r10
ret r12
1: b __enable_mmu
ENDPROC(stext)
.ltorg
#ifndef CONFIG_XIP_KERNEL
2: .long .
.long PAGE_OFFSET
#endif
/*
* Setup the initial page tables. We only setup the barest
* amount which are required to get the kernel running, which
* generally means mapping in the kernel code.
*
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*
* Returns:
* r0, r3, r5-r7 corrupted
* r4 = physical page table address
*/
__create_page_tables:
pgtbl r4, r8 @ page table address
/*
* Clear the swapper page table
*/
mov r0, r4
mov r3, #0
add r6, r0, #PG_DIR_SIZE
1: str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
teq r0, r6
bne 1b
#ifdef CONFIG_ARM_LPAE
/*
* Build the PGD table (first level) to point to the PMD table. A PGD
* entry is 64-bit wide.
*/
mov r0, r4
add r3, r4, #0x1000 @ first PMD table address
orr r3, r3, #3 @ PGD block type
mov r6, #4 @ PTRS_PER_PGD
mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
str r7, [r0], #4 @ set top PGD entry bits
str r3, [r0], #4 @ set bottom PGD entry bits
#else
str r3, [r0], #4 @ set bottom PGD entry bits
str r7, [r0], #4 @ set top PGD entry bits
#endif
add r3, r3, #0x1000 @ next PMD table
subs r6, r6, #1
bne 1b
add r4, r4, #0x1000 @ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
add r4, r4, #4 @ we only write the bottom word
#endif
#endif
ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
/*
* Create identity mapping to cater for __enable_mmu.
* This identity mapping will be removed by paging_init().
*/
adr r0, __turn_mmu_on_loc
ldmia r0, {r3, r5, r6}
sub r0, r0, r3 @ virt->phys offset
add r5, r5, r0 @ phys __turn_mmu_on
add r6, r6, r0 @ phys __turn_mmu_on_end
mov r5, r5, lsr #SECTION_SHIFT
mov r6, r6, lsr #SECTION_SHIFT
1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base
str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping
cmp r5, r6
addlo r5, r5, #1 @ next section
blo 1b
/*
* Map our RAM from the start to the end of the kernel .bss section.
*/
add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
ldr r6, =(_end - 1)
orr r3, r8, r7
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: str r3, [r0], #1 << PMD_ORDER
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
bls 1b
#ifdef CONFIG_XIP_KERNEL
/*
* Map the kernel image separately as it is not located in RAM.
*/
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
mov r3, pc
mov r3, r3, lsr #SECTION_SHIFT
orr r3, r7, r3, lsl #SECTION_SHIFT
add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
ldr r6, =(_edata_loc - 1)
add r0, r0, #1 << PMD_ORDER
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: cmp r0, r6
add r3, r3, #1 << SECTION_SHIFT
strls r3, [r0], #1 << PMD_ORDER
bls 1b
#endif
/*
* Then map boot params address in r2 if specified.
* We map 2 sections in case the ATAGs/DTB crosses a section boundary.
*/
mov r0, r2, lsr #SECTION_SHIFT
movs r0, r0, lsl #SECTION_SHIFT
subne r3, r0, r8
addne r3, r3, #PAGE_OFFSET
addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
orrne r6, r7, r0
strne r6, [r3], #1 << PMD_ORDER
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
sub r4, r4, #4 @ Fixup page table pointer
@ for 64-bit descriptors
#endif
#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
/*
* Map in IO space for serial debugging.
* This allows debug messages to be output
* via a serial console before paging_init.
*/
addruart r7, r3, r0
mov r3, r3, lsr #SECTION_SHIFT
mov r3, r3, lsl #PMD_ORDER
add r0, r4, r3
mov r3, r7, lsr #SECTION_SHIFT
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
orr r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
mov r7, #1 << (54 - 32) @ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
str r7, [r0], #4
str r3, [r0], #4
#else
str r3, [r0], #4
str r7, [r0], #4
#endif
#else
orr r3, r3, #PMD_SECT_XN
str r3, [r0], #4
#endif
#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
/* we don't need any serial debugging mappings */
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
/*
* If we're using the NetWinder or CATS, we also need to map
* in the 16550-type serial port for the debug messages
*/
add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
orr r3, r7, #0x7c000000
str r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
/*
* Map in screen at 0x02000000 & SCREEN2_BASE
* Similar reasons here - for debug. This is
* only for Acorn RiscPC architectures.
*/
add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
orr r3, r7, #0x02000000
str r3, [r0]
add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
str r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
sub r4, r4, #0x1000 @ point to the PGD table
#endif
ret lr
ENDPROC(__create_page_tables)
.ltorg
.align
__turn_mmu_on_loc:
.long .
.long __turn_mmu_on
.long __turn_mmu_on_end
#if defined(CONFIG_SMP)
.text
.arm
ENTRY(secondary_startup_arm)
THUMB( badr r9, 1f ) @ Kernel is entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
*
* Ensure that we're in SVC mode, and IRQs are disabled. Lookup
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
ARM_BE8(setend be) @ ensure we are in BE8 mode
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r9
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
moveq r0, #'p' @ yes, error 'p'
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p
/*
* Use the page tables supplied from __cpu_up.
*/
adr r4, __secondary_data
ldmia r4, {r5, r7, r12} @ address to jump to after
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
ldr r12, [r10, #PROCINFO_INITFUNC]
add r12, r12, r10 @ initialise processor
@ (return control reg)
ret r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)
/*
* r6 = &secondary_data
*/
ENTRY(__secondary_switched)
ldr sp, [r7, #12] @ get secondary_data.stack
mov fp, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
.align
.type __secondary_data, %object
__secondary_data:
.long .
.long secondary_data
.long __secondary_switched
#endif /* defined(CONFIG_SMP) */
/*
* Setup common bits before finally enabling the MMU. Essentially
* this is just loading the page table pointer and domain access
* registers. All these registers need to be preserved by the
* processor setup function (or set in the case of r0)
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r4 = TTBR pointer (low word)
* r5 = TTBR pointer (high word if LPAE)
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*/
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A
#else
bic r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else
mov r5, #DACR_INIT
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
b __turn_mmu_on
ENDPROC(__enable_mmu)
/*
* Enable the MMU. This completely changes the structure of the visible
* memory space. You will not be able to trace execution through this.
* If you have an enquiry about this, *please* check the linux-arm-kernel
* mailing list archives BEFORE sending another post to the list.
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*
* other registers depend on the function called upon completion
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(__turn_mmu_on)
mov r0, r0
instr_sync
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
instr_sync
mov r3, r3
mov r3, r13
ret r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
.popsection
#ifdef CONFIG_SMP_ON_UP
__HEAD
__fixup_smp:
and r3, r9, #0x000f0000 @ architecture version
teq r3, #0x000f0000 @ CPU ID supported?
bne __fixup_smp_on_up @ no, assume UP
bic r3, r9, #0x00ff0000
bic r3, r3, #0x0000000f @ mask 0xff00fff0
mov r4, #0x41000000
orr r4, r4, #0x0000b000
orr r4, r4, #0x00000020 @ val 0x4100b020
teq r3, r4 @ ARM 11MPCore?
reteq lr @ yes, assume SMP
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
and r0, r0, #0xc0000000 @ multiprocessing extensions and
teq r0, #0x80000000 @ not part of a uniprocessor system?
bne __fixup_smp_on_up @ no, assume UP
@ Core indicates it is SMP. Check for Aegis SOC where a single
@ Cortex-A9 CPU is present but SMP operations fault.
mov r4, #0x41000000
orr r4, r4, #0x0000c000
orr r4, r4, #0x00000090
teq r3, r4 @ Check for ARM Cortex-A9
retne lr @ Not ARM Cortex-A9,
@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
@ below address check will need to be #ifdef'd or equivalent
@ for the Aegis platform.
mrc p15, 4, r0, c15, c0 @ get SCU base address
teq r0, #0x0 @ '0' on actual UP A9 hardware
beq __fixup_smp_on_up @ So its an A9 UP
ldr r0, [r0, #4] @ read SCU Config
ARM_BE8(rev r0, r0) @ byteswap if big endian
and r0, r0, #0x3 @ number of CPUs
teq r0, #0x0 @ is 1?
retne lr
__fixup_smp_on_up:
adr r0, 1f
ldmia r0, {r3 - r5}
sub r3, r0, r3
add r4, r4, r3
add r5, r5, r3
b __do_fixup_smp_on_up
ENDPROC(__fixup_smp)
.align
1: .word .
.word __smpalt_begin
.word __smpalt_end
.pushsection .data
.align 2
.globl smp_on_up
smp_on_up:
ALT_SMP(.long 1)
ALT_UP(.long 0)
.popsection
#endif
.text
__do_fixup_smp_on_up:
cmp r4, r5
reths lr
ldmia r4!, {r0, r6}
ARM( str r6, [r0, r3] )
THUMB( add r0, r0, r3 )
#ifdef __ARMEB__
THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
THUMB( strh r6, [r0] )
b __do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)
ENTRY(fixup_smp)
stmfd sp!, {r4 - r6, lr}
mov r4, r0
add r5, r0, r1
mov r3, #0
bl __do_fixup_smp_on_up
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
#ifdef __ARMEB__
#define LOW_OFFSET 0x4
#define HIGH_OFFSET 0x0
#else
#define LOW_OFFSET 0x0
#define HIGH_OFFSET 0x4
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/* __fixup_pv_table - patch the stub instructions with the delta between
* PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
* can be expressed by an immediate shifter operand. The stub instruction
* has a form of '(add|sub) rd, rn, #imm'.
*/
__HEAD
__fixup_pv_table:
adr r0, 1f
ldmia r0, {r3-r7}
mvn ip, #0
subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
add r6, r6, r3 @ adjust __pv_phys_pfn_offset address
add r7, r7, r3 @ adjust __pv_offset address
mov r0, r8, lsr #PAGE_SHIFT @ convert to PFN
str r0, [r6] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
THUMB( it ne @ cross section branch )
bne __error
str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits
b __fixup_a_pv_table
ENDPROC(__fixup_pv_table)
.align
1: .long .
.long __pv_table_begin
.long __pv_table_end
2: .long __pv_phys_pfn_offset
.long __pv_offset
.text
__fixup_a_pv_table:
adr r0, 3f
ldr r6, [r0]
add r6, r6, r3
ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word
ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word
mov r6, r6, lsr #24
cmn r0, #1
#ifdef CONFIG_THUMB2_KERNEL
moveq r0, #0x200000 @ set bit 21, mov to mvn instruction
lsls r6, #24
beq 2f
clz r7, r6
lsr r6, #24
lsl r6, r7
bic r6, #0x0080
lsrs r7, #1
orrcs r6, #0x0080
orr r6, r6, r7, lsl #12
orr r6, #0x4000
b 2f
1: add r7, r3
ldrh ip, [r7, #2]
ARM_BE8(rev16 ip, ip)
tst ip, #0x4000
and ip, #0x8f00
orrne ip, r6 @ mask in offset bits 31-24
orreq ip, r0 @ mask in offset bits 7-0
ARM_BE8(rev16 ip, ip)
strh ip, [r7, #2]
bne 2f
ldrh ip, [r7]
ARM_BE8(rev16 ip, ip)
bic ip, #0x20
orr ip, ip, r0, lsr #16
ARM_BE8(rev16 ip, ip)
strh ip, [r7]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 1b
bx lr
#else
moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
b 2f
1: ldr ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
@ in BE8, we load data in BE, but instructions still in LE
bic ip, ip, #0xff000000
tst ip, #0x000f0000 @ check the rotation field
orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
biceq ip, ip, #0x00004000 @ clear bit 22
orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0
#else
bic ip, ip, #0x000000ff
tst ip, #0xf00 @ check the rotation field
orrne ip, ip, r6 @ mask in offset bits 31-24
biceq ip, ip, #0x400000 @ clear bit 22
orreq ip, ip, r0 @ mask in offset bits 7-0
#endif
str ip, [r7, r3]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 1b
ret lr
#endif
ENDPROC(__fixup_a_pv_table)
.align
3: .long __pv_offset
ENTRY(fixup_pv_table)
stmfd sp!, {r4 - r7, lr}
mov r3, #0 @ no offset
mov r4, r0 @ r0 = table start
add r5, r0, r1 @ r1 = table size
bl __fixup_a_pv_table
ldmfd sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
.data
.align 2
.globl __pv_phys_pfn_offset
.type __pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:
.word 0
.size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset
.globl __pv_offset
.type __pv_offset, %object
__pv_offset:
.quad 0
.size __pv_offset, . -__pv_offset
#endif
#include "head-common.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 11,408
|
arch/arm/kernel/entry-header.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>
@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH 0
#define BAD_DATA 1
#define BAD_ADDREXCPTN 2
#define BAD_IRQ 3
#define BAD_UNDEFINSTR 4
@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF 8
/*
* The SWI code relies on the fact that R0 is at the bottom of the stack
* (due to slow/fast restore user regs).
*/
#if S_R0 != 0
#error "Please fix"
#endif
.macro zero_fp
#ifdef CONFIG_FRAME_POINTER
mov fp, #0
#endif
.endm
#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif
.macro alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
mrc p15, 0, \rtmp2, c1, c0, 0
ldr \rtmp1, \label
ldr \rtmp1, [\rtmp1]
teq \rtmp1, \rtmp2
mcrne p15, 0, \rtmp1, c1, c0, 0
#endif
.endm
#ifdef CONFIG_CPU_V7M
/*
* ARMv7-M exception entry/exit macros.
*
* xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
* automatically saved on the current stack (32 words) before
* switching to the exception stack (SP_main).
*
* If exception is taken while in user mode, SP_main is
* empty. Otherwise, SP_main is aligned to 64 bit automatically
* (CCR.STKALIGN set).
*
* Linux assumes that the interrupts are disabled when entering an
* exception handler and it may BUG if this is not the case. Interrupts
* are disabled during entry and reenabled in the exit macro.
*
* v7m_exception_slow_exit is used when returning from SVC or PendSV.
* When returning to kernel mode, we don't return from exception.
*/
.macro v7m_exception_entry
@ determine the location of the registers saved by the core during
@ exception entry. Depending on the mode the cpu was in when the
@ exception happend that is either on the main or the process stack.
@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
@ was used.
tst lr, #EXC_RET_STACK_MASK
mrsne r12, psp
moveq r12, sp
@ we cannot rely on r0-r3 and r12 matching the value saved in the
@ exception frame because of tail-chaining. So these have to be
@ reloaded.
ldmia r12!, {r0-r3}
@ Linux expects to have irqs off. Do it here before taking stack space
cpsid i
sub sp, #PT_REGS_SIZE-S_IP
stmdb sp!, {r0-r11}
@ load saved r12, lr, return address and xPSR.
@ r0-r7 are used for signals and never touched from now on. Clobbering
@ r8-r12 is OK.
mov r9, r12
ldmia r9!, {r8, r10-r12}
@ calculate the original stack pointer value.
@ r9 currently points to the memory location just above the auto saved
@ xPSR.
@ The cpu might automatically 8-byte align the stack. Bit 9
@ of the saved xPSR specifies if stack aligning took place. In this case
@ another 32-bit value is included in the stack.
tst r12, V7M_xPSR_FRAMEPTRALIGN
addne r9, r9, #4
@ store saved r12 using str to have a register to hold the base for stm
str r8, [sp, #S_IP]
add r8, sp, #S_SP
@ store r13-r15, xPSR
stmia r8!, {r9-r12}
@ store old_r0
str r0, [r8]
.endm
/*
* PENDSV and SVCALL are configured to have the same exception
* priorities. As a kernel thread runs at SVCALL execution priority it
* can never be preempted and so we will never have to return to a
* kernel thread here.
*/
.macro v7m_exception_slow_exit ret_r0
cpsid i
ldr lr, =exc_ret
ldr lr, [lr]
@ read original r12, sp, lr, pc and xPSR
add r12, sp, #S_IP
ldmia r12, {r1-r5}
@ an exception frame is always 8-byte aligned. To tell the hardware if
@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
@ accordingly.
tst r2, #4
subne r2, r2, #4
orrne r5, V7M_xPSR_FRAMEPTRALIGN
biceq r5, V7M_xPSR_FRAMEPTRALIGN
@ ensure bit 0 is cleared in the PC, otherwise behaviour is
@ unpredictable
bic r4, #1
@ write basic exception frame
stmdb r2!, {r1, r3-r5}
ldmia sp, {r1, r3-r5}
.if \ret_r0
stmdb r2!, {r0, r3-r5}
.else
stmdb r2!, {r1, r3-r5}
.endif
@ restore process sp
msr psp, r2
@ restore original r4-r11
ldmia sp!, {r0-r11}
@ restore main sp
add sp, sp, #PT_REGS_SIZE-S_IP
cpsie i
bx lr
.endm
#endif /* CONFIG_CPU_V7M */
@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
.macro store_user_sp_lr, rd, rtemp, offset = 0
mrs \rtemp, cpsr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch to the SYS mode
str sp, [\rd, #\offset] @ save sp_usr
str lr, [\rd, #\offset + 4] @ save lr_usr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
.macro load_user_sp_lr, rd, rtemp, offset = 0
mrs \rtemp, cpsr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch to the SYS mode
ldr sp, [\rd, #\offset] @ load sp_usr
ldr lr, [\rd, #\offset + 4] @ load lr_usr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
@ The parent context IRQs must have been enabled to get here in
@ the first place, so there's no point checking the PSR I bit.
bl trace_hardirqs_on
#endif
.else
@ IRQs off again before pulling preserved data off the stack
disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
tst \rpsr, #PSR_I_BIT
bleq trace_hardirqs_on
tst \rpsr, #PSR_I_BIT
blne trace_hardirqs_off
#endif
.endif
uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
sub r0, sp, #4 @ uninhabited address
strex r1, r2, [r0] @ clear the exclusive monitor
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#else
@ Thumb mode SVC restore
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
@ We must avoid clrex due to Cortex-A15 erratum #830321
strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
ldmia sp, {r0 - r12}
mov sp, lr
ldr lr, [sp], #4
rfeia sp!
#endif
.endm
@
@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
@
@ This macro acts in a similar manner to svc_exit but switches to FIQ
@ mode to restore the final part of the register state.
@
@ We cannot use the normal svc_exit procedure because that would
@ clobber spsr_svc (FIQ could be delivered during the first few
@ instructions of vector_swi meaning its contents have not been
@ saved anywhere).
@
@ Note that, unlike svc_exit, this macro also does not allow a caller
@ supplied rpsr. This is because the FIQ exceptions are not re-entrant
@ and the handlers cannot call into the scheduler (meaning the value
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp
ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
@ clobber state restored below)
msr cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
add r8, r0, #S_PC
ldr r9, [r0, #S_PSR]
msr spsr_cxsf, r9
ldr r0, [r0, #S_R0]
ldmia r8, {pc}^
#else
@ Thumb mode restore
add r0, sp, #S_R2
ldr lr, [sp, #S_LR]
ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
@ clobber state restored below)
ldmia r0, {r2 - r12}
mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
msr cpsr_c, r1
sub r0, #S_R2
add r8, r0, #S_PC
ldmia r0, {r0 - r1}
rfeia r8
#endif
.endm
.macro restore_user_regs, fast = 0, offset = 0
uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
tst r1, #PSR_I_BIT | 0x0f
bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
strex r1, r2, [r2] @ clear the exclusive monitor
#endif
.if \fast
ldmdb r2, {r1 - lr}^ @ get calling r1 - lr
.else
ldmdb r2, {r0 - lr}^ @ get calling r0 - lr
.endif
mov r0, r0 @ ARMv5T and earlier require a nop
@ after ldm {}^
add sp, sp, #\offset + PT_REGS_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
1: bug "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
@ V7M restore.
@ Note that we don't need to do clrex here as clearing the local
@ monitor is part of the exception entry and exit sequence.
.if \offset
add sp, #\offset
.endif
v7m_exception_slow_exit ret_r0 = \fast
#else
@ Thumb mode restore
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC] @ get pc
add sp, sp, #\offset + S_SP
tst r1, #PSR_I_BIT | 0x0f
bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
@ We must avoid clrex due to Cortex-A15 erratum #830321
strex r1, r2, [sp] @ clear the exclusive monitor
.if \fast
ldmdb sp, {r1 - r12} @ get calling r1 - r12
.else
ldmdb sp, {r0 - r12} @ get calling r0 - r12
.endif
add sp, sp, #PT_REGS_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
1: bug "Returning to usermode but unexpected PSR bits set?", \@
#endif /* !CONFIG_THUMB2_KERNEL */
.endm
/*
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
*/
.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
bl context_tracking_user_exit
ldmia sp!, {r0-r3, ip, lr}
.else
bl context_tracking_user_exit
.endif
#endif
.endm
.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
.if \save
stmdb sp!, {r0-r3, ip, lr}
bl context_tracking_user_enter
ldmia sp!, {r0-r3, ip, lr}
.else
bl context_tracking_user_enter
.endif
#endif
.endm
.macro invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
mov \tmp, \nr
cmp \tmp, #NR_syscalls @ check upper syscall limit
movcs \tmp, #0
csdb
badr lr, \ret @ return address
.if \reload
add r1, sp, #S_R0 + S_OFF @ pointer to regs
ldmccia r1, {r0 - r6} @ reload r0-r6
stmccia sp, {r4, r5} @ update stack arguments
.endif
ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
#else
cmp \nr, #NR_syscalls @ check upper syscall limit
badr lr, \ret @ return address
.if \reload
add r1, sp, #S_R0 + S_OFF @ pointer to regs
ldmccia r1, {r0 - r6} @ reload r0-r6
stmccia sp, {r4, r5} @ update stack arguments
.endif
ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
#endif
.endm
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
*
* r7 is reserved for the system call number for thumb mode.
*
* Note that tbl == why is intentional.
*
* We must set at least "tsk" and "why" when calling ret_with_reschedule.
*/
scno .req r7 @ syscall number
tbl .req r8 @ syscall table pointer
why .req r8 @ Linux syscall (!= 0)
tsk .req r9 @ current thread_info
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,982
|
arch/arm/kernel/head-common.S
|
/*
* linux/arch/arm/kernel/head-common.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <asm/assembler.h>
#define ATAG_CORE 0x54410001
#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
#ifdef CONFIG_CPU_BIG_ENDIAN
#define OF_DT_MAGIC 0xd00dfeed
#else
#define OF_DT_MAGIC 0xedfe0dd0 /* 0xd00dfeed in big-endian */
#endif
/*
* Exception handling. Something went wrong and we can't proceed. We
* ought to tell the user, but since we don't have any guarantee that
* we're even running on the right architecture, we do virtually nothing.
*
* If CONFIG_DEBUG_LL is set we try to print out something about the error
* and hope for the best (useful if bootloader fails to pass a proper
* machine ID for example).
*/
__HEAD
/* Determine validity of the r2 atags pointer. The heuristic requires
* that the pointer be aligned, in the first 16k of physical RAM and
* that the ATAG_CORE marker is first and present. If CONFIG_OF_FLATTREE
* is selected, then it will also accept a dtb pointer. Future revisions
* of this function may be more lenient with the physical address and
* may also be able to move the ATAGS block if necessary.
*
* Returns:
* r2 either valid atags pointer, valid dtb pointer, or zero
* r5, r6 corrupted
*/
__vet_atags:
tst r2, #0x3 @ aligned?
bne 1f
ldr r5, [r2, #0]
#ifdef CONFIG_OF_FLATTREE
ldr r6, =OF_DT_MAGIC @ is it a DTB?
cmp r5, r6
beq 2f
#endif
cmp r5, #ATAG_CORE_SIZE @ is first tag ATAG_CORE?
cmpne r5, #ATAG_CORE_SIZE_EMPTY
bne 1f
ldr r5, [r2, #4]
ldr r6, =ATAG_CORE
cmp r5, r6
bne 1f
2: ret lr @ atag/dtb pointer is ok
1: mov r2, #0
ret lr
ENDPROC(__vet_atags)
/*
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
* r0 = cp#15 control register (exc_ret for M-class)
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
*/
__INIT
__mmap_switched:
mov r7, r1
mov r8, r2
mov r10, r0
adr r4, __mmap_switched_data
mov fp, #0
#if defined(CONFIG_XIP_DEFLATED_DATA)
ARM( ldr sp, [r4], #4 )
THUMB( ldr sp, [r4] )
THUMB( add r4, #4 )
bl __inflate_kernel_data @ decompress .data to RAM
teq r0, #0
bne __error
#elif defined(CONFIG_XIP_KERNEL)
ARM( ldmia r4!, {r0, r1, r2, sp} )
THUMB( ldmia r4!, {r0, r1, r2, r3} )
THUMB( mov sp, r3 )
sub r2, r2, r1
bl memcpy @ copy .data to RAM
#endif
ARM( ldmia r4!, {r0, r1, sp} )
THUMB( ldmia r4!, {r0, r1, r3} )
THUMB( mov sp, r3 )
sub r2, r1, r0
mov r1, #0
bl memset @ clear .bss
ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID
str r7, [r1] @ Save machine type
str r8, [r2] @ Save atags pointer
cmp r3, #0
strne r10, [r3] @ Save control register values
mov lr, #0
b start_kernel
ENDPROC(__mmap_switched)
.align 2
.type __mmap_switched_data, %object
__mmap_switched_data:
#ifdef CONFIG_XIP_KERNEL
#ifndef CONFIG_XIP_DEFLATED_DATA
.long _sdata @ r0
.long __data_loc @ r1
.long _edata_loc @ r2
#endif
.long __bss_stop @ sp (temporary stack in .bss)
#endif
.long __bss_start @ r0
.long __bss_stop @ r1
.long init_thread_union + THREAD_START_SP @ sp
.long processor_id @ r0
.long __machine_arch_type @ r1
.long __atags_pointer @ r2
#ifdef CONFIG_CPU_CP15
.long cr_alignment @ r3
#else
M_CLASS(.long exc_ret) @ r3
AR_CLASS(.long 0) @ r3
#endif
.size __mmap_switched_data, . - __mmap_switched_data
__FINIT
.text
/*
* This provides a C-API version of __lookup_processor_type
*/
ENTRY(lookup_processor_type)
stmfd sp!, {r4 - r6, r9, lr}
mov r9, r0
bl __lookup_processor_type
mov r0, r5
ldmfd sp!, {r4 - r6, r9, pc}
ENDPROC(lookup_processor_type)
/*
* Read processor ID register (CP#15, CR0), and look up in the linker-built
* supported processor list. Note that we can't use the absolute addresses
* for the __proc_info lists since we aren't running with the MMU on
* (and therefore, we are not in the correct address space). We have to
* calculate the offset.
*
* r9 = cpuid
* Returns:
* r3, r4, r6 corrupted
* r5 = proc_info pointer in physical address space
* r9 = cpuid (preserved)
*/
__lookup_processor_type:
adr r3, __lookup_processor_type_data
ldmia r3, {r4 - r6}
sub r3, r3, r4 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
1: ldmia r5, {r3, r4} @ value, mask
and r4, r4, r9 @ mask wanted bits
teq r3, r4
beq 2f
add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
cmp r5, r6
blo 1b
mov r5, #0 @ unknown processor
2: ret lr
ENDPROC(__lookup_processor_type)
/*
* Look in <asm/procinfo.h> for information about the __proc_info structure.
*/
.align 2
.type __lookup_processor_type_data, %object
__lookup_processor_type_data:
.long .
.long __proc_info_begin
.long __proc_info_end
.size __lookup_processor_type_data, . - __lookup_processor_type_data
__error_lpae:
#ifdef CONFIG_DEBUG_LL
adr r0, str_lpae
bl printascii
b __error
str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
#else
b __error
#endif
.align
ENDPROC(__error_lpae)
__error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
bl printascii
mov r0, r9
bl printhex8
adr r0, str_p2
bl printascii
b __error
str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x"
str_p2: .asciz ").\n"
.align
#endif
ENDPROC(__error_p)
__error:
#ifdef CONFIG_ARCH_RPC
/*
* Turn the screen red on a error - RiscPC only.
*/
mov r0, #0x02000000
mov r3, #0x11
orr r3, r3, r3, lsl #8
orr r3, r3, r3, lsl #16
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
#endif
1: mov r0, r0
b 1b
ENDPROC(__error)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.