/* mculib libgcc routines of Andes NDS32 cpu for GNU compiler
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Andes Technology Corporation.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.section .mdebug.abi_nds32
.previous
/* ------------------------------------------- */
/* FPBIT floating point operations for libgcc */
/* ------------------------------------------- */
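/* Orientation: a hedged C sketch of the core path of __addsf3 below,
   assuming both inputs are already unpacked, normal, same-signed, and
   ea >= eb (zeros, infinities, NaNs, the subtract path and overflow
   to Inf are handled separately in the code).  Significands are kept
   left-justified with the implicit 1 at bit 31, the sticky state at
   bit 1, and round-to-nearest-even applied at bit 7:

   unsigned add_core (unsigned ea, unsigned ma, unsigned eb, unsigned mb)
   {
     unsigned d = ea - eb;
     if (d >= 32)
       mb = 2;                                // all bits lost: sticky only
     else if (d)
       {
         unsigned t = mb >> d;
         if ((t << d) != mb)                  // inexact shift:
           t |= 2;                            // record it in the sticky bit
         mb = t;
       }
     unsigned sum = ma + mb;
     if (sum < ma)                            // carry out of bit 31
       {
         if (sum & 1)
           sum |= 2;                          // keep the sticky bit
         sum >>= 1;                           // implicit 1 is now the carry
         ++ea;
       }
     if (sum + 0x80 < sum)
       ++ea;                                  // rounding can carry out too
     sum += 0x80;                             // round to nearest ...
     sum -= (sum >> 8) & 1;                   // ... ties to even
     return (ea << 23) | ((sum << 1) >> 9);   // pack; sign handled outside
   }  */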
#ifdef L_addsub_sf
.text
.align 2
.global __subsf3
.type __subsf3, @function
__subsf3:
push $lp
pushm $r6, $r9
move $r2, #0x80000000
xor $r1, $r1, $r2
j .Lsfpadd
.global __addsf3
.type __addsf3, @function
__addsf3:
push $lp
pushm $r6, $r9
.Lsfpadd:
srli $r5, $r0, #23
andi $r5, $r5, #0xff
srli $r7, $r1, #23
andi $r7, $r7, #0xff
move $r3, #0x80000000
slli $r4, $r0, #8
or $r4, $r4, $r3
slli $r6, $r1, #8
or $r6, $r6, $r3
addi $r9, $r5, #-1
slti $r15, $r9, #0xfe
beqzs8 .LEspecA
.LElab1:
addi $r9, $r7, #-1
slti $r15, $r9, #0xfe
beqzs8 .LEspecB
.LElab2:
sub $r8, $r5, $r7
sltsi $r15, $r8, #0
bnezs8 .Li1
sltsi $r15, $r8, #0x20
bnezs8 .Li2
move $r6, #2
j .Le1
.Li2:
move $r2, $r6
srl $r6, $r6, $r8
sll $r9, $r6, $r8
beq $r9, $r2, .Le1
ori $r6, $r6, #2
j .Le1
.Li1:
move $r5, $r7
subri $r8, $r8, #0
sltsi $r15, $r8, #0x20
bnezs8 .Li4
move $r4, #2
j .Le1
.Li4:
move $r2, $r4
srl $r4, $r4, $r8
sll $r9, $r4, $r8
beq $r9, $r2, .Le1
ori $r4, $r4, #2
.Le1:
and $r8, $r0, $r3
xor $r9, $r8, $r1
sltsi $r15, $r9, #0
bnezs8 .LEsub1
#ADD($r4, $r6)
add $r4, $r4, $r6
slt $r15, $r4, $r6
beqzs8 .LEres
andi $r9, $r4, #1
beqz $r9, .Li7
ori $r4, $r4, #2
.Li7:
srli $r4, $r4, #1
addi $r5, $r5, #1
subri $r15, $r5, #0xff
bnezs8 .LEres
move $r4, #0
j .LEres
.LEsub1:
#SUB($r4, $r6)
move $r15, $r4
sub $r4, $r4, $r6
slt $r15, $r15, $r4
beqzs8 .Li9
subri $r4, $r4, #0
xor $r8, $r8, $r3
j .Le9
.Li9:
beqz $r4, .LEzer
.Le9:
#ifdef __NDS32_PERF_EXT__
clz $r2, $r4
#else
pushm $r0, $r1
pushm $r3, $r5
move $r0, $r4
bal __clzsi2
move $r2, $r0
popm $r3, $r5
popm $r0, $r1
#endif
sub $r5, $r5, $r2
sll $r4, $r4, $r2
.LEres:
blez $r5, .LEund
.LElab12:
#ADD($r4, $0x80)
move $r15, #0x80
add $r4, $r4, $r15
slt $r15, $r4, $r15
#ADDC($r5, $0x0)
add $r5, $r5, $r15
srli $r9, $r4, #8
andi $r9, $r9, #1
sub $r4, $r4, $r9
slli $r4, $r4, #1
srli $r4, $r4, #9
slli $r9, $r5, #23
or $r4, $r4, $r9
or $r0, $r4, $r8
.LE999:
popm $r6, $r9
pop $lp
ret5 $lp
.LEund:
subri $r2, $r5, #1
slti $r15, $r2, #0x20
beqzs8 .LEzer
move $r9, #0x80000000
or $r4, $r4, $r9
subri $r9, $r2, #0x20
sll $r5, $r4, $r9
srl $r4, $r4, $r2
beqz $r5, .Li10
ori $r4, $r4, #1
.Li10:
move $r5, #0
addi $r9, $r4, #0x80
sltsi $r15, $r9, #0
beqzs8 .LElab12
move $r5, #1
j .LElab12
.LEspecA:
bnez $r5, .Li12
add $r4, $r4, $r4
beqz $r4, .Li13
#ifdef __NDS32_PERF_EXT__
clz $r8, $r4
#else
pushm $r0, $r5
move $r0, $r4
bal __clzsi2
move $r8, $r0
popm $r0, $r5
#endif
sub $r5, $r5, $r8
sll $r4, $r4, $r8
j .LElab1
.Li13:
subri $r15, $r7, #0xff
beqzs8 .LEspecB
move $r9, #0x80000000
bne $r1, $r9, .LEretB
.Li12:
add $r9, $r4, $r4
bnez $r9, .LEnan
subri $r15, $r7, #0xff
bnezs8 .LEretA
xor $r9, $r0, $r1
sltsi $r15, $r9, #0
bnezs8 .LEnan
j .LEretB
.LEspecB:
bnez $r7, .Li15
add $r6, $r6, $r6
beqz $r6, .LEretA
#ifdef __NDS32_PERF_EXT__
clz $r8, $r6
#else
pushm $r0, $r5
move $r0, $r6
bal __clzsi2
move $r8, $r0
popm $r0, $r5
#endif
sub $r7, $r7, $r8
sll $r6, $r6, $r8
j .LElab2
.Li15:
add $r9, $r6, $r6
bnez $r9, .LEnan
.LEretB:
move $r0, $r1
j .LE999
.LEretA:
j .LE999
.LEzer:
move $r0, #0
j .LE999
.LEnan:
move $r0, #0xffc00000
j .LE999
.size __subsf3, .-__subsf3
.size __addsf3, .-__addsf3
#endif /* L_addsub_sf */
#ifdef L_sf_to_si
.text
.align 2
.global __fixsfsi
.type __fixsfsi, @function
__fixsfsi:
push $lp
slli $r1, $r0, #8
move $r3, #0x80000000
or $r1, $r1, $r3
srli $r3, $r0, #23
andi $r3, $r3, #0xff
subri $r2, $r3, #0x9e
blez $r2, .LJspec
sltsi $r15, $r2, #0x20
bnezs8 .Li42
move $r0, #0
j .LJ999
.Li42:
srl $r1, $r1, $r2
sltsi $r15, $r0, #0
beqzs8 .Li43
subri $r1, $r1, #0
.Li43:
move $r0, $r1
.LJ999:
pop $lp
ret5 $lp
.LJspec:
move $r3, #0x7f800000
slt $r15, $r3, $r0
beqzs8 .Li44
move $r0, #0x80000000
j .LJ999
.Li44:
move $r0, #0x7fffffff
j .LJ999
.size __fixsfsi, .-__fixsfsi
#endif /* L_sf_to_si */
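/* A hedged C sketch of __fixsfsi above: truncation toward zero, with
   out-of-range positives saturating to 0x7fffffff and every bit
   pattern above +Inf (all negatives and NaNs included, matching the
   unsigned compare at .LJspec) saturating to 0x80000000:

   int fixsfsi (unsigned a)                    // raw float bits in
   {
     unsigned frac = (a << 8) | 0x80000000;    // implicit 1 at bit 31
     unsigned exp  = (a >> 23) & 0xff;
     int shift = 0x9e - exp;                   // 31 - unbiased exponent
     if (shift <= 0)
       return a > 0x7f800000u ? (int) 0x80000000 : 0x7fffffff;
     if (shift >= 32)
       return 0;                               // |value| < 1
     int r = (int) (frac >> shift);
     return (int) a < 0 ? -r : r;
   }  */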
#ifdef L_divsi3
.text
.align 2
.globl __divsi3
.type __divsi3, @function
__divsi3:
! ---------------------------------------------------------------------
! neg = 0;
! if (a < 0)
! { a = -a;
! neg = !neg;
! }
! ---------------------------------------------------------------------
sltsi $r5, $r0, 0 ! $r5 <- neg = (a < 0) ? 1 : 0
subri $r4, $r0, 0 ! $r4 <- a = -a
cmovn $r0, $r4, $r5 ! $r0 <- a = neg ? -a : a
.L2:
! ---------------------------------------------------------------------
! if (b < 0)
! ---------------------------------------------------------------------
bgez $r1, .L3 ! if b >= 0, skip
! ---------------------------------------------------------------------
! { b=-b;
! neg=!neg;
! }
! ---------------------------------------------------------------------
subri $r1, $r1, 0 ! $r1 <- b = -b
subri $r5, $r5, 1 ! $r5 <- neg = !neg
.L3:
! ---------------------------------------------------------------------
!!res = udivmodsi4 (a, b, 1);
! res = 0;
! if (den != 0)
! ---------------------------------------------------------------------
movi $r2, 0 ! $r2 <- res = 0
beqz $r1, .L1 ! if den == 0, skip
! ---------------------------------------------------------------------
! bit = 1;
! ---------------------------------------------------------------------
movi $r4, 1 ! $r4 <- bit = 1
#ifndef __OPTIMIZE_SIZE__
.L6:
#endif
! ---------------------------------------------------------------------
! while (den < num && bit && !(den & (1L << 31)))
! ---------------------------------------------------------------------
slt $ta, $r1, $r0 ! $ta <- den < num ?
beqz $ta, .L5 ! if no, skip
! ---------------------------------------------------------------------
! { den << = 1;
! bit << = 1;
! }
! ---------------------------------------------------------------------
#if defined (__OPTIMIZE_SIZE__) && !defined (__NDS32_ISA_V3M__)
clz $r3, $r1 ! $r3 <- leading zero count for den
clz $ta, $r0 ! $ta <- leading zero count for num
sub $r3, $r3, $ta ! $r3 <- number of bits to shift
sll $r1, $r1, $r3 ! $r1 <- den
sll $r4, $r4, $r3 ! $r4 <- bit
#else
slli $r1, $r1, 1 ! $r1 <- den << = 1
slli $r4, $r4, 1 ! $r4 <- bit << = 1
b .L6 ! continue loop
#endif
.L5:
! ---------------------------------------------------------------------
! while (bit)
! { if (num >= den)
! ---------------------------------------------------------------------
slt $ta, $r0, $r1 ! $ta <- num < den ?
bnez $ta, .L9 ! if yes, skip
! ---------------------------------------------------------------------
! { num -= den;
! res |= bit;
! }
! ---------------------------------------------------------------------
sub $r0, $r0, $r1 ! $r0 <- num -= den
or $r2, $r2, $r4 ! $r2 <- res |= bit
.L9:
! ---------------------------------------------------------------------
! bit >> = 1;
! den >> = 1;
! }
!!if (modwanted)
!! return num;
!!return res;
! ---------------------------------------------------------------------
srli $r4, $r4, 1 ! $r4 <- bit >> = 1
srli $r1, $r1, 1 ! $r1 <- den >> = 1
bnez $r4, .L5 ! if bit != 0, continue loop
.L1:
! ---------------------------------------------------------------------
! if (neg)
! res = -res;
! return res;
! ---------------------------------------------------------------------
subri $r0, $r2, 0 ! $r0 <- -res
cmovz $r0, $r2, $r5 ! $r0 <- neg ? -res : res
! ---------------------------------------------------------------------
ret
.size __divsi3, .-__divsi3
#endif /* L_divsi3 */
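! ---------------------------------------------------------------------
! For reference, a hedged C consolidation of the udivmodsi4() loop that
! the comments above quote piecewise.  __divsi3 wraps it with sign
! handling; __udivsi3, __modsi3 and __umodsi3 below reuse the same loop
! (the mod variants return num instead of res):
!
! unsigned udivmodsi4 (unsigned num, unsigned den, int modwanted)
! {
!   unsigned bit = 1, res = 0;
!   if (den != 0)
!     {
!       while (den < num && bit && !(den & (1UL << 31)))
!         { den <<= 1; bit <<= 1; }
!       while (bit)
!         {
!           if (num >= den) { num -= den; res |= bit; }
!           bit >>= 1; den >>= 1;
!         }
!     }
!   return modwanted ? num : res;
! }
! ---------------------------------------------------------------------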
#ifdef L_divdi3
!--------------------------------------
#ifdef __big_endian__
#define V1H $r0
#define V1L $r1
#define V2H $r2
#define V2L $r3
#else
#define V1H $r1
#define V1L $r0
#define V2H $r3
#define V2L $r2
#endif
!--------------------------------------
.text
.align 2
.globl __divdi3
.type __divdi3, @function
__divdi3:
! prologue
#ifdef __NDS32_ISA_V3M__
push25 $r10, 0
#else
smw.adm $r6, [$sp], $r10, 2
#endif
! end of prologue
move $r8, V1L
move $r9, V1H
move $r6, V2L
move $r7, V2H
movi $r10, 0
bgez V1H, .L80
bal __negdi2
move $r8, V1L
move $r9, V1H
movi $r10, -1
.L80:
bgez $r7, .L81
move V1L, $r6
move V1H, $r7
bal __negdi2
move $r6, V1L
move $r7, V1H
nor $r10, $r10, $r10
.L81:
move V2L, $r6
move V2H, $r7
move V1L, $r8
move V1H, $r9
movi $r4, 0
bal __udivmoddi4
beqz $r10, .L82
bal __negdi2
.L82:
! epilogue
#ifdef __NDS32_ISA_V3M__
pop25 $r10, 0
#else
lmw.bim $r6, [$sp], $r10, 2
ret
#endif
.size __divdi3, .-__divdi3
#endif /* L_divdi3 */
#ifdef L_modsi3
.text
.align 2
.globl __modsi3
.type __modsi3, @function
__modsi3:
! ---------------------------------------------------------------------
! neg=0;
! if (a<0)
! { a=-a;
! neg=1;
! }
! ---------------------------------------------------------------------
sltsi $r5, $r0, 0 ! $r5 <- neg = (a < 0) ? 1 : 0
subri $r4, $r0, 0 ! $r4 <- -a
cmovn $r0, $r4, $r5 ! $r0 <- |a|
! ---------------------------------------------------------------------
! if (b < 0)
#ifndef __NDS32_PERF_EXT__
! ---------------------------------------------------------------------
bgez $r1, .L3 ! if b >= 0, skip
! ---------------------------------------------------------------------
! b = -b;
! ---------------------------------------------------------------------
subri $r1, $r1, 0 ! $r1 <- |b|
.L3:
! ---------------------------------------------------------------------
!!res = udivmodsi4 (a, b, 1);
! if (den != 0)
! ---------------------------------------------------------------------
#else /* __NDS32_PERF_EXT__ */
! b = -b;
!!res = udivmodsi4 (a, b, 1);
! if (den != 0)
! ---------------------------------------------------------------------
abs $r1, $r1 ! $r1 <- |b|
#endif /* __NDS32_PERF_EXT__ */
beqz $r1, .L1 ! if den == 0, skip
! ---------------------------------------------------------------------
! { bit = 1;
! res = 0;
! ---------------------------------------------------------------------
movi $r4, 1 ! $r4 <- bit = 1
#ifndef __OPTIMIZE_SIZE__
.L6:
#endif
! ---------------------------------------------------------------------
! while (den < num && bit && !(den & (1L << 31)))
! ---------------------------------------------------------------------
slt $ta, $r1, $r0 ! $ta <- den < num ?
beqz $ta, .L5 ! if no, skip
! ---------------------------------------------------------------------
! { den << = 1;
! bit << = 1;
! }
! ---------------------------------------------------------------------
#if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
clz $r3, $r1 ! $r3 <- leading zero count for den
clz $ta, $r0 ! $ta <- leading zero count for num
sub $r3, $r3, $ta ! $r3 <- number of bits to shift
sll $r1, $r1, $r3 ! $r1 <- den
sll $r4, $r4, $r3 ! $r4 <- bit
#else
slli $r1, $r1, 1 ! $r1 <- den << = 1
slli $r4, $r4, 1 ! $r4 <- bit << = 1
b .L6 ! continue loop
#endif
.L5:
! ---------------------------------------------------------------------
! while (bit)
! { if (num >= den)
! { num -= den;
! res |= bit;
! }
! bit >> = 1;
! den >> = 1;
! }
! }
!!if (modwanted)
!! return num;
!!return res;
! ---------------------------------------------------------------------
sub $r2, $r0, $r1 ! $r2 <- num - den
slt $ta, $r0, $r1 ! $ta <- num < den ?
srli $r4, $r4, 1 ! $r4 <- bit >> = 1
cmovz $r0, $r2, $ta ! $r0 <- num = (num < den) ? num : num - den
srli $r1, $r1, 1 ! $r1 <- den >> = 1
bnez $r4, .L5 ! if bit != 0, continue loop
.L1:
! ---------------------------------------------------------------------
! if (neg)
! res = -res;
! return res;
! ---------------------------------------------------------------------
subri $r3, $r0, 0 ! $r3 <- -res
cmovn $r0, $r3, $r5 ! $r0 <- neg ? -res : res
! ---------------------------------------------------------------------
ret
.size __modsi3, .-__modsi3
#endif /* L_modsi3 */
#ifdef L_moddi3
!--------------------------------------
#ifdef __big_endian__
#define V1H $r0
#define V1L $r1
#define V2H $r2
#define V2L $r3
#else
#define V1H $r1
#define V1L $r0
#define V2H $r3
#define V2L $r2
#endif
!--------------------------------------
.text
.align 2
.globl __moddi3
.type __moddi3, @function
__moddi3:
! =====================================================================
! stack allocation:
! sp+32 +-----------------------+
! | $lp |
! sp+28 +-----------------------+
! | $r6 - $r10 |
! sp+8 +-----------------------+
! | |
! sp+4 +-----------------------+
! | |
! sp +-----------------------+
! =====================================================================
! prologue
#ifdef __NDS32_ISA_V3M__
push25 $r10, 8
#else
smw.adm $r6, [$sp], $r10, 2
addi $sp, $sp, -8
#endif
! end of prologue
!------------------------------------------
! __moddi3 (DWtype u, DWtype v)
! {
! word_type c = 0;
! DWunion uu = {.ll = u};
! DWunion vv = {.ll = v};
! DWtype w;
! if (uu.s.high < 0)
! c = ~c,
! uu.ll = -uu.ll;
!---------------------------------------------
move $r8, V1L
move $r9, V1H
move $r6, V2L
move $r7, V2H
movi $r10, 0 ! r10 = c = 0
bgez V1H, .L80 ! if u >= 0, skip the negation
bal __negdi2
move $r8, V1L
move $r9, V1H
movi $r10, -1 ! r10 = c = ~c
!------------------------------------------------
! if (vv.s.high < 0)
! vv.ll = -vv.ll;
!----------------------------------------------
.L80:
bgez $r7, .L81 ! if v >= 0, skip the negation
move V1L, $r6
move V1H, $r7
bal __negdi2
move $r6, V1L
move $r7, V1H
!------------------------------------------
! (void) __udivmoddi4 (uu.ll, vv.ll, &w);
! if (c)
! w = -w;
! return w;
!-----------------------------------------
.L81:
move V2L, $r6
move V2H, $r7
move V1L, $r8
move V1H, $r9
addi $r4, $sp, 0
bal __udivmoddi4
lwi $r0, [$sp+(0)] ! le: sp + 0 is low, be: sp + 0 is high
lwi $r1, [$sp+(4)] ! le: sp + 4 is low, be: sp + 4 is high
beqz $r10, .L82
bal __negdi2
.L82:
! epilogue
#ifdef __NDS32_ISA_V3M__
pop25 $r10, 8
#else
addi $sp, $sp, 8
lmw.bim $r6, [$sp], $r10, 2
ret
#endif
.size __moddi3, .-__moddi3
#endif /* L_moddi3 */
#ifdef L_mulsi3
.text
.align 2
.globl __mulsi3
.type __mulsi3, @function
__mulsi3:
! ---------------------------------------------------------------------
! r = 0;
! while (a)
! $r0: r
! $r1: b
! $r2: a
! ---------------------------------------------------------------------
beqz $r0, .L7 ! if a == 0, done
move $r2, $r0 ! $r2 <- a
movi $r0, 0 ! $r0 <- r <- 0
.L8:
! ---------------------------------------------------------------------
! { if (a & 1)
! r += b;
! a >> = 1;
! b << = 1;
! }
! $r0: r
! $r1: b
! $r2: a
! $r3: scratch
! $r4: scratch
! ---------------------------------------------------------------------
andi $r3, $r2, 1 ! $r3 <- a & 1
add $r4, $r0, $r1 ! $r4 <- r += b
cmovn $r0, $r4, $r3 ! $r0 <- r
srli $r2, $r2, 1 ! $r2 <- a >> = 1
slli $r1, $r1, 1 ! $r1 <- b << = 1
bnez $r2, .L8 ! if a != 0, continue loop
.L7:
! ---------------------------------------------------------------------
! $r0: return code
! ---------------------------------------------------------------------
ret
.size __mulsi3, .-__mulsi3
#endif /* L_mulsi3 */
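! ---------------------------------------------------------------------
! A hedged C sketch of the shift-and-add loop above; the cmovn makes
! the "if (a & 1)" update branch-free in the assembly:
!
! unsigned mulsi3 (unsigned a, unsigned b)
! {
!   unsigned r = 0;
!   while (a)
!     {
!       if (a & 1)
!         r += b;
!       a >>= 1;
!       b <<= 1;
!     }
!   return r;
! }
! ---------------------------------------------------------------------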
#ifdef L_udivsi3
.text
.align 2
.globl __udivsi3
.type __udivsi3, @function
__udivsi3:
! ---------------------------------------------------------------------
!!res=udivmodsi4(a,b,0);
! res=0;
! if (den!=0)
! ---------------------------------------------------------------------
movi $r2, 0 ! $r2 <- res=0
beqz $r1, .L1 ! if den==0, skip
! ---------------------------------------------------------------------
! { bit=1;
! ---------------------------------------------------------------------
movi $r4, 1 ! $r4 <- bit=1
#ifndef __OPTIMIZE_SIZE__
.L6:
#endif
! ---------------------------------------------------------------------
! while (den<num
! ---------------------------------------------------------------------
slt $ta, $r1, $r0 ! $ta <- den<num?
beqz $ta, .L5 ! if no, skip
! ---------------------------------------------------------------------
! &&bit&&!(den&(1L<<31)))
! ---------------------------------------------------------------------
bltz $r1, .L5 ! if den<0, skip
! ---------------------------------------------------------------------
! { den<<=1;
! bit<<=1;
! }
! ---------------------------------------------------------------------
#if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
clz $r3, $r1 ! $r3 <- leading zero count for den
clz $ta, $r0 ! $ta <- leading zero count for num
sub $r3, $r3, $ta ! $r3 <- number of bits to shift
sll $r1, $r1, $r3 ! $r1 <- den
sll $r4, $r4, $r3 ! $r4 <- bit (bit lives in $r4, as in __divsi3)
#else
slli $r1, $r1, 1 ! $r1 <- den<<=1
slli $r4, $r4, 1 ! $r4 <- bit<<=1
b .L6 ! continue loop
#endif
.L5:
! ---------------------------------------------------------------------
! while (bit)
! { if (num>=den)
! ---------------------------------------------------------------------
slt $ta, $r0, $r1 ! $ta <- num<den?
bnez $ta, .L9 ! if yes, skip
! ---------------------------------------------------------------------
! { num-=den;
! res|=bit;
! }
! ---------------------------------------------------------------------
sub $r0, $r0, $r1 ! $r0 <- num-=den
or $r2, $r2, $r4 ! $r2 <- res|=bit
.L9:
! ---------------------------------------------------------------------
! bit>>=1;
! den>>=1;
! }
! }
!!if (modwanted)
!! return num;
!!return res;
! ---------------------------------------------------------------------
srli $r4, $r4, 1 ! $r4 <- bit>>=1
srli $r1, $r1, 1 ! $r1 <- den>>=1
bnez $r4, .L5 ! if bit!=0, continue loop
.L1:
! ---------------------------------------------------------------------
! return res;
! ---------------------------------------------------------------------
move $r0, $r2 ! $r0 <- return value
! ---------------------------------------------------------------------
! ---------------------------------------------------------------------
ret
.size __udivsi3, .-__udivsi3
#endif /* L_udivsi3 */
#ifdef L_udivdi3
!--------------------------------------
#ifdef __big_endian__
#define V1H $r0
#define V1L $r1
#define V2H $r2
#define V2L $r3
#else
#define V1H $r1
#define V1L $r0
#define V2H $r3
#define V2L $r2
#endif
!--------------------------------------
.text
.align 2
.globl __udivdi3
.type __udivdi3, @function
__udivdi3:
! prologue
#ifdef __NDS32_ISA_V3M__
push25 $r8, 0
#else
smw.adm $r6, [$sp], $r8, 2
#endif
! end of prologue
movi $r4, 0
bal __udivmoddi4
! epilogue
#ifdef __NDS32_ISA_V3M__
pop25 $r8, 0
#else
lmw.bim $r6, [$sp], $r8, 2
ret
#endif
.size __udivdi3, .-__udivdi3
#endif /* L_udivdi3 */
#ifdef L_udivmoddi4
.text
.align 2
.globl fudiv_qrnnd
.type fudiv_qrnnd, @function
#ifdef __big_endian__
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#define W6H $r4
#define W6L $r5
#define OFFSET_L 4
#define OFFSET_H 0
#else
#define P1H $r1
#define P1L $r0
#define P2H $r3
#define P2L $r2
#define W6H $r5
#define W6L $r4
#define OFFSET_L 0
#define OFFSET_H 4
#endif
fudiv_qrnnd:
!------------------------------------------------------
! function: fudiv_qrnnd (quotient, remainder, high_numerator, low_numerator, denominator)
! Divides the UDWtype composed of the UWtype integers HIGH_NUMERATOR
! (in $r4) and LOW_NUMERATOR (in $r5) by DENOMINATOR (in $r6), placing
! the quotient in $r7 and the remainder in $r8.
!------------------------------------------------------
! in reg:$r4(n1), $r5(n0), $r6(d0)
! __d1 = ((USItype) (d) >> ((4 * 8) / 2));
! __d0 = ((USItype) (d) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
! __r1 = (n1) % __d1;
! __q1 = (n1) / __d1;
! __m = (USItype) __q1 * __d0;
! __r1 = __r1 * ((USItype) 1 << ((4 * 8) / 2)) | ((USItype) (n0) >> ((4 * 8) / 2));
! if (__r1 < __m)
! {
!------------------------------------------------------
smw.adm $r0, [$sp], $r4, 2 ! save $r0-$r4 and $lp ($lp must be saved when BASELINE_V1 is used)
srli $r7, $r6, 16 ! $r7 = d1 =__ll_highpart (d)
movi $ta, 65535
and $r8, $r6, $ta ! $r8 = d0 = __ll_lowpart (d)
divr $r9, $r10, $r4, $r7 ! $r9 = q1, $r10 = r1
and $r4, $r5, $ta ! $r4 = __ll_lowpart (n0)
slli $r10, $r10, 16 ! $r10 = r1 << 16
srli $ta, $r5, 16 ! $ta = __ll_highpart (n0)
or $r10, $r10, $ta ! $r10 <- (r1 << 16) | highpart (n0) = __r1
mul $r5, $r9, $r8 ! $r5 = m = __q1*__d0
slt $ta, $r10, $r5 ! $ta <- __r1<__m
beqz $ta, .L2 !if __r1 >= __m, skip
!------------------------------------------------------
! __q1--, __r1 += (d);
! if (__r1 >= (d))
! {
!------------------------------------------------------
add $r10, $r10, $r6 !$r10 <- __r1+d=__r1
addi $r9, $r9, -1 !$r9 <- __q1--=__q1
slt $ta, $r10, $r6 !$ta <- __r1<d
bnez $ta, .L2 !if yes,skip
!------------------------------------------------------
! if (__r1 < __m)
! {
!------------------------------------------------------
slt $ta, $r10, $r5 !$ta <- __r1<__m
beqz $ta, .L2 !if __r1 >= __m, skip
!------------------------------------------------------
! __q1--, __r1 += (d);
! }
! }
! }
!------------------------------------------------------
addi $r9, $r9, -1 !$r9 <- __q1--=__q1
add $r10, $r10, $r6 !$r10 <- __r1+d=__r1
.L2:
!------------------------------------------------------
! __r1 -= __m;
! __r0 = __r1 % __d1;
! __q0 = __r1 / __d1;
! __m = (USItype) __q0 * __d0;
! __r0 = __r0 * ((USItype) 1 << ((4 * 8) / 2)) \
! | ((USItype) (n0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
! if (__r0 < __m)
! {
!------------------------------------------------------
sub $r10, $r10, $r5 !$r10 <- __r1-__m=__r1
divr $r7, $r10, $r10, $r7 !$r7 <- r1/__d1=__q0,$r10 <- r1%__d1=__r0
slli $r10, $r10, 16 !$r10 <- __r0<<16
mul $r5, $r8, $r7 !$r5 <- __q0*__d0=__m
or $r10, $r4, $r10 !$r10 <- (__r0 << 16)|__ll_lowpart (n0) = __r0
slt $ta, $r10, $r5 !$ta <- __r0<__m
beqz $ta, .L5 !if __r0 >= __m, skip
!------------------------------------------------------
! __q0--, __r0 += (d);
! if (__r0 >= (d))
! {
!------------------------------------------------------
add $r10, $r10, $r6 !$r10 <- __r0+d=__r0
addi $r7, $r7, -1 !$r7 <- __q0--=__q0
slt $ta, $r10, $r6 !$ta <- __r0<d
bnez $ta, .L5 !if yes,skip
!------------------------------------------------------
! if (__r0 < __m)
! {
!------------------------------------------------------
slt $ta, $r10, $r5 !$ta <- __r0<__m
beqz $ta, .L5 !if __r0 >= __m, skip
!------------------------------------------------------
! __q0--, __r0 += (d);
! }
! }
! }
!------------------------------------------------------
add $r10, $r10, $r6 !$r10 <- __r0+d=__r0
addi $r7, $r7, -1 !$r7 <- __q0--=__q0
.L5:
!------------------------------------------------------
! __r0 -= __m;
! *q = (USItype) __q1 * ((USItype) 1 << ((4 * 8) / 2)) | __q0;
! *r = __r0;
!}
!------------------------------------------------------
sub $r8, $r10, $r5 !$r8 = r = r0 = __r0-__m
slli $r9, $r9, 16 !$r9 <- __q1<<16
or $r7, $r9, $r7 !$r7 = q = $r9|__q0
lmw.bim $r0, [$sp], $r4, 2
ret
.size fudiv_qrnnd, .-fudiv_qrnnd
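!------------------------------------------------------
! A hedged C consolidation of fudiv_qrnnd above (the classic
! half-word long division of longlong.h's __udiv_qrnnd_c; n1:n0 is
! the 64-bit numerator, d the denominator, and n1 < d on entry):
!
! void fudiv_qrnnd (unsigned *q, unsigned *r,
!                   unsigned n1, unsigned n0, unsigned d)
! {
!   unsigned d1 = d >> 16, d0 = d & 0xffff;
!   unsigned q1 = n1 / d1, r1 = n1 % d1, m = q1 * d0;
!   r1 = (r1 << 16) | (n0 >> 16);
!   if (r1 < m)
!     {
!       q1--, r1 += d;
!       if (r1 >= d && r1 < m)     /* no carry on the add: once more */
!         q1--, r1 += d;
!     }
!   r1 -= m;
!   unsigned q0 = r1 / d1, r0 = r1 % d1;
!   m = q0 * d0;
!   r0 = (r0 << 16) | (n0 & 0xffff);
!   if (r0 < m)
!     {
!       q0--, r0 += d;
!       if (r0 >= d && r0 < m)
!         q0--, r0 += d;
!     }
!   *q = (q1 << 16) | q0;
!   *r = r0 - m;
! }
!------------------------------------------------------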
.align 2
.globl __udivmoddi4
.type __udivmoddi4, @function
__udivmoddi4:
! =====================================================================
! stack allocation:
! sp+40 +------------------+
! | q1 |
! sp+36 +------------------+
! | q0 |
! sp+32 +------------------+
! | bm |
! sp+28 +------------------+
! | $lp |
! sp+24 +------------------+
! | $fp |
! sp+20 +------------------+
! | $r6 - $r10 |
! sp +------------------+
! =====================================================================
addi $sp, $sp, -40
smw.bi $r6, [$sp], $r10, 10
!------------------------------------------------------
! d0 = dd.s.low;
! d1 = dd.s.high;
! n0 = nn.s.low;
! n1 = nn.s.high;
! if (d1 == 0)
! {
!------------------------------------------------------
move $fp, $r4 !$fp <- rp
bnez P2H, .L9 !if d1 != 0, go .L9
!------------------------------------------------------
! if (d0 > n1)
! {
!------------------------------------------------------
slt $ta, P1H, P2L !$ta <- n1<d0
beqz $ta, .L10 !if d0 <= n1, skip
#ifndef __NDS32_PERF_EXT__
smw.adm $r0, [$sp], $r5, 0
move $r0, P2L
bal __clzsi2
move $r7, $r0
lmw.bim $r0, [$sp], $r5, 0
#else
clz $r7, P2L
#endif
swi $r7, [$sp+(28)]
beqz $r7, .L18 !if bm == 0, skip the shift
!------------------------------------------------------
! d0 = d0 << bm;
! n1 = (n1 << bm) | (n0 >> ((4 * 8) - bm));
! n0 = n0 << bm;
! }
!------------------------------------------------------
subri $r5, $r7, 32 !$r5 <- 32-bm
srl $r5, P1L, $r5 !$r5 <- n0>>$r5
sll $r6, P1H, $r7 !$r6 <- n1<<bm
or P1H, $r6, $r5 !P1H <- $r5|$r6=n1
sll P1L, P1L, $r7 !P1L <- n0<<bm=n0
sll P2L, P2L, $r7 !P2L <- d0<<bm=d0
.L18:
!------------------------------------------------------
! fudiv_qrnnd (&q0, &n0, n1, n0, d0);
! q1 = 0;
! } #if (d0 > n1)
!------------------------------------------------------
move $r4,P1H ! give fudiv_qrnnd args
move $r5,P1L !
move $r6,P2L !
bal fudiv_qrnnd !calculate q0, n0
movi $r6, 0 !$r6 <- q1 = 0
swi $r7,[$sp+(32)] !store q0
swi $r6,[$sp+(36)] !store q1
move P1L,$r8 !n0
b .L19
.L10:
!------------------------------------------------------
! else #if (d0 > n1)
! {
! if(d0 == 0)
!------------------------------------------------------
bnez P2L, .L20 !if d0 != 0, skip
!------------------------------------------------------
! d0 = 1 / d0;
!------------------------------------------------------
movi $r4, 1 !$r4 <- 1
divr P2L, $r4, $r4, P2L !P2L=1/d0, $r4=1%d0
.L20:
#ifndef __NDS32_PERF_EXT__
smw.adm $r0, [$sp], $r5, 0
move $r0, P2L
bal __clzsi2
move $r7, $r0
lmw.bim $r0, [$sp], $r5, 0
#else
clz $r7, P2L
#endif
swi $r7,[$sp+(28)] ! store bm
beqz $r7, .L28 ! if bm == 0, skip
!------------------------------------------------------
! b = (4 * 8) - bm;
! d0 = d0 << bm;
! n2 = n1 >> b;
! n1 = (n1 << bm) | (n0 >> b);
! n0 = n0 << bm;
! fudiv_qrnnd (&q1, &n1, n2, n1, d0);
! }
!------------------------------------------------------
subri $r10, $r7, 32 !$r10 <- 32-bm=b
srl $r4, P1L, $r10 !$r4 <- n0>>b
sll $r5, P1H, $r7 !$r5 <- n1<<bm
or $r5, $r5, $r4 !$r5 <- $r5|$r4=n1 (arg for fudiv_qrnnd)
sll P2L, P2L, $r7 !P2L <- d0<<bm=d0 (arg for fudiv_qrnnd)
sll P1L, P1L, $r7 !P1L <- n0<<bm=n0
srl $r4, P1H, $r10 !$r4 <- n1>>b=n2 (arg for fudiv_qrnnd)
move $r6,P2L !arg for fudiv_qrnnd
bal fudiv_qrnnd !calculate q1, n1
swi $r7,[$sp+(36)] ! q1 store
move P1H,$r8 ! n1 store
move $r4,$r8 ! prepare for next fudiv_qrnnd()
move $r5,P1L
move $r6,P2L
b .L29
.L28:
!------------------------------------------------------
! else // bm != 0
! {
! n1 -= d0;
! q1 = 1;
!
!------------------------------------------------------
sub P1H, P1H, P2L !P1H <- n1-d0=n1
movi $ta, 1 !
swi $ta, [$sp+(36)] !1 -> [$sp+(36)]
move $r4,P1H ! give fudiv_qrnnd args
move $r5,P1L
move $r6,P2L
.L29:
!------------------------------------------------------
! fudiv_qrnnd (&q0, &n0, n1, n0, d0);
!------------------------------------------------------
bal fudiv_qrnnd !calculate q0, n0
swi $r7,[$sp+(32)] !q0 store
move P1L,$r8 !n0
.L19:
!------------------------------------------------------
! if (rp != 0)
! {
!------------------------------------------------------
beqz $fp, .L31 !if rp == 0, skip
!------------------------------------------------------
! rr.s.low = n0 >> bm;
! rr.s.high = 0;
! *rp = rr.ll;
! }
!------------------------------------------------------
movi $r5, 0 !$r5 <- 0
lwi $r7,[$sp+(28)] !load bm
srl $r4, P1L, $r7 !$r4 <- n0>>bm
swi $r4, [$fp+OFFSET_L] !rr.s.low
swi $r5, [$fp+OFFSET_H] !rr.s.high = 0
b .L31
.L9:
!------------------------------------------------------
! else # of if (d1 == 0)
! {
! if(d1 > n1)
! {
!------------------------------------------------------
slt $ta, P1H, P2H !$ta <- n1<d1
beqz $ta, .L32 !if d1 <= n1, skip
!------------------------------------------------------
! q0 = 0;
! q1 = 0;
! if (rp != 0)
! {
!------------------------------------------------------
movi $r5, 0 !$r5 <- 0
swi $r5, [$sp+(32)] !0 -> [$sp+(32)]=q0
swi $r5, [$sp+(36)] !0 -> [$sp+(36)]=q1
beqz $fp, .L31 !if rp == 0, skip
!------------------------------------------------------
! rr.s.low = n0;
! rr.s.high = n1;
! *rp = rr.ll;
! }
!------------------------------------------------------
swi P1L, [$fp+OFFSET_L] !P1L -> [rp]
swi P1H, [$fp+OFFSET_H] !P1H -> [rp+4]
b .L31
.L32:
#ifndef __NDS32_PERF_EXT__
smw.adm $r0, [$sp], $r5, 0
move $r0, P2H
bal __clzsi2
move $r7, $r0
lmw.bim $r0, [$sp], $r5, 0
#else
clz $r7,P2H
#endif
swi $r7,[$sp+(28)] !$r7=bm store
beqz $r7, .L42 !if bm == 0, skip
!------------------------------------------------------
! USItype m1, m0;
! b = (4 * 8) - bm;
! d1 = (d0 >> b) | (d1 << bm);
! d0 = d0 << bm;
! n2 = n1 >> b;
! n1 = (n0 >> b) | (n1 << bm);
! n0 = n0 << bm;
! fudiv_qrnnd (&q0, &n1, n2, n1, d1);
!------------------------------------------------------
subri $r10, $r7, 32 !$r10 <- 32-bm=b
srl $r5, P2L, $r10 !$r5 <- d0>>b
sll $r6, P2H, $r7 !$r6 <- d1<<bm
or $r6, $r5, $r6 !$r6 <- $r5|$r6=d1 (arg for fudiv_qrnnd)
move P2H, $r6 !P2H <- d1
srl $r4, P1H, $r10 !$r4 <- n1>>b=n2 (arg for fudiv_qrnnd)
srl $r8, P1L, $r10 !$r8 <- n0>>b
sll $r9, P1H, $r7 !$r9 <- n1<<bm
or $r5, $r8, $r9 !$r5 <- $r8|$r9=n1 (arg for fudiv_qrnnd)
sll P2L, P2L, $r7 !P2L <- d0<<bm=d0
sll P1L, P1L, $r7 !P1L <- n0<<bm=n0
bal fudiv_qrnnd !calculate q0, n1
swi $r7,[$sp+(32)] !store q0
move P1H,$r8 !n1 from fudiv_qrnnd (&q0, &n1, n2, n1, d1)
move $r6, $r7 !$r6 <- q0
!----------------------------------------------------
! #umul_ppmm (m1, m0, q0, d0);
! do
! { USItype __x0, __x1, __x2, __x3;
! USItype __ul, __vl, __uh, __vh;
! __ul = ((USItype) (q0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
! __uh = ((USItype) (q0) >> ((4 * 8) / 2));
! __vl = ((USItype) (d0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
! __vh = ((USItype) (d0) >> ((4 * 8) / 2));
! __x0 = (USItype) __ul * __vl;
! __x1 = (USItype) __ul * __vh;
! __x2 = (USItype) __uh * __vl;
! __x3 = (USItype) __uh * __vh;
! __x1 += ((USItype) (__x0) >> ((4 * 8) / 2));
! __x1 += __x2;
! if (__x1 < __x2)
! __x3 += ((USItype) 1 << ((4 * 8) / 2));
! (m1) = __x3 + ((USItype) (__x1) >> ((4 * 8) / 2));
! (m0) = (USItype)(q0*d0);
! }
! if (m1 > n1)
!---------------------------------------------------
#ifdef __NDS32_ISA_V3M__
!mulr64 $r4, P2L, $r6
smw.adm $r0, [$sp], $r3, 0
move P1L, P2L
move P2L, $r6
movi P1H, 0
movi P2H, 0
bal __muldi3
movd44 $r4, $r0
lmw.bim $r0, [$sp], $r3, 0
move $r8, W6H
move $r5, W6L
#else
mulr64 $r4, P2L, $r6
move $r8, W6H
move $r5, W6L
#endif
slt $ta, P1H, $r8 !$ta <- n1<m1
bnez $ta, .L46 !if m1 > n1, go .L46
!------------------------------------------------------
! if(m1 == n1)
!------------------------------------------------------
bne $r8, P1H, .L45 !if m1 != n1, skip
!------------------------------------------------------
! if(m0 > n0)
!------------------------------------------------------
slt $ta, P1L, $r5 !$ta <- n0<m0
beqz $ta, .L45 !if m0 <= n0, skip
.L46:
!------------------------------------------------------
! {
! q0--;
! # sub_ddmmss (m1, m0, m1, m0, d1, d0);
! do
! { USItype __x;
! __x = (m0) - (d0);
! (m1) = (m1) - (d1) - (__x > (m0));
! (m0) = __x;
! }
! }
!------------------------------------------------------
sub $r4, $r5, P2L !$r4 <- m0-d0=__x
addi $r6, $r6, -1 !$r6 <- q0--=q0
sub $r8, $r8, P2H !$r8 <- m1-d1
swi $r6, [$sp+(32)] ! q0 !$r6->[$sp+(32)]
slt $ta, $r5, $r4 !$ta <- m0<__x
sub $r8, $r8, $ta !$r8 <- m1-d1-borrow=m1
move $r5, $r4 !$r5 <- __x=m0
.L45:
!------------------------------------------------------
! q1 = 0;
! if (rp != 0)
! {
!------------------------------------------------------
movi $r4, 0 !$r4 <- 0
swi $r4, [$sp+(36)] !0 -> [$sp+(36)]=q1
beqz $fp, .L31 !if rp == 0, skip
!------------------------------------------------------
! # sub_ddmmss (n1, n0, n1, n0, m1, m0);
! do
! { USItype __x;
! __x = (n0) - (m0);
! (n1) = (n1) - (m1) - (__x > (n0));
! (n0) = __x;
! }
! rr.s.low = (n1 << b) | (n0 >> bm);
! rr.s.high = n1 >> bm;
! *rp = rr.ll;
!------------------------------------------------------
sub $r4, P1H, $r8 !$r4 <- n1-m1
sub $r6, P1L, $r5 !$r6 <- n0-m0=__x=n0
slt $ta, P1L, $r6 !$ta <- n0<__x
sub P1H, $r4, $ta !P1H <- $r4-$ta=n1
move P1L, $r6
lwi $r7,[$sp+(28)] ! load bm
subri $r10,$r7,32
sll $r4, P1H, $r10 !$r4 <- n1<<b
srl $r5, P1L, $r7 !$r5 <- __x>>bm
or $r6, $r5, $r4 !$r6 <- $r5|$r4=rr.s.low
srl $r8, P1H, $r7 !$r8 <- n1>>bm =rr.s.high
swi $r6, [$fp+OFFSET_L] !
swi $r8, [$fp+OFFSET_H] !
b .L31
.L42:
!------------------------------------------------------
! else
! {
! if(n1 > d1)
!------------------------------------------------------
slt $ta, P2H, P1H !$ta <- P2H<P1H
bnez $ta, .L52 !if n1 > d1, go .L52
!------------------------------------------------------
! if (n0 >= d0)
!------------------------------------------------------
slt $ta, P1L, P2L !$ta <- P1L<P2L
bnez $ta, .L51 !if n0 < d0, go .L51
!------------------------------------------------------
! q0 = 1;
! do
! { USItype __x;
! __x = (n0) - (d0);
! (n1) = (n1) - (d1) - (__x > (n0));
! (n0) = __x;
! }
!------------------------------------------------------
.L52:
sub $r4, P1H, P2H !$r4 <- P1H-P2H
sub $r6, P1L, P2L !$r6 <- n0-d0=__x=n0
slt $ta, P1L, $r6 !$ta <- n0<__x
sub P1H, $r4, $ta !P1H <- $r4-$ta=n1
move P1L, $r6 !n0
movi $r5, 1 !
swi $r5, [$sp+(32)] !1 -> [$sp+(32)]=q0
b .L54
.L51:
!------------------------------------------------------
! q0 = 0;
!------------------------------------------------------
movi $r5,0
swi $r5, [$sp+(32)] !$r5=0 -> [$sp+(32)]
.L54:
!------------------------------------------------------
! q1 = 0;
! if (rp != 0)
! {
!------------------------------------------------------
movi $r5, 0 !
swi $r5, [$sp+(36)] !0 -> [$sp+(36)]
beqz $fp, .L31
!------------------------------------------------------
! rr.s.low = n0;
! rr.s.high = n1;
! *rp = rr.ll;
! }
!------------------------------------------------------
swi P1L, [$fp+OFFSET_L] !remainder
swi P1H, [$fp+OFFSET_H] !
.L31:
!------------------------------------------------------
! const DWunion ww = {{.low = q0, .high = q1}};
! return ww.ll;
!}
!------------------------------------------------------
lwi P1L, [$sp+(32)] !quotient
lwi P1H, [$sp+(36)]
lmw.bim $r6, [$sp], $r10, 10
addi $sp, $sp, 12
ret
.size __udivmoddi4, .-__udivmoddi4
#endif /* L_udivmoddi4 */
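! ---------------------------------------------------------------------
! A hedged structural outline (C-like pseudocode) of __udivmoddi4
! above, following the classic libgcc algorithm quoted in the
! interleaved comments; bm is the clz-based normalization count and
! fudiv_qrnnd () is the helper defined earlier in this module:
!
! if (d1 == 0)
!   {
!     if (d0 > n1)                      /* one-word quotient */
!       { bm = clz (d0); shift n1:n0 and d0 left by bm;
!         fudiv_qrnnd (&q0, &n0, n1, n0, d0); q1 = 0; }
!     else                              /* two-word quotient */
!       { if (d0 == 0) d0 = 1 / d0;     /* divide by zero on purpose */
!         bm = clz (d0); shift left by bm, n2 = spilled bits of n1
!         (bm == 0 case: n1 -= d0, q1 = 1 instead);
!         fudiv_qrnnd (&q1, &n1, n2, n1, d0);
!         fudiv_qrnnd (&q0, &n0, n1, n0, d0); }
!     if (rp) { rr.low = n0 >> bm; rr.high = 0; *rp = rr; }
!   }
! else if (d1 > n1)                     /* quotient is zero */
!   { q1 = q0 = 0; if (rp) *rp = n1:n0; }
! else
!   { bm = clz (d1); if (bm) shift both operands left by bm;
!     fudiv_qrnnd (&q0, &n1, n2, n1, d1);
!     umul_ppmm (m1, m0, q0, d0);
!     if (m1:m0 > n1:n0) { q0--; m1:m0 -= d1:d0; }
!     q1 = 0; if (rp) *rp = (n1:n0 - m1:m0) >> bm; }
! ---------------------------------------------------------------------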
#ifdef L_umodsi3
! =====================================================================
.text
.align 2
.globl __umodsi3
.type __umodsi3, @function
__umodsi3:
! ---------------------------------------------------------------------
!!res=udivmodsi4(a,b,1);
! if (den==0)
! return num;
! ---------------------------------------------------------------------
beqz $r1, .L1 ! if den==0, skip
! ---------------------------------------------------------------------
! bit=1;
! res=0;
! ---------------------------------------------------------------------
movi $r4, 1 ! $r4 <- bit=1
#ifndef __OPTIMIZE_SIZE__
.L6:
#endif
! ---------------------------------------------------------------------
! while (den<num
! ---------------------------------------------------------------------
slt $ta, $r1, $r0 ! $ta <- den<num?
beqz $ta, .L5 ! if no, skip
! ---------------------------------------------------------------------
! &&bit&&!(den&(1L<<31)))
! ---------------------------------------------------------------------
bltz $r1, .L5 ! if den<0, skip
! ---------------------------------------------------------------------
! { den<<=1;
! bit<<=1;
! }
! ---------------------------------------------------------------------
#if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
clz $r3, $r1 ! $r3 <- leading zero count for den
clz $ta, $r0 ! $ta <- leading zero count for num
sub $r3, $r3, $ta ! $r3 <- number of bits to shift
sll $r1, $r1, $r3 ! $r1 <- den
sll $r4, $r4, $r3 ! $r4 <- bit
#else
slli $r1, $r1, 1 ! $r1 <- den<<=1
slli $r4, $r4, 1 ! $r4 <- bit<<=1
b .L6 ! continue loop
#endif
.L5:
! ---------------------------------------------------------------------
! while (bit)
! { if (num>=den)
! { num-=den;
! res|=bit;
! }
! bit>>=1;
! den>>=1;
! }
!!if (modwanted)
!! return num;
!!return res;
! ---------------------------------------------------------------------
sub $r2, $r0, $r1 ! $r2 <- num-den
slt $ta, $r0, $r1 ! $ta <- num<den?
srli $r4, $r4, 1 ! $r4 <- bit>>=1
cmovz $r0, $r2, $ta ! $r0 <- num=(num<den)?num:num-den
srli $r1, $r1, 1 ! $r1 <- den>>=1
bnez $r4, .L5 ! if bit!=0, continue loop
.L1:
! ---------------------------------------------------------------------
! return res;
! ---------------------------------------------------------------------
ret
.size __umodsi3, .-__umodsi3
#endif /* L_umodsi3 */
#ifdef L_umoddi3
!--------------------------------------
#ifdef __big_endian__
#define V1H $r0
#define V1L $r1
#define V2H $r2
#define V2L $r3
#else
#define V1H $r1
#define V1L $r0
#define V2H $r3
#define V2L $r2
#endif
!--------------------------------------
.text
.align 2
.globl __umoddi3
.type __umoddi3, @function
__umoddi3:
! prologue
addi $sp, $sp, -12
swi $lp, [$sp+(0)]
! end of prologue
addi $r4, $sp, 4
bal __udivmoddi4
lwi $r0, [$sp+(4)] ! remainder from __udivmoddi4: low word in LE mode, high word in BE mode
lwi $r1, [$sp+(8)] ! the other half of the remainder
.L82:
! epilogue
lwi $lp, [$sp+(0)]
addi $sp, $sp, 12
ret
.size __umoddi3, .-__umoddi3
#endif /* L_umoddi3 */
#ifdef L_muldi3
#ifdef __big_endian__
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#define V2H $r4
#define V2L $r5
#else
#define P1H $r1
#define P1L $r0
#define P2H $r3
#define P2L $r2
#define V2H $r5
#define V2L $r4
#endif
! ====================================================================
.text
.align 2
.globl __muldi3
.type __muldi3, @function
__muldi3:
! parameter passing for libgcc functions normally involves 2 doubles
!---------------------------------------
#ifdef __NDS32_ISA_V3M__
! There is no mulr64 instruction in Andes ISA V3M.
! So we must provide a sequence of calculations to complete the job.
smw.adm $r6, [$sp], $r9, 0x0
zeh33 $r4, P1L
srli $r7, P1L, 16
zeh33 $r5, P2L
mul $r6, $r5, $r4
mul33 $r5, $r7
srli $r8, P2L, 16
mov55 $r9, $r5
maddr32 $r9, $r8, $r4
srli $r4, $r6, 16
add $r4, $r9, $r4
slt45 $r4, $r5
slli $r5, $r15, 16
maddr32 $r5, $r8, $r7
mul P2L, P1H, P2L
srli $r7, $r4, 16
maddr32 P2L, P2H, P1L
add333 P1H, $r5, $r7
slli $r4, $r4, 16
zeh33 $r6, $r6
add333 P1L, $r4, $r6
add333 P1H, P2L, P1H
lmw.bim $r6, [$sp], $r9, 0x0
ret
#else /* not __NDS32_ISA_V3M__ */
mul $ta, P1L, P2H
mulr64 $r4, P1L, P2L
maddr32 $ta, P1H, P2L
move P1L, V2L
add P1H, $ta, V2H
ret
#endif /* not __NDS32_ISA_V3M__ */
.size __muldi3, .-__muldi3
#endif /* L_muldi3 */
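! ---------------------------------------------------------------------
! A hedged C sketch of what the V3M sequence above computes: a full
! 32x32->64 product built from 16-bit halves (the umul_ppmm scheme of
! longlong.h), plus the two truncated cross terms of the 64x64->64
! product:
!
! unsigned long long muldi3 (unsigned long long u, unsigned long long v)
! {
!   unsigned ul = (unsigned) u, uh = (unsigned) (u >> 32);
!   unsigned vl = (unsigned) v, vh = (unsigned) (v >> 32);
!   unsigned x0 = (ul & 0xffff) * (vl & 0xffff);
!   unsigned x1 = (ul & 0xffff) * (vl >> 16);
!   unsigned x2 = (ul >> 16)    * (vl & 0xffff);
!   unsigned x3 = (ul >> 16)    * (vl >> 16);
!   x1 += x0 >> 16;
!   x1 += x2;
!   if (x1 < x2)                  /* carry into the high word */
!     x3 += 1u << 16;
!   unsigned lo = (x1 << 16) | (x0 & 0xffff);
!   unsigned hi = x3 + (x1 >> 16);
!   hi += ul * vh + uh * vl;      /* truncated cross terms */
!   return ((unsigned long long) hi << 32) | lo;
! }
! ---------------------------------------------------------------------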
#ifdef L_addsub_df
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#define P3L $r4
#define P3H $r5
#define O1L $r7
#define O1H $r8
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#define P3H $r4
#define P3L $r5
#define O1H $r7
#define O1L $r8
#endif
.text
.align 2
.global __subdf3
.type __subdf3, @function
__subdf3:
push $lp
pushm $r6, $r10
move $r4, #0x80000000
xor P2H, P2H, $r4
j .Lsdpadd
.global __adddf3
.type __adddf3, @function
__adddf3:
push $lp
pushm $r6, $r10
.Lsdpadd:
slli $r6, P1H, #1
srli $r6, $r6, #21
slli P3H, P1H, #11
srli $r10, P1L, #21
or P3H, P3H, $r10
slli P3L, P1L, #11
move O1L, #0x80000000
or P3H, P3H, O1L
slli $r9, P2H, #1
srli $r9, $r9, #21
slli O1H, P2H, #11
srli $r10, P2L, #21
or O1H, O1H, $r10
or O1H, O1H, O1L
slli O1L, P2L, #11
addi $r10, $r6, #-1
slti $r15, $r10, #0x7fe
beqzs8 .LEspecA
.LElab1:
addi $r10, $r9, #-1
slti $r15, $r10, #0x7fe
beqzs8 .LEspecB
.LElab2:
#NORMd($r4, P2L, P1L)
bnez P3H, .LL1
bnez P3L, .LL2
move $r6, #0
j .LL3
.LL2:
move P3H, P3L
move P3L, #0
move P2L, #32
sub $r6, $r6, P2L
.LL1:
#ifndef __big_endian__
#ifdef __NDS32_PERF_EXT__
clz $r2, $r5
#else
pushm $r0, $r1
pushm $r3, $r5
move $r0, $r5
bal __clzsi2
move $r2, $r0
popm $r3, $r5
popm $r0, $r1
#endif
#else /* __big_endian__ */
#ifdef __NDS32_PERF_EXT__
clz $r3, $r4
#else
pushm $r0, $r2
pushm $r4, $r5
move $r0, $r4
bal __clzsi2
move $r3, $r0
popm $r4, $r5
popm $r0, $r2
#endif
#endif /* __big_endian__ */
beqz P2L, .LL3
sub $r6, $r6, P2L
subri P1L, P2L, #32
srl P1L, P3L, P1L
sll P3L, P3L, P2L
sll P3H, P3H, P2L
or P3H, P3H, P1L
.LL3:
#NORMd End
#NORMd($r7, P2L, P1L)
bnez O1H, .LL4
bnez O1L, .LL5
move $r9, #0
j .LL6
.LL5:
move O1H, O1L
move O1L, #0
move P2L, #32
sub $r9, $r9, P2L
.LL4:
#ifndef __big_endian__
#ifdef __NDS32_PERF_EXT__
clz $r2, O1H
#else
pushm $r0, $r1
pushm $r3, $r5
move $r0, O1H
bal __clzsi2
move $r2, $r0
popm $r3, $r5
popm $r0, $r1
#endif
#else /* __big_endian__ */
#ifdef __NDS32_PERF_EXT__
clz $r3, O1H
#else
pushm $r0, $r2
pushm $r4, $r5
move $r0, O1H
bal __clzsi2
move $r3, $r0
popm $r4, $r5
popm $r0, $r2
#endif
#endif /* __big_endian__ */
beqz P2L, .LL6
sub $r9, $r9, P2L
subri P1L, P2L, #32
srl P1L, O1L, P1L
sll O1L, O1L, P2L
sll O1H, O1H, P2L
or O1H, O1H, P1L
.LL6:
#NORMd End
move $r10, #0x80000000
and P1H, P1H, $r10
beq $r6, $r9, .LEadd3
slts $r15, $r9, $r6
beqzs8 .Li1
sub $r9, $r6, $r9
move P2L, #0
.LL7:
move $r10, #0x20
slt $r15, $r9, $r10
bnezs8 .LL8
or P2L, P2L, O1L
move O1L, O1H
move O1H, #0
addi $r9, $r9, #0xffffffe0
bnez O1L, .LL7
.LL8:
beqz $r9, .LEadd3
move P1L, O1H
move $r10, O1L
srl O1L, O1L, $r9
srl O1H, O1H, $r9
subri $r9, $r9, #0x20
sll P1L, P1L, $r9
or O1L, O1L, P1L
sll $r10, $r10, $r9
or P2L, P2L, $r10
beqz P2L, .LEadd3
ori O1L, O1L, #1
j .LEadd3
.Li1:
move $r15, $r6
move $r6, $r9
sub $r9, $r9, $r15
move P2L, #0
.LL10:
move $r10, #0x20
slt $r15, $r9, $r10
bnezs8 .LL11
or P2L, P2L, P3L
move P3L, P3H
move P3H, #0
addi $r9, $r9, #0xffffffe0
bnez P3L, .LL10
.LL11:
beqz $r9, .LEadd3
move P1L, P3H
move $r10, P3L
srl P3L, P3L, $r9
srl P3H, P3H, $r9
subri $r9, $r9, #0x20
sll P1L, P1L, $r9
or P3L, P3L, P1L
sll $r10, $r10, $r9
or P2L, P2L, $r10
beqz P2L, .LEadd3
ori P3L, P3L, #1
.LEadd3:
xor $r10, P1H, P2H
sltsi $r15, $r10, #0
bnezs8 .LEsub1
#ADD(P3L, O1L)
add P3L, P3L, O1L
slt $r15, P3L, O1L
#ADDCC(P3H, O1H)
beqzs8 .LL13
add P3H, P3H, O1H
slt $r15, P3H, O1H
beqzs8 .LL14
addi P3H, P3H, #0x1
j .LL15
.LL14:
move $r15, #1
add P3H, P3H, $r15
slt $r15, P3H, $r15
j .LL15
.LL13:
add P3H, P3H, O1H
slt $r15, P3H, O1H
.LL15:
beqzs8 .LEres
andi $r10, P3L, #1
beqz $r10, .Li3
ori P3L, P3L, #2
.Li3:
srli P3L, P3L, #1
slli $r10, P3H, #31
or P3L, P3L, $r10
srli P3H, P3H, #1
move $r10, #0x80000000
or P3H, P3H, $r10
addi $r6, $r6, #1
subri $r15, $r6, #0x7ff
bnezs8 .LEres
move $r10, #0x7ff00000
or P1H, P1H, $r10
move P1L, #0
j .LEretA
.LEsub1:
#SUB(P3L, O1L)
move $r15, P3L
sub P3L, P3L, O1L
slt $r15, $r15, P3L
#SUBCC(P3H, O1H)
beqzs8 .LL16
move $r15, P3H
sub P3H, P3H, O1H
slt $r15, $r15, P3H
beqzs8 .LL17
subi333 P3H, P3H, #1
j .LL18
.LL17:
move $r15, P3H
subi333 P3H, P3H, #1
slt $r15, $r15, P3H
j .LL18
.LL16:
move $r15, P3H
sub P3H, P3H, O1H
slt $r15, $r15, P3H
.LL18:
beqzs8 .Li5
move $r10, #0x80000000
xor P1H, P1H, $r10
subri P3H, P3H, #0
beqz P3L, .LL19
subri P3L, P3L, #0
subi45 P3H, #1
.LL19:
.Li5:
#NORMd($r4, $r9, P1L)
bnez P3H, .LL20
bnez P3L, .LL21
move $r6, #0
j .LL22
.LL21:
move P3H, P3L
move P3L, #0
move $r9, #32
sub $r6, $r6, $r9
.LL20:
#ifdef __NDS32_PERF_EXT__
clz $r9, P3H
#else
pushm $r0, $r5
move $r0, P3H
bal __clzsi2
move $r9, $r0
popm $r0, $r5
#endif
beqz $r9, .LL22
sub $r6, $r6, $r9
subri P1L, $r9, #32
srl P1L, P3L, P1L
sll P3L, P3L, $r9
sll P3H, P3H, $r9
or P3H, P3H, P1L
.LL22:
#NORMd End
or $r10, P3H, P3L
bnez $r10, .LEres
move P1H, #0
.LEres:
blez $r6, .LEund
.LElab8:
#ADD(P3L, $0x400)
move $r15, #0x400
add P3L, P3L, $r15
slt $r15, P3L, $r15
#ADDCC(P3H, $0x0)
beqzs8 .LL25
add P3H, P3H, $r15
slt $r15, P3H, $r15
.LL25:
#ADDC($r6, $0x0)
add $r6, $r6, $r15
srli $r10, P3L, #11
andi $r10, $r10, #1
sub P3L, P3L, $r10
srli P1L, P3L, #11
slli $r10, P3H, #21
or P1L, P1L, $r10
slli $r10, P3H, #1
srli $r10, $r10, #12
or P1H, P1H, $r10
slli $r10, $r6, #20
or P1H, P1H, $r10
.LEretA:
.LE999:
popm $r6, $r10
pop $lp
ret5 $lp
.LEspecA:
#ADD(P3L, P3L)
move $r15, P3L
add P3L, P3L, P3L
slt $r15, P3L, $r15
#ADDC(P3H, P3H)
add P3H, P3H, P3H
add P3H, P3H, $r15
bnez $r6, .Li7
or $r10, P3H, P3L
beqz $r10, .Li8
j .LElab1
.Li8:
subri $r15, $r9, #0x7ff
beqzs8 .LEspecB
add P3L, P2H, P2H
or $r10, P3L, P2L
bnez $r10, .LEretB
sltsi $r15, P2H, #0
bnezs8 .LEretA
.LEretB:
move P1L, P2L
move P1H, P2H
j .LE999
.Li7:
or $r10, P3H, P3L
bnez $r10, .LEnan
subri $r15, $r9, #0x7ff
bnezs8 .LEretA
xor $r10, P1H, P2H
sltsi $r15, $r10, #0
bnezs8 .LEnan
j .LEretB
.LEspecB:
#ADD(O1L, O1L)
move $r15, O1L
add O1L, O1L, O1L
slt $r15, O1L, $r15
#ADDC(O1H, O1H)
add O1H, O1H, O1H
add O1H, O1H, $r15
bnez $r9, .Li11
or $r10, O1H, O1L
beqz $r10, .LEretA
j .LElab2
.Li11:
or $r10, O1H, O1L
beqz $r10, .LEretB
.LEnan:
move P1H, #0xfff80000
move P1L, #0
j .LEretA
.LEund:
subri $r9, $r6, #1
move P2L, #0
.LL26:
move $r10, #0x20
slt $r15, $r9, $r10
bnezs8 .LL27
or P2L, P2L, P3L
move P3L, P3H
move P3H, #0
addi $r9, $r9, #0xffffffe0
bnez P3L, .LL26
.LL27:
beqz $r9, .LL28
move P1L, P3H
move $r10, P3L
srl P3L, P3L, $r9
srl P3H, P3H, $r9
subri $r9, $r9, #0x20
sll P1L, P1L, $r9
or P3L, P3L, P1L
sll $r10, $r10, $r9
or P2L, P2L, $r10
beqz P2L, .LL28
ori P3L, P3L, #1
.LL28:
move $r6, #0
j .LElab8
.size __subdf3, .-__subdf3
.size __adddf3, .-__adddf3
#endif /* L_addsub_df */
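/* A hedged C sketch of the #NORMd macro expansions used above: left-
   normalize a two-word significand hi:lo until bit 63 is set,
   adjusting the exponent accordingly (the clz is a hardware insn with
   __NDS32_PERF_EXT__, otherwise a __clzsi2 call):

   void normd (unsigned *hi, unsigned *lo, int *e)
   {
     if (*hi == 0)
       {
         if (*lo == 0)
           {
             *e = 0;                    // true zero
             return;
           }
         *hi = *lo;                     // shift by a whole word
         *lo = 0;
         *e -= 32;
       }
     int c = __builtin_clz (*hi);
     if (c)
       {
         *e -= c;
         *hi = (*hi << c) | (*lo >> (32 - c));
         *lo <<= c;
       }
   }  */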
#ifdef L_mul_sf
#if !defined (__big_endian__)
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#endif
.text
.align 2
.global __mulsf3
.type __mulsf3, @function
__mulsf3:
push $lp
pushm $r6, $r10
srli $r3, $r0, #23
andi $r3, $r3, #0xff
srli $r5, $r1, #23
andi $r5, $r5, #0xff
move $r6, #0x80000000
slli $r2, $r0, #8
or $r2, $r2, $r6
slli $r4, $r1, #8
or $r4, $r4, $r6
xor $r8, $r0, $r1
and $r6, $r6, $r8
addi $r8, $r3, #-1
slti $r15, $r8, #0xfe
beqzs8 .LFspecA
.LFlab1:
addi $r8, $r5, #-1
slti $r15, $r8, #0xfe
beqzs8 .LFspecB
.LFlab2:
move $r10, $r3
/* This is a 64-bit multiply. ($r2, $r7) is (high, low). */
#ifndef __NDS32_ISA_V3M__
mulr64 $r2, $r2, $r4
#else
pushm $r0, $r1
pushm $r4, $r5
move P1L, $r2
movi P1H, #0
move P2L, $r4
movi P2H, #0
bal __muldi3
movd44 $r2, $r0
popm $r4, $r5
popm $r0, $r1
#endif
#ifndef __big_endian__
move $r7, $r2
move $r2, $r3
#else
move $r7, $r3
#endif
move $r3, $r10
beqz $r7, .Li17
ori $r2, $r2, #1
.Li17:
sltsi $r15, $r2, #0
bnezs8 .Li18
slli $r2, $r2, #1
addi $r3, $r3, #-1
.Li18:
addi $r8, $r5, #0xffffff82
add $r3, $r3, $r8
addi $r8, $r3, #-1
slti $r15, $r8, #0xfe
beqzs8 .LFoveund
.LFlab8:
#ADD($r2, $0x80)
move $r15, #0x80
add $r2, $r2, $r15
slt $r15, $r2, $r15
#ADDC($r3, $0x0)
add $r3, $r3, $r15
srli $r8, $r2, #8
andi $r8, $r8, #1
sub $r2, $r2, $r8
slli $r2, $r2, #1
srli $r2, $r2, #9
slli $r8, $r3, #23
or $r2, $r2, $r8
or $r0, $r2, $r6
.LF999:
popm $r6, $r10
pop $lp
ret5 $lp
.LFspecA:
bnez $r3, .Li19
add $r2, $r2, $r2
beqz $r2, .Li20
#ifdef __NDS32_PERF_EXT__
clz $r7, $r2
#else
pushm $r0, $r5
move $r0, $r2
bal __clzsi2
move $r7, $r0
popm $r0, $r5
#endif
sub $r3, $r3, $r7
sll $r2, $r2, $r7
j .LFlab1
.Li20:
subri $r15, $r5, #0xff
beqzs8 .LFnan
j .LFzer
.Li19:
add $r8, $r2, $r2
bnez $r8, .LFnan
bnez $r5, .Li21
add $r8, $r4, $r4
beqz $r8, .LFnan
.Li21:
subri $r15, $r5, #0xff
bnezs8 .LFinf
.LFspecB:
bnez $r5, .Li22
add $r4, $r4, $r4
beqz $r4, .LFzer
#ifdef __NDS32_PERF_EXT__
clz $r7, $r4
#else
pushm $r0, $r5
move $r0, $r4
bal __clzsi2
move $r7, $r0
popm $r0, $r5
#endif
sub $r5, $r5, $r7
sll $r4, $r4, $r7
j .LFlab2
.LFzer:
move $r0, $r6
j .LF999
.Li22:
add $r8, $r4, $r4
bnez $r8, .LFnan
.LFinf:
move $r8, #0x7f800000
or $r0, $r6, $r8
j .LF999
.LFnan:
move $r0, #0xffc00000
j .LF999
.LFoveund:
bgtz $r3, .LFinf
subri $r7, $r3, #1
slti $r15, $r7, #0x20
beqzs8 .LFzer
subri $r8, $r7, #0x20
sll $r3, $r2, $r8
srl $r2, $r2, $r7
beqz $r3, .Li25
ori $r2, $r2, #2
.Li25:
move $r3, #0
addi $r8, $r2, #0x80
sltsi $r15, $r8, #0
beqzs8 .LFlab8
move $r3, #1
j .LFlab8
.size __mulsf3, .-__mulsf3
#endif /* L_mul_sf */
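/* A hedged C sketch of the __mulsf3 core above (normals only; the
   .LFspecA/.LFspecB paths handle zeros, infinities, NaNs and
   subnormals).  With both significands left-justified in [2^31, 2^32),
   the 64-bit product lies in [2^62, 2^64), so at most one shift
   renormalizes; the low word folds into a sticky bit:

   unsigned mul_core (unsigned ea, unsigned ma, unsigned eb, unsigned mb)
   {
     unsigned long long p = (unsigned long long) ma * mb;
     unsigned hi = (unsigned) (p >> 32), lo = (unsigned) p;
     if (lo)
       hi |= 1;                            // sticky
     unsigned e = ea + eb - 0x7e;          // biased exponent, [2,4) case
     if (!(hi & 0x80000000))
       {
         hi <<= 1;                         // product in [1,2): renormalize
         --e;
       }
     if (hi + 0x80 < hi)                   // round to nearest even and
       ++e;                                // pack, exactly as in __addsf3
     hi += 0x80;
     hi -= (hi >> 8) & 1;
     return (e << 23) | ((hi << 1) >> 9);  // sign ORed in by the caller
   }  */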
#ifdef L_mul_df
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#define P3L $r4
#define P3H $r5
#define O1L $r7
#define O1H $r8
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#define P3H $r4
#define P3L $r5
#define O1H $r7
#define O1L $r8
#endif
.text
.align 2
.global __muldf3
.type __muldf3, @function
__muldf3:
push $lp
pushm $r6, $r10
slli $r6, P1H, #1
srli $r6, $r6, #21
slli P3H, P1H, #11
srli $r10, P1L, #21
or P3H, P3H, $r10
slli P3L, P1L, #11
move O1L, #0x80000000
or P3H, P3H, O1L
slli $r9, P2H, #1
srli $r9, $r9, #21
slli O1H, P2H, #11
srli $r10, P2L, #21
or O1H, O1H, $r10
or O1H, O1H, O1L
xor P1H, P1H, P2H
and P1H, P1H, O1L
slli O1L, P2L, #11
addi $r10, $r6, #-1
slti $r15, $r10, #0x7fe
beqzs8 .LFspecA
.LFlab1:
addi $r10, $r9, #-1
slti $r15, $r10, #0x7fe
beqzs8 .LFspecB
.LFlab2:
addi $r10, $r9, #0xfffffc02
add $r6, $r6, $r10
move $r10, $r8
/* This is a 64-bit multiply. */
#ifndef __big_endian__
/* For little endian: ($r9, $r3) is (high, low). */
#ifndef __NDS32_ISA_V3M__
mulr64 $r8, $r5, $r8
#else
pushm $r0, $r5
move $r0, $r5
movi $r1, #0
move $r2, $r8
movi $r3, #0
bal __muldi3
movd44 $r8, $r0
popm $r0, $r5
#endif
move $r3, $r8
#else /* __big_endian__ */
/* For big endian: ($r9, $r2) is (high, low). */
#ifndef __NDS32_ISA_V3M__
mulr64 $r8, $r4, $r7
#else
pushm $r0, $r5
move $r1, $r4
movi $r0, #0
move $r3, $r7
movi $r2, #0
bal __muldi3
movd44 $r8, $r0
popm $r0, $r5
#endif
move $r2, $r9
move $r9, $r8
#endif /* __big_endian__ */
move $r8, $r10
move $r10, P1H
/* This is a 64-bit multiply. */
#ifndef __big_endian__
/* For little endian: ($r0, $r2) is (high, low). */
#ifndef __NDS32_ISA_V3M__
mulr64 $r0, $r4, $r8
#else
pushm $r2, $r5
move $r0, $r4
movi $r1, #0
move $r2, $r8
movi $r3, #0
bal __muldi3
popm $r2, $r5
#endif
move $r2, $r0
move $r0, $r1
#else /* __big_endian__ */
/* For big endian: ($r1, $r3) is (high, low). */
#ifndef __NDS32_ISA_V3M__
mulr64 $r0, $r5, $r7
#else
pushm $r2, $r5
move $r1, $r5
movi $r0, #0
move $r3, $r7
movi $r2, #0
bal __muldi3
popm $r2, $r5
#endif
move $r3, $r1
move $r1, $r0
#endif /* __big_endian__ */
move P1H, $r10
#ADD(P2H, P1L)
add P2H, P2H, P1L
slt $r15, P2H, P1L
#ADDC($r9, $0x0)
add $r9, $r9, $r15
move $r10, P1H
/* This is a 64-bit multiply. */
#ifndef __big_endian__
/* For little endian: ($r0, $r8) is (high, low). */
#ifndef __NDS32_ISA_V3M__
mulr64 $r0, $r5, $r7
#else
pushm $r2, $r5
move $r0, $r5
movi $r1, #0
move $r2, $r7
movi $r3, #0
bal __muldi3
popm $r2, $r5
#endif
move $r8, $r0
move $r0, $r1
#else /* __big_endian__ */
/* For big endian: ($r1, $r7) is (high, low). */
#ifndef __NDS32_ISA_V3M__
mulr64 $r0, $r4, $r8
#else
pushm $r2, $r5
move $r1, $r4
movi $r0, #0
move $r3, $r8
movi $r2, #0
bal __muldi3
popm $r2, $r5
#endif
move $r7, $r1
move $r1, $r0
#endif /* __big_endian__ */
move P1H, $r10
#ADD(P2L, O1H)
add P2L, P2L, O1H
slt $r15, P2L, O1H
#ADDCC(P2H, P1L)
beqzs8 .LL29
add P2H, P2H, P1L
slt $r15, P2H, P1L
beqzs8 .LL30
addi P2H, P2H, #0x1
j .LL31
.LL30:
move $r15, #1
add P2H, P2H, $r15
slt $r15, P2H, $r15
j .LL31
.LL29:
add P2H, P2H, P1L
slt $r15, P2H, P1L
.LL31:
#ADDC($r9, $0x0)
add $r9, $r9, $r15
/* This is a 64-bit multiply. */
#ifndef __big_endian__
/* For little endian: ($r8, $r0) is (high, low). */
move $r10, $r9
#ifndef __NDS32_ISA_V3M__
mulr64 $r8, $r4, $r7
#else
pushm $r0, $r5
move $r0, $r4
movi $r1, #0
move $r2, $r7
movi $r3, #0
bal __muldi3
movd44 $r8, $r0
popm $r0, $r5
#endif
move $r0, $r8
move $r8, $r9
move $r9, $r10
#else /* __big_endian__ */
/* For big endian: ($r7, $r1) is (high, low). */
move $r10, $r6
#ifndef __NDS32_ISA_V3M__
mulr64 $r6, $r5, $r8
#else
pushm $r0, $r5
move $r1, $r5
movi $r0, #0
move $r3, $r8
movi $r2, #0
bal __muldi3
movd44 $r6, $r0
popm $r0, $r5
#endif
move $r1, $r7
move $r7, $r6
move $r6, $r10
#endif /* __big_endian__ */
#ADD(P2L, O1H)
add P2L, P2L, O1H
slt $r15, P2L, O1H
#ADDCC(P2H, $0x0)
beqzs8 .LL34
add P2H, P2H, $r15
slt $r15, P2H, $r15
.LL34:
#ADDC($r9, $0x0)
add $r9, $r9, $r15
or $r10, P1L, P2L
beqz $r10, .Li13
ori P2H, P2H, #1
.Li13:
move P3H, $r9
move P3L, P2H
sltsi $r15, P3H, #0
bnezs8 .Li14
move $r15, P3L
add P3L, P3L, P3L
slt $r15, P3L, $r15
add P3H, P3H, P3H
add P3H, P3H, $r15
addi $r6, $r6, #-1
.Li14:
addi $r10, $r6, #-1
slti $r15, $r10, #0x7fe
beqzs8 .LFoveund
#ADD(P3L, $0x400)
move $r15, #0x400
add P3L, P3L, $r15
slt $r15, P3L, $r15
#ADDCC(P3H, $0x0)
beqzs8 .LL37
add P3H, P3H, $r15
slt $r15, P3H, $r15
.LL37:
#ADDC($r6, $0x0)
add $r6, $r6, $r15
.LFlab8:
srli $r10, P3L, #11
andi $r10, $r10, #1
sub P3L, P3L, $r10
srli P1L, P3L, #11
slli $r10, P3H, #21
or P1L, P1L, $r10
slli $r10, P3H, #1
srli $r10, $r10, #12
or P1H, P1H, $r10
slli $r10, $r6, #20
or P1H, P1H, $r10
.LFret:
.LF999:
popm $r6, $r10
pop $lp
ret5 $lp
.LFspecA:
#ADD(P3L, P3L)
move $r15, P3L
add P3L, P3L, P3L
slt $r15, P3L, $r15
#ADDC(P3H, P3H)
add P3H, P3H, P3H
add P3H, P3H, $r15
bnez $r6, .Li15
or $r10, P3H, P3L
beqz $r10, .Li16
#NORMd($r4, P1L, P2H)
bnez P3H, .LL38
bnez P3L, .LL39
move $r6, #0
j .LL40
.LL39:
move P3H, P3L
move P3L, #0
move P1L, #32
sub $r6, $r6, P1L
.LL38:
#ifndef __big_endian__
#ifdef __NDS32_PERF_EXT__
clz $r0, P3H
#else
pushm $r1, $r5
move $r0, P3H
bal __clzsi2
popm $r1, $r5
#endif
#else /* __big_endian__ */
#ifdef __NDS32_PERF_EXT__
clz $r1, $r4
#else
push $r0
pushm $r2, $r5
move $r0, $r4
bal __clzsi2
move $r1, $r0
popm $r2, $r5
pop $r0
#endif
#endif /* __big_endian__ */
beqz P1L, .LL40
sub $r6, $r6, P1L
subri P2H, P1L, #32
srl P2H, P3L, P2H
sll P3L, P3L, P1L
sll P3H, P3H, P1L
or P3H, P3H, P2H
.LL40:
#NORMd End
j .LFlab1
.Li16:
subri $r15, $r9, #0x7ff
beqzs8 .LFnan
j .LFret
.Li15:
or $r10, P3H, P3L
bnez $r10, .LFnan
bnez $r9, .Li17
slli $r10, O1H, #1
or $r10, $r10, O1L
beqz $r10, .LFnan
.Li17:
subri $r15, $r9, #0x7ff
bnezs8 .LFinf
.LFspecB:
#ADD(O1L, O1L)
move $r15, O1L
add O1L, O1L, O1L
slt $r15, O1L, $r15
#ADDC(O1H, O1H)
add O1H, O1H, O1H
add O1H, O1H, $r15
bnez $r9, .Li18
or $r10, O1H, O1L
beqz $r10, .Li19
#NORMd($r7, P2L, P1L)
bnez O1H, .LL41
bnez O1L, .LL42
move $r9, #0
j .LL43
.LL42:
move O1H, O1L
move O1L, #0
move P2L, #32
sub $r9, $r9, P2L
.LL41:
#ifndef __big_endian__
#ifdef __NDS32_PERF_EXT__
clz $r2, $r8
#else
pushm $r0, $r1
pushm $r3, $r5
move $r0, $r8
bal __clzsi2
move $r2, $r0
popm $r3, $r5
popm $r0, $r1
#endif
#else /* __big_endian__ */
#ifdef __NDS32_PERF_EXT__
clz $r3, $r7
#else
pushm $r0, $r2
pushm $r4, $r5
move $r0, $r7
bal __clzsi2
move $r3, $r0
popm $r4, $r5
popm $r0, $r2
#endif
#endif /* __big_endian__ */
beqz P2L, .LL43
sub $r9, $r9, P2L
subri P1L, P2L, #32
srl P1L, O1L, P1L
sll O1L, O1L, P2L
sll O1H, O1H, P2L
or O1H, O1H, P1L
.LL43:
#NORMd End
j .LFlab2
.Li19:
move P1L, #0
j .LFret
.Li18:
or $r10, O1H, O1L
bnez $r10, .LFnan
.LFinf:
move $r10, #0x7ff00000
or P1H, P1H, $r10
move P1L, #0
j .LFret
.LFnan:
move P1H, #0xfff80000
move P1L, #0
j .LFret
.LFoveund:
bgtz $r6, .LFinf
subri P1L, $r6, #1
move P2L, #0
.LL44:
move $r10, #0x20
slt $r15, P1L, $r10
bnezs8 .LL45
or P2L, P2L, P3L
move P3L, P3H
move P3H, #0
addi P1L, P1L, #0xffffffe0
bnez P3L, .LL44
.LL45:
beqz P1L, .LL46
move P2H, P3H
move $r10, P3L
srl P3L, P3L, P1L
srl P3H, P3H, P1L
subri P1L, P1L, #0x20
sll P2H, P2H, P1L
or P3L, P3L, P2H
sll $r10, $r10, P1L
or P2L, P2L, $r10
beqz P2L, .LL46
ori P3L, P3L, #1
.LL46:
#ADD(P3L, $0x400)
move $r15, #0x400
add P3L, P3L, $r15
slt $r15, P3L, $r15
#ADDC(P3H, $0x0)
add P3H, P3H, $r15
srli $r6, P3H, #31
j .LFlab8
.size __muldf3, .-__muldf3
#endif /* L_mul_df */
#ifdef L_div_sf
.text
.align 2
.global __divsf3
.type __divsf3, @function
__divsf3:
push $lp
pushm $r6, $r10
move $r7, #0x80000000
srli $r4, $r0, #23
andi $r4, $r4, #0xff
srli $r6, $r1, #23
andi $r6, $r6, #0xff
slli $r3, $r0, #8
or $r3, $r3, $r7
slli $r5, $r1, #8
or $r5, $r5, $r7
xor $r10, $r0, $r1
and $r7, $r7, $r10
addi $r10, $r4, #-1
slti $r15, $r10, #0xfe
beqzs8 .LGspecA
.LGlab1:
addi $r10, $r6, #-1
slti $r15, $r10, #0xfe
beqzs8 .LGspecB
.LGlab2:
slt $r15, $r3, $r5
bnezs8 .Li27
srli $r3, $r3, #1
addi $r4, $r4, #1
.Li27:
srli $r8, $r5, #14
divr $r0, $r2, $r3, $r8
andi $r9, $r5, #0x3fff
mul $r1, $r9, $r0
slli $r2, $r2, #14
#SUB($r2, $r1)
move $r15, $r2
sub $r2, $r2, $r1
slt $r15, $r15, $r2
beqzs8 .Li28
addi $r0, $r0, #-1
#ADD($r2, $r5)
add $r2, $r2, $r5
slt $r15, $r2, $r5
.Li28:
divr $r3, $r2, $r2, $r8
mul $r1, $r9, $r3
slli $r2, $r2, #14
#SUB($r2, $r1)
move $r15, $r2
sub $r2, $r2, $r1
slt $r15, $r15, $r2
beqzs8 .Li29
addi $r3, $r3, #-1
#ADD($r2, $r5)
add $r2, $r2, $r5
slt $r15, $r2, $r5
.Li29:
slli $r10, $r0, #14
add $r3, $r3, $r10
slli $r3, $r3, #4
beqz $r2, .Li30
ori $r3, $r3, #1
.Li30:
subri $r10, $r6, #0x7e
add $r4, $r4, $r10
addi $r10, $r4, #-1
slti $r15, $r10, #0xfe
beqzs8 .LGoveund
.LGlab8:
#ADD($r3, $0x80)
move $r15, #0x80
add $r3, $r3, $r15
slt $r15, $r3, $r15
#ADDC($r4, $0x0)
add $r4, $r4, $r15
srli $r10, $r3, #8
andi $r10, $r10, #1
sub $r3, $r3, $r10
slli $r3, $r3, #1
srli $r3, $r3, #9
slli $r10, $r4, #23
or $r3, $r3, $r10
or $r0, $r3, $r7
.LG999:
popm $r6, $r10
pop $lp
ret5 $lp
.LGspecA:
bnez $r4, .Li31
add $r3, $r3, $r3
beqz $r3, .Li31
#ifdef __NDS32_PERF_EXT__
clz $r8, $r3
#else
pushm $r0, $r5
move $r0, $r3
bal __clzsi2
move $r8, $r0
popm $r0, $r5
#endif
sub $r4, $r4, $r8
sll $r3, $r3, $r8
j .LGlab1
.Li31:
bne $r6, $r4, .Li33
add $r10, $r5, $r5
beqz $r10, .LGnan
.Li33:
subri $r15, $r6, #0xff
beqzs8 .LGspecB
beqz $r4, .LGzer
add $r10, $r3, $r3
bnez $r10, .LGnan
j .LGinf
.LGspecB:
bnez $r6, .Li34
add $r5, $r5, $r5
beqz $r5, .LGinf
#ifdef __NDS32_PERF_EXT__
clz $r8, $r5
#else
pushm $r0, $r5
move $r0, $r5
bal __clzsi2
move $r8, $r0
popm $r0, $r5
#endif
sub $r6, $r6, $r8
sll $r5, $r5, $r8
j .LGlab2
.Li34:
add $r10, $r5, $r5
bnez $r10, .LGnan
.LGzer:
move $r0, $r7
j .LG999
.LGoveund:
bgtz $r4, .LGinf
subri $r8, $r4, #1
slti $r15, $r8, #0x20
beqzs8 .LGzer
subri $r10, $r8, #0x20
sll $r4, $r3, $r10
srl $r3, $r3, $r8
beqz $r4, .Li37
ori $r3, $r3, #2
.Li37:
move $r4, #0
addi $r10, $r3, #0x80
sltsi $r15, $r10, #0
beqzs8 .LGlab8
move $r4, #1
j .LGlab8
.LGinf:
move $r10, #0x7f800000
or $r0, $r7, $r10
j .LG999
.LGnan:
move $r0, #0xffc00000
j .LG999
.size __divsf3, .-__divsf3
#endif /* L_div_sf */
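/* For reference: a behavioral C sketch of the quotient path in
   __divsf3 above.  It models the algorithm, not the register-level
   divr/mul sequence, and "divsf_sig_sketch" is an illustrative name,
   not part of the build.  It also shows the rounding idiom used
   throughout this file: add half an ulp (the #ADD($r3, $0x80) step),
   then pull the result back to even on an exact tie.

     #include <stdint.h>

     // asig, bsig: 1.23 significands aligned at bit 31 (bit 31 set).
     // Returns the rounded value shifted down by 8; the caller packs
     // the low 23 bits under the sign and adjusted exponent.
     static uint32_t divsf_sig_sketch (uint32_t asig, uint32_t bsig,
                                       int *exp)
     {
       uint64_t n, q;
       if (asig >= bsig)            // keep the quotient in [0.5, 1)
         {
           asig >>= 1;
           (*exp)++;
         }
       n = (uint64_t) asig << 32;
       q = n / bsig;                // 32 quotient bits, bit 31 set
       if (n % bsig)
         q |= 1;                    // sticky bit for correct rounding
       q += 0x80;                   // round to nearest...
       q -= (q >> 8) & 1;           // ...ties to even
       if (q >> 32)
         (*exp)++;                  // rounding overflowed the mantissa
       return (uint32_t) (q >> 8);
     }
*/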
#ifdef L_div_df
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#define P3L $r4
#define P3H $r5
#define O1L $r7
#define O1H $r8
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#define P3H $r4
#define P3L $r5
#define O1H $r7
#define O1L $r8
#endif
.text
.align 2
.global __divdf3
.type __divdf3, @function
__divdf3:
push $lp
pushm $r6, $r10
slli $r6, P1H, #1
srli $r6, $r6, #21
slli P3H, P1H, #11
srli $r10, P1L, #21
or P3H, P3H, $r10
slli P3L, P1L, #11
move O1L, #0x80000000
or P3H, P3H, O1L
slli $r9, P2H, #1
srli $r9, $r9, #21
slli O1H, P2H, #11
srli $r10, P2L, #21
or O1H, O1H, $r10
or O1H, O1H, O1L
xor P1H, P1H, P2H
and P1H, P1H, O1L
slli O1L, P2L, #11
addi $r10, $r6, #-1
slti $r15, $r10, #0x7fe
beqzs8 .LGspecA
.LGlab1:
addi $r10, $r9, #-1
slti $r15, $r10, #0x7fe
beqzs8 .LGspecB
.LGlab2:
sub $r6, $r6, $r9
addi $r6, $r6, #0x3ff
srli P3L, P3L, #1
slli $r10, P3H, #31
or P3L, P3L, $r10
srli P3H, P3H, #1
srli $r9, O1H, #16
divr P2H, P3H, P3H, $r9
move $r10, #0xffff
and P2L, O1H, $r10
mul P1L, P2L, P2H
slli P3H, P3H, #16
srli $r10, P3L, #16
or P3H, P3H, $r10
#SUB(P3H, P1L)
move $r15, P3H
sub P3H, P3H, P1L
slt $r15, $r15, P3H
beqzs8 .Li20
.Lb21:
addi P2H, P2H, #-1
add P3H, P3H, O1H
slt $r15, P3H, O1H
beqzs8 .Lb21
.Li20:
divr $r9, P3H, P3H, $r9
mul P1L, P2L, $r9
slli P3H, P3H, #16
move $r15, #0xffff
and $r10, P3L, $r15
or P3H, P3H, $r10
#SUB(P3H, P1L)
move $r15, P3H
sub P3H, P3H, P1L
slt $r15, $r15, P3H
beqzs8 .Li22
.Lb23:
addi $r9, $r9, #-1
add P3H, P3H, O1H
slt $r15, P3H, O1H
beqzs8 .Lb23
.Li22:
slli P2H, P2H, #16
add P2H, P2H, $r9
/* This is a 64-bit multiplication. */
#ifndef __big_endian__
/* For little endian: ($r0, $r9) is (high, low). */
move $r10, $r1
#ifndef __NDS32_ISA_V3M__
mulr64 $r0, $r3, $r7
#else
pushm $r2, $r5
move $r0, $r3
movi $r1, #0
move $r2, $r7
movi $r3, #0
bal __muldi3
popm $r2, $r5
#endif
move $r9, $r0
move $r0, $r1
move $r1, $r10
#else /* __big_endian__ */
/* For big endian: ($r1, $r9) is (high, low). */
move $r10, $r0
#ifndef __NDS32_ISA_V3M__
mulr64 $r0, $r2, $r8
#else
pushm $r2, $r5
move $r1, $r2
movi $r0, #0
move $r3, $r8
movi $r2, #0
bal __muldi3
popm $r2, $r5
#endif
move $r9, $r1
move $r1, $r0
move $r0, $r10
#endif /* __big_endian__ */
move P3L, #0
#SUB(P3L, $r9)
move $r15, P3L
sub P3L, P3L, $r9
slt $r15, $r15, P3L
#SUBCC(P3H, P1L)
beqzs8 .LL47
move $r15, P3H
sub P3H, P3H, P1L
slt $r15, $r15, P3H
beqzs8 .LL48
subi333 P3H, P3H, #1
j .LL49
.LL48:
move $r15, P3H
subi333 P3H, P3H, #1
slt $r15, $r15, P3H
j .LL49
.LL47:
move $r15, P3H
sub P3H, P3H, P1L
slt $r15, $r15, P3H
.LL49:
beqzs8 .Li24
.LGlab3:
addi P2H, P2H, #-1
#ADD(P3L, O1L)
add P3L, P3L, O1L
slt $r15, P3L, O1L
#ADDCC(P3H, O1H)
beqzs8 .LL50
add P3H, P3H, O1H
slt $r15, P3H, O1H
beqzs8 .LL51
addi P3H, P3H, #0x1
j .LL52
.LL51:
move $r15, #1
add P3H, P3H, $r15
slt $r15, P3H, $r15
j .LL52
.LL50:
add P3H, P3H, O1H
slt $r15, P3H, O1H
.LL52:
beqzs8 .LGlab3
.Li24:
bne P3H, O1H, .Li25
move P1L, O1L
move P3H, P3L
move $r9, #0
move P2L, $r9
j .Le25
.Li25:
srli P2L, O1H, #16
divr $r9, P3H, P3H, P2L
move $r10, #0xffff
and $r10, O1H, $r10
mul P1L, $r10, $r9
slli P3H, P3H, #16
srli $r15, P3L, #16
or P3H, P3H, $r15
#SUB(P3H, P1L)
move $r15, P3H
sub P3H, P3H, P1L
slt $r15, $r15, P3H
beqzs8 .Li26
.Lb27:
addi $r9, $r9, #-1
add P3H, P3H, O1H
slt $r15, P3H, O1H
beqzs8 .Lb27
.Li26:
divr P2L, P3H, P3H, P2L
mul P1L, $r10, P2L
slli P3H, P3H, #16
move $r10, #0xffff
and $r10, P3L, $r10
or P3H, P3H, $r10
#SUB(P3H, P1L)
move $r15, P3H
sub P3H, P3H, P1L
slt $r15, $r15, P3H
beqzs8 .Li28
.Lb29:
addi P2L, P2L, #-1
add P3H, P3H, O1H
slt $r15, P3H, O1H
beqzs8 .Lb29
.Li28:
slli $r9, $r9, #16
add $r9, $r9, P2L
/* This is a 64-bit multiplication. */
#ifndef __big_endian__
/* For little endian: ($r0, $r2) is (high, low). */
move $r10, $r1
#ifndef __NDS32_ISA_V3M__
mulr64 $r0, $r9, $r7
#else
pushm $r2, $r5
move $r0, $r9
movi $r1, #0
move $r2, $r7
movi $r3, #0
bal __muldi3
popm $r2, $r5
#endif
move $r2, $r0
move $r0, $r1
move $r1, $r10
#else /* __big_endian__ */
/* For big endian: ($r1, $r3) is (high, low). */
move $r10, $r0
#ifndef __NDS32_ISA_V3M__
mulr64 $r0, $r9, $r8
#else
pushm $r2, $r5
	move	$r1, $r9
	movi	$r0, #0
	move	$r3, $r8
	movi	$r2, #0
bal __muldi3
popm $r2, $r5
#endif
move $r3, $r1
move $r1, $r0
move $r0, $r10
#endif /* __big_endian__ */
.Le25:
move P3L, #0
#SUB(P3L, P2L)
move $r15, P3L
sub P3L, P3L, P2L
slt $r15, $r15, P3L
#SUBCC(P3H, P1L)
beqzs8 .LL53
move $r15, P3H
sub P3H, P3H, P1L
slt $r15, $r15, P3H
beqzs8 .LL54
subi333 P3H, P3H, #1
j .LL55
.LL54:
move $r15, P3H
subi333 P3H, P3H, #1
slt $r15, $r15, P3H
j .LL55
.LL53:
move $r15, P3H
sub P3H, P3H, P1L
slt $r15, $r15, P3H
.LL55:
beqzs8 .Li30
.LGlab4:
addi $r9, $r9, #-1
#ADD(P3L, O1L)
add P3L, P3L, O1L
slt $r15, P3L, O1L
#ADDCC(P3H, O1H)
beqzs8 .LL56
add P3H, P3H, O1H
slt $r15, P3H, O1H
beqzs8 .LL57
addi P3H, P3H, #0x1
j .LL58
.LL57:
move $r15, #1
add P3H, P3H, $r15
slt $r15, P3H, $r15
j .LL58
.LL56:
add P3H, P3H, O1H
slt $r15, P3H, O1H
.LL58:
beqzs8 .LGlab4
.Li30:
sltsi $r15, P2H, #0
bnezs8 .Li31
#ADD($r9, $r9)
move $r15, $r9
add $r9, $r9, $r9
slt $r15, $r9, $r15
#ADDC(P2H, P2H)
add P2H, P2H, P2H
add P2H, P2H, $r15
addi $r6, $r6, #-1
.Li31:
or $r10, P3H, P3L
beqz $r10, .Li32
ori $r9, $r9, #1
.Li32:
move P3H, P2H
move P3L, $r9
addi $r10, $r6, #-1
slti $r15, $r10, #0x7fe
beqzs8 .LGoveund
#ADD(P3L, $0x400)
move $r15, #0x400
add P3L, P3L, $r15
slt $r15, P3L, $r15
#ADDCC(P3H, $0x0)
beqzs8 .LL61
add P3H, P3H, $r15
slt $r15, P3H, $r15
.LL61:
#ADDC($r6, $0x0)
add $r6, $r6, $r15
.LGlab8:
srli $r10, P3L, #11
andi $r10, $r10, #1
sub P3L, P3L, $r10
srli P1L, P3L, #11
slli $r10, P3H, #21
or P1L, P1L, $r10
slli $r10, P3H, #1
srli $r10, $r10, #12
or P1H, P1H, $r10
slli $r10, $r6, #20
or P1H, P1H, $r10
.LGret:
.LG999:
popm $r6, $r10
pop $lp
ret5 $lp
.LGoveund:
bgtz $r6, .LGinf
subri P2H, $r6, #1
move P1L, #0
.LL62:
move $r10, #0x20
slt $r15, P2H, $r10
bnezs8 .LL63
or P1L, P1L, P3L
move P3L, P3H
move P3H, #0
addi P2H, P2H, #0xffffffe0
bnez P3L, .LL62
.LL63:
beqz P2H, .LL64
move P2L, P3H
move $r10, P3L
srl P3L, P3L, P2H
srl P3H, P3H, P2H
subri P2H, P2H, #0x20
sll P2L, P2L, P2H
or P3L, P3L, P2L
sll $r10, $r10, P2H
or P1L, P1L, $r10
beqz P1L, .LL64
ori P3L, P3L, #1
.LL64:
#ADD(P3L, $0x400)
move $r15, #0x400
add P3L, P3L, $r15
slt $r15, P3L, $r15
#ADDC(P3H, $0x0)
add P3H, P3H, $r15
srli $r6, P3H, #31
j .LGlab8
.LGspecA:
#ADD(P3L, P3L)
move $r15, P3L
add P3L, P3L, P3L
slt $r15, P3L, $r15
#ADDC(P3H, P3H)
add P3H, P3H, P3H
add P3H, P3H, $r15
bnez $r6, .Li33
or $r10, P3H, P3L
beqz $r10, .Li33
#NORMd($r4, P2H, P2L)
bnez P3H, .LL65
bnez P3L, .LL66
move $r6, #0
j .LL67
.LL66:
move P3H, P3L
move P3L, #0
move P2H, #32
sub $r6, $r6, P2H
.LL65:
#ifndef __big_endian__
#ifdef __NDS32_PERF_EXT__
clz $r3, $r5
#else
pushm $r0, $r2
pushm $r4, $r5
move $r0, $r5
bal __clzsi2
move $r3, $r0
popm $r4, $r5
popm $r0, $r2
#endif
#else /* __big_endian__ */
#ifdef __NDS32_PERF_EXT__
clz $r2, $r4
#else
pushm $r0, $r1
pushm $r3, $r5
move $r0, $r4
bal __clzsi2
move $r2, $r0
popm $r3, $r5
popm $r0, $r1
#endif
#endif /* __big_endian__ */
beqz P2H, .LL67
sub $r6, $r6, P2H
subri P2L, P2H, #32
srl P2L, P3L, P2L
sll P3L, P3L, P2H
sll P3H, P3H, P2H
or P3H, P3H, P2L
.LL67:
#NORMd End
j .LGlab1
.Li33:
bne $r6, $r9, .Li35
slli $r10, O1H, #1
or $r10, $r10, O1L
beqz $r10, .LGnan
.Li35:
subri $r15, $r9, #0x7ff
beqzs8 .LGspecB
beqz $r6, .LGret
or $r10, P3H, P3L
bnez $r10, .LGnan
.LGinf:
move $r10, #0x7ff00000
or P1H, P1H, $r10
move P1L, #0
j .LGret
.LGspecB:
#ADD(O1L, O1L)
move $r15, O1L
add O1L, O1L, O1L
slt $r15, O1L, $r15
#ADDC(O1H, O1H)
add O1H, O1H, O1H
add O1H, O1H, $r15
bnez $r9, .Li36
or $r10, O1H, O1L
beqz $r10, .LGinf
#NORMd($r7, P2H, P2L)
bnez O1H, .LL68
bnez O1L, .LL69
move $r9, #0
j .LL70
.LL69:
move O1H, O1L
move O1L, #0
move P2H, #32
sub $r9, $r9, P2H
.LL68:
#ifndef __big_endian__
#ifdef __NDS32_PERF_EXT__
clz $r3, $r8
#else
pushm $r0, $r2
pushm $r4, $r5
move $r0, $r8
bal __clzsi2
move $r3, $r0
popm $r4, $r5
popm $r0, $r2
#endif
#else /* __big_endian__ */
#ifdef __NDS32_PERF_EXT__
clz $r2, $r7
#else
pushm $r0, $r1
pushm $r3, $r5
move $r0, $r7
bal __clzsi2
move $r2, $r0
popm $r3, $r5
popm $r0, $r1
#endif
#endif /* __big_endian__ */
beqz P2H, .LL70
sub $r9, $r9, P2H
subri P2L, P2H, #32
srl P2L, O1L, P2L
sll O1L, O1L, P2H
sll O1H, O1H, P2H
or O1H, O1H, P2L
.LL70:
#NORMd End
j .LGlab2
.Li36:
or $r10, O1H, O1L
beqz $r10, .Li38
.LGnan:
move P1H, #0xfff80000
.Li38:
move P1L, #0
j .LGret
.size __divdf3, .-__divdf3
#endif /* L_div_df */
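/* A behavioral sketch of the significand division in __divdf3 above,
   assuming the compiler provides GCC's unsigned __int128; the code
   above builds the same 64-bit quotient from 16- and 32-bit pieces
   with divr, mul, and the corrective .Lb21/.Lb23 loops.  The function
   name is illustrative.

     #include <stdint.h>

     // asig, bsig: 1.52 significands aligned at bit 63 (bit 63 set).
     // Returns a 64-bit quotient with bit 63 set and a sticky LSB;
     // the add-0x400 nearest-even step above then rounds to 53 bits.
     static uint64_t divdf_sig_sketch (uint64_t asig, uint64_t bsig,
                                       int *exp)
     {
       unsigned __int128 n;
       uint64_t q;
       if (asig >= bsig)            // keep the quotient in [0.5, 1)
         {
           asig >>= 1;
           (*exp)++;
         }
       n = (unsigned __int128) asig << 64;
       q = (uint64_t) (n / bsig);   // 64 quotient bits, bit 63 set
       if ((uint64_t) (n % bsig))
         q |= 1;                    // sticky bit
       return q;
     }
*/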
#ifdef L_negate_sf
.text
.align 2
.global __negsf2
.type __negsf2, @function
__negsf2:
push $lp
move $r1, #0x80000000
xor $r0, $r0, $r1
.LN999:
pop $lp
ret5 $lp
.size __negsf2, .-__negsf2
#endif /* L_negate_sf */
#ifdef L_negate_df
#ifndef __big_endian__
#define P1H $r1
#else
#define P1H $r0
#endif
.text
.align 2
.global __negdf2
.type __negdf2, @function
__negdf2:
push $lp
move $r2, #0x80000000
xor P1H, P1H, $r2
.LP999:
pop $lp
ret5 $lp
.size __negdf2, .-__negdf2
#endif /* L_negate_df */
#ifdef L_sf_to_df
#ifndef __big_endian__
#define O1L $r1
#define O1H $r2
#else
#define O1H $r1
#define O1L $r2
#endif
.text
.align 2
.global __extendsfdf2
.type __extendsfdf2, @function
__extendsfdf2:
push $lp
srli $r3, $r0, #23
andi $r3, $r3, #0xff
move $r5, #0x80000000
and O1H, $r0, $r5
addi $r5, $r3, #-1
slti $r15, $r5, #0xfe
beqzs8 .LJspec
.LJlab1:
addi $r3, $r3, #0x380
slli $r5, $r0, #9
srli $r5, $r5, #12
or O1H, O1H, $r5
slli O1L, $r0, #29
.LJret:
slli $r5, $r3, #20
or O1H, O1H, $r5
move $r0, $r1
move $r1, $r2
.LJ999:
pop $lp
ret5 $lp
.LJspec:
move O1L, #0
add $r0, $r0, $r0
beqz $r0, .LJret
bnez $r3, .Li42
.Lb43:
addi $r3, $r3, #-1
add $r0, $r0, $r0
move $r5, #0x800000
slt $r15, $r0, $r5
bnezs8 .Lb43
j .LJlab1
.Li42:
move $r3, #0x7ff
move $r5, #0xff000000
slt $r15, $r5, $r0
beqzs8 .LJret
move O1H, #0xfff80000
j .LJret
.size __extendsfdf2, .-__extendsfdf2
#endif /* L_sf_to_df */
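/* Behavioral C sketch of __extendsfdf2 above: rebias the exponent by
   0x380 (1023 - 127), move the 23 fraction bits to the top of the
   52-bit double fraction, renormalize denormals, and quiet NaNs to
   the default 0xfff80000_00000000 pattern, as the code does.  The
   function name is illustrative.

     #include <stdint.h>

     static uint64_t extendsfdf2_sketch (uint32_t f)
     {
       uint64_t sign = (uint64_t) (f & 0x80000000u) << 32;
       int32_t  exp  = (f >> 23) & 0xff;
       uint32_t frac = f & 0x007fffffu;

       if (exp == 0xff)
         return frac ? 0xfff8000000000000ull         // NaN -> default qNaN
                     : sign | 0x7ff0000000000000ull; // +-Inf
       if (exp == 0)
         {
           if (frac == 0)
             return sign;                            // signed zero
           exp = 1;                                  // renormalize denormal
           while ((frac & 0x00800000u) == 0)
             {
               frac <<= 1;
               exp--;
             }
           frac &= 0x007fffffu;
         }
       return sign | ((uint64_t) (exp + 0x380) << 52)
                   | ((uint64_t) frac << 29);
     }
*/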
#ifdef L_df_to_sf
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#endif
.text
.align 2
.global __truncdfsf2
.type __truncdfsf2, @function
__truncdfsf2:
push $lp
pushm $r6, $r8
slli P2H, P1H, #11
srli $r7, P1L, #21
or P2H, P2H, $r7
slli P2L, P1L, #11
move $r7, #0x80000000
or P2H, P2H, $r7
and $r5, P1H, $r7
slli $r4, P1H, #1
srli $r4, $r4, #21
addi $r4, $r4, #0xfffffc80
addi $r7, $r4, #-1
slti $r15, $r7, #0xfe
beqzs8 .LKspec
.LKlab1:
beqz P2L, .Li45
ori P2H, P2H, #1
.Li45:
#ADD(P2H, $0x80)
move $r15, #0x80
add P2H, P2H, $r15
slt $r15, P2H, $r15
#ADDC($r4, $0x0)
add $r4, $r4, $r15
srli $r7, P2H, #8
andi $r7, $r7, #1
sub P2H, P2H, $r7
slli P2H, P2H, #1
srli P2H, P2H, #9
slli $r7, $r4, #23
or P2H, P2H, $r7
or $r0, P2H, $r5
.LK999:
popm $r6, $r8
pop $lp
ret5 $lp
.LKspec:
subri $r15, $r4, #0x47f
bnezs8 .Li46
slli $r7, P2H, #1
or $r7, $r7, P2L
beqz $r7, .Li46
move $r0, #0xffc00000
j .LK999
.Li46:
sltsi $r15, $r4, #0xff
bnezs8 .Li48
move $r7, #0x7f800000
or $r0, $r5, $r7
j .LK999
.Li48:
subri $r6, $r4, #1
move $r7, #0x20
slt $r15, $r6, $r7
bnezs8 .Li49
move $r0, $r5
j .LK999
.Li49:
subri $r8, $r6, #0x20
sll $r7, P2H, $r8
or P2L, P2L, $r7
srl P2H, P2H, $r6
move $r4, #0
move $r7, #0x80000000
or P2H, P2H, $r7
j .LKlab1
.size __truncdfsf2, .-__truncdfsf2
#endif /* L_df_to_sf */
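/* Behavioral C sketch of __truncdfsf2 above: rebias by -0x380, keep
   the top 32 significand bits plus a sticky bit, shift denormal
   results into place, and round to nearest-even; overflow becomes
   Inf and NaNs become the default 0xffc00000, as in the code.  The
   function name is illustrative.

     #include <stdint.h>

     static uint32_t truncdfsf2_sketch (uint64_t d)
     {
       uint32_t sign = (uint32_t) (d >> 32) & 0x80000000u;
       int32_t  exp  = (int32_t) ((d << 1) >> 53) - 0x380;
       uint32_t sig  = (uint32_t) (d >> 21) | 0x80000000u; // top 32 bits
       uint32_t rest = (uint32_t) (d << 11);               // dropped bits
       uint64_t t;

       if (exp >= 0xff)                       // overflow, Inf, or NaN
         {
           if (exp == 0x47f && ((sig << 1) | rest) != 0)
             return 0xffc00000u;              // NaN -> default quiet NaN
           return sign | 0x7f800000u;         // +-Inf
         }
       if (exp <= 0)                          // denormal or zero result
         {
           if (1 - exp >= 32)
             return sign;                     // underflows to +-0
           rest |= sig << (31 + exp);         // shifted-out bits -> sticky
           sig = (sig >> (1 - exp)) | 0x80000000u; // bit 31 dropped below
           exp = 0;
         }
       if (rest)
         sig |= 1;                            // sticky bit
       t = (uint64_t) sig + 0x80;             // round to nearest...
       t -= (t >> 8) & 1;                     // ...ties to even
       if (t >> 32)
         exp++;                               // rounded into next binade
       return sign | ((uint32_t) exp << 23)
                   | ((uint32_t) (t >> 8) & 0x7fffffu);
     }
*/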
#ifdef L_df_to_si
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#else
#define P1H $r0
#define P1L $r1
#endif
.global __fixdfsi
.type __fixdfsi, @function
__fixdfsi:
push $lp
pushm $r6, $r6
slli $r3, P1H, #11
srli $r6, P1L, #21
or $r3, $r3, $r6
move $r6, #0x80000000
or $r3, $r3, $r6
slli $r6, P1H, #1
srli $r6, $r6, #21
subri $r2, $r6, #0x41e
blez $r2, .LLnaninf
move $r6, #0x20
slt $r15, $r2, $r6
bnezs8 .LL72
move $r3, #0
.LL72:
srl $r3, $r3, $r2
sltsi $r15, P1H, #0
beqzs8 .Li50
subri $r3, $r3, #0
.Li50:
move $r0, $r3
.LL999:
popm $r6, $r6
pop $lp
ret5 $lp
.LLnaninf:
beqz P1L, .Li51
ori P1H, P1H, #1
.Li51:
move $r6, #0x7ff00000
slt $r15, $r6, P1H
beqzs8 .Li52
move $r0, #0x80000000
j .LL999
.Li52:
move $r0, #0x7fffffff
j .LL999
.size __fixdfsi, .-__fixdfsi
#endif /* L_df_to_si */
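/* Behavioral C sketch of __fixdfsi above: shift the top 32 significand
   bits right by (0x41e - biased_exp), negate if the sign bit is set,
   and saturate out-of-range, Inf, and NaN inputs exactly as the
   .LLnaninf path does.  The function name is illustrative.

     #include <stdint.h>

     static int32_t fixdfsi_sketch (uint64_t d)
     {
       uint32_t hi  = (uint32_t) (d >> 32);
       uint32_t lo  = (uint32_t) d;
       int32_t  exp = (int32_t) ((hi << 1) >> 21);           // biased exp
       uint32_t sig = (hi << 11) | (lo >> 21) | 0x80000000u; // top 32 bits
       int32_t  sh  = 0x41e - exp;                           // 1023 + 31

       if (sh <= 0)                  // |x| >= 2^31, Inf, or NaN: saturate
         {
           uint32_t h = lo ? hi | 1 : hi;
           return (h > 0x7ff00000u) ? INT32_MIN   // negative or NaN
                                    : INT32_MAX;  // positive overflow
         }
       if (sh >= 32)
         sig = 0;                    // |x| < 1 truncates to 0
       else
         sig >>= sh;
       return (hi & 0x80000000u) ? -(int32_t) sig : (int32_t) sig;
     }
*/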
#ifdef L_fixsfdi
#ifndef __big_endian__
#define O1L $r1
#define O1H $r2
#else
#define O1H $r1
#define O1L $r2
#endif
.text
.align 2
.global __fixsfdi
.type __fixsfdi, @function
__fixsfdi:
push $lp
srli $r3, $r0, #23
andi $r3, $r3, #0xff
slli O1H, $r0, #8
move $r5, #0x80000000
or O1H, O1H, $r5
move O1L, #0
sltsi $r15, $r3, #0xbe
beqzs8 .LCinfnan
subri $r3, $r3, #0xbe
.LL8:
move $r5, #0x20
slt $r15, $r3, $r5
bnezs8 .LL9
move O1L, O1H
move O1H, #0
addi $r3, $r3, #0xffffffe0
bnez O1L, .LL8
.LL9:
beqz $r3, .LL10
move $r4, O1H
srl O1L, O1L, $r3
srl O1H, O1H, $r3
subri $r3, $r3, #0x20
sll $r4, $r4, $r3
or O1L, O1L, $r4
.LL10:
sltsi $r15, $r0, #0
beqzs8 .LCret
subri O1H, O1H, #0
beqz O1L, .LL11
subri O1L, O1L, #0
subi45 O1H, #1
.LL11:
.LCret:
move $r0, $r1
move $r1, $r2
.LC999:
pop $lp
ret5 $lp
.LCinfnan:
sltsi $r15, $r0, #0
bnezs8 .LCret3
subri $r15, $r3, #0xff
bnezs8 .Li7
slli $r5, O1H, #1
beqz $r5, .Li7
.LCret3:
move O1H, #0x80000000
j .LCret
.Li7:
move O1H, #0x7fffffff
move O1L, #-1
j .LCret
.size __fixsfdi, .-__fixsfdi
#endif /* L_fixsfdi */
#ifdef L_fixdfdi
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define O1L $r3
#define O1H $r4
#else
#define P1H $r0
#define P1L $r1
#define O1H $r3
#define O1L $r4
#endif
.text
.align 2
.global __fixdfdi
.type __fixdfdi, @function
__fixdfdi:
push $lp
pushm $r6, $r6
slli $r5, P1H, #1
srli $r5, $r5, #21
slli O1H, P1H, #11
srli $r6, P1L, #21
or O1H, O1H, $r6
slli O1L, P1L, #11
move $r6, #0x80000000
or O1H, O1H, $r6
slti $r15, $r5, #0x43e
beqzs8 .LCnaninf
subri $r2, $r5, #0x43e
.LL14:
move $r6, #0x20
slt $r15, $r2, $r6
bnezs8 .LL15
move O1L, O1H
move O1H, #0
addi $r2, $r2, #0xffffffe0
bnez O1L, .LL14
.LL15:
beqz $r2, .LL16
move P1L, O1H
srl O1L, O1L, $r2
srl O1H, O1H, $r2
subri $r2, $r2, #0x20
sll P1L, P1L, $r2
or O1L, O1L, P1L
.LL16:
sltsi $r15, P1H, #0
beqzs8 .LCret
subri O1H, O1H, #0
beqz O1L, .LL17
subri O1L, O1L, #0
subi45 O1H, #1
.LL17:
.LCret:
move P1L, O1L
move P1H, O1H
.LC999:
popm $r6, $r6
pop $lp
ret5 $lp
.LCnaninf:
sltsi $r15, P1H, #0
bnezs8 .LCret3
subri $r15, $r5, #0x7ff
bnezs8 .Li5
slli $r6, O1H, #1
or $r6, $r6, O1L
beqz $r6, .Li5
.LCret3:
move O1H, #0x80000000
move O1L, #0
j .LCret
.Li5:
move O1H, #0x7fffffff
move O1L, #-1
j .LCret
.size __fixdfdi, .-__fixdfdi
#endif /* L_fixdfdi */
#ifdef L_fixunssfsi
.global __fixunssfsi
.type __fixunssfsi, @function
__fixunssfsi:
push $lp
slli $r1, $r0, #8
move $r3, #0x80000000
or $r1, $r1, $r3
srli $r3, $r0, #23
andi $r3, $r3, #0xff
subri $r2, $r3, #0x9e
sltsi $r15, $r2, #0
bnezs8 .LLspec
sltsi $r15, $r2, #0x20
bnezs8 .Li45
move $r0, #0
j .LL999
.Li45:
srl $r1, $r1, $r2
sltsi $r15, $r0, #0
beqzs8 .Li46
subri $r1, $r1, #0
.Li46:
move $r0, $r1
.LL999:
pop $lp
ret5 $lp
.LLspec:
move $r3, #0x7f800000
slt $r15, $r3, $r0
beqzs8 .Li47
move $r0, #0x80000000
j .LL999
.Li47:
move $r0, #-1
j .LL999
.size __fixunssfsi, .-__fixunssfsi
#endif /* L_fixunssfsi */
#ifdef L_fixunsdfsi
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#else
#define P1H $r0
#define P1L $r1
#endif
.text
.align 2
.global __fixunsdfsi
.type __fixunsdfsi, @function
__fixunsdfsi:
push $lp
pushm $r6, $r6
slli $r3, P1H, #11
srli $r6, P1L, #21
or $r3, $r3, $r6
move $r6, #0x80000000
or $r3, $r3, $r6
slli $r6, P1H, #1
srli $r6, $r6, #21
subri $r2, $r6, #0x41e
sltsi $r15, $r2, #0
bnezs8 .LNnaninf
move $r6, #0x20
slt $r15, $r2, $r6
bnezs8 .LL73
move $r3, #0
.LL73:
srl $r3, $r3, $r2
sltsi $r15, P1H, #0
beqzs8 .Li53
subri $r3, $r3, #0
.Li53:
move $r0, $r3
.LN999:
popm $r6, $r6
pop $lp
ret5 $lp
.LNnaninf:
beqz P1L, .Li54
ori P1H, P1H, #1
.Li54:
move $r6, #0x7ff00000
slt $r15, $r6, P1H
beqzs8 .Li55
move $r0, #0x80000000
j .LN999
.Li55:
move $r0, #-1
j .LN999
.size __fixunsdfsi, .-__fixunsdfsi
#endif /* L_fixunsdfsi */
#ifdef L_fixunssfdi
#ifndef __big_endian__
#define O1L $r1
#define O1H $r2
#else
#define O1H $r1
#define O1L $r2
#endif
.text
.align 2
.global __fixunssfdi
.type __fixunssfdi, @function
__fixunssfdi:
push $lp
srli $r3, $r0, #23
andi $r3, $r3, #0xff
slli O1H, $r0, #8
move $r5, #0x80000000
or O1H, O1H, $r5
move O1L, #0
sltsi $r15, $r3, #0xbe
beqzs8 .LDinfnan
subri $r3, $r3, #0xbe
.LL12:
move $r5, #0x20
slt $r15, $r3, $r5
bnezs8 .LL13
move O1L, O1H
move O1H, #0
addi $r3, $r3, #0xffffffe0
bnez O1L, .LL12
.LL13:
beqz $r3, .LL14
move $r4, O1H
srl O1L, O1L, $r3
srl O1H, O1H, $r3
subri $r3, $r3, #0x20
sll $r4, $r4, $r3
or O1L, O1L, $r4
.LL14:
sltsi $r15, $r0, #0
beqzs8 .LDret
subri O1H, O1H, #0
beqz O1L, .LL15
subri O1L, O1L, #0
subi45 O1H, #1
.LL15:
.LDret:
move $r0, $r1
move $r1, $r2
.LD999:
pop $lp
ret5 $lp
.LDinfnan:
move O1H, #0x80000000
move O1L, #0
j .LDret
.size __fixunssfdi, .-__fixunssfdi
#endif /* L_fixunssfdi */
#ifdef L_fixunsdfdi
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define O1L $r3
#define O1H $r4
#else
#define P1H $r0
#define P1L $r1
#define O1H $r3
#define O1L $r4
#endif
.text
.align 2
.global __fixunsdfdi
.type __fixunsdfdi, @function
__fixunsdfdi:
push $lp
pushm $r6, $r6
slli $r5, P1H, #1
srli $r5, $r5, #21
slli O1H, P1H, #11
srli $r6, P1L, #21
or O1H, O1H, $r6
slli O1L, P1L, #11
move $r6, #0x80000000
or O1H, O1H, $r6
slti $r15, $r5, #0x43e
beqzs8 .LDnaninf
subri $r2, $r5, #0x43e
.LL18:
move $r6, #0x20
slt $r15, $r2, $r6
bnezs8 .LL19
move O1L, O1H
move O1H, #0
addi $r2, $r2, #0xffffffe0
bnez O1L, .LL18
.LL19:
beqz $r2, .LL20
move P1L, O1H
srl O1L, O1L, $r2
srl O1H, O1H, $r2
subri $r2, $r2, #0x20
sll P1L, P1L, $r2
or O1L, O1L, P1L
.LL20:
sltsi $r15, P1H, #0
beqzs8 .LDret
subri O1H, O1H, #0
beqz O1L, .LL21
subri O1L, O1L, #0
subi45 O1H, #1
.LL21:
.LDret:
move P1L, O1L
move P1H, O1H
.LD999:
popm $r6, $r6
pop $lp
ret5 $lp
.LDnaninf:
move O1H, #0x80000000
move O1L, #0
j .LDret
.size __fixunsdfdi, .-__fixunsdfdi
#endif /* L_fixunsdfdi */
#ifdef L_si_to_sf
.text
.align 2
.global __floatsisf
.type __floatsisf, @function
__floatsisf:
push $lp
move $r4, #0x80000000
and $r2, $r0, $r4
beqz $r0, .Li39
sltsi $r15, $r0, #0
beqzs8 .Li40
subri $r0, $r0, #0
.Li40:
move $r1, #0x9e
#ifdef __NDS32_PERF_EXT__
clz $r3, $r0
#else
pushm $r0, $r2
pushm $r4, $r5
bal __clzsi2
move $r3, $r0
popm $r4, $r5
popm $r0, $r2
#endif
sub $r1, $r1, $r3
sll $r0, $r0, $r3
#ADD($r0, $0x80)
move $r15, #0x80
add $r0, $r0, $r15
slt $r15, $r0, $r15
#ADDC($r1, $0x0)
add $r1, $r1, $r15
srai $r4, $r0, #8
andi $r4, $r4, #1
sub $r0, $r0, $r4
slli $r0, $r0, #1
srli $r0, $r0, #9
slli $r4, $r1, #23
or $r0, $r0, $r4
.Li39:
or $r0, $r0, $r2
.LH999:
pop $lp
ret5 $lp
.size __floatsisf, .-__floatsisf
#endif /* L_si_to_sf */
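/* Behavioral C sketch of __floatsisf above: take |x|, left-justify it
   with a count-leading-zeros (__builtin_clz stands in for the
   __clzsi2 call), then apply the same add-0x80 round-to-nearest-even
   idiom.  The function name is illustrative.

     #include <stdint.h>

     static uint32_t floatsisf_sketch (int32_t x)
     {
       uint32_t sign = (uint32_t) x & 0x80000000u;
       uint32_t m = (uint32_t) x;
       uint32_t e = 0x9e;               // biased exponent of 2^31
       uint64_t t;
       int sh;

       if (x == 0)
         return 0;
       if (x < 0)
         m = 0u - m;                    // |x|; wraps safely for INT32_MIN
       sh = __builtin_clz (m);
       e -= sh;
       m <<= sh;                        // bit 31 now set
       t = (uint64_t) m + 0x80;         // round to nearest...
       t -= (t >> 8) & 1;               // ...ties to even
       if (t >> 32)
         e++;                           // mantissa overflowed into 2.0
       return sign | (e << 23) | ((uint32_t) (t >> 8) & 0x7fffffu);
     }
*/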
#ifdef L_si_to_df
#ifndef __big_endian__
#define O1L $r1
#define O1H $r2
#define O2L $r4
#define O2H $r5
#else
#define O1H $r1
#define O1L $r2
#define O2H $r4
#define O2L $r5
#endif
.text
.align 2
.global __floatsidf
.type __floatsidf, @function
__floatsidf:
push $lp
pushm $r6, $r6
move O1L, #0
move O2H, O1L
move $r3, O1L
move O1H, $r0
beqz O1H, .Li39
sltsi $r15, O1H, #0
beqzs8 .Li40
move O2H, #0x80000000
subri O1H, O1H, #0
beqz O1L, .LL71
subri O1L, O1L, #0
subi45 O1H, #1
.LL71:
.Li40:
move $r3, #0x41e
#ifndef __big_endian__
#ifdef __NDS32_PERF_EXT__
clz $r4, $r2
#else
pushm $r0, $r3
push $r5
move $r0, $r2
bal __clzsi2
move $r4, $r0
pop $r5
popm $r0, $r3
#endif
#else /* __big_endian__ */
#ifdef __NDS32_PERF_EXT__
clz $r5, $r1
#else
pushm $r0, $r4
move $r0, $r1
bal __clzsi2
move $r5, $r0
popm $r0, $r4
#endif
#endif /* __big_endian__ */
sub $r3, $r3, O2L
sll O1H, O1H, O2L
.Li39:
srli O2L, O1L, #11
slli $r6, O1H, #21
or O2L, O2L, $r6
slli $r6, O1H, #1
srli $r6, $r6, #12
or O2H, O2H, $r6
slli $r6, $r3, #20
or O2H, O2H, $r6
move $r0, $r4
move $r1, $r5
.LH999:
popm $r6, $r6
pop $lp
ret5 $lp
.size __floatsidf, .-__floatsidf
#endif /* L_si_to_df */
#ifdef L_floatdisf
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#endif
.text
.align 2
.global __floatdisf
.type __floatdisf, @function
__floatdisf:
push $lp
pushm $r6, $r7
move $r7, #0x80000000
and $r5, P1H, $r7
move P2H, P1H
move P2L, P1L
or $r7, P1H, P1L
beqz $r7, .Li1
sltsi $r15, P1H, #0
beqzs8 .Li2
subri P2H, P2H, #0
beqz P2L, .LL1
subri P2L, P2L, #0
subi45 P2H, #1
.LL1:
.Li2:
move $r4, #0xbe
#NORMd($r2, $r6, P1L)
bnez P2H, .LL2
bnez P2L, .LL3
move $r4, #0
j .LL4
.LL3:
move P2H, P2L
move P2L, #0
move $r6, #32
sub $r4, $r4, $r6
.LL2:
#ifdef __NDS32_PERF_EXT__
clz $r6, P2H
#else
pushm $r0, $r5
move $r0, P2H
bal __clzsi2
move $r6, $r0
popm $r0, $r5
#endif
beqz $r6, .LL4
sub $r4, $r4, $r6
subri P1L, $r6, #32
srl P1L, P2L, P1L
sll P2L, P2L, $r6
sll P2H, P2H, $r6
or P2H, P2H, P1L
.LL4:
#NORMd End
beqz P2L, .Li3
ori P2H, P2H, #1
.Li3:
#ADD(P2H, $0x80)
move $r15, #0x80
add P2H, P2H, $r15
slt $r15, P2H, $r15
#ADDC($r4, $0x0)
add $r4, $r4, $r15
srli $r7, P2H, #8
andi $r7, $r7, #1
sub P2H, P2H, $r7
slli P2H, P2H, #1
srli P2H, P2H, #9
slli $r7, $r4, #23
or P2H, P2H, $r7
.Li1:
or $r0, P2H, $r5
.LA999:
popm $r6, $r7
pop $lp
ret5 $lp
.size __floatdisf, .-__floatdisf
#endif /* L_floatdisf */
#ifdef L_floatdidf
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#define O1L $r5
#define O1H $r6
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#define O1H $r5
#define O1L $r6
#endif
.text
.align 2
.global __floatdidf
.type __floatdidf, @function
__floatdidf:
push $lp
pushm $r6, $r8
move $r4, #0
move $r7, $r4
move P2H, P1H
move P2L, P1L
or $r8, P1H, P1L
beqz $r8, .Li1
move $r4, #0x43e
sltsi $r15, P1H, #0
beqzs8 .Li2
move $r7, #0x80000000
subri P2H, P2H, #0
beqz P2L, .LL1
subri P2L, P2L, #0
subi45 P2H, #1
.LL1:
.Li2:
#NORMd($r2, O1H, O1L)
bnez P2H, .LL2
bnez P2L, .LL3
move $r4, #0
j .LL4
.LL3:
move P2H, P2L
move P2L, #0
move O1H, #32
sub $r4, $r4, O1H
.LL2:
#ifdef __NDS32_PERF_EXT__
clz O1H, P2H
#else /* not __NDS32_PERF_EXT__ */
	/*
	   Replace the clz instruction with a function call.
	   The instruction being replaced is:
	     clz O1H, P2H
	   which is, concretely:
	     little-endian: clz $r6, $r3
	     big-endian:    clz $r5, $r2
	*/
#ifndef __big_endian__
pushm $r0, $r5
move $r0, $r3
bal __clzsi2
move $r6, $r0
popm $r0, $r5
#else
pushm $r0, $r4
move $r0, $r2
bal __clzsi2
move $r5, $r0
popm $r0, $r4
#endif
#endif /* not __NDS32_PERF_EXT__ */
beqz O1H, .LL4
sub $r4, $r4, O1H
subri O1L, O1H, #32
srl O1L, P2L, O1L
sll P2L, P2L, O1H
sll P2H, P2H, O1H
or P2H, P2H, O1L
.LL4:
#NORMd End
#ADD(P2L, $0x400)
move $r15, #0x400
add P2L, P2L, $r15
slt $r15, P2L, $r15
#ADDCC(P2H, $0x0)
beqzs8 .LL7
add P2H, P2H, $r15
slt $r15, P2H, $r15
.LL7:
#ADDC($r4, $0x0)
add $r4, $r4, $r15
srli $r8, P2L, #11
andi $r8, $r8, #1
sub P2L, P2L, $r8
.Li1:
srli O1L, P2L, #11
slli $r8, P2H, #21
or O1L, O1L, $r8
slli O1H, P2H, #1
srli O1H, O1H, #12
slli $r8, $r4, #20
or O1H, O1H, $r8
or O1H, O1H, $r7
move P1L, O1L
move P1H, O1H
.LA999:
popm $r6, $r8
pop $lp
ret5 $lp
.size __floatdidf, .-__floatdidf
#endif /* L_floatdidf */
#ifdef L_floatunsisf
.text
.align 2
.global __floatunsisf
.type __floatunsisf, @function
__floatunsisf:
push $lp
beqz $r0, .Li41
move $r2, #0x9e
#ifdef __NDS32_PERF_EXT__
clz $r1, $r0
#else
push $r0
pushm $r2, $r5
bal __clzsi2
move $r1, $r0
popm $r2, $r5
pop $r0
#endif
sub $r2, $r2, $r1
sll $r0, $r0, $r1
#ADD($r0, $0x80)
move $r15, #0x80
add $r0, $r0, $r15
slt $r15, $r0, $r15
#ADDC($r2, $0x0)
add $r2, $r2, $r15
srli $r3, $r0, #8
andi $r3, $r3, #1
sub $r0, $r0, $r3
slli $r0, $r0, #1
srli $r0, $r0, #9
slli $r3, $r2, #23
or $r0, $r0, $r3
.Li41:
.LI999:
pop $lp
ret5 $lp
.size __floatunsisf, .-__floatunsisf
#endif /* L_floatunsisf */
#ifdef L_floatunsidf
#ifndef __big_endian__
#define O1L $r1
#define O1H $r2
#define O2L $r4
#define O2H $r5
#else
#define O1H $r1
#define O1L $r2
#define O2H $r4
#define O2L $r5
#endif
.text
.align 2
.global __floatunsidf
.type __floatunsidf, @function
__floatunsidf:
push $lp
pushm $r6, $r6
move O1L, #0
move $r3, O1L
move O1H, $r0
beqz O1H, .Li41
move $r3, #0x41e
#ifndef __big_endian__
#ifdef __NDS32_PERF_EXT__
clz $r5, $r2
#else
pushm $r0, $r4
move $r0, $r2
bal __clzsi2
move $r5, $r0
popm $r0, $r4
#endif
#else /* __big_endian__ */
#ifdef __NDS32_PERF_EXT__
clz $r4, $r1
#else
pushm $r0, $r3
push $r5
move $r0, $r1
bal __clzsi2
move $r4, $r0
pop $r5
popm $r0, $r3
#endif
#endif /* __big_endian__ */
sub $r3, $r3, O2H
sll O1H, O1H, O2H
.Li41:
srli O2L, O1L, #11
slli $r6, O1H, #21
or O2L, O2L, $r6
slli O2H, O1H, #1
srli O2H, O2H, #12
slli $r6, $r3, #20
or O2H, O2H, $r6
move $r0, $r4
move $r1, $r5
.LI999:
popm $r6, $r6
pop $lp
ret5 $lp
.size __floatunsidf, .-__floatunsidf
#endif /* L_floatunsidf */
#ifdef L_floatundisf
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#endif
.text
.align 2
.global __floatundisf
.type __floatundisf, @function
__floatundisf:
push $lp
pushm $r6, $r6
move P2H, P1H
move P2L, P1L
or $r6, P1H, P1L
beqz $r6, .Li4
move $r4, #0xbe
#NORMd($r2, $r5, P1L)
bnez P2H, .LL5
bnez P2L, .LL6
move $r4, #0
j .LL7
.LL6:
move P2H, P2L
move P2L, #0
move $r5, #32
sub $r4, $r4, $r5
.LL5:
#ifdef __NDS32_PERF_EXT__
clz $r5, P2H
#else
pushm $r0, $r4
move $r0, P2H
bal __clzsi2
move $r5, $r0
popm $r0, $r4
#endif
beqz $r5, .LL7
sub $r4, $r4, $r5
subri P1L, $r5, #32
srl P1L, P2L, P1L
sll P2L, P2L, $r5
sll P2H, P2H, $r5
or P2H, P2H, P1L
.LL7:
#NORMd End
beqz P2L, .Li5
ori P2H, P2H, #1
.Li5:
#ADD(P2H, $0x80)
move $r15, #0x80
add P2H, P2H, $r15
slt $r15, P2H, $r15
#ADDC($r4, $0x0)
add $r4, $r4, $r15
srli $r6, P2H, #8
andi $r6, $r6, #1
sub P2H, P2H, $r6
slli P2H, P2H, #1
srli P2H, P2H, #9
slli $r6, $r4, #23
or P2H, P2H, $r6
.Li4:
move $r0, P2H
.LB999:
popm $r6, $r6
pop $lp
ret5 $lp
.size __floatundisf, .-__floatundisf
#endif /* L_floatundisf */
#ifdef L_floatundidf
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#define O1L $r5
#define O1H $r6
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#define O1H $r5
#define O1L $r6
#endif
.text
.align 2
.global __floatundidf
.type __floatundidf, @function
__floatundidf:
push $lp
pushm $r6, $r7
move $r4, #0
move P2H, P1H
move P2L, P1L
or $r7, P1H, P1L
beqz $r7, .Li3
move $r4, #0x43e
#NORMd($r2, O1H, O1L)
bnez P2H, .LL8
bnez P2L, .LL9
move $r4, #0
j .LL10
.LL9:
move P2H, P2L
move P2L, #0
move O1H, #32
sub $r4, $r4, O1H
.LL8:
#ifdef __NDS32_PERF_EXT__
clz O1H, P2H
#else /* not __NDS32_PERF_EXT__ */
	/*
	   Replace the clz instruction with a function call.
	   The instruction being replaced is:
	     clz O1H, P2H
	   which is, concretely:
	     little-endian: clz $r6, $r3
	     big-endian:    clz $r5, $r2
	*/
#ifndef __big_endian__
pushm $r0, $r5
move $r0, $r3
bal __clzsi2
move $r6, $r0
popm $r0, $r5
#else
pushm $r0, $r4
move $r0, $r2
bal __clzsi2
move $r5, $r0
popm $r0, $r4
#endif
#endif /* not __NDS32_PERF_EXT__ */
beqz O1H, .LL10
sub $r4, $r4, O1H
subri O1L, O1H, #32
srl O1L, P2L, O1L
sll P2L, P2L, O1H
sll P2H, P2H, O1H
or P2H, P2H, O1L
.LL10:
#NORMd End
#ADD(P2L, $0x400)
move $r15, #0x400
add P2L, P2L, $r15
slt $r15, P2L, $r15
#ADDCC(P2H, $0x0)
beqzs8 .LL13
add P2H, P2H, $r15
slt $r15, P2H, $r15
.LL13:
#ADDC($r4, $0x0)
add $r4, $r4, $r15
srli $r7, P2L, #11
andi $r7, $r7, #1
sub P2L, P2L, $r7
.Li3:
srli O1L, P2L, #11
slli $r7, P2H, #21
or O1L, O1L, $r7
slli O1H, P2H, #1
srli O1H, O1H, #12
slli $r7, $r4, #20
or O1H, O1H, $r7
move P1L, O1L
move P1H, O1H
.LB999:
popm $r6, $r7
pop $lp
ret5 $lp
.size __floatundidf, .-__floatundidf
#endif /* L_floatundidf */
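/* Behavioral C sketch of __floatundidf above: normalize with a
   count-leading-zeros (__builtin_clzll stands in for the __clzsi2
   based NORMd sequence), then round the 64-bit significand to 53
   bits with the add-0x400 nearest-even idiom.  Illustrative name.

     #include <stdint.h>

     static uint64_t floatundidf_sketch (uint64_t x)
     {
       uint64_t e, m, t;
       int sh;

       if (x == 0)
         return 0;
       sh = __builtin_clzll (x);
       e = 0x43e - sh;                  // 0x43e = 1023 + 63
       m = x << sh;                     // bit 63 set
       t = m + 0x400;                   // round to nearest...
       if (t < m)
         e++;                           // rounding carried out the top
       t -= (t >> 11) & 1;              // ...ties to even
       return (e << 52) | ((t >> 11) & 0xfffffffffffffull);
     }
*/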
#ifdef L_compare_sf
.text
.align 2
.global __cmpsf2
.type __cmpsf2, @function
__cmpsf2:
.global __eqsf2
.type __eqsf2, @function
__eqsf2:
.global __ltsf2
.type __ltsf2, @function
__ltsf2:
.global __lesf2
.type __lesf2, @function
__lesf2:
.global __nesf2
.type __nesf2, @function
__nesf2:
move $r4, #1
j .LA
.global __gesf2
.type __gesf2, @function
__gesf2:
.global __gtsf2
.type __gtsf2, @function
__gtsf2:
move $r4, #-1
.LA:
push $lp
slli $r2, $r0, #1
slli $r3, $r1, #1
or $r5, $r2, $r3
beqz $r5, .LMequ
move $r5, #0xff000000
slt $r15, $r5, $r2
bnezs8 .LMnan
slt $r15, $r5, $r3
bnezs8 .LMnan
srli $r2, $r2, #1
sltsi $r15, $r0, #0
beqzs8 .Li48
subri $r2, $r2, #0
.Li48:
srli $r3, $r3, #1
sltsi $r15, $r1, #0
beqzs8 .Li49
subri $r3, $r3, #0
.Li49:
slts $r15, $r2, $r3
beqzs8 .Li50
move $r0, #-1
j .LM999
.Li50:
slts $r15, $r3, $r2
beqzs8 .LMequ
move $r0, #1
j .LM999
.LMequ:
move $r0, #0
.LM999:
pop $lp
ret5 $lp
.LMnan:
move $r0, $r4
j .LM999
.size __cmpsf2, .-__cmpsf2
.size __eqsf2, .-__eqsf2
.size __ltsf2, .-__ltsf2
.size __lesf2, .-__lesf2
.size __nesf2, .-__nesf2
.size __gesf2, .-__gesf2
.size __gtsf2, .-__gtsf2
#endif /* L_compare_sf */
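/* Behavioral C sketch of the comparison helpers above: apart from
   NaNs, IEEE floats order correctly as integers once sign-magnitude
   is mapped to two's complement by negating negative values.  The
   nan_result parameter models $r4: +1 for eq/ne/lt/le/cmp and -1 for
   ge/gt, so NaN operands fail either test against zero.  Names are
   illustrative.

     #include <stdint.h>

     static int cmpsf_sketch (uint32_t a, uint32_t b, int nan_result)
     {
       if (((a << 1) | (b << 1)) == 0)
         return 0;                         // +-0 == +-0
       if ((a << 1) > 0xff000000u || (b << 1) > 0xff000000u)
         return nan_result;                // unordered
       int32_t ka = (int32_t) (a & 0x7fffffffu);
       int32_t kb = (int32_t) (b & 0x7fffffffu);
       if (a & 0x80000000u) ka = -ka;      // sign-magnitude -> two's compl.
       if (b & 0x80000000u) kb = -kb;
       return (ka < kb) ? -1 : (ka > kb) ? 1 : 0;
     }
*/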
#ifdef L_compare_df
#ifdef __big_endian__
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#else
#define P1H $r1
#define P1L $r0
#define P2H $r3
#define P2L $r2
#endif
.align 2
.globl __gtdf2
.globl __gedf2
.globl __ltdf2
.globl __ledf2
.globl __eqdf2
.globl __nedf2
.globl __cmpdf2
.type __gtdf2, @function
.type __gedf2, @function
.type __ltdf2, @function
.type __ledf2, @function
.type __eqdf2, @function
.type __nedf2, @function
.type __cmpdf2, @function
__gtdf2:
__gedf2:
movi $r4, -1
b .L1
__ltdf2:
__ledf2:
__cmpdf2:
__nedf2:
__eqdf2:
movi $r4, 1
.L1:
#if defined (__NDS32_ISA_V3M__)
push25 $r10, 0
#else
smw.adm $r6, [$sp], $r9, 0
#endif
sethi $r5, 0x7ff00
and $r6, P1H, $r5 ! r6=aExp
and $r7, P2H, $r5 ! r7=bExp
slli $r8, P1H, 12 ! r8=aSig0
slli $r9, P2H, 12 ! r9=bSig0
beq $r6, $r5, .L11 ! aExp==0x7ff
beq $r7, $r5, .L12 ! bExp==0x7ff
.L2:
slli $ta, P1H, 1 ! ta=ahigh<<1
or $ta, P1L, $ta !
xor $r5, P1H, P2H ! r5=ahigh^bhigh
beqz $ta, .L3 ! if(ahigh<<1)==0,go .L3
!-------------------------------
! (ahigh<<1)!=0 || (bhigh<<1)!=0
!-------------------------------
.L4:
beqz $r5, .L5 ! ahigh==bhigh, go .L5
!--------------------
! a != b
!--------------------
.L6:
bltz $r5, .L7 ! if(aSign!=bSign), go .L7
!--------------------
! aSign==bSign
!--------------------
slt $ta, $r6, $r7 ! ta=(aExp<bExp)
bne $r6, $r7, .L8 ! if(aExp!=bExp),go .L8
slt $ta, $r8, $r9 ! ta=(aSig0<bSig0)
bne $r8, $r9, .L8 ! if(aSig0!=bSig0),go .L8
slt $ta, P1L, P2L ! ta=(aSig1<bSig1)
.L8:
beqz $ta, .L10 ! if(|a|>|b|), go .L10
nor $r0, P2H, P2H ! if(|a|<|b|),return (~yh)
.L14:
#if defined (__NDS32_ISA_V3M__)
pop25 $r10, 0
#else
lmw.bim $r6, [$sp], $r9, 0
ret
#endif
.L10:
ori $r0, P2H, 1 ! return (yh|1)
b .L14
!--------------------
! (ahigh<<1)=0
!--------------------
.L3:
slli $ta, P2H, 1 ! ta=bhigh<<1
or $ta, P2L, $ta !
bnez $ta, .L4 ! ta=(bhigh<<1)!=0,go .L4
.L5:
xor $ta, P1L, P2L ! ta=alow^blow
bnez $ta, .L6 ! alow!=blow,go .L6
movi $r0, 0 ! a==b, return 0
b .L14
!--------------------
! aExp=0x7ff;
!--------------------
.L11:
or P1L, P1L, $r8 ! x1=(aSig0|aSig1)
	bnez	P1L, .L13	! if (a == NaN), go .L13
xor $ta, $r7, $r5 ! ta=(bExp^0x7ff)
bnez $ta, .L2 ! if(bExp!=0x7ff), go .L2
!--------------------
! bExp=0x7ff;
!--------------------
.L12:
or $ta, P2L, $r9 ! ta=(bSig0|bSig1)
	beqz	$ta, .L2	! if (b != NaN), go .L2
.L13:
move $r0, $r4
b .L14
!--------------------
! aSign!=bSign
!--------------------
.L7:
ori $r0, P1H, 1 ! if(aSign!=bSign), return (ahigh|1)
b .L14
.size __gtdf2, .-__gtdf2
.size __gedf2, .-__gedf2
.size __ltdf2, .-__ltdf2
.size __ledf2, .-__ledf2
.size __eqdf2, .-__eqdf2
.size __nedf2, .-__nedf2
.size __cmpdf2, .-__cmpdf2
#endif /* L_compare_df */
#ifdef L_unord_sf
.text
.align 2
.global __unordsf2
.type __unordsf2, @function
__unordsf2:
push $lp
slli $r2, $r0, #1
move $r3, #0xff000000
slt $r15, $r3, $r2
beqzs8 .Li52
move $r0, #1
j .LP999
.Li52:
slli $r2, $r1, #1
move $r3, #0xff000000
slt $r15, $r3, $r2
beqzs8 .Li53
move $r0, #1
j .LP999
.Li53:
move $r0, #0
.LP999:
pop $lp
ret5 $lp
.size __unordsf2, .-__unordsf2
#endif /* L_unord_sf */
#ifdef L_unord_df
#ifndef __big_endian__
#define P1L $r0
#define P1H $r1
#define P2L $r2
#define P2H $r3
#else
#define P1H $r0
#define P1L $r1
#define P2H $r2
#define P2L $r3
#endif
.text
.align 2
.global __unorddf2
.type __unorddf2, @function
__unorddf2:
push $lp
slli $r4, P1H, #1
beqz P1L, .Li66
addi $r4, $r4, #1
.Li66:
move $r5, #0xffe00000
slt $r15, $r5, $r4
beqzs8 .Li67
move $r0, #1
j .LR999
.Li67:
slli $r4, P2H, #1
beqz P2L, .Li68
addi $r4, $r4, #1
.Li68:
move $r5, #0xffe00000
slt $r15, $r5, $r4
beqzs8 .Li69
move $r0, #1
j .LR999
.Li69:
move $r0, #0
.LR999:
pop $lp
ret5 $lp
.size __unorddf2, .-__unorddf2
#endif /* L_unord_df */
/* ------------------------------------------- */
/* DPBIT floating point operations for libgcc */
/* ------------------------------------------- */
/* plugin-libc/libgcc/config/nds32/lib1asmsrc-newlib.S */
/* newlib libgcc routines of Andes NDS32 cpu for GNU compiler
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Andes Technology Corporation.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.section .mdebug.abi_nds32
.previous
#ifdef L_divsi3
.text
.align 2
.globl __divsi3
.type __divsi3, @function
__divsi3:
movi $r5, 0 ! res = 0
xor $r4, $r0, $r1 ! neg
bltz $r0, .L1
bltz $r1, .L2
.L3:
movi $r2, 1 ! bit = 1
slt $r3, $r1, $r0 ! test if dividend is smaller than or equal to divisor
beqz $r3, .L5
bltz $r1, .L5
.L4:
slli $r2, $r2, 1
beqz $r2, .L6
slli $r1, $r1, 1
slt $r3, $r1, $r0
beqz $r3, .L5
bgez $r1, .L4
.L5:
slt $r3, $r0, $r1
bnez $r3, .L8
sub $r0, $r0, $r1
or $r5, $r5, $r2
.L8:
srli $r1, $r1, 1
srli $r2, $r2, 1
bnez $r2, .L5
.L6:
bgez $r4, .L7
subri $r5, $r5, 0 ! negate if $r4 < 0
.L7:
move $r0, $r5
ret
.L1:
subri $r0, $r0, 0 ! change neg to pos
bgez $r1, .L3
.L2:
subri $r1, $r1, 0 ! change neg to pos
j .L3
.size __divsi3, .-__divsi3
#endif /* L_divsi3 */
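/* A C sketch of the shift-and-subtract scheme shared by __divsi3,
   __modsi3, __udivsi3, and __umodsi3 in this file: scale the divisor
   up under the dividend, then walk it back down, subtracting and
   setting quotient bits wherever it still fits.  Division by zero
   falls out through the bit-overflow guard and yields 0, as in the
   assembly.  Names are illustrative.

     #include <stdint.h>

     static uint32_t udivsi3_sketch (uint32_t num, uint32_t den,
                                     uint32_t *rem)
     {
       uint32_t bit = 1, res = 0;
       while (den < num && !(den & 0x80000000u))
         {
           bit <<= 1;                 // the beqz $r2, .L6 guard:
           if (bit == 0)              // den was 0, quotient stays 0
             break;
           den <<= 1;
         }
       while (bit != 0)               // restoring division
         {
           if (num >= den)
             {
               num -= den;            // remainder shrinks...
               res |= bit;            // ...and this quotient bit is set
             }
           den >>= 1;
           bit >>= 1;
         }
       if (rem)
         *rem = num;                  // what __umodsi3/__modsi3 return
       return res;
     }

     // __divsi3 wraps the unsigned core, fixing the sign afterwards.
     static int32_t divsi3_sketch (int32_t a, int32_t b)
     {
       uint32_t ua = a < 0 ? 0u - (uint32_t) a : (uint32_t) a;
       uint32_t ub = b < 0 ? 0u - (uint32_t) b : (uint32_t) b;
       uint32_t q = udivsi3_sketch (ua, ub, 0);
       return ((a ^ b) < 0) ? (int32_t) (0u - q) : (int32_t) q;
     }
*/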
#ifdef L_modsi3
.text
.align 2
.globl __modsi3
.type __modsi3, @function
__modsi3:
movi $r5, 0 ! res = 0
move $r4, $r0 ! neg
bltz $r0, .L1
bltz $r1, .L2
.L3:
movi $r2, 1 ! bit = 1
slt $r3, $r1, $r0 ! test if dividend is smaller than or equal to divisor
beqz $r3, .L5
bltz $r1, .L5
.L4:
slli $r2, $r2, 1
beqz $r2, .L6
slli $r1, $r1, 1
slt $r3, $r1, $r0
beqz $r3, .L5
bgez $r1, .L4
.L5:
slt $r3, $r0, $r1
bnez $r3, .L8
sub $r0, $r0, $r1
or $r5, $r5, $r2
.L8:
srli $r1, $r1, 1
srli $r2, $r2, 1
bnez $r2, .L5
.L6:
bgez $r4, .L7
subri $r0, $r0, 0 ! negate if $r4 < 0
.L7:
ret
.L1:
subri $r0, $r0, 0 ! change neg to pos
bgez $r1, .L3
.L2:
subri $r1, $r1, 0 ! change neg to pos
j .L3
.size __modsi3, .-__modsi3
#endif /* L_modsi3 */
#ifdef L_udivsi3
.text
.align 2
.globl __udivsi3
.type __udivsi3, @function
__udivsi3:
movi $r5, 0 ! res = 0
movi $r2, 1 ! bit = 1
slt $r3, $r1, $r0 ! test if dividend is smaller than or equal to divisor
beqz $r3, .L5
bltz $r1, .L5
.L4:
slli $r2, $r2, 1
beqz $r2, .L6
slli $r1, $r1, 1
slt $r3, $r1, $r0
beqz $r3, .L5
bgez $r1, .L4
.L5:
slt $r3, $r0, $r1
bnez $r3, .L8
sub $r0, $r0, $r1
or $r5, $r5, $r2
.L8:
srli $r1, $r1, 1
srli $r2, $r2, 1
bnez $r2, .L5
.L6:
move $r0, $r5
ret
.size __udivsi3, .-__udivsi3
#endif /* L_udivsi3 */
#ifdef L_umodsi3
.text
.align 2
.globl __umodsi3
.type __umodsi3, @function
__umodsi3:
movi $r5, 0 ! res = 0
movi $r2, 1 ! bit = 1
slt $r3, $r1, $r0 ! test if dividend is smaller than or equal to divisor
beqz $r3, .L5
bltz $r1, .L5
.L4:
slli $r2, $r2, 1
beqz $r2, .L6
slli $r1, $r1, 1
slt $r3, $r1, $r0
beqz $r3, .L5
bgez $r1, .L4
.L5:
slt $r3, $r0, $r1
bnez $r3, .L8
sub $r0, $r0, $r1
or $r5, $r5, $r2
.L8:
srli $r1, $r1, 1
srli $r2, $r2, 1
bnez $r2, .L5
.L6:
ret
.size __umodsi3, .-__umodsi3
#endif /* L_umodsi3 */
/* ----------------------------------------------------------- */
/* plugin-libc/libgcc/config/nds32/crtzero.S */
/* The startup code sample of Andes NDS32 cpu for GNU compiler
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Andes Technology Corporation.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
!!==============================================================================
!!
!! crtzero.S
!!
!! This is JUST A SAMPLE of nds32 startup code !!
!! You can refer to this content and implement
!! the actual one in newlib/mculib.
!!
!!==============================================================================
!!------------------------------------------------------------------------------
!! Jump to start up code
!!------------------------------------------------------------------------------
.section .nds32_init, "ax"
j _start
!!------------------------------------------------------------------------------
!! Startup code implementation
!!------------------------------------------------------------------------------
.section .text
.global _start
.weak _SDA_BASE_
.weak _FP_BASE_
.align 2
.func _start
.type _start, @function
_start:
.L_fp_gp_lp_init:
la $fp, _FP_BASE_ ! init $fp
la $gp, _SDA_BASE_ ! init $gp for small data access
movi $lp, 0 ! init $lp
.L_stack_init:
la $sp, _stack ! init $sp
	movi	$r0, -8			! align $sp to 8 bytes (use 0xfffffff8)
	and	$sp, $sp, $r0		! align $sp to 8 bytes (mask out the lower 3 bits)
.L_bss_init:
	! clear BSS; this can be 4 times faster if the data is 4-byte aligned
	! (if so, use swi.p instead of sbi.p)
	! the related symbols are defined in the linker script
la $r0, _edata ! get the starting addr of bss
la $r2, _end ! get ending addr of bss
beq $r0, $r2, .L_call_main ! if no bss just do nothing
movi $r1, 0 ! should be cleared to 0
.L_clear_bss:
sbi.p $r1, [$r0], 1 ! Set 0 to bss
bne $r0, $r2, .L_clear_bss ! Still bytes left to set
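/* What .L_bss_init above amounts to in C (a sketch; _edata and _end
   come from the linker script):

     extern char _edata[], _end[];

     static void clear_bss (void)
     {
       for (char *p = _edata; p != _end; ++p)
         *p = 0;   // byte store, matching sbi.p above
     }
*/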
!.L_stack_heap_check:
! la $r0, _end ! init heap_end
! s.w $r0, heap_end ! save it
!.L_init_argc_argv:
! ! argc/argv initialization if necessary; default implementation is in crt1.o
! la $r9, _arg_init ! load address of _arg_init?
! beqz $r9, .L4 ! has _arg_init? no, go check main()
! addi $sp, $sp, -512 ! allocate space for command line + arguments
! move $r6, $sp ! r6 = buffer addr of cmd line
! move $r0, $r6 ! r0 = buffer addr of cmd line
! syscall 6002 ! get cmd line
! move $r0, $r6 ! r0 = buffer addr of cmd line
! addi $r1, $r6, 256 ! r1 = argv
! jral $r9 ! init argc/argv
! addi $r1, $r6, 256 ! r1 = argv
.L_call_main:
! call main() if main() is provided
la $r15, main ! load address of main
jral $r15 ! call main
.L_terminate_program:
syscall 0x1 ! use syscall 0x1 to terminate program
.size _start, .-_start
.end
!! ------------------------------------------------------------------------
# plugin-libc/libgcc/config/ia64/crtn.S
# Copyright (C) 2000-2022 Free Software Foundation, Inc.
# Written By Timothy Wall
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
.section ".init"
;;
mov ar.pfs = r34
mov b0 = r33
.restore sp
mov r12 = r35
br.ret.sptk.many b0
.section ".fini"
;;
mov ar.pfs = r34
mov b0 = r33
.restore sp
mov r12 = r35
br.ret.sptk.many b0
# end of crtn.S
# plugin-libc/libgcc/config/ia64/crti.S
# Copyright (C) 2000-2022 Free Software Foundation, Inc.
# Written By Timothy Wall
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
.section ".init"
.align 16
.global _init
_init:
.prologue 14, 33
.save ar.pfs, r34
alloc r34 = ar.pfs, 0, 4, 0, 0
.vframe r35
mov r35 = r12
.save rp, r33
mov r33 = b0
.body
.section ".fini"
.align 16
.global _fini
_fini:
.prologue 14, 33
.save ar.pfs, r34
alloc r34 = ar.pfs, 0, 4, 0, 0
.vframe r35
mov r35 = r12
.save rp, r33
mov r33 = b0
.body
# end of crti.S
/* plugin-libc/libgcc/config/ia64/vms-crtinit.S */
/* Copyright (C) 2009-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.global LIB$INITIALIZE#
/* plugin-libc/libgcc/config/ia64/crtend.S */
/* Copyright (C) 2000-2022 Free Software Foundation, Inc.
Contributed by Jes Sorensen, <Jes.Sorensen@cern.ch>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "auto-host.h"
.section .ctors,"aw","progbits"
.align 8
__CTOR_END__:
data8 0
.section .dtors,"aw","progbits"
.align 8
__DTOR_END__:
data8 0
#if HAVE_INITFINI_ARRAY_SUPPORT
.global __do_global_ctors_aux
.hidden __do_global_ctors_aux
#else /* !HAVE_INITFINI_ARRAY_SUPPORT */
/*
* Fragment of the ELF _init routine that invokes our dtor cleanup.
*
* We make the call by indirection, because in large programs the
* .fini and .init sections are not in range of the destination, and
* we cannot allow the linker to insert a stub at the end of this
* fragment of the _fini function. Further, Itanium does not implement
* the long branch instructions, and we do not wish every program to
* trap to the kernel for emulation.
*
* Note that we require __do_global_ctors_aux to preserve the GP,
* so that the next fragment in .fini gets the right value.
*/
.section .init,"ax","progbits"
{ .mlx
movl r2 = @pcrel(__do_global_ctors_aux - 16)
}
{ .mii
mov r3 = ip
;;
add r2 = r2, r3
;;
}
{ .mib
mov b6 = r2
br.call.sptk.many b0 = b6
;;
}
#endif /* !HAVE_INITFINI_ARRAY_SUPPORT */
.text
.align 32
.proc __do_global_ctors_aux
__do_global_ctors_aux:
.prologue
/*
	for (loc0 = __CTOR_END__ - 1; *loc0 != -1; --loc0)
	  (*loc0) ();
*/
.save ar.pfs, r34
alloc loc2 = ar.pfs, 0, 5, 0, 0
movl loc0 = @gprel(__CTOR_END__ - 8)
;;
add loc0 = loc0, gp
;;
ld8 loc3 = [loc0], -8
.save rp, loc1
mov loc1 = rp
.body
;;
cmp.eq p6, p0 = -1, loc3
mov loc4 = gp
(p6) br.cond.spnt.few .exit
.loop: ld8 r15 = [loc3], 8
;;
ld8 gp = [loc3]
mov b6 = r15
ld8 loc3 = [loc0], -8
nop 0
br.call.sptk.many rp = b6
;;
cmp.ne p6, p0 = -1, loc3
nop 0
(p6) br.cond.sptk.few .loop
.exit: mov gp = loc3
mov rp = loc1
mov ar.pfs = loc2
br.ret.sptk.many rp
.endp __do_global_ctors_aux
/* plugin-libc/libgcc/config/ia64/lib1funcs.S */
/* Copyright (C) 2000-2022 Free Software Foundation, Inc.
Contributed by James E. Wilson <wilson@cygnus.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifdef L__divxf3
// Compute a 80-bit IEEE double-extended quotient.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// farg0 holds the dividend. farg1 holds the divisor.
//
// __divtf3 is an alternate symbol name for backward compatibility.
.text
.align 16
.global __divxf3
.proc __divxf3
__divxf3:
#ifdef SHARED
.global __divtf3
__divtf3:
#endif
cmp.eq p7, p0 = r0, r0
frcpa.s0 f10, p6 = farg0, farg1
;;
(p6) cmp.ne p7, p0 = r0, r0
.pred.rel.mutex p6, p7
(p6) fnma.s1 f11 = farg1, f10, f1
(p6) fma.s1 f12 = farg0, f10, f0
;;
(p6) fma.s1 f13 = f11, f11, f0
(p6) fma.s1 f14 = f11, f11, f11
;;
(p6) fma.s1 f11 = f13, f13, f11
(p6) fma.s1 f13 = f14, f10, f10
;;
(p6) fma.s1 f10 = f13, f11, f10
(p6) fnma.s1 f11 = farg1, f12, farg0
;;
(p6) fma.s1 f11 = f11, f10, f12
(p6) fnma.s1 f12 = farg1, f10, f1
;;
(p6) fma.s1 f10 = f12, f10, f10
(p6) fnma.s1 f12 = farg1, f11, farg0
;;
(p6) fma.s0 fret0 = f12, f10, f11
(p7) mov fret0 = f10
br.ret.sptk rp
.endp __divxf3
#endif
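/* Sketch of the Newton-Raphson scheme behind the (p6) bundles above
   and in the division routines below: starting from the frcpa seed
   y0 ~ 1/b, each step computes e = 1 - b*y with one fma and folds it
   back in, squaring the error, so a few steps refine the ~8.8-bit
   seed past the target precision.  The unrolled code schedules the
   same recurrences for minimum latency (reusing e*e to merge steps);
   this loop form, in plain C doubles, is only illustrative.

     #include <math.h>

     static double nr_div_sketch (double a, double b, double y0)
     {
       double y = y0;                    // reciprocal estimate
       double q = a * y0;                // quotient estimate
       for (int i = 0; i < 2; i++)
         {
           double e = fma (-b, y, 1.0);  // e = 1 - b*y, one rounding
           q = fma (e, q, q);            // q += e*q
           y = fma (e, y, y);            // y += e*y
         }
       double r = fma (-b, q, a);        // remainder of the estimate
       return fma (r, y, q);             // final correction + rounding
     }
*/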
#ifdef L__divdf3
// Compute a 64-bit IEEE double quotient.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// farg0 holds the dividend. farg1 holds the divisor.
.text
.align 16
.global __divdf3
.proc __divdf3
__divdf3:
cmp.eq p7, p0 = r0, r0
frcpa.s0 f10, p6 = farg0, farg1
;;
(p6) cmp.ne p7, p0 = r0, r0
.pred.rel.mutex p6, p7
(p6) fmpy.s1 f11 = farg0, f10
(p6) fnma.s1 f12 = farg1, f10, f1
;;
(p6) fma.s1 f11 = f12, f11, f11
(p6) fmpy.s1 f13 = f12, f12
;;
(p6) fma.s1 f10 = f12, f10, f10
(p6) fma.s1 f11 = f13, f11, f11
;;
(p6) fmpy.s1 f12 = f13, f13
(p6) fma.s1 f10 = f13, f10, f10
;;
(p6) fma.d.s1 f11 = f12, f11, f11
(p6) fma.s1 f10 = f12, f10, f10
;;
(p6) fnma.d.s1 f8 = farg1, f11, farg0
;;
(p6) fma.d fret0 = f8, f10, f11
(p7) mov fret0 = f10
br.ret.sptk rp
;;
.endp __divdf3
#endif
#ifdef L__divsf3
// Compute a 32-bit IEEE float quotient.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// farg0 holds the dividend. farg1 holds the divisor.
.text
.align 16
.global __divsf3
.proc __divsf3
__divsf3:
cmp.eq p7, p0 = r0, r0
frcpa.s0 f10, p6 = farg0, farg1
;;
(p6) cmp.ne p7, p0 = r0, r0
.pred.rel.mutex p6, p7
(p6) fmpy.s1 f8 = farg0, f10
(p6) fnma.s1 f9 = farg1, f10, f1
;;
(p6) fma.s1 f8 = f9, f8, f8
(p6) fmpy.s1 f9 = f9, f9
;;
(p6) fma.s1 f8 = f9, f8, f8
(p6) fmpy.s1 f9 = f9, f9
;;
(p6) fma.d.s1 f10 = f9, f8, f8
;;
(p6) fnorm.s.s0 fret0 = f10
(p7) mov fret0 = f10
br.ret.sptk rp
;;
.endp __divsf3
#endif
#ifdef L__divdi3
// Compute a 64-bit integer quotient.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// in0 holds the dividend. in1 holds the divisor.
.text
.align 16
.global __divdi3
.proc __divdi3
__divdi3:
.regstk 2,0,0,0
// Transfer inputs to FP registers.
setf.sig f8 = in0
setf.sig f9 = in1
// Check divide by zero.
cmp.ne.unc p0,p7=0,in1
;;
// Convert the inputs to FP, so that they won't be treated as unsigned.
fcvt.xf f8 = f8
fcvt.xf f9 = f9
(p7) break 1
;;
// Compute the reciprocal approximation.
frcpa.s1 f10, p6 = f8, f9
;;
// 3 Newton-Raphson iterations.
(p6) fnma.s1 f11 = f9, f10, f1
(p6) fmpy.s1 f12 = f8, f10
;;
(p6) fmpy.s1 f13 = f11, f11
(p6) fma.s1 f12 = f11, f12, f12
;;
(p6) fma.s1 f10 = f11, f10, f10
(p6) fma.s1 f11 = f13, f12, f12
;;
(p6) fma.s1 f10 = f13, f10, f10
(p6) fnma.s1 f12 = f9, f11, f8
;;
(p6) fma.s1 f10 = f12, f10, f11
;;
// Round quotient to an integer.
fcvt.fx.trunc.s1 f10 = f10
;;
// Transfer result to GP registers.
getf.sig ret0 = f10
br.ret.sptk rp
;;
.endp __divdi3
#endif
#ifdef L__moddi3
// Compute a 64-bit integer modulus.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// in0 holds the dividend (a). in1 holds the divisor (b).
.text
.align 16
.global __moddi3
.proc __moddi3
__moddi3:
.regstk 2,0,0,0
// Transfer inputs to FP registers.
setf.sig f14 = in0
setf.sig f9 = in1
// Check divide by zero.
cmp.ne.unc p0,p7=0,in1
;;
// Convert the inputs to FP, so that they won't be treated as unsigned.
fcvt.xf f8 = f14
fcvt.xf f9 = f9
(p7) break 1
;;
// Compute the reciprocal approximation.
frcpa.s1 f10, p6 = f8, f9
;;
// 3 Newton-Raphson iterations.
(p6) fmpy.s1 f12 = f8, f10
(p6) fnma.s1 f11 = f9, f10, f1
;;
(p6) fma.s1 f12 = f11, f12, f12
(p6) fmpy.s1 f13 = f11, f11
;;
(p6) fma.s1 f10 = f11, f10, f10
(p6) fma.s1 f11 = f13, f12, f12
;;
sub in1 = r0, in1
(p6) fma.s1 f10 = f13, f10, f10
(p6) fnma.s1 f12 = f9, f11, f8
;;
setf.sig f9 = in1
(p6) fma.s1 f10 = f12, f10, f11
;;
fcvt.fx.trunc.s1 f10 = f10
;;
// r = q * (-b) + a
xma.l f10 = f10, f9, f14
;;
// Transfer result to GP registers.
getf.sig ret0 = f10
br.ret.sptk rp
;;
.endp __moddi3
#endif
#ifdef L__udivdi3
// Compute a 64-bit unsigned integer quotient.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// in0 holds the dividend. in1 holds the divisor.
.text
.align 16
.global __udivdi3
.proc __udivdi3
__udivdi3:
.regstk 2,0,0,0
// Transfer inputs to FP registers.
setf.sig f8 = in0
setf.sig f9 = in1
// Check divide by zero.
cmp.ne.unc p0,p7=0,in1
;;
// Convert the inputs to FP, to avoid FP software-assist faults.
fcvt.xuf.s1 f8 = f8
fcvt.xuf.s1 f9 = f9
(p7) break 1
;;
// Compute the reciprocal approximation.
frcpa.s1 f10, p6 = f8, f9
;;
// 3 Newton-Raphson iterations.
(p6) fnma.s1 f11 = f9, f10, f1
(p6) fmpy.s1 f12 = f8, f10
;;
(p6) fmpy.s1 f13 = f11, f11
(p6) fma.s1 f12 = f11, f12, f12
;;
(p6) fma.s1 f10 = f11, f10, f10
(p6) fma.s1 f11 = f13, f12, f12
;;
(p6) fma.s1 f10 = f13, f10, f10
(p6) fnma.s1 f12 = f9, f11, f8
;;
(p6) fma.s1 f10 = f12, f10, f11
;;
// Round quotient to an unsigned integer.
fcvt.fxu.trunc.s1 f10 = f10
;;
// Transfer result to GP registers.
getf.sig ret0 = f10
br.ret.sptk rp
;;
.endp __udivdi3
#endif
#ifdef L__umoddi3
// Compute a 64-bit unsigned integer modulus.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// in0 holds the dividend (a). in1 holds the divisor (b).
.text
.align 16
.global __umoddi3
.proc __umoddi3
__umoddi3:
.regstk 2,0,0,0
// Transfer inputs to FP registers.
setf.sig f14 = in0
setf.sig f9 = in1
// Check divide by zero.
cmp.ne.unc p0,p7=0,in1
;;
// Convert the inputs to FP, to avoid FP software assist faults.
fcvt.xuf.s1 f8 = f14
fcvt.xuf.s1 f9 = f9
(p7) break 1;
;;
// Compute the reciprocal approximation.
frcpa.s1 f10, p6 = f8, f9
;;
// 3 Newton-Raphson iterations.
(p6) fmpy.s1 f12 = f8, f10
(p6) fnma.s1 f11 = f9, f10, f1
;;
(p6) fma.s1 f12 = f11, f12, f12
(p6) fmpy.s1 f13 = f11, f11
;;
(p6) fma.s1 f10 = f11, f10, f10
(p6) fma.s1 f11 = f13, f12, f12
;;
sub in1 = r0, in1
(p6) fma.s1 f10 = f13, f10, f10
(p6) fnma.s1 f12 = f9, f11, f8
;;
setf.sig f9 = in1
(p6) fma.s1 f10 = f12, f10, f11
;;
// Round quotient to an unsigned integer.
fcvt.fxu.trunc.s1 f10 = f10
;;
// r = q * (-b) + a
xma.l f10 = f10, f9, f14
;;
// Transfer result to GP registers.
getf.sig ret0 = f10
br.ret.sptk rp
;;
.endp __umoddi3
#endif
#ifdef L__divsi3
// Compute a 32-bit integer quotient.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// in0 holds the dividend. in1 holds the divisor.
.text
.align 16
.global __divsi3
.proc __divsi3
__divsi3:
.regstk 2,0,0,0
// Check divide by zero.
cmp.ne.unc p0,p7=0,in1
sxt4 in0 = in0
sxt4 in1 = in1
;;
setf.sig f8 = in0
setf.sig f9 = in1
(p7) break 1
;;
mov r2 = 0x0ffdd
fcvt.xf f8 = f8
fcvt.xf f9 = f9
;;
setf.exp f11 = r2
frcpa.s1 f10, p6 = f8, f9
;;
(p6) fmpy.s1 f8 = f8, f10
(p6) fnma.s1 f9 = f9, f10, f1
;;
(p6) fma.s1 f8 = f9, f8, f8
(p6) fma.s1 f9 = f9, f9, f11
;;
(p6) fma.s1 f10 = f9, f8, f8
;;
fcvt.fx.trunc.s1 f10 = f10
;;
getf.sig ret0 = f10
br.ret.sptk rp
;;
.endp __divsi3
#endif
#ifdef L__modsi3
// Compute a 32-bit integer modulus.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// in0 holds the dividend. in1 holds the divisor.
.text
.align 16
.global __modsi3
.proc __modsi3
__modsi3:
.regstk 2,0,0,0
mov r2 = 0x0ffdd
sxt4 in0 = in0
sxt4 in1 = in1
;;
setf.sig f13 = r32
setf.sig f9 = r33
// Check divide by zero.
cmp.ne.unc p0,p7=0,in1
;;
sub in1 = r0, in1
fcvt.xf f8 = f13
fcvt.xf f9 = f9
;;
setf.exp f11 = r2
frcpa.s1 f10, p6 = f8, f9
(p7) break 1
;;
(p6) fmpy.s1 f12 = f8, f10
(p6) fnma.s1 f10 = f9, f10, f1
;;
setf.sig f9 = in1
(p6) fma.s1 f12 = f10, f12, f12
(p6) fma.s1 f10 = f10, f10, f11
;;
(p6) fma.s1 f10 = f10, f12, f12
;;
fcvt.fx.trunc.s1 f10 = f10
;;
xma.l f10 = f10, f9, f13
;;
getf.sig ret0 = f10
br.ret.sptk rp
;;
.endp __modsi3
#endif
#ifdef L__udivsi3
// Compute a 32-bit unsigned integer quotient.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// in0 holds the dividend. in1 holds the divisor.
.text
.align 16
.global __udivsi3
.proc __udivsi3
__udivsi3:
.regstk 2,0,0,0
mov r2 = 0x0ffdd
zxt4 in0 = in0
zxt4 in1 = in1
;;
setf.sig f8 = in0
setf.sig f9 = in1
// Check divide by zero.
cmp.ne.unc p0,p7=0,in1
;;
fcvt.xf f8 = f8
fcvt.xf f9 = f9
(p7) break 1
;;
setf.exp f11 = r2
frcpa.s1 f10, p6 = f8, f9
;;
(p6) fmpy.s1 f8 = f8, f10
(p6) fnma.s1 f9 = f9, f10, f1
;;
(p6) fma.s1 f8 = f9, f8, f8
(p6) fma.s1 f9 = f9, f9, f11
;;
(p6) fma.s1 f10 = f9, f8, f8
;;
fcvt.fxu.trunc.s1 f10 = f10
;;
getf.sig ret0 = f10
br.ret.sptk rp
;;
.endp __udivsi3
#endif
#ifdef L__umodsi3
// Compute a 32-bit unsigned integer modulus.
//
// From the Intel IA-64 Optimization Guide, choose the minimum latency
// alternative.
//
// in0 holds the dividend. in1 holds the divisor.
.text
.align 16
.global __umodsi3
.proc __umodsi3
__umodsi3:
.regstk 2,0,0,0
mov r2 = 0x0ffdd
zxt4 in0 = in0
zxt4 in1 = in1
;;
setf.sig f13 = in0
setf.sig f9 = in1
// Check divide by zero.
cmp.ne.unc p0,p7=0,in1
;;
sub in1 = r0, in1
fcvt.xf f8 = f13
fcvt.xf f9 = f9
;;
setf.exp f11 = r2
frcpa.s1 f10, p6 = f8, f9
(p7) break 1
;;
(p6) fmpy.s1 f12 = f8, f10
(p6) fnma.s1 f10 = f9, f10, f1
;;
setf.sig f9 = in1
(p6) fma.s1 f12 = f10, f12, f12
(p6) fma.s1 f10 = f10, f10, f11
;;
(p6) fma.s1 f10 = f10, f12, f12
;;
fcvt.fxu.trunc.s1 f10 = f10
;;
xma.l f10 = f10, f9, f13
;;
getf.sig ret0 = f10
br.ret.sptk rp
;;
.endp __umodsi3
#endif
#ifdef L__save_stack_nonlocal
// Notes on save/restore stack nonlocal: We read ar.bsp but write
// ar.bspstore. This is because ar.bsp can be read at all times
// (independent of the RSE mode) but since it's read-only we need to
// restore the value via ar.bspstore. This is OK because
// ar.bsp==ar.bspstore after executing "flushrs".
// void __ia64_save_stack_nonlocal(void *save_area, void *stack_pointer)
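// Judging from the offsets used below, the save area is laid out as
// (a sketch; the field names are ours):
//
//     struct save_area {
//       uint64_t sp;     /* +0:  in1, the stack pointer   */
//       uint64_t bsp;    /* +8:  ar.bsp at the save point */
//       uint64_t rnat;   /* +16: ar.rnat                  */
//       uint64_t pfs;    /* +24: ar.pfs                   */
//     };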
.text
.align 16
.global __ia64_save_stack_nonlocal
.proc __ia64_save_stack_nonlocal
__ia64_save_stack_nonlocal:
{ .mmf
alloc r18 = ar.pfs, 2, 0, 0, 0
mov r19 = ar.rsc
;;
}
{ .mmi
flushrs
st8 [in0] = in1, 24
and r19 = 0x1c, r19
;;
}
{ .mmi
st8 [in0] = r18, -16
mov ar.rsc = r19
or r19 = 0x3, r19
;;
}
{ .mmi
mov r16 = ar.bsp
mov r17 = ar.rnat
adds r2 = 8, in0
;;
}
{ .mmi
st8 [in0] = r16
st8 [r2] = r17
}
{ .mib
mov ar.rsc = r19
br.ret.sptk.few rp
;;
}
.endp __ia64_save_stack_nonlocal
#endif
#ifdef L__nonlocal_goto
// void __ia64_nonlocal_goto(void *target_label, void *save_area,
// void *static_chain);
.text
.align 16
.global __ia64_nonlocal_goto
.proc __ia64_nonlocal_goto
__ia64_nonlocal_goto:
{ .mmi
alloc r20 = ar.pfs, 3, 0, 0, 0
ld8 r12 = [in1], 8
mov.ret.sptk rp = in0, .L0
;;
}
{ .mmf
ld8 r16 = [in1], 8
mov r19 = ar.rsc
;;
}
{ .mmi
flushrs
ld8 r17 = [in1], 8
and r19 = 0x1c, r19
;;
}
{ .mmi
ld8 r18 = [in1]
mov ar.rsc = r19
or r19 = 0x3, r19
;;
}
{ .mmi
mov ar.bspstore = r16
;;
mov ar.rnat = r17
;;
}
{ .mmi
loadrs
invala
mov r15 = in2
;;
}
.L0: { .mib
mov ar.rsc = r19
mov ar.pfs = r18
br.ret.sptk.few rp
;;
}
.endp __ia64_nonlocal_goto
#endif
#ifdef L__restore_stack_nonlocal
// This is mostly the same as nonlocal_goto above.
// ??? This has not been tested yet.
// void __ia64_restore_stack_nonlocal(void *save_area)
.text
.align 16
.global __ia64_restore_stack_nonlocal
.proc __ia64_restore_stack_nonlocal
__ia64_restore_stack_nonlocal:
{ .mmf
alloc r20 = ar.pfs, 4, 0, 0, 0
ld8 r12 = [in0], 8
;;
}
{ .mmb
ld8 r16=[in0], 8
mov r19 = ar.rsc
;;
}
{ .mmi
flushrs
ld8 r17 = [in0], 8
and r19 = 0x1c, r19
;;
}
{ .mmf
ld8 r18 = [in0]
mov ar.rsc = r19
;;
}
{ .mmi
mov ar.bspstore = r16
;;
mov ar.rnat = r17
or r19 = 0x3, r19
;;
}
{ .mmf
loadrs
invala
;;
}
.L0: { .mib
mov ar.rsc = r19
mov ar.pfs = r18
br.ret.sptk.few rp
;;
}
.endp __ia64_restore_stack_nonlocal
#endif
#ifdef L__trampoline
// Implement the nested function trampoline.  This is out of line
// so that we don't have to bother with flushing the icache, and
// so that the on-stack trampoline stays small.
//
// The trampoline has the following form:
//
// +-------------------+ >
// TRAMP: | __ia64_trampoline | |
// +-------------------+ > fake function descriptor
// | TRAMP+16 | |
// +-------------------+ >
// | target descriptor |
// +-------------------+
// | static link |
// +-------------------+
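//
// On entry, r1 (gp) holds TRAMP+16, courtesy of the fake descriptor
// above.  In C terms (a sketch; the names are ours):
//
//     struct fdesc { void (*ip) (void); uint64_t gp; };
//     struct fdesc *t = *(struct fdesc **) r1;  /* target descriptor  */
//     void *chain     = *(void **) (r1 + 8);    /* static link -> r15 */
//     /* set gp = t->gp, r15 = chain, then jump to t->ip */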
.text
.align 16
.global __ia64_trampoline
.proc __ia64_trampoline
__ia64_trampoline:
{ .mmi
ld8 r2 = [r1], 8
;;
ld8 r15 = [r1]
}
{ .mmi
ld8 r3 = [r2], 8
;;
ld8 r1 = [r2]
mov b6 = r3
}
{ .bbb
br.sptk.many b6
;;
}
.endp __ia64_trampoline
#endif
#ifdef SHARED
// Thunks for backward compatibility.
#ifdef L_fixtfdi
.text
.align 16
.global __fixtfti
.proc __fixtfti
__fixtfti:
{ .bbb
br.sptk.many __fixxfti
;;
}
.endp __fixtfti
#endif
#ifdef L_fixunstfdi
.align 16
.global __fixunstfti
.proc __fixunstfti
__fixunstfti:
{ .bbb
br.sptk.many __fixunsxfti
;;
}
.endp __fixunstfti
#endif
#ifdef L_floatditf
.align 16
.global __floattitf
.proc __floattitf
__floattitf:
{ .bbb
br.sptk.many __floattixf
;;
}
.endp __floattitf
#endif
#endif
|
4ms/metamodule-plugin-sdk
| 4,131
|
plugin-libc/libgcc/config/ia64/crtbegin.S
|
/* Copyright (C) 2000-2022 Free Software Foundation, Inc.
Contributed by Jes Sorensen, <Jes.Sorensen@cern.ch>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "auto-host.h"
.section .ctors,"aw","progbits"
.align 8
__CTOR_LIST__:
data8 -1
.section .dtors,"aw","progbits"
.align 8
__DTOR_LIST__:
data8 -1
.section .sdata
.type dtor_ptr,@object
.size dtor_ptr,8
dtor_ptr:
data8 @gprel(__DTOR_LIST__ + 8)
/* A handle for __cxa_finalize to manage c++ local destructors. */
.global __dso_handle
.type __dso_handle,@object
.size __dso_handle,8
#ifdef SHARED
.section .sdata
__dso_handle:
data8 __dso_handle
#else
.section .sbss
.align 8
__dso_handle:
.skip 8
#endif
.hidden __dso_handle
#if HAVE_INITFINI_ARRAY_SUPPORT
.section .fini_array, "a"
data8 @fptr(__do_global_dtors_aux)
.section .init_array, "a"
data8 @fptr(__do_global_ctors_aux)
#else /* !HAVE_INITFINI_ARRAY_SUPPORT */
/*
* Fragment of the ELF _fini routine that invokes our dtor cleanup.
*
* We make the call by indirection, because in large programs the
* .fini and .init sections are not in range of the destination, and
* we cannot allow the linker to insert a stub at the end of this
* fragment of the _fini function. Further, Itanium does not implement
* the long branch instructions, and we do not wish every program to
* trap to the kernel for emulation.
*
* Note that we require __do_global_dtors_aux to preserve the GP,
* so that the next fragment in .fini gets the right value.
*/
.section .fini,"ax","progbits"
{ .mlx
movl r2 = @pcrel(__do_global_dtors_aux - 16)
}
{ .mii
mov r3 = ip
;;
add r2 = r2, r3
;;
}
{ .mib
nop 0
mov b6 = r2
br.call.sptk.many b0 = b6
}
#endif /* !HAVE_INITFINI_ARRAY_SUPPORT */
.section .text
.align 32
.proc __do_global_dtors_aux
__do_global_dtors_aux:
.prologue
#ifndef SHARED
.save ar.pfs, r35
alloc loc3 = ar.pfs, 0, 4, 1, 0
addl loc0 = @gprel(dtor_ptr), gp
.save rp, loc1
mov loc1 = rp
.body
mov loc2 = gp
nop 0
br.sptk.many .entry
#else
/*
if (__cxa_finalize)
__cxa_finalize(__dso_handle)
*/
.save ar.pfs, r35
alloc loc3 = ar.pfs, 0, 4, 1, 0
addl loc0 = @gprel(dtor_ptr), gp
addl r16 = @ltoff(@fptr(__cxa_finalize)), gp
;;
ld8 r16 = [r16]
;;
addl out0 = @ltoff(__dso_handle), gp
cmp.ne p7, p0 = r0, r16
;;
ld8 out0 = [out0]
(p7) ld8 r18 = [r16], 8
.save rp, loc1
mov loc1 = rp
.body
;;
mov loc2 = gp
(p7) ld8 gp = [r16]
(p7) mov b6 = r18
nop 0
nop 0
(p7) br.call.sptk.many rp = b6
;;
nop 0
nop 0
br.sptk.many .entry
#endif
/*
do {
dtor_ptr++;
(*(dtor_ptr-1)) ();
} while (dtor_ptr);
*/
.loop:
st8 [loc0] = r15 // update dtor_ptr (in memory)
ld8 r17 = [r16], 8 // r17 <- dtor's entry-point
nop 0
;;
ld8 gp = [r16] // gp <- dtor's gp
mov b6 = r17
br.call.sptk.many rp = b6
.entry: ld8 r15 = [loc0] // r15 <- dtor_ptr (gp-relative)
;;
add r16 = r15, loc2 // r16 <- dtor_ptr (absolute)
adds r15 = 8, r15
;;
ld8 r16 = [r16] // r16 <- pointer to dtor's fdesc
mov rp = loc1
mov ar.pfs = loc3
;;
cmp.ne p6, p0 = r0, r16
(p6) br.cond.sptk.few .loop
br.ret.sptk.many rp
.endp __do_global_dtors_aux
#ifdef SHARED
.weak __cxa_finalize
#endif
.weak _Jv_RegisterClasses
|
4ms/metamodule-plugin-sdk
| 2,260
|
plugin-libc/libgcc/config/vax/lib1funcs.S
|
/* Copyright (C) 2009-2022 Free Software Foundation, Inc.
This file is part of GCC.
Contributed by Maciej W. Rozycki <macro@linux-mips.org>.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifdef L_udivsi3
.text
.globl __udivsi3
.type __udivsi3, @function
__udivsi3:
.word 0
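/* In C terms, the three paths below are (a sketch; ediv64 is our
   name for the 64-by-32-bit ediv sequence at label 1):
       if (den & 0x80000000)       quotient can only be 0 or 1
           return num >= den;
       if (num & 0x80000000)       dividend needs the extended divide
           return ediv64 (num, den);
       return num / den;           plain divl3
   The first case is computed branch-free with cmpl + adwc.  */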
movl 8(%ap), %r1
blss 0f /* Check bit #31 of divisor. */
movl 4(%ap), %r2
blss 1f /* Check bit #31 of dividend. */
/* Both bits clear, do a standard division. */
divl3 %r1, %r2, %r0
ret
/* MSB of divisor set, only 1 or 0 may result. */
0:
decl %r1
clrl %r0
cmpl %r1, 4(%ap)
adwc $0, %r0
ret
/* MSB of dividend set, do an extended division. */
1:
clrl %r3
ediv %r1, %r2, %r0, %r3
ret
.size __udivsi3, . - __udivsi3
.previous
#endif
#ifdef L_umodsi3
.text
.globl __umodsi3
.type __umodsi3, @function
__umodsi3:
.word 0
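/* The path at label 0 below subtracts the divisor at most once and
   does so branch-free.  A C sketch (names are ours):
       uint32_t mask = 0 - (uint32_t)(num < den);    built by sbwc
       return num - (den & ~mask);                   bicl2 + subl3
*/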
movl 8(%ap), %r1
blss 0f /* Check bit #31 of divisor. */
movl 4(%ap), %r2
blss 1f /* Check bit #31 of dividend. */
/* Both bits clear, do a standard division. */
divl3 %r1, %r2, %r0
mull2 %r0, %r1
subl3 %r1, %r2, %r0
ret
/* MSB of divisor set, subtract the divisor at most once. */
0:
movl 4(%ap), %r2
clrl %r0
cmpl %r2, %r1
sbwc $0, %r0
bicl2 %r0, %r1
subl3 %r1, %r2, %r0
ret
/* MSB of dividend set, do an extended division. */
1:
clrl %r3
ediv %r1, %r2, %r3, %r0
ret
.size __umodsi3, . - __umodsi3
.previous
#endif
|
4ms/metamodule-plugin-sdk
| 1,496
|
plugin-libc/libgcc/config/mcore/crtn.S
|
# crtn.S for ELF based systems
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# Written By David Vinayak Henkel-Wallace, June 1992
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
.section ".init"
.align 4
ldw r15,(r0, 12)
addi r0,16
jmp r15
.section ".fini"
.align 4
ldw r15, (r0, 12)
addi r0,16
jmp r15
# Th-th-th-that is all folks!
|
4ms/metamodule-plugin-sdk
| 1,657
|
plugin-libc/libgcc/config/mcore/crti.S
|
# crti.S for ELF based systems
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# Written By David Vinayak Henkel-Wallace, June 1992
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
.section ".init"
.global _init
.type _init,@function
.align 4
_init:
subi r0, 16
st.w r15, (r0, 12)
# These nops are here to align the end of this code with a 16 byte
# boundary. The linker will start inserting code into the .init
# section at such a boundary.
nop
nop
nop
nop
nop
nop
.section ".fini"
.global _fini
.type _fini,@function
.align 4
_fini:
subi r0, 16
st.w r15, (r0, 12)
nop
nop
nop
nop
nop
nop
|
4ms/metamodule-plugin-sdk
| 7,378
|
plugin-libc/libgcc/config/mcore/lib1funcs.S
|
/* libgcc routines for the MCore.
Copyright (C) 1993-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__, x)
#ifdef __ELF__
#define TYPE(x) .type SYM (x),@function
#define SIZE(x) .size SYM (x), . - SYM (x)
#else
#define TYPE(x)
#define SIZE(x)
#endif
.macro FUNC_START name
.text
.globl SYM (\name)
TYPE (\name)
SYM (\name):
.endm
.macro FUNC_END name
SIZE (\name)
.endm
#ifdef L_udivsi3
FUNC_START udiv32
FUNC_START udivsi3
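/* A C model of the sentinel-bit restoring divider used below
   (a sketch; identifier names are ours):

   unsigned udiv32 (unsigned num, unsigned den, unsigned *rem)
   {
     unsigned hi = 0, q = 1;              // quotient with sentinel bit
     int skip;
     if (num == 0) { *rem = 0; return 0; }
     skip = __builtin_clz (num);
     q   <<= skip;                        // move the sentinel along
     num <<= skip;                        // and the dividend
     for (;;)
       {
         int sub, out = q >> 31;          // sentinel about to fall out?
         hi  = (hi << 1) | (num >> 31);   // lslc r2 / addc r1
         num <<= 1;
         sub = hi >= den;
         if (sub) hi -= den;              // restoring subtract
         q = (q << 1) | sub;              // addc r4,r4
         if (out) break;                  // sentinel fell out: stop
       }
     *rem = hi;
     return q;
   }
*/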
movi r1,0 // r1-r2 form 64 bit dividend
movi r4,1 // r4 is quotient (1 for a sentinel)
cmpnei r3,0 // look for 0 divisor
bt 9f
trap 3 // divide by 0
9:
// control iterations; skip across high order 0 bits in dividend
mov r7,r2
cmpnei r7,0
bt 8f
movi r2,0 // 0 dividend
jmp r15 // quick return
8:
ff1 r7 // figure distance to skip
lsl r4,r7 // move the sentinel along (with 0's behind)
lsl r2,r7 // and the low 32 bits of numerator
// appears to be wrong...
// tested out incorrectly in our OS work...
// mov r7,r3 // looking at divisor
// ff1 r7 // I can move 32-r7 more bits to left.
// addi r7,1 // ok, one short of that...
// mov r1,r2
// lsr r1,r7 // bits that came from low order...
// rsubi r7,31 // r7 == "32-n" == LEFT distance
// addi r7,1 // this is (32-n)
// lsl r4,r7 // fixes the high 32 (quotient)
// lsl r2,r7
// cmpnei r4,0
// bf 4f // the sentinel went away...
// run the remaining bits
1: lslc r2,1 // 1 bit left shift of r1-r2
addc r1,r1
cmphs r1,r3 // upper 32 of dividend >= divisor?
bf 2f
sub r1,r3 // if yes, subtract divisor
2: addc r4,r4 // shift by 1 and count subtracts
bf 1b // if sentinel falls out of quotient, stop
4: mov r2,r4 // return quotient
mov r3,r1 // and piggyback the remainder
jmp r15
FUNC_END udiv32
FUNC_END udivsi3
#endif
#ifdef L_umodsi3
FUNC_START urem32
FUNC_START umodsi3
movi r1,0 // r1-r2 form 64 bit dividend
movi r4,1 // r4 is quotient (1 for a sentinel)
cmpnei r3,0 // look for 0 divisor
bt 9f
trap 3 // divide by 0
9:
// control iterations; skip across high order 0 bits in dividend
mov r7,r2
cmpnei r7,0
bt 8f
movi r2,0 // 0 dividend
jmp r15 // quick return
8:
ff1 r7 // figure distance to skip
lsl r4,r7 // move the sentinel along (with 0's behind)
lsl r2,r7 // and the low 32 bits of numerator
1: lslc r2,1 // 1 bit left shift of r1-r2
addc r1,r1
cmphs r1,r3 // upper 32 of dividend >= divisor?
bf 2f
sub r1,r3 // if yes, subtract divisor
2: addc r4,r4 // shift by 1 and count subtracts
bf 1b // if sentinel falls out of quotient, stop
mov r2,r1 // return remainder
jmp r15
FUNC_END urem32
FUNC_END umodsi3
#endif
#ifdef L_divsi3
FUNC_START div32
FUNC_START divsi3
mov r5,r2 // calc sign of quotient
xor r5,r3
abs r2 // do unsigned divide
abs r3
movi r1,0 // r1-r2 form 64 bit dividend
movi r4,1 // r4 is quotient (1 for a sentinel)
cmpnei r3,0 // look for 0 divisor
bt 9f
trap 3 // divide by 0
9:
// control iterations; skip across high order 0 bits in dividend
mov r7,r2
cmpnei r7,0
bt 8f
movi r2,0 // 0 dividend
jmp r15 // quick return
8:
ff1 r7 // figure distance to skip
lsl r4,r7 // move the sentinel along (with 0's behind)
lsl r2,r7 // and the low 32 bits of numerator
// tested out incorrectly in our OS work...
// mov r7,r3 // looking at divisor
// ff1 r7 // I can move 32-r7 more bits to left.
// addi r7,1 // ok, one short of that...
// mov r1,r2
// lsr r1,r7 // bits that came from low order...
// rsubi r7,31 // r7 == "32-n" == LEFT distance
// addi r7,1 // this is (32-n)
// lsl r4,r7 // fixes the high 32 (quotient)
// lsl r2,r7
// cmpnei r4,0
// bf 4f // the sentinel went away...
// run the remaining bits
1: lslc r2,1 // 1 bit left shift of r1-r2
addc r1,r1
cmphs r1,r3 // upper 32 of dividend >= divisor?
bf 2f
sub r1,r3 // if yes, subtract divisor
2: addc r4,r4 // shift by 1 and count subtracts
bf 1b // if sentinel falls out of quotient, stop
4: mov r2,r4 // return quotient
mov r3,r1 // piggyback the remainder
btsti r5,31 // after adjusting for sign
bf 3f
rsubi r2,0
rsubi r3,0
3: jmp r15
FUNC_END div32
FUNC_END divsi3
#endif
#ifdef L_modsi3
FUNC_START rem32
FUNC_START modsi3
mov r5,r2 // calc sign of remainder
abs r2 // do unsigned divide
abs r3
movi r1,0 // r1-r2 form 64 bit dividend
movi r4,1 // r4 is quotient (1 for a sentinel)
cmpnei r3,0 // look for 0 divisor
bt 9f
trap 3 // divide by 0
9:
// control iterations; skip across high order 0 bits in dividend
mov r7,r2
cmpnei r7,0
bt 8f
movi r2,0 // 0 dividend
jmp r15 // quick return
8:
ff1 r7 // figure distance to skip
lsl r4,r7 // move the sentinel along (with 0's behind)
lsl r2,r7 // and the low 32 bits of numerator
1: lslc r2,1 // 1 bit left shift of r1-r2
addc r1,r1
cmphs r1,r3 // upper 32 of dividend >= divisor?
bf 2f
sub r1,r3 // if yes, subtract divisor
2: addc r4,r4 // shift by 1 and count subtracts
bf 1b // if sentinel falls out of quotient, stop
mov r2,r1 // return remainder
btsti r5,31 // after adjusting for sign
bf 3f
rsubi r2,0
3: jmp r15
FUNC_END rem32
FUNC_END modsi3
#endif
/* GCC expects that {__eq,__ne,__gt,__ge,__le,__lt}{df2,sf2}
will behave as __cmpdf2. So, we stub the implementations to
jump on to __cmpdf2 and __cmpsf2.
All of these shortcircuit the return path so that __cmp{sd}f2
will go directly back to the caller. */
.macro COMPARE_DF_JUMP name
.import SYM (cmpdf2)
FUNC_START \name
jmpi SYM (cmpdf2)
FUNC_END \name
.endm
#ifdef L_eqdf2
COMPARE_DF_JUMP eqdf2
#endif /* L_eqdf2 */
#ifdef L_nedf2
COMPARE_DF_JUMP nedf2
#endif /* L_nedf2 */
#ifdef L_gtdf2
COMPARE_DF_JUMP gtdf2
#endif /* L_gtdf2 */
#ifdef L_gedf2
COMPARE_DF_JUMP gedf2
#endif /* L_gedf2 */
#ifdef L_ltdf2
COMPARE_DF_JUMP ltdf2
#endif /* L_ltdf2 */
#ifdef L_ledf2
COMPARE_DF_JUMP ledf2
#endif /* L_ledf2 */
/* SINGLE PRECISION FLOATING POINT STUBS */
.macro COMPARE_SF_JUMP name
.import SYM (cmpsf2)
FUNC_START \name
jmpi SYM (cmpsf2)
FUNC_END \name
.endm
#ifdef L_eqsf2
COMPARE_SF_JUMP eqsf2
#endif /* L_eqsf2 */
#ifdef L_nesf2
COMPARE_SF_JUMP nesf2
#endif /* L_nesf2 */
#ifdef L_gtsf2
COMPARE_SF_JUMP gtsf2
#endif /* L_gtsf2 */
#ifdef L_gesf2
COMPARE_SF_JUMP gesf2
#endif /* L_gesf2 */
#ifdef L_ltsf2
COMPARE_SF_JUMP ltsf2
#endif /* L_ltsf2 */
#ifdef L_lesf2
COMPARE_SF_JUMP lesf2
#endif /* L_lesf2 */
|
4ms/metamodule-plugin-sdk
| 1,511
|
plugin-libc/libgcc/config/arc/crtn.S
|
/* Ensure .fini/.init return for the Synopsys DesignWare ARC CPU.
Copyright (C) 1994-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
# This file just makes sure that the .fini and .init sections do in
# fact return. This file is the last thing linked into any executable.
#ifdef __ARC_RF16__
/* Use object attributes to inform other tools this file is
safe for RF16 configuration. */
.arc_attribute Tag_ARC_ABI_rf16, 1
#endif
.section .init
pop_s blink
j_s [blink]
.section .fini
pop_s blink
j_s [blink]
|
4ms/metamodule-plugin-sdk
| 2,339
|
plugin-libc/libgcc/config/arc/crttls.S
|
; newlib tls glue code for Synopsys DesignWare ARC cpu.
/* Copyright (C) 2016-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* As a special exception, if you link this library with other files,
some of which are compiled with GCC, to produce an executable,
this library does not by itself cause the resulting executable
to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
#ifdef __ARC_RF16__
/* Use object attributes to inform other tools this file is
safe for RF16 configuration. */
.arc_attribute Tag_ARC_ABI_rf16, 1
#endif
#if (__ARC_TLS_REGNO__ != -1)
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
#define FUNC(X) .type SYM(X),@function
#define ENDFUNC0(X) .Lfe_##X: .size X,.Lfe_##X-X
#define ENDFUNC(X) ENDFUNC0(X)
.global SYM(__read_tp)
SYM(__read_tp):
FUNC(__read_tp)
mov r0, CONCAT1 (r, __ARC_TLS_REGNO__)
nop
j [blink]
ENDFUNC(__read_tp)
.section .init
mov CONCAT1 (r, __ARC_TLS_REGNO__),__main_tcb_end+256
.section .tbss
__main_tcb:
.long 0
.long 0
__main_tcb_end:
#endif /*__ARC_TLS_REGNO__ != -1 */
|
4ms/metamodule-plugin-sdk
| 1,537
|
plugin-libc/libgcc/config/arc/crti.S
|
/* .fini/.init stack frame setup for the Synopsys DesignWare ARC CPU.
Copyright (C) 1994-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
# This file contains the stack frame setup for contents of the .fini and
# .init sections.
#ifdef __ARC_RF16__
/* Use object attributes to inform other tools this file is
safe for RF16 configuration. */
.arc_attribute Tag_ARC_ABI_rf16, 1
#endif
.section .init
.global _init
.word 0
.type _init,@function
_init:
push_s blink
.section .fini
.global _fini
.word 0
.type _fini,@function
_fini:
push_s blink
|
4ms/metamodule-plugin-sdk
| 30,805
|
plugin-libc/libgcc/config/arc/lib1funcs.S
|
; libgcc1 routines for Synopsys DesignWare ARC cpu.
/* Copyright (C) 1995-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* As a special exception, if you link this library with other files,
some of which are compiled with GCC, to produce an executable,
this library does not by itself cause the resulting executable
to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
#ifndef WORKING_ASSEMBLER
#define abs_l abs
#define asl_l asl
#define mov_l mov
#endif
#define FUNC(X) .type SYM(X),@function
#define HIDDEN_FUNC(X) FUNC(X)` .hidden X
#define ENDFUNC0(X) .Lfe_##X: .size X,.Lfe_##X-X
#define ENDFUNC(X) ENDFUNC0(X)
#ifdef __ARC_RF16__
/* Use object attributes to inform other tools this file is
safe for RF16 configuration. */
.arc_attribute Tag_ARC_ABI_rf16, 1
#endif
#ifdef L_mulsi3
.section .text
.align 4
.global SYM(__mulsi3)
SYM(__mulsi3):
/* This is the simple version.
while (a)
{
if (a & 1)
r += b;
a >>= 1;
b <<= 1;
}
*/
#if defined (__ARC_MUL64__)
FUNC(__mulsi3)
mulu64 r0,r1
j_s.d [blink]
mov_s r0,mlo
ENDFUNC(__mulsi3)
#elif defined (__ARC_MPY__)
HIDDEN_FUNC(__mulsi3)
mpyu r0,r0,r1
nop_s
j_s [blink]
ENDFUNC(__mulsi3)
#elif defined (__ARC_NORM__)
FUNC(__mulsi3)
norm.f r2,r0
rsub lp_count,r2,31
mov.mi lp_count,32
mov_s r2,r0
mov_s r0,0
lpnz @.Lend ; loop is aligned
lsr.f r2,r2
add.cs r0,r0,r1
add_s r1,r1,r1
.Lend: j_s [blink]
ENDFUNC(__mulsi3)
#elif !defined (__OPTIMIZE_SIZE__) && defined (__ARC_BARREL_SHIFTER__)
/* Up to 3.5 times faster than the simpler code below, but larger. */
FUNC(__mulsi3)
ror.f r2,r0,4
mov_s r0,0
add3.mi r0,r0,r1
asl.f r2,r2,2
add2.cs r0,r0,r1
jeq_s [blink]
.Loop:
add1.mi r0,r0,r1
asl.f r2,r2,2
add.cs r0,r0,r1
asl_s r1,r1,4
ror.f r2,r2,8
add3.mi r0,r0,r1
asl.f r2,r2,2
bne.d .Loop
add2.cs r0,r0,r1
j_s [blink]
ENDFUNC(__mulsi3)
#elif !defined (__OPTIMIZE_SIZE__) /* __ARC601__ */
FUNC(__mulsi3)
lsr.f r2,r0
mov_s r0,0
mov_s r3,0
add.cs r0,r0,r1
.Loop:
lsr.f r2,r2
add1.cs r0,r0,r1
lsr.f r2,r2
add2.cs r0,r0,r1
lsr.f r2,r2
add3.cs r0,r0,r1
bne.d .Loop
add3 r1,r3,r1
j_s [blink]
ENDFUNC(__mulsi3)
#else
/********************************************************/
FUNC(__mulsi3)
mov_s r2,0 ; Accumulate result here.
.Lloop:
bbit0 r0,0,@.Ly
add_s r2,r2,r1 ; r += b
.Ly:
lsr_s r0,r0 ; a >>= 1
asl_s r1,r1 ; b <<= 1
brne_s r0,0,@.Lloop
.Ldone:
j_s.d [blink]
mov_s r0,r2
ENDFUNC(__mulsi3)
/********************************************************/
#endif
#endif /* L_mulsi3 */
#ifdef L_umulsidi3
.section .text
.align 4
.global SYM(__umulsidi3)
SYM(__umulsidi3):
HIDDEN_FUNC(__umulsidi3)
/* We need ARC700 / ARC_MUL64 definitions of __umulsidi3 / __umulsi3_highpart
in case some code has been compiled without multiply support enabled,
but linked with the multiply-support enabled libraries.
For ARC601 (i.e. without a barrel shifter), we also use __umulsidi3 as our
__umulsi3_highpart implementation; the use of the latter label doesn't
actually benefit ARC601 platforms, but is useful when ARC601 code is linked
against other libraries. */
#if defined (__ARC_MPY__) || defined (__ARC_MUL64__) \
|| !defined (__ARC_BARREL_SHIFTER__)
.global SYM(__umulsi3_highpart)
SYM(__umulsi3_highpart):
HIDDEN_FUNC(__umulsi3_highpart)
#endif
/* This is the simple version.
while (a)
{
if (a & 1)
r += b;
a >>= 1;
b <<= 1;
}
*/
#include "ieee-754/arc-ieee-754.h"
#ifdef __ARC_MPY__
mov_s r12,DBL0L
mpyu DBL0L,r12,DBL0H
j_s.d [blink]
MPYHU DBL0H,r12,DBL0H
#elif defined (__ARC_MUL64__)
/* Likewise for __ARC_MUL64__ */
mulu64 r0,r1
mov_s DBL0L,mlo
j_s.d [blink]
mov_s DBL0H,mhi
#else /* !__ARC_MPY__ && !__ARC_MUL64__ */
/* Although it might look tempting to extend this to handle muldi3,
using mulsi3 twice with 2.25 cycles per 32 bit add is faster
than one loop with three or four cycles per 32 bit add. */
asl.f r12,0 ; Top part of b.
mov_s r2,0 ; Accumulate result here.
bbit1.d r0,0,@.Ladd
mov_s r3,0
.Llooptst:
rlc r12,r12
breq r0,0,@.Ldone ; while (a)
.Lloop:
asl.f r1,r1 ; b <<= 1
bbit0.d r0,1,@.Llooptst
lsr r0,r0 ; a >>= 1
rlc r12,r12
.Ladd:
add.f r3,r3,r1 ; r += b
brne.d r0,0,@.Lloop ; while (a);
adc r2,r2,r12
.Ldone:
mov_s DBL0L,r3
j_s.d [blink]
mov DBL0H,r2
#endif /* !__ARC_MPY__*/
ENDFUNC(__umulsidi3)
#if defined (__ARC_MPY__) || defined (__ARC_MUL64__) \
|| !defined (__ARC_BARREL_SHIFTER__)
ENDFUNC(__umulsi3_highpart)
#endif
#endif /* L_umulsidi3 */
#ifndef __ARC_RF16__
#ifdef L_muldi3
.section .text
.align 4
.global SYM(__muldi3)
SYM(__muldi3):
#ifdef __LITTLE_ENDIAN__
push_s blink
mov_s r4,r3 ;4
mov_s r5,r2 ;4
mov_s r9,r0 ;4
mov_s r8,r1 ;4
bl.d @__umulsidi3
mov_s r1,r2 ;4
mov_s r6,r0 ;4
mov_s r7,r1 ;4
mov_s r0,r9 ;4
bl.d @__mulsi3
mov_s r1,r4 ;4
mov_s r4,r0 ;4
mov_s r1,r8 ;4
bl.d @__mulsi3
mov_s r0,r5 ;4
pop_s blink
add_s r0,r0,r4 ;2
add r1,r0,r7
j_s.d [blink]
mov_s r0,r6 ;4
#else
push_s blink
mov_s r5,r3
mov_s r9,r2
mov_s r4,r1
mov_s r8,r0
mov_s r0,r1
bl.d @__umulsidi3
mov_s r1,r3
mov_s r7,r0
mov_s r6,r1
mov_s r0,r4
bl.d @__mulsi3
mov_s r1,r9
mov_s r4,r0
mov_s r1,r8
bl.d @__mulsi3
mov_s r0,r5
pop_s blink
add_s r0,r0,r4
add_s r0,r0,r7
j_s.d [blink]
mov_s r1,r6
#endif /* __LITTLE_ENDIAN__ */
ENDFUNC(__muldi3)
#endif /* L_muldi3 */
#endif /* !__ARC_RF16__ */
#ifdef L_umulsi3_highpart
#include "ieee-754/arc-ieee-754.h"
/* For use without a barrel shifter, and for ARC700 / ARC_MUL64, the
mulsidi3 algorithms above look better, so for these, there is an
extra label up there. */
#if !defined (__ARC_MPY__) && !defined (__ARC_MUL64__) \
&& defined (__ARC_BARREL_SHIFTER__)
.global SYM(__umulsi3_highpart)
SYM(__umulsi3_highpart):
HIDDEN_FUNC(__umulsi3_highpart)
mov_s r2,0
mov_s r3,32
.Loop:
lsr.f r0,r0
add.cs.f r2,r2,r1
sub_s r3,r3,1
brne.d r0,0,.Loop
rrc r2,r2
j_s.d [blink]
/* Make the result register peephole-compatible with mulsidi3. */
lsr DBL0H,r2,r3
ENDFUNC(__umulsi3_highpart)
#endif /* !__ARC_MPY__ && __ARC_BARREL_SHIFTER__ */
#endif /* L_umulsi3_highpart */
#ifdef L_divmod_tools
; Utilities used by all routines.
.section .text
/*
unsigned long
udivmodsi4(int modwanted, unsigned long num, unsigned long den)
{
unsigned long bit = 1;
unsigned long res = 0;
while (den < num && bit && !(den & (1L<<31)))
{
den <<=1;
bit <<=1;
}
while (bit)
{
if (num >= den)
{
num -= den;
res |= bit;
}
bit >>=1;
den >>=1;
}
if (modwanted) return num;
return res;
}
*/
; inputs: r0 = numerator, r1 = denominator
; outputs: r0 = quotient, r1 = remainder, r2/r3 trashed
.balign 4
.global SYM(__udivmodsi4)
FUNC(__udivmodsi4)
SYM(__udivmodsi4):
#if defined (__ARC_EA__)
/* Normalize divisor and dividend, and then use the appropriate number of
divaw (the number of result bits, or one more) to produce the result.
There are some special conditions that need to be tested:
- We can only directly normalize unsigned numbers that fit in 31 bits.  For
the divisor, we test early on that it is not 'negative'.
- divaw can't correctly process a dividend that is larger than the divisor.
We handle this by checking that the dividend prior to normalization is
not larger than the normalized divisor.  As we already know by then
that the divisor fits in 31 bits, this check also makes sure that the
dividend fits.
- Ordinary normalization of the dividend could make it larger than the
normalized divisor, which again would be unsuitable for divaw.
Thus, we want to shift the dividend left by one less, except that we
want to leave it alone if it is already 31 bits wide.  To this end, we
double the input to norm with adds.
- If the dividend has fewer bits than the divisor, that would leave us
with a negative number of divaw to execute.  Although we could use a
conditional loop to avoid excess divaw, and then the quotient could
be extracted correctly as there'd be more than enough zero bits, the
remainder would be shifted left too far, requiring a conditional shift
right.  The cost of that shift and the possible mispredict on the
conditional loop cost as much as putting in an early check for a zero
result. */
bmsk r3,r0,29
brne.d r3,r0,.Large_dividend
norm.f r2,r1
brlo r0,r1,.Lret0
norm r3,r0
asl_s r1,r1,r2
sub_s r3,r3,1
asl_l r0,r0,r3 ; not short to keep loop aligned
sub lp_count,r2,r3
lp .Ldiv_end
divaw r0,r0,r1
.Ldiv_end:sub_s r3,r2,1
lsr r1,r0,r2
j_s.d [blink]
bmsk r0,r0,r3
.balign 4
.Large_dividend:
bmi .Ltrivial
asl_s r1,r1,r2
mov_s r3,0
sub1.f r4,r0,r1
mov.lo r4,r0
mov.hs r3,2
cmp r4,r1
sub.hs r4,r4,r1
add.hs r3,r3,1
mov.f lp_count,r2
lpne .Ldiv_end2
divaw r4,r4,r1
.Ldiv_end2:asl r0,r3,r2
lsr r1,r4,r2
sub_s r2,r2,1
bmsk r4,r4,r2
j_s.d [blink]
or.ne r0,r0,r4
.Lret0:
mov_s r1,r0
j_s.d [blink]
mov_l r0,0
.balign 4
.Ltrivial:
sub.f r1,r0,r1
mov.c r1,r0
mov_s r0,1
j_s.d [blink]
mov.c r0,0
#elif !defined (__OPTIMIZE_SIZE__) && !defined (__ARC_RF16__)
#if defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
lsr_s r2,r0
brhs.d r1,r2,.Lret0_3
norm r2,r2
norm r3,r1
sub_s r3,r3,r2
asl_s r1,r1,r3
sub1.f 0,r0,r1
lsr.cs r1,r1,1
sbc r2,r3,0
sub1 r0,r0,r1
cmp_s r0,r1
mov.f lp_count,r2
#else /* ! __ARC_NORM__ */
lsr_s r2,r0
brhs.d r1,r2,.Lret0_3
mov lp_count,32
.Lloop1:
asl_s r1,r1 ; den <<= 1
brls.d r1,r2,@.Lloop1
sub lp_count,lp_count,1
sub_s r0,r0,r1
lsr_s r1,r1
cmp_s r0,r1
xor.f r2,lp_count,31
#if !defined (__ARCEM__) && !defined (__ARCHS__)
mov_s lp_count,r2
#else
mov lp_count,r2
nop_s
#endif /* !__ARCEM__ && !__ARCHS__ */
#endif /* !__ARC_NORM__ */
sub.cc r0,r0,r1
mov_s r3,3
sbc r3,r3,0
#if defined (__ARC_BARREL_SHIFTER__)
asl_s r3,r3,r2
rsub r1,r1,1
lpne @.Lloop2_end
add1.f r0,r1,r0
sub.cc r0,r0,r1
.Lloop2_end:
lsr r1,r0,r2
#else
rsub r1,r1,1
lpne @.Lloop2_end
asl_s r3,r3
add1.f r0,r1,r0
sub.cc r0,r0,r1
.Lloop2_end:
lsr_s r1,r0
lsr.f lp_count,r2
mov.cc r1,r0
lpnz 1f
lsr_s r1,r1
lsr_s r1,r1
1:
#endif
bmsk r0,r0,r2
bclr r0,r0,r2
j_s.d [blink]
or_s r0,r0,r3
.Lret0_3:
#if 0 /* Slightly shorter, but slower. */
lp .Loop3_end
brhi.d r1,r0,.Loop3_end
sub_s r0,r0,r1
.Loop3_end
add_s r1,r1,r0
j_s.d [blink]
rsub r0,lp_count,32-1
#else
mov_s r4,r1
sub.f r1,r0,r1
sbc r0,r0,r0
sub.cc.f r1,r1,r4
sbc r0,r0,0
sub.cc.f r1,r1,r4
sbc r0,r0,-3
j_s.d [blink]
add.cs r1,r1,r4
#endif
#else /* Arctangent-A5 */
breq_s r1,0,@.Ldivmodend
mov_s r2,1 ; bit = 1
mov_s r3,0 ; res = 0
.Lloop1:
brhs r1,r0,@.Lloop2
bbit1 r1,31,@.Lloop2
asl_s r1,r1 ; den <<= 1
b.d @.Lloop1
asl_s r2,r2 ; bit <<= 1
.Lloop2:
brlo r0,r1,@.Lshiftdown
sub_s r0,r0,r1 ; num -= den
or_s r3,r3,r2 ; res |= bit
.Lshiftdown:
lsr_s r2,r2 ; bit >>= 1
lsr_s r1,r1 ; den >>= 1
brne_s r2,0,@.Lloop2
.Ldivmodend:
mov_s r1,r0 ; r1 = mod
j.d [blink]
mov_s r0,r3 ; r0 = res
/******************************************************/
#endif
ENDFUNC(__udivmodsi4)
#endif
#ifdef L_udivsi3
.section .text
.align 4
.global SYM(__udivsi3)
FUNC(__udivsi3)
SYM(__udivsi3):
b @SYM(__udivmodsi4)
ENDFUNC(__udivsi3)
#endif /* L_udivsi3 */
#ifdef L_divsi3
.section .text
.align 4
.global SYM(__divsi3)
FUNC(__divsi3)
#ifndef __ARC_EA__
SYM(__divsi3):
/* A5 / ARC60? */
mov r12,blink
xor r11,r0,r1
abs_s r0,r0
bl.d @SYM(__udivmodsi4)
abs_s r1,r1
tst r11,r11
j.d [r12]
neg.mi r0,r0
#else /* __ARC_EA__ */
;; We can use the abs, norm, divaw and mpy instructions for ARC700
#define MULDIV
#ifdef MULDIV
/* This table has been generated by divtab-arc700.c. */
/* 1/512 .. 1/256, normalized. There is a leading 1 in bit 31.
For powers of two, we list unnormalized numbers instead. The values
for powers of 2 are loaded, but not used. The value for 1 is actually
the first instruction after .Lmuldiv. */
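/* A C-style sketch of the fast path below (ours, approximate): for a
   small divisor d, one widening multiply by the tabulated fixed-point
   reciprocal produces the quotient,
       q = mpyhu (abs (a), divtab[-d]) >> (30 - norm (d));
   while exact powers of two skip the multiply and merely shift.  */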
.balign 4
.Ldivtab:
.long 0x1000000
.long 0x80808081
.long 0x81020409
.long 0x81848DA9
.long 0x82082083
.long 0x828CBFBF
.long 0x83126E98
.long 0x83993053
.long 0x84210843
.long 0x84A9F9C9
.long 0x85340854
.long 0x85BF3762
.long 0x864B8A7E
.long 0x86D90545
.long 0x8767AB60
.long 0x87F78088
.long 0x88888889
.long 0x891AC73B
.long 0x89AE408A
.long 0x8A42F871
.long 0x8AD8F2FC
.long 0x8B70344B
.long 0x8C08C08D
.long 0x8CA29C05
.long 0x8D3DCB09
.long 0x8DDA5203
.long 0x8E78356E
.long 0x8F1779DA
.long 0x8FB823EF
.long 0x905A3864
.long 0x90FDBC0A
.long 0x91A2B3C5
.long 0x92492493
.long 0x92F11385
.long 0x939A85C5
.long 0x94458095
.long 0x94F20950
.long 0x95A02569
.long 0x964FDA6D
.long 0x97012E03
.long 0x97B425EE
.long 0x9868C80A
.long 0x991F1A52
.long 0x99D722DB
.long 0x9A90E7DA
.long 0x9B4C6F9F
.long 0x9C09C09D
.long 0x9CC8E161
.long 0x9D89D89E
.long 0x9E4CAD24
.long 0x9F1165E8
.long 0x9FD809FE
.long 0xA0A0A0A1
.long 0xA16B312F
.long 0xA237C32C
.long 0xA3065E40
.long 0xA3D70A3E
.long 0xA4A9CF1E
.long 0xA57EB503
.long 0xA655C43A
.long 0xA72F053A
.long 0xA80A80A9
.long 0xA8E83F58
.long 0xA9C84A48
.long 0xAAAAAAAB
.long 0xAB8F69E3
.long 0xAC769185
.long 0xAD602B59
.long 0xAE4C415D
.long 0xAF3ADDC7
.long 0xB02C0B03
.long 0xB11FD3B9
.long 0xB21642C9
.long 0xB30F6353
.long 0xB40B40B5
.long 0xB509E68B
.long 0xB60B60B7
.long 0xB70FBB5B
.long 0xB81702E1
.long 0xB92143FB
.long 0xBA2E8BA3
.long 0xBB3EE722
.long 0xBC52640C
.long 0xBD691048
.long 0xBE82FA0C
.long 0xBFA02FE9
.long 0xC0C0C0C1
.long 0xC1E4BBD6
.long 0xC30C30C4
.long 0xC4372F86
.long 0xC565C87C
.long 0xC6980C6A
.long 0xC7CE0C7D
.long 0xC907DA4F
.long 0xCA4587E7
.long 0xCB8727C1
.long 0xCCCCCCCD
.long 0xCE168A78
.long 0xCF6474A9
.long 0xD0B69FCC
.long 0xD20D20D3
.long 0xD3680D37
.long 0xD4C77B04
.long 0xD62B80D7
.long 0xD79435E6
.long 0xD901B204
.long 0xDA740DA8
.long 0xDBEB61EF
.long 0xDD67C8A7
.long 0xDEE95C4D
.long 0xE070381D
.long 0xE1FC780F
.long 0xE38E38E4
.long 0xE525982B
.long 0xE6C2B449
.long 0xE865AC7C
.long 0xEA0EA0EB
.long 0xEBBDB2A6
.long 0xED7303B6
.long 0xEF2EB720
.long 0xF0F0F0F1
.long 0xF2B9D649
.long 0xF4898D60
.long 0xF6603D99
.long 0xF83E0F84
.long 0xFA232CF3
.long 0xFC0FC0FD
.long 0xFE03F810
.long 0x2000000
.long 0x81020409
.long 0x82082083
.long 0x83126E98
.long 0x84210843
.long 0x85340854
.long 0x864B8A7E
.long 0x8767AB60
.long 0x88888889
.long 0x89AE408A
.long 0x8AD8F2FC
.long 0x8C08C08D
.long 0x8D3DCB09
.long 0x8E78356E
.long 0x8FB823EF
.long 0x90FDBC0A
.long 0x92492493
.long 0x939A85C5
.long 0x94F20950
.long 0x964FDA6D
.long 0x97B425EE
.long 0x991F1A52
.long 0x9A90E7DA
.long 0x9C09C09D
.long 0x9D89D89E
.long 0x9F1165E8
.long 0xA0A0A0A1
.long 0xA237C32C
.long 0xA3D70A3E
.long 0xA57EB503
.long 0xA72F053A
.long 0xA8E83F58
.long 0xAAAAAAAB
.long 0xAC769185
.long 0xAE4C415D
.long 0xB02C0B03
.long 0xB21642C9
.long 0xB40B40B5
.long 0xB60B60B7
.long 0xB81702E1
.long 0xBA2E8BA3
.long 0xBC52640C
.long 0xBE82FA0C
.long 0xC0C0C0C1
.long 0xC30C30C4
.long 0xC565C87C
.long 0xC7CE0C7D
.long 0xCA4587E7
.long 0xCCCCCCCD
.long 0xCF6474A9
.long 0xD20D20D3
.long 0xD4C77B04
.long 0xD79435E6
.long 0xDA740DA8
.long 0xDD67C8A7
.long 0xE070381D
.long 0xE38E38E4
.long 0xE6C2B449
.long 0xEA0EA0EB
.long 0xED7303B6
.long 0xF0F0F0F1
.long 0xF4898D60
.long 0xF83E0F84
.long 0xFC0FC0FD
.long 0x4000000
.long 0x82082083
.long 0x84210843
.long 0x864B8A7E
.long 0x88888889
.long 0x8AD8F2FC
.long 0x8D3DCB09
.long 0x8FB823EF
.long 0x92492493
.long 0x94F20950
.long 0x97B425EE
.long 0x9A90E7DA
.long 0x9D89D89E
.long 0xA0A0A0A1
.long 0xA3D70A3E
.long 0xA72F053A
.long 0xAAAAAAAB
.long 0xAE4C415D
.long 0xB21642C9
.long 0xB60B60B7
.long 0xBA2E8BA3
.long 0xBE82FA0C
.long 0xC30C30C4
.long 0xC7CE0C7D
.long 0xCCCCCCCD
.long 0xD20D20D3
.long 0xD79435E6
.long 0xDD67C8A7
.long 0xE38E38E4
.long 0xEA0EA0EB
.long 0xF0F0F0F1
.long 0xF83E0F84
.long 0x8000000
.long 0x84210843
.long 0x88888889
.long 0x8D3DCB09
.long 0x92492493
.long 0x97B425EE
.long 0x9D89D89E
.long 0xA3D70A3E
.long 0xAAAAAAAB
.long 0xB21642C9
.long 0xBA2E8BA3
.long 0xC30C30C4
.long 0xCCCCCCCD
.long 0xD79435E6
.long 0xE38E38E4
.long 0xF0F0F0F1
.long 0x10000000
.long 0x88888889
.long 0x92492493
.long 0x9D89D89E
.long 0xAAAAAAAB
.long 0xBA2E8BA3
.long 0xCCCCCCCD
.long 0xE38E38E4
.long 0x20000000
.long 0x92492493
.long 0xAAAAAAAB
.long 0xCCCCCCCD
.long 0x40000000
.long 0xAAAAAAAB
.long 0x80000000
__muldiv:
neg r4,r2
ld.as r5,[pcl,r4]
abs_s r12,r0
bic.f 0,r2,r4
mpyhu.ne r12,r12,r5
norm r3,r2
xor.f 0,r0,r1
; write port allocation stall
rsub r3,r3,30
lsr r0,r12,r3
j_s.d [blink]
neg.mi r0,r0
.balign 4
SYM(__divsi3):
norm r3,r1
abs_s r2,r1
brhs r3,23,__muldiv
norm r4,r0
abs_l r12,r0
brhs r4,r3,.Lonebit
asl_s r2,r2,r3
asl r12,r12,r4
sub lp_count,r3,r4
sub.f r12,r12,r2
brge.d r12,r2,.Lsbit
sub r4,r3,r4
add.lo r12,r12,r2
lp .Ldivend
.Ldivstart:divaw r12,r12,r2
.Ldivend:xor_s r1,r1,r0
sub r0,r4,1
bmsk r0,r12,r0
bset.hs r0,r0,r4
tst_s r1,r1
j_s.d [blink]
neg.mi r0,r0
.Lonebit:
xor_s r1,r1,r0
asr_s r1,r1,31
sub1.f 0,r12,r2 ; special case: -2**(n+1) / 2**n
or r0,r1,1
add.eq r0,r0,r0
cmp_s r12,r2
j_s.d [blink]
mov.lo r0,0
.Lsbit:
; Need to handle special cases involving negative powers of two:
; r12,r2 are normalized dividend / divisor;
; divide anything by 0x80000000, or divide 0x80000000 by 0x40000000
add_s r12,r12,r2
xor_s r1,r1,r0
rsub r4,r4,-1
ror r0,r12,r4
tst_s r2,r2
bmsk r0,r0,r3
add.pl r0,r0,r0
tst_s r1,r1
j_s.d [blink]
neg.mi r0,r0
#else /* !MULDIV */
/* This version requires that divaw works with a divisor of 0x80000000U */
abs_s r2,r1
norm r4,r0
neg_s r3,r2
norm r3,r3
abs_s r12,r0
brhs r4,r3,.Lonebit
asl_s r2,r2,r3
asl r12,r12,r4
sub lp_count,r3,r4
cmp_s r12,r2
sub.hs r12,r12,r2
lp .Ldivend
.Ldivstart:divaw r12,r12,r2
.Ldivend:xor_s r1,r1,r0
sub_s r0,r3,1
bmsk r0,r12,r0
bset.hs r0,r0,r3
tst_s r1,r1
j_s.d [blink]
negmi r0,r0
.Lonebit:
xor_s r1,r1,r0
asr_s r1,r1,31
cmp_s r12,r2
mov_s r0,0
j_s.d [blink]
orhs r0,r1,1
#endif /* MULDIV */
#endif /* ifndef __ARC_EA__ */
ENDFUNC(__divsi3)
#endif /* L_divsi3 */
#ifdef L_umodsi3
.section .text
.align 4
.global SYM(__umodsi3)
FUNC(__umodsi3)
SYM(__umodsi3):
mov r7,blink
bl.nd @SYM(__udivmodsi4)
j.d [r7]
mov r0,r1
ENDFUNC(__umodsi3)
#endif /* L_umodsi3 */
#ifdef L_modsi3
.section .text
.align 4
.global SYM (__modsi3)
FUNC(__modsi3)
SYM(__modsi3):
#ifndef __ARC_EA__
/* A5 / ARC60? */
mov_s r12,blink
mov_s r11,r0
abs_s r0,r0
bl.d @SYM(__udivmodsi4)
abs_s r1,r1
tst r11,r11
neg_s r0,r1
j_s.d [r12]
mov.pl r0,r1
#else /* __ARC_EA__ */
abs_s r2,r1
norm.f r4,r0
neg r5,r2
norm r3,r5
abs_l r12,r0
brhs r4,r3,.Lonebit
asl_s r2,r2,r3
asl r12,r12,r4
sub lp_count,r3,r4
cmp_s r12,r2
sub.hs r12,r12,r2
tst_s r0,r0
lp .Ldivend
.Ldivstart:divaw r12,r12,r2
.Ldivend:
lsr r0,r12,r3
j_s.d [blink]
neg.mi r0,r0
.balign 4
.Lonebit:neg.pl r5,r5
cmp_s r12,r2
j_s.d [blink]
sub.hs r0,r0,r5
#endif /* !__ARC_EA__ */
ENDFUNC(__modsi3)
#endif /* L_modsi3 */
#ifdef L_clzsi2
.section .text
.align 4
.global SYM (__clzsi2)
SYM(__clzsi2):
#ifdef __ARC_NORM__
HIDDEN_FUNC(__clzsi2)
norm.f r0,r0
mov.n r0,0
j_s.d [blink]
add.pl r0,r0,1
ENDFUNC(__clzsi2)
#elif !defined (__ARC_BARREL_SHIFTER__)
FUNC(__clzsi2)
mov lp_count,10
mov_l r1,0
bset r2,r1,29
lp .Loop_end
brhs r0,r2,.Loop_end
add3 r0,r1,r0
.Loop_end:
asl.f 0,r0
sub2 r0,lp_count,lp_count
sub.cs.f r0,r0,1
add r0,r0,31
j_s.d [blink]
add.pl r0,r0,1
ENDFUNC(__clzsi2)
#else
FUNC(__clzsi2)
asl.f 0,r0,2
mov r1,-1
.Lcheck:
bbit1.d r0,31,.Ldone
asl.pl r0,r0,3
bcs.d .Ldone_1
add_s r1,r1,3
bpnz.d .Lcheck
asl.f 0,r0,2
mov_s r0,32
j_s.d [blink]
mov.ne r0,r1
.Ldone:
j_s.d [blink]
add_s r0,r1,1
.Ldone_1:
j_s.d [blink]
sub_s r0,r1,1
ENDFUNC(__clzsi2)
#endif
#endif /* L_clzsi2 */
.section .text
;;; MILLICODE THUNK LIB ;***************
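;;; A sketch of how these thunks are used (ours): instead of emitting
;;; N store instructions in a prologue, the compiler calls a single
;;; thunk, e.g.
;;;
;;;     bl @__st_r13_to_r18      ; prologue: store r13-r18 at [sp,0]..[sp,20]
;;;     ...
;;;     b  @__ld_r13_to_r18_ret  ; epilogue: reload, pop frame, return
;;;
;;; The __st_* thunks return through blink; the *_ret variants reload
;;; blink from the frame and return straight to the caller.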
;;; .macro push_regs from, to, offset
;;; st_s "\from", [sp, \offset]
;;; .if \to-\from
;;; push_regs "(\from+1)", \to, "(\offset+4)"
;;; .endif
;;; .endm
;;; push_regs 13, 18, 0
;;;
;;;; .macro sum from, to, three
;;;; .long \from
;;;; .long \three
;;;; .local regno
;;;; .set regno, \from+1
;;;; .set shift, 32
;;;; .set shift, shift - 1
;;;; # st_s %shift @3 lsl #shift
;;;; .if \to-\from
;;;; sum "(\from+1)", \to, "(\three)"
;;;; .endif
;;;; .endm
;;;;
;;;; SUM 0,5, 9
;;;;
; .altmacro
;; .macro push_regs from=0, to=3, offset
;; st_s r\from, [sp, \offset]
;; .if \to-\from
;; push_regs "\from+1 ",\to,"(\offset+4)"
;; .endif
;; .endm
;;
;; .macro expand_to_push from=13, to
;; ; .section .text
;; ; .align 4
;; ; .global st_
;; ; .type foo,
;; st_13_to_25:
;; ; push_regs \from, \to, 0
;; push_regs 0,3 ;
;; .endm
;;
;; expand_to_push 13,18
;;
;#endif
#ifndef __ARC_RF16__
#ifdef L_millicodethunk_st
.section .text
.align 4
.global SYM(__st_r13_to_r15)
.global SYM(__st_r13_to_r16)
.global SYM(__st_r13_to_r17)
.global SYM(__st_r13_to_r18)
.global SYM(__st_r13_to_r19)
.global SYM(__st_r13_to_r20)
.global SYM(__st_r13_to_r21)
.global SYM(__st_r13_to_r22)
.global SYM(__st_r13_to_r23)
.global SYM(__st_r13_to_r24)
.global SYM(__st_r13_to_r25)
HIDDEN_FUNC(__st_r13_to_r15)
HIDDEN_FUNC(__st_r13_to_r16)
HIDDEN_FUNC(__st_r13_to_r17)
HIDDEN_FUNC(__st_r13_to_r18)
HIDDEN_FUNC(__st_r13_to_r19)
HIDDEN_FUNC(__st_r13_to_r20)
HIDDEN_FUNC(__st_r13_to_r21)
HIDDEN_FUNC(__st_r13_to_r22)
HIDDEN_FUNC(__st_r13_to_r23)
HIDDEN_FUNC(__st_r13_to_r24)
HIDDEN_FUNC(__st_r13_to_r25)
.align 4
SYM(__st_r13_to_r25):
st r25, [sp,48]
SYM(__st_r13_to_r24):
st r24, [sp,44]
SYM(__st_r13_to_r23):
st r23, [sp,40]
SYM(__st_r13_to_r22):
st r22, [sp,36]
SYM(__st_r13_to_r21):
st r21, [sp,32]
SYM(__st_r13_to_r20):
st r20, [sp,28]
SYM(__st_r13_to_r19):
st r19, [sp,24]
SYM(__st_r13_to_r18):
st r18, [sp,20]
SYM(__st_r13_to_r17):
st r17, [sp,16]
SYM(__st_r13_to_r16):
st r16, [sp,12]
SYM(__st_r13_to_r15):
#ifdef __ARC700__
st r15, [sp,8] ; minimum function size to avoid stall: 6 bytes.
#else
st_s r15, [sp,8]
#endif
st_s r14, [sp,4]
j_s.d [%blink]
st_s r13, [sp,0]
ENDFUNC(__st_r13_to_r15)
ENDFUNC(__st_r13_to_r16)
ENDFUNC(__st_r13_to_r17)
ENDFUNC(__st_r13_to_r18)
ENDFUNC(__st_r13_to_r19)
ENDFUNC(__st_r13_to_r20)
ENDFUNC(__st_r13_to_r21)
ENDFUNC(__st_r13_to_r22)
ENDFUNC(__st_r13_to_r23)
ENDFUNC(__st_r13_to_r24)
ENDFUNC(__st_r13_to_r25)
#endif /* L_millicodethunk_st */
#ifdef L_millicodethunk_ld
.section .text
.align 4
; ==================================
; the loads
.global SYM(__ld_r13_to_r15)
.global SYM(__ld_r13_to_r16)
.global SYM(__ld_r13_to_r17)
.global SYM(__ld_r13_to_r18)
.global SYM(__ld_r13_to_r19)
.global SYM(__ld_r13_to_r20)
.global SYM(__ld_r13_to_r21)
.global SYM(__ld_r13_to_r22)
.global SYM(__ld_r13_to_r23)
.global SYM(__ld_r13_to_r24)
.global SYM(__ld_r13_to_r25)
HIDDEN_FUNC(__ld_r13_to_r15)
HIDDEN_FUNC(__ld_r13_to_r16)
HIDDEN_FUNC(__ld_r13_to_r17)
HIDDEN_FUNC(__ld_r13_to_r18)
HIDDEN_FUNC(__ld_r13_to_r19)
HIDDEN_FUNC(__ld_r13_to_r20)
HIDDEN_FUNC(__ld_r13_to_r21)
HIDDEN_FUNC(__ld_r13_to_r22)
HIDDEN_FUNC(__ld_r13_to_r23)
HIDDEN_FUNC(__ld_r13_to_r24)
HIDDEN_FUNC(__ld_r13_to_r25)
SYM(__ld_r13_to_r25):
ld r25, [sp,48]
SYM(__ld_r13_to_r24):
ld r24, [sp,44]
SYM(__ld_r13_to_r23):
ld r23, [sp,40]
SYM(__ld_r13_to_r22):
ld r22, [sp,36]
SYM(__ld_r13_to_r21):
ld r21, [sp,32]
SYM(__ld_r13_to_r20):
ld r20, [sp,28]
SYM(__ld_r13_to_r19):
ld r19, [sp,24]
SYM(__ld_r13_to_r18):
ld r18, [sp,20]
SYM(__ld_r13_to_r17):
ld r17, [sp,16]
SYM(__ld_r13_to_r16):
ld r16, [sp,12]
SYM(__ld_r13_to_r15):
#ifdef __ARC700__
ld r15, [sp,8] ; minimum function size to avoid stall: 6 bytes.
#else
ld_s r15, [sp,8]
#endif
ld_s r14, [sp,4]
j_s.d [%blink]
ld_s r13, [sp,0]
ENDFUNC(__ld_r13_to_r15)
ENDFUNC(__ld_r13_to_r16)
ENDFUNC(__ld_r13_to_r17)
ENDFUNC(__ld_r13_to_r18)
ENDFUNC(__ld_r13_to_r19)
ENDFUNC(__ld_r13_to_r20)
ENDFUNC(__ld_r13_to_r21)
ENDFUNC(__ld_r13_to_r22)
ENDFUNC(__ld_r13_to_r23)
ENDFUNC(__ld_r13_to_r24)
ENDFUNC(__ld_r13_to_r25)
#endif /* L_millicodethunk_ld */
#ifdef L_millicodethunk_ret
.global SYM(__ld_r13_to_r14_ret)
.global SYM(__ld_r13_to_r15_ret)
.global SYM(__ld_r13_to_r16_ret)
.global SYM(__ld_r13_to_r17_ret)
.global SYM(__ld_r13_to_r18_ret)
.global SYM(__ld_r13_to_r19_ret)
.global SYM(__ld_r13_to_r20_ret)
.global SYM(__ld_r13_to_r21_ret)
.global SYM(__ld_r13_to_r22_ret)
.global SYM(__ld_r13_to_r23_ret)
.global SYM(__ld_r13_to_r24_ret)
.global SYM(__ld_r13_to_r25_ret)
HIDDEN_FUNC(__ld_r13_to_r14_ret)
HIDDEN_FUNC(__ld_r13_to_r15_ret)
HIDDEN_FUNC(__ld_r13_to_r16_ret)
HIDDEN_FUNC(__ld_r13_to_r17_ret)
HIDDEN_FUNC(__ld_r13_to_r18_ret)
HIDDEN_FUNC(__ld_r13_to_r19_ret)
HIDDEN_FUNC(__ld_r13_to_r20_ret)
HIDDEN_FUNC(__ld_r13_to_r21_ret)
HIDDEN_FUNC(__ld_r13_to_r22_ret)
HIDDEN_FUNC(__ld_r13_to_r23_ret)
HIDDEN_FUNC(__ld_r13_to_r24_ret)
HIDDEN_FUNC(__ld_r13_to_r25_ret)
.section .text
.align 4
SYM(__ld_r13_to_r25_ret):
ld r25, [sp,48]
SYM(__ld_r13_to_r24_ret):
ld r24, [sp,44]
SYM(__ld_r13_to_r23_ret):
ld r23, [sp,40]
SYM(__ld_r13_to_r22_ret):
ld r22, [sp,36]
SYM(__ld_r13_to_r21_ret):
ld r21, [sp,32]
SYM(__ld_r13_to_r20_ret):
ld r20, [sp,28]
SYM(__ld_r13_to_r19_ret):
ld r19, [sp,24]
SYM(__ld_r13_to_r18_ret):
ld r18, [sp,20]
SYM(__ld_r13_to_r17_ret):
ld r17, [sp,16]
SYM(__ld_r13_to_r16_ret):
ld r16, [sp,12]
SYM(__ld_r13_to_r15_ret):
ld r15, [sp,8]
SYM(__ld_r13_to_r14_ret):
ld blink,[sp,r12]
ld_s r14, [sp,4]
ld.ab r13, [sp,r12]
j_s.d [%blink]
add_s sp,sp,4
ENDFUNC(__ld_r13_to_r14_ret)
ENDFUNC(__ld_r13_to_r15_ret)
ENDFUNC(__ld_r13_to_r16_ret)
ENDFUNC(__ld_r13_to_r17_ret)
ENDFUNC(__ld_r13_to_r18_ret)
ENDFUNC(__ld_r13_to_r19_ret)
ENDFUNC(__ld_r13_to_r20_ret)
ENDFUNC(__ld_r13_to_r21_ret)
ENDFUNC(__ld_r13_to_r22_ret)
ENDFUNC(__ld_r13_to_r23_ret)
ENDFUNC(__ld_r13_to_r24_ret)
ENDFUNC(__ld_r13_to_r25_ret)
#endif /* L_millicodethunk_ret */
#if defined (__ARC700__) || defined (__ARC_FPX_QUARK__)
#ifdef L_adddf3
#ifdef __ARC_NORM__
#include "ieee-754/adddf3.S"
#endif
#endif
#ifdef L_muldf3
#ifdef __ARC_MPY__
#include "ieee-754/muldf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL64__)
#include "ieee-754/arc600-mul64/muldf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__)
#include "ieee-754/arc600-dsp/muldf3.S"
#endif
#endif
#ifdef L_addsf3
#ifdef __ARC_NORM__
#include "ieee-754/addsf3.S"
#endif
#endif
#ifdef L_mulsf3
#ifdef __ARC_MPY__
#include "ieee-754/mulsf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL64__)
#include "ieee-754/arc600-mul64/mulsf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__)
#include "ieee-754/arc600-dsp/mulsf3.S"
#elif defined (__ARC_NORM__)
#include "ieee-754/arc600/mulsf3.S"
#endif
#endif
#ifdef L_divdf3
#ifdef __ARC_MPY__
#include "ieee-754/divdf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL64__)
#include "ieee-754/arc600-mul64/divdf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__)
#include "ieee-754/arc600-dsp/divdf3.S"
#endif
#endif
#ifdef L_divsf3
#ifdef __ARC_MPY__
#include "ieee-754/divsf3-stdmul.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL64__)
#include "ieee-754/arc600-mul64/divsf3.S"
#elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__)
#include "ieee-754/arc600-dsp/divsf3.S"
#elif defined (__ARC_NORM__)
#include "ieee-754/arc600/divsf3.S"
#endif
#endif
#ifdef L_extendsfdf2
#ifdef __ARC_NORM__
#include "ieee-754/extendsfdf2.S"
#endif
#endif
#ifdef L_truncdfsf2
#ifdef __ARC_NORM__
#include "ieee-754/truncdfsf2.S"
#endif
#endif
#ifdef L_floatsidf
#ifdef __ARC_NORM__
#include "ieee-754/floatsidf.S"
#endif
#endif
#ifdef L_floatsisf
#ifdef __ARC_NORM__
#include "ieee-754/floatsisf.S"
#endif
#endif
#ifdef L_floatunsidf
#ifdef __ARC_NORM__
#include "ieee-754/floatunsidf.S"
#endif
#endif
#ifdef L_fixdfsi
#ifdef __ARC_NORM__
#include "ieee-754/fixdfsi.S"
#endif
#endif
#ifdef L_fixsfsi
#ifdef __ARC_NORM__
#include "ieee-754/fixsfsi.S"
#endif
#endif
#ifdef L_fixunsdfsi
#ifdef __ARC_NORM__
#include "ieee-754/fixunsdfsi.S"
#endif
#endif
#ifdef L_eqdf2
#ifdef __ARC_NORM__
#include "ieee-754/eqdf2.S"
#endif
#endif
#ifdef L_eqsf2
#ifdef __ARC_NORM__
#include "ieee-754/eqsf2.S"
#endif
#endif
#ifdef L_gtdf2
#ifdef __ARC_NORM__
#include "ieee-754/gtdf2.S"
#endif
#endif
#ifdef L_gtsf2
#ifdef __ARC_NORM__
#include "ieee-754/gtsf2.S"
#endif
#endif
#ifdef L_gedf2
#ifdef __ARC_NORM__
#include "ieee-754/gedf2.S"
#endif
#endif
#ifdef L_gesf2
#ifdef __ARC_NORM__
#include "ieee-754/gesf2.S"
#endif
#endif
#ifdef L_uneqdf2
#ifdef __ARC_NORM__
#include "ieee-754/uneqdf2.S"
#endif
#endif
#ifdef L_uneqsf2
#ifdef __ARC_NORM__
#include "ieee-754/uneqsf2.S"
#endif
#endif
#ifdef L_orddf2
#ifdef __ARC_NORM__
#include "ieee-754/orddf2.S"
#endif
#endif
#ifdef L_ordsf2
#ifdef __ARC_NORM__
#include "ieee-754/ordsf2.S"
#endif
#endif
#endif /* __ARC700__ || __ARC_FPX_QUARK__ */
#endif /* !__ARC_RF16__ */
|
4ms/metamodule-plugin-sdk
| 11,199
|
plugin-libc/libgcc/config/riscv/save-restore.S
|
/* Callee-saved register spill and fill routines for RISC-V.
Copyright (C) 2016-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "riscv-asm.h"
.text
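# These routines implement the -msave-restore millicode convention:
# compiler-generated prologues reach them with "call t0, __riscv_save_N"
# and epilogues leave through "tail __riscv_restore_N", e.g. (an
# illustrative sketch, not part of this file):
#
# foo:
# call t0, __riscv_save_2 # spill ra, s0, s1 (frame rounded up)
# ... function body ...
# tail __riscv_restore_2 # reload the same registers and return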
#if __riscv_xlen == 64
FUNC_BEGIN (__riscv_save_12)
.cfi_startproc
# __riscv_save_* routines use t0/x5 as the return address
.cfi_return_column 5
addi sp, sp, -112
.cfi_def_cfa_offset 112
li t1, 0
sd s11, 8(sp)
.cfi_offset 27, -104
j .Ls10
FUNC_BEGIN (__riscv_save_11)
FUNC_BEGIN (__riscv_save_10)
.cfi_restore 27
addi sp, sp, -112
.cfi_def_cfa_offset 112
li t1, 1
.Ls10:
sd s10, 16(sp)
.cfi_offset 26, -96
sd s9, 24(sp)
.cfi_offset 25, -88
j .Ls8
FUNC_BEGIN (__riscv_save_9)
FUNC_BEGIN (__riscv_save_8)
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
addi sp, sp, -112
.cfi_def_cfa_offset 112
li t1, 2
.Ls8:
sd s8, 32(sp)
.cfi_offset 24, -80
sd s7, 40(sp)
.cfi_offset 23, -72
j .Ls6
FUNC_BEGIN (__riscv_save_7)
FUNC_BEGIN (__riscv_save_6)
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
addi sp, sp, -112
.cfi_def_cfa_offset 112
li t1, 3
.Ls6:
sd s6, 48(sp)
.cfi_offset 22, -64
sd s5, 56(sp)
.cfi_offset 21, -56
j .Ls4
FUNC_BEGIN (__riscv_save_5)
FUNC_BEGIN (__riscv_save_4)
.cfi_restore 21
.cfi_restore 22
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
addi sp, sp, -112
.cfi_def_cfa_offset 112
li t1, 4
.Ls4:
sd s4, 64(sp)
.cfi_offset 20, -48
sd s3, 72(sp)
.cfi_offset 19, -40
j .Ls2
FUNC_BEGIN (__riscv_save_3)
FUNC_BEGIN (__riscv_save_2)
.cfi_restore 19
.cfi_restore 20
.cfi_restore 21
.cfi_restore 22
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
addi sp, sp, -112
.cfi_def_cfa_offset 112
li t1, 5
.Ls2:
sd s2, 80(sp)
.cfi_offset 18, -32
sd s1, 88(sp)
.cfi_offset 9, -24
sd s0, 96(sp)
.cfi_offset 8, -16
sd ra, 104(sp)
.cfi_offset 1, -8
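# Illustrative arithmetic note: t1 was set to 0..5 at whichever entry
# point was used, so t1 << 4 is 0..80 and the add below trims the
# maximal 112-byte frame down to the 112..32 bytes actually needed.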
slli t1, t1, 4
# CFA info is not correct in the next 2 instructions, since t1's
# value depends on how many registers were actually saved.
add sp, sp, t1
jr t0
.cfi_endproc
FUNC_END (__riscv_save_12)
FUNC_END (__riscv_save_11)
FUNC_END (__riscv_save_10)
FUNC_END (__riscv_save_9)
FUNC_END (__riscv_save_8)
FUNC_END (__riscv_save_7)
FUNC_END (__riscv_save_6)
FUNC_END (__riscv_save_5)
FUNC_END (__riscv_save_4)
FUNC_END (__riscv_save_3)
FUNC_END (__riscv_save_2)
FUNC_BEGIN (__riscv_save_1)
FUNC_BEGIN (__riscv_save_0)
.cfi_startproc
# __riscv_save_* routines use t0/x5 as the return address
.cfi_return_column 5
addi sp, sp, -16
.cfi_def_cfa_offset 16
sd s0, 0(sp)
.cfi_offset 8, -16
sd ra, 8(sp)
.cfi_offset 1, -8
jr t0
.cfi_endproc
FUNC_END (__riscv_save_1)
FUNC_END (__riscv_save_0)
FUNC_BEGIN (__riscv_restore_12)
.cfi_startproc
.cfi_def_cfa_offset 112
.cfi_offset 27, -104
.cfi_offset 26, -96
.cfi_offset 25, -88
.cfi_offset 24, -80
.cfi_offset 23, -72
.cfi_offset 22, -64
.cfi_offset 21, -56
.cfi_offset 20, -48
.cfi_offset 19, -40
.cfi_offset 18, -32
.cfi_offset 9, -24
.cfi_offset 8, -16
.cfi_offset 1, -8
ld s11, 8(sp)
.cfi_restore 27
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_11)
FUNC_BEGIN (__riscv_restore_10)
.cfi_restore 27
.cfi_def_cfa_offset 96
ld s10, 0(sp)
.cfi_restore 26
ld s9, 8(sp)
.cfi_restore 25
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_9)
FUNC_BEGIN (__riscv_restore_8)
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
.cfi_def_cfa_offset 80
ld s8, 0(sp)
.cfi_restore 24
ld s7, 8(sp)
.cfi_restore 23
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_7)
FUNC_BEGIN (__riscv_restore_6)
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
.cfi_def_cfa_offset 64
ld s6, 0(sp)
.cfi_restore 22
ld s5, 8(sp)
.cfi_restore 21
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_5)
FUNC_BEGIN (__riscv_restore_4)
.cfi_restore 21
.cfi_restore 22
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
.cfi_def_cfa_offset 48
ld s4, 0(sp)
.cfi_restore 20
ld s3, 8(sp)
.cfi_restore 19
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_3)
FUNC_BEGIN (__riscv_restore_2)
.cfi_restore 19
.cfi_restore 20
.cfi_restore 21
.cfi_restore 22
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
.cfi_def_cfa_offset 32
ld s2, 0(sp)
.cfi_restore 18
ld s1, 8(sp)
.cfi_restore 9
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_1)
FUNC_BEGIN (__riscv_restore_0)
.cfi_restore 9
.cfi_restore 18
.cfi_restore 19
.cfi_restore 20
.cfi_restore 21
.cfi_restore 22
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
.cfi_def_cfa_offset 16
ld s0, 0(sp)
.cfi_restore 8
ld ra, 8(sp)
.cfi_restore 1
addi sp, sp, 16
.cfi_def_cfa_offset 0
ret
.cfi_endproc
FUNC_END (__riscv_restore_12)
FUNC_END (__riscv_restore_11)
FUNC_END (__riscv_restore_10)
FUNC_END (__riscv_restore_9)
FUNC_END (__riscv_restore_8)
FUNC_END (__riscv_restore_7)
FUNC_END (__riscv_restore_6)
FUNC_END (__riscv_restore_5)
FUNC_END (__riscv_restore_4)
FUNC_END (__riscv_restore_3)
FUNC_END (__riscv_restore_2)
FUNC_END (__riscv_restore_1)
FUNC_END (__riscv_restore_0)
#else
#ifdef __riscv_32e
FUNC_BEGIN(__riscv_save_2)
FUNC_BEGIN(__riscv_save_1)
FUNC_BEGIN(__riscv_save_0)
.cfi_startproc
# __riscv_save_* routines use t0/x5 as the return address
.cfi_return_column 5
addi sp, sp, -12
.cfi_def_cfa_offset 12
sw s1, 0(sp)
.cfi_offset 9, -12
sw s0, 4(sp)
.cfi_offset 8, -8
sw ra, 8(sp)
.cfi_offset 1, -4
jr t0
.cfi_endproc
FUNC_END(__riscv_save_2)
FUNC_END(__riscv_save_1)
FUNC_END(__riscv_save_0)
FUNC_BEGIN(__riscv_restore_2)
FUNC_BEGIN(__riscv_restore_1)
FUNC_BEGIN(__riscv_restore_0)
.cfi_startproc
.cfi_def_cfa_offset 12
lw s1, 0(sp)
.cfi_restore 9
lw s0, 4(sp)
.cfi_restore 8
lw ra, 8(sp)
.cfi_restore 1
addi sp, sp, 12
.cfi_def_cfa_offset 0
ret
.cfi_endproc
FUNC_END(__riscv_restore_2)
FUNC_END(__riscv_restore_1)
FUNC_END(__riscv_restore_0)
#else
FUNC_BEGIN (__riscv_save_12)
.cfi_startproc
# __riscv_save_* routines use t0/x5 as the return address
.cfi_return_column 5
addi sp, sp, -64
.cfi_def_cfa_offset 64
li t1, 0
sw s11, 12(sp)
.cfi_offset 27, -52
j .Ls10
FUNC_BEGIN (__riscv_save_11)
FUNC_BEGIN (__riscv_save_10)
FUNC_BEGIN (__riscv_save_9)
FUNC_BEGIN (__riscv_save_8)
.cfi_restore 27
addi sp, sp, -64
.cfi_def_cfa_offset 64
li t1, -16
.Ls10:
sw s10, 16(sp)
.cfi_offset 26, -48
sw s9, 20(sp)
.cfi_offset 25, -44
sw s8, 24(sp)
.cfi_offset 24, -40
sw s7, 28(sp)
.cfi_offset 23, -36
j .Ls6
FUNC_BEGIN (__riscv_save_7)
FUNC_BEGIN (__riscv_save_6)
FUNC_BEGIN (__riscv_save_5)
FUNC_BEGIN (__riscv_save_4)
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
addi sp, sp, -64
.cfi_def_cfa_offset 64
li t1, -32
.Ls6:
sw s6, 32(sp)
.cfi_offset 22, -32
sw s5, 36(sp)
.cfi_offset 21, -28
sw s4, 40(sp)
.cfi_offset 20, -24
sw s3, 44(sp)
.cfi_offset 19, -20
sw s2, 48(sp)
.cfi_offset 18, -16
sw s1, 52(sp)
.cfi_offset 9, -12
sw s0, 56(sp)
.cfi_offset 8, -8
sw ra, 60(sp)
.cfi_offset 1, -4
# CFA info is not correct in the next 2 instructions, since t1's
# value depends on how many registers were actually saved.
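# Illustrative arithmetic note: t1 is 0, -16 or -32 depending on the
# entry point, so the subtract below trims the maximal 64-byte frame
# to 64, 48 or 32 bytes.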
sub sp, sp, t1
jr t0
.cfi_endproc
FUNC_END (__riscv_save_12)
FUNC_END (__riscv_save_11)
FUNC_END (__riscv_save_10)
FUNC_END (__riscv_save_9)
FUNC_END (__riscv_save_8)
FUNC_END (__riscv_save_7)
FUNC_END (__riscv_save_6)
FUNC_END (__riscv_save_5)
FUNC_END (__riscv_save_4)
FUNC_BEGIN (__riscv_save_3)
FUNC_BEGIN (__riscv_save_2)
FUNC_BEGIN (__riscv_save_1)
FUNC_BEGIN (__riscv_save_0)
.cfi_startproc
# __riscv_save_* routines use t0/x5 as the return address
.cfi_return_column 5
addi sp, sp, -16
.cfi_def_cfa_offset 16
sw s2, 0(sp)
.cfi_offset 18, -16
sw s1, 4(sp)
.cfi_offset 9, -12
sw s0, 8(sp)
.cfi_offset 8, -8
sw ra, 12(sp)
.cfi_offset 1, -4
jr t0
.cfi_endproc
FUNC_END (__riscv_save_3)
FUNC_END (__riscv_save_2)
FUNC_END (__riscv_save_1)
FUNC_END (__riscv_save_0)
FUNC_BEGIN (__riscv_restore_12)
.cfi_startproc
.cfi_def_cfa_offset 64
.cfi_offset 27, -52
.cfi_offset 26, -48
.cfi_offset 25, -44
.cfi_offset 24, -40
.cfi_offset 23, -36
.cfi_offset 22, -32
.cfi_offset 21, -28
.cfi_offset 20, -24
.cfi_offset 19, -20
.cfi_offset 18, -16
.cfi_offset 9, -12
.cfi_offset 8, -8
.cfi_offset 1, -4
lw s11, 12(sp)
.cfi_restore 27
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_11)
FUNC_BEGIN (__riscv_restore_10)
FUNC_BEGIN (__riscv_restore_9)
FUNC_BEGIN (__riscv_restore_8)
.cfi_restore 27
.cfi_def_cfa_offset 48
lw s10, 0(sp)
.cfi_restore 26
lw s9, 4(sp)
.cfi_restore 25
lw s8, 8(sp)
.cfi_restore 24
lw s7, 12(sp)
.cfi_restore 23
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_7)
FUNC_BEGIN (__riscv_restore_6)
FUNC_BEGIN (__riscv_restore_5)
FUNC_BEGIN (__riscv_restore_4)
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
.cfi_def_cfa_offset 32
lw s6, 0(sp)
.cfi_restore 22
lw s5, 4(sp)
.cfi_restore 21
lw s4, 8(sp)
.cfi_restore 20
lw s3, 12(sp)
.cfi_restore 19
addi sp, sp, 16
FUNC_BEGIN (__riscv_restore_3)
FUNC_BEGIN (__riscv_restore_2)
FUNC_BEGIN (__riscv_restore_1)
FUNC_BEGIN (__riscv_restore_0)
.cfi_restore 19
.cfi_restore 20
.cfi_restore 21
.cfi_restore 22
.cfi_restore 23
.cfi_restore 24
.cfi_restore 25
.cfi_restore 26
.cfi_restore 27
.cfi_def_cfa_offset 16
lw s2, 0(sp)
.cfi_restore 18
lw s1, 4(sp)
.cfi_restore 9
lw s0, 8(sp)
.cfi_restore 8
lw ra, 12(sp)
.cfi_restore 1
addi sp, sp, 16
.cfi_def_cfa_offset 0
ret
.cfi_endproc
FUNC_END (__riscv_restore_12)
FUNC_END (__riscv_restore_11)
FUNC_END (__riscv_restore_10)
FUNC_END (__riscv_restore_9)
FUNC_END (__riscv_restore_8)
FUNC_END (__riscv_restore_7)
FUNC_END (__riscv_restore_6)
FUNC_END (__riscv_restore_5)
FUNC_END (__riscv_restore_4)
FUNC_END (__riscv_restore_3)
FUNC_END (__riscv_restore_2)
FUNC_END (__riscv_restore_1)
FUNC_END (__riscv_restore_0)
#endif /* __riscv_32e */
#endif /* __riscv_xlen == 64 */
/* plugin-libc/libgcc/config/riscv/muldi3.S */
/* Integer multiplication routines for RISC-V.
Copyright (C) 2016-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "riscv-asm.h"
.text
.align 2
#if __riscv_xlen == 32
/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */
# define __muldi3 __mulsi3
#endif
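/* The loop below is a textbook shift-and-add multiply: for each set
   bit of the multiplier (a1), the correspondingly shifted multiplicand
   (a2) is added into the product (a0).  A rough C equivalent, for
   illustration only (the names here are ours, not part of libgcc):

	unsigned long
	mul (unsigned long a, unsigned long b)
	{
	  unsigned long r = 0;			// running product
	  for (; b != 0; b >>= 1, a <<= 1)	// walk the multiplier bits
	    if (b & 1)
	      r += a;				// add shifted multiplicand
	  return r;
	}
*/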
FUNC_BEGIN (__muldi3)
mv a2, a0
li a0, 0
.L1:
andi a3, a1, 1
beqz a3, .L2
add a0, a0, a2
.L2:
srli a1, a1, 1
slli a2, a2, 1
bnez a1, .L1
ret
FUNC_END (__muldi3)
/* plugin-libc/libgcc/config/riscv/div.S */
/* Integer division routines for RISC-V.
Copyright (C) 2016-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "riscv-asm.h"
.text
.align 2
#if __riscv_xlen == 32
/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
# define __udivdi3 __udivsi3
# define __umoddi3 __umodsi3
# define __divdi3 __divsi3
# define __moddi3 __modsi3
#else
FUNC_BEGIN (__udivsi3)
/* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
sll a0, a0, 32
sll a1, a1, 32
move t0, ra
jal HIDDEN_JUMPTARGET(__udivdi3)
sext.w a0, a0
jr t0
FUNC_END (__udivsi3)
FUNC_BEGIN (__umodsi3)
/* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t. */
sll a0, a0, 32
sll a1, a1, 32
srl a0, a0, 32
srl a1, a1, 32
move t0, ra
jal HIDDEN_JUMPTARGET(__udivdi3)
sext.w a0, a1
jr t0
FUNC_END (__umodsi3)
FUNC_ALIAS (__modsi3, __moddi3)
FUNC_BEGIN (__divsi3)
/* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
li t0, -1
beq a1, t0, .L20
#endif
FUNC_BEGIN (__divdi3)
bltz a0, .L10
bltz a1, .L11
/* Since the quotient is positive, fall into __udivdi3. */
FUNC_BEGIN (__udivdi3)
mv a2, a1
mv a1, a0
li a0, -1
beqz a2, .L5
li a3, 1
bgeu a2, a1, .L2
.L1:
blez a2, .L2
slli a2, a2, 1
slli a3, a3, 1
bgtu a1, a2, .L1
.L2:
li a0, 0
.L3:
bltu a1, a2, .L4
sub a1, a1, a2
or a0, a0, a3
.L4:
srli a3, a3, 1
srli a2, a2, 1
bnez a3, .L3
.L5:
ret
FUNC_END (__udivdi3)
HIDDEN_DEF (__udivdi3)
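/* The routine above is plain shift-subtract division: the divisor is
   scaled up to just below the dividend, then subtracted back out one
   bit position at a time, building the quotient in a0 and leaving the
   remainder in a1.  A rough C equivalent, for illustration only (the
   names here are ours, not part of libgcc):

	unsigned long
	udiv (unsigned long num, unsigned long den)
	{
	  unsigned long bit = 1, quo = 0;
	  if (den == 0)
	    return -1UL;			// mirrors "li a0, -1" above
	  while (den < num && (long) den > 0)	// scale divisor up (.L1)
	    {
	      den <<= 1;
	      bit <<= 1;
	    }
	  do					// subtract back out (.L3)
	    {
	      if (num >= den)
		{
		  num -= den;
		  quo |= bit;
		}
	      bit >>= 1;
	      den >>= 1;
	    }
	  while (bit != 0);
	  return quo;				// the remainder stays in num
	}
*/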
FUNC_BEGIN (__umoddi3)
/* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
move t0, ra
jal HIDDEN_JUMPTARGET(__udivdi3)
move a0, a1
jr t0
FUNC_END (__umoddi3)
/* Handle negative arguments to __divdi3. */
.L10:
neg a0, a0
/* Zero is handled as a negative so that the result will not be inverted. */
bgtz a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
neg a1, a1
j HIDDEN_JUMPTARGET(__udivdi3) /* Compute __udivdi3(-a0, -a1). */
.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
neg a1, a1
.L12:
move t0, ra
jal HIDDEN_JUMPTARGET(__udivdi3)
neg a0, a0
jr t0
FUNC_END (__divdi3)
FUNC_BEGIN (__moddi3)
move t0, ra
bltz a1, .L31
bltz a0, .L32
.L30:
jal HIDDEN_JUMPTARGET(__udivdi3) /* The dividend is not negative. */
move a0, a1
jr t0
.L31:
neg a1, a1
bgez a0, .L30
.L32:
neg a0, a0
jal HIDDEN_JUMPTARGET(__udivdi3) /* The dividend is hella negative. */
neg a0, a1
jr t0
FUNC_END (__moddi3)
#if __riscv_xlen == 64
/* continuation of __divsi3 */
.L20:
sll t0, t0, 31
bne a0, t0, __divdi3
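# Here a0 == INT_MIN: INT_MIN / -1 overflows, so the dividend is
# returned unchanged, matching the RISC-V M-extension div overflow rule.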
ret
FUNC_END (__divsi3)
#endif
/* plugin-libc/libgcc/config/sh/crtn.S */
/* Copyright (C) 2000-2022 Free Software Foundation, Inc.
This file was adapted from glibc sources.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* See an explanation about .init and .fini in crti.S. */
.section .init
mov r14,r15
lds.l @r15+,pr
mov.l @r15+,r14
rts
#ifdef __ELF__
mov.l @r15+,r12
#else
nop
#endif
.section .fini
mov r14,r15
lds.l @r15+,pr
mov.l @r15+,r14
rts
#ifdef __ELF__
mov.l @r15+,r12
#else
nop
#endif
/* plugin-libc/libgcc/config/sh/lib1funcs-4-300.S */
/* Copyright (C) 2004-2022 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* libgcc routines for the STMicroelectronics ST40-300 CPU.
Contributed by J"orn Rennecke joern.rennecke@st.com. */
#include "lib1funcs.h"
#ifdef L_div_table
#if defined (__SH3__) || defined (__SH3E__) || defined (__SH4__) || defined (__SH4_SINGLE__) || defined (__SH4_SINGLE_ONLY__) || defined (__SH4_NOFPU__)
/* This code uses shld, and thus is not suitable for SH1 / SH2. */
/* Signed / unsigned division without use of FPU, optimized for SH4-300.
Uses a lookup table for divisors in the range -128 .. +127, and
div1 with case distinction for larger divisors in three more ranges.
The code is lumped together with the table to allow the use of mova. */
#ifdef __LITTLE_ENDIAN__
#define L_LSB 0
#define L_LSWMSB 1
#define L_MSWLSB 2
#else
#define L_LSB 3
#define L_LSWMSB 2
#define L_MSWLSB 1
#endif
.global GLOBAL(udivsi3_i4i)
.global GLOBAL(sdivsi3_i4i)
FUNC(GLOBAL(udivsi3_i4i))
FUNC(GLOBAL(sdivsi3_i4i))
.balign 4
LOCAL(div_ge8m): ! 10 cycles up to here
rotcr r1 ! signed shift must use original sign from r4
div0s r5,r4
mov #24,r7
shld r7,r6
shad r0,r1
rotcl r6
div1 r5,r1
swap.w r5,r0 ! detect -0x80000000 : 0x800000
rotcl r6
swap.w r4,r7
div1 r5,r1
swap.b r7,r7
rotcl r6
or r7,r0
div1 r5,r1
swap.w r0,r7
rotcl r6
or r7,r0
div1 r5,r1
add #-0x80,r0
rotcl r6
extu.w r0,r0
div1 r5,r1
neg r0,r0
rotcl r6
swap.w r0,r0
div1 r5,r1
mov.l @r15+,r7
and r6,r0
rotcl r6
div1 r5,r1
shll2 r0
rotcl r6
exts.b r0,r0
div1 r5,r1
swap.w r0,r0
exts.w r0,r1
exts.b r6,r0
mov.l @r15+,r6
rotcl r0
rts
sub r1,r0
! 31 cycles up to here
.balign 4
LOCAL(udiv_ge64k): ! 3 cycles up to here
mov r4,r0
shlr8 r0
div0u
cmp/hi r0,r5
bt LOCAL(udiv_r8)
mov.l r5,@-r15
shll8 r5
! 7 cycles up to here
.rept 8
div1 r5,r0
.endr
extu.b r4,r1 ! 15 cycles up to here
extu.b r0,r6
xor r1,r0
xor r6,r0
swap.b r6,r6
.rept 8
div1 r5,r0
.endr ! 25 cycles up to here
extu.b r0,r0
mov.l @r15+,r5
or r6,r0
mov.l @r15+,r6
rts
rotcl r0 ! 28 cycles up to here
.balign 4
LOCAL(udiv_r8): ! 6 cycles up to here
mov.l r4,@-r15
shll16 r4
shll8 r4
!
shll r4
mov r0,r1
div1 r5,r1
mov r4,r0
rotcl r0
mov.l @r15+,r4
div1 r5,r1
! 12 cycles up to here
.rept 6
rotcl r0; div1 r5,r1
.endr
mov.l @r15+,r6 ! 24 cycles up to here
rts
rotcl r0
.balign 4
LOCAL(div_ge32k): ! 6 cycles up to here
mov.l r7,@-r15
swap.w r5,r6
exts.b r6,r7
exts.w r6,r6
cmp/eq r6,r7
extu.b r1,r6
bf/s LOCAL(div_ge8m)
cmp/hi r1,r4 ! copy sign bit of r4 into T
rotcr r1 ! signed shift must use original sign from r4
div0s r5,r4
shad r0,r1
shll8 r5
div1 r5,r1
mov r5,r7 ! detect r4 == 0x80000000 && r5 == 0x8000(00)
div1 r5,r1
shlr8 r7
div1 r5,r1
swap.w r4,r0
div1 r5,r1
swap.b r0,r0
div1 r5,r1
or r0,r7
div1 r5,r1
add #-0x80,r7
div1 r5,r1
swap.w r7,r0
div1 r5,r1
or r0,r7
extu.b r1,r0
xor r6,r1
xor r0,r1
exts.b r0,r0
div1 r5,r1
extu.w r7,r7
div1 r5,r1
neg r7,r7 ! upper 16 bit of r7 == 0 if r4 == 0x80000000 && r5 == 0x8000
div1 r5,r1
and r0,r7
div1 r5,r1
swap.w r7,r7 ! 26 cycles up to here.
div1 r5,r1
shll8 r0
div1 r5,r1
exts.w r7,r7
div1 r5,r1
add r0,r0
div1 r5,r1
sub r7,r0
extu.b r1,r1
mov.l @r15+,r7
rotcl r1
mov.l @r15+,r6
add r1,r0
mov #-8,r1
rts
shad r1,r5 ! 34 cycles up to here
.balign 4
GLOBAL(udivsi3_i4i):
mov.l r6,@-r15
extu.w r5,r6
cmp/eq r5,r6
mov #0x7f,r0
bf LOCAL(udiv_ge64k)
cmp/hi r0,r5
bf LOCAL(udiv_le128)
mov r4,r1
shlr8 r1
div0u
shlr r1
shll16 r6
div1 r6,r1
extu.b r4,r0 ! 7 cycles up to here
.rept 8
div1 r6,r1
.endr ! 15 cycles up to here
xor r1,r0 ! xor dividend with result lsb
.rept 6
div1 r6,r1
.endr
mov.l r7,@-r15 ! 21 cycles up to here
div1 r6,r1
extu.b r0,r7
div1 r6,r1
shll8 r7
extu.w r1,r0
xor r7,r1 ! replace lsb of result with lsb of dividend
div1 r6,r1
mov #0,r7
div1 r6,r1
!
div1 r6,r1
bra LOCAL(div_end)
div1 r6,r1 ! 28 cycles up to here
/* This is link-compatible with a GLOBAL(sdivsi3) call,
but we effectively clobber only r1, macl and mach */
/* Because negative quotients are calculated as one's complements,
-0x80000000 divided by the smallest positive number of a number
range (0x80, 0x8000, 0x800000) causes saturation in the one's
complement representation, and we have to suppress the
one's -> two's complement adjustment. Since positive numbers
don't get such an adjustment, it's OK to also compute one's -> two's
complement adjustment suppression for a dividend of 0. */
.balign 4
GLOBAL(sdivsi3_i4i):
mov.l r6,@-r15
exts.b r5,r6
cmp/eq r5,r6
mov #-1,r1
bt/s LOCAL(div_le128)
cmp/pz r4
addc r4,r1
exts.w r5,r6
cmp/eq r5,r6
mov #-7,r0
bf/s LOCAL(div_ge32k)
cmp/hi r1,r4 ! copy sign bit of r4 into T
rotcr r1
shll16 r6 ! 7 cycles up to here
shad r0,r1
div0s r5,r4
div1 r6,r1
mov.l r7,@-r15
div1 r6,r1
mov r4,r0 ! re-compute adjusted dividend
div1 r6,r1
mov #-31,r7
div1 r6,r1
shad r7,r0
div1 r6,r1
add r4,r0 ! adjusted dividend
div1 r6,r1
mov.l r8,@-r15
div1 r6,r1
swap.w r4,r8 ! detect special case r4 = 0x80000000, r5 = 0x80
div1 r6,r1
swap.b r8,r8
xor r1,r0 ! xor dividend with result lsb
div1 r6,r1
div1 r6,r1
or r5,r8
div1 r6,r1
add #-0x80,r8 ! r8 is 0 iff there is a match
div1 r6,r1
swap.w r8,r7 ! or upper 16 bits...
div1 r6,r1
or r7,r8 !...into lower 16 bits
div1 r6,r1
extu.w r8,r8
div1 r6,r1
extu.b r0,r7
div1 r6,r1
shll8 r7
exts.w r1,r0
xor r7,r1 ! replace lsb of result with lsb of dividend
div1 r6,r1
neg r8,r8 ! upper 16 bits of r8 are now 0xffff iff we want end adjm.
div1 r6,r1
and r0,r8
div1 r6,r1
swap.w r8,r7
div1 r6,r1
mov.l @r15+,r8 ! 58 insns, 29 cycles up to here
LOCAL(div_end):
div1 r6,r1
shll8 r0
div1 r6,r1
exts.w r7,r7
div1 r6,r1
add r0,r0
div1 r6,r1
sub r7,r0
extu.b r1,r1
mov.l @r15+,r7
rotcl r1
mov.l @r15+,r6
rts
add r1,r0
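/* Illustrative note on the small-divisor paths below: div_table_inv
   holds 1/d scaled by 2**32 with an implicit leading one (in bit 33
   for powers of two, whose table entry is 0), so
   (2**32 + inv) * n >> 32 == n + (inv * n >> 32).  The dmulu.l/addc
   pair computes exactly that, rotcr folds the carry back in as the
   33rd bit, and div_table_clz supplies the final right-shift count. */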
.balign 4
LOCAL(udiv_le128): ! 4 cycles up to here (or 7 for mispredict)
mova LOCAL(div_table_inv),r0
shll2 r6
mov.l @(r0,r6),r1
mova LOCAL(div_table_clz),r0
lds r4,mach
!
!
!
tst r1,r1
!
bt 0f
dmulu.l r1,r4
0: mov.b @(r0,r5),r1
clrt
!
!
sts mach,r0
addc r4,r0
rotcr r0
mov.l @r15+,r6
rts
shld r1,r0
.balign 4
LOCAL(div_le128): ! 3 cycles up to here (or 6 for mispredict)
mova LOCAL(div_table_inv),r0
shll2 r6
mov.l @(r0,r6),r1
mova LOCAL(div_table_clz),r0
neg r4,r6
bf 0f
mov r4,r6
0: lds r6,mach
tst r1,r1
bt 0f
dmulu.l r1,r6
0: div0s r4,r5
mov.b @(r0,r5),r1
bt/s LOCAL(le128_neg)
clrt
!
sts mach,r0
addc r6,r0
rotcr r0
mov.l @r15+,r6
rts
shld r1,r0
/* Could trap divide by zero for the cost of one cycle more mispredict penalty:
...
dmulu.l r1,r6
0: div0s r4,r5
bt/s LOCAL(le128_neg)
tst r5,r5
bt LOCAL(div_by_zero)
mov.b @(r0,r5),r1
sts mach,r0
addc r6,r0
...
LOCAL(div_by_zero):
trapa #
.balign 4
LOCAL(le128_neg):
bt LOCAL(div_by_zero)
mov.b @(r0,r5),r1
sts mach,r0
addc r6,r0
... */
.balign 4
LOCAL(le128_neg):
sts mach,r0
addc r6,r0
rotcr r0
mov.l @r15+,r6
shad r1,r0
rts
neg r0,r0
ENDFUNC(GLOBAL(udivsi3_i4i))
ENDFUNC(GLOBAL(sdivsi3_i4i))
/* This table has been generated by divtab-sh4.c. */
.balign 4
.byte -7
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -2
.byte -2
.byte -2
.byte -2
.byte -1
.byte -1
.byte 0
LOCAL(div_table_clz):
.byte 0
.byte 0
.byte -1
.byte -1
.byte -2
.byte -2
.byte -2
.byte -2
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
/* 1/-128 .. 1/127, normalized. There is an implicit leading 1 in bit 32,
or in bit 33 for powers of two. */
.balign 4
.long 0x0
.long 0x2040811
.long 0x4104105
.long 0x624DD30
.long 0x8421085
.long 0xA6810A7
.long 0xC9714FC
.long 0xECF56BF
.long 0x11111112
.long 0x135C8114
.long 0x15B1E5F8
.long 0x18118119
.long 0x1A7B9612
.long 0x1CF06ADB
.long 0x1F7047DD
.long 0x21FB7813
.long 0x24924925
.long 0x27350B89
.long 0x29E4129F
.long 0x2C9FB4D9
.long 0x2F684BDB
.long 0x323E34A3
.long 0x3521CFB3
.long 0x38138139
.long 0x3B13B13C
.long 0x3E22CBCF
.long 0x41414142
.long 0x446F8657
.long 0x47AE147B
.long 0x4AFD6A06
.long 0x4E5E0A73
.long 0x51D07EAF
.long 0x55555556
.long 0x58ED2309
.long 0x5C9882BA
.long 0x60581606
.long 0x642C8591
.long 0x68168169
.long 0x6C16C16D
.long 0x702E05C1
.long 0x745D1746
.long 0x78A4C818
.long 0x7D05F418
.long 0x81818182
.long 0x86186187
.long 0x8ACB90F7
.long 0x8F9C18FA
.long 0x948B0FCE
.long 0x9999999A
.long 0x9EC8E952
.long 0xA41A41A5
.long 0xA98EF607
.long 0xAF286BCB
.long 0xB4E81B4F
.long 0xBACF914D
.long 0xC0E07039
.long 0xC71C71C8
.long 0xCD856891
.long 0xD41D41D5
.long 0xDAE6076C
.long 0xE1E1E1E2
.long 0xE9131AC0
.long 0xF07C1F08
.long 0xF81F81F9
.long 0x0
.long 0x4104105
.long 0x8421085
.long 0xC9714FC
.long 0x11111112
.long 0x15B1E5F8
.long 0x1A7B9612
.long 0x1F7047DD
.long 0x24924925
.long 0x29E4129F
.long 0x2F684BDB
.long 0x3521CFB3
.long 0x3B13B13C
.long 0x41414142
.long 0x47AE147B
.long 0x4E5E0A73
.long 0x55555556
.long 0x5C9882BA
.long 0x642C8591
.long 0x6C16C16D
.long 0x745D1746
.long 0x7D05F418
.long 0x86186187
.long 0x8F9C18FA
.long 0x9999999A
.long 0xA41A41A5
.long 0xAF286BCB
.long 0xBACF914D
.long 0xC71C71C8
.long 0xD41D41D5
.long 0xE1E1E1E2
.long 0xF07C1F08
.long 0x0
.long 0x8421085
.long 0x11111112
.long 0x1A7B9612
.long 0x24924925
.long 0x2F684BDB
.long 0x3B13B13C
.long 0x47AE147B
.long 0x55555556
.long 0x642C8591
.long 0x745D1746
.long 0x86186187
.long 0x9999999A
.long 0xAF286BCB
.long 0xC71C71C8
.long 0xE1E1E1E2
.long 0x0
.long 0x11111112
.long 0x24924925
.long 0x3B13B13C
.long 0x55555556
.long 0x745D1746
.long 0x9999999A
.long 0xC71C71C8
.long 0x0
.long 0x24924925
.long 0x55555556
.long 0x9999999A
.long 0x0
.long 0x55555556
.long 0x0
.long 0x0
LOCAL(div_table_inv):
.long 0x0
.long 0x0
.long 0x0
.long 0x55555556
.long 0x0
.long 0x9999999A
.long 0x55555556
.long 0x24924925
.long 0x0
.long 0xC71C71C8
.long 0x9999999A
.long 0x745D1746
.long 0x55555556
.long 0x3B13B13C
.long 0x24924925
.long 0x11111112
.long 0x0
.long 0xE1E1E1E2
.long 0xC71C71C8
.long 0xAF286BCB
.long 0x9999999A
.long 0x86186187
.long 0x745D1746
.long 0x642C8591
.long 0x55555556
.long 0x47AE147B
.long 0x3B13B13C
.long 0x2F684BDB
.long 0x24924925
.long 0x1A7B9612
.long 0x11111112
.long 0x8421085
.long 0x0
.long 0xF07C1F08
.long 0xE1E1E1E2
.long 0xD41D41D5
.long 0xC71C71C8
.long 0xBACF914D
.long 0xAF286BCB
.long 0xA41A41A5
.long 0x9999999A
.long 0x8F9C18FA
.long 0x86186187
.long 0x7D05F418
.long 0x745D1746
.long 0x6C16C16D
.long 0x642C8591
.long 0x5C9882BA
.long 0x55555556
.long 0x4E5E0A73
.long 0x47AE147B
.long 0x41414142
.long 0x3B13B13C
.long 0x3521CFB3
.long 0x2F684BDB
.long 0x29E4129F
.long 0x24924925
.long 0x1F7047DD
.long 0x1A7B9612
.long 0x15B1E5F8
.long 0x11111112
.long 0xC9714FC
.long 0x8421085
.long 0x4104105
.long 0x0
.long 0xF81F81F9
.long 0xF07C1F08
.long 0xE9131AC0
.long 0xE1E1E1E2
.long 0xDAE6076C
.long 0xD41D41D5
.long 0xCD856891
.long 0xC71C71C8
.long 0xC0E07039
.long 0xBACF914D
.long 0xB4E81B4F
.long 0xAF286BCB
.long 0xA98EF607
.long 0xA41A41A5
.long 0x9EC8E952
.long 0x9999999A
.long 0x948B0FCE
.long 0x8F9C18FA
.long 0x8ACB90F7
.long 0x86186187
.long 0x81818182
.long 0x7D05F418
.long 0x78A4C818
.long 0x745D1746
.long 0x702E05C1
.long 0x6C16C16D
.long 0x68168169
.long 0x642C8591
.long 0x60581606
.long 0x5C9882BA
.long 0x58ED2309
.long 0x55555556
.long 0x51D07EAF
.long 0x4E5E0A73
.long 0x4AFD6A06
.long 0x47AE147B
.long 0x446F8657
.long 0x41414142
.long 0x3E22CBCF
.long 0x3B13B13C
.long 0x38138139
.long 0x3521CFB3
.long 0x323E34A3
.long 0x2F684BDB
.long 0x2C9FB4D9
.long 0x29E4129F
.long 0x27350B89
.long 0x24924925
.long 0x21FB7813
.long 0x1F7047DD
.long 0x1CF06ADB
.long 0x1A7B9612
.long 0x18118119
.long 0x15B1E5F8
.long 0x135C8114
.long 0x11111112
.long 0xECF56BF
.long 0xC9714FC
.long 0xA6810A7
.long 0x8421085
.long 0x624DD30
.long 0x4104105
.long 0x2040811
/* maximum error: 0.987342 scaled: 0.921875 */
#endif /* SH3 / SH4 */
#endif /* L_div_table */
/* plugin-libc/libgcc/config/sh/crti.S */
/* Copyright (C) 2000-2022 Free Software Foundation, Inc.
This file was adapted from glibc sources.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "crt.h"
/* The code in sections .init and .fini is supposed to be a single
regular function. The function in .init is called directly from
start in crt1.S. The function in .fini is atexit()ed in crt1.S
too.
crti.S contributes the prologue of a function to these sections,
and crtn.S supplies the epilogue. STARTFILE_SPEC should list
crti.o before any other object files that might add code to .init
or .fini sections, and ENDFILE_SPEC should list crtn.o after any
such object files. */
.section .init
/* The alignment below can't be smaller, otherwise the mova below
breaks. Yes, we might align just the label, but then we'd be
exchanging an alignment here for one there, since the code fragment
below ensures 4-byte alignment on __ELF__. */
#ifdef __ELF__
.p2align 2
#else
.p2align 1
#endif
.global GLOBAL(_init)
GLOBAL(_init):
#ifdef __ELF__
mov.l r12,@-r15
mova 0f,r0
mov.l 0f,r12
#endif
mov.l r14,@-r15
#ifdef __ELF__
add r0,r12
#endif
sts.l pr,@-r15
#ifdef __ELF__
bra 1f
#endif
mov r15,r14
#ifdef __ELF__
0: .long _GLOBAL_OFFSET_TABLE_
1:
#endif
.section .fini
/* The alignment below can't be smaller, otherwise the mova below
breaks. Yes, we might align just the label, but then we'd be
exchanging an alignment here for one there, since the code fragment
below ensures 4-byte alignment on __ELF__. */
#ifdef __ELF__
.p2align 2
#else
.p2align 1
#endif
.global GLOBAL(_fini)
GLOBAL(_fini):
#ifdef __ELF__
mov.l r12,@-r15
mova 0f,r0
mov.l 0f,r12
#endif
mov.l r14,@-r15
#ifdef __ELF__
add r0,r12
#endif
sts.l pr,@-r15
#ifdef __ELF__
bra 1f
#endif
mov r15,r14
#ifdef __ELF__
0: .long _GLOBAL_OFFSET_TABLE_
1:
#endif
/* plugin-libc/libgcc/config/sh/crt1.S */
/* Copyright (C) 2000-2022 Free Software Foundation, Inc.
This file was pretty much copied from newlib.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "crt.h"
#ifdef MMU_SUPPORT
/* Section used for exception/timer interrupt stack area */
.section .data.vbr.stack,"aw"
.align 4
.global __ST_VBR
__ST_VBR:
.zero 1024 * 2 /* ; 2k for VBR handlers */
/* Label at the highest stack address where the stack grows from */
__timer_stack:
#endif /* MMU_SUPPORT */
/* ;----------------------------------------
Normal newlib crt1.S */
! make a place to keep any previous value of the vbr register
! this will only have a value if it has been set by redboot (for example)
.section .bss
old_vbr:
.long 0
#ifdef PROFILE
profiling_enabled:
.long 0
#endif
.section .text
.global start
.import ___rtos_profiler_start_timer
.weak ___rtos_profiler_start_timer
start:
mov.l stack_k,r15
#if defined (__SH3__) || (defined (__SH_FPU_ANY__) && ! defined (__SH2E__) && ! defined (__SH2A__)) || defined (__SH4_NOFPU__)
#define VBR_SETUP
! before zeroing the bss ...
! if the vbr is already set to vbr_start then the program has been restarted
! (i.e. it is not the first time the program has been run since reset)
! reset the vbr to its old value before old_vbr (in bss) is wiped
! this ensures that the later code does not create a circular vbr chain
stc vbr, r1
mov.l vbr_start_k, r2
cmp/eq r1, r2
bf 0f
! reset the old vbr value
mov.l old_vbr_k, r1
mov.l @r1, r2
ldc r2, vbr
0:
#endif /* VBR_SETUP */
! zero out bss
mov.l edata_k,r0
mov.l end_k,r1
mov #0,r2
start_l:
mov.l r2,@r0
add #4,r0
cmp/ge r0,r1
bt start_l
#if defined (__SH_FPU_ANY__)
mov.l set_fpscr_k, r1
mov #4,r4
jsr @r1
shll16 r4 ! Set DN bit (flush denormal inputs to zero)
lds r3,fpscr ! Switch to default precision
#endif /* defined (__SH_FPU_ANY__) */
#ifdef VBR_SETUP
! save the existing contents of the vbr
! there will only be a prior value when using something like redboot
! otherwise it will be zero
stc vbr, r1
mov.l old_vbr_k, r2
mov.l r1, @r2
! setup vbr
mov.l vbr_start_k, r1
ldc r1,vbr
#endif /* VBR_SETUP */
! if an rtos is exporting a timer start fn,
! then pick up an SR which does not enable ints
! (the rtos will take care of this)
mov.l rtos_start_fn, r0
mov.l sr_initial_bare, r1
tst r0, r0
bt set_sr
mov.l sr_initial_rtos, r1
set_sr:
! Set status register (sr)
ldc r1, sr
! arrange for exit to call fini
mov.l atexit_k,r0
mov.l fini_k,r4
jsr @r0
nop
#ifdef PROFILE
! arrange for exit to call _mcleanup (via stop_profiling)
mova stop_profiling,r0
mov.l atexit_k,r1
jsr @r1
mov r0, r4
! Call profiler startup code
mov.l monstartup_k, r0
mov.l start_k, r4
mov.l etext_k, r5
jsr @r0
nop
! enable profiling trap
! until now any trap 33s will have been ignored
! This means that all library functions called before this point
! (directly or indirectly) may have the profiling trap at the start.
! Therefore, only mcount itself may not have the extra header.
mov.l profiling_enabled_k2, r0
mov #1, r1
mov.l r1, @r0
#endif /* PROFILE */
! call init
mov.l init_k,r0
jsr @r0
nop
! call the mainline
mov.l main_k,r0
jsr @r0
nop
! call exit
mov r0,r4
mov.l exit_k,r0
jsr @r0
nop
.balign 4
#ifdef PROFILE
stop_profiling:
# stop mcount counting
mov.l profiling_enabled_k2, r0
mov #0, r1
mov.l r1, @r0
# call mcleanup
mov.l mcleanup_k, r0
jmp @r0
nop
.balign 4
mcleanup_k:
.long __mcleanup
monstartup_k:
.long ___monstartup
profiling_enabled_k2:
.long profiling_enabled
start_k:
.long _start
etext_k:
.long __etext
#endif /* PROFILE */
.align 2
#if defined (__SH_FPU_ANY__)
set_fpscr_k:
.long ___set_fpscr
#endif /* defined (__SH_FPU_ANY__) */
stack_k:
.long _stack
edata_k:
.long _edata
end_k:
.long _end
main_k:
.long ___setup_argv_and_call_main
exit_k:
.long _exit
atexit_k:
.long _atexit
init_k:
.long GLOBAL(_init)
fini_k:
.long GLOBAL(_fini)
#ifdef VBR_SETUP
old_vbr_k:
.long old_vbr
vbr_start_k:
.long vbr_start
#endif /* VBR_SETUP */
sr_initial_rtos:
! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work.
! Whether profiling or not, keep interrupts masked,
! the RTOS will enable these if required.
.long 0x600000f1
rtos_start_fn:
.long ___rtos_profiler_start_timer
#ifdef PROFILE
sr_initial_bare:
! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work.
! For bare machine, we need to enable interrupts to get profiling working
.long 0x60000001
#else
sr_initial_bare:
! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work.
! Keep interrupts disabled - the application will enable as required.
.long 0x600000f1
#endif
! supplied for backward compatibility only, in case of linking
! code whose main() was compiled with an older version of GCC.
.global ___main
___main:
rts
nop
#ifdef VBR_SETUP
! Exception handlers
.section .text.vbr, "ax"
vbr_start:
.org 0x100
vbr_100:
#ifdef PROFILE
! Note on register usage.
! we use r0..r3 as scratch in this code. If we are here due to a trapa for profiling
! then this is OK as we are just before executing any function code.
! The other registers r4..r7 we save explicitly on the stack
! Remaining registers are saved by normal ABI conventions and we assert we do not
! use floating point registers.
mov.l expevt_k1, r1
mov.l @r1, r1
mov.l event_mask, r0
and r0,r1
mov.l trapcode_k, r2
cmp/eq r1,r2
bt 1f
bra handler_100 ! if not a trapa, go to default handler
nop
1:
mov.l trapa_k, r0
mov.l @r0, r0
shlr2 r0 ! trapa code is shifted by 2.
cmp/eq #33, r0
bt 2f
bra handler_100
nop
2:
! If here then it looks like we have trap #33
! Now we need to call mcount with the following convention
! Save and restore r4..r7
mov.l r4,@-r15
mov.l r5,@-r15
mov.l r6,@-r15
mov.l r7,@-r15
sts.l pr,@-r15
! r4 is frompc.
! r5 is selfpc
! r0 is the branch back address.
! The code sequence emitted by gcc for the profiling trap is
! .align 2
! trapa #33
! .align 2
! .long lab Where lab is planted by the compiler. This is the address
! of a datum that needs to be incremented.
sts pr, r4 ! frompc
stc spc, r5 ! selfpc
mov #2, r2
not r2, r2 ! pattern to align to 4
and r2, r5 ! r5 now has aligned address
! add #4, r5 ! r5 now has address of address
mov r5, r2 ! Remember it.
! mov.l @r5, r5 ! r5 has value of label (lab in above example)
add #8, r2
ldc r2, spc ! our return address avoiding address word
! only call mcount if profiling is enabled
mov.l profiling_enabled_k, r0
mov.l @r0, r0
cmp/eq #0, r0
bt 3f
! call mcount
mov.l mcount_k, r2
jsr @r2
nop
3:
lds.l @r15+,pr
mov.l @r15+,r7
mov.l @r15+,r6
mov.l @r15+,r5
mov.l @r15+,r4
rte
nop
.balign 4
event_mask:
.long 0xfff
trapcode_k:
.long 0x160
expevt_k1:
.long 0xff000024 ! Address of expevt
trapa_k:
.long 0xff000020
mcount_k:
.long __call_mcount
profiling_enabled_k:
.long profiling_enabled
#endif
! Non profiling case.
handler_100:
mov.l 2f, r0 ! load the old vbr setting (if any)
mov.l @r0, r0
cmp/eq #0, r0
bf 1f
! no previous vbr - jump to own generic handler
bra handler
nop
1: ! there was a previous handler - chain them
add #0x7f, r0 ! 0x7f
add #0x7f, r0 ! 0xfe
add #0x2, r0 ! add 0x100 without corrupting another register
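! (SH "add #imm" takes only a signed 8-bit immediate, hence the
! three-instruction sum here and the rotate tricks at the later
! vectors.)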
jmp @r0
nop
.balign 4
2:
.long old_vbr
.org 0x400
vbr_400: ! Should be at vbr+0x400
mov.l 2f, r0 ! load the old vbr setting (if any)
mov.l @r0, r0
cmp/eq #0, r0
! no previous vbr - jump to own generic handler
bt handler
! there was a previous handler - chain them
rotcr r0
rotcr r0
add #0x7f, r0 ! 0x1fc
add #0x7f, r0 ! 0x3f8
add #0x02, r0 ! 0x400
rotcl r0
rotcl r0 ! Add 0x400 without corrupting another register
jmp @r0
nop
.balign 4
2:
.long old_vbr
handler:
/* If the trap handler is there call it */
mov.l superh_trap_handler_k, r0
cmp/eq #0, r0 ! True if zero.
bf 3f
bra chandler
nop
3:
! Here handler available, call it.
/* Now call the trap handler with as much of the context unchanged as possible.
Move trapping address into PR to make it look like the trap point */
stc spc, r1
lds r1, pr
mov.l expevt_k, r4
mov.l @r4, r4 ! r4 is value of expevt, first parameter.
mov r1, r5 ! Remember trapping pc.
mov r1, r6 ! Remember trapping pc.
mov.l chandler_k, r1
mov.l superh_trap_handler_k, r2
! jmp to trap handler to avoid disturbing pr.
jmp @r2
nop
.org 0x600
vbr_600:
#ifdef PROFILE
! Should be at vbr+0x600
! Now we are in the land of interrupts so need to save more state.
! Save register state
mov.l interrupt_stack_k, r15 ! r15 has been saved to sgr.
mov.l r0,@-r15
mov.l r1,@-r15
mov.l r2,@-r15
mov.l r3,@-r15
mov.l r4,@-r15
mov.l r5,@-r15
mov.l r6,@-r15
mov.l r7,@-r15
sts.l pr,@-r15
sts.l mach,@-r15
sts.l macl,@-r15
#if defined(__SH_FPU_ANY__)
! Save fpul and fpscr, save fr0-fr7 in 64 bit mode
! and set the pervading precision for the timer_handler
mov #0,r0
sts.l fpul,@-r15
sts.l fpscr,@-r15
lds r0,fpscr ! Clear fpscr
fmov fr0,@-r15
fmov fr1,@-r15
fmov fr2,@-r15
fmov fr3,@-r15
mov.l pervading_precision_k,r0
fmov fr4,@-r15
fmov fr5,@-r15
mov.l @r0,r0
fmov fr6,@-r15
fmov fr7,@-r15
lds r0,fpscr
#endif /* __SH_FPU_ANY__ */
! Pass interrupted pc to timer_handler as first parameter (r4).
stc spc, r4
mov.l timer_handler_k, r0
jsr @r0
nop
#if defined(__SH_FPU_ANY__)
mov #0,r0
lds r0,fpscr ! Clear the fpscr
fmov @r15+,fr7
fmov @r15+,fr6
fmov @r15+,fr5
fmov @r15+,fr4
fmov @r15+,fr3
fmov @r15+,fr2
fmov @r15+,fr1
fmov @r15+,fr0
lds.l @r15+,fpscr
lds.l @r15+,fpul
#endif /* __SH_FPU_ANY__ */
lds.l @r15+,macl
lds.l @r15+,mach
lds.l @r15+,pr
mov.l @r15+,r7
mov.l @r15+,r6
mov.l @r15+,r5
mov.l @r15+,r4
mov.l @r15+,r3
mov.l @r15+,r2
mov.l @r15+,r1
mov.l @r15+,r0
stc sgr, r15 ! Restore r15, destroyed by this sequence.
rte
nop
#if defined(__SH_FPU_ANY__)
.balign 4
pervading_precision_k:
.long GLOBAL(__fpscr_values)+4
#endif
#else
mov.l 2f, r0 ! Load the old vbr setting (if any).
mov.l @r0, r0
cmp/eq #0, r0
! no previous vbr - jump to own handler
bt chandler
! there was a previous handler - chain them
rotcr r0
rotcr r0
add #0x7f, r0 ! 0x1fc
add #0x7f, r0 ! 0x3f8
add #0x7f, r0 ! 0x5f4
add #0x03, r0 ! 0x600
rotcl r0
rotcl r0 ! Add 0x600 without corrupting another register
jmp @r0
nop
.balign 4
2:
.long old_vbr
#endif /* PROFILE code */
chandler:
mov.l expevt_k, r4
mov.l @r4, r4 ! r4 is value of expevt hence making this the return code
mov.l handler_exit_k,r0
jsr @r0
nop
! We should never return from _exit but in case we do we would enter the
! the following tight loop
limbo:
bra limbo
nop
.balign 4
#ifdef PROFILE
interrupt_stack_k:
.long __timer_stack ! The high end of the stack
timer_handler_k:
.long __profil_counter
#endif
expevt_k:
.long 0xff000024 ! Address of expevt
chandler_k:
.long chandler
superh_trap_handler_k:
.long __superh_trap_handler
handler_exit_k:
.long _exit
.align 2
! Simulated compile of trap handler.
.section .debug_abbrev,"",@progbits
.Ldebug_abbrev0:
.section .debug_info,"",@progbits
.Ldebug_info0:
.section .debug_line,"",@progbits
.Ldebug_line0:
.text
.Ltext0:
.align 5
.type __superh_trap_handler,@function
__superh_trap_handler:
.LFB1:
mov.l r14,@-r15
.LCFI0:
add #-4,r15
.LCFI1:
mov r15,r14
.LCFI2:
mov.l r4,@r14
lds r1, pr
add #4,r14
mov r14,r15
mov.l @r15+,r14
rts
nop
.LFE1:
.Lfe1:
.size __superh_trap_handler,.Lfe1-__superh_trap_handler
.section .debug_frame,"",@progbits
.Lframe0:
.ualong .LECIE0-.LSCIE0
.LSCIE0:
.ualong 0xffffffff
.byte 0x1
.string ""
.uleb128 0x1
.sleb128 -4
.byte 0x11
.byte 0xc
.uleb128 0xf
.uleb128 0x0
.align 2
.LECIE0:
.LSFDE0:
.ualong .LEFDE0-.LASFDE0
.LASFDE0:
.ualong .Lframe0
.ualong .LFB1
.ualong .LFE1-.LFB1
.byte 0x4
.ualong .LCFI0-.LFB1
.byte 0xe
.uleb128 0x4
.byte 0x4
.ualong .LCFI1-.LCFI0
.byte 0xe
.uleb128 0x8
.byte 0x8e
.uleb128 0x1
.byte 0x4
.ualong .LCFI2-.LCFI1
.byte 0xd
.uleb128 0xe
.align 2
.LEFDE0:
.text
.Letext0:
.section .debug_info
.ualong 0xb3
.uaword 0x2
.ualong .Ldebug_abbrev0
.byte 0x4
.uleb128 0x1
.ualong .Ldebug_line0
.ualong .Letext0
.ualong .Ltext0
.string "trap_handler.c"
.string "xxxxxxxxxxxxxxxxxxxxxxxxxxxx"
.string "GNU C 3.2 20020529 (experimental)"
.byte 0x1
.uleb128 0x2
.ualong 0xa6
.byte 0x1
.string "_superh_trap_handler"
.byte 0x1
.byte 0x2
.byte 0x1
.ualong .LFB1
.ualong .LFE1
.byte 0x1
.byte 0x5e
.uleb128 0x3
.string "trap_reason"
.byte 0x1
.byte 0x1
.ualong 0xa6
.byte 0x2
.byte 0x91
.sleb128 0
.byte 0x0
.uleb128 0x4
.string "unsigned int"
.byte 0x4
.byte 0x7
.byte 0x0
.section .debug_abbrev
.uleb128 0x1
.uleb128 0x11
.byte 0x1
.uleb128 0x10
.uleb128 0x6
.uleb128 0x12
.uleb128 0x1
.uleb128 0x11
.uleb128 0x1
.uleb128 0x3
.uleb128 0x8
.uleb128 0x1b
.uleb128 0x8
.uleb128 0x25
.uleb128 0x8
.uleb128 0x13
.uleb128 0xb
.byte 0x0
.byte 0x0
.uleb128 0x2
.uleb128 0x2e
.byte 0x1
.uleb128 0x1
.uleb128 0x13
.uleb128 0x3f
.uleb128 0xc
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x27
.uleb128 0xc
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x1
.uleb128 0x40
.uleb128 0xa
.byte 0x0
.byte 0x0
.uleb128 0x3
.uleb128 0x5
.byte 0x0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0xa
.byte 0x0
.byte 0x0
.uleb128 0x4
.uleb128 0x24
.byte 0x0
.uleb128 0x3
.uleb128 0x8
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3e
.uleb128 0xb
.byte 0x0
.byte 0x0
.byte 0x0
.section .debug_pubnames,"",@progbits
.ualong 0x27
.uaword 0x2
.ualong .Ldebug_info0
.ualong 0xb7
.ualong 0x67
.string "_superh_trap_handler"
.ualong 0x0
.section .debug_aranges,"",@progbits
.ualong 0x1c
.uaword 0x2
.ualong .Ldebug_info0
.byte 0x4
.byte 0x0
.uaword 0x0
.uaword 0x0
.ualong .Ltext0
.ualong .Letext0-.Ltext0
.ualong 0x0
.ualong 0x0
#endif /* VBR_SETUP */
/* plugin-libc/libgcc/config/sh/lib1funcs.S */
/* Copyright (C) 1994-2022 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
!! libgcc routines for the Renesas / SuperH SH CPUs.
!! Contributed by Steve Chamberlain.
!! sac@cygnus.com
!! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines
!! recoded in assembly by Toshiyasu Morita
!! tm@netcom.com
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
/* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and
ELF local label prefixes by Jörn Rennecke
amylaar@cygnus.com */
#include "lib1funcs.h"
/* t-vxworks needs to build both PIC and non-PIC versions of libgcc,
so it is more convenient to define NO_FPSCR_VALUES here than to
define it on the command line. */
#if defined __vxworks && defined __PIC__
#define NO_FPSCR_VALUES
#endif
#ifdef L_ashiftrt
.global GLOBAL(ashiftrt_r4_0)
.global GLOBAL(ashiftrt_r4_1)
.global GLOBAL(ashiftrt_r4_2)
.global GLOBAL(ashiftrt_r4_3)
.global GLOBAL(ashiftrt_r4_4)
.global GLOBAL(ashiftrt_r4_5)
.global GLOBAL(ashiftrt_r4_6)
.global GLOBAL(ashiftrt_r4_7)
.global GLOBAL(ashiftrt_r4_8)
.global GLOBAL(ashiftrt_r4_9)
.global GLOBAL(ashiftrt_r4_10)
.global GLOBAL(ashiftrt_r4_11)
.global GLOBAL(ashiftrt_r4_12)
.global GLOBAL(ashiftrt_r4_13)
.global GLOBAL(ashiftrt_r4_14)
.global GLOBAL(ashiftrt_r4_15)
.global GLOBAL(ashiftrt_r4_16)
.global GLOBAL(ashiftrt_r4_17)
.global GLOBAL(ashiftrt_r4_18)
.global GLOBAL(ashiftrt_r4_19)
.global GLOBAL(ashiftrt_r4_20)
.global GLOBAL(ashiftrt_r4_21)
.global GLOBAL(ashiftrt_r4_22)
.global GLOBAL(ashiftrt_r4_23)
.global GLOBAL(ashiftrt_r4_24)
.global GLOBAL(ashiftrt_r4_25)
.global GLOBAL(ashiftrt_r4_26)
.global GLOBAL(ashiftrt_r4_27)
.global GLOBAL(ashiftrt_r4_28)
.global GLOBAL(ashiftrt_r4_29)
.global GLOBAL(ashiftrt_r4_30)
.global GLOBAL(ashiftrt_r4_31)
.global GLOBAL(ashiftrt_r4_32)
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_0))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_1))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_2))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_3))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_4))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_5))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_6))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_7))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_8))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_9))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_10))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_11))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_12))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_13))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_14))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_15))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_16))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_17))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_18))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_19))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_20))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_21))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_22))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_23))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_24))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_25))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_26))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_27))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_28))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_29))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_30))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_31))
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_32))
.align 1
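! Each entry point below simply falls through the remaining shar
! instructions, so ashiftrt_r4_N performs exactly N arithmetic shifts;
! the 16, 24, 31 and 32 cases use exts.w, exts.b and the rotcl/subc
! sign-fill idiom as shortcuts.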
GLOBAL(ashiftrt_r4_32):
GLOBAL(ashiftrt_r4_31):
rotcl r4
rts
subc r4,r4
GLOBAL(ashiftrt_r4_30):
shar r4
GLOBAL(ashiftrt_r4_29):
shar r4
GLOBAL(ashiftrt_r4_28):
shar r4
GLOBAL(ashiftrt_r4_27):
shar r4
GLOBAL(ashiftrt_r4_26):
shar r4
GLOBAL(ashiftrt_r4_25):
shar r4
GLOBAL(ashiftrt_r4_24):
shlr16 r4
shlr8 r4
rts
exts.b r4,r4
GLOBAL(ashiftrt_r4_23):
shar r4
GLOBAL(ashiftrt_r4_22):
shar r4
GLOBAL(ashiftrt_r4_21):
shar r4
GLOBAL(ashiftrt_r4_20):
shar r4
GLOBAL(ashiftrt_r4_19):
shar r4
GLOBAL(ashiftrt_r4_18):
shar r4
GLOBAL(ashiftrt_r4_17):
shar r4
GLOBAL(ashiftrt_r4_16):
shlr16 r4
rts
exts.w r4,r4
GLOBAL(ashiftrt_r4_15):
shar r4
GLOBAL(ashiftrt_r4_14):
shar r4
GLOBAL(ashiftrt_r4_13):
shar r4
GLOBAL(ashiftrt_r4_12):
shar r4
GLOBAL(ashiftrt_r4_11):
shar r4
GLOBAL(ashiftrt_r4_10):
shar r4
GLOBAL(ashiftrt_r4_9):
shar r4
GLOBAL(ashiftrt_r4_8):
shar r4
GLOBAL(ashiftrt_r4_7):
shar r4
GLOBAL(ashiftrt_r4_6):
shar r4
GLOBAL(ashiftrt_r4_5):
shar r4
GLOBAL(ashiftrt_r4_4):
shar r4
GLOBAL(ashiftrt_r4_3):
shar r4
GLOBAL(ashiftrt_r4_2):
shar r4
GLOBAL(ashiftrt_r4_1):
rts
shar r4
GLOBAL(ashiftrt_r4_0):
rts
nop
ENDFUNC(GLOBAL(ashiftrt_r4_0))
ENDFUNC(GLOBAL(ashiftrt_r4_1))
ENDFUNC(GLOBAL(ashiftrt_r4_2))
ENDFUNC(GLOBAL(ashiftrt_r4_3))
ENDFUNC(GLOBAL(ashiftrt_r4_4))
ENDFUNC(GLOBAL(ashiftrt_r4_5))
ENDFUNC(GLOBAL(ashiftrt_r4_6))
ENDFUNC(GLOBAL(ashiftrt_r4_7))
ENDFUNC(GLOBAL(ashiftrt_r4_8))
ENDFUNC(GLOBAL(ashiftrt_r4_9))
ENDFUNC(GLOBAL(ashiftrt_r4_10))
ENDFUNC(GLOBAL(ashiftrt_r4_11))
ENDFUNC(GLOBAL(ashiftrt_r4_12))
ENDFUNC(GLOBAL(ashiftrt_r4_13))
ENDFUNC(GLOBAL(ashiftrt_r4_14))
ENDFUNC(GLOBAL(ashiftrt_r4_15))
ENDFUNC(GLOBAL(ashiftrt_r4_16))
ENDFUNC(GLOBAL(ashiftrt_r4_17))
ENDFUNC(GLOBAL(ashiftrt_r4_18))
ENDFUNC(GLOBAL(ashiftrt_r4_19))
ENDFUNC(GLOBAL(ashiftrt_r4_20))
ENDFUNC(GLOBAL(ashiftrt_r4_21))
ENDFUNC(GLOBAL(ashiftrt_r4_22))
ENDFUNC(GLOBAL(ashiftrt_r4_23))
ENDFUNC(GLOBAL(ashiftrt_r4_24))
ENDFUNC(GLOBAL(ashiftrt_r4_25))
ENDFUNC(GLOBAL(ashiftrt_r4_26))
ENDFUNC(GLOBAL(ashiftrt_r4_27))
ENDFUNC(GLOBAL(ashiftrt_r4_28))
ENDFUNC(GLOBAL(ashiftrt_r4_29))
ENDFUNC(GLOBAL(ashiftrt_r4_30))
ENDFUNC(GLOBAL(ashiftrt_r4_31))
ENDFUNC(GLOBAL(ashiftrt_r4_32))
#endif
#ifdef L_ashiftrt_n
!
! GLOBAL(ashrsi3)
!
! Entry:
!
! r4: Value to shift
! r5: Shift count
!
! Exit:
!
! r0: Result
!
! Destroys:
!
! T bit, r5
!
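! The shift count is masked to 0..31 and indexes a byte table of
! PC-relative label offsets; braf (or an add/jmp pair on SH1) then
! lands in a run of shar instructions of exactly the right length.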
.global GLOBAL(ashrsi3)
HIDDEN_FUNC(GLOBAL(ashrsi3))
.align 2
GLOBAL(ashrsi3):
mov #31,r0
and r0,r5
mova LOCAL(ashrsi3_table),r0
mov.b @(r0,r5),r5
#ifdef __sh1__
add r5,r0
jmp @r0
#else
braf r5
#endif
mov r4,r0
.align 2
LOCAL(ashrsi3_table):
.byte LOCAL(ashrsi3_0)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_1)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_2)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_3)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_4)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_5)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_6)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_7)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_8)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_9)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_10)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_11)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_12)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_13)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_14)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_15)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_16)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_17)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_18)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_19)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_20)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_21)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_22)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_23)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_24)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_25)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_26)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_27)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_28)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_29)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_30)-LOCAL(ashrsi3_table)
.byte LOCAL(ashrsi3_31)-LOCAL(ashrsi3_table)
LOCAL(ashrsi3_31):
rotcl r0
rts
subc r0,r0
LOCAL(ashrsi3_30):
shar r0
LOCAL(ashrsi3_29):
shar r0
LOCAL(ashrsi3_28):
shar r0
LOCAL(ashrsi3_27):
shar r0
LOCAL(ashrsi3_26):
shar r0
LOCAL(ashrsi3_25):
shar r0
LOCAL(ashrsi3_24):
shlr16 r0
shlr8 r0
rts
exts.b r0,r0
LOCAL(ashrsi3_23):
shar r0
LOCAL(ashrsi3_22):
shar r0
LOCAL(ashrsi3_21):
shar r0
LOCAL(ashrsi3_20):
shar r0
LOCAL(ashrsi3_19):
shar r0
LOCAL(ashrsi3_18):
shar r0
LOCAL(ashrsi3_17):
shar r0
LOCAL(ashrsi3_16):
shlr16 r0
rts
exts.w r0,r0
LOCAL(ashrsi3_15):
shar r0
LOCAL(ashrsi3_14):
shar r0
LOCAL(ashrsi3_13):
shar r0
LOCAL(ashrsi3_12):
shar r0
LOCAL(ashrsi3_11):
shar r0
LOCAL(ashrsi3_10):
shar r0
LOCAL(ashrsi3_9):
shar r0
LOCAL(ashrsi3_8):
shar r0
LOCAL(ashrsi3_7):
shar r0
LOCAL(ashrsi3_6):
shar r0
LOCAL(ashrsi3_5):
shar r0
LOCAL(ashrsi3_4):
shar r0
LOCAL(ashrsi3_3):
shar r0
LOCAL(ashrsi3_2):
shar r0
LOCAL(ashrsi3_1):
rts
shar r0
LOCAL(ashrsi3_0):
rts
nop
ENDFUNC(GLOBAL(ashrsi3))
#endif
#ifdef L_ashiftlt
!
! GLOBAL(ashlsi3)
! (For compatibility with older binaries, not used by compiler)
!
! Entry:
! r4: Value to shift
! r5: Shift count
!
! Exit:
! r0: Result
!
! Destroys:
! T bit
!
!
! GLOBAL(ashlsi3_r0)
!
! Entry:
! r4: Value to shift
! r0: Shift count
!
! Exit:
! r0: Result
!
! Destroys:
! T bit
.global GLOBAL(ashlsi3)
.global GLOBAL(ashlsi3_r0)
HIDDEN_FUNC(GLOBAL(ashlsi3))
HIDDEN_FUNC(GLOBAL(ashlsi3_r0))
GLOBAL(ashlsi3):
mov r5,r0
.align 2
GLOBAL(ashlsi3_r0):
#ifdef __sh1__
and #31,r0
shll2 r0
mov.l r4,@-r15
mov r0,r4
mova LOCAL(ashlsi3_table),r0
add r4,r0
mov.l @r15+,r4
jmp @r0
mov r4,r0
.align 2
#else
and #31,r0
shll2 r0
braf r0
mov r4,r0
#endif
LOCAL(ashlsi3_table):
rts // << 0
nop
LOCAL(ashlsi_1):
rts // << 1
shll r0
LOCAL(ashlsi_2): // << 2
rts
shll2 r0
bra LOCAL(ashlsi_1) // << 3
shll2 r0
bra LOCAL(ashlsi_2) // << 4
shll2 r0
bra LOCAL(ashlsi_5) // << 5
shll r0
bra LOCAL(ashlsi_6) // << 6
shll2 r0
bra LOCAL(ashlsi_7) // << 7
shll r0
LOCAL(ashlsi_8): // << 8
rts
shll8 r0
bra LOCAL(ashlsi_8) // << 9
shll r0
bra LOCAL(ashlsi_8) // << 10
shll2 r0
bra LOCAL(ashlsi_11) // << 11
shll r0
bra LOCAL(ashlsi_12) // << 12
shll2 r0
bra LOCAL(ashlsi_13) // << 13
shll r0
bra LOCAL(ashlsi_14) // << 14
shll8 r0
bra LOCAL(ashlsi_15) // << 15
shll8 r0
LOCAL(ashlsi_16): // << 16
rts
shll16 r0
bra LOCAL(ashlsi_16) // << 17
shll r0
bra LOCAL(ashlsi_16) // << 18
shll2 r0
bra LOCAL(ashlsi_19) // << 19
shll r0
bra LOCAL(ashlsi_20) // << 20
shll2 r0
bra LOCAL(ashlsi_21) // << 21
shll r0
bra LOCAL(ashlsi_22) // << 22
shll16 r0
bra LOCAL(ashlsi_23) // << 23
shll16 r0
bra LOCAL(ashlsi_16) // << 24
shll8 r0
bra LOCAL(ashlsi_25) // << 25
shll r0
bra LOCAL(ashlsi_26) // << 26
shll2 r0
bra LOCAL(ashlsi_27) // << 27
shll r0
bra LOCAL(ashlsi_28) // << 28
shll2 r0
bra LOCAL(ashlsi_29) // << 29
shll16 r0
bra LOCAL(ashlsi_30) // << 30
shll16 r0
and #1,r0 // << 31
rts
rotr r0
LOCAL(ashlsi_7):
shll2 r0
LOCAL(ashlsi_5):
LOCAL(ashlsi_6):
shll2 r0
rts
LOCAL(ashlsi_13):
shll2 r0
LOCAL(ashlsi_12):
LOCAL(ashlsi_11):
shll8 r0
rts
LOCAL(ashlsi_21):
shll2 r0
LOCAL(ashlsi_20):
LOCAL(ashlsi_19):
shll16 r0
rts
LOCAL(ashlsi_28):
LOCAL(ashlsi_27):
shll2 r0
LOCAL(ashlsi_26):
LOCAL(ashlsi_25):
shll16 r0
rts
shll8 r0
LOCAL(ashlsi_22):
LOCAL(ashlsi_14):
shlr2 r0
rts
shll8 r0
LOCAL(ashlsi_23):
LOCAL(ashlsi_15):
shlr r0
rts
shll8 r0
LOCAL(ashlsi_29):
shlr r0
LOCAL(ashlsi_30):
shlr2 r0
rts
shll16 r0
ENDFUNC(GLOBAL(ashlsi3))
ENDFUNC(GLOBAL(ashlsi3_r0))
#endif
#ifdef L_lshiftrt
!
! GLOBAL(lshrsi3)
! (For compatibility with older binaries, not used by compiler)
!
! Entry:
! r4: Value to shift
! r5: Shift count
!
! Exit:
! r0: Result
!
! Destroys:
! T bit
!
!
! GLOBAL(lshrsi3_r0)
!
! Entry:
! r4: Value to shift
! r0: Shift count
!
! Exit:
! r0: Result
!
! Destroys:
! T bit
.global GLOBAL(lshrsi3)
.global GLOBAL(lshrsi3_r0)
HIDDEN_FUNC(GLOBAL(lshrsi3))
HIDDEN_FUNC(GLOBAL(lshrsi3_r0))
GLOBAL(lshrsi3):
mov r5,r0
.align 2
GLOBAL(lshrsi3_r0):
#ifdef __sh1__
and #31,r0
shll2 r0
mov.l r4,@-r15
mov r0,r4
mova LOCAL(lshrsi3_table),r0
add r4,r0
mov.l @r15+,r4
jmp @r0
mov r4,r0
.align 2
#else
and #31,r0
shll2 r0
braf r0
mov r4,r0
#endif
LOCAL(lshrsi3_table):
rts // >> 0
nop
LOCAL(lshrsi_1): // >> 1
rts
shlr r0
LOCAL(lshrsi_2): // >> 2
rts
shlr2 r0
bra LOCAL(lshrsi_1) // >> 3
shlr2 r0
bra LOCAL(lshrsi_2) // >> 4
shlr2 r0
bra LOCAL(lshrsi_5) // >> 5
shlr r0
bra LOCAL(lshrsi_6) // >> 6
shlr2 r0
bra LOCAL(lshrsi_7) // >> 7
shlr r0
LOCAL(lshrsi_8): // >> 8
rts
shlr8 r0
bra LOCAL(lshrsi_8) // >> 9
shlr r0
bra LOCAL(lshrsi_8) // >> 10
shlr2 r0
bra LOCAL(lshrsi_11) // >> 11
shlr r0
bra LOCAL(lshrsi_12) // >> 12
shlr2 r0
bra LOCAL(lshrsi_13) // >> 13
shlr r0
bra LOCAL(lshrsi_14) // >> 14
shlr8 r0
bra LOCAL(lshrsi_15) // >> 15
shlr8 r0
LOCAL(lshrsi_16): // >> 16
rts
shlr16 r0
bra LOCAL(lshrsi_16) // >> 17
shlr r0
bra LOCAL(lshrsi_16) // >> 18
shlr2 r0
bra LOCAL(lshrsi_19) // >> 19
shlr r0
bra LOCAL(lshrsi_20) // >> 20
shlr2 r0
bra LOCAL(lshrsi_21) // >> 21
shlr r0
bra LOCAL(lshrsi_22) // >> 22
shlr16 r0
bra LOCAL(lshrsi_23) // >> 23
shlr16 r0
bra LOCAL(lshrsi_16) // >> 24
shlr8 r0
bra LOCAL(lshrsi_25) // >> 25
shlr r0
bra LOCAL(lshrsi_26) // >> 26
shlr2 r0
bra LOCAL(lshrsi_27) // >> 27
shlr r0
bra LOCAL(lshrsi_28) // >> 28
shlr2 r0
bra LOCAL(lshrsi_29) // >> 29
shlr16 r0
bra LOCAL(lshrsi_30) // >> 30
shlr16 r0
shll r0 // >> 31
rts
movt r0
LOCAL(lshrsi_7):
shlr2 r0
LOCAL(lshrsi_5):
LOCAL(lshrsi_6):
shlr2 r0
rts
LOCAL(lshrsi_13):
shlr2 r0
LOCAL(lshrsi_12):
LOCAL(lshrsi_11):
shlr8 r0
rts
LOCAL(lshrsi_21):
shlr2 r0
LOCAL(lshrsi_20):
LOCAL(lshrsi_19):
shlr16 r0
rts
LOCAL(lshrsi_28):
LOCAL(lshrsi_27):
shlr2 r0
LOCAL(lshrsi_26):
LOCAL(lshrsi_25):
shlr16 r0
rts
shlr8 r0
LOCAL(lshrsi_22):
LOCAL(lshrsi_14):
shll2 r0
rts
shlr8 r0
LOCAL(lshrsi_23):
LOCAL(lshrsi_15):
shll r0
rts
shlr8 r0
LOCAL(lshrsi_29):
shll r0
LOCAL(lshrsi_30):
shll2 r0
rts
shlr16 r0
ENDFUNC(GLOBAL(lshrsi3))
ENDFUNC(GLOBAL(lshrsi3_r0))
#endif
#ifdef L_movmem
.text
.balign 4
.global GLOBAL(movmem)
HIDDEN_FUNC(GLOBAL(movmem))
HIDDEN_ALIAS(movstr,movmem)
/* This would be a lot simpler if r6 contained the byte count
minus 64, and we wouldn't be called here for a byte count of 64. */
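/* Illustrative only: a hedged C sketch of the strategy below.  The names
   are hypothetical; the real entry takes a longword count in r6 and the
   tail is handled by jumping into the movmemSI4..movmemSI64 chain at a
   computed offset.

     void movmem_sketch (int *dst, const int *src, int nwords)
     {
       while (nwords > 16)                  // 64-byte large groups
         {
           for (int i = 0; i < 16; i++)
             dst[i] = src[i];
           dst += 16, src += 16, nwords -= 16;
         }
       for (int i = 0; i < nwords; i++)     // tail: one movmemSI* entry
         dst[i] = src[i];
     }
*/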
GLOBAL(movmem):
sts.l pr,@-r15
shll2 r6
bsr GLOBAL(movmemSI52+2)
mov.l @(48,r5),r0
.balign 4
LOCAL(movmem_loop): /* Reached with rts */
mov.l @(60,r5),r0
add #-64,r6
mov.l r0,@(60,r4)
tst r6,r6
mov.l @(56,r5),r0
bt LOCAL(movmem_done)
mov.l r0,@(56,r4)
cmp/pl r6
mov.l @(52,r5),r0
add #64,r5
mov.l r0,@(52,r4)
add #64,r4
bt GLOBAL(movmemSI52)
! done all the large groups, do the remainder
! jump to movmem+
mova GLOBAL(movmemSI4)+4,r0
add r6,r0
jmp @r0
LOCAL(movmem_done): ! share slot insn, works out aligned.
lds.l @r15+,pr
mov.l r0,@(56,r4)
mov.l @(52,r5),r0
rts
mov.l r0,@(52,r4)
.balign 4
! ??? We need aliases movstr* for movmem* for the older libraries. These
! aliases will be removed at some point in the future.
.global GLOBAL(movmemSI64)
HIDDEN_FUNC(GLOBAL(movmemSI64))
HIDDEN_ALIAS(movstrSI64,movmemSI64)
GLOBAL(movmemSI64):
mov.l @(60,r5),r0
mov.l r0,@(60,r4)
.global GLOBAL(movmemSI60)
HIDDEN_FUNC(GLOBAL(movmemSI60))
HIDDEN_ALIAS(movstrSI60,movmemSI60)
GLOBAL(movmemSI60):
mov.l @(56,r5),r0
mov.l r0,@(56,r4)
.global GLOBAL(movmemSI56)
HIDDEN_FUNC(GLOBAL(movmemSI56))
HIDDEN_ALIAS(movstrSI56,movmemSI56)
GLOBAL(movmemSI56):
mov.l @(52,r5),r0
mov.l r0,@(52,r4)
.global GLOBAL(movmemSI52)
HIDDEN_FUNC(GLOBAL(movmemSI52))
HIDDEN_ALIAS(movstrSI52,movmemSI52)
GLOBAL(movmemSI52):
mov.l @(48,r5),r0
mov.l r0,@(48,r4)
.global GLOBAL(movmemSI48)
HIDDEN_FUNC(GLOBAL(movmemSI48))
HIDDEN_ALIAS(movstrSI48,movmemSI48)
GLOBAL(movmemSI48):
mov.l @(44,r5),r0
mov.l r0,@(44,r4)
.global GLOBAL(movmemSI44)
HIDDEN_FUNC(GLOBAL(movmemSI44))
HIDDEN_ALIAS(movstrSI44,movmemSI44)
GLOBAL(movmemSI44):
mov.l @(40,r5),r0
mov.l r0,@(40,r4)
.global GLOBAL(movmemSI40)
HIDDEN_FUNC(GLOBAL(movmemSI40))
HIDDEN_ALIAS(movstrSI40,movmemSI40)
GLOBAL(movmemSI40):
mov.l @(36,r5),r0
mov.l r0,@(36,r4)
.global GLOBAL(movmemSI36)
HIDDEN_FUNC(GLOBAL(movmemSI36))
HIDDEN_ALIAS(movstrSI36,movmemSI36)
GLOBAL(movmemSI36):
mov.l @(32,r5),r0
mov.l r0,@(32,r4)
.global GLOBAL(movmemSI32)
HIDDEN_FUNC(GLOBAL(movmemSI32))
HIDDEN_ALIAS(movstrSI32,movmemSI32)
GLOBAL(movmemSI32):
mov.l @(28,r5),r0
mov.l r0,@(28,r4)
.global GLOBAL(movmemSI28)
HIDDEN_FUNC(GLOBAL(movmemSI28))
HIDDEN_ALIAS(movstrSI28,movmemSI28)
GLOBAL(movmemSI28):
mov.l @(24,r5),r0
mov.l r0,@(24,r4)
.global GLOBAL(movmemSI24)
HIDDEN_FUNC(GLOBAL(movmemSI24))
HIDDEN_ALIAS(movstrSI24,movmemSI24)
GLOBAL(movmemSI24):
mov.l @(20,r5),r0
mov.l r0,@(20,r4)
.global GLOBAL(movmemSI20)
HIDDEN_FUNC(GLOBAL(movmemSI20))
HIDDEN_ALIAS(movstrSI20,movmemSI20)
GLOBAL(movmemSI20):
mov.l @(16,r5),r0
mov.l r0,@(16,r4)
.global GLOBAL(movmemSI16)
HIDDEN_FUNC(GLOBAL(movmemSI16))
HIDDEN_ALIAS(movstrSI16,movmemSI16)
GLOBAL(movmemSI16):
mov.l @(12,r5),r0
mov.l r0,@(12,r4)
.global GLOBAL(movmemSI12)
HIDDEN_FUNC(GLOBAL(movmemSI12))
HIDDEN_ALIAS(movstrSI12,movmemSI12)
GLOBAL(movmemSI12):
mov.l @(8,r5),r0
mov.l r0,@(8,r4)
.global GLOBAL(movmemSI8)
HIDDEN_FUNC(GLOBAL(movmemSI8))
HIDDEN_ALIAS(movstrSI8,movmemSI8)
GLOBAL(movmemSI8):
mov.l @(4,r5),r0
mov.l r0,@(4,r4)
.global GLOBAL(movmemSI4)
HIDDEN_FUNC(GLOBAL(movmemSI4))
HIDDEN_ALIAS(movstrSI4,movmemSI4)
GLOBAL(movmemSI4):
mov.l @(0,r5),r0
rts
mov.l r0,@(0,r4)
ENDFUNC(GLOBAL(movmemSI64))
ENDFUNC(GLOBAL(movmemSI60))
ENDFUNC(GLOBAL(movmemSI56))
ENDFUNC(GLOBAL(movmemSI52))
ENDFUNC(GLOBAL(movmemSI48))
ENDFUNC(GLOBAL(movmemSI44))
ENDFUNC(GLOBAL(movmemSI40))
ENDFUNC(GLOBAL(movmemSI36))
ENDFUNC(GLOBAL(movmemSI32))
ENDFUNC(GLOBAL(movmemSI28))
ENDFUNC(GLOBAL(movmemSI24))
ENDFUNC(GLOBAL(movmemSI20))
ENDFUNC(GLOBAL(movmemSI16))
ENDFUNC(GLOBAL(movmemSI12))
ENDFUNC(GLOBAL(movmemSI8))
ENDFUNC(GLOBAL(movmemSI4))
ENDFUNC(GLOBAL(movmem))
#endif
#ifdef L_movmem_i4
.text
.global GLOBAL(movmem_i4_even)
.global GLOBAL(movmem_i4_odd)
.global GLOBAL(movmemSI12_i4)
HIDDEN_FUNC(GLOBAL(movmem_i4_even))
HIDDEN_FUNC(GLOBAL(movmem_i4_odd))
HIDDEN_FUNC(GLOBAL(movmemSI12_i4))
HIDDEN_ALIAS(movstr_i4_even,movmem_i4_even)
HIDDEN_ALIAS(movstr_i4_odd,movmem_i4_odd)
HIDDEN_ALIAS(movstrSI12_i4,movmemSI12_i4)
.p2align 5
L_movmem_2mod4_end:
mov.l r0,@(16,r4)
rts
mov.l r1,@(20,r4)
.p2align 2
GLOBAL(movmem_i4_even):
mov.l @r5+,r0
bra L_movmem_start_even
mov.l @r5+,r1
GLOBAL(movmem_i4_odd):
mov.l @r5+,r1
add #-4,r4
mov.l @r5+,r2
mov.l @r5+,r3
mov.l r1,@(4,r4)
mov.l r2,@(8,r4)
L_movmem_loop:
mov.l r3,@(12,r4)
dt r6
mov.l @r5+,r0
bt/s L_movmem_2mod4_end
mov.l @r5+,r1
add #16,r4
L_movmem_start_even:
mov.l @r5+,r2
mov.l @r5+,r3
mov.l r0,@r4
dt r6
mov.l r1,@(4,r4)
bf/s L_movmem_loop
mov.l r2,@(8,r4)
rts
mov.l r3,@(12,r4)
ENDFUNC(GLOBAL(movmem_i4_even))
ENDFUNC(GLOBAL(movmem_i4_odd))
.p2align 4
GLOBAL(movmemSI12_i4):
mov.l @r5,r0
mov.l @(4,r5),r1
mov.l @(8,r5),r2
mov.l r0,@r4
mov.l r1,@(4,r4)
rts
mov.l r2,@(8,r4)
ENDFUNC(GLOBAL(movmemSI12_i4))
#endif
#ifdef L_mulsi3
.global GLOBAL(mulsi3)
HIDDEN_FUNC(GLOBAL(mulsi3))
! r4 = aabb
! r5 = ccdd
! r0 = aabb*ccdd via partial products
!
! if aa == 0 and cc == 0
! r0 = bb*dd
!
! else
! r0 = bb*dd + (aa*dd + cc*bb)*65536
!
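!
! Illustrative only: a hedged C sketch of the partial-product scheme
! used below (function and variable names are hypothetical):
!
!   unsigned mulsi3_sketch (unsigned x, unsigned y)
!   {
!     unsigned bb = x & 0xffff, aa = x >> 16;
!     unsigned dd = y & 0xffff, cc = y >> 16;
!     unsigned lo = bb * dd;              /* mulu.w r4,r5 */
!     if (aa == 0 && cc == 0)             /* msws zero?   */
!       return lo;
!     return lo + ((aa * dd + cc * bb) << 16);
!   }
!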
GLOBAL(mulsi3):
mulu.w r4,r5 ! multiply the lsws macl=bb*dd
mov r5,r3 ! r3 = ccdd
swap.w r4,r2 ! r2 = bbaa
xtrct r2,r3 ! r3 = aacc
tst r3,r3 ! msws zero ?
bf hiset
rts ! yes - then we have the answer
sts macl,r0
hiset: sts macl,r0 ! r0 = bb*dd
mulu.w r2,r5 ! brewing macl = aa*dd
sts macl,r1
mulu.w r3,r4 ! brewing macl = cc*bb
sts macl,r2
add r1,r2
shll16 r2
rts
add r2,r0
ENDFUNC(GLOBAL(mulsi3))
#endif
/*------------------------------------------------------------------------------
32 bit signed integer division that uses FPU double precision division. */
#ifdef L_sdivsi3_i4
.title "SH DIVIDE"
#if defined (__SH4__) || defined (__SH2A__)
/* This variant is used when FPSCR.PR = 1 (double precision) is the default
setting.
Args in r4 and r5, result in fpul, clobber dr0, dr2. */
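/* Illustrative only: in effect the routine below is the following C
   (a sketch; the name is hypothetical):

     int sdivsi3_i4_sketch (int a, int b)
     {
       return (int) ((double) a / (double) b);   // float, fdiv, ftrc
     }
*/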
.global GLOBAL(sdivsi3_i4)
HIDDEN_FUNC(GLOBAL(sdivsi3_i4))
GLOBAL(sdivsi3_i4):
lds r4,fpul
float fpul,dr0
lds r5,fpul
float fpul,dr2
fdiv dr2,dr0
rts
ftrc dr0,fpul
ENDFUNC(GLOBAL(sdivsi3_i4))
#elif defined (__SH2A_SINGLE__) || defined (__SH2A_SINGLE_ONLY__) || defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
/* This variant is used when FPSCR.PR = 0 (single precision) is the default
setting.
Args in r4 and r5, result in fpul, clobber r2, dr0, dr2.
   For this to work, we must temporarily switch the FPU to double precision,
   but we'd better not touch FPSCR.FR.  See PR 6526.  */
.global GLOBAL(sdivsi3_i4)
HIDDEN_FUNC(GLOBAL(sdivsi3_i4))
GLOBAL(sdivsi3_i4):
#ifndef __SH4A__
mov.l r3,@-r15
sts fpscr,r2
mov #8,r3
swap.w r3,r3 // r3 = 1 << 19 (FPSCR.PR bit)
or r2,r3
lds r3,fpscr // Set FPSCR.PR = 1.
lds r4,fpul
float fpul,dr0
lds r5,fpul
float fpul,dr2
fdiv dr2,dr0
ftrc dr0,fpul
lds r2,fpscr
rts
mov.l @r15+,r3
#else
/* On SH4A we can use the fpchg instruction to flip the FPSCR.PR bit. */
fpchg
lds r4,fpul
float fpul,dr0
lds r5,fpul
float fpul,dr2
fdiv dr2,dr0
ftrc dr0,fpul
rts
fpchg
#endif /* __SH4A__ */
ENDFUNC(GLOBAL(sdivsi3_i4))
#endif /* __SH4__ || __SH2A__ || __SH*_SINGLE*__ */
#endif /* L_sdivsi3_i4 */
//------------------------------------------------------------------------------
#ifdef L_sdivsi3
/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with
sh2e/sh3e code. */
!!
!! Steve Chamberlain
!! sac@cygnus.com
!!
!!
!! args in r4 and r5, result in r0, clobbers r1, r2, r3, and T bit
.global GLOBAL(sdivsi3)
.align 2
FUNC(GLOBAL(sdivsi3))
GLOBAL(sdivsi3):
mov r4,r1
mov r5,r0
tst r0,r0
bt div0
mov #0,r2
div0s r2,r1
subc r3,r3
subc r2,r1
div0s r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
div1 r0,r3
rotcl r1
addc r2,r1
rts
mov r1,r0
div0: rts
mov #0,r0
ENDFUNC(GLOBAL(sdivsi3))
#endif /* L_sdivsi3 */
/*------------------------------------------------------------------------------
32 bit unsigned integer division that uses FPU double precision division. */
#ifdef L_udivsi3_i4
.title "SH DIVIDE"
#if defined (__SH4__) || defined (__SH2A__)
/* This variant is used when FPSCR.PR = 1 (double precision) is the default
setting.
Args in r4 and r5, result in fpul,
clobber r0, r1, r4, r5, dr0, dr2, dr4, and t bit */
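/* Illustrative only: `float' converts a *signed* value, so each operand
   is xor'ed with 1 << 31 and 2147483648.0 is added after the conversion,
   which reconstructs the exact unsigned value as a double:

     (double) (int) (u ^ 0x80000000) + 2147483648.0 == (double) u

   A hedged C sketch of the whole routine (name hypothetical):

     unsigned udivsi3_i4_sketch (unsigned a, unsigned b)
     {
       if (b <= 1)
         return a;      // 'trivial' path: b == 1 (b == 0 is undefined)
       double da = (double) (int) (a ^ 0x80000000) + 2147483648.0;
       double db = (double) (int) (b ^ 0x80000000) + 2147483648.0;
       return (unsigned) (da / db);
     }
*/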
.global GLOBAL(udivsi3_i4)
HIDDEN_FUNC(GLOBAL(udivsi3_i4))
GLOBAL(udivsi3_i4):
mov #1,r1
cmp/hi r1,r5
bf/s trivial
rotr r1
xor r1,r4
lds r4,fpul
mova L1,r0
#ifdef FMOVD_WORKS
fmov.d @r0+,dr4
#else
fmov.s @r0+,DR40
fmov.s @r0,DR41
#endif
float fpul,dr0
xor r1,r5
lds r5,fpul
float fpul,dr2
fadd dr4,dr0
fadd dr4,dr2
fdiv dr2,dr0
rts
ftrc dr0,fpul
trivial:
rts
lds r4,fpul
.align 2
#ifdef FMOVD_WORKS
.align 3 // Make the double below 8 byte aligned.
#endif
L1:
.double 2147483648
ENDFUNC(GLOBAL(udivsi3_i4))
#elif defined (__SH2A_SINGLE__) || defined (__SH2A_SINGLE_ONLY__) || defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
/* This variant is used when FPSCR.PR = 0 (single precision) is the default
setting.
Args in r4 and r5, result in fpul,
clobber r0, r1, r4, r5, dr0, dr2, dr4.
   For this to work, we must temporarily switch the FPU to double precision,
   but we'd better not touch FPSCR.FR.  See PR 6526.  */
.global GLOBAL(udivsi3_i4)
HIDDEN_FUNC(GLOBAL(udivsi3_i4))
GLOBAL(udivsi3_i4):
#ifndef __SH4A__
mov #1,r1
cmp/hi r1,r5
bf/s trivial
rotr r1 // r1 = 1 << 31
sts.l fpscr,@-r15
xor r1,r4
mov.l @(0,r15),r0
xor r1,r5
mov.l L2,r1
lds r4,fpul
or r0,r1
mova L1,r0
lds r1,fpscr
#ifdef FMOVD_WORKS
fmov.d @r0+,dr4
#else
fmov.s @r0+,DR40
fmov.s @r0,DR41
#endif
float fpul,dr0
lds r5,fpul
float fpul,dr2
fadd dr4,dr0
fadd dr4,dr2
fdiv dr2,dr0
ftrc dr0,fpul
rts
lds.l @r15+,fpscr
#ifdef FMOVD_WORKS
.align 3 // Make the double below 8 byte aligned.
#endif
trivial:
rts
lds r4,fpul
.align 2
L2:
#ifdef FMOVD_WORKS
.long 0x180000 // FPSCR.PR = 1, FPSCR.SZ = 1
#else
.long 0x80000 // FPSCR.PR = 1
#endif
L1:
.double 2147483648
#else
/* On SH4A we can use the fpchg instruction to flip the FPSCR.PR bit.
Although on SH4A fmovd usually works, it would require either additional
two fschg instructions or an FPSCR push + pop. It's not worth the effort
for loading only one double constant. */
mov #1,r1
cmp/hi r1,r5
bf/s trivial
rotr r1 // r1 = 1 << 31
fpchg
mova L1,r0
xor r1,r4
fmov.s @r0+,DR40
lds r4,fpul
fmov.s @r0,DR41
xor r1,r5
float fpul,dr0
lds r5,fpul
float fpul,dr2
fadd dr4,dr0
fadd dr4,dr2
fdiv dr2,dr0
ftrc dr0,fpul
rts
fpchg
trivial:
rts
lds r4,fpul
.align 2
L1:
.double 2147483648
#endif /* __SH4A__ */
ENDFUNC(GLOBAL(udivsi3_i4))
#endif /* __SH4__ || __SH2A__ || __SH*_SINGLE*__ */
#endif /* L_udivsi3_i4 */
#ifdef L_udivsi3
/* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with
sh2e/sh3e code. */
!! args in r4 and r5, result in r0, clobbers r4, pr, and t bit
.global GLOBAL(udivsi3)
HIDDEN_FUNC(GLOBAL(udivsi3))
LOCAL(div8):
div1 r5,r4
LOCAL(div7):
div1 r5,r4; div1 r5,r4; div1 r5,r4
div1 r5,r4; div1 r5,r4; div1 r5,r4; rts; div1 r5,r4
LOCAL(divx4):
div1 r5,r4; rotcl r0
div1 r5,r4; rotcl r0
div1 r5,r4; rotcl r0
rts; div1 r5,r4
GLOBAL(udivsi3):
sts.l pr,@-r15
extu.w r5,r0
cmp/eq r5,r0
#ifdef __sh1__
bf LOCAL(large_divisor)
#else
bf/s LOCAL(large_divisor)
#endif
div0u
swap.w r4,r0
shlr16 r4
bsr LOCAL(div8)
shll16 r5
bsr LOCAL(div7)
div1 r5,r4
xtrct r4,r0
xtrct r0,r4
bsr LOCAL(div8)
swap.w r4,r4
bsr LOCAL(div7)
div1 r5,r4
lds.l @r15+,pr
xtrct r4,r0
swap.w r0,r0
rotcl r0
rts
shlr16 r5
LOCAL(large_divisor):
#ifdef __sh1__
div0u
#endif
mov #0,r0
xtrct r4,r0
xtrct r0,r4
bsr LOCAL(divx4)
rotcl r0
bsr LOCAL(divx4)
rotcl r0
bsr LOCAL(divx4)
rotcl r0
bsr LOCAL(divx4)
rotcl r0
lds.l @r15+,pr
rts
rotcl r0
ENDFUNC(GLOBAL(udivsi3))
#endif /* L_udivsi3 */
#ifdef L_set_fpscr
#if !defined (__SH2A_NOFPU__)
#if defined (__SH2E__) || defined (__SH2A__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__)
.global GLOBAL(set_fpscr)
HIDDEN_FUNC(GLOBAL(set_fpscr))
GLOBAL(set_fpscr):
lds r4,fpscr
#ifdef __PIC__
mov.l r12,@-r15
#ifdef __vxworks
mov.l LOCAL(set_fpscr_L0_base),r12
mov.l LOCAL(set_fpscr_L0_index),r0
mov.l @r12,r12
mov.l @(r0,r12),r12
#else
mova LOCAL(set_fpscr_L0),r0
mov.l LOCAL(set_fpscr_L0),r12
add r0,r12
#endif
mov.l LOCAL(set_fpscr_L1),r0
mov.l @(r0,r12),r1
mov.l @r15+,r12
#else
mov.l LOCAL(set_fpscr_L1),r1
#endif
swap.w r4,r0
or #24,r0
#ifndef FMOVD_WORKS
xor #16,r0
#endif
#if defined(__SH4__) || defined (__SH2A_DOUBLE__)
swap.w r0,r3
mov.l r3,@(4,r1)
#else /* defined (__SH2E__) || defined(__SH3E__) || defined(__SH4_SINGLE*__) */
swap.w r0,r2
mov.l r2,@r1
#endif
#ifndef FMOVD_WORKS
xor #8,r0
#else
xor #24,r0
#endif
#if defined(__SH4__) || defined (__SH2A_DOUBLE__)
swap.w r0,r2
rts
mov.l r2,@r1
#else /* defined(__SH2E__) || defined(__SH3E__) || defined(__SH4_SINGLE*__) */
swap.w r0,r3
rts
mov.l r3,@(4,r1)
#endif
.align 2
#ifdef __PIC__
#ifdef __vxworks
LOCAL(set_fpscr_L0_base):
.long ___GOTT_BASE__
LOCAL(set_fpscr_L0_index):
.long ___GOTT_INDEX__
#else
LOCAL(set_fpscr_L0):
.long _GLOBAL_OFFSET_TABLE_
#endif
LOCAL(set_fpscr_L1):
.long GLOBAL(fpscr_values@GOT)
#else
LOCAL(set_fpscr_L1):
.long GLOBAL(fpscr_values)
#endif
ENDFUNC(GLOBAL(set_fpscr))
#ifndef NO_FPSCR_VALUES
#ifdef __ELF__
.comm GLOBAL(fpscr_values),8,4
#else
.comm GLOBAL(fpscr_values),8
#endif /* ELF */
#endif /* NO_FPSCR_VALUES */
#endif /* SH2E / SH3E / SH4 */
#endif /* __SH2A_NOFPU__ */
#endif /* L_set_fpscr */
#ifdef L_ic_invalidate
#if defined(__SH4A__)
.global GLOBAL(ic_invalidate)
HIDDEN_FUNC(GLOBAL(ic_invalidate))
GLOBAL(ic_invalidate):
ocbwb @r4
synco
icbi @r4
rts
nop
ENDFUNC(GLOBAL(ic_invalidate))
#elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH4_NOFPU__)
/* For system code, we use ic_invalidate_line_i, but user code
needs a different mechanism. A kernel call is generally not
available, and it would also be slow. Different SH4 variants use
different sizes and associativities of the Icache. We use a small
bit of dispatch code that can be put hidden in every shared object,
which calls the actual processor-specific invalidation code in a
separate module.
Or if you have operating system support, the OS could mmap the
processor-specific code from a single page, since it is highly
repetitive. */
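/* Illustrative only: a hedged C sketch of the dispatch below.  Offset 8
   of the ic_invalidate_array object holds an address mask; the word at
   offset 4 is preloaded into r0 for the selected slot:

     void ic_invalidate_sketch (char *addr, char *array)
     {
       long mask = ((long *) array)[2];           // mov.l @(8,r1),r0
       char *slot = array + ((addr - array) & mask);
       ((void (*) (void)) slot) ();               // jmp @r0
     }
*/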
.global GLOBAL(ic_invalidate)
HIDDEN_FUNC(GLOBAL(ic_invalidate))
GLOBAL(ic_invalidate):
#ifdef __pic__
#ifdef __vxworks
mov.l 1f,r1
mov.l 2f,r0
mov.l @r1,r1
mov.l 0f,r2
mov.l @(r0,r1),r0
#else
mov.l 1f,r1
mova 1f,r0
mov.l 0f,r2
add r1,r0
#endif
mov.l @(r0,r2),r1
#else
mov.l 0f,r1
#endif
ocbwb @r4
mov.l @(8,r1),r0
sub r1,r4
and r4,r0
add r1,r0
jmp @r0
mov.l @(4,r1),r0
.align 2
#ifndef __pic__
0: .long GLOBAL(ic_invalidate_array)
#else /* __pic__ */
.global GLOBAL(ic_invalidate_array)
0: .long GLOBAL(ic_invalidate_array)@GOT
#ifdef __vxworks
1: .long ___GOTT_BASE__
2: .long ___GOTT_INDEX__
#else
1: .long _GLOBAL_OFFSET_TABLE_
#endif
ENDFUNC(GLOBAL(ic_invalidate))
#endif /* __pic__ */
#endif /* SH4 */
#endif /* L_ic_invalidate */
#ifdef L_ic_invalidate_array
#if defined(__SH4A__) || (defined (__FORCE_SH4A__) && (defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH4_NOFPU__)))
/* This is needed when an SH4 dso with trampolines is used on SH4A. */
.global GLOBAL(ic_invalidate_array)
FUNC(GLOBAL(ic_invalidate_array))
GLOBAL(ic_invalidate_array):
add r1,r4
synco
icbi @r4
rts
nop
.align 2
.long 0
ENDFUNC(GLOBAL(ic_invalidate_array))
#elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH4_NOFPU__)
.global GLOBAL(ic_invalidate_array)
.p2align 5
FUNC(GLOBAL(ic_invalidate_array))
/* This must be aligned to the beginning of a cache line. */
GLOBAL(ic_invalidate_array):
#ifndef WAYS
#define WAYS 4
#define WAY_SIZE 0x4000
#endif
#if WAYS == 1
.rept WAY_SIZE * WAYS / 32
rts
nop
.rept 7
.long WAY_SIZE - 32
.endr
.endr
#elif WAYS <= 6
.rept WAY_SIZE * WAYS / 32
braf r0
add #-8,r0
.long WAY_SIZE + 8
.long WAY_SIZE - 32
.rept WAYS-2
braf r0
nop
.endr
.rept 7 - WAYS
rts
nop
.endr
.endr
#else /* WAYS > 6 */
/* This variant needs two different pages for mmap-ing. */
.rept WAYS-1
.rept WAY_SIZE / 32
braf r0
nop
.long WAY_SIZE
.rept 6
.long WAY_SIZE - 32
.endr
.endr
.endr
.rept WAY_SIZE / 32
rts
.rept 15
nop
.endr
.endr
#endif /* WAYS */
ENDFUNC(GLOBAL(ic_invalidate_array))
#endif /* SH4 */
#endif /* L_ic_invalidate_array */
#ifdef L_div_table
#if defined (__SH2A__) || defined (__SH3__) || defined (__SH3E__) || defined (__SH4__) || defined (__SH4_SINGLE__) || defined (__SH4_SINGLE_ONLY__) || defined (__SH4_NOFPU__)
/* This code uses shld, thus is not suitable for SH1 / SH2. */
/* Signed / unsigned division without use of FPU, optimized for SH4.
Uses a lookup table for divisors in the range -128 .. +128, and
div1 with case distinction for larger divisors in three more ranges.
The code is lumped together with the table to allow the use of mova. */
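/* Illustrative only: a hedged C sketch of the small-divisor path below.
   For 2 <= d <= 128 the code looks up a shift k and a 32-bit fraction
   inv such that R = 2^32 + inv ~= 2^(33+k) / d, then forms the quotient
   as R*n >> (33+k) without overflow via a multiply-high.  The lookup
   helpers here are hypothetical; the real code indexes the tables below
   through signed byte offsets.

     unsigned udiv_small_sketch (unsigned n, unsigned d)
     {
       unsigned inv = lookup_inverse (d);      // via div_table_ix
       int k = lookup_shift (d);               // -div_table_clz[d]
       unsigned long long t =
         ((unsigned long long) inv * n >> 32) + n;  // 33-bit sum
       return (unsigned) (t >> (1 + k));       // addc; rotcr; shld
     }
*/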
#ifdef __LITTLE_ENDIAN__
#define L_LSB 0
#define L_LSWMSB 1
#define L_MSWLSB 2
#else
#define L_LSB 3
#define L_LSWMSB 2
#define L_MSWLSB 1
#endif
.balign 4
.global GLOBAL(udivsi3_i4i)
FUNC(GLOBAL(udivsi3_i4i))
GLOBAL(udivsi3_i4i):
mov.w LOCAL(c128_w), r1
div0u
mov r4,r0
shlr8 r0
cmp/hi r1,r5
extu.w r5,r1
bf LOCAL(udiv_le128)
cmp/eq r5,r1
bf LOCAL(udiv_ge64k)
shlr r0
mov r5,r1
shll16 r5
mov.l r4,@-r15
div1 r5,r0
mov.l r1,@-r15
div1 r5,r0
div1 r5,r0
bra LOCAL(udiv_25)
div1 r5,r0
LOCAL(div_le128):
mova LOCAL(div_table_ix),r0
bra LOCAL(div_le128_2)
mov.b @(r0,r5),r1
LOCAL(udiv_le128):
mov.l r4,@-r15
mova LOCAL(div_table_ix),r0
mov.b @(r0,r5),r1
mov.l r5,@-r15
LOCAL(div_le128_2):
mova LOCAL(div_table_inv),r0
mov.l @(r0,r1),r1
mov r5,r0
tst #0xfe,r0
mova LOCAL(div_table_clz),r0
dmulu.l r1,r4
mov.b @(r0,r5),r1
bt/s LOCAL(div_by_1)
mov r4,r0
mov.l @r15+,r5
sts mach,r0
/* clrt */
addc r4,r0
mov.l @r15+,r4
rotcr r0
rts
shld r1,r0
LOCAL(div_by_1_neg):
neg r4,r0
LOCAL(div_by_1):
mov.l @r15+,r5
rts
mov.l @r15+,r4
LOCAL(div_ge64k):
bt/s LOCAL(div_r8)
div0u
shll8 r5
bra LOCAL(div_ge64k_2)
div1 r5,r0
LOCAL(udiv_ge64k):
cmp/hi r0,r5
mov r5,r1
bt LOCAL(udiv_r8)
shll8 r5
mov.l r4,@-r15
div1 r5,r0
mov.l r1,@-r15
LOCAL(div_ge64k_2):
div1 r5,r0
mov.l LOCAL(zero_l),r1
.rept 4
div1 r5,r0
.endr
mov.l r1,@-r15
div1 r5,r0
mov.w LOCAL(m256_w),r1
div1 r5,r0
mov.b r0,@(L_LSWMSB,r15)
xor r4,r0
and r1,r0
bra LOCAL(div_ge64k_end)
xor r4,r0
LOCAL(div_r8):
shll16 r4
bra LOCAL(div_r8_2)
shll8 r4
LOCAL(udiv_r8):
mov.l r4,@-r15
shll16 r4
clrt
shll8 r4
mov.l r5,@-r15
LOCAL(div_r8_2):
rotcl r4
mov r0,r1
div1 r5,r1
mov r4,r0
rotcl r0
mov r5,r4
div1 r5,r1
.rept 5
rotcl r0; div1 r5,r1
.endr
rotcl r0
mov.l @r15+,r5
div1 r4,r1
mov.l @r15+,r4
rts
rotcl r0
ENDFUNC(GLOBAL(udivsi3_i4i))
.global GLOBAL(sdivsi3_i4i)
FUNC(GLOBAL(sdivsi3_i4i))
/* This is link-compatible with a GLOBAL(sdivsi3) call,
but we effectively clobber only r1. */
GLOBAL(sdivsi3_i4i):
mov.l r4,@-r15
cmp/pz r5
mov.w LOCAL(c128_w), r1
bt/s LOCAL(pos_divisor)
cmp/pz r4
mov.l r5,@-r15
neg r5,r5
bt/s LOCAL(neg_result)
cmp/hi r1,r5
neg r4,r4
LOCAL(pos_result):
extu.w r5,r0
bf LOCAL(div_le128)
cmp/eq r5,r0
mov r4,r0
shlr8 r0
bf/s LOCAL(div_ge64k)
cmp/hi r0,r5
div0u
shll16 r5
div1 r5,r0
div1 r5,r0
div1 r5,r0
LOCAL(udiv_25):
mov.l LOCAL(zero_l),r1
div1 r5,r0
div1 r5,r0
mov.l r1,@-r15
.rept 3
div1 r5,r0
.endr
mov.b r0,@(L_MSWLSB,r15)
xtrct r4,r0
swap.w r0,r0
.rept 8
div1 r5,r0
.endr
mov.b r0,@(L_LSWMSB,r15)
LOCAL(div_ge64k_end):
.rept 8
div1 r5,r0
.endr
mov.l @r15+,r4 ! zero-extension and swap using LS unit.
extu.b r0,r0
mov.l @r15+,r5
or r4,r0
mov.l @r15+,r4
rts
rotcl r0
LOCAL(div_le128_neg):
tst #0xfe,r0
mova LOCAL(div_table_ix),r0
mov.b @(r0,r5),r1
mova LOCAL(div_table_inv),r0
bt/s LOCAL(div_by_1_neg)
mov.l @(r0,r1),r1
mova LOCAL(div_table_clz),r0
dmulu.l r1,r4
mov.b @(r0,r5),r1
mov.l @r15+,r5
sts mach,r0
/* clrt */
addc r4,r0
mov.l @r15+,r4
rotcr r0
shld r1,r0
rts
neg r0,r0
LOCAL(pos_divisor):
mov.l r5,@-r15
bt/s LOCAL(pos_result)
cmp/hi r1,r5
neg r4,r4
LOCAL(neg_result):
extu.w r5,r0
bf LOCAL(div_le128_neg)
cmp/eq r5,r0
mov r4,r0
shlr8 r0
bf/s LOCAL(div_ge64k_neg)
cmp/hi r0,r5
div0u
mov.l LOCAL(zero_l),r1
shll16 r5
div1 r5,r0
mov.l r1,@-r15
.rept 7
div1 r5,r0
.endr
mov.b r0,@(L_MSWLSB,r15)
xtrct r4,r0
swap.w r0,r0
.rept 8
div1 r5,r0
.endr
mov.b r0,@(L_LSWMSB,r15)
LOCAL(div_ge64k_neg_end):
.rept 8
div1 r5,r0
.endr
mov.l @r15+,r4 ! zero-extension and swap using LS unit.
extu.b r0,r1
mov.l @r15+,r5
or r4,r1
LOCAL(div_r8_neg_end):
mov.l @r15+,r4
rotcl r1
rts
neg r1,r0
LOCAL(div_ge64k_neg):
bt/s LOCAL(div_r8_neg)
div0u
shll8 r5
mov.l LOCAL(zero_l),r1
.rept 6
div1 r5,r0
.endr
mov.l r1,@-r15
div1 r5,r0
mov.w LOCAL(m256_w),r1
div1 r5,r0
mov.b r0,@(L_LSWMSB,r15)
xor r4,r0
and r1,r0
bra LOCAL(div_ge64k_neg_end)
xor r4,r0
LOCAL(c128_w):
.word 128
LOCAL(div_r8_neg):
clrt
shll16 r4
mov r4,r1
shll8 r1
mov r5,r4
.rept 7
rotcl r1; div1 r5,r0
.endr
mov.l @r15+,r5
rotcl r1
bra LOCAL(div_r8_neg_end)
div1 r4,r0
LOCAL(m256_w):
.word 0xff00
/* This table has been generated by divtab-sh4.c. */
.balign 4
LOCAL(div_table_clz):
.byte 0
.byte 1
.byte 0
.byte -1
.byte -1
.byte -2
.byte -2
.byte -2
.byte -2
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -3
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -4
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -5
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
.byte -6
/* Lookup table translating positive divisor to index into table of
normalized inverse. N.B. the '0' entry is also the last entry of the
previous table, and causes an unaligned access for division by zero. */
LOCAL(div_table_ix):
.byte -6
.byte -128
.byte -128
.byte 0
.byte -128
.byte -64
.byte 0
.byte 64
.byte -128
.byte -96
.byte -64
.byte -32
.byte 0
.byte 32
.byte 64
.byte 96
.byte -128
.byte -112
.byte -96
.byte -80
.byte -64
.byte -48
.byte -32
.byte -16
.byte 0
.byte 16
.byte 32
.byte 48
.byte 64
.byte 80
.byte 96
.byte 112
.byte -128
.byte -120
.byte -112
.byte -104
.byte -96
.byte -88
.byte -80
.byte -72
.byte -64
.byte -56
.byte -48
.byte -40
.byte -32
.byte -24
.byte -16
.byte -8
.byte 0
.byte 8
.byte 16
.byte 24
.byte 32
.byte 40
.byte 48
.byte 56
.byte 64
.byte 72
.byte 80
.byte 88
.byte 96
.byte 104
.byte 112
.byte 120
.byte -128
.byte -124
.byte -120
.byte -116
.byte -112
.byte -108
.byte -104
.byte -100
.byte -96
.byte -92
.byte -88
.byte -84
.byte -80
.byte -76
.byte -72
.byte -68
.byte -64
.byte -60
.byte -56
.byte -52
.byte -48
.byte -44
.byte -40
.byte -36
.byte -32
.byte -28
.byte -24
.byte -20
.byte -16
.byte -12
.byte -8
.byte -4
.byte 0
.byte 4
.byte 8
.byte 12
.byte 16
.byte 20
.byte 24
.byte 28
.byte 32
.byte 36
.byte 40
.byte 44
.byte 48
.byte 52
.byte 56
.byte 60
.byte 64
.byte 68
.byte 72
.byte 76
.byte 80
.byte 84
.byte 88
.byte 92
.byte 96
.byte 100
.byte 104
.byte 108
.byte 112
.byte 116
.byte 120
.byte 124
.byte -128
/* 1/64 .. 1/127, normalized. There is an implicit leading 1 in bit 32. */
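/* Worked example (illustrative): the entry for divisor 65 is
   0xF81F81F9; with the implicit leading one this reads as
   0x1F81F81F9 = 8457781753 = ceil(2^39 / 65).  */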
.balign 4
LOCAL(zero_l):
.long 0x0
.long 0xF81F81F9
.long 0xF07C1F08
.long 0xE9131AC0
.long 0xE1E1E1E2
.long 0xDAE6076C
.long 0xD41D41D5
.long 0xCD856891
.long 0xC71C71C8
.long 0xC0E07039
.long 0xBACF914D
.long 0xB4E81B4F
.long 0xAF286BCB
.long 0xA98EF607
.long 0xA41A41A5
.long 0x9EC8E952
.long 0x9999999A
.long 0x948B0FCE
.long 0x8F9C18FA
.long 0x8ACB90F7
.long 0x86186187
.long 0x81818182
.long 0x7D05F418
.long 0x78A4C818
.long 0x745D1746
.long 0x702E05C1
.long 0x6C16C16D
.long 0x68168169
.long 0x642C8591
.long 0x60581606
.long 0x5C9882BA
.long 0x58ED2309
LOCAL(div_table_inv):
.long 0x55555556
.long 0x51D07EAF
.long 0x4E5E0A73
.long 0x4AFD6A06
.long 0x47AE147B
.long 0x446F8657
.long 0x41414142
.long 0x3E22CBCF
.long 0x3B13B13C
.long 0x38138139
.long 0x3521CFB3
.long 0x323E34A3
.long 0x2F684BDB
.long 0x2C9FB4D9
.long 0x29E4129F
.long 0x27350B89
.long 0x24924925
.long 0x21FB7813
.long 0x1F7047DD
.long 0x1CF06ADB
.long 0x1A7B9612
.long 0x18118119
.long 0x15B1E5F8
.long 0x135C8114
.long 0x11111112
.long 0xECF56BF
.long 0xC9714FC
.long 0xA6810A7
.long 0x8421085
.long 0x624DD30
.long 0x4104105
.long 0x2040811
/* maximum error: 0.987342 scaled: 0.921875 */
ENDFUNC(GLOBAL(sdivsi3_i4i))
#endif /* SH3 / SH4 */
#endif /* L_div_table */
#ifdef L_udiv_qrnnd_16
HIDDEN_FUNC(GLOBAL(udiv_qrnnd_16))
/* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */
/* n1 < d, but n1 might be larger than d1. */
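/* Illustrative only: a hedged C sketch of what one call computes -- a
   single 16-bit quotient digit in the manner of Knuth's algorithm D,
   first estimated from the divisor's high half d1, then corrected:

     unsigned long long nn =
       ((unsigned long long) n1 << 16) | (n0 >> 16);
     qn = (unsigned) (nn / d);   // fits in 16 bits, since n1 < d
     rn = (unsigned) (nn % d);
*/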
.global GLOBAL(udiv_qrnnd_16)
.balign 8
GLOBAL(udiv_qrnnd_16):
div0u
cmp/hi r6,r0
bt .Lots
.rept 16
div1 r6,r0
.endr
extu.w r0,r1
bt 0f
add r6,r0
0: rotcl r1
mulu.w r1,r5
xtrct r4,r0
swap.w r0,r0
sts macl,r2
cmp/hs r2,r0
sub r2,r0
bt 0f
addc r5,r0
add #-1,r1
bt 0f
1: add #-1,r1
rts
add r5,r0
.balign 8
.Lots:
sub r5,r0
swap.w r4,r1
xtrct r0,r1
clrt
mov r1,r0
addc r5,r0
mov #-1,r1
SL1(bf, 1b,
shlr16 r1)
0: rts
nop
ENDFUNC(GLOBAL(udiv_qrnnd_16))
#endif /* L_udiv_qrnnd_16 */
/* plugin-libc/libgcc/config/sh/lib1funcs-Os-4-200.S */
/* Copyright (C) 2006-2022 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Moderately Space-optimized libgcc routines for the Renesas SH /
STMicroelectronics ST40 CPUs.
Contributed by J"orn Rennecke joern.rennecke@st.com. */
#include "lib1funcs.h"
#ifdef L_udivsi3_i4i
/* 88 bytes; sh4-200 cycle counts:
divisor >= 2G: 11 cycles
dividend < 2G: 48 cycles
dividend >= 2G: divisor != 1: 54 cycles
dividend >= 2G, divisor == 1: 22 cycles */
#if defined (__SH_FPU_DOUBLE__) || defined (__SH4_SINGLE_ONLY__)
!! args in r4 and r5, result in r0, clobber r1
.global GLOBAL(udivsi3_i4i)
FUNC(GLOBAL(udivsi3_i4i))
GLOBAL(udivsi3_i4i):
mova L1,r0
cmp/pz r5
sts fpscr,r1
lds.l @r0+,fpscr
sts.l fpul,@-r15
bf LOCAL(huge_divisor)
mov.l r1,@-r15
lds r4,fpul
cmp/pz r4
#ifdef FMOVD_WORKS
fmov.d dr0,@-r15
float fpul,dr0
fmov.d dr2,@-r15
bt LOCAL(dividend_adjusted)
mov #1,r1
fmov.d @r0,dr2
cmp/eq r1,r5
bt LOCAL(div_by_1)
fadd dr2,dr0
LOCAL(dividend_adjusted):
lds r5,fpul
float fpul,dr2
fdiv dr2,dr0
LOCAL(div_by_1):
fmov.d @r15+,dr2
ftrc dr0,fpul
fmov.d @r15+,dr0
#else /* !FMOVD_WORKS */
fmov.s DR01,@-r15
mov #1,r1
fmov.s DR00,@-r15
float fpul,dr0
fmov.s DR21,@-r15
bt/s LOCAL(dividend_adjusted)
fmov.s DR20,@-r15
cmp/eq r1,r5
bt LOCAL(div_by_1)
fmov.s @r0+,DR20
fmov.s @r0,DR21
fadd dr2,dr0
LOCAL(dividend_adjusted):
lds r5,fpul
float fpul,dr2
fdiv dr2,dr0
LOCAL(div_by_1):
fmov.s @r15+,DR20
fmov.s @r15+,DR21
ftrc dr0,fpul
fmov.s @r15+,DR00
fmov.s @r15+,DR01
#endif /* !FMOVD_WORKS */
lds.l @r15+,fpscr
sts fpul,r0
rts
lds.l @r15+,fpul
#ifdef FMOVD_WORKS
.p2align 3 ! make double below 8 byte aligned.
#endif
LOCAL(huge_divisor):
lds r1,fpscr
add #4,r15
cmp/hs r5,r4
rts
movt r0
.p2align 2
L1:
#ifndef FMOVD_WORKS
.long 0x80000
#else
.long 0x180000
#endif
.double 4294967296
ENDFUNC(GLOBAL(udivsi3_i4i))
#elif !defined (__sh1__) /* !__SH_FPU_DOUBLE__ */
#if 0
/* With 36 bytes, the following would probably be the most compact
implementation, but with 139 cycles on an sh4-200, it is extremely slow. */
GLOBAL(udivsi3_i4i):
mov.l r2,@-r15
mov #0,r1
div0u
mov r1,r2
mov.l r3,@-r15
mov r1,r3
sett
mov r4,r0
LOCAL(loop):
rotcr r2
;
bt/s LOCAL(end)
cmp/gt r2,r3
rotcl r0
bra LOCAL(loop)
div1 r5,r1
LOCAL(end):
rotcl r0
mov.l @r15+,r3
rts
mov.l @r15+,r2
#endif /* 0 */
/* Size: 186 bytes jointly for udivsi3_i4i and sdivsi3_i4i
sh4-200 run times:
udiv small divisor: 55 cycles
udiv large divisor: 52 cycles
sdiv small divisor, positive result: 59 cycles
sdiv large divisor, positive result: 56 cycles
sdiv small divisor, negative result: 65 cycles (*)
sdiv large divisor, negative result: 62 cycles (*)
(*): r2 is restored in the rts delay slot and has a lingering latency
of two more cycles. */
.balign 4
.global GLOBAL(udivsi3_i4i)
FUNC(GLOBAL(udivsi3_i4i))
FUNC(GLOBAL(sdivsi3_i4i))
GLOBAL(udivsi3_i4i):
sts pr,r1
mov.l r4,@-r15
extu.w r5,r0
cmp/eq r5,r0
swap.w r4,r0
shlr16 r4
bf/s LOCAL(large_divisor)
div0u
mov.l r5,@-r15
shll16 r5
LOCAL(sdiv_small_divisor):
div1 r5,r4
bsr LOCAL(div6)
div1 r5,r4
div1 r5,r4
bsr LOCAL(div6)
div1 r5,r4
xtrct r4,r0
xtrct r0,r4
bsr LOCAL(div7)
swap.w r4,r4
div1 r5,r4
bsr LOCAL(div7)
div1 r5,r4
xtrct r4,r0
mov.l @r15+,r5
swap.w r0,r0
mov.l @r15+,r4
jmp @r1
rotcl r0
LOCAL(div7):
div1 r5,r4
LOCAL(div6):
div1 r5,r4; div1 r5,r4; div1 r5,r4
div1 r5,r4; div1 r5,r4; rts; div1 r5,r4
LOCAL(divx3):
rotcl r0
div1 r5,r4
rotcl r0
div1 r5,r4
rotcl r0
rts
div1 r5,r4
LOCAL(large_divisor):
mov.l r5,@-r15
LOCAL(sdiv_large_divisor):
xor r4,r0
.rept 4
rotcl r0
bsr LOCAL(divx3)
div1 r5,r4
.endr
mov.l @r15+,r5
mov.l @r15+,r4
jmp @r1
rotcl r0
ENDFUNC(GLOBAL(udivsi3_i4i))
.global GLOBAL(sdivsi3_i4i)
GLOBAL(sdivsi3_i4i):
mov.l r4,@-r15
cmp/pz r5
mov.l r5,@-r15
bt/s LOCAL(pos_divisor)
cmp/pz r4
neg r5,r5
extu.w r5,r0
bt/s LOCAL(neg_result)
cmp/eq r5,r0
neg r4,r4
LOCAL(pos_result):
swap.w r4,r0
bra LOCAL(sdiv_check_divisor)
sts pr,r1
LOCAL(pos_divisor):
extu.w r5,r0
bt/s LOCAL(pos_result)
cmp/eq r5,r0
neg r4,r4
LOCAL(neg_result):
mova LOCAL(negate_result),r0
;
mov r0,r1
swap.w r4,r0
lds r2,macl
sts pr,r2
LOCAL(sdiv_check_divisor):
shlr16 r4
bf/s LOCAL(sdiv_large_divisor)
div0u
bra LOCAL(sdiv_small_divisor)
shll16 r5
.balign 4
LOCAL(negate_result):
neg r0,r0
jmp @r2
sts macl,r2
ENDFUNC(GLOBAL(sdivsi3_i4i))
#endif /* !__SH_FPU_DOUBLE__ */
#endif /* L_udivsi3_i4i */
#ifdef L_sdivsi3_i4i
#if defined (__SH_FPU_DOUBLE__) || defined (__SH4_SINGLE_ONLY__)
/* 48 bytes, 45 cycles on sh4-200 */
!! args in r4 and r5, result in r0, clobber r1
.global GLOBAL(sdivsi3_i4i)
FUNC(GLOBAL(sdivsi3_i4i))
GLOBAL(sdivsi3_i4i):
sts.l fpscr,@-r15
sts fpul,r1
mova L1,r0
lds.l @r0+,fpscr
lds r4,fpul
#ifdef FMOVD_WORKS
fmov.d dr0,@-r15
float fpul,dr0
lds r5,fpul
fmov.d dr2,@-r15
#else
fmov.s DR01,@-r15
fmov.s DR00,@-r15
float fpul,dr0
lds r5,fpul
fmov.s DR21,@-r15
fmov.s DR20,@-r15
#endif
float fpul,dr2
fdiv dr2,dr0
#ifdef FMOVD_WORKS
fmov.d @r15+,dr2
#else
fmov.s @r15+,DR20
fmov.s @r15+,DR21
#endif
ftrc dr0,fpul
#ifdef FMOVD_WORKS
fmov.d @r15+,dr0
#else
fmov.s @r15+,DR00
fmov.s @r15+,DR01
#endif
lds.l @r15+,fpscr
sts fpul,r0
rts
lds r1,fpul
.p2align 2
L1:
#ifndef FMOVD_WORKS
.long 0x80000
#else
.long 0x180000
#endif
ENDFUNC(GLOBAL(sdivsi3_i4i))
#endif /* __SH_FPU_DOUBLE__ */
#endif /* L_sdivsi3_i4i */
;; plugin-libc/libgcc/config/cris/umulsidi3.S
;; Copyright (C) 2001-2022 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it under
;; the terms of the GNU General Public License as published by the Free
;; Software Foundation; either version 3, or (at your option) any later
;; version.
;;
;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
;; for more details.
;;
;; Under Section 7 of GPL version 3, you are granted additional
;; permissions described in the GCC Runtime Library Exception, version
;; 3.1, as published by the Free Software Foundation.
;;
;; You should have received a copy of the GNU General Public License and
;; a copy of the GCC Runtime Library Exception along with this program;
;; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
;; <http://www.gnu.org/licenses/>.
;;
;; This code is derived from mulsi3.S, observing that the mstep*16-based
;; multiplications there, from which it is formed, are actually
;; zero-extending; in gcc-speak "umulhisi3". The difference to *this*
;; function is just a missing top mstep*16 sequence and shifts and 64-bit
;; additions for the high part. Compared to an implementation based on
;; calling __Mul four times (see default implementation of umul_ppmm in
;; longlong.h), this will complete in a time between a fourth and a third
;; of that, assuming the value-based optimizations don't strike. If they
;; all strike there (very often) but none here, we still win, though by a
;; lesser margin, due to lesser total overhead.
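;; Illustrative only: a hedged C sketch of the widening multiply formed
;; below from 16-bit partial products (names hypothetical):
;;
;;   unsigned long long umulsidi3_sketch (unsigned x, unsigned y)
;;   {
;;     unsigned a = x >> 16, b = x & 0xffff;   /* x = ab */
;;     unsigned c = y >> 16, d = y & 0xffff;   /* y = cd */
;;     unsigned long long mid = (unsigned long long) a * d
;;                              + (unsigned long long) b * c;
;;     return ((unsigned long long) a * c << 32)
;;            + (mid << 16)
;;            + (unsigned long long) b * d;
;;   }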
#define L(x) .x
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
#ifdef __USER_LABEL_PREFIX__
# define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
#else
# define SYM(x) x
#endif
.global SYM(__umulsidi3)
.type SYM(__umulsidi3),@function
SYM(__umulsidi3):
#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
;; Can't have the mulu.d last on a cache-line, due to a hardware bug. See
;; the documentation for -mmul-bug-workaround.
;; Not worthwhile to conditionalize here.
.p2alignw 2,0x050f
mulu.d $r11,$r10
ret
move $mof,$r11
#else
move.d $r11,$r9
bound.d $r10,$r9
cmpu.w 65535,$r9
bls L(L3)
move.d $r10,$r12
move.d $r10,$r13
	movu.w $r11,$r9 ; ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d
;; We're called for floating point numbers very often with the "low" 16
;; bits zero, so it's worthwhile to optimize for that.
beq L(L6) ; d == 0?
lslq 16,$r13
beq L(L7) ; b == 0?
clear.w $r10
mstep $r9,$r13 ; d*b
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
L(L7):
test.d $r10
mstep $r9,$r10 ; d*a
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
;; d*a in $r10, d*b in $r13, ab in $r12 and cd in $r11
;; $r9 = d, need to do b*c and a*c; we can drop d.
;; so $r9 is up for use and we can shift down $r11 as the mstep
;; source for the next mstep-part.
L(L8):
lsrq 16,$r11
move.d $r12,$r9
lslq 16,$r9
beq L(L9) ; b == 0?
mstep $r11,$r9
mstep $r11,$r9 ; b*c
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
mstep $r11,$r9
L(L9):
;; d*a in $r10, d*b in $r13, c*b in $r9, ab in $r12 and c in $r11,
;; need to do a*c. We want that to end up in $r11, so we shift up $r11 to
;; now use as the destination operand. We'd need a test insn to update N
;; to do it the other way round.
lsrq 16,$r12
lslq 16,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
mstep $r12,$r11
;; d*a in $r10, d*b in $r13, c*b in $r9, a*c in $r11 ($r12 free).
;; Need (a*d + b*c)<<16 + b*d into $r10 and
;; a*c + (a*d + b*c)>>16 plus carry from the additions into $r11.
add.d $r9,$r10 ; (a*d + b*c) - may produce a carry.
scs $r12 ; The carry corresponds to bit 16 of $r11.
lslq 16,$r12
add.d $r12,$r11 ; $r11 = a*c + carry from (a*d + b*c).
#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 8
swapw $r10
addu.w $r10,$r11 ; $r11 = a*c + (a*d + b*c) >> 16 including carry.
clear.w $r10 ; $r10 = (a*d + b*c) << 16
#else
move.d $r10,$r9
lsrq 16,$r9
add.d $r9,$r11 ; $r11 = a*c + (a*d + b*c) >> 16 including carry.
lslq 16,$r10 ; $r10 = (a*d + b*c) << 16
#endif
add.d $r13,$r10 ; $r10 = (a*d + b*c) << 16 + b*d - may produce a carry.
scs $r9
ret
add.d $r9,$r11 ; Last carry added to the high-order 32 bits.
L(L6):
clear.d $r13
ba L(L8)
clear.d $r10
L(L11):
clear.d $r10
ret
clear.d $r11
L(L3):
;; Form the maximum in $r10, by knowing the minimum, $r9.
;; (We don't know which one of $r10 or $r11 it is.)
;; Check if the largest operand is still just 16 bits.
xor $r9,$r10
xor $r11,$r10
cmpu.w 65535,$r10
bls L(L5)
movu.w $r9,$r13
;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but c==0
;; so we only need (a*d)<<16 + b*d with d = $r13, ab = $r10.
;; Remember that the upper part of (a*d)<<16 goes into the lower part
;; of $r11 and there may be a carry from adding the low 32 parts.
beq L(L11) ; d == 0?
move.d $r10,$r9
lslq 16,$r9
beq L(L10) ; b == 0?
clear.w $r10
mstep $r13,$r9 ; b*d
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
L(L10):
test.d $r10
mstep $r13,$r10 ; a*d
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
move.d $r10,$r11
lsrq 16,$r11
lslq 16,$r10
add.d $r9,$r10
scs $r12
ret
add.d $r12,$r11
L(L5):
;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but a and c==0
;; so b*d (with min=b=$r13, max=d=$r10) it is. As it won't overflow the
;; 32-bit part, just set $r11 to 0.
lslq 16,$r10
clear.d $r11
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
ret
mstep $r13,$r10
#endif
L(Lfe1):
.size SYM(__umulsidi3),L(Lfe1)-SYM(__umulsidi3)
;; plugin-libc/libgcc/config/cris/mulsi3.S
;; Copyright (C) 2001-2022 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify it under
;; the terms of the GNU General Public License as published by the Free
;; Software Foundation; either version 3, or (at your option) any later
;; version.
;;
;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
;; for more details.
;;
;; Under Section 7 of GPL version 3, you are granted additional
;; permissions described in the GCC Runtime Library Exception, version
;; 3.1, as published by the Free Software Foundation.
;;
;; You should have received a copy of the GNU General Public License and
;; a copy of the GCC Runtime Library Exception along with this program;
;; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
;; <http://www.gnu.org/licenses/>.
;;
;; This code used to be expanded through interesting expansions in
;; the machine description, compiled from this code:
;;
;; #ifdef L_mulsi3
;; long __Mul (unsigned long a, unsigned long b) __attribute__ ((__const__));
;;
;; /* This must be compiled with the -mexpand-mul flag, to synthesize the
;; multiplication from the mstep instructions. The check for
;; smaller-size multiplication pays off in the order of .5-10%;
;; estimated median 1%, depending on application.
;; FIXME: It can be further optimized if we go to assembler code, as
;; gcc 2.7.2 adds a few unnecessary instructions and does not put the
;; basic blocks in optimal order. */
;; long
;; __Mul (unsigned long a, unsigned long b)
;; {
;; #if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
;; /* In case other code is compiled without -march=v10, they will
;; contain calls to __Mul, regardless of flags at link-time. The
;; "else"-code below will work, but is unnecessarily slow. This
;; sometimes cuts a few minutes off from simulation time by just
;; returning a "mulu.d". */
;; return a * b;
;; #else
;; unsigned long min;
;;
;; /* Get minimum via the bound insn. */
;; min = a < b ? a : b;
;;
;; /* Can we omit computation of the high part? */
;; if (min > 65535)
;; /* No. Perform full multiplication. */
;; return a * b;
;; else
;; {
;; /* Check if both operands are within 16 bits. */
;; unsigned long max;
;;
;; /* Get maximum, by knowing the minimum.
;; This will partition a and b into max and min.
;; This is not currently something GCC understands,
;; so do this trick by asm. */
;; __asm__ ("xor %1,%0\n\txor %2,%0"
;; : "=r" (max)
;; : "r" (b), "r" (a), "0" (min));
;;
;; if (max > 65535)
;; /* Make GCC understand that only the low part of "min" will be
;; used. */
;; return max * (unsigned short) min;
;; else
;; /* Only the low parts of both operands are necessary. */
;; return ((unsigned short) max) * (unsigned short) min;
;; }
;; #endif /* not __CRIS_arch_version >= 10 */
;; }
;; #endif /* L_mulsi3 */
;;
;; That approach was abandoned since the caveats outweighed the
;; benefits. The expand-multiplication machinery is also removed, so you
;; can't do this anymore.
;;
;; For doubters of there being any benefits, some were: insensitivity to:
;; - ABI changes (mostly for experimentation)
;; - assembler syntax differences (mostly debug format).
;; - insn scheduling issues.
;; Most ABI experiments will presumably happen with arches with mul insns,
;; so that argument doesn't really hold anymore, and it's unlikely there
;; being new arch variants needing insn scheduling and not having mul
;; insns.
;; ELF and a.out have different syntax for local labels: the "wrong"
;; one may not be omitted from the object.
#undef L
#ifdef __AOUT__
# define L(x) x
#else
# define L(x) .x
#endif
.global ___Mul
.type ___Mul,@function
___Mul:
#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
;; Can't have the mulu.d last on a cache-line (in the delay-slot of the
;; "ret"), due to hardware bug. See documentation for -mmul-bug-workaround.
;; Not worthwhile to conditionalize here.
.p2alignw 2,0x050f
mulu.d $r11,$r10
ret
nop
#else
;; See if we can avoid multiplying some of the parts, knowing
;; they're zero.
move.d $r11,$r9
bound.d $r10,$r9
cmpu.w 65535,$r9
bls L(L3)
move.d $r10,$r12
;; Nope, have to do all the parts of a 32-bit multiplication.
;; See head comment in optabs.c:expand_doubleword_mult.
move.d $r10,$r13
movu.w $r11,$r9 ; ab*cd = (a*d + b*c)<<16 + b*d
lslq 16,$r13
mstep $r9,$r13 ; d*b
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
mstep $r9,$r13
clear.w $r10
test.d $r10
mstep $r9,$r10 ; d*a
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
mstep $r9,$r10
movu.w $r12,$r12
clear.w $r11
move.d $r11,$r9 ; Doubles as a "test.d" preparing for the mstep.
mstep $r12,$r9 ; b*c
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
mstep $r12,$r9
add.w $r9,$r10
lslq 16,$r10
ret
add.d $r13,$r10
L(L3):
;; Form the maximum in $r10, by knowing the minimum, $r9.
;; (We don't know which one of $r10 or $r11 it is.)
;; Check if the largest operand is still just 16 bits.
xor $r9,$r10
xor $r11,$r10
cmpu.w 65535,$r10
bls L(L5)
movu.w $r9,$r13
;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but c==0
;; so we only need (a*d)<<16 + b*d with d = $r13, ab = $r10.
;; We drop the upper part of (a*d)<<16 as we're only doing a
;; 32-bit-result multiplication.
move.d $r10,$r9
lslq 16,$r9
mstep $r13,$r9 ; b*d
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
mstep $r13,$r9
clear.w $r10
test.d $r10
mstep $r13,$r10 ; a*d
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
lslq 16,$r10
ret
add.d $r9,$r10
L(L5):
;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but a and c==0
;; so b*d (with b=$r13, a=$r10) it is.
lslq 16,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
mstep $r13,$r10
ret
mstep $r13,$r10
#endif
L(Lfe1):
.size ___Mul,L(Lfe1)-___Mul
/* plugin-libc/libgcc/config/arm/bpabi-v6m.S */
/* Miscellaneous BPABI functions. Thumb-1 implementation, suitable for ARMv4T,
ARMv6-M and ARMv8-M Baseline like ISA variants.
Copyright (C) 2006-2022 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file. */
/* Tag_ABI_align_needed: This code does not require 8-byte
alignment from the caller. */
/* .eabi_attribute 24, 0 -- default setting. */
/* Tag_ABI_align_preserved: This code preserves 8-byte
alignment in any callee. */
.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
#ifdef L_aeabi_lcmp
FUNC_START aeabi_lcmp
cmp xxh, yyh
beq 1f
bgt 2f
movs r0, #1
negs r0, r0
RET
2:
movs r0, #1
RET
1:
subs r0, xxl, yyl
beq 1f
bhi 2f
movs r0, #1
negs r0, r0
RET
2:
movs r0, #1
1:
RET
FUNC_END aeabi_lcmp
#endif /* L_aeabi_lcmp */
#ifdef L_aeabi_ulcmp
FUNC_START aeabi_ulcmp
cmp xxh, yyh
bne 1f
subs r0, xxl, yyl
beq 2f
1:
bcs 1f
movs r0, #1
negs r0, r0
RET
1:
movs r0, #1
2:
RET
FUNC_END aeabi_ulcmp
#endif /* L_aeabi_ulcmp */
.macro test_div_by_zero signed
cmp yyh, #0
bne 7f
cmp yyl, #0
bne 7f
cmp xxh, #0
.ifc \signed, unsigned
bne 2f
cmp xxl, #0
2:
beq 3f
movs xxh, #0
mvns xxh, xxh @ 0xffffffff
movs xxl, xxh
3:
.else
blt 6f
bgt 4f
cmp xxl, #0
beq 5f
4: movs xxl, #0
mvns xxl, xxl @ 0xffffffff
lsrs xxh, xxl, #1 @ 0x7fffffff
b 5f
6: movs xxh, #0x80
lsls xxh, xxh, #24 @ 0x80000000
movs xxl, #0
5:
.endif
@ tailcalls are tricky on v6-m.
push {r0, r1, r2}
ldr r0, 1f
adr r1, 1f
adds r0, r1
str r0, [sp, #8]
@ We know we are not on armv4t, so pop pc is safe.
pop {r0, r1, pc}
.align 2
1:
.word __aeabi_ldiv0 - 1b
7:
.endm
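@ Illustrative only: a hedged C sketch of the division-by-zero results
@ the macro above materializes before tail-calling __aeabi_ldiv0 --
@ saturated values for the signed case, all-ones for the unsigned case:
@
@   #include <limits.h>
@   long long sdiv0_result (long long n)               /* signed */
@   {
@     return n > 0 ? LLONG_MAX : n < 0 ? LLONG_MIN : 0;
@   }
@   unsigned long long udiv0_result (unsigned long long n)
@   {
@     return n ? ULLONG_MAX : 0;                       /* unsigned */
@   }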
#ifdef L_aeabi_ldivmod
FUNC_START aeabi_ldivmod
test_div_by_zero signed
push {r0, r1}
mov r0, sp
push {r0, lr}
ldr r0, [sp, #8]
bl SYM(__gnu_ldivmod_helper)
ldr r3, [sp, #4]
mov lr, r3
add sp, sp, #8
pop {r2, r3}
RET
FUNC_END aeabi_ldivmod
#endif /* L_aeabi_ldivmod */
#ifdef L_aeabi_uldivmod
FUNC_START aeabi_uldivmod
test_div_by_zero unsigned
push {r0, r1}
mov r0, sp
push {r0, lr}
ldr r0, [sp, #8]
bl SYM(__udivmoddi4)
ldr r3, [sp, #4]
mov lr, r3
add sp, sp, #8
pop {r2, r3}
RET
FUNC_END aeabi_uldivmod
#endif /* L_aeabi_uldivmod */
#ifdef L_arm_addsubsf3
FUNC_START aeabi_frsub
push {r4, lr}
movs r4, #1
lsls r4, #31
eors r0, r0, r4
bl __aeabi_fadd
pop {r4, pc}
FUNC_END aeabi_frsub
#endif /* L_arm_addsubsf3 */
#ifdef L_arm_cmpsf2
FUNC_START aeabi_cfrcmple
mov ip, r0
movs r0, r1
mov r1, ip
b 6f
FUNC_START aeabi_cfcmpeq
FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq
@ The status-returning routines are required to preserve all
@ registers except ip, lr, and cpsr.
6: push {r0, r1, r2, r3, r4, lr}
bl __lesf2
@ Set the Z flag correctly, and the C flag unconditionally.
cmp r0, #0
@ Clear the C flag if the return value was -1, indicating
@ that the first operand was smaller than the second.
bmi 1f
movs r1, #0
cmn r0, r1
1:
pop {r0, r1, r2, r3, r4, pc}
FUNC_END aeabi_cfcmple
FUNC_END aeabi_cfcmpeq
FUNC_END aeabi_cfrcmple
FUNC_START aeabi_fcmpeq
push {r4, lr}
bl __eqsf2
negs r0, r0
adds r0, r0, #1
pop {r4, pc}
FUNC_END aeabi_fcmpeq
.macro COMPARISON cond, helper, mode=sf2
FUNC_START aeabi_fcmp\cond
push {r4, lr}
bl __\helper\mode
cmp r0, #0
b\cond 1f
movs r0, #0
pop {r4, pc}
1:
movs r0, #1
pop {r4, pc}
FUNC_END aeabi_fcmp\cond
.endm
COMPARISON lt, le
COMPARISON le, le
COMPARISON gt, ge
COMPARISON ge, ge
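@ Illustrative only: a hedged C sketch of what one COMPARISON expansion
@ computes, e.g. for __aeabi_fcmplt (the same pattern recurs below for
@ the double-precision COMPARISON macro):
@
@   int aeabi_fcmplt_sketch (float a, float b)
@   {
@     extern int __lesf2 (float, float);       /* < 0 iff a < b */
@     return __lesf2 (a, b) < 0;
@   }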
#endif /* L_arm_cmpsf2 */
#ifdef L_arm_addsubdf3
FUNC_START aeabi_drsub
push {r4, lr}
movs r4, #1
lsls r4, #31
eors xxh, xxh, r4
bl __aeabi_dadd
pop {r4, pc}
FUNC_END aeabi_drsub
#endif /* L_arm_addsubdf3 */
#ifdef L_arm_cmpdf2
FUNC_START aeabi_cdrcmple
mov ip, r0
movs r0, r2
mov r2, ip
mov ip, r1
movs r1, r3
mov r3, ip
b 6f
FUNC_START aeabi_cdcmpeq
FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
@ The status-returning routines are required to preserve all
@ registers except ip, lr, and cpsr.
6: push {r0, r1, r2, r3, r4, lr}
bl __ledf2
@ Set the Z flag correctly, and the C flag unconditionally.
cmp r0, #0
@ Clear the C flag if the return value was -1, indicating
@ that the first operand was smaller than the second.
bmi 1f
movs r1, #0
cmn r0, r1
1:
pop {r0, r1, r2, r3, r4, pc}
FUNC_END aeabi_cdcmple
FUNC_END aeabi_cdcmpeq
FUNC_END aeabi_cdrcmple
FUNC_START aeabi_dcmpeq
push {r4, lr}
bl __eqdf2
negs r0, r0
adds r0, r0, #1
pop {r4, pc}
FUNC_END aeabi_dcmpeq
.macro COMPARISON cond, helper, mode=df2
FUNC_START aeabi_dcmp\cond
push {r4, lr}
bl __\helper\mode
cmp r0, #0
b\cond 1f
movs r0, #0
pop {r4, pc}
1:
movs r0, #1
pop {r4, pc}
FUNC_END aeabi_dcmp\cond
.endm
COMPARISON lt, le
COMPARISON le, le
COMPARISON gt, ge
COMPARISON ge, ge
#endif /* L_arm_cmpdf2 */
# plugin-libc/libgcc/config/arm/crtn.S
# Copyright (C) 2001-2022 Free Software Foundation, Inc.
# Written By Nick Clifton
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file. */
/* Tag_ABI_align_needed: This code does not require 8-byte
alignment from the caller. */
/* .eabi_attribute 24, 0 -- default setting. */
/* Tag_ABI_align_preserved: This code preserves 8-byte
alignment in any callee. */
.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
# Note - this macro is complemented by the FUNC_START macro
# in crti.S. If you change this macro you must also change
# that macro to match.
#
# Note - we do not try any fancy optimizations of the return
# sequences here, it is just not worth it. Instead keep things
# simple. Restore all the saved registers, including the link
# register, and then perform the correct function return instruction.
# We also save/restore r3 to ensure stack alignment.
.macro FUNC_END
#ifdef __thumb__
.thumb
pop {r3, r4, r5, r6, r7}
pop {r3}
mov lr, r3
#else
.arm
sub sp, fp, #40
ldmfd sp, {r4, r5, r6, r7, r8, r9, sl, fp, sp, lr}
#endif
#if defined __THUMB_INTERWORK__ || defined __thumb__
bx lr
#else
mov pc, lr
#endif
.endm
.section ".init"
;;
FUNC_END
.section ".fini"
;;
FUNC_END
# end of crtn.S
|
4ms/metamodule-plugin-sdk
| 3,926
|
plugin-libc/libgcc/config/arm/cmse_nonsecure_call.S
|
/* CMSE wrapper function used to save, clear and restore callee saved registers
for cmse_nonsecure_call's.
Copyright (C) 2016-2022 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.syntax unified
#ifdef __ARM_PCS_VFP
# if (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1)
.fpu fpv5-d16
# else
.fpu fpv4-sp-d16
# endif
#endif
.thumb
.global __gnu_cmse_nonsecure_call
__gnu_cmse_nonsecure_call:
#if defined(__ARM_ARCH_8M_MAIN__)
push {r5-r11,lr}
mov r7, r4
mov r8, r4
mov r9, r4
mov r10, r4
mov r11, r4
mov ip, r4
/* Save and clear callee-saved registers only if we are dealing with the
hard float ABI. The unused caller-saved registers have already been
cleared by GCC-generated code. */
#ifdef __ARM_PCS_VFP
vpush.f64 {d8-d15}
mov r5, #0
vmov d8, r5, r5
#if __ARM_FP & 0x04
vmov s18, s19, r5, r5
vmov s20, s21, r5, r5
vmov s22, s23, r5, r5
vmov s24, s25, r5, r5
vmov s26, s27, r5, r5
vmov s28, s29, r5, r5
vmov s30, s31, r5, r5
#elif (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1)
vmov.f64 d9, d8
vmov.f64 d10, d8
vmov.f64 d11, d8
vmov.f64 d12, d8
vmov.f64 d13, d8
vmov.f64 d14, d8
vmov.f64 d15, d8
#else
#error "Half precision implementation not supported."
#endif
/* Clear the cumulative exception-status bits (0-4,7) and the
condition code bits (28-31) of the FPSCR. */
vmrs r5, fpscr
movw r6, #65376
movt r6, #4095
ands r5, r6
vmsr fpscr, r5
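/* As a C sketch of the mask built above (illustrative only):
     fpscr &= 0x0FFFFF60;   // == ~(0xF0000000 | 0x9F): keep everything
                            // except bits 0-4 and 7, and NZCV (28-31)
   and indeed (4095 << 16) | 65376 == 0x0FFFFF60, the movt/movw pair. */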
/* We are not dealing with the hard float ABI, so we can safely use the
vlstm and vlldm instructions without needing to preserve the registers
used for argument passing. */
#else
sub sp, sp, #0x88 /* Reserve stack space to save all floating point
registers, including FPSCR. */
vlstm sp /* Lazy store and clearance of d0-d15 and FPSCR. */
#endif /* __ARM_PCS_VFP */
/* Make sure to clear the 'GE' bits of the APSR register if 32-bit SIMD
instructions are available. */
#if defined(__ARM_FEATURE_SIMD32)
msr APSR_nzcvqg, r4
#else
msr APSR_nzcvq, r4
#endif
mov r5, r4
mov r6, r4
blxns r4
#ifdef __ARM_PCS_VFP
vpop.f64 {d8-d15}
#else
/* VLLDM erratum mitigation sequence. */
mrs r5, control
tst r5, #8 /* CONTROL_S.SFPA */
it ne
.inst.w 0xeeb00a40 /* vmovne s0, s0 */
vlldm sp /* Lazy restore of d0-d15 and FPSCR. */
add sp, sp, #0x88 /* Free space used to save floating point registers. */
#endif /* __ARM_PCS_VFP */
pop {r5-r11, pc}
#elif defined (__ARM_ARCH_8M_BASE__)
push {r5-r7, lr}
mov r5, r8
mov r6, r9
mov r7, r10
push {r5-r7}
mov r5, r11
push {r5}
mov r5, r4
mov r6, r4
mov r7, r4
mov r8, r4
mov r9, r4
mov r10, r4
mov r11, r4
mov ip, r4
msr APSR_nzcvq, r4
blxns r4
pop {r5}
mov r11, r5
pop {r5-r7}
mov r10, r7
mov r9, r6
mov r8, r5
pop {r5-r7, pc}
#else
#error "This should only be used for armv8-m base- and mainline."
#endif
|
4ms/metamodule-plugin-sdk
| 5,907
|
plugin-libc/libgcc/config/arm/bpabi.S
|
/* Miscellaneous BPABI functions.
Copyright (C) 2003-2022 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.cfi_sections .debug_frame
#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file. */
/* Tag_ABI_align_needed: This code does not require 8-byte
alignment from the caller. */
/* .eabi_attribute 24, 0 -- default setting. */
/* Tag_ABI_align_preserved: This code preserves 8-byte
alignment in any callee. */
.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
#ifdef L_aeabi_lcmp
ARM_FUNC_START aeabi_lcmp
cmp xxh, yyh
do_it lt
movlt r0, #-1
do_it gt
movgt r0, #1
do_it ne
RETc(ne)
subs r0, xxl, yyl
do_it lo
movlo r0, #-1
do_it hi
movhi r0, #1
RET
FUNC_END aeabi_lcmp
#endif /* L_aeabi_lcmp */
#ifdef L_aeabi_ulcmp
ARM_FUNC_START aeabi_ulcmp
cmp xxh, yyh
do_it lo
movlo r0, #-1
do_it hi
movhi r0, #1
do_it ne
RETc(ne)
cmp xxl, yyl
do_it lo
movlo r0, #-1
do_it hi
movhi r0, #1
do_it eq
moveq r0, #0
RET
FUNC_END aeabi_ulcmp
#endif /* L_aeabi_ulcmp */
.macro test_div_by_zero signed
/* Tail-call to divide-by-zero handlers which may be overridden by the user,
so unwinding works properly. */
#if defined(__thumb2__)
cbnz yyh, 2f
cbnz yyl, 2f
cmp xxh, #0
.ifc \signed, unsigned
do_it eq
cmpeq xxl, #0
do_it ne, t
movne xxh, #0xffffffff
movne xxl, #0xffffffff
.else
do_it lt, tt
movlt xxl, #0
movlt xxh, #0x80000000
blt 1f
do_it eq
cmpeq xxl, #0
do_it ne, t
movne xxh, #0x7fffffff
movne xxl, #0xffffffff
.endif
1:
b SYM (__aeabi_ldiv0) __PLT__
2:
#else
/* Note: Thumb-1 code calls via an ARM shim on processors which
support ARM mode. */
cmp yyh, #0
cmpeq yyl, #0
bne 2f
cmp xxh, #0
.ifc \signed, unsigned
cmpeq xxl, #0
movne xxh, #0xffffffff
movne xxl, #0xffffffff
.else
movlt xxh, #0x80000000
movlt xxl, #0
blt 1f
cmpeq xxl, #0
movne xxh, #0x7fffffff
movne xxl, #0xffffffff
.endif
1:
b SYM (__aeabi_ldiv0) __PLT__
2:
#endif
.endm
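/* As a C model of the macro above (a sketch of the AEABI convention,
   not code from this file): on a zero divisor the dividend is saturated
   before tail-calling __aeabi_ldiv0:
     unsigned:  n != 0 ? ULLONG_MAX : 0
     signed:    n < 0 ? LLONG_MIN : n > 0 ? LLONG_MAX : 0  */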
/* we can use STRD/LDRD on v5TE and later, and any Thumb-2 architecture. */
#if (defined(__ARM_EABI__) \
&& (defined(__thumb2__) \
|| (__ARM_ARCH >= 5 && defined(__TARGET_FEATURE_DSP))))
#define CAN_USE_LDRD 1
#else
#define CAN_USE_LDRD 0
#endif
/* Set up the stack frame for the call to __udivmoddi4. At the end of the macro the
stack is arranged as follows:
sp+12 / space for remainder
sp+8 \ (written by __udivmoddi4)
sp+4 lr
sp+0 sp+8 [rp (remainder pointer) argument for __udivmoddi4]
*/
.macro push_for_divide fname
#if defined(__thumb2__) && CAN_USE_LDRD
sub ip, sp, #8
strd ip, lr, [sp, #-16]!
#else
sub sp, sp, #8
do_push {sp, lr}
#endif
.cfi_adjust_cfa_offset 16
.cfi_offset 14, -12
.endm
/* restore stack */
.macro pop_for_divide
ldr lr, [sp, #4]
#if CAN_USE_LDRD
ldrd r2, r3, [sp, #8]
add sp, sp, #16
#else
add sp, sp, #8
do_pop {r2, r3}
#endif
.cfi_restore 14
.cfi_adjust_cfa_offset 0
.endm
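/* Both entry points below funnel into the libgcc helper
     unsigned long long __udivmoddi4 (unsigned long long n,
                                      unsigned long long d,
                                      unsigned long long *rp);
   roughly:  quot = __udivmoddi4 (n, d, &rem);  The quotient returns in
   r0:r1, and the remainder, stored through rp into the slot reserved at
   sp+8, is popped back into r2:r3. */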
#ifdef L_aeabi_ldivmod
/* Perform 64 bit signed division.
Inputs:
r0:r1 numerator
r2:r3 denominator
Outputs:
r0:r1 quotient
r2:r3 remainder
*/
ARM_FUNC_START aeabi_ldivmod
.cfi_startproc
test_div_by_zero signed
push_for_divide __aeabi_ldivmod
cmp xxh, #0
blt 1f
cmp yyh, #0
blt 2f
/* arguments in (r0:r1), (r2:r3) and *sp */
bl SYM(__udivmoddi4) __PLT__
.cfi_remember_state
pop_for_divide
RET
1: /* xxh:xxl is negative */
.cfi_restore_state
negs xxl, xxl
sbc xxh, xxh, xxh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */
cmp yyh, #0
blt 3f
/* arguments in (r0:r1), (r2:r3) and *sp */
bl SYM(__udivmoddi4) __PLT__
.cfi_remember_state
pop_for_divide
negs xxl, xxl
sbc xxh, xxh, xxh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */
negs yyl, yyl
sbc yyh, yyh, yyh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */
RET
2: /* only yyh:yyl is negative */
.cfi_restore_state
negs yyl, yyl
sbc yyh, yyh, yyh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */
/* arguments in (r0:r1), (r2:r3) and *sp */
bl SYM(__udivmoddi4) __PLT__
.cfi_remember_state
pop_for_divide
negs xxl, xxl
sbc xxh, xxh, xxh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */
RET
3: /* both xxh:xxl and yyh:yyl are negative */
.cfi_restore_state
negs yyl, yyl
sbc yyh, yyh, yyh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */
/* arguments in (r0:r1), (r2:r3) and *sp */
bl SYM(__udivmoddi4) __PLT__
pop_for_divide
negs yyl, yyl
sbc yyh, yyh, yyh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */
RET
.cfi_endproc
#endif /* L_aeabi_ldivmod */
#ifdef L_aeabi_uldivmod
/* Perform 64 bit unsigned division.
Inputs:
r0:r1 numerator
r2:r3 denominator
Outputs:
r0:r1 quotient
r2:r3 remainder
*/
ARM_FUNC_START aeabi_uldivmod
.cfi_startproc
test_div_by_zero unsigned
push_for_divide __aeabi_uldivmod
/* arguments in (r0:r1), (r2:r3) and *sp */
bl SYM(__udivmoddi4) __PLT__
pop_for_divide
RET
.cfi_endproc
#endif /* L_aeabi_uldivmod */
|
4ms/metamodule-plugin-sdk
| 23,200
|
plugin-libc/libgcc/config/arm/ieee754-sf.S
|
/* ieee754-sf.S single-precision floating point support for ARM
Copyright (C) 2003-2022 Free Software Foundation, Inc.
Contributed by Nicolas Pitre (nico@fluxnic.net)
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* Notes:
*
* The goal of this code is to be as fast as possible. This is
* not meant to be easy to understand for the casual reader.
*
* Only the default rounding mode is intended for best performance.
* Exceptions aren't supported yet, but that can be added quite easily
* if necessary without impacting performance.
*
* In the CFI related comments, 'previousOffset' refers to the previous offset
* from sp used to compute the CFA.
*/
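/*
 * For reference in the bit twiddling below: a single-precision value is
 * manipulated through its raw IEEE-754 encoding.  In C terms (a sketch,
 * names are illustrative):
 *
 *     sign = bits & 0x80000000;
 *     exp  = (bits >> 23) & 0xff;
 *     mant = (bits & 0x007fffff) | 0x00800000;   // implicit bit, exp != 0
 */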
#ifdef L_arm_negsf2
ARM_FUNC_START negsf2
ARM_FUNC_ALIAS aeabi_fneg negsf2
CFI_START_FUNCTION
eor r0, r0, #0x80000000 @ flip sign bit
RET
CFI_END_FUNCTION
FUNC_END aeabi_fneg
FUNC_END negsf2
#endif
#ifdef L_arm_addsubsf3
ARM_FUNC_START aeabi_frsub
CFI_START_FUNCTION
eor r0, r0, #0x80000000 @ flip sign bit of first arg
b 1f
ARM_FUNC_START subsf3
ARM_FUNC_ALIAS aeabi_fsub subsf3
eor r1, r1, #0x80000000 @ flip sign bit of second arg
#if defined(__INTERWORKING_STUBS__)
b 1f @ Skip Thumb-code prologue
#endif
ARM_FUNC_START addsf3
ARM_FUNC_ALIAS aeabi_fadd addsf3
1: @ Look for zeroes, equal values, INF, or NAN.
movs r2, r0, lsl #1
do_it ne, ttt
COND(mov,s,ne) r3, r1, lsl #1
teqne r2, r3
COND(mvn,s,ne) ip, r2, asr #24
COND(mvn,s,ne) ip, r3, asr #24
beq LSYM(Lad_s)
@ Compute exponent difference. Make largest exponent in r2,
@ corresponding arg in r0, and positive exponent difference in r3.
mov r2, r2, lsr #24
rsbs r3, r2, r3, lsr #24
do_it gt, ttt
addgt r2, r2, r3
eorgt r1, r0, r1
eorgt r0, r1, r0
eorgt r1, r0, r1
do_it lt
rsblt r3, r3, #0
@ If exponent difference is too large, return largest argument
@ already in r0. We need up to 25 bit to handle proper rounding
@ of 0x1p25 - 1.1.
cmp r3, #25
do_it hi
RETc(hi)
@ Convert mantissa to signed integer.
tst r0, #0x80000000
orr r0, r0, #0x00800000
bic r0, r0, #0xff000000
do_it ne
rsbne r0, r0, #0
tst r1, #0x80000000
orr r1, r1, #0x00800000
bic r1, r1, #0xff000000
do_it ne
rsbne r1, r1, #0
@ If exponent == difference, one or both args were denormalized.
@ Since this is not a common case, rescale them out of line.
teq r2, r3
beq LSYM(Lad_d)
LSYM(Lad_x):
@ Compensate for the exponent overlapping the mantissa MSB added later
sub r2, r2, #1
@ Shift and add second arg to first arg in r0.
@ Keep leftover bits into r1.
shiftop adds r0 r0 r1 asr r3 ip
rsb r3, r3, #32
shift1 lsl, r1, r1, r3
@ Keep absolute value in r0-r1, sign in r3 (the n bit was set above)
and r3, r0, #0x80000000
bpl LSYM(Lad_p)
#if defined(__thumb2__)
negs r1, r1
sbc r0, r0, r0, lsl #1
#else
rsbs r1, r1, #0
rsc r0, r0, #0
#endif
@ Determine how to normalize the result.
LSYM(Lad_p):
cmp r0, #0x00800000
bcc LSYM(Lad_a)
cmp r0, #0x01000000
bcc LSYM(Lad_e)
@ Result needs to be shifted right.
movs r0, r0, lsr #1
mov r1, r1, rrx
add r2, r2, #1
@ Make sure we did not bust our exponent.
cmp r2, #254
bhs LSYM(Lad_o)
@ Our result is now properly aligned into r0, remaining bits in r1.
@ Pack final result together.
@ Round with MSB of r1. If halfway between two numbers, round towards
@ LSB of r0 = 0.
LSYM(Lad_e):
cmp r1, #0x80000000
adc r0, r0, r2, lsl #23
do_it eq
biceq r0, r0, #1
orr r0, r0, r3
RET
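@ (Round-to-nearest-even, as a C sketch of the three instructions above:
@    frac += (guard > 0x80000000) || (guard == 0x80000000 && (frac & 1));
@  cmp sets C when r1 >= 0x80000000, adc folds it in, and biceq clears
@  the LSB again in the exact halfway case.)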
@ Result must be shifted left and exponent adjusted.
LSYM(Lad_a):
movs r1, r1, lsl #1
adc r0, r0, r0
subs r2, r2, #1
do_it hs
cmphs r0, #0x00800000
bhs LSYM(Lad_e)
@ No rounding necessary since r1 will always be 0 at this point.
LSYM(Lad_l):
#if !defined (__ARM_FEATURE_CLZ)
movs ip, r0, lsr #12
moveq r0, r0, lsl #12
subeq r2, r2, #12
tst r0, #0x00ff0000
moveq r0, r0, lsl #8
subeq r2, r2, #8
tst r0, #0x00f00000
moveq r0, r0, lsl #4
subeq r2, r2, #4
tst r0, #0x00c00000
moveq r0, r0, lsl #2
subeq r2, r2, #2
cmp r0, #0x00800000
movcc r0, r0, lsl #1
sbcs r2, r2, #0
#else
clz ip, r0
sub ip, ip, #8
subs r2, r2, ip
shift1 lsl, r0, r0, ip
#endif
@ Final result with sign
@ If exponent negative, denormalize result.
do_it ge, et
addge r0, r0, r2, lsl #23
rsblt r2, r2, #0
orrge r0, r0, r3
#if defined(__thumb2__)
do_it lt, t
lsrlt r0, r0, r2
orrlt r0, r3, r0
#else
orrlt r0, r3, r0, lsr r2
#endif
RET
@ Fixup and adjust bit position for denormalized arguments.
@ Note that r2 must not remain equal to 0.
LSYM(Lad_d):
teq r2, #0
eor r1, r1, #0x00800000
do_it eq, te
eoreq r0, r0, #0x00800000
addeq r2, r2, #1
subne r3, r3, #1
b LSYM(Lad_x)
LSYM(Lad_s):
mov r3, r1, lsl #1
mvns ip, r2, asr #24
do_it ne
COND(mvn,s,ne) ip, r3, asr #24
beq LSYM(Lad_i)
teq r2, r3
beq 1f
@ Result is x + 0.0 = x or 0.0 + y = y.
teq r2, #0
do_it eq
moveq r0, r1
RET
1: teq r0, r1
@ Result is x - x = 0.
do_it ne, t
movne r0, #0
RETc(ne)
@ Result is x + x = 2x.
tst r2, #0xff000000
bne 2f
movs r0, r0, lsl #1
do_it cs
orrcs r0, r0, #0x80000000
RET
2: adds r2, r2, #(2 << 24)
do_it cc, t
addcc r0, r0, #(1 << 23)
RETc(cc)
and r3, r0, #0x80000000
@ Overflow: return INF.
LSYM(Lad_o):
orr r0, r3, #0x7f000000
orr r0, r0, #0x00800000
RET
@ At least one of r0/r1 is INF/NAN.
@ if r0 != INF/NAN: return r1 (which is INF/NAN)
@ if r1 != INF/NAN: return r0 (which is INF/NAN)
@ if r0 or r1 is NAN: return NAN
@ if opposite sign: return NAN
@ otherwise return r0 (which is INF or -INF)
LSYM(Lad_i):
mvns r2, r2, asr #24
do_it ne, et
movne r0, r1
COND(mvn,s,eq) r3, r3, asr #24
movne r1, r0
movs r2, r0, lsl #9
do_it eq, te
COND(mov,s,eq) r3, r1, lsl #9
teqeq r0, r1
orrne r0, r0, #0x00400000 @ quiet NAN
RET
CFI_END_FUNCTION
FUNC_END aeabi_frsub
FUNC_END aeabi_fadd
FUNC_END addsf3
FUNC_END aeabi_fsub
FUNC_END subsf3
ARM_FUNC_START floatunsisf
ARM_FUNC_ALIAS aeabi_ui2f floatunsisf
CFI_START_FUNCTION
mov r3, #0
b 1f
ARM_FUNC_START floatsisf
ARM_FUNC_ALIAS aeabi_i2f floatsisf
ands r3, r0, #0x80000000
do_it mi
rsbmi r0, r0, #0
1: movs ip, r0
do_it eq
RETc(eq)
@ Add initial exponent to sign
orr r3, r3, #((127 + 23) << 23)
.ifnc ah, r0
mov ah, r0
.endif
mov al, #0
b 2f
CFI_END_FUNCTION
FUNC_END aeabi_i2f
FUNC_END floatsisf
FUNC_END aeabi_ui2f
FUNC_END floatunsisf
ARM_FUNC_START floatundisf
ARM_FUNC_ALIAS aeabi_ul2f floatundisf
CFI_START_FUNCTION
orrs r2, r0, r1
do_it eq
RETc(eq)
mov r3, #0
b 1f
ARM_FUNC_START floatdisf
ARM_FUNC_ALIAS aeabi_l2f floatdisf
orrs r2, r0, r1
do_it eq
RETc(eq)
ands r3, ah, #0x80000000 @ sign bit in r3
bpl 1f
#if defined(__thumb2__)
negs al, al
sbc ah, ah, ah, lsl #1
#else
rsbs al, al, #0
rsc ah, ah, #0
#endif
1:
movs ip, ah
do_it eq, tt
moveq ip, al
moveq ah, al
moveq al, #0
@ Add initial exponent to sign
orr r3, r3, #((127 + 23 + 32) << 23)
do_it eq
subeq r3, r3, #(32 << 23)
2: sub r3, r3, #(1 << 23)
#if !defined (__ARM_FEATURE_CLZ)
mov r2, #23
cmp ip, #(1 << 16)
do_it hs, t
movhs ip, ip, lsr #16
subhs r2, r2, #16
cmp ip, #(1 << 8)
do_it hs, t
movhs ip, ip, lsr #8
subhs r2, r2, #8
cmp ip, #(1 << 4)
do_it hs, t
movhs ip, ip, lsr #4
subhs r2, r2, #4
cmp ip, #(1 << 2)
do_it hs, e
subhs r2, r2, #2
sublo r2, r2, ip, lsr #1
subs r2, r2, ip, lsr #3
#else
clz r2, ip
subs r2, r2, #8
#endif
sub r3, r3, r2, lsl #23
blt 3f
shiftop add r3 r3 ah lsl r2 ip
shift1 lsl, ip, al, r2
rsb r2, r2, #32
cmp ip, #0x80000000
shiftop adc r0 r3 al lsr r2 r2
do_it eq
biceq r0, r0, #1
RET
3: add r2, r2, #32
shift1 lsl, ip, ah, r2
rsb r2, r2, #32
orrs al, al, ip, lsl #1
shiftop adc r0 r3 ah lsr r2 r2
do_it eq
biceq r0, r0, ip, lsr #31
RET
CFI_END_FUNCTION
FUNC_END floatdisf
FUNC_END aeabi_l2f
FUNC_END floatundisf
FUNC_END aeabi_ul2f
#endif /* L_addsubsf3 */
#if defined(L_arm_mulsf3) || defined(L_arm_muldivsf3)
@ Define multiplication as weak in _arm_mulsf3.o so that it can be overridden
@ by the global definition in _arm_muldivsf3.o. This allows a program only
@ using multiplication to take the weak definition which does not contain the
@ division code. Programs using only division or both division and
@ multiplication will pull _arm_muldivsf3.o from which both the multiplication
@ and division are taken thanks to the override.
#ifdef L_arm_mulsf3
WEAK mulsf3
WEAK aeabi_fmul
#endif
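@ In C terms this is the usual weak/strong pairing (illustrative):
@     __attribute__((weak)) float __mulsf3 (float, float);
@ the strong definition assembled into _arm_muldivsf3.o simply wins at
@ link time whenever division is referenced as well.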
ARM_FUNC_START mulsf3
ARM_FUNC_ALIAS aeabi_fmul mulsf3
CFI_START_FUNCTION
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
ands r2, ip, r0, lsr #23
do_it ne, tt
COND(and,s,ne) r3, ip, r1, lsr #23
teqne r2, ip
teqne r3, ip
beq LSYM(Lml_s)
LSYM(Lml_x):
@ Add exponents together
add r2, r2, r3
@ Determine final sign.
eor ip, r0, r1
@ Convert mantissa to unsigned integer.
@ If power of two, branch to a separate path.
@ Make up for final alignment.
movs r0, r0, lsl #9
do_it ne
COND(mov,s,ne) r1, r1, lsl #9
beq LSYM(Lml_1)
mov r3, #0x08000000
orr r0, r3, r0, lsr #5
orr r1, r3, r1, lsr #5
@ The actual multiplication.
@ This code works on architecture versions >= 4
umull r3, r1, r0, r1
@ Put final sign in r0.
and r0, ip, #0x80000000
@ Adjust result upon the MSB position.
cmp r1, #(1 << 23)
do_it cc, tt
movcc r1, r1, lsl #1
orrcc r1, r1, r3, lsr #31
movcc r3, r3, lsl #1
@ Add sign to result.
orr r0, r0, r1
@ Apply exponent bias, check for under/overflow.
sbc r2, r2, #127
cmp r2, #(254 - 1)
bhi LSYM(Lml_u)
@ Round the result, merge final exponent.
cmp r3, #0x80000000
adc r0, r0, r2, lsl #23
do_it eq
biceq r0, r0, #1
RET
@ Multiplication by 0x1p*: let's shortcut a lot of code.
LSYM(Lml_1):
teq r0, #0
and ip, ip, #0x80000000
do_it eq
moveq r1, r1, lsl #9
orr r0, ip, r0, lsr #9
orr r0, r0, r1, lsr #9
subs r2, r2, #127
do_it gt, tt
COND(rsb,s,gt) r3, r2, #255
orrgt r0, r0, r2, lsl #23
RETc(gt)
@ Under/overflow: fix things up for the code below.
orr r0, r0, #0x00800000
mov r3, #0
subs r2, r2, #1
LSYM(Lml_u):
@ Overflow?
bgt LSYM(Lml_o)
@ Check if denormalized result is possible, otherwise return signed 0.
cmn r2, #(24 + 1)
do_it le, t
bicle r0, r0, #0x7fffffff
RETc(le)
@ Shift value right, round, etc.
rsb r2, r2, #0
movs r1, r0, lsl #1
shift1 lsr, r1, r1, r2
rsb r2, r2, #32
shift1 lsl, ip, r0, r2
movs r0, r1, rrx
adc r0, r0, #0
orrs r3, r3, ip, lsl #1
do_it eq
biceq r0, r0, ip, lsr #31
RET
@ One or both arguments are denormalized.
@ Scale them leftwards and preserve sign bit.
LSYM(Lml_d):
teq r2, #0
and ip, r0, #0x80000000
1: do_it eq, tt
moveq r0, r0, lsl #1
tsteq r0, #0x00800000
subeq r2, r2, #1
beq 1b
orr r0, r0, ip
teq r3, #0
and ip, r1, #0x80000000
2: do_it eq, tt
moveq r1, r1, lsl #1
tsteq r1, #0x00800000
subeq r3, r3, #1
beq 2b
orr r1, r1, ip
b LSYM(Lml_x)
LSYM(Lml_s):
@ Isolate the INF and NAN cases away
and r3, ip, r1, lsr #23
teq r2, ip
do_it ne
teqne r3, ip
beq 1f
@ Here, one or more arguments are either denormalized or zero.
bics ip, r0, #0x80000000
do_it ne
COND(bic,s,ne) ip, r1, #0x80000000
bne LSYM(Lml_d)
@ Result is 0, but determine sign anyway.
LSYM(Lml_z):
eor r0, r0, r1
bic r0, r0, #0x7fffffff
RET
1: @ One or both args are INF or NAN.
teq r0, #0x0
do_it ne, ett
teqne r0, #0x80000000
moveq r0, r1
teqne r1, #0x0
teqne r1, #0x80000000
beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
teq r2, ip
bne 1f
movs r2, r0, lsl #9
bne LSYM(Lml_n) @ NAN * <anything> -> NAN
1: teq r3, ip
bne LSYM(Lml_i)
movs r3, r1, lsl #9
do_it ne
movne r0, r1
bne LSYM(Lml_n) @ <anything> * NAN -> NAN
@ Result is INF, but we need to determine its sign.
LSYM(Lml_i):
eor r0, r0, r1
@ Overflow: return INF (sign already in r0).
LSYM(Lml_o):
and r0, r0, #0x80000000
orr r0, r0, #0x7f000000
orr r0, r0, #0x00800000
RET
@ Return a quiet NAN.
LSYM(Lml_n):
orr r0, r0, #0x7f000000
orr r0, r0, #0x00c00000
RET
CFI_END_FUNCTION
FUNC_END aeabi_fmul
FUNC_END mulsf3
#ifdef L_arm_muldivsf3
ARM_FUNC_START divsf3
ARM_FUNC_ALIAS aeabi_fdiv divsf3
CFI_START_FUNCTION
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
ands r2, ip, r0, lsr #23
do_it ne, tt
COND(and,s,ne) r3, ip, r1, lsr #23
teqne r2, ip
teqne r3, ip
beq LSYM(Ldv_s)
LSYM(Ldv_x):
@ Subtract divisor exponent from dividend's
sub r2, r2, r3
@ Preserve final sign into ip.
eor ip, r0, r1
@ Convert mantissa to unsigned integer.
@ Dividend -> r3, divisor -> r1.
movs r1, r1, lsl #9
mov r0, r0, lsl #9
beq LSYM(Ldv_1)
mov r3, #0x10000000
orr r1, r3, r1, lsr #4
orr r3, r3, r0, lsr #4
@ Initialize r0 (result) with final sign bit.
and r0, ip, #0x80000000
@ Ensure the result will land at a known bit position.
@ Apply exponent bias accordingly.
cmp r3, r1
do_it cc
movcc r3, r3, lsl #1
adc r2, r2, #(127 - 2)
@ The actual division loop.
mov ip, #0x00800000
1: cmp r3, r1
do_it cs, t
subcs r3, r3, r1
orrcs r0, r0, ip
cmp r3, r1, lsr #1
do_it cs, t
subcs r3, r3, r1, lsr #1
orrcs r0, r0, ip, lsr #1
cmp r3, r1, lsr #2
do_it cs, t
subcs r3, r3, r1, lsr #2
orrcs r0, r0, ip, lsr #2
cmp r3, r1, lsr #3
do_it cs, t
subcs r3, r3, r1, lsr #3
orrcs r0, r0, ip, lsr #3
movs r3, r3, lsl #4
do_it ne
COND(mov,s,ne) ip, ip, lsr #4
bne 1b
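@ (The loop above is restoring division producing 4 quotient bits per
@  pass; a C sketch of the idea:
@      while (bit) {
@        if (num >= den) { num -= den; quo |= bit; }
@        num <<= 1;  bit >>= 1;
@      }
@  except the remainder is rescaled by 16 once per unrolled pass.)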
@ Check exponent for under/overflow.
cmp r2, #(254 - 1)
bhi LSYM(Lml_u)
@ Round the result, merge final exponent.
cmp r3, r1
adc r0, r0, r2, lsl #23
do_it eq
biceq r0, r0, #1
RET
@ Division by 0x1p*: let's shortcut a lot of code.
LSYM(Ldv_1):
and ip, ip, #0x80000000
orr r0, ip, r0, lsr #9
adds r2, r2, #127
do_it gt, tt
COND(rsb,s,gt) r3, r2, #255
orrgt r0, r0, r2, lsl #23
RETc(gt)
orr r0, r0, #0x00800000
mov r3, #0
subs r2, r2, #1
b LSYM(Lml_u)
@ One or both arguments are denormalized.
@ Scale them leftwards and preserve sign bit.
LSYM(Ldv_d):
teq r2, #0
and ip, r0, #0x80000000
1: do_it eq, tt
moveq r0, r0, lsl #1
tsteq r0, #0x00800000
subeq r2, r2, #1
beq 1b
orr r0, r0, ip
teq r3, #0
and ip, r1, #0x80000000
2: do_it eq, tt
moveq r1, r1, lsl #1
tsteq r1, #0x00800000
subeq r3, r3, #1
beq 2b
orr r1, r1, ip
b LSYM(Ldv_x)
@ One or both arguments are either INF, NAN, zero or denormalized.
LSYM(Ldv_s):
and r3, ip, r1, lsr #23
teq r2, ip
bne 1f
movs r2, r0, lsl #9
bne LSYM(Lml_n) @ NAN / <anything> -> NAN
teq r3, ip
bne LSYM(Lml_i) @ INF / <anything> -> INF
mov r0, r1
b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
1: teq r3, ip
bne 2f
movs r3, r1, lsl #9
beq LSYM(Lml_z) @ <anything> / INF -> 0
mov r0, r1
b LSYM(Lml_n) @ <anything> / NAN -> NAN
2: @ If both are nonzero, we need to normalize and resume above.
bics ip, r0, #0x80000000
do_it ne
COND(bic,s,ne) ip, r1, #0x80000000
bne LSYM(Ldv_d)
@ One or both arguments are zero.
bics r2, r0, #0x80000000
bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
bics r3, r1, #0x80000000
bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
b LSYM(Lml_n) @ 0 / 0 -> NAN
CFI_END_FUNCTION
FUNC_END aeabi_fdiv
FUNC_END divsf3
#endif /* L_muldivsf3 */
#endif /* L_arm_mulsf3 || L_arm_muldivsf3 */
#ifdef L_arm_cmpsf2
@ The return value in r0 is
@
@ 0 if the operands are equal
@ 1 if the first operand is greater than the second, or
@ the operands are unordered and the operation is
@ CMP, LT, LE, NE, or EQ.
@ -1 if the first operand is less than the second, or
@ the operands are unordered and the operation is GT
@ or GE.
@
@ The Z flag will be set iff the operands are equal.
@
@ The following registers are clobbered by this function:
@ ip, r0, r1, r2, r3
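@
@ As a usage sketch (how the compiler consumes these, per the libgcc
@ documentation):
@     a < b   becomes   __ltsf2 (a, b) < 0
@     a > b   becomes   __gtsf2 (a, b) > 0
@ so the unordered defaults above (+1 for CMP/LT/LE/NE/EQ, -1 for GT/GE)
@ make every ordered comparison false when a NaN is involved.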
ARM_FUNC_START gtsf2
ARM_FUNC_ALIAS gesf2 gtsf2
CFI_START_FUNCTION
mov ip, #-1
b 1f
ARM_FUNC_START ltsf2
ARM_FUNC_ALIAS lesf2 ltsf2
mov ip, #1
b 1f
ARM_FUNC_START cmpsf2
ARM_FUNC_ALIAS nesf2 cmpsf2
ARM_FUNC_ALIAS eqsf2 cmpsf2
mov ip, #1 @ how should we specify unordered here?
1: str ip, [sp, #-4]!
.cfi_adjust_cfa_offset 4 @ CFA is now sp + previousOffset + 4.
@ We're not adding CFI for ip as it's pushed into the stack only because
@ it may be popped off later as a return value (i.e. we're not preserving
@ it anyways).
@ Trap any INF/NAN first.
mov r2, r0, lsl #1
mov r3, r1, lsl #1
mvns ip, r2, asr #24
do_it ne
COND(mvn,s,ne) ip, r3, asr #24
beq 3f
.cfi_remember_state
@ Save the current CFI state. This is done because the branch is conditional,
@ and if we don't take it we'll issue a .cfi_adjust_cfa_offset and return.
@ If we do take it, however, the .cfi_adjust_cfa_offset from the non-branch
@ code will affect the branch code as well. To avoid this we'll restore
@ the current state before executing the branch code.
@ Compare values.
@ Note that 0.0 is equal to -0.0.
2: add sp, sp, #4
.cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag
do_it ne
teqne r0, r1 @ if not 0 compare sign
do_it pl
COND(sub,s,pl) r0, r2, r3 @ if same sign compare values, set r0
@ Result:
do_it hi
movhi r0, r1, asr #31
do_it lo
mvnlo r0, r1, asr #31
do_it ne
orrne r0, r0, #1
RET
3: @ Look for a NAN.
@ Restore the previous CFI state (i.e. keep the CFI state as it was
@ before the branch).
.cfi_restore_state
mvns ip, r2, asr #24
bne 4f
movs ip, r0, lsl #9
bne 5f @ r0 is NAN
4: mvns ip, r3, asr #24
bne 2b
movs ip, r1, lsl #9
beq 2b @ r1 is not NAN
5: ldr r0, [sp], #4 @ return unordered code.
.cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
RET
CFI_END_FUNCTION
FUNC_END gesf2
FUNC_END gtsf2
FUNC_END lesf2
FUNC_END ltsf2
FUNC_END nesf2
FUNC_END eqsf2
FUNC_END cmpsf2
ARM_FUNC_START aeabi_cfrcmple
CFI_START_FUNCTION
mov ip, r0
mov r0, r1
mov r1, ip
b 6f
ARM_FUNC_START aeabi_cfcmpeq
ARM_FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq
@ The status-returning routines are required to preserve all
@ registers except ip, lr, and cpsr.
6: do_push {r0, r1, r2, r3, lr}
.cfi_adjust_cfa_offset 20 @ CFA is at sp + previousOffset + 20
.cfi_rel_offset r0, 0 @ Registers are saved from sp to sp + 16
.cfi_rel_offset r1, 4
.cfi_rel_offset r2, 8
.cfi_rel_offset r3, 12
.cfi_rel_offset lr, 16
ARM_CALL cmpsf2
@ Set the Z flag correctly, and the C flag unconditionally.
cmp r0, #0
@ Clear the C flag if the return value was -1, indicating
@ that the first operand was smaller than the second.
do_it mi
cmnmi r0, #0
RETLDM "r0, r1, r2, r3"
CFI_END_FUNCTION
FUNC_END aeabi_cfcmple
FUNC_END aeabi_cfcmpeq
FUNC_END aeabi_cfrcmple
ARM_FUNC_START aeabi_fcmpeq
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it eq, e
moveq r0, #1 @ Equal to.
movne r0, #0 @ Less than, greater than, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmpeq
ARM_FUNC_START aeabi_fcmplt
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it cc, e
movcc r0, #1 @ Less than.
movcs r0, #0 @ Equal to, greater than, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmplt
ARM_FUNC_START aeabi_fcmple
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it ls, e
movls r0, #1 @ Less than or equal to.
movhi r0, #0 @ Greater than or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmple
ARM_FUNC_START aeabi_fcmpge
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfrcmple
do_it ls, e
movls r0, #1 @ Operand 2 is less than or equal to operand 1.
movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmpge
ARM_FUNC_START aeabi_fcmpgt
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfrcmple
do_it cc, e
movcc r0, #1 @ Operand 2 is less than operand 1.
movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
@ or they are unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmpgt
#endif /* L_cmpsf2 */
#ifdef L_arm_unordsf2
ARM_FUNC_START unordsf2
ARM_FUNC_ALIAS aeabi_fcmpun unordsf2
CFI_START_FUNCTION
mov r2, r0, lsl #1
mov r3, r1, lsl #1
mvns ip, r2, asr #24
bne 1f
movs ip, r0, lsl #9
bne 3f @ r0 is NAN
1: mvns ip, r3, asr #24
bne 2f
movs ip, r1, lsl #9
bne 3f @ r1 is NAN
2: mov r0, #0 @ arguments are ordered.
RET
3: mov r0, #1 @ arguments are unordered.
RET
CFI_END_FUNCTION
FUNC_END aeabi_fcmpun
FUNC_END unordsf2
#endif /* L_unordsf2 */
#ifdef L_arm_fixsfsi
ARM_FUNC_START fixsfsi
ARM_FUNC_ALIAS aeabi_f2iz fixsfsi
CFI_START_FUNCTION
@ check exponent range.
mov r2, r0, lsl #1
cmp r2, #(127 << 24)
bcc 1f @ value is too small
mov r3, #(127 + 31)
subs r2, r3, r2, lsr #24
bls 2f @ value is too large
@ scale value
mov r3, r0, lsl #8
orr r3, r3, #0x80000000
tst r0, #0x80000000 @ the sign bit
shift1 lsr, r0, r3, r2
do_it ne
rsbne r0, r0, #0
RET
1: mov r0, #0
RET
2: cmp r2, #(127 + 31 - 0xff)
bne 3f
movs r2, r0, lsl #9
bne 4f @ r0 is NAN.
3: ands r0, r0, #0x80000000 @ the sign bit
do_it eq
moveq r0, #0x7fffffff @ the maximum signed positive si
RET
4: mov r0, #0 @ What should we convert NAN to?
RET
CFI_END_FUNCTION
FUNC_END aeabi_f2iz
FUNC_END fixsfsi
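@ A C model of the conversion above (a sketch; out-of-range behaviour is
@ undefined in C, the choices below are simply what this code does):
@     if (isnan (x))            return 0;
@     if (x >=  2147483648.0f)  return INT_MAX;
@     if (x <  -2147483648.0f)  return INT_MIN;
@     return (int) x;           /* truncate toward zero */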
#endif /* L_fixsfsi */
#ifdef L_arm_fixunssfsi
ARM_FUNC_START fixunssfsi
ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi
CFI_START_FUNCTION
@ check exponent range.
movs r2, r0, lsl #1
bcs 1f @ value is negative
cmp r2, #(127 << 24)
bcc 1f @ value is too small
mov r3, #(127 + 31)
subs r2, r3, r2, lsr #24
bmi 2f @ value is too large
@ scale the value
mov r3, r0, lsl #8
orr r3, r3, #0x80000000
shift1 lsr, r0, r3, r2
RET
1: mov r0, #0
RET
2: cmp r2, #(127 + 31 - 0xff)
bne 3f
movs r2, r0, lsl #9
bne 4f @ r0 is NAN.
3: mov r0, #0xffffffff @ maximum unsigned si
RET
4: mov r0, #0 @ What should we convert NAN to?
RET
CFI_END_FUNCTION
FUNC_END aeabi_f2uiz
FUNC_END fixunssfsi
#endif /* L_fixunssfsi */
|
4ms/metamodule-plugin-sdk
| 2,413
|
plugin-libc/libgcc/config/arm/crti.S
|
# Copyright (C) 2001-2022 Free Software Foundation, Inc.
# Written By Nick Clifton
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
# This file just makes a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
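# The overall picture (a sketch of how the linker assembles the pieces):
#
#   _init:  FUNC_START prologue          # from crti.o (this file)
#           ...user .init fragments...   # from other objects
#           FUNC_END epilogue + return   # from crtn.o
#
# so crti.o must come first and crtn.o last on the link line.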
#ifdef __ELF__
#define TYPE(x) .type x,function
#else
#define TYPE(x)
#endif
#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file. */
/* Tag_ABI_align_needed: This code does not require 8-byte
alignment from the caller. */
/* .eabi_attribute 24, 0 -- default setting. */
/* Tag_ABI_align_preserved: This code preserves 8-byte
alignment in any callee. */
.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
# Note - this macro is complemented by the FUNC_END macro
# in crtn.S. If you change this macro you must also change
# that macro to match.
.macro FUNC_START
#ifdef __thumb__
.thumb
push {r3, r4, r5, r6, r7, lr}
#else
.arm
# Create a stack frame and save any call-preserved registers
mov ip, sp
stmdb sp!, {r3, r4, r5, r6, r7, r8, r9, sl, fp, ip, lr, pc}
sub fp, ip, #4
#endif
.endm
.section ".init"
.align 2
.global _init
#ifdef __thumb__
.thumb_func
#endif
TYPE(_init)
_init:
FUNC_START
.section ".fini"
.align 2
.global _fini
#ifdef __thumb__
.thumb_func
#endif
TYPE(_fini)
_fini:
FUNC_START
# end of crti.S
|
4ms/metamodule-plugin-sdk
| 32,761
|
plugin-libc/libgcc/config/arm/ieee754-df.S
|
/* ieee754-df.S double-precision floating point support for ARM
Copyright (C) 2003-2022 Free Software Foundation, Inc.
Contributed by Nicolas Pitre (nico@fluxnic.net)
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* Notes:
*
* The goal of this code is to be as fast as possible. This is
* not meant to be easy to understand for the casual reader.
* For slightly simpler code please see the single precision version
* of this file.
*
* Only the default rounding mode is intended for best performance.
* Exceptions aren't supported yet, but that can be added quite easily
* if necessary without impacting performance.
*
* In the CFI related comments, 'previousOffset' refers to the previous offset
* from sp used to compute the CFA.
*/
.cfi_sections .debug_frame
#ifndef __ARMEB__
#define xl r0
#define xh r1
#define yl r2
#define yh r3
#else
#define xh r0
#define xl r1
#define yh r2
#define yl r3
#endif
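/* With the default little-endian layout this means (sketch): r1 (xh)
   holds the sign, the 11-bit exponent and the top 20 mantissa bits,
   and r0 (xl) the low 32 mantissa bits; big-endian swaps the register
   assignment, not the field layout. */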
#ifdef L_arm_negdf2
ARM_FUNC_START negdf2
ARM_FUNC_ALIAS aeabi_dneg negdf2
CFI_START_FUNCTION
@ flip sign bit
eor xh, xh, #0x80000000
RET
CFI_END_FUNCTION
FUNC_END aeabi_dneg
FUNC_END negdf2
#endif
#ifdef L_arm_addsubdf3
ARM_FUNC_START aeabi_drsub
CFI_START_FUNCTION
eor xh, xh, #0x80000000 @ flip sign bit of first arg
b 1f
ARM_FUNC_START subdf3
ARM_FUNC_ALIAS aeabi_dsub subdf3
eor yh, yh, #0x80000000 @ flip sign bit of second arg
#if defined(__INTERWORKING_STUBS__)
b 1f @ Skip Thumb-code prologue
#endif
ARM_FUNC_START adddf3
ARM_FUNC_ALIAS aeabi_dadd adddf3
1: do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 8
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
@ Look for zeroes, equal values, INF, or NAN.
shift1 lsl, r4, xh, #1
shift1 lsl, r5, yh, #1
teq r4, r5
do_it eq
teqeq xl, yl
do_it ne, ttt
COND(orr,s,ne) ip, r4, xl
COND(orr,s,ne) ip, r5, yl
COND(mvn,s,ne) ip, r4, asr #21
COND(mvn,s,ne) ip, r5, asr #21
beq LSYM(Lad_s)
@ Compute exponent difference. Make largest exponent in r4,
@ corresponding arg in xh-xl, and positive exponent difference in r5.
shift1 lsr, r4, r4, #21
rsbs r5, r4, r5, lsr #21
do_it lt
rsblt r5, r5, #0
ble 1f
add r4, r4, r5
eor yl, xl, yl
eor yh, xh, yh
eor xl, yl, xl
eor xh, yh, xh
eor yl, xl, yl
eor yh, xh, yh
1:
@ If exponent difference is too large, return largest argument
@ already in xh-xl. We need up to 54 bits to handle proper rounding
@ of 0x1p54 - 1.1.
cmp r5, #54
do_it hi
RETLDM "r4, r5" hi
@ Convert mantissa to signed integer.
tst xh, #0x80000000
mov xh, xh, lsl #12
mov ip, #0x00100000
orr xh, ip, xh, lsr #12
beq 1f
#if defined(__thumb2__)
negs xl, xl
sbc xh, xh, xh, lsl #1
#else
rsbs xl, xl, #0
rsc xh, xh, #0
#endif
1:
tst yh, #0x80000000
mov yh, yh, lsl #12
orr yh, ip, yh, lsr #12
beq 1f
#if defined(__thumb2__)
negs yl, yl
sbc yh, yh, yh, lsl #1
#else
rsbs yl, yl, #0
rsc yh, yh, #0
#endif
1:
@ If exponent == difference, one or both args were denormalized.
@ Since this is not a common case, rescale them out of line.
teq r4, r5
beq LSYM(Lad_d)
@ CFI note: we're lucky that the branches to Lad_* that appear after this
@ function have a CFI state that's exactly the same as the one we're in at this
@ point. Otherwise the CFI would change to a different state after the branch,
@ which would be disastrous for backtracing.
LSYM(Lad_x):
@ Compensate for the exponent overlapping the mantissa MSB added later
sub r4, r4, #1
@ Shift yh-yl right per r5, add to xh-xl, keep leftover bits into ip.
rsbs lr, r5, #32
blt 1f
shift1 lsl, ip, yl, lr
shiftop adds xl xl yl lsr r5 yl
adc xh, xh, #0
shiftop adds xl xl yh lsl lr yl
shiftop adcs xh xh yh asr r5 yh
b 2f
1: sub r5, r5, #32
add lr, lr, #32
cmp yl, #1
shift1 lsl,ip, yh, lr
do_it cs
orrcs ip, ip, #2 @ 2 not 1, to allow lsr #1 later
shiftop adds xl xl yh asr r5 yh
adcs xh, xh, yh, asr #31
2:
@ We now have a result in xh-xl-ip.
@ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above)
and r5, xh, #0x80000000
bpl LSYM(Lad_p)
#if defined(__thumb2__)
mov lr, #0
negs ip, ip
sbcs xl, lr, xl
sbc xh, lr, xh
#else
rsbs ip, ip, #0
rscs xl, xl, #0
rsc xh, xh, #0
#endif
@ Determine how to normalize the result.
LSYM(Lad_p):
cmp xh, #0x00100000
bcc LSYM(Lad_a)
cmp xh, #0x00200000
bcc LSYM(Lad_e)
@ Result needs to be shifted right.
movs xh, xh, lsr #1
movs xl, xl, rrx
mov ip, ip, rrx
add r4, r4, #1
@ Make sure we did not bust our exponent.
mov r2, r4, lsl #21
cmn r2, #(2 << 21)
bcs LSYM(Lad_o)
@ Our result is now properly aligned into xh-xl, remaining bits in ip.
@ Round with MSB of ip. If halfway between two numbers, round towards
@ LSB of xl = 0.
@ Pack final result together.
LSYM(Lad_e):
cmp ip, #0x80000000
do_it eq
COND(mov,s,eq) ip, xl, lsr #1
adcs xl, xl, #0
adc xh, xh, r4, lsl #20
orr xh, xh, r5
RETLDM "r4, r5"
@ Result must be shifted left and exponent adjusted.
LSYM(Lad_a):
movs ip, ip, lsl #1
adcs xl, xl, xl
adc xh, xh, xh
subs r4, r4, #1
do_it hs
cmphs xh, #0x00100000
bhs LSYM(Lad_e)
@ No rounding necessary since ip will always be 0 at this point.
LSYM(Lad_l):
#if !defined (__ARM_FEATURE_CLZ)
teq xh, #0
movne r3, #20
moveq r3, #52
moveq xh, xl
moveq xl, #0
mov r2, xh
cmp r2, #(1 << 16)
movhs r2, r2, lsr #16
subhs r3, r3, #16
cmp r2, #(1 << 8)
movhs r2, r2, lsr #8
subhs r3, r3, #8
cmp r2, #(1 << 4)
movhs r2, r2, lsr #4
subhs r3, r3, #4
cmp r2, #(1 << 2)
subhs r3, r3, #2
sublo r3, r3, r2, lsr #1
sub r3, r3, r2, lsr #3
#else
teq xh, #0
do_it eq, t
moveq xh, xl
moveq xl, #0
clz r3, xh
do_it eq
addeq r3, r3, #32
sub r3, r3, #11
#endif
@ determine how to shift the value.
subs r2, r3, #32
bge 2f
adds r2, r2, #12
ble 1f
@ shift value left 21 to 31 bits, or actually right 11 to 1 bits
@ since a register switch happened above.
add ip, r2, #20
rsb r2, r2, #12
shift1 lsl, xl, xh, ip
shift1 lsr, xh, xh, r2
b 3f
@ actually shift value left 1 to 20 bits, which might also represent
@ 32 to 52 bits if counting the register switch that happened earlier.
1: add r2, r2, #20
2: do_it le
rsble ip, r2, #32
shift1 lsl, xh, xh, r2
#if defined(__thumb2__)
lsr ip, xl, ip
itt le
orrle xh, xh, ip
lslle xl, xl, r2
#else
orrle xh, xh, xl, lsr ip
movle xl, xl, lsl r2
#endif
@ adjust exponent accordingly.
3: subs r4, r4, r3
do_it ge, tt
addge xh, xh, r4, lsl #20
orrge xh, xh, r5
RETLDM "r4, r5" ge
@ Exponent too small, denormalize result.
@ Find out proper shift value.
mvn r4, r4
subs r4, r4, #31
bge 2f
adds r4, r4, #12
bgt 1f
@ shift result right of 1 to 20 bits, sign is in r5.
add r4, r4, #20
rsb r2, r4, #32
shift1 lsr, xl, xl, r4
shiftop orr xl xl xh lsl r2 yh
shiftop orr xh r5 xh lsr r4 yh
RETLDM "r4, r5"
@ shift result right of 21 to 31 bits, or left 11 to 1 bits after
@ a register switch from xh to xl.
1: rsb r4, r4, #12
rsb r2, r4, #32
shift1 lsr, xl, xl, r2
shiftop orr xl xl xh lsl r4 yh
mov xh, r5
RETLDM "r4, r5"
@ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
@ from xh to xl.
2: shift1 lsr, xl, xh, r4
mov xh, r5
RETLDM "r4, r5"
@ Adjust exponents for denormalized arguments.
@ Note that r4 must not remain equal to 0.
LSYM(Lad_d):
teq r4, #0
eor yh, yh, #0x00100000
do_it eq, te
eoreq xh, xh, #0x00100000
addeq r4, r4, #1
subne r5, r5, #1
b LSYM(Lad_x)
LSYM(Lad_s):
mvns ip, r4, asr #21
do_it ne
COND(mvn,s,ne) ip, r5, asr #21
beq LSYM(Lad_i)
teq r4, r5
do_it eq
teqeq xl, yl
beq 1f
@ Result is x + 0.0 = x or 0.0 + y = y.
orrs ip, r4, xl
do_it eq, t
moveq xh, yh
moveq xl, yl
RETLDM "r4, r5"
1: teq xh, yh
@ Result is x - x = 0.
do_it ne, tt
movne xh, #0
movne xl, #0
RETLDM "r4, r5" ne
@ Result is x + x = 2x.
movs ip, r4, lsr #21
bne 2f
movs xl, xl, lsl #1
adcs xh, xh, xh
do_it cs
orrcs xh, xh, #0x80000000
RETLDM "r4, r5"
2: adds r4, r4, #(2 << 21)
do_it cc, t
addcc xh, xh, #(1 << 20)
RETLDM "r4, r5" cc
and r5, xh, #0x80000000
@ Overflow: return INF.
LSYM(Lad_o):
orr xh, r5, #0x7f000000
orr xh, xh, #0x00f00000
mov xl, #0
RETLDM "r4, r5"
@ At least one of x or y is INF/NAN.
@ if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
@ if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
@ if either is NAN: return NAN
@ if opposite sign: return NAN
@ otherwise return xh-xl (which is INF or -INF)
LSYM(Lad_i):
mvns ip, r4, asr #21
do_it ne, te
movne xh, yh
movne xl, yl
COND(mvn,s,eq) ip, r5, asr #21
do_it ne, t
movne yh, xh
movne yl, xl
orrs r4, xl, xh, lsl #12
do_it eq, te
COND(orr,s,eq) r5, yl, yh, lsl #12
teqeq xh, yh
orrne xh, xh, #0x00080000 @ quiet NAN
RETLDM "r4, r5"
CFI_END_FUNCTION
FUNC_END aeabi_dsub
FUNC_END subdf3
FUNC_END aeabi_dadd
FUNC_END adddf3
ARM_FUNC_START floatunsidf
ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
CFI_START_FUNCTION
teq r0, #0
do_it eq, t
moveq r1, #0
RETc(eq)
do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
mov r4, #0x400 @ initial exponent
add r4, r4, #(52-1 - 1)
mov r5, #0 @ sign bit is 0
.ifnc xl, r0
mov xl, r0
.endif
mov xh, #0
b LSYM(Lad_l)
CFI_END_FUNCTION
FUNC_END aeabi_ui2d
FUNC_END floatunsidf
ARM_FUNC_START floatsidf
ARM_FUNC_ALIAS aeabi_i2d floatsidf
CFI_START_FUNCTION
teq r0, #0
do_it eq, t
moveq r1, #0
RETc(eq)
do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
mov r4, #0x400 @ initial exponent
add r4, r4, #(52-1 - 1)
ands r5, r0, #0x80000000 @ sign bit in r5
do_it mi
rsbmi r0, r0, #0 @ absolute value
.ifnc xl, r0
mov xl, r0
.endif
mov xh, #0
b LSYM(Lad_l)
CFI_END_FUNCTION
FUNC_END aeabi_i2d
FUNC_END floatsidf
ARM_FUNC_START extendsfdf2
ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
CFI_START_FUNCTION
movs r2, r0, lsl #1 @ toss sign bit
mov xh, r2, asr #3 @ stretch exponent
mov xh, xh, rrx @ retrieve sign bit
mov xl, r2, lsl #28 @ retrieve remaining bits
do_it ne, ttt
COND(and,s,ne) r3, r2, #0xff000000 @ isolate exponent
teqne r3, #0xff000000 @ if not 0, check if INF or NAN
eorne xh, xh, #0x38000000 @ fixup exponent otherwise.
RETc(ne) @ and return it.
bics r2, r2, #0xff000000 @ isolate mantissa
do_it eq @ if 0, that is ZERO or INF,
RETc(eq) @ we are done already.
teq r3, #0xff000000 @ check for NAN
do_it eq, t
orreq xh, xh, #0x00080000 @ change to quiet NAN
RETc(eq) @ and return it.
@ value was denormalized. We can normalize it now.
do_push {r4, r5, lr}
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
mov r4, #0x380 @ setup corresponding exponent
and r5, xh, #0x80000000 @ move sign bit in r5
bic xh, xh, #0x80000000
b LSYM(Lad_l)
CFI_END_FUNCTION
FUNC_END aeabi_f2d
FUNC_END extendsfdf2
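@ (Exponent rebias sketch: double_exp = single_exp - 127 + 1023, and
@  (1023 - 127) << 20 == 0x38000000, the constant XORed in above; the
@  denormal path instead seeds r4 with 0x380 == 896 and renormalizes.)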
ARM_FUNC_START floatundidf
ARM_FUNC_ALIAS aeabi_ul2d floatundidf
CFI_START_FUNCTION
.cfi_remember_state @ Save the current CFA state.
orrs r2, r0, r1
do_it eq
RETc(eq)
do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
mov r5, #0
b 2f
ARM_FUNC_START floatdidf
ARM_FUNC_ALIAS aeabi_l2d floatdidf
.cfi_restore_state
@ Restore the CFI state we saved above. If we didn't do this then the
@ following instructions would have the CFI state that was set by the
@ offset adjustments made in floatundidf.
orrs r2, r0, r1
do_it eq
RETc(eq)
do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 8
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
ands r5, ah, #0x80000000 @ sign bit in r5
bpl 2f
#if defined(__thumb2__)
negs al, al
sbc ah, ah, ah, lsl #1
#else
rsbs al, al, #0
rsc ah, ah, #0
#endif
2:
mov r4, #0x400 @ initial exponent
add r4, r4, #(52-1 - 1)
@ If FP word order does not match integer word order, swap the words.
.ifnc xh, ah
mov ip, al
mov xh, ah
mov xl, ip
.endif
movs ip, xh, lsr #22
beq LSYM(Lad_p)
@ The value is too big. Scale it down a bit...
mov r2, #3
movs ip, ip, lsr #3
do_it ne
addne r2, r2, #3
movs ip, ip, lsr #3
do_it ne
addne r2, r2, #3
add r2, r2, ip, lsr #3
rsb r3, r2, #32
shift1 lsl, ip, xl, r3
shift1 lsr, xl, xl, r2
shiftop orr xl xl xh lsl r3 lr
shift1 lsr, xh, xh, r2
add r4, r4, r2
b LSYM(Lad_p)
CFI_END_FUNCTION
FUNC_END floatdidf
FUNC_END aeabi_l2d
FUNC_END floatundidf
FUNC_END aeabi_ul2d
#endif /* L_addsubdf3 */
#if defined(L_arm_muldf3) || defined(L_arm_muldivdf3)
@ Define multiplication as weak in _arm_muldf3.o so that it can be overridden
@ by the global definition in _arm_muldivdf3.o. This allows a program only
@ using multiplication to take the weak definition which does not contain the
@ division code. Programs using only division or both division and
@ multiplication will pull _arm_muldivdf3.o from which both the multiplication
@ and division are taken thanks to the override.
#ifdef L_arm_muldf3
WEAK muldf3
WEAK aeabi_dmul
#endif
ARM_FUNC_START muldf3
ARM_FUNC_ALIAS aeabi_dmul muldf3
CFI_START_FUNCTION
do_push {r4, r5, r6, lr} @ sp -= 16
.cfi_adjust_cfa_offset 16 @ CFA is now sp + previousOffset + 16
.cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 12.
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset lr, 12
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
orr ip, ip, #0x700
ands r4, ip, xh, lsr #20
do_it ne, tte
COND(and,s,ne) r5, ip, yh, lsr #20
teqne r4, ip
teqne r5, ip
bleq LSYM(Lml_s)
@ Add exponents together
add r4, r4, r5
@ Determine final sign.
eor r6, xh, yh
@ Convert mantissa to unsigned integer.
@ If power of two, branch to a separate path.
bic xh, xh, ip, lsl #21
bic yh, yh, ip, lsl #21
orrs r5, xl, xh, lsl #12
do_it ne
COND(orr,s,ne) r5, yl, yh, lsl #12
orr xh, xh, #0x00100000
orr yh, yh, #0x00100000
beq LSYM(Lml_1)
@ Here is the actual multiplication.
@ This code works on architecture versions >= 4
umull ip, lr, xl, yl
mov r5, #0
umlal lr, r5, xh, yl
and yl, r6, #0x80000000
umlal lr, r5, xl, yh
mov r6, #0
umlal r5, r6, xh, yh
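@ (A sketch of the 64x64->128 schoolbook product via 32-bit halves:
@    x * y = xl*yl + ((xh*yl + xl*yh) << 32) + ((xh*yh) << 64)
@  with the partial sums landing in ip (low), lr, r5, r6 (high); the
@  middle umlals cannot carry out of 64 bits here because xh and yh
@  hold mantissa top words and are < 2^21.)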
@ The LSBs in ip are only significant for the final rounding.
@ Fold them into lr.
teq ip, #0
do_it ne
orrne lr, lr, #1
@ Adjust result upon the MSB position.
sub r4, r4, #0xff
cmp r6, #(1 << (20-11))
sbc r4, r4, #0x300
bcs 1f
movs lr, lr, lsl #1
adcs r5, r5, r5
adc r6, r6, r6
1:
@ Shift to final position, add sign to result.
orr xh, yl, r6, lsl #11
orr xh, xh, r5, lsr #21
mov xl, r5, lsl #11
orr xl, xl, lr, lsr #21
mov lr, lr, lsl #11
@ Check exponent range for under/overflow.
subs ip, r4, #(254 - 1)
do_it hi
cmphi ip, #0x700
bhi LSYM(Lml_u)
@ Round the result, merge final exponent.
cmp lr, #0x80000000
do_it eq
COND(mov,s,eq) lr, xl, lsr #1
adcs xl, xl, #0
adc xh, xh, r4, lsl #20
RETLDM "r4, r5, r6"
@ Multiplication by 0x1p*: let's shortcut a lot of code.
LSYM(Lml_1):
and r6, r6, #0x80000000
orr xh, r6, xh
orr xl, xl, yl
eor xh, xh, yh
subs r4, r4, ip, lsr #1
do_it gt, tt
COND(rsb,s,gt) r5, r4, ip
orrgt xh, xh, r4, lsl #20
RETLDM "r4, r5, r6" gt
@ Under/overflow: fix things up for the code below.
orr xh, xh, #0x00100000
mov lr, #0
subs r4, r4, #1
LSYM(Lml_u):
@ Overflow?
bgt LSYM(Lml_o)
@ Check if denormalized result is possible, otherwise return signed 0.
cmn r4, #(53 + 1)
do_it le, tt
movle xl, #0
bicle xh, xh, #0x7fffffff
RETLDM "r4, r5, r6" le
@ Find out proper shift value.
rsb r4, r4, #0
subs r4, r4, #32
bge 2f
adds r4, r4, #12
bgt 1f
@ shift result right of 1 to 20 bits, preserve sign bit, round, etc.
add r4, r4, #20
rsb r5, r4, #32
shift1 lsl, r3, xl, r5
shift1 lsr, xl, xl, r4
shiftop orr xl xl xh lsl r5 r2
and r2, xh, #0x80000000
bic xh, xh, #0x80000000
adds xl, xl, r3, lsr #31
shiftop adc xh r2 xh lsr r4 r6
orrs lr, lr, r3, lsl #1
do_it eq
biceq xl, xl, r3, lsr #31
RETLDM "r4, r5, r6"
@ shift result right of 21 to 31 bits, or left 11 to 1 bits after
@ a register switch from xh to xl. Then round.
1: rsb r4, r4, #12
rsb r5, r4, #32
shift1 lsl, r3, xl, r4
shift1 lsr, xl, xl, r5
shiftop orr xl xl xh lsl r4 r2
bic xh, xh, #0x7fffffff
adds xl, xl, r3, lsr #31
adc xh, xh, #0
orrs lr, lr, r3, lsl #1
do_it eq
biceq xl, xl, r3, lsr #31
RETLDM "r4, r5, r6"
@ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
@ from xh to xl. Leftover bits are in r3-r6-lr for rounding.
2: rsb r5, r4, #32
shiftop orr lr lr xl lsl r5 r2
shift1 lsr, r3, xl, r4
shiftop orr r3 r3 xh lsl r5 r2
shift1 lsr, xl, xh, r4
bic xh, xh, #0x7fffffff
shiftop bic xl xl xh lsr r4 r2
add xl, xl, r3, lsr #31
orrs lr, lr, r3, lsl #1
do_it eq
biceq xl, xl, r3, lsr #31
RETLDM "r4, r5, r6"
@ One or both arguments are denormalized.
@ Scale them leftwards and preserve sign bit.
LSYM(Lml_d):
teq r4, #0
bne 2f
and r6, xh, #0x80000000
1: movs xl, xl, lsl #1
adc xh, xh, xh
tst xh, #0x00100000
do_it eq
subeq r4, r4, #1
beq 1b
orr xh, xh, r6
teq r5, #0
do_it ne
RETc(ne)
2: and r6, yh, #0x80000000
3: movs yl, yl, lsl #1
adc yh, yh, yh
tst yh, #0x00100000
do_it eq
subeq r5, r5, #1
beq 3b
orr yh, yh, r6
RET
LSYM(Lml_s):
@ Isolate the INF and NAN cases away
teq r4, ip
and r5, ip, yh, lsr #20
do_it ne
teqne r5, ip
beq 1f
@ Here, one or more arguments are either denormalized or zero.
orrs r6, xl, xh, lsl #1
do_it ne
COND(orr,s,ne) r6, yl, yh, lsl #1
bne LSYM(Lml_d)
@ Result is 0, but determine sign anyway.
LSYM(Lml_z):
eor xh, xh, yh
and xh, xh, #0x80000000
mov xl, #0
RETLDM "r4, r5, r6"
1: @ One or both args are INF or NAN.
orrs r6, xl, xh, lsl #1
do_it eq, te
moveq xl, yl
moveq xh, yh
COND(orr,s,ne) r6, yl, yh, lsl #1
beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN
teq r4, ip
bne 1f
orrs r6, xl, xh, lsl #12
bne LSYM(Lml_n) @ NAN * <anything> -> NAN
1: teq r5, ip
bne LSYM(Lml_i)
orrs r6, yl, yh, lsl #12
do_it ne, t
movne xl, yl
movne xh, yh
bne LSYM(Lml_n) @ <anything> * NAN -> NAN
@ Result is INF, but we need to determine its sign.
LSYM(Lml_i):
eor xh, xh, yh
@ Overflow: return INF (sign already in xh).
LSYM(Lml_o):
and xh, xh, #0x80000000
orr xh, xh, #0x7f000000
orr xh, xh, #0x00f00000
mov xl, #0
RETLDM "r4, r5, r6"
@ Return a quiet NAN.
LSYM(Lml_n):
orr xh, xh, #0x7f000000
orr xh, xh, #0x00f80000
RETLDM "r4, r5, r6"
CFI_END_FUNCTION
FUNC_END aeabi_dmul
FUNC_END muldf3
#ifdef L_arm_muldivdf3
ARM_FUNC_START divdf3
ARM_FUNC_ALIAS aeabi_ddiv divdf3
CFI_START_FUNCTION
do_push {r4, r5, r6, lr}
.cfi_adjust_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset lr, 12
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
orr ip, ip, #0x700
ands r4, ip, xh, lsr #20
do_it ne, tte
COND(and,s,ne) r5, ip, yh, lsr #20
teqne r4, ip
teqne r5, ip
bleq LSYM(Ldv_s)
@ Subtract divisor exponent from dividend's.
sub r4, r4, r5
@ Preserve final sign into lr.
eor lr, xh, yh
@ Convert mantissa to unsigned integer.
@ Dividend -> r5-r6, divisor -> yh-yl.
orrs r5, yl, yh, lsl #12
mov xh, xh, lsl #12
beq LSYM(Ldv_1)
mov yh, yh, lsl #12
mov r5, #0x10000000
orr yh, r5, yh, lsr #4
orr yh, yh, yl, lsr #24
mov yl, yl, lsl #8
orr r5, r5, xh, lsr #4
orr r5, r5, xl, lsr #24
mov r6, xl, lsl #8
@ Initialize xh with final sign bit.
and xh, lr, #0x80000000
@ Ensure the result will land at a known bit position.
@ Apply exponent bias accordingly.
cmp r5, yh
do_it eq
cmpeq r6, yl
adc r4, r4, #(255 - 2)
add r4, r4, #0x300
bcs 1f
movs yh, yh, lsr #1
mov yl, yl, rrx
1:
@ Perform first subtraction to align result to a nibble.
subs r6, r6, yl
sbc r5, r5, yh
movs yh, yh, lsr #1
mov yl, yl, rrx
mov xl, #0x00100000
mov ip, #0x00080000
@ The actual division loop.
1: subs lr, r6, yl
sbcs lr, r5, yh
do_it cs, tt
subcs r6, r6, yl
movcs r5, lr
orrcs xl, xl, ip
movs yh, yh, lsr #1
mov yl, yl, rrx
subs lr, r6, yl
sbcs lr, r5, yh
do_it cs, tt
subcs r6, r6, yl
movcs r5, lr
orrcs xl, xl, ip, lsr #1
movs yh, yh, lsr #1
mov yl, yl, rrx
subs lr, r6, yl
sbcs lr, r5, yh
do_it cs, tt
subcs r6, r6, yl
movcs r5, lr
orrcs xl, xl, ip, lsr #2
movs yh, yh, lsr #1
mov yl, yl, rrx
subs lr, r6, yl
sbcs lr, r5, yh
do_it cs, tt
subcs r6, r6, yl
movcs r5, lr
orrcs xl, xl, ip, lsr #3
orrs lr, r5, r6
beq 2f
mov r5, r5, lsl #4
orr r5, r5, r6, lsr #28
mov r6, r6, lsl #4
mov yh, yh, lsl #3
orr yh, yh, yl, lsr #29
mov yl, yl, lsl #3
movs ip, ip, lsr #4
bne 1b
@ We are done with a word of the result.
@ Loop again for the low word if this pass was for the high word.
tst xh, #0x00100000
bne 3f
orr xh, xh, xl
mov xl, #0
mov ip, #0x80000000
b 1b
2:
@ Be sure result starts in the high word.
tst xh, #0x00100000
do_it eq, t
orreq xh, xh, xl
moveq xl, #0
3:
@ Check exponent range for under/overflow.
subs ip, r4, #(254 - 1)
do_it hi
cmphi ip, #0x700
bhi LSYM(Lml_u)
@ Round the result, merge final exponent.
subs ip, r5, yh
do_it eq, t
COND(sub,s,eq) ip, r6, yl
COND(mov,s,eq) ip, xl, lsr #1
adcs xl, xl, #0
adc xh, xh, r4, lsl #20
RETLDM "r4, r5, r6"
@ Division by 0x1p*: shortcut a lot of code.
LSYM(Ldv_1):
and lr, lr, #0x80000000
orr xh, lr, xh, lsr #12
adds r4, r4, ip, lsr #1
do_it gt, tt
COND(rsb,s,gt) r5, r4, ip
orrgt xh, xh, r4, lsl #20
RETLDM "r4, r5, r6" gt
orr xh, xh, #0x00100000
mov lr, #0
subs r4, r4, #1
b LSYM(Lml_u)
@ Result might need to be denormalized: put remainder bits
@ in lr for rounding considerations.
LSYM(Ldv_u):
orr lr, r5, r6
b LSYM(Lml_u)
@ One or both arguments are INF, NAN or zero.
LSYM(Ldv_s):
and r5, ip, yh, lsr #20
teq r4, ip
do_it eq
teqeq r5, ip
beq LSYM(Lml_n) @ INF/NAN / INF/NAN -> NAN
teq r4, ip
bne 1f
orrs r4, xl, xh, lsl #12
bne LSYM(Lml_n) @ NAN / <anything> -> NAN
teq r5, ip
bne LSYM(Lml_i) @ INF / <anything> -> INF
mov xl, yl
mov xh, yh
b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN
1: teq r5, ip
bne 2f
orrs r5, yl, yh, lsl #12
beq LSYM(Lml_z) @ <anything> / INF -> 0
mov xl, yl
mov xh, yh
b LSYM(Lml_n) @ <anything> / NAN -> NAN
2: @ If both are nonzero, we need to normalize and resume above.
orrs r6, xl, xh, lsl #1
do_it ne
COND(orr,s,ne) r6, yl, yh, lsl #1
bne LSYM(Lml_d)
@ One or both arguments are 0.
orrs r4, xl, xh, lsl #1
bne LSYM(Lml_i) @ <non_zero> / 0 -> INF
orrs r5, yl, yh, lsl #1
bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
b LSYM(Lml_n) @ 0 / 0 -> NAN
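@ For reference, the dispatch above follows the usual IEEE 754 rules
@ for division; a hedged C-style summary:
@	if (isnan(x) || isnan(y))  return qNaN;
@	if (isinf(x))  return isinf(y) ? qNaN : signed INF;
@	if (isinf(y))  return signed 0;
@	if (y == 0.0)  return (x == 0.0) ? qNaN : signed INF;
@	if (x == 0.0)  return signed 0;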
CFI_END_FUNCTION
FUNC_END aeabi_ddiv
FUNC_END divdf3
#endif /* L_arm_muldivdf3 */
#endif /* L_arm_muldf3 || L_arm_muldivdf3 */
#ifdef L_arm_cmpdf2
@ Note: only r0 (return value) and ip are clobbered here.
ARM_FUNC_START gtdf2
ARM_FUNC_ALIAS gedf2 gtdf2
CFI_START_FUNCTION
mov ip, #-1
b 1f
ARM_FUNC_START ltdf2
ARM_FUNC_ALIAS ledf2 ltdf2
mov ip, #1
b 1f
ARM_FUNC_START cmpdf2
ARM_FUNC_ALIAS nedf2 cmpdf2
ARM_FUNC_ALIAS eqdf2 cmpdf2
mov ip, #1 @ how should we specify unordered here?
1: str ip, [sp, #-4]!
.cfi_adjust_cfa_offset 4 @ CFA is now sp + previousOffset + 4.
@ We're not adding CFI for ip as it's pushed into the stack
@ only because it may be popped off later as a return value
@ (i.e. we're not preserving it anyway).
@ Trap any INF/NAN first.
mov ip, xh, lsl #1
mvns ip, ip, asr #21
mov ip, yh, lsl #1
do_it ne
COND(mvn,s,ne) ip, ip, asr #21
beq 3f
.cfi_remember_state
@ Save the current CFI state. This is done because the branch
@ is conditional, and if we don't take it we'll issue a
@ .cfi_adjust_cfa_offset and return. If we do take it,
@ however, the .cfi_adjust_cfa_offset from the non-branch code
@ will affect the branch code as well. To avoid this we'll
@ restore the current state before executing the branch code.
@ Test for equality. Note that 0.0 is equal to -0.0.
2: add sp, sp, #4
.cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
do_it eq, e
COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
teqne xh, yh @ or xh == yh
do_it eq, tt
teqeq xl, yl @ and xl == yl
moveq r0, #0 @ then equal.
RETc(eq)
@ Clear C flag
cmn r0, #0
@ Compare sign,
teq xh, yh
@ Compare values if same sign
do_it pl
cmppl xh, yh
do_it eq
cmpeq xl, yl
@ Result:
do_it cs, e
movcs r0, yh, asr #31
mvncc r0, yh, asr #31
orr r0, r0, #1
RET
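@ For reference, the ordered path above computes the usual three-way
@ result; a hedged C model:
@	if (a == b) return 0;	/* including 0.0 == -0.0 */
@	return (a < b) ? -1 : 1;
@ NAN operands are diverted to the unordered return code pushed at
@ entry (label 3 below).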
3: @ Look for a NAN.
@ Restore the previous CFI state (i.e. keep the CFI state as it was
@ before the branch).
.cfi_restore_state
mov ip, xh, lsl #1
mvns ip, ip, asr #21
bne 4f
orrs ip, xl, xh, lsl #12
bne 5f @ x is NAN
4: mov ip, yh, lsl #1
mvns ip, ip, asr #21
bne 2b
orrs ip, yl, yh, lsl #12
beq 2b @ y is not NAN
5: ldr r0, [sp], #4 @ unordered return code
.cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
RET
CFI_END_FUNCTION
FUNC_END gedf2
FUNC_END gtdf2
FUNC_END ledf2
FUNC_END ltdf2
FUNC_END nedf2
FUNC_END eqdf2
FUNC_END cmpdf2
ARM_FUNC_START aeabi_cdrcmple
CFI_START_FUNCTION
mov ip, r0
mov r0, r2
mov r2, ip
mov ip, r1
mov r1, r3
mov r3, ip
b 6f
ARM_FUNC_START aeabi_cdcmpeq
ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
@ The status-returning routines are required to preserve all
@ registers except ip, lr, and cpsr.
6: do_push {r0, lr}
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8.
.cfi_rel_offset r0, 0 @ Previous r0 is saved at sp.
.cfi_rel_offset lr, 4 @ Previous lr is saved at sp + 4.
ARM_CALL cmpdf2
@ Set the Z flag correctly, and the C flag unconditionally.
cmp r0, #0
@ Clear the C flag if the return value was -1, indicating
@ that the first operand was smaller than the second.
do_it mi
cmnmi r0, #0
RETLDM "r0"
CFI_END_FUNCTION
FUNC_END aeabi_cdcmple
FUNC_END aeabi_cdcmpeq
FUNC_END aeabi_cdrcmple
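/* For reference: the __aeabi_cdcmp* entry points above return their
   result in the CPSR flags rather than in r0, so callers branch on
   the condition codes directly.  A hedged usage sketch (label names
   are illustrative only):
	bl	__aeabi_cdcmple
	bcc	.Lless		@ C clear -> operand 1 < operand 2
	beq	.Lequal		@ Z set   -> operands compare equal
   With the unordered code used here (see the comment at the cmpdf2
   entry), unordered operands fall on the greater-than side.  */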
ARM_FUNC_START aeabi_dcmpeq
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it eq, e
moveq r0, #1 @ Equal to.
movne r0, #0 @ Less than, greater than, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmpeq
ARM_FUNC_START aeabi_dcmplt
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it cc, e
movcc r0, #1 @ Less than.
movcs r0, #0 @ Equal to, greater than, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmplt
ARM_FUNC_START aeabi_dcmple
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it ls, e
movls r0, #1 @ Less than or equal to.
movhi r0, #0 @ Greater than or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmple
ARM_FUNC_START aeabi_dcmpge
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdrcmple
do_it ls, e
movls r0, #1 @ Operand 2 is less than or equal to operand 1.
movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmpge
ARM_FUNC_START aeabi_dcmpgt
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdrcmple
do_it cc, e
movcc r0, #1 @ Operand 2 is less than operand 1.
movcs r0, #0 @ Operand 2 is greater than or equal to operand 1,
@ or they are unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmpgt
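/* Taken together, the __aeabi_dcmp* wrappers above behave like these
   C predicates (a hedged summary; each returns 0 or 1 in r0, and all
   of them return 0 when either argument is a NAN):
	int __aeabi_dcmpeq(double a, double b);   /* a == b */
	int __aeabi_dcmplt(double a, double b);   /* a <  b */
	int __aeabi_dcmple(double a, double b);   /* a <= b */
	int __aeabi_dcmpge(double a, double b);   /* a >= b */
	int __aeabi_dcmpgt(double a, double b);   /* a >  b */
*/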
#endif /* L_arm_cmpdf2 */
#ifdef L_arm_unorddf2
ARM_FUNC_START unorddf2
ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
.cfi_startproc
mov ip, xh, lsl #1
mvns ip, ip, asr #21
bne 1f
orrs ip, xl, xh, lsl #12
bne 3f @ x is NAN
1: mov ip, yh, lsl #1
mvns ip, ip, asr #21
bne 2f
orrs ip, yl, yh, lsl #12
bne 3f @ y is NAN
2: mov r0, #0 @ arguments are ordered.
RET
3: mov r0, #1 @ arguments are unordered.
RET
.cfi_endproc
FUNC_END aeabi_dcmpun
FUNC_END unorddf2
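/* __unorddf2 (alias __aeabi_dcmpun) backs __builtin_isunordered; a
   hedged C model:
	int __unorddf2(double a, double b) { return isnan(a) || isnan(b); }
   The tests above implement isnan directly on the encoding: a maximal
   exponent with a nonzero mantissa.  */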
#endif /* L_arm_unorddf2 */
#ifdef L_arm_fixdfsi
ARM_FUNC_START fixdfsi
ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
CFI_START_FUNCTION
@ check exponent range.
mov r2, xh, lsl #1
adds r2, r2, #(1 << 21)
bcs 2f @ value is INF or NAN
bpl 1f @ value is too small
mov r3, #(0xfffffc00 + 31)
subs r2, r3, r2, asr #21
bls 3f @ value is too large
@ scale value
mov r3, xh, lsl #11
orr r3, r3, #0x80000000
orr r3, r3, xl, lsr #21
tst xh, #0x80000000 @ the sign bit
shift1 lsr, r0, r3, r2
do_it ne
rsbne r0, r0, #0
RET
1: mov r0, #0
RET
2: orrs xl, xl, xh, lsl #12
bne 4f @ x is NAN.
3: ands r0, xh, #0x80000000 @ the sign bit
do_it eq
moveq r0, #0x7fffffff @ maximum signed positive si
RET
4: mov r0, #0 @ How should we convert NAN?
RET
CFI_END_FUNCTION
FUNC_END aeabi_d2iz
FUNC_END fixdfsi
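/* A hedged C model of the conversion above: double -> int32 with
   truncation toward zero, saturation on overflow, and NAN mapped to
   zero (matching the "How should we convert NAN?" choice in the code):
	int32_t fix(double x) {
	    if (x != x)              return 0;          /* NAN */
	    if (x >=  2147483648.0)  return INT32_MAX;
	    if (x <  -2147483648.0)  return INT32_MIN;
	    return (int32_t)x;
	}
*/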
#endif /* L_arm_fixdfsi */
#ifdef L_arm_fixunsdfsi
ARM_FUNC_START fixunsdfsi
ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
CFI_START_FUNCTION
@ check exponent range.
movs r2, xh, lsl #1
bcs 1f @ value is negative
adds r2, r2, #(1 << 21)
bcs 2f @ value is INF or NAN
bpl 1f @ value is too small
mov r3, #(0xfffffc00 + 31)
subs r2, r3, r2, asr #21
bmi 3f @ value is too large
@ scale value
mov r3, xh, lsl #11
orr r3, r3, #0x80000000
orr r3, r3, xl, lsr #21
shift1 lsr, r0, r3, r2
RET
1: mov r0, #0
RET
2: orrs xl, xl, xh, lsl #12
bne 4f @ value is NAN.
3: mov r0, #0xffffffff @ maximum unsigned si
RET
4: mov r0, #0 @ How should we convert NAN?
RET
CFI_END_FUNCTION
FUNC_END aeabi_d2uiz
FUNC_END fixunsdfsi
#endif /* L_arm_fixunsdfsi */
#ifdef L_arm_truncdfsf2
ARM_FUNC_START truncdfsf2
ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
CFI_START_FUNCTION
@ check exponent range.
mov r2, xh, lsl #1
subs r3, r2, #((1023 - 127) << 21)
do_it cs, t
COND(sub,s,cs) ip, r3, #(1 << 21)
COND(rsb,s,cs) ip, ip, #(254 << 21)
bls 2f @ value is out of range
1: @ shift and round mantissa
and ip, xh, #0x80000000
mov r2, xl, lsl #3
orr xl, ip, xl, lsr #29
cmp r2, #0x80000000
adc r0, xl, r3, lsl #2
do_it eq
biceq r0, r0, #1
RET
2: @ either overflow or underflow
tst xh, #0x40000000
bne 3f @ overflow
@ check if denormalized value is possible
adds r2, r3, #(23 << 21)
do_it lt, t
andlt r0, xh, #0x80000000 @ too small, return signed 0.
RETc(lt)
@ denormalize value so we can resume with the code above afterwards.
orr xh, xh, #0x00100000
mov r2, r2, lsr #21
rsb r2, r2, #24
rsb ip, r2, #32
#if defined(__thumb2__)
lsls r3, xl, ip
#else
movs r3, xl, lsl ip
#endif
shift1 lsr, xl, xl, r2
do_it ne
orrne xl, xl, #1 @ fold r3 for rounding considerations.
mov r3, xh, lsl #11
mov r3, r3, lsr #11
shiftop orr xl xl r3 lsl ip ip
shift1 lsr, r3, r3, r2
mov r3, r3, lsl #1
b 1b
3: @ check for NAN
mvns r3, r2, asr #21
bne 5f @ simple overflow
orrs r3, xl, xh, lsl #12
do_it ne, tt
movne r0, #0x7f000000
orrne r0, r0, #0x00c00000
RETc(ne) @ return NAN
5: @ return INF with sign
and r0, xh, #0x80000000
orr r0, r0, #0x7f000000
orr r0, r0, #0x00800000
RET
CFI_END_FUNCTION
FUNC_END aeabi_d2f
FUNC_END truncdfsf2
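/* The rounding step at label 1 above implements round-to-nearest,
   ties-to-even: the carry from comparing the discarded bits against
   the halfway point is added in with adc, and biceq clears the low
   result bit on an exact tie.  A hedged C sketch for a mantissa m
   with k discarded low bits:
	half = 1u << (k - 1);
	rest = m & ((1u << k) - 1);
	m >>= k;
	if (rest > half || (rest == half && (m & 1)))
	    m += 1;
*/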
#endif /* L_arm_truncdfsf2 */
/* ==== File: plugin-libc/libgcc/config/arm/libunwind.S ==== */
/* Support functions for the unwinder.
Copyright (C) 2003-2022 Free Software Foundation, Inc.
Contributed by Paul Brook
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file. */
/* Tag_ABI_align_needed: This code does not require 8-byte
alignment from the caller. */
/* .eabi_attribute 24, 0 -- default setting. */
/* Tag_ABI_align_preserved: This code preserves 8-byte
alignment in any callee. */
.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
#ifndef __symbian__
#include "lib1funcs.S"
.macro UNPREFIX name
.global SYM (\name)
EQUIV SYM (\name), SYM (__\name)
.endm
#if (__ARM_ARCH == 4)
/* Some coprocessors require armv5t. We know this code will never be run on
other cpus. Tell gas to allow armv5t, but only mark the objects as armv4.
*/
.arch armv5t
#ifdef __ARM_ARCH_4T__
.object_arch armv4t
#else
.object_arch armv4
#endif
#endif
#if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1
/* r0 points to a 16-word block. Upload these values to the actual core
state. */
FUNC_START restore_core_regs
movs r1, r0
adds r1, r1, #52
ldmia r1!, {r3, r4, r5}
subs r3, r3, #4
mov ip, r3
str r5, [r3]
mov lr, r4
/* Restore r8-r11. */
movs r1, r0
adds r1, r1, #32
ldmia r1!, {r2, r3, r4, r5}
mov r8, r2
mov r9, r3
mov sl, r4
mov fp, r5
movs r1, r0
adds r1, r1, #8
ldmia r1!, {r2, r3, r4, r5, r6, r7}
ldr r1, [r0, #4]
ldr r0, [r0]
mov sp, ip
pop {pc}
FUNC_END restore_core_regs
UNPREFIX restore_core_regs
/* ARMV6M does not have coprocessors, so these should never be used. */
FUNC_START gnu_Unwind_Restore_VFP
RET
/* Store VFP registers d0-d15 to the address in r0. */
FUNC_START gnu_Unwind_Save_VFP
RET
/* Load VFP registers d0-d15 from the address in r0.
Use this to load from FSTMD format. */
FUNC_START gnu_Unwind_Restore_VFP_D
RET
/* Store VFP registers d0-d15 to the address in r0.
Use this to store in FLDMD format. */
FUNC_START gnu_Unwind_Save_VFP_D
RET
/* Load VFP registers d16-d31 from the address in r0.
Use this to load from FSTMD (=VSTM) format. Needs VFPv3. */
FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31
RET
/* Store VFP registers d16-d31 to the address in r0.
Use this to store in FLDMD (=VLDM) format. Needs VFPv3. */
FUNC_START gnu_Unwind_Save_VFP_D_16_to_31
RET
FUNC_START gnu_Unwind_Restore_WMMXD
RET
FUNC_START gnu_Unwind_Save_WMMXD
RET
FUNC_START gnu_Unwind_Restore_WMMXC
RET
FUNC_START gnu_Unwind_Save_WMMXC
RET
.macro UNWIND_WRAPPER name nargs
FUNC_START \name
/* Create a phase2_vrs structure. */
/* Save r0 in the PC slot so we can use it as a scratch register. */
push {r0}
add r0, sp, #4
push {r0, lr} /* Push original SP and LR. */
/* Make space for r8-r12. */
sub sp, sp, #20
/* Save low registers. */
push {r0, r1, r2, r3, r4, r5, r6, r7}
/* Save high registers. */
add r0, sp, #32
mov r1, r8
mov r2, r9
mov r3, sl
mov r4, fp
mov r5, ip
stmia r0!, {r1, r2, r3, r4, r5}
/* Restore original low register values. */
add r0, sp, #4
ldmia r0!, {r1, r2, r3, r4, r5}
/* Restore original r0. */
ldr r0, [sp, #60]
str r0, [sp]
/* Demand-save flags, plus an extra word for alignment. */
movs r3, #0
push {r2, r3}
/* Point r1 at the block. Pass r[0..nargs) unchanged. */
add r\nargs, sp, #4
bl SYM (__gnu\name)
ldr r3, [sp, #64]
add sp, sp, #72
bx r3
FUNC_END \name
UNPREFIX \name
.endm
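/* The block built above matches the unwinder's phase2_vrs layout in
   unwind-arm-common.inc: one demand-save flags word followed by the
   sixteen core registers r0-r15, which is why sp + 4 (skipping the
   alignment pad) is passed as the block pointer.  A hedged C view:
	struct phase2_vrs { _uw demand_save_flags; _uw core[16]; };
*/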
#else /* __ARM_ARCH_ISA_ARM || __ARM_ARCH_ISA_THUMB != 1 */
/* r0 points to a 16-word block. Upload these values to the actual core
state. */
ARM_FUNC_START restore_core_regs
/* We must use sp as the base register when restoring sp. Push the
last 3 registers onto the top of the current stack to achieve
this. */
add r1, r0, #52
ldmia r1, {r3, r4, r5} /* {sp, lr, pc}. */
#if defined(__thumb2__)
/* Thumb-2 doesn't allow sp in a load-multiple instruction, so push
the target address onto the target stack. This is safe as
we're always returning to somewhere further up the call stack. */
mov ip, r3
mov lr, r4
str r5, [ip, #-4]!
#elif defined(__INTERWORKING__)
/* Restore pc into ip. */
mov r2, r5
stmfd sp!, {r2, r3, r4}
#else
stmfd sp!, {r3, r4, r5}
#endif
/* Don't bother restoring ip. */
ldmia r0, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp}
#if defined(__thumb2__)
/* Pop the return address off the target stack. */
mov sp, ip
pop {pc}
#elif defined(__INTERWORKING__)
/* Pop the three registers we pushed earlier. */
ldmfd sp, {ip, sp, lr}
bx ip
#else
ldmfd sp, {sp, lr, pc}
#endif
FUNC_END restore_core_regs
UNPREFIX restore_core_regs
/* Load VFP registers d0-d15 from the address in r0.
Use this to load from FSTMX format. */
ARM_FUNC_START gnu_Unwind_Restore_VFP
/* Use the generic coprocessor form so that gas doesn't complain
on soft-float targets. */
ldc p11,cr0,[r0],{0x21} /* fldmiax r0, {d0-d15} */
RET
/* Store VFP registers d0-d15 to the address in r0.
Use this to store in FSTMX format. */
ARM_FUNC_START gnu_Unwind_Save_VFP
/* Use the generic coprocessor form so that gas doesn't complain
on soft-float targets. */
stc p11,cr0,[r0],{0x21} /* fstmiax r0, {d0-d15} */
RET
/* Load VFP registers d0-d15 from the address in r0.
Use this to load from FSTMD format. */
ARM_FUNC_START gnu_Unwind_Restore_VFP_D
ldc p11,cr0,[r0],{0x20} /* fldmiad r0, {d0-d15} */
RET
/* Store VFP registers d0-d15 to the address in r0.
Use this to store in FLDMD format. */
ARM_FUNC_START gnu_Unwind_Save_VFP_D
stc p11,cr0,[r0],{0x20} /* fstmiad r0, {d0-d15} */
RET
/* Load VFP registers d16-d31 from the address in r0.
Use this to load from FSTMD (=VSTM) format. Needs VFPv3. */
ARM_FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31
ldcl p11,cr0,[r0],{0x20} /* vldm r0, {d16-d31} */
RET
/* Store VFP registers d16-d31 to the address in r0.
Use this to store in FLDMD (=VLDM) format. Needs VFPv3. */
ARM_FUNC_START gnu_Unwind_Save_VFP_D_16_to_31
stcl p11,cr0,[r0],{0x20} /* vstm r0, {d16-d31} */
RET
ARM_FUNC_START gnu_Unwind_Restore_WMMXD
/* Use the generic coprocessor form so that gas doesn't complain
on non-iWMMXt targets. */
ldcl p1, cr0, [r0], #8 /* wldrd wr0, [r0], #8 */
ldcl p1, cr1, [r0], #8 /* wldrd wr1, [r0], #8 */
ldcl p1, cr2, [r0], #8 /* wldrd wr2, [r0], #8 */
ldcl p1, cr3, [r0], #8 /* wldrd wr3, [r0], #8 */
ldcl p1, cr4, [r0], #8 /* wldrd wr4, [r0], #8 */
ldcl p1, cr5, [r0], #8 /* wldrd wr5, [r0], #8 */
ldcl p1, cr6, [r0], #8 /* wldrd wr6, [r0], #8 */
ldcl p1, cr7, [r0], #8 /* wldrd wr7, [r0], #8 */
ldcl p1, cr8, [r0], #8 /* wldrd wr8, [r0], #8 */
ldcl p1, cr9, [r0], #8 /* wldrd wr9, [r0], #8 */
ldcl p1, cr10, [r0], #8 /* wldrd wr10, [r0], #8 */
ldcl p1, cr11, [r0], #8 /* wldrd wr11, [r0], #8 */
ldcl p1, cr12, [r0], #8 /* wldrd wr12, [r0], #8 */
ldcl p1, cr13, [r0], #8 /* wldrd wr13, [r0], #8 */
ldcl p1, cr14, [r0], #8 /* wldrd wr14, [r0], #8 */
ldcl p1, cr15, [r0], #8 /* wldrd wr15, [r0], #8 */
RET
ARM_FUNC_START gnu_Unwind_Save_WMMXD
/* Use the generic coprocessor form so that gas doesn't complain
on non-iWMMXt targets. */
stcl p1, cr0, [r0], #8 /* wstrd wr0, [r0], #8 */
stcl p1, cr1, [r0], #8 /* wstrd wr1, [r0], #8 */
stcl p1, cr2, [r0], #8 /* wstrd wr2, [r0], #8 */
stcl p1, cr3, [r0], #8 /* wstrd wr3, [r0], #8 */
stcl p1, cr4, [r0], #8 /* wstrd wr4, [r0], #8 */
stcl p1, cr5, [r0], #8 /* wstrd wr5, [r0], #8 */
stcl p1, cr6, [r0], #8 /* wstrd wr6, [r0], #8 */
stcl p1, cr7, [r0], #8 /* wstrd wr7, [r0], #8 */
stcl p1, cr8, [r0], #8 /* wstrd wr8, [r0], #8 */
stcl p1, cr9, [r0], #8 /* wstrd wr9, [r0], #8 */
stcl p1, cr10, [r0], #8 /* wstrd wr10, [r0], #8 */
stcl p1, cr11, [r0], #8 /* wstrd wr11, [r0], #8 */
stcl p1, cr12, [r0], #8 /* wstrd wr12, [r0], #8 */
stcl p1, cr13, [r0], #8 /* wstrd wr13, [r0], #8 */
stcl p1, cr14, [r0], #8 /* wstrd wr14, [r0], #8 */
stcl p1, cr15, [r0], #8 /* wstrd wr15, [r0], #8 */
RET
ARM_FUNC_START gnu_Unwind_Restore_WMMXC
/* Use the generic coprocessor form so that gas doesn't complain
on non-iWMMXt targets. */
ldc2 p1, cr8, [r0], #4 /* wldrw wcgr0, [r0], #4 */
ldc2 p1, cr9, [r0], #4 /* wldrw wcgr1, [r0], #4 */
ldc2 p1, cr10, [r0], #4 /* wldrw wcgr2, [r0], #4 */
ldc2 p1, cr11, [r0], #4 /* wldrw wcgr3, [r0], #4 */
RET
ARM_FUNC_START gnu_Unwind_Save_WMMXC
/* Use the generic coprocessor form so that gas doesn't complain
on non-iWMMXt targets. */
stc2 p1, cr8, [r0], #4 /* wstrw wcgr0, [r0], #4 */
stc2 p1, cr9, [r0], #4 /* wstrw wcgr1, [r0], #4 */
stc2 p1, cr10, [r0], #4 /* wstrw wcgr2, [r0], #4 */
stc2 p1, cr11, [r0], #4 /* wstrw wcgr3, [r0], #4 */
RET
/* Wrappers to save core registers, then call the real routine. */
.macro UNWIND_WRAPPER name nargs
ARM_FUNC_START \name
/* Create a phase2_vrs structure. */
/* Split reg push in two to ensure the correct value for sp. */
#if defined(__thumb2__)
mov ip, sp
push {lr} /* PC is ignored. */
push {ip, lr} /* Push original SP and LR. */
#else
stmfd sp!, {sp, lr, pc}
#endif
stmfd sp!, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp, ip}
/* Demand-save flags, plus an extra word for alignment. */
mov r3, #0
stmfd sp!, {r2, r3}
/* Point r1 at the block. Pass r[0..nargs) unchanged. */
add r\nargs, sp, #4
#if defined(__thumb__) && !defined(__thumb2__)
/* Switch back to thumb mode to avoid interworking hassle. */
adr ip, .L1_\name
orr ip, ip, #1
bx ip
.thumb
.L1_\name:
bl SYM (__gnu\name) __PLT__
ldr r3, [sp, #64]
add sp, #72
bx r3
#else
bl SYM (__gnu\name) __PLT__
ldr lr, [sp, #64]
add sp, sp, #72
RET
#endif
FUNC_END \name
UNPREFIX \name
.endm
#endif /* __ARM_ARCH_ISA_ARM || __ARM_ARCH_ISA_THUMB != 1 */
UNWIND_WRAPPER _Unwind_RaiseException 1
UNWIND_WRAPPER _Unwind_Resume 1
UNWIND_WRAPPER _Unwind_Resume_or_Rethrow 1
UNWIND_WRAPPER _Unwind_ForcedUnwind 3
UNWIND_WRAPPER _Unwind_Backtrace 2
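/* The nargs argument names the register that receives the pointer to
   the register block: r0..r(nargs-1) carry the routine's C-visible
   arguments through unchanged, and r<nargs> becomes the extra block
   argument of the internal __gnu_* implementation.  For example,
   _Unwind_ForcedUnwind(exc, stop, stop_arg) keeps r0-r2 and passes
   the block in r3.  */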
#endif /* ndef __symbian__ */
/* ==== File: plugin-libc/libgcc/config/arm/lib1funcs.S ==== */
@ libgcc routines for ARM cpu.
@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
/* Copyright (C) 1995-2022 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Everything in this file should now use unified syntax. */
.syntax unified
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif /* __ELF__ and __linux__ */
#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file. */
/* Tag_ABI_align_needed: This code does not require 8-byte
alignment from the caller. */
/* .eabi_attribute 24, 0 -- default setting. */
/* Tag_ABI_align_preserved: This code preserves 8-byte
alignment in any callee. */
.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
/* ------------------------------------------------------------------------ */
/* We need to know what prefix to add to function names. */
#ifndef __USER_LABEL_PREFIX__
#error __USER_LABEL_PREFIX__ not defined
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
#ifdef __ELF__
#ifdef __thumb__
#define __PLT__ /* Not supported in Thumb assembler (for now). */
#elif defined __vxworks && !defined __PIC__
#define __PLT__ /* Not supported by the kernel loader. */
#else
#define __PLT__ (PLT)
#endif
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#define LSYM(x) .x
#else
#define __PLT__
#define TYPE(x)
#define SIZE(x)
#define LSYM(x) x
#endif
/* Function end macros. Variants for interworking. */
/* There are times when we might prefer Thumb1 code even if ARM code is
permitted, for example, the code might be smaller, or there might be
interworking problems with switching to ARM state if interworking is
disabled. */
#if (defined(__thumb__) \
&& !defined(__thumb2__) \
&& (!defined(__THUMB_INTERWORK__) \
|| defined (__OPTIMIZE_SIZE__) \
|| !__ARM_ARCH_ISA_ARM))
# define __prefer_thumb__
#endif
#if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1
#define NOT_ISA_TARGET_32BIT 1
#endif
/* How to return from a function call depends on the architecture variant. */
#if (__ARM_ARCH > 4) || defined(__ARM_ARCH_4T__)
# define RET bx lr
# define RETc(x) bx##x lr
/* Special precautions for interworking on armv4t. */
# if (__ARM_ARCH == 4)
/* Always use bx, not ldr pc. */
# if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
# define __INTERWORKING__
# endif /* __THUMB__ || __THUMB_INTERWORK__ */
/* Include thumb stub before arm mode code. */
# if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
# define __INTERWORKING_STUBS__
# endif /* __thumb__ && !__THUMB_INTERWORK__ */
#endif /* __ARM_ARCH == 4 */
#else
# define RET mov pc, lr
# define RETc(x) mov##x pc, lr
#endif
.macro cfi_pop advance, reg, cfa_offset
#ifdef __ELF__
.pushsection .debug_frame
.byte 0x4 /* DW_CFA_advance_loc4 */
.4byte \advance
.byte (0xc0 | \reg) /* DW_CFA_restore */
.byte 0xe /* DW_CFA_def_cfa_offset */
.uleb128 \cfa_offset
.popsection
#endif
.endm
.macro cfi_push advance, reg, offset, cfa_offset
#ifdef __ELF__
.pushsection .debug_frame
.byte 0x4 /* DW_CFA_advance_loc4 */
.4byte \advance
.byte (0x80 | \reg) /* DW_CFA_offset */
.uleb128 (\offset / -4)
.byte 0xe /* DW_CFA_def_cfa_offset */
.uleb128 \cfa_offset
.popsection
#endif
.endm
.macro cfi_start start_label, end_label
#ifdef __ELF__
.pushsection .debug_frame
LSYM(Lstart_frame):
.4byte LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
LSYM(Lstart_cie):
.4byte 0xffffffff @ CIE Identifier Tag
.byte 0x1 @ CIE Version
.ascii "\0" @ CIE Augmentation
.uleb128 0x1 @ CIE Code Alignment Factor
.sleb128 -4 @ CIE Data Alignment Factor
.byte 0xe @ CIE RA Column
.byte 0xc @ DW_CFA_def_cfa
.uleb128 0xd
.uleb128 0x0
.align 2
LSYM(Lend_cie):
.4byte LSYM(Lend_fde)-LSYM(Lstart_fde) @ FDE Length
LSYM(Lstart_fde):
.4byte LSYM(Lstart_frame) @ FDE CIE offset
.4byte \start_label @ FDE initial location
.4byte \end_label-\start_label @ FDE address range
.popsection
#endif
.endm
.macro cfi_end end_label
#ifdef __ELF__
.pushsection .debug_frame
.align 2
LSYM(Lend_fde):
.popsection
\end_label:
#endif
.endm
/* Don't pass dirn, it's there just to get token pasting right. */
.macro RETLDM regs=, cond=, unwind=, dirn=ia
#if defined (__INTERWORKING__)
.ifc "\regs",""
ldr\cond lr, [sp], #8
.else
# if defined(__thumb2__)
pop\cond {\regs, lr}
# else
ldm\cond\dirn sp!, {\regs, lr}
# endif
.endif
.ifnc "\unwind", ""
/* Mark LR as restored. */
97: cfi_pop 97b - \unwind, 0xe, 0x0
.endif
bx\cond lr
#else
/* Caller is responsible for providing IT instruction. */
.ifc "\regs",""
ldr\cond pc, [sp], #8
.else
# if defined(__thumb2__)
pop\cond {\regs, pc}
# else
ldm\cond\dirn sp!, {\regs, pc}
# endif
.endif
#endif
.endm
/* The Unified assembly syntax allows the same code to be assembled for both
ARM and Thumb-2. However this is only supported by recent gas, so define
a set of macros to allow ARM code on older assemblers. */
#if defined(__thumb2__)
.macro do_it cond, suffix=""
it\suffix \cond
.endm
.macro shift1 op, arg0, arg1, arg2
\op \arg0, \arg1, \arg2
.endm
#define do_push push
#define do_pop pop
/* Perform an arithmetic operation with a variable shift operand. This
requires two instructions and a scratch register on Thumb-2. */
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
\shiftop \tmp, \src2, \shiftreg
\name \dest, \src1, \tmp
.endm
#else
.macro do_it cond, suffix=""
.endm
.macro shift1 op, arg0, arg1, arg2
mov \arg0, \arg1, \op \arg2
.endm
#define do_push stmfd sp!,
#define do_pop ldmfd sp!,
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
\name \dest, \src1, \src2, \shiftop \shiftreg
.endm
#endif
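/* Example: "shiftop orr xl xl r3 lsl ip ip" expands to
	orr	xl, xl, r3, lsl ip
   on ARM, and, since Thumb-2 data-processing instructions have no
   register-shifted register operand, to
	lsl	ip, r3, ip
	orr	xl, xl, ip
   using the last argument as the scratch register.  */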
#define COND(op1, op2, cond) op1 ## op2 ## cond
#ifdef __ARM_EABI__
.macro ARM_LDIV0 name signed
cmp r0, #0
.ifc \signed, unsigned
movne r0, #0xffffffff
.else
movgt r0, #0x7fffffff
movlt r0, #0x80000000
.endif
b SYM (__aeabi_idiv0) __PLT__
.endm
#else
.macro ARM_LDIV0 name signed
str lr, [sp, #-8]!
98: cfi_push 98b - __\name, 0xe, -0x8, 0x8
bl SYM (__div0) __PLT__
mov r0, #0 @ About as wrong as it could be.
RETLDM unwind=98b
.endm
#endif
#ifdef __ARM_EABI__
.macro THUMB_LDIV0 name signed
#ifdef NOT_ISA_TARGET_32BIT
push {r0, lr}
movs r0, #0
bl SYM(__aeabi_idiv0)
@ We know we are not on armv4t, so pop pc is safe.
pop {r1, pc}
#elif defined(__thumb2__)
.syntax unified
.ifc \signed, unsigned
cbz r0, 1f
mov r0, #0xffffffff
1:
.else
cmp r0, #0
do_it gt
movgt r0, #0x7fffffff
do_it lt
movlt r0, #0x80000000
.endif
b.w SYM(__aeabi_idiv0) __PLT__
#else
.align 2
bx pc
nop
.arm
cmp r0, #0
.ifc \signed, unsigned
movne r0, #0xffffffff
.else
movgt r0, #0x7fffffff
movlt r0, #0x80000000
.endif
b SYM(__aeabi_idiv0) __PLT__
.thumb
#endif
.endm
#else
.macro THUMB_LDIV0 name signed
push { r1, lr }
98: cfi_push 98b - __\name, 0xe, -0x4, 0x8
bl SYM (__div0)
movs r0, #0 @ About as wrong as it could be.
#if defined (__INTERWORKING__)
pop { r1, r2 }
bx r2
#else
pop { r1, pc }
#endif
.endm
#endif
.macro FUNC_END name
SIZE (__\name)
.endm
.macro DIV_FUNC_END name signed
cfi_start __\name, LSYM(Lend_div0)
LSYM(Ldiv0):
#ifdef __thumb__
THUMB_LDIV0 \name \signed
#else
ARM_LDIV0 \name \signed
#endif
cfi_end LSYM(Lend_div0)
FUNC_END \name
.endm
.macro THUMB_FUNC_START name
.globl SYM (\name)
TYPE (\name)
.thumb_func
SYM (\name):
.endm
/* Function start macros. Variants for ARM and Thumb. */
#ifdef __thumb__
#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
# if defined(__thumb2__)
#define THUMB_SYNTAX
# else
#define THUMB_SYNTAX
# endif
#else
#define THUMB_FUNC
#define THUMB_CODE
#define THUMB_SYNTAX
#endif
.macro FUNC_START name
.text
.globl SYM (__\name)
TYPE (__\name)
.align 0
THUMB_CODE
THUMB_FUNC
THUMB_SYNTAX
SYM (__\name):
.endm
.macro ARM_SYM_START name
TYPE (\name)
.align 0
SYM (\name):
.endm
.macro SYM_END name
SIZE (\name)
.endm
/* Special function that will always be coded in ARM assembly, even if
in Thumb-only compilation. */
#if defined(__thumb2__)
/* For Thumb-2 we build everything in thumb mode. */
.macro ARM_FUNC_START name
FUNC_START \name
.syntax unified
.endm
#define EQUIV .thumb_set
.macro ARM_CALL name
bl __\name
.endm
#elif defined(__INTERWORKING_STUBS__)
.macro ARM_FUNC_START name
FUNC_START \name
bx pc
nop
.arm
/* A hook to tell gdb that we've switched to ARM mode. Also used to call
directly from other local arm routines. */
_L__\name:
.endm
#define EQUIV .thumb_set
/* Branch directly to a function declared with ARM_FUNC_START.
Must be called in arm mode. */
.macro ARM_CALL name
bl _L__\name
.endm
#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */
#ifdef NOT_ISA_TARGET_32BIT
#define EQUIV .thumb_set
#else
.macro ARM_FUNC_START name
.text
.globl SYM (__\name)
TYPE (__\name)
.align 0
.arm
SYM (__\name):
.endm
#define EQUIV .set
.macro ARM_CALL name
bl __\name
.endm
#endif
#endif
.macro FUNC_ALIAS new old
.globl SYM (__\new)
#if defined (__thumb__)
.thumb_set SYM (__\new), SYM (__\old)
#else
.set SYM (__\new), SYM (__\old)
#endif
.endm
#ifndef NOT_ISA_TARGET_32BIT
.macro ARM_FUNC_ALIAS new old
.globl SYM (__\new)
EQUIV SYM (__\new), SYM (__\old)
#if defined(__INTERWORKING_STUBS__)
.set SYM (_L__\new), SYM (_L__\old)
#endif
.endm
#endif
#ifdef __ARMEB__
#define xxh r0
#define xxl r1
#define yyh r2
#define yyl r3
#else
#define xxh r1
#define xxl r0
#define yyh r3
#define yyl r2
#endif
#ifdef __ARM_EABI__
.macro WEAK name
.weak SYM (__\name)
.endm
#endif
#ifdef __thumb__
/* Register aliases. */
work .req r4 @ XXXX is this safe ?
dividend .req r0
divisor .req r1
overdone .req r2
result .req r2
curbit .req r3
#endif
#if 0
ip .req r12
sp .req r13
lr .req r14
pc .req r15
#endif
/* ------------------------------------------------------------------------ */
/* Bodies of the division and modulo routines. */
/* ------------------------------------------------------------------------ */
.macro ARM_DIV_BODY dividend, divisor, result, curbit
#if defined (__ARM_FEATURE_CLZ) && ! defined (__OPTIMIZE_SIZE__)
#if defined (__thumb2__)
clz \curbit, \dividend
clz \result, \divisor
sub \curbit, \result, \curbit
rsb \curbit, \curbit, #31
adr \result, 1f
add \curbit, \result, \curbit, lsl #4
mov \result, #0
mov pc, \curbit
.p2align 3
1:
.set shift, 32
.rept 32
.set shift, shift - 1
cmp.w \dividend, \divisor, lsl #shift
nop.n
adc.w \result, \result, \result
it cs
subcs.w \dividend, \dividend, \divisor, lsl #shift
.endr
#else
clz \curbit, \dividend
clz \result, \divisor
sub \curbit, \result, \curbit
rsbs \curbit, \curbit, #31
addne \curbit, \curbit, \curbit, lsl #1
mov \result, #0
addne pc, pc, \curbit, lsl #2
nop
.set shift, 32
.rept 32
.set shift, shift - 1
cmp \dividend, \divisor, lsl #shift
adc \result, \result, \result
subcs \dividend, \dividend, \divisor, lsl #shift
.endr
#endif
#else /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */
#if defined (__ARM_FEATURE_CLZ)
clz \curbit, \divisor
clz \result, \dividend
sub \result, \curbit, \result
mov \curbit, #1
mov \divisor, \divisor, lsl \result
mov \curbit, \curbit, lsl \result
mov \result, #0
#else /* !defined (__ARM_FEATURE_CLZ) */
@ Initially shift the divisor left 3 bits if possible,
@ and set curbit accordingly. This allows curbit to sit at
@ the left end of each 4-bit nibble in the division loop,
@ saving one loop iteration in most cases.
tst \divisor, #0xe0000000
moveq \divisor, \divisor, lsl #3
moveq \curbit, #8
movne \curbit, #1
@ Unless the divisor is very big, shift it up in multiples of
@ four bits, since this is the amount of unwinding in the main
@ division loop. Continue shifting until the divisor is
@ larger than the dividend.
1: cmp \divisor, #0x10000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #4
movlo \curbit, \curbit, lsl #4
blo 1b
@ For very big divisors, we must shift it a bit at a time, or
@ we will be in danger of overflowing.
1: cmp \divisor, #0x80000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #1
movlo \curbit, \curbit, lsl #1
blo 1b
mov \result, #0
#endif /* !defined (__ARM_FEATURE_CLZ) */
@ Division loop
1: cmp \dividend, \divisor
do_it hs, t
subhs \dividend, \dividend, \divisor
orrhs \result, \result, \curbit
cmp \dividend, \divisor, lsr #1
do_it hs, t
subhs \dividend, \dividend, \divisor, lsr #1
orrhs \result, \result, \curbit, lsr #1
cmp \dividend, \divisor, lsr #2
do_it hs, t
subhs \dividend, \dividend, \divisor, lsr #2
orrhs \result, \result, \curbit, lsr #2
cmp \dividend, \divisor, lsr #3
do_it hs, t
subhs \dividend, \dividend, \divisor, lsr #3
orrhs \result, \result, \curbit, lsr #3
cmp \dividend, #0 @ Early termination?
do_it ne, t
movnes \curbit, \curbit, lsr #4 @ No, any more bits to do?
movne \divisor, \divisor, lsr #4
bne 1b
#endif /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */
.endm
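/* Whichever variant is assembled, ARM_DIV_BODY computes the same
   thing: align the divisor just below the dividend, then run a
   restoring long division that retires up to four quotient bits per
   loop.  A hedged C model (assuming d != 0; the callers test for
   division by zero beforehand):
	unsigned udiv(unsigned n, unsigned d) {
	    unsigned q = 0, bit = 1;
	    while (d < n && !(d & 0x80000000)) { d <<= 1; bit <<= 1; }
	    for (; bit != 0; bit >>= 1, d >>= 1)
		if (n >= d) { n -= d; q |= bit; }
	    return q;			/* remainder is left in n */
	}
*/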
/* ------------------------------------------------------------------------ */
.macro ARM_DIV2_ORDER divisor, order
#if defined (__ARM_FEATURE_CLZ)
clz \order, \divisor
rsb \order, \order, #31
#else
cmp \divisor, #(1 << 16)
movhs \divisor, \divisor, lsr #16
movhs \order, #16
movlo \order, #0
cmp \divisor, #(1 << 8)
movhs \divisor, \divisor, lsr #8
addhs \order, \order, #8
cmp \divisor, #(1 << 4)
movhs \divisor, \divisor, lsr #4
addhs \order, \order, #4
cmp \divisor, #(1 << 2)
addhi \order, \order, #3
addls \order, \order, \divisor, lsr #1
#endif
.endm
/* ------------------------------------------------------------------------ */
.macro ARM_MOD_BODY dividend, divisor, order, spare
#if defined(__ARM_FEATURE_CLZ) && ! defined (__OPTIMIZE_SIZE__)
clz \order, \divisor
clz \spare, \dividend
sub \order, \order, \spare
rsbs \order, \order, #31
addne pc, pc, \order, lsl #3
nop
.set shift, 32
.rept 32
.set shift, shift - 1
cmp \dividend, \divisor, lsl #shift
subcs \dividend, \dividend, \divisor, lsl #shift
.endr
#else /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */
#if defined (__ARM_FEATURE_CLZ)
clz \order, \divisor
clz \spare, \dividend
sub \order, \order, \spare
mov \divisor, \divisor, lsl \order
#else /* !defined (__ARM_FEATURE_CLZ) */
mov \order, #0
@ Unless the divisor is very big, shift it up in multiples of
@ four bits, since this is the amount of unwinding in the main
@ division loop. Continue shifting until the divisor is
@ larger than the dividend.
1: cmp \divisor, #0x10000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #4
addlo \order, \order, #4
blo 1b
@ For very big divisors, we must shift it a bit at a time, or
@ we will be in danger of overflowing.
1: cmp \divisor, #0x80000000
cmplo \divisor, \dividend
movlo \divisor, \divisor, lsl #1
addlo \order, \order, #1
blo 1b
#endif /* !defined (__ARM_FEATURE_CLZ) */
@ Perform all needed subtractions to keep only the remainder.
@ Do comparisons in batches of 4 first.
subs \order, \order, #3 @ yes, 3 is intended here
blt 2f
1: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
cmp \dividend, \divisor, lsr #1
subhs \dividend, \dividend, \divisor, lsr #1
cmp \dividend, \divisor, lsr #2
subhs \dividend, \dividend, \divisor, lsr #2
cmp \dividend, \divisor, lsr #3
subhs \dividend, \dividend, \divisor, lsr #3
cmp \dividend, #1
mov \divisor, \divisor, lsr #4
subges \order, \order, #4
bge 1b
tst \order, #3
teqne \dividend, #0
beq 5f
@ Either 1, 2 or 3 comparisons/subtractions are left.
2: cmn \order, #2
blt 4f
beq 3f
cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
mov \divisor, \divisor, lsr #1
3: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
mov \divisor, \divisor, lsr #1
4: cmp \dividend, \divisor
subhs \dividend, \dividend, \divisor
5:
#endif /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */
.endm
/* ------------------------------------------------------------------------ */
.macro THUMB_DIV_MOD_BODY modulo
@ Load the constant 0x10000000 into our work register.
movs work, #1
lsls work, #28
LSYM(Loop1):
@ Unless the divisor is very big, shift it up in multiples of
@ four bits, since this is the amount of unwinding in the main
@ division loop. Continue shifting until the divisor is
@ larger than the dividend.
cmp divisor, work
bhs LSYM(Lbignum)
cmp divisor, dividend
bhs LSYM(Lbignum)
lsls divisor, #4
lsls curbit, #4
b LSYM(Loop1)
LSYM(Lbignum):
@ Set work to 0x80000000
lsls work, #3
LSYM(Loop2):
@ For very big divisors, we must shift it a bit at a time, or
@ we will be in danger of overflowing.
cmp divisor, work
bhs LSYM(Loop3)
cmp divisor, dividend
bhs LSYM(Loop3)
lsls divisor, #1
lsls curbit, #1
b LSYM(Loop2)
LSYM(Loop3):
@ Test for possible subtractions ...
.if \modulo
@ ... On the final pass, this may subtract too much from the dividend,
@ so keep track of which subtractions are done so that we can fix
@ them up afterwards.
movs overdone, #0
cmp dividend, divisor
blo LSYM(Lover1)
subs dividend, dividend, divisor
LSYM(Lover1):
lsrs work, divisor, #1
cmp dividend, work
blo LSYM(Lover2)
subs dividend, dividend, work
mov ip, curbit
movs work, #1
rors curbit, work
orrs overdone, curbit
mov curbit, ip
LSYM(Lover2):
lsrs work, divisor, #2
cmp dividend, work
blo LSYM(Lover3)
subs dividend, dividend, work
mov ip, curbit
movs work, #2
rors curbit, work
orrs overdone, curbit
mov curbit, ip
LSYM(Lover3):
lsrs work, divisor, #3
cmp dividend, work
blo LSYM(Lover4)
subs dividend, dividend, work
mov ip, curbit
movs work, #3
rors curbit, work
orrs overdone, curbit
mov curbit, ip
LSYM(Lover4):
mov ip, curbit
.else
@ ... and note which bits are done in the result. On the final pass,
@ this may subtract too much from the dividend, but the result will be ok,
@ since the "bit" will have been shifted out at the bottom.
cmp dividend, divisor
blo LSYM(Lover1)
subs dividend, dividend, divisor
orrs result, result, curbit
LSYM(Lover1):
lsrs work, divisor, #1
cmp dividend, work
blo LSYM(Lover2)
subs dividend, dividend, work
lsrs work, curbit, #1
orrs result, work
LSYM(Lover2):
lsrs work, divisor, #2
cmp dividend, work
blo LSYM(Lover3)
subs dividend, dividend, work
lsrs work, curbit, #2
orrs result, work
LSYM(Lover3):
lsrs work, divisor, #3
cmp dividend, work
blo LSYM(Lover4)
subs dividend, dividend, work
lsrs work, curbit, #3
orrs result, work
LSYM(Lover4):
.endif
cmp dividend, #0 @ Early termination?
beq LSYM(Lover5)
lsrs curbit, #4 @ No, any more bits to do?
beq LSYM(Lover5)
lsrs divisor, #4
b LSYM(Loop3)
LSYM(Lover5):
.if \modulo
@ Any subtractions that we should not have done will be recorded in
@ the top three bits of "overdone". Exactly which were not needed
@ are governed by the position of the bit, stored in ip.
movs work, #0xe
lsls work, #28
ands overdone, work
beq LSYM(Lgot_result)
@ If we terminated early, because dividend became zero, then the
@ bit in ip will not be in the bottom nibble, and we should not
@ perform the additions below. We must test for this though
@ (rather than relying upon the TSTs to prevent the additions) since
@ the bit in ip could be in the top two bits which might then match
@ with one of the smaller RORs.
mov curbit, ip
movs work, #0x7
tst curbit, work
beq LSYM(Lgot_result)
mov curbit, ip
movs work, #3
rors curbit, work
tst overdone, curbit
beq LSYM(Lover6)
lsrs work, divisor, #3
adds dividend, work
LSYM(Lover6):
mov curbit, ip
movs work, #2
rors curbit, work
tst overdone, curbit
beq LSYM(Lover7)
lsrs work, divisor, #2
adds dividend, work
LSYM(Lover7):
mov curbit, ip
movs work, #1
rors curbit, work
tst overdone, curbit
beq LSYM(Lgot_result)
lsrs work, divisor, #1
adds dividend, work
.endif
LSYM(Lgot_result):
.endm
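/* Note on the modulo variant above: on the final pass the lsr #1..#3
   trial subtractions can act below the last quotient bit and so
   subtract too much.  Rather than test for that inside the loop,
   each such subtraction records its weight in "overdone" (curbit
   rotated right by 1..3), and the fix-up code after Lover5 adds the
   corresponding divisor fractions back in.  */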
/* If performance is preferred over size, the following functions are provided. */
#if defined(__prefer_thumb__) && !defined(__OPTIMIZE_SIZE__)
/* Branch to div(n): jump to the label if the dividend shifted right by
   n bits is lower than the divisor. */
.macro BranchToDiv n, label
lsrs curbit, dividend, \n
cmp curbit, divisor
blo \label
.endm
/* Body of div(n). Compare the dividend shifted right by n bits with
   the divisor; if it is not smaller, subtract divisor<<n from the
   dividend. The resulting carry is shifted into the quotient. */
.macro DoDiv n
lsrs curbit, dividend, \n
cmp curbit, divisor
bcc 1f
lsls curbit, divisor, \n
subs dividend, dividend, curbit
1: adcs result, result
.endm
/* The body of division with positive divisor. Unless the divisor is very
big, shift it up in multiples of four bits, since this is the amount of
unwinding in the main division loop. Continue shifting until the divisor
is larger than the dividend. */
.macro THUMB1_Div_Positive
movs result, #0
BranchToDiv #1, LSYM(Lthumb1_div1)
BranchToDiv #4, LSYM(Lthumb1_div4)
BranchToDiv #8, LSYM(Lthumb1_div8)
BranchToDiv #12, LSYM(Lthumb1_div12)
BranchToDiv #16, LSYM(Lthumb1_div16)
LSYM(Lthumb1_div_large_positive):
movs result, #0xff
lsls divisor, divisor, #8
rev result, result
lsrs curbit, dividend, #16
cmp curbit, divisor
blo 1f
asrs result, #8
lsls divisor, divisor, #8
beq LSYM(Ldivbyzero_waypoint)
1: lsrs curbit, dividend, #12
cmp curbit, divisor
blo LSYM(Lthumb1_div12)
b LSYM(Lthumb1_div16)
LSYM(Lthumb1_div_loop):
lsrs divisor, divisor, #8
LSYM(Lthumb1_div16):
DoDiv #15
DoDiv #14
DoDiv #13
DoDiv #12
LSYM(Lthumb1_div12):
DoDiv #11
DoDiv #10
DoDiv #9
DoDiv #8
bcs LSYM(Lthumb1_div_loop)
LSYM(Lthumb1_div8):
DoDiv #7
DoDiv #6
DoDiv #5
LSYM(Lthumb1_div5):
DoDiv #4
LSYM(Lthumb1_div4):
DoDiv #3
LSYM(Lthumb1_div3):
DoDiv #2
LSYM(Lthumb1_div2):
DoDiv #1
LSYM(Lthumb1_div1):
subs divisor, dividend, divisor
bcs 1f
cpy divisor, dividend
1: adcs result, result
cpy dividend, result
RET
LSYM(Ldivbyzero_waypoint):
b LSYM(Ldiv0)
.endm
/* The body of division when the divisor or dividend is negative.
   Similar to THUMB1_Div_Positive, except that the shift steps are
   in multiples of six bits. */
.macro THUMB1_Div_Negative
lsrs result, divisor, #31
beq 1f
negs divisor, divisor
1: asrs curbit, dividend, #32
bcc 2f
negs dividend, dividend
2: eors curbit, result
movs result, #0
cpy ip, curbit
BranchToDiv #4, LSYM(Lthumb1_div_negative4)
BranchToDiv #8, LSYM(Lthumb1_div_negative8)
LSYM(Lthumb1_div_large):
movs result, #0xfc
lsls divisor, divisor, #6
rev result, result
lsrs curbit, dividend, #8
cmp curbit, divisor
blo LSYM(Lthumb1_div_negative8)
lsls divisor, divisor, #6
asrs result, result, #6
cmp curbit, divisor
blo LSYM(Lthumb1_div_negative8)
lsls divisor, divisor, #6
asrs result, result, #6
cmp curbit, divisor
blo LSYM(Lthumb1_div_negative8)
lsls divisor, divisor, #6
beq LSYM(Ldivbyzero_negative)
asrs result, result, #6
b LSYM(Lthumb1_div_negative8)
LSYM(Lthumb1_div_negative_loop):
lsrs divisor, divisor, #6
LSYM(Lthumb1_div_negative8):
DoDiv #7
DoDiv #6
DoDiv #5
DoDiv #4
LSYM(Lthumb1_div_negative4):
DoDiv #3
DoDiv #2
bcs LSYM(Lthumb1_div_negative_loop)
DoDiv #1
subs divisor, dividend, divisor
bcs 1f
cpy divisor, dividend
1: cpy curbit, ip
adcs result, result
asrs curbit, curbit, #1
cpy dividend, result
bcc 2f
negs dividend, dividend
cmp curbit, #0
2: bpl 3f
negs divisor, divisor
3: RET
LSYM(Ldivbyzero_negative):
cpy curbit, ip
asrs curbit, curbit, #1
bcc LSYM(Ldiv0)
negs dividend, dividend
.endm
#endif /* ARM Thumb version. */
/* ------------------------------------------------------------------------ */
/* Start of the Real Functions */
/* ------------------------------------------------------------------------ */
#ifdef L_udivsi3
#if defined(__prefer_thumb__)
FUNC_START udivsi3
FUNC_ALIAS aeabi_uidiv udivsi3
#if defined(__OPTIMIZE_SIZE__)
cmp divisor, #0
beq LSYM(Ldiv0)
LSYM(udivsi3_skip_div0_test):
movs curbit, #1
movs result, #0
push { work }
cmp dividend, divisor
blo LSYM(Lgot_result)
THUMB_DIV_MOD_BODY 0
movs r0, result
pop { work }
RET
/* Implementation of aeabi_uidiv for ARMv6m. This version is only
used in ARMv6-M when we need an efficient implementation. */
#else
LSYM(udivsi3_skip_div0_test):
THUMB1_Div_Positive
#endif /* __OPTIMIZE_SIZE__ */
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START udivsi3
ARM_FUNC_ALIAS aeabi_uidiv udivsi3
cmp r1, #0
beq LSYM(Ldiv0)
udiv r0, r0, r1
RET
#else /* ARM version/Thumb-2. */
ARM_FUNC_START udivsi3
ARM_FUNC_ALIAS aeabi_uidiv udivsi3
/* Note: if called via udivsi3_skip_div0_test, this will unnecessarily
check for division-by-zero a second time. */
LSYM(udivsi3_skip_div0_test):
subs r2, r1, #1
do_it eq
RETc(eq)
bcc LSYM(Ldiv0)
cmp r0, r1
bls 11f
tst r1, r2
beq 12f
ARM_DIV_BODY r0, r1, r2, r3
mov r0, r2
RET
11: do_it eq, e
moveq r0, #1
movne r0, #0
RET
12: ARM_DIV2_ORDER r1, r2
mov r0, r0, lsr r2
RET
#endif /* ARM version */
DIV_FUNC_END udivsi3 unsigned
#if defined(__prefer_thumb__)
FUNC_START aeabi_uidivmod
cmp r1, #0
beq LSYM(Ldiv0)
# if defined(__OPTIMIZE_SIZE__)
push {r0, r1, lr}
bl LSYM(udivsi3_skip_div0_test)
POP {r1, r2, r3}
muls r2, r0
subs r1, r1, r2
bx r3
# else
/* Both the quotient and remainder are calculated simultaneously
in THUMB1_Div_Positive. There is no need to calculate the
remainder again here. */
b LSYM(udivsi3_skip_div0_test)
RET
# endif /* __OPTIMIZE_SIZE__ */
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START aeabi_uidivmod
cmp r1, #0
beq LSYM(Ldiv0)
mov r2, r0
udiv r0, r0, r1
mls r1, r0, r1, r2
RET
#else
ARM_FUNC_START aeabi_uidivmod
cmp r1, #0
beq LSYM(Ldiv0)
stmfd sp!, { r0, r1, lr }
bl LSYM(udivsi3_skip_div0_test)
ldmfd sp!, { r1, r2, lr }
mul r3, r2, r0
sub r1, r1, r3
RET
#endif
FUNC_END aeabi_uidivmod
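/* __aeabi_uidivmod returns the quotient in r0 and the remainder in
   r1; in AEABI terms it behaves like this hedged C declaration:
	struct { unsigned quot, rem; }
	__aeabi_uidivmod(unsigned num, unsigned den);
   letting compilers obtain both results from a single call.  */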
#endif /* L_udivsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_umodsi3
#if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1
ARM_FUNC_START umodsi3
cmp r1, #0
beq LSYM(Ldiv0)
udiv r2, r0, r1
mls r0, r1, r2, r0
RET
#elif defined(__thumb__)
FUNC_START umodsi3
cmp divisor, #0
beq LSYM(Ldiv0)
movs curbit, #1
cmp dividend, divisor
bhs LSYM(Lover10)
RET
LSYM(Lover10):
push { work }
THUMB_DIV_MOD_BODY 1
pop { work }
RET
#else /* ARM version. */
FUNC_START umodsi3
subs r2, r1, #1 @ compare divisor with 1
bcc LSYM(Ldiv0)
cmpne r0, r1 @ compare dividend with divisor
moveq r0, #0
tsthi r1, r2 @ see if divisor is power of 2
andeq r0, r0, r2
RETc(ls)
ARM_MOD_BODY r0, r1, r2, r3
RET
#endif /* ARM version. */
DIV_FUNC_END umodsi3 unsigned
#endif /* L_umodsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_divsi3
#if defined(__prefer_thumb__)
FUNC_START divsi3
FUNC_ALIAS aeabi_idiv divsi3
#if defined(__OPTIMIZE_SIZE__)
cmp divisor, #0
beq LSYM(Ldiv0)
LSYM(divsi3_skip_div0_test):
push { work }
movs work, dividend
eors work, divisor @ Save the sign of the result.
mov ip, work
movs curbit, #1
movs result, #0
cmp divisor, #0
bpl LSYM(Lover10)
negs divisor, divisor @ Loops below use unsigned.
LSYM(Lover10):
cmp dividend, #0
bpl LSYM(Lover11)
negs dividend, dividend
LSYM(Lover11):
cmp dividend, divisor
blo LSYM(Lgot_result)
THUMB_DIV_MOD_BODY 0
movs r0, result
mov work, ip
cmp work, #0
bpl LSYM(Lover12)
negs r0, r0
LSYM(Lover12):
pop { work }
RET
/* Implementation of aeabi_idiv for ARMv6m. This version is only
used in ARMv6-M when we need an efficient implementation. */
#else
LSYM(divsi3_skip_div0_test):
cpy curbit, dividend
orrs curbit, divisor
bmi LSYM(Lthumb1_div_negative)
LSYM(Lthumb1_div_positive):
THUMB1_Div_Positive
LSYM(Lthumb1_div_negative):
THUMB1_Div_Negative
#endif /* __OPTIMIZE_SIZE__ */
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START divsi3
ARM_FUNC_ALIAS aeabi_idiv divsi3
cmp r1, #0
beq LSYM(Ldiv0)
sdiv r0, r0, r1
RET
#else /* ARM/Thumb-2 version. */
ARM_FUNC_START divsi3
ARM_FUNC_ALIAS aeabi_idiv divsi3
cmp r1, #0
beq LSYM(Ldiv0)
LSYM(divsi3_skip_div0_test):
eor ip, r0, r1 @ save the sign of the result.
do_it mi
rsbmi r1, r1, #0 @ loops below use unsigned.
subs r2, r1, #1 @ division by 1 or -1 ?
beq 10f
movs r3, r0
do_it mi
rsbmi r3, r0, #0 @ positive dividend value
cmp r3, r1
bls 11f
tst r1, r2 @ divisor is power of 2 ?
beq 12f
ARM_DIV_BODY r3, r1, r0, r2
cmp ip, #0
do_it mi
rsbmi r0, r0, #0
RET
10: teq ip, r0 @ same sign ?
do_it mi
rsbmi r0, r0, #0
RET
11: do_it lo
movlo r0, #0
do_it eq,t
moveq r0, ip, asr #31
orreq r0, r0, #1
RET
12: ARM_DIV2_ORDER r1, r2
cmp ip, #0
mov r0, r3, lsr r2
do_it mi
rsbmi r0, r0, #0
RET
#endif /* ARM version */
DIV_FUNC_END divsi3 signed
#if defined(__prefer_thumb__)
FUNC_START aeabi_idivmod
cmp r1, #0
beq LSYM(Ldiv0)
# if defined(__OPTIMIZE_SIZE__)
push {r0, r1, lr}
bl LSYM(divsi3_skip_div0_test)
POP {r1, r2, r3}
muls r2, r0
subs r1, r1, r2
bx r3
# else
/* Both the quotient and remainder are calculated simultaneously
in THUMB1_Div_Positive and THUMB1_Div_Negative. There is no
need to calculate the remainder again here. */
b LSYM(divsi3_skip_div0_test)
RET
# endif /* __OPTIMIZE_SIZE__ */
#elif defined(__ARM_ARCH_EXT_IDIV__)
ARM_FUNC_START aeabi_idivmod
cmp r1, #0
beq LSYM(Ldiv0)
mov r2, r0
sdiv r0, r0, r1
mls r1, r0, r1, r2
RET
#else
ARM_FUNC_START aeabi_idivmod
cmp r1, #0
beq LSYM(Ldiv0)
stmfd sp!, { r0, r1, lr }
bl LSYM(divsi3_skip_div0_test)
ldmfd sp!, { r1, r2, lr }
mul r3, r2, r0
sub r1, r1, r3
RET
#endif
FUNC_END aeabi_idivmod
#endif /* L_divsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_modsi3
#if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1
ARM_FUNC_START modsi3
cmp r1, #0
beq LSYM(Ldiv0)
sdiv r2, r0, r1
mls r0, r1, r2, r0
RET
#elif defined(__thumb__)
FUNC_START modsi3
movs curbit, #1
cmp divisor, #0
beq LSYM(Ldiv0)
bpl LSYM(Lover10)
negs divisor, divisor @ Loops below use unsigned.
LSYM(Lover10):
push { work }
@ We need to save the sign of the dividend; unfortunately, we need
@ the work register later on. This must be done after saving the
@ original value of the work register, because we will pop that
@ value off first.
push { dividend }
cmp dividend, #0
bpl LSYM(Lover11)
negs dividend, dividend
LSYM(Lover11):
cmp dividend, divisor
blo LSYM(Lgot_result)
THUMB_DIV_MOD_BODY 1
pop { work }
cmp work, #0
bpl LSYM(Lover12)
negs dividend, dividend
LSYM(Lover12):
pop { work }
RET
#else /* ARM version. */
FUNC_START modsi3
cmp r1, #0
beq LSYM(Ldiv0)
rsbmi r1, r1, #0 @ loops below use unsigned.
movs ip, r0 @ preserve sign of dividend
rsbmi r0, r0, #0 @ if negative make positive
subs r2, r1, #1 @ compare divisor with 1
cmpne r0, r1 @ compare dividend with divisor
moveq r0, #0
tsthi r1, r2 @ see if divisor is power of 2
andeq r0, r0, r2
bls 10f
ARM_MOD_BODY r0, r1, r2, r3
10: cmp ip, #0
rsbmi r0, r0, #0
RET
#endif /* ARM version */
DIV_FUNC_END modsi3 signed
#endif /* L_modsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_dvmd_tls
#ifdef __ARM_EABI__
WEAK aeabi_idiv0
WEAK aeabi_ldiv0
FUNC_START aeabi_idiv0
FUNC_START aeabi_ldiv0
RET
FUNC_END aeabi_ldiv0
FUNC_END aeabi_idiv0
#else
FUNC_START div0
RET
FUNC_END div0
#endif
#endif /* L_dvmd_tls */
/* ------------------------------------------------------------------------ */
#ifdef L_dvmd_lnx
@ GNU/Linux division-by-zero handler. Used in place of L_dvmd_tls.
/* Constant taken from <asm/signal.h>. */
#define SIGFPE 8
#ifdef __ARM_EABI__
cfi_start __aeabi_ldiv0, LSYM(Lend_aeabi_ldiv0)
WEAK aeabi_idiv0
WEAK aeabi_ldiv0
ARM_FUNC_START aeabi_idiv0
ARM_FUNC_START aeabi_ldiv0
do_push {r1, lr}
98: cfi_push 98b - __aeabi_ldiv0, 0xe, -0x4, 0x8
#else
cfi_start __div0, LSYM(Lend_div0)
ARM_FUNC_START div0
do_push {r1, lr}
98: cfi_push 98b - __div0, 0xe, -0x4, 0x8
#endif
mov r0, #SIGFPE
bl SYM(raise) __PLT__
RETLDM r1 unwind=98b
#ifdef __ARM_EABI__
cfi_end LSYM(Lend_aeabi_ldiv0)
FUNC_END aeabi_ldiv0
FUNC_END aeabi_idiv0
#else
cfi_end LSYM(Lend_div0)
FUNC_END div0
#endif
#endif /* L_dvmd_lnx */
#ifdef L_clear_cache
#if defined __ARM_EABI__ && defined __linux__
@ EABI GNU/Linux call to cacheflush syscall.
ARM_FUNC_START clear_cache
do_push {r7}
#if __ARM_ARCH >= 7 || defined(__ARM_ARCH_6T2__)
movw r7, #2
movt r7, #0xf
#else
mov r7, #0xf0000
add r7, r7, #2
#endif
mov r2, #0
swi 0
do_pop {r7}
RET
FUNC_END clear_cache
#else
#error "This is only for ARM EABI GNU/Linux"
#endif
#endif /* L_clear_cache */
#ifdef L_speculation_barrier
FUNC_START speculation_barrier
#if __ARM_ARCH >= 7
isb
dsb sy
#elif defined __ARM_EABI__ && defined __linux__
/* We don't have a speculation barrier directly for this
platform/architecture variant. But we can use a kernel
clear_cache service routine which will emit such instructions
if run on a later version of the architecture. We don't
really want to flush the cache, but we must give it a valid
address, so just clear pc..pc+1. */
#if defined __thumb__ && !defined __thumb2__
push {r7}
movs r7, #0xf
lsls r7, #16
adds r7, #2
adr r0, . + 4
adds r1, r0, #1
movs r2, #0
svc 0
pop {r7}
#else
do_push {r7}
#ifdef __ARM_ARCH_6T2__
movw r7, #2
movt r7, #0xf
#else
mov r7, #0xf0000
add r7, r7, #2
#endif
add r0, pc, #0 /* ADR. */
add r1, r0, #1
mov r2, #0
svc 0
do_pop {r7}
#endif /* Thumb1 only */
#else
#warning "No speculation barrier defined for this platform"
#endif
RET
FUNC_END speculation_barrier
#endif
/* ------------------------------------------------------------------------ */
/* Dword shift operations. */
/* All the following Dword shift variants rely on the fact that
shft xxx, Reg
is in fact done as
shft xxx, (Reg & 255)
so for Reg values in (32...63) and (-1...-31) we will get zero (in the
case of logical shifts) or the sign (for asr). */
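/* Concretely, for __aeabi_llsr with shift count n the halves combine
   as in this hedged model (pseudo-C: shift counts of 32 or more are
   taken to yield zero, as the (Reg & 255) behaviour above provides):
	lo = (lo >> n) | (hi << (32 - n)) | (hi >> (n - 32));
	hi =  hi >> n;
*/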
#ifdef __ARMEB__
#define al r1
#define ah r0
#else
#define al r0
#define ah r1
#endif
/* Prevent __aeabi double-word shifts from being produced on SymbianOS. */
#ifndef __symbian__
#ifdef L_lshrdi3
FUNC_START lshrdi3
FUNC_ALIAS aeabi_llsr lshrdi3
#ifdef __thumb__
lsrs al, r2
movs r3, ah
lsrs ah, r2
mov ip, r3
subs r2, #32
lsrs r3, r2
orrs al, r3
negs r2, r2
mov r3, ip
lsls r3, r2
orrs al, r3
RET
#else
subs r3, r2, #32
rsb ip, r2, #32
movmi al, al, lsr r2
movpl al, ah, lsr r3
orrmi al, al, ah, lsl ip
mov ah, ah, lsr r2
RET
#endif
FUNC_END aeabi_llsr
FUNC_END lshrdi3
#endif
#ifdef L_ashrdi3
FUNC_START ashrdi3
FUNC_ALIAS aeabi_lasr ashrdi3
#ifdef __thumb__
lsrs al, r2
movs r3, ah
asrs ah, r2
subs r2, #32
@ If r2 is negative at this point the following step would OR
@ the sign bit into all of AL. That's not what we want...
bmi 1f
mov ip, r3
asrs r3, r2
orrs al, r3
mov r3, ip
1:
negs r2, r2
lsls r3, r2
orrs al, r3
RET
#else
subs r3, r2, #32
rsb ip, r2, #32
movmi al, al, lsr r2
movpl al, ah, asr r3
orrmi al, al, ah, lsl ip
mov ah, ah, asr r2
RET
#endif
FUNC_END aeabi_lasr
FUNC_END ashrdi3
#endif
#ifdef L_ashldi3
FUNC_START ashldi3
FUNC_ALIAS aeabi_llsl ashldi3
#ifdef __thumb__
lsls ah, r2
movs r3, al
lsls al, r2
mov ip, r3
subs r2, #32
lsls r3, r2
orrs ah, r3
negs r2, r2
mov r3, ip
lsrs r3, r2
orrs ah, r3
RET
#else
subs r3, r2, #32
rsb ip, r2, #32
movmi ah, ah, lsl r2
movpl ah, al, lsl r3
orrmi ah, ah, al, lsr ip
mov al, al, lsl r2
RET
#endif
FUNC_END aeabi_llsl
FUNC_END ashldi3
#endif
#endif /* __symbian__ */
#ifdef L_clzsi2
#ifdef NOT_ISA_TARGET_32BIT
FUNC_START clzsi2
movs r1, #28
movs r3, #1
lsls r3, r3, #16
cmp r0, r3 /* 0x10000 */
bcc 2f
lsrs r0, r0, #16
subs r1, r1, #16
2: lsrs r3, r3, #8
cmp r0, r3 /* #0x100 */
bcc 2f
lsrs r0, r0, #8
subs r1, r1, #8
2: lsrs r3, r3, #4
cmp r0, r3 /* #0x10 */
bcc 2f
lsrs r0, r0, #4
subs r1, r1, #4
2: adr r2, 1f
ldrb r0, [r2, r0]
adds r0, r0, r1
bx lr
.align 2
1:
.byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
FUNC_END clzsi2
#else
ARM_FUNC_START clzsi2
# if defined (__ARM_FEATURE_CLZ)
clz r0, r0
RET
# else
mov r1, #28
cmp r0, #0x10000
do_it cs, t
movcs r0, r0, lsr #16
subcs r1, r1, #16
cmp r0, #0x100
do_it cs, t
movcs r0, r0, lsr #8
subcs r1, r1, #8
cmp r0, #0x10
do_it cs, t
movcs r0, r0, lsr #4
subcs r1, r1, #4
adr r2, 1f
ldrb r0, [r2, r0]
add r0, r0, r1
RET
.align 2
1:
.byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
# endif /* !defined (__ARM_FEATURE_CLZ) */
FUNC_END clzsi2
#endif
#endif /* L_clzsi2 */
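/* Both variants above are a binary search on nibbles followed by a
   byte-table lookup.  A C sketch of the same algorithm (illustrative
   only; note it returns 32 for a zero input, matching the code above):

     int clzsi2 (unsigned x)
     {
       static const unsigned char tab[16] =
         { 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 };
       int n = 28;
       if (x >= 0x10000) { x >>= 16; n -= 16; }   // halve the range
       if (x >= 0x100)   { x >>= 8;  n -= 8;  }
       if (x >= 0x10)    { x >>= 4;  n -= 4;  }
       return tab[x] + n;                         // count for the top nibble
     }
*/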
#ifdef L_clzdi2
#if !defined (__ARM_FEATURE_CLZ)
# ifdef NOT_ISA_TARGET_32BIT
FUNC_START clzdi2
push {r4, lr}
cmp xxh, #0
bne 1f
# ifdef __ARMEB__
movs r0, xxl
bl __clzsi2
adds r0, r0, #32
b 2f
1:
bl __clzsi2
# else
bl __clzsi2
adds r0, r0, #32
b 2f
1:
movs r0, xxh
bl __clzsi2
# endif
2:
pop {r4, pc}
# else /* NOT_ISA_TARGET_32BIT */
ARM_FUNC_START clzdi2
do_push {r4, lr}
cmp xxh, #0
bne 1f
# ifdef __ARMEB__
mov r0, xxl
bl __clzsi2
add r0, r0, #32
b 2f
1:
bl __clzsi2
# else
bl __clzsi2
add r0, r0, #32
b 2f
1:
mov r0, xxh
bl __clzsi2
# endif
2:
RETLDM r4
FUNC_END clzdi2
# endif /* NOT_ISA_TARGET_32BIT */
#else /* defined (__ARM_FEATURE_CLZ) */
ARM_FUNC_START clzdi2
cmp xxh, #0
do_it eq, et
clzeq r0, xxl
clzne r0, xxh
addeq r0, r0, #32
RET
FUNC_END clzdi2
#endif
#endif /* L_clzdi2 */
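/* All the __clzdi2 variants above reduce the 64-bit count to __clzsi2
   on one 32-bit half.  Equivalent C, reusing the clzsi2 sketch earlier:

     int clzsi2 (unsigned);

     int clzdi2 (unsigned long long x)
     {
       unsigned hi = (unsigned) (x >> 32);
       return hi ? clzsi2 (hi)                  // a bit set in the high word
                 : clzsi2 ((unsigned) x) + 32;  // count runs into the low word
     }
*/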
#ifdef L_ctzsi2
#ifdef NOT_ISA_TARGET_32BIT
FUNC_START ctzsi2
negs r1, r0
ands r0, r0, r1
movs r1, #28
movs r3, #1
lsls r3, r3, #16
cmp r0, r3 /* 0x10000 */
bcc 2f
lsrs r0, r0, #16
subs r1, r1, #16
2: lsrs r3, r3, #8
cmp r0, r3 /* #0x100 */
bcc 2f
lsrs r0, r0, #8
subs r1, r1, #8
2: lsrs r3, r3, #4
cmp r0, r3 /* #0x10 */
bcc 2f
lsrs r0, r0, #4
subs r1, r1, #4
2: adr r2, 1f
ldrb r0, [r2, r0]
subs r0, r0, r1
bx lr
.align 2
1:
.byte 27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
FUNC_END ctzsi2
#else
ARM_FUNC_START ctzsi2
rsb r1, r0, #0
and r0, r0, r1
# if defined (__ARM_FEATURE_CLZ)
clz r0, r0
rsb r0, r0, #31
RET
# else
mov r1, #28
cmp r0, #0x10000
do_it cs, t
movcs r0, r0, lsr #16
subcs r1, r1, #16
cmp r0, #0x100
do_it cs, t
movcs r0, r0, lsr #8
subcs r1, r1, #8
cmp r0, #0x10
do_it cs, t
movcs r0, r0, lsr #4
subcs r1, r1, #4
adr r2, 1f
ldrb r0, [r2, r0]
sub r0, r0, r1
RET
.align 2
1:
.byte 27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
# endif /* !defined (__ARM_FEATURE_CLZ) */
FUNC_END ctzsi2
#endif
#endif /* L_ctzsi2 */
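/* The lowest set bit is isolated with x & -x, then located with the
   same nibble search as __clzsi2; the byte table folds in the final
   31 - clz conversion.  A C sketch (the result for x == 0 is
   meaningless, as it is for the routines above):

     int ctzsi2 (unsigned x)
     {
       static const unsigned char tab[16] =
         { 27, 28, 29, 29, 30, 30, 30, 30,
           31, 31, 31, 31, 31, 31, 31, 31 };
       int n = 28;
       x &= -x;                                   // keep only the lowest set bit
       if (x >= 0x10000) { x >>= 16; n -= 16; }
       if (x >= 0x100)   { x >>= 8;  n -= 8;  }
       if (x >= 0x10)    { x >>= 4;  n -= 4;  }
       return tab[x] - n;
     }
*/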
/* ------------------------------------------------------------------------ */
/* These next two sections are here despite the fact that they contain Thumb
assembler because their presence allows interworked code to be linked even
when the GCC library is this one. */
/* Do not build the interworking functions when the target architecture does
not support Thumb instructions. (This can be a multilib option). */
#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__\
|| defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
|| __ARM_ARCH >= 6
#if defined L_call_via_rX
/* These labels & instructions are used by the Arm/Thumb interworking code.
The address of function to be called is loaded into a register and then
one of these labels is called via a BL instruction. This puts the
return address into the link register with the bottom bit set, and the
code here switches to the correct mode before executing the function. */
.text
.align 0
.force_thumb
.macro call_via register
THUMB_FUNC_START _call_via_\register
bx \register
nop
SIZE (_call_via_\register)
.endm
call_via r0
call_via r1
call_via r2
call_via r3
call_via r4
call_via r5
call_via r6
call_via r7
call_via r8
call_via r9
call_via sl
call_via fp
call_via ip
call_via sp
call_via lr
#endif /* L_call_via_rX */
/* Don't bother with the old interworking routines for Thumb-2. */
/* ??? Maybe only omit these on "m" variants. */
#if !defined(__thumb2__) && __ARM_ARCH_ISA_ARM
#if defined L_interwork_call_via_rX
/* These labels & instructions are used by the Arm/Thumb interworking code,
when the target address is in an unknown instruction set. The address
of function to be called is loaded into a register and then one of these
labels is called via a BL instruction. This puts the return address
into the link register with the bottom bit set, and the code here
switches to the correct mode before executing the function. Unfortunately
the target code cannot be relied upon to return via a BX instruction, so
instead we have to store the return address on the stack and allow the
called function to return here instead. Upon return we recover the real
return address and use a BX to get back to Thumb mode.
There are three variations of this code. The first,
_interwork_call_via_rN(), will push the return address onto the
stack and pop it in _arm_return(). It should only be used if all
arguments are passed in registers.
The second, _interwork_r7_call_via_rN(), instead stores the return
address at [r7, #-4]. It is the caller's responsibility to ensure
that this address is valid and contains no useful data.
The third, _interwork_r11_call_via_rN(), works in the same way but
uses r11 instead of r7. It is useful if the caller does not really
need a frame pointer. */
.text
.align 0
.code 32
.globl _arm_return
LSYM(Lstart_arm_return):
cfi_start LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
cfi_push 0, 0xe, -0x8, 0x8
nop @ This nop is for the benefit of debuggers, so that
@ backtraces will use the correct unwind information.
_arm_return:
RETLDM unwind=LSYM(Lstart_arm_return)
cfi_end LSYM(Lend_arm_return)
.globl _arm_return_r7
_arm_return_r7:
ldr lr, [r7, #-4]
bx lr
.globl _arm_return_r11
_arm_return_r11:
ldr lr, [r11, #-4]
bx lr
.macro interwork_with_frame frame, register, name, return
.code 16
THUMB_FUNC_START \name
bx pc
nop
.code 32
tst \register, #1
streq lr, [\frame, #-4]
adreq lr, _arm_return_\frame
bx \register
SIZE (\name)
.endm
.macro interwork register
.code 16
THUMB_FUNC_START _interwork_call_via_\register
bx pc
nop
.code 32
.globl LSYM(Lchange_\register)
LSYM(Lchange_\register):
tst \register, #1
streq lr, [sp, #-8]!
adreq lr, _arm_return
bx \register
SIZE (_interwork_call_via_\register)
interwork_with_frame r7,\register,_interwork_r7_call_via_\register
interwork_with_frame r11,\register,_interwork_r11_call_via_\register
.endm
interwork r0
interwork r1
interwork r2
interwork r3
interwork r4
interwork r5
interwork r6
interwork r7
interwork r8
interwork r9
interwork sl
interwork fp
interwork ip
interwork sp
/* The LR case has to be handled a little differently... */
.code 16
THUMB_FUNC_START _interwork_call_via_lr
bx pc
nop
.code 32
.globl .Lchange_lr
.Lchange_lr:
tst lr, #1
stmeqdb r13!, {lr, pc}
mov ip, lr
adreq lr, _arm_return
bx ip
SIZE (_interwork_call_via_lr)
#endif /* L_interwork_call_via_rX */
#endif /* !__thumb2__ */
/* Functions to support compact pic switch tables in thumb1 state.
All these routines take an index into the table in r0. The
table is at LR & ~1 (but this must be rounded up in the case
of 32-bit entries). They are only permitted to clobber r12
and r14, and r0 must be preserved on exit. */
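/* The contract above amounts to the following computation, shown here
   in C for the signed-byte variant (illustrative; the function name is
   not part of the ABI):

     // lr is the raw return address left by the BL, bit 0 set for Thumb.
     unsigned long thumb1_case_sqi (unsigned index, unsigned long lr)
     {
       const signed char *table = (const signed char *) (lr & ~1UL);
       return lr + 2UL * table[index];    // new target; bit 0 stays set
     }

   The uqi/shi/uhi/si variants below differ only in the entry type and,
   for the si case, in rounding the table base up to a word boundary.  */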
#ifdef L_thumb1_case_sqi
.text
.align 0
.force_thumb
.syntax unified
THUMB_FUNC_START __gnu_thumb1_case_sqi
push {r1}
mov r1, lr
lsrs r1, r1, #1
lsls r1, r1, #1
ldrsb r1, [r1, r0]
lsls r1, r1, #1
add lr, lr, r1
pop {r1}
bx lr
SIZE (__gnu_thumb1_case_sqi)
#endif
#ifdef L_thumb1_case_uqi
.text
.align 0
.force_thumb
.syntax unified
THUMB_FUNC_START __gnu_thumb1_case_uqi
push {r1}
mov r1, lr
lsrs r1, r1, #1
lsls r1, r1, #1
ldrb r1, [r1, r0]
lsls r1, r1, #1
add lr, lr, r1
pop {r1}
bx lr
SIZE (__gnu_thumb1_case_uqi)
#endif
#ifdef L_thumb1_case_shi
.text
.align 0
.force_thumb
.syntax unified
THUMB_FUNC_START __gnu_thumb1_case_shi
push {r0, r1}
mov r1, lr
lsrs r1, r1, #1
lsls r0, r0, #1
lsls r1, r1, #1
ldrsh r1, [r1, r0]
lsls r1, r1, #1
add lr, lr, r1
pop {r0, r1}
bx lr
SIZE (__gnu_thumb1_case_shi)
#endif
#ifdef L_thumb1_case_uhi
.text
.align 0
.force_thumb
.syntax unified
THUMB_FUNC_START __gnu_thumb1_case_uhi
push {r0, r1}
mov r1, lr
lsrs r1, r1, #1
lsls r0, r0, #1
lsls r1, r1, #1
ldrh r1, [r1, r0]
lsls r1, r1, #1
add lr, lr, r1
pop {r0, r1}
bx lr
SIZE (__gnu_thumb1_case_uhi)
#endif
#ifdef L_thumb1_case_si
.text
.align 0
.force_thumb
.syntax unified
THUMB_FUNC_START __gnu_thumb1_case_si
push {r0, r1}
mov r1, lr
adds.n r1, r1, #2 /* Align to word. */
lsrs r1, r1, #2
lsls r0, r0, #2
lsls r1, r1, #2
ldr r0, [r1, r0]
adds r0, r0, r1
mov lr, r0
pop {r0, r1}
mov pc, lr /* We know we were called from thumb code. */
SIZE (__gnu_thumb1_case_si)
#endif
#endif /* Arch supports thumb. */
.macro CFI_START_FUNCTION
.cfi_startproc
.cfi_remember_state
.endm
.macro CFI_END_FUNCTION
.cfi_restore_state
.cfi_endproc
.endm
#ifndef __symbian__
/* The condition here must match the one in gcc/config/arm/elf.h and
libgcc/config/arm/t-elf. */
#ifndef NOT_ISA_TARGET_32BIT
#include "ieee754-df.S"
#include "ieee754-sf.S"
#include "bpabi.S"
#else /* NOT_ISA_TARGET_32BIT */
#include "bpabi-v6m.S"
#endif /* NOT_ISA_TARGET_32BIT */
#endif /* !__symbian__ */
| 4ms/metamodule-plugin-sdk | 1,698 | plugin-libc/libgcc/config/pru/mpyll.S |
/* Copyright (C) 2014-2022 Free Software Foundation, Inc.
Contributed by Dimitar Dimitrov <dimitar@dinux.eu>
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>.
(al + C * ah) * (bl + C * bh) = al * bl
+ C * ah * bl
+ C * al * bh
+ C * C * ah * bh -> discard, overflow
Where C=(1 << 32)
*/
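/* A C rendering of the identity above, truncated to 64 bits
   (illustrative only; the names do not match the register ABI used
   below):

     #include <stdint.h>

     uint64_t mpyll (uint64_t a, uint64_t b)
     {
       uint32_t al = (uint32_t) a, ah = (uint32_t) (a >> 32);
       uint32_t bl = (uint32_t) b, bh = (uint32_t) (b >> 32);
       uint64_t low   = (uint64_t) al * bl;
       uint32_t cross = ah * bl + al * bh;   // kept mod 2^32; the
                                             // C*C*ah*bh term overflows away
       return low + ((uint64_t) cross << 32);
     }
*/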
#include "pru-asm.h"
.section .text.__pruabi_mpyll, "ax"
.global SYM(__pruabi_mpyll)
FUNC(__pruabi_mpyll)
SYM(__pruabi_mpyll):
/* + C * ah * bl */
mov r28, r15
mov r29, r16
nop
xin 0, r26, 8
/* + C * al * bh */
mov r28, r14
mov r29, r17
mov r15, r26 /* "Lose" ah; record only reslo. */
xin 0, r26, 8
/* + al * bl */
/* mov r28, r14 -> No need, already loaded. */
mov r29, r16
add r15, r15, r26
xin 0, r26, 8
mov r14, r26
add r15, r15, r27
ret
ENDFUNC(__pruabi_mpyll)
| 4ms/metamodule-plugin-sdk | 1,477 | plugin-libc/libgcc/config/xtensa/crtn.S |
# End of .init and .fini sections.
# Copyright (C) 2003-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
#include "xtensa-config.h"
.section .init
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
retw
#else
l32i a0, sp, 0
addi sp, sp, 32
ret
#endif
.section .fini
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
retw
#else
l32i a0, sp, 0
addi sp, sp, 32
ret
#endif
| 4ms/metamodule-plugin-sdk | 40,815 | plugin-libc/libgcc/config/xtensa/ieee754-sf.S |
/* IEEE-754 single-precision functions for Xtensa
Copyright (C) 2006-2022 Free Software Foundation, Inc.
Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifdef __XTENSA_EB__
#define xh a2
#define xl a3
#define yh a4
#define yl a5
#else
#define xh a3
#define xl a2
#define yh a5
#define yl a4
#endif
/* Warning! The branch displacements for some Xtensa branch instructions
are quite small, and this code has been carefully laid out to keep
branch targets in range. If you change anything, be sure to check that
the assembler is not relaxing anything to branch over a jump. */
#ifdef L_negsf2
.align 4
.global __negsf2
.type __negsf2, @function
__negsf2:
leaf_entry sp, 16
movi a4, 0x80000000
xor a2, a2, a4
leaf_return
#endif /* L_negsf2 */
#ifdef L_addsubsf3
.literal_position
/* Addition */
__addsf3_aux:
/* Handle NaNs and Infinities. (This code is placed before the
start of the function just to keep it in range of the limited
branch displacements.) */
.Ladd_xnan_or_inf:
/* If y is neither Infinity nor NaN, return x. */
bnall a3, a6, .Ladd_return_nan_or_inf
/* If x is a NaN, return it. Otherwise, return y. */
slli a7, a2, 9
bnez a7, .Ladd_return_nan
.Ladd_ynan_or_inf:
/* Return y. */
mov a2, a3
.Ladd_return_nan_or_inf:
slli a7, a2, 9
bnez a7, .Ladd_return_nan
leaf_return
.Ladd_return_nan:
movi a6, 0x400000 /* make it a quiet NaN */
or a2, a2, a6
leaf_return
.Ladd_opposite_signs:
/* Operand signs differ. Do a subtraction. */
slli a7, a6, 8
xor a3, a3, a7
j .Lsub_same_sign
.align 4
.global __addsf3
.type __addsf3, @function
__addsf3:
leaf_entry sp, 16
movi a6, 0x7f800000
/* Check if the two operands have the same sign. */
xor a7, a2, a3
bltz a7, .Ladd_opposite_signs
.Ladd_same_sign:
/* Check if either exponent == 0x7f8 (i.e., NaN or Infinity). */
ball a2, a6, .Ladd_xnan_or_inf
ball a3, a6, .Ladd_ynan_or_inf
/* Compare the exponents. The smaller operand will be shifted
right by the exponent difference and added to the larger
one. */
extui a7, a2, 23, 9
extui a8, a3, 23, 9
bltu a7, a8, .Ladd_shiftx
.Ladd_shifty:
/* Check if the smaller (or equal) exponent is zero. */
bnone a3, a6, .Ladd_yexpzero
/* Replace y sign/exponent with 0x008. */
or a3, a3, a6
slli a3, a3, 8
srli a3, a3, 8
.Ladd_yexpdiff:
/* Compute the exponent difference. */
sub a10, a7, a8
/* Exponent difference > 32 -- just return the bigger value. */
bgeui a10, 32, 1f
/* Shift y right by the exponent difference. Any bits that are
shifted out of y are saved in a9 for rounding the result. */
ssr a10
movi a9, 0
src a9, a3, a9
srl a3, a3
/* Do the addition. */
add a2, a2, a3
/* Check if the add overflowed into the exponent. */
extui a10, a2, 23, 9
beq a10, a7, .Ladd_round
mov a8, a7
j .Ladd_carry
.Ladd_yexpzero:
/* y is a subnormal value. Replace its sign/exponent with zero,
i.e., no implicit "1.0", and increment the apparent exponent
because subnormals behave as if they had the minimum (nonzero)
exponent. Test for the case when both exponents are zero. */
slli a3, a3, 9
srli a3, a3, 9
bnone a2, a6, .Ladd_bothexpzero
addi a8, a8, 1
j .Ladd_yexpdiff
.Ladd_bothexpzero:
/* Both exponents are zero. Handle this as a special case. There
is no need to shift or round, and the normal code for handling
a carry into the exponent field will not work because it
assumes there is an implicit "1.0" that needs to be added. */
add a2, a2, a3
1: leaf_return
.Ladd_xexpzero:
/* Same as "yexpzero" except skip handling the case when both
exponents are zero. */
slli a2, a2, 9
srli a2, a2, 9
addi a7, a7, 1
j .Ladd_xexpdiff
.Ladd_shiftx:
/* Same thing as the "shifty" code, but with x and y swapped. Also,
because the exponent difference is always nonzero in this version,
the shift sequence can use SLL and skip loading a constant zero. */
bnone a2, a6, .Ladd_xexpzero
or a2, a2, a6
slli a2, a2, 8
srli a2, a2, 8
.Ladd_xexpdiff:
sub a10, a8, a7
bgeui a10, 32, .Ladd_returny
ssr a10
sll a9, a2
srl a2, a2
add a2, a2, a3
/* Check if the add overflowed into the exponent. */
extui a10, a2, 23, 9
bne a10, a8, .Ladd_carry
.Ladd_round:
/* Round up if the leftover fraction is >= 1/2. */
bgez a9, 1f
addi a2, a2, 1
/* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1
beqz a9, .Ladd_exactlyhalf
1: leaf_return
.Ladd_returny:
mov a2, a3
leaf_return
.Ladd_carry:
/* The addition has overflowed into the exponent field, so the
value needs to be renormalized. The mantissa of the result
can be recovered by subtracting the original exponent and
adding 0x800000 (which is the explicit "1.0" for the
mantissa of the non-shifted operand -- the "1.0" for the
shifted operand was already added). The mantissa can then
be shifted right by one bit. The explicit "1.0" of the
shifted mantissa then needs to be replaced by the exponent,
incremented by one to account for the normalizing shift.
It is faster to combine these operations: do the shift first
and combine the additions and subtractions. If x is the
original exponent, the result is:
shifted mantissa - (x << 22) + (1 << 22) + (x << 23)
or:
shifted mantissa + ((x + 1) << 22)
Note that the exponent is incremented here by leaving the
explicit "1.0" of the mantissa in the exponent field. */
/* Shift x right by one bit. Save the lsb. */
mov a10, a2
srli a2, a2, 1
/* See explanation above. The original exponent is in a8. */
addi a8, a8, 1
slli a8, a8, 22
add a2, a2, a8
/* Return an Infinity if the exponent overflowed. */
ball a2, a6, .Ladd_infinity
/* Same thing as the "round" code except the msb of the leftover
fraction is bit 0 of a10, with the rest of the fraction in a9. */
bbci.l a10, 0, 1f
addi a2, a2, 1
beqz a9, .Ladd_exactlyhalf
1: leaf_return
.Ladd_infinity:
/* Clear the mantissa. */
srli a2, a2, 23
slli a2, a2, 23
/* The sign bit may have been lost in a carry-out. Put it back. */
slli a8, a8, 1
or a2, a2, a8
leaf_return
.Ladd_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
leaf_return
/* Subtraction */
__subsf3_aux:
/* Handle NaNs and Infinities. (This code is placed before the
start of the function just to keep it in range of the limited
branch displacements.) */
.Lsub_xnan_or_inf:
/* If y is neither Infinity nor NaN, return x. */
bnall a3, a6, .Lsub_return_nan_or_inf
/* Both x and y are either NaN or Inf, so the result is NaN. */
.Lsub_return_nan:
movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4
leaf_return
.Lsub_ynan_or_inf:
/* Negate y and return it. */
slli a7, a6, 8
xor a2, a3, a7
.Lsub_return_nan_or_inf:
slli a7, a2, 9
bnez a7, .Lsub_return_nan
leaf_return
.Lsub_opposite_signs:
/* Operand signs differ. Do an addition. */
slli a7, a6, 8
xor a3, a3, a7
j .Ladd_same_sign
.align 4
.global __subsf3
.type __subsf3, @function
__subsf3:
leaf_entry sp, 16
movi a6, 0x7f800000
/* Check if the two operands have the same sign. */
xor a7, a2, a3
bltz a7, .Lsub_opposite_signs
.Lsub_same_sign:
/* Check if either exponent == 0x7f8 (i.e., NaN or Infinity). */
ball a2, a6, .Lsub_xnan_or_inf
ball a3, a6, .Lsub_ynan_or_inf
/* Compare the operands. In contrast to addition, the entire
value matters here. */
extui a7, a2, 23, 8
extui a8, a3, 23, 8
bltu a2, a3, .Lsub_xsmaller
.Lsub_ysmaller:
/* Check if the smaller (or equal) exponent is zero. */
bnone a3, a6, .Lsub_yexpzero
/* Replace y sign/exponent with 0x008. */
or a3, a3, a6
slli a3, a3, 8
srli a3, a3, 8
.Lsub_yexpdiff:
/* Compute the exponent difference. */
sub a10, a7, a8
/* Exponent difference > 32 -- just return the bigger value. */
bgeui a10, 32, 1f
/* Shift y right by the exponent difference. Any bits that are
shifted out of y are saved in a9 for rounding the result. */
ssr a10
movi a9, 0
src a9, a3, a9
srl a3, a3
sub a2, a2, a3
/* Subtract the leftover bits in a9 from zero and propagate any
borrow from a2. */
neg a9, a9
addi a10, a2, -1
movnez a2, a10, a9
/* Check if the subtract underflowed into the exponent. */
extui a10, a2, 23, 8
beq a10, a7, .Lsub_round
j .Lsub_borrow
.Lsub_yexpzero:
/* Return zero if the inputs are equal. (For the non-subnormal
case, subtracting the "1.0" will cause a borrow from the exponent
and this case can be detected when handling the borrow.) */
beq a2, a3, .Lsub_return_zero
/* y is a subnormal value. Replace its sign/exponent with zero,
i.e., no implicit "1.0". Unless x is also a subnormal, increment
y's apparent exponent because subnormals behave as if they had
the minimum (nonzero) exponent. */
slli a3, a3, 9
srli a3, a3, 9
bnone a2, a6, .Lsub_yexpdiff
addi a8, a8, 1
j .Lsub_yexpdiff
.Lsub_returny:
/* Negate and return y. */
slli a7, a6, 8
xor a2, a3, a7
1: leaf_return
.Lsub_xsmaller:
/* Same thing as the "ysmaller" code, but with x and y swapped and
with y negated. */
bnone a2, a6, .Lsub_xexpzero
or a2, a2, a6
slli a2, a2, 8
srli a2, a2, 8
.Lsub_xexpdiff:
sub a10, a8, a7
bgeui a10, 32, .Lsub_returny
ssr a10
movi a9, 0
src a9, a2, a9
srl a2, a2
/* Negate y. */
slli a11, a6, 8
xor a3, a3, a11
sub a2, a3, a2
neg a9, a9
addi a10, a2, -1
movnez a2, a10, a9
/* Check if the subtract underflowed into the exponent. */
extui a10, a2, 23, 8
bne a10, a8, .Lsub_borrow
.Lsub_round:
/* Round up if the leftover fraction is >= 1/2. */
bgez a9, 1f
addi a2, a2, 1
/* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1
beqz a9, .Lsub_exactlyhalf
1: leaf_return
.Lsub_xexpzero:
/* Same as "yexpzero". */
beq a2, a3, .Lsub_return_zero
slli a2, a2, 9
srli a2, a2, 9
bnone a3, a6, .Lsub_xexpdiff
addi a7, a7, 1
j .Lsub_xexpdiff
.Lsub_return_zero:
movi a2, 0
leaf_return
.Lsub_borrow:
/* The subtraction has underflowed into the exponent field, so the
value needs to be renormalized. Shift the mantissa left as
needed to remove any leading zeros and adjust the exponent
accordingly. If the exponent is not large enough to remove
all the leading zeros, the result will be a subnormal value. */
slli a8, a2, 9
beqz a8, .Lsub_xzero
do_nsau a6, a8, a7, a11
srli a8, a8, 9
bge a6, a10, .Lsub_subnormal
addi a6, a6, 1
.Lsub_normalize_shift:
/* Shift the mantissa (a8/a9) left by a6. */
ssl a6
src a8, a8, a9
sll a9, a9
/* Combine the shifted mantissa with the sign and exponent,
decrementing the exponent by a6. (The exponent has already
been decremented by one due to the borrow from the subtraction,
but adding the mantissa will increment the exponent by one.) */
srli a2, a2, 23
sub a2, a2, a6
slli a2, a2, 23
add a2, a2, a8
j .Lsub_round
.Lsub_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
leaf_return
.Lsub_xzero:
/* If there was a borrow from the exponent, and the mantissa and
guard digits are all zero, then the inputs were equal and the
result should be zero. */
beqz a9, .Lsub_return_zero
/* Only the guard digit is nonzero. Shift by min(24, a10). */
addi a11, a10, -24
movi a6, 24
movltz a6, a10, a11
j .Lsub_normalize_shift
.Lsub_subnormal:
/* The exponent is too small to shift away all the leading zeros.
Set a6 to the current exponent (which has already been
decremented by the borrow) so that the exponent of the result
will be zero. Do not add 1 to a6 in this case, because: (1)
adding the mantissa will not increment the exponent, so there is
no need to subtract anything extra from the exponent to
compensate, and (2) the effective exponent of a subnormal is 1
not 0 so the shift amount must be 1 smaller than normal. */
mov a6, a10
j .Lsub_normalize_shift
#endif /* L_addsubsf3 */
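/* The rounding step shared by .Ladd_round and .Lsub_round above can be
   sketched in C.  'frac' is the word of shifted-out bits (a9 in the
   asm), left-justified; 'word' is the sign/exponent/mantissa word being
   assembled (a2):

     unsigned round_nearest_even (unsigned word, unsigned frac)
     {
       if (frac & 0x80000000u) {       // leftover fraction >= 1/2
         word += 1;                    // a carry into the exponent is benign
         if ((frac << 1) == 0)         // exactly 1/2: break the tie
           word &= ~1u;                //   by rounding down to even
       }
       return word;
     }
*/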
#ifdef L_mulsf3
/* Multiplication */
#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
#define XCHAL_NO_MUL 1
#endif
.literal_position
__mulsf3_aux:
/* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
(This code is placed before the start of the function just to
keep it in range of the limited branch displacements.) */
.Lmul_xexpzero:
/* Clear the sign bit of x. */
slli a2, a2, 1
srli a2, a2, 1
/* If x is zero, return zero. */
beqz a2, .Lmul_return_zero
/* Normalize x. Adjust the exponent in a8. */
do_nsau a10, a2, a11, a12
addi a10, a10, -8
ssl a10
sll a2, a2
movi a8, 1
sub a8, a8, a10
j .Lmul_xnormalized
.Lmul_yexpzero:
/* Clear the sign bit of y. */
slli a3, a3, 1
srli a3, a3, 1
/* If y is zero, return zero. */
beqz a3, .Lmul_return_zero
/* Normalize y. Adjust the exponent in a9. */
do_nsau a10, a3, a11, a12
addi a10, a10, -8
ssl a10
sll a3, a3
movi a9, 1
sub a9, a9, a10
j .Lmul_ynormalized
.Lmul_return_zero:
/* Return zero with the appropriate sign bit. */
srli a2, a7, 31
slli a2, a2, 31
j .Lmul_done
.Lmul_xnan_or_inf:
/* If y is zero, return NaN. */
slli a8, a3, 1
beqz a8, .Lmul_return_nan
/* If y is NaN, return y. */
bnall a3, a6, .Lmul_returnx
slli a8, a3, 9
beqz a8, .Lmul_returnx
.Lmul_returny:
mov a2, a3
.Lmul_returnx:
slli a8, a2, 9
bnez a8, .Lmul_return_nan
/* Set the sign bit and return. */
extui a7, a7, 31, 1
slli a2, a2, 1
ssai 1
src a2, a7, a2
j .Lmul_done
.Lmul_ynan_or_inf:
/* If x is zero, return NaN. */
slli a8, a2, 1
bnez a8, .Lmul_returny
mov a2, a3
.Lmul_return_nan:
movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4
j .Lmul_done
.align 4
.global __mulsf3
.type __mulsf3, @function
__mulsf3:
#if __XTENSA_CALL0_ABI__
leaf_entry sp, 32
addi sp, sp, -32
s32i a12, sp, 16
s32i a13, sp, 20
s32i a14, sp, 24
s32i a15, sp, 28
#elif XCHAL_NO_MUL
/* This is not really a leaf function; allocate enough stack space
to allow CALL12s to a helper function. */
leaf_entry sp, 64
#else
leaf_entry sp, 32
#endif
movi a6, 0x7f800000
/* Get the sign of the result. */
xor a7, a2, a3
/* Check for NaN and infinity. */
ball a2, a6, .Lmul_xnan_or_inf
ball a3, a6, .Lmul_ynan_or_inf
/* Extract the exponents. */
extui a8, a2, 23, 8
extui a9, a3, 23, 8
beqz a8, .Lmul_xexpzero
.Lmul_xnormalized:
beqz a9, .Lmul_yexpzero
.Lmul_ynormalized:
/* Add the exponents. */
add a8, a8, a9
/* Replace sign/exponent fields with explicit "1.0". */
movi a10, 0xffffff
or a2, a2, a6
and a2, a2, a10
or a3, a3, a6
and a3, a3, a10
/* Multiply 32x32 to 64 bits. The result ends up in a2/a6. */
#if XCHAL_HAVE_MUL32_HIGH
mull a6, a2, a3
muluh a2, a2, a3
#else
/* Break the inputs into 16-bit chunks and compute 4 32-bit partial
products. These partial products are:
0 xl * yl
1 xl * yh
2 xh * yl
3 xh * yh
If using the Mul16 or Mul32 multiplier options, these input
chunks must be stored in separate registers. For Mac16, the
UMUL.AA.* opcodes can specify that the inputs come from either
half of the registers, so there is no need to shift them out
ahead of time. If there is no multiply hardware, the 16-bit
chunks can be extracted when setting up the arguments to the
separate multiply function. */
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
/* Calling a separate multiply function will clobber a0 and requires
use of a8 as a temporary, so save those values now. (The function
uses a custom ABI so nothing else needs to be saved.) */
s32i a0, sp, 0
s32i a8, sp, 4
#endif
#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
#define a2h a4
#define a3h a5
/* Get the high halves of the inputs into registers. */
srli a2h, a2, 16
srli a3h, a3, 16
#define a2l a2
#define a3l a3
#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
/* Clear the high halves of the inputs. This does not matter
for MUL16 because the high bits are ignored. */
extui a2, a2, 0, 16
extui a3, a3, 0, 16
#endif
#endif /* MUL16 || MUL32 */
#if XCHAL_HAVE_MUL16
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
mul16u dst, xreg ## xhalf, yreg ## yhalf
#elif XCHAL_HAVE_MUL32
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
mull dst, xreg ## xhalf, yreg ## yhalf
#elif XCHAL_HAVE_MAC16
/* The preprocessor insists on inserting a space when concatenating after
a period in the definition of do_mul below. These macros are a workaround
using underscores instead of periods when doing the concatenation. */
#define umul_aa_ll umul.aa.ll
#define umul_aa_lh umul.aa.lh
#define umul_aa_hl umul.aa.hl
#define umul_aa_hh umul.aa.hh
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
umul_aa_ ## xhalf ## yhalf xreg, yreg; \
rsr dst, ACCLO
#else /* no multiply hardware */
#define set_arg_l(dst, src) \
extui dst, src, 0, 16
#define set_arg_h(dst, src) \
srli dst, src, 16
#if __XTENSA_CALL0_ABI__
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
set_arg_ ## xhalf (a13, xreg); \
set_arg_ ## yhalf (a14, yreg); \
call0 .Lmul_mulsi3; \
mov dst, a12
#else
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
set_arg_ ## xhalf (a14, xreg); \
set_arg_ ## yhalf (a15, yreg); \
call12 .Lmul_mulsi3; \
mov dst, a14
#endif /* __XTENSA_CALL0_ABI__ */
#endif /* no multiply hardware */
/* Add pp1 and pp2 into a6 with carry-out in a9. */
do_mul(a6, a2, l, a3, h) /* pp 1 */
do_mul(a11, a2, h, a3, l) /* pp 2 */
movi a9, 0
add a6, a6, a11
bgeu a6, a11, 1f
addi a9, a9, 1
1:
/* Shift the high half of a9/a6 into position in a9. Note that
this value can be safely incremented without any carry-outs. */
ssai 16
src a9, a9, a6
/* Compute the low word into a6. */
do_mul(a11, a2, l, a3, l) /* pp 0 */
sll a6, a6
add a6, a6, a11
bgeu a6, a11, 1f
addi a9, a9, 1
1:
/* Compute the high word into a2. */
do_mul(a2, a2, h, a3, h) /* pp 3 */
add a2, a2, a9
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
/* Restore values saved on the stack during the multiplication. */
l32i a0, sp, 0
l32i a8, sp, 4
#endif
#endif /* ! XCHAL_HAVE_MUL32_HIGH */
/* Shift left by 9 bits, unless there was a carry-out from the
multiply, in which case, shift by 8 bits and increment the
exponent. */
movi a4, 9
srli a5, a2, 24 - 9
beqz a5, 1f
addi a4, a4, -1
addi a8, a8, 1
1: ssl a4
src a2, a2, a6
sll a6, a6
/* Subtract the extra bias from the exponent sum (plus one to account
for the explicit "1.0" of the mantissa that will be added to the
exponent in the final result). */
movi a4, 0x80
sub a8, a8, a4
/* Check for over/underflow. The value in a8 is one less than the
final exponent, so values in the range 0..fd are OK here. */
movi a4, 0xfe
bgeu a8, a4, .Lmul_overflow
.Lmul_round:
/* Round. */
bgez a6, .Lmul_rounded
addi a2, a2, 1
slli a6, a6, 1
beqz a6, .Lmul_exactlyhalf
.Lmul_rounded:
/* Add the exponent to the mantissa. */
slli a8, a8, 23
add a2, a2, a8
.Lmul_addsign:
/* Add the sign bit. */
srli a7, a7, 31
slli a7, a7, 31
or a2, a2, a7
.Lmul_done:
#if __XTENSA_CALL0_ABI__
l32i a12, sp, 16
l32i a13, sp, 20
l32i a14, sp, 24
l32i a15, sp, 28
addi sp, sp, 32
#endif
leaf_return
.Lmul_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
j .Lmul_rounded
.Lmul_overflow:
bltz a8, .Lmul_underflow
/* Return +/- Infinity. */
movi a8, 0xff
slli a2, a8, 23
j .Lmul_addsign
.Lmul_underflow:
/* Create a subnormal value, where the exponent field contains zero,
but the effective exponent is 1. The value of a8 is one less than
the actual exponent, so just negate it to get the shift amount. */
neg a8, a8
mov a9, a6
ssr a8
bgeui a8, 32, .Lmul_flush_to_zero
/* Shift a2 right. Any bits that are shifted out of a2 are saved
in a6 (combined with the shifted-out bits currently in a6) for
rounding the result. */
sll a6, a2
srl a2, a2
/* Set the exponent to zero. */
movi a8, 0
/* Pack any nonzero bits shifted out into a6. */
beqz a9, .Lmul_round
movi a9, 1
or a6, a6, a9
j .Lmul_round
.Lmul_flush_to_zero:
/* Return zero with the appropriate sign bit. */
srli a2, a7, 31
slli a2, a2, 31
j .Lmul_done
#if XCHAL_NO_MUL
/* For Xtensa processors with no multiply hardware, this simplified
version of _mulsi3 is used for multiplying 16-bit chunks of
the floating-point mantissas. When using CALL0, this function
uses a custom ABI: the inputs are passed in a13 and a14, the
result is returned in a12, and a8 and a15 are clobbered. */
.align 4
.Lmul_mulsi3:
leaf_entry sp, 16
.macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
movi \dst, 0
1: add \tmp1, \src2, \dst
extui \tmp2, \src1, 0, 1
movnez \dst, \tmp1, \tmp2
do_addx2 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 1, 1
movnez \dst, \tmp1, \tmp2
do_addx4 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 2, 1
movnez \dst, \tmp1, \tmp2
do_addx8 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 3, 1
movnez \dst, \tmp1, \tmp2
srli \src1, \src1, 4
slli \src2, \src2, 4
bnez \src1, 1b
.endm
#if __XTENSA_CALL0_ABI__
mul_mulsi3_body a12, a13, a14, a15, a8
#else
/* The result will be written into a2, so save that argument in a4. */
mov a4, a2
mul_mulsi3_body a2, a4, a3, a5, a6
#endif
leaf_return
#endif /* XCHAL_NO_MUL */
#endif /* L_mulsf3 */
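/* The partial-product scheme used above corresponds to this C sketch of
   a 32x32 -> 64 bit multiply (the asm tracks the pp1 + pp2 carry
   explicitly in a9 instead of using a 64-bit accumulator):

     unsigned long long mul32x32 (unsigned x, unsigned y)
     {
       unsigned xl = x & 0xffff, xh = x >> 16;
       unsigned yl = y & 0xffff, yh = y >> 16;
       unsigned long long pp0 = (unsigned long long) xl * yl;
       unsigned long long pp1 = (unsigned long long) xl * yh;
       unsigned long long pp2 = (unsigned long long) xh * yl;
       unsigned long long pp3 = (unsigned long long) xh * yh;
       return pp0 + ((pp1 + pp2) << 16) + (pp3 << 32);
     }
*/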
#ifdef L_divsf3
/* Division */
#if XCHAL_HAVE_FP_DIV
.align 4
.global __divsf3
.type __divsf3, @function
__divsf3:
leaf_entry sp, 16
wfr f1, a2 /* dividend */
wfr f2, a3 /* divisor */
div0.s f3, f2
nexp01.s f4, f2
const.s f5, 1
maddn.s f5, f4, f3
mov.s f6, f3
mov.s f7, f2
nexp01.s f2, f1
maddn.s f6, f5, f6
const.s f5, 1
const.s f0, 0
neg.s f8, f2
maddn.s f5, f4, f6
maddn.s f0, f8, f3
mkdadj.s f7, f1
maddn.s f6, f5, f6
maddn.s f8, f4, f0
const.s f3, 1
maddn.s f3, f4, f6
maddn.s f0, f8, f6
neg.s f2, f2
maddn.s f6, f3, f6
maddn.s f2, f4, f0
addexpm.s f0, f7
addexp.s f6, f7
divn.s f0, f2, f6
rfr a2, f0
leaf_return
#else
.literal_position
__divsf3_aux:
/* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
(This code is placed before the start of the function just to
keep it in range of the limited branch displacements.) */
.Ldiv_yexpzero:
/* Clear the sign bit of y. */
slli a3, a3, 1
srli a3, a3, 1
/* Check for division by zero. */
beqz a3, .Ldiv_yzero
/* Normalize y. Adjust the exponent in a9. */
do_nsau a10, a3, a4, a5
addi a10, a10, -8
ssl a10
sll a3, a3
movi a9, 1
sub a9, a9, a10
j .Ldiv_ynormalized
.Ldiv_yzero:
/* y is zero. Return NaN if x is also zero; otherwise, infinity. */
slli a4, a2, 1
srli a4, a4, 1
srli a2, a7, 31
slli a2, a2, 31
or a2, a2, a6
bnez a4, 1f
movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4
1: leaf_return
.Ldiv_xexpzero:
/* Clear the sign bit of x. */
slli a2, a2, 1
srli a2, a2, 1
/* If x is zero, return zero. */
beqz a2, .Ldiv_return_zero
/* Normalize x. Adjust the exponent in a8. */
do_nsau a10, a2, a4, a5
addi a10, a10, -8
ssl a10
sll a2, a2
movi a8, 1
sub a8, a8, a10
j .Ldiv_xnormalized
.Ldiv_return_zero:
/* Return zero with the appropriate sign bit. */
srli a2, a7, 31
slli a2, a2, 31
leaf_return
.Ldiv_xnan_or_inf:
/* Set the sign bit of the result. */
srli a7, a3, 31
slli a7, a7, 31
xor a2, a2, a7
/* If y is NaN or Inf, return NaN. */
ball a3, a6, .Ldiv_return_nan
slli a7, a2, 9
bnez a7, .Ldiv_return_nan
leaf_return
.Ldiv_ynan_or_inf:
/* If y is Infinity, return zero. */
slli a8, a3, 9
beqz a8, .Ldiv_return_zero
/* y is NaN; return it. */
mov a2, a3
.Ldiv_return_nan:
movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4
leaf_return
.align 4
.global __divsf3
.type __divsf3, @function
__divsf3:
leaf_entry sp, 16
movi a6, 0x7f800000
/* Get the sign of the result. */
xor a7, a2, a3
/* Check for NaN and infinity. */
ball a2, a6, .Ldiv_xnan_or_inf
ball a3, a6, .Ldiv_ynan_or_inf
/* Extract the exponents. */
extui a8, a2, 23, 8
extui a9, a3, 23, 8
beqz a9, .Ldiv_yexpzero
.Ldiv_ynormalized:
beqz a8, .Ldiv_xexpzero
.Ldiv_xnormalized:
/* Subtract the exponents. */
sub a8, a8, a9
/* Replace sign/exponent fields with explicit "1.0". */
movi a10, 0xffffff
or a2, a2, a6
and a2, a2, a10
or a3, a3, a6
and a3, a3, a10
/* The first digit of the mantissa division must be a one.
Shift x (and adjust the exponent) as needed to make this true. */
bltu a3, a2, 1f
slli a2, a2, 1
addi a8, a8, -1
1:
/* Do the first subtraction and shift. */
sub a2, a2, a3
slli a2, a2, 1
/* Put the quotient into a10. */
movi a10, 1
/* Divide one bit at a time for 23 bits. */
movi a9, 23
#if XCHAL_HAVE_LOOPS
loop a9, .Ldiv_loopend
#endif
.Ldiv_loop:
/* Shift the quotient << 1. */
slli a10, a10, 1
/* Is this digit a 0 or 1? */
bltu a2, a3, 1f
/* Output a 1 and subtract. */
addi a10, a10, 1
sub a2, a2, a3
/* Shift the dividend << 1. */
1: slli a2, a2, 1
#if !XCHAL_HAVE_LOOPS
addi a9, a9, -1
bnez a9, .Ldiv_loop
#endif
.Ldiv_loopend:
/* Add the exponent bias (less one to account for the explicit "1.0"
of the mantissa that will be added to the exponent in the final
result). */
addi a8, a8, 0x7e
/* Check for over/underflow. The value in a8 is one less than the
final exponent, so values in the range 0..fd are OK here. */
movi a4, 0xfe
bgeu a8, a4, .Ldiv_overflow
.Ldiv_round:
/* Round. The remainder (<< 1) is in a2. */
bltu a2, a3, .Ldiv_rounded
addi a10, a10, 1
beq a2, a3, .Ldiv_exactlyhalf
.Ldiv_rounded:
/* Add the exponent to the mantissa. */
slli a8, a8, 23
add a2, a10, a8
.Ldiv_addsign:
/* Add the sign bit. */
srli a7, a7, 31
slli a7, a7, 31
or a2, a2, a7
leaf_return
.Ldiv_overflow:
bltz a8, .Ldiv_underflow
/* Return +/- Infinity. */
addi a8, a4, 1 /* 0xff */
slli a2, a8, 23
j .Ldiv_addsign
.Ldiv_exactlyhalf:
/* Remainder is exactly half the divisor. Round even. */
srli a10, a10, 1
slli a10, a10, 1
j .Ldiv_rounded
.Ldiv_underflow:
/* Create a subnormal value, where the exponent field contains zero,
but the effective exponent is 1. The value of a8 is one less than
the actual exponent, so just negate it to get the shift amount. */
neg a8, a8
ssr a8
bgeui a8, 32, .Ldiv_flush_to_zero
/* Shift a10 right. Any bits that are shifted out of a10 are
saved in a6 for rounding the result. */
sll a6, a10
srl a10, a10
/* Set the exponent to zero. */
movi a8, 0
/* Pack any nonzero remainder (in a2) into a6. */
beqz a2, 1f
movi a9, 1
or a6, a6, a9
/* Round a10 based on the bits shifted out into a6. */
1: bgez a6, .Ldiv_rounded
addi a10, a10, 1
slli a6, a6, 1
bnez a6, .Ldiv_rounded
srli a10, a10, 1
slli a10, a10, 1
j .Ldiv_rounded
.Ldiv_flush_to_zero:
/* Return zero with the appropriate sign bit. */
srli a2, a7, 31
slli a2, a2, 31
leaf_return
#endif /* XCHAL_HAVE_FP_DIV */
#endif /* L_divsf3 */
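/* The mantissa division loop (.Ldiv_loop) above is plain restoring
   division, one quotient bit per iteration.  A C sketch, entered after
   the "first digit must be a one" adjustment:

     unsigned div_mant (unsigned x, unsigned y, unsigned *rem)
     {
       unsigned q = 1;
       x = (x - y) << 1;                  // first subtraction and shift
       for (int i = 0; i < 23; i++) {
         q <<= 1;
         if (x >= y) { q |= 1; x -= y; }  // emit a 1 bit
         x <<= 1;
       }
       *rem = x;    // remainder already doubled, as .Ldiv_round expects
       return q;
     }
*/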
#ifdef L_cmpsf2
/* Equal and Not Equal */
.align 4
.global __eqsf2
.global __nesf2
.set __nesf2, __eqsf2
.type __eqsf2, @function
__eqsf2:
leaf_entry sp, 16
bne a2, a3, 4f
/* The values are equal but NaN != NaN. Check the exponent. */
movi a6, 0x7f800000
ball a2, a6, 3f
/* Equal. */
movi a2, 0
leaf_return
/* Not equal. */
2: movi a2, 1
leaf_return
/* Check if the mantissas are nonzero. */
3: slli a7, a2, 9
j 5f
/* Check if x and y are zero with different signs. */
4: or a7, a2, a3
slli a7, a7, 1
/* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa
of x when exponent(x) = 0x7f8 and x == y. */
5: movi a2, 0
movi a3, 1
movnez a2, a3, a7
leaf_return
/* Greater Than */
.align 4
.global __gtsf2
.type __gtsf2, @function
__gtsf2:
leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 2f
1: bnall a3, a6, .Lle_cmp
/* Check if y is a NaN. */
slli a7, a3, 9
beqz a7, .Lle_cmp
movi a2, 0
leaf_return
/* Check if x is a NaN. */
2: slli a7, a2, 9
beqz a7, 1b
movi a2, 0
leaf_return
/* Less Than or Equal */
.align 4
.global __lesf2
.type __lesf2, @function
__lesf2:
leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 2f
1: bnall a3, a6, .Lle_cmp
/* Check if y is a NaN. */
slli a7, a3, 9
beqz a7, .Lle_cmp
movi a2, 1
leaf_return
/* Check if x is a NaN. */
2: slli a7, a2, 9
beqz a7, 1b
movi a2, 1
leaf_return
.Lle_cmp:
/* Check if x and y have different signs. */
xor a7, a2, a3
bltz a7, .Lle_diff_signs
/* Check if x is negative. */
bltz a2, .Lle_xneg
/* Check if x <= y. */
bltu a3, a2, 5f
4: movi a2, 0
leaf_return
.Lle_xneg:
/* Check if y <= x. */
bgeu a2, a3, 4b
5: movi a2, 1
leaf_return
.Lle_diff_signs:
bltz a2, 4b
/* Check if both x and y are zero. */
or a7, a2, a3
slli a7, a7, 1
movi a2, 1
movi a3, 0
moveqz a2, a3, a7
leaf_return
/* Greater Than or Equal */
.align 4
.global __gesf2
.type __gesf2, @function
__gesf2:
leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 2f
1: bnall a3, a6, .Llt_cmp
/* Check if y is a NaN. */
slli a7, a3, 9
beqz a7, .Llt_cmp
movi a2, -1
leaf_return
/* Check if x is a NaN. */
2: slli a7, a2, 9
beqz a7, 1b
movi a2, -1
leaf_return
/* Less Than */
.align 4
.global __ltsf2
.type __ltsf2, @function
__ltsf2:
leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 2f
1: bnall a3, a6, .Llt_cmp
/* Check if y is a NaN. */
slli a7, a3, 9
beqz a7, .Llt_cmp
movi a2, 0
leaf_return
/* Check if x is a NaN. */
2: slli a7, a2, 9
beqz a7, 1b
movi a2, 0
leaf_return
.Llt_cmp:
/* Check if x and y have different signs. */
xor a7, a2, a3
bltz a7, .Llt_diff_signs
/* Check if x is negative. */
bltz a2, .Llt_xneg
/* Check if x < y. */
bgeu a2, a3, 5f
4: movi a2, -1
leaf_return
.Llt_xneg:
/* Check if y < x. */
bltu a3, a2, 4b
5: movi a2, 0
leaf_return
.Llt_diff_signs:
bgez a2, 5b
/* Check if both x and y are nonzero. */
or a7, a2, a3
slli a7, a7, 1
movi a2, 0
movi a3, -1
movnez a2, a3, a7
leaf_return
/* Unordered */
.align 4
.global __unordsf2
.type __unordsf2, @function
__unordsf2:
leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 3f
1: ball a3, a6, 4f
2: movi a2, 0
leaf_return
3: slli a7, a2, 9
beqz a7, 1b
movi a2, 1
leaf_return
4: slli a7, a3, 9
beqz a7, 2b
movi a2, 1
leaf_return
#endif /* L_cmpsf2 */
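/* The semantics of __eqsf2/__nesf2 above, sketched in C on the raw bit
   patterns: the result is zero iff the values compare equal, with NaN
   never equal to anything and +0.0 equal to -0.0.

     int eqsf2 (unsigned x, unsigned y)
     {
       if (x == y)                   // bit-identical: unequal only for NaN
         return ((x & 0x7f800000u) == 0x7f800000u && (x << 9) != 0);
       return ((x | y) << 1) != 0;   // differ: still equal if both are zeros
     }
*/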
#ifdef L_fixsfsi
.align 4
.global __fixsfsi
.type __fixsfsi, @function
__fixsfsi:
leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7f800000
ball a2, a6, .Lfixsfsi_nan_or_inf
/* Extract the exponent and check if 0 < (exp - 0x7e) < 32. */
extui a4, a2, 23, 8
addi a4, a4, -0x7e
bgei a4, 32, .Lfixsfsi_maxint
blti a4, 1, .Lfixsfsi_zero
/* Add explicit "1.0" and shift << 8. */
or a7, a2, a6
slli a5, a7, 8
/* Shift back to the right, based on the exponent. */
ssl a4 /* shift by 32 - a4 */
srl a5, a5
/* Negate the result if sign != 0. */
neg a2, a5
movgez a2, a5, a7
leaf_return
.Lfixsfsi_nan_or_inf:
/* Handle Infinity and NaN. */
slli a4, a2, 9
beqz a4, .Lfixsfsi_maxint
/* Translate NaN to +maxint. */
movi a2, 0
.Lfixsfsi_maxint:
slli a4, a6, 8 /* 0x80000000 */
addi a5, a4, -1 /* 0x7fffffff */
movgez a4, a5, a2
mov a2, a4
leaf_return
.Lfixsfsi_zero:
movi a2, 0
leaf_return
#endif /* L_fixsfsi */
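/* The conversion above, restricted to its in-range path (the NaN/Inf,
   maxint and zero cases are the separate labels).  A C sketch, with an
   assumed function name:

     int fixsfsi_inrange (unsigned bits)   // 1 <= exp - 0x7e <= 31
     {
       int shift = ((bits >> 23) & 0xff) - 0x7e;   // integer bits wanted
       unsigned mant = (bits | 0x00800000u) << 8;  // implicit 1.0, msb-justified
       unsigned val  = mant >> (32 - shift);
       return ((int) bits < 0) ? -(int) val : (int) val;
     }
*/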
#ifdef L_fixsfdi
.align 4
.global __fixsfdi
.type __fixsfdi, @function
__fixsfdi:
leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7f800000
ball a2, a6, .Lfixsfdi_nan_or_inf
/* Extract the exponent and check if 0 < (exp - 0x7e) < 64. */
extui a4, a2, 23, 8
addi a4, a4, -0x7e
bgei a4, 64, .Lfixsfdi_maxint
blti a4, 1, .Lfixsfdi_zero
/* Add explicit "1.0" and shift << 8. */
or a7, a2, a6
slli xh, a7, 8
/* Shift back to the right, based on the exponent. */
ssl a4 /* shift by 64 - a4 */
bgei a4, 32, .Lfixsfdi_smallshift
srl xl, xh
movi xh, 0
.Lfixsfdi_shifted:
/* Negate the result if sign != 0. */
bgez a7, 1f
neg xl, xl
neg xh, xh
beqz xl, 1f
addi xh, xh, -1
1: leaf_return
.Lfixsfdi_smallshift:
movi xl, 0
sll xl, xh
srl xh, xh
j .Lfixsfdi_shifted
.Lfixsfdi_nan_or_inf:
/* Handle Infinity and NaN. */
slli a4, a2, 9
beqz a4, .Lfixsfdi_maxint
/* Translate NaN to +maxint. */
movi a2, 0
.Lfixsfdi_maxint:
slli a7, a6, 8 /* 0x80000000 */
bgez a2, 1f
mov xh, a7
movi xl, 0
leaf_return
1: addi xh, a7, -1 /* 0x7fffffff */
movi xl, -1
leaf_return
.Lfixsfdi_zero:
movi xh, 0
movi xl, 0
leaf_return
#endif /* L_fixsfdi */
#ifdef L_fixunssfsi
.align 4
.global __fixunssfsi
.type __fixunssfsi, @function
__fixunssfsi:
leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7f800000
ball a2, a6, .Lfixunssfsi_nan_or_inf
/* Extract the exponent and check if 0 <= (exp - 0x7f) < 32. */
extui a4, a2, 23, 8
addi a4, a4, -0x7f
bgei a4, 32, .Lfixunssfsi_maxint
bltz a4, .Lfixunssfsi_zero
/* Add explicit "1.0" and shift << 8. */
or a7, a2, a6
slli a5, a7, 8
/* Shift back to the right, based on the exponent. */
addi a4, a4, 1
beqi a4, 32, .Lfixunssfsi_bigexp
ssl a4 /* shift by 32 - a4 */
srl a5, a5
/* Negate the result if sign != 0. */
neg a2, a5
movgez a2, a5, a7
leaf_return
.Lfixunssfsi_nan_or_inf:
/* Handle Infinity and NaN. */
slli a4, a2, 9
beqz a4, .Lfixunssfsi_maxint
/* Translate NaN to 0xffffffff. */
movi a2, -1
leaf_return
.Lfixunssfsi_maxint:
slli a4, a6, 8 /* 0x80000000 */
movi a5, -1 /* 0xffffffff */
movgez a4, a5, a2
mov a2, a4
leaf_return
.Lfixunssfsi_zero:
movi a2, 0
leaf_return
.Lfixunssfsi_bigexp:
/* Handle unsigned maximum exponent case. */
bltz a2, 1f
mov a2, a5 /* no shift needed */
leaf_return
/* Return 0x80000000 if negative. */
1: slli a2, a6, 8
leaf_return
#endif /* L_fixunssfsi */
#ifdef L_fixunssfdi
.align 4
.global __fixunssfdi
.type __fixunssfdi, @function
__fixunssfdi:
leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7f800000
ball a2, a6, .Lfixunssfdi_nan_or_inf
/* Extract the exponent and check if 0 <= (exp - 0x7f) < 64. */
extui a4, a2, 23, 8
addi a4, a4, -0x7f
bgei a4, 64, .Lfixunssfdi_maxint
bltz a4, .Lfixunssfdi_zero
/* Add explicit "1.0" and shift << 8. */
or a7, a2, a6
slli xh, a7, 8
/* Shift back to the right, based on the exponent. */
addi a4, a4, 1
beqi a4, 64, .Lfixunssfdi_bigexp
ssl a4 /* shift by 64 - a4 */
bgei a4, 32, .Lfixunssfdi_smallshift
srl xl, xh
movi xh, 0
.Lfixunssfdi_shifted:
/* Negate the result if sign != 0. */
bgez a7, 1f
neg xl, xl
neg xh, xh
beqz xl, 1f
addi xh, xh, -1
1: leaf_return
.Lfixunssfdi_smallshift:
movi xl, 0
src xl, xh, xl
srl xh, xh
j .Lfixunssfdi_shifted
.Lfixunssfdi_nan_or_inf:
/* Handle Infinity and NaN. */
slli a4, a2, 9
beqz a4, .Lfixunssfdi_maxint
/* Translate NaN to 0xffffffff.... */
1: movi xh, -1
movi xl, -1
leaf_return
.Lfixunssfdi_maxint:
bgez a2, 1b
2: slli xh, a6, 8 /* 0x80000000 */
movi xl, 0
leaf_return
.Lfixunssfdi_zero:
movi xh, 0
movi xl, 0
leaf_return
.Lfixunssfdi_bigexp:
/* Handle unsigned maximum exponent case. */
bltz a7, 2b
movi xl, 0
leaf_return /* no shift needed */
#endif /* L_fixunssfdi */
#ifdef L_floatsisf
.align 4
.global __floatunsisf
.type __floatunsisf, @function
__floatunsisf:
leaf_entry sp, 16
beqz a2, .Lfloatsisf_return
/* Set the sign to zero and jump to the floatsisf code. */
movi a7, 0
j .Lfloatsisf_normalize
.align 4
.global __floatsisf
.type __floatsisf, @function
__floatsisf:
leaf_entry sp, 16
/* Check for zero. */
beqz a2, .Lfloatsisf_return
/* Save the sign. */
extui a7, a2, 31, 1
/* Get the absolute value. */
#if XCHAL_HAVE_ABS
abs a2, a2
#else
neg a4, a2
movltz a2, a4, a2
#endif
.Lfloatsisf_normalize:
/* Normalize with the first 1 bit in the msb. */
do_nsau a4, a2, a5, a6
ssl a4
sll a5, a2
/* Shift the mantissa into position, with rounding bits in a6. */
srli a2, a5, 8
slli a6, a5, (32 - 8)
/* Set the exponent. */
movi a5, 0x9d /* 0x7e + 31 */
sub a5, a5, a4
slli a5, a5, 23
add a2, a2, a5
/* Add the sign. */
slli a7, a7, 31
or a2, a2, a7
/* Round up if the leftover fraction is >= 1/2. */
bgez a6, .Lfloatsisf_return
addi a2, a2, 1 /* Overflow to the exponent is OK. */
/* Check if the leftover fraction is exactly 1/2. */
slli a6, a6, 1
beqz a6, .Lfloatsisf_exactlyhalf
.Lfloatsisf_return:
leaf_return
.Lfloatsisf_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
leaf_return
#endif /* L_floatsisf */
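/* A C sketch of the signed conversion above; __builtin_clz stands in
   for the do_nsau macro, and the zero case is handled up front as in
   the asm:

     unsigned floatsisf (int i)           // returns the float bit pattern
     {
       if (i == 0)
         return 0;
       unsigned sign = (unsigned) i & 0x80000000u;
       unsigned a = sign ? 0u - (unsigned) i : (unsigned) i;
       int nz = __builtin_clz (a);
       a <<= nz;                                    // first 1 bit in the msb
       unsigned bits = ((a >> 8) + ((0x9du - nz) << 23)) | sign;
       unsigned frac = a << 24;                     // shifted-out bits
       if (frac & 0x80000000u) {                    // round to nearest...
         bits += 1;
         if ((frac << 1) == 0)
           bits &= ~1u;                             // ...ties to even
       }
       return bits;
     }
*/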
#ifdef L_floatdisf
.align 4
.global __floatundisf
.type __floatundisf, @function
__floatundisf:
leaf_entry sp, 16
/* Check for zero. */
or a4, xh, xl
beqz a4, 2f
/* Set the sign to zero and jump to the floatdisf code. */
movi a7, 0
j .Lfloatdisf_normalize
.align 4
.global __floatdisf
.type __floatdisf, @function
__floatdisf:
leaf_entry sp, 16
/* Check for zero. */
or a4, xh, xl
beqz a4, 2f
/* Save the sign. */
extui a7, xh, 31, 1
/* Get the absolute value. */
bgez xh, .Lfloatdisf_normalize
neg xl, xl
neg xh, xh
beqz xl, .Lfloatdisf_normalize
addi xh, xh, -1
.Lfloatdisf_normalize:
/* Normalize with the first 1 bit in the msb of xh. */
beqz xh, .Lfloatdisf_bigshift
do_nsau a4, xh, a5, a6
ssl a4
src xh, xh, xl
sll xl, xl
.Lfloatdisf_shifted:
/* Shift the mantissa into position, with rounding bits in a6. */
ssai 8
sll a5, xl
src a6, xh, xl
srl xh, xh
beqz a5, 1f
movi a5, 1
or a6, a6, a5
1:
/* Set the exponent. */
movi a5, 0xbd /* 0x7e + 63 */
sub a5, a5, a4
slli a5, a5, 23
add a2, xh, a5
/* Add the sign. */
slli a7, a7, 31
or a2, a2, a7
/* Round up if the leftover fraction is >= 1/2. */
bgez a6, 2f
addi a2, a2, 1 /* Overflow to the exponent is OK. */
/* Check if the leftover fraction is exactly 1/2. */
slli a6, a6, 1
beqz a6, .Lfloatdisf_exactlyhalf
2: leaf_return
.Lfloatdisf_bigshift:
/* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
do_nsau a4, xl, a5, a6
ssl a4
sll xh, xl
movi xl, 0
addi a4, a4, 32
j .Lfloatdisf_shifted
.Lfloatdisf_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
leaf_return
#endif /* L_floatdisf */
#if XCHAL_HAVE_FP_SQRT
#ifdef L_sqrtf
/* Square root */
.align 4
.global __ieee754_sqrtf
.type __ieee754_sqrtf, @function
__ieee754_sqrtf:
leaf_entry sp, 16
wfr f1, a2
sqrt0.s f2, f1
const.s f3, 0
maddn.s f3, f2, f2
nexp01.s f4, f1
const.s f0, 3
addexp.s f4, f0
maddn.s f0, f3, f4
nexp01.s f3, f1
neg.s f5, f3
maddn.s f2, f0, f2
const.s f0, 0
const.s f6, 0
const.s f7, 0
maddn.s f0, f5, f2
maddn.s f6, f2, f4
const.s f4, 3
maddn.s f7, f4, f2
maddn.s f3, f0, f0
maddn.s f4, f6, f2
neg.s f2, f7
maddn.s f0, f3, f2
maddn.s f7, f4, f7
mksadj.s f2, f1
nexp01.s f1, f1
maddn.s f1, f0, f0
neg.s f3, f7
addexpm.s f0, f2
addexp.s f3, f2
divn.s f0, f1, f3
rfr a2, f0
leaf_return
#endif /* L_sqrtf */
#endif /* XCHAL_HAVE_FP_SQRT */
#if XCHAL_HAVE_FP_RECIP
#ifdef L_recipsf2
/* Reciprocal */
.align 4
.global __recipsf2
.type __recipsf2, @function
__recipsf2:
leaf_entry sp, 16
wfr f1, a2
recip0.s f0, f1
const.s f2, 1
msub.s f2, f1, f0
maddn.s f0, f0, f2
const.s f2, 1
msub.s f2, f1, f0
maddn.s f0, f0, f2
rfr a2, f0
leaf_return
#endif /* L_recipsf2 */
#endif /* XCHAL_HAVE_FP_RECIP */
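/* The reciprocal sequence above is a hardware seed followed by two
   Newton-Raphson steps.  In scalar C, with recip0() standing in for the
   recip0.s seed instruction (an assumption -- its precision is
   configuration dependent):

     float recipsf2 (float x)
     {
       float r = recip0 (x);            // hardware approximation
       r = r + r * (1.0f - x * r);      // first refinement
       r = r + r * (1.0f - x * r);      // second refinement
       return r;
     }
*/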
#if XCHAL_HAVE_FP_RSQRT
#ifdef L_rsqrtsf2
/* Reciprocal square root */
.align 4
.global __rsqrtsf2
.type __rsqrtsf2, @function
__rsqrtsf2:
leaf_entry sp, 16
wfr f1, a2
rsqrt0.s f0, f1
mul.s f2, f1, f0
const.s f3, 3
mul.s f4, f3, f0
const.s f5, 1
msub.s f5, f2, f0
maddn.s f0, f4, f5
mul.s f2, f1, f0
mul.s f1, f3, f0
const.s f3, 1
msub.s f3, f2, f0
maddn.s f0, f1, f3
rfr a2, f0
leaf_return
#endif /* L_rsqrtsf2 */
#endif /* XCHAL_HAVE_FP_RSQRT */
| 4ms/metamodule-plugin-sdk | 1,541 | plugin-libc/libgcc/config/xtensa/crti.S |
# Start .init and .fini sections.
# Copyright (C) 2003-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
#include "xtensa-config.h"
.section .init
.globl _init
.type _init,@function
.align 4
_init:
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
entry sp, 64
#else
addi sp, sp, -32
s32i a0, sp, 0
#endif
.section .fini
.globl _fini
.type _fini,@function
.align 4
_fini:
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
entry sp, 64
#else
addi sp, sp, -32
s32i a0, sp, 0
#endif
| 4ms/metamodule-plugin-sdk | 6,905 | plugin-libc/libgcc/config/xtensa/lib2funcs.S |
/* Assembly functions for libgcc2.
Copyright (C) 2001-2022 Free Software Foundation, Inc.
Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "xtensa-config.h"
/* __xtensa_libgcc_window_spill: This function flushes out all but the
current register window. This is used to set up the stack so that
arbitrary frames can be accessed. */
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
.align 4
.global __xtensa_libgcc_window_spill
.type __xtensa_libgcc_window_spill,@function
__xtensa_libgcc_window_spill:
entry sp, 48
#if XCHAL_NUM_AREGS > 16
call12 1f
retw
.align 4
1:
.rept (XCHAL_NUM_AREGS - 24) / 12
_entry sp, 48
mov a12, a0
.endr
_entry sp, 16
#if XCHAL_NUM_AREGS % 12 == 0
mov a4, a4
#elif XCHAL_NUM_AREGS % 12 == 4
mov a8, a8
#elif XCHAL_NUM_AREGS % 12 == 8
mov a12, a12
#endif
retw
#else
mov a8, a8
retw
#endif
.size __xtensa_libgcc_window_spill, .-__xtensa_libgcc_window_spill
#endif
/* __xtensa_nonlocal_goto: This code does all the hard work of a
nonlocal goto on Xtensa. It is here in the library to avoid the
code size bloat of generating it in-line. There are two
arguments:
a2 = frame pointer for the procedure containing the label
a3 = goto handler address
This function never returns to its caller but instead goes directly
to the address of the specified goto handler. */
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
.align 4
.global __xtensa_nonlocal_goto
.type __xtensa_nonlocal_goto,@function
__xtensa_nonlocal_goto:
entry sp, 32
/* Flush registers. */
call8 __xtensa_libgcc_window_spill
/* Because the save area for a0-a3 is stored one frame below
the one identified by a2, the only way to restore those
registers is to unwind the stack. If alloca() were never
called, we could just unwind until finding the sp value
matching a2. However, a2 is a frame pointer, not a stack
pointer, and may not be encountered during the unwinding.
The solution is to unwind until going _past_ the value
given by a2. This involves keeping three stack pointer
values during the unwinding:
next = sp of frame N-1
cur = sp of frame N
prev = sp of frame N+1
When next > a2, the desired save area is stored relative
to prev. At this point, cur will be the same as a2
except in the alloca() case.
Besides finding the values to be restored to a0-a3, we also
need to find the current window size for the target
function. This can be extracted from the high bits of the
return address, initially in a0. As the unwinding
proceeds, the window size is taken from the value of a0
saved _two_ frames below the current frame. */
addi a5, sp, -16 /* a5 = prev - save area */
l32i a6, a5, 4
addi a6, a6, -16 /* a6 = cur - save area */
mov a8, a0 /* a8 = return address (for window size) */
j .Lfirstframe
.Lnextframe:
l32i a8, a5, 0 /* next return address (for window size) */
mov a5, a6 /* advance prev */
addi a6, a7, -16 /* advance cur */
.Lfirstframe:
l32i a7, a6, 4 /* a7 = next */
bgeu a2, a7, .Lnextframe
/* At this point, prev (a5) points to the save area with the saved
values of a0-a3. Copy those values into the save area at the
current sp so they will be reloaded when the return from this
function underflows. We don't have to worry about exceptions
while updating the current save area, because the windows have
already been flushed. */
addi a4, sp, -16 /* a4 = save area of this function */
l32i a6, a5, 0
l32i a7, a5, 4
s32i a6, a4, 0
s32i a7, a4, 4
l32i a6, a5, 8
l32i a7, a5, 12
s32i a6, a4, 8
s32i a7, a4, 12
/* Set return address to goto handler. Use the window size bits
from the return address two frames below the target. */
extui a8, a8, 30, 2 /* get window size from return addr. */
slli a3, a3, 2 /* get goto handler addr. << 2 */
ssai 2
src a0, a8, a3 /* combine them with a funnel shift */
retw
.size __xtensa_nonlocal_goto, .-__xtensa_nonlocal_goto
#endif
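/* The unwind loop above, rendered as pseudo-C (illustrative only; the
   real control flow depends on the windowed ABI and register windows
   and cannot be expressed in portable C):

     unsigned *find_save_area (unsigned *sp, unsigned target_fp,
                               unsigned a0, unsigned *ret_out)
     {
       unsigned *prev = sp - 4;                  // save area of frame N+1
       unsigned *cur  = (unsigned *) prev[1] - 4;
       unsigned ret = a0;                        // carries the window size
       unsigned next_sp;
       while ((next_sp = cur[1]) <= target_fp) { // not yet past a2
         ret  = prev[0];
         prev = cur;
         cur  = (unsigned *) next_sp - 4;
       }
       *ret_out = ret;
       return prev;     // holds the a0..a3 of the target frame
     }
*/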
/* __xtensa_sync_caches: This function is called after writing a trampoline
on the stack to force all the data writes to memory and invalidate the
instruction cache. a2 is the address of the new trampoline.
After the trampoline data is written out, it must be flushed out of
the data cache into memory. We use DHWB in case we have a writeback
cache. At least one DHWB instruction is needed for each data cache
line which may be touched by the trampoline. An ISYNC instruction
must follow the DHWBs.
We have to flush the i-cache to make sure that the new values get used.
At least one IHI instruction is needed for each i-cache line which may
be touched by the trampoline. An ISYNC instruction is also needed to
make sure that the modified instructions are loaded into the instruction
fetch buffer. */
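/* Illustrative C-style sketch of the two loops below (dhwb/ihi are
   written as pseudo-functions here; each is really a single
   instruction):
     nlines = (line_offset(a2) + TRAMPOLINE_SIZE + linesize - 1) / linesize;
     for (i = 0; i < nlines; i++)
       dhwb(a2 + i * linesize);	// write back one d-cache line
     isync();
     ... then the same loop with ihi() to invalidate i-cache lines. */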
/* Use the maximum trampoline size. Flushing a bit extra is OK. */
#define TRAMPOLINE_SIZE 60
.text
.align 4
.global __xtensa_sync_caches
.type __xtensa_sync_caches,@function
__xtensa_sync_caches:
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
entry sp, 32
#endif
#if XCHAL_DCACHE_SIZE > 0
/* Flush the trampoline from the data cache. */
extui a4, a2, 0, XCHAL_DCACHE_LINEWIDTH
addi a4, a4, TRAMPOLINE_SIZE
addi a4, a4, (1 << XCHAL_DCACHE_LINEWIDTH) - 1
srli a4, a4, XCHAL_DCACHE_LINEWIDTH
mov a3, a2
.Ldcache_loop:
dhwb a3, 0
addi a3, a3, (1 << XCHAL_DCACHE_LINEWIDTH)
addi a4, a4, -1
bnez a4, .Ldcache_loop
isync
#endif
#if XCHAL_ICACHE_SIZE > 0
/* Invalidate the corresponding lines in the instruction cache. */
extui a4, a2, 0, XCHAL_ICACHE_LINEWIDTH
addi a4, a4, TRAMPOLINE_SIZE
addi a4, a4, (1 << XCHAL_ICACHE_LINEWIDTH) - 1
srli a4, a4, XCHAL_ICACHE_LINEWIDTH
.Licache_loop:
ihi a2, 0
addi a2, a2, (1 << XCHAL_ICACHE_LINEWIDTH)
addi a4, a4, -1
bnez a4, .Licache_loop
#endif
isync
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
retw
#else
ret
#endif
.size __xtensa_sync_caches, .-__xtensa_sync_caches
/* plugin-libc/libgcc/config/xtensa/ieee754-df.S */
/* IEEE-754 double-precision functions for Xtensa
Copyright (C) 2006-2022 Free Software Foundation, Inc.
Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifdef __XTENSA_EB__
#define xh a2
#define xl a3
#define yh a4
#define yl a5
#else
#define xh a3
#define xl a2
#define yh a5
#define yl a4
#endif
/* Warning! The branch displacements for some Xtensa branch instructions
are quite small, and this code has been carefully laid out to keep
branch targets in range. If you change anything, be sure to check that
the assembler is not relaxing anything to branch over a jump. */
#ifdef L_negdf2
.align 4
.global __negdf2
.type __negdf2, @function
__negdf2:
leaf_entry sp, 16
movi a4, 0x80000000
xor xh, xh, a4
leaf_return
#endif /* L_negdf2 */
#ifdef L_addsubdf3
.literal_position
/* Addition */
__adddf3_aux:
/* Handle NaNs and Infinities. (This code is placed before the
start of the function just to keep it in range of the limited
branch displacements.) */
.Ladd_xnan_or_inf:
/* If y is neither Infinity nor NaN, return x. */
bnall yh, a6, .Ladd_return_nan_or_inf
/* If x is a NaN, return it. Otherwise, return y. */
slli a7, xh, 12
or a7, a7, xl
bnez a7, .Ladd_return_nan
.Ladd_ynan_or_inf:
/* Return y. */
mov xh, yh
mov xl, yl
.Ladd_return_nan_or_inf:
slli a7, xh, 12
or a7, a7, xl
bnez a7, .Ladd_return_nan
leaf_return
.Ladd_return_nan:
movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4
leaf_return
.Ladd_opposite_signs:
/* Operand signs differ. Do a subtraction. */
slli a7, a6, 11
xor yh, yh, a7
j .Lsub_same_sign
.align 4
.global __adddf3
.type __adddf3, @function
__adddf3:
leaf_entry sp, 16
movi a6, 0x7ff00000
/* Check if the two operands have the same sign. */
xor a7, xh, yh
bltz a7, .Ladd_opposite_signs
.Ladd_same_sign:
/* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */
ball xh, a6, .Ladd_xnan_or_inf
ball yh, a6, .Ladd_ynan_or_inf
/* Compare the exponents. The smaller operand will be shifted
right by the exponent difference and added to the larger
one. */
extui a7, xh, 20, 12
extui a8, yh, 20, 12
bltu a7, a8, .Ladd_shiftx
.Ladd_shifty:
/* Check if the smaller (or equal) exponent is zero. */
bnone yh, a6, .Ladd_yexpzero
/* Replace yh sign/exponent with 0x001. */
or yh, yh, a6
slli yh, yh, 11
srli yh, yh, 11
.Ladd_yexpdiff:
/* Compute the exponent difference. Optimize for difference < 32. */
sub a10, a7, a8
bgeui a10, 32, .Ladd_bigshifty
/* Shift yh/yl right by the exponent difference. Any bits that are
shifted out of yl are saved in a9 for rounding the result. */
ssr a10
movi a9, 0
src a9, yl, a9
src yl, yh, yl
srl yh, yh
.Ladd_addy:
/* Do the 64-bit addition. */
add xl, xl, yl
add xh, xh, yh
bgeu xl, yl, 1f
addi xh, xh, 1
1:
/* Check if the add overflowed into the exponent. */
extui a10, xh, 20, 12
beq a10, a7, .Ladd_round
mov a8, a7
j .Ladd_carry
.Ladd_yexpzero:
/* y is a subnormal value. Replace its sign/exponent with zero,
i.e., no implicit "1.0", and increment the apparent exponent
because subnormals behave as if they had the minimum (nonzero)
exponent. Test for the case when both exponents are zero. */
slli yh, yh, 12
srli yh, yh, 12
bnone xh, a6, .Ladd_bothexpzero
addi a8, a8, 1
j .Ladd_yexpdiff
.Ladd_bothexpzero:
/* Both exponents are zero. Handle this as a special case. There
is no need to shift or round, and the normal code for handling
a carry into the exponent field will not work because it
assumes there is an implicit "1.0" that needs to be added. */
add xl, xl, yl
add xh, xh, yh
bgeu xl, yl, 1f
addi xh, xh, 1
1: leaf_return
.Ladd_bigshifty:
	/* Exponent difference >= 64 -- just return the bigger value.  */
bgeui a10, 64, 1b
/* Shift yh/yl right by the exponent difference. Any bits that are
shifted out are saved in a9 for rounding the result. */
ssr a10
sll a11, yl /* lost bits shifted out of yl */
src a9, yh, yl
srl yl, yh
movi yh, 0
beqz a11, .Ladd_addy
or a9, a9, a10 /* any positive, nonzero value will work */
j .Ladd_addy
.Ladd_xexpzero:
/* Same as "yexpzero" except skip handling the case when both
exponents are zero. */
slli xh, xh, 12
srli xh, xh, 12
addi a7, a7, 1
j .Ladd_xexpdiff
.Ladd_shiftx:
/* Same thing as the "shifty" code, but with x and y swapped. Also,
because the exponent difference is always nonzero in this version,
the shift sequence can use SLL and skip loading a constant zero. */
bnone xh, a6, .Ladd_xexpzero
or xh, xh, a6
slli xh, xh, 11
srli xh, xh, 11
.Ladd_xexpdiff:
sub a10, a8, a7
bgeui a10, 32, .Ladd_bigshiftx
ssr a10
sll a9, xl
src xl, xh, xl
srl xh, xh
.Ladd_addx:
add xl, xl, yl
add xh, xh, yh
bgeu xl, yl, 1f
addi xh, xh, 1
1:
/* Check if the add overflowed into the exponent. */
extui a10, xh, 20, 12
bne a10, a8, .Ladd_carry
.Ladd_round:
/* Round up if the leftover fraction is >= 1/2. */
bgez a9, 1f
addi xl, xl, 1
beqz xl, .Ladd_roundcarry
/* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1
beqz a9, .Ladd_exactlyhalf
1: leaf_return
.Ladd_bigshiftx:
/* Mostly the same thing as "bigshifty".... */
bgeui a10, 64, .Ladd_returny
ssr a10
sll a11, xl
src a9, xh, xl
srl xl, xh
movi xh, 0
beqz a11, .Ladd_addx
or a9, a9, a10
j .Ladd_addx
.Ladd_returny:
mov xh, yh
mov xl, yl
leaf_return
.Ladd_carry:
/* The addition has overflowed into the exponent field, so the
value needs to be renormalized. The mantissa of the result
can be recovered by subtracting the original exponent and
adding 0x100000 (which is the explicit "1.0" for the
mantissa of the non-shifted operand -- the "1.0" for the
shifted operand was already added). The mantissa can then
be shifted right by one bit. The explicit "1.0" of the
shifted mantissa then needs to be replaced by the exponent,
incremented by one to account for the normalizing shift.
It is faster to combine these operations: do the shift first
and combine the additions and subtractions. If x is the
original exponent, the result is:
shifted mantissa - (x << 19) + (1 << 19) + (x << 20)
or:
shifted mantissa + ((x + 1) << 19)
Note that the exponent is incremented here by leaving the
explicit "1.0" of the mantissa in the exponent field. */
/* Shift xh/xl right by one bit. Save the lsb of xl. */
mov a10, xl
ssai 1
src xl, xh, xl
srl xh, xh
/* See explanation above. The original exponent is in a8. */
addi a8, a8, 1
slli a8, a8, 19
add xh, xh, a8
/* Return an Infinity if the exponent overflowed. */
ball xh, a6, .Ladd_infinity
/* Same thing as the "round" code except the msb of the leftover
fraction is bit 0 of a10, with the rest of the fraction in a9. */
bbci.l a10, 0, 1f
addi xl, xl, 1
beqz xl, .Ladd_roundcarry
beqz a9, .Ladd_exactlyhalf
1: leaf_return
.Ladd_infinity:
/* Clear the mantissa. */
movi xl, 0
srli xh, xh, 20
slli xh, xh, 20
/* The sign bit may have been lost in a carry-out. Put it back. */
slli a8, a8, 1
or xh, xh, a8
leaf_return
.Ladd_exactlyhalf:
/* Round down to the nearest even value. */
srli xl, xl, 1
slli xl, xl, 1
leaf_return
.Ladd_roundcarry:
/* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */
addi xh, xh, 1
/* Overflow to the exponent is OK. */
leaf_return
/* Subtraction */
__subdf3_aux:
/* Handle NaNs and Infinities. (This code is placed before the
start of the function just to keep it in range of the limited
branch displacements.) */
.Lsub_xnan_or_inf:
/* If y is neither Infinity nor NaN, return x. */
bnall yh, a6, .Lsub_return_nan_or_inf
.Lsub_return_nan:
/* Both x and y are either NaN or Inf, so the result is NaN. */
movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4
leaf_return
.Lsub_ynan_or_inf:
/* Negate y and return it. */
slli a7, a6, 11
xor xh, yh, a7
mov xl, yl
.Lsub_return_nan_or_inf:
slli a7, xh, 12
or a7, a7, xl
bnez a7, .Lsub_return_nan
leaf_return
.Lsub_opposite_signs:
/* Operand signs differ. Do an addition. */
slli a7, a6, 11
xor yh, yh, a7
j .Ladd_same_sign
.align 4
.global __subdf3
.type __subdf3, @function
__subdf3:
leaf_entry sp, 16
movi a6, 0x7ff00000
/* Check if the two operands have the same sign. */
xor a7, xh, yh
bltz a7, .Lsub_opposite_signs
.Lsub_same_sign:
/* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */
ball xh, a6, .Lsub_xnan_or_inf
ball yh, a6, .Lsub_ynan_or_inf
/* Compare the operands. In contrast to addition, the entire
value matters here. */
extui a7, xh, 20, 11
extui a8, yh, 20, 11
bltu xh, yh, .Lsub_xsmaller
beq xh, yh, .Lsub_compare_low
.Lsub_ysmaller:
/* Check if the smaller (or equal) exponent is zero. */
bnone yh, a6, .Lsub_yexpzero
/* Replace yh sign/exponent with 0x001. */
or yh, yh, a6
slli yh, yh, 11
srli yh, yh, 11
.Lsub_yexpdiff:
/* Compute the exponent difference. Optimize for difference < 32. */
sub a10, a7, a8
bgeui a10, 32, .Lsub_bigshifty
/* Shift yh/yl right by the exponent difference. Any bits that are
shifted out of yl are saved in a9 for rounding the result. */
ssr a10
movi a9, 0
src a9, yl, a9
src yl, yh, yl
srl yh, yh
.Lsub_suby:
/* Do the 64-bit subtraction. */
sub xh, xh, yh
bgeu xl, yl, 1f
addi xh, xh, -1
1: sub xl, xl, yl
/* Subtract the leftover bits in a9 from zero and propagate any
borrow from xh/xl. */
neg a9, a9
beqz a9, 1f
addi a5, xh, -1
moveqz xh, a5, xl
addi xl, xl, -1
1:
/* Check if the subtract underflowed into the exponent. */
extui a10, xh, 20, 11
beq a10, a7, .Lsub_round
j .Lsub_borrow
.Lsub_compare_low:
/* The high words are equal. Compare the low words. */
bltu xl, yl, .Lsub_xsmaller
bltu yl, xl, .Lsub_ysmaller
/* The operands are equal. Return 0.0. */
movi xh, 0
movi xl, 0
1: leaf_return
.Lsub_yexpzero:
/* y is a subnormal value. Replace its sign/exponent with zero,
i.e., no implicit "1.0". Unless x is also a subnormal, increment
y's apparent exponent because subnormals behave as if they had
the minimum (nonzero) exponent. */
slli yh, yh, 12
srli yh, yh, 12
bnone xh, a6, .Lsub_yexpdiff
addi a8, a8, 1
j .Lsub_yexpdiff
.Lsub_bigshifty:
	/* Exponent difference >= 64 -- just return the bigger value.  */
bgeui a10, 64, 1b
/* Shift yh/yl right by the exponent difference. Any bits that are
shifted out are saved in a9 for rounding the result. */
ssr a10
sll a11, yl /* lost bits shifted out of yl */
src a9, yh, yl
srl yl, yh
movi yh, 0
beqz a11, .Lsub_suby
or a9, a9, a10 /* any positive, nonzero value will work */
j .Lsub_suby
.Lsub_xsmaller:
/* Same thing as the "ysmaller" code, but with x and y swapped and
with y negated. */
bnone xh, a6, .Lsub_xexpzero
or xh, xh, a6
slli xh, xh, 11
srli xh, xh, 11
.Lsub_xexpdiff:
sub a10, a8, a7
bgeui a10, 32, .Lsub_bigshiftx
ssr a10
movi a9, 0
src a9, xl, a9
src xl, xh, xl
srl xh, xh
/* Negate y. */
slli a11, a6, 11
xor yh, yh, a11
.Lsub_subx:
sub xl, yl, xl
sub xh, yh, xh
bgeu yl, xl, 1f
addi xh, xh, -1
1:
/* Subtract the leftover bits in a9 from zero and propagate any
borrow from xh/xl. */
neg a9, a9
beqz a9, 1f
addi a5, xh, -1
moveqz xh, a5, xl
addi xl, xl, -1
1:
/* Check if the subtract underflowed into the exponent. */
extui a10, xh, 20, 11
bne a10, a8, .Lsub_borrow
.Lsub_round:
/* Round up if the leftover fraction is >= 1/2. */
bgez a9, 1f
addi xl, xl, 1
beqz xl, .Lsub_roundcarry
/* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1
beqz a9, .Lsub_exactlyhalf
1: leaf_return
.Lsub_xexpzero:
/* Same as "yexpzero". */
slli xh, xh, 12
srli xh, xh, 12
bnone yh, a6, .Lsub_xexpdiff
addi a7, a7, 1
j .Lsub_xexpdiff
.Lsub_bigshiftx:
/* Mostly the same thing as "bigshifty", but with the sign bit of the
shifted value set so that the subsequent subtraction flips the
sign of y. */
bgeui a10, 64, .Lsub_returny
ssr a10
sll a11, xl
src a9, xh, xl
srl xl, xh
slli xh, a6, 11 /* set sign bit of xh */
beqz a11, .Lsub_subx
or a9, a9, a10
j .Lsub_subx
.Lsub_returny:
/* Negate and return y. */
slli a7, a6, 11
xor xh, yh, a7
mov xl, yl
leaf_return
.Lsub_borrow:
/* The subtraction has underflowed into the exponent field, so the
value needs to be renormalized. Shift the mantissa left as
needed to remove any leading zeros and adjust the exponent
accordingly. If the exponent is not large enough to remove
all the leading zeros, the result will be a subnormal value. */
slli a8, xh, 12
beqz a8, .Lsub_xhzero
do_nsau a6, a8, a7, a11
srli a8, a8, 12
bge a6, a10, .Lsub_subnormal
addi a6, a6, 1
.Lsub_shift_lt32:
/* Shift the mantissa (a8/xl/a9) left by a6. */
ssl a6
src a8, a8, xl
src xl, xl, a9
sll a9, a9
/* Combine the shifted mantissa with the sign and exponent,
decrementing the exponent by a6. (The exponent has already
been decremented by one due to the borrow from the subtraction,
but adding the mantissa will increment the exponent by one.) */
srli xh, xh, 20
sub xh, xh, a6
slli xh, xh, 20
add xh, xh, a8
j .Lsub_round
.Lsub_exactlyhalf:
/* Round down to the nearest even value. */
srli xl, xl, 1
slli xl, xl, 1
leaf_return
.Lsub_roundcarry:
/* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */
addi xh, xh, 1
/* Overflow to the exponent is OK. */
leaf_return
.Lsub_xhzero:
/* When normalizing the result, all the mantissa bits in the high
word are zero. Shift by "20 + (leading zero count of xl) + 1". */
do_nsau a6, xl, a7, a11
addi a6, a6, 21
blt a10, a6, .Lsub_subnormal
.Lsub_normalize_shift:
bltui a6, 32, .Lsub_shift_lt32
ssl a6
src a8, xl, a9
sll xl, a9
movi a9, 0
srli xh, xh, 20
sub xh, xh, a6
slli xh, xh, 20
add xh, xh, a8
j .Lsub_round
.Lsub_subnormal:
/* The exponent is too small to shift away all the leading zeros.
Set a6 to the current exponent (which has already been
decremented by the borrow) so that the exponent of the result
will be zero. Do not add 1 to a6 in this case, because: (1)
adding the mantissa will not increment the exponent, so there is
no need to subtract anything extra from the exponent to
compensate, and (2) the effective exponent of a subnormal is 1
not 0 so the shift amount must be 1 smaller than normal. */
mov a6, a10
j .Lsub_normalize_shift
#endif /* L_addsubdf3 */
#ifdef L_muldf3
/* Multiplication */
#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
#define XCHAL_NO_MUL 1
#endif
.literal_position
__muldf3_aux:
/* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
(This code is placed before the start of the function just to
keep it in range of the limited branch displacements.) */
.Lmul_xexpzero:
/* Clear the sign bit of x. */
slli xh, xh, 1
srli xh, xh, 1
/* If x is zero, return zero. */
or a10, xh, xl
beqz a10, .Lmul_return_zero
/* Normalize x. Adjust the exponent in a8. */
beqz xh, .Lmul_xh_zero
do_nsau a10, xh, a11, a12
addi a10, a10, -11
ssl a10
src xh, xh, xl
sll xl, xl
movi a8, 1
sub a8, a8, a10
j .Lmul_xnormalized
.Lmul_xh_zero:
do_nsau a10, xl, a11, a12
addi a10, a10, -11
movi a8, -31
sub a8, a8, a10
ssl a10
bltz a10, .Lmul_xl_srl
sll xh, xl
movi xl, 0
j .Lmul_xnormalized
.Lmul_xl_srl:
srl xh, xl
sll xl, xl
j .Lmul_xnormalized
.Lmul_yexpzero:
/* Clear the sign bit of y. */
slli yh, yh, 1
srli yh, yh, 1
/* If y is zero, return zero. */
or a10, yh, yl
beqz a10, .Lmul_return_zero
/* Normalize y. Adjust the exponent in a9. */
beqz yh, .Lmul_yh_zero
do_nsau a10, yh, a11, a12
addi a10, a10, -11
ssl a10
src yh, yh, yl
sll yl, yl
movi a9, 1
sub a9, a9, a10
j .Lmul_ynormalized
.Lmul_yh_zero:
do_nsau a10, yl, a11, a12
addi a10, a10, -11
movi a9, -31
sub a9, a9, a10
ssl a10
bltz a10, .Lmul_yl_srl
sll yh, yl
movi yl, 0
j .Lmul_ynormalized
.Lmul_yl_srl:
srl yh, yl
sll yl, yl
j .Lmul_ynormalized
.Lmul_return_zero:
/* Return zero with the appropriate sign bit. */
srli xh, a7, 31
slli xh, xh, 31
movi xl, 0
j .Lmul_done
.Lmul_xnan_or_inf:
/* If y is zero, return NaN. */
bnez yl, 1f
slli a8, yh, 1
beqz a8, .Lmul_return_nan
1:
/* If y is NaN, return y. */
bnall yh, a6, .Lmul_returnx
slli a8, yh, 12
or a8, a8, yl
beqz a8, .Lmul_returnx
.Lmul_returny:
mov xh, yh
mov xl, yl
.Lmul_returnx:
slli a8, xh, 12
or a8, a8, xl
bnez a8, .Lmul_return_nan
/* Set the sign bit and return. */
extui a7, a7, 31, 1
slli xh, xh, 1
ssai 1
src xh, a7, xh
j .Lmul_done
.Lmul_ynan_or_inf:
/* If x is zero, return NaN. */
bnez xl, .Lmul_returny
slli a8, xh, 1
bnez a8, .Lmul_returny
mov xh, yh
.Lmul_return_nan:
movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4
j .Lmul_done
.align 4
.global __muldf3
.type __muldf3, @function
__muldf3:
#if __XTENSA_CALL0_ABI__
leaf_entry sp, 32
addi sp, sp, -32
s32i a12, sp, 16
s32i a13, sp, 20
s32i a14, sp, 24
s32i a15, sp, 28
#elif XCHAL_NO_MUL
/* This is not really a leaf function; allocate enough stack space
to allow CALL12s to a helper function. */
leaf_entry sp, 64
#else
leaf_entry sp, 32
#endif
movi a6, 0x7ff00000
/* Get the sign of the result. */
xor a7, xh, yh
/* Check for NaN and infinity. */
ball xh, a6, .Lmul_xnan_or_inf
ball yh, a6, .Lmul_ynan_or_inf
/* Extract the exponents. */
extui a8, xh, 20, 11
extui a9, yh, 20, 11
beqz a8, .Lmul_xexpzero
.Lmul_xnormalized:
beqz a9, .Lmul_yexpzero
.Lmul_ynormalized:
/* Add the exponents. */
add a8, a8, a9
/* Replace sign/exponent fields with explicit "1.0". */
movi a10, 0x1fffff
or xh, xh, a6
and xh, xh, a10
or yh, yh, a6
and yh, yh, a10
/* Multiply 64x64 to 128 bits. The result ends up in xh/xl/a6.
The least-significant word of the result is thrown away except
that if it is nonzero, the lsb of a6 is set to 1. */
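	/* In terms of 32-bit halves, the full product is
	     x * y = (xh*yh << 64) + ((xh*yl + xl*yh) << 32) + xl*yl,
	   and the code accumulates these terms so that the top 96 bits
	   end up in xh/xl/a6. */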
#if XCHAL_HAVE_MUL32_HIGH
/* Compute a6 with any carry-outs in a10. */
movi a10, 0
mull a6, xl, yh
mull a11, xh, yl
add a6, a6, a11
bgeu a6, a11, 1f
addi a10, a10, 1
1:
muluh a11, xl, yl
add a6, a6, a11
bgeu a6, a11, 1f
addi a10, a10, 1
1:
/* If the low word of the result is nonzero, set the lsb of a6. */
mull a11, xl, yl
beqz a11, 1f
movi a9, 1
or a6, a6, a9
1:
/* Compute xl with any carry-outs in a9. */
movi a9, 0
mull a11, xh, yh
add a10, a10, a11
bgeu a10, a11, 1f
addi a9, a9, 1
1:
muluh a11, xh, yl
add a10, a10, a11
bgeu a10, a11, 1f
addi a9, a9, 1
1:
muluh xl, xl, yh
add xl, xl, a10
bgeu xl, a10, 1f
addi a9, a9, 1
1:
/* Compute xh. */
muluh xh, xh, yh
add xh, xh, a9
#else /* ! XCHAL_HAVE_MUL32_HIGH */
/* Break the inputs into 16-bit chunks and compute 16 32-bit partial
products. These partial products are:
0 xll * yll
1 xll * ylh
2 xlh * yll
3 xll * yhl
4 xlh * ylh
5 xhl * yll
6 xll * yhh
7 xlh * yhl
8 xhl * ylh
9 xhh * yll
10 xlh * yhh
11 xhl * yhl
12 xhh * ylh
13 xhl * yhh
14 xhh * yhl
15 xhh * yhh
where the input chunks are (hh, hl, lh, ll). If using the Mul16
or Mul32 multiplier options, these input chunks must be stored in
separate registers. For Mac16, the UMUL.AA.* opcodes can specify
that the inputs come from either half of the registers, so there
is no need to shift them out ahead of time. If there is no
multiply hardware, the 16-bit chunks can be extracted when setting
up the arguments to the separate multiply function. */
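	/* Since the chunks are 16 bits wide, the partial products carry
	   weights that step by 2^16:
	     product = pp0
	             + ((pp1 + pp2) << 16)
	             + ((pp3 + pp4 + pp5) << 32)
	             + ((pp6 + pp7 + pp8 + pp9) << 48)
	             + ((pp10 + pp11 + pp12) << 64)
	             + ((pp13 + pp14) << 80)
	             + (pp15 << 96) */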
/* Save a7 since it is needed to hold a temporary value. */
s32i a7, sp, 4
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
/* Calling a separate multiply function will clobber a0 and requires
use of a8 as a temporary, so save those values now. (The function
uses a custom ABI so nothing else needs to be saved.) */
s32i a0, sp, 0
s32i a8, sp, 8
#endif
#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
#define xlh a12
#define ylh a13
#define xhh a14
#define yhh a15
/* Get the high halves of the inputs into registers. */
srli xlh, xl, 16
srli ylh, yl, 16
srli xhh, xh, 16
srli yhh, yh, 16
#define xll xl
#define yll yl
#define xhl xh
#define yhl yh
#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
/* Clear the high halves of the inputs. This does not matter
for MUL16 because the high bits are ignored. */
extui xl, xl, 0, 16
extui xh, xh, 0, 16
extui yl, yl, 0, 16
extui yh, yh, 0, 16
#endif
#endif /* MUL16 || MUL32 */
#if XCHAL_HAVE_MUL16
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
mul16u dst, xreg ## xhalf, yreg ## yhalf
#elif XCHAL_HAVE_MUL32
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
mull dst, xreg ## xhalf, yreg ## yhalf
#elif XCHAL_HAVE_MAC16
/* The preprocessor insists on inserting a space when concatenating after
a period in the definition of do_mul below. These macros are a workaround
using underscores instead of periods when doing the concatenation. */
#define umul_aa_ll umul.aa.ll
#define umul_aa_lh umul.aa.lh
#define umul_aa_hl umul.aa.hl
#define umul_aa_hh umul.aa.hh
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
umul_aa_ ## xhalf ## yhalf xreg, yreg; \
rsr dst, ACCLO
#else /* no multiply hardware */
#define set_arg_l(dst, src) \
extui dst, src, 0, 16
#define set_arg_h(dst, src) \
srli dst, src, 16
#if __XTENSA_CALL0_ABI__
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
set_arg_ ## xhalf (a13, xreg); \
set_arg_ ## yhalf (a14, yreg); \
call0 .Lmul_mulsi3; \
mov dst, a12
#else
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
set_arg_ ## xhalf (a14, xreg); \
set_arg_ ## yhalf (a15, yreg); \
call12 .Lmul_mulsi3; \
mov dst, a14
#endif /* __XTENSA_CALL0_ABI__ */
#endif /* no multiply hardware */
/* Add pp1 and pp2 into a10 with carry-out in a9. */
do_mul(a10, xl, l, yl, h) /* pp 1 */
do_mul(a11, xl, h, yl, l) /* pp 2 */
movi a9, 0
add a10, a10, a11
bgeu a10, a11, 1f
addi a9, a9, 1
1:
/* Initialize a6 with a9/a10 shifted into position. Note that
this value can be safely incremented without any carry-outs. */
ssai 16
src a6, a9, a10
/* Compute the low word into a10. */
do_mul(a11, xl, l, yl, l) /* pp 0 */
sll a10, a10
add a10, a10, a11
bgeu a10, a11, 1f
addi a6, a6, 1
1:
/* Compute the contributions of pp0-5 to a6, with carry-outs in a9.
This is good enough to determine the low half of a6, so that any
nonzero bits from the low word of the result can be collapsed
into a6, freeing up a register. */
movi a9, 0
do_mul(a11, xl, l, yh, l) /* pp 3 */
add a6, a6, a11
bgeu a6, a11, 1f
addi a9, a9, 1
1:
do_mul(a11, xl, h, yl, h) /* pp 4 */
add a6, a6, a11
bgeu a6, a11, 1f
addi a9, a9, 1
1:
do_mul(a11, xh, l, yl, l) /* pp 5 */
add a6, a6, a11
bgeu a6, a11, 1f
addi a9, a9, 1
1:
/* Collapse any nonzero bits from the low word into a6. */
beqz a10, 1f
movi a11, 1
or a6, a6, a11
1:
/* Add pp6-9 into a11 with carry-outs in a10. */
do_mul(a7, xl, l, yh, h) /* pp 6 */
do_mul(a11, xh, h, yl, l) /* pp 9 */
movi a10, 0
add a11, a11, a7
bgeu a11, a7, 1f
addi a10, a10, 1
1:
do_mul(a7, xl, h, yh, l) /* pp 7 */
add a11, a11, a7
bgeu a11, a7, 1f
addi a10, a10, 1
1:
do_mul(a7, xh, l, yl, h) /* pp 8 */
add a11, a11, a7
bgeu a11, a7, 1f
addi a10, a10, 1
1:
/* Shift a10/a11 into position, and add low half of a11 to a6. */
src a10, a10, a11
add a10, a10, a9
sll a11, a11
add a6, a6, a11
bgeu a6, a11, 1f
addi a10, a10, 1
1:
/* Add pp10-12 into xl with carry-outs in a9. */
movi a9, 0
do_mul(xl, xl, h, yh, h) /* pp 10 */
add xl, xl, a10
bgeu xl, a10, 1f
addi a9, a9, 1
1:
do_mul(a10, xh, l, yh, l) /* pp 11 */
add xl, xl, a10
bgeu xl, a10, 1f
addi a9, a9, 1
1:
do_mul(a10, xh, h, yl, h) /* pp 12 */
add xl, xl, a10
bgeu xl, a10, 1f
addi a9, a9, 1
1:
/* Add pp13-14 into a11 with carry-outs in a10. */
do_mul(a11, xh, l, yh, h) /* pp 13 */
do_mul(a7, xh, h, yh, l) /* pp 14 */
movi a10, 0
add a11, a11, a7
bgeu a11, a7, 1f
addi a10, a10, 1
1:
/* Shift a10/a11 into position, and add low half of a11 to a6. */
src a10, a10, a11
add a10, a10, a9
sll a11, a11
add xl, xl, a11
bgeu xl, a11, 1f
addi a10, a10, 1
1:
/* Compute xh. */
do_mul(xh, xh, h, yh, h) /* pp 15 */
add xh, xh, a10
/* Restore values saved on the stack during the multiplication. */
l32i a7, sp, 4
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
l32i a0, sp, 0
l32i a8, sp, 8
#endif
#endif /* ! XCHAL_HAVE_MUL32_HIGH */
/* Shift left by 12 bits, unless there was a carry-out from the
multiply, in which case, shift by 11 bits and increment the
exponent. Note: It is convenient to use the constant 0x3ff
instead of 0x400 when removing the extra exponent bias (so that
it is easy to construct 0x7fe for the overflow check). Reverse
the logic here to decrement the exponent sum by one unless there
was a carry-out. */
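	/* To see why the test below works: each 53-bit mantissa has its
	   leading 1 in bit 52, so the 128-bit product has its leading 1
	   in bit 104 or 105, i.e., bit 8 or 9 of xh.  A nonzero
	   xh >> 9 therefore means there was a carry-out. */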
movi a4, 11
srli a5, xh, 21 - 12
bnez a5, 1f
addi a4, a4, 1
addi a8, a8, -1
1: ssl a4
src xh, xh, xl
src xl, xl, a6
sll a6, a6
/* Subtract the extra bias from the exponent sum (plus one to account
for the explicit "1.0" of the mantissa that will be added to the
exponent in the final result). */
movi a4, 0x3ff
sub a8, a8, a4
/* Check for over/underflow. The value in a8 is one less than the
final exponent, so values in the range 0..7fd are OK here. */
slli a4, a4, 1 /* 0x7fe */
bgeu a8, a4, .Lmul_overflow
.Lmul_round:
/* Round. */
bgez a6, .Lmul_rounded
addi xl, xl, 1
beqz xl, .Lmul_roundcarry
slli a6, a6, 1
beqz a6, .Lmul_exactlyhalf
.Lmul_rounded:
/* Add the exponent to the mantissa. */
slli a8, a8, 20
add xh, xh, a8
.Lmul_addsign:
/* Add the sign bit. */
srli a7, a7, 31
slli a7, a7, 31
or xh, xh, a7
.Lmul_done:
#if __XTENSA_CALL0_ABI__
l32i a12, sp, 16
l32i a13, sp, 20
l32i a14, sp, 24
l32i a15, sp, 28
addi sp, sp, 32
#endif
leaf_return
.Lmul_exactlyhalf:
/* Round down to the nearest even value. */
srli xl, xl, 1
slli xl, xl, 1
j .Lmul_rounded
.Lmul_roundcarry:
/* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */
addi xh, xh, 1
/* Overflow is OK -- it will be added to the exponent. */
j .Lmul_rounded
.Lmul_overflow:
bltz a8, .Lmul_underflow
/* Return +/- Infinity. */
addi a8, a4, 1 /* 0x7ff */
slli xh, a8, 20
movi xl, 0
j .Lmul_addsign
.Lmul_underflow:
/* Create a subnormal value, where the exponent field contains zero,
but the effective exponent is 1. The value of a8 is one less than
the actual exponent, so just negate it to get the shift amount. */
neg a8, a8
mov a9, a6
ssr a8
bgeui a8, 32, .Lmul_bigshift
/* Shift xh/xl right. Any bits that are shifted out of xl are saved
in a6 (combined with the shifted-out bits currently in a6) for
rounding the result. */
sll a6, xl
src xl, xh, xl
srl xh, xh
j 1f
.Lmul_bigshift:
bgeui a8, 64, .Lmul_flush_to_zero
sll a10, xl /* lost bits shifted out of xl */
src a6, xh, xl
srl xl, xh
movi xh, 0
or a9, a9, a10
/* Set the exponent to zero. */
1: movi a8, 0
/* Pack any nonzero bits shifted out into a6. */
beqz a9, .Lmul_round
movi a9, 1
or a6, a6, a9
j .Lmul_round
.Lmul_flush_to_zero:
/* Return zero with the appropriate sign bit. */
srli xh, a7, 31
slli xh, xh, 31
movi xl, 0
j .Lmul_done
#if XCHAL_NO_MUL
/* For Xtensa processors with no multiply hardware, this simplified
version of _mulsi3 is used for multiplying 16-bit chunks of
the floating-point mantissas. When using CALL0, this function
uses a custom ABI: the inputs are passed in a13 and a14, the
result is returned in a12, and a8 and a15 are clobbered. */
.align 4
.Lmul_mulsi3:
leaf_entry sp, 16
.macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
movi \dst, 0
1: add \tmp1, \src2, \dst
extui \tmp2, \src1, 0, 1
movnez \dst, \tmp1, \tmp2
do_addx2 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 1, 1
movnez \dst, \tmp1, \tmp2
do_addx4 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 2, 1
movnez \dst, \tmp1, \tmp2
do_addx8 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 3, 1
movnez \dst, \tmp1, \tmp2
srli \src1, \src1, 4
slli \src2, \src2, 4
bnez \src1, 1b
.endm
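	/* Rough C equivalent of the loop in the macro above
	   (illustrative only):
	     dst = 0;
	     while (src1) {
	       if (src1 & 1) dst += src2;
	       if (src1 & 2) dst += src2 << 1;
	       if (src1 & 4) dst += src2 << 2;
	       if (src1 & 8) dst += src2 << 3;
	       src1 >>= 4;
	       src2 <<= 4;
	     } */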
#if __XTENSA_CALL0_ABI__
mul_mulsi3_body a12, a13, a14, a15, a8
#else
/* The result will be written into a2, so save that argument in a4. */
mov a4, a2
mul_mulsi3_body a2, a4, a3, a5, a6
#endif
leaf_return
#endif /* XCHAL_NO_MUL */
#endif /* L_muldf3 */
#ifdef L_divdf3
/* Division */
#if XCHAL_HAVE_DFP_DIV
.text
.align 4
.global __divdf3
.type __divdf3, @function
__divdf3:
leaf_entry sp, 16
wfrd f1, xh, xl
wfrd f2, yh, yl
div0.d f3, f2
nexp01.d f4, f2
const.d f0, 1
maddn.d f0, f4, f3
const.d f5, 0
mov.d f7, f2
mkdadj.d f7, f1
maddn.d f3, f0, f3
maddn.d f5, f0, f0
nexp01.d f1, f1
div0.d f2, f2
maddn.d f3, f5, f3
const.d f5, 1
const.d f0, 0
neg.d f6, f1
maddn.d f5, f4, f3
maddn.d f0, f6, f2
maddn.d f3, f5, f3
maddn.d f6, f4, f0
const.d f2, 1
maddn.d f2, f4, f3
maddn.d f0, f6, f3
neg.d f1, f1
maddn.d f3, f2, f3
maddn.d f1, f4, f0
addexpm.d f0, f7
addexp.d f3, f7
divn.d f0, f1, f3
rfr xl, f0
rfrd xh, f0
leaf_return
#else
.literal_position
__divdf3_aux:
/* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
(This code is placed before the start of the function just to
keep it in range of the limited branch displacements.) */
.Ldiv_yexpzero:
/* Clear the sign bit of y. */
slli yh, yh, 1
srli yh, yh, 1
/* Check for division by zero. */
or a10, yh, yl
beqz a10, .Ldiv_yzero
/* Normalize y. Adjust the exponent in a9. */
beqz yh, .Ldiv_yh_zero
do_nsau a10, yh, a11, a9
addi a10, a10, -11
ssl a10
src yh, yh, yl
sll yl, yl
movi a9, 1
sub a9, a9, a10
j .Ldiv_ynormalized
.Ldiv_yh_zero:
do_nsau a10, yl, a11, a9
addi a10, a10, -11
movi a9, -31
sub a9, a9, a10
ssl a10
bltz a10, .Ldiv_yl_srl
sll yh, yl
movi yl, 0
j .Ldiv_ynormalized
.Ldiv_yl_srl:
srl yh, yl
sll yl, yl
j .Ldiv_ynormalized
.Ldiv_yzero:
/* y is zero. Return NaN if x is also zero; otherwise, infinity. */
slli xh, xh, 1
srli xh, xh, 1
or xl, xl, xh
srli xh, a7, 31
slli xh, xh, 31
or xh, xh, a6
bnez xl, 1f
movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4
1: movi xl, 0
leaf_return
.Ldiv_xexpzero:
/* Clear the sign bit of x. */
slli xh, xh, 1
srli xh, xh, 1
/* If x is zero, return zero. */
or a10, xh, xl
beqz a10, .Ldiv_return_zero
/* Normalize x. Adjust the exponent in a8. */
beqz xh, .Ldiv_xh_zero
do_nsau a10, xh, a11, a8
addi a10, a10, -11
ssl a10
src xh, xh, xl
sll xl, xl
movi a8, 1
sub a8, a8, a10
j .Ldiv_xnormalized
.Ldiv_xh_zero:
do_nsau a10, xl, a11, a8
addi a10, a10, -11
movi a8, -31
sub a8, a8, a10
ssl a10
bltz a10, .Ldiv_xl_srl
sll xh, xl
movi xl, 0
j .Ldiv_xnormalized
.Ldiv_xl_srl:
srl xh, xl
sll xl, xl
j .Ldiv_xnormalized
.Ldiv_return_zero:
/* Return zero with the appropriate sign bit. */
srli xh, a7, 31
slli xh, xh, 31
movi xl, 0
leaf_return
.Ldiv_xnan_or_inf:
/* Set the sign bit of the result. */
srli a7, yh, 31
slli a7, a7, 31
xor xh, xh, a7
/* If y is NaN or Inf, return NaN. */
ball yh, a6, .Ldiv_return_nan
slli a8, xh, 12
or a8, a8, xl
bnez a8, .Ldiv_return_nan
leaf_return
.Ldiv_ynan_or_inf:
/* If y is Infinity, return zero. */
slli a8, yh, 12
or a8, a8, yl
beqz a8, .Ldiv_return_zero
/* y is NaN; return it. */
mov xh, yh
mov xl, yl
.Ldiv_return_nan:
movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4
leaf_return
.Ldiv_highequal1:
bltu xl, yl, 2f
j 3f
.align 4
.global __divdf3
.type __divdf3, @function
__divdf3:
leaf_entry sp, 16
movi a6, 0x7ff00000
/* Get the sign of the result. */
xor a7, xh, yh
/* Check for NaN and infinity. */
ball xh, a6, .Ldiv_xnan_or_inf
ball yh, a6, .Ldiv_ynan_or_inf
/* Extract the exponents. */
extui a8, xh, 20, 11
extui a9, yh, 20, 11
beqz a9, .Ldiv_yexpzero
.Ldiv_ynormalized:
beqz a8, .Ldiv_xexpzero
.Ldiv_xnormalized:
/* Subtract the exponents. */
sub a8, a8, a9
/* Replace sign/exponent fields with explicit "1.0". */
movi a10, 0x1fffff
or xh, xh, a6
and xh, xh, a10
or yh, yh, a6
and yh, yh, a10
/* Set SAR for left shift by one. */
ssai (32 - 1)
/* The first digit of the mantissa division must be a one.
Shift x (and adjust the exponent) as needed to make this true. */
bltu yh, xh, 3f
beq yh, xh, .Ldiv_highequal1
2: src xh, xh, xl
sll xl, xl
addi a8, a8, -1
3:
/* Do the first subtraction and shift. */
sub xh, xh, yh
bgeu xl, yl, 1f
addi xh, xh, -1
1: sub xl, xl, yl
src xh, xh, xl
sll xl, xl
/* Put the quotient into a10/a11. */
movi a10, 0
movi a11, 1
/* Divide one bit at a time for 52 bits. */
movi a9, 52
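	/* Each iteration below is one step of classic binary long
	   division (illustrative C sketch, with q = a10/a11 and
	   rem = xh/xl):
	     q <<= 1;
	     if (rem >= y) { q |= 1; rem -= y; }
	     rem <<= 1; */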
#if XCHAL_HAVE_LOOPS
loop a9, .Ldiv_loopend
#endif
.Ldiv_loop:
/* Shift the quotient << 1. */
src a10, a10, a11
sll a11, a11
/* Is this digit a 0 or 1? */
bltu xh, yh, 3f
beq xh, yh, .Ldiv_highequal2
/* Output a 1 and subtract. */
2: addi a11, a11, 1
sub xh, xh, yh
bgeu xl, yl, 1f
addi xh, xh, -1
1: sub xl, xl, yl
/* Shift the dividend << 1. */
3: src xh, xh, xl
sll xl, xl
#if !XCHAL_HAVE_LOOPS
addi a9, a9, -1
bnez a9, .Ldiv_loop
#endif
.Ldiv_loopend:
/* Add the exponent bias (less one to account for the explicit "1.0"
of the mantissa that will be added to the exponent in the final
result). */
movi a9, 0x3fe
add a8, a8, a9
/* Check for over/underflow. The value in a8 is one less than the
final exponent, so values in the range 0..7fd are OK here. */
addmi a9, a9, 0x400 /* 0x7fe */
bgeu a8, a9, .Ldiv_overflow
.Ldiv_round:
/* Round. The remainder (<< 1) is in xh/xl. */
bltu xh, yh, .Ldiv_rounded
beq xh, yh, .Ldiv_highequal3
.Ldiv_roundup:
addi a11, a11, 1
beqz a11, .Ldiv_roundcarry
.Ldiv_rounded:
mov xl, a11
/* Add the exponent to the mantissa. */
slli a8, a8, 20
add xh, a10, a8
.Ldiv_addsign:
/* Add the sign bit. */
srli a7, a7, 31
slli a7, a7, 31
or xh, xh, a7
leaf_return
.Ldiv_highequal2:
bgeu xl, yl, 2b
j 3b
.Ldiv_highequal3:
bltu xl, yl, .Ldiv_rounded
bne xl, yl, .Ldiv_roundup
/* Remainder is exactly half the divisor. Round even. */
addi a11, a11, 1
beqz a11, .Ldiv_roundcarry
srli a11, a11, 1
slli a11, a11, 1
j .Ldiv_rounded
.Ldiv_overflow:
bltz a8, .Ldiv_underflow
/* Return +/- Infinity. */
addi a8, a9, 1 /* 0x7ff */
slli xh, a8, 20
movi xl, 0
j .Ldiv_addsign
.Ldiv_underflow:
/* Create a subnormal value, where the exponent field contains zero,
but the effective exponent is 1. The value of a8 is one less than
the actual exponent, so just negate it to get the shift amount. */
neg a8, a8
ssr a8
bgeui a8, 32, .Ldiv_bigshift
/* Shift a10/a11 right. Any bits that are shifted out of a11 are
saved in a6 for rounding the result. */
sll a6, a11
src a11, a10, a11
srl a10, a10
j 1f
.Ldiv_bigshift:
bgeui a8, 64, .Ldiv_flush_to_zero
sll a9, a11 /* lost bits shifted out of a11 */
src a6, a10, a11
srl a11, a10
movi a10, 0
or xl, xl, a9
/* Set the exponent to zero. */
1: movi a8, 0
/* Pack any nonzero remainder (in xh/xl) into a6. */
or xh, xh, xl
beqz xh, 1f
movi a9, 1
or a6, a6, a9
/* Round a10/a11 based on the bits shifted out into a6. */
1: bgez a6, .Ldiv_rounded
addi a11, a11, 1
beqz a11, .Ldiv_roundcarry
slli a6, a6, 1
bnez a6, .Ldiv_rounded
srli a11, a11, 1
slli a11, a11, 1
j .Ldiv_rounded
.Ldiv_roundcarry:
/* a11 is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */
addi a10, a10, 1
/* Overflow to the exponent field is OK. */
j .Ldiv_rounded
.Ldiv_flush_to_zero:
/* Return zero with the appropriate sign bit. */
srli xh, a7, 31
slli xh, xh, 31
movi xl, 0
leaf_return
#endif /* XCHAL_HAVE_DFP_DIV */
#endif /* L_divdf3 */
#ifdef L_cmpdf2
/* Equal and Not Equal */
.align 4
.global __eqdf2
.global __nedf2
.set __nedf2, __eqdf2
.type __eqdf2, @function
__eqdf2:
leaf_entry sp, 16
bne xl, yl, 2f
bne xh, yh, 4f
/* The values are equal but NaN != NaN. Check the exponent. */
movi a6, 0x7ff00000
ball xh, a6, 3f
/* Equal. */
movi a2, 0
leaf_return
/* Not equal. */
2: movi a2, 1
leaf_return
/* Check if the mantissas are nonzero. */
3: slli a7, xh, 12
or a7, a7, xl
j 5f
/* Check if x and y are zero with different signs. */
4: or a7, xh, yh
slli a7, a7, 1
or a7, a7, xl /* xl == yl here */
	/* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa
	   of x when exponent(x) = 0x7ff and x == y.  */
5: movi a2, 0
movi a3, 1
movnez a2, a3, a7
leaf_return
/* Greater Than */
.align 4
.global __gtdf2
.type __gtdf2, @function
__gtdf2:
leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 2f
1: bnall yh, a6, .Lle_cmp
/* Check if y is a NaN. */
slli a7, yh, 12
or a7, a7, yl
beqz a7, .Lle_cmp
movi a2, 0
leaf_return
/* Check if x is a NaN. */
2: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, 0
leaf_return
/* Less Than or Equal */
.align 4
.global __ledf2
.type __ledf2, @function
__ledf2:
leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 2f
1: bnall yh, a6, .Lle_cmp
/* Check if y is a NaN. */
slli a7, yh, 12
or a7, a7, yl
beqz a7, .Lle_cmp
movi a2, 1
leaf_return
/* Check if x is a NaN. */
2: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, 1
leaf_return
.Lle_cmp:
/* Check if x and y have different signs. */
xor a7, xh, yh
bltz a7, .Lle_diff_signs
/* Check if x is negative. */
bltz xh, .Lle_xneg
/* Check if x <= y. */
bltu xh, yh, 4f
bne xh, yh, 5f
bltu yl, xl, 5f
4: movi a2, 0
leaf_return
.Lle_xneg:
/* Check if y <= x. */
bltu yh, xh, 4b
bne yh, xh, 5f
bgeu xl, yl, 4b
5: movi a2, 1
leaf_return
.Lle_diff_signs:
bltz xh, 4b
/* Check if both x and y are zero. */
or a7, xh, yh
slli a7, a7, 1
or a7, a7, xl
or a7, a7, yl
movi a2, 1
movi a3, 0
moveqz a2, a3, a7
leaf_return
/* Greater Than or Equal */
.align 4
.global __gedf2
.type __gedf2, @function
__gedf2:
leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 2f
1: bnall yh, a6, .Llt_cmp
/* Check if y is a NaN. */
slli a7, yh, 12
or a7, a7, yl
beqz a7, .Llt_cmp
movi a2, -1
leaf_return
/* Check if x is a NaN. */
2: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, -1
leaf_return
/* Less Than */
.align 4
.global __ltdf2
.type __ltdf2, @function
__ltdf2:
leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 2f
1: bnall yh, a6, .Llt_cmp
/* Check if y is a NaN. */
slli a7, yh, 12
or a7, a7, yl
beqz a7, .Llt_cmp
movi a2, 0
leaf_return
/* Check if x is a NaN. */
2: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, 0
leaf_return
.Llt_cmp:
/* Check if x and y have different signs. */
xor a7, xh, yh
bltz a7, .Llt_diff_signs
/* Check if x is negative. */
bltz xh, .Llt_xneg
/* Check if x < y. */
bltu xh, yh, 4f
bne xh, yh, 5f
bgeu xl, yl, 5f
4: movi a2, -1
leaf_return
.Llt_xneg:
/* Check if y < x. */
bltu yh, xh, 4b
bne yh, xh, 5f
bltu yl, xl, 4b
5: movi a2, 0
leaf_return
.Llt_diff_signs:
bgez xh, 5b
/* Check if both x and y are nonzero. */
or a7, xh, yh
slli a7, a7, 1
or a7, a7, xl
or a7, a7, yl
movi a2, 0
movi a3, -1
movnez a2, a3, a7
leaf_return
/* Unordered */
.align 4
.global __unorddf2
.type __unorddf2, @function
__unorddf2:
leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 3f
1: ball yh, a6, 4f
2: movi a2, 0
leaf_return
3: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, 1
leaf_return
4: slli a7, yh, 12
or a7, a7, yl
beqz a7, 2b
movi a2, 1
leaf_return
#endif /* L_cmpdf2 */
#ifdef L_fixdfsi
.align 4
.global __fixdfsi
.type __fixdfsi, @function
__fixdfsi:
leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7ff00000
ball xh, a6, .Lfixdfsi_nan_or_inf
/* Extract the exponent and check if 0 < (exp - 0x3fe) < 32. */
extui a4, xh, 20, 11
extui a5, a6, 19, 10 /* 0x3fe */
sub a4, a4, a5
bgei a4, 32, .Lfixdfsi_maxint
blti a4, 1, .Lfixdfsi_zero
/* Add explicit "1.0" and shift << 11. */
or a7, xh, a6
ssai (32 - 11)
src a5, a7, xl
/* Shift back to the right, based on the exponent. */
ssl a4 /* shift by 32 - a4 */
srl a5, a5
/* Negate the result if sign != 0. */
neg a2, a5
movgez a2, a5, a7
leaf_return
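	/* Worked example for the conversion above:
	   x = 5.0 = 0x40140000:00000000.  The exponent field is 0x401,
	   so a4 = 0x401 - 0x3fe = 3.  With the explicit "1.0" OR'ed in
	   and shifted << 11, a5 = 0xa0000000, and shifting right by
	   32 - 3 = 29 yields 5. */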
.Lfixdfsi_nan_or_inf:
/* Handle Infinity and NaN. */
slli a4, xh, 12
or a4, a4, xl
beqz a4, .Lfixdfsi_maxint
/* Translate NaN to +maxint. */
movi xh, 0
.Lfixdfsi_maxint:
slli a4, a6, 11 /* 0x80000000 */
addi a5, a4, -1 /* 0x7fffffff */
movgez a4, a5, xh
mov a2, a4
leaf_return
.Lfixdfsi_zero:
movi a2, 0
leaf_return
#endif /* L_fixdfsi */
#ifdef L_fixdfdi
.align 4
.global __fixdfdi
.type __fixdfdi, @function
__fixdfdi:
leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7ff00000
ball xh, a6, .Lfixdfdi_nan_or_inf
/* Extract the exponent and check if 0 < (exp - 0x3fe) < 64. */
extui a4, xh, 20, 11
extui a5, a6, 19, 10 /* 0x3fe */
sub a4, a4, a5
bgei a4, 64, .Lfixdfdi_maxint
blti a4, 1, .Lfixdfdi_zero
/* Add explicit "1.0" and shift << 11. */
or a7, xh, a6
ssai (32 - 11)
src xh, a7, xl
sll xl, xl
/* Shift back to the right, based on the exponent. */
ssl a4 /* shift by 64 - a4 */
bgei a4, 32, .Lfixdfdi_smallshift
srl xl, xh
movi xh, 0
.Lfixdfdi_shifted:
/* Negate the result if sign != 0. */
bgez a7, 1f
neg xl, xl
neg xh, xh
beqz xl, 1f
addi xh, xh, -1
1: leaf_return
.Lfixdfdi_smallshift:
src xl, xh, xl
srl xh, xh
j .Lfixdfdi_shifted
.Lfixdfdi_nan_or_inf:
/* Handle Infinity and NaN. */
slli a4, xh, 12
or a4, a4, xl
beqz a4, .Lfixdfdi_maxint
/* Translate NaN to +maxint. */
movi xh, 0
.Lfixdfdi_maxint:
slli a7, a6, 11 /* 0x80000000 */
bgez xh, 1f
mov xh, a7
movi xl, 0
leaf_return
1: addi xh, a7, -1 /* 0x7fffffff */
movi xl, -1
leaf_return
.Lfixdfdi_zero:
movi xh, 0
movi xl, 0
leaf_return
#endif /* L_fixdfdi */
#ifdef L_fixunsdfsi
.align 4
.global __fixunsdfsi
.type __fixunsdfsi, @function
__fixunsdfsi:
leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7ff00000
ball xh, a6, .Lfixunsdfsi_nan_or_inf
/* Extract the exponent and check if 0 <= (exp - 0x3ff) < 32. */
extui a4, xh, 20, 11
extui a5, a6, 20, 10 /* 0x3ff */
sub a4, a4, a5
bgei a4, 32, .Lfixunsdfsi_maxint
bltz a4, .Lfixunsdfsi_zero
/* Add explicit "1.0" and shift << 11. */
or a7, xh, a6
ssai (32 - 11)
src a5, a7, xl
/* Shift back to the right, based on the exponent. */
addi a4, a4, 1
beqi a4, 32, .Lfixunsdfsi_bigexp
ssl a4 /* shift by 32 - a4 */
srl a5, a5
/* Negate the result if sign != 0. */
neg a2, a5
movgez a2, a5, a7
leaf_return
.Lfixunsdfsi_nan_or_inf:
/* Handle Infinity and NaN. */
slli a4, xh, 12
or a4, a4, xl
beqz a4, .Lfixunsdfsi_maxint
/* Translate NaN to 0xffffffff. */
movi a2, -1
leaf_return
.Lfixunsdfsi_maxint:
slli a4, a6, 11 /* 0x80000000 */
movi a5, -1 /* 0xffffffff */
movgez a4, a5, xh
mov a2, a4
leaf_return
.Lfixunsdfsi_zero:
movi a2, 0
leaf_return
.Lfixunsdfsi_bigexp:
/* Handle unsigned maximum exponent case. */
bltz xh, 1f
mov a2, a5 /* no shift needed */
leaf_return
/* Return 0x80000000 if negative. */
1: slli a2, a6, 11
leaf_return
#endif /* L_fixunsdfsi */
#ifdef L_fixunsdfdi
.align 4
.global __fixunsdfdi
.type __fixunsdfdi, @function
__fixunsdfdi:
leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7ff00000
ball xh, a6, .Lfixunsdfdi_nan_or_inf
/* Extract the exponent and check if 0 <= (exp - 0x3ff) < 64. */
extui a4, xh, 20, 11
extui a5, a6, 20, 10 /* 0x3ff */
sub a4, a4, a5
bgei a4, 64, .Lfixunsdfdi_maxint
bltz a4, .Lfixunsdfdi_zero
/* Add explicit "1.0" and shift << 11. */
or a7, xh, a6
ssai (32 - 11)
src xh, a7, xl
sll xl, xl
/* Shift back to the right, based on the exponent. */
addi a4, a4, 1
beqi a4, 64, .Lfixunsdfdi_bigexp
ssl a4 /* shift by 64 - a4 */
bgei a4, 32, .Lfixunsdfdi_smallshift
srl xl, xh
movi xh, 0
.Lfixunsdfdi_shifted:
/* Negate the result if sign != 0. */
bgez a7, 1f
neg xl, xl
neg xh, xh
beqz xl, 1f
addi xh, xh, -1
1: leaf_return
.Lfixunsdfdi_smallshift:
src xl, xh, xl
srl xh, xh
j .Lfixunsdfdi_shifted
.Lfixunsdfdi_nan_or_inf:
/* Handle Infinity and NaN. */
slli a4, xh, 12
or a4, a4, xl
beqz a4, .Lfixunsdfdi_maxint
/* Translate NaN to 0xffffffff.... */
1: movi xh, -1
movi xl, -1
leaf_return
.Lfixunsdfdi_maxint:
bgez xh, 1b
2: slli xh, a6, 11 /* 0x80000000 */
movi xl, 0
leaf_return
.Lfixunsdfdi_zero:
movi xh, 0
movi xl, 0
leaf_return
.Lfixunsdfdi_bigexp:
/* Handle unsigned maximum exponent case. */
bltz a7, 2b
leaf_return /* no shift needed */
#endif /* L_fixunsdfdi */
#ifdef L_floatsidf
.align 4
.global __floatunsidf
.type __floatunsidf, @function
__floatunsidf:
leaf_entry sp, 16
beqz a2, .Lfloatsidf_return_zero
/* Set the sign to zero and jump to the floatsidf code. */
movi a7, 0
j .Lfloatsidf_normalize
.align 4
.global __floatsidf
.type __floatsidf, @function
__floatsidf:
leaf_entry sp, 16
/* Check for zero. */
beqz a2, .Lfloatsidf_return_zero
/* Save the sign. */
extui a7, a2, 31, 1
/* Get the absolute value. */
#if XCHAL_HAVE_ABS
abs a2, a2
#else
neg a4, a2
movltz a2, a4, a2
#endif
.Lfloatsidf_normalize:
/* Normalize with the first 1 bit in the msb. */
do_nsau a4, a2, a5, a6
ssl a4
sll a5, a2
/* Shift the mantissa into position. */
srli xh, a5, 11
slli xl, a5, (32 - 11)
/* Set the exponent. */
movi a5, 0x41d /* 0x3fe + 31 */
sub a5, a5, a4
slli a5, a5, 20
add xh, xh, a5
/* Add the sign and return. */
slli a7, a7, 31
or xh, xh, a7
leaf_return
.Lfloatsidf_return_zero:
movi a3, 0
leaf_return
#endif /* L_floatsidf */
#ifdef L_floatdidf
.align 4
.global __floatundidf
.type __floatundidf, @function
__floatundidf:
leaf_entry sp, 16
/* Check for zero. */
or a4, xh, xl
beqz a4, 2f
/* Set the sign to zero and jump to the floatdidf code. */
movi a7, 0
j .Lfloatdidf_normalize
.align 4
.global __floatdidf
.type __floatdidf, @function
__floatdidf:
leaf_entry sp, 16
/* Check for zero. */
or a4, xh, xl
beqz a4, 2f
/* Save the sign. */
extui a7, xh, 31, 1
/* Get the absolute value. */
bgez xh, .Lfloatdidf_normalize
neg xl, xl
neg xh, xh
beqz xl, .Lfloatdidf_normalize
addi xh, xh, -1
.Lfloatdidf_normalize:
/* Normalize with the first 1 bit in the msb of xh. */
beqz xh, .Lfloatdidf_bigshift
do_nsau a4, xh, a5, a6
ssl a4
src xh, xh, xl
sll xl, xl
.Lfloatdidf_shifted:
/* Shift the mantissa into position, with rounding bits in a6. */
ssai 11
sll a6, xl
src xl, xh, xl
srl xh, xh
/* Set the exponent. */
movi a5, 0x43d /* 0x3fe + 63 */
sub a5, a5, a4
slli a5, a5, 20
add xh, xh, a5
/* Add the sign. */
slli a7, a7, 31
or xh, xh, a7
/* Round up if the leftover fraction is >= 1/2. */
bgez a6, 2f
addi xl, xl, 1
beqz xl, .Lfloatdidf_roundcarry
/* Check if the leftover fraction is exactly 1/2. */
slli a6, a6, 1
beqz a6, .Lfloatdidf_exactlyhalf
2: leaf_return
.Lfloatdidf_bigshift:
/* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
do_nsau a4, xl, a5, a6
ssl a4
sll xh, xl
movi xl, 0
addi a4, a4, 32
j .Lfloatdidf_shifted
.Lfloatdidf_exactlyhalf:
/* Round down to the nearest even value. */
srli xl, xl, 1
slli xl, xl, 1
leaf_return
.Lfloatdidf_roundcarry:
/* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */
addi xh, xh, 1
/* Overflow to the exponent is OK. */
leaf_return
#endif /* L_floatdidf */
#ifdef L_truncdfsf2
.align 4
.global __truncdfsf2
.type __truncdfsf2, @function
__truncdfsf2:
leaf_entry sp, 16
/* Adjust the exponent bias. */
movi a4, (0x3ff - 0x7f) << 20
sub a5, xh, a4
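	/* 0x3ff - 0x7f = 0x380 is the difference between the double- and
	   single-precision exponent biases (1023 vs. 127); e.g., 1.0 has
	   exponent field 0x3ff as a double and 0x7f as a float. */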
/* Check for underflow. */
xor a6, xh, a5
bltz a6, .Ltrunc_underflow
extui a6, a5, 20, 11
beqz a6, .Ltrunc_underflow
/* Check for overflow. */
movi a4, 255
bge a6, a4, .Ltrunc_overflow
/* Shift a5/xl << 3 into a5/a4. */
ssai (32 - 3)
src a5, a5, xl
sll a4, xl
.Ltrunc_addsign:
/* Add the sign bit. */
extui a6, xh, 31, 1
slli a6, a6, 31
or a2, a6, a5
/* Round up if the leftover fraction is >= 1/2. */
bgez a4, 1f
addi a2, a2, 1
/* Overflow to the exponent is OK. The answer will be correct. */
/* Check if the leftover fraction is exactly 1/2. */
slli a4, a4, 1
beqz a4, .Ltrunc_exactlyhalf
1: leaf_return
.Ltrunc_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
leaf_return
.Ltrunc_overflow:
/* Check if exponent == 0x7ff. */
movi a4, 0x7ff00000
bnall xh, a4, 1f
/* Check if mantissa is nonzero. */
slli a5, xh, 12
or a5, a5, xl
beqz a5, 1f
/* Shift a4 to set a bit in the mantissa, making a quiet NaN. */
srli a4, a4, 1
1: slli a4, a4, 4 /* 0xff000000 or 0xff800000 */
/* Add the sign bit. */
extui a6, xh, 31, 1
ssai 1
src a2, a6, a4
leaf_return
.Ltrunc_underflow:
/* Find shift count for a subnormal. Flush to zero if >= 32. */
extui a6, xh, 20, 11
movi a5, 0x3ff - 0x7f
sub a6, a5, a6
addi a6, a6, 1
bgeui a6, 32, 1f
/* Replace the exponent with an explicit "1.0". */
slli a5, a5, 13 /* 0x700000 */
or a5, a5, xh
slli a5, a5, 11
srli a5, a5, 11
/* Shift the mantissa left by 3 bits (into a5/a4). */
ssai (32 - 3)
src a5, a5, xl
sll a4, xl
/* Shift right by a6. */
ssr a6
sll a7, a4
src a4, a5, a4
srl a5, a5
beqz a7, .Ltrunc_addsign
or a4, a4, a6 /* any positive, nonzero value will work */
j .Ltrunc_addsign
/* Return +/- zero. */
1: extui a2, xh, 31, 1
slli a2, a2, 31
leaf_return
#endif /* L_truncdfsf2 */
#ifdef L_extendsfdf2
.align 4
.global __extendsfdf2
.type __extendsfdf2, @function
__extendsfdf2:
leaf_entry sp, 16
/* Save the sign bit and then shift it off. */
extui a5, a2, 31, 1
slli a5, a5, 31
slli a4, a2, 1
/* Extract and check the exponent. */
extui a6, a2, 23, 8
beqz a6, .Lextend_expzero
addi a6, a6, 1
beqi a6, 256, .Lextend_nan_or_inf
/* Shift >> 3 into a4/xl. */
srli a4, a4, 4
slli xl, a2, (32 - 3)
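	/* Net effect (for reference): the 23-bit single fraction moves
	   from bits 0..22 into the top of the 52-bit double fraction
	   (xh bits 0..19 plus xl bits 29..31), and the exponent field
	   moves from bits 23..30 to bits 20..27, where the bias
	   adjustment below widens it to 11 bits. */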
/* Adjust the exponent bias. */
movi a6, (0x3ff - 0x7f) << 20
add a4, a4, a6
/* Add the sign bit. */
or xh, a4, a5
leaf_return
.Lextend_nan_or_inf:
movi a4, 0x7ff00000
/* Check for NaN. */
slli a7, a2, 9
beqz a7, 1f
slli a6, a6, 11 /* 0x80000 */
or a4, a4, a6
/* Add the sign and return. */
1: or xh, a4, a5
movi xl, 0
leaf_return
.Lextend_expzero:
beqz a4, 1b
/* Normalize it to have 8 zero bits before the first 1 bit. */
do_nsau a7, a4, a2, a3
addi a7, a7, -8
ssl a7
sll a4, a4
/* Shift >> 3 into a4/xl. */
slli xl, a4, (32 - 3)
srli a4, a4, 3
/* Set the exponent. */
movi a6, 0x3fe - 0x7f
sub a6, a6, a7
slli a6, a6, 20
add a4, a4, a6
/* Add the sign and return. */
or xh, a4, a5
leaf_return
#endif /* L_extendsfdf2 */
#if XCHAL_HAVE_DFP_SQRT
#ifdef L_sqrt
.text
.align 4
.global __ieee754_sqrt
.type __ieee754_sqrt, @function
__ieee754_sqrt:
leaf_entry sp, 16
wfrd f1, xh, xl
sqrt0.d f2, f1
const.d f4, 0
maddn.d f4, f2, f2
nexp01.d f3, f1
const.d f0, 3
addexp.d f3, f0
maddn.d f0, f4, f3
nexp01.d f4, f1
maddn.d f2, f0, f2
const.d f5, 0
maddn.d f5, f2, f3
const.d f0, 3
maddn.d f0, f5, f2
neg.d f6, f4
maddn.d f2, f0, f2
const.d f0, 0
const.d f5, 0
const.d f7, 0
maddn.d f0, f6, f2
maddn.d f5, f2, f3
const.d f3, 3
maddn.d f7, f3, f2
maddn.d f4, f0, f0
maddn.d f3, f5, f2
neg.d f2, f7
maddn.d f0, f4, f2
maddn.d f7, f3, f7
mksadj.d f2, f1
nexp01.d f1, f1
maddn.d f1, f0, f0
neg.d f3, f7
addexpm.d f0, f2
addexp.d f3, f2
divn.d f0, f1, f3
rfr xl, f0
rfrd xh, f0
leaf_return
#endif /* L_sqrt */
#endif /* XCHAL_HAVE_DFP_SQRT */
#if XCHAL_HAVE_DFP_RECIP
#ifdef L_recipdf2
/* Reciprocal */
.align 4
.global __recipdf2
.type __recipdf2, @function
__recipdf2:
leaf_entry sp, 16
wfrd f1, xh, xl
recip0.d f0, f1
const.d f2, 2
msub.d f2, f1, f0
mul.d f3, f1, f0
const.d f4, 2
mul.d f5, f0, f2
msub.d f4, f3, f2
const.d f2, 1
mul.d f0, f5, f4
msub.d f2, f1, f0
maddn.d f0, f0, f2
rfr xl, f0
rfrd xh, f0
leaf_return
#endif /* L_recipdf2 */
#endif /* XCHAL_HAVE_DFP_RECIP */
#if XCHAL_HAVE_DFP_RSQRT
#ifdef L_rsqrtdf2
/* Reciprocal square root */
.align 4
.global __rsqrtdf2
.type __rsqrtdf2, @function
__rsqrtdf2:
leaf_entry sp, 16
wfrd f1, xh, xl
rsqrt0.d f0, f1
mul.d f2, f1, f0
const.d f3, 3
mul.d f4, f3, f0
const.d f5, 1
msub.d f5, f2, f0
maddn.d f0, f4, f5
const.d f2, 1
mul.d f4, f1, f0
mul.d f5, f3, f0
msub.d f2, f4, f0
maddn.d f0, f5, f2
const.d f2, 1
mul.d f1, f1, f0
mul.d f3, f3, f0
msub.d f2, f1, f0
maddn.d f0, f3, f2
rfr xl, f0
rfrd xh, f0
leaf_return
#endif /* L_rsqrtdf2 */
#endif /* XCHAL_HAVE_DFP_RSQRT */
/* plugin-libc/libgcc/config/xtensa/lib1funcs.S */
/* Assembly functions for the Xtensa version of libgcc1.
Copyright (C) 2001-2022 Free Software Foundation, Inc.
Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "xtensa-config.h"
/* Define macros for the ABS and ADDX* instructions to handle cases
where they are not included in the Xtensa processor configuration. */
.macro do_abs dst, src, tmp
#if XCHAL_HAVE_ABS
abs \dst, \src
#else
neg \tmp, \src
movgez \tmp, \src, \src
mov \dst, \tmp
#endif
.endm
.macro do_addx2 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
addx2 \dst, \as, \at
#else
slli \tmp, \as, 1
add \dst, \tmp, \at
#endif
.endm
.macro do_addx4 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
addx4 \dst, \as, \at
#else
slli \tmp, \as, 2
add \dst, \tmp, \at
#endif
.endm
.macro do_addx8 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
addx8 \dst, \as, \at
#else
slli \tmp, \as, 3
add \dst, \tmp, \at
#endif
.endm
/* Define macros for leaf function entry and return, supporting either the
standard register windowed ABI or the non-windowed call0 ABI. These
macros do not allocate any extra stack space, so they only work for
leaf functions that do not need to spill anything to the stack. */
.macro leaf_entry reg, size
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
entry \reg, \size
#else
/* do nothing */
#endif
.endm
.macro leaf_return
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
retw
#else
ret
#endif
.endm
#ifdef L_mulsi3
.align 4
.global __mulsi3
.type __mulsi3, @function
__mulsi3:
leaf_entry sp, 16
#if XCHAL_HAVE_MUL32
mull a2, a2, a3
#elif XCHAL_HAVE_MUL16
or a4, a2, a3
srai a4, a4, 16
bnez a4, .LMUL16
mul16u a2, a2, a3
leaf_return
.LMUL16:
srai a4, a2, 16
srai a5, a3, 16
mul16u a7, a4, a3
mul16u a6, a5, a2
mul16u a4, a2, a3
add a7, a7, a6
slli a7, a7, 16
add a2, a7, a4
#elif XCHAL_HAVE_MAC16
mul.aa.hl a2, a3
mula.aa.lh a2, a3
rsr a5, ACCLO
umul.aa.ll a2, a3
rsr a4, ACCLO
slli a5, a5, 16
add a2, a4, a5
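	/* Conceptually, (a * b) mod 2^32 = ((ah*bl + al*bh) << 16) + al*bl,
	   where ah/al and bh/bl are the 16-bit halves; the ah*bh term is
	   entirely shifted out of the low 32 bits. */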
#else /* !MUL32 && !MUL16 && !MAC16 */
/* Multiply one bit at a time, but unroll the loop 4x to better
exploit the addx instructions and avoid overhead.
Peel the first iteration to save a cycle on init. */
/* Avoid negative numbers. */
xor a5, a2, a3 /* Top bit is 1 if one input is negative. */
do_abs a3, a3, a6
do_abs a2, a2, a6
/* Swap so the second argument is smaller. */
sub a7, a2, a3
mov a4, a3
movgez a4, a2, a7 /* a4 = max (a2, a3) */
movltz a3, a2, a7 /* a3 = min (a2, a3) */
movi a2, 0
extui a6, a3, 0, 1
movnez a2, a4, a6
do_addx2 a7, a4, a2, a7
extui a6, a3, 1, 1
movnez a2, a7, a6
do_addx4 a7, a4, a2, a7
extui a6, a3, 2, 1
movnez a2, a7, a6
do_addx8 a7, a4, a2, a7
extui a6, a3, 3, 1
movnez a2, a7, a6
bgeui a3, 16, .Lmult_main_loop
neg a3, a2
movltz a2, a3, a5
leaf_return
.align 4
.Lmult_main_loop:
srli a3, a3, 4
slli a4, a4, 4
add a7, a4, a2
extui a6, a3, 0, 1
movnez a2, a7, a6
do_addx2 a7, a4, a2, a7
extui a6, a3, 1, 1
movnez a2, a7, a6
do_addx4 a7, a4, a2, a7
extui a6, a3, 2, 1
movnez a2, a7, a6
do_addx8 a7, a4, a2, a7
extui a6, a3, 3, 1
movnez a2, a7, a6
bgeui a3, 16, .Lmult_main_loop
neg a3, a2
movltz a2, a3, a5
#endif /* !MUL32 && !MUL16 && !MAC16 */
leaf_return
.size __mulsi3, . - __mulsi3
#endif /* L_mulsi3 */
#ifdef L_umulsidi3
#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
#define XCHAL_NO_MUL 1
#endif
.align 4
.global __umulsidi3
.type __umulsidi3, @function
__umulsidi3:
#if __XTENSA_CALL0_ABI__
leaf_entry sp, 32
addi sp, sp, -32
s32i a12, sp, 16
s32i a13, sp, 20
s32i a14, sp, 24
s32i a15, sp, 28
#elif XCHAL_NO_MUL
/* This is not really a leaf function; allocate enough stack space
to allow CALL12s to a helper function. */
leaf_entry sp, 48
#else
leaf_entry sp, 16
#endif
#ifdef __XTENSA_EB__
#define wh a2
#define wl a3
#else
#define wh a3
#define wl a2
#endif /* __XTENSA_EB__ */
/* This code is taken from the mulsf3 routine in ieee754-sf.S.
See more comments there. */
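/* A hedged C reference for the partial-product scheme used below
   (a sketch of the arithmetic, not the ABI implementation;
   'umulsidi3_sketch' is an illustrative name; assumes <stdint.h>):

     uint64_t umulsidi3_sketch (uint32_t a, uint32_t b)
     {
       uint32_t al = a & 0xffff, ah = a >> 16;
       uint32_t bl = b & 0xffff, bh = b >> 16;
       uint32_t pp0 = al * bl, pp3 = ah * bh;
       uint32_t pp1 = al * bh, pp2 = ah * bl;
       uint32_t mid = pp1 + pp2;
       uint32_t c1  = (mid < pp1);        /* carry out of pp1 + pp2 */
       uint32_t lo  = pp0 + (mid << 16);
       uint32_t c2  = (lo < pp0);         /* carry into the high word */
       uint32_t hi  = pp3 + (mid >> 16) + (c1 << 16) + c2;
       return ((uint64_t) hi << 32) | lo;
     }
*/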
#if XCHAL_HAVE_MUL32_HIGH
mull a6, a2, a3
muluh wh, a2, a3
mov wl, a6
#else /* ! MUL32_HIGH */
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
/* a0 and a8 will be clobbered by calling the multiply function
but a8 is not used here and need not be saved. */
s32i a0, sp, 0
#endif
#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
#define a2h a4
#define a3h a5
/* Get the high halves of the inputs into registers. */
srli a2h, a2, 16
srli a3h, a3, 16
#define a2l a2
#define a3l a3
#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
/* Clear the high halves of the inputs. This does not matter
for MUL16 because the high bits are ignored. */
extui a2, a2, 0, 16
extui a3, a3, 0, 16
#endif
#endif /* MUL16 || MUL32 */
#if XCHAL_HAVE_MUL16
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
mul16u dst, xreg ## xhalf, yreg ## yhalf
#elif XCHAL_HAVE_MUL32
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
mull dst, xreg ## xhalf, yreg ## yhalf
#elif XCHAL_HAVE_MAC16
/* The preprocessor insists on inserting a space when concatenating after
a period in the definition of do_mul below. These macros are a workaround
using underscores instead of periods when doing the concatenation. */
#define umul_aa_ll umul.aa.ll
#define umul_aa_lh umul.aa.lh
#define umul_aa_hl umul.aa.hl
#define umul_aa_hh umul.aa.hh
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
umul_aa_ ## xhalf ## yhalf xreg, yreg; \
rsr dst, ACCLO
#else /* no multiply hardware */
#define set_arg_l(dst, src) \
extui dst, src, 0, 16
#define set_arg_h(dst, src) \
srli dst, src, 16
#if __XTENSA_CALL0_ABI__
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
set_arg_ ## xhalf (a13, xreg); \
set_arg_ ## yhalf (a14, yreg); \
call0 .Lmul_mulsi3; \
mov dst, a12
#else
#define do_mul(dst, xreg, xhalf, yreg, yhalf) \
set_arg_ ## xhalf (a14, xreg); \
set_arg_ ## yhalf (a15, yreg); \
call12 .Lmul_mulsi3; \
mov dst, a14
#endif /* __XTENSA_CALL0_ABI__ */
#endif /* no multiply hardware */
/* Add pp1 and pp2 into a6 with carry-out in a9. */
do_mul(a6, a2, l, a3, h) /* pp 1 */
do_mul(a11, a2, h, a3, l) /* pp 2 */
movi a9, 0
add a6, a6, a11
bgeu a6, a11, 1f
addi a9, a9, 1
1:
/* Shift the high half of a9/a6 into position in a9. Note that
this value can be safely incremented without any carry-outs. */
ssai 16
src a9, a9, a6
/* Compute the low word into a6. */
do_mul(a11, a2, l, a3, l) /* pp 0 */
sll a6, a6
add a6, a6, a11
bgeu a6, a11, 1f
addi a9, a9, 1
1:
/* Compute the high word into wh. */
do_mul(wh, a2, h, a3, h) /* pp 3 */
add wh, wh, a9
mov wl, a6
#endif /* !MUL32_HIGH */
#if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
/* Restore the original return address. */
l32i a0, sp, 0
#endif
#if __XTENSA_CALL0_ABI__
l32i a12, sp, 16
l32i a13, sp, 20
l32i a14, sp, 24
l32i a15, sp, 28
addi sp, sp, 32
#endif
leaf_return
#if XCHAL_NO_MUL
/* For Xtensa processors with no multiply hardware, this simplified
version of _mulsi3 is used for multiplying 16-bit chunks of the
operands (the same helper multiplies floating-point mantissas in
ieee754-sf.S, where this code originates). When using CALL0, this
function uses a custom ABI: the inputs are passed in a13 and a14, the
result is returned in a12, and a8 and a15 are clobbered. */
.align 4
.Lmul_mulsi3:
leaf_entry sp, 16
.macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
movi \dst, 0
1: add \tmp1, \src2, \dst
extui \tmp2, \src1, 0, 1
movnez \dst, \tmp1, \tmp2
do_addx2 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 1, 1
movnez \dst, \tmp1, \tmp2
do_addx4 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 2, 1
movnez \dst, \tmp1, \tmp2
do_addx8 \tmp1, \src2, \dst, \tmp1
extui \tmp2, \src1, 3, 1
movnez \dst, \tmp1, \tmp2
srli \src1, \src1, 4
slli \src2, \src2, 4
bnez \src1, 1b
.endm
#if __XTENSA_CALL0_ABI__
mul_mulsi3_body a12, a13, a14, a15, a8
#else
/* The result will be written into a2, so save that argument in a4. */
mov a4, a2
mul_mulsi3_body a2, a4, a3, a5, a6
#endif
leaf_return
#endif /* XCHAL_NO_MUL */
.size __umulsidi3, . - __umulsidi3
#endif /* L_umulsidi3 */
/* Define a macro for the NSAU (unsigned normalize shift amount)
instruction, which computes the number of leading zero bits,
to handle cases where it is not included in the Xtensa processor
configuration. */
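/* A hedged C sketch of the table-based fallback (illustrative only;
   __nsau_data[i] holds the number of leading zeros within the byte i,
   so the result for 0 is 16 + 8 + __nsau_data[0] = 32):

     unsigned nsau_sketch (uint32_t v)
     {
       unsigned n = 0;
       if ((v >> 16) == 0) { n += 16; v <<= 16; }
       if ((v >> 24) == 0) { n += 8;  v <<= 8;  }
       return n + __nsau_data[v >> 24];
     }
*/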
.macro do_nsau cnt, val, tmp, a
#if XCHAL_HAVE_NSA
nsau \cnt, \val
#else
mov \a, \val
movi \cnt, 0
extui \tmp, \a, 16, 16
bnez \tmp, 0f
movi \cnt, 16
slli \a, \a, 16
0:
extui \tmp, \a, 24, 8
bnez \tmp, 1f
addi \cnt, \cnt, 8
slli \a, \a, 8
1:
movi \tmp, __nsau_data
extui \a, \a, 24, 8
add \tmp, \tmp, \a
l8ui \tmp, \tmp, 0
add \cnt, \cnt, \tmp
#endif /* !XCHAL_HAVE_NSA */
.endm
#ifdef L_clz
.section .rodata
.align 4
.global __nsau_data
.type __nsau_data, @object
__nsau_data:
#if !XCHAL_HAVE_NSA
.byte 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4
.byte 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
.byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
.byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
#endif /* !XCHAL_HAVE_NSA */
.size __nsau_data, . - __nsau_data
.hidden __nsau_data
#endif /* L_clz */
#ifdef L_clzsi2
.align 4
.global __clzsi2
.type __clzsi2, @function
__clzsi2:
leaf_entry sp, 16
do_nsau a2, a2, a3, a4
leaf_return
.size __clzsi2, . - __clzsi2
#endif /* L_clzsi2 */
#ifdef L_ctzsi2
.align 4
.global __ctzsi2
.type __ctzsi2, @function
__ctzsi2:
leaf_entry sp, 16
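/* neg/and isolates the lowest set bit (x & -x); the trailing-zero
   count is then 31 - nsau (x & -x). */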
neg a3, a2
and a3, a3, a2
do_nsau a2, a3, a4, a5
neg a2, a2
addi a2, a2, 31
leaf_return
.size __ctzsi2, . - __ctzsi2
#endif /* L_ctzsi2 */
#ifdef L_ffssi2
.align 4
.global __ffssi2
.type __ffssi2, @function
__ffssi2:
leaf_entry sp, 16
neg a3, a2
and a3, a3, a2
do_nsau a2, a3, a4, a5
neg a2, a2
addi a2, a2, 32
leaf_return
.size __ffssi2, . - __ffssi2
#endif /* L_ffssi2 */
#ifdef L_udivsi3
.align 4
.global __udivsi3
.type __udivsi3, @function
__udivsi3:
leaf_entry sp, 16
#if XCHAL_HAVE_DIV32
quou a2, a2, a3
#else
bltui a3, 2, .Lle_one /* check if the divisor <= 1 */
mov a6, a2 /* keep dividend in a6 */
do_nsau a5, a6, a2, a7 /* dividend_shift = nsau (dividend) */
do_nsau a4, a3, a2, a7 /* divisor_shift = nsau (divisor) */
bgeu a5, a4, .Lspecial
sub a4, a4, a5 /* count = divisor_shift - dividend_shift */
ssl a4
sll a3, a3 /* divisor <<= count */
movi a2, 0 /* quotient = 0 */
/* test-subtract-and-shift loop; one quotient bit on each iteration */
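/* A hedged C sketch of the loop below (illustrative only; 'count' is
   the nsau difference computed above, and the divisor has already
   been shifted left by that amount):

     while (count-- != 0)
       {
         if (dividend >= divisor)
           {
             dividend -= divisor;
             quotient += 1;
           }
         quotient <<= 1;
         divisor >>= 1;
       }
     if (dividend >= divisor)
       quotient += 1;      /* final quotient bit after the loop */
*/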
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lloopend
#endif /* XCHAL_HAVE_LOOPS */
.Lloop:
bltu a6, a3, .Lzerobit
sub a6, a6, a3
addi a2, a2, 1
.Lzerobit:
slli a2, a2, 1
srli a3, a3, 1
#if !XCHAL_HAVE_LOOPS
addi a4, a4, -1
bnez a4, .Lloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lloopend:
bltu a6, a3, .Lreturn
addi a2, a2, 1 /* increment quotient if dividend >= divisor */
.Lreturn:
leaf_return
.Lle_one:
beqz a3, .Lerror /* divisor == 0: error; otherwise divisor == 1 and a2 already holds the result */
leaf_return
.Lspecial:
/* quotient is 0 or 1 here: return (dividend >= divisor) */
bltu a6, a3, .Lreturn0
movi a2, 1
leaf_return
.Lerror:
/* Divide by zero: Use an illegal instruction to force an exception.
The subsequent "DIV0" string can be recognized by the exception
handler to identify the real cause of the exception. */
ill
.ascii "DIV0"
.Lreturn0:
movi a2, 0
#endif /* XCHAL_HAVE_DIV32 */
leaf_return
.size __udivsi3, . - __udivsi3
#endif /* L_udivsi3 */
#ifdef L_divsi3
.align 4
.global __divsi3
.type __divsi3, @function
__divsi3:
leaf_entry sp, 16
#if XCHAL_HAVE_DIV32
quos a2, a2, a3
#else
xor a7, a2, a3 /* sign = dividend ^ divisor */
do_abs a6, a2, a4 /* udividend = abs (dividend) */
do_abs a3, a3, a4 /* udivisor = abs (divisor) */
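/* Sign-handling sketch: q = udivide (abs (n), abs (d));
   result = ((n ^ d) < 0) ? -q : q -- the XOR above captures the
   sign of the result in the top bit of a7. */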
bltui a3, 2, .Lle_one /* check if udivisor <= 1 */
do_nsau a5, a6, a2, a8 /* udividend_shift = nsau (udividend) */
do_nsau a4, a3, a2, a8 /* udivisor_shift = nsau (udivisor) */
bgeu a5, a4, .Lspecial
sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */
ssl a4
sll a3, a3 /* udivisor <<= count */
movi a2, 0 /* quotient = 0 */
/* test-subtract-and-shift loop; one quotient bit on each iteration */
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lloopend
#endif /* XCHAL_HAVE_LOOPS */
.Lloop:
bltu a6, a3, .Lzerobit
sub a6, a6, a3
addi a2, a2, 1
.Lzerobit:
slli a2, a2, 1
srli a3, a3, 1
#if !XCHAL_HAVE_LOOPS
addi a4, a4, -1
bnez a4, .Lloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lloopend:
bltu a6, a3, .Lreturn
addi a2, a2, 1 /* increment if udividend >= udivisor */
.Lreturn:
neg a5, a2
movltz a2, a5, a7 /* return (sign < 0) ? -quotient : quotient */
leaf_return
.Lle_one:
beqz a3, .Lerror
neg a2, a6 /* if udivisor == 1, then return... */
movgez a2, a6, a7 /* (sign < 0) ? -udividend : udividend */
leaf_return
.Lspecial:
bltu a6, a3, .Lreturn0 /* if dividend < divisor, return 0 */
movi a2, 1
movi a4, -1
movltz a2, a4, a7 /* else return (sign < 0) ? -1 : 1 */
leaf_return
.Lerror:
/* Divide by zero: Use an illegal instruction to force an exception.
The subsequent "DIV0" string can be recognized by the exception
handler to identify the real cause of the exception. */
ill
.ascii "DIV0"
.Lreturn0:
movi a2, 0
#endif /* XCHAL_HAVE_DIV32 */
leaf_return
.size __divsi3, . - __divsi3
#endif /* L_divsi3 */
#ifdef L_umodsi3
.align 4
.global __umodsi3
.type __umodsi3, @function
__umodsi3:
leaf_entry sp, 16
#if XCHAL_HAVE_DIV32
remu a2, a2, a3
#else
bltui a3, 2, .Lle_one /* check if the divisor is <= 1 */
do_nsau a5, a2, a6, a7 /* dividend_shift = nsau (dividend) */
do_nsau a4, a3, a6, a7 /* divisor_shift = nsau (divisor) */
bgeu a5, a4, .Lspecial
sub a4, a4, a5 /* count = divisor_shift - dividend_shift */
ssl a4
sll a3, a3 /* divisor <<= count */
/* test-subtract-and-shift loop */
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lloopend
#endif /* XCHAL_HAVE_LOOPS */
.Lloop:
bltu a2, a3, .Lzerobit
sub a2, a2, a3
.Lzerobit:
srli a3, a3, 1
#if !XCHAL_HAVE_LOOPS
addi a4, a4, -1
bnez a4, .Lloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lloopend:
.Lspecial:
bltu a2, a3, .Lreturn
sub a2, a2, a3 /* subtract once more if dividend >= divisor */
.Lreturn:
leaf_return
.Lle_one:
bnez a3, .Lreturn0
/* Divide by zero: Use an illegal instruction to force an exception.
The subsequent "DIV0" string can be recognized by the exception
handler to identify the real cause of the exception. */
ill
.ascii "DIV0"
.Lreturn0:
movi a2, 0
#endif /* XCHAL_HAVE_DIV32 */
leaf_return
.size __umodsi3, . - __umodsi3
#endif /* L_umodsi3 */
#ifdef L_modsi3
.align 4
.global __modsi3
.type __modsi3, @function
__modsi3:
leaf_entry sp, 16
#if XCHAL_HAVE_DIV32
rems a2, a2, a3
#else
mov a7, a2 /* save original (signed) dividend */
do_abs a2, a2, a4 /* udividend = abs (dividend) */
do_abs a3, a3, a4 /* udivisor = abs (divisor) */
bltui a3, 2, .Lle_one /* check if udivisor <= 1 */
do_nsau a5, a2, a6, a8 /* udividend_shift = nsau (udividend) */
do_nsau a4, a3, a6, a8 /* udivisor_shift = nsau (udivisor) */
bgeu a5, a4, .Lspecial
sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */
ssl a4
sll a3, a3 /* udivisor <<= count */
/* test-subtract-and-shift loop */
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lloopend
#endif /* XCHAL_HAVE_LOOPS */
.Lloop:
bltu a2, a3, .Lzerobit
sub a2, a2, a3
.Lzerobit:
srli a3, a3, 1
#if !XCHAL_HAVE_LOOPS
addi a4, a4, -1
bnez a4, .Lloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lloopend:
.Lspecial:
bltu a2, a3, .Lreturn
sub a2, a2, a3 /* subtract again if udividend >= udivisor */
.Lreturn:
bgez a7, .Lpositive
neg a2, a2 /* if (dividend < 0), return -udividend */
.Lpositive:
leaf_return
.Lle_one:
bnez a3, .Lreturn0
/* Divide by zero: Use an illegal instruction to force an exception.
The subsequent "DIV0" string can be recognized by the exception
handler to identify the real cause of the exception. */
ill
.ascii "DIV0"
.Lreturn0:
movi a2, 0
#endif /* XCHAL_HAVE_DIV32 */
leaf_return
.size __modsi3, . - __modsi3
#endif /* L_modsi3 */
#ifdef __XTENSA_EB__
#define uh a2
#define ul a3
#else
#define uh a3
#define ul a2
#endif /* __XTENSA_EB__ */
#ifdef L_ashldi3
.align 4
.global __ashldi3
.type __ashldi3, @function
__ashldi3:
leaf_entry sp, 16
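/* 64-bit left shift using the funnel shifter: SSL sets SAR from the
   shift amount so that SRC merges the top bits of the low word into
   the high word while SLL shifts the low word. For shifts of 32 or
   more, the low word (shifted by amount mod 32) becomes the high
   word and the low word is zeroed. */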
ssl a4
bgei a4, 32, .Llow_only
src uh, uh, ul
sll ul, ul
leaf_return
.Llow_only:
sll uh, ul
movi ul, 0
leaf_return
.size __ashldi3, . - __ashldi3
#endif /* L_ashldi3 */
#ifdef L_ashrdi3
.align 4
.global __ashrdi3
.type __ashrdi3, @function
__ashrdi3:
leaf_entry sp, 16
ssr a4
bgei a4, 32, .Lhigh_only
src ul, uh, ul
sra uh, uh
leaf_return
.Lhigh_only:
sra ul, uh
srai uh, uh, 31
leaf_return
.size __ashrdi3, . - __ashrdi3
#endif /* L_ashrdi3 */
#ifdef L_lshrdi3
.align 4
.global __lshrdi3
.type __lshrdi3, @function
__lshrdi3:
leaf_entry sp, 16
ssr a4
bgei a4, 32, .Lhigh_only1
src ul, uh, ul
srl uh, uh
leaf_return
.Lhigh_only1:
srl ul, uh
movi uh, 0
leaf_return
.size __lshrdi3, . - __lshrdi3
#endif /* L_lshrdi3 */
#ifdef L_bswapsi2
.align 4
.global __bswapsi2
.type __bswapsi2, @function
__bswapsi2:
leaf_entry sp, 16
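/* With SAR == 8, each SRC funnel-shifts a register pair right by one
   byte; the three SRC instructions below turn 0xAABBCCDD into
   0xDDCCBBAA. Tracing one example value: a3 = 0x0000AABB, then
   0xBBAABBCC, then 0xCCBBAABB, and finally a2 = 0xDDCCBBAA. */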
ssai 8
srli a3, a2, 16
src a3, a3, a2
src a3, a3, a3
src a2, a2, a3
leaf_return
.size __bswapsi2, . - __bswapsi2
#endif /* L_bswapsi2 */
#ifdef L_bswapdi2
.align 4
.global __bswapdi2
.type __bswapdi2, @function
__bswapdi2:
leaf_entry sp, 16
ssai 8
srli a4, a2, 16
src a4, a4, a2
src a4, a4, a4
src a4, a2, a4
srli a2, a3, 16
src a2, a2, a3
src a2, a2, a2
src a2, a3, a2
mov a3, a4
leaf_return
.size __bswapdi2, . - __bswapdi2
#endif /* L_bswapdi2 */
#include "ieee754-df.S"
#include "ieee754-sf.S"
|
4ms/metamodule-plugin-sdk
| 16,807
|
plugin-libc/libgcc/config/s390/morestack.S
|
# s390 support for -fsplit-stack.
# Copyright (C) 2015-2022 Free Software Foundation, Inc.
# Contributed by Marcin Kościelnicki <koriakin@0x04.net>.
# This file is part of GCC.
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# Excess space needed to call ld.so resolver for lazy plt
# resolution. Go uses sigaltstack so this doesn't need to
# also cover signal frame size.
#define BACKOFF 0x1000
#include <auto-host.h>
# The __morestack function.
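# Parameter block layout, as inferred from the loads below (a sketch;
# field names are illustrative; fields are 4 bytes on 31-bit s390 and
# 8 bytes on s390x):
#   struct morestack_params {
#     size_t frame_size;        # 0(%r1): required frame size
#     size_t stack_param_size;  # 4/8(%r1): bytes of stack parameters
#     ptrdiff_t fn_offset;      # 8/16(%r1): function body minus block
#   };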
.global __morestack
.hidden __morestack
.type __morestack,@function
__morestack:
.LFB1:
.cfi_startproc
#ifndef __s390x__
# The 31-bit __morestack function.
# We use a cleanup to restore the stack guard if an exception
# is thrown through this code.
#ifndef __PIC__
.cfi_personality 0,__gcc_personality_v0
.cfi_lsda 0,.LLSDA1
#else
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#endif
stm %r2, %r15, 0x8(%r15) # Save %r2-%r15.
.cfi_offset %r6, -0x48
.cfi_offset %r7, -0x44
.cfi_offset %r8, -0x40
.cfi_offset %r9, -0x3c
.cfi_offset %r10, -0x38
.cfi_offset %r11, -0x34
.cfi_offset %r12, -0x30
.cfi_offset %r13, -0x2c
.cfi_offset %r14, -0x28
.cfi_offset %r15, -0x24
lr %r11, %r15 # Make frame pointer for vararg.
.cfi_def_cfa_register %r11
ahi %r15, -0x60 # 0x60 for standard frame.
st %r11, 0(%r15) # Save back chain.
lr %r8, %r0 # Save %r0 (static chain).
lr %r10, %r1 # Save %r1 (address of parameter block).
l %r7, 0(%r10) # Required frame size to %r7
ear %r1, %a0 # Extract thread pointer.
l %r1, 0x20(%r1) # Get stack boundary
ar %r1, %r7 # Stack boundary + frame size
a %r1, 4(%r10) # + stack param size
clr %r1, %r15 # Compare with current stack pointer
jle .Lnoalloc # guard + frame size + param size <= sp: no allocation needed
brasl %r14, __morestack_block_signals
# We abuse one of caller's fpr save slots (which we don't use for fprs)
# as a local variable. Not needed here, but done to be consistent with
# the below use.
ahi %r7, BACKOFF # Bump requested size a bit.
st %r7, 0x40(%r11) # Stuff frame size on stack.
la %r2, 0x40(%r11) # Pass its address as parameter.
la %r3, 0x60(%r11) # Caller's stack parameters.
l %r4, 4(%r10) # Size of stack parameters.
brasl %r14, __generic_morestack
lr %r15, %r2 # Switch to the new stack.
ahi %r15, -0x60 # Make a stack frame on it.
st %r11, 0(%r15) # Save back chain.
s %r2, 0x40(%r11) # The end of stack space.
ahi %r2, BACKOFF # Back off a bit.
ear %r1, %a0 # Extract thread pointer.
.LEHB0:
st %r2, 0x20(%r1) # Save the new stack boundary.
brasl %r14, __morestack_unblock_signals
lr %r0, %r8 # Static chain.
lm %r2, %r6, 0x8(%r11) # Parameter registers.
# Third parameter is address of function meat - address of parameter
# block.
a %r10, 0x8(%r10)
# Leave vararg pointer in %r1, in case function uses it
la %r1, 0x60(%r11)
# State of registers:
# %r0: Static chain from entry.
# %r1: Vararg pointer.
# %r2-%r6: Parameters from entry.
# %r7-%r10: Indeterminate.
# %r11: Frame pointer (%r15 from entry).
# %r12-%r13: Indeterminate.
# %r14: Return address.
# %r15: Stack pointer.
basr %r14, %r10 # Call our caller.
stm %r2, %r3, 0x8(%r11) # Save return registers.
brasl %r14, __morestack_block_signals
# We need a stack slot now, but have no good way to get it - the frame
# on the new stack had to be exactly 0x60 bytes, or stack parameters
# would be passed wrong. Abuse the fpr save area in the caller's frame
# (we don't save actual fprs).
la %r2, 0x40(%r11)
brasl %r14, __generic_releasestack
s %r2, 0x40(%r11) # Subtract available space.
ahi %r2, BACKOFF # Back off a bit.
ear %r1, %a0 # Extract thread pointer.
.LEHE0:
st %r2, 0x20(%r1) # Save the new stack boundary.
# We need to restore the old stack pointer before unblocking signals.
# We also need 0x60 bytes for a stack frame. Since we had a stack
# frame at this place before the stack switch, there's no need to
# write the back chain again.
lr %r15, %r11
ahi %r15, -0x60
brasl %r14, __morestack_unblock_signals
lm %r2, %r15, 0x8(%r11) # Restore all registers.
.cfi_remember_state
.cfi_restore %r15
.cfi_restore %r14
.cfi_restore %r13
.cfi_restore %r12
.cfi_restore %r11
.cfi_restore %r10
.cfi_restore %r9
.cfi_restore %r8
.cfi_restore %r7
.cfi_restore %r6
.cfi_def_cfa_register %r15
br %r14 # Return to caller's caller.
# Executed if no new stack allocation is needed.
.Lnoalloc:
.cfi_restore_state
# We may need to copy stack parameters.
l %r9, 0x4(%r10) # Load stack parameter size.
ltr %r9, %r9 # And check if it's 0.
je .Lnostackparm # Skip the copy if not needed.
sr %r15, %r9 # Make space on the stack.
la %r8, 0x60(%r15) # Destination.
la %r12, 0x60(%r11) # Source.
lr %r13, %r9 # Source size.
.Lcopy:
mvcle %r8, %r12, 0 # Copy.
jo .Lcopy
.Lnostackparm:
# Third parameter is address of function meat - address of parameter
# block.
a %r10, 0x8(%r10)
# Leave vararg pointer in %r1, in case function uses it
la %r1, 0x60(%r11)
# OK, no stack allocation needed. We still follow the protocol and
# call our caller - it doesn't cost much and makes sure vararg works.
# No need to set any registers here - %r0 and %r2-%r6 weren't modified.
basr %r14, %r10 # Call our caller.
lm %r6, %r15, 0x18(%r11) # Restore all callee-saved registers.
.cfi_remember_state
.cfi_restore %r15
.cfi_restore %r14
.cfi_restore %r13
.cfi_restore %r12
.cfi_restore %r11
.cfi_restore %r10
.cfi_restore %r9
.cfi_restore %r8
.cfi_restore %r7
.cfi_restore %r6
.cfi_def_cfa_register %r15
br %r14 # Return to caller's caller.
# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.
.L1:
.cfi_restore_state
lr %r2, %r11 # Stack pointer after resume.
brasl %r14, __generic_findstack
lr %r3, %r11 # Get the stack pointer.
sr %r3, %r2 # Subtract available space.
ahi %r3, BACKOFF # Back off a bit.
ear %r1, %a0 # Extract thread pointer.
st %r3, 0x20(%r1) # Save the new stack boundary.
# We need GOT pointer in %r12 for PLT entry.
larl %r12,_GLOBAL_OFFSET_TABLE_
lr %r2, %r6 # Exception header.
#ifdef __PIC__
brasl %r14, _Unwind_Resume@PLT
#else
brasl %r14, _Unwind_Resume
#endif
#else /* defined(__s390x__) */
# The 64-bit __morestack function.
# We use a cleanup to restore the stack guard if an exception
# is thrown through this code.
#ifndef __PIC__
.cfi_personality 0x3,__gcc_personality_v0
.cfi_lsda 0x3,.LLSDA1
#else
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#endif
stmg %r2, %r15, 0x10(%r15) # Save %r2-%r15.
.cfi_offset %r6, -0x70
.cfi_offset %r7, -0x68
.cfi_offset %r8, -0x60
.cfi_offset %r9, -0x58
.cfi_offset %r10, -0x50
.cfi_offset %r11, -0x48
.cfi_offset %r12, -0x40
.cfi_offset %r13, -0x38
.cfi_offset %r14, -0x30
.cfi_offset %r15, -0x28
lgr %r11, %r15 # Make frame pointer for vararg.
.cfi_def_cfa_register %r11
aghi %r15, -0xa0 # 0xa0 for standard frame.
stg %r11, 0(%r15) # Save back chain.
lgr %r8, %r0 # Save %r0 (static chain).
lgr %r10, %r1 # Save %r1 (address of parameter block).
lg %r7, 0(%r10) # Required frame size to %r7
ear %r1, %a0
sllg %r1, %r1, 32
ear %r1, %a1 # Extract thread pointer.
lg %r1, 0x38(%r1) # Get stack boundary
agr %r1, %r7 # Stack boundary + frame size
ag %r1, 8(%r10) # + stack param size
clgr %r1, %r15 # Compare with current stack pointer
jle .Lnoalloc # guard + frame size + param size <= sp: no allocation needed
brasl %r14, __morestack_block_signals
# We abuse one of caller's fpr save slots (which we don't use for fprs)
# as a local variable. Not needed here, but done to be consistent with
# the below use.
aghi %r7, BACKOFF # Bump requested size a bit.
stg %r7, 0x80(%r11) # Stuff frame size on stack.
la %r2, 0x80(%r11) # Pass its address as parameter.
la %r3, 0xa0(%r11) # Caller's stack parameters.
lg %r4, 8(%r10) # Size of stack parameters.
brasl %r14, __generic_morestack
lgr %r15, %r2 # Switch to the new stack.
aghi %r15, -0xa0 # Make a stack frame on it.
stg %r11, 0(%r15) # Save back chain.
sg %r2, 0x80(%r11) # The end of stack space.
aghi %r2, BACKOFF # Back off a bit.
ear %r1, %a0
sllg %r1, %r1, 32
ear %r1, %a1 # Extract thread pointer.
.LEHB0:
stg %r2, 0x38(%r1) # Save the new stack boundary.
brasl %r14, __morestack_unblock_signals
lgr %r0, %r8 # Static chain.
lmg %r2, %r6, 0x10(%r11) # Parameter registers.
# Third parameter is address of function meat - address of parameter
# block.
ag %r10, 0x10(%r10)
# Leave vararg pointer in %r1, in case function uses it
la %r1, 0xa0(%r11)
# State of registers:
# %r0: Static chain from entry.
# %r1: Vararg pointer.
# %r2-%r6: Parameters from entry.
# %r7-%r10: Indeterminate.
# %r11: Frame pointer (%r15 from entry).
# %r12-%r13: Indeterminate.
# %r14: Return address.
# %r15: Stack pointer.
basr %r14, %r10 # Call our caller.
stg %r2, 0x10(%r11) # Save return register.
brasl %r14, __morestack_block_signals
# We need a stack slot now, but have no good way to get it - the frame
# on the new stack had to be exactly 0xa0 bytes, or stack parameters
# would be passed wrong. Abuse the fpr save area in the caller's frame
# (we don't save actual fprs).
la %r2, 0x80(%r11)
brasl %r14, __generic_releasestack
sg %r2, 0x80(%r11) # Subtract available space.
aghi %r2, BACKOFF # Back off a bit.
ear %r1, %a0
sllg %r1, %r1, 32
ear %r1, %a1 # Extract thread pointer.
.LEHE0:
stg %r2, 0x38(%r1) # Save the new stack boundary.
# We need to restore the old stack pointer before unblocking signals.
# We also need 0xa0 bytes for a stack frame. Since we had a stack
# frame at this place before the stack switch, there's no need to
# write the back chain again.
lgr %r15, %r11
aghi %r15, -0xa0
brasl %r14, __morestack_unblock_signals
lmg %r2, %r15, 0x10(%r11) # Restore all registers.
.cfi_remember_state
.cfi_restore %r15
.cfi_restore %r14
.cfi_restore %r13
.cfi_restore %r12
.cfi_restore %r11
.cfi_restore %r10
.cfi_restore %r9
.cfi_restore %r8
.cfi_restore %r7
.cfi_restore %r6
.cfi_def_cfa_register %r15
br %r14 # Return to caller's caller.
# Executed if no new stack allocation is needed.
.Lnoalloc:
.cfi_restore_state
# We may need to copy stack parameters.
lg %r9, 0x8(%r10) # Load stack parameter size.
ltgr %r9, %r9 # Check if it's 0.
je .Lnostackparm # Skip the copy if not needed.
sgr %r15, %r9 # Make space on the stack.
la %r8, 0xa0(%r15) # Destination.
la %r12, 0xa0(%r11) # Source.
lgr %r13, %r9 # Source size.
.Lcopy:
mvcle %r8, %r12, 0 # Copy.
jo .Lcopy
.Lnostackparm:
# Third parameter is address of function meat - address of parameter
# block.
ag %r10, 0x10(%r10)
# Leave vararg pointer in %r1, in case function uses it
la %r1, 0xa0(%r11)
# OK, no stack allocation needed. We still follow the protocol and
# call our caller - it doesn't cost much and makes sure vararg works.
# No need to set any registers here - %r0 and %r2-%r6 weren't modified.
basr %r14, %r10 # Call our caller.
lmg %r6, %r15, 0x30(%r11) # Restore all callee-saved registers.
.cfi_remember_state
.cfi_restore %r15
.cfi_restore %r14
.cfi_restore %r13
.cfi_restore %r12
.cfi_restore %r11
.cfi_restore %r10
.cfi_restore %r9
.cfi_restore %r8
.cfi_restore %r7
.cfi_restore %r6
.cfi_def_cfa_register %r15
br %r14 # Return to caller's caller.
# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.
.L1:
.cfi_restore_state
lgr %r2, %r11 # Stack pointer after resume.
brasl %r14, __generic_findstack
lgr %r3, %r11 # Get the stack pointer.
sgr %r3, %r2 # Subtract available space.
aghi %r3, BACKOFF # Back off a bit.
ear %r1, %a0
sllg %r1, %r1, 32
ear %r1, %a1 # Extract thread pointer.
stg %r3, 0x38(%r1) # Save the new stack boundary.
lgr %r2, %r6 # Exception header.
#ifdef __PIC__
brasl %r14, _Unwind_Resume@PLT
#else
brasl %r14, _Unwind_Resume
#endif
#endif /* defined(__s390x__) */
.cfi_endproc
.size __morestack, . - __morestack
# The exception table. This tells the personality routine to execute
# the exception handler.
.section .gcc_except_table,"a",@progbits
.align 4
.LLSDA1:
.byte 0xff # @LPStart format (omit)
.byte 0xff # @TType format (omit)
.byte 0x1 # call-site format (uleb128)
.uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length
.LLSDACSB1:
.uleb128 .LEHB0-.LFB1 # region 0 start
.uleb128 .LEHE0-.LEHB0 # length
.uleb128 .L1-.LFB1 # landing pad
.uleb128 0 # action
.LLSDACSE1:
.global __gcc_personality_v0
#ifdef __PIC__
# Build a position independent reference to the basic
# personality function.
.hidden DW.ref.__gcc_personality_v0
.weak DW.ref.__gcc_personality_v0
.section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
.type DW.ref.__gcc_personality_v0, @object
DW.ref.__gcc_personality_v0:
#ifndef __LP64__
.align 4
.size DW.ref.__gcc_personality_v0, 4
.long __gcc_personality_v0
#else
.align 8
.size DW.ref.__gcc_personality_v0, 8
.quad __gcc_personality_v0
#endif
#endif
# Initialize the stack test value when the program starts or when a
# new thread starts. We don't know how large the main stack is, so we
# guess conservatively. We might be able to use getrlimit here.
.text
.global __stack_split_initialize
.hidden __stack_split_initialize
.type __stack_split_initialize, @function
__stack_split_initialize:
#ifndef __s390x__
ear %r1, %a0
lr %r0, %r15
ahi %r0, -0x4000 # We should have at least 16K.
st %r0, 0x20(%r1)
lr %r2, %r15
lhi %r3, 0x4000
#ifdef __PIC__
jg __generic_morestack_set_initial_sp@PLT # Tail call
#else
jg __generic_morestack_set_initial_sp # Tail call
#endif
#else /* defined(__s390x__) */
ear %r1, %a0
sllg %r1, %r1, 32
ear %r1, %a1
lgr %r0, %r15
aghi %r0, -0x4000 # We should have at least 16K.
stg %r0, 0x38(%r1)
lgr %r2, %r15
lghi %r3, 0x4000
#ifdef __PIC__
jg __generic_morestack_set_initial_sp@PLT # Tail call
#else
jg __generic_morestack_set_initial_sp # Tail call
#endif
#endif /* defined(__s390x__) */
.size __stack_split_initialize, . - __stack_split_initialize
# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.
# void *__morestack_get_guard (void) returns the current stack guard.
.text
.global __morestack_get_guard
.hidden __morestack_get_guard
.type __morestack_get_guard,@function
__morestack_get_guard:
#ifndef __s390x__
ear %r1, %a0
l %r2, 0x20(%r1)
#else
ear %r1, %a0
sllg %r1, %r1, 32
ear %r1, %a1
lg %r2, 0x38(%r1)
#endif
br %r14
.size __morestack_get_guard, . - __morestack_get_guard
# void __morestack_set_guard (void *) sets the stack guard.
.global __morestack_set_guard
.hidden __morestack_set_guard
.type __morestack_set_guard,@function
__morestack_set_guard:
#ifndef __s390x__
ear %r1, %a0
st %r2, 0x20(%r1)
#else
ear %r1, %a0
sllg %r1, %r1, 32
ear %r1, %a1
stg %r2, 0x38(%r1)
#endif
br %r14
.size __morestack_set_guard, . - __morestack_set_guard
# void *__morestack_make_guard (void *, size_t) returns the stack
# guard value for a stack.
.global __morestack_make_guard
.hidden __morestack_make_guard
.type __morestack_make_guard,@function
__morestack_make_guard:
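# A sketch of the computation below: %r2 holds the top of the stack
# segment (stacks grow down) and %r3 its size, so the guard is
# top - size + BACKOFF; __morestack allocates a new segment once a
# frame would reach below this value.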
#ifndef __s390x__
sr %r2, %r3
ahi %r2, BACKOFF
#else
sgr %r2, %r3
aghi %r2, BACKOFF
#endif
br %r14
.size __morestack_make_guard, . - __morestack_make_guard
# Make __stack_split_initialize a high priority constructor.
#if HAVE_INITFINI_ARRAY_SUPPORT
.section .init_array.00000,"aw",@progbits
#else
.section .ctors.65535,"aw",@progbits
#endif
#ifndef __LP64__
.align 4
.long __stack_split_initialize
.long __morestack_load_mmap
#else
.align 8
.quad __stack_split_initialize
.quad __morestack_load_mmap
#endif
.section .note.GNU-stack,"",@progbits
.section .note.GNU-split-stack,"",@progbits
.section .note.GNU-no-split-stack,"",@progbits
|
4ms/metamodule-plugin-sdk
| 1,500
|
plugin-libc/libgcc/config/cr16/crtn.S
|
# Specialized code needed to support construction and destruction of
# file-scope objects in C++ and Java code, and to support exception handling.
# Copyright (C) 2012-2022 Free Software Foundation, Inc.
# Contributed by KPIT Cummins Infosystems Limited.
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
/* This file supplies function epilogues for the .init and .fini sections.
It is linked in after all other files. */
.ident "GNU C crtn.o"
.section .init
#if defined (__ID_SHARED_LIB__)
popret $2, r12, ra
#else
popret ra
#endif
.section .fini
#if defined (__ID_SHARED_LIB__)
popret $2, r12, ra
#else
popret ra
#endif
|
4ms/metamodule-plugin-sdk
| 1,234
|
plugin-libc/libgcc/config/cr16/crtlibid.S
|
# Provide a weak definition of the library ID, for the benefit of certain
# configure scripts.
# Copyright (C) 2012-2022 Free Software Foundation, Inc.
# Contributed by KPIT Cummins Infosystems Limited.
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
.ident "GNU C crtlibid.o"
.weak __current_shared_library_r12_offset_
.set __current_shared_library_r12_offset_, 0
|
4ms/metamodule-plugin-sdk
| 1,782
|
plugin-libc/libgcc/config/cr16/crti.S
|
# Specialized code needed to support construction and destruction of
# file-scope objects in C++ and Java code, and to support exception handling.
# Copyright (C) 2012-2022 Free Software Foundation, Inc.
# Contributed by KPIT Cummins Infosystems Limited.
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
/* This file just supplies function prologues for the .init and .fini
sections. It is linked in before crtbegin.o. */
.ident "GNU C crti.o"
.section .init
.globl __init
.type __init,@function
__init:
#if defined (__ID_SHARED_LIB__)
push $2, r12, ra
movd $__current_shared_library_r12_offset_, (r1,r0)
loadd [r12]0(r1,r0), (r12)
#else
push ra
#endif
.section .fini
.globl __fini
.type __fini,@function
__fini:
#if defined (__ID_SHARED_LIB__)
push $2, r12, ra
movd $__current_shared_library_r12_offset_, (r1,r0)
loadd [r12]0(r1,r0), (r12)
#else
push ra
#endif
|
4ms/metamodule-plugin-sdk
| 14,909
|
plugin-libc/libgcc/config/cr16/lib1funcs.S
|
/* Libgcc Target specific implementation.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by KPIT Cummins Infosystems Limited.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifdef L_mulsi3
.text
.align 4
.globl ___mulsi3
___mulsi3:
movw r4,r0
movw r2,r1
/* Extended multiplication between the 2 lower words */
muluw r1,(r1,r0)
/* Multiply the lower word of each parameter */
mulw r2,r5
/* With the higher word of the other */
mulw r3,r4
/* Add products to the higher part of the final result */
addw r4,r1
addw r5,r1
jump (ra)
#endif
#ifdef L_divdi3
.text
.align 4
.globl ___divdi3
___divdi3:
push $4, r7, ra
/* Param #1 Long Long low bit first */
loadd 12(sp), (r1, r0)
loadd 16(sp), (r3, r2)
/* Param #2 Long Long low bit first */
loadd 20(sp), (r5, r4)
loadd 24(sp), (r7, r6)
/* Set neg to 0 */
movw $0, r10
subd $16, (sp)
/* Check whether param1 is negative; skip the negation if not */
cmpw $0, r3
ble L4
/* Invert param1 and neg */
movd $-1, (r9, r8) /* Temp set to FFFFFFFF */
xord (r9, r8), (r1, r0) /* Xor low bits of param 1 with temp */
xord (r9, r8), (r3, r2) /* Xor high bits of param 1 with temp */
addd $1, (r1, r0) /* Add 1 to low bits of param 1 */
xorw $1, r10 /* Invert neg */
bcc L4 /* If no carry occurred go to L4 */
addd $1, (r3, r2) /* Add 1 to high bits of param 1 */
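/* The sequence above is the 64-bit two's-complement negation used
   throughout this file: XOR both halves with 0xFFFFFFFF, add 1 to
   the low half, and propagate any carry into the high half
   (i.e. x = ~x + 1). */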
L4: stord (r1, r0), 0(sp)
stord (r3, r2), 4(sp)
/* Check whether param2 is negative; skip the negation if not */
cmpw $0, r7
ble L5
/* Invert param2 and neg */
movd $-1, (r9, r8) /* Temp set to FFFFFFFF */
xord (r9, r8), (r5, r4) /* Xor low bits of param 2 with temp */
xord (r9, r8), (r7, r6) /* Xor high bits of param 2 with temp */
addd $1, (r5, r4) /* Add 1 to low bits of param 2 */
xorw $1, r10 /* Invert neg */
bcc L5 /* If no carry occurred go to L5 */
addd $1, (r7, r6) /* Add 1 to high bits of param 2 */
L5: stord (r5, r4), 8(sp)
stord (r7, r6), 12(sp)
movw $0, r2
/* Call udivmoddi3 */
#ifdef __PIC__
loadd ___udivmoddi3@cGOT(r12), (r1,r0)
jal (r1,r0)
#else
bal (ra), ___udivmoddi3
#endif
/* If (neg) */
addd $16, (sp)
cmpw $0, r10 /* Compare 0 with neg */
beq Lexit__
/* Neg = -Neg */
xord (r9, r8), (r1, r0) /* Xor low bits of ures with temp */
xord (r9, r8), (r3, r2) /* Xor high bits of ures with temp */
addd $1, (r1, r0) /* Add 1 to low bits of ures */
bcc Lexit__
addd $1, (r3, r2) /* Add 1 to high bit of ures */
Lexit__:
# ifdef __ID_SHARED_LIB__
pop $2, r12
# endif
popret $4, r7, ra
#endif
#ifdef L_lshrdi3
.text
.align 4
.globl ___lshrdi3
___lshrdi3:
push $3, r7
/* Load parameters from stack in this order */
movw r2, r6 /* Number of shifts */
loadd 6(sp), (r1, r0) /* Low bits */
loadd 10(sp), (r3, r2)/* High bits */
xorw $-1, r6 /* Invert number of shifts */
addw $1, r6 /* Add 1 to complete the negation */
movw r6, r7 /* Copy number of shifts */
tbit $15, r6 /* Test if number is negative */
bfs L2 /* If negative jump to L2 */
movd (r1, r0), (r9, r8) /* Copy low bits */
subw $32, r7 /* Calc how many bits will overflow */
/* Shift the temp low bit to the right to see the overflowing bits */
lshd r7, (r9, r8)
cmpw $32, r6 /* If number of shifts is higher than 31 */
blt L1 /* Shift by moving */
lshd r6, (r3, r2) /* Shift high bits */
lshd r6, (r1, r0) /* Shift low bits */
addd (r9, r8), (r3, r2) /* Add overflow to the high bits */
popret $3, r7 /* Return */
L1: movd $0, (r1, r0) /* Reset low bit */
movd (r9, r8), (r3, r2) /* Add the overflow from the low bit */
popret $3, r7 /* Return */
L2: movd (r3, r2), (r9, r8) /* Copy high bits */
addw $32, r7 /* Calc how many bits will overflow */
/* Shift the temp low bit to the left to see the overflowing bits */
lshd r7, (r9, r8)
cmpw $-32, r6 /* If number of shifts is lower than -31 */
bgt L3 /* Shift by moving */
lshd r6, (r1, r0) /* Shift low bits */
lshd r6, (r3, r2) /* Shift high bits */
addd (r9, r8), (r1, r0) /* Add overflow to the low bits */
popret $3, r7 /* Return */
L3: movd $0, (r3, r2) /* Reset the high bit */
movd (r9, r8), (r1, r0) /* Add the overflow from the high bit */
popret $3, r7 /* Return */
#endif
#ifdef L_moddi3
.text
.align 4
.globl ___moddi3
___moddi3:
push $4, r7, ra
/* Param #1 Long Long low bit first */
loadd 12(sp), (r1, r0)
loadd 16(sp), (r3, r2)
/* Param #2 Long Long low bit first */
loadd 20(sp), (r5, r4)
loadd 24(sp), (r7, r6)
subd $18, (sp)
/* Set neg to 0 */
storw $0, 16(sp)
movd $-1, (r9, r8) /* Temp set to FFFFFFFF */
/* Check whether param1 is negative; skip the negation if not */
cmpw $0, r3
ble L4
/* Invert param1 and neg */
xord (r9, r8), (r1, r0) /* Xor low bits of param 1 with temp */
xord (r9, r8), (r3, r2) /* Xor high bits of param 1 with temp */
addd $1, (r1, r0) /* Add 1 to low bits of param 1 */
storw $1, 16(sp)
bcc L4 /* If no carry occurred go to L4 */
addd $1, (r3, r2) /* Add 1 to high bits of param 1 */
L4: stord (r1, r0), 0(sp)
stord (r3, r2), 4(sp)
/* Check whether param2 is negative; skip the negation if not */
cmpw $0, r7
ble L5
/* Invert param2 and neg */
xord (r9, r8), (r5, r4) /* Xor low bits of param 2 with temp */
xord (r9, r8), (r7, r6) /* Xor high bits of param 2 with temp */
addd $1, (r5, r4) /* Add 1 to low bits of param 2 */
bcc L5 /* If no carry occurred go to L5 */
addd $1, (r7, r6) /* Add 1 to high bits of param 2 */
L5: stord (r5, r4), 8(sp)
stord (r7, r6), 12(sp)
movw $1, r2
/* Call udivmoddi3 */
#ifdef __PIC__
loadd ___udivmoddi3@cGOT(r12), (r1,r0)
jal (r1,r0)
#else
bal (ra), ___udivmoddi3
#endif
/* If (neg) */
loadw 16(sp), r10 /* Load neg from stack */
addd $18, (sp)
cmpw $0, r10 /* Compare 0 with neg */
beq Lexit__
/* Neg = -Neg */
xord (r9, r8), (r1, r0) /* Xor low bits of ures with temp */
xord (r9, r8), (r3, r2) /* Xor high bits of ures with temp */
addd $1, (r1, r0) /* Add 1 to low bits of ures */
bcc Lexit__
addd $1, (r3, r2) /* Add 1 to high bit of ures */
Lexit__:
# ifdef __ID_SHARED_LIB__
pop $2, r12
# endif
popret $4, r7, ra
#endif
#ifdef L_muldi3
.text
.align 4
.globl ___muldi3
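/* A hedged C reference for the 64x64 -> 64 multiply assembled from
   16-bit macuw/muluw products below (a sketch of the semantics only;
   in the comments A/B are the high/low words of param 1 and C/D of
   param 2; assumes <stdint.h>):

     uint64_t muldi3_sketch (uint64_t x, uint64_t y)
     {
       uint32_t B = (uint32_t) x, A = (uint32_t) (x >> 32);
       uint32_t D = (uint32_t) y, C = (uint32_t) (y >> 32);
       uint64_t lo = (uint64_t) B * D;      /* full 64-bit B*D */
       uint32_t hi = (uint32_t) (lo >> 32)
                   + A * D                  /* low 32 bits of A*D */
                   + B * C;                 /* low 32 bits of B*C */
       return ((uint64_t) hi << 32) | (uint32_t) lo;
     }
*/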
___muldi3:
push $2, r13
push $7, r7
/* Param #1 Long Long low bit first */
loadd 18(sp), (r1, r0)
loadd 22(sp), (r3, r2)
/* Param #2 Long Long low bit first */
loadd 26(sp), (r5, r4)
loadd 30(sp), (r7, r6)
/* Clear r13, r12 */
movd $0, (r12)
movd $0, (r13)
/* Set neg */
movw $0, r10
/* Check whether param1 is negative; skip the negation if not */
cmpw $0, r3
ble L1
/* Invert param1 and neg */
movd $-1, (r9, r8) /* Temp set to FFFFFFFF */
xord (r9, r8), (r1, r0) /* Xor low bits of param 1 with temp */
xord (r9, r8), (r3, r2) /* Xor high bits of param 1 with temp */
addd $1, (r1, r0) /* Add 1 to low bits of param 1 */
xorw $1, r10 /* Invert neg */
bcc L1 /* If no carry occurred go to L1 */
addd $1, (r3, r2) /* Add 1 to high bits of param 1 */
L1: /* Check whether param2 is negative; skip the negation if not */
cmpw $0, r7
ble L2
/* Invert param2 and neg */
movd $-1, (r9, r8) /* Temp set to FFFFFFFF */
xord (r9, r8), (r5, r4) /* Xor low bits of param 2 with temp */
xord (r9, r8), (r7, r6) /* Xor high bits of param 2 with temp */
addd $1, (r5, r4) /* Add 1 to low bits of param 2 */
xorw $1, r10 /* Invert neg */
bcc L2 /* If no carry occurred go to L2 */
addd $1, (r7, r6) /* Add 1 to high bits of param 2 */
L2: storw r10, 18(sp) /* Store neg to stack so we can use r10 */
/* B*D */
/* Bl*Dl */
macuw r0, r4, (r12) /* Multiply r0 and r4 and add to r12 */
/* Bh*Dl */
movd $0, (r9, r8) /* Clear r9, r8 */
macuw r1, r4, (r9, r8) /* Multiply Bh*Dl and add result to (r9, r8) */
movw r9, r10 /* Shift left: r9 to r10 */
lshd $16, (r9, r8) /* Shift left: r8 to r9 */
movw $0, r11 /* Clear r11 */
addd (r9, r8), (r12) /* Add (r9, r8) to r12 */
bcc L3 /* If no carry occurred go to L3 */
addd $1, (r13) /* If carry occurred add 1 to r13 */
L3: addd (r11, r10), (r13) /* Add (r11, r10) to r13 */
/* Bl*Dh */
movd $0, (r9, r8) /* Clear (r9, r8) */
macuw r0, r5, (r9, r8) /* Multiply r0 and r5 and add the product to (r9, r8) */
movw r9, r10 /* Shift left: r9 to r10 */
lshd $16, (r9, r8) /* Shift left: r8 to r9 */
addd (r9, r8), (r12) /* Add (r9, r8) to r12 */
bcc L4 /* If no carry occurred go to L4 */
addd $1, (r13) /* If carry occurred add 1 to r13 */
L4: addd (r11, r10), (r13) /* Add (r11, r10) to r13 */
/* Bh*Dh */
movd $0, (r9, r8) /* Clear (r9, r8) */
macuw r1, r5, (r9, r8) /* Multiply r1 and r5 and add to r13 */
addd (r9, r8), (r13) /* Add (r9, r8) to result */
/* A*D */
/* Al*Dl */
movd $0, (r11, r10) /* Clear (r11, r10) */
macuw r2, r4, (r11, r10)/* Multiply r2 and r4 and add to (r11, r10) */
addd (r13), (r11, r10) /* Copy r13 to (r11, r10) */
/* Al*Dh */
movd $0, (r9, r8) /* Clear (r9, r8) */
macuw r2, r5, (r9, r8) /* Multiply r2 and r5 and add to (r9, r8) */
addw r8, r11 /* Add r8 to r11 */
/* Ah*Dl */
muluw r3, (r5, r4) /* Multiply r3 and r4 and store the product in (r5, r4) */
addw r4, r11 /* Add r4 to r11 */
/* B*C */
/* Bl*Cl */
movd $0, (r9, r8) /* Clear (r9, r8) */
macuw r0, r6, (r9, r8) /* Multiply r0 and r6 and add to (r9, r8) */
addd (r9, r8), (r11, r10)/* Add (r9, r8) to result */
/* Bl*Ch */
movd $0, (r9, r8) /* Clear (r9, r8) */
macuw r0, r7, (r9, r8) /* Multiply r0 and r7 and add to (r9, r8) */
addw r8, r11 /* Add r8 to r11 */
loadw 18(sp), r8 /* Load neg from stack */
/* Bh*Cl */
muluw r1, (r7, r6) /* Multiply r1 and r6 and store the product in (r7, r6) */
addw r6, r11 /* Add r6 to r11 */
E1: movd (r11, r10), (r3, r2)
movd (r12), (r1, r0)
/* If (neg) */
cmpw $0, r8 /* Compare 0 with neg */
beq Lexit__
/* Neg = -Neg */
movd $-1, (r9, r8) /* Temp set to FFFFFFFF */
xord (r9, r8), (r1, r0) /* Xor low bits of result with temp */
xord (r9, r8), (r3, r2) /* Xor high bits of result with temp */
addd $1, (r1, r0) /* Add 1 to low bits of result */
bcc Lexit__
addd $1, (r3, r2) /* Add 1 to high bit of result */
Lexit__:
pop $7, r7
popret $2, r13
#endif
#ifdef L_negdi2
.text
.align 4
.globl ___negdi2
___negdi2:
/* Load the parameter from the stack in this order */
loadd 0(sp), (r1, r0)
loadd 4(sp), (r3, r2)
movd $-1, (r6, r5) /* Set temp to FFFFFFFF */
xord (r6, r5), (r1, r0) /* Xor low bits with temp */
xord (r6, r5), (r3, r2) /* Xor high bits with temp */
addd $1, (r1, r0) /* Add one */
jcc (ra)
addd $1, (r3, r2) /* Add the carry to the high bits */
jump (ra)
#endif
#ifdef L_udivdi3
.text
.align 4
.globl ___udivdi3
___udivdi3:
movw $0, r2
br ___udivmoddi3
#endif
#ifdef L_udivmoddi3
.text
.align 4
.globl ___udivmoddi3
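/* A hedged C sketch of the shift-subtract algorithm below
   (illustrative only; 'want_mod' corresponds to the flag passed in
   r2, and raise_div0 stands in for the 'excp dvz' trap):

     uint64_t udivmoddi3_sketch (uint64_t num, uint64_t den, int want_mod)
     {
       uint64_t bit = 1, res = 0;
       if (den == 0)
         raise_div0 ();
       while (den < num && !(den >> 63))
         {
           den <<= 1;
           bit <<= 1;
         }
       while (bit != 0)
         {
           if (num >= den)
             {
               num -= den;
               res |= bit;
             }
           bit >>= 1;
           den >>= 1;
         }
       return want_mod ? num : res;
     }
*/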
___udivmoddi3:
push $2, r13
push $7, r7
/* Param #1 Long Long low bit first */
loadd 18(sp), (r1, r0)
storw r2, 18(sp) /* Store modulo on stack */
loadd 22(sp), (r3, r2)
/* Param #2 Long Long low bit first */
loadd 26(sp), (r5, r4)
loadd 30(sp), (r7, r6)
/* Set ures to 0 */
movd $0, (r13)
movd $0, (r12)
cmpd (r12), (r5, r4)
beq LE
L5: movd $1, (r9, r8) /* Store 1 in low bits from bit */
movd $0, (r11, r10) /* Store 0 in high bits from bit */
L6: /* While (den < num && !(den & (1LL<<63))) */
/* Compare high bits from param 1 and param 2 */
cmpd (r7, r6), (r3, r2)
bhi L10 /* If param 2 is greater go to L10 */
bne L8 /* If param 1 is greater go to L8 */
cmpd (r5, r4), (r1, r0) /* Compare low bits from param 1 and param 2 */
/* If param 2 is greater or the same go to L1 */
bhs L10
L8: /* Check if most significant bit of param 2 is set */
tbit $15, r7
bfs L10 /* If PSR is set go to L10 */
/* Shift bit */
lshd $1, (r11, r10) /* Shift left: high bits of bit */
/* Check if most significant bit of bit is set */
tbit $15, r9
lshd $1, (r9, r8) /* Shift left: low bits of bit */
bfs L28 /* If PSR is set go to L28 */
L9: /* Shift b */
lshd $1, (r7, r6) /* Shift left: high bits of param 2 */
/* Check if most significant bit of param 2 is set */
tbit $15, r5
lshd $1, (r5, r4) /* Shift left: low bits of param 2 */
bfc L6 /* If PSR flag is clear go to L6 */
addw $1, r6 /* Carry the shifted-out bit into the high half of b */
br L6 /* Go to L6 */
L10: /* While (bit) */
cmpd $0, (r11, r10)
bne L11
cmpd $0, (r9, r8)
beq E1
L11: /* If (num >= den) */
cmpd (r3, r2), (r7, r6) /* Compare high bits of param 1 and param 2 */
blo L15 /* If param 1 lower than param 2 go to L15 */
bne L12 /* If not equal go to L12 */
cmpd (r1, r0), (r5, r4) /* Compare low bits of param 1 and param 2 */
blo L15 /* If param 1 lower than param 2 go to L15 */
L12: /* Ures |= bit */
ord (r11, r10), (r13)
ord (r9, r8), (r12)
/* Num -= den */
subd (r7, r6), (r3, r2) /* Subtract highest 32 bits from each other */
subd (r5, r4), (r1, r0) /* Subtract lowest 32 bits from each other */
bcc L15 /* If no carry occurred go to L15 */
subd $1, (r3, r2) /* Subtract the carry */
L15: /* Shift bit to the right */
lshd $-1, (r9, r8) /* Shift right: low bits of bit */
/* Check if least significant bit of high bits is set */
tbit $0, r10
lshd $-1, (r11, r10) /* Shift right: high bits of bit */
bfs L18 /* If PSR is set go to L18 */
L17: /* Shift param#2 to the right */
lshd $-1, (r5, r4) /* Shift right: low bits of param 2 */
/* Check if least significant bit of high bits is set */
tbit $0, r6
lshd $-1, (r7, r6) /* Shift right: high bits of param 2 */
bfc L10 /* If PSR is not set go to L10 */
/* Or with 0x8000 to set most significant bit */
orw $32768, r5
br L10 /* Go to L10 */
L18: /* Or with 0x8000 to set most significant bit */
orw $32768, r9
br L17
L28: /* Left shift bit */
addw $1, r10 /* Add 1 to highest bits of bit */
br L9 /* Go to L9 */
LE: cmpd (r12), (r7, r6)
bne L5
excp dvz
br Lexit__
E1: loadw 18(sp), r4
cmpw $0, r4
bne Lexit__
/* Return result */
movd (r12), (r1, r0)
movd (r13), (r3, r2)
Lexit__:
pop $7, r7
popret $2, r13
#endif
#ifdef L_umoddi3
.text
.align 4
.globl ___umoddi3
___umoddi3:
movw $1, r2
br ___udivmoddi3
#endif
|
4ms/stm32mp1-baremetal
| 3,323
|
bootloaders/mp1-boot/startup.s
|
.syntax unified
.cpu cortex-a7
.equ MODE_FIQ, 0x11
.equ MODE_IRQ, 0x12
.equ MODE_SVC, 0x13
.equ MODE_ABT, 0x17
.equ MODE_UND, 0x1B
.equ MODE_SYS, 0x1F
.section .vector_table, "x"
.global _Reset
.global _start
_Reset:
b Reset_Handler
b Undef_Handler // 0x4 Undefined Instruction
b SVC_Handler // 0x8 Software Interrupt
b PAbt_Handler // 0xC Prefetch Abort
b DAbt_Handler // 0x10 Data Abort
b . // 0x14 Reserved
b IRQ_Handler // 0x18 IRQ
b FIQ_Handler // 0x1C FIQ
.section .text
Reset_Handler:
cpsid if // Mask Interrupts
mrc p15, 0, r0, c1, c0, 0 // Read System Control register (SCTLR)
bic r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
bic r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
bic r0, r0, #0x1 // Clear M bit 0 to disable MMU
bic r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
bic r0, r0, #(0x1 << 13) // Clear V bit 13 to disable High Vector Table Base Address
mcr p15, 0, r0, c1, c0, 0 // Write System Control register (SCTLR)
isb
// Configure ACTLR
mrc p15, 0, r0, c1, c0, 1 // Read CP15 Auxiliary Control Register
orr r0, r0, #(1 << 1) // Enable L2 prefetch hint
mcr p15, 0, r0, c1, c0, 1 // Write CP15 Auxiliary Control Register
// Set Vector Base Address Register (VBAR) to point to this application's vector table
ldr r0, =0x2FFC2500
mcr p15, 0, r0, c12, c0, 0
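// Each mode's stack below is filled with a distinct marker pattern so
// stack high-water marks can be inspected after the fact. Equivalent C
// sketch (illustrative): for (uint32_t *p = start; p < sp; ++p) *p = PATTERN;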
// FIQ stack: Fill with FEFF
msr cpsr_c, MODE_FIQ
ldr r1, =_fiq_stack_start
ldr sp, =_fiq_stack_end
movw r0, #0xFEFF
movt r0, #0xFEFF
fiq_loop:
cmp r1, sp
strlt r0, [r1], #4
blt fiq_loop
// IRQ stack: Fill with F1F1
msr cpsr_c, MODE_IRQ
ldr r1, =_irq_stack_start
ldr sp, =_irq_stack_end
movw r0, #0xF1F1
movt r0, #0xF1F1
irq_loop:
cmp r1, sp
strlt r0, [r1], #4
blt irq_loop
// Supervisor (SVC) stack: Fill with F5F5
msr cpsr_c, MODE_SVC
ldr r1, =_svc_stack_start
ldr sp, =_svc_stack_end
movw r0, #0xF5F5
movt r0, #0xF5F5
svc_loop:
cmp r1, sp
strlt r0, [r1], #4
blt svc_loop
// USER and SYS mode stack: Fill with F0F0
msr cpsr_c, MODE_SYS
ldr r1, =_user_stack_start
ldr sp, =_user_stack_end
movw r0, #0xF0F0
movt r0, #0xF0F0
usrsys_loop:
cmp r1, sp
strlt r0, [r1], #4
blt usrsys_loop
// Copy initialization values into .data (load address follows .text)
ldr r0, =_text_end
ldr r1, =_data_start
ldr r2, =_data_end
data_loop:
cmp r1, r2
ldrlt r3, [r0], #4
strlt r3, [r1], #4
blt data_loop
// Initialize .bss
mov r0, #0
ldr r1, =_bss_start
ldr r2, =_bss_end
bss_loop:
cmp r1, r2
strlt r0, [r1], #4
blt bss_loop
bl SystemInit // Setup MMU, TLB, Caches, FPU, IRQ
bl __libc_init_array // libc init (static constructors)
// Do not enable IRQ interrupts; this project doesn't use them
//cpsie i
run_main:
bl main
b Abort_Exception
Abort_Exception:
b .
Undef_Handler:
b .
SVC_Handler:
b .
PAbt_Handler:
b .
DAbt_Handler:
b .
IRQ_Handler:
b .
FIQ_Handler:
b .
|
4ms/metamodule-plugin-sdk
| 2,349
|
plugin-libc/libgcc/config/pa/lib2funcs.S
|
; Subroutines for calling unbound dynamic functions from within GDB for HPPA.
; Subroutines for out of line prologues and epilogues on for the HPPA
; Copyright (C) 1994-2022 Free Software Foundation, Inc.
; This file is part of GCC.
; GCC is free software; you can redistribute it and/or modify
; it under the terms of the GNU General Public License as published by
; the Free Software Foundation; either version 3, or (at your option)
; any later version.
; GCC is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; GNU General Public License for more details.
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#if !defined(__pro__) && !defined(__rtems__)
.SPACE $PRIVATE$
.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
.SPACE $TEXT$
.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
.SUBSPA $MILLICODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=8
#endif
.IMPORT $$dyncall,MILLICODE
#if !defined(__pro__) && !defined(__rtems__)
.SPACE $TEXT$
.SUBSPA $CODE$
#else
.text
#endif
; Simply call with the address of the desired import stub in %r22 and
; arguments in the normal place (%r26-%r23 and stack slots).
;
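; A sketch of the calling convention (restating the note above): the
; caller loads the import stub address into %r22, sets up %r26-%r23
; and any stack arguments as for an ordinary call, then calls
; __gcc_plt_call; the return value comes back in %r28 as usual.
;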
.align 4
.EXPORT __gcc_plt_call,ENTRY,PRIV_LEV=3,RTNVAL=GR
__gcc_plt_call
.PROC
.CALLINFO
.ENTRY
; Our return address comes in %r31, not %r2!
stw %r31,-8(%r30)
; An inline version of dyncall so we don't have to worry
; about long calls to millicode, PIC and other complexities.
bb,>=,n %r22,30,L$foo
depi 0,31,2,%r22
ldw 0(%r22),%r21
ldw 4(%r22),%r19
L$foo
ldsid (%r21),%r1
mtsp %r1,%sr0
ble 0(%sr0,%r21)
copy %r31,%r2
ldw -8(%r30),%r2
; We're going to be returning to a stack address, so we
; need to do an intra-space return.
ldsid (%rp),%r1
mtsp %r1,%sr0
be,n 0(%sr0,%rp)
.EXIT
.PROCEND
|
4ms/metamodule-plugin-sdk
| 72,005
|
plugin-libc/libgcc/config/pa/milli64.S
|
/* 32 and 64-bit millicode, original author Hewlett-Packard,
adapted for gcc by Paul Bame <bame@debian.org>
and Alan Modra <alan@linuxcare.com.au>.
Copyright (C) 2001-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
#ifdef pa64
.level 2.0w
#endif
/* Hardware General Registers. */
r0: .reg %r0
r1: .reg %r1
r2: .reg %r2
r3: .reg %r3
r4: .reg %r4
r5: .reg %r5
r6: .reg %r6
r7: .reg %r7
r8: .reg %r8
r9: .reg %r9
r10: .reg %r10
r11: .reg %r11
r12: .reg %r12
r13: .reg %r13
r14: .reg %r14
r15: .reg %r15
r16: .reg %r16
r17: .reg %r17
r18: .reg %r18
r19: .reg %r19
r20: .reg %r20
r21: .reg %r21
r22: .reg %r22
r23: .reg %r23
r24: .reg %r24
r25: .reg %r25
r26: .reg %r26
r27: .reg %r27
r28: .reg %r28
r29: .reg %r29
r30: .reg %r30
r31: .reg %r31
/* Hardware Space Registers. */
sr0: .reg %sr0
sr1: .reg %sr1
sr2: .reg %sr2
sr3: .reg %sr3
sr4: .reg %sr4
sr5: .reg %sr5
sr6: .reg %sr6
sr7: .reg %sr7
/* Hardware Floating Point Registers. */
fr0: .reg %fr0
fr1: .reg %fr1
fr2: .reg %fr2
fr3: .reg %fr3
fr4: .reg %fr4
fr5: .reg %fr5
fr6: .reg %fr6
fr7: .reg %fr7
fr8: .reg %fr8
fr9: .reg %fr9
fr10: .reg %fr10
fr11: .reg %fr11
fr12: .reg %fr12
fr13: .reg %fr13
fr14: .reg %fr14
fr15: .reg %fr15
/* Hardware Control Registers. */
cr11: .reg %cr11
sar: .reg %cr11 /* Shift Amount Register */
/* Software Architecture General Registers. */
rp: .reg r2 /* return pointer */
#ifdef pa64
mrp: .reg r2 /* millicode return pointer */
#else
mrp: .reg r31 /* millicode return pointer */
#endif
ret0: .reg r28 /* return value */
ret1: .reg r29 /* return value (high part of double) */
sp: .reg r30 /* stack pointer */
dp: .reg r27 /* data pointer */
arg0: .reg r26 /* argument */
arg1: .reg r25 /* argument or high part of double argument */
arg2: .reg r24 /* argument */
arg3: .reg r23 /* argument or high part of double argument */
/* Software Architecture Space Registers. */
/* sr0 ; return link from BLE */
sret: .reg sr1 /* return value */
sarg: .reg sr1 /* argument */
/* sr4 ; PC SPACE tracker */
/* sr5 ; process private data */
/* Frame Offsets (millicode convention!) Used when calling other
millicode routines. Stack unwinding is dependent upon these
definitions. */
r31_slot: .equ -20 /* "current RP" slot */
sr0_slot: .equ -16 /* "static link" slot */
#if defined(pa64)
mrp_slot: .equ -16 /* "current RP" slot */
psp_slot: .equ -8 /* "previous SP" slot */
#else
mrp_slot: .equ -20 /* "current RP" slot (replacing "r31_slot") */
#endif
#define DEFINE(name,value)name: .EQU value
#define RDEFINE(name,value)name: .REG value
#ifdef milliext
#define MILLI_BE(lbl) BE lbl(sr7,r0)
#define MILLI_BEN(lbl) BE,n lbl(sr7,r0)
#define MILLI_BLE(lbl) BLE lbl(sr7,r0)
#define MILLI_BLEN(lbl) BLE,n lbl(sr7,r0)
#define MILLIRETN BE,n 0(sr0,mrp)
#define MILLIRET BE 0(sr0,mrp)
#define MILLI_RETN BE,n 0(sr0,mrp)
#define MILLI_RET BE 0(sr0,mrp)
#else
#define MILLI_BE(lbl) B lbl
#define MILLI_BEN(lbl) B,n lbl
#define MILLI_BLE(lbl) BL lbl,mrp
#define MILLI_BLEN(lbl) BL,n lbl,mrp
#define MILLIRETN BV,n 0(mrp)
#define MILLIRET BV 0(mrp)
#define MILLI_RETN BV,n 0(mrp)
#define MILLI_RET BV 0(mrp)
#endif
#ifdef __STDC__
#define CAT(a,b) a##b
#else
#define CAT(a,b) a/**/b
#endif
#ifdef ELF
#define SUBSPA_MILLI .section .text
#define SUBSPA_MILLI_DIV .section .text.div,"ax",@progbits! .align 16
#define SUBSPA_MILLI_MUL .section .text.mul,"ax",@progbits! .align 16
#define ATTR_MILLI
#define SUBSPA_DATA .section .data
#define ATTR_DATA
#define GLOBAL $global$
#define GSYM(sym) !sym:
#define LSYM(sym) !CAT(.L,sym:)
#define LREF(sym) CAT(.L,sym)
#else
#ifdef coff
/* This used to be .milli but since link32 places different named
sections in different segments, millicode ends up a long way away
from .text (1meg?). This way they will be a lot closer.
The SUBSPA_MILLI_* specify locality sets for certain millicode
modules in order to ensure that modules that call one another are
placed close together. Without locality sets this is unlikely to
happen because of the Dynamite linker library search algorithm. We
want these modules close together so that short calls always reach
(we don't want to require long calls or use long call stubs). */
#define SUBSPA_MILLI .subspa .text
#define SUBSPA_MILLI_DIV .subspa .text$dv,align=16
#define SUBSPA_MILLI_MUL .subspa .text$mu,align=16
#define ATTR_MILLI .attr code,read,execute
#define SUBSPA_DATA .subspa .data
#define ATTR_DATA .attr init_data,read,write
#define GLOBAL _gp
#else
#define SUBSPA_MILLI .subspa $MILLICODE$,QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=8
#define SUBSPA_MILLI_DIV SUBSPA_MILLI
#define SUBSPA_MILLI_MUL SUBSPA_MILLI
#define ATTR_MILLI
#define SUBSPA_DATA .subspa $BSS$,quad=1,align=8,access=0x1f,sort=80,zero
#define ATTR_DATA
#define GLOBAL $global$
#endif
#define SPACE_DATA .space $PRIVATE$,spnum=1,sort=16
#define GSYM(sym) !sym
#define LSYM(sym) !CAT(L$,sym)
#define LREF(sym) CAT(L$,sym)
#endif
#ifdef L_dyncall
SUBSPA_MILLI
ATTR_DATA
GSYM($$dyncall)
.export $$dyncall,millicode
.proc
.callinfo millicode
.entry
#ifdef LINUX
extru,<> %r22,30,1,%r0 ; nullify if plabel bit set
bv,n %r0(%r22) ; branch to target
ldw -2(%r22),%r21 ; load address of target
bv %r0(%r21) ; branch to the real target
ldw 2(%r22),%r19 ; load new LTP value
#else
bb,>=,n %r22,30,LREF(1) ; branch if not plabel address
ldw -2(%r22),%r21 ; load address of target to r21
ldsid (%sr0,%r21),%r1 ; get the "space ident" selected by r21
ldw 2(%r22),%r19 ; load new LTP value
mtsp %r1,%sr0 ; move that space identifier into sr0
be 0(%sr0,%r21) ; branch to the real target
stw %r2,-24(%r30) ; save return address into frame marker
LSYM(1)
ldsid (%sr0,%r22),%r1 ; get the "space ident" selected by r22
mtsp %r1,%sr0 ; move that space identifier into sr0
be 0(%sr0,%r22) ; branch to the target
stw %r2,-24(%r30) ; save return address into frame marker
#endif
.exit
.procend
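/* For reference, a minimal C sketch of the plabel test $$dyncall
performs above: a function "pointer" with its low plabel bit set is
really the address of a descriptor (offset by 2) holding the code
address and the linkage-table pointer. Illustrative only; the names
below are hypothetical and nothing here is part of the build. */
#if 0
#include <stdint.h>
struct plabel_descriptor {
uint32_t target; /* code address loaded from -2(%r22) */
uint32_t ltp; /* linkage-table pointer loaded from 2(%r22) */
};
static uint32_t
dyncall_target_model (uint32_t fp, uint32_t *ltp_out)
{
if (fp & 2) /* plabel bit (bit 30 in PA numbering) set */
{
struct plabel_descriptor *d
= (struct plabel_descriptor *) (uintptr_t) (fp & ~3u);
*ltp_out = d->ltp; /* new LTP value for %r19 */
return d->target; /* branch to the real target */
}
return fp; /* plain code address: branch to it directly */
}
#endif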
#endif
#ifdef L_divI
/* ROUTINES: $$divI, $$divoI
Single precision divide for signed binary integers.
The quotient is truncated towards zero.
The sign of the quotient is the XOR of the signs of the dividend and
divisor.
Divide by zero is trapped.
Divide of -2**31 by -1 is trapped for $$divoI but not for $$divI.
INPUT REGISTERS:
. arg0 == dividend
. arg1 == divisor
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = undefined
. arg1 = undefined
. ret1 = quotient
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions:
. divisor is zero (traps with ADDIT,= 0,25,0)
. dividend==-2**31 and divisor==-1 and routine is $$divoI
. (traps with ADDO 26,25,0)
. Changes memory at the following places:
. NONE
PERMISSIBLE CONTEXT:
. Unwindable.
. Suitable for internal or external millicode.
. Assumes the special millicode register conventions.
DISCUSSION:
. Branches to other millicode routines using BE
. $$div_# for # being 2,3,4,5,6,7,8,9,10,12,14,15
.
. For selected divisors, calls a divide by constant routine written by
. Karl Pettis. Eligible divisors are 1..15 excluding 11 and 13.
.
. The only overflow case is -2**31 divided by -1.
. Both routines return -2**31 but only $$divoI traps. */
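/* For reference, a minimal C sketch of the contract described above,
assuming 32-bit two's-complement ints. Illustrative only; the name
divI_model is hypothetical and not part of this file. */
#if 0
#include <stdint.h>
static int32_t
divI_model (int32_t dividend, int32_t divisor)
{
/* divisor == 0 traps in the real routine; not modeled here. */
if (dividend == INT32_MIN && divisor == -1)
return INT32_MIN; /* $$divI wraps; only $$divoI traps on this case */
return dividend / divisor; /* C division already truncates toward zero */
}
#endif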
RDEFINE(temp,r1)
RDEFINE(retreg,ret1) /* r29 */
RDEFINE(temp1,arg0)
SUBSPA_MILLI_DIV
ATTR_MILLI
.import $$divI_2,millicode
.import $$divI_3,millicode
.import $$divI_4,millicode
.import $$divI_5,millicode
.import $$divI_6,millicode
.import $$divI_7,millicode
.import $$divI_8,millicode
.import $$divI_9,millicode
.import $$divI_10,millicode
.import $$divI_12,millicode
.import $$divI_14,millicode
.import $$divI_15,millicode
.export $$divI,millicode
.export $$divoI,millicode
.proc
.callinfo millicode
.entry
GSYM($$divoI)
comib,=,n -1,arg1,LREF(negative1) /* when divisor == -1 */
GSYM($$divI)
ldo -1(arg1),temp /* is there at most one bit set ? */
and,<> arg1,temp,r0 /* if not, don't use power of 2 divide */
addi,> 0,arg1,r0 /* if divisor > 0, use power of 2 divide */
b,n LREF(neg_denom)
LSYM(pow2)
addi,>= 0,arg0,retreg /* if numerator is negative, add the */
add arg0,temp,retreg /* (denominator - 1) to correct for shifts */
extru,= arg1,15,16,temp /* test denominator with 0xffff0000 */
extrs retreg,15,16,retreg /* retreg = retreg >> 16 */
or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 16) */
ldi 0xcc,temp1 /* setup 0xcc in temp1 */
extru,= arg1,23,8,temp /* test denominator with 0xff00 */
extrs retreg,23,24,retreg /* retreg = retreg >> 8 */
or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 8) */
ldi 0xaa,temp /* setup 0xaa in temp */
extru,= arg1,27,4,r0 /* test denominator with 0xf0 */
extrs retreg,27,28,retreg /* retreg = retreg >> 4 */
and,= arg1,temp1,r0 /* test denominator with 0xcc */
extrs retreg,29,30,retreg /* retreg = retreg >> 2 */
and,= arg1,temp,r0 /* test denominator with 0xaa */
extrs retreg,30,31,retreg /* retreg = retreg >> 1 */
MILLIRETN
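/* The bias trick used in the pow2 path above, as a C sketch: an
arithmetic right shift rounds toward minus infinity, so a negative
numerator is first biased by (denominator - 1) to get truncation
toward zero. Assumes arithmetic >> on signed ints (true for GCC);
illustrative only. */
#if 0
#include <stdint.h>
static int32_t
div_pow2_model (int32_t n, int k) /* divide by 2**k, 0 <= k <= 31 */
{
int32_t bias = (n < 0) ? (int32_t) ((1u << k) - 1) : 0;
return (n + bias) >> k; /* now matches n / (1 << k) */
}
#endif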
LSYM(neg_denom)
addi,< 0,arg1,r0 /* if arg1 >= 0, it's not power of 2 */
b,n LREF(regular_seq)
sub r0,arg1,temp /* make denominator positive */
comb,=,n arg1,temp,LREF(regular_seq) /* test against 0x80000000 and 0 */
ldo -1(temp),retreg /* is there at most one bit set ? */
and,= temp,retreg,r0 /* if so, the denominator is power of 2 */
b,n LREF(regular_seq)
sub r0,arg0,retreg /* negate numerator */
comb,=,n arg0,retreg,LREF(regular_seq) /* test against 0x80000000 */
copy retreg,arg0 /* set up arg0, arg1 and temp */
copy temp,arg1 /* before branching to pow2 */
b LREF(pow2)
ldo -1(arg1),temp
LSYM(regular_seq)
comib,>>=,n 15,arg1,LREF(small_divisor)
add,>= 0,arg0,retreg /* move dividend, if retreg < 0, */
LSYM(normal)
subi 0,retreg,retreg /* make it positive */
sub 0,arg1,temp /* clear carry, */
/* negate the divisor */
ds 0,temp,0 /* set V-bit to the comple- */
/* ment of the divisor sign */
add retreg,retreg,retreg /* shift msb bit into carry */
ds r0,arg1,temp /* 1st divide step, if no carry */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 2nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 3rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 4th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 5th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 6th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 7th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 8th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 9th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 10th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 11th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 12th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 13th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 14th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 15th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 16th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 17th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 18th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 19th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 20th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 21st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 22nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 23rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 24th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 25th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 26th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 27th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 28th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 29th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 30th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 31st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 32nd divide step, */
addc retreg,retreg,retreg /* shift last retreg bit into retreg */
xor,>= arg0,arg1,0 /* get correct sign of quotient */
sub 0,retreg,retreg /* based on operand signs */
MILLIRETN
nop
LSYM(small_divisor)
#if defined(pa64)
/* Clear the upper 32 bits of the arg1 register. We are working with */
/* small divisors (and 32-bit integers). We must not be misled */
/* by "1" bits left in the upper 32 bits. */
depd %r0,31,32,%r25
#endif
blr,n arg1,r0
nop
/* table for divisor == 0,1, ... ,15 */
addit,= 0,arg1,r0 /* trap if divisor == 0 */
nop
MILLIRET /* divisor == 1 */
copy arg0,retreg
MILLI_BEN($$divI_2) /* divisor == 2 */
nop
MILLI_BEN($$divI_3) /* divisor == 3 */
nop
MILLI_BEN($$divI_4) /* divisor == 4 */
nop
MILLI_BEN($$divI_5) /* divisor == 5 */
nop
MILLI_BEN($$divI_6) /* divisor == 6 */
nop
MILLI_BEN($$divI_7) /* divisor == 7 */
nop
MILLI_BEN($$divI_8) /* divisor == 8 */
nop
MILLI_BEN($$divI_9) /* divisor == 9 */
nop
MILLI_BEN($$divI_10) /* divisor == 10 */
nop
b LREF(normal) /* divisor == 11 */
add,>= 0,arg0,retreg
MILLI_BEN($$divI_12) /* divisor == 12 */
nop
b LREF(normal) /* divisor == 13 */
add,>= 0,arg0,retreg
MILLI_BEN($$divI_14) /* divisor == 14 */
nop
MILLI_BEN($$divI_15) /* divisor == 15 */
nop
LSYM(negative1)
sub 0,arg0,retreg /* result is negation of dividend */
MILLIRET
addo arg0,arg1,r0 /* trap iff dividend==0x80000000 && divisor==-1 */
.exit
.procend
.end
#endif
#ifdef L_divU
/* ROUTINE: $$divU
.
. Single precision divide for unsigned integers.
.
. Quotient is truncated towards zero.
. Traps on divide by zero.
INPUT REGISTERS:
. arg0 == dividend
. arg1 == divisor
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = undefined
. arg1 = undefined
. ret1 = quotient
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions:
. divisor is zero
. Changes memory at the following places:
. NONE
PERMISSIBLE CONTEXT:
. Unwindable.
. Does not create a stack frame.
. Suitable for internal or external millicode.
. Assumes the special millicode register conventions.
DISCUSSION:
. Branches to other millicode routines using BE:
. $$divU_# for 3,5,6,7,9,10,12,14,15
.
. For selected small divisors calls the special divide by constant
. routines written by Karl Pettis. These are: 3,5,6,7,9,10,12,14,15. */
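/* For reference, a C model of the 32-step divide that the ds sequence
below performs, written in the simpler restoring form (the ds
instruction itself is non-restoring, but the quotient is the same).
Illustrative only; not part of the build. */
#if 0
#include <stdint.h>
static uint32_t
divU_model (uint32_t dividend, uint32_t divisor)
{
uint32_t rem = 0, quot = 0;
for (int i = 31; i >= 0; i--)
{
rem = (rem << 1) | ((dividend >> i) & 1); /* shift in next bit */
quot <<= 1;
if (rem >= divisor) /* one divide step: subtract if it fits */
{
rem -= divisor;
quot |= 1;
}
}
return quot;
}
#endif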
RDEFINE(temp,r1)
RDEFINE(retreg,ret1) /* r29 */
RDEFINE(temp1,arg0)
SUBSPA_MILLI_DIV
ATTR_MILLI
.export $$divU,millicode
.import $$divU_3,millicode
.import $$divU_5,millicode
.import $$divU_6,millicode
.import $$divU_7,millicode
.import $$divU_9,millicode
.import $$divU_10,millicode
.import $$divU_12,millicode
.import $$divU_14,millicode
.import $$divU_15,millicode
.proc
.callinfo millicode
.entry
GSYM($$divU)
/* The subtract is not nullified since it does no harm and can be used
by the two cases that branch back to "normal". */
ldo -1(arg1),temp /* is there at most one bit set ? */
and,= arg1,temp,r0 /* if so, denominator is power of 2 */
b LREF(regular_seq)
addit,= 0,arg1,0 /* trap for zero dvr */
copy arg0,retreg
extru,= arg1,15,16,temp /* test denominator with 0xffff0000 */
extru retreg,15,16,retreg /* retreg = retreg >> 16 */
or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 16) */
ldi 0xcc,temp1 /* setup 0xcc in temp1 */
extru,= arg1,23,8,temp /* test denominator with 0xff00 */
extru retreg,23,24,retreg /* retreg = retreg >> 8 */
or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 8) */
ldi 0xaa,temp /* setup 0xaa in temp */
extru,= arg1,27,4,r0 /* test denominator with 0xf0 */
extru retreg,27,28,retreg /* retreg = retreg >> 4 */
and,= arg1,temp1,r0 /* test denominator with 0xcc */
extru retreg,29,30,retreg /* retreg = retreg >> 2 */
and,= arg1,temp,r0 /* test denominator with 0xaa */
extru retreg,30,31,retreg /* retreg = retreg >> 1 */
MILLIRETN
nop
LSYM(regular_seq)
comib,>= 15,arg1,LREF(special_divisor)
subi 0,arg1,temp /* clear carry, negate the divisor */
ds r0,temp,r0 /* set V-bit to 1 */
LSYM(normal)
add arg0,arg0,retreg /* shift msb bit into carry */
ds r0,arg1,temp /* 1st divide step, if no carry */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 2nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 3rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 4th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 5th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 6th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 7th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 8th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 9th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 10th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 11th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 12th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 13th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 14th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 15th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 16th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 17th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 18th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 19th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 20th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 21st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 22nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 23rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 24th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 25th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 26th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 27th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 28th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 29th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 30th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 31st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 32nd divide step, */
MILLIRET
addc retreg,retreg,retreg /* shift last retreg bit into retreg */
/* Handle the cases where divisor is a small constant or has high bit on. */
LSYM(special_divisor)
/* blr arg1,r0 */
/* comib,>,n 0,arg1,LREF(big_divisor) ; nullify previous instruction */
/* Pratap 8/13/90. The 815 Stirling chip set has a bug that prevents us from
generating such a blr, comib sequence. A problem in nullification. So I
rewrote this code. */
#if defined(pa64)
/* Clear the upper 32 bits of the arg1 register. We are working with
small divisors (and 32-bit unsigned integers). We must not be misled
by "1" bits left in the upper 32 bits. */
depd %r0,31,32,%r25
#endif
comib,> 0,arg1,LREF(big_divisor)
nop
blr arg1,r0
nop
LSYM(zero_divisor) /* this label is here to provide external visibility */
addit,= 0,arg1,0 /* trap for zero dvr */
nop
MILLIRET /* divisor == 1 */
copy arg0,retreg
MILLIRET /* divisor == 2 */
extru arg0,30,31,retreg
MILLI_BEN($$divU_3) /* divisor == 3 */
nop
MILLIRET /* divisor == 4 */
extru arg0,29,30,retreg
MILLI_BEN($$divU_5) /* divisor == 5 */
nop
MILLI_BEN($$divU_6) /* divisor == 6 */
nop
MILLI_BEN($$divU_7) /* divisor == 7 */
nop
MILLIRET /* divisor == 8 */
extru arg0,28,29,retreg
MILLI_BEN($$divU_9) /* divisor == 9 */
nop
MILLI_BEN($$divU_10) /* divisor == 10 */
nop
b LREF(normal) /* divisor == 11 */
ds r0,temp,r0 /* set V-bit to 1 */
MILLI_BEN($$divU_12) /* divisor == 12 */
nop
b LREF(normal) /* divisor == 13 */
ds r0,temp,r0 /* set V-bit to 1 */
MILLI_BEN($$divU_14) /* divisor == 14 */
nop
MILLI_BEN($$divU_15) /* divisor == 15 */
nop
/* Handle the case where the high bit is on in the divisor.
Compute: if (dividend >= divisor) quotient = 1; else quotient = 0;
Note: dividend >= divisor iff dividend-divisor does not borrow,
and no borrow iff carry. */
LSYM(big_divisor)
sub arg0,arg1,r0
MILLIRET
addc r0,r0,retreg
.exit
.procend
.end
#endif
#ifdef L_remI
/* ROUTINE: $$remI
DESCRIPTION:
. $$remI returns the remainder of the division of two signed 32-bit
. integers. The sign of the remainder is the same as the sign of
. the dividend.
INPUT REGISTERS:
. arg0 == dividend
. arg1 == divisor
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = destroyed
. arg1 = destroyed
. ret1 = remainder
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions: DIVIDE BY ZERO
. Changes memory at the following places: NONE
PERMISSIBLE CONTEXT:
. Unwindable
. Does not create a stack frame
. Is usable for internal or external millicode
DISCUSSION:
. Calls other millicode routines via mrp: NONE
. Calls other millicode routines: NONE */
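/* For reference, a C sketch of the contract above: the remainder takes
the sign of the dividend, exactly as C's % does for truncating
division. Illustrative only; remI_model is a hypothetical name. */
#if 0
#include <stdint.h>
static int32_t
remI_model (int32_t dividend, int32_t divisor)
{
/* divisor == 0 traps in the real routine; not modeled here. */
if (divisor == -1)
return 0; /* sidesteps the INT32_MIN / -1 overflow; x % -1 == 0 */
return dividend % divisor; /* e.g. -7 % 3 == -1, 7 % -3 == 1 */
}
#endif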
RDEFINE(tmp,r1)
RDEFINE(retreg,ret1)
SUBSPA_MILLI
ATTR_MILLI
.proc
.callinfo millicode
.entry
GSYM($$remI)
GSYM($$remoI)
.export $$remI,MILLICODE
.export $$remoI,MILLICODE
ldo -1(arg1),tmp /* is there at most one bit set ? */
and,<> arg1,tmp,r0 /* if not, don't use power of 2 */
addi,> 0,arg1,r0 /* if denominator > 0, use power */
/* of 2 */
b,n LREF(neg_denom)
LSYM(pow2)
comb,>,n 0,arg0,LREF(neg_num) /* is numerator < 0 ? */
and arg0,tmp,retreg /* get the result */
MILLIRETN
LSYM(neg_num)
subi 0,arg0,arg0 /* negate numerator */
and arg0,tmp,retreg /* get the result */
subi 0,retreg,retreg /* negate result */
MILLIRETN
LSYM(neg_denom)
addi,< 0,arg1,r0 /* if arg1 >= 0, it's not power */
/* of 2 */
b,n LREF(regular_seq)
sub r0,arg1,tmp /* make denominator positive */
comb,=,n arg1,tmp,LREF(regular_seq) /* test against 0x80000000 and 0 */
ldo -1(tmp),retreg /* is there at most one bit set ? */
and,= tmp,retreg,r0 /* if not, go to regular_seq */
b,n LREF(regular_seq)
comb,>,n 0,arg0,LREF(neg_num_2) /* if arg0 < 0, negate it */
and arg0,retreg,retreg
MILLIRETN
LSYM(neg_num_2)
subi 0,arg0,tmp /* test against 0x80000000 */
and tmp,retreg,retreg
subi 0,retreg,retreg
MILLIRETN
LSYM(regular_seq)
addit,= 0,arg1,0 /* trap if div by zero */
add,>= 0,arg0,retreg /* move dividend, if retreg < 0, */
sub 0,retreg,retreg /* make it positive */
sub 0,arg1, tmp /* clear carry, */
/* negate the divisor */
ds 0, tmp,0 /* set V-bit to the comple- */
/* ment of the divisor sign */
or 0,0, tmp /* clear tmp */
add retreg,retreg,retreg /* shift msb bit into carry */
ds tmp,arg1, tmp /* 1st divide step, if no carry */
/* out, msb of quotient = 0 */
addc retreg,retreg,retreg /* shift retreg with/into carry */
LSYM(t1)
ds tmp,arg1, tmp /* 2nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 3rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 4th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 5th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 6th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 7th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 8th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 9th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 10th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 11th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 12th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 13th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 14th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 15th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 16th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 17th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 18th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 19th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 20th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 21st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 22nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 23rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 24th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 25th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 26th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 27th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 28th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 29th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 30th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 31st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 32nd divide step, */
addc retreg,retreg,retreg /* shift last bit into retreg */
movb,>=,n tmp,retreg,LREF(finish) /* branch if pos. tmp */
add,< arg1,0,0 /* if arg1 > 0, add arg1 */
add,tr tmp,arg1,retreg /* for correcting remainder tmp */
sub tmp,arg1,retreg /* else add absolute value arg1 */
LSYM(finish)
add,>= arg0,0,0 /* set sign of remainder */
sub 0,retreg,retreg /* to sign of dividend */
MILLIRET
nop
.exit
.procend
#ifdef milliext
.origin 0x00000200
#endif
.end
#endif
#ifdef L_remU
/* ROUTINE: $$remU
. Single precision divide for remainder with unsigned binary integers.
.
. The remainder must be dividend-(dividend/divisor)*divisor.
. Divide by zero is trapped.
INPUT REGISTERS:
. arg0 == dividend
. arg1 == divisor
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = undefined
. arg1 = undefined
. ret1 = remainder
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions: DIVIDE BY ZERO
. Changes memory at the following places: NONE
PERMISSIBLE CONTEXT:
. Unwindable.
. Does not create a stack frame.
. Suitable for internal or external millicode.
. Assumes the special millicode register conventions.
DISCUSSION:
. Calls other millicode routines using mrp: NONE
. Calls other millicode routines: NONE */
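/* For reference, a C sketch of the two facts the code below relies on:
the defining identity rem = dividend - (dividend/divisor)*divisor,
and the power-of-2 shortcut rem = dividend & (divisor - 1) taken by
the fast path. Illustrative only. */
#if 0
#include <stdint.h>
static uint32_t
remU_model (uint32_t dividend, uint32_t divisor)
{
/* divisor == 0 traps in the real routine; not modeled here. */
if ((divisor & (divisor - 1)) == 0) /* at most one bit set */
return dividend & (divisor - 1); /* the fast path below */
return dividend - (dividend / divisor) * divisor;
}
#endif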
RDEFINE(temp,r1)
RDEFINE(rmndr,ret1) /* r29 */
SUBSPA_MILLI
ATTR_MILLI
.export $$remU,millicode
.proc
.callinfo millicode
.entry
GSYM($$remU)
ldo -1(arg1),temp /* is there at most one bit set ? */
and,= arg1,temp,r0 /* if not, don't use power of 2 */
b LREF(regular_seq)
addit,= 0,arg1,r0 /* trap on div by zero */
and arg0,temp,rmndr /* get the result for power of 2 */
MILLIRETN
LSYM(regular_seq)
comib,>=,n 0,arg1,LREF(special_case)
subi 0,arg1,rmndr /* clear carry, negate the divisor */
ds r0,rmndr,r0 /* set V-bit to 1 */
add arg0,arg0,temp /* shift msb bit into carry */
ds r0,arg1,rmndr /* 1st divide step, if no carry */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 2nd divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 3rd divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 4th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 5th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 6th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 7th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 8th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 9th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 10th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 11th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 12th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 13th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 14th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 15th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 16th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 17th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 18th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 19th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 20th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 21st divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 22nd divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 23rd divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 24th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 25th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 26th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 27th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 28th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 29th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 30th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 31st divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 32nd divide step, */
comiclr,<= 0,rmndr,r0
add rmndr,arg1,rmndr /* correction */
MILLIRETN
nop
/* Putting >= on the last DS and deleting COMICLR does not work! */
LSYM(special_case)
sub,>>= arg0,arg1,rmndr
copy arg0,rmndr
MILLIRETN
nop
.exit
.procend
.end
#endif
#ifdef L_div_const
/* ROUTINE: $$divI_2
. $$divI_3 $$divU_3
. $$divI_4
. $$divI_5 $$divU_5
. $$divI_6 $$divU_6
. $$divI_7 $$divU_7
. $$divI_8
. $$divI_9 $$divU_9
. $$divI_10 $$divU_10
.
. $$divI_12 $$divU_12
.
. $$divI_14 $$divU_14
. $$divI_15 $$divU_15
. $$divI_16
. $$divI_17 $$divU_17
.
. Divide by selected constants for single precision binary integers.
INPUT REGISTERS:
. arg0 == dividend
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = undefined
. arg1 = undefined
. ret1 = quotient
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions: NONE
. Changes memory at the following places: NONE
PERMISSIBLE CONTEXT:
. Unwindable.
. Does not create a stack frame.
. Suitable for internal or external millicode.
. Assumes the special millicode register conventions.
DISCUSSION:
. Calls other millicode routines using mrp: NONE
. Calls other millicode routines: NONE */
/* TRUNCATED DIVISION BY SMALL INTEGERS
We are interested in q(x) = floor(x/y), where x >= 0 and y > 0
(with y fixed).
Let a = floor(z/y), for some choice of z. Note that z will be
chosen so that division by z is cheap.
Let r be the remainder(z/y). In other words, r = z - ay.
Now, our method is to choose a value for b such that
q'(x) = floor((ax+b)/z)
is equal to q(x) over as large a range of x as possible. If the
two are equal over a sufficiently large range, and if it is easy to
form the product (ax), and it is easy to divide by z, then we can
perform the division much faster than the general division algorithm.
So, we want the following to be true:
. For x in the following range:
.
. ky <= x < (k+1)y
.
. implies that
.
. k <= (ax+b)/z < (k+1)
We want to determine b such that this is true for all k in the
range {0..K} for some maximum K.
Since (ax+b) is an increasing function of x, we can take each
bound separately to determine the "best" value for b.
(ax+b)/z < (k+1) implies
a((k+1)y-1)+b < (k+1)z implies
b < a + (k+1)(z-ay) implies
b < a + (k+1)r
This needs to be true for all k in the range {0..K}. In
particular, it is true for k = 0 and this leads to a maximum
acceptable value for b.
b < a+r or b <= a+r-1
Taking the other bound, we have
k <= (ax+b)/z implies
k <= (aky+b)/z implies
k(z-ay) <= b implies
kr <= b
Clearly, the largest range for k will be achieved by maximizing b,
when r is not zero. When r is zero, then the simplest choice for b
is 0. When r is not 0, set
. b = a+r-1
Now, by construction, q'(x) = floor((ax+b)/z) = q(x) = floor(x/y)
for all x in the range:
. 0 <= x < (K+1)y
We need to determine what K is. Of our two bounds,
. b < a+(k+1)r is satisfied for all k >= 0, by construction.
The other bound is
. kr <= b
This is always true if r = 0. If r is not 0 (the usual case), then
K = floor((a+r-1)/r), is the maximum value for k.
Therefore, the formula q'(x) = floor((ax+b)/z) yields the correct
answer for q(x) = floor(x/y) when x is in the range
0 <= x <= (K+1)y-1, where K = floor((a+r-1)/r)
To be most useful, we want (K+1)y-1 = (max x) >= 2**32-1 so that
the formula for q'(x) yields the correct value of q(x) for all x
representable by a single word in HPPA.
We are also constrained in that computing the product (ax), adding
b, and dividing by z must all be done quickly, otherwise we will be
better off going through the general algorithm using the DS
instruction, which uses approximately 70 cycles.
For each y, there is a choice of z which satisfies the constraints
for (K+1)y >= 2**32. We may not, however, be able to satisfy the
timing constraints for arbitrary y. It seems that z being equal to
a power of 2 or a power of 2 minus 1 is as good as we can do, since
it minimizes the time to do division by z. We want the choice of z
to also result in a value for (a) that minimizes the computation of
the product (ax). This is best achieved if (a) has a regular bit
pattern (so the multiplication can be done with shifts and adds).
The value of (a) also needs to be less than 2**32 so the product is
always guaranteed to fit in 2 words.
In actual practice, the following should be done:
1) For negative x, you should take the absolute value and remember
. the fact so that the result can be negated. This obviously does
. not apply in the unsigned case.
2) For even y, you should factor out the power of 2 that divides y
. and divide x by it. You can then proceed by dividing by the
. odd factor of y.
Here is a table of some odd values of y, and corresponding choices
for z which are "good".
y z r a (hex) max x (hex)
3 2**32 1 55555555 100000001
5 2**32 1 33333333 100000003
7 2**24-1 0 249249 (infinite)
9 2**24-1 0 1c71c7 (infinite)
11 2**20-1 0 1745d (infinite)
13 2**24-1 0 13b13b (infinite)
15 2**32 1 11111111 10000000d
17 2**32 1 f0f0f0f 10000000f
If r is 1, then b = a+r-1 = a. This simplifies the computation
of (ax+b), since you can compute (x+1)(a) instead. If r is 0,
then b = 0 is ok to use which simplifies (ax+b).
The bit patterns for 55555555, 33333333, and 11111111 are obviously
very regular. The bit patterns for the other values of a above are:
y (hex) (binary)
7 249249 001001001001001001001001 << regular >>
9 1c71c7 000111000111000111000111 << regular >>
11 1745d 000000010111010001011101 << irregular >>
13 13b13b 000100111011000100111011 << irregular >>
The bit patterns for (a) corresponding to (y) of 11 and 13 may be
too irregular to warrant using this method.
When z is a power of 2 minus 1, then the division by z is slightly
more complicated, involving an iterative solution.
The code presented here solves division by 1 through 17, except for
11 and 13. There are algorithms for both signed and unsigned
quantities given.
TIMINGS (cycles)
divisor positive negative unsigned
. 1 2 2 2
. 2 4 4 2
. 3 19 21 19
. 4 4 4 2
. 5 18 22 19
. 6 19 22 19
. 8 4 4 2
. 10 18 19 17
. 12 18 20 18
. 15 16 18 16
. 16 4 4 2
. 17 16 18 16
Now, the algorithm for 7, 9, and 14 is an iterative one. That is,
a loop body is executed until the tentative quotient is 0. The
number of times the loop body is executed varies depending on the
dividend, but is never more than two times. If the dividend is
less than the divisor, then the loop body is not executed at all.
Each iteration adds 4 cycles to the timings.
divisor positive negative unsigned
. 7 19+4n 20+4n 20+4n n = number of iterations
. 9 21+4n 22+4n 21+4n
. 14 21+4n 22+4n 20+4n
To give an idea of how the number of iterations varies, here is a
table of dividend versus number of iterations when dividing by 7.
smallest largest required
dividend dividend iterations
. 0 6 0
. 7 0x6ffffff 1
0x1000006 0xffffffff 2
There is some overlap in the range of numbers requiring 1 and 2
iterations. */
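/* A small check of the construction above for y = 3: with z = 2**32,
a = 0x55555555 and r = 1, choosing b = a gives
floor((a*x + b)/z) == x/3 for every 32-bit unsigned x, since
max x = 0x100000001 exceeds 2**32 - 1. A sketch only; div3_model
is a hypothetical name. */
#if 0
#include <stdint.h>
static uint32_t
div3_model (uint32_t x)
{
const uint64_t a = 0x55555555u;
return (uint32_t) ((a * x + a) >> 32); /* == a*(x+1) >> 32, as b == a */
}
/* e.g. div3_model (0xffffffffu) == 0xffffffffu / 3 == 0x55555555 */
#endif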
RDEFINE(t2,r1)
RDEFINE(x2,arg0) /* r26 */
RDEFINE(t1,arg1) /* r25 */
RDEFINE(x1,ret1) /* r29 */
SUBSPA_MILLI_DIV
ATTR_MILLI
.proc
.callinfo millicode
.entry
/* NONE of these routines require a stack frame
ALL of these routines are unwindable from millicode */
GSYM($$divide_by_constant)
.export $$divide_by_constant,millicode
/* Provides a "nice" label for the code covered by the unwind descriptor
for things like gprof. */
/* DIVISION BY 2 (shift by 1) */
GSYM($$divI_2)
.export $$divI_2,millicode
comclr,>= arg0,0,0
addi 1,arg0,arg0
MILLIRET
extrs arg0,30,31,ret1
/* DIVISION BY 4 (shift by 2) */
GSYM($$divI_4)
.export $$divI_4,millicode
comclr,>= arg0,0,0
addi 3,arg0,arg0
MILLIRET
extrs arg0,29,30,ret1
/* DIVISION BY 8 (shift by 3) */
GSYM($$divI_8)
.export $$divI_8,millicode
comclr,>= arg0,0,0
addi 7,arg0,arg0
MILLIRET
extrs arg0,28,29,ret1
/* DIVISION BY 16 (shift by 4) */
GSYM($$divI_16)
.export $$divI_16,millicode
comclr,>= arg0,0,0
addi 15,arg0,arg0
MILLIRET
extrs arg0,27,28,ret1
/****************************************************************************
*
* DIVISION BY DIVISORS OF FFFFFFFF, and powers of 2 times these
*
* includes 3,5,15,17 and also 6,10,12
*
****************************************************************************/
/* DIVISION BY 3 (use z = 2**32; a = 55555555) */
GSYM($$divI_3)
.export $$divI_3,millicode
comb,<,N x2,0,LREF(neg3)
addi 1,x2,x2 /* this cannot overflow */
extru x2,1,2,x1 /* multiply by 5 to get started */
sh2add x2,x2,x2
b LREF(pos)
addc x1,0,x1
LSYM(neg3)
subi 1,x2,x2 /* this cannot overflow */
extru x2,1,2,x1 /* multiply by 5 to get started */
sh2add x2,x2,x2
b LREF(neg)
addc x1,0,x1
GSYM($$divU_3)
.export $$divU_3,millicode
addi 1,x2,x2 /* this CAN overflow */
addc 0,0,x1
shd x1,x2,30,t1 /* multiply by 5 to get started */
sh2add x2,x2,x2
b LREF(pos)
addc x1,t1,x1
/* DIVISION BY 5 (use z = 2**32; a = 33333333) */
GSYM($$divI_5)
.export $$divI_5,millicode
comb,<,N x2,0,LREF(neg5)
addi 3,x2,t1 /* this cannot overflow */
sh1add x2,t1,x2 /* multiply by 3 to get started */
b LREF(pos)
addc 0,0,x1
LSYM(neg5)
sub 0,x2,x2 /* negate x2 */
addi 1,x2,x2 /* this cannot overflow */
shd 0,x2,31,x1 /* get top bit (can be 1) */
sh1add x2,x2,x2 /* multiply by 3 to get started */
b LREF(neg)
addc x1,0,x1
GSYM($$divU_5)
.export $$divU_5,millicode
addi 1,x2,x2 /* this CAN overflow */
addc 0,0,x1
shd x1,x2,31,t1 /* multiply by 3 to get started */
sh1add x2,x2,x2
b LREF(pos)
addc t1,x1,x1
/* DIVISION BY 6 (shift to divide by 2 then divide by 3) */
GSYM($$divI_6)
.export $$divI_6,millicode
comb,<,N x2,0,LREF(neg6)
extru x2,30,31,x2 /* divide by 2 */
addi 5,x2,t1 /* compute 5*(x2+1) = 5*x2+5 */
sh2add x2,t1,x2 /* multiply by 5 to get started */
b LREF(pos)
addc 0,0,x1
LSYM(neg6)
subi 2,x2,x2 /* negate, divide by 2, and add 1 */
/* negation and adding 1 are done */
/* at the same time by the SUBI */
extru x2,30,31,x2
shd 0,x2,30,x1
sh2add x2,x2,x2 /* multiply by 5 to get started */
b LREF(neg)
addc x1,0,x1
GSYM($$divU_6)
.export $$divU_6,millicode
extru x2,30,31,x2 /* divide by 2 */
addi 1,x2,x2 /* cannot carry */
shd 0,x2,30,x1 /* multiply by 5 to get started */
sh2add x2,x2,x2
b LREF(pos)
addc x1,0,x1
/* DIVISION BY 10 (shift to divide by 2 then divide by 5) */
GSYM($$divU_10)
.export $$divU_10,millicode
extru x2,30,31,x2 /* divide by 2 */
addi 3,x2,t1 /* compute 3*(x2+1) = (3*x2)+3 */
sh1add x2,t1,x2 /* multiply by 3 to get started */
addc 0,0,x1
LSYM(pos)
shd x1,x2,28,t1 /* multiply by 0x11 */
shd x2,0,28,t2
add x2,t2,x2
addc x1,t1,x1
LSYM(pos_for_17)
shd x1,x2,24,t1 /* multiply by 0x101 */
shd x2,0,24,t2
add x2,t2,x2
addc x1,t1,x1
shd x1,x2,16,t1 /* multiply by 0x10001 */
shd x2,0,16,t2
add x2,t2,x2
MILLIRET
addc x1,t1,x1
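/* The multiply chain used above, as a C sketch: once the value holds a
factor of 5, multiplying in turn by 0x11, 0x101 and 0x10001 (each a
single shift-and-add) spreads it across the word, because
5 * 0x11 * 0x101 * 0x10001 == 0x55555555. The assembly keeps the
64-bit product in the x1/x2 register pair. Illustrative only. */
#if 0
#include <stdint.h>
static uint64_t
mul_0x55555555 (uint32_t x)
{
uint64_t t = (uint64_t) x * 5; /* sh2add: x + 4*x */
t += t << 4; /* * 0x11 */
t += t << 8; /* * 0x101 */
t += t << 16; /* * 0x10001 */
return t; /* == (uint64_t) x * 0x55555555 */
}
#endif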
GSYM($$divI_10)
.export $$divI_10,millicode
comb,< x2,0,LREF(neg10)
copy 0,x1
extru x2,30,31,x2 /* divide by 2 */
addib,TR 1,x2,LREF(pos) /* add 1 (cannot overflow) */
sh1add x2,x2,x2 /* multiply by 3 to get started */
LSYM(neg10)
subi 2,x2,x2 /* negate, divide by 2, and add 1 */
/* negation and adding 1 are done */
/* at the same time by the SUBI */
extru x2,30,31,x2
sh1add x2,x2,x2 /* multiply by 3 to get started */
LSYM(neg)
shd x1,x2,28,t1 /* multiply by 0x11 */
shd x2,0,28,t2
add x2,t2,x2
addc x1,t1,x1
LSYM(neg_for_17)
shd x1,x2,24,t1 /* multiply by 0x101 */
shd x2,0,24,t2
add x2,t2,x2
addc x1,t1,x1
shd x1,x2,16,t1 /* multiply by 0x10001 */
shd x2,0,16,t2
add x2,t2,x2
addc x1,t1,x1
MILLIRET
sub 0,x1,x1
/* DIVISION BY 12 (shift to divide by 4 then divide by 3) */
GSYM($$divI_12)
.export $$divI_12,millicode
comb,< x2,0,LREF(neg12)
copy 0,x1
extru x2,29,30,x2 /* divide by 4 */
addib,tr 1,x2,LREF(pos) /* compute 5*(x2+1) = 5*x2+5 */
sh2add x2,x2,x2 /* multiply by 5 to get started */
LSYM(neg12)
subi 4,x2,x2 /* negate, divide by 4, and add 1 */
/* negation and adding 1 are done */
/* at the same time by the SUBI */
extru x2,29,30,x2
b LREF(neg)
sh2add x2,x2,x2 /* multiply by 5 to get started */
GSYM($$divU_12)
.export $$divU_12,millicode
extru x2,29,30,x2 /* divide by 4 */
addi 5,x2,t1 /* cannot carry */
sh2add x2,t1,x2 /* multiply by 5 to get started */
b LREF(pos)
addc 0,0,x1
/* DIVISION BY 15 (use z = 2**32; a = 11111111) */
GSYM($$divI_15)
.export $$divI_15,millicode
comb,< x2,0,LREF(neg15)
copy 0,x1
addib,tr 1,x2,LREF(pos)+4
shd x1,x2,28,t1
LSYM(neg15)
b LREF(neg)
subi 1,x2,x2
GSYM($$divU_15)
.export $$divU_15,millicode
addi 1,x2,x2 /* this CAN overflow */
b LREF(pos)
addc 0,0,x1
/* DIVISION BY 17 (use z = 2**32; a = f0f0f0f) */
GSYM($$divI_17)
.export $$divI_17,millicode
comb,<,n x2,0,LREF(neg17)
addi 1,x2,x2 /* this cannot overflow */
shd 0,x2,28,t1 /* multiply by 0xf to get started */
shd x2,0,28,t2
sub t2,x2,x2
b LREF(pos_for_17)
subb t1,0,x1
LSYM(neg17)
subi 1,x2,x2 /* this cannot overflow */
shd 0,x2,28,t1 /* multiply by 0xf to get started */
shd x2,0,28,t2
sub t2,x2,x2
b LREF(neg_for_17)
subb t1,0,x1
GSYM($$divU_17)
.export $$divU_17,millicode
addi 1,x2,x2 /* this CAN overflow */
addc 0,0,x1
shd x1,x2,28,t1 /* multiply by 0xf to get started */
LSYM(u17)
shd x2,0,28,t2
sub t2,x2,x2
b LREF(pos_for_17)
subb t1,x1,x1
/* DIVISION BY DIVISORS OF FFFFFF, and powers of 2 times these
includes 7,9 and also 14
z = 2**24-1
r = z mod x = 0
so choose b = 0
Also, in order to divide by z = 2**24-1, we approximate by dividing
by (z+1) = 2**24 (which is easy), and then correcting.
(ax) = (z+1)q' + r
. = zq' + (q'+r)
So to compute (ax)/z, compute q' = (ax)/(z+1) and r = (ax) mod (z+1)
Then the true remainder of (ax)/z is (q'+r). Repeat the process
with this new remainder, adding the tentative quotients together,
until a tentative quotient is 0 (and then we are done). There is
one last correction to be done. It is possible that (q'+r) = z.
If so, then (q'+r)/(z+1) = 0 and it looks like we are done. But,
in fact, we need to add 1 more to the quotient. Now, it turns
out that this happens if and only if the original value x is
an exact multiple of y. So, to avoid a three instruction test at
the end, instead use 1 instruction to add 1 to x at the beginning. */
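/* The correction loop described above, as a C sketch: to divide t by
z = 2**24 - 1, repeatedly split t into q' = t >> 24 and
r = t & 0xffffff, accumulate q', and continue with q' + r. The
(q'+r) == z corner case is what the "add 1 to x at the beginning"
trick handles in the real code. Illustrative only. */
#if 0
#include <stdint.h>
static uint32_t
div_2p24m1_model (uint64_t t)
{
uint32_t q = 0, qp;
while ((qp = (uint32_t) (t >> 24)) != 0) /* tentative quotient */
{
q += qp;
t = (t & 0xffffff) + qp; /* new "remainder" is q' + r */
}
return q; /* what is left in t is the remainder */
}
#endif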
/* DIVISION BY 7 (use z = 2**24-1; a = 249249) */
GSYM($$divI_7)
.export $$divI_7,millicode
comb,<,n x2,0,LREF(neg7)
LSYM(7)
addi 1,x2,x2 /* cannot overflow */
shd 0,x2,29,x1
sh3add x2,x2,x2
addc x1,0,x1
LSYM(pos7)
shd x1,x2,26,t1
shd x2,0,26,t2
add x2,t2,x2
addc x1,t1,x1
shd x1,x2,20,t1
shd x2,0,20,t2
add x2,t2,x2
addc x1,t1,t1
/* computed <t1,x2>. Now divide it by (2**24 - 1) */
copy 0,x1
shd,= t1,x2,24,t1 /* tentative quotient */
LSYM(1)
addb,tr t1,x1,LREF(2) /* add to previous quotient */
extru x2,31,24,x2 /* new remainder (unadjusted) */
MILLIRETN
LSYM(2)
addb,tr t1,x2,LREF(1) /* adjust remainder */
extru,= x2,7,8,t1 /* new quotient */
LSYM(neg7)
subi 1,x2,x2 /* negate x2 and add 1 */
LSYM(8)
shd 0,x2,29,x1
sh3add x2,x2,x2
addc x1,0,x1
LSYM(neg7_shift)
shd x1,x2,26,t1
shd x2,0,26,t2
add x2,t2,x2
addc x1,t1,x1
shd x1,x2,20,t1
shd x2,0,20,t2
add x2,t2,x2
addc x1,t1,t1
/* computed <t1,x2>. Now divide it by (2**24 - 1) */
copy 0,x1
shd,= t1,x2,24,t1 /* tentative quotient */
LSYM(3)
addb,tr t1,x1,LREF(4) /* add to previous quotient */
extru x2,31,24,x2 /* new remainder (unadjusted) */
MILLIRET
sub 0,x1,x1 /* negate result */
LSYM(4)
addb,tr t1,x2,LREF(3) /* adjust remainder */
extru,= x2,7,8,t1 /* new quotient */
GSYM($$divU_7)
.export $$divU_7,millicode
addi 1,x2,x2 /* can carry */
addc 0,0,x1
shd x1,x2,29,t1
sh3add x2,x2,x2
b LREF(pos7)
addc t1,x1,x1
/* DIVISION BY 9 (use z = 2**24-1; a = 1c71c7) */
GSYM($$divI_9)
.export $$divI_9,millicode
comb,<,n x2,0,LREF(neg9)
addi 1,x2,x2 /* cannot overflow */
shd 0,x2,29,t1
shd x2,0,29,t2
sub t2,x2,x2
b LREF(pos7)
subb t1,0,x1
LSYM(neg9)
subi 1,x2,x2 /* negate and add 1 */
shd 0,x2,29,t1
shd x2,0,29,t2
sub t2,x2,x2
b LREF(neg7_shift)
subb t1,0,x1
GSYM($$divU_9)
.export $$divU_9,millicode
addi 1,x2,x2 /* can carry */
addc 0,0,x1
shd x1,x2,29,t1
shd x2,0,29,t2
sub t2,x2,x2
b LREF(pos7)
subb t1,x1,x1
/* DIVISION BY 14 (shift to divide by 2 then divide by 7) */
GSYM($$divI_14)
.export $$divI_14,millicode
comb,<,n x2,0,LREF(neg14)
GSYM($$divU_14)
.export $$divU_14,millicode
b LREF(7) /* go to 7 case */
extru x2,30,31,x2 /* divide by 2 */
LSYM(neg14)
subi 2,x2,x2 /* negate (and add 2) */
b LREF(8)
extru x2,30,31,x2 /* divide by 2 */
.exit
.procend
.end
#endif
#ifdef L_mulI
/* VERSION "@(#)$$mulI $ Revision: 12.4 $ $ Date: 94/03/17 17:18:51 $" */
/******************************************************************************
This routine is used on PA2.0 processors when gcc -mno-fpregs is used
ROUTINE: $$mulI
DESCRIPTION:
$$mulI multiplies two single word integers, giving a single
word result.
INPUT REGISTERS:
arg0 = Operand 1
arg1 = Operand 2
r31 == return pc
sr0 == return space when called externally
OUTPUT REGISTERS:
arg0 = undefined
arg1 = undefined
ret1 = result
OTHER REGISTERS AFFECTED:
r1 = undefined
SIDE EFFECTS:
Causes a trap under the following conditions: NONE
Changes memory at the following places: NONE
PERMISSIBLE CONTEXT:
Unwindable
Does not create a stack frame
Is usable for internal or external millicode
DISCUSSION:
Calls other millicode routines via mrp: NONE
Calls other millicode routines: NONE
***************************************************************************/
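/* For reference, a C model of the overall strategy below: consume the
(smaller) multiplier eight bits at a time, add in a partial product
for the low byte, then scale the multiplicand by 256 and repeat.
In the assembly each byte's partial product comes from the 256-way
blr dispatch into the shift-and-add table that follows; the initial
argument swap and sign handling are speed optimizations, since the
byte loop is already correct modulo 2**32. Illustrative only. */
#if 0
#include <stdint.h>
static uint32_t
mulI_model (uint32_t a0, uint32_t a1)
{
uint32_t r = 0;
while (a1 != 0)
{
r += a0 * (a1 & 0xff); /* the LSYM(x*) entries compute this */
a0 <<= 8; /* a0__256a0 */
a1 >>= 8; /* extru a1,23,24,a1 */
}
return r;
}
#endif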
#define a0 %arg0
#define a1 %arg1
#define t0 %r1
#define r %ret1
#define a0__128a0 zdep a0,24,25,a0
#define a0__256a0 zdep a0,23,24,a0
#define a1_ne_0_b_l0 comb,<> a1,0,LREF(l0)
#define a1_ne_0_b_l1 comb,<> a1,0,LREF(l1)
#define a1_ne_0_b_l2 comb,<> a1,0,LREF(l2)
#define b_n_ret_t0 b,n LREF(ret_t0)
#define b_e_shift b LREF(e_shift)
#define b_e_t0ma0 b LREF(e_t0ma0)
#define b_e_t0 b LREF(e_t0)
#define b_e_t0a0 b LREF(e_t0a0)
#define b_e_t02a0 b LREF(e_t02a0)
#define b_e_t04a0 b LREF(e_t04a0)
#define b_e_2t0 b LREF(e_2t0)
#define b_e_2t0a0 b LREF(e_2t0a0)
#define b_e_2t04a0 b LREF(e2t04a0)
#define b_e_3t0 b LREF(e_3t0)
#define b_e_4t0 b LREF(e_4t0)
#define b_e_4t0a0 b LREF(e_4t0a0)
#define b_e_4t08a0 b LREF(e4t08a0)
#define b_e_5t0 b LREF(e_5t0)
#define b_e_8t0 b LREF(e_8t0)
#define b_e_8t0a0 b LREF(e_8t0a0)
#define r__r_a0 add r,a0,r
#define r__r_2a0 sh1add a0,r,r
#define r__r_4a0 sh2add a0,r,r
#define r__r_8a0 sh3add a0,r,r
#define r__r_t0 add r,t0,r
#define r__r_2t0 sh1add t0,r,r
#define r__r_4t0 sh2add t0,r,r
#define r__r_8t0 sh3add t0,r,r
#define t0__3a0 sh1add a0,a0,t0
#define t0__4a0 sh2add a0,0,t0
#define t0__5a0 sh2add a0,a0,t0
#define t0__8a0 sh3add a0,0,t0
#define t0__9a0 sh3add a0,a0,t0
#define t0__16a0 zdep a0,27,28,t0
#define t0__32a0 zdep a0,26,27,t0
#define t0__64a0 zdep a0,25,26,t0
#define t0__128a0 zdep a0,24,25,t0
#define t0__t0ma0 sub t0,a0,t0
#define t0__t0_a0 add t0,a0,t0
#define t0__t0_2a0 sh1add a0,t0,t0
#define t0__t0_4a0 sh2add a0,t0,t0
#define t0__t0_8a0 sh3add a0,t0,t0
#define t0__2t0_a0 sh1add t0,a0,t0
#define t0__3t0 sh1add t0,t0,t0
#define t0__4t0 sh2add t0,0,t0
#define t0__4t0_a0 sh2add t0,a0,t0
#define t0__5t0 sh2add t0,t0,t0
#define t0__8t0 sh3add t0,0,t0
#define t0__8t0_a0 sh3add t0,a0,t0
#define t0__9t0 sh3add t0,t0,t0
#define t0__16t0 zdep t0,27,28,t0
#define t0__32t0 zdep t0,26,27,t0
#define t0__256a0 zdep a0,23,24,t0
SUBSPA_MILLI
ATTR_MILLI
.align 16
.proc
.callinfo millicode
.export $$mulI,millicode
GSYM($$mulI)
combt,<<= a1,a0,LREF(l4) /* swap args if unsigned a1>a0 */
copy 0,r /* zero out the result */
xor a0,a1,a0 /* swap a0 & a1 using the */
xor a0,a1,a1 /* old xor trick */
xor a0,a1,a0
LSYM(l4)
combt,<= 0,a0,LREF(l3) /* if a0>=0 then proceed like unsigned */
zdep a1,30,8,t0 /* t0 = (a1&0xff)<<1 ********* */
sub,> 0,a1,t0 /* otherwise negate both and */
combt,<=,n a0,t0,LREF(l2) /* swap back if |a0|<|a1| */
sub 0,a0,a1
movb,tr,n t0,a0,LREF(l2) /* 10th inst. */
LSYM(l0) r__r_t0 /* add in this partial product */
LSYM(l1) a0__256a0 /* a0 <<= 8 ****************** */
LSYM(l2) zdep a1,30,8,t0 /* t0 = (a1&0xff)<<1 ********* */
LSYM(l3) blr t0,0 /* case on these 8 bits ****** */
extru a1,23,24,a1 /* a1 >>= 8 ****************** */
/*16 insts before this. */
/* a0 <<= 8 ************************** */
LSYM(x0) a1_ne_0_b_l2 ! a0__256a0 ! MILLIRETN ! nop
LSYM(x1) a1_ne_0_b_l1 ! r__r_a0 ! MILLIRETN ! nop
LSYM(x2) a1_ne_0_b_l1 ! r__r_2a0 ! MILLIRETN ! nop
LSYM(x3) a1_ne_0_b_l0 ! t0__3a0 ! MILLIRET ! r__r_t0
LSYM(x4) a1_ne_0_b_l1 ! r__r_4a0 ! MILLIRETN ! nop
LSYM(x5) a1_ne_0_b_l0 ! t0__5a0 ! MILLIRET ! r__r_t0
LSYM(x6) t0__3a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
LSYM(x7) t0__3a0 ! a1_ne_0_b_l0 ! r__r_4a0 ! b_n_ret_t0
LSYM(x8) a1_ne_0_b_l1 ! r__r_8a0 ! MILLIRETN ! nop
LSYM(x9) a1_ne_0_b_l0 ! t0__9a0 ! MILLIRET ! r__r_t0
LSYM(x10) t0__5a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
LSYM(x11) t0__3a0 ! a1_ne_0_b_l0 ! r__r_8a0 ! b_n_ret_t0
LSYM(x12) t0__3a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
LSYM(x13) t0__5a0 ! a1_ne_0_b_l0 ! r__r_8a0 ! b_n_ret_t0
LSYM(x14) t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x15) t0__5a0 ! a1_ne_0_b_l0 ! t0__3t0 ! b_n_ret_t0
LSYM(x16) t0__16a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
LSYM(x17) t0__9a0 ! a1_ne_0_b_l0 ! t0__t0_8a0 ! b_n_ret_t0
LSYM(x18) t0__9a0 ! a1_ne_0_b_l1 ! r__r_2t0 ! MILLIRETN
LSYM(x19) t0__9a0 ! a1_ne_0_b_l0 ! t0__2t0_a0 ! b_n_ret_t0
LSYM(x20) t0__5a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
LSYM(x21) t0__5a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
LSYM(x22) t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x23) t0__5a0 ! t0__2t0_a0 ! b_e_t0 ! t0__2t0_a0
LSYM(x24) t0__3a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
LSYM(x25) t0__5a0 ! a1_ne_0_b_l0 ! t0__5t0 ! b_n_ret_t0
LSYM(x26) t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x27) t0__3a0 ! a1_ne_0_b_l0 ! t0__9t0 ! b_n_ret_t0
LSYM(x28) t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x29) t0__3a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
LSYM(x30) t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_2t0
LSYM(x31) t0__32a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
LSYM(x32) t0__32a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
LSYM(x33) t0__8a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
LSYM(x34) t0__16a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x35) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__t0_8a0
LSYM(x36) t0__9a0 ! a1_ne_0_b_l1 ! r__r_4t0 ! MILLIRETN
LSYM(x37) t0__9a0 ! a1_ne_0_b_l0 ! t0__4t0_a0 ! b_n_ret_t0
LSYM(x38) t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x39) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__2t0_a0
LSYM(x40) t0__5a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
LSYM(x41) t0__5a0 ! a1_ne_0_b_l0 ! t0__8t0_a0 ! b_n_ret_t0
LSYM(x42) t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x43) t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
LSYM(x44) t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x45) t0__9a0 ! a1_ne_0_b_l0 ! t0__5t0 ! b_n_ret_t0
LSYM(x46) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_a0
LSYM(x47) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_2a0
LSYM(x48) t0__3a0 ! a1_ne_0_b_l0 ! t0__16t0 ! b_n_ret_t0
LSYM(x49) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__t0_4a0
LSYM(x50) t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_2t0
LSYM(x51) t0__9a0 ! t0__t0_8a0 ! b_e_t0 ! t0__3t0
LSYM(x52) t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x53) t0__3a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
LSYM(x54) t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_2t0
LSYM(x55) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__2t0_a0
LSYM(x56) t0__3a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
LSYM(x57) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__3t0
LSYM(x58) t0__3a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
LSYM(x59) t0__9a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__3t0
LSYM(x60) t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
LSYM(x61) t0__5a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
LSYM(x62) t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
LSYM(x63) t0__64a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
LSYM(x64) t0__64a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
LSYM(x65) t0__8a0 ! a1_ne_0_b_l0 ! t0__8t0_a0 ! b_n_ret_t0
LSYM(x66) t0__32a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x67) t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
LSYM(x68) t0__8a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x69) t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
LSYM(x70) t0__64a0 ! t0__t0_4a0 ! b_e_t0 ! t0__t0_2a0
LSYM(x71) t0__9a0 ! t0__8t0 ! b_e_t0 ! t0__t0ma0
LSYM(x72) t0__9a0 ! a1_ne_0_b_l1 ! r__r_8t0 ! MILLIRETN
LSYM(x73) t0__9a0 ! t0__8t0_a0 ! b_e_shift ! r__r_t0
LSYM(x74) t0__9a0 ! t0__4t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x75) t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__2t0_a0
LSYM(x76) t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x77) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__4t0_a0
LSYM(x78) t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__2t0_a0
LSYM(x79) t0__16a0 ! t0__5t0 ! b_e_t0 ! t0__t0ma0
LSYM(x80) t0__16a0 ! t0__5t0 ! b_e_shift ! r__r_t0
LSYM(x81) t0__9a0 ! t0__9t0 ! b_e_shift ! r__r_t0
LSYM(x82) t0__5a0 ! t0__8t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x83) t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
LSYM(x84) t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x85) t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__5t0
LSYM(x86) t0__5a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
LSYM(x87) t0__9a0 ! t0__9t0 ! b_e_t02a0 ! t0__t0_4a0
LSYM(x88) t0__5a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
LSYM(x89) t0__5a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
LSYM(x90) t0__9a0 ! t0__5t0 ! b_e_shift ! r__r_2t0
LSYM(x91) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__2t0_a0
LSYM(x92) t0__5a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__2t0_a0
LSYM(x93) t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__3t0
LSYM(x94) t0__9a0 ! t0__5t0 ! b_e_2t0 ! t0__t0_2a0
LSYM(x95) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__5t0
LSYM(x96) t0__8a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
LSYM(x97) t0__8a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
LSYM(x98) t0__32a0 ! t0__3t0 ! b_e_t0 ! t0__t0_2a0
LSYM(x99) t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__3t0
LSYM(x100) t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_4t0
LSYM(x101) t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
LSYM(x102) t0__32a0 ! t0__t0_2a0 ! b_e_t0 ! t0__3t0
LSYM(x103) t0__5a0 ! t0__5t0 ! b_e_t02a0 ! t0__4t0_a0
LSYM(x104) t0__3a0 ! t0__4t0_a0 ! b_e_shift ! r__r_8t0
LSYM(x105) t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
LSYM(x106) t0__3a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__4t0_a0
LSYM(x107) t0__9a0 ! t0__t0_4a0 ! b_e_t02a0 ! t0__8t0_a0
LSYM(x108) t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_4t0
LSYM(x109) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__4t0_a0
LSYM(x110) t0__9a0 ! t0__3t0 ! b_e_2t0 ! t0__2t0_a0
LSYM(x111) t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__3t0
LSYM(x112) t0__3a0 ! t0__2t0_a0 ! b_e_t0 ! t0__16t0
LSYM(x113) t0__9a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__3t0
LSYM(x114) t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__3t0
LSYM(x115) t0__9a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__3t0
LSYM(x116) t0__3a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__4t0_a0
LSYM(x117) t0__3a0 ! t0__4t0_a0 ! b_e_t0 ! t0__9t0
LSYM(x118) t0__3a0 ! t0__4t0_a0 ! b_e_t0a0 ! t0__9t0
LSYM(x119) t0__3a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__9t0
LSYM(x120) t0__5a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
LSYM(x121) t0__5a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
LSYM(x122) t0__5a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
LSYM(x123) t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
LSYM(x124) t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_4t0
LSYM(x125) t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__5t0
LSYM(x126) t0__64a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
LSYM(x127) t0__128a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
LSYM(x128) t0__128a0 ! a1_ne_0_b_l1 ! r__r_t0 ! MILLIRETN
LSYM(x129) t0__128a0 ! a1_ne_0_b_l0 ! t0__t0_a0 ! b_n_ret_t0
LSYM(x130) t0__64a0 ! t0__t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x131) t0__8a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
LSYM(x132) t0__8a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x133) t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
LSYM(x134) t0__8a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
LSYM(x135) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__3t0
LSYM(x136) t0__8a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
LSYM(x137) t0__8a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
LSYM(x138) t0__8a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
LSYM(x139) t0__8a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__4t0_a0
LSYM(x140) t0__3a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__5t0
LSYM(x141) t0__8a0 ! t0__2t0_a0 ! b_e_4t0a0 ! t0__2t0_a0
LSYM(x142) t0__9a0 ! t0__8t0 ! b_e_2t0 ! t0__t0ma0
LSYM(x143) t0__16a0 ! t0__9t0 ! b_e_t0 ! t0__t0ma0
LSYM(x144) t0__9a0 ! t0__8t0 ! b_e_shift ! r__r_2t0
LSYM(x145) t0__9a0 ! t0__8t0 ! b_e_t0 ! t0__2t0_a0
LSYM(x146) t0__9a0 ! t0__8t0_a0 ! b_e_shift ! r__r_2t0
LSYM(x147) t0__9a0 ! t0__8t0_a0 ! b_e_t0 ! t0__2t0_a0
LSYM(x148) t0__9a0 ! t0__4t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x149) t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__4t0_a0
LSYM(x150) t0__9a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__2t0_a0
LSYM(x151) t0__9a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__2t0_a0
LSYM(x152) t0__9a0 ! t0__2t0_a0 ! b_e_shift ! r__r_8t0
LSYM(x153) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__8t0_a0
LSYM(x154) t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__4t0_a0
LSYM(x155) t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__5t0
LSYM(x156) t0__9a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__2t0_a0
LSYM(x157) t0__32a0 ! t0__t0ma0 ! b_e_t02a0 ! t0__5t0
LSYM(x158) t0__16a0 ! t0__5t0 ! b_e_2t0 ! t0__t0ma0
LSYM(x159) t0__32a0 ! t0__5t0 ! b_e_t0 ! t0__t0ma0
LSYM(x160) t0__5a0 ! t0__4t0 ! b_e_shift ! r__r_8t0
LSYM(x161) t0__8a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
LSYM(x162) t0__9a0 ! t0__9t0 ! b_e_shift ! r__r_2t0
LSYM(x163) t0__9a0 ! t0__9t0 ! b_e_t0 ! t0__2t0_a0
LSYM(x164) t0__5a0 ! t0__8t0_a0 ! b_e_shift ! r__r_4t0
LSYM(x165) t0__8a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
LSYM(x166) t0__5a0 ! t0__8t0_a0 ! b_e_2t0 ! t0__2t0_a0
LSYM(x167) t0__5a0 ! t0__8t0_a0 ! b_e_2t0a0 ! t0__2t0_a0
LSYM(x168) t0__5a0 ! t0__4t0_a0 ! b_e_shift ! r__r_8t0
LSYM(x169) t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__8t0_a0
LSYM(x170) t0__32a0 ! t0__t0_2a0 ! b_e_t0 ! t0__5t0
LSYM(x171) t0__9a0 ! t0__2t0_a0 ! b_e_t0 ! t0__9t0
LSYM(x172) t0__5a0 ! t0__4t0_a0 ! b_e_4t0 ! t0__2t0_a0
LSYM(x173) t0__9a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__9t0
LSYM(x174) t0__32a0 ! t0__t0_2a0 ! b_e_t04a0 ! t0__5t0
LSYM(x175) t0__8a0 ! t0__2t0_a0 ! b_e_5t0 ! t0__2t0_a0
LSYM(x176) t0__5a0 ! t0__4t0_a0 ! b_e_8t0 ! t0__t0_a0
LSYM(x177) t0__5a0 ! t0__4t0_a0 ! b_e_8t0a0 ! t0__t0_a0
LSYM(x178) t0__5a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__8t0_a0
LSYM(x179) t0__5a0 ! t0__2t0_a0 ! b_e_2t0a0 ! t0__8t0_a0
LSYM(x180) t0__9a0 ! t0__5t0 ! b_e_shift ! r__r_4t0
LSYM(x181) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__4t0_a0
LSYM(x182) t0__9a0 ! t0__5t0 ! b_e_2t0 ! t0__2t0_a0
LSYM(x183) t0__9a0 ! t0__5t0 ! b_e_2t0a0 ! t0__2t0_a0
LSYM(x184) t0__5a0 ! t0__9t0 ! b_e_4t0 ! t0__t0_a0
LSYM(x185) t0__9a0 ! t0__4t0_a0 ! b_e_t0 ! t0__5t0
LSYM(x186) t0__32a0 ! t0__t0ma0 ! b_e_2t0 ! t0__3t0
LSYM(x187) t0__9a0 ! t0__4t0_a0 ! b_e_t02a0 ! t0__5t0
LSYM(x188) t0__9a0 ! t0__5t0 ! b_e_4t0 ! t0__t0_2a0
LSYM(x189) t0__5a0 ! t0__4t0_a0 ! b_e_t0 ! t0__9t0
LSYM(x190) t0__9a0 ! t0__2t0_a0 ! b_e_2t0 ! t0__5t0
LSYM(x191) t0__64a0 ! t0__3t0 ! b_e_t0 ! t0__t0ma0
LSYM(x192) t0__8a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
LSYM(x193) t0__8a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
LSYM(x194) t0__8a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
LSYM(x195) t0__8a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
LSYM(x196) t0__8a0 ! t0__3t0 ! b_e_4t0 ! t0__2t0_a0
LSYM(x197) t0__8a0 ! t0__3t0 ! b_e_4t0a0 ! t0__2t0_a0
LSYM(x198) t0__64a0 ! t0__t0_2a0 ! b_e_t0 ! t0__3t0
LSYM(x199) t0__8a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__3t0
LSYM(x200) t0__5a0 ! t0__5t0 ! b_e_shift ! r__r_8t0
LSYM(x201) t0__5a0 ! t0__5t0 ! b_e_t0 ! t0__8t0_a0
LSYM(x202) t0__5a0 ! t0__5t0 ! b_e_2t0 ! t0__4t0_a0
LSYM(x203) t0__5a0 ! t0__5t0 ! b_e_2t0a0 ! t0__4t0_a0
LSYM(x204) t0__8a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__3t0
LSYM(x205) t0__5a0 ! t0__8t0_a0 ! b_e_t0 ! t0__5t0
LSYM(x206) t0__64a0 ! t0__t0_4a0 ! b_e_t02a0 ! t0__3t0
LSYM(x207) t0__8a0 ! t0__2t0_a0 ! b_e_3t0 ! t0__4t0_a0
LSYM(x208) t0__5a0 ! t0__5t0 ! b_e_8t0 ! t0__t0_a0
LSYM(x209) t0__5a0 ! t0__5t0 ! b_e_8t0a0 ! t0__t0_a0
LSYM(x210) t0__5a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__5t0
LSYM(x211) t0__5a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__5t0
LSYM(x212) t0__3a0 ! t0__4t0_a0 ! b_e_4t0 ! t0__4t0_a0
LSYM(x213) t0__3a0 ! t0__4t0_a0 ! b_e_4t0a0 ! t0__4t0_a0
LSYM(x214) t0__9a0 ! t0__t0_4a0 ! b_e_2t04a0 ! t0__8t0_a0
LSYM(x215) t0__5a0 ! t0__4t0_a0 ! b_e_5t0 ! t0__2t0_a0
LSYM(x216) t0__9a0 ! t0__3t0 ! b_e_shift ! r__r_8t0
LSYM(x217) t0__9a0 ! t0__3t0 ! b_e_t0 ! t0__8t0_a0
LSYM(x218) t0__9a0 ! t0__3t0 ! b_e_2t0 ! t0__4t0_a0
LSYM(x219) t0__9a0 ! t0__8t0_a0 ! b_e_t0 ! t0__3t0
LSYM(x220) t0__3a0 ! t0__9t0 ! b_e_4t0 ! t0__2t0_a0
LSYM(x221) t0__3a0 ! t0__9t0 ! b_e_4t0a0 ! t0__2t0_a0
LSYM(x222) t0__9a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__3t0
LSYM(x223) t0__9a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__3t0
LSYM(x224) t0__9a0 ! t0__3t0 ! b_e_8t0 ! t0__t0_a0
LSYM(x225) t0__9a0 ! t0__5t0 ! b_e_t0 ! t0__5t0
LSYM(x226) t0__3a0 ! t0__2t0_a0 ! b_e_t02a0 ! t0__32t0
LSYM(x227) t0__9a0 ! t0__5t0 ! b_e_t02a0 ! t0__5t0
LSYM(x228) t0__9a0 ! t0__2t0_a0 ! b_e_4t0 ! t0__3t0
LSYM(x229) t0__9a0 ! t0__2t0_a0 ! b_e_4t0a0 ! t0__3t0
LSYM(x230) t0__9a0 ! t0__5t0 ! b_e_5t0 ! t0__t0_a0
LSYM(x231) t0__9a0 ! t0__2t0_a0 ! b_e_3t0 ! t0__4t0_a0
LSYM(x232) t0__3a0 ! t0__2t0_a0 ! b_e_8t0 ! t0__4t0_a0
LSYM(x233) t0__3a0 ! t0__2t0_a0 ! b_e_8t0a0 ! t0__4t0_a0
LSYM(x234) t0__3a0 ! t0__4t0_a0 ! b_e_2t0 ! t0__9t0
LSYM(x235) t0__3a0 ! t0__4t0_a0 ! b_e_2t0a0 ! t0__9t0
LSYM(x236) t0__9a0 ! t0__2t0_a0 ! b_e_4t08a0 ! t0__3t0
LSYM(x237) t0__16a0 ! t0__5t0 ! b_e_3t0 ! t0__t0ma0
LSYM(x238) t0__3a0 ! t0__4t0_a0 ! b_e_2t04a0 ! t0__9t0
LSYM(x239) t0__16a0 ! t0__5t0 ! b_e_t0ma0 ! t0__3t0
LSYM(x240) t0__9a0 ! t0__t0_a0 ! b_e_8t0 ! t0__3t0
LSYM(x241) t0__9a0 ! t0__t0_a0 ! b_e_8t0a0 ! t0__3t0
LSYM(x242) t0__5a0 ! t0__3t0 ! b_e_2t0 ! t0__8t0_a0
LSYM(x243) t0__9a0 ! t0__9t0 ! b_e_t0 ! t0__3t0
LSYM(x244) t0__5a0 ! t0__3t0 ! b_e_4t0 ! t0__4t0_a0
LSYM(x245) t0__8a0 ! t0__3t0 ! b_e_5t0 ! t0__2t0_a0
LSYM(x246) t0__5a0 ! t0__8t0_a0 ! b_e_2t0 ! t0__3t0
LSYM(x247) t0__5a0 ! t0__8t0_a0 ! b_e_2t0a0 ! t0__3t0
LSYM(x248) t0__32a0 ! t0__t0ma0 ! b_e_shift ! r__r_8t0
LSYM(x249) t0__32a0 ! t0__t0ma0 ! b_e_t0 ! t0__8t0_a0
LSYM(x250) t0__5a0 ! t0__5t0 ! b_e_2t0 ! t0__5t0
LSYM(x251) t0__5a0 ! t0__5t0 ! b_e_2t0a0 ! t0__5t0
LSYM(x252) t0__64a0 ! t0__t0ma0 ! b_e_shift ! r__r_4t0
LSYM(x253) t0__64a0 ! t0__t0ma0 ! b_e_t0 ! t0__4t0_a0
LSYM(x254) t0__128a0 ! t0__t0ma0 ! b_e_shift ! r__r_2t0
LSYM(x255) t0__256a0 ! a1_ne_0_b_l0 ! t0__t0ma0 ! b_n_ret_t0
/* 1040 insts before this. */
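/* Reading the table: each entry strings together primitive shift-and-add
   steps to multiply by a fixed constant, then dispatches through the
   b_e_* / a1_ne_0_b_* steps when more of the multiplier remains in a1.
   For example the x45 entry, "t0__9a0 ! a1_ne_0_b_l0 ! t0__5t0 !
   b_n_ret_t0", computes t0 = 9*a0 and then t0 = 5*t0 = 45*a0.  A C
   model of just that arithmetic (illustrative only, not part of the
   millicode):

     unsigned mul45(unsigned a0)
     {
         unsigned t0 = (a0 << 3) + a0;   // t0 = 9*a0
         t0 = (t0 << 2) + t0;            // t0 = 5*t0 = 45*a0
         return t0;
     }
*/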
LSYM(ret_t0) MILLIRET
LSYM(e_t0) r__r_t0
LSYM(e_shift) a1_ne_0_b_l2
a0__256a0 /* a0 <<= 8 */
MILLIRETN
LSYM(e_t0ma0) a1_ne_0_b_l0
t0__t0ma0
MILLIRET
r__r_t0
LSYM(e_t0a0) a1_ne_0_b_l0
t0__t0_a0
MILLIRET
r__r_t0
LSYM(e_t02a0) a1_ne_0_b_l0
t0__t0_2a0
MILLIRET
r__r_t0
LSYM(e_t04a0) a1_ne_0_b_l0
t0__t0_4a0
MILLIRET
r__r_t0
LSYM(e_2t0) a1_ne_0_b_l1
r__r_2t0
MILLIRETN
LSYM(e_2t0a0) a1_ne_0_b_l0
t0__2t0_a0
MILLIRET
r__r_t0
LSYM(e2t04a0) t0__t0_2a0
a1_ne_0_b_l1
r__r_2t0
MILLIRETN
LSYM(e_3t0) a1_ne_0_b_l0
t0__3t0
MILLIRET
r__r_t0
LSYM(e_4t0) a1_ne_0_b_l1
r__r_4t0
MILLIRETN
LSYM(e_4t0a0) a1_ne_0_b_l0
t0__4t0_a0
MILLIRET
r__r_t0
LSYM(e4t08a0) t0__t0_2a0
a1_ne_0_b_l1
r__r_4t0
MILLIRETN
LSYM(e_5t0) a1_ne_0_b_l0
t0__5t0
MILLIRET
r__r_t0
LSYM(e_8t0) a1_ne_0_b_l1
r__r_8t0
MILLIRETN
LSYM(e_8t0a0) a1_ne_0_b_l0
t0__8t0_a0
MILLIRET
r__r_t0
.procend
.end
#endif
|
4ms/metamodule-plugin-sdk
| 1,746
|
plugin-libc/libgcc/config/csky/crtn.S
|
# Terminate C-SKY .init and .fini sections.
# Copyright (C) 2018-2022 Free Software Foundation, Inc.
# Contributed by C-SKY Microsystems and Mentor Graphics.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
.file "crtn.S"
# Use the GLIBC version when targeting GNU/Linux.
#if defined(__gnu_linux__)
.section .init,"ax",@progbits
ldw lr, (sp, 0)
addi sp, 8
rts
.section .fini,"ax",@progbits
ldw lr, (sp, 0)
addi sp, 8
rts
#else /* !defined(__gnu_linux__) */
.section ".init"
ldw lr, (sp, 12)
addi sp, 16
jmp lr
.section ".fini"
ldw lr, (sp, 12)
addi sp, 16
jmp lr
# Th-th-th-that is all folks!
#endif /* defined(__gnu_linux__) */
|
4ms/metamodule-plugin-sdk
| 3,106
|
plugin-libc/libgcc/config/csky/crti.S
|
# Define _init and _fini entry points for C-SKY.
# Copyright (C) 2018-2022 Free Software Foundation, Inc.
# Contributed by C-SKY Microsystems and Mentor Graphics.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
.file "crti.S"
/* We use more complicated versions of this code with GLIBC. */
#if defined(__gnu_linux__)
#ifndef PREINIT_FUNCTION
# define PREINIT_FUNCTION __gmon_start__
#endif
#ifndef PREINIT_FUNCTION_WEAK
# define PREINIT_FUNCTION_WEAK 1
#endif
#if PREINIT_FUNCTION_WEAK
.global PREINIT_FUNCTION
.weak PREINIT_FUNCTION
.align 4
.type call_weak_fn, %function
call_weak_fn:
// push lr
subi sp, 4
stw lr, (sp)
#ifdef __PIC__
lrw a2, PREINIT_FUNCTION@GOT
addu a2, gb
ldw a2, (a2)
#else
lrw a2, PREINIT_FUNCTION
#endif
cmpnei a2, 0
bf 1f
jsr a2
1:
// pop lr
ldw lr, (sp)
addi sp, 4
rts
.align 4
#else
.hidden PREINIT_FUNCTION
#endif /* PREINIT_FUNCTION_WEAK */
.section .init,"ax",@progbits
.align 4
.globl _init
.type _init, @function
_init:
subi sp, 8
stw lr, (sp, 0)
#ifdef __PIC__
// stw gb, (sp, 4)
bsr .Lgetpc
.Lgetpc:
lrw gb, .Lgetpc@GOTPC
add gb, lr
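// PC-relative GOT setup: the bsr above leaves the address of .Lgetpc
// in lr, and .Lgetpc@GOTPC resolves to the distance from .Lgetpc to
// the GOT, so after the add gb holds the GOT base address.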
#endif
#if PREINIT_FUNCTION_WEAK
#ifdef __PIC__
lrw a2, call_weak_fn@GOTOFF
add a2, gb
jsr a2
#else
jsri call_weak_fn
#endif
#else /* !PREINIT_FUNCTION_WEAK */
#ifdef __PIC__
lrw a2, PREINIT_FUNCTION@PLT
addu a2, gb
ldw a2, (a2)
jsr a2
#else
jsri PREINIT_FUNCTION
#endif
#endif /* PREINIT_FUNCTION_WEAK */
br 2f
.literals
.align 4
2:
.section .fini,"ax",@progbits
.align 4
.globl _fini
.type _fini, @function
_fini:
subi sp,8
stw lr, (sp, 0)
br 2f
.literals
.align 4
2:
/* These are the non-GLIBC versions. */
#else /* !defined(__gnu_linux__) */
.section ".init"
.global _init
.type _init,@function
.align 2
_init:
subi sp, 16
st.w lr, (sp, 12)
mov r0, r0
.section ".fini"
.global _fini
.type _fini,@function
.align 2
_fini:
subi sp, 16
st.w lr, (sp, 12)
mov r0, r0
#endif /* defined(__gnu_linux__) */
|
4ms/metamodule-plugin-sdk
| 12,754
|
plugin-libc/libgcc/config/csky/lib1funcs.S
|
/* libgcc routines for C-SKY.
Copyright (C) 2018-2022 Free Software Foundation, Inc.
Contributed by C-SKY Microsystems and Mentor Graphics.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Use the right prefix for global labels. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
#define SYM(x) CONCAT1 (__, x)
#ifndef __CSKYBE__
#define xl r0
#define xh r1
#define yl r2
#define yh r3
#else
#define xh r0
#define xl r1
#define yh r2
#define yl r3
#endif
#ifdef __ELF__
#define TYPE(x) .type SYM (x),@function
#define SIZE(x) .size SYM (x), . - SYM (x)
#else
#define TYPE(x)
#define SIZE(x)
#endif
.macro FUNC_START name
.text
.align 2
.globl SYM (\name)
TYPE (\name)
SYM (\name):
.endm
.macro FUNC_END name
SIZE (\name)
.endm
/* Emulate FF1 ("fast find 1") instruction on ck801.
Result goes in rx, clobbering ry. */
#if defined(__CK801__)
.macro FF1_M rx, ry
movi \rx, 32
10:
cmphsi \ry, 1
bf 11f
subi \rx, \rx, 1
lsri \ry, \ry, 1
br 10b
11:
.endm
#else
.macro FF1_M rx, ry
ff1 \rx, \ry
.endm
#endif
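/* In other words, FF1_M computes a count of leading zeros: rx ends up
   as 32 when ry is zero, and otherwise as 31 minus the index of the
   highest set bit of ry. */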
/* Likewise emulate lslc instruction ("logical left shift to C") on CK801. */
#if defined(__CK801__)
.macro LSLC_M rx
cmpne \rx, \rx
addc \rx, \rx
.endm
#else
.macro LSLC_M rx
lslc \rx
.endm
#endif
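/* LSLC_M shifts rx left one bit, leaving the bit shifted out in the
   carry flag. The CK801 emulation first clears C with the always-false
   compare cmpne rx,rx, then computes rx + rx + C with addc, whose
   carry-out is the old bit 31 of rx. */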
/* Emulate the abs instruction. */
#if defined(__CK802__)
.macro ABS_M rx
btsti \rx, 31
bf 10f
not \rx
addi \rx, 1
10:
.endm
#elif defined(__CK801__)
.macro ABS_M rx
cmplti \rx, 1
bf 10f
not \rx
addi \rx, 1
10:
.endm
#else
.macro ABS_M rx
abs \rx
.endm
#endif
/* Emulate the ld.bs/ld.hs ("load byte/halfword, sign-extend")
instructions on ck801, where they are not available. */
#if defined(__CK801__)
.macro LDBS_M rx, ry
ld.b \rx, (\ry, 0x0)
sextb \rx, \rx
.endm
#else
.macro LDBS_M rx, ry
ld.bs \rx, (\ry, 0x0)
.endm
#endif
#if defined(__CK801__)
.macro LDHS_M rx, ry
ld.h \rx, (\ry, 0x0)
sexth \rx, \rx
.endm
#else
.macro LDHS_M rx, ry
ld.hs \rx, (\ry, 0x0)
.endm
#endif
/* Signed and unsigned div/mod/rem functions. */
#ifdef L_udivsi3
FUNC_START udiv32
FUNC_START udivsi3
cmpnei a1, 0 // look for 0 divisor
bt 9f
trap 3 // divide by 0
9:
// control iterations, skip across high order 0 bits in dividend
cmpnei a0, 0
bt 8f
jmp lr // 0 dividend quick return
8:
push l0
movi a2, 1 // a2 is quotient (1 for a sentinel)
mov a3, a0
FF1_M l0, a3 // figure distance to skip
lsl a2, l0 // move the sentinel along (with 0's behind)
lsl a0, l0 // and the low 32 bits of numerator
// FIXME: Is this correct?
mov a3, a1 // looking at divisor
FF1_M l0, a3 // I can move 32-l0 more bits to left.
addi l0, 1 // ok, one short of that...
mov a3, a0
lsr a3, l0 // bits that came from low order...
not l0 // l0 == "32-n" == LEFT distance
addi l0, 33 // this is (32-n)
lsl a2,l0 // fixes the high 32 (quotient)
lsl a0,l0
cmpnei a2,0
bf 4f // the sentinel went away...
// run the remaining bits
1:
LSLC_M a0 // 1 bit left shift of a3-a0
addc a3, a3
cmphs a3, a1 // upper 32 of dividend >= divisor?
bf 2f
subu a3, a1 // if yes, subtract divisor
2:
addc a2, a2 // shift by 1 and count subtracts
bf 1b // if sentinel falls out of quotient, stop
4:
mov a0, a2 // return quotient
mov a1, a3 // and piggyback the remainder
pop l0
FUNC_END udiv32
FUNC_END udivsi3
#endif
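/* A rough C model of the sentinel scheme shared by the div/mod
   routines here (a hypothetical sketch for reading the assembly, not
   part of libgcc; it does only the dividend normalization, whereas
   udivsi3 above additionally pre-shifts using the divisor's leading
   zeros, and it omits the divide-by-zero trap):

     unsigned udiv32_model(unsigned n, unsigned d, unsigned *rem)
     {
         if (n == 0) { *rem = 0; return 0; }   // 0 dividend quick return
         unsigned q = 1;                       // quotient sentinel bit
         while (!(n & 0x80000000u)) {          // skip leading zeros
             n <<= 1; q <<= 1;
         }
         unsigned hi = 0;
         for (;;) {
             unsigned done = q & 0x80000000u;  // sentinel about to fall out?
             hi = (hi << 1) | (n >> 31);       // 1-bit left shift of hi:n
             n <<= 1;
             unsigned bit = (hi >= d);         // trial compare/subtract
             if (bit) hi -= d;
             q = (q << 1) | bit;               // record the quotient bit
             if (done) break;
         }
         *rem = hi;
         return q;                             // quotient
     }
*/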
#ifdef L_umodsi3
FUNC_START urem32
FUNC_START umodsi3
cmpnei a1, 0 // look for 0 divisor
bt 9f
trap 3 // divide by 0
9:
// control iterations, skip across high order 0 bits in dividend
cmpnei a0, 0
bt 8f
jmp lr // 0 dividend quick return
8:
mov a2, a0
FF1_M a3, a2 // figure distance to skip
movi a2, 1 // a2 is quotient (1 for a sentinel)
lsl a2, a3 // move the sentinel along (with 0's behind)
lsl a0, a3 // and the low 32 bits of numerator
movi a3, 0
1:
LSLC_M a0 // 1 bit left shift of a3-a0
addc a3, a3
cmphs a3, a1 // upper 32 of dividend >= divisor?
bf 2f
subu a3, a1 // if yes, subtract divisor
2:
addc a2, a2 // shift by 1 and count subtracts
bf 1b // if sentinel falls out of quotient, stop
4:
mov a0, a3 // and piggyback the remainder
jmp lr
FUNC_END urem32
FUNC_END umodsi3
#endif
#ifdef L_divsi3
FUNC_START div32
FUNC_START divsi3
cmpnei a1, 0 // look for 0 divisor
bt 9f
trap 3 // divide by 0
9:
// control iterations, skip across high order 0 bits in dividend
cmpnei a0, 0
bt 8f
jmp lr // 0 dividend quick return
8:
push l0, l1
mov l1, a0
xor l1, a1 // calc sign of quotient
ABS_M a0
ABS_M a1
movi a2, 1 // a2 is quotient (1 for a sentinel)
mov a3, a0
FF1_M l0, a3 // figure distance to skip
lsl a2, l0 // move the sentinel along (with 0's behind)
lsl a0, l0 // and the low 32 bits of numerator
// FIXME: is this correct?
mov a3, a1 // looking at divisor
FF1_M l0, a3 // I can move 32-l0 more bits to left.
addi l0, 1 // ok, one short of that...
mov a3, a0
lsr a3, l0 // bits that came from low order...
not l0 // l0 == "32-n" == LEFT distance
addi l0, 33 // this is (32-n)
lsl a2,l0 // fixes the high 32 (quotient)
lsl a0,l0
cmpnei a2,0
bf 4f // the sentinel went away...
// run the remaining bits
1:
LSLC_M a0 // 1 bit left shift of a3-a0
addc a3, a3
cmphs a3, a1 // upper 32 of dividend >= divisor?
bf 2f
subu a3, a1 // if yes, subtract divisor
2:
addc a2, a2 // shift by 1 and count subtracts
bf 1b // if sentinel falls out of quotient, stop
4:
mov a0, a2 // return quotient
mov a1, a3 // and piggyback the remainder
LSLC_M l1 // after adjusting for sign
bf 3f
not a0
addi a0, 1
not a1
addi a1, 1
3:
pop l0, l1
FUNC_END div32
FUNC_END divsi3
#endif
#ifdef L_modsi3
FUNC_START rem32
FUNC_START modsi3
push l0
cmpnei a1, 0 // look for 0 divisor
bt 9f
trap 3 // divide by 0
9:
// control iterations, skip across high order 0 bits in dividend
cmpnei a0, 0
bt 8f
pop l0 // 0 dividend quick return
8:
mov l0, a0
ABS_M a0
ABS_M a1
mov a2, a0
FF1_M a3, a2 // figure distance to skip
movi a2, 1 // a2 is quotient (1 for a sentinel)
lsl a2, a3 // move the sentinel along (with 0's behind)
lsl a0, a3 // and the low 32 bits of numerator
movi a3, 0
// run the remaining bits
1:
LSLC_M a0 // 1 bit left shift of a3-a0
addc a3, a3
cmphs a3, a1 // upper 32 of dividend >= divisor?
bf 2f
subu a3, a1 // if yes, subtract divisor
2:
addc a2, a2 // shift by 1 and count subtracts
bf 1b // if sentinel falls out of quotient, stop
4:
mov a0, a3 // and piggyback the remainder
LSLC_M l0 // after adjusting for sign
bf 3f
not a0
addi a0, 1
3:
pop l0
FUNC_END rem32
FUNC_END modsi3
#endif
/* Unordered comparisons for single and double float. */
#ifdef L_unordsf2
FUNC_START unordsf2
#if defined(__CK801__)
subi sp, 4
st.w r4, (sp, 0x0)
lsli r2, r0, 1
lsli r3, r1, 1
asri r4, r2, 24
not r4
cmpnei r4, 0
bt 1f
lsli r4, r0, 9
cmpnei r4, 0
bt 3f
1:
asri r4, r3, 24
not r4
cmpnei r4, 0
bt 2f
lsli r4, r1, 9
cmpnei r4, 0
bt 3f
2:
ld.w r4, (sp, 0x0)
addi sp, 4
movi r0, 0
rts
3:
ld.w r4, (sp, 0x0)
addi sp, 4
movi r0, 1
rts
#elif defined(__CK802__)
lsli r2, r0, 1
lsli r3, r1, 1
asri r2, r2, 24
not r13, r2
cmpnei r13, 0
bt 1f
lsli r13, r0, 9
cmpnei r13, 0
bt 3f
1:
asri r3, r3, 24
not r13, r3
cmpnei r13, 0
bt 2f
lsli r13, r1, 9
cmpnei r13, 0
bt 3f
2:
movi r0, 0
rts
3:
movi r0, 1
rts
#else
lsli r2, r0, 1
lsli r3, r1, 1
asri r2, r2, 24
not r13, r2
bnez r13, 1f
lsli r13, r0, 9
bnez r13, 3f
1:
asri r3, r3, 24
not r13, r3
bnez r13, 2f
lsli r13, r1, 9
bnez r13, 3f
2:
movi r0, 0
rts
3:
movi r0, 1
rts
#endif
FUNC_END unordsf2
#endif
#ifdef L_unorddf2
FUNC_START unorddf2
#if defined(__CK801__)
subi sp, 8
st.w r4, (sp, 0x0)
st.w r5, (sp, 0x4)
lsli r4, xh, 1
asri r4, r4, 21
not r4
cmpnei r4, 0
bt 1f
mov r4, xl
lsli r5, xh, 12
or r4, r5
cmpnei r4, 0
bt 3f
1:
lsli r4, yh, 1
asri r4, r4, 21
not r4
cmpnei r4, 0
bt 2f
mov r4,yl
lsli r5, yh, 12
or r4, r5
cmpnei r4, 0
bt 3f
2:
ld.w r4, (sp, 0x0)
ld.w r5, (sp, 0x4)
addi sp, 8
movi r0, 0
rts
3:
ld.w r4, (sp, 0x0)
ld.w r5, (sp, 0x4)
addi sp, 8
movi r0, 1
rts
#elif defined(__CK802__)
lsli r13, xh, 1
asri r13, r13, 21
not r13
cmpnei r13, 0
bt 1f
lsli xh, xh, 12
or r13, xl, xh
cmpnei r13, 0
bt 3f
1:
lsli r13, yh, 1
asri r13, r13, 21
not r13
cmpnei r13, 0
bt 2f
lsli yh, yh, 12
or r13, yl, yh
cmpnei r13, 0
bt 3f
2:
movi r0, 0
rts
3:
movi r0, 1
rts
#else
lsli r13, xh, 1
asri r13, r13, 21
not r13
bnez r13, 1f
lsli xh, xh, 12
or r13, xl, xh
bnez r13, 3f
1:
lsli r13, yh, 1
asri r13, r13, 21
not r13
bnez r13, 2f
lsli yh, yh, 12
or r13, yl, yh
bnez r13, 3f
2:
movi r0, 0
rts
3:
movi r0, 1
rts
#endif
FUNC_END unorddf2
#endif
/* When optimizing for size on ck801 and ck802, GCC emits calls to the
following helper functions when expanding casesi, instead of emitting
the table lookup and jump inline. Note that in these functions the
jump is handled by tweaking the value of lr before rts. */
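/* For instance, _gnu_csky_case_sqi below behaves like this hypothetical
   C model (not part of the library):

     int8_t *table = (int8_t *)lr;     // table starts at the return addr
     lr += 2 * (int32_t)table[a0];     // sign-extended half-word offsets
     // rts then branches to the updated lr

   The uqi/shi/uhi variants differ only in the element type and width of
   the table, and _gnu_csky_case_si first word-aligns lr and then adds a
   32-bit byte offset directly. */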
#ifdef L_csky_case_sqi
FUNC_START _gnu_csky_case_sqi
subi sp, 4
st.w a1, (sp, 0x0)
mov a1, lr
add a1, a1, a0
LDBS_M a1, a1
lsli a1, a1, 1
add lr, lr, a1
ld.w a1, (sp, 0x0)
addi sp, 4
rts
FUNC_END _gnu_csky_case_sqi
#endif
#ifdef L_csky_case_uqi
FUNC_START _gnu_csky_case_uqi
subi sp, 4
st.w a1, (sp, 0x0)
mov a1, lr
add a1, a1, a0
ld.b a1, (a1, 0x0)
lsli a1, a1, 1
add lr, lr, a1
ld.w a1, (sp, 0x0)
addi sp, 4
rts
FUNC_END _gnu_csky_case_uqi
#endif
#ifdef L_csky_case_shi
FUNC_START _gnu_csky_case_shi
subi sp, 8
st.w a0, (sp, 0x4)
st.w a1, (sp, 0x0)
mov a1, lr
lsli a0, a0, 1
add a1, a1, a0
LDHS_M a1, a1
lsli a1, a1, 1
add lr, lr, a1
ld.w a0, (sp, 0x4)
ld.w a1, (sp, 0x0)
addi sp, 8
rts
FUNC_END _gnu_csky_case_shi
#endif
#ifdef L_csky_case_uhi
FUNC_START _gnu_csky_case_uhi
subi sp, 8
st.w a0, (sp, 0x4)
st.w a1, (sp, 0x0)
mov a1, lr
lsli a0, a0, 1
add a1, a1, a0
ld.h a1, (a1, 0x0)
lsli a1, a1, 1
add lr, lr, a1
ld.w a0, (sp, 0x4)
ld.w a1, (sp, 0x0)
addi sp, 8
rts
FUNC_END _gnu_csky_case_uhi
#endif
#ifdef L_csky_case_si
FUNC_START _gnu_csky_case_si
subi sp, 8
st.w a0, (sp, 0x4)
st.w a1, (sp, 0x0)
mov a1, lr
addi a1, a1, 2 // Align to word.
bclri a1, a1, 1
mov lr, a1
lsli a0, a0, 2
add a1, a1, a0
ld.w a0, (a1, 0x0)
add lr, lr, a0
ld.w a0, (sp, 0x4)
ld.w a1, (sp, 0x0)
addi sp, 8
rts
FUNC_END _gnu_csky_case_si
#endif
/* GCC expects that {__eq,__ne,__gt,__ge,__le,__lt}{df2,sf2}
will behave as __cmpdf2. So, we stub the implementations to
jump on to __cmpdf2 and __cmpsf2.
All of these short-circuit the return path so that __cmp{sd}f2
will go directly back to the caller. */
.macro COMPARE_DF_JUMP name
.import SYM (cmpdf2)
FUNC_START \name
jmpi SYM (cmpdf2)
FUNC_END \name
.endm
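/* Each stub is therefore a single instruction; __eqdf2, say, is just
   "jmpi __cmpdf2". Since jmpi does not touch lr, __cmpdf2 returns
   directly to the original caller, which is the short-circuit described
   above. */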
#ifdef L_eqdf2
COMPARE_DF_JUMP eqdf2
#endif /* L_eqdf2 */
#ifdef L_nedf2
COMPARE_DF_JUMP nedf2
#endif /* L_nedf2 */
#ifdef L_gtdf2
COMPARE_DF_JUMP gtdf2
#endif /* L_gtdf2 */
#ifdef L_gedf2
COMPARE_DF_JUMP gedf2
#endif /* L_gedf2 */
#ifdef L_ltdf2
COMPARE_DF_JUMP ltdf2
#endif /* L_ltdf2 */
#ifdef L_ledf2
COMPARE_DF_JUMP ledf2
#endif /* L_ledf2 */
/* Single-precision floating point stubs. */
.macro COMPARE_SF_JUMP name
.import SYM (cmpsf2)
FUNC_START \name
jmpi SYM (cmpsf2)
FUNC_END \name
.endm
#ifdef L_eqsf2
COMPARE_SF_JUMP eqsf2
#endif /* L_eqsf2 */
#ifdef L_nesf2
COMPARE_SF_JUMP nesf2
#endif /* L_nesf2 */
#ifdef L_gtsf2
COMPARE_SF_JUMP gtsf2
#endif /* L_gtsf2 */
#ifdef L_gesf2
COMPARE_SF_JUMP gesf2
#endif /* L_gesf2 */
#ifdef L_ltsf2
COMPARE_SF_JUMP ltsf2
#endif /* L_ltsf2 */
#ifdef L_lesf2
COMPARE_SF_JUMP lesf2
#endif /* L_lesf2 */
|
4ms/metamodule-plugin-sdk
| 1,264
|
plugin-libc/libgcc/config/visium/crtn.S
|
/* crtn.S for Visium.
Copyright (C) 2005-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.file "crtn.o"
.ident "GNU C crtn.o"
.section .init
move.l r23,r22
read.l r22,(r22)
read.l r21,1(r23)
bra tr,r21,r0
addi r23,8
.section .fini
move.l r23,r22
read.l r22,(r22)
read.l r21,1(r23)
bra tr,r21,r0
addi r23,8
|
4ms/metamodule-plugin-sdk
| 1,346
|
plugin-libc/libgcc/config/visium/crti.S
|
/* crti.S for Visium.
Copyright (C) 2005-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.file "crti.o"
.ident "GNU C crti.o"
.section .init
.globl __init
.type __init,@function
__init:
subi r23,8
nop
write.l (r23),r22
write.l 1(r23),r21
move.l r22,r23
.section .fini
.globl __fini
.type __fini,@function
__fini:
subi r23,8
nop
write.l (r23),r22
write.l 1(r23),r21
move.l r22,r23
|
4ms/metamodule-plugin-sdk
| 2,552
|
plugin-libc/libgcc/config/rs6000/eabi-cn.S
|
/* crtn.s for eabi
Copyright (C) 1996-2022 Free Software Foundation, Inc.
Written By Michael Meissner
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just supplies labeled ending points for the .got* and other
special sections. It is linked in last after other modules. */
.ident "GNU C crtn.s"
#ifndef __powerpc64__
.section ".got","aw"
.globl __GOT_END__
.type __GOT_END__,@object
__GOT_END__:
.section ".got1","aw"
.globl __GOT1_END__
.type __GOT1_END__,@object
__GOT1_END__:
.section ".got2","aw"
.globl __GOT2_END__
.type __GOT2_END__,@object
__GOT2_END__:
.section ".fixup","aw"
.globl __FIXUP_END__
.type __FIXUP_END__,@object
__FIXUP_END__:
.section ".ctors","aw"
.globl __CTOR_END__
.type __CTOR_END__,@object
__CTOR_END__:
.section ".dtors","aw"
.weak __DTOR_END__
.type __DTOR_END__,@object
__DTOR_END__:
.section ".sdata","aw"
.globl __SDATA_END__
.type __SDATA_END__,@object
__SDATA_END__:
.section ".sbss","aw",@nobits
.globl __SBSS_END__
.type __SBSS_END__,@object
__SBSS_END__:
.section ".sdata2","a"
.globl __SDATA2_END__
.type __SDATA2_END__,@object
__SDATA2_END__:
.section ".sbss2","a"
.globl __SBSS2_END__
.type __SBSS2_END__,@object
__SBSS2_END__:
.section ".gcc_except_table","aw"
.globl __EXCEPT_END__
.type __EXCEPT_END__,@object
__EXCEPT_END__:
.section ".eh_frame","aw"
.globl __EH_FRAME_END__
.type __EH_FRAME_END__,@object
__EH_FRAME_END__:
.long 0
/* Tail of __init function used for static constructors. */
.section ".init","ax"
lwz 0,20(1)
mtlr 0
addi 1,1,16
blr
/* Tail of __fini function used for static destructors. */
.section ".fini","ax"
lwz 0,20(1)
mtlr 0
addi 1,1,16
blr
#endif
|
4ms/metamodule-plugin-sdk
| 2,540
|
plugin-libc/libgcc/config/rs6000/e500crtresx64gpr.S
|
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* "Exit" versions that return to their caller's caller. */
HIDDEN_FUNC(_rest64gpr_14_x) evldd 14,0(11)
HIDDEN_FUNC(_rest64gpr_15_x) evldd 15,8(11)
HIDDEN_FUNC(_rest64gpr_16_x) evldd 16,16(11)
HIDDEN_FUNC(_rest64gpr_17_x) evldd 17,24(11)
HIDDEN_FUNC(_rest64gpr_18_x) evldd 18,32(11)
HIDDEN_FUNC(_rest64gpr_19_x) evldd 19,40(11)
HIDDEN_FUNC(_rest64gpr_20_x) evldd 20,48(11)
HIDDEN_FUNC(_rest64gpr_21_x) evldd 21,56(11)
HIDDEN_FUNC(_rest64gpr_22_x) evldd 22,64(11)
HIDDEN_FUNC(_rest64gpr_23_x) evldd 23,72(11)
HIDDEN_FUNC(_rest64gpr_24_x) evldd 24,80(11)
HIDDEN_FUNC(_rest64gpr_25_x) evldd 25,88(11)
HIDDEN_FUNC(_rest64gpr_26_x) evldd 26,96(11)
HIDDEN_FUNC(_rest64gpr_27_x) evldd 27,104(11)
HIDDEN_FUNC(_rest64gpr_28_x) evldd 28,112(11)
HIDDEN_FUNC(_rest64gpr_29_x) evldd 29,120(11)
HIDDEN_FUNC(_rest64gpr_30_x) evldd 30,128(11)
HIDDEN_FUNC(_rest64gpr_31_x) lwz 0,148(11)
evldd 31,136(11)
addi 1,11,144
mtlr 0
blr
FUNC_END(_rest64gpr_31_x)
FUNC_END(_rest64gpr_30_x)
FUNC_END(_rest64gpr_29_x)
FUNC_END(_rest64gpr_28_x)
FUNC_END(_rest64gpr_27_x)
FUNC_END(_rest64gpr_26_x)
FUNC_END(_rest64gpr_25_x)
FUNC_END(_rest64gpr_24_x)
FUNC_END(_rest64gpr_23_x)
FUNC_END(_rest64gpr_22_x)
FUNC_END(_rest64gpr_21_x)
FUNC_END(_rest64gpr_20_x)
FUNC_END(_rest64gpr_19_x)
FUNC_END(_rest64gpr_18_x)
FUNC_END(_rest64gpr_17_x)
FUNC_END(_rest64gpr_16_x)
FUNC_END(_rest64gpr_15_x)
FUNC_END(_rest64gpr_14_x)
#endif
|
4ms/metamodule-plugin-sdk
| 6,145
|
plugin-libc/libgcc/config/rs6000/darwin-world.S
|
/* This file contains the exception-handling save_world and
* restore_world routines, which need to do a run-time check to see if
* they should save and restore the vector registers.
*
* Copyright (C) 2004-2022 Free Software Foundation, Inc.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
#ifndef __ppc64__
.machine ppc7400
.data
.align 2
#ifdef __DYNAMIC__
.non_lazy_symbol_pointer
L_has_vec$non_lazy_ptr:
.indirect_symbol __cpu_has_altivec
.long 0
#else
/* For static, "pretend" we have a non-lazy-pointer. */
L_has_vec$non_lazy_ptr:
.long __cpu_has_altivec
#endif
.text
.align 2
/* save_world and rest_world save/restore F14-F31 and possibly V20-V31
(assuming you have a CPU with vector registers; we use a global var
provided by the System Framework to determine this.)
SAVE_WORLD takes R0 (the caller's caller's return address) and R11
(the stack frame size) as parameters. It returns the updated VRsave
in R0 if we're on a CPU with vector regs.
For gcc3 onward, we need to save and restore CR as well, since scheduled
prologs can cause comparisons to be moved before calls to save_world.
USES: R0 R11 R12 */
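/* Save-area sketch, offsets from the entry R1 (a reading aid restating
   the stores below, not an ABI statement):
     -8 .. -144     f31 down to f14 (18 doubles)
     -148 .. -220   r31 down to r13 (stmw)
     -224           VRsave
     -240 .. -416   v31 down to v20 (12 vector registers) */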
.private_extern save_world
save_world:
stw r0,8(r1)
mflr r0
bcl 20,31,Ls$pb
Ls$pb: mflr r12
addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Ls$pb)
lwz r12,lo16(L_has_vec$non_lazy_ptr-Ls$pb)(r12)
mtlr r0
lwz r12,0(r12)
/* grab CR */
mfcr r0
/* test HAS_VEC */
cmpwi r12,0
stfd f14,-144(r1)
stfd f15,-136(r1)
stfd f16,-128(r1)
stfd f17,-120(r1)
stfd f18,-112(r1)
stfd f19,-104(r1)
stfd f20,-96(r1)
stfd f21,-88(r1)
stfd f22,-80(r1)
stfd f23,-72(r1)
stfd f24,-64(r1)
stfd f25,-56(r1)
stfd f26,-48(r1)
stfd f27,-40(r1)
stfd f28,-32(r1)
stfd f29,-24(r1)
stfd f30,-16(r1)
stfd f31,-8(r1)
stmw r13,-220(r1)
/* stash CR */
stw r0,4(r1)
/* set R12 pointing at Vector Reg save area */
addi r12,r1,-224
/* allocate stack frame */
stwux r1,r1,r11
/* ...but return if HAS_VEC is zero */
bne+ L$saveVMX
/* Not forgetting to restore CR. */
mtcr r0
blr
L$saveVMX:
/* We're saving Vector regs too. */
/* Restore CR from R0. No More Branches! */
mtcr r0
/* We should really use VRSAVE to figure out which vector regs
we actually need to save and restore. Some other time :-/ */
li r11,-192
stvx v20,r11,r12
li r11,-176
stvx v21,r11,r12
li r11,-160
stvx v22,r11,r12
li r11,-144
stvx v23,r11,r12
li r11,-128
stvx v24,r11,r12
li r11,-112
stvx v25,r11,r12
li r11,-96
stvx v26,r11,r12
li r11,-80
stvx v27,r11,r12
li r11,-64
stvx v28,r11,r12
li r11,-48
stvx v29,r11,r12
li r11,-32
stvx v30,r11,r12
mfspr r0,VRsave
li r11,-16
stvx v31,r11,r12
stw r0,0(r12) /* VRsave lives at -224(R1). */
ori r0,r0,0xfff /* We just saved these. */
mtspr VRsave,r0
blr
/* rest_world is jumped to, not called, so no need to worry about LR.
clobbers R0, R7, R11 and R12. This just undoes the work done above. */
.private_extern rest_world
rest_world:
lwz r11, 0(r1) /* Pickup previous SP */
li r7, 0 /* Stack offset is zero, r10 is ignored. */
b Lrest_world_eh_r7
/* eh_rest_world_r10 is jumped to, not called, so no need to worry about LR.
R10 is the C++ EH stack adjust parameter; we return to the caller's caller.
clobbers: R0, R7, R11 and R12
uses : R10
RETURNS : C++ EH Data registers (R3 - R6). */
.private_extern eh_rest_world_r10
eh_rest_world_r10:
lwz r11, 0(r1) /* Pickup previous SP */
mr r7,r10 /* Stack offset. */
/* pickup the C++ EH data regs (R3 - R6.) */
lwz r6,-420(r11)
lwz r5,-424(r11)
lwz r4,-428(r11)
lwz r3,-432(r11)
/* Fall through to Lrest_world_eh_r7. */
/* When we are doing the exception-handling epilog, R7 contains the offset to
add to the SP.
clobbers: R0, R11 and R12
uses : R7. */
Lrest_world_eh_r7:
/* See if we have Altivec. */
bcl 20,31,Lr7$pb
Lr7$pb: mflr r12
addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Lr7$pb)
lwz r12,lo16(L_has_vec$non_lazy_ptr-Lr7$pb)(r12)
lwz r12,0(r12) /* R12 := HAS_VEC */
cmpwi r12,0
lmw r13,-220(r11)
beq L.rest_world_fp_eh
/* We have Altivec, restore VRsave and V20..V31 */
lwz r0,-224(r11)
li r12,-416
mtspr VRsave,r0
lvx v20,r11,r12
li r12,-400
lvx v21,r11,r12
li r12,-384
lvx v22,r11,r12
li r12,-368
lvx v23,r11,r12
li r12,-352
lvx v24,r11,r12
li r12,-336
lvx v25,r11,r12
li r12,-320
lvx v26,r11,r12
li r12,-304
lvx v27,r11,r12
li r12,-288
lvx v28,r11,r12
li r12,-272
lvx v29,r11,r12
li r12,-256
lvx v30,r11,r12
li r12,-240
lvx v31,r11,r12
L.rest_world_fp_eh:
lwz r0,4(r11) /* recover saved CR */
lfd f14,-144(r11)
lfd f15,-136(r11)
lfd f16,-128(r11)
lfd f17,-120(r11)
lfd f18,-112(r11)
lfd f19,-104(r11)
lfd f20,-96(r11)
lfd f21,-88(r11)
lfd f22,-80(r11)
lfd f23,-72(r11)
lfd f24,-64(r11)
lfd f25,-56(r11)
lfd f26,-48(r11)
lfd f27,-40(r11)
lfd f28,-32(r11)
lfd f29,-24(r11)
lfd f30,-16(r11)
mtcr r0 /* restore the saved cr. */
lwz r0, 8(r11) /* Pick up the 'real' return address. */
lfd f31,-8(r11)
mtctr r0 /* exception-handler ret. address */
add r1,r11,r7 /* set SP to original value + R7 offset */
bctr
#endif
/* we should never be called on ppc64 for this ... */
/* Done. */
|
4ms/metamodule-plugin-sdk
| 2,750
|
plugin-libc/libgcc/config/rs6000/crtresfpr.S
|
/*
* Special support for eabi and SVR4
*
* Copyright (C) 1995-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
* 64-bit support written by David Edelsohn
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Do any initializations needed for the eabi environment */
.machine ppc
.section ".text"
#include "ppc-asm.h"
/* On PowerPC64 Linux, these functions are provided by the linker. */
#ifndef __powerpc64__
/* Routines for restoring floating point registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the floating point save area. */
CFI_STARTPROC
HIDDEN_FUNC(_restfpr_14) lfd 14,-144(11) /* restore fp registers */
HIDDEN_FUNC(_restfpr_15) lfd 15,-136(11)
HIDDEN_FUNC(_restfpr_16) lfd 16,-128(11)
HIDDEN_FUNC(_restfpr_17) lfd 17,-120(11)
HIDDEN_FUNC(_restfpr_18) lfd 18,-112(11)
HIDDEN_FUNC(_restfpr_19) lfd 19,-104(11)
HIDDEN_FUNC(_restfpr_20) lfd 20,-96(11)
HIDDEN_FUNC(_restfpr_21) lfd 21,-88(11)
HIDDEN_FUNC(_restfpr_22) lfd 22,-80(11)
HIDDEN_FUNC(_restfpr_23) lfd 23,-72(11)
HIDDEN_FUNC(_restfpr_24) lfd 24,-64(11)
HIDDEN_FUNC(_restfpr_25) lfd 25,-56(11)
HIDDEN_FUNC(_restfpr_26) lfd 26,-48(11)
HIDDEN_FUNC(_restfpr_27) lfd 27,-40(11)
HIDDEN_FUNC(_restfpr_28) lfd 28,-32(11)
HIDDEN_FUNC(_restfpr_29) lfd 29,-24(11)
HIDDEN_FUNC(_restfpr_30) lfd 30,-16(11)
HIDDEN_FUNC(_restfpr_31) lfd 31,-8(11)
blr
FUNC_END(_restfpr_31)
FUNC_END(_restfpr_30)
FUNC_END(_restfpr_29)
FUNC_END(_restfpr_28)
FUNC_END(_restfpr_27)
FUNC_END(_restfpr_26)
FUNC_END(_restfpr_25)
FUNC_END(_restfpr_24)
FUNC_END(_restfpr_23)
FUNC_END(_restfpr_22)
FUNC_END(_restfpr_21)
FUNC_END(_restfpr_20)
FUNC_END(_restfpr_19)
FUNC_END(_restfpr_18)
FUNC_END(_restfpr_17)
FUNC_END(_restfpr_16)
FUNC_END(_restfpr_15)
FUNC_END(_restfpr_14)
CFI_ENDPROC
#endif
|
4ms/metamodule-plugin-sdk
| 2,253
|
plugin-libc/libgcc/config/rs6000/crtrestvr.S
|
/* Routines for restoring vector registers.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Written by Alan Modra, IBM
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* On PowerPC64 Linux, these functions are provided by the linker. */
#ifndef __powerpc64__
#undef __ALTIVEC__
#define __ALTIVEC__ 1
#include "ppc-asm.h"
/* Called with r0 pointing just beyond the end of the vector save area. */
.machine altivec
.section ".text"
CFI_STARTPROC
HIDDEN_FUNC(_restvr_20)
li r11,-192
lvx v20,r11,r0
HIDDEN_FUNC(_restvr_21)
li r11,-176
lvx v21,r11,r0
HIDDEN_FUNC(_restvr_22)
li r11,-160
lvx v22,r11,r0
HIDDEN_FUNC(_restvr_23)
li r11,-144
lvx v23,r11,r0
HIDDEN_FUNC(_restvr_24)
li r11,-128
lvx v24,r11,r0
HIDDEN_FUNC(_restvr_25)
li r11,-112
lvx v25,r11,r0
HIDDEN_FUNC(_restvr_26)
li r11,-96
lvx v26,r11,r0
HIDDEN_FUNC(_restvr_27)
li r11,-80
lvx v27,r11,r0
HIDDEN_FUNC(_restvr_28)
li r11,-64
lvx v28,r11,r0
HIDDEN_FUNC(_restvr_29)
li r11,-48
lvx v29,r11,r0
HIDDEN_FUNC(_restvr_30)
li r11,-32
lvx v30,r11,r0
HIDDEN_FUNC(_restvr_31)
li r11,-16
lvx v31,r11,r0
blr
FUNC_END(_restvr_31)
FUNC_END(_restvr_30)
FUNC_END(_restvr_29)
FUNC_END(_restvr_28)
FUNC_END(_restvr_27)
FUNC_END(_restvr_26)
FUNC_END(_restvr_25)
FUNC_END(_restvr_24)
FUNC_END(_restvr_23)
FUNC_END(_restvr_22)
FUNC_END(_restvr_21)
FUNC_END(_restvr_20)
CFI_ENDPROC
#endif
|
4ms/metamodule-plugin-sdk
| 2,455
|
plugin-libc/libgcc/config/rs6000/e500crtsav64gpr.S
|
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for saving 64-bit integer registers, called by the compiler. */
HIDDEN_FUNC(_save64gpr_14) evstdd 14,0(11)
HIDDEN_FUNC(_save64gpr_15) evstdd 15,8(11)
HIDDEN_FUNC(_save64gpr_16) evstdd 16,16(11)
HIDDEN_FUNC(_save64gpr_17) evstdd 17,24(11)
HIDDEN_FUNC(_save64gpr_18) evstdd 18,32(11)
HIDDEN_FUNC(_save64gpr_19) evstdd 19,40(11)
HIDDEN_FUNC(_save64gpr_20) evstdd 20,48(11)
HIDDEN_FUNC(_save64gpr_21) evstdd 21,56(11)
HIDDEN_FUNC(_save64gpr_22) evstdd 22,64(11)
HIDDEN_FUNC(_save64gpr_23) evstdd 23,72(11)
HIDDEN_FUNC(_save64gpr_24) evstdd 24,80(11)
HIDDEN_FUNC(_save64gpr_25) evstdd 25,88(11)
HIDDEN_FUNC(_save64gpr_26) evstdd 26,96(11)
HIDDEN_FUNC(_save64gpr_27) evstdd 27,104(11)
HIDDEN_FUNC(_save64gpr_28) evstdd 28,112(11)
HIDDEN_FUNC(_save64gpr_29) evstdd 29,120(11)
HIDDEN_FUNC(_save64gpr_30) evstdd 30,128(11)
HIDDEN_FUNC(_save64gpr_31) evstdd 31,136(11)
blr
FUNC_END(_save64gpr_31)
FUNC_END(_save64gpr_30)
FUNC_END(_save64gpr_29)
FUNC_END(_save64gpr_28)
FUNC_END(_save64gpr_27)
FUNC_END(_save64gpr_26)
FUNC_END(_save64gpr_25)
FUNC_END(_save64gpr_24)
FUNC_END(_save64gpr_23)
FUNC_END(_save64gpr_22)
FUNC_END(_save64gpr_21)
FUNC_END(_save64gpr_20)
FUNC_END(_save64gpr_19)
FUNC_END(_save64gpr_18)
FUNC_END(_save64gpr_17)
FUNC_END(_save64gpr_16)
FUNC_END(_save64gpr_15)
FUNC_END(_save64gpr_14)
#endif
|
4ms/metamodule-plugin-sdk
| 2,585
|
plugin-libc/libgcc/config/rs6000/e500crtresx32gpr.S
|
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for restoring 32-bit integer registers, called by the compiler. */
/* "Exit" versions that return to the caller's caller. */
HIDDEN_FUNC(_rest32gpr_14_x) lwz 14,-72(11)
HIDDEN_FUNC(_rest32gpr_15_x) lwz 15,-68(11)
HIDDEN_FUNC(_rest32gpr_16_x) lwz 16,-64(11)
HIDDEN_FUNC(_rest32gpr_17_x) lwz 17,-60(11)
HIDDEN_FUNC(_rest32gpr_18_x) lwz 18,-56(11)
HIDDEN_FUNC(_rest32gpr_19_x) lwz 19,-52(11)
HIDDEN_FUNC(_rest32gpr_20_x) lwz 20,-48(11)
HIDDEN_FUNC(_rest32gpr_21_x) lwz 21,-44(11)
HIDDEN_FUNC(_rest32gpr_22_x) lwz 22,-40(11)
HIDDEN_FUNC(_rest32gpr_23_x) lwz 23,-36(11)
HIDDEN_FUNC(_rest32gpr_24_x) lwz 24,-32(11)
HIDDEN_FUNC(_rest32gpr_25_x) lwz 25,-28(11)
HIDDEN_FUNC(_rest32gpr_26_x) lwz 26,-24(11)
HIDDEN_FUNC(_rest32gpr_27_x) lwz 27,-20(11)
HIDDEN_FUNC(_rest32gpr_28_x) lwz 28,-16(11)
HIDDEN_FUNC(_rest32gpr_29_x) lwz 29,-12(11)
HIDDEN_FUNC(_rest32gpr_30_x) lwz 30,-8(11)
HIDDEN_FUNC(_rest32gpr_31_x) lwz 0,4(11)
lwz 31,-4(11)
mr 1,11
mtlr 0
blr
FUNC_END(_rest32gpr_31_x)
FUNC_END(_rest32gpr_30_x)
FUNC_END(_rest32gpr_29_x)
FUNC_END(_rest32gpr_28_x)
FUNC_END(_rest32gpr_27_x)
FUNC_END(_rest32gpr_26_x)
FUNC_END(_rest32gpr_25_x)
FUNC_END(_rest32gpr_24_x)
FUNC_END(_rest32gpr_23_x)
FUNC_END(_rest32gpr_22_x)
FUNC_END(_rest32gpr_21_x)
FUNC_END(_rest32gpr_20_x)
FUNC_END(_rest32gpr_19_x)
FUNC_END(_rest32gpr_18_x)
FUNC_END(_rest32gpr_17_x)
FUNC_END(_rest32gpr_16_x)
FUNC_END(_rest32gpr_15_x)
FUNC_END(_rest32gpr_14_x)
#endif
|
4ms/metamodule-plugin-sdk
| 2,517
|
plugin-libc/libgcc/config/rs6000/e500crtrest64gpr.S
|
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* "Tail" versions that perform a tail call. */
HIDDEN_FUNC(_rest64gpr_14_t) evldd 14,0(11)
HIDDEN_FUNC(_rest64gpr_15_t) evldd 15,8(11)
HIDDEN_FUNC(_rest64gpr_16_t) evldd 16,16(11)
HIDDEN_FUNC(_rest64gpr_17_t) evldd 17,24(11)
HIDDEN_FUNC(_rest64gpr_18_t) evldd 18,32(11)
HIDDEN_FUNC(_rest64gpr_19_t) evldd 19,40(11)
HIDDEN_FUNC(_rest64gpr_20_t) evldd 20,48(11)
HIDDEN_FUNC(_rest64gpr_21_t) evldd 21,56(11)
HIDDEN_FUNC(_rest64gpr_22_t) evldd 22,64(11)
HIDDEN_FUNC(_rest64gpr_23_t) evldd 23,72(11)
HIDDEN_FUNC(_rest64gpr_24_t) evldd 24,80(11)
HIDDEN_FUNC(_rest64gpr_25_t) evldd 25,88(11)
HIDDEN_FUNC(_rest64gpr_26_t) evldd 26,96(11)
HIDDEN_FUNC(_rest64gpr_27_t) evldd 27,104(11)
HIDDEN_FUNC(_rest64gpr_28_t) evldd 28,112(11)
HIDDEN_FUNC(_rest64gpr_29_t) evldd 29,120(11)
HIDDEN_FUNC(_rest64gpr_30_t) evldd 30,128(11)
HIDDEN_FUNC(_rest64gpr_31_t) lwz 0,148(11)
evldd 31,136(11)
addi 1,11,144
blr
FUNC_END(_rest64gpr_31_t)
FUNC_END(_rest64gpr_30_t)
FUNC_END(_rest64gpr_29_t)
FUNC_END(_rest64gpr_28_t)
FUNC_END(_rest64gpr_27_t)
FUNC_END(_rest64gpr_26_t)
FUNC_END(_rest64gpr_25_t)
FUNC_END(_rest64gpr_24_t)
FUNC_END(_rest64gpr_23_t)
FUNC_END(_rest64gpr_22_t)
FUNC_END(_rest64gpr_21_t)
FUNC_END(_rest64gpr_20_t)
FUNC_END(_rest64gpr_19_t)
FUNC_END(_rest64gpr_18_t)
FUNC_END(_rest64gpr_17_t)
FUNC_END(_rest64gpr_16_t)
FUNC_END(_rest64gpr_15_t)
FUNC_END(_rest64gpr_14_t)
#endif
|
4ms/metamodule-plugin-sdk
| 11,760
|
plugin-libc/libgcc/config/rs6000/morestack.S
|
#ifdef __powerpc64__
# PowerPC64 support for -fsplit-stack.
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
# Contributed by Alan Modra <amodra@gmail.com>.
# This file is part of GCC.
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#include <auto-host.h>
#if _CALL_ELF == 2
.abiversion 2
#define PARAMS 32
#else
#define PARAMS 48
#endif
#define MORESTACK_FRAMESIZE (PARAMS+96)
#define R2_SAVE -MORESTACK_FRAMESIZE+PARAMS-8
#define PARAMREG_SAVE -MORESTACK_FRAMESIZE+PARAMS+0
#define STATIC_CHAIN_SAVE -MORESTACK_FRAMESIZE+PARAMS+64
#define R29_SAVE -MORESTACK_FRAMESIZE+PARAMS+72
#define LINKREG_SAVE -MORESTACK_FRAMESIZE+PARAMS+80
#define NEWSTACKSIZE_SAVE -MORESTACK_FRAMESIZE+PARAMS+88
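# Restating the definitions above as a frame sketch, with <ofs> standing
# for -MORESTACK_FRAMESIZE+PARAMS relative to the entry r1:
#   <ofs>-8        saved r2 (ELFv2 only)
#   <ofs>+0..+56   incoming r3..r10 (eight 8-byte slots)
#   <ofs>+64       r11, the static chain
#   <ofs>+72       r29, used below as a frame pointer
#   <ofs>+80       saved lr
#   <ofs>+88       requested new stack size
# plus the usual PARAMS bytes at the bottom for our own outgoing calls.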
# Excess space needed to call ld.so resolver for lazy plt
# resolution. Go uses sigaltstack so this doesn't need to
# also cover signal frame size.
#define BACKOFF 4096
# Large excess allocated when calling non-split-stack code.
#define NON_SPLIT_STACK 0x100000
#if _CALL_ELF == 2
#define BODY_LABEL(name) name
#define ENTRY0(name) \
.global name; \
.hidden name; \
.type name,@function; \
name##:
#ifdef __PCREL__
#define ENTRY(name) \
ENTRY0(name); \
.localentry name, 1
#define JUMP_TARGET(name) name##@notoc
#else
#define ENTRY(name) \
ENTRY0(name); \
0: addis %r2,%r12,.TOC.-0b@ha; \
addi %r2,%r2,.TOC.-0b@l; \
.localentry name, .-name
#endif
#else
#define BODY_LABEL(name) .L.##name
#define ENTRY0(name) \
.global name; \
.hidden name; \
.type name,@function; \
.pushsection ".opd","aw"; \
.p2align 3; \
name##: .quad BODY_LABEL (name), .TOC.@tocbase, 0; \
.popsection; \
BODY_LABEL(name)##:
#define ENTRY(name) ENTRY0(name)
#endif
#define SIZE(name) .size name, .-BODY_LABEL(name)
#ifndef JUMP_TARGET
#define JUMP_TARGET(name) name
#endif
.text
# Just like __morestack, but with larger excess allocation
ENTRY0(__morestack_non_split)
.LFB1:
.cfi_startproc
# We use a cleanup to restore the tcbhead_t.__private_ss if
# an exception is thrown through this code.
#ifdef __PIC__
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#else
.cfi_personality 0x3,__gcc_personality_v0
.cfi_lsda 0x3,.LLSDA1
#endif
# LR is already saved by the split-stack prologue code.
# We may as well have the unwinder skip over the call in the
# prologue too.
.cfi_offset %lr,16
addis %r12,%r12,-NON_SPLIT_STACK@h
SIZE (__morestack_non_split)
# Fall through into __morestack
# This function is called with non-standard calling conventions.
# On entry, r12 is the requested stack pointer. One version of the
# split-stack prologue that calls __morestack looks like
# ld %r0,-0x7000-64(%r13)
# addis %r12,%r1,-allocate@ha
# addi %r12,%r12,-allocate@l
# cmpld %r12,%r0
# bge+ enough
# mflr %r0
# std %r0,16(%r1)
# bl __morestack
# ld %r0,16(%r1)
# mtlr %r0
# blr
# enough:
# The normal function prologue follows here, with a small addition at
# the end to set up the arg pointer. The arg pointer is set up with:
# addi %r12,%r1,offset
# bge %cr7,.+8
# mr %r12,%r29
#
# Note that the lr save slot 16(%r1) has already been used.
# r3 thru r11 possibly contain arguments and a static chain
# pointer for the function we're calling, so must be preserved.
# cr7 must also be preserved.
ENTRY0(__morestack)
#if _CALL_ELF == 2
# Functions with localentry bits of zero cannot make calls if those
# calls might change r2. This is true generally, and also true for
# __morestack with its special calling convention. When __morestack's
# caller is non-pcrel but libgcc is pcrel, the functions called here
# might modify r2. r2 must be preserved on exit, and also restored
# for the call back to our caller.
std %r2,R2_SAVE(%r1)
#endif
# Save parameter passing registers, our arguments, lr, r29
# and use r29 as a frame pointer.
std %r3,PARAMREG_SAVE+0(%r1)
sub %r3,%r1,%r12 # calculate requested stack size
mflr %r12
std %r4,PARAMREG_SAVE+8(%r1)
std %r5,PARAMREG_SAVE+16(%r1)
std %r6,PARAMREG_SAVE+24(%r1)
std %r7,PARAMREG_SAVE+32(%r1)
addi %r3,%r3,BACKOFF
std %r8,PARAMREG_SAVE+40(%r1)
std %r9,PARAMREG_SAVE+48(%r1)
std %r10,PARAMREG_SAVE+56(%r1)
std %r11,STATIC_CHAIN_SAVE(%r1)
std %r29,R29_SAVE(%r1)
std %r12,LINKREG_SAVE(%r1)
std %r3,NEWSTACKSIZE_SAVE(%r1) # new stack size
mr %r29,%r1
#if _CALL_ELF == 2
.cfi_offset %r2,R2_SAVE
#endif
.cfi_offset %r29,R29_SAVE
.cfi_def_cfa_register %r29
stdu %r1,-MORESTACK_FRAMESIZE(%r1)
#if _CALL_ELF == 2 && !defined __PCREL__
# If this isn't a pcrel libgcc then the functions we call here will
# require r2 to be valid. If __morestack is called from pcrel code r2
# won't be valid. Set it up.
bcl 20,31,1f
1:
mflr %r12
addis %r2,%r12,.TOC.-1b@ha
addi %r2,%r2,.TOC.-1b@l
#endif
# void __morestack_block_signals (void)
bl JUMP_TARGET(__morestack_block_signals)
# void *__generic_morestack (size_t *pframe_size,
# void *old_stack,
# size_t param_size)
addi %r3,%r29,NEWSTACKSIZE_SAVE
mr %r4,%r29
li %r5,0 # no copying from old stack
bl JUMP_TARGET(__generic_morestack)
# Start using new stack
stdu %r29,-32(%r3) # back-chain
mr %r1,%r3
# Set __private_ss stack guard for the new stack.
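# (%r13 is the TLS thread pointer; the powerpc64 TLS ABI biases it
# 0x7000 bytes past the thread control block, which is why
# tcbhead_t.__private_ss is addressed as -0x7000-64(%r13) below.)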
ld %r12,NEWSTACKSIZE_SAVE(%r29) # modified size
addi %r3,%r3,BACKOFF-32
sub %r3,%r3,%r12
# Note that a signal frame has $pc pointing at the instruction
# where the signal occurred. For something like a timer
# interrupt this means the instruction has already executed,
# thus the region starts at the instruction modifying
# __private_ss, not one instruction after.
.LEHB0:
std %r3,-0x7000-64(%r13) # tcbhead_t.__private_ss
# void __morestack_unblock_signals (void)
bl JUMP_TARGET(__morestack_unblock_signals)
# Set up for a call to the target function, located 3
# instructions after __morestack's return address.
#
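# With the example prologue shown above, %r12+12 skips the three
# instructions "ld %r0,16(%r1); mtlr %r0; blr" that follow the
# "bl __morestack", so the indirect call below lands at "enough:".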
ld %r12,LINKREG_SAVE(%r29)
#if _CALL_ELF == 2
ld %r2,R2_SAVE(%r29)
#endif
ld %r3,PARAMREG_SAVE+0(%r29) # restore arg regs
ld %r4,PARAMREG_SAVE+8(%r29)
ld %r5,PARAMREG_SAVE+16(%r29)
ld %r6,PARAMREG_SAVE+24(%r29)
ld %r7,PARAMREG_SAVE+32(%r29)
ld %r8,PARAMREG_SAVE+40(%r29)
ld %r9,PARAMREG_SAVE+48(%r29)
addi %r0,%r12,12 # add 3 instructions
ld %r10,PARAMREG_SAVE+56(%r29)
ld %r11,STATIC_CHAIN_SAVE(%r29)
cmpld %cr7,%r12,%r0 # indicate we were called
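# (%r12 is always less than %r12+12, so cr7 is "lt" and the
# arg-pointer sequence in the caller executes "mr %r12,%r29",
# picking up the old stack where its stack-passed args live.)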
mtctr %r0
bctrl # call caller!
# On return, save regs possibly used to return a value, and
# possibly trashed by calls to __morestack_block_signals,
# __generic_releasestack and __morestack_unblock_signals.
# Assume those calls don't use vector or floating point regs.
std %r3,PARAMREG_SAVE+0(%r29)
std %r4,PARAMREG_SAVE+8(%r29)
std %r5,PARAMREG_SAVE+16(%r29)
std %r6,PARAMREG_SAVE+24(%r29)
#if _CALL_ELF == 2
std %r7,PARAMREG_SAVE+32(%r29)
std %r8,PARAMREG_SAVE+40(%r29)
std %r9,PARAMREG_SAVE+48(%r29)
std %r10,PARAMREG_SAVE+56(%r29)
#endif
#if _CALL_ELF == 2 && !defined __PCREL__
# r2 was restored for calling back into our caller. Set it up again.
bcl 20,31,1f
1:
mflr %r12
addis %r2,%r12,.TOC.-1b@ha
addi %r2,%r2,.TOC.-1b@l
#endif
bl JUMP_TARGET(__morestack_block_signals)
# void *__generic_releasestack (size_t *pavailable)
addi %r3,%r29,NEWSTACKSIZE_SAVE
bl JUMP_TARGET(__generic_releasestack)
# Reset __private_ss stack guard to value for old stack
ld %r12,NEWSTACKSIZE_SAVE(%r29)
addi %r3,%r3,BACKOFF
sub %r3,%r3,%r12
.LEHE0:
std %r3,-0x7000-64(%r13) # tcbhead_t.__private_ss
bl JUMP_TARGET(__morestack_unblock_signals)
# Use old stack again.
mr %r1,%r29
# Restore return value regs, and return.
ld %r0,LINKREG_SAVE(%r29)
mtlr %r0
#if _CALL_ELF == 2
ld %r2,R2_SAVE(%r29)
#endif
ld %r3,PARAMREG_SAVE+0(%r29)
ld %r4,PARAMREG_SAVE+8(%r29)
ld %r5,PARAMREG_SAVE+16(%r29)
ld %r6,PARAMREG_SAVE+24(%r29)
#if _CALL_ELF == 2
ld %r7,PARAMREG_SAVE+32(%r29)
ld %r8,PARAMREG_SAVE+40(%r29)
ld %r9,PARAMREG_SAVE+48(%r29)
ld %r10,PARAMREG_SAVE+56(%r29)
#endif
ld %r29,R29_SAVE(%r29)
.cfi_def_cfa_register %r1
blr
# This is the cleanup code called by the stack unwinder when
# unwinding through code between .LEHB0 and .LEHE0 above.
cleanup:
.cfi_def_cfa_register %r29
std %r3,PARAMREG_SAVE(%r29) # Save exception header
# size_t __generic_findstack (void *stack)
mr %r3,%r29
bl JUMP_TARGET(__generic_findstack)
sub %r3,%r29,%r3
addi %r3,%r3,BACKOFF
std %r3,-0x7000-64(%r13) # tcbhead_t.__private_ss
ld %r3,PARAMREG_SAVE(%r29)
bl JUMP_TARGET(_Unwind_Resume)
#ifndef __PCREL__
nop
#endif
.cfi_endproc
SIZE (__morestack)
.section .gcc_except_table,"a",@progbits
.p2align 2
.LLSDA1:
.byte 0xff # @LPStart format (omit)
.byte 0xff # @TType format (omit)
.byte 0x1 # call-site format (uleb128)
.uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length
.LLSDACSB1:
.uleb128 .LEHB0-.LFB1 # region 0 start
.uleb128 .LEHE0-.LEHB0 # length
.uleb128 cleanup-.LFB1 # landing pad
.uleb128 0 # no action, ie. a cleanup
.LLSDACSE1:
#ifdef __PIC__
# Build a position independent reference to the personality function.
.hidden DW.ref.__gcc_personality_v0
.weak DW.ref.__gcc_personality_v0
.section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
.p2align 3
DW.ref.__gcc_personality_v0:
.quad __gcc_personality_v0
.type DW.ref.__gcc_personality_v0, @object
.size DW.ref.__gcc_personality_v0, 8
#endif
.text
# Initialize the stack guard when the program starts or when a
# new thread starts. This is called from a constructor.
# void __stack_split_initialize (void)
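# A rough C equivalent of the code below (illustrative only; the
# guard store really goes straight to the TLS slot):
#   void __stack_split_initialize (void) {
#     __morestack_set_guard ((char *) __builtin_frame_address (0) - 0x4000);
#     __generic_morestack_set_initial_sp (__builtin_frame_address (0), 0x4000);
#   }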
ENTRY(__stack_split_initialize)
.cfi_startproc
addi %r3,%r1,-0x4000 # We should have at least 16K.
std %r3,-0x7000-64(%r13) # tcbhead_t.__private_ss
# void __generic_morestack_set_initial_sp (void *sp, size_t len)
mr %r3,%r1
li %r4, 0x4000
b JUMP_TARGET(__generic_morestack_set_initial_sp)
# The lack of .cfi_endproc here is deliberate. This function and the
# following ones can all use the default FDE.
SIZE (__stack_split_initialize)
# Return current __private_ss
# void *__morestack_get_guard (void)
ENTRY0(__morestack_get_guard)
ld %r3,-0x7000-64(%r13) # tcbhead_t.__private_ss
blr
SIZE (__morestack_get_guard)
# Set __private_ss
# void __morestack_set_guard (void *ptr)
ENTRY0(__morestack_set_guard)
std %r3,-0x7000-64(%r13) # tcbhead_t.__private_ss
blr
SIZE (__morestack_set_guard)
# Return the stack guard value for given stack
# void *__morestack_make_guard (void *stack, size_t size)
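# i.e. guard = stack - size + BACKOFF, for a stack growing down
# from "stack"; compare the guard computations above.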
ENTRY0(__morestack_make_guard)
sub %r3,%r3,%r4
addi %r3,%r3,BACKOFF
blr
.cfi_endproc
SIZE (__morestack_make_guard)
# Make __stack_split_initialize a high priority constructor.
#if HAVE_INITFINI_ARRAY_SUPPORT
.section .init_array.00000,"aw",@progbits
#else
.section .ctors.65535,"aw",@progbits
#endif
.p2align 3
.quad __stack_split_initialize
.quad __morestack_load_mmap
.section .note.GNU-stack,"",@progbits
.section .note.GNU-split-stack,"",@progbits
.section .note.GNU-no-split-stack,"",@progbits
#endif /* __powerpc64__ */
4ms/metamodule-plugin-sdk | 3,303 | plugin-libc/libgcc/config/rs6000/e500crtsavg64gprctr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for saving 64-bit integer registers, called by the compiler. */
/* "GOT" versions that load the address of the GOT into lr before returning. */
HIDDEN_FUNC(_save64gpr_ctr_14_g) evstdd 14,0(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_15_g) evstdd 15,8(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_16_g) evstdd 16,16(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_17_g) evstdd 17,24(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_18_g) evstdd 18,32(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_19_g) evstdd 19,40(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_20_g) evstdd 20,48(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_21_g) evstdd 21,56(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_22_g) evstdd 22,64(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_23_g) evstdd 23,72(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_24_g) evstdd 24,80(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_25_g) evstdd 25,88(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_26_g) evstdd 26,96(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_27_g) evstdd 27,104(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_28_g) evstdd 28,112(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_29_g) evstdd 29,120(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_30_g) evstdd 30,128(11)
bdz _save64gpr_ctr_g_done
HIDDEN_FUNC(_save64gpr_ctr_31_g) evstdd 31,136(11)
_save64gpr_ctr_g_done: b _GLOBAL_OFFSET_TABLE_-4
FUNC_END(_save64gpr_ctr_31_g)
FUNC_END(_save64gpr_ctr_30_g)
FUNC_END(_save64gpr_ctr_29_g)
FUNC_END(_save64gpr_ctr_28_g)
FUNC_END(_save64gpr_ctr_27_g)
FUNC_END(_save64gpr_ctr_26_g)
FUNC_END(_save64gpr_ctr_25_g)
FUNC_END(_save64gpr_ctr_24_g)
FUNC_END(_save64gpr_ctr_23_g)
FUNC_END(_save64gpr_ctr_22_g)
FUNC_END(_save64gpr_ctr_21_g)
FUNC_END(_save64gpr_ctr_20_g)
FUNC_END(_save64gpr_ctr_19_g)
FUNC_END(_save64gpr_ctr_18_g)
FUNC_END(_save64gpr_ctr_17_g)
FUNC_END(_save64gpr_ctr_16_g)
FUNC_END(_save64gpr_ctr_15_g)
FUNC_END(_save64gpr_ctr_14_g)
#endif
4ms/metamodule-plugin-sdk | 2,566 | plugin-libc/libgcc/config/rs6000/e500crtrest32gpr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for restoring 32-bit integer registers, called by the compiler. */
/* "Tail" versions that perform a tail call. */
HIDDEN_FUNC(_rest32gpr_14_t) lwz 14,-72(11)
HIDDEN_FUNC(_rest32gpr_15_t) lwz 15,-68(11)
HIDDEN_FUNC(_rest32gpr_16_t) lwz 16,-64(11)
HIDDEN_FUNC(_rest32gpr_17_t) lwz 17,-60(11)
HIDDEN_FUNC(_rest32gpr_18_t) lwz 18,-56(11)
HIDDEN_FUNC(_rest32gpr_19_t) lwz 19,-52(11)
HIDDEN_FUNC(_rest32gpr_20_t) lwz 20,-48(11)
HIDDEN_FUNC(_rest32gpr_21_t) lwz 21,-44(11)
HIDDEN_FUNC(_rest32gpr_22_t) lwz 22,-40(11)
HIDDEN_FUNC(_rest32gpr_23_t) lwz 23,-36(11)
HIDDEN_FUNC(_rest32gpr_24_t) lwz 24,-32(11)
HIDDEN_FUNC(_rest32gpr_25_t) lwz 25,-28(11)
HIDDEN_FUNC(_rest32gpr_26_t) lwz 26,-24(11)
HIDDEN_FUNC(_rest32gpr_27_t) lwz 27,-20(11)
HIDDEN_FUNC(_rest32gpr_28_t) lwz 28,-16(11)
HIDDEN_FUNC(_rest32gpr_29_t) lwz 29,-12(11)
HIDDEN_FUNC(_rest32gpr_30_t) lwz 30,-8(11)
HIDDEN_FUNC(_rest32gpr_31_t) lwz 31,-4(11)
lwz 0,4(11)
mr 1,11
blr
FUNC_END(_rest32gpr_31_t)
FUNC_END(_rest32gpr_30_t)
FUNC_END(_rest32gpr_29_t)
FUNC_END(_rest32gpr_28_t)
FUNC_END(_rest32gpr_27_t)
FUNC_END(_rest32gpr_26_t)
FUNC_END(_rest32gpr_25_t)
FUNC_END(_rest32gpr_24_t)
FUNC_END(_rest32gpr_23_t)
FUNC_END(_rest32gpr_22_t)
FUNC_END(_rest32gpr_21_t)
FUNC_END(_rest32gpr_20_t)
FUNC_END(_rest32gpr_19_t)
FUNC_END(_rest32gpr_18_t)
FUNC_END(_rest32gpr_17_t)
FUNC_END(_rest32gpr_16_t)
FUNC_END(_rest32gpr_15_t)
FUNC_END(_rest32gpr_14_t)
#endif
4ms/metamodule-plugin-sdk | 2,493 | plugin-libc/libgcc/config/rs6000/e500crtres64gpr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for restoring 64-bit integer registers, called by the compiler. */
/* "Bare" versions that return to their caller. */
HIDDEN_FUNC(_rest64gpr_14) evldd 14,0(11)
HIDDEN_FUNC(_rest64gpr_15) evldd 15,8(11)
HIDDEN_FUNC(_rest64gpr_16) evldd 16,16(11)
HIDDEN_FUNC(_rest64gpr_17) evldd 17,24(11)
HIDDEN_FUNC(_rest64gpr_18) evldd 18,32(11)
HIDDEN_FUNC(_rest64gpr_19) evldd 19,40(11)
HIDDEN_FUNC(_rest64gpr_20) evldd 20,48(11)
HIDDEN_FUNC(_rest64gpr_21) evldd 21,56(11)
HIDDEN_FUNC(_rest64gpr_22) evldd 22,64(11)
HIDDEN_FUNC(_rest64gpr_23) evldd 23,72(11)
HIDDEN_FUNC(_rest64gpr_24) evldd 24,80(11)
HIDDEN_FUNC(_rest64gpr_25) evldd 25,88(11)
HIDDEN_FUNC(_rest64gpr_26) evldd 26,96(11)
HIDDEN_FUNC(_rest64gpr_27) evldd 27,104(11)
HIDDEN_FUNC(_rest64gpr_28) evldd 28,112(11)
HIDDEN_FUNC(_rest64gpr_29) evldd 29,120(11)
HIDDEN_FUNC(_rest64gpr_30) evldd 30,128(11)
HIDDEN_FUNC(_rest64gpr_31) evldd 31,136(11)
blr
FUNC_END(_rest64gpr_31)
FUNC_END(_rest64gpr_30)
FUNC_END(_rest64gpr_29)
FUNC_END(_rest64gpr_28)
FUNC_END(_rest64gpr_27)
FUNC_END(_rest64gpr_26)
FUNC_END(_rest64gpr_25)
FUNC_END(_rest64gpr_24)
FUNC_END(_rest64gpr_23)
FUNC_END(_rest64gpr_22)
FUNC_END(_rest64gpr_21)
FUNC_END(_rest64gpr_20)
FUNC_END(_rest64gpr_19)
FUNC_END(_rest64gpr_18)
FUNC_END(_rest64gpr_17)
FUNC_END(_rest64gpr_16)
FUNC_END(_rest64gpr_15)
FUNC_END(_rest64gpr_14)
#endif
4ms/metamodule-plugin-sdk | 1,238 | plugin-libc/libgcc/config/rs6000/crtdbase.S
/* Defines __gcc_unwind_dbase
Copyright (C) 2014-2022 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Symbol used as an arbitrary base for offsets inside the data
* segment for unwind information. */
.file "crtdbase.S"
.globl __gcc_unwind_dbase
.csect __gcc_unwind_dbase[RW],2
.align 2
__gcc_unwind_dbase:
.long 0
4ms/metamodule-plugin-sdk | 3,143 | plugin-libc/libgcc/config/rs6000/e500crtres64gprctr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for restoring 64-bit integer registers where the number of
registers to be restored is passed in CTR, called by the compiler. */
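/* For example, entering at _rest64gpr_ctr_29 with CTR = 3 restores
   r29, r30 and r31 and falls through to the blr; the two bdz's
   decrement CTR without ever reaching zero.  */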
HIDDEN_FUNC(_rest64gpr_ctr_14) evldd 14,0(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_15) evldd 15,8(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_16) evldd 16,16(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_17) evldd 17,24(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_18) evldd 18,32(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_19) evldd 19,40(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_20) evldd 20,48(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_21) evldd 21,56(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_22) evldd 22,64(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_23) evldd 23,72(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_24) evldd 24,80(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_25) evldd 25,88(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_26) evldd 26,96(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_27) evldd 27,104(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_28) evldd 28,112(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_29) evldd 29,120(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_30) evldd 30,128(11)
bdz _rest64gpr_ctr_done
HIDDEN_FUNC(_rest64gpr_ctr_31) evldd 31,136(11)
_rest64gpr_ctr_done: blr
FUNC_END(_rest64gpr_ctr_31)
FUNC_END(_rest64gpr_ctr_30)
FUNC_END(_rest64gpr_ctr_29)
FUNC_END(_rest64gpr_ctr_28)
FUNC_END(_rest64gpr_ctr_27)
FUNC_END(_rest64gpr_ctr_26)
FUNC_END(_rest64gpr_ctr_25)
FUNC_END(_rest64gpr_ctr_24)
FUNC_END(_rest64gpr_ctr_23)
FUNC_END(_rest64gpr_ctr_22)
FUNC_END(_rest64gpr_ctr_21)
FUNC_END(_rest64gpr_ctr_20)
FUNC_END(_rest64gpr_ctr_19)
FUNC_END(_rest64gpr_ctr_18)
FUNC_END(_rest64gpr_ctr_17)
FUNC_END(_rest64gpr_ctr_16)
FUNC_END(_rest64gpr_ctr_15)
FUNC_END(_rest64gpr_ctr_14)
#endif
4ms/metamodule-plugin-sdk | 3,466 | plugin-libc/libgcc/config/rs6000/darwin-vecsave.S
/* This file contains the vector save and restore routines.
*
* Copyright (C) 2004-2022 Free Software Foundation, Inc.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Vector save/restore routines for Darwin. Note that each vector
save/restore requires 2 instructions (8 bytes).
THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
for example. For FP reg saves/restores, it takes one instruction
(4 bytes) to do the operation; for Vector regs, 2 instructions are
required (8 bytes). */
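/* For the vector analogue, each entry point is 8 bytes from the
   start of saveVEC/restVEC, so a caller saving only v29..v31 would
   enter at "saveVEC+72" ((29-20)*8).  */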
/* With some assemblers, we need the correct machine directive to get the
right CPU type / subtype in the file header. */
#if __ppc64__
.machine ppc64
#else
.machine ppc7400
#endif
.text
.align 2
.private_extern saveVEC
saveVEC:
li r11,-192
stvx v20,r11,r0
li r11,-176
stvx v21,r11,r0
li r11,-160
stvx v22,r11,r0
li r11,-144
stvx v23,r11,r0
li r11,-128
stvx v24,r11,r0
li r11,-112
stvx v25,r11,r0
li r11,-96
stvx v26,r11,r0
li r11,-80
stvx v27,r11,r0
li r11,-64
stvx v28,r11,r0
li r11,-48
stvx v29,r11,r0
li r11,-32
stvx v30,r11,r0
li r11,-16
stvx v31,r11,r0
blr
.private_extern restVEC
restVEC:
li r11,-192
lvx v20,r11,r0
li r11,-176
lvx v21,r11,r0
li r11,-160
lvx v22,r11,r0
li r11,-144
lvx v23,r11,r0
li r11,-128
lvx v24,r11,r0
li r11,-112
lvx v25,r11,r0
li r11,-96
lvx v26,r11,r0
li r11,-80
lvx v27,r11,r0
li r11,-64
lvx v28,r11,r0
li r11,-48
lvx v29,r11,r0
li r11,-32
lvx v30,r11,r0
li r11,-16
lvx v31,r11,r0
blr
/* saveVEC_vr11 -- as saveVEC but VRsave is returned in R11. */
.private_extern saveVEC_vr11
saveVEC_vr11:
li r11,-192
stvx v20,r11,r0
li r11,-176
stvx v21,r11,r0
li r11,-160
stvx v22,r11,r0
li r11,-144
stvx v23,r11,r0
li r11,-128
stvx v24,r11,r0
li r11,-112
stvx v25,r11,r0
li r11,-96
stvx v26,r11,r0
li r11,-80
stvx v27,r11,r0
li r11,-64
stvx v28,r11,r0
li r11,-48
stvx v29,r11,r0
li r11,-32
stvx v30,r11,r0
li r11,-16
stvx v31,r11,r0
mfspr r11,VRsave
blr
/* As restVEC, but the original VRsave value is passed in R10. */
.private_extern restVEC_vr10
restVEC_vr10:
li r11,-192
lvx v20,r11,r0
li r11,-176
lvx v21,r11,r0
li r11,-160
lvx v22,r11,r0
li r11,-144
lvx v23,r11,r0
li r11,-128
lvx v24,r11,r0
li r11,-112
lvx v25,r11,r0
li r11,-96
lvx v26,r11,r0
li r11,-80
lvx v27,r11,r0
li r11,-64
lvx v28,r11,r0
li r11,-48
lvx v29,r11,r0
li r11,-32
lvx v30,r11,r0
li r11,-16
lvx v31,r11,r0
/* restore VRsave from R10. */
mtspr VRsave,r10
blr
4ms/metamodule-plugin-sdk | 3,685 | plugin-libc/libgcc/config/rs6000/darwin-gpsave.S
/* This file contains the GPR save and restore routines for Darwin.
*
* Copyright (C) 2011-2022 Free Software Foundation, Inc.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Contributed by Iain Sandoe <iains@gcc.gnu.org> */
/* Like their FP and VEC counterparts, these routines have only one externally
visible entry point. Calls have to be constructed as offsets from this
(i.e. callers have to jump to "saveGPR+((x-13)*4)" to save registers x..31).
Each save/load instruction is 4 bytes long (for both m32 and m64 builds).
The save/restores here are done w.r.t. r11.
restGPRx restores the link reg from the stack and returns to the saved
address.
*/
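/* For example, to save just r30 and r31 a caller branches to
   "saveGPR+((30-13)*4)", i.e. saveGPR+68.  */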
#include "darwin-asm.h"
.text
.align 2
.private_extern saveGPR
saveGPR:
stg r13,(-19 * GPR_BYTES)(r11)
stg r14,(-18 * GPR_BYTES)(r11)
stg r15,(-17 * GPR_BYTES)(r11)
stg r16,(-16 * GPR_BYTES)(r11)
stg r17,(-15 * GPR_BYTES)(r11)
stg r18,(-14 * GPR_BYTES)(r11)
stg r19,(-13 * GPR_BYTES)(r11)
stg r20,(-12 * GPR_BYTES)(r11)
stg r21,(-11 * GPR_BYTES)(r11)
stg r22,(-10 * GPR_BYTES)(r11)
stg r23,( -9 * GPR_BYTES)(r11)
stg r24,( -8 * GPR_BYTES)(r11)
stg r25,( -7 * GPR_BYTES)(r11)
stg r26,( -6 * GPR_BYTES)(r11)
stg r27,( -5 * GPR_BYTES)(r11)
stg r28,( -4 * GPR_BYTES)(r11)
stg r29,( -3 * GPR_BYTES)(r11)
stg r30,( -2 * GPR_BYTES)(r11)
stg r31,( -1 * GPR_BYTES)(r11)
blr
/* */
.private_extern restGPR
restGPR:
lg r13,(-19 * GPR_BYTES)(r11)
lg r14,(-18 * GPR_BYTES)(r11)
lg r15,(-17 * GPR_BYTES)(r11)
lg r16,(-16 * GPR_BYTES)(r11)
lg r17,(-15 * GPR_BYTES)(r11)
lg r18,(-14 * GPR_BYTES)(r11)
lg r19,(-13 * GPR_BYTES)(r11)
lg r20,(-12 * GPR_BYTES)(r11)
lg r21,(-11 * GPR_BYTES)(r11)
lg r22,(-10 * GPR_BYTES)(r11)
lg r23,( -9 * GPR_BYTES)(r11)
lg r24,( -8 * GPR_BYTES)(r11)
lg r25,( -7 * GPR_BYTES)(r11)
lg r26,( -6 * GPR_BYTES)(r11)
lg r27,( -5 * GPR_BYTES)(r11)
lg r28,( -4 * GPR_BYTES)(r11)
lg r29,( -3 * GPR_BYTES)(r11)
lg r30,( -2 * GPR_BYTES)(r11)
lg r31,( -1 * GPR_BYTES)(r11)
blr
.private_extern restGPRx
restGPRx:
lg r13,(-19 * GPR_BYTES)(r11)
lg r14,(-18 * GPR_BYTES)(r11)
lg r15,(-17 * GPR_BYTES)(r11)
lg r16,(-16 * GPR_BYTES)(r11)
lg r17,(-15 * GPR_BYTES)(r11)
lg r18,(-14 * GPR_BYTES)(r11)
lg r19,(-13 * GPR_BYTES)(r11)
lg r20,(-12 * GPR_BYTES)(r11)
lg r21,(-11 * GPR_BYTES)(r11)
lg r22,(-10 * GPR_BYTES)(r11)
lg r23,( -9 * GPR_BYTES)(r11)
lg r24,( -8 * GPR_BYTES)(r11)
lg r25,( -7 * GPR_BYTES)(r11)
lg r26,( -6 * GPR_BYTES)(r11)
lg r27,( -5 * GPR_BYTES)(r11)
lg r28,( -4 * GPR_BYTES)(r11)
lg r29,( -3 * GPR_BYTES)(r11)
/* Like the FP restore, we start from the offset for r30,
so a restore of only r31 is not going to work. */
lg r0,SAVED_LR_OFFSET(r1)
lg r30,( -2 * GPR_BYTES)(r11)
mtlr r0
lg r31,( -1 * GPR_BYTES)(r11)
blr
4ms/metamodule-plugin-sdk | 2,763 | plugin-libc/libgcc/config/rs6000/crtsavfpr.S
/*
* Special support for eabi and SVR4
*
* Copyright (C) 1995-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
* 64-bit support written by David Edelsohn
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Do any initializations needed for the eabi environment */
.machine ppc
.section ".text"
#include "ppc-asm.h"
/* On PowerPC64 Linux, these functions are provided by the linker. */
#ifndef __powerpc64__
/* Routines for saving floating point registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the floating point save area. */
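/* For example, a function saving f27..f31 calls _savefpr_27, which */
/* stores f27..f31 at -40(11)..-8(11) and returns. */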
CFI_STARTPROC
HIDDEN_FUNC(_savefpr_14) stfd 14,-144(11) /* save fp registers */
HIDDEN_FUNC(_savefpr_15) stfd 15,-136(11)
HIDDEN_FUNC(_savefpr_16) stfd 16,-128(11)
HIDDEN_FUNC(_savefpr_17) stfd 17,-120(11)
HIDDEN_FUNC(_savefpr_18) stfd 18,-112(11)
HIDDEN_FUNC(_savefpr_19) stfd 19,-104(11)
HIDDEN_FUNC(_savefpr_20) stfd 20,-96(11)
HIDDEN_FUNC(_savefpr_21) stfd 21,-88(11)
HIDDEN_FUNC(_savefpr_22) stfd 22,-80(11)
HIDDEN_FUNC(_savefpr_23) stfd 23,-72(11)
HIDDEN_FUNC(_savefpr_24) stfd 24,-64(11)
HIDDEN_FUNC(_savefpr_25) stfd 25,-56(11)
HIDDEN_FUNC(_savefpr_26) stfd 26,-48(11)
HIDDEN_FUNC(_savefpr_27) stfd 27,-40(11)
HIDDEN_FUNC(_savefpr_28) stfd 28,-32(11)
HIDDEN_FUNC(_savefpr_29) stfd 29,-24(11)
HIDDEN_FUNC(_savefpr_30) stfd 30,-16(11)
HIDDEN_FUNC(_savefpr_31) stfd 31,-8(11)
blr
FUNC_END(_savefpr_31)
FUNC_END(_savefpr_30)
FUNC_END(_savefpr_29)
FUNC_END(_savefpr_28)
FUNC_END(_savefpr_27)
FUNC_END(_savefpr_26)
FUNC_END(_savefpr_25)
FUNC_END(_savefpr_24)
FUNC_END(_savefpr_23)
FUNC_END(_savefpr_22)
FUNC_END(_savefpr_21)
FUNC_END(_savefpr_20)
FUNC_END(_savefpr_19)
FUNC_END(_savefpr_18)
FUNC_END(_savefpr_17)
FUNC_END(_savefpr_16)
FUNC_END(_savefpr_15)
FUNC_END(_savefpr_14)
CFI_ENDPROC
#endif
4ms/metamodule-plugin-sdk | 2,418 | plugin-libc/libgcc/config/rs6000/sol-ci.S
# crti.s for sysv4
# Copyright (C) 1996-2022 Free Software Foundation, Inc.
# Written By Michael Meissner
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just supplies labeled starting points for the .got* and other
# special sections. It is linked in first before other modules.
.ident "GNU C scrti.s"
#ifndef __powerpc64__
# Start of .text
.section ".text"
.globl _ex_text0
_ex_text0:
# Exception range
.section ".exception_ranges","aw"
.globl _ex_range0
_ex_range0:
# List of C++ constructors
.section ".ctors","aw"
.globl __CTOR_LIST__
.type __CTOR_LIST__,@object
__CTOR_LIST__:
# List of C++ destructors
.section ".dtors","aw"
.globl __DTOR_LIST__
.type __DTOR_LIST__,@object
__DTOR_LIST__:
# Head of _init function used for static constructors
.section ".init","ax"
.align 2
.globl _init
.type _init,@function
_init: stwu %r1,-16(%r1)
mflr %r0
stw %r31,12(%r1)
stw %r0,16(%r1)
bl _GLOBAL_OFFSET_TABLE_-4 # get the GOT address
mflr %r31
# lwz %r3,_ex_shared0@got(%r31)
# lwz %r4,-8(%r3) # _ex_register or 0
# cmpi %cr0,%r4,0
# beq .Lno_reg
# mtlr %r4
# blrl
#.Lno_reg:
# Head of _fini function used for static destructors
.section ".fini","ax"
.align 2
.globl _fini
.type _fini,@function
_fini: stwu %r1,-16(%r1)
mflr %r0
stw %r31,12(%r1)
stw %r0,16(%r1)
bl _GLOBAL_OFFSET_TABLE_-4 # get the GOT address
mflr %r31
# _environ and its evil twin environ, pointing to the environment
.section ".sdata","aw"
.align 2
.globl _environ
_environ:
.space 4
.weak environ
.set environ,_environ
#endif
4ms/metamodule-plugin-sdk | 3,207 | plugin-libc/libgcc/config/rs6000/e500crtsav64gprctr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for saving 64-bit integer registers where the number of
registers to be saved is passed in CTR, called by the compiler. */
/* "Bare" versions that return to their caller. */
HIDDEN_FUNC(_save64gpr_ctr_14) evstdd 14,0(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_15) evstdd 15,8(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_16) evstdd 16,16(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_17) evstdd 17,24(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_18) evstdd 18,32(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_19) evstdd 19,40(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_20) evstdd 20,48(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_21) evstdd 21,56(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_22) evstdd 22,64(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_23) evstdd 23,72(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_24) evstdd 24,80(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_25) evstdd 25,88(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_26) evstdd 26,96(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_27) evstdd 27,104(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_28) evstdd 28,112(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_29) evstdd 29,120(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_30) evstdd 30,128(11)
bdz _save64gpr_ctr_done
HIDDEN_FUNC(_save64gpr_ctr_31) evstdd 31,136(11)
_save64gpr_ctr_done: blr
FUNC_END(_save64gpr_ctr_31)
FUNC_END(_save64gpr_ctr_30)
FUNC_END(_save64gpr_ctr_29)
FUNC_END(_save64gpr_ctr_28)
FUNC_END(_save64gpr_ctr_27)
FUNC_END(_save64gpr_ctr_26)
FUNC_END(_save64gpr_ctr_25)
FUNC_END(_save64gpr_ctr_24)
FUNC_END(_save64gpr_ctr_23)
FUNC_END(_save64gpr_ctr_22)
FUNC_END(_save64gpr_ctr_21)
FUNC_END(_save64gpr_ctr_20)
FUNC_END(_save64gpr_ctr_19)
FUNC_END(_save64gpr_ctr_18)
FUNC_END(_save64gpr_ctr_17)
FUNC_END(_save64gpr_ctr_16)
FUNC_END(_save64gpr_ctr_15)
FUNC_END(_save64gpr_ctr_14)
#endif
4ms/metamodule-plugin-sdk | 2,669 | plugin-libc/libgcc/config/rs6000/darwin-fpsave.S
/* This file contains the floating-point save and restore routines.
*
* Copyright (C) 2004-2022 Free Software Foundation, Inc.
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
for example. For FP reg saves/restores, it takes one instruction
(4 bytes) to do the operation; for Vector regs, 2 instructions are
required (8 bytes.)
MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */
#include "darwin-asm.h"
.text
.align 2
/* saveFP saves R0 -- assumed to be the caller's LR -- to 8/16(R1). */
.private_extern saveFP
saveFP:
stfd f14,-144(r1)
stfd f15,-136(r1)
stfd f16,-128(r1)
stfd f17,-120(r1)
stfd f18,-112(r1)
stfd f19,-104(r1)
stfd f20,-96(r1)
stfd f21,-88(r1)
stfd f22,-80(r1)
stfd f23,-72(r1)
stfd f24,-64(r1)
stfd f25,-56(r1)
stfd f26,-48(r1)
stfd f27,-40(r1)
stfd f28,-32(r1)
stfd f29,-24(r1)
stfd f30,-16(r1)
stfd f31,-8(r1)
stg r0,SAVED_LR_OFFSET(r1)
blr
/* restFP restores the caller's LR from 8/16(R1). Note that the code for
this starts at the offset of F30 restoration, so calling this
routine in an attempt to restore only F31 WILL NOT WORK (it would
be a stupid thing to do, anyway). */
.private_extern restFP
restFP:
lfd f14,-144(r1)
lfd f15,-136(r1)
lfd f16,-128(r1)
lfd f17,-120(r1)
lfd f18,-112(r1)
lfd f19,-104(r1)
lfd f20,-96(r1)
lfd f21,-88(r1)
lfd f22,-80(r1)
lfd f23,-72(r1)
lfd f24,-64(r1)
lfd f25,-56(r1)
lfd f26,-48(r1)
lfd f27,-40(r1)
lfd f28,-32(r1)
lfd f29,-24(r1)
/* <OFFSET OF F30 RESTORE> restore the caller's LR */
lg r0,SAVED_LR_OFFSET(r1)
lfd f30,-16(r1)
/* and prepare for return to caller */
mtlr r0
lfd f31,-8(r1)
blr
4ms/metamodule-plugin-sdk | 3,599 | plugin-libc/libgcc/config/rs6000/crtresxgpr.S
/*
* Special support for eabi and SVR4
*
* Copyright (C) 1995-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
* 64-bit support written by David Edelsohn
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Do any initializations needed for the eabi environment */
.section ".text"
#include "ppc-asm.h"
/* On PowerPC64 Linux, these functions are provided by the linker. */
#ifndef __powerpc64__
/* Routines for restoring integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer restore area. */
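/* For example, an epilogue restoring r29..r31 branches to */
/* _restgpr_29_x, which also reloads the saved LR from 4(11), */
/* resets r1 to the old stack pointer, and returns to the */
/* original caller. */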
CFI_STARTPROC
CFI_DEF_CFA_REGISTER (11)
CFI_OFFSET (65, 4)
CFI_OFFSET (14, -72)
CFI_OFFSET (15, -68)
CFI_OFFSET (16, -64)
CFI_OFFSET (17, -60)
CFI_OFFSET (18, -56)
CFI_OFFSET (19, -52)
CFI_OFFSET (20, -48)
CFI_OFFSET (21, -44)
CFI_OFFSET (22, -40)
CFI_OFFSET (23, -36)
CFI_OFFSET (24, -32)
CFI_OFFSET (25, -28)
CFI_OFFSET (26, -24)
CFI_OFFSET (27, -20)
CFI_OFFSET (28, -16)
CFI_OFFSET (29, -12)
CFI_OFFSET (30, -8)
CFI_OFFSET (31, -4)
HIDDEN_FUNC(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */
CFI_RESTORE (14)
HIDDEN_FUNC(_restgpr_15_x) lwz 15,-68(11)
CFI_RESTORE (15)
HIDDEN_FUNC(_restgpr_16_x) lwz 16,-64(11)
CFI_RESTORE (16)
HIDDEN_FUNC(_restgpr_17_x) lwz 17,-60(11)
CFI_RESTORE (17)
HIDDEN_FUNC(_restgpr_18_x) lwz 18,-56(11)
CFI_RESTORE (18)
HIDDEN_FUNC(_restgpr_19_x) lwz 19,-52(11)
CFI_RESTORE (19)
HIDDEN_FUNC(_restgpr_20_x) lwz 20,-48(11)
CFI_RESTORE (20)
HIDDEN_FUNC(_restgpr_21_x) lwz 21,-44(11)
CFI_RESTORE (21)
HIDDEN_FUNC(_restgpr_22_x) lwz 22,-40(11)
CFI_RESTORE (22)
HIDDEN_FUNC(_restgpr_23_x) lwz 23,-36(11)
CFI_RESTORE (23)
HIDDEN_FUNC(_restgpr_24_x) lwz 24,-32(11)
CFI_RESTORE (24)
HIDDEN_FUNC(_restgpr_25_x) lwz 25,-28(11)
CFI_RESTORE (25)
HIDDEN_FUNC(_restgpr_26_x) lwz 26,-24(11)
CFI_RESTORE (26)
HIDDEN_FUNC(_restgpr_27_x) lwz 27,-20(11)
CFI_RESTORE (27)
HIDDEN_FUNC(_restgpr_28_x) lwz 28,-16(11)
CFI_RESTORE (28)
HIDDEN_FUNC(_restgpr_29_x) lwz 29,-12(11)
CFI_RESTORE (29)
HIDDEN_FUNC(_restgpr_30_x) lwz 30,-8(11)
CFI_RESTORE (30)
HIDDEN_FUNC(_restgpr_31_x) lwz 0,4(11)
lwz 31,-4(11)
CFI_RESTORE (31)
mtlr 0
CFI_RESTORE (65)
mr 1,11
CFI_DEF_CFA_REGISTER (1)
blr
FUNC_END(_restgpr_31_x)
FUNC_END(_restgpr_30_x)
FUNC_END(_restgpr_29_x)
FUNC_END(_restgpr_28_x)
FUNC_END(_restgpr_27_x)
FUNC_END(_restgpr_26_x)
FUNC_END(_restgpr_25_x)
FUNC_END(_restgpr_24_x)
FUNC_END(_restgpr_23_x)
FUNC_END(_restgpr_22_x)
FUNC_END(_restgpr_21_x)
FUNC_END(_restgpr_20_x)
FUNC_END(_restgpr_19_x)
FUNC_END(_restgpr_18_x)
FUNC_END(_restgpr_17_x)
FUNC_END(_restgpr_16_x)
FUNC_END(_restgpr_15_x)
FUNC_END(_restgpr_14_x)
CFI_ENDPROC
#endif
4ms/metamodule-plugin-sdk | 2,794 | plugin-libc/libgcc/config/rs6000/eabi-ci.S
/* crti.s for eabi
Copyright (C) 1996-2022 Free Software Foundation, Inc.
Written By Michael Meissner
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just supplies labeled starting points for the .got* and other
special sections. It is linked in first before other modules. */
.ident "GNU C crti.s"
#include <ppc-asm.h>
#ifndef __powerpc64__
.section ".got","aw"
.globl __GOT_START__
.type __GOT_START__,@object
__GOT_START__:
.section ".got1","aw"
.globl __GOT1_START__
.type __GOT1_START__,@object
__GOT1_START__:
.section ".got2","aw"
.globl __GOT2_START__
.type __GOT2_START__,@object
__GOT2_START__:
.section ".fixup","aw"
.globl __FIXUP_START__
.type __FIXUP_START__,@object
__FIXUP_START__:
.section ".ctors","aw"
.globl __CTOR_LIST__
.type __CTOR_LIST__,@object
__CTOR_LIST__:
.section ".dtors","aw"
.globl __DTOR_LIST__
.type __DTOR_LIST__,@object
__DTOR_LIST__:
.section ".sdata","aw"
.globl __SDATA_START__
.type __SDATA_START__,@object
.weak _SDA_BASE_
.type _SDA_BASE_,@object
__SDATA_START__:
_SDA_BASE_:
.section ".sbss","aw",@nobits
.globl __SBSS_START__
.type __SBSS_START__,@object
__SBSS_START__:
.section ".sdata2","a"
.weak _SDA2_BASE_
.type _SDA2_BASE_,@object
.globl __SDATA2_START__
.type __SDATA2_START__,@object
__SDATA2_START__:
_SDA2_BASE_:
.section ".sbss2","a"
.globl __SBSS2_START__
.type __SBSS2_START__,@object
__SBSS2_START__:
.section ".gcc_except_table","aw"
.globl __EXCEPT_START__
.type __EXCEPT_START__,@object
__EXCEPT_START__:
.section ".eh_frame","aw"
.globl __EH_FRAME_BEGIN__
.type __EH_FRAME_BEGIN__,@object
__EH_FRAME_BEGIN__:
/* Head of __init function used for static constructors. */
.section ".init","ax"
.align 2
FUNC_START(__init)
stwu 1,-16(1)
mflr 0
stw 0,20(1)
/* Head of __fini function used for static destructors. */
.section ".fini","ax"
.align 2
FUNC_START(__fini)
stwu 1,-16(1)
mflr 0
stw 0,20(1)
#endif
4ms/metamodule-plugin-sdk | 2,476 | plugin-libc/libgcc/config/rs6000/e500crtres32gpr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for restoring 32-bit integer registers, called by the compiler. */
/* "Bare" versions that simply return to their caller. */
HIDDEN_FUNC(_rest32gpr_14) lwz 14,-72(11)
HIDDEN_FUNC(_rest32gpr_15) lwz 15,-68(11)
HIDDEN_FUNC(_rest32gpr_16) lwz 16,-64(11)
HIDDEN_FUNC(_rest32gpr_17) lwz 17,-60(11)
HIDDEN_FUNC(_rest32gpr_18) lwz 18,-56(11)
HIDDEN_FUNC(_rest32gpr_19) lwz 19,-52(11)
HIDDEN_FUNC(_rest32gpr_20) lwz 20,-48(11)
HIDDEN_FUNC(_rest32gpr_21) lwz 21,-44(11)
HIDDEN_FUNC(_rest32gpr_22) lwz 22,-40(11)
HIDDEN_FUNC(_rest32gpr_23) lwz 23,-36(11)
HIDDEN_FUNC(_rest32gpr_24) lwz 24,-32(11)
HIDDEN_FUNC(_rest32gpr_25) lwz 25,-28(11)
HIDDEN_FUNC(_rest32gpr_26) lwz 26,-24(11)
HIDDEN_FUNC(_rest32gpr_27) lwz 27,-20(11)
HIDDEN_FUNC(_rest32gpr_28) lwz 28,-16(11)
HIDDEN_FUNC(_rest32gpr_29) lwz 29,-12(11)
HIDDEN_FUNC(_rest32gpr_30) lwz 30,-8(11)
HIDDEN_FUNC(_rest32gpr_31) lwz 31,-4(11)
blr
FUNC_END(_rest32gpr_31)
FUNC_END(_rest32gpr_30)
FUNC_END(_rest32gpr_29)
FUNC_END(_rest32gpr_28)
FUNC_END(_rest32gpr_27)
FUNC_END(_rest32gpr_26)
FUNC_END(_rest32gpr_25)
FUNC_END(_rest32gpr_24)
FUNC_END(_rest32gpr_23)
FUNC_END(_rest32gpr_22)
FUNC_END(_rest32gpr_21)
FUNC_END(_rest32gpr_20)
FUNC_END(_rest32gpr_19)
FUNC_END(_rest32gpr_18)
FUNC_END(_rest32gpr_17)
FUNC_END(_rest32gpr_16)
FUNC_END(_rest32gpr_15)
FUNC_END(_rest32gpr_14)
#endif
4ms/metamodule-plugin-sdk | 3,117 | plugin-libc/libgcc/config/rs6000/darwin-tramp.S
/* Special support for trampolines
*
* Copyright (C) 1996-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include "darwin-asm.h"
/* Set up trampolines. */
.text
.align LOG2_GPR_BYTES
Ltrampoline_initial:
mflr r0
bl 1f
Lfunc = .-Ltrampoline_initial
.g_long 0 /* will be replaced with function address */
Lchain = .-Ltrampoline_initial
.g_long 0 /* will be replaced with static chain */
1: mflr r11
lg r12,0(r11) /* function address */
mtlr r0
mtctr r12
lg r11,GPR_BYTES(r11) /* static chain */
bctr
trampoline_size = .-Ltrampoline_initial
/* R3 = stack address to store trampoline */
/* R4 = length of trampoline area */
/* R5 = function address */
/* R6 = static chain */
.globl ___trampoline_setup
___trampoline_setup:
mflr r0 /* save return address */
bcl 20,31,LCF0 /* load up Ltrampoline_initial into r7 */
LCF0:
mflr r11
addis r7,r11,ha16(LTRAMP-LCF0)
lg r7,lo16(LTRAMP-LCF0)(r7)
subi r7,r7,4
li r8,trampoline_size /* verify trampoline big enough */
cmpg cr1,r8,r4
srwi r4,r4,2 /* # words to move (insns always 4-byte) */
addi r9,r3,-4 /* adjust pointer for lgu */
mtctr r4
blt cr1,Labort
mtlr r0
/* Copy the instructions to the stack */
Lmove:
lwzu r10,4(r7)
stwu r10,4(r9)
bdnz Lmove
/* Store correct function and static chain */
stg r5,Lfunc(r3)
stg r6,Lchain(r3)
/* Now flush both caches */
mtctr r4
Lcache:
icbi 0,r3
dcbf 0,r3
addi r3,r3,4
bdnz Lcache
/* Ensure cache-flushing has finished. */
sync
isync
/* Make the stack executable. */
b ___enable_execute_stack
Labort:
#ifdef __DYNAMIC__
bl L_abort$stub
.data
.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
.align 2
L_abort$stub:
.indirect_symbol _abort
mflr r0
bcl 20,31,L0$_abort
L0$_abort:
mflr r11
addis r11,r11,ha16(L_abort$lazy_ptr-L0$_abort)
mtlr r0
lgu r12,lo16(L_abort$lazy_ptr-L0$_abort)(r11)
mtctr r12
bctr
.data
.lazy_symbol_pointer
L_abort$lazy_ptr:
.indirect_symbol _abort
.g_long dyld_stub_binding_helper
#else
bl _abort
#endif
.data
.align LOG2_GPR_BYTES
LTRAMP:
.g_long Ltrampoline_initial
4ms/metamodule-plugin-sdk | 4,651 | plugin-libc/libgcc/config/rs6000/tramp.S
/* Special support for trampolines
*
* Copyright (C) 1996-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Set up trampolines. */
.section ".text"
#include "ppc-asm.h"
#include "config.h"
#ifndef __powerpc64__
.type trampoline_initial,@object
.align 2
trampoline_initial:
mflr r0
bcl 20,31,1f
.Lfunc = .-trampoline_initial
.long 0 /* will be replaced with function address */
.Lchain = .-trampoline_initial
.long 0 /* will be replaced with static chain */
1: mflr r11
mtlr r0
lwz r0,0(r11) /* function address */
lwz r11,4(r11) /* static chain */
mtctr r0
bctr
trampoline_size = .-trampoline_initial
.size trampoline_initial,trampoline_size
/* R3 = stack address to store trampoline */
/* R4 = length of trampoline area */
/* R5 = function address */
/* R6 = static chain */
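/* Illustrative use (hypothetical C): for a nested function the
   compiler emits, roughly,
     __trampoline_setup (tramp, 40, &nested_fn, &enclosing_frame);
   where "tramp" points at writable memory of at least
   trampoline_size bytes (40 here: eight instructions plus the two
   embedded data words). */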
FUNC_START(__trampoline_setup)
.cfi_startproc
mflr r0 /* save return address */
bcl 20,31,.LCF0 /* load up trampoline_initial into r7 */
.cfi_register lr,r0
.LCF0:
mflr r11
addi r7,r11,trampoline_initial-4-.LCF0 /* trampoline address -4 */
cmpwi cr1,r4,trampoline_size /* verify that the trampoline is big enough */
srwi r4,r4,2 /* # words to move */
addi r9,r3,-4 /* adjust pointer for lwzu */
mtctr r4
blt cr1,.Labort
mtlr r0
/* Copy the instructions to the stack */
.Lmove:
lwzu r10,4(r7)
stwu r10,4(r9)
bdnz .Lmove
/* Store correct function and static chain */
stw r5,.Lfunc(r3)
stw r6,.Lchain(r3)
/* Now flush both caches */
mtctr r4
.Lcache:
icbi 0,r3
dcbf 0,r3
addi r3,r3,4
bdnz .Lcache
/* Finally synchronize things & return */
sync
isync
blr
.Labort:
/* Use a longcall sequence in the non-PIC case on VxWorks, to prevent
possible relocation errors if this is module-loaded very far away from
the 'abort' entry point. */
#if defined (__VXWORKS__) && ! (defined __PIC__ || defined __pic__)
lis r11,JUMP_TARGET(abort)@ha
addic r11,r11,JUMP_TARGET(abort)@l
mtlr r11
blrl
#else
#if (defined __PIC__ || defined __pic__) && defined HAVE_AS_REL16
bcl 20,31,1f
1: mflr r30
addis r30,r30,_GLOBAL_OFFSET_TABLE_-1b@ha
addi r30,r30,_GLOBAL_OFFSET_TABLE_-1b@l
#endif
bl JUMP_TARGET(abort)
#endif
.cfi_endproc
FUNC_END(__trampoline_setup)
#elif _CALL_ELF == 2
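/* Under ELFv2, an indirect call leaves the function address in r12,
   which is how the copied trampoline below locates its two embedded
   doublewords. */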
.type trampoline_initial,@object
.align 3
trampoline_initial:
ld r11,.Lchain(r12)
ld r12,.Lfunc(r12)
mtctr r12
bctr
.Lfunc = .-trampoline_initial
.quad 0 /* will be replaced with function address */
.Lchain = .-trampoline_initial
.quad 0 /* will be replaced with static chain */
trampoline_size = .-trampoline_initial
.size trampoline_initial,trampoline_size
/* R3 = stack address to store trampoline */
/* R4 = length of trampoline area */
/* R5 = function address */
/* R6 = static chain */
#ifndef __PCREL__
.pushsection ".toc","aw"
.LC0:
.quad trampoline_initial-8
.popsection
#endif
FUNC_START(__trampoline_setup)
.cfi_startproc
#ifdef __PCREL__
pla 7,(trampoline_initial-8)@pcrel
#else
addis 7,2,.LC0@toc@ha
ld 7,.LC0@toc@l(7) /* trampoline address -8 */
#endif
cmpwi cr1,r4,trampoline_size /* verify that the trampoline is big enough */
srwi r4,r4,3 /* # doublewords to move */
addi r9,r3,-8 /* adjust pointer for stdu */
mtctr r4
blt cr1,.Labort
/* Copy the instructions to the stack */
.Lmove:
ldu r10,8(r7)
stdu r10,8(r9)
bdnz .Lmove
/* Store correct function and static chain */
std r5,.Lfunc(r3)
std r6,.Lchain(r3)
/* Now flush both caches */
mtctr r4
.Lcache:
icbi 0,r3
dcbf 0,r3
addi r3,r3,8
bdnz .Lcache
/* Finally synchronize things & return */
sync
isync
blr
.Labort:
bl JUMP_TARGET(abort)
nop
.cfi_endproc
FUNC_END(__trampoline_setup)
#endif
4ms/metamodule-plugin-sdk | 2,262 | plugin-libc/libgcc/config/rs6000/crtsavevr.S
/* Routines for saving vector registers.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Written by Alan Modra, IBM
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* On PowerPC64 Linux, these functions are provided by the linker. */
#ifndef __powerpc64__
#undef __ALTIVEC__
#define __ALTIVEC__ 1
#include "ppc-asm.h"
/* Called with r0 pointing just beyond the end of the vector save area. */
.machine altivec
.section ".text"
CFI_STARTPROC
HIDDEN_FUNC(_savevr_20)
li r11,-192
stvx v20,r11,r0
HIDDEN_FUNC(_savevr_21)
li r11,-176
stvx v21,r11,r0
HIDDEN_FUNC(_savevr_22)
li r11,-160
stvx v22,r11,r0
HIDDEN_FUNC(_savevr_23)
li r11,-144
stvx v23,r11,r0
HIDDEN_FUNC(_savevr_24)
li r11,-128
stvx v24,r11,r0
HIDDEN_FUNC(_savevr_25)
li r11,-112
stvx v25,r11,r0
HIDDEN_FUNC(_savevr_26)
li r11,-96
stvx v26,r11,r0
HIDDEN_FUNC(_savevr_27)
li r11,-80
stvx v27,r11,r0
HIDDEN_FUNC(_savevr_28)
li r11,-64
stvx v28,r11,r0
HIDDEN_FUNC(_savevr_29)
li r11,-48
stvx v29,r11,r0
HIDDEN_FUNC(_savevr_30)
li r11,-32
stvx v30,r11,r0
HIDDEN_FUNC(_savevr_31)
li r11,-16
stvx v31,r11,r0
blr
FUNC_END(_savevr_31)
FUNC_END(_savevr_30)
FUNC_END(_savevr_29)
FUNC_END(_savevr_28)
FUNC_END(_savevr_27)
FUNC_END(_savevr_26)
FUNC_END(_savevr_25)
FUNC_END(_savevr_24)
FUNC_END(_savevr_23)
FUNC_END(_savevr_22)
FUNC_END(_savevr_21)
FUNC_END(_savevr_20)
CFI_ENDPROC
#endif
4ms/metamodule-plugin-sdk | 2,473 | plugin-libc/libgcc/config/rs6000/e500crtsav32gpr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for saving 32-bit integer registers, called by the compiler. */
/* "Bare" versions that simply return to their caller. */
HIDDEN_FUNC(_save32gpr_14) stw 14,-72(11)
HIDDEN_FUNC(_save32gpr_15) stw 15,-68(11)
HIDDEN_FUNC(_save32gpr_16) stw 16,-64(11)
HIDDEN_FUNC(_save32gpr_17) stw 17,-60(11)
HIDDEN_FUNC(_save32gpr_18) stw 18,-56(11)
HIDDEN_FUNC(_save32gpr_19) stw 19,-52(11)
HIDDEN_FUNC(_save32gpr_20) stw 20,-48(11)
HIDDEN_FUNC(_save32gpr_21) stw 21,-44(11)
HIDDEN_FUNC(_save32gpr_22) stw 22,-40(11)
HIDDEN_FUNC(_save32gpr_23) stw 23,-36(11)
HIDDEN_FUNC(_save32gpr_24) stw 24,-32(11)
HIDDEN_FUNC(_save32gpr_25) stw 25,-28(11)
HIDDEN_FUNC(_save32gpr_26) stw 26,-24(11)
HIDDEN_FUNC(_save32gpr_27) stw 27,-20(11)
HIDDEN_FUNC(_save32gpr_28) stw 28,-16(11)
HIDDEN_FUNC(_save32gpr_29) stw 29,-12(11)
HIDDEN_FUNC(_save32gpr_30) stw 30,-8(11)
HIDDEN_FUNC(_save32gpr_31) stw 31,-4(11)
blr
FUNC_END(_save32gpr_31)
FUNC_END(_save32gpr_30)
FUNC_END(_save32gpr_29)
FUNC_END(_save32gpr_28)
FUNC_END(_save32gpr_27)
FUNC_END(_save32gpr_26)
FUNC_END(_save32gpr_25)
FUNC_END(_save32gpr_24)
FUNC_END(_save32gpr_23)
FUNC_END(_save32gpr_22)
FUNC_END(_save32gpr_21)
FUNC_END(_save32gpr_20)
FUNC_END(_save32gpr_19)
FUNC_END(_save32gpr_18)
FUNC_END(_save32gpr_17)
FUNC_END(_save32gpr_16)
FUNC_END(_save32gpr_15)
FUNC_END(_save32gpr_14)
#endif
plugin-libc/libgcc/config/rs6000/crtsavgpr.S
/*
* Special support for eabi and SVR4
*
* Copyright (C) 1995-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
* 64-bit support written by David Edelsohn
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Do any initializations needed for the eabi environment */
.section ".text"
#include "ppc-asm.h"
/* On PowerPC64 Linux, these functions are provided by the linker. */
#ifndef __powerpc64__
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer save area. */
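/* Each entry stores one register and falls through to the ones below it,
   so a single "bl _savegpr_28" stores r28..r31 at -16(11)..-4(11).  The
   caller must have saved its own lr first, since bl overwrites it. */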
CFI_STARTPROC
HIDDEN_FUNC(_savegpr_14) stw 14,-72(11) /* save gp registers */
HIDDEN_FUNC(_savegpr_15) stw 15,-68(11)
HIDDEN_FUNC(_savegpr_16) stw 16,-64(11)
HIDDEN_FUNC(_savegpr_17) stw 17,-60(11)
HIDDEN_FUNC(_savegpr_18) stw 18,-56(11)
HIDDEN_FUNC(_savegpr_19) stw 19,-52(11)
HIDDEN_FUNC(_savegpr_20) stw 20,-48(11)
HIDDEN_FUNC(_savegpr_21) stw 21,-44(11)
HIDDEN_FUNC(_savegpr_22) stw 22,-40(11)
HIDDEN_FUNC(_savegpr_23) stw 23,-36(11)
HIDDEN_FUNC(_savegpr_24) stw 24,-32(11)
HIDDEN_FUNC(_savegpr_25) stw 25,-28(11)
HIDDEN_FUNC(_savegpr_26) stw 26,-24(11)
HIDDEN_FUNC(_savegpr_27) stw 27,-20(11)
HIDDEN_FUNC(_savegpr_28) stw 28,-16(11)
HIDDEN_FUNC(_savegpr_29) stw 29,-12(11)
HIDDEN_FUNC(_savegpr_30) stw 30,-8(11)
HIDDEN_FUNC(_savegpr_31) stw 31,-4(11)
blr
FUNC_END(_savegpr_31)
FUNC_END(_savegpr_30)
FUNC_END(_savegpr_29)
FUNC_END(_savegpr_28)
FUNC_END(_savegpr_27)
FUNC_END(_savegpr_26)
FUNC_END(_savegpr_25)
FUNC_END(_savegpr_24)
FUNC_END(_savegpr_23)
FUNC_END(_savegpr_22)
FUNC_END(_savegpr_21)
FUNC_END(_savegpr_20)
FUNC_END(_savegpr_19)
FUNC_END(_savegpr_18)
FUNC_END(_savegpr_17)
FUNC_END(_savegpr_16)
FUNC_END(_savegpr_15)
FUNC_END(_savegpr_14)
CFI_ENDPROC
#endif
plugin-libc/libgcc/config/rs6000/e500crtsavg32gpr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for saving 32-bit integer registers, called by the compiler. */
/* "GOT" versions that load the address of the GOT into lr before returning. */
HIDDEN_FUNC(_save32gpr_14_g) stw 14,-72(11)
HIDDEN_FUNC(_save32gpr_15_g) stw 15,-68(11)
HIDDEN_FUNC(_save32gpr_16_g) stw 16,-64(11)
HIDDEN_FUNC(_save32gpr_17_g) stw 17,-60(11)
HIDDEN_FUNC(_save32gpr_18_g) stw 18,-56(11)
HIDDEN_FUNC(_save32gpr_19_g) stw 19,-52(11)
HIDDEN_FUNC(_save32gpr_20_g) stw 20,-48(11)
HIDDEN_FUNC(_save32gpr_21_g) stw 21,-44(11)
HIDDEN_FUNC(_save32gpr_22_g) stw 22,-40(11)
HIDDEN_FUNC(_save32gpr_23_g) stw 23,-36(11)
HIDDEN_FUNC(_save32gpr_24_g) stw 24,-32(11)
HIDDEN_FUNC(_save32gpr_25_g) stw 25,-28(11)
HIDDEN_FUNC(_save32gpr_26_g) stw 26,-24(11)
HIDDEN_FUNC(_save32gpr_27_g) stw 27,-20(11)
HIDDEN_FUNC(_save32gpr_28_g) stw 28,-16(11)
HIDDEN_FUNC(_save32gpr_29_g) stw 29,-12(11)
HIDDEN_FUNC(_save32gpr_30_g) stw 30,-8(11)
HIDDEN_FUNC(_save32gpr_31_g) stw 31,-4(11)
b _GLOBAL_OFFSET_TABLE_-4
FUNC_END(_save32gpr_31_g)
FUNC_END(_save32gpr_30_g)
FUNC_END(_save32gpr_29_g)
FUNC_END(_save32gpr_28_g)
FUNC_END(_save32gpr_27_g)
FUNC_END(_save32gpr_26_g)
FUNC_END(_save32gpr_25_g)
FUNC_END(_save32gpr_24_g)
FUNC_END(_save32gpr_23_g)
FUNC_END(_save32gpr_22_g)
FUNC_END(_save32gpr_21_g)
FUNC_END(_save32gpr_20_g)
FUNC_END(_save32gpr_19_g)
FUNC_END(_save32gpr_18_g)
FUNC_END(_save32gpr_17_g)
FUNC_END(_save32gpr_16_g)
FUNC_END(_save32gpr_15_g)
FUNC_END(_save32gpr_14_g)
#endif
plugin-libc/libgcc/config/rs6000/crtresgpr.S
/*
* Special support for eabi and SVR4
*
* Copyright (C) 1995-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
* 64-bit support written by David Edelsohn
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Do any initializations needed for the eabi environment */
.section ".text"
#include "ppc-asm.h"
/* On PowerPC64 Linux, these functions are provided by the linker. */
#ifndef __powerpc64__
/* Routines for restoring integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer restore area. */
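/* Mirror of the _savegpr_* routines: "bl _restgpr_28" reloads r28..r31
   from -16(11)..-4(11) and returns, leaving the epilogue to restore lr
   and pop the frame itself (the _x flavors fold those steps in, as the
   fp versions in crtresxfpr.S below show). */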
CFI_STARTPROC
HIDDEN_FUNC(_restgpr_14) lwz 14,-72(11) /* restore gp registers */
HIDDEN_FUNC(_restgpr_15) lwz 15,-68(11)
HIDDEN_FUNC(_restgpr_16) lwz 16,-64(11)
HIDDEN_FUNC(_restgpr_17) lwz 17,-60(11)
HIDDEN_FUNC(_restgpr_18) lwz 18,-56(11)
HIDDEN_FUNC(_restgpr_19) lwz 19,-52(11)
HIDDEN_FUNC(_restgpr_20) lwz 20,-48(11)
HIDDEN_FUNC(_restgpr_21) lwz 21,-44(11)
HIDDEN_FUNC(_restgpr_22) lwz 22,-40(11)
HIDDEN_FUNC(_restgpr_23) lwz 23,-36(11)
HIDDEN_FUNC(_restgpr_24) lwz 24,-32(11)
HIDDEN_FUNC(_restgpr_25) lwz 25,-28(11)
HIDDEN_FUNC(_restgpr_26) lwz 26,-24(11)
HIDDEN_FUNC(_restgpr_27) lwz 27,-20(11)
HIDDEN_FUNC(_restgpr_28) lwz 28,-16(11)
HIDDEN_FUNC(_restgpr_29) lwz 29,-12(11)
HIDDEN_FUNC(_restgpr_30) lwz 30,-8(11)
HIDDEN_FUNC(_restgpr_31) lwz 31,-4(11)
blr
FUNC_END(_restgpr_31)
FUNC_END(_restgpr_30)
FUNC_END(_restgpr_29)
FUNC_END(_restgpr_28)
FUNC_END(_restgpr_27)
FUNC_END(_restgpr_26)
FUNC_END(_restgpr_25)
FUNC_END(_restgpr_24)
FUNC_END(_restgpr_23)
FUNC_END(_restgpr_22)
FUNC_END(_restgpr_21)
FUNC_END(_restgpr_20)
FUNC_END(_restgpr_19)
FUNC_END(_restgpr_18)
FUNC_END(_restgpr_17)
FUNC_END(_restgpr_16)
FUNC_END(_restgpr_15)
FUNC_END(_restgpr_14)
CFI_ENDPROC
#endif
plugin-libc/libgcc/config/rs6000/e500crtsavg64gpr.S
/*
* Special support for e500 eabi and SVR4
*
* Copyright (C) 2008-2022 Free Software Foundation, Inc.
* Written by Nathan Froyd
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
.section ".text"
#include "ppc-asm.h"
#ifdef __SPE__
/* Routines for saving 64-bit integer registers, called by the compiler. */
/* "GOT" versions that load the address of the GOT into lr before returning. */
HIDDEN_FUNC(_save64gpr_14_g) evstdd 14,0(11)
HIDDEN_FUNC(_save64gpr_15_g) evstdd 15,8(11)
HIDDEN_FUNC(_save64gpr_16_g) evstdd 16,16(11)
HIDDEN_FUNC(_save64gpr_17_g) evstdd 17,24(11)
HIDDEN_FUNC(_save64gpr_18_g) evstdd 18,32(11)
HIDDEN_FUNC(_save64gpr_19_g) evstdd 19,40(11)
HIDDEN_FUNC(_save64gpr_20_g) evstdd 20,48(11)
HIDDEN_FUNC(_save64gpr_21_g) evstdd 21,56(11)
HIDDEN_FUNC(_save64gpr_22_g) evstdd 22,64(11)
HIDDEN_FUNC(_save64gpr_23_g) evstdd 23,72(11)
HIDDEN_FUNC(_save64gpr_24_g) evstdd 24,80(11)
HIDDEN_FUNC(_save64gpr_25_g) evstdd 25,88(11)
HIDDEN_FUNC(_save64gpr_26_g) evstdd 26,96(11)
HIDDEN_FUNC(_save64gpr_27_g) evstdd 27,104(11)
HIDDEN_FUNC(_save64gpr_28_g) evstdd 28,112(11)
HIDDEN_FUNC(_save64gpr_29_g) evstdd 29,120(11)
HIDDEN_FUNC(_save64gpr_30_g) evstdd 30,128(11)
HIDDEN_FUNC(_save64gpr_31_g) evstdd 31,136(11)
b _GLOBAL_OFFSET_TABLE_-4
FUNC_END(_save64gpr_31_g)
FUNC_END(_save64gpr_30_g)
FUNC_END(_save64gpr_29_g)
FUNC_END(_save64gpr_28_g)
FUNC_END(_save64gpr_27_g)
FUNC_END(_save64gpr_26_g)
FUNC_END(_save64gpr_25_g)
FUNC_END(_save64gpr_24_g)
FUNC_END(_save64gpr_23_g)
FUNC_END(_save64gpr_22_g)
FUNC_END(_save64gpr_21_g)
FUNC_END(_save64gpr_20_g)
FUNC_END(_save64gpr_19_g)
FUNC_END(_save64gpr_18_g)
FUNC_END(_save64gpr_17_g)
FUNC_END(_save64gpr_16_g)
FUNC_END(_save64gpr_15_g)
FUNC_END(_save64gpr_14_g)
#endif
plugin-libc/libgcc/config/rs6000/crtresxfpr.S
/*
* Special support for eabi and SVR4
*
* Copyright (C) 1995-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
* 64-bit support written by David Edelsohn
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Do any initializations needed for the eabi environment */
.machine ppc
.section ".text"
#include "ppc-asm.h"
/* On PowerPC64 Linux, these functions are provided by the linker. */
#ifndef __powerpc64__
/* Routines for restoring floating point registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the floating point save area. */
/* In addition to restoring the fp registers, it will return to the caller's */
/* caller */
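/* Fall through the lfd chain from the chosen entry; _restfpr_31_x then
   reloads lr from 4(11), restores f31, copies the frame base in r11 back
   into r1, and blr returns directly to the caller's caller.  That lets
   the compiler tail-branch here (b, not bl) as the final epilogue
   instruction. */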
CFI_STARTPROC
CFI_DEF_CFA_REGISTER (11)
CFI_OFFSET (65, 4)
CFI_OFFSET (46, -144)
CFI_OFFSET (47, -136)
CFI_OFFSET (48, -128)
CFI_OFFSET (49, -120)
CFI_OFFSET (50, -112)
CFI_OFFSET (51, -104)
CFI_OFFSET (52, -96)
CFI_OFFSET (53, -88)
CFI_OFFSET (54, -80)
CFI_OFFSET (55, -72)
CFI_OFFSET (56, -64)
CFI_OFFSET (57, -56)
CFI_OFFSET (58, -48)
CFI_OFFSET (59, -40)
CFI_OFFSET (60, -32)
CFI_OFFSET (61, -24)
CFI_OFFSET (62, -16)
CFI_OFFSET (63, -8)
HIDDEN_FUNC(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */
CFI_RESTORE (46)
HIDDEN_FUNC(_restfpr_15_x) lfd 15,-136(11)
CFI_RESTORE (47)
HIDDEN_FUNC(_restfpr_16_x) lfd 16,-128(11)
CFI_RESTORE (48)
HIDDEN_FUNC(_restfpr_17_x) lfd 17,-120(11)
CFI_RESTORE (49)
HIDDEN_FUNC(_restfpr_18_x) lfd 18,-112(11)
CFI_RESTORE (50)
HIDDEN_FUNC(_restfpr_19_x) lfd 19,-104(11)
CFI_RESTORE (51)
HIDDEN_FUNC(_restfpr_20_x) lfd 20,-96(11)
CFI_RESTORE (52)
HIDDEN_FUNC(_restfpr_21_x) lfd 21,-88(11)
CFI_RESTORE (53)
HIDDEN_FUNC(_restfpr_22_x) lfd 22,-80(11)
CFI_RESTORE (54)
HIDDEN_FUNC(_restfpr_23_x) lfd 23,-72(11)
CFI_RESTORE (55)
HIDDEN_FUNC(_restfpr_24_x) lfd 24,-64(11)
CFI_RESTORE (56)
HIDDEN_FUNC(_restfpr_25_x) lfd 25,-56(11)
CFI_RESTORE (57)
HIDDEN_FUNC(_restfpr_26_x) lfd 26,-48(11)
CFI_RESTORE (58)
HIDDEN_FUNC(_restfpr_27_x) lfd 27,-40(11)
CFI_RESTORE (59)
HIDDEN_FUNC(_restfpr_28_x) lfd 28,-32(11)
CFI_RESTORE (60)
HIDDEN_FUNC(_restfpr_29_x) lfd 29,-24(11)
CFI_RESTORE (61)
HIDDEN_FUNC(_restfpr_30_x) lfd 30,-16(11)
CFI_RESTORE (62)
HIDDEN_FUNC(_restfpr_31_x) lwz 0,4(11)
lfd 31,-8(11)
CFI_RESTORE (63)
mtlr 0
CFI_RESTORE (65)
mr 1,11
CFI_DEF_CFA_REGISTER (1)
blr
FUNC_END(_restfpr_31_x)
FUNC_END(_restfpr_30_x)
FUNC_END(_restfpr_29_x)
FUNC_END(_restfpr_28_x)
FUNC_END(_restfpr_27_x)
FUNC_END(_restfpr_26_x)
FUNC_END(_restfpr_25_x)
FUNC_END(_restfpr_24_x)
FUNC_END(_restfpr_23_x)
FUNC_END(_restfpr_22_x)
FUNC_END(_restfpr_21_x)
FUNC_END(_restfpr_20_x)
FUNC_END(_restfpr_19_x)
FUNC_END(_restfpr_18_x)
FUNC_END(_restfpr_17_x)
FUNC_END(_restfpr_16_x)
FUNC_END(_restfpr_15_x)
FUNC_END(_restfpr_14_x)
CFI_ENDPROC
#endif
plugin-libc/libgcc/config/rs6000/sol-cn.S
# crtn.s for sysv4
# Copyright (C) 1996-2022 Free Software Foundation, Inc.
# Written By Michael Meissner
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just supplies labeled ending points for the .got* and other
# special sections. It is linked in last after other modules.
.ident "GNU C scrtn.s"
#ifndef __powerpc64__
# Default versions of exception handling register/deregister
.weak _ex_register
.weak _ex_deregister
.set _ex_register,0
.set _ex_deregister,0
# End list of C++ constructors
.section ".ctors","aw"
.globl __CTOR_END__
.type __CTOR_END__,@object
__CTOR_END__:
# End list of C++ destructors
.section ".dtors","aw"
.weak __DTOR_END__
.type __DTOR_END__,@object
__DTOR_END__:
.section ".text"
.globl _ex_text1
_ex_text1:
.section ".exception_ranges","aw"
.globl _ex_range1
_ex_range1:
# Tail of _init used for static constructors
.section ".init","ax"
lwz %r0,16(%r1)
lwz %r31,12(%r1)
mtlr %r0
addi %r1,%r1,16
blr
# Tail of _fini used for static destructors
.section ".fini","ax"
lwz %r0,16(%r1)
lwz %r31,12(%r1)
mtlr %r0
addi %r1,%r1,16
blr
#endif
plugin-libc/libgcc/config/rs6000/eabi.S
/*
* Special support for eabi and SVR4
*
* Copyright (C) 1995-2022 Free Software Foundation, Inc.
* Written By Michael Meissner
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
/* Do any initializations needed for the eabi environment */
.section ".text"
#include "ppc-asm.h"
#ifndef __powerpc64__
.section ".got2","aw"
.align 2
.LCTOC1 = . /* +32768 */
/* Table of addresses */
.Ltable = .-.LCTOC1
.long .LCTOC1 /* address we are really at */
.Lsda = .-.LCTOC1
.long _SDA_BASE_ /* address of the first small data area */
.Lsdas = .-.LCTOC1
.long __SDATA_START__ /* start of .sdata/.sbss section */
.Lsdae = .-.LCTOC1
.long __SBSS_END__ /* end of .sdata/.sbss section */
.Lsda2 = .-.LCTOC1
.long _SDA2_BASE_ /* address of the second small data area */
.Lsda2s = .-.LCTOC1
.long __SDATA2_START__ /* start of .sdata2/.sbss2 section */
.Lsda2e = .-.LCTOC1
.long __SBSS2_END__ /* end of .sdata2/.sbss2 section */
#ifdef _RELOCATABLE
.Lgots = .-.LCTOC1
.long __GOT_START__ /* Global offset table start */
.Lgotm1 = .-.LCTOC1
.long _GLOBAL_OFFSET_TABLE_-4 /* end of GOT ptrs before BLCL + 3 reserved words */
.Lgotm2 = .-.LCTOC1
.long _GLOBAL_OFFSET_TABLE_+12 /* start of GOT ptrs after BLCL + 3 reserved words */
.Lgote = .-.LCTOC1
.long __GOT_END__ /* Global offset table end */
.Lgot2s = .-.LCTOC1
.long __GOT2_START__ /* -mrelocatable GOT pointers start */
.Lgot2e = .-.LCTOC1
.long __GOT2_END__ /* -mrelocatable GOT pointers end */
.Lfixups = .-.LCTOC1
.long __FIXUP_START__ /* start of .fixup section */
.Lfixupe = .-.LCTOC1
.long __FIXUP_END__ /* end of .fixup section */
.Lctors = .-.LCTOC1
.long __CTOR_LIST__ /* start of .ctor section */
.Lctore = .-.LCTOC1
.long __CTOR_END__ /* end of .ctor section */
.Ldtors = .-.LCTOC1
.long __DTOR_LIST__ /* start of .dtor section */
.Ldtore = .-.LCTOC1
.long __DTOR_END__ /* end of .dtor section */
.Lexcepts = .-.LCTOC1
.long __EXCEPT_START__ /* start of .gcc_except_table section */
.Lexcepte = .-.LCTOC1
.long __EXCEPT_END__ /* end of .gcc_except_table section */
.Linit = .-.LCTOC1
.long .Linit_p /* address of variable to say we've been called */
.text
.align 2
.Lptr:
.long .LCTOC1-.Laddr /* PC relative pointer to .got2 */
#endif
.data
.align 2
.Linit_p:
.long 0
.text
FUNC_START(__eabi)
/* Eliminate -mrelocatable code if not -mrelocatable, so that this file can
be assembled with other assemblers than GAS. */
#ifndef _RELOCATABLE
addis 10,0,.Linit_p@ha /* init flag */
addis 11,0,.LCTOC1@ha /* load address of .LCTOC1 */
lwz 9,.Linit_p@l(10) /* init flag */
addi 11,11,.LCTOC1@l
cmplwi 2,9,0 /* init flag != 0? */
bnelr 2 /* return now, if we've been called already */
stw 1,.Linit_p@l(10) /* store a nonzero value in the done flag */
#else /* -mrelocatable */
mflr 0
bl .Laddr /* get current address */
.Laddr:
mflr 12 /* real address of .Laddr */
lwz 11,(.Lptr-.Laddr)(12) /* linker generated address of .LCTOC1 */
add 11,11,12 /* correct to real pointer */
lwz 12,.Ltable(11) /* get linker's idea of where .Laddr is */
lwz 10,.Linit(11) /* address of init flag */
subf. 12,12,11 /* calculate difference */
lwzx 9,10,12 /* done flag */
cmplwi 2,9,0 /* init flag != 0? */
mtlr 0 /* restore in case branch was taken */
bnelr 2 /* return now, if we've been called already */
stwx 1,10,12 /* store a nonzero value in the done flag */
beq+ 0,.Lsdata /* skip if we don't need to relocate */
/* We need to relocate the .got2 pointers. */
lwz 3,.Lgot2s(11) /* GOT2 pointers start */
lwz 4,.Lgot2e(11) /* GOT2 pointers end */
add 3,12,3 /* adjust pointers */
add 4,12,4
bl FUNC_NAME(__eabi_convert) /* convert pointers in .got2 section */
/* Fixup the .ctor section for static constructors */
lwz 3,.Lctors(11) /* constructors pointers start */
lwz 4,.Lctore(11) /* constructors pointers end */
bl FUNC_NAME(__eabi_convert) /* convert constructors */
/* Fixup the .dtor section for static destructors */
lwz 3,.Ldtors(11) /* destructors pointers start */
lwz 4,.Ldtore(11) /* destructors pointers end */
bl FUNC_NAME(__eabi_convert) /* convert destructors */
/* Fixup the .gcc_except_table section for G++ exceptions */
lwz 3,.Lexcepts(11) /* exception table pointers start */
lwz 4,.Lexcepte(11) /* exception table pointers end */
bl FUNC_NAME(__eabi_convert) /* convert exceptions */
/* Fixup the addresses in the GOT below _GLOBAL_OFFSET_TABLE_-4 */
lwz 3,.Lgots(11) /* GOT table pointers start */
lwz 4,.Lgotm1(11) /* GOT table pointers below _GLOBAL_OFFSET_TABLE_-4 */
bl FUNC_NAME(__eabi_convert) /* convert lower GOT */
/* Fixup the addresses in the GOT above _GLOBAL_OFFSET_TABLE_+12 */
lwz 3,.Lgotm2(11) /* GOT table pointers above _GLOBAL_OFFSET_TABLE_+12 */
lwz 4,.Lgote(11) /* GOT table pointers end */
bl FUNC_NAME(__eabi_convert) /* convert upper GOT */
/* Fixup any user initialized pointers now (the compiler drops pointers to */
/* each of the relocs that it does in the .fixup section). */
.Lfix:
lwz 3,.Lfixups(11) /* fixup pointers start */
lwz 4,.Lfixupe(11) /* fixup pointers end */
bl FUNC_NAME(__eabi_uconvert) /* convert user initialized pointers */
.Lsdata:
mtlr 0 /* restore link register */
#endif /* _RELOCATABLE */
/* Only load up register 13 if there is a .sdata and/or .sbss section */
lwz 3,.Lsdas(11) /* start of .sdata/.sbss section */
lwz 4,.Lsdae(11) /* end of .sdata/.sbss section */
cmpw 1,3,4 /* .sdata/.sbss section non-empty? */
beq- 1,.Lsda2l /* skip loading r13 */
lwz 13,.Lsda(11) /* load r13 with _SDA_BASE_ address */
/* Only load up register 2 if there is a .sdata2 and/or .sbss2 section */
.Lsda2l:
lwz 3,.Lsda2s(11) /* start of .sdata2/.sbss2 section */
lwz 4,.Lsda2e(11) /* end of .sdata2/.sbss2 section */
cmpw 1,3,4 /* .sdata2/.sbss2 section non-empty? */
beq+ 1,.Ldone /* skip loading r2 */
lwz 2,.Lsda2(11) /* load r2 with _SDA2_BASE_ address */
/* Done adjusting pointers, return by way of doing the C++ global constructors. */
.Ldone:
b FUNC_NAME(__init) /* do any C++ global constructors (which returns to caller) */
FUNC_END(__eabi)
/* Special subroutine to convert a bunch of pointers directly.
r0 has original link register
r3 has low pointer to convert
r4 has high pointer to convert
r5 .. r10 are scratch registers
r11 has the address of .LCTOC1 in it.
r12 has the value to add to each pointer
r13 .. r31 are unchanged */
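/* A rough C sketch of what follows (delta is passed in r12):
	void __eabi_convert (char **low, char **high, long delta)
	{
	  for (char **p = low; p < high; p++)
	    if (*p)
	      *p += delta;
	}
   Null entries are left alone so unset slots stay null. */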
#ifdef _RELOCATABLE
FUNC_START(__eabi_convert)
cmplw 1,3,4 /* any pointers to convert? */
subf 5,3,4 /* calculate number of words to convert */
bclr 4,4 /* return if no pointers */
srawi 5,5,2
addi 3,3,-4 /* start-4 for use with lwzu */
mtctr 5
.Lcvt:
lwzu 6,4(3) /* pointer to convert */
cmpwi 0,6,0
beq- .Lcvt2 /* if pointer is null, don't convert */
add 6,6,12 /* convert pointer */
stw 6,0(3)
.Lcvt2:
bdnz+ .Lcvt
blr
FUNC_END(__eabi_convert)
/* Special subroutine to convert the pointers the user has initialized. The
compiler has placed the address of the initialized pointer into the .fixup
section.
r0 has original link register
r3 has low pointer to convert
r4 has high pointer to convert
r5 .. r10 are scratch registers
r11 has the address of .LCTOC1 in it.
r12 has the value to add to each pointer
r13 .. r31 are unchanged */
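/* A rough C sketch (delta in r12); each .fixup word is itself an
   unrelocated pointer to a pointer needing adjustment:
	void __eabi_uconvert (char ***low, char ***high, long delta)
	{
	  for (char ***p = low; p < high; p++)
	    {
	      char **fixup = (char **) ((char *) *p + delta);
	      *p = fixup;		-- store the adjusted address back
	      *fixup += delta;		-- adjust the pointer it points to
	    }
	}
   */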
FUNC_START(__eabi_uconvert)
cmplw 1,3,4 /* any pointers to convert? */
subf 5,3,4 /* calculate number of words to convert */
bclr 4,4 /* return if no pointers */
srawi 5,5,2
addi 3,3,-4 /* start-4 for use with lwzu */
mtctr 5
.Lucvt:
lwzu 6,4(3) /* next pointer to pointer to convert */
add 6,6,12 /* adjust pointer */
lwz 7,0(6) /* get the pointer it points to */
stw 6,0(3) /* store adjusted pointer */
add 7,7,12 /* adjust */
stw 7,0(6)
bdnz+ .Lucvt
blr
FUNC_END(__eabi_uconvert)
#endif
#endif
plugin-libc/libgcc/config/alpha/vms-dwarf2eh.S
/* VMS dwarf2 exception handling section sequentializer.
Copyright (C) 2002-2022 Free Software Foundation, Inc.
Contributed by Douglas B. Rupp (rupp@gnat.com).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Linking with this file forces the Dwarf2 EH section to be
individually loaded by the VMS linker and read by the unwinder. */
.section .eh_frame,NOWRT
.align 0
plugin-libc/libgcc/config/alpha/qrnnd.S
# Alpha 21064 __udiv_qrnnd
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# This file is part of GCC.
# The GNU MP Library is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#ifdef __ELF__
.section .note.GNU-stack,""
#endif
.set noreorder
.set noat
.text
.globl __udiv_qrnnd
.ent __udiv_qrnnd
#ifdef __VMS__
__udiv_qrnnd..en:
.frame $29,0,$26,0
.prologue
#else
__udiv_qrnnd:
.frame $30,0,$26,0
.prologue 0
#endif
#define cnt $2
#define tmp $3
#define rem_ptr $16
#define n1 $17
#define n0 $18
#define d $19
#define qb $20
#define AT $at
ldiq cnt,16
blt d,$largedivisor
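# Classic shift-and-subtract division: 16 trips around a 4-way unrolled
# loop give 64 single-bit steps.  Each step shifts n1:n0 left one bit,
# sets qb when d <= n1, conditionally subtracts d from n1, and ORs the
# quotient bit qb into the freshly vacated low bit of n0.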
$loop1: cmplt n0,0,tmp
addq n1,n1,n1
bis n1,tmp,n1
addq n0,n0,n0
cmpule d,n1,qb
subq n1,d,tmp
cmovne qb,tmp,n1
bis n0,qb,n0
cmplt n0,0,tmp
addq n1,n1,n1
bis n1,tmp,n1
addq n0,n0,n0
cmpule d,n1,qb
subq n1,d,tmp
cmovne qb,tmp,n1
bis n0,qb,n0
cmplt n0,0,tmp
addq n1,n1,n1
bis n1,tmp,n1
addq n0,n0,n0
cmpule d,n1,qb
subq n1,d,tmp
cmovne qb,tmp,n1
bis n0,qb,n0
cmplt n0,0,tmp
addq n1,n1,n1
bis n1,tmp,n1
addq n0,n0,n0
cmpule d,n1,qb
subq n1,d,tmp
cmovne qb,tmp,n1
bis n0,qb,n0
subq cnt,1,cnt
bgt cnt,$loop1
stq n1,0(rem_ptr)
bis $31,n0,$0
ret $31,($26),1
$largedivisor:
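# d has its top bit set, so shifting n1 left in the loop above could drop
# a significant bit.  Halve the problem instead: divide n/2 (saving the
# lost low bit in $4) by $5 = ceil(d/2), then reconstruct the remainder
# and, for odd d, correct the quotient and remainder at $Odd.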
and n0,1,$4
srl n0,1,n0
sll n1,63,tmp
or tmp,n0,n0
srl n1,1,n1
and d,1,$6
srl d,1,$5
addq $5,$6,$5
$loop2: cmplt n0,0,tmp
addq n1,n1,n1
bis n1,tmp,n1
addq n0,n0,n0
cmpule $5,n1,qb
subq n1,$5,tmp
cmovne qb,tmp,n1
bis n0,qb,n0
cmplt n0,0,tmp
addq n1,n1,n1
bis n1,tmp,n1
addq n0,n0,n0
cmpule $5,n1,qb
subq n1,$5,tmp
cmovne qb,tmp,n1
bis n0,qb,n0
cmplt n0,0,tmp
addq n1,n1,n1
bis n1,tmp,n1
addq n0,n0,n0
cmpule $5,n1,qb
subq n1,$5,tmp
cmovne qb,tmp,n1
bis n0,qb,n0
cmplt n0,0,tmp
addq n1,n1,n1
bis n1,tmp,n1
addq n0,n0,n0
cmpule $5,n1,qb
subq n1,$5,tmp
cmovne qb,tmp,n1
bis n0,qb,n0
subq cnt,1,cnt
bgt cnt,$loop2
addq n1,n1,n1
addq $4,n1,n1
bne $6,$Odd
stq n1,0(rem_ptr)
bis $31,n0,$0
ret $31,($26),1
$Odd:
/* q' in n0. r' in n1 */
addq n1,n0,n1
cmpult n1,n0,tmp # tmp := carry from addq
subq n1,d,AT
addq n0,tmp,n0
cmovne tmp,AT,n1
cmpult n1,d,tmp
addq n0,1,AT
cmoveq tmp,AT,n0
subq n1,d,AT
cmoveq tmp,AT,n1
stq n1,0(rem_ptr)
bis $31,n0,$0
ret $31,($26),1
#ifdef __VMS__
.link
.align 3
__udiv_qrnnd:
.pdesc __udiv_qrnnd..en,null
#endif
.end __udiv_qrnnd
plugin-libc/libgcc/config/alpha/vms-dwarf2.S
/* VMS dwarf2 section sequentializer.
Copyright (C) 2001-2022 Free Software Foundation, Inc.
Contributed by Douglas B. Rupp (rupp@gnat.com).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Linking with this file forces Dwarf2 debug sections to be
sequentially loaded by the VMS linker, enabling GDB to read them. */
.section .debug_abbrev,NOWRT
.align 0
.globl $dwarf2.debug_abbrev
$dwarf2.debug_abbrev:
.section .debug_aranges,NOWRT
.align 0
.globl $dwarf2.debug_aranges
$dwarf2.debug_aranges:
.section .debug_frame,NOWRT
.align 0
.globl $dwarf2.debug_frame
$dwarf2.debug_frame:
.section .debug_info,NOWRT
.align 0
.globl $dwarf2.debug_info
$dwarf2.debug_info:
.section .debug_line,NOWRT
.align 0
.globl $dwarf2.debug_line
$dwarf2.debug_line:
.section .debug_loc,NOWRT
.align 0
.globl $dwarf2.debug_loc
$dwarf2.debug_loc:
.section .debug_macinfo,NOWRT
.align 0
.globl $dwarf2.debug_macinfo
$dwarf2.debug_macinfo:
.section .debug_pubnames,NOWRT
.align 0
.globl $dwarf2.debug_pubnames
$dwarf2.debug_pubnames:
.section .debug_str,NOWRT
.align 0
.globl $dwarf2.debug_str
$dwarf2.debug_str:
.section .debug_zzzzzz,NOWRT
.align 0
.globl $dwarf2.debug_zzzzzz
$dwarf2.debug_zzzzzz:
plugin-libc/libgcc/config/bfin/crtn.S
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 2005-2022 Free Software Foundation, Inc.
Contributed by Analog Devices.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* This file supplies function epilogues for the .init and .fini sections.
* It is linked in after all other files.
*/
.ident "GNU C crtn.o"
.section .init
unlink;
#if defined __ID_SHARED_LIB__
P5 = [SP++];
#elif defined __BFIN_FDPIC__
P3 = [SP++];
#endif
rts;
.section .fini
unlink;
#if defined __ID_SHARED_LIB__
P5 = [SP++];
#elif defined __BFIN_FDPIC__
P3 = [SP++];
#endif
rts;
plugin-libc/libgcc/config/bfin/crtlibid.S
/* Provide a weak definition of the library ID, for the benefit of certain
configure scripts.
Copyright (C) 2005-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.ident "GNU C crtlibid.o"
.weak _current_shared_library_p5_offset_
.set _current_shared_library_p5_offset_, 0
plugin-libc/libgcc/config/bfin/crti.S
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 2005-2022 Free Software Foundation, Inc.
Contributed by Analog Devices.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* This file just supplies function prologues for the .init and .fini
* sections. It is linked in before crtbegin.o.
*/
.ident "GNU C crti.o"
.section .init
.globl __init
.type __init,@function
__init:
#if defined __ID_SHARED_LIB__
[--SP] = P5;
#elif defined __BFIN_FDPIC__
[--SP] = P3;
#endif
LINK 12;
#if defined __ID_SHARED_LIB__
P5 = [P5 + _current_shared_library_p5_offset_]
#endif
.section .fini
.globl __fini
.type __fini,@function
__fini:
#if defined __ID_SHARED_LIB__
[--SP] = P5;
#elif defined __BFIN_FDPIC__
[--SP] = P3;
#endif
LINK 12;
#if defined __ID_SHARED_LIB__
P5 = [P5 + _current_shared_library_p5_offset_]
#endif
plugin-libc/libgcc/config/bfin/lib1funcs.S
/* libgcc functions for Blackfin.
Copyright (C) 2005-2022 Free Software Foundation, Inc.
Contributed by Analog Devices.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifdef L_divsi3
.text
.align 2
.global ___divsi3;
.type ___divsi3, STT_FUNC;
___divsi3:
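/* Signed division via ___udivsi3: take absolute values, record in R7
   whether exactly one operand was negative, and negate the quotient at
   the end if so. */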
[--SP] = RETS;
[--SP] = R7;
R2 = -R0;
CC = R0 < 0;
IF CC R0 = R2;
R7 = CC;
R2 = -R1;
CC = R1 < 0;
IF CC R1 = R2;
R2 = CC;
R7 = R7 ^ R2;
CALL ___udivsi3;
CC = R7;
R1 = -R0;
IF CC R0 = R1;
R7 = [SP++];
RETS = [SP++];
RTS;
#endif
#ifdef L_modsi3
.align 2
.global ___modsi3;
.type ___modsi3, STT_FUNC;
___modsi3:
[--SP] = RETS;
[--SP] = R0;
[--SP] = R1;
CALL ___divsi3;
R2 = [SP++];
R1 = [SP++];
R2 *= R0;
R0 = R1 - R2;
RETS = [SP++];
RTS;
#endif
#ifdef L_udivsi3
.align 2
.global ___udivsi3;
.type ___udivsi3, STT_FUNC;
___udivsi3:
P0 = 32;
LSETUP (0f, 1f) LC0 = P0;
/* upper half of dividend */
R3 = 0;
0:
/* The first time round in the loop we shift in garbage, but since we
perform 33 shifts, it doesn't matter. */
R0 = ROT R0 BY 1;
R3 = ROT R3 BY 1;
R2 = R3 - R1;
CC = R3 < R1 (IU);
1:
/* Last instruction of the loop. */
IF ! CC R3 = R2;
/* Shift in the last bit. */
R0 = ROT R0 BY 1;
/* R0 is the result, R3 contains the remainder. */
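/* CC was set on each step where the trial subtract failed, so the bits
   rotated into R0 are the complements of the quotient bits; the
   inversion below corrects them all at once. */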
R0 = ~ R0;
RTS;
#endif
#ifdef L_umodsi3
.align 2
.global ___umodsi3;
.type ___umodsi3, STT_FUNC;
___umodsi3:
[--SP] = RETS;
CALL ___udivsi3;
R0 = R3;
RETS = [SP++];
RTS;
#endif
#ifdef L_umulsi3_highpart
.align 2
.global ___umulsi3_highpart;
.type ___umulsi3_highpart, STT_FUNC;
___umulsi3_highpart:
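/* Unsigned 32x32->64 high word by 16-bit halves, all in FU mode:
   ((aH<<16) + aL) * ((bH<<16) + bL) >> 32
	= aH*bH + (aH*bL + aL*bH + (aL*bL >> 16)) >> 16
   A1 accumulates the middle terms; A0 holds the top product. */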
A1 = R1.L * R0.L (FU);
A1 = A1 >> 16;
A0 = R1.H * R0.H, A1 += R1.L * R0.H (FU);
A1 += R0.L * R1.H (FU);
A1 = A1 >> 16;
A0 += A1;
R0 = A0 (FU);
RTS;
#endif
#ifdef L_smulsi3_highpart
.align 2
.global ___smulsi3_highpart;
.type ___smulsi3_highpart, STT_FUNC;
___smulsi3_highpart:
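/* Same decomposition as ___umulsi3_highpart, but the high halves carry
   the sign: the H*H product is plain signed (IS), the H*L cross terms
   use mixed signed-by-unsigned mode (M), and the final accumulator shift
   is arithmetic (>>>) so the sign extends into the result. */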
A1 = R1.L * R0.L (FU);
A1 = A1 >> 16;
A0 = R0.H * R1.H, A1 += R0.H * R1.L (IS,M);
A1 += R1.H * R0.L (IS,M);
A1 = A1 >>> 16;
R0 = (A0 += A1);
RTS;
#endif
#ifdef L_muldi3
.align 2
.global ___muldi3;
.type ___muldi3, STT_FUNC;
/*
R1:R0 * R3:R2
= R1.h:R1.l:R0.h:R0.l * R3.h:R3.l:R2.h:R2.l
[X] = (R1.h * R3.h) * 2^96
[X] + (R1.h * R3.l + R1.l * R3.h) * 2^80
[X] + (R1.h * R2.h + R1.l * R3.l + R3.h * R0.h) * 2^64
[T1] + (R1.h * R2.l + R3.h * R0.l + R1.l * R2.h + R3.l * R0.h) * 2^48
[T2] + (R1.l * R2.l + R3.l * R0.l + R0.h * R2.h) * 2^32
[T3] + (R0.l * R2.h + R2.l * R0.h) * 2^16
[T4] + (R0.l * R2.l)
We can discard the first three lines marked "X" since we produce
only a 64 bit result. So, we need ten 16-bit multiplies.
Individual mul-acc results:
[E1] = R1.h * R2.l + R3.h * R0.l + R1.l * R2.h + R3.l * R0.h
[E2] = R1.l * R2.l + R3.l * R0.l + R0.h * R2.h
[E3] = R0.l * R2.h + R2.l * R0.h
[E4] = R0.l * R2.l
We also need to add high parts from lower-level results to higher ones:
E[n]c = E[n] + (E[n+1]c >> 16), where E4c := E4
One interesting property is that all parts of the result that depend
on the sign of the multiplication are discarded. Those would be the
multiplications involving R1.h and R3.h, but only the top 16 bit of
the 32 bit result depend on the sign, and since R1.h and R3.h only
occur in E1, the top half of these results is cut off.
So, we can just use FU mode for all of the 16-bit multiplies, and
ignore questions of when to use mixed mode. */
___muldi3:
/* [SP] technically is part of the caller's frame, but we can
use it as scratch space. */
A0 = R2.H * R1.L, A1 = R2.L * R1.H (FU) || R3 = [SP + 12]; /* E1 */
A0 += R3.H * R0.L, A1 += R3.L * R0.H (FU) || [SP] = R4; /* E1 */
A0 += A1; /* E1 */
R4 = A0.w;
A0 = R0.l * R3.l (FU); /* E2 */
A0 += R2.l * R1.l (FU); /* E2 */
A1 = R2.L * R0.L (FU); /* E4 */
R3 = A1.w;
A1 = A1 >> 16; /* E3c */
A0 += R2.H * R0.H, A1 += R2.L * R0.H (FU); /* E2, E3c */
A1 += R0.L * R2.H (FU); /* E3c */
R0 = A1.w;
A1 = A1 >> 16; /* E2c */
A0 += A1; /* E2c */
R1 = A0.w;
/* low(result) = low(E3c):low(E4) */
R0 = PACK (R0.l, R3.l);
/* high(result) = E2c + (E1 << 16) */
R1.h = R1.h + R4.l (NS) || R4 = [SP];
RTS;
.size ___muldi3, .-___muldi3
#endif
plugin-libc/libgcc/config/ft32/crtn.S
# crtn.S for FT32
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
.file "crtn.S"
.section ".init"
return
.section ".fini"
return
plugin-libc/libgcc/config/ft32/crti-hw.S
.global _start
_start:
# START Interrupt Vector Table [[
jmp __PMSIZE-4 # RESET Vector
jmp interrupt_33 # Watchdog reset vector
jmp interrupt_0
jmp interrupt_1
jmp interrupt_2
jmp interrupt_3
jmp interrupt_4
jmp interrupt_5
jmp interrupt_6
jmp interrupt_7
jmp interrupt_8
jmp interrupt_9
jmp interrupt_10
jmp interrupt_11
jmp interrupt_12
jmp interrupt_13
jmp interrupt_14
jmp interrupt_15
jmp interrupt_16
jmp interrupt_17
jmp interrupt_18
jmp interrupt_19
jmp interrupt_20
jmp interrupt_21
jmp interrupt_22
jmp interrupt_23
jmp interrupt_24
jmp interrupt_25
jmp interrupt_26
jmp interrupt_27
jmp interrupt_28
jmp interrupt_29
jmp interrupt_30
jmp interrupt_31
jmp __PMSIZE-8 # Interrupt vector 32 (NMI)
# ]] END Interrupt Vector Table
codestart:
jmp init
.global _exithook
_exithook: # Debugger uses '_exithook' at 0x90 to catch program exit
return
init:
ldk $sp,__RAMSIZE
# Disable all interrupts
lda $r1,0x10000
lshr $r1,$r1,20
cmp $r1,0x90
ldk $r1,0x100e3 # FT900 IRQ Control Register
jmpc z,1f
ldk $r1,0x10123 # FT930 IRQ Control Register
1:
ldk $r4,0x80
sti.b $r1,0,$r4
# Initialize DATA by copying from program memory
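# lpmi.l reads a word of program memory and sti.l stores it to RAM, so
# this loop copies the initialized-data image from __data_load_start up
# to __data_load_end into RAM starting at address 0 (see the binutils
# note on $r2 below).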
ldk.l $r4,__data_load_start
ldk.l $r1,__data_load_end
ldk.l $r2,0 # Will use __data after binutils patch
jmp .dscopy
.dsloop:
# Copy PM[$r4] to RAM $r2
lpmi.l $r3,$r4,0
sti.l $r2,0,$r3
add.l $r4,$r4,4
add.l $r2,$r2,4
.dscopy:
cmp.l $r4,$r1
jmpc lt,.dsloop
# Zero BSS
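# The memset instruction is applied in chunks of at most 32764 bytes
# (presumably the largest count it accepts in one go); the loop clears
# whole chunks and the final memset after label 2 handles the remainder.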
ldk.l $r4,_bss_start
ldk.l $r2,_end
sub.l $r2,$r2,$r4
ldk.l $r1,0
ldk $r3,32764
1:
cmp $r2,$r3
jmpc lt,2f
memset $r4,$r1,$r3
add $r4,$r4,$r3
sub $r2,$r2,$r3
jmp 1b
2:
memset $r4,$r1,$r2
sub.l $sp,$sp,24 # Space for the caller argument frame
call main
.equ EXITEXIT,0x1fffc
.global _exit
_exit:
sta.l EXITEXIT,$r0 # simulator end of test
jmp _exithook
# Macro to construct the interrupt stub code.
# it just saves r0, loads r0 with the int vector
# and branches to interrupt_common.
.macro inth i=0
interrupt_\i:
push $r0 # {
lda $r0,(vector_table + 4 * \i)
jmp interrupt_common
.endm
inth 0
inth 1
inth 2
inth 3
inth 4
inth 5
inth 6
inth 7
inth 8
inth 9
inth 10
inth 11
inth 12
inth 13
inth 14
inth 15
inth 16
inth 17
inth 18
inth 19
inth 20
inth 21
inth 22
inth 23
inth 24
inth 25
inth 26
inth 27
inth 28
inth 29
inth 30
inth 31
inth 32
inth 33
# On entry: r0, already saved, holds the handler function
interrupt_common:
push $r1 # {
push $r2 # {
push $r3 # {
push $r4 # {
push $r5 # {
push $r6 # {
push $r7 # {
push $r8 # {
push $r9 # {
push $r10 # {
push $r11 # {
push $r12 # {
push $cc # {
calli $r0
pop $cc # }
pop $r12 # }
pop $r11 # }
pop $r10 # }
pop $r9 # }
pop $r8 # }
pop $r7 # }
pop $r6 # }
pop $r5 # }
pop $r4 # }
pop $r3 # }
pop $r2 # }
pop $r1 # }
pop $r0 # } matching push in interrupt_0-31 above
reti
# Null function for unassigned interrupt to point at
.global nullvector
nullvector:
return
.section .data
.global vector_table
vector_table:
.rept 34
.long nullvector
.endr
.section .text
.global __gxx_personality_sj0
__gxx_personality_sj0: