repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
4ms/metamodule-plugin-sdk
| 2,251
|
plugin-libc/libgcc/config/arc/ieee-754/gtsf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: r0, r1
output: c, z flags to be used for 'hi' condition
clobber: r12,flags */
/* For NaNs, bit 22.. bit 30 must be set. */
#if 0 /* DEBUG */
.global __gtsf2
.balign 4
FUNC(__gtsf2)
__gtsf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __gtsf2_c` push_s r0
mov r11,r0` pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __gtsf2_asm` ld.ab r10,[sp,4]
pop_s blink
brgt.d r11,0,0f
ld.ab r11,[sp,4]
jls [blink]
bl abort
0: jhi [blink]
bl abort
ENDFUNC(__gtsf2)
#define __gtsf2 __gtsf2_asm
#endif /* DEBUG */
/* Single-precision FP '>' comparison.  Sets the c and z flags so the
   caller tests the 'hi' condition (see interface comment above).
   Any NaN operand must leave 'hi' false.  */
.global __gtsf2
.balign 4
HIDDEN_FUNC(__gtsf2)
__gtsf2:
or.f r12,r0,r1 ; n flag <- set iff at least one operand is negative
bmi.d .Lneg
bmsk_s r12,r12,23 ; (delay slot) r12 = (r0|r1) & 0xffffff
/* Both operands non-negative.  add1 adds 2*operand to the masked low
   bits; given the NaN format above (bits 22..30 set), the carry out
   is set exactly when the operand is a NaN.  */
add1.f 0,r12,r0 ; check for NaN
add1.cc.f r12,r12,r1
j_s.d [blink]
cmp.cc r0,r1 ; no NaN: IEEE order == unsigned order for non-negatives
.balign 4
.Lneg: breq.d r0,0,.L0 ; r0 == +0 needs the -0 special case below
add1.f 0,r12,r0 ; check for NaN
add1.cc.f r12,r12,r1
j_s.d [blink]
cmp.cc r1,r0 ; operands not both non-negative: order reverses
.balign 4
.L0: bxor.f 0,r1,31 ; check for -0 (z iff r1 == 0x80000000)
j_s.d [blink]
cmp.hi r1,r0 ; skipped when r1 == -0, so +0 > -0 stays false
ENDFUNC(__gtsf2)
|
4ms/metamodule-plugin-sdk
| 3,558
|
plugin-libc/libgcc/config/arc/ieee-754/truncdfsf2.S
|
/* Copyright (C) 2006-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
FUNC(__truncdfsf2)
.global __truncdfsf2
.balign 4
__truncdfsf2:
push_s blink
push_s r0
bl.d __truncdfsf2_c
push_s r1
mov_s r2,r0
pop_s r1
ld r0,[sp]
bl.d __truncdfsf2_asm
st r2,[sp]
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
and r12,r0,r1
bic.f 0,0x7f800000,r12
bne 0f
bmsk.f 0,r0,22
bmsk.ne.f r1,r1,22
jne_s [blink] ; both NaN -> OK
0: bl abort
ENDFUNC(__truncdfsf2)
#define __truncdfsf2 __truncdfsf2_asm
#endif /* DEBUG */
/* Truncate IEEE double (DBL0H:DBL0L) to single precision, result in r0.
   Rounds to nearest; guard/sticky information is folded into the low
   bits before the final right shift.  */
.global __truncdfsf2
.balign 4
FUNC(__truncdfsf2)
__truncdfsf2:
lsr r2,DBL0H,20 ; r2 = sign (bit 11) + 11-bit double exponent
asl_s DBL0H,DBL0H,12 ; DBL0H = high fraction bits, left-aligned
sub r12,r2,0x380 ; rebias: 0x3ff - 0x7f = 0x380
bclr.f r3,r12,11 ; r3 = rebiased exponent, sign bit stripped
brhs r3,0xff,.Lill_exp ; out of single-precision normal range
beq_l .Ldenorm0 ; rebiased exponent == 0: denormal boundary case
asl_s r12,r12,23 ; sign+exponent into final position
tst DBL0L, \
0x2fffffff /* Check if msb guard bit wants rounding up. */
lsr_s DBL0L,DBL0L,28 ; top guard bits of the low word
lsr_s DBL0H,DBL0H,8 ; 24 fraction bits incl. one extra guard bit
add.ne DBL0L,DBL0L,1 ; fold sticky/lsb test into the increment
add_s DBL0H,DBL0H,DBL0L ; apply rounding
lsr_s DBL0H,DBL0H ; drop the guard bit; carry may bump the exponent
btst_s r2,11 ; test the sign
add_s r0,DBL0H,r12 ; combine fraction with sign+exponent
j_s.d [blink]
bxor.ne r0,r0,31 ; negative input: set the result sign bit
.balign 4
.Lill_exp:
bbit1 r2,10,.Linf_nan ; large exponent -> +-Inf / NaN
bmsk_s r12,r12,9
rsub.f r12,r12,8+0x400-32 ; Go from 9 to 1 guard bit in MSW.
bhs_s .Lzero ; too small even for a denormal -> +-0
lsr r3,DBL0L,21
rrc DBL0H,DBL0H ; insert leading 1
asl.f 0,DBL0L,8 ; check lower 24 guard bits
add_s r3,DBL0H,r3
add.pnz r3,r3,1 ; assemble fraction with compressed guard bits.
lsr r0,r3,r12 ; shift fraction into denormal position
neg_s r12,r12
btst_s r0,1 ; guard bit of the shifted result
asl.eq.f r3,r3,r12 ; guard clear: recover the shifted-out bits
add.ne r0,r0,1 ; round up
btst_s r2,11 ; test the sign
lsr_s r0,r0
j_s.d [blink]
bxor.ne r0,r0,31 ; negative input: set the result sign bit
.Lzero:
lsr_s r2,r2,11 ; isolate the sign
j_s.d [blink]
asl r0,r2,31 ; return +0 / -0
.Ldenorm0:
/* Rebiased exponent is exactly 0: result sits on the denormal
   boundary, so keep the leading 1 explicit and shift one more.  */
asl_s r12,r12,20
tst DBL0L, \
0x5fffffff /* Check if msb guard bit wants rounding up. */
lsr_s DBL0L,DBL0L,29
lsr_s DBL0H,DBL0H,9
add.ne DBL0L,DBL0L,1 ; fold sticky/lsb test into the increment
bset_s DBL0H,DBL0H,23 ; make the implicit leading 1 explicit
add_s DBL0H,DBL0H,DBL0L ; apply rounding
lsr_s DBL0H,DBL0H
j_s.d [blink]
add_l r0,DBL0H,r12
/* We would generally say that NaNs must have a non-zero high fraction part,
but to allow hardware double precision floating point to interoperate
with single precision software floating point, we make an exception here.
The cost is to replace a tst_s DBL0H with an or.f DBL0L,DBL0L,DBL0H .
As we start out unaligned, and there is an odd number of other short insns,
we have a choice of letting this cost us a misalign penalty or
4 more bytes (if we align the code). We choose the former here because
infinity / NaN is not expected to be prevalent in time-critical code. */
.Linf_nan:
or.f DBL0L,DBL0L,DBL0H ; z iff fraction is zero, i.e. input is +-Inf
mov_s r0,1
add.ne r2,r2,1 ; NaN: force the fraction-msb of the result
tst r2,0x7ff
asl.ne r0,r0,23
btst_s r12,11 ; test the sign
neg r0,r0
j_s.d [blink]
bxor.eq r0,r0,31
ENDFUNC(__truncdfsf2)
|
4ms/metamodule-plugin-sdk
| 1,969
|
plugin-libc/libgcc/config/arc/ieee-754/fixunsdfsi.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
FUNC(__fixunsdfsi)
.global __fixunsdfsi
.balign 4
__fixunsdfsi:
push_s blink
push_s r0
bl.d __fixunsdfsi_c
push_s r1
mov_s r2,r0
pop_s r1
ld r0,[sp]
bl.d __fixunsdfsi_asm
st r2,[sp]
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
bl abort
ENDFUNC(__fixunsdfsi)
#define __fixunsdfsi __fixunsdfsi_asm
#endif /* DEBUG */
/* Convert IEEE double (DBL0H:DBL0L) to unsigned 32-bit integer in r0
   (truncating).  */
.global __fixunsdfsi
FUNC(__fixunsdfsi)
.balign 4
__fixunsdfsi:
bbit0 DBL0H,30,.Lret0or1 ; exponent < 0x400, i.e. |x| < 2 -> 0 or 1
lsr r2,DBL0H,20 ; r2 = sign + 11-bit exponent
bmsk_s DBL0H,DBL0H,19 ; isolate high fraction bits
sub_s r2,r2,19; 0x3ff+20-0x400
neg_s r3,r2 ; r3 = complementary (right) shift count
btst_s r3,10
bset_s DBL0H,DBL0H,20 ; make the implicit leading 1 explicit
#ifdef __LITTLE_ENDIAN__
mov.ne DBL0L,DBL0H
asl DBL0H,DBL0H,r2
#else
asl.eq DBL0H,DBL0H,r2
lsr.ne DBL0H,DBL0H,r3
#endif
lsr DBL0L,DBL0L,r3
j_s.d [blink]
add.eq r0,r0,r1 ; combine both shifted halves
.Lret0:
j_s.d [blink]
mov_l r0,0
.Lret0or1:
add_s DBL0H,DBL0H,0x100000 ; exponent+1: bit 30 now set iff |x| >= 1
lsr_s DBL0H,DBL0H,30
j_s.d [blink]
bmsk_l r0,DBL0H,0 ; keep that single bit (drops the sign bit)
ENDFUNC(__fixunsdfsi)
|
4ms/metamodule-plugin-sdk
| 4,862
|
plugin-libc/libgcc/config/arc/ieee-754/divsf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
.global __divsf3
FUNC(__divsf3)
.balign 4
__divsf3:
push_s blink
push_s r1
bl.d __divsf3_c
push_s r0
ld_s r1,[sp,4]
st_s r0,[sp,4]
bl.d __divsf3_asm
pop_s r0
pop_s r1
pop_s blink
cmp r0,r1
#if 1
bne abort
jeq_s [blink]
b abort
#else
bne abort
j_s [blink]
#endif
ENDFUNC(__divsf3)
#define __divsf3 __divsf3_asm
#endif /* DEBUG */
/* Single-precision division, r0 = r0 / r1, built on the ARC divaw
   division-assist instruction (one quotient bit per step).
   r9 is loaded with the exponent mask 0x7f800000 from the literal
   word planted just before the entry point.
   Register roles: r2/r3 fractions, r4/r5 sign+exponent words,
   r10 result sign word, r9 exponent mask, r12 scratch.  */
.balign 4
__divdf3_support: /* This label makes debugger output saner. */
FUNC(__divsf3)
/* fp0 (r0) has a zero exponent field: zero or subnormal.  */
.Ldenorm_fp0:
norm.f r12,r2 ; flag for 0/x -> 0 check
bic.ne.f 0,0x60000000,r1 ; denorm/large number -> 0
beq_s .Lret0_NaN
tst r1,r9
add_s r2,r2,r2
sub_s r12,r12,8 ; normalization shift for the subnormal fraction
asl_s r2,r2,r12
asl_l r12,r12,23
bne.d .Lpast_denorm_fp0
add r5,r5,r12 ; compensate fp1's exponent for the shift
/* r0 is subnormal, r1 is subnormal or 0. */
.balign 4
.Ldenorm_fp1:
norm.f r12,r3 ; flag for x/0 -> Inf check
bic.ne.f 0,0x60000000,r0 ; large number/denorm -> Inf
beq_s .Linf
add_s r3,r3,r3
sub_s r12,r12,8 ; normalization shift for the subnormal fraction
asl_s r3,r3,r12
asl_s r12,r12,23
b.d .Lpast_denorm_fp1
add r4,r4,r12 ; compensate fp0's exponent for the shift
.Lret0_NaN:
bclr.f 0,r1,31 ; 0/0 -> NaN
bic r0,r10,r9 ; signed zero from the result sign word
j_s.d [blink]
sub.eq r0,r0,1
.global __divsf3
.balign 4
.long 0x7f800000 ; exponent mask
__divsf3:
ld r9,[pcl,-4] ; r9 = 0x7f800000 (literal word above)
bmsk r2,r0,22 ; r2 = fraction of fp0
xor r4,r0,r2 ; r4 = sign + exponent of fp0
bmsk r3,r1,22 ; r3 = fraction of fp1
xor r5,r1,r3 ; r5 = sign + exponent of fp1
and r11,r0,r9 ; r11 = exponent field of fp0
breq.d r11,0,.Ldenorm_fp0
xor r10,r4,r5 ; (delay slot) r10 bit 31 = result sign
breq r11,r9,.Linf_nan_fp0 ; fp0 is Inf or NaN
bset_s r2,r2,23 ; make fp0's implicit leading 1 explicit
and r11,r1,r9 ; r11 = exponent field of fp1
breq r11,0,.Ldenorm_fp1
breq r11,r9,.Linf_nan_fp1 ; fp1 is Inf or NaN
.Lpast_denorm_fp0:
bset_s r3,r3,23 ; make fp1's implicit leading 1 explicit
.Lpast_denorm_fp1:
cmp r2,r3 ; will the quotient need one extra shift?
asl_s r2,r2,6+1
asl_s r3,r3,7
add.lo r2,r2,r2 ; frac0 < frac1: pre-shift dividend once more
bclr r8,r9,30 ; exponent bias
bclr.lo r8,r8,23 ; reduce exp by one if fraction is shifted
sub r4,r4,r5 ; exponent difference
add r4,r4,r8 ; rebias
xor.f 0,r10,r4 ; sign mismatch -> exponent over/underflowed
bmi .Linf_denorm
and r12,r4,r9 ; result exponent field
breq r12,0,.Ldenorm
sub_s r2,r2,r3 ; discard implicit 1
/* Long division: divaw produces one quotient bit per step; every 7
   bits the quotient bits are drained from r2 into r0.  */
.Ldiv_23bit:
.rep 6
divaw r2,r2,r3
.endr
breq r12,r9,.Linf ; exponent maxed out -> infinity
bmsk r0,r2,6 ; extract the quotient bits gathered so far
xor_s r2,r2,r0
.Ldiv_17bit:
.rep 7
divaw r2,r2,r3
.endr
asl_s r0,r0,7
bmsk r1,r2,6
xor_s r2,r2,r1
or_s r0,r0,r1 ; append the next 7 quotient bits
.Ldiv_10bit:
.rep 7
divaw r2,r2,r3
.endr
asl_s r0,r0,7
bmsk r1,r2,6
xor_s r2,r2,r1
or_s r0,r0,r1 ; append the next 7 quotient bits
.Ldiv_3bit:
.rep 3
divaw r2,r2,r3
.endr
asl_s r0,r0,3
.Ldiv_0bit:
divaw r1,r2,r3 ; final step yields the guard bit + remainder info
bmsk_s r2,r2,2
tst r1,-0x7e ; 0xffffff82, test for rest or odd
bmsk_s r1,r1,0
add_s r0,r0,r2 ; assemble fraction
add_s r0,r0,r4 ; add in sign & exponent
j_s.d [blink]
add.ne r0,r0,r1 ; round to nearest / even
.balign 4
.Linf_nan_fp0:
bic.f 0,r9,r1 ; fp1 Inf -> result NaN
bic r1,r5,r9 ; fp1 sign
sub.eq r1,r1,1
j_s.d [blink]
xor_s r0,r0,r1
.Linf_nan_fp1:
bic r0,r4,r9 ; fp0 sign
bmsk.f 0,r1,22 ; x/inf -> 0, x/nan -> nan
xor.eq r1,r1,r9
j_s.d [blink]
xor_s r0,r0,r1
.Linf:
j_s.d [blink]
or r0,r10,r9 ; signed infinity
.Lret_r4:
j_s.d [blink]
mov_s r0,r4
.balign 4
/* Exponent over/underflow: decide between infinity and a denormal.  */
.Linf_denorm:
add.f r12,r4,r4
asr_l r12,r12,24 ; r12 = (negative) result exponent
bpl .Linf
max r12,r12,-24 ; anything below 2^-149 flushes to zero anyway
/* Denormal result: enter the divaw chain at the right point via the
   byte offset table; the pcl offset 42 must equal .Ldenorm_tab-.  */
.Ldenorm:
add r1,pcl,42; .Ldenorm_tab-.
ldb_s r12,[r12,r1] ; table lookup, indexed by (negative) exponent
mov_s r0,0
lsr_s r2,r2
sub_s r1,r1,r12
j_s.d [r1] ; jump into the division chain
bic r4,r10,r9 ; (delay slot) r4 = signed zero for the result
.byte .Ldenorm_tab-.Lret_r4
.byte .Ldenorm_tab-.Ldiv_0bit
.byte .Ldenorm_tab-.Ldiv_3bit-8
.byte .Ldenorm_tab-.Ldiv_3bit-4
.byte .Ldenorm_tab-.Ldiv_3bit
.byte .Ldenorm_tab-.Ldiv_10bit-24
.byte .Ldenorm_tab-.Ldiv_10bit-20
.byte .Ldenorm_tab-.Ldiv_10bit-16
.byte .Ldenorm_tab-.Ldiv_10bit-12
.byte .Ldenorm_tab-.Ldiv_10bit-8
.byte .Ldenorm_tab-.Ldiv_10bit-4
.byte .Ldenorm_tab-.Ldiv_10bit
.byte .Ldenorm_tab-.Ldiv_17bit-24
.byte .Ldenorm_tab-.Ldiv_17bit-20
.byte .Ldenorm_tab-.Ldiv_17bit-16
.byte .Ldenorm_tab-.Ldiv_17bit-12
.byte .Ldenorm_tab-.Ldiv_17bit-8
.byte .Ldenorm_tab-.Ldiv_17bit-4
.byte .Ldenorm_tab-.Ldiv_17bit
.byte .Ldenorm_tab-.Ldiv_23bit-20
.byte .Ldenorm_tab-.Ldiv_23bit-16
.byte .Ldenorm_tab-.Ldiv_23bit-12
.byte .Ldenorm_tab-.Ldiv_23bit-8
.byte .Ldenorm_tab-.Ldiv_23bit-4
.Ldenorm_tab:
.byte .Ldenorm_tab-.Ldiv_23bit
ENDFUNC(__divsf3)
|
4ms/metamodule-plugin-sdk
| 2,281
|
plugin-libc/libgcc/config/arc/ieee-754/uneqdf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: DBL0, DBL1
output: z flag
clobber: r12, flags
For NaNs, bit 19.. bit 30 of the high word must be set. */
#if 0 /* DEBUG */
.global __uneqdf2
.balign 4
FUNC(__uneqdf2)
__uneqdf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __eqdf2_c` push_s r0
push_s r0` ld_s r0, [sp,4]` ld_s r1, [sp,8]` ld_s r2,[sp,12]
bl.d __unorddf2_c` ld_s r3,[sp,16]
ld.ab r11,[sp,4]` tst r0,r0` mov.ne r11,0
pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __uneqdf2_asm` ld.ab r10,[sp,4]
pop_s blink
breq.d r11,0,0f
ld.ab r11,[sp,4]
jne_s [blink]
bl abort
0: jeq_s [blink]
bl abort
ENDFUNC(__uneqdf2)
#define __uneqdf2 __uneqdf2_asm
#endif /* DEBUG */
/* Double-precision 'unordered or equal': sets z iff DBL0 == DBL1
   (with +0 == -0) or at least one operand is a NaN.  */
.global __uneqdf2
.balign 4
HIDDEN_FUNC(__uneqdf2)
__uneqdf2:
cmp_s DBL0H,DBL1H
cmp.eq DBL0L,DBL1L
jeq_s [blink] ; bitwise equal -> equal (z already set)
or r12,DBL0H,DBL1H
or.f 0,DBL0L,DBL1L ; z iff both low words are zero
bclr.eq.f r12,r12,31 ; then z iff high words are only sign bits (+-0)
jeq_s [blink]
/* Not equal: z iff one of the operands is a NaN
   (per the format above: bits 19..30 of the high word set).  */
mov_s r12, \
0x7ff80000
bic.f 0,r12,DBL0H ; z iff DBL0 has all NaN bits set
j_s.d [blink]
bic.ne.f r12,r12,DBL1H ; otherwise z iff DBL1 has them
ENDFUNC(__uneqdf2)
|
4ms/metamodule-plugin-sdk
| 2,139
|
plugin-libc/libgcc/config/arc/ieee-754/uneqsf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: r0, r1
output: z flag
clobber: r12, flags
For NaNs, bit 22 .. bit 30 must be set. */
#if 0 /* DEBUG */
.global __uneqsf2
.balign 4
FUNC(__uneqsf2)
__uneqsf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __eqsf2_c` push_s r0
push_s r0` ld_s r0, [sp,4]
bl.d __unordsf2_c` ld_s r1,[sp,8]
ld.ab r11,[sp,4]` tst r0,r0` mov.ne r11,0
pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __uneqsf2_asm` ld.ab r10,[sp,4]
pop_s blink
breq.d r11,0,0f
ld.ab r11,[sp,4]
jne_s [blink]
bl abort
0: jeq_s [blink]
bl abort
ENDFUNC(__uneqsf2)
#define __uneqsf2 __uneqsf2_asm
#endif /* DEBUG */
/* Single-precision 'unordered or equal': sets z iff r0 == r1
   (with +0 == -0) or at least one operand is a NaN
   (per the format above: bits 22..30 set).  */
.global __uneqsf2
.balign 4
HIDDEN_FUNC(__uneqsf2)
__uneqsf2:
mov_s r12, \
0x7fc00000
bic.f 0,r12,r0 ; z iff r0 has all NaN bits set
bic.ne.f r12,r12,r1 ; otherwise z iff r1 has them
or r12,r0,r1
bmsk.ne.f r12,r12,30 ; no NaN: z iff both operands are +-0
j_s.d [blink]
cmp.ne r0,r1 ; no NaN, not both zero: z iff bitwise equal
ENDFUNC(__uneqsf2)
|
4ms/metamodule-plugin-sdk
| 1,746
|
plugin-libc/libgcc/config/arc/ieee-754/fixsfsi.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
.global __fixsfsi
FUNC(__fixsfsi)
.balign 4
__fixsfsi:
push_s blink
bl.d __fixsfsi_c
push_s r0
ld_s r1,[sp]
st_s r0,[sp]
bl.d __fixsfsi_asm
mov_s r0,r1
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
bl abort
ENDFUNC(__fixsfsi)
#define __fixsfsi __fixsfsi_asm
#endif /* DEBUG */
/* Convert single-precision float in r0 to signed 32-bit integer in r0
   (truncating).  */
.global __fixsfsi
FUNC(__fixsfsi)
.balign 4
__fixsfsi:
bbit0 r0,30,.Lret0or1 ; exponent < 0x80, i.e. |x| < 2 -> 0 or +-1
lsr r2,r0,23 ; r2 = sign + 8-bit exponent
bmsk_s r0,r0,22 ; isolate the fraction
bset_s r0,r0,23 ; make the implicit leading 1 explicit
sub_s r2,r2,22;0x7f+23-0x80
asl.f 0,r2,24 ; n <- shift direction, c <- sign of the input
neg r3,r2 ; r3 = complementary (right) shift count
asl.mi r0,r0,r2 ; large exponent: shift fraction left
lsr.pl r0,r0,r3 ; small exponent: shift fraction right
j_s.d [blink]
neg.cs r0,r0 ; negative input: negate the result
.Lret0or1:
add.f r0,r0,0x800000 ; exponent+1: bit 30 now set iff |x| >= 1
lsr_s r0,r0,30
bmsk_s r0,r0,0 ; keep that single bit (drops the sign bit)
j_s.d [blink]
neg.mi r0,r0 ; negative input: return -1 instead of 1
ENDFUNC(__fixsfsi)
|
4ms/metamodule-plugin-sdk
| 2,274
|
plugin-libc/libgcc/config/arc/ieee-754/floatsisf.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
.global __floatsisf
FUNC(__floatsisf)
.balign 4
__floatsisf:
push_s blink
bl.d __floatsisf_c
push_s r0
ld_s r1,[sp]
st_s r0,[sp]
bl.d __floatsisf_asm
mov_s r0,r1
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
bl abort
ENDFUNC(__floatsisf)
.global __floatunsisf
FUNC(__floatunsisf)
.balign 4
__floatunsisf:
push_s blink
bl.d __floatunsisf_c
push_s r0
ld_s r1,[sp]
st_s r0,[sp]
bl.d __floatunsisf_asm
mov_s r0,r1
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
bl abort
ENDFUNC(__floatunsisf)
#define __floatsisf __floatsisf_asm
#define __floatunsisf __floatunsisf_asm
#endif /* DEBUG */
/* Convert 32-bit integer in r0 to single-precision float in r0.
   __floatunsisf treats r0 as unsigned, __floatsisf as signed; both
   share the normalization/rounding tail at label 0.  */
.global __floatunsisf
.global __floatsisf
FUNC(__floatsisf)
FUNC(__floatunsisf)
.balign 4
__floatunsisf:
lsr_s r2,r0 ; drop bit 0 so norm sees a non-negative value
mov_l r12,0x9d ; 0x7f + 31 - 1
norm r2,r2 ; r2 = normalization shift count
brne_l r0,0,0f
j_s [blink] ; input 0 -> return +0.0 (r0 already 0)
.balign 4
__floatsisf:
abs.f r0,r0 ; r0 = |x|; z <- x == 0; c <- original sign
jeq_s [blink] ; input 0 -> return +0.0
lsr_s r2,r0 ; drop bit 0 so norm sees a non-negative value
mov_s r12,0x9d ; 0x7f + 31 - 1
norm r2,r2 ; r2 = normalization shift count
bset.cs r12,r12,8 ; negative input: include the sign bit in r12
/* Common tail: position the 24 significant bits, round to nearest
   (even), and add in the sign/exponent computed from r12 - r2.  */
0: rsub.f r3,r2,8 ; r3 = right-shift count; pl/mi picks direction
bmsk r1,r0,r3 ; r1 = bits that will be shifted out
ror r1,r1,r3
lsr.pl r0,r0,r3
neg_s r3,r3
asl.mi r0,r0,r3 ; small value: shift left instead
sub_s r12,r12,r2 ; sign + biased exponent
asl_s r12,r12,23
bxor.pl.f r1,r1,31 ; evaluate the rounding decision
add_s r0,r0,r12 ; combine fraction with sign+exponent
j_s.d [blink]
add.pnz r0,r0,1 ; round up when required
ENDFUNC(__floatunsisf)
ENDFUNC(__floatsisf)
|
4ms/metamodule-plugin-sdk
| 2,203
|
plugin-libc/libgcc/config/arc/ieee-754/eqsf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: r0, r1
output: z flag
clobber: r12, flags
For NaNs, bit 22 .. bit 30 must be set. */
#if 0 /* DEBUG */
.global __eqsf2
.balign 4
FUNC(__eqsf2)
__eqsf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __eqsf2_c` push_s r0
mov r11,r0` pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __eqsf2_asm` ld.ab r10,[sp,4]
pop_s blink
breq.d r11,0,0f
ld.ab r11,[sp,4]
jne_s [blink]
bl abort
0: jeq_s [blink]
bl abort
ENDFUNC(__eqsf2)
#define __eqsf2 __eqsf2_asm
#endif /* DEBUG */
/* Good performance as long as the binary difference is
well predictable (as seen from the branch predictor). */
/* Single-precision equality: sets the z flag iff r0 == r1
   (with +0 == -0, and NaN != anything, including itself).  */
.global __eqsf2
.balign 4
HIDDEN_FUNC(__eqsf2)
__eqsf2:
breq r0, r1,.Lno_bdiff
or r12,r0,r1
j_s.d [blink]
bmsk.f 0,r12,30 ; bit patterns differ: z only for +0 vs -0
.Lno_bdiff:
/* Bitwise identical: equal unless the (shared) value is a NaN.  */
bmsk r12,r0,23
add1.f r12,r12,r0 /* set c iff NaN; also, clear z if NaN. */
j_s.d [blink]
cmp.cc r0,r1 ; no NaN: r0 == r1, sets z
ENDFUNC(__eqsf2)
|
4ms/metamodule-plugin-sdk
| 2,064
|
plugin-libc/libgcc/config/arc/ieee-754/orddf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: r0, r1
output: c flag
clobber: r12, flags
For NaNs, bit 19 .. bit 30 must be set. */
#if 0 /* DEBUG */
.global __orddf2
.balign 4
FUNC(__orddf2)
__orddf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __unorddf2_c` push_s r0
mov r11,r0` pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __orddf2_asm` ld.ab r10,[sp,4]
pop_s blink
brne.d r11,0,0f
ld.ab r11,[sp,4]
jcc [blink]
bl abort
0: jcs [blink]
bl abort
ENDFUNC(__orddf2)
#define __orddf2 __orddf2_asm
#endif /* DEBUG */
/* Double-precision 'ordered': c flag is set iff either operand is a
   NaN (per the format above: bits 19..30 of the high word set).  */
.global __orddf2
.balign 4
HIDDEN_FUNC(__orddf2)
__orddf2:
bmsk r12,DBL0H,20
add1.f r12,r12,DBL0H /* clear z; set c if NaN. */
bmsk r12,DBL1H,20
j_s.d [blink]
add1.cc.f r12,r12,DBL1H /* clear z; set c if NaN. */
ENDFUNC(__orddf2)
|
4ms/metamodule-plugin-sdk
| 2,643
|
plugin-libc/libgcc/config/arc/ieee-754/eqdf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: DBL0, DBL1
output: z flag
clobber: r12, flags
For NaNs, bit 19.. bit 30 of the high word must be set. */
#if 0 /* DEBUG */
.global __eqdf2
.balign 4
FUNC(__eqdf2)
__eqdf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __eqdf2_c` push_s r0
mov r11,r0` pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __eqdf2_asm` ld.ab r10,[sp,4]
pop_s blink
breq.d r11,0,0f
ld.ab r11,[sp,4]
jne_s [blink]
bl abort
0: jeq_s [blink]
bl abort
ENDFUNC(__eqdf2)
#define __eqdf2 __eqdf2_asm
#endif /* DEBUG */
/* Double-precision equality: sets the z flag iff DBL0 == DBL1
   (with +0 == -0, and NaN != anything, including itself).  */
.global __eqdf2
.balign 4
HIDDEN_FUNC(__eqdf2)
/* Good performance as long as the difference in high word is
well predictable (as seen from the branch predictor). */
__eqdf2:
brne.d DBL0H,DBL1H,.Lhighdiff
#ifndef __HS__
/* The next two instructions are required to recognize the FPX
NaN, which has a pattern like this: 0x7ff0_0000_8000_0000, as
opposite to 0x7ff8_0000_0000_0000. */
or.f 0,DBL0L,DBL1L
mov_s r12,0x00200000
bset.ne r12,r12,0
#else
bmsk r12,DBL0H,20 ; low 21 bits of the (equal) high words
#endif /* __HS__ */
add1.f r12,r12,DBL0H /* set c iff NaN; also, clear z if NaN. */
j_s.d [blink]
cmp.cc DBL0L,DBL1L ; no NaN: equality decided by the low words
.balign 4
.Lhighdiff:
/* High words differ: only +0 vs -0 can still compare equal.  */
or r12,DBL0H,DBL1H
or.f 0,DBL0L,DBL1L ; z iff both low words are zero
j_s.d [blink]
bmsk.eq.f r12,r12,30 ; then z iff high words are only sign bits
ENDFUNC(__eqdf2)
/* ??? could we do better by speeding up some 'common' case of inequality? */
|
4ms/metamodule-plugin-sdk
| 2,052
|
plugin-libc/libgcc/config/arc/ieee-754/ordsf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: r0, r1
output: c flag
clobber: r12, flags
For NaNs, bit 22 .. bit 30 must be set. */
#if 0 /* DEBUG */
.global __ordsf2
.balign 4
FUNC(__ordsf2)
__ordsf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __unordsf2_c` push_s r0
mov r11,r0` pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __ordsf2_asm` ld.ab r10,[sp,4]
pop_s blink
brne.d r11,0,0f
ld.ab r11,[sp,4]
jcc [blink]
bl abort
0: jcs [blink]
bl abort
ENDFUNC(__ordsf2)
#define __ordsf2 __ordsf2_asm
#endif /* DEBUG */
/* Single-precision 'ordered': c flag is set iff either operand is a
   NaN (per the format above: bits 22..30 set).  */
.global __ordsf2
.balign 4
HIDDEN_FUNC(__ordsf2)
__ordsf2:
bmsk r12,r0,23
add1.f r12,r12,r0 /* clear z; set c if NaN. */
bmsk r12,r1,23
j_s.d [blink]
add1.cc.f r12,r12,r1 /* clear z; set c if NaN. */
ENDFUNC(__ordsf2)
|
4ms/metamodule-plugin-sdk
| 2,247
|
plugin-libc/libgcc/config/arc/ieee-754/gesf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: r0, r1
output: c flag to be used for 'hs' condition
clobber: r12,flags */
/* For NaNs, bit 22.. bit 30 must be set. */
#if 0 /* DEBUG */
/* Build-time-disabled self-test wrapper: saves every register the asm
   version may clobber, runs the C reference (__gesf2_c), reruns the asm
   version (__gesf2_asm) on the same inputs, and aborts if the resulting
   condition flags disagree with the C result.  */
.global __gesf2
.balign 4
FUNC(__gesf2)
__gesf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __gesf2_c` push_s r0
mov r11,r0` pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __gesf2_asm` ld.ab r10,[sp,4]
pop_s blink
brge.d r11,0,0f
ld.ab r11,[sp,4]
jlo [blink]
bl abort
0: jhs [blink]
bl abort
ENDFUNC(__gesf2)
#define __gesf2 __gesf2_asm
#endif /* DEBUG */
/* Single-precision >= comparison helper.
   Inputs:  r0, r1 = raw IEEE-754 binary32 bit patterns.
   Output:  flags; condition 'hs' (carry clear) is true iff r0 >= r1.
            A NaN operand leaves carry set, so 'hs' is false (unordered).
   Clobbers: r12, flags (per the file header above).  */
.global __gesf2
.balign 4
HIDDEN_FUNC(__gesf2)
__gesf2:
or.f r12,r0,r1 ; N flag: set iff at least one operand has its sign bit set
bmi.d .Lneg
bmsk_s r12,r12,23 ; keep low 24 bits of the OR for the NaN test below
; Both operands non-negative: the raw bit patterns order like unsigned ints.
; The add1 pair sets carry iff r0 or r1 is a NaN (exponent all ones plus
; fraction bits -- see "bit 22..bit 30 must be set" in the header); the
; .cc compares are then skipped, leaving carry set => 'hs' false.
add1.f 0,r12,r0 ; check for NaN
add1.cc.f r12,r12,r1
j_s.d [blink]
cmp.cc r0,r1 ; no NaN: unsigned compare yields the FP ordering
.balign 4
; At least one operand negative: bit-pattern ordering is reversed there,
; so compare with the operands swapped.
.Lneg: breq.d r1,0,.L0 ; r1 == +0 needs the -0 special case below
add1.f 0,r12,r0 ; check for NaN
add1.cc.f r12,r12,r1
j_s.d [blink]
cmp.cc r1,r0 ; reversed compare for the negative range
.balign 4
; Here r1 is +0 and r0 is negative: r0 >= +0 only when r0 is -0
; (bxor flips r0's sign bit; Z set iff r0 == 0x80000000, skipping the cmp
; and leaving carry clear => 'hs' true).
.L0: bxor.f 0,r0,31 ; check for -0
j_s.d [blink]
cmp.hi r1,r0
ENDFUNC(__gesf2)
|
4ms/metamodule-plugin-sdk
| 2,573
|
plugin-libc/libgcc/config/arc/ieee-754/gedf2.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
/* inputs: DBL0, DBL1
output: c flags to be used for 'hs' condition
clobber: r12, flags */
/* For NaNs, bit 19.. bit 30 of the high word must be set. */
#if 0 /* DEBUG */
/* Build-time-disabled self-test wrapper: compares the condition produced
   by the asm version against the C reference (__gedf2_c) and aborts on
   any disagreement.  */
.global __gedf2
.balign 4
FUNC(__gedf2)
__gedf2:
st.a r11,[sp,-4]` push_s blink` st.a r10,[sp,-4]` st.a r9,[sp,-4]
st.a r8,[sp,-4]` st.a r7,[sp,-4]` st.a r6,[sp,-4]` st.a r5,[sp,-4]
st.a r4,[sp,-4]` push_s r3` push_s r2` push_s r1`
bl.d __gedf2_c` push_s r0
mov r11,r0` pop_s r0` pop_s r1` pop_s r2` pop_s r3
ld.ab r4,[sp,4]` ld.ab r5,[sp,4]` ld.ab r6,[sp,4]`
ld.ab r7,[sp,4]` ld.ab r8,[sp,4]` ld.ab r9,[sp,4]
bl.d __gedf2_asm` ld.ab r10,[sp,4]
pop_s blink
brge.d r11,0,0f
ld.ab r11,[sp,4]
jlo [blink]
bl abort
0: jhs [blink]
bl abort
ENDFUNC(__gedf2)
#define __gedf2 __gedf2_asm
#endif /* DEBUG */
/* Double-precision >= comparison helper.
   Inputs:  DBL0 (DBL0H:DBL0L), DBL1 (DBL1H:DBL1L) = raw binary64 patterns.
   Output:  flags; condition 'hs' (carry clear) is true iff DBL0 >= DBL1.
            A NaN operand leaves carry set, so 'hs' is false (unordered).
   Clobbers: r12, flags (per the file header above).  */
.global __gedf2
.balign 4
HIDDEN_FUNC(__gedf2)
__gedf2:
or.f r12,DBL0H,DBL1H ; N flag: set iff either operand's sign bit is set
bmi.d .Lneg
bmsk_s r12,r12,20 ; low 21 bits of the OR feed the NaN test below
; Both non-negative: high words order like unsigned ints; the add1 pair
; sets carry iff an operand is NaN, skipping the .cc compare.
add1.f 0,r12,DBL0H ; clear z; set c iff NaN
add1.cc.f r12,r12,DBL1H ; clear z; set c iff NaN
; NOTE(review): the bmi above already branched for any negative DBL0H,
; so this bbit1 looks unreachable -- confirm against upstream history.
bbit1 DBL0H,31,.Lneg
cmp.cc DBL0H,DBL1H
j_s.d [blink]
cmp.eq DBL0L,DBL1L ; high words equal: the low words decide
.balign 4
; At least one operand negative: ordering is reversed, so swap operands.
.Lneg: breq.d DBL1H,0,.L0 ; DBL1H == +0 high word needs the -0 check below
add1.f 0,r12,DBL0H
add1.cc.f r12,r12,DBL1H
cmp.cc DBL1H,DBL0H
j_s.d [blink]
cmp.eq DBL1L,DBL0L
.balign 4
.L0:
bxor.f 0,DBL0H,31 ; check for high word of -0.
beq_s .Lcheck_0
cmp.cc DBL1H,DBL0H
j_s.d [blink]
cmp.eq DBL1L,DBL0L
.Lcheck_0:
; high words suggest DBL0 may be -0, DBL1 +0; check low words.
; DBL1H is known to be 0 here and is used as a zero register:
; carry ends up clear ('hs' true) iff both low words are zero,
; i.e. iff the comparison is exactly -0 >= +0.
cmp_s DBL1H,DBL0L ; carry <- (DBL0L != 0)
j_s.d [blink]
cmp.cc DBL1H,DBL1L ; if still clear: carry <- (DBL1L != 0)
ENDFUNC(__gedf2)
|
4ms/metamodule-plugin-sdk
| 1,882
|
plugin-libc/libgcc/config/arc/ieee-754/floatunsidf.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
/* Build-time-disabled self-test wrapper: runs the C reference
   (__floatunsidf_c) and the asm version on the same input and aborts if
   the two double results differ.  */
.global __floatunsidf
.balign 4
FUNC(__floatunsidf)
__floatunsidf:
push_s blink
bl.d __floatunsidf_c
push_s r0
ld_s r2,[sp]
st_s r1,[sp]
push_s r0
bl.d __floatunsidf_asm
mov_s r0,r2
pop_s r2
pop_s r3
pop_s blink
cmp r0,r2
cmp.eq r1,r3
jeq_s [blink]
bl abort
ENDFUNC(__floatunsidf)
#define __floatunsidf __floatunsidf_asm
#endif /* DEBUG */
/* Convert an unsigned 32-bit integer (r0) to double (DBL0H:DBL0L).
   The conversion is exact for every input; 0 maps to +0.0.
   Clobbers r2, r3, r12, flags.  */
.global __floatunsidf
.balign 4
FUNC(__floatunsidf)
__floatunsidf:
lsr_s r1,r0 ; r1 = r0 >> 1: top bit now clear, so norm sees a "positive" value
breq_s r0,0,.Lret0 ; input 0: both result halves are already 0 (r0 and r0>>1)
norm r2,r1 ; normalization count: larger the lower r0's top set bit is
mov r12,-0x41d ; -(0x3ff+31-1)
rsub.f r3,r2,11 ; r3 = 11 - r2: right-shift for the high word; C set iff r2 > 11
add_s r12,r2,r12 ; r12 = r2 - 0x41d = -(biased exponent)
add_s r2,r2,21 ; r2 = left-shift count for the low word
; Split the integer across the two fraction words (register roles for the
; high/low double halves depend on endianness).
#ifdef __LITTLE_ENDIAN__
lsr DBL0H,r0,r3
asl_s DBL0L,r0,r2
#else
asl DBL0L,r0,r2
lsr_s DBL0H,r0,r3
#endif
asl_s r12,r12,20 ; negated exponent into the exponent field position
mov.lo DBL0H,DBL0L ; r2 > 11: whole value fits in the (left-shifted) high word
sub_s DBL0H,DBL0H,r12 ; effectively ADD the biased exponent; the integer's
                      ; leading 1 merges into the exponent field as the
                      ; implicit 1
.Lret0: j_s.d [blink]
mov.ls DBL0L,0 ; r2 >= 11: value fit entirely in the high word, clear the low
ENDFUNC(__floatunsidf)
|
4ms/metamodule-plugin-sdk
| 2,150
|
plugin-libc/libgcc/config/arc/ieee-754/fixdfsi.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
/* Build-time-disabled self-test wrapper: compares the asm result against
   the C reference (__fixdfsi_c) and aborts on mismatch.  */
FUNC(__fixdfsi)
.global __fixdfsi
.balign 4
__fixdfsi:
push_s blink
push_s r0
bl.d __fixdfsi_c
push_s r1
mov_s r2,r0
pop_s r1
ld r0,[sp]
bl.d __fixdfsi_asm
st r2,[sp]
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
bl abort
ENDFUNC(__fixdfsi)
#define __fixdfsi __fixdfsi_asm
#endif /* DEBUG */
/* If the fraction has to be shifted left by a positive non-zero amount,
we have to combine bits from DBL0L and DBL0H. If we shift right,
or shift by zero, we only want to have the bits from DBL0H in r0. */
/* Truncate a double (DBL0H:DBL0L) to a signed 32-bit integer in r0
   (C cast semantics: round toward zero).
   NOTE(review): out-of-range and NaN inputs fall through the same paths
   with an unspecified result, as is customary for __fixdfsi -- confirm.  */
.global __fixdfsi
FUNC(__fixdfsi)
.balign 4
__fixdfsi:
bbit0 DBL0H,30,.Lret0or1 ; exponent MSB clear => |x| < 2: result is -1, 0 or 1
asr r2,DBL0H,20 ; r2 = sign-extended sign:exponent field
bmsk_s DBL0H,DBL0H,19 ; isolate the high fraction bits
sub_s r2,r2,19; 0x3ff+20-0x400
neg_s r3,r2 ; r3 = complementary (right-)shift count
asr.f 0,r3,11 ; set C (shift-direction select below) and N (sign select:
              ; N is clear here iff the input sign bit was set)
bset_s DBL0H,DBL0H,20 ; insert the implicit leading 1
#ifdef __LITTLE_ENDIAN__
mov.cs DBL0L,DBL0H
asl DBL0H,DBL0H,r2
#else
asl.cc DBL0H,DBL0H,r2
lsr.cs DBL0H,DBL0H,r3
#endif
lsr_s DBL0L,DBL0L,r3
add.cc r0,r0,r1 ; left-shift case: merge bits from both halves (see note above)
j_s.d [blink]
neg.pl r0,r0 ; negative input (N clear, see asr.f above): negate the magnitude
.Lret0or1:
; |x| < 2: answer is sign(x) when |x| >= 1, else 0.
add.f r0,DBL0H,0x100000 ; bump exponent field; N <- input sign
lsr_s r0,r0,30
bmsk_s r0,r0,0 ; r0 = 1 iff biased exponent was >= 0x3ff (|x| >= 1)
j_s.d [blink]
neg.mi r0,r0 ; negative input: return -1 instead of 1
ENDFUNC(__fixdfsi)
|
4ms/metamodule-plugin-sdk
| 6,432
|
plugin-libc/libgcc/config/arc/ieee-754/divsf3-stdmul.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
- calculate 15..18 bit inverse using a table of approximating polynomials.
Precision is higher for polynomials used to evaluate input with larger
value.
- do one newton-raphson iteration step to double the precision,
then multiply this with the divisor
-> more time to decide if dividend is subnormal
- the worst error propagation is on the side of the value range
with the least initial defect, thus giving us about 30 bits precision.
*/
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
/* Build-time-disabled self-test wrapper: compares the asm quotient
   against the C reference (__divsf3_c) and aborts on mismatch.  */
.global __divsf3
FUNC(__divsf3)
.balign 4
__divsf3:
push_s blink
push_s r1
bl.d __divsf3_c
push_s r0
ld_s r1,[sp,4]
st_s r0,[sp,4]
bl.d __divsf3_asm
pop_s r0
pop_s r1
pop_s blink
cmp r0,r1
#if 1
bne abort
jeq_s [blink]
b abort
#else
bne abort
j_s [blink]
#endif
ENDFUNC(__divsf3)
#define __divsf3 __divsf3_asm
#endif /* DEBUG */
/* Single-precision division, r0 / r1 -> r0 (raw binary32 bit patterns).
   Strategy (see the file header): look up an initial reciprocal
   approximation of the divisor in .Ldivtab, refine it with one
   Newton-Raphson step, multiply by the dividend, then do an exact
   remainder check only when the fast rounding path is ambiguous.
   CAUTION: .L7f800000 and .Ldivtab are addressed with HARD-CODED
   pc-relative offsets (the sub3/ld.as in __divsf3 below); do not insert
   or remove instructions or data here -- comments are fine.  */
FUNC(__divsf3)
.balign 4
.L7f800000:
.long 0x7f800000
; 64-entry table of packed linear-approximation coefficients for the
; initial reciprocal estimate, indexed by the divisor's fraction bits
; 22..17 (see "lsr r2,r1,17" at the entry point).
.Ldivtab:
.long 0xfc0ffff0
.long 0xf46ffefd
.long 0xed1ffd2a
.long 0xe627fa8e
.long 0xdf7ff73b
.long 0xd917f33b
.long 0xd2f7eea3
.long 0xcd1fe986
.long 0xc77fe3e7
.long 0xc21fdddb
.long 0xbcefd760
.long 0xb7f7d08c
.long 0xb32fc960
.long 0xae97c1ea
.long 0xaa27ba26
.long 0xa5e7b22e
.long 0xa1cfa9fe
.long 0x9ddfa1a0
.long 0x9a0f990c
.long 0x9667905d
.long 0x92df878a
.long 0x8f6f7e84
.long 0x8c27757e
.long 0x88f76c54
.long 0x85df630c
.long 0x82e759c5
.long 0x8007506d
.long 0x7d3f470a
.long 0x7a8f3da2
.long 0x77ef341e
.long 0x756f2abe
.long 0x72f7212d
.long 0x709717ad
.long 0x6e4f0e44
.long 0x6c1704d6
.long 0x69e6fb44
.long 0x67cef1d7
.long 0x65c6e872
.long 0x63cedf18
.long 0x61e6d5cd
.long 0x6006cc6d
.long 0x5e36c323
.long 0x5c76b9f3
.long 0x5abeb0b7
.long 0x5916a79b
.long 0x57769e77
.long 0x55de954d
.long 0x54568c4e
.long 0x52d6834d
.long 0x51667a7f
.long 0x4ffe71b5
.long 0x4e9e68f1
.long 0x4d466035
.long 0x4bf65784
.long 0x4aae4ede
.long 0x496e4646
.long 0x48363dbd
.long 0x47063547
.long 0x45de2ce5
.long 0x44be2498
.long 0x43a61c64
.long 0x4296144a
.long 0x41860c0e
.long 0x407e03ee
__divsf3_support: /* This label makes debugger output saner. */
; Divisor (fp1) is denormal: normalize it, redo the table lookup and the
; first multiply, then rejoin the main flow at .Lpast_denorm_fp1.
.Ldenorm_fp1:
bclr r6,r6,31
norm.f r12,r6 ; flag for x/0 -> Inf check
add r6,r6,r6
rsub r5,r12,16
ror r5,r1,r5
asl r6,r6,r12
bmsk r5,r5,5
ld.as r5,[r3,r5]
add r4,r6,r6
; load latency
MPYHU r7,r5,r4
bic.ne.f 0, \
0x60000000,r0 ; large number / denorm -> Inf
beq_s .Linf_NaN
asl r5,r5,13
; wb stall
; slow track
sub r7,r5,r7
MPYHU r8,r7,r6
asl_s r12,r12,23
and.f r2,r0,r9
add r2,r2,r12
asl r12,r0,8
; wb stall
bne.d .Lpast_denorm_fp1
; Dividend (fp0) is denormal: normalize it and adjust the exponent
; bookkeeping, then rejoin at .Lpast_denorm_fp0.  (Note the MPYHU below
; doubles as the delay-slot instruction of the bne.d above.)
.Ldenorm_fp0:
MPYHU r8,r8,r7
bclr r12,r12,31
norm.f r3,r12 ; flag for 0/x -> 0 check
bic.ne.f 0,0x60000000,r1 ; denorm/large number -> 0
beq_s .Lret0
asl_s r12,r12,r3
asl_s r3,r3,23
add_s r12,r12,r12
add r11,r11,r3
b.d .Lpast_denorm_fp0
mov_s r3,r12
.balign 4
; Build a signed Inf (or NaN for 0/0) result.
.Linf_NaN:
bclr.f 0,r0,31 ; 0/0 -> NaN
xor_s r0,r0,r1
bmsk r1,r0,30
bic_s r0,r0,r1
sub.eq r0,r0,1
j_s.d [blink]
or r0,r0,r9
; Return a signed zero.
.Lret0:
xor_s r0,r0,r1
bmsk r1,r0,30
j_s.d [blink]
bic_s r0,r0,r1
.Linf_nan_fp1:
lsr_s r0,r0,31
bmsk.f 0,r1,22
asl_s r0,r0,31
bne_s 0f ; inf/inf -> nan
brne r2,r9,.Lsigned0 ; x/inf -> 0, but x/nan -> nan
0: j_s.d [blink]
mov r0,-1
.Lsigned0:
.Linf_nan_fp0:
tst_s r1,r1
j_s.d [blink]
bxor.mi r0,r0,31
.balign 4
.global __divsf3
/* N.B. the spacing between divtab and the sub3 to get its address must
be a multiple of 8. */
__divsf3:
lsr r2,r1,17 ; table index: divisor fraction bits 22..17
sub3 r3,pcl,55;(.-.Ldivtab) >> 3
bmsk_s r2,r2,5
ld.as r5,[r3,r2] ; r5 = packed initial-reciprocal coefficients
asl r4,r1,9
ld.as r9,[pcl,-114]; [pcl,(-((.-.L7f800000) >> 2))] ; 0x7f800000
MPYHU r7,r5,r4
asl r6,r1,8
and.f r11,r1,r9 ; r11 = divisor exponent field
bset r6,r6,31 ; r6 = divisor fraction with explicit leading 1
asl r5,r5,13
; wb stall
beq .Ldenorm_fp1
sub r7,r5,r7 ; Newton-Raphson refinement of the reciprocal estimate
MPYHU r8,r7,r6
breq.d r11,r9,.Linf_nan_fp1
and.f r2,r0,r9 ; r2 = dividend exponent field
beq.d .Ldenorm_fp0
asl r12,r0,8
; wb stall
breq r2,r9,.Linf_nan_fp0
MPYHU r8,r8,r7
.Lpast_denorm_fp1:
bset r3,r12,31 ; r3 = dividend fraction with explicit leading 1
.Lpast_denorm_fp0:
cmp_s r3,r6
lsr.cc r3,r3,1 ; pre-shift when dividend fraction < divisor fraction
add_s r2,r2, /* wait for immediate */ \
/* wb stall */ \
0x3f000000
sub r7,r7,r8 ; u1.31 inverse, about 30 bit
MPYHU r3,r3,r7 ; tentative quotient fraction with guard bits
sbc r2,r2,r11 ; result exponent = exp0 - exp1 + bias (with pre-shift carry)
xor.f 0,r0,r1
and r0,r2,r9
bxor.mi r0,r0,31 ; sign of result = sign0 ^ sign1
brhs r2, /* wb stall / wait for immediate */ \
0x7f000000,.Linf_denorm
.Lpast_denorm:
add_s r3,r3,0x22 ; round to nearest or higher
tst r3,0x3c ; check if rounding was unsafe
lsr r3,r3,6
jne.d [blink] ; return if rounding was safe.
add_s r0,r0,r3
/* work out exact rounding if we fall through here. */
/* We know that the exact result cannot be represented in single
precision. Find the mid-point between the two nearest
representable values, multiply with the divisor, and check if
the result is larger than the dividend. */
add_s r3,r3,r3
sub_s r3,r3,1 ; r3 = mid-point between the two candidate results
mpyu r3,r3,r6
asr.f 0,r0,1 ; for round-to-even in case this is a denorm
rsub r2,r9,25
asl_s r12,r12,r2
; wb stall
; slow track
sub.f 0,r12,r3 ; compare mid-point * divisor against the dividend
j_s.d [blink]
sub.mi r0,r0,1
/* For denormal results, it is possible that an exact result needs
rounding, and thus the round-to-even rule has to come into play. */
.Linf_denorm:
brlo r2,0xc0000000,.Linf
; Denormal result: strip the exponent and shift the fraction down so the
; result has a zero exponent field, then redo the rounding.
.Ldenorm:
asr_s r2,r2,23
bic r0,r0,r9
neg r9,r2
brlo.d r9,25,.Lpast_denorm
lsr r3,r3,r9
/* Fall through: return +- 0 */
j_s [blink]
.Linf:
j_s.d [blink]
or r0,r0,r9
ENDFUNC(__divsf3)
|
4ms/metamodule-plugin-sdk
| 12,161
|
plugin-libc/libgcc/config/arc/ieee-754/adddf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "arc-ieee-754.h"
#if 0 /* DEBUG */
/* Build-time-disabled self-test wrappers for __adddf3/__subdf3: compare
   the asm result against the C reference and abort on mismatch.  */
.global __adddf3
.balign 4
__adddf3:
push_s blink
push_s r2
push_s r3
push_s r0
bl.d __adddf3_c
push_s r1
ld_s r2,[sp,12]
ld_s r3,[sp,8]
st_s r0,[sp,12]
st_s r1,[sp,8]
pop_s r1
bl.d __adddf3_asm
pop_s r0
pop_s r3
pop_s r2
pop_s blink
cmp r0,r2
cmp.eq r1,r3
jeq_s [blink]
bl abort
.global __subdf3
.balign 4
__subdf3:
push_s blink
push_s r2
push_s r3
push_s r0
bl.d __subdf3_c
push_s r1
ld_s r2,[sp,12]
ld_s r3,[sp,8]
st_s r0,[sp,12]
st_s r1,[sp,8]
pop_s r1
bl.d __subdf3_asm
pop_s r0
pop_s r3
pop_s r2
pop_s blink
cmp r0,r2
cmp.eq r1,r3
jeq_s [blink]
bl abort
#define __adddf3 __adddf3_asm
#define __subdf3 __subdf3_asm
#endif /* DEBUG */
/* N.B. This is optimized for ARC700.
ARC600 has very different scheduling / instruction selection criteria. */
/* inputs: DBL0, DBL1 (r0-r3)
output: DBL0 (r0, r1)
clobber: r2-r10, r12, flags
All NaN highword bits must be 1. NaN low word is random. */
/* CAUTION: the "ld r9,[pcl,-8]" at __adddf3 fetches the 0x7ff00000 word
   below at a hard-coded pc-relative offset; do not insert or remove code
   or data between them -- comments are fine.  */
.balign 4
.global __adddf3
.global __subdf3
.long 0x7ff00000 ; exponent mask
FUNC(__adddf3)
FUNC(__subdf3)
; __subdf3 just flips DBL1's sign bit and falls through into __adddf3.
__subdf3:
bxor_l DBL1H,DBL1H,31
__adddf3:
ld r9,[pcl,-8] ; r9 = 0x7ff00000 exponent mask (constant word above)
bmsk r4,DBL0H,30 ; r4 = DBL0H with the sign bit stripped
xor r10,DBL0H,DBL1H ; r10 < 0 iff the operand signs differ
and r6,DBL1H,r9 ; r6 = DBL1 biased exponent field
sub.f r12,r4,r6
asr_s r12,r12,20 ; r12 ~ exponent difference = alignment shift count
blo .Ldbl1_gt
brhs r4,r9,.Linf_nan
brhs r12,32,.Large_shift
brne r12,0,.Lsmall_shift
brge r10,0,.Ladd_same_exp ; r12 == 0
/* After subtracting, we need to normalize; when shifting to place the
leading 1 into position for the implicit 1 and adding that to DBL0H,
we increment the exponent. Thus, we have to subtract one more than
the shift count from the exponent beforehand. Iff the exponent drops thus
below zero (before adding in the fraction with the leading one), we have
generated a denormal number. Denormal handling is basically reducing the
shift count so that we produce a zero exponent instead; however, this way
the shift count can become zero (if we started out with exponent 1).
Therefore, a simple min operation is not good enough, since we don't
want to handle a zero normalizing shift in the main path.
On the plus side, we don't need to check for denorm input, the result
of subtracting these looks just the same as denormals generated during
subtraction. */
; Same exponents, different signs: effective subtraction.
bmsk r7,DBL1H,30
cmp r4,r7
cmp.eq DBL0L,DBL1L
blo .L_rsub_same_exp ; |DBL1| > |DBL0|: subtract the other way around
sub.f DBL0L,DBL0L,DBL1L
bmsk r12,DBL0H,19
bic DBL1H,DBL0H,r12
sbc.f r4,r4,r7
beq_l .Large_cancel
norm DBL1L,r4
b.d .Lsub_done_same_exp
sub r12,DBL1L,9
.balign 4
.Linf_nan:
; If both inputs are inf, but with different signs, the result is NaN.
asr r12,r10,31
or_s DBL1H,DBL1H,r12
j_s.d [blink]
or.eq DBL0H,DBL0H,DBL1H
.balign 4
.L_rsub_same_exp:
rsub.f DBL0L,DBL0L,DBL1L
bmsk r12,DBL1H,19
bic_s DBL1H,DBL1H,r12
sbc.f r4,r7,r4
beq_l .Large_cancel
norm DBL1L,r4
sub r12,DBL1L,9
; Normalize the difference: DBL1L holds the shift count - 1.
.Lsub_done_same_exp:
asl_s r12,r12,20
sub_s DBL1L,DBL1L,10
sub DBL0H,DBL1H,r12
xor.f 0,DBL0H,DBL1H
bmi .Ldenorm ; exponent underflowed: produce a denormal instead
.Lpast_denorm:
neg_s r12,DBL1L
lsr r7,DBL0L,r12
asl r12,r4,DBL1L
asl_s DBL0L,DBL0L,DBL1L
add_s r12,r12,r7
j_s.d [blink]
add_l DBL0H,DBL0H,r12
.balign 4
.Ladd_same_exp:
/* This is a special case because we can't test for need to shift
down by checking if bit 20 of DBL0H changes. OTOH, here we know
that we always need to shift down. */
; The implicit 1 of DBL0 is not shifted together with the
; fraction, thus effectively doubled, compensating for not setting
; implicit1 for DBL1
add_s r12,DBL0L,DBL1L
lsr.f 0,r12,2 ; round to even
breq r6,0,.Ldenorm_add
adc.f DBL0L,DBL0L,DBL1L
sub r7,DBL1H,DBL0H
sub1 r7,r7,r9 ; boost exponent by 2/2
rrc DBL0L,DBL0L
asr.f r7,r7 ; DBL1.fraction/2 - DBL0.fraction/2 ; exp++
add.cs.f DBL0L,DBL0L,0x80000000
add_l DBL0H,DBL0H,r7 ; DBL0.implicit1 not shifted for DBL1.implicit1
add.cs DBL0H,DBL0H,1
bic.f 0,r9,DBL0H ; check for overflow -> infinity.
jne_l [blink]
and DBL0H,DBL0H,0xfff00000
j_s.d [blink]
mov_s DBL0L,0
.balign 4
; Exponent difference 32..54: DBL1 must be shifted down by a word or more.
.Large_shift:
brhs r12,55,.Lret_dbl0 ; DBL1 entirely below the guard bits: result is DBL0
bmsk_s DBL1H,DBL1H,19
brne r6,0,.Lno_denorm_large_shift
brhi.d r12,33,.Lfixed_denorm_large_shift
sub_s r12,r12,1
breq r12,31, .Lfixed_denorm_small_shift
.Lshift32:
mov_s r12,DBL1L
mov_s DBL1L,DBL1H
brlt.d r10,0,.Lsub
mov_s DBL1H,0
b_s .Ladd
.Ldenorm_add:
cmp_s r12,DBL1L
mov_s DBL0L,r12
j_s.d [blink]
adc DBL0H,r4,DBL1H
.Lret_dbl0:
j_s [blink]
.balign 4
; Exponent difference 1..31: shift DBL1's fraction into place, keeping
; the bits shifted out as guard bits in r12.
.Lsmall_shift:
breq.d r6,0,.Ldenorm_small_shift
bmsk_s DBL1H,DBL1H,19
bset_s DBL1H,DBL1H,20
.Lfixed_denorm_small_shift:
neg r8,r12
asl r4,DBL1H,r8
lsr_l DBL1H,DBL1H,r12
lsr r5,DBL1L,r12
asl r12,DBL1L,r8
brge.d r10,0,.Ladd
or DBL1L,r4,r5
/* subtract, abs(DBL0) > abs(DBL1) */
/* DBL0H, DBL0L: original values
DBL1H, DBL1L: fraction with explicit leading 1, shifted into place
r4: orig. DBL0H & 0x7fffffff
r6: orig. DBL1H & 0x7ff00000
r9: 0x7ff00000
r10: orig. DBL0H ^ DBL1H
r12: guard bits */
.balign 4
.Lsub:
neg.f r12,r12
mov_s r7,DBL1H
bmsk r5,DBL0H,19
sbc.f DBL0L,DBL0L,DBL1L
bic DBL1H,DBL0H,r5
bset r5,r5,20
sbc.f r4,r5,r7
beq_l .Large_cancel_sub
norm DBL1L,r4
bmsk r6,DBL1H,30
.Lsub_done:
sub_s DBL1L,DBL1L,9
breq DBL1L,1,.Lsub_done_noshift
asl r5,DBL1L,20
sub_s DBL1L,DBL1L,1
brlo r6,r5,.Ldenorm_sub
sub DBL0H,DBL1H,r5
; Normalize and round: fold the guard bits back in (round to nearest,
; ties to even).
.Lpast_denorm_sub:
neg_s DBL1H,DBL1L
lsr r6,r12,DBL1H
asl_s r12,r12,DBL1L
and r8,r6,1
add1.f 0,r8,r12
add.ne.f r12,r12,r12
asl r8,DBL0L,DBL1L
lsr r12,DBL0L,DBL1H
adc.f DBL0L,r8,r6
asl r5,r4,DBL1L
add_s DBL0H,DBL0H,r12
j_s.d [blink]
adc DBL0H,DBL0H,r5
.balign 4
.Lno_denorm_large_shift:
breq.d r12,32,.Lshift32
bset_l DBL1H,DBL1H,20
.Lfixed_denorm_large_shift:
neg r8,r12
asl r4,DBL1H,r8
lsr r5,DBL1L,r12
asl.f 0,DBL1L,r8
lsr DBL1L,DBL1H,r12
or r12,r4,r5
tst.eq r12,1
or.ne r12,r12,2 ; sticky bit for the bits shifted out entirely
brlt.d r10,0,.Lsub
mov_s DBL1H,0
b_l .Ladd
; If a denorm is produced without shifting, we have an exact result -
; no need for rounding.
.balign 4
.Ldenorm_sub:
lsr DBL1L,r6,20
xor DBL0H,r6,DBL1H
brne.d DBL1L,1,.Lpast_denorm_sub
sub_s DBL1L,DBL1L,1
.Lsub_done_noshift:
add.f 0,r12,r12
btst.eq DBL0L,0
cmp.eq r12,r12
add.cs.f DBL0L,DBL0L,1
bclr r4,r4,20
j_s.d [blink]
adc DBL0H,DBL1H,r4
.balign 4
.Ldenorm_small_shift:
brne.d r12,1,.Lfixed_denorm_small_shift
sub_l r12,r12,1
brlt r10,0,.Lsub
.Ladd: ; bit 20 of DBL1H is clear and bit 0 of r12 does not matter
add.f DBL0L,DBL0L,DBL1L
add_s DBL1H,DBL1H,DBL0H
add.cs DBL1H,DBL1H,1
xor_l DBL0H,DBL0H,DBL1H
bbit0 DBL0H,20,.Lno_shiftdown
; Fraction overflowed into bit 20: shift down one and re-round.
lsr.f DBL0H,DBL1H
and r4,DBL0L,2
bmsk DBL0H,DBL0H,18
sbc DBL0H,DBL1H,DBL0H
rrc.f DBL0L,DBL0L
or.f r12,r12,r4
cmp.eq r12,r12
add.cs.f DBL0L,DBL0L,1
bic.f 0,r9,DBL0H ; check for generating infinity with possible ...
jne.d [blink] ; ... non-zero fraction
add.cs DBL0H,DBL0H,1
mov_s DBL0L,0
bmsk DBL1H,DBL0H,19
j_s.d [blink]
bic_s DBL0H,DBL0H,DBL1H
.Lno_shiftdown:
mov_s DBL0H,DBL1H
add.f 0,r12,r12
btst.eq DBL0L,0
cmp.eq r12,r12
add.cs.f DBL0L,DBL0L,1
j_s.d [blink]
add.cs DBL0H,DBL0H,1
.balign 4
.Ldenorm:
bmsk DBL0H,DBL1H,30
lsr r12,DBL0H,20
xor_s DBL0H,DBL0H,DBL1H
sub_l DBL1L,r12,1
bgt .Lpast_denorm
j_s.d [blink]
add_l DBL0H,DBL0H,r4
.balign 4
; Same-exponent subtraction cancelled the whole high fraction word.
.Large_cancel:
;DBL0L: mantissa DBL1H: sign & exponent
norm.f DBL1L,DBL0L
bmsk DBL0H,DBL1H,30
add_s DBL1L,DBL1L,22
mov.mi DBL1L,21
add_s r12,DBL1L,1
asl_s r12,r12,20
beq_s .Lret0 ; complete cancellation: return +0
brhs.d DBL0H,r12,.Lpast_denorm_large_cancel
sub DBL0H,DBL1H,r12
bmsk DBL0H,DBL1H,30
lsr r12,DBL0H,20
xor_s DBL0H,DBL0H,DBL1H
sub.f DBL1L,r12,1
jle [blink]
.Lpast_denorm_large_cancel:
rsub.f r7,DBL1L,32
lsr r7,DBL0L,r7
asl_s DBL0L,DBL0L,DBL1L
mov.ls r7,DBL0L
add_s DBL0H,DBL0H,r7
j_s.d [blink]
mov.ls DBL0L,0
.Lret0:
j_s.d [blink]
mov_l DBL0H,0
/* r4:DBL0L:r12 : unnormalized result fraction
DBL1H: result sign and exponent */
/* When seeing large cancellation, only the topmost guard bit might be set. */
.balign 4
.Large_cancel_sub:
norm.f DBL1L,DBL0L
bpnz.d 0f
bmsk DBL0H,DBL1H,30
mov r5,22<<20
bne.d 1f
mov_s DBL1L,21
bset r5,r5,5+20
add_s DBL1L,DBL1L,32
brne r12,0,1f
j_s.d [blink]
mov_l DBL0H,0
.balign 4
0: add r5,DBL1L,23
asl r5,r5,20
add_s DBL1L,DBL1L,22
1: brlo DBL0H,r5,.Ldenorm_large_cancel_sub
sub DBL0H,DBL1H,r5
.Lpast_denorm_large_cancel_sub:
rsub.f r7,DBL1L,32
lsr r12,r12,r7
lsr r7,DBL0L,r7
asl_s DBL0L,DBL0L,DBL1L
add.ge DBL0H,DBL0H,r7
add_s DBL0L,DBL0L,r12
add.lt DBL0H,DBL0H,DBL0L
mov.eq DBL0L,r12
j_s.d [blink]
mov.lt DBL0L,0
.balign 4
.Ldenorm_large_cancel_sub:
lsr r5,DBL0H,20
xor_s DBL0H,DBL0H,DBL1H
brgt.d r5,1,.Lpast_denorm_large_cancel_sub
sub DBL1L,r5,1
j_l [blink] ; denorm, no shift -> no rounding needed.
/* r4: DBL0H & 0x7fffffff
r6: DBL1H & 0x7ff00000
r9: 0x7ff00000
r10: sign difference
r12: shift count (negative) */
.balign 4
; abs(DBL1) > abs(DBL0): mirror of the main path with the operand roles
; swapped (DBL0 is the one that gets aligned).
.Ldbl1_gt:
brhs r6,r9,.Lret_dbl1 ; inf or NaN
neg r8,r12
brhs r8,32,.Large_shift_dbl0
.Lsmall_shift_dbl0:
breq.d r6,0,.Ldenorm_small_shift_dbl0
bmsk_s DBL0H,DBL0H,19
bset_s DBL0H,DBL0H,20
.Lfixed_denorm_small_shift_dbl0:
asl r4,DBL0H,r12
lsr DBL0H,DBL0H,r8
lsr r5,DBL0L,r8
asl r12,DBL0L,r12
brge.d r10,0,.Ladd_dbl1_gt
or DBL0L,r4,r5
/* subtract, abs(DBL0) < abs(DBL1) */
/* DBL0H, DBL0L: fraction with explicit leading 1, shifted into place
DBL1H, DBL1L: original values
r6: orig. DBL1H & 0x7ff00000
r9: 0x7ff00000
r12: guard bits */
.balign 4
.Lrsub:
neg.f r12,r12
bmsk r7,DBL1H,19
mov_s r5,DBL0H
sbc.f DBL0L,DBL1L,DBL0L
bic DBL1H,DBL1H,r7
bset r7,r7,20
sbc.f r4,r7,r5
beq_l .Large_cancel_sub
norm DBL1L,r4
b_l .Lsub_done ; note: r6 is already set up.
.Lret_dbl1:
mov_s DBL0H,DBL1H
j_s.d [blink]
mov_l DBL0L,DBL1L
.balign 4
.Ldenorm_small_shift_dbl0:
sub.f r8,r8,1
bne.d .Lfixed_denorm_small_shift_dbl0
add_s r12,r12,1
brlt r10,0,.Lrsub
.Ladd_dbl1_gt: ; bit 20 of DBL0H is clear and bit 0 of r12 does not matter
add.f DBL0L,DBL0L,DBL1L
add_s DBL0H,DBL0H,DBL1H
add.cs DBL0H,DBL0H,1
xor DBL1H,DBL0H,DBL1H
bbit0 DBL1H,20,.Lno_shiftdown_dbl1_gt
; Fraction overflowed into bit 20: shift down one and re-round.
lsr.f DBL1H,DBL0H
and r4,DBL0L,2
bmsk DBL1H,DBL1H,18
sbc DBL0H,DBL0H,DBL1H
rrc.f DBL0L,DBL0L
or.f r12,r12,r4
cmp.eq r12,r12
add.cs.f DBL0L,DBL0L,1
bic.f 0,r9,DBL0H ; check for generating infinity with possible ...
jne.d [blink] ; ... non-zero fraction
add.cs DBL0H,DBL0H,1
mov_s DBL0L,0
bmsk DBL1H,DBL0H,19
j_s.d [blink]
bic_s DBL0H,DBL0H,DBL1H
.Lno_shiftdown_dbl1_gt:
add.f 0,r12,r12
btst.eq DBL0L,0
cmp.eq r12,r12
add.cs.f DBL0L,DBL0L,1
j_s.d [blink]
add.cs DBL0H,DBL0H,1
.balign 4
.Large_shift_dbl0:
brhs r8,55,.Lret_dbl1 ; DBL0 entirely below the guard bits: result is DBL1
bmsk_s DBL0H,DBL0H,19
brne r6,0,.Lno_denorm_large_shift_dbl0
add_s r12,r12,1
brne.d r8,33,.Lfixed_denorm_large_shift_dbl0
sub r8,r8,1
bset_s DBL0H,DBL0H,20
.Lshift32_dbl0:
mov_s r12,DBL0L
mov_s DBL0L,DBL0H
brlt.d r10,0,.Lrsub
mov_s DBL0H,0
b_s .Ladd_dbl1_gt
.balign 4
.Lno_denorm_large_shift_dbl0:
breq.d r8,32,.Lshift32_dbl0
bset_l DBL0H,DBL0H,20
.Lfixed_denorm_large_shift_dbl0:
asl r4,DBL0H,r12
lsr r5,DBL0L,r8
asl.f 0,DBL0L,r12
lsr DBL0L,DBL0H,r8
or r12,r4,r5
tst.eq r12,1
or.ne r12,r12,2 ; sticky bit for the bits shifted out entirely
brlt.d r10,0,.Lrsub
mov_s DBL0H,0
b_l .Ladd_dbl1_gt
ENDFUNC(__adddf3)
ENDFUNC(__subdf3)
|
4ms/metamodule-plugin-sdk
| 10,887
|
plugin-libc/libgcc/config/arc/ieee-754/arc600-dsp/divdf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
to calculate a := b/x as b*y, with y := 1/x:
- x is in the range [1..2)
- calculate 15..18 bit inverse y0 using a table of approximating polynoms.
Precision is higher for polynoms used to evaluate input with larger
value.
- Do one newton-raphson iteration step to double the precision,
then multiply this with the divisor
-> more time to decide if dividend is subnormal
- the worst error propagation is on the side of the value range
with the least initial defect, thus giving us about 30 bits precision.
The truncation error for the either is less than 1 + x/2 ulp.
A 31 bit inverse can be simply calculated by using x with implicit 1
and chaining the multiplies. For a 32 bit inverse, we multiply y0^2
with the bare fraction part of x, then add in y0^2 for the implicit
1 of x.
- If calculating a 31 bit inverse, the systematic error is less than
-1 ulp; likewise, for 32 bit, it is less than -2 ulp.
- If we calculate our seed with a 32 bit fraction, we can achieve a
tentative result strictly better than -2 / +2.5 (1) ulp/128, i.e. we
only need to take the step to calculate the 2nd stage rest and
rounding adjust 1/32th of the time. However, if we use a 20 bit
fraction for the seed, the negative error can exceed -2 ulp/128, (2)
thus for a simple add / tst check, we need to do the 2nd stage
rest calculation/ rounding adjust 1/16th of the time.
(1): The inexactness of the 32 bit inverse contributes an error in the
range of (-1 .. +(1+x/2) ) ulp/128. Leaving out the low word of the
rest contributes an error < +1/x ulp/128 . In the interval [1,2),
x/2 + 1/x <= 1.5 .
(2): Unless proven otherwise. I have not actually looked for an
example where -2 ulp/128 is exceeded, and my calculations indicate
that the excess, if existent, is less than -1/512 ulp.
??? The algorithm is still based on the ARC700 optimized code.
Maybe we could make better use of 32x16 bit multiply, or 64 bit multiply
results.
*/
#include "../arc-ieee-754.h"
#define mlo acc2
#define mhi acc1
#define mul64(b,c) mullw 0,b,c` machlw 0,b,c
#define mulu64(b,c) mululw 0,b,c` machulw 0,b,c
/* N.B. fp-bit.c does double rounding on denormal numbers. */
#if 0 /* DEBUG */
.global __divdf3
FUNC(__divdf3)
.balign 4
__divdf3:
push_s blink
push_s r2
push_s r3
push_s r0
bl.d __divdf3_c
push_s r1
ld_s r2,[sp,12]
ld_s r3,[sp,8]
st_s r0,[sp,12]
st_s r1,[sp,8]
pop_s r1
bl.d __divdf3_asm
pop_s r0
pop_s r3
pop_s r2
pop_s blink
cmp r0,r2
cmp.eq r1,r3
jeq_s [blink]
and r12,DBL0H,DBL1H
bic.f 0,0x7ff80000,r12 ; both NaN -> OK
jeq_s [blink]
bl abort
ENDFUNC(__divdf3)
#define __divdf3 __divdf3_asm
#endif /* DEBUG */
FUNC(__divdf3)
.balign 4
.L7ff00000:
.long 0x7ff00000
.Ldivtab:
.long 0xfc0fffe1
.long 0xf46ffdfb
.long 0xed1ffa54
.long 0xe61ff515
.long 0xdf7fee75
.long 0xd91fe680
.long 0xd2ffdd52
.long 0xcd1fd30c
.long 0xc77fc7cd
.long 0xc21fbbb6
.long 0xbcefaec0
.long 0xb7efa100
.long 0xb32f92bf
.long 0xae8f83b7
.long 0xaa2f7467
.long 0xa5ef6479
.long 0xa1cf53fa
.long 0x9ddf433e
.long 0x9a0f3216
.long 0x965f2091
.long 0x92df0f11
.long 0x8f6efd05
.long 0x8c1eeacc
.long 0x88eed876
.long 0x85dec615
.long 0x82eeb3b9
.long 0x800ea10b
.long 0x7d3e8e0f
.long 0x7a8e7b3f
.long 0x77ee6836
.long 0x756e5576
.long 0x72fe4293
.long 0x709e2f93
.long 0x6e4e1c7f
.long 0x6c0e095e
.long 0x69edf6c5
.long 0x67cde3a5
.long 0x65cdd125
.long 0x63cdbe25
.long 0x61ddab3f
.long 0x600d991f
.long 0x5e3d868c
.long 0x5c6d7384
.long 0x5abd615f
.long 0x590d4ecd
.long 0x576d3c83
.long 0x55dd2a89
.long 0x545d18e9
.long 0x52dd06e9
.long 0x516cf54e
.long 0x4ffce356
.long 0x4e9cd1ce
.long 0x4d3cbfec
.long 0x4becae86
.long 0x4aac9da4
.long 0x496c8c73
.long 0x483c7bd3
.long 0x470c6ae8
.long 0x45dc59af
.long 0x44bc4915
.long 0x43ac3924
.long 0x428c27fb
.long 0x418c187a
.long 0x407c07bd
__divdf3_support: /* This label makes debugger output saner. */
.balign 4
.Ldenorm_dbl1:
brge r6, \
0x43500000,.Linf_NaN ; large number / denorm -> Inf
bmsk.f r12,DBL1H,19
mov.eq r12,DBL1L
mov.eq DBL1L,0
sub.eq r7,r7,32
norm.f r11,r12 ; flag for x/0 -> Inf check
beq_s .Linf_NaN
mov.mi r11,0
add.pl r11,r11,1
add_s r12,r12,r12
asl r8,r12,r11
rsub r12,r11,31
lsr r12,DBL1L,r12
tst_s DBL1H,DBL1H
or r8,r8,r12
lsr r4,r8,26
lsr DBL1H,r8,12
ld.as r4,[r10,r4]
bxor.mi DBL1H,DBL1H,31
sub r11,r11,11
asl DBL1L,DBL1L,r11
sub r11,r11,1
mulu64 (r4,r8)
sub r7,r7,r11
b.d .Lpast_denorm_dbl1
asl r7,r7,20
.Linf_NaN:
tst_s DBL0L,DBL0L ; 0/0 -> NaN
xor_s DBL1H,DBL1H,DBL0H
bclr.eq.f DBL0H,DBL0H,31
bmsk DBL0H,DBL1H,30
xor_s DBL0H,DBL0H,DBL1H
sub.eq DBL0H,DBL0H,1
mov_s DBL0L,0
j_s.d [blink]
or DBL0H,DBL0H,r9
.balign 4
.Lret0_2:
xor_s DBL1H,DBL1H,DBL0H
mov_s DBL0L,0
bmsk DBL0H,DBL1H,30
j_s.d [blink]
xor_s DBL0H,DBL0H,DBL1H
.balign 4
.global __divdf3
/* N.B. the spacing between divtab and the sub3 to get its address must
be a multiple of 8. */
__divdf3:
asl r8,DBL1H,12
lsr r4,r8,26
sub3 r10,pcl,51;(.-.Ldivtab) >> 3
ld.as r9,[pcl,-104]; [pcl,(-((.-.L7ff00000) >> 2))] ; 0x7ff00000
ld.as r4,[r10,r4]
lsr r12,DBL1L,20
and.f r7,DBL1H,r9
or r8,r8,r12
mulu64 (r4,r8)
beq.d .Ldenorm_dbl1
.Lpast_denorm_dbl1:
and.f r6,DBL0H,r9
breq.d r7,r9,.Linf_nan_dbl1
asl r4,r4,12
sub r4,r4,mhi
mululw 0,r4,r4
machulw r5,r4,r4
bne.d .Lnormal_dbl0
lsr r8,r8,1
.balign 4
.Ldenorm_dbl0:
bmsk.f r12,DBL0H,19
; wb stall
mov.eq r12,DBL0L
sub.eq r6,r6,32
norm.f r11,r12 ; flag for 0/x -> 0 check
brge r7, \
0x43500000, .Lret0_2 ; denorm/large number -> 0
beq_s .Lret0_2
mov.mi r11,0
add.pl r11,r11,1
asl r12,r12,r11
sub r6,r6,r11
add.f 0,r6,31
lsr r10,DBL0L,r6
mov.mi r10,0
add r6,r6,11+32
neg.f r11,r6
asl DBL0L,DBL0L,r11
mov.pl DBL0L,0
sub r6,r6,32-1
b.d .Lpast_denorm_dbl0
asl r6,r6,20
.balign 4
.Linf_nan_dbl1: ; 0/Inf -> NaN Inf/Inf -> NaN x/Inf-> 0 x/NaN -> NaN
or.f 0,r6,DBL0L
cmp.ne r6,r9
not_s DBL0L,DBL1H
sub_s.ne DBL0L,DBL0L,DBL0L
tst_s DBL0H,DBL0H
add_s DBL0H,DBL1H,DBL0L
j_s.d [blink]
bxor.mi DBL0H,DBL0H,31
.balign 4
; Main path: both operands are finite; multiply dividend by the refined
; reciprocal (fixed-point positions annotated as u<int>.<frac> below),
; round, and fall into the exact-rounding check when the cheap rounding
; test is inconclusive.
.Lnormal_dbl0:
breq.d r6,r9,.Linf_nan_dbl0 ; dividend Inf/NaN
asl r12,DBL0H,11 ; left-align dividend fraction
lsr r10,DBL0L,21
.Lpast_denorm_dbl0:
bset r8,r8,31 ; divisor fraction with explicit leading 1
mulu64 (r5,r8)
add_s r12,r12,r10 ; r12 = dividend fraction high word
bset r5,r12,31 ; dividend fraction with explicit leading 1
cmp r5,r8 ; does the quotient need a one-bit shift?
cmp.eq DBL0L,DBL1L
lsr.cc r5,r5,1
sub r4,r4,mhi ; u1.31 inverse, about 30 bit
mululw 0,r5,r4
machulw r11,r5,r4 ; result fraction highpart
lsr r8,r8,2 ; u3.29
add r5,r6, /* wait for immediate */ \
0x3fe00000 ; rebias result exponent
mulu64 (r11,r8) ; u-28.31
asl_s DBL1L,DBL1L,9 ; u-29.23:9
sbc r6,r5,r7 ; result exponent = expo0 - expo1 + bias (- shift)
mov r12,mlo ; u-28.31
mulu64 (r11,DBL1L) ; mhi: u-28.23:9
add.cs DBL0L,DBL0L,DBL0L
asl_s DBL0L,DBL0L,6 ; u-26.25:7
asl r10,r11,23
sub_l DBL0L,DBL0L,r12
lsr r7,r11,9
sub r5,DBL0L,mhi ; rest msw ; u-26.31:0
mul64 (r5,r4) ; mhi: result fraction lowpart
xor.f 0,DBL0H,DBL1H ; N = sign of the quotient
and DBL0H,r6,r9
add_s DBL0H,DBL0H,r7 ; assemble exponent + fraction highpart
bclr r12,r9,20 ; 0x7fe00000
brhs.d r6,r12,.Linf_denorm ; exponent over/underflow
bxor.mi DBL0H,DBL0H,31 ; apply quotient sign
add.f r12,mhi,0x11 ; round to nearest
asr r9,r12,5
sub.mi DBL0H,DBL0H,1
add.f DBL0L,r9,r10
tst r12,0x1c ; check if rounding was safe
jne.d [blink]
add.cs DBL0H,DBL0H,1 ; carry into the high word
/* work out exact rounding if we fall through here. */
/* We know that the exact result cannot be represented in double
precision. Find the mid-point between the two nearest
representable values, multiply with the divisor, and check if
the result is larger than the dividend. Since we want to know
only the sign bit, it is sufficient to calculate only the
highpart of the lower 64 bits. */
mulu64 (r11,DBL1L) ; rest before considering r12 in r5 : -mlo
sub.f DBL0L,DBL0L,1
asl r12,r9,2 ; u-22.30:2
sub.cs DBL0H,DBL0H,1
sub.f r12,r12,2 ; mid-point below the tentative result
mov r10,mlo ; rest before considering r12 in r5 : -r10
mululw 0,r12,DBL1L
machulw r7,r12,DBL1L ; mhi: u-51.32
asl r5,r5,25 ; s-51.7:25
lsr r10,r10,7 ; u-51.30:2
mulu64 (r12,r8) ; mlo: u-51.31:1
sub r5,r5,r10
add.mi r5,r5,DBL1L ; signed multiply adjust for r12*DBL1L
bset r7,r7,0 ; make sure that the result is not zero, and that
sub r5,r5,r7 ; a highpart zero appears negative
sub.f r5,r5,mlo ; rest msw
add.pl.f DBL0L,DBL0L,1 ; mid-point <= exact value: round up
j_s.d [blink]
add.eq DBL0H,DBL0H,1
; Dividend is Inf/NaN, divisor finite: pass it through, fixing up the sign
; with the divisor's sign bit.
.Linf_nan_dbl0:
tst_s DBL1H,DBL1H ; N = divisor sign
j_s.d [blink]
bxor.mi DBL0H,DBL0H,31 ; negative divisor flips the result sign
.balign 4
; Result exponent over/underflowed: deliver +-Inf, +-0, or a correctly
; rounded (round-to-even) denormal result.
.Linf_denorm:
lsr r12,r6,28
brlo.d r12,0xc,.Linf ; overflow -> Inf
.Ldenorm:
asr r6,r6,20 ; (negative) unbiased-ish exponent
neg r9,r6
mov_s DBL0H,0
brhs.d r9,54,.Lret0 ; shifted out entirely -> +-0
bxor.mi DBL0H,DBL0H,31 ; sign of the result
add r12,mhi,1
and r12,r12,-4
rsub r7,r6,5
asr r10,r12,28
bmsk r4,r12,27
min r7,r7,31
asr DBL0L,r4,r7 ; fraction low part, shifted into denormal position
add DBL1H,r11,r10
abs.f r10,r4
sub.mi r10,r10,1
add.f r7,r6,32-5
asl r4,r4,r7 ; bits shifted out (for rounding)
mov.mi r4,r10
add.f r10,r6,23
rsub r7,r6,9
lsr r7,DBL1H,r7
asl r10,DBL1H,r10
or.pnz DBL0H,DBL0H,r7 ; fraction high part
or.mi r4,r4,r10
mov.mi r10,r7
add.f DBL0L,r10,DBL0L
add.cs.f DBL0H,DBL0H,1 ; carry clear after this point
bxor.f 0,r4,31 ; round bit set?
add.pnz.f DBL0L,DBL0L,1 ; round up
add.cs.f DBL0H,DBL0H,1
jne_s [blink] ; rounding decided; otherwise a tie: check the rest
/* Calculation so far was not conclusive; calculate further rest. */
mulu64 (r11,DBL1L) ; rest before considering r12 in r5 : -mlo
asr.f r12,r12,3
asl r5,r5,25 ; s-51.7:25
mov r11,mlo ; rest before considering r12 in r5 : -r11
mulu64 (r12,r8) ; u-51.31:1
and r9,DBL0L,1 ; tie-breaker: round to even
lsr r11,r11,7 ; u-51.30:2
mov DBL1H,mlo ; u-51.31:1
mulu64 (r12,DBL1L) ; u-51.62:2
sub.mi r11,r11,DBL1L ; signed multiply adjust for r12*DBL1L
add_s DBL1H,DBL1H,r11
sub DBL1H,DBL1H,r5 ; -rest msw
add_s DBL1H,DBL1H,mhi ; -rest msw
add.f 0,DBL1H,DBL1H ; can't ror.f by 32 :-(
tst_s DBL1H,DBL1H
cmp.eq mlo,r9
add.cs.f DBL0L,DBL0L,1 ; round up if the true rest says so
j_s.d [blink]
add.cs DBL0H,DBL0H,1
.Lret0:
/* return +- 0 */
j_s.d [blink]
mov_s DBL0L,0
; return +-Inf: exponent all-ones (r9 = 0x7ff00000), fraction zero,
; sign from the sign comparison done by the caller path (mi).
.Linf:
mov_s DBL0H,r9
mov_s DBL0L,0
j_s.d [blink]
bxor.mi DBL0H,DBL0H,31
ENDFUNC(__divdf3)
|
4ms/metamodule-plugin-sdk
| 3,642
|
plugin-libc/libgcc/config/arc/ieee-754/arc600-dsp/mulsf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "../arc-ieee-754.h"
#if 0 /* DEBUG */
.global __mulsf3
FUNC(__mulsf3)
.balign 4
__mulsf3:
push_s blink
push_s r1
bl.d __mulsf3_c
push_s r0
ld_s r1,[sp,4]
st_s r0,[sp,4]
bl.d __mulsf3_asm
pop_s r0
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
and r12,r0,r1
bic.f 0,0x7f800000,r12
bne 0f
bmsk.f 0,r0,22
bmsk.ne.f r1,r1,22
jne_s [blink] ; both NaN -> OK
0: bl abort
ENDFUNC(__mulsf3)
#define __mulsf3 __mulsf3_asm
#endif /* DEBUG */
.balign 4
.global __mulsf3
FUNC(__mulsf3)
; __mulsf3: IEEE-754 single-precision multiply, r0 = r0 * r1.
; ARC600 DSP-multiply variant: uses mululw/machulw and the accumulator
; (acc2 = low half -- cf. the mlo/mhi defines in the sibling divsf3.S).
; r9 = 0x7f800000 (exponent mask), r4 = 0x7fffffff, both loaded pc-relative.
; Clobbers r2-r12 and the accumulator.
__mulsf3:
ld.as r9,[pcl,80]; [pcl,((.L7f800000-.+2)/4)]
bmsk r4,r1,22 ; fraction of operand 1
bset r2,r0,23 ; fraction of operand 0 with implicit 1
asl_s r2,r2,8
bset r3,r4,23 ; fraction of operand 1 with implicit 1
and r11,r0,r9 ; r11 = exponent field of operand 0
breq.d r11,0,.Ldenorm_dbl0
and r12,r1,r9 ; r12 = exponent field of operand 1
breq.d r12,0,.Ldenorm_dbl1
xor_s r0,r0,r1 ; combine signs (bit 31 of r0)
mululw 0,r2,r3
machulw r6,r2,r3 ; r6:acc2 = 48-bit fraction product
breq.d r11,r9,.Linf_nan_dbl0
ld.as r4,[pcl,69]; [pcl,((.L7fffffff-.+2)/4)]
breq.d r12,r9,.Linf_nan_dbl1
.Lpast_denorm:
asl.f 0,r6,8 ; product in [2..4)? (carry)
mov r7,acc2
add.pl r6,r6,r6 ; no: shift fraction up one
bclr.pl r6,r6,23
add.pl.f r7,r7,r7
add.cs r6,r6,1
lsr.f 0,r6,1 ; C = lowest kept bit, for round-to-even
add_s r12,r12,r11 ; add exponents
adc.f 0,r7,r4 ; round to nearest / even
add_s r12,r12, \
-0x3f800000 ; subtract bias
adc.f r8,r6,r12 ; assemble exponent + rounded fraction
tst.pl r8,r9 ; exponent overflow/underflow?
bic r0,r0,r4 ; keep only the sign in r0
min r3,r8,r9
jpnz.d [blink] ; normal case: done
add.pnz r0,r0,r3
; infinity or denormal number
add.ne.f r3,r3,r3
asr_s r3,r3,23+1 ; (negative) denormalization shift count
bset r6,r6,23
bpnz.d .Linfinity
sub_s r3,r3,1
neg_s r2,r3
brhi.d r2,24,.Lret_r0 ; right shift shift > 24 -> return +-0
lsr r2,r6,r2 ; denormalized fraction
asl r9,r6,r3 ; bits shifted out
lsr.f 0,r2,1
tst r7,r7 ; sticky bits from the low product word
add_s r0,r0,r2
bset.ne r9,r9,0
adc.f 0,r9,r4 ; round to nearest / even
j_s.d [blink]
add.cs r0,r0,1
.Linfinity:
j_s.d [blink]
add_s r0,r0,r9 ; overflow: return +-Inf
.Lret_r0: j_s [blink]
.balign 4
; Operand 0 is denormal (or zero): normalize it, adjust the exponent sum,
; then rejoin the common path.
.Ldenorm_dbl0:
bclr_s r2,r2,31
norm.f r4,r2
add_s r2,r2,r2
asl r2,r2,r4 ; normalized fraction
breq.d r12,r9,.Ldenorm_dbl0_inf_nan_dbl1
asl r4,r4,23 ; exponent correction for the shift
mululw 0,r2,r3
machulw r6,r2,r3
sub.ne.f r12,r12,r4
ld.as r4,[pcl,28]; [pcl,((.L7fffffff-.+2)/4)]
bhi.d .Lpast_denorm
xor_s r0,r0,r1
bmsk r1,r0,30 ; underflow (or 0 * x): return +-0
j_s.d [blink]
bic_s r0,r0,r1
.balign 4
.Ldenorm_dbl0_inf_nan_dbl1:
bmsk.f 0,r0,30 ; 0 * Inf?
mov.eq r1,-1 ; -> NaN
.Linf_nan_dbl1:
xor_s r1,r1,r0
.Linf_nan_dbl0:
bclr_s r1,r1,31 ; pass Inf/NaN through with the combined sign
j_s.d [blink]
xor_s r0,r0,r1
.balign 4
; Operand 1 is denormal (or zero): symmetric to .Ldenorm_dbl0.
.Ldenorm_dbl1:
breq.d r11,r9,.Linf_nan_dbl0_2
norm.f r3,r4
sub_s r3,r3,7
asl r4,r4,r3 ; normalized fraction
mululw 0,r2,r4
machulw r6,r2,r4
sub_s r3,r3,1
asl_s r3,r3,23 ; exponent correction for the shift
sub.ne.f r11,r11,r3
ld.as r4,[pcl,11]; [pcl,((.L7fffffff-.+2)/4)]
bhi.d .Lpast_denorm
bmsk r8,r0,30
j_s.d [blink]
bic r0,r0,r8 ; underflow (or x * 0): return +-0
.balign 4
.Linf_nan_dbl0_2:
bclr_s r1,r1,31
xor_s r0,r0,r1
sub.eq r1,r1,1 ; inf/nan * 0 -> nan
bic.f 0,r9,r1
j_s.d [blink]
or.eq r0,r0,r1 ; r1 nan -> result nan
.balign 4
.L7f800000:
.long 0x7f800000
.L7fffffff:
.long 0x7fffffff
ENDFUNC(__mulsf3)
|
4ms/metamodule-plugin-sdk
| 5,318
|
plugin-libc/libgcc/config/arc/ieee-754/arc600-dsp/muldf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "../arc-ieee-754.h"
#if 0 /* DEBUG */
.global __muldf3
.balign 4
__muldf3:
push_s blink
push_s r2
push_s r3
push_s r0
bl.d __muldf3_c
push_s r1
ld_s r2,[sp,12]
ld_s r3,[sp,8]
st_s r0,[sp,12]
st_s r1,[sp,8]
pop_s r1
bl.d __muldf3_asm
pop_s r0
pop_s r3
pop_s r2
pop_s blink
cmp r0,r2
cmp.eq r1,r3
jeq_s [blink]
b abort
#define __muldf3 __muldf3_asm
#endif /* DEBUG */
__muldf3_support: /* This label makes debugger output saner. */
.balign 4
FUNC(__muldf3)
; __muldf3: IEEE-754 double multiply, DBL0H:DBL0L * DBL1H:DBL1L -> DBL0H:DBL0L
; (register names from arc-ieee-754.h -- TODO confirm, header not in view).
; ARC600 DSP variant: four 32x32 partial products accumulated via
; mululw/maclw/machulw; acc2 is the accumulator low half (cf. the mlo/mhi
; defines in the sibling divsf3.S). Clobbers r4-r12 and the accumulator.
;
; .Ldenorm_2: DBL1's fraction lives entirely in DBL1L (high fraction bits
; all zero): shift it up into DBL1H, compensate in DBL0H's exponent, then
; fall through and re-run the multiply.
.Ldenorm_2:
breq.d DBL1L,0,.Lret0_2 ; 0 input -> 0 output
norm.f r12,DBL1L
mov.mi r12,21
add.pl r12,r12,22 ; r12 = shift count to normalize
neg r11,r12
asl_s r12,r12,20
lsr.f DBL1H,DBL1L,r11
ror DBL1L,DBL1L,r11
sub_s DBL0H,DBL0H,r12 ; exponent compensation
mov.eq DBL1H,DBL1L
sub_l DBL1L,DBL1L,DBL1H
/* Fall through. */
.global __muldf3
.balign 4
__muldf3:
mululw 0,DBL0L,DBL1L ; low x low partial product
machulw r4,DBL0L,DBL1L
ld.as r9,[pcl,0x67] ; ((.L7ff00000-.+2)/4)]
bmsk r6,DBL0H,19
bset r6,r6,20 ; r6 = DBL0 fraction high with implicit 1
mov r8,acc2 ; r8 = lowest product word
mululw 0,r4,1
and r11,DBL0H,r9 ; r11 = DBL0 exponent field
breq.d r11,0,.Ldenorm_dbl0
and r12,DBL1H,r9 ; r12 = DBL1 exponent field
breq.d r12,0,.Ldenorm_dbl1
maclw 0,r6,DBL1L ; accumulate high0 x low1
machulw 0,r6,DBL1L
breq.d r11,r9,.Linf_nan
bmsk r10,DBL1H,19
breq.d r12,r9,.Linf_nan
bset r10,r10,20 ; r10 = DBL1 fraction high with implicit 1
maclw 0,r10,DBL0L ; accumulate high1 x low0
machulw r5,r10,DBL0L
add_s r12,r12,r11 ; add exponents
mov r4,acc2
mululw 0,r5,1
maclw 0,r6,r10 ; accumulate high0 x high1
machulw r7,r6,r10 ; fraction product in r7:acc2:r4:r8
tst r8,r8
bclr r8,r9,30 ; 0x3ff00000
bset.ne r4,r4,0 ; put least significant word into sticky bit
bclr r6,r9,20 ; 0x7fe00000
lsr.f r10,r7,9 ; product in [2..4)?
rsub.eq r8,r8,r9 ; 0x40000000
sub r12,r12,r8 ; subtract bias + implicit 1
brhs.d r12,r6,.Linf_denorm ; exponent over/underflow
rsub r10,r10,12 ; normalization shift count
.Lshift_frac:
neg r8,r10
asl r6,r4,r10
lsr DBL0L,r4,r8
add.f 0,r6,r6 ; round bit -> carry
btst.eq DBL0L,0
cmp.eq r4,r4 ; round to nearest / round to even
asl r4,acc2,r10
lsr r5,acc2,r8
adc.f DBL0L,DBL0L,r4 ; rounded low result word
xor.f 0,DBL0H,DBL1H ; N = result sign
asl r7,r7,r10
add_s r12,r12,r5
adc DBL0H,r12,r7 ; exponent + fraction high, carry from rounding
j_s.d [blink]
bset.mi DBL0H,DBL0H,31 ; apply sign
/* N.B. This is optimized for ARC700.
ARC600 has very different scheduling / instruction selection criteria. */
/* If one number is denormal, subtract some from the exponent of the other
one (if the other exponent is too small, return 0), and normalize the
denormal. Then re-run the computation. */
.Lret0_2:
lsr_s DBL0H,DBL0H,31 ; keep only the sign bit
asl_s DBL0H,DBL0H,31
j_s.d [blink]
mov_s DBL0L,0
.balign 4
; DBL0 is the denormal one: swap operands so .Ldenorm_dbl1 handles it.
.Ldenorm_dbl0:
mov_s r12,DBL0L
mov_s DBL0L,DBL1L
mov_s DBL1L,r12
mov_s r12,DBL0H
mov_s DBL0H,DBL1H
mov_s DBL1H,r12
and r11,DBL0H,r9
.Ldenorm_dbl1:
brhs r11,r9,.Linf_nan ; other operand Inf/NaN
brhs 0x3ca00001,r11,.Lret0 ; other exponent too small -> +-0
sub_s DBL0H,DBL0H,DBL1H
bmsk.f DBL1H,DBL1H,30
add_s DBL0H,DBL0H,DBL1H ; strip DBL1's sign into DBL0H's sign
beq.d .Ldenorm_2 ; high fraction bits all zero
norm r12,DBL1H
sub_s r12,r12,10 ; shift count to normalize DBL1
asl r5,r12,20
asl_s DBL1H,DBL1H,r12
sub DBL0H,DBL0H,r5 ; exponent compensation
neg r5,r12
lsr r6,DBL1L,r5
asl_s DBL1L,DBL1L,r12
b.d __muldf3 ; re-run with normalized operands
add_s DBL1H,DBL1H,r6
.Lret0: xor_s DBL0H,DBL0H,DBL1H
bclr DBL1H,DBL0H,31
xor_s DBL0H,DBL0H,DBL1H ; keep only the combined sign
j_s.d [blink]
mov_s DBL0L,0
.balign 4
; Inf/NaN in either operand: NaN wins; Inf * 0 -> NaN; else signed Inf.
.Linf_nan:
bclr r12,DBL1H,31
xor_s DBL1H,DBL1H,DBL0H ; combined sign in bit 31
bclr_s DBL0H,DBL0H,31
max r8,DBL0H,r12 ; either NaN -> NaN ; otherwise inf
or.f 0,DBL0H,DBL0L ; Z if DBL0 is +-0
mov_s DBL0L,0
or.ne.f DBL1L,DBL1L,r12 ; Z if DBL1 is +-0 (and DBL0 nonzero)
not_s DBL0H,DBL0L ; inf * 0 -> NaN
mov.ne DBL0H,r8
tst_s DBL1H,DBL1H
j_s.d [blink]
bset.mi DBL0H,DBL0H,31 ; apply sign
/* We have checked for infinity / NaN input before, and transformed
denormalized inputs into normalized inputs. Thus, the worst case
exponent overflows are:
1 + 1 - 0x400 == 0xc02 : maximum underflow
0x7fe + 0x7fe - 0x3ff == 0xbfd ; maximum overflow
N.B. 0x7e and 0x7f are also values for overflow.
If (r12 <= -54), we have an underflow to zero. */
.balign 4
.Linf_denorm:
lsr r6,r12,28
brlo.d r6,0xc,.Linf ; overflow -> Inf
asr r6,r12,20 ; (negative) result exponent
add.f r10,r10,r6 ; total right-shift for denormal result
brgt.d r10,0,.Lshift_frac
mov_s r12,0
beq.d .Lround_frac
add r10,r10,32
.Lshift32_frac: ; shift the fraction down by whole words
tst r4,r4
mov r4,acc2
bset.ne r4,r4,1 ; keep sticky information
mululw 0,r7,1
brge.d r10,1,.Lshift_frac
mov r7,0
breq.d r10,0,.Lround_frac
add r10,r10,32
brgt r10,21,.Lshift32_frac
b_s .Lret0 ; shifted out entirely -> +-0
.Lround_frac:
add.f 0,r4,r4 ; round bit -> carry
btst.eq acc2,0 ; tie: round to even
mov_s DBL0L,acc2
mov_s DBL0H,r7
adc.eq.f DBL0L,DBL0L,0
j_s.d [blink]
adc.eq DBL0H,DBL0H,0
.Linf: mov_s DBL0L,0
xor.f DBL1H,DBL1H,DBL0H ; N = result sign
mov_s DBL0H,r9 ; +Inf pattern (0x7ff00000:0)
j_s.d [blink]
bset.mi DBL0H,DBL0H,31
ENDFUNC(__muldf3)
.balign 4
.L7ff00000:
.long 0x7ff00000
|
4ms/metamodule-plugin-sdk
| 6,363
|
plugin-libc/libgcc/config/arc/ieee-754/arc600-dsp/divsf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
- calculate 15..18 bit inverse using a table of approximating polynoms.
precision is higher for polynoms used to evaluate input with larger
value.
- do one newton-raphson iteration step to double the precision,
then multiply this with the divisor
-> more time to decide if dividend is subnormal
- the worst error propagation is on the side of the value range
with the least initial defect, thus giving us about 30 bits precision.
*/
#include "../arc-ieee-754.h"
#define mlo acc2
#define mhi acc1
#define mul64(b,c) mullw 0,b,c` machlw 0,b,c
#define mulu64(b,c) mululw 0,b,c` machulw 0,b,c
#if 0 /* DEBUG */
.global __divsf3
FUNC(__divsf3)
.balign 4
__divsf3:
push_s blink
push_s r1
bl.d __divsf3_c
push_s r0
ld_s r1,[sp,4]
st_s r0,[sp,4]
bl.d __divsf3_asm
pop_s r0
pop_s r1
pop_s blink
cmp r0,r1
#if 1
bne abort
jeq_s [blink]
b abort
#else
bne abort
j_s [blink]
#endif
ENDFUNC(__divsf3)
#define __divsf3 __divsf3_asm
#endif /* DEBUG */
FUNC(__divsf3)
; __divsf3: IEEE-754 single-precision divide, r0 = r0 / r1.
; Algorithm (see the notes at the top of this file): seed a 15..18-bit
; reciprocal from .Ldivtab, double its precision with one Newton-Raphson
; step, multiply by the dividend, then fix up rounding exactly when the
; cheap rounding test is inconclusive. Clobbers r2-r12 and the accumulator
; (mlo/mhi are acc2/acc1, defined above). r9 = 0x7f800000.
.balign 4
.Ldivtab:
.long 0xfc0ffff0
.long 0xf46ffefd
.long 0xed1ffd2a
.long 0xe627fa8e
.long 0xdf7ff73b
.long 0xd917f33b
.long 0xd2f7eea3
.long 0xcd1fe986
.long 0xc77fe3e7
.long 0xc21fdddb
.long 0xbcefd760
.long 0xb7f7d08c
.long 0xb32fc960
.long 0xae97c1ea
.long 0xaa27ba26
.long 0xa5e7b22e
.long 0xa1cfa9fe
.long 0x9ddfa1a0
.long 0x9a0f990c
.long 0x9667905d
.long 0x92df878a
.long 0x8f6f7e84
.long 0x8c27757e
.long 0x88f76c54
.long 0x85df630c
.long 0x82e759c5
.long 0x8007506d
.long 0x7d3f470a
.long 0x7a8f3da2
.long 0x77ef341e
.long 0x756f2abe
.long 0x72f7212d
.long 0x709717ad
.long 0x6e4f0e44
.long 0x6c1704d6
.long 0x69e6fb44
.long 0x67cef1d7
.long 0x65c6e872
.long 0x63cedf18
.long 0x61e6d5cd
.long 0x6006cc6d
.long 0x5e36c323
.long 0x5c76b9f3
.long 0x5abeb0b7
.long 0x5916a79b
.long 0x57769e77
.long 0x55de954d
.long 0x54568c4e
.long 0x52d6834d
.long 0x51667a7f
.long 0x4ffe71b5
.long 0x4e9e68f1
.long 0x4d466035
.long 0x4bf65784
.long 0x4aae4ede
.long 0x496e4646
.long 0x48363dbd
.long 0x47063547
.long 0x45de2ce5
.long 0x44be2498
.long 0x43a61c64
.long 0x4296144a
.long 0x41860c0e
.long 0x407e03ee
.L7f800000:
.long 0x7f800000
.balign 4
.global __divsf3_support
__divsf3_support:
; Divisor is +-0: x/0 -> +-Inf, 0/0 -> NaN.
.Linf_NaN:
bclr.f 0,r0,31 ; 0/0 -> NaN
xor_s r0,r0,r1 ; combine signs
bmsk r1,r0,30
bic_s r0,r0,r1 ; keep only the sign bit
sub.eq r0,r0,1 ; 0/0: smear bits so the OR below gives NaN
j_s.d [blink]
or r0,r0,r9 ; exponent all-ones
; Return +-0 with the combined sign.
.Lret0:
xor_s r0,r0,r1
bmsk r1,r0,30
j_s.d [blink]
bic_s r0,r0,r1
/* N.B. the spacing between divtab and the sub3 to get its address must
be a multiple of 8. */
__divsf3:
ld.as r9,[pcl,-9]; [pcl,(-((.-.L7f800000) >> 2))] ; 0x7f800000
sub3 r3,pcl,37;(.-.Ldivtab) >> 3
lsr r2,r1,17
and.f r11,r1,r9 ; r11 = divisor exponent field; Z if denormal
bmsk r5,r2,5 ; top 6 fraction bits of divisor -> table index
beq.d .Ldenorm_fp1
asl r6,r1,8
and.f r2,r0,r9 ; r2 = dividend exponent field; Z if denormal
ld.as r5,[r3,r5] ; r5 = reciprocal seed
asl r4,r1,9
bset r6,r6,31 ; divisor fraction with explicit 1, left-aligned
breq.d r11,r9,.Linf_nan_fp1 ; divisor Inf/NaN
.Lpast_denorm_fp1:
mululw 0,r5,r4 ; Newton-Raphson refinement of the seed
machulw r8,r5,r4
breq.d r2,r9,.Linf_nan_fp0 ; dividend Inf/NaN
asl r5,r5,13
sub r7,r5,r8 ; refined inverse
mululw 0,r7,r6
machulw r8,r7,r6
beq.d .Ldenorm_fp0
asl r12,r0,8
mulu64 (r8,r7)
bset r3,r12,31 ; dividend fraction with explicit 1, left-aligned
.Lpast_denorm_fp0:
cmp_s r3,r6 ; quotient < 1? then shift
lsr.cc r3,r3,1
add_s r2,r2, /* wait for immediate */ \
0x3f000000 ; rebias exponent
sub r7,r7,mhi ; u1.31 inverse, about 30 bit
mulu64 (r3,r7) ; tentative quotient fraction in mhi
sbc r2,r2,r11 ; exponent = expo0 - expo1 + bias (- shift)
xor.f 0,r0,r1 ; N = quotient sign
and r0,r2,r9
bclr r3,r9,23 ; 0x7f000000
brhs.d r2,r3,.Linf_denorm ; exponent over/underflow
bxor.mi r0,r0,31 ; apply sign
.Lpast_denorm:
add r3,mhi,0x22 ; round to nearest or higher
tst r3,0x3c ; check if rounding was unsafe
lsr r3,r3,6
jne.d [blink] ; return if rounding was safe.
add_s r0,r0,r3
/* work out exact rounding if we fall through here. */
/* We know that the exact result cannot be represented in single
precision. Find the mid-point between the two nearest
representable values, multiply with the divisor, and check if
the result is larger than the dividend. */
add_s r3,r3,r3
sub_s r3,r3,1 ; mid-point fraction
mulu64 (r3,r6) ; mid-point * divisor
asr.f 0,r0,1 ; for round-to-even in case this is a denorm
rsub r2,r9,25
asl_s r12,r12,r2 ; dividend fraction in comparable position
sub.f 0,r12,mlo ; compare with the dividend
j_s.d [blink]
sub.mi r0,r0,1 ; mid-point was above: round down
.Linf_nan_fp1: ; divisor Inf or NaN
lsr_s r0,r0,31
bmsk.f 0,r1,22 ; Z if divisor is Inf (no fraction bits)
asl_s r0,r0,31
bne_s 0f ; inf/inf -> nan
brne r2,r9,.Lsigned0 ; x/inf -> 0, but x/nan -> nan
0: j_s.d [blink]
mov r0,-1 ; NaN
.Lsigned0:
.Linf_nan_fp0: ; dividend Inf/NaN, divisor finite: pass through, fix sign
tst_s r1,r1
j_s.d [blink]
bxor.mi r0,r0,31
.balign 4
.global __divsf3
/* For denormal results, it is possible that an exact result needs
rounding, and thus the round-to-even rule has to come into play. */
.Linf_denorm:
brlo r2,0xc0000000,.Linf ; overflow -> Inf
.Ldenorm:
asr_s r2,r2,23 ; (negative) exponent
bic r0,r0,r9
neg r9,r2 ; denormalization shift count
brlo.d r9,25,.Lpast_denorm
lsr r3,mlo,r9 ; shifted-down fraction
/* Fall through: return +- 0 */
j_s [blink]
.Linf:
j_s.d [blink]
or r0,r0,r9 ; +-Inf
.balign 4
; Divisor is denormal (or zero): normalize, fix the exponent, rejoin.
.Ldenorm_fp1:
norm.f r12,r6 ; flag for x/0 -> Inf check
add r6,r6,r6
rsub r5,r12,16
ror r5,r1,r5
bmsk r5,r5,5 ; table index for the normalized divisor
bic.ne.f 0, \
0x60000000,r0 ; large number / denorm -> Inf
ld.as r5,[r3,r5]
asl r6,r6,r12 ; normalized divisor fraction
beq.d .Linf_NaN
and.f r2,r0,r9
add r4,r6,r6
asl_s r12,r12,23 ; exponent compensation
bne.d .Lpast_denorm_fp1
add_s r2,r2,r12
; Dividend is denormal (or zero): normalize, fix the exponent, rejoin.
.Ldenorm_fp0:
mulu64 (r8,r7)
bclr r12,r12,31
norm.f r3,r12 ; flag for 0/x -> 0 check
bic.ne.f 0,0x60000000,r1 ; denorm/large number -> 0
beq_s .Lret0
asl_s r12,r12,r3 ; normalized dividend fraction
asl_s r3,r3,23 ; exponent compensation
add_s r12,r12,r12
add r11,r11,r3
b.d .Lpast_denorm_fp0
mov_s r3,r12
ENDFUNC(__divsf3)
|
4ms/metamodule-plugin-sdk
| 3,744
|
plugin-libc/libgcc/config/arc/ieee-754/arc600/mulsf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "../arc-ieee-754.h"
#if 0 /* DEBUG */
.global __mulsf3
FUNC(__mulsf3)
.balign 4
__mulsf3:
push_s blink
push_s r1
bl.d __mulsf3_c
push_s r0
ld_s r1,[sp,4]
st_s r0,[sp,4]
bl.d __mulsf3_asm
pop_s r0
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
and r12,r0,r1
bic.f 0,0x7f800000,r12
bne 0f
bmsk.f 0,r0,22
bmsk.ne.f r1,r1,22
jne_s [blink] ; both NaN -> OK
0: bl abort
ENDFUNC(__mulsf3)
#define __mulsf3 __mulsf3_asm
#endif /* DEBUG */
.balign 4
.global __mulsf3
FUNC(__mulsf3)
; __mulsf3 (plain ARC600, no multiplier): IEEE-754 single multiply,
; r0 = r0 * r1, computing the 24x24 fraction product with a 24-iteration
; shift-and-add loop (lp_count / zero-overhead loop).
; r9 = 0x7f800000 (exponent mask), r4 = 0x7fffffff; clobbers r2-r12,
; lp_count.
__mulsf3:
ld.as r9,[pcl,76]; [pcl,((.L7f800000-.+2)/4)]
bmsk r4,r1,22 ; fraction of operand 1
bset r3,r4,23 ; ... with implicit 1
bmsk r2,r0,22 ; fraction of operand 0
and r11,r0,r9 ; r11 = exponent field of operand 0
breq.d r11,0,.Ldenorm_dbl0
and r12,r1,r9 ; r12 = exponent field of operand 1
xor_s r0,r0,r1 ; combine signs
breq.d r11,r9,.Linf_nan_dbl0
bset_s r2,r2,23 ; fraction 0 with implicit 1
breq r12,0,.Ldenorm_dbl1
breq r12,r9,.Linf_nan_dbl1
.Lpast_denorm:
mov r6,0
lsr.f r7,r2 ; r7:carry = multiplier, shifted out bit by bit
; We could do this a bit faster here with a 32 bit shift register and
; inserting the r2 factor / retrieving the low result a byte at a time,
; but that'd increase code size.
mov lp_count,24
.balign 4
lp 0f ; 24 x shift-and-add: r6:r7 = r2 * r3
add.cs r6,r6,r3
lsr.f r6,r6
rrc.f r7,r7
0:
ld.as r4,[pcl,59]; [pcl,((.L7fffffff-.+2)/4)]
asl.f 0,r6,8 ; product in [2..4)? (carry)
add.pl r6,r6,r6 ; no: shift fraction up one
bclr.pl r6,r6,23
add.pl.f r7,r7,r7
add.cs r6,r6,1
lsr.f 0,r6,1 ; C = lowest kept bit, for round-to-even
add_s r12,r12,r11 ; add exponents
adc.f 0,r7,r4 ; round to nearest / even
add_s r12,r12, \
-0x3f800000 ; subtract bias
adc.f r8,r6,r12 ; assemble exponent + rounded fraction
tst.pl r8,r9 ; exponent overflow/underflow?
bic r0,r0,r4 ; keep only the sign
min r3,r8,r9
jpnz.d [blink] ; normal case: done
add.pnz r0,r0,r3
; infinity or denormal number
add.ne.f r3,r3,r3
asr_s r3,r3,23+1 ; (negative) denormalization shift count
bset r6,r6,23
bpnz.d .Linfinity
sub_s r3,r3,1
neg_s r2,r3
brhi.d r2,24,.Lret_r0 ; right shift shift > 24 -> return +-0
lsr r2,r6,r2 ; denormalized fraction
asl r9,r6,r3 ; bits shifted out
lsr.f 0,r2,1
tst r7,r7 ; sticky bits from the low product word
add_s r0,r0,r2
bset.ne r9,r9,0
adc.f 0,r9,r4 ; round to nearest / even
j_s.d [blink]
add.cs r0,r0,1
.Linfinity:
j_s.d [blink]
add_s r0,r0,r9 ; overflow: return +-Inf
.Lret_r0: j_s [blink]
.balign 4
; Operand 0 is denormal (or zero): normalize and adjust the exponent sum.
.Ldenorm_dbl0:
asl_s r2,r2,8
norm.f r4,r2
lsr_s r2,r2,7
asl r2,r2,r4 ; normalized fraction
breq.d r12,r9,.Ldenorm_dbl0_inf_nan_dbl1
asl r4,r4,23 ; exponent correction for the shift
sub.ne.f r12,r12,r4
bhi.d .Lpast_denorm
xor_s r0,r0,r1
bmsk r1,r0,30 ; underflow (or 0 * x): return +-0
j_s.d [blink]
bic_s r0,r0,r1
.balign 4
.Ldenorm_dbl0_inf_nan_dbl1:
bmsk.f 0,r0,30 ; 0 * Inf?
beq_s .Lretnan
xor_s r0,r0,r1
.Linf_nan_dbl1:
xor_s r1,r1,r0
bclr_s r1,r1,31 ; pass Inf/NaN through with the combined sign
j_s.d [blink]
xor_s r0,r0,r1
.Linf_nan_dbl0:
sub_s r2,r1,1 ; inf/nan * 0 -> nan; inf * nan -> nan (use |r2| >= inf)
bic.f 0,r9,r2
xor_s r0,r0,r1
bclr_s r1,r1,31
xor_s r0,r0,r1
jne_s [blink]
.Lretnan:
j_s.d [blink]
mov r0,-1 ; NaN
.balign 4
; Operand 1 is denormal: symmetric to .Ldenorm_dbl0.
.Ldenorm_dbl1:
norm.f r3,r4
sub_s r3,r3,7
asl r4,r4,r3 ; normalized fraction
sub_s r3,r3,1
asl_s r3,r3,23 ; exponent correction for the shift
sub.ne.f r11,r11,r3
bhi.d .Lpast_denorm
mov_s r3,r4
bmsk r3,r0,30 ; underflow (or x * 0): return +-0
j_s.d [blink]
bic_s r0,r0,r3
.balign 4
.L7f800000:
.long 0x7f800000
.L7fffffff:
.long 0x7fffffff
ENDFUNC(__mulsf3)
|
4ms/metamodule-plugin-sdk
| 5,136
|
plugin-libc/libgcc/config/arc/ieee-754/arc600/divsf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "../arc-ieee-754.h"
#if 0 /* DEBUG */
.global __divsf3
FUNC(__divsf3)
.balign 4
__divsf3:
push_s blink
push_s r1
bl.d __divsf3_c
push_s r0
ld_s r1,[sp,4]
st_s r0,[sp,4]
bl.d __divsf3_asm
pop_s r0
pop_s r1
pop_s blink
cmp r0,r1
jeq_s [blink]
and r12,r0,r1
bic.f 0,0x7f800000,r12 ; both NaN -> OK
jeq_s [blink]
bl abort
ENDFUNC(__divsf3)
#define __divsf3 __divsf3_asm
#endif /* DEBUG */
.balign 4
__divdf3_support: /* This label makes debugger output saner. */
FUNC(__divsf3)
; __divsf3 (plain ARC600, no multiplier): IEEE-754 single divide,
; r0 = r0 / r1, via restoring-style add1/sub.cc divide steps (.rep blocks).
; r9 = 0x7f800000 (exponent mask); r4/r5 hold the operands' sign+exponent
; words, r10 the combined sign word; clobbers r2-r5, r8-r12.
;
; Dividend is denormal (or zero): normalize, fix the exponent, rejoin.
.Ldenorm_fp0:
norm.f r12,r2 ; flag for 0/x -> 0 check
bic.ne.f 0,0x60000000,r1 ; denorm/large number -> 0
beq_s .Lret0_NaN
tst r1,r9
add_s r2,r2,r2
sub_s r12,r12,8
asl_s r2,r2,r12 ; normalized fraction
asl_l r12,r12,23 ; exponent compensation
bne.d .Lpast_denorm_fp0
add r5,r5,r12
/* r0 is subnormal, r1 is subnormal or 0. */
.balign 4
; Divisor is denormal (or zero): normalize, fix the exponent, rejoin.
.Ldenorm_fp1:
norm.f r12,r3 ; flag for x/0 -> Inf check
bic.ne.f 0,0x60000000,r0 ; large number/denorm -> Inf
beq_s .Linf
add_s r3,r3,r3
sub_s r12,r12,8
asl_s r3,r3,r12 ; normalized fraction
asl_s r12,r12,23 ; exponent compensation
b.d .Lpast_denorm_fp1
add r4,r4,r12
.Lret0_NaN: ; return +-0, or NaN for 0/0
bclr.f 0,r1,31 ; 0/0 -> NaN
bic r0,r10,r9 ; sign of the quotient
j_s.d [blink]
sub.eq r0,r0,1 ; turn +-0 into NaN for 0/0
.balign 4
.Linf_nan_fp0: ; dividend Inf/NaN
bic.f 0,r9,r1 ; fp1 Inf -> result NaN
bic r1,r5,r9 ; fp1 sign
sub.eq r1,r1,1
j_s.d [blink]
xor_s r0,r0,r1
.Linf_nan_fp1: ; divisor Inf/NaN
bic r0,r4,r9 ; fp0 sign
bmsk.f 0,r1,22 ; x/inf -> 0, x/nan -> nan
xor.eq r1,r1,r9
j_s.d [blink]
xor_s r0,r0,r1
.global __divsf3
.balign 4
.long 0x7f800000 ; exponent mask
__divsf3:
ld r9,[pcl,-4] ; r9 = 0x7f800000 from the word just above
bmsk r2,r0,22 ; dividend fraction
xor r4,r0,r2 ; r4 = dividend sign + exponent
bmsk r3,r1,22 ; divisor fraction
xor r5,r1,r3 ; r5 = divisor sign + exponent
and r11,r0,r9
breq.d r11,0,.Ldenorm_fp0
xor r10,r4,r5 ; r10 carries the combined sign
breq r11,r9,.Linf_nan_fp0
bset_s r2,r2,23 ; implicit 1
and r11,r1,r9
breq r11,0,.Ldenorm_fp1
breq r11,r9,.Linf_nan_fp1
.Lpast_denorm_fp0:
bset_s r3,r3,23 ; implicit 1
.Lpast_denorm_fp1:
cmp r2,r3 ; dividend fraction < divisor fraction?
asl_s r2,r2,6+1
asl_s r3,r3,7
add.lo r2,r2,r2 ; pre-shift so the quotient has a leading 1
bclr r8,r9,30 ; exponent bias
bclr.lo r8,r8,23 ; reduce exp by one if fraction is shifted
sub r4,r4,r5
add r4,r4,r8 ; r4 = result sign + tentative exponent
xor.f 0,r10,r4
bmi .Linf_denorm ; exponent over/underflowed
and.f r12,r4,r9
beq .Ldenorm ; exponent underflowed to the denormal range
sub_s r2,r2,r3 ; discard implicit 1
rsub r3,r3,1 ; prime r3 for two-insn divide-step use
.Ldiv_23bit: ; divide steps, 23..18 result bits
.rep 6
add1.f r2,r3,r2
sub.cc r2,r2,r3
.endr
breq r12,r9,.Linf ; exponent field saturated -> Inf
bmsk r0,r2,6
xor_s r2,r2,r0 ; move quotient bits gathered so far into r0
.Ldiv_17bit: ; divide steps, 17..11 result bits
.rep 7
add1.f r2,r3,r2
sub.cc r2,r2,r3
.endr
asl_s r0,r0,7
bmsk r1,r2,6
xor_s r2,r2,r1
or_s r0,r0,r1
.Ldiv_10bit: ; divide steps, 10..4 result bits
.rep 7
add1.f r2,r3,r2
sub.cc r2,r2,r3
.endr
asl_s r0,r0,7
bmsk r1,r2,6
xor_s r2,r2,r1
or_s r0,r0,r1
.Ldiv_3bit: ; divide steps, 3..1 result bits
.rep 3
add1.f r2,r3,r2
sub.cc r2,r2,r3
.endr
asl_s r0,r0,3
.Ldiv_0bit: ; last step: guard bit + rest for rounding
add1.f r1,r3,r2
sub.cc r1,r1,r3
bmsk_s r2,r2,2
tst r1,-0x7e ; 0xffffff82, test for rest or odd
bmsk_s r1,r1,0
add_s r0,r0,r2 ; assemble fraction
add_s r0,r0,r4 ; add in sign & exponent
j_s.d [blink]
add.ne r0,r0,r1 ; round to nearest / even
.balign 4
.Linf:
j_s.d [blink]
or r0,r10,r9 ; +-Inf with the combined sign
.Lret_r4:
j_s.d [blink]
mov_s r0,r4
.balign 4
.Linf_denorm:
add.f r12,r4,r4
asr_l r12,r12,24 ; (negative) exponent
bpl .Linf ; positive overflow -> Inf
max r12,r12,-24 ; clamp: shifts beyond 24 all yield +-0
.Ldenorm:
rsub r3,r3,1
add r1,pcl,68; .Ldenorm_tab-.
ldw.as r12,[r1,r12] ; pick divide-loop entry point by exponent
mov_s r0,0
lsr_s r2,r2
sub_s r1,r1,r12
j_s.d [r1] ; enter the divide chain mid-way
bic r4,r10,r9 ; r4 = sign only (denormal result)
; Branch offset table: entry i enters the divide chain so that exactly
; the right number of quotient bits is produced for a denormal result.
.short .Ldenorm_tab-.Lret_r4
.short .Ldenorm_tab-.Ldiv_0bit
.short .Ldenorm_tab-.Ldiv_3bit-2*8
.short .Ldenorm_tab-.Ldiv_3bit-1*8
.short .Ldenorm_tab-.Ldiv_3bit
.short .Ldenorm_tab-.Ldiv_10bit-6*8
.short .Ldenorm_tab-.Ldiv_10bit-5*8
.short .Ldenorm_tab-.Ldiv_10bit-3*8
.short .Ldenorm_tab-.Ldiv_10bit-3*8
.short .Ldenorm_tab-.Ldiv_10bit-2*8
.short .Ldenorm_tab-.Ldiv_10bit-1*8
.short .Ldenorm_tab-.Ldiv_10bit
.short .Ldenorm_tab-.Ldiv_17bit-6*8
.short .Ldenorm_tab-.Ldiv_17bit-5*8
.short .Ldenorm_tab-.Ldiv_17bit-4*8
.short .Ldenorm_tab-.Ldiv_17bit-3*8
.short .Ldenorm_tab-.Ldiv_17bit-2*8
.short .Ldenorm_tab-.Ldiv_17bit-1*8
.short .Ldenorm_tab-.Ldiv_17bit
.short .Ldenorm_tab-.Ldiv_23bit-5*8
.short .Ldenorm_tab-.Ldiv_23bit-4*8
.short .Ldenorm_tab-.Ldiv_23bit-3*8
.short .Ldenorm_tab-.Ldiv_23bit-2*8
.short .Ldenorm_tab-.Ldiv_23bit-1*8
.Ldenorm_tab:
.short .Ldenorm_tab-.Ldiv_23bit
ENDFUNC(__divsf3)
|
4ms/metamodule-plugin-sdk
| 10,684
|
plugin-libc/libgcc/config/arc/ieee-754/arc600-mul64/divdf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
to calculate a := b/x as b*y, with y := 1/x:
- x is in the range [1..2)
- calculate 15..18 bit inverse y0 using a table of approximating polynoms.
Precision is higher for polynoms used to evaluate input with larger
value.
- Do one newton-raphson iteration step to double the precision,
then multiply this with the divisor
-> more time to decide if dividend is subnormal
- the worst error propagation is on the side of the value range
with the least initial defect, thus giving us about 30 bits precision.
The truncation error for the either is less than 1 + x/2 ulp.
A 31 bit inverse can be simply calculated by using x with implicit 1
and chaining the multiplies. For a 32 bit inverse, we multiply y0^2
with the bare fraction part of x, then add in y0^2 for the implicit
1 of x.
- If calculating a 31 bit inverse, the systematic error is less than
-1 ulp; likewise, for 32 bit, it is less than -2 ulp.
- If we calculate our seed with a 32 bit fraction, we can archive a
tentative result strictly better than -2 / +2.5 (1) ulp/128, i.e. we
only need to take the step to calculate the 2nd stage rest and
rounding adjust 1/32th of the time. However, if we use a 20 bit
fraction for the seed, the negative error can exceed -2 ulp/128, (2)
thus for a simple add / tst check, we need to do the 2nd stage
rest calculation/ rounding adjust 1/16th of the time.
(1): The inexactness of the 32 bit inverse contributes an error in the
range of (-1 .. +(1+x/2) ) ulp/128. Leaving out the low word of the
rest contributes an error < +1/x ulp/128 . In the interval [1,2),
x/2 + 1/x <= 1.5 .
(2): Unless proven otherwise. I have not actually looked for an
example where -2 ulp/128 is exceeded, and my calculations indicate
that the excess, if existent, is less than -1/512 ulp.
??? The algorithm is still based on the ARC700 optimized code.
Maybe we could make better use of 64 bit multiply results and/or mmed .
*/
#include "../arc-ieee-754.h"
/* N.B. fp-bit.c does double rounding on denormal numbers. */
#if 0 /* DEBUG */
/* Debug harness: run both the C implementation (__divdf3_c) and the
   assembly implementation (__divdf3_asm) on the same operands and
   abort on mismatch.  Two results that are both NaN also match.  */
.global __divdf3
FUNC(__divdf3)
.balign 4
__divdf3:
push_s blink
push_s r2
push_s r3
push_s r0
bl.d __divdf3_c
push_s r1 ; executed in the delay slot of the call
ld_s r2,[sp,12] ; reload original operand words ...
ld_s r3,[sp,8]
st_s r0,[sp,12] ; ... and stash the C result in their place
st_s r1,[sp,8]
pop_s r1
bl.d __divdf3_asm
pop_s r0
pop_s r3 ; C result ends up in r2:r3
pop_s r2
pop_s blink
cmp r0,r2 ; compare asm result (r0:r1) with C result (r2:r3)
cmp.eq r1,r3
jeq_s [blink]
and r12,DBL0H,DBL1H
bic.f 0,0x7ff80000,r12 ; both NaN -> OK
jeq_s [blink]
bl abort
ENDFUNC(__divdf3)
#define __divdf3 __divdf3_asm
#endif /* DEBUG */
FUNC(__divdf3)
.balign 4
.L7ff00000: ; exponent-field mask / +Inf high word for IEEE double
.long 0x7ff00000
.Ldivtab:
/* 64-entry reciprocal seed table, indexed by the top 6 fraction bits
   of the divisor (cf. the lsr r4,r8,26 in __divdf3).  Each entry packs
   coefficients used to form the initial inverse approximation; see the
   algorithm notes at the top of the file.  The pcl-relative accesses
   below hard-code the distance to these labels, so no code or data may
   be inserted here (comment lines emit no bytes and are safe).  */
.long 0xfc0fffe1
.long 0xf46ffdfb
.long 0xed1ffa54
.long 0xe61ff515
.long 0xdf7fee75
.long 0xd91fe680
.long 0xd2ffdd52
.long 0xcd1fd30c
.long 0xc77fc7cd
.long 0xc21fbbb6
.long 0xbcefaec0
.long 0xb7efa100
.long 0xb32f92bf
.long 0xae8f83b7
.long 0xaa2f7467
.long 0xa5ef6479
.long 0xa1cf53fa
.long 0x9ddf433e
.long 0x9a0f3216
.long 0x965f2091
.long 0x92df0f11
.long 0x8f6efd05
.long 0x8c1eeacc
.long 0x88eed876
.long 0x85dec615
.long 0x82eeb3b9
.long 0x800ea10b
.long 0x7d3e8e0f
.long 0x7a8e7b3f
.long 0x77ee6836
.long 0x756e5576
.long 0x72fe4293
.long 0x709e2f93
.long 0x6e4e1c7f
.long 0x6c0e095e
.long 0x69edf6c5
.long 0x67cde3a5
.long 0x65cdd125
.long 0x63cdbe25
.long 0x61ddab3f
.long 0x600d991f
.long 0x5e3d868c
.long 0x5c6d7384
.long 0x5abd615f
.long 0x590d4ecd
.long 0x576d3c83
.long 0x55dd2a89
.long 0x545d18e9
.long 0x52dd06e9
.long 0x516cf54e
.long 0x4ffce356
.long 0x4e9cd1ce
.long 0x4d3cbfec
.long 0x4becae86
.long 0x4aac9da4
.long 0x496c8c73
.long 0x483c7bd3
.long 0x470c6ae8
.long 0x45dc59af
.long 0x44bc4915
.long 0x43ac3924
.long 0x428c27fb
.long 0x418c187a
.long 0x407c07bd
__divdf3_support: /* This label makes debugger output saner. */
.balign 4
.Ldenorm_dbl1:
/* Divisor (DBL1) is denormal: normalize it, adjust the exponent
   difference, and rejoin the main path.  A zero divisor fraction means
   division by zero -> Inf/NaN.  */
brge r6, \
0x43500000,.Linf_NaN ; large number / denorm -> Inf
bmsk.f r12,DBL1H,19
mov.eq r12,DBL1L
mov.eq DBL1L,0
sub.eq r7,r7,32
norm.f r11,r12 ; flag for x/0 -> Inf check
beq_s .Linf_NaN
mov.mi r11,0
add.pl r11,r11,1
add_s r12,r12,r12
asl r8,r12,r11 ; normalize the fraction into r8
rsub r12,r11,31
lsr r12,DBL1L,r12
tst_s DBL1H,DBL1H
or r8,r8,r12
lsr r4,r8,26 ; re-derive the seed table index
lsr DBL1H,r8,12
ld.as r4,[r10,r4]
bxor.mi DBL1H,DBL1H,31
sub r11,r11,11
asl DBL1L,DBL1L,r11
sub r11,r11,1
mulu64 r4,r8
sub r7,r7,r11 ; account for the normalization shift
b.d .Lpast_denorm_dbl1
asl r7,r7,20
.balign 4
.Ldenorm_dbl0:
/* Dividend (DBL0) is denormal: normalize, adjust the exponent in r6,
   and rejoin the main path.  A zero fraction -> +-0 result.  */
bmsk.f r12,DBL0H,19
; wb stall
mov.eq r12,DBL0L
sub.eq r6,r6,32
norm.f r11,r12 ; flag for 0/x -> 0 check
brge r7, \
0x43500000, .Lret0_2 ; denorm/large number -> 0
beq_s .Lret0_2
mov.mi r11,0
add.pl r11,r11,1
asl r12,r12,r11
sub r6,r6,r11
add.f 0,r6,31
lsr r10,DBL0L,r6
mov.mi r10,0
add r6,r6,11+32
neg.f r11,r6
asl DBL0L,DBL0L,r11
mov.pl DBL0L,0
sub r6,r6,32-1
b.d .Lpast_denorm_dbl0
asl r6,r6,20
.Linf_NaN:
/* Return +-Inf; 0/0 yields NaN.  Sign is sign0 ^ sign1.  */
tst_s DBL0L,DBL0L ; 0/0 -> NaN
xor_s DBL1H,DBL1H,DBL0H
bclr.eq.f DBL0H,DBL0H,31
bmsk DBL0H,DBL1H,30
xor_s DBL0H,DBL0H,DBL1H
sub.eq DBL0H,DBL0H,1
mov_s DBL0L,0
j_s.d [blink]
or DBL0H,DBL0H,r9
.balign 4
.Lret0_2:
/* Return +-0 with sign = sign0 ^ sign1.  */
xor_s DBL1H,DBL1H,DBL0H
mov_s DBL0L,0
bmsk DBL0H,DBL1H,30
j_s.d [blink]
xor_s DBL0H,DBL0H,DBL1H
.balign 4
.global __divdf3
/* N.B. the spacing between divtab and the sub3 to get its address must
be a multiple of 8. */
/* double __divdf3 (double DBL0, double DBL1): IEEE double division.
   A table seed plus one Newton-Raphson step yields a ~30 bit inverse
   of the divisor (r4); this is multiplied by the dividend and the
   result refined/rounded, with an exact-rounding fallback when the
   fast rounding is inconclusive (see the file header notes).
   r9 holds 0x7ff00000 (exponent mask) throughout.  */
__divdf3:
asl r8,DBL1H,12
lsr r4,r8,26 ; seed table index: top 6 divisor fraction bits
sub3 r10,pcl,61; (.-.Ldivtab) >> 3
ld.as r9,[pcl,-124]; [pcl,(-((.-.L7ff00000) >> 2))] ; 0x7ff00000
ld.as r4,[r10,r4]
lsr r12,DBL1L,20
and.f r7,DBL1H,r9 ; r7 = divisor exponent field
or r8,r8,r12 ; r8 = divisor fraction, left-aligned
mulu64 r4,r8
beq.d .Ldenorm_dbl1
.Lpast_denorm_dbl1:
and.f r6,DBL0H,r9 ; r6 = dividend exponent field
breq.d r7,r9,.Linf_nan_dbl1
asl r4,r4,12
sub r4,r4,mhi ; Newton-Raphson step on the seed inverse
mulu64 r4,r4
beq.d .Ldenorm_dbl0
lsr r8,r8,1
breq.d r6,r9,.Linf_nan_dbl0
asl r12,DBL0H,11
lsr r10,DBL0L,21
.Lpast_denorm_dbl0:
bset r8,r8,31 ; divisor fraction with implicit 1
mulu64 mhi,r8
add_s r12,r12,r10
bset r5,r12,31 ; dividend fraction with implicit 1
cmp r5,r8
cmp.eq DBL0L,DBL1L
lsr.cc r5,r5,1 ; pre-shift if dividend fraction < divisor fraction
sub r4,r4,mhi ; u1.31 inverse, about 30 bit
mulu64 r5,r4 ; result fraction highpart
lsr r8,r8,2 ; u3.29
add r5,r6, /* wait for immediate */ \
0x3fe00000
mov r11,mhi ; result fraction highpart
mulu64 r11,r8 ; u-28.31
asl_s DBL1L,DBL1L,9 ; u-29.23:9
sbc r6,r5,r7 ; biased result exponent
mov r12,mlo ; u-28.31
mulu64 r11,DBL1L ; mhi: u-28.23:9
add.cs DBL0L,DBL0L,DBL0L
asl_s DBL0L,DBL0L,6 ; u-26.25:7
asl r10,r11,23
sub_l DBL0L,DBL0L,r12
lsr r7,r11,9
sub r5,DBL0L,mhi ; rest msw ; u-26.31:0
mul64 r5,r4 ; mhi: result fraction lowpart
xor.f 0,DBL0H,DBL1H ; result sign = sign0 ^ sign1
and DBL0H,r6,r9
add_s DBL0H,DBL0H,r7
bclr r12,r9,20 ; 0x7fe00000
brhs.d r6,r12,.Linf_denorm ; exponent over-/underflow path
bxor.mi DBL0H,DBL0H,31
add.f r12,mhi,0x11 ; round to nearest or higher
asr r9,r12,5
sub.mi DBL0H,DBL0H,1
add.f DBL0L,r9,r10
tst r12,0x1c ; check if rounding was unsafe
jne.d [blink] ; return if rounding was safe
add.cs DBL0H,DBL0H,1
/* work out exact rounding if we fall through here. */
/* We know that the exact result cannot be represented in double
precision. Find the mid-point between the two nearest
representable values, multiply with the divisor, and check if
the result is larger than the dividend. Since we want to know
only the sign bit, it is sufficient to calculate only the
highpart of the lower 64 bits. */
mulu64 r11,DBL1L ; rest before considering r12 in r5 : -mlo
sub.f DBL0L,DBL0L,1
asl r12,r9,2 ; u-22.30:2
sub.cs DBL0H,DBL0H,1
sub.f r12,r12,2
mov r10,mlo ; rest before considering r12 in r5 : -r10
mulu64 r12,DBL1L ; mhi: u-51.32
asl r5,r5,25 ; s-51.7:25
lsr r10,r10,7 ; u-51.30:2
mov r7,mhi ; u-51.32
mulu64 r12,r8 ; mlo: u-51.31:1
sub r5,r5,r10
add.mi r5,r5,DBL1L ; signed multiply adjust for r12*DBL1L
bset r7,r7,0 ; make sure that the result is not zero, and that
sub r5,r5,r7 ; a highpart zero appears negative
sub.f r5,r5,mlo ; rest msw
add.pl.f DBL0L,DBL0L,1
j_s.d [blink]
add.eq DBL0H,DBL0H,1
.Linf_nan_dbl1: ; 0/Inf -> NaN Inf/Inf -> NaN x/Inf-> 0 x/NaN -> NaN
or.f 0,r6,DBL0L
cmp.ne r6,r9
not_s DBL0L,DBL1H
sub_s.ne DBL0L,DBL0L,DBL0L
tst_s DBL0H,DBL0H
add_s DBL0H,DBL1H,DBL0L
j_s.d [blink]
bxor.mi DBL0H,DBL0H,31
.Linf_nan_dbl0: ; dividend is Inf or NaN: propagate with result sign
tst_s DBL1H,DBL1H
j_s.d [blink]
bxor.mi DBL0H,DBL0H,31
.balign 4
.Linf_denorm:
/* Result exponent out of range: dispatch to overflow (-> Inf) or
   underflow (-> denormal / zero) on the top nibble of r6.  */
lsr r12,r6,28
brlo.d r12,0xc,.Linf
.Ldenorm:
asr r6,r6,20 ; (negative) shift count for the denormal result
neg r9,r6
mov_s DBL0H,0
brhs.d r9,54,.Lret0 ; fraction shifted out entirely -> +-0
bxor.mi DBL0H,DBL0H,31
add r12,mhi,1
and r12,r12,-4
rsub r7,r6,5
asr r10,r12,28
bmsk r4,r12,27
min r7,r7,31
asr DBL0L,r4,r7
add DBL1H,r11,r10
abs.f r10,r4
sub.mi r10,r10,1
add.f r7,r6,32-5
asl r4,r4,r7
mov.mi r4,r10
add.f r10,r6,23
rsub r7,r6,9
lsr r7,DBL1H,r7
asl r10,DBL1H,r10
or.pnz DBL0H,DBL0H,r7
or.mi r4,r4,r10
mov.mi r10,r7
add.f DBL0L,r10,DBL0L
add.cs.f DBL0H,DBL0H,1 ; carry clear after this point
bxor.f 0,r4,31
add.pnz.f DBL0L,DBL0L,1
add.cs.f DBL0H,DBL0H,1
jne_s [blink]
/* Calculation so far was not conclusive; calculate further rest. */
mulu64 r11,DBL1L ; rest before considering r12 in r5 : -mlo
asr.f r12,r12,3
asl r5,r5,25 ; s-51.7:25
mov r11,mlo ; rest before considering r12 in r5 : -r11
mulu64 r12,r8 ; u-51.31:1
and r9,DBL0L,1 ; tie-breaker: round to even
lsr r11,r11,7 ; u-51.30:2
mov DBL1H,mlo ; u-51.31:1
mulu64 r12,DBL1L ; u-51.62:2
sub.mi r11,r11,DBL1L ; signed multiply adjust for r12*DBL1L
add_s DBL1H,DBL1H,r11
sub DBL1H,DBL1H,r5 ; -rest msw
add_s DBL1H,DBL1H,mhi ; -rest msw
add.f 0,DBL1H,DBL1H ; can't ror.f by 32 :-(
tst_s DBL1H,DBL1H
cmp.eq mlo,r9
add.cs.f DBL0L,DBL0L,1
j_s.d [blink]
add.cs DBL0H,DBL0H,1
.Lret0:
/* return +- 0 */
j_s.d [blink]
mov_s DBL0L,0
.Linf:
/* Overflow: return +-Inf (r9 = 0x7ff00000).  */
mov_s DBL0H,r9
mov_s DBL0L,0
j_s.d [blink]
bxor.mi DBL0H,DBL0H,31
ENDFUNC(__divdf3)
|
4ms/metamodule-plugin-sdk
| 3,684
|
plugin-libc/libgcc/config/arc/ieee-754/arc600-mul64/mulsf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "../arc-ieee-754.h"
#if 0 /* DEBUG */
/* Debug harness: compare the C implementation (__mulsf3_c) against the
   assembly implementation (__mulsf3_asm) and abort on mismatch; two
   results that are both NaN also count as a match.  */
.global __mulsf3
FUNC(__mulsf3)
.balign 4
__mulsf3:
push_s blink
push_s r1
bl.d __mulsf3_c
push_s r0 ; executed in the delay slot of the call
ld_s r1,[sp,4] ; reload original r1; stash the C result
st_s r0,[sp,4]
bl.d __mulsf3_asm
pop_s r0
pop_s r1 ; r1 = C result, r0 = asm result
pop_s blink
cmp r0,r1
jeq_s [blink]
and r12,r0,r1
bic.f 0,0x7f800000,r12
bne 0f
bmsk.f 0,r0,22
bmsk.ne.f r1,r1,22
jne_s [blink] ; both NaN -> OK
0: bl abort
ENDFUNC(__mulsf3)
#define __mulsf3 __mulsf3_asm
#endif /* DEBUG */
.balign 4
.global __mulsf3
FUNC(__mulsf3)
/* float __mulsf3 (float r0, float r1): IEEE single multiply using the
   ARC600 mul64 multiplier (64-bit product in mhi:mlo).
   r9 = 0x7f800000 (exponent mask), r4 = 0x7fffffff (rounding addend).
   The pcl-relative loads below hard-code distances to the constants at
   the end, so no code/data may be inserted (comments are safe).  */
__mulsf3:
ld.as r9,[pcl,80]; [pcl,((.L7f800000-.+2)/4)]
bmsk r4,r1,22
bset r2,r0,23 ; r0 fraction with implicit 1
asl_s r2,r2,8
bset r3,r4,23 ; r1 fraction with implicit 1
mulu64 r2,r3
and r11,r0,r9 ; r11 = exponent field of r0
breq.d r11,0,.Ldenorm_dbl0
and r12,r1,r9 ; r12 = exponent field of r1
breq.d r12,0,.Ldenorm_dbl1
xor_s r0,r0,r1 ; result sign = sign0 ^ sign1
breq.d r11,r9,.Linf_nan_dbl0
ld.as r4,[pcl,70]; [pcl,((.L7fffffff-.+2)/4)]
breq.d r12,r9,.Linf_nan_dbl1
.Lpast_denorm:
asl.f 0,mhi,8 ; is the product already normalized?
mov r6,mhi
mov r7,mlo
add.pl r6,r6,r6 ; no: shift the product up by one
bclr.pl r6,r6,23
add.pl.f r7,r7,r7
add.cs r6,r6,1
lsr.f 0,r6,1 ; lsb into carry for the rounding add below
add_s r12,r12,r11 ; add exponents
adc.f 0,r7,r4 ; round-to-nearest/even adjust (r4 = 0x7fffffff)
add_s r12,r12, \
-0x3f800000
adc.f r8,r6,r12
tst.pl r8,r9
bic r0,r0,r4
min r3,r8,r9
jpnz.d [blink]
add.pnz r0,r0,r3
; infinity or denormal number
add.ne.f r3,r3,r3
asr_s r3,r3,23+1 ; (negative) shift count for a denormal result
bset r6,r6,23
bpnz.d .Linfinity
sub_s r3,r3,1
neg_s r2,r3
brhi.d r2,24,.Lret_r0 ; right shift > 24 -> return +-0
lsr r2,r6,r2
asl r9,r6,r3
lsr.f 0,r2,1
tst r7,r7
add_s r0,r0,r2
bset.ne r9,r9,0 ; fold remaining low bits into the sticky bit
adc.f 0,r9,r4
j_s.d [blink]
add.cs r0,r0,1
.Linfinity:
j_s.d [blink]
add_s r0,r0,r9
.Lret_r0: j_s [blink]
.balign 4
.Ldenorm_dbl0:
/* r0 is denormal: normalize it, compensate the exponent sum, and
   retry (or dispose of 0 / Inf / NaN combinations).  */
bclr_s r2,r2,31
norm.f r4,r2
add_s r2,r2,r2
asl r2,r2,r4
mulu64 r2,r3
breq.d r12,r9,.Ldenorm_dbl0_inf_nan_dbl1
asl r4,r4,23
sub.ne.f r12,r12,r4 ; compensate exponent for the normalization
ld.as r4,[pcl,29]; [pcl,((.L7fffffff-.+2)/4)]
bhi.d .Lpast_denorm
xor_s r0,r0,r1
bmsk r1,r0,30 ; zero operand or total underflow -> +-0
j_s.d [blink]
bic_s r0,r0,r1
.balign 4
.Ldenorm_dbl0_inf_nan_dbl1:
bmsk.f 0,r0,30 ; 0 * Inf or 0 * NaN -> NaN
beq_s .Lretnan
xor_s r0,r0,r1
.Linf_nan_dbl1:
xor_s r1,r1,r0
.Linf_nan_dbl0:
bclr_s r1,r1,31
cmp_s r1,r9
jls.d [blink]
xor_s r0,r0,r1
; r1 NaN -> result NaN
.Lretnan:
j_s.d [blink]
mov r0,-1
.balign 4
.Ldenorm_dbl1:
/* r1 is denormal: normalize it, compensate the exponent sum, retry.  */
breq.d r11,r9,.Linf_nan_dbl0_2
norm.f r3,r4
sub_s r3,r3,7
asl r4,r4,r3
mulu64 r2,r4
sub_s r3,r3,1
asl_s r3,r3,23
sub.ne.f r11,r11,r3
ld.as r4,[pcl,11]; [pcl,((.L7fffffff-.+2)/4)]
bhi.d .Lpast_denorm
bmsk r8,r0,30
j_s.d [blink]
bic r0,r0,r8
.balign 4
.Linf_nan_dbl0_2:
bclr_s r1,r1,31
xor_s r0,r0,r1
sub.eq r1,r1,1 ; inf/nan * 0 -> nan
bic.f 0,r9,r1
j_s.d [blink]
or.eq r0,r0,r1 ; r1 nan -> result nan
.balign 4
.L7f800000:
.long 0x7f800000
.L7fffffff:
.long 0x7fffffff
ENDFUNC(__mulsf3)
|
4ms/metamodule-plugin-sdk
| 5,348
|
plugin-libc/libgcc/config/arc/ieee-754/arc600-mul64/muldf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "../arc-ieee-754.h"
#if 0 /* DEBUG */
/* Debug harness: compare the C implementation (__muldf3_c) against the
   assembly implementation (__muldf3_asm); abort on mismatch unless
   both results are NaN.  */
.global __muldf3
.balign 4
__muldf3:
push_s blink
push_s r2
push_s r3
push_s r0
bl.d __muldf3_c
push_s r1 ; executed in the delay slot of the call
ld_s r2,[sp,12] ; reload original operands ...
ld_s r3,[sp,8]
st_s r0,[sp,12] ; ... and stash the C result in their place
st_s r1,[sp,8]
pop_s r1
bl.d __muldf3_asm
pop_s r0
pop_s r3 ; C result ends up in r2:r3
pop_s r2
pop_s blink
cmp r0,r2
cmp.eq r1,r3
jeq_s [blink]
and r12,DBL0H,DBL1H
bic.f 0,0x7ff80000,r12 ; both NaN -> OK
jeq_s [blink]
b abort
#define __muldf3 __muldf3_asm
#endif /* DEBUG */
__muldf3_support: /* This label makes debugger output saner. */
.balign 4
FUNC(__muldf3)
.Ldenorm_2:
/* DBL1's high fraction word is zero here (reached via the bmsk.f /
   beq in .Ldenorm_dbl1): normalize from DBL1L alone, adjust the
   exponent, and fall through to restart __muldf3.  */
breq.d DBL1L,0,.Lret0_2 ; 0 input -> 0 output
norm.f r12,DBL1L
mov.mi r12,21
add.pl r12,r12,22
neg r11,r12
asl_s r12,r12,20
lsr.f DBL1H,DBL1L,r11
ror DBL1L,DBL1L,r11
sub_s DBL0H,DBL0H,r12 ; compensate exponent for the shift
mov.eq DBL1H,DBL1L
sub_l DBL1L,DBL1L,DBL1H
/* Fall through. */
.global __muldf3
.balign 4
/* double __muldf3 (double DBL0, double DBL1): IEEE double multiply
   using the ARC600 mul64 multiplier; the fraction product is
   accumulated in r7:r5:r4.  r9 = 0x7ff00000 (exponent mask); the
   pcl-relative load of it hard-codes the distance to .L7ff00000.  */
__muldf3:
mulu64 DBL0L,DBL1L
ld.as r9,[pcl,0x68] ; ((.L7ff00000-.+2)/4)]
bmsk r6,DBL0H,19
bset r6,r6,20 ; DBL0 high fraction with implicit 1
and r11,DBL0H,r9 ; r11 = exponent field of DBL0
breq.d r11,0,.Ldenorm_dbl0
and r12,DBL1H,r9 ; r12 = exponent field of DBL1
breq.d r12,0,.Ldenorm_dbl1
mov r8,mlo
mov r4,mhi
mulu64 r6,DBL1L
breq.d r11,r9,.Linf_nan
bmsk r10,DBL1H,19
breq.d r12,r9,.Linf_nan
bset r10,r10,20 ; DBL1 high fraction with implicit 1
add.f r4,r4,mlo
adc r5,mhi,0
mulu64 r10,DBL0L
add_s r12,r12,r11 ; add exponents
add.f r4,r4,mlo
adc r5,r5,mhi
mulu64 r6,r10
tst r8,r8
bclr r8,r9,30 ; 0x3ff00000
bset.ne r4,r4,0 ; put least significant word into sticky bit
bclr r6,r9,20 ; 0x7fe00000
add.f r5,r5,mlo
adc r7,mhi,0 ; fraction product in r7:r5:r4
lsr.f r10,r7,9
rsub.eq r8,r8,r9 ; 0x40000000
sub r12,r12,r8 ; subtract bias + implicit 1
brhs.d r12,r6,.Linf_denorm ; exponent overflow or underflow
rsub r10,r10,12
.Lshift_frac:
/* Shift the fraction into its final position (r10 = left shift
   count), round to nearest / even, merge sign and exponent.  */
neg r8,r10
asl r6,r4,r10
lsr DBL0L,r4,r8
add.f 0,r6,r6
btst.eq DBL0L,0
cmp.eq r4,r4 ; round to nearest / round to even
asl r4,r5,r10
lsr r5,r5,r8
adc.f DBL0L,DBL0L,r4
xor.f 0,DBL0H,DBL1H ; result sign = sign0 ^ sign1
asl r7,r7,r10
add_s r12,r12,r5
adc DBL0H,r12,r7
j_s.d [blink]
bset.mi DBL0H,DBL0H,31
/* N.B. This is optimized for ARC700.
ARC600 has very different scheduling / instruction selection criteria. */
/* If one number is denormal, subtract some from the exponent of the other
one (if the other exponent is too small, return 0), and normalize the
denormal. Then re-run the computation. */
.Lret0_2:
/* Return +-0, keeping only DBL0's sign bit.  */
lsr_s DBL0H,DBL0H,31
asl_s DBL0H,DBL0H,31
j_s.d [blink]
mov_s DBL0L,0
.balign 4
.Ldenorm_dbl0:
/* DBL0 is the denormal: swap the operands so the denormal sits in
   DBL1, then share the .Ldenorm_dbl1 code.  */
mov_s r12,DBL0L
mov_s DBL0L,DBL1L
mov_s DBL1L,r12
mov_s r12,DBL0H
mov_s DBL0H,DBL1H
mov_s DBL1H,r12
and r11,DBL0H,r9
.Ldenorm_dbl1:
brhs r11,r9,.Linf_nan ; other operand is Inf or NaN
brhs 0x3ca00001,r11,.Lret0 ; other exponent too small -> +-0
sub_s DBL0H,DBL0H,DBL1H
bmsk.f DBL1H,DBL1H,30
add_s DBL0H,DBL0H,DBL1H
beq.d .Ldenorm_2
norm r12,DBL1H
sub_s r12,r12,10 ; normalization shift count
asl r5,r12,20
asl_s DBL1H,DBL1H,r12
sub DBL0H,DBL0H,r5 ; compensate exponent
neg r5,r12
lsr r6,DBL1L,r5
asl_s DBL1L,DBL1L,r12
b.d __muldf3
add_s DBL1H,DBL1H,r6
.Lret0: xor_s DBL0H,DBL0H,DBL1H
bclr DBL1H,DBL0H,31
xor_s DBL0H,DBL0H,DBL1H
j_s.d [blink]
mov_s DBL0L,0
.balign 4
.Linf_nan:
/* At least one operand is Inf or NaN: NaN wins, Inf * 0 -> NaN,
   otherwise +-Inf with the product's sign.  */
bclr r12,DBL1H,31
xor_s DBL1H,DBL1H,DBL0H
bclr_s DBL0H,DBL0H,31
max r8,DBL0H,r12 ; either NaN -> NaN ; otherwise inf
or.f 0,DBL0H,DBL0L
mov_s DBL0L,0
or.ne.f DBL1L,DBL1L,r12
not_s DBL0H,DBL0L ; inf * 0 -> NaN
mov.ne DBL0H,r8
tst_s DBL1H,DBL1H
j_s.d [blink]
bset.mi DBL0H,DBL0H,31
/* We have checked for infinity / NaN input before, and transformed
denormalized inputs into normalized inputs. Thus, the worst case
exponent overflows are:
1 + 1 - 0x400 == 0xc02 : maximum underflow
0x7fe + 0x7fe - 0x3ff == 0xbfd ; maximum overflow
N.B. 0x7e and 0x7f are also values for overflow.
If (r12 <= -54), we have an underflow to zero. */
.balign 4
.Linf_denorm:
lsr r6,r12,28
brlo.d r6,0xc,.Linf ; distinguish overflow (-> Inf) from underflow
asr r6,r12,20
add.f r10,r10,r6 ; fold the (negative) exponent into the shift count
brgt.d r10,0,.Lshift_frac
mov_s r12,0
beq.d .Lround_frac
add r10,r10,32
.Lshift32_frac:
/* Shift the r7:r5:r4 fraction right a word at a time (keeping sticky
   information) until the residual shift fits the paths above.  */
tst r4,r4
mov r4,r5
bset.ne r4,r4,1 ; preserve sticky bit
mov r5,r7
brge.d r10,1,.Lshift_frac
mov r7,0
breq.d r10,0,.Lround_frac
add r10,r10,32
brgt r10,21,.Lshift32_frac
b_s .Lret0
.Lround_frac:
/* Fraction already in place; just round to nearest / even.  */
add.f 0,r4,r4
btst.eq r5,0
mov_s DBL0L,r5
mov_s DBL0H,r7
adc.eq.f DBL0L,DBL0L,0
j_s.d [blink]
adc.eq DBL0H,DBL0H,0
.Linf: mov_s DBL0L,0
xor.f DBL1H,DBL1H,DBL0H
mov_s DBL0H,r9
j_s.d [blink]
bset.mi DBL0H,DBL0H,31
ENDFUNC(__muldf3)
.balign 4
.L7ff00000:
.long 0x7ff00000
|
4ms/metamodule-plugin-sdk
| 6,297
|
plugin-libc/libgcc/config/arc/ieee-754/arc600-mul64/divsf3.S
|
/* Copyright (C) 2008-2022 Free Software Foundation, Inc.
Contributor: Joern Rennecke <joern.rennecke@embecosm.com>
on behalf of Synopsys Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
- calculate 15..18 bit inverse using a table of approximating polynomials.
precision is higher for polynomials used to evaluate input with larger
value.
- do one Newton-Raphson iteration step to double the precision,
then multiply this with the divisor
-> more time to decide if dividend is subnormal
- the worst error propagation is on the side of the value range
with the least initial defect, thus giving us about 30 bits precision.
*/
#include "../arc-ieee-754.h"
#if 0 /* DEBUG */
/* Debug harness: compare the C implementation (__divsf3_c) against the
   assembly implementation (__divsf3_asm); abort on mismatch.  */
.global __divsf3
FUNC(__divsf3)
.balign 4
__divsf3:
push_s blink
push_s r1
bl.d __divsf3_c
push_s r0 ; executed in the delay slot of the call
ld_s r1,[sp,4] ; reload original r1; stash the C result
st_s r0,[sp,4]
bl.d __divsf3_asm
pop_s r0
pop_s r1 ; r1 = C result, r0 = asm result
pop_s blink
cmp r0,r1
#if 1
bne abort
jeq_s [blink]
b abort
#else
bne abort
j_s [blink]
#endif
ENDFUNC(__divsf3)
#define __divsf3 __divsf3_asm
#endif /* DEBUG */
FUNC(__divsf3)
.balign 4
.Ldivtab:
/* 64-entry table of approximating-polynomial coefficients, indexed by
   the top 6 fraction bits of the divisor; yields the 15..18 bit
   initial inverse described in the file header.  The pcl-relative
   references below hard-code the distance to these labels, so nothing
   emitting bytes may be inserted here (comment lines are safe).  */
.long 0xfc0ffff0
.long 0xf46ffefd
.long 0xed1ffd2a
.long 0xe627fa8e
.long 0xdf7ff73b
.long 0xd917f33b
.long 0xd2f7eea3
.long 0xcd1fe986
.long 0xc77fe3e7
.long 0xc21fdddb
.long 0xbcefd760
.long 0xb7f7d08c
.long 0xb32fc960
.long 0xae97c1ea
.long 0xaa27ba26
.long 0xa5e7b22e
.long 0xa1cfa9fe
.long 0x9ddfa1a0
.long 0x9a0f990c
.long 0x9667905d
.long 0x92df878a
.long 0x8f6f7e84
.long 0x8c27757e
.long 0x88f76c54
.long 0x85df630c
.long 0x82e759c5
.long 0x8007506d
.long 0x7d3f470a
.long 0x7a8f3da2
.long 0x77ef341e
.long 0x756f2abe
.long 0x72f7212d
.long 0x709717ad
.long 0x6e4f0e44
.long 0x6c1704d6
.long 0x69e6fb44
.long 0x67cef1d7
.long 0x65c6e872
.long 0x63cedf18
.long 0x61e6d5cd
.long 0x6006cc6d
.long 0x5e36c323
.long 0x5c76b9f3
.long 0x5abeb0b7
.long 0x5916a79b
.long 0x57769e77
.long 0x55de954d
.long 0x54568c4e
.long 0x52d6834d
.long 0x51667a7f
.long 0x4ffe71b5
.long 0x4e9e68f1
.long 0x4d466035
.long 0x4bf65784
.long 0x4aae4ede
.long 0x496e4646
.long 0x48363dbd
.long 0x47063547
.long 0x45de2ce5
.long 0x44be2498
.long 0x43a61c64
.long 0x4296144a
.long 0x41860c0e
.long 0x407e03ee
.L7f800000: ; exponent-field mask / +Inf for IEEE single
.long 0x7f800000
.balign 4
.global __divsf3_support
__divsf3_support:
.Linf_NaN:
/* Return +-Inf (NaN for 0/0); sign = sign0 ^ sign1.  */
bclr.f 0,r0,31 ; 0/0 -> NaN
xor_s r0,r0,r1
bmsk r1,r0,30
bic_s r0,r0,r1
sub.eq r0,r0,1
j_s.d [blink]
or r0,r0,r9
.Lret0:
/* Return +-0 with sign = sign0 ^ sign1.  */
xor_s r0,r0,r1
bmsk r1,r0,30
j_s.d [blink]
bic_s r0,r0,r1
/* N.B. the spacing between divtab and the sub3 to get its address must
be a multiple of 8. */
/* float __divsf3 (float r0, float r1): IEEE single division.
   Table seed + one Newton-Raphson step give a ~30 bit inverse of the
   divisor (r7); multiply by the dividend and round, with an exact
   rounding check when the fast rounding is inconclusive.
   r9 = 0x7f800000 (exponent mask).  */
__divsf3:
lsr r2,r1,17
sub3 r3,pcl,37 ; (.-.Ldivtab) >> 3
bmsk_s r2,r2,5 ; table index: top 6 divisor fraction bits
ld.as r5,[r3,r2]
asl r4,r1,9
ld.as r9,[pcl,-13]; [pcl,(-((.-.L7f800000) >> 2))] ; 0x7f800000
mulu64 r5,r4
and.f r11,r1,r9 ; r11 = divisor exponent field
asl r6,r1,8
bset r6,r6,31 ; divisor fraction with implicit 1, left-aligned
beq.d .Ldenorm_fp1
asl r5,r5,13
breq.d r11,r9,.Linf_nan_fp1
and.f r2,r0,r9 ; r2 = dividend exponent field
sub r7,r5,mhi ; Newton-Raphson step on the seed inverse
mulu64 r7,r6
beq.d .Ldenorm_fp0
asl r12,r0,8
breq.d r2,r9,.Linf_nan_fp0
mulu64 mhi,r7
.Lpast_denorm_fp1:
bset r3,r12,31 ; dividend fraction with implicit 1
.Lpast_denorm_fp0:
cmp_s r3,r6
lsr.cc r3,r3,1 ; pre-shift if dividend fraction < divisor fraction
add_s r2,r2, /* wait for immediate */ \
0x3f000000
sub r7,r7,mhi ; u1.31 inverse, about 30 bit
mulu64 r3,r7
sbc r2,r2,r11 ; biased result exponent
xor.f 0,r0,r1 ; result sign = sign0 ^ sign1
and r0,r2,r9
bclr r3,r9,23 ; 0x7f000000
brhs.d r2,r3,.Linf_denorm ; exponent over-/underflow path
bxor.mi r0,r0,31
.Lpast_denorm:
add r3,mhi,0x22 ; round to nearest or higher
tst r3,0x3c ; check if rounding was unsafe
lsr r3,r3,6
jne.d [blink] ; return if rounding was safe.
add_s r0,r0,r3
/* work out exact rounding if we fall through here. */
/* We know that the exact result cannot be represented in single
precision. Find the mid-point between the two nearest
representable values, multiply with the divisor, and check if
the result is larger than the dividend. */
add_s r3,r3,r3
sub_s r3,r3,1 ; mid-point between the two candidate results
mulu64 r3,r6
asr.f 0,r0,1 ; for round-to-even in case this is a denorm
rsub r2,r9,25
asl_s r12,r12,r2
sub.f 0,r12,mlo ; sign of (dividend - mid-point * divisor)
j_s.d [blink]
sub.mi r0,r0,1
.Linf_nan_fp1:
/* Divisor is Inf or NaN.  */
lsr_s r0,r0,31
bmsk.f 0,r1,22
asl_s r0,r0,31
bne_s 0f ; inf/inf -> nan
brne r2,r9,.Lsigned0 ; x/inf -> 0, but x/nan -> nan
0: j_s.d [blink]
mov r0,-1
.Lsigned0:
.Linf_nan_fp0:
/* Dividend is Inf or NaN: propagate it with the result sign.  */
tst_s r1,r1
j_s.d [blink]
bxor.mi r0,r0,31
.balign 4
.global __divsf3
/* For denormal results, it is possible that an exact result needs
rounding, and thus the round-to-even rule has to come into play. */
.Linf_denorm:
brlo r2,0xc0000000,.Linf
.Ldenorm:
/* Underflow: shift the fraction right into denormal position.  */
asr_s r2,r2,23
bic r0,r0,r9
neg r9,r2
brlo.d r9,25,.Lpast_denorm
lsr r3,mlo,r9
/* Fall through: return +- 0 */
j_s [blink]
.Linf:
/* Overflow: return +-Inf.  */
j_s.d [blink]
or r0,r0,r9
.balign 4
.Ldenorm_fp1:
/* Divisor is denormal: normalize it, fix up the exponent, and rejoin
   the main path (division by zero goes to .Linf_NaN).  */
bclr r6,r6,31
norm.f r12,r6 ; flag for x/0 -> Inf check
add r6,r6,r6
rsub r5,r12,16
ror r5,r1,r5
asl r6,r6,r12
bmsk r5,r5,5 ; re-derive the seed table index
ld.as r5,[r3,r5]
add r4,r6,r6
; load latency
mulu64 r5,r4
bic.ne.f 0, \
0x60000000,r0 ; large number / denorm -> Inf
asl r5,r5,13
sub r7,r5,mhi
beq.d .Linf_NaN
mulu64 r7,r6
asl_s r12,r12,23
and.f r2,r0,r9
add_s r2,r2,r12 ; compensate exponent for the normalization
asl r12,r0,8
bne.d .Lpast_denorm_fp1
.Ldenorm_fp0: mulu64 mhi,r7
bclr r12,r12,31
norm.f r3,r12 ; flag for 0/x -> 0 check
bic.ne.f 0,0x60000000,r1 ; denorm/large number -> 0
beq_s .Lret0
asl_s r12,r12,r3
asl_s r3,r3,23
add_s r12,r12,r12
add r11,r11,r3 ; compensate divisor exponent
b.d .Lpast_denorm_fp0
mov_s r3,r12
ENDFUNC(__divsf3)
|
4ms/metamodule-plugin-sdk
| 2,465
|
plugin-libc/newlib/libm/machine/nds32/wf_sqrt.S
|
/*
Copyright (c) 2013-2014 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.text
.align 1
.global sqrtf
.type sqrtf, @function
/* float sqrtf (float): hardware square root with errno reporting via
   the FPU cumulative exception flags in $fpcsr.  */
sqrtf:
/* The input argument is supposed to be stored in $fs0.
The return value is stored in $fs0 as well. */
/* Clear the IEEE cumulative exceptions flags. ($fpcsr.b[6:2]) */
FMFCSR $r0
bitci $r0, $r0, #0b1111100
FMTCSR $r0
fsqrts $fs0, $fs0
/* Check the IEEE cumulative exceptions flags. */
FMFCSR $r5
bmski33 $r5, #2 /* Is $fpcsr.IVO('b2) set ? */
bnez $r5, .L_EDOM /* Set errno as EDOM. */
/* NOTE(review): if bmski33 masks $r5 in place down to a single bit,
   the two tests below always see 0 after the first mask -- verify the
   bmski33 semantics against the nds32 ISA manual; the flags may need
   to be re-read from $fpcsr before each test.  */
bmski33 $r5, #4 /* Is $fpcsr.OVF('b4) set ? */
bnez $r5, .L_ERANGE /* Set errno as ERANGE. */
bmski33 $r5, #5 /* Is $fpcsr.UDF('b5) set ? */
bnez $r5, .L_ERANGE /* Set errno as ERANGE. */
/* No error at all. Just ret. */
ret
.L_EDOM:
movi $r0, #33 /* EDOM: Math arg out of domain of func. */
j .L_Set_errno
.L_ERANGE:
movi $r0, #34 /* ERANGE: Math result not representable. */
.L_Set_errno:
l.w $r15, _impure_ptr
swi $r0, [$r15] /* Set errno. */
ret
.size sqrtf, .-sqrtf
|
4ms/metamodule-plugin-sdk
| 2,461
|
plugin-libc/newlib/libm/machine/nds32/w_sqrt.S
|
/*
Copyright (c) 2013-2014 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.text
.align 1
.global sqrt
.type sqrt, @function
/* double sqrt (double): hardware square root with errno reporting via
   the FPU cumulative exception flags in $fpcsr.  */
sqrt:
/* The input argument is supposed to be stored in $fd0.
The return value is stored in $fd0 as well. */
/* Clear the IEEE cumulative exceptions flags. ($fpcsr.b[6:2]) */
FMFCSR $r0
bitci $r0, $r0, #0b1111100
FMTCSR $r0
fsqrtd $fd0, $fd0
/* Check the IEEE cumulative exceptions flags. */
FMFCSR $r0
bmski33 $r0, #2 /* Is $fpcsr.IVO('b2) set ? */
bnez $r0, .L_EDOM /* Set errno as EDOM. */
/* NOTE(review): if bmski33 masks $r0 in place down to a single bit,
   the two tests below always see 0 after the first mask -- verify the
   bmski33 semantics against the nds32 ISA manual.  Also note the
   sibling wf_sqrt.S uses $r5 for these tests while this file reuses
   $r0; confirm whether the difference is intentional.  */
bmski33 $r0, #4 /* Is $fpcsr.OVF('b4) set ? */
bnez $r0, .L_ERANGE /* Set errno as ERANGE. */
bmski33 $r0, #5 /* Is $fpcsr.UDF('b5) set ? */
bnez $r0, .L_ERANGE /* Set errno as ERANGE. */
/* No error at all. Just ret. */
ret
.L_EDOM:
movi $r0, #33 /* EDOM: Math arg out of domain of func. */
j .L_Set_errno
.L_ERANGE:
movi $r0, #34 /* ERANGE: Math result not representable. */
.L_Set_errno:
l.w $r15, _impure_ptr
swi $r0, [$r15] /* Set errno. */
ret
.size sqrt, .-sqrt
|
4ms/metamodule-plugin-sdk
| 2,487
|
plugin-libc/newlib/libc/machine/nios2/setjmp.s
|
;/*
; * C library -- _setjmp, _longjmp
; *
; * _longjmp(a,v)
; * will generate a "return(v?v:1)" from
; * the last call to
; * _setjmp(a)
; * by unwinding the call stack.
; * The previous signal state is NOT restored.
; *
; *
; * Copyright (c) 2003 Altera Corporation
; * All rights reserved.
; *
; * Redistribution and use in source and binary forms, with or without
; * modification, are permitted provided that the following conditions
; * are met:
; *
; * o Redistributions of source code must retain the above copyright
; * notice, this list of conditions and the following disclaimer.
; * o Redistributions in binary form must reproduce the above copyright
; * notice, this list of conditions and the following disclaimer in the
; * documentation and/or other materials provided with the distribution.
; * o Neither the name of Altera Corporation nor the names of its
; * contributors may be used to endorse or promote products derived from
; * this software without specific prior written permission.
; *
; * THIS SOFTWARE IS PROVIDED BY ALTERA CORPORATION, THE COPYRIGHT HOLDER,
; * AND ITS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
; * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
; * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
; * THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
; * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
; * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
; * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
; * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
; * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
; * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
; */
.section .text
.align 3
.globl setjmp
.type setjmp, @function
.globl longjmp
.type longjmp, @function

# int setjmp (jmp_buf env)  -- env in r4, result in r2.
# Records the callee-saved registers r16-r23 together with gp, sp, fp
# and the return address ra in env, then returns 0.  The slots are
# independent, so the stores may be issued in any order; they are
# written here from the highest offset down.
setjmp:
        stw     ra,  44(r4)
        stw     fp,  40(r4)
        stw     sp,  36(r4)
        stw     gp,  32(r4)
        stw     r23, 28(r4)
        stw     r22, 24(r4)
        stw     r21, 20(r4)
        stw     r20, 16(r4)
        stw     r19, 12(r4)
        stw     r18,  8(r4)
        stw     r17,  4(r4)
        stw     r16,  0(r4)
        mov     r2, zero                # a direct call to setjmp returns 0
        ret

# void longjmp (jmp_buf env, int val)  -- env in r4, val in r5.
# Restores the context recorded by setjmp(env) and makes that setjmp
# call return val (or 1 when val is 0, so setjmp never appears to
# return 0 twice).  The previous signal state is NOT restored.
longjmp:
        ldw     ra,  44(r4)
        ldw     fp,  40(r4)
        ldw     sp,  36(r4)
        ldw     gp,  32(r4)
        ldw     r23, 28(r4)
        ldw     r22, 24(r4)
        ldw     r21, 20(r4)
        ldw     r20, 16(r4)
        ldw     r19, 12(r4)
        ldw     r18,  8(r4)
        ldw     r17,  4(r4)
        ldw     r16,  0(r4)
        mov     r2, r5                  # propagate val as the return value
        bne     r5, zero, .Lval_ok
        movi    r2, 1                   # substitute 1 for a zero val
.Lval_ok:
        ret
|
4ms/metamodule-plugin-sdk
| 1,855
|
plugin-libc/newlib/libc/machine/rx/strncat.S
|
.file "strncat.S"
.section .text
.global _strncat
.type _strncat,@function
;; char * _strncat (char *dest /* r1 */, const char *src /* r2 */,
;; size_t n /* r3 */)
;; Appends at most n bytes of src to the NUL-terminated string dest and
;; NUL-terminates the result (ISO C requires strncat to always append a
;; terminating NUL).  Returns dest in r1.
_strncat:
;; On entry: r1 => Destination
;; r2 => Source
;; r3 => Max number of bytes to copy
#ifdef __RX_DISALLOW_STRING_INSNS__
cmp #0, r3 ; If max is zero we have nothing to do.
beq 2f
mov r1, r4 ; Leave the destination pointer intact for the return value.
1: mov.b [r4+], r5 ; Find the NUL byte at the end of the destination.
cmp #0, r5
bne 1b
sub #1, r4 ; Step back onto the NUL byte so it gets overwritten.
3: mov.b [r2+], r5 ; Copy bytes from the source into the destination ...
mov.b r5, [r4+]
cmp #0, r5 ; ... until we reach a NUL byte (result terminated) ...
beq 2f
sub #1, r3
bne 3b ; ... or we have copied N bytes.
mov.b #0, [r4] ; Copied N bytes without seeing a NUL: append the
; terminating NUL required by ISO C.  (Fix: this store was
; previously missing, leaving the result unterminated; the
; string-insn path below has always done it.)
2: rts
#else
mov r1, r4 ; Save a copy of the dest pointer.
mov r2, r5 ; Save a copy of the source pointer.
mov r3, r14 ; Save a copy of the byte count.
mov #0, r2 ; Search for the NUL byte.
mov #-1, r3 ; Search until we run out of memory.
suntil.b ; Find the end of the destination string.
sub #1, r1 ; suntil.b leaves r1 pointing to the byte beyond the NUL.
mov r14, r3 ; Restore the limit on the number of bytes copied.
mov r5, r2 ; Restore the source pointer.
mov r1, r5 ; Save a copy of the dest pointer.
smovu ; Copy source to destination.
add #0, r14, r3 ; Restore the number of bytes to copy (again), but this time set the Z flag as well.
beq 1f ; If we copied 0 bytes then we already know that the dest string is NUL terminated, so we do not have to do anything.
mov #0, r2 ; Otherwise we must check to see if a NUL byte
mov r5, r1 ; was included in the bytes that were copied.
suntil.b
beq 1f ; Z flag is set if a match was found.
add r14, r5 ; Point at byte after end of copied bytes.
mov.b #0, [r5] ; Store a NUL there.
1:
mov r4, r1 ; Return the original dest pointer.
rts
#endif
.size _strncat, . - _strncat
|
4ms/metamodule-plugin-sdk
| 1,937
|
plugin-libc/newlib/libc/machine/rx/setjmp.S
|
# setjmp/longjmp for Renesas RX.
#
# The jmpbuf looks like this:
#
# Register jmpbuf offset
# R0 0x0
# R1 0x4
# R2 0x8
# R3 0xc
# R4 0x10
# R5 0x14
# R6 0x18
# R7 0x1c
# R8 0x20
# R9 0x24
# R10 0x28
# R11 0x2c
# R12 0x30
# R13 0x34
# R14 0x38
# R15 0x3c
# PC 0x40
#
# R1 contains the pointer to jmpbuf:
#
# int R1 = setjmp (jmp_buf R1)
# void longjmp (jmp_buf R1, int R2)
#
# The ABI allows for R1-R5 to be clobbered by functions. We must be
# careful to always leave the stack in a usable state in case an
# interrupt happens.
.text
.global _setjmp
.type _setjmp, @function
;; int _setjmp (jmp_buf buf)  -- buf in r1; returns 0 (in r1).
;; Saves r0-r15 plus the caller's return address (read through r0, the
;; stack pointer) into buf, using the layout tabulated above.
_setjmp:
mov.l r0, [r1] ; save all the general registers
mov.l r1, 0x4[r1] ; longjmp won't use this, but someone else might.
mov.l r2, 0x8[r1]
mov.l r3, 0xc[r1]
mov.l r4, 0x10[r1]
mov.l r5, 0x14[r1]
mov.l r6, 0x18[r1]
mov.l r7, 0x1c[r1]
mov.l r8, 0x20[r1]
mov.l r9, 0x24[r1]
mov.l r10, 0x28[r1]
mov.l r11, 0x2c[r1]
mov.l r12, 0x30[r1]
mov.l r13, 0x34[r1]
mov.l r14, 0x38[r1]
mov.l r15, 0x3c[r1]
mov.l [r0], r2 ; get return address off the stack
mov.l r2, 0x40[r1] ; PC
mov #0, r1 ; Return 0.
rts
.Lend1:
.size _setjmp, .Lend1 - _setjmp
.global _longjmp
.type _longjmp, @function
;; void _longjmp (jmp_buf buf /* r1 */, int val /* r2 */)
;; Restores the context saved by _setjmp(buf) and makes that _setjmp
;; call return val (or 1 if val is 0).
_longjmp:
tst r2, r2 ; Set the Z flag if r2 is 0.
stz #1, r2 ; If the Z flag was set put 1 into the return register.
mov r2, 4[r1] ; Put r2 (our return value) into the setjmp buffer as r1.
mov.l [r1], r0 ; Restore the stack - there's a slot for PC
mov.l 0x40[r1], r2 ; Get the saved PC
mov.l r2, [r0] ; Overwrite the old return address
mov.l 0x3c[r1], r15
mov.l 0x38[r1], r14
mov.l 0x34[r1], r13
mov.l 0x30[r1], r12
mov.l 0x2c[r1], r11
mov.l 0x28[r1], r10
mov.l 0x24[r1], r9
mov.l 0x20[r1], r8
mov.l 0x1c[r1], r7
mov.l 0x18[r1], r6
mov.l 0x14[r1], r5
mov.l 0x10[r1], r4
mov.l 0xc[r1], r3
mov.l 0x8[r1], r2
mov.l 0x4[r1], r1 ; This sets up the new return value
rts
.Lend2:
.size _longjmp, .Lend2 - _longjmp
|
4ms/metamodule-plugin-sdk
| 1,142
|
plugin-libc/newlib/libc/machine/rx/memmove.S
|
.file "memmove.S"
.section .text
.global _memmove
.type _memmove,@function
;; void * _memmove (void *dest /* r1 */, const void *src /* r2 */,
;; size_t n /* r3 */)
;; Copies n bytes; overlap is handled by copying backwards when the
;; destination lies above the source.  Returns dest in r1.
_memmove:
;; R1: DEST
;; R2: SRC
;; R3: COUNT
#ifdef __RX_DISALLOW_STRING_INSNS__
/* Do not use the string instructions - they might prefetch
bytes from outside of valid memory. This is particularly
dangerous in I/O space. */
cmp #0, r3 ; If the count is zero, do nothing
beq 4f
cmp r1, r2
blt 3f ; If SRC < DEST copy backwards
mov r1, r14 ; Save a copy of DEST
5: mov.b [r2+], r5 ; forward byte-by-byte copy
mov.b r5, [r14+]
sub #1, r3
bne 5b
4: rts ; r1 still holds DEST for the return value
3: add r3, r1 ; Point both pointers one past the end ...
add r3, r2
6: mov.b [-r2], r5 ; ... and copy backwards with pre-decrement;
mov.b r5, [-r1] ; after n steps r1 is back at DEST.
sub #1, r3
bne 6b
rts
#else
mov r1, r4 ; Save a copy of DEST
cmp r1, r2
blt 2f ; If SRC (r2) is less than DEST (r1) then copy backwards
smovf
1:
mov r4, r1 ; Return DEST
rts
2:
add r3, r1 ; The SMOVB instructions requires the DEST in r1 and the
add r3, r2 ; SRC in r2 but it needs them to point the last bytes of
sub #1, r2 ; the regions involved not the first bytes, hence these
sub #1, r1 ; additions and subtractions.
smovb
bra 1b
#endif /* SMOVF allowed. */
.size _memmove, . - _memmove
|
4ms/metamodule-plugin-sdk
| 1,050
|
plugin-libc/newlib/libc/machine/rx/strcat.S
|
.file "strcat.S"
.section .text
.global _strcat
.type _strcat,@function
;; char *strcat (char *dest /* r1 */, const char *src /* r2 */)
;; Appends src (including its NUL) at the end of dest; returns dest.
_strcat:
;; On entry: r1 => Destination
;; r2 => Source
#ifdef __RX_DISALLOW_STRING_INSNS__
mov r1, r4 ; Save a copy of the dest pointer.
1: mov.b [r4+], r5 ; Find the NUL byte at the end of R4.
cmp #0, r5
bne 1b
sub #1, r4 ; Move R4 back to point at the NUL byte.
2: mov.b [r2+], r5 ; Copy bytes from R2 to R4 until we reach a NUL byte.
mov.b r5, [r4+]
cmp #0, r5
bne 2b
rts ; r1 was never modified, so it still returns dest
#else
mov r1, r4 ; Save a copy of the dest pointer.
mov r2, r5 ; Save a copy of the source pointer.
mov #0, r2 ; Search for the NUL byte.
mov #-1, r3 ; Limit on the number of bytes examined.
suntil.b ; Find the end of the destination string.
sub #1, r1 ; suntil.b leaves r1 pointing to the byte beyond the match.
mov #-1, r3 ; Set a limit on the number of bytes copied.
mov r5, r2 ; Restore the source pointer.
smovu ; Copy source to destination
mov r4, r1 ; Return the original dest pointer.
rts
#endif
.size _strcat, . - _strcat
|
4ms/metamodule-plugin-sdk
| 2,777
|
plugin-libc/newlib/libc/machine/m68k/memcpy.S
|
/* a-memcpy.s -- memcpy, optimised for m68k asm
*
* Copyright (c) 2007 mocom software GmbH & Co KG)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "m68kasm.h"
#if defined (__mcoldfire__) || defined (__mc68030__) || defined (__mc68040__) || defined (__mc68060__)
# define MISALIGNED_OK 1
#else
# define MISALIGNED_OK 0
#endif
.text
.align 4
.globl SYM(memcpy)
.type SYM(memcpy), @function
/* memcpy, optimised
*
* strategy:
* - no argument testing (the original memcpy from the GNU lib does
* no checking either)
* - make sure the destination pointer (the write pointer) is long word
* aligned. This is the best you can do, because writing to unaligned
* addresses can be the most costfull thing you could do.
* - Once you have figured that out, we do a little loop unrolling
* to further improve speed.
*/
| void *memcpy(void *dest, const void *src, size_t len)
| C stack args: dest at 4(sp), src at 8(sp), len at 12(sp).
| Returns dest in d0.  Uses only scratch regs a0/a1/d0/d1.
SYM(memcpy):
move.l 4(sp),a0 | dest ptr
move.l 8(sp),a1 | src ptr
move.l 12(sp),d1 | len
cmp.l #8,d1 | if fewer than 8 bytes to transfer,
blo .Lresidue | do not optimise
#if !MISALIGNED_OK
/* Goto .Lresidue if either dest or src is not 4-byte aligned */
move.l a0,d0
and.l #3,d0
bne .Lresidue
move.l a1,d0
and.l #3,d0
bne .Lresidue
#else /* MISALIGNED_OK */
/* align dest */
move.l a0,d0 | copy of dest
neg.l d0
and.l #3,d0 | look for the lower two only
beq 2f | is aligned?
sub.l d0,d1
lsr.l #1,d0 | word align needed?
bcc 1f
move.b (a1)+,(a0)+
1:
lsr.l #1,d0 | long align needed?
bcc 2f
move.w (a1)+,(a0)+
2:
#endif /* !MISALIGNED_OK */
/* long word transfers */
move.l d1,d0
and.l #3,d1 | byte residue
lsr.l #3,d0 | d0 = number of 8-byte units; carry = bit 2 of len
bcc 1f | carry set for 4-byte residue
move.l (a1)+,(a0)+
1:
lsr.l #1,d0 | number of 16-byte transfers; carry = bit 3 of len
bcc .Lcopy | carry set for 8-byte residue
bra .Lcopy8
1:
move.l (a1)+,(a0)+ | 16-byte copy loop body (4 longs per iteration)
move.l (a1)+,(a0)+
.Lcopy8:
move.l (a1)+,(a0)+
move.l (a1)+,(a0)+
.Lcopy:
#if !defined (__mcoldfire__)
dbra d0,1b | dbra counts only the low 16 bits ...
sub.l #0x10000,d0 | ... so step the high word and retest for len >= 1 MiB
#else
subq.l #1,d0
#endif
bpl 1b
bra .Lresidue
1:
move.b (a1)+,(a0)+ | move residue bytes
.Lresidue:
#if !defined (__mcoldfire__)
dbra d1,1b | loop until done
#else
subq.l #1,d1
bpl 1b
#endif
move.l 4(sp),d0 | return value
rts
|
4ms/metamodule-plugin-sdk
| 2,494
|
plugin-libc/newlib/libc/machine/m68k/memset.S
|
/* a-memset.s -- memset, optimised for fido asm
*
* Copyright (c) 2007 mocom software GmbH & Co KG)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "m68kasm.h"
.text
.align 4
.globl SYM(memset)
.type SYM(memset), @function
| memset, optimised
|
| strategy:
| - no argument testing (the original memcpy from the GNU lib does
| no checking either)
| - make sure the destination pointer (the write pointer) is long word
| aligned. This is the best you can do, because writing to unaligned
| addresses can be the most costfull thing one could do.
| - we fill long word wise if possible
|
| VG, 2006
|
| bugfixes:
| - distribution of byte value improved - in cases someone gives
| non-byte value
| - residue byte transfer was not working
|
| VG, April 2007
|
| void *memset(void *dest, int value, size_t len)
| C stack args: dest at 4(sp), value at 8(sp), len at 12(sp).
| Returns dest in d0.  d2 is callee-saved, hence the push/pop around
| the long-word path.
SYM(memset):
move.l 4(sp),a0 | dest ptr
move.l 8(sp),d0 | value
move.l 12(sp),d1 | len
cmp.l #16,d1
blo .Lbset | below, byte fills
|
move.l d2,-(sp) | need a register
move.b d0,d2 | distribute low byte to all byte in word
lsl.l #8,d0
move.b d2,d0
move.w d0,d2
swap d0 | rotate 16
move.w d2,d0 | now all 4 bytes of d0 hold the fill byte
|
move.l a0,d2 | copy of src
neg.l d2 | 1 2 3 ==> 3 2 1
and.l #3,d2 | d2 = bytes needed to align dest
beq 2f | is aligned
|
sub.l d2,d1 | fix length
lsr.l #1,d2 | word align needed?
bcc 1f
move.b d0,(a0)+ | fill byte
1:
lsr.l #1,d2 | long align needed?
bcc 2f
move.w d0,(a0)+ | fill word
2:
move.l d1,d2 | number of long transfers (at least 3)
lsr.l #2,d2
subq.l #1,d2 | pre-decrement: one fill is done before the dbra
1:
move.l d0,(a0)+ | fill long words
.Llset:
#if !defined (__mcoldfire__)
dbra d2,1b | loop until done (low 16 bits only) ...
sub.l #0x10000,d2 | ... then step the high word for very large fills
#else
subq.l #1,d2
#endif
bpl 1b
and.l #3,d1 | residue byte transfers, fixed
move.l (sp)+,d2 | restore d2
bra .Lbset
1:
move.b d0,(a0)+ | fill residue bytes
.Lbset:
#if !defined (__mcoldfire__)
dbra d1,1b | loop until done
#else
subq.l #1,d1
bpl 1b
#endif
move.l 4(sp),d0 | return value
rts
|
4ms/metamodule-plugin-sdk
| 3,629
|
plugin-libc/newlib/libc/machine/rl78/setjmp.S
|
/*
Copyright (c) 2011 Red Hat Incorporated.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of Red Hat Incorporated may not be used to endorse
or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef __RL78_G10__
; clobberable
r8 = 0xffec8
r9 = 0xffec9
r10 = 0xffeca
r11 = 0xffecb
r12 = 0xffecc
r13 = 0xffecd
r14 = 0xffece
r15 = 0xffecf
; preserved
r16 = 0xffed0
r17 = 0xffed1
r18 = 0xffed2
r19 = 0xffed3
r20 = 0xffed4
r21 = 0xffed5
r22 = 0xffed6
r23 = 0xffed7
#else
; clobberable
r8 = 0xffef0
r9 = 0xffef1
r10 = 0xffef2
r11 = 0xffef3
r12 = 0xffef4
r13 = 0xffef5
r14 = 0xffef6
r15 = 0xffef7
; preserved
r16 = 0xffee8
r17 = 0xffee9
r18 = 0xffeea
r19 = 0xffeeb
r20 = 0xffeec
r21 = 0xffeed
r22 = 0xffeee
r23 = 0xffeef
#endif
/* The jump buffer has the following structure:
R0 .. R23 3*8 bytes
SP 2 bytes
ES 1 byte
CS 1 byte
PC 4 bytes
*/
;; Store byte register/SFR \reg at [hl + \ofs].  Clobbers A.
.macro _saveb ofs,reg
mov a,\reg
mov [hl+\ofs],a
.endm
;; Store word register \reg at [hl + \ofs].  Clobbers AX.
.macro _save ofs,reg
movw ax,\reg
movw [hl+\ofs],ax
.endm
.global _setjmp
.type _setjmp, @function
;; Saves AX/BC/DE/HL, r8..r23, SP, ES, CS and the 4-byte return PC into
;; the buffer (layout in the comment above), then returns 0 in R8.
_setjmp:
;; R8 = setjmp (jmp_buf *[sp+4].w)
;; must return zero !!
push ax ; preserve caller's AX across the whole routine
push hl ; preserve caller's HL
push ax ; second copy of AX, reloaded after HL is repointed
movw ax, [sp+10] ; fetch the jmp_buf argument (3 pushes + 4-byte PC = 10)
movw hl, ax ; HL = buffer pointer
pop ax ; AX copy pushed above
movw [hl], ax ; buf[0] = AX
_save 2, bc
_save 4, de
pop ax ; this pop yields the caller's HL saved earlier
movw [hl+6], ax ; buf[6] = HL
_save 8, r8
_save 10, r10
_save 12, r12
_save 14, r14
_save 16, r16
_save 18, r18
_save 20, r20
_save 22, r22
;; The sp we have now includes one more pushed reg, plus $PC
movw ax, sp
addw ax, #6 ; buf[24] = SP as it will be after we return
movw [hl+24], ax
_saveb 26, es
_saveb 27, cs
_save 28, [sp+2] ; low word of the 4-byte return PC
_save 30, [sp+4] ; high word of the return PC
clrw ax
movw r8, ax ; return value 0
pop ax ; restore caller's AX (first push)
ret
.size _setjmp, . - _setjmp
;; Load byte register/SFR \reg from [hl + \ofs].  Clobbers A.
.macro _loadb ofs,reg
mov a,[hl+\ofs]
mov \reg,a
.endm
;; Load word register \reg from [hl + \ofs].  Clobbers AX.
.macro _load ofs,reg
movw ax,[hl+\ofs]
movw \reg,ax
.endm
;; Push the word stored at [hl + \ofs] onto the (new) stack.
.macro _push ofs
movw ax,[hl+\ofs]
push ax
.endm
.global _longjmp
.type _longjmp, @function
;; Switches to the SP saved by _setjmp, rebuilds the return PC and the
;; AX/HL values on that stack, restores the remaining registers and
;; returns through the saved PC with R8 = val (1 if val was 0).
_longjmp:
;; noreturn longjmp (jmp_buf *[sp+4].w, int [sp+6].w)
movw ax, [sp+6] ; ax = requested return value
cmpw ax,#0
sknz ; skip next insn if ax was non-zero
onew ax ; longjmp must never make setjmp return 0
movw r8, ax ; R8 = return value
movw ax, [sp+4]
movw hl, ax ; HL = buffer pointer
movw ax, [hl+24]
movw sp, ax ; this is the *new* stack
_push 30 ; high half of PC
_push 28 ; low half of PC
_push 6 ; HL
_push 0 ; AX
_load 2, bc
_load 4, de
_load 10, r10 ; r8 intentionally skipped - it carries the return value
_load 12, r12
_load 14, r14
_load 16, r16
_load 18, r18
_load 20, r20
_load 22, r22
_loadb 26, es
_loadb 27, cs
pop ax ; AX pushed above
pop hl ; HL pushed above
ret ; pops PC (4 bytes)
.size _longjmp, . - _longjmp
|
4ms/metamodule-plugin-sdk
| 1,479
|
plugin-libc/newlib/libc/machine/mt/setjmp.S
|
# setjmp/longjmp for mt.
#
# The jmpbuf looks like this:
#
# Register jmpbuf offset
# R0 --- --
# R1 0x4 4
# R2 0x8 8
# R3 0xc 12
# R4 0x10 16
# R5 0x14 20
# R6 0x18 24
# R7 0x1c 28
# R8 0x20 32
# R9 ---- --
# R10 ---- --
# R11 0x2c 44
# R12 0x30 48
# R13 0x34 52
# R14 0x38 56
# R15 0x3c 60
#
# R1 contains the pointer to jmpbuf
.text
.global setjmp
.type setjmp ,@function
# int setjmp (jmp_buf buf /* r1 */)
# Saves r1-r8 and r11-r15 at the offsets listed in the table above
# (r0/r9/r10 deliberately not saved), then returns 0 in r11 via r14.
setjmp:
stw r1, r1, #4
or r0, r0, r0 # nop - presumably a store/pipeline delay slot; confirm for this core
stw r2, r1, #8
or r0, r0, r0
stw r3, r1, #12
or r0, r0, r0
stw r4, r1, #16
or r0, r0, r0
stw r5, r1, #20
or r0, r0, r0
stw r6, r1, #24
or r0, r0, r0
stw r7, r1, #28
or r0, r0, r0
stw r8, r1, #32
or r0, r0, r0
stw r11, r1, #44
or r0, r0, r0
stw r12, r1, #48
or r0, r0, r0
stw r13, r1, #52
or r0, r0, r0
stw r14, r1, #56 # r14 = return address, becomes longjmp's jump target
or r0, r0, r0
stw r15, r1, #60
jal r0, r14 # return to caller ...
addi r11, r0, #0 # ... delay slot: return value 0 in r11
.Lend1:
.size setjmp,.Lend1-setjmp
.global longjmp
.type longjmp,@function
# void longjmp (jmp_buf buf /* r1 */, int val /* r2 */)
# Restores the registers saved by setjmp and resumes after the matching
# setjmp call, which appears to return val in r11 (1 if val == 0, as
# ISO C requires).  Does not return to its own caller.
longjmp:
or r9, r1, r1 # copy of buf (NOTE(review): r9 is never read again - vestigial?)
or r11, r2, r2 # r11 = requested return value
ldw r1, r1, #4 # saved r1 was buf itself, so r1 still addresses the buffer
or r0, r0, r0 # nop - presumably a load delay slot; confirm for this core
ldw r2, r1, #8
or r0, r0, r0
ldw r3, r1, #12
or r0, r0, r0
ldw r4, r1, #16
or r0, r0, r0
ldw r5, r1, #20
or r0, r0, r0
ldw r6, r1, #24
or r0, r0, r0
ldw r7, r1, #28
or r0, r0, r0
ldw r8, r1, #32
or r0, r0, r0
ldw r12, r1, #48
or r0, r0, r0
ldw r13, r1, #52
or r0, r0, r0
ldw r14, r1, #56 # r14 = return address saved by setjmp
or r0, r0, r0
ldw r15, r1, #60
brne r0, r11, .L01 # keep val if it is non-zero ...
or r0, r0, r0 # (branch delay slot)
addi r11, r0, #1 # ... otherwise return 1: longjmp must never yield 0
.L01:
jal r0, r14 # jump back into setjmp's caller
or r0, r0, r0 # delay slot
.Lend2:
.size longjmp,.Lend2-longjmp # fixed: was ".Lend2-longjmp2" (undefined symbol)
|
4ms/metamodule-plugin-sdk
| 2,426
|
plugin-libc/newlib/libc/machine/crx/setjmp.S
|
##############################################################################
# setjmp.S -- CRX setjmp routine #
# #
# Copyright (c) 2004 National Semiconductor Corporation #
# #
# The authors hereby grant permission to use, copy, modify, distribute, #
# and license this software and its documentation for any purpose, provided #
# that existing copyright notices are retained in all copies and that this #
# notice is included verbatim in any distributions. No written agreement, #
# license, or royalty fee is required for any of the authorized uses. #
# Modifications to this software may be copyrighted by their authors #
# and need not follow the licensing terms described here, provided that #
# the new terms are clearly indicated on the first page of each file where #
# they apply. #
# #
# C library -- setjmp, longjmp #
# longjmp(a,v) #
# will generate a "return(v)" #
# from the last call to #
# setjmp(a) #
# by restoring r7-ra, sp, #
# and pc from 'a' #
# and doing a return. (Makes sure that longjmp never returns 0). #
##############################################################################
.text
.file "setjmp.s"
.align 4
.globl _setjmp
.align 4
# int setjmp (jmp_buf buf /* r2 */)
# Saves r7-r14 and sp into the buffer, then returns 0 in r0.
# NOTE(review): storm/stord operand semantics (where each register lands
# relative to r2) are taken on faith from the CRX ISA - verify offsets
# against _longjmp's loadm/loadd below.
_setjmp:
#r2: .blkw
storm r2,{r7,r8,r9,r10,r11,r12,r13,r14}
stord sp,0(r2)
movd $0,r0 # setjmp returns 0 on the direct path
jump ra
.globl _longjmp
# void longjmp (jmp_buf buf /* r2 */, int val /* r3 */)
# Restores r7-r13, ra and sp, then jumps through ra so the matching
# setjmp appears to return val (1 if val == 0, per ISO C).
# NOTE(review): _setjmp stores r14 where this reloads ra - correct only
# if ra aliases r14 in the CRX register file; confirm.
_longjmp:
#r2: .blkw # pointer save area
#r3: .blkw # ret value
loadm r2, {r7,r8,r9,r10,r11,r12,r13,ra}
loadd 0(r2), sp
movd r3, r0 # tentative return value
cmpd $0, r3
bne end1
movd $1, r0 # never let setjmp appear to return 0
end1:
jump ra
.align 4
|
4ms/metamodule-plugin-sdk
| 2,394
|
plugin-libc/newlib/libc/machine/msp430/setjmp.S
|
/* Copyright (c) 2013 Red Hat, Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the BSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses. Any Red Hat trademarks that are
incorporated in the source code or documentation are not subject to
the BSD License and may only be used or replicated with the express
permission of Red Hat, Inc.
*/
# setjmp/longjmp for msp430. The jmpbuf looks like this:
#
# Register Jmpbuf offset
# small large
# r0 (pc) 0x00 0x00
# r1 (sp) 0x02 0x04
# r4 0x04 0x08
# r5 0x06 0x0c
# r6 0x08 0x10
# r7 0x0a 0x14
# r8 0x0c 0x18
# r9 0x0e 0x1c
# r10 0x10 0x20
.text
.global setjmp
; int setjmp (jmp_buf buf /* r12 */)
; Saves pc, sp and the callee-saved r4-r10 at the offsets listed above
; (20-bit slots under the large memory model), then returns 0 in r12.
setjmp:
; Upon entry r12 points to the jump buffer.
; Returns 0 to caller.
#if defined __MSP430X_LARGE__
mova @r1, r13 ; fetch our return address (r1 is SP)
mova r13, 0(r12) ; buf.pc
mova r1, 4(r12) ; buf.sp (still including the return-address slot)
mova r4, 8(r12)
mova r5, 12(r12)
mova r6, 16(r12)
mova r7, 20(r12)
mova r8, 24(r12)
mova r9, 28(r12)
mova r10, 32(r12)
clr r12 ; return 0
reta
#else
;; Get the return address off the stack
mov.w @r1, r13
mov.w r13, 0(r12) ; buf.pc
mov.w r1, 2(r12) ; buf.sp
mov.w r4, 4(r12)
mov.w r5, 6(r12)
mov.w r6, 8(r12)
mov.w r7, 10(r12)
mov.w r8, 12(r12)
mov.w r9, 14(r12)
mov.w r10, 16(r12)
clr r12 ; return 0
ret
#endif
.size setjmp , . - setjmp
.global longjmp
; void longjmp (jmp_buf buf /* r12 */, int val /* r13 */)
; Restores pc/sp/r4-r10 from the buffer and resumes after the matching
; setjmp, returning val in r12 (1 if val == 0, as ISO C requires).
; Note: the final write to r0 (the PC) *is* the jump, so there is no ret.
longjmp:
; Upon entry r12 points to the jump buffer and
; r13 contains the value to be returned by setjmp.
#if defined __MSP430X_LARGE__
mova @r12+, r14 ; saved pc (held until the very end)
mova @r12+, r1 ; saved sp
mova @r12+, r4
mova @r12+, r5
mova @r12+, r6
mova @r12+, r7
mova @r12+, r8
mova @r12+, r9
mova @r12+, r10
#else
mov.w @r12+, r14
mov.w @r12+, r1
mov.w @r12+, r4
mov.w @r12+, r5
mov.w @r12+, r6
mov.w @r12+, r7
mov.w @r12+, r8
mov.w @r12+, r9
mov.w @r12+, r10
#endif
; If caller attempts to return 0, return 1 instead.
cmp.w #0, r13
jne .Lnot_zero
mov.w #1, r13
.Lnot_zero:
mov.w r13, r12 ; r12 = value setjmp will appear to return
#if defined __MSP430X_LARGE__
adda #4, r1 ; discard the return-address slot included in saved sp
mova r14, r0 ; writing the PC performs the jump
#else
add.w #2, r1
mov.w r14, r0
#endif
.size longjmp , . - longjmp
|
4ms/metamodule-plugin-sdk
| 1,089
|
plugin-libc/newlib/libc/machine/d10v/setjmp.S
|
; setjmp/longjmp for D10V. The jmpbuf looks like this:
;
; Register jmpbuf offset
; R6 0x00
; R7 0x02
; R8 0x04
; R9 0x06
; R10 0x08
; R11 0x0a
; R13 (return address) 0x0c
; R15 (SP) 0x0E
.text
.globl setjmp
.type setjmp,@function
.stabs "setjmp.S",100,0,0,setjmp
.stabs "int:t(0,1)=r(0,1);-65536;65535;",128,0,0,0
.stabs "setjmp:F(0,1)",36,0,1,setjmp
; int setjmp (jmp_buf buf /* r0 */)
; Saves r6-r11 (pairwise with st2w), the return address (r13) and the
; stack pointer (r15) at the offsets listed above; returns 0 in r0.
setjmp:
; Address of jmpbuf is passed in R0. Save the appropriate registers.
st2w r6, @r0+ ; stores the r6/r7 pair, post-incrementing r0
st2w r8, @r0+
st2w r10, @r0+
st r13, @r0+ ; return address
st r15, @r0+ ; SP
; Return 0 to caller
ldi r0, 0
jmp r13
.Lsetjmp:
.size setjmp,.Lsetjmp-setjmp
.stabs "",36,0,0,.Lsetjmp-setjmp
.globl longjmp
.type longjmp,@function
.stabs "longjmp:F(0,1)",36,0,1,longjmp
; void longjmp (jmp_buf buf /* r0 */, int val /* r1 */)
; Restores the registers saved by setjmp and jumps through the saved
; r13, so setjmp appears to return val (1 if val == 0, per ISO C).
longjmp:
; Address of jmpbuf is in R0. Restore the registers.
ld2w r6, @r0+
ld2w r8, @r0+
ld2w r10, @r0+
ld r13, @r0+ ; saved return address
ld r15, @r0+ ; saved SP
; Value to return to caller is in R1. If caller attemped to return 0,
; return 1 instead.
mv r0, r1
cmpeqi r0, 0 ; sets f0 if the requested value is 0 ...
exef0t || ldi r0,1 ; ... in which case conditionally load 1
jmp r13
.Llongjmp:
.size longjmp,.Llongjmp-longjmp
.stabs "",36,0,0,.Llongjmp-longjmp
|
4ms/metamodule-plugin-sdk
| 1,200
|
plugin-libc/newlib/libc/machine/i386/memcpy.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memcpy)
SOTYPE_FUNCTION(memcpy)
/* void *memcpy (void *dest, const void *src, size_t n)
cdecl: dest at 8(ebp), src at 12(ebp), n at 16(ebp); returns dest in eax.
__iamcu__ regparm variant: eax = dest, edx = src, ecx = n.
Strategy: byte-copy until dest is 4-aligned, then rep movsl, then the
remaining 0-3 bytes.  Register names are macros from i386mach.h. */
SYM (memcpy):
#ifdef __iamcu__
pushl esi
pushl edi
movl eax,edi
movl edx,esi
rep movsb
popl edi
popl esi
#else
pushl ebp
movl esp,ebp
pushl esi
pushl edi
pushl ebx
movl 8(ebp),edi
movl 16(ebp),ecx
movl 12(ebp),esi
cld /* string ops go forward */
#ifndef __OPTIMIZE_SIZE__
cmpl $8,ecx
jbe .L3 /* tiny copies: plain byte loop is cheapest */
/* move any preceding bytes until destination address is long word aligned */
movl edi,edx
movl ecx,ebx /* ebx = remaining count, maintained across the phases */
andl $3,edx
jz .L11
movl $4,ecx
subl edx,ecx
andl $3,ecx /* ecx = 1..3 alignment bytes */
subl ecx,ebx
rep
movsb
mov ebx,ecx
/* move bytes a long word at a time */
.L11:
shrl $2,ecx
.p2align 2
rep
movsl
movl ebx,ecx
andl $3,ecx /* 0-3 trailing bytes */
#endif /* !__OPTIMIZE_SIZE__ */
/* handle any remaining bytes */
.L3:
rep
movsb
.L5:
movl 8(ebp),eax /* return dest */
leal -12(ebp),esp
popl ebx
popl edi
popl esi
leave
#endif
ret
|
4ms/metamodule-plugin-sdk
| 2,351
|
plugin-libc/newlib/libc/machine/i386/setjmp.S
|
/* This is file is a merger of SETJMP.S and LONGJMP.S */
/*
* This file was modified to use the __USER_LABEL_PREFIX__ and
* __REGISTER_PREFIX__ macros defined by later versions of GNU cpp by
* Joel Sherrill (joel@OARcorp.com)
* Slight change: now includes i386mach.h for this (Werner Almesberger)
*
* Copyright (C) 1991 DJ Delorie
* All rights reserved.
*
* Redistribution, modification, and use in source and binary forms is permitted
* provided that the above copyright notice and following paragraph are
* duplicated in all such forms.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
** jmp_buf:
** eax ebx ecx edx esi edi ebp esp eip
** 0 4 8 12 16 20 24 28 32
**
** Intel MCU jmp_buf:
** ebx esi edi ebp esp eip
** 0 4 8 12 16 20
*/
#include "i386mach.h"
.global SYM (setjmp)
.global SYM (longjmp)
SOTYPE_FUNCTION(setjmp)
SOTYPE_FUNCTION(longjmp)
/* int setjmp (jmp_buf buf)
Saves the registers at the jmp_buf offsets listed in the header comment
above and returns 0.  cdecl: buf at 8(ebp); iamcu: buf in eax. */
SYM (setjmp):
#ifdef __iamcu__
/* Store EIP. */
movl 0(esp),ecx
movl ecx,20(eax)
movl ebx,0 (eax)
movl esi,4 (eax)
movl edi,8 (eax)
movl ebp,12(eax)
/* Skip return address, which will be pushed onto stack in
longjmp, and store SP. */
leal 4(esp),ecx
movl ecx,16(eax)
xorl eax,eax
#else
pushl ebp
movl esp,ebp
pushl edi /* edi is callee-saved; we use it as the buf pointer */
movl 8 (ebp),edi
movl eax,0 (edi)
movl ebx,4 (edi)
movl ecx,8 (edi)
movl edx,12 (edi)
movl esi,16 (edi)
movl -4 (ebp),eax /* caller's edi (just pushed) */
movl eax,20 (edi)
movl 0 (ebp),eax /* caller's ebp */
movl eax,24 (edi)
movl esp,eax
addl $12,eax /* esp as it will be after this setjmp returns */
movl eax,28 (edi)
movl 4 (ebp),eax /* our return address = saved eip */
movl eax,32 (edi)
popl edi
movl $0,eax
leave
#endif
ret
/* void longjmp (jmp_buf buf, int val)
Restores the registers saved by setjmp and returns to the saved eip,
making setjmp appear to return val (1 if val == 0, as ISO C requires).
cdecl: buf at 8(ebp), val at 12(ebp); iamcu: buf in eax, val in edx. */
SYM (longjmp):
#ifdef __iamcu__
/* Check retval. */
testl edx,edx
jne 0f
incl edx /* never let setjmp return 0 */
0:
/* Restore stack first. */
movl 16(eax),esp
/* Put return address on stack. */
pushl 20(eax)
movl 0(eax),ebx
movl 4(eax),esi
movl 8(eax),edi
movl 12(eax),ebp
movl edx,eax
#else
pushl ebp
movl esp,ebp
movl 8(ebp),edi /* get jmp_buf */
movl 12(ebp),eax /* store retval in j->eax */
testl eax,eax
jne 0f
incl eax /* never let setjmp return 0 */
0:
movl eax,0(edi) /* stash retval; reloaded into eax below */
movl 24(edi),ebp
__CLI /* i386mach.h macro - presumably masks interrupts around */
movl 28(edi),esp /* the esp/eip swap window; confirm in i386mach.h */
pushl 32(edi) /* saved eip becomes our return address */
movl 0(edi),eax
movl 4(edi),ebx
movl 8(edi),ecx
movl 12(edi),edx
movl 16(edi),esi
movl 20(edi),edi
__STI
#endif
ret
|
4ms/metamodule-plugin-sdk
| 2,318
|
plugin-libc/newlib/libc/machine/i386/memmove.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memmove)
SOTYPE_FUNCTION(memmove)
/* void *memmove (void *dest, const void *src, size_t n)
cdecl: dest at 8(ebp), src at 12(ebp), n at 16(ebp); returns dest in eax.
__iamcu__ regparm variant: eax = dest, edx = src, ecx = n.
Overlap-safe: copies backwards (DF set) when src < dest < src + n.
DF is always cleared again before returning. */
SYM (memmove):
#ifdef __iamcu__
pushl esi
pushl edi
movl eax,edi
movl edx,esi
cmp esi,edi
ja .Lcopy_backward
je .Lbwd_write_0bytes
rep movsb
popl edi
popl esi
ret
.Lcopy_backward:
lea -1(edi,ecx),edi
lea -1(esi,ecx),esi
std
rep movsb
cld
.Lbwd_write_0bytes:
popl edi
popl esi
#else
pushl ebp
movl esp,ebp
pushl esi
pushl edi
pushl ebx
movl 8(ebp),edi
movl 16(ebp),ecx
movl 12(ebp),esi
/* check for destructive overlap (src < dst && dst < src + length) */
cld
cmpl edi,esi
jae .L2
leal -1(ecx,esi),ebx
cmpl ebx,edi
ja .L2
/* IF: destructive overlap, must copy backwards */
addl ecx,esi
addl ecx,edi
std
#ifndef __OPTIMIZE_SIZE__
cmpl $8,ecx
jbe .L13
.L18:
/* move trailing bytes in reverse until destination address is long word aligned */
movl edi,edx
movl ecx,ebx
andl $3,edx
jz .L21
movl edx,ecx
decl esi
decl edi
subl ecx,ebx
rep
movsb
mov ebx,ecx
incl esi
incl edi
.L21:
/* move bytes in reverse, a long word at a time */
shrl $2,ecx
subl $4,esi
subl $4,edi
rep
movsl
addl $4,esi
addl $4,edi
movl ebx,ecx
andl $3,ecx
#endif /* !__OPTIMIZE_SIZE__ */
/* handle any remaining bytes not on a long word boundary */
.L13:
decl esi
decl edi
.L15:
rep
movsb
jmp .L5
.p2align 4,,7
/* ELSE: no destructive overlap so we copy forwards */
.L2:
#ifndef __OPTIMIZE_SIZE__
cmpl $8,ecx
jbe .L3
/* move any preceding bytes until destination address is long word aligned */
movl edi,edx
movl ecx,ebx
andl $3,edx
jz .L11
movl $4,ecx
subl edx,ecx
andl $3,ecx
subl ecx,ebx
rep
movsb
mov ebx,ecx
/* move bytes a long word at a time */
.L11:
shrl $2,ecx
.p2align 2
rep
movsl
movl ebx,ecx
andl $3,ecx
#endif /* !__OPTIMIZE_SIZE__ */
/* handle any remaining bytes */
.L3:
rep
movsb
.L5:
movl 8(ebp),eax /* return dest */
cld /* restore the ABI-required direction flag state */
leal -12(ebp),esp
popl ebx
popl edi
popl esi
leave
#endif
ret
|
4ms/metamodule-plugin-sdk
| 1,548
|
plugin-libc/newlib/libc/machine/i386/memset.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memset)
SOTYPE_FUNCTION(memset)
/* void *memset (void *dest, int c, size_t n)
cdecl: dest at 8(ebp), c at 12(ebp), n at 16(ebp); returns dest in eax.
__iamcu__ regparm variant: eax = dest, edx = c, ecx = n.
Strategy: byte-fill until dest is 8-aligned, then rep stosl with the
fill byte replicated across eax, then the remaining 0-3 bytes. */
SYM (memset):
#ifdef __iamcu__
pushl edi
movl eax,edi
movzbl dl,eax
mov edi,edx /* remember dest for the return value */
rep stosb
mov edx,eax
popl edi
#else
pushl ebp
movl esp,ebp
pushl edi
movl 8(ebp),edi
movzbl 12(ebp),eax /* only the low byte of c is used */
movl 16(ebp),ecx
cld
#ifndef __OPTIMIZE_SIZE__
/* Less than 16 bytes won't benefit from the 'rep stosl' loop. */
cmpl $16,ecx
jbe .L19
testl $7,edi
je .L10
/* It turns out that 8-byte aligned 'rep stosl' outperforms
4-byte aligned on some x86 platforms.  Up to 7 single-byte
stores follow, unrolled (ecx > 16 here, so this is safe). */
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
/* At this point, ecx>8 and edi%8==0. */
.L10:
movb al,ah /* replicate the fill byte into all 4 bytes of eax */
movl eax,edx
sall $16,edx
orl edx,eax
movl ecx,edx
shrl $2,ecx /* dword count */
andl $3,edx /* byte residue */
rep
stosl
movl edx,ecx
#endif /* not __OPTIMIZE_SIZE__ */
.L19:
rep
stosb
movl 8(ebp),eax /* return dest */
leal -4(ebp),esp
popl edi
leave
#endif
ret
|
4ms/metamodule-plugin-sdk
| 1,699
|
plugin-libc/newlib/libc/machine/i386/memchr.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memchr)
SOTYPE_FUNCTION(memchr)
/* void *memchr (const void *s, int c, size_t n)
cdecl: s at 8(ebp), c at 12(ebp), n at 16(ebp); returns pointer to the
first occurrence of (unsigned char)c, or NULL.  iamcu: eax/edx/ecx.
The fast path scans a dword at a time using the classic trick:
(x - 0x01010101) & ~x & 0x80808080 != 0  iff some byte of x is zero
(applied to x ^ repeated-c to find byte c).  16843009 = 0x01010101 and
-2139062144 = 0x80808080 below. */
SYM (memchr):
#ifdef __iamcu__
pushl edi
movl eax,edi
movl edx,eax
xorl edx,edx
testl ecx,ecx
jz L20
repnz
scasb
setnz dl /* dl = 1 if we ran out of bytes without a match */
decl edi /* back up onto the matching byte */
decl edx /* edx = -1 on match, 0 on miss ... */
andl edi,edx /* ... so this yields the pointer or NULL */
L20:
movl edx,eax
popl edi
#else
pushl ebp
movl esp,ebp
pushl edi
movzbl 12(ebp),eax
movl 16(ebp),ecx
movl 8(ebp),edi
xorl edx,edx
testl ecx,ecx
jz L20 /* n == 0: return NULL */
#ifdef __OPTIMIZE_SIZE__
cld
repnz
scasb
setnz dl
decl edi
#else /* !__OPTIMIZE_SIZE__ */
/* Do byte-wise checks until string is aligned. */
testl $3,edi
je L5
cmpb (edi),al
je L15
incl edi
decl ecx
je L20
testl $3,edi
je L5
cmpb (edi),al
je L15
incl edi
decl ecx
je L20
testl $3,edi
je L5
cmpb (edi),al
je L15
incl edi
decl ecx
je L20
/* Create a mask, then check a word at a time. */
L5:
movb al,ah /* replicate c into all 4 bytes of eax */
movl eax,edx
sall $16,edx
orl edx,eax
pushl ebx
.p2align 4,,7
L8:
subl $4,ecx
jc L9 /* fewer than 4 bytes left */
movl (edi),edx
addl $4,edi
xorl eax,edx /* bytes equal to c become zero ... */
leal -16843009(edx),ebx /* ... and the zero-byte test finds them */
notl edx
andl edx,ebx
testl $-2139062144,ebx
je L8
subl $4,edi /* match inside this dword; rescan it bytewise */
L9:
popl ebx
xorl edx,edx
addl $4,ecx
je L20
/* Final byte-wise checks. */
.p2align 4,,7
L10:
cmpb (edi),al
je L15
incl edi
decl ecx
jne L10
xorl edi,edi /* not found */
#endif /* !__OPTIMIZE_SIZE__ */
L15:
decl edx /* edx was 0 here, so edx = -1: mask keeps edi */
andl edi,edx
L20:
movl edx,eax
leal -4(ebp),esp
popl edi
leave
#endif
ret
|
4ms/metamodule-plugin-sdk
| 1,759
|
plugin-libc/newlib/libc/machine/i386/memcmp.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memcmp)
SOTYPE_FUNCTION(memcmp)
/* int memcmp (const void *s1, const void *s2, size_t n)
cdecl: s1 at 8(ebp), s2 at 12(ebp), n at 16(ebp).  Returns <0, 0 or >0
comparing bytes as unsigned chars.  iamcu: eax = s1, edx = s2, ecx = n.
Fast path compares a dword at a time when both pointers are 4-aligned,
then re-compares the differing dword bytewise to build the result. */
SYM (memcmp):
#ifdef __iamcu__
pushl edi
pushl esi
movl eax,edi
movl edx,esi
cld
/* check if length is zero in which case just return 0 */
xorl eax,eax
testl ecx,ecx
jz L4
/* compare any unaligned bytes or remainder bytes */
repz
cmpsb
/* set output to be < 0 if less than, 0 if equal, or > 0 if greater than */
xorl edx,edx
movb -1(esi),dl
movb -1(edi),al
subl edx,eax
L4:
popl esi
popl edi
#else
pushl ebp
movl esp,ebp
subl $16,esp
pushl ebx
pushl edi
pushl esi
movl 8(ebp),edi
movl 12(ebp),esi
movl 16(ebp),ecx
cld
/* check if length is zero in which case just return 0 */
xorl eax,eax
testl ecx,ecx
jz L4
#ifndef __OPTIMIZE_SIZE__
/* if aligned on long boundary, compare doublewords at a time first */
movl edi,eax
orl esi,eax
testb $3,al /* either pointer misaligned -> bytewise */
jne BYTECMP
movl ecx,ebx
shrl $2,ecx /* calculate number of long words to compare */
repz
cmpsl
jz L5
subl $4,esi /* dwords differed: back up and redo the */
subl $4,edi /* offending dword one byte at a time */
movl $4,ecx
jmp BYTECMP
L5:
andl $3,ebx /* calculate number of remaining bytes */
movl ebx,ecx
#endif /* not __OPTIMIZE_SIZE__ */
BYTECMP: /* compare any unaligned bytes or remainder bytes */
repz
cmpsb
/* set output to be < 0 if less than, 0 if equal, or > 0 if greater than */
L3: /* note: label is not referenced anywhere in this file */
xorl edx,edx
movb -1(esi),dl /* last bytes compared (cmpsb post-incremented) */
xorl eax,eax
movb -1(edi),al
subl edx,eax
L4:
leal -28(ebp),esp
popl esi
popl edi
popl ebx
leave
#endif
ret
|
4ms/metamodule-plugin-sdk
| 1,491
|
plugin-libc/newlib/libc/machine/i386/strlen.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (strlen)
SOTYPE_FUNCTION(strlen)
/* size_t strlen (const char *s)
cdecl: s at 8(ebp); iamcu: s in eax.  Returns the length in eax.
Fast path scans a dword at a time with the zero-byte trick:
(x - 0x01010101) & ~x & 0x80808080 != 0  iff some byte of x is zero
(16843009 = 0x01010101, -2139062144 = 0x80808080). */
SYM (strlen):
pushl ebp
movl esp,ebp
pushl edi
#ifdef __iamcu__
movl eax,edx
#else
movl 8(ebp),edx
#endif
#if defined __OPTIMIZE_SIZE__ || defined __iamcu__
cld
movl edx,edi
movl $4294967295,ecx /* scan at most 2^32-1 bytes */
xor eax,eax
repnz
scasb
#else
/* Modern x86 hardware is much faster at double-word
manipulation than with bytewise repnz scasb. */
/* Do byte-wise checks until string is aligned. */
movl edx,edi
test $3,edi
je L5
movb (edi),cl
incl edi
testb cl,cl
je L15
test $3,edi
je L5
movb (edi),cl
incl edi
testb cl,cl
je L15
test $3,edi
je L5
movb (edi),cl
incl edi
testb cl,cl
je L15
L5:
subl $4,edi /* pre-bias; the loop below adds 4 before each load */
/* loop performing 4 byte mask checking for desired 0 byte */
.p2align 4,,7
L10:
addl $4,edi
movl (edi),ecx
leal -16843009(ecx),eax
notl ecx
andl ecx,eax
testl $-2139062144,eax
je L10
/* Find which of four bytes is 0. */
notl ecx /* recover the original dword */
incl edi
testb cl,cl
je L15
incl edi
shrl $8,ecx
testb cl,cl
je L15
incl edi
shrl $8,ecx
testb cl,cl
je L15
incl edi
#endif
L15:
subl edx,edi /* edi is one past the NUL in every path ... */
leal -1(edi),eax /* ... so length = edi - s - 1 */
leal -4(ebp),esp
popl edi
leave
ret
|
4ms/metamodule-plugin-sdk
| 2,833
|
plugin-libc/newlib/libc/machine/i386/strchr.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (strchr)
SOTYPE_FUNCTION(strchr)
/* char *strchr (const char *s, int c)
cdecl: s at 8(ebp), c at 12(ebp); iamcu: s in eax, c in edx.
Returns a pointer to the first occurrence of (char)c in s (the NUL
terminator counts as part of the string), or NULL.
The fast path scans a dword at a time, testing for a NUL byte and for
byte c simultaneously with the zero-byte trick:
(x - 0x01010101) & ~x & 0x80808080 != 0  iff some byte of x is zero
(16843009 = 0x01010101, -2139062144 = 0x80808080). */
SYM (strchr):
#ifdef __iamcu__
xorl ecx,ecx
movb dl,cl
/* loop while (*s && *s++ != c) */
leal -1(eax),eax
L15:
incl eax
movb (eax),dl
testb dl,dl
je L14
cmpb cl,dl
jne L15
L14:
/* if (*s == c) return address otherwise return NULL */
cmpb cl,(eax)
je L19
xorl eax,eax
L19:
ret
#else
pushl ebp
movl esp,ebp
pushl edi
pushl ebx
xorl ebx,ebx
movl 8(ebp),edi
addb 12(ebp),bl /* bl = (char)c; sets ZF when c == 0 */
#ifndef __OPTIMIZE_SIZE__
/* Special case strchr(p,0). */
je L25
/* Do byte-wise checks until string is aligned. */
test $3,edi
je L5
movl edi,eax
movb (eax),cl
testb cl,cl
je L14
cmpb bl,cl
je L19
incl edi
test $3,edi
je L5
movl edi,eax
movb (eax),cl
testb cl,cl
je L14
cmpb bl,cl
je L19
incl edi
test $3,edi
je L5
movl edi,eax
movb (eax),cl
testb cl,cl
je L14
cmpb bl,cl
je L19
incl edi
/* create 4 byte mask which is just the desired byte repeated 4 times */
L5:
movl ebx,ecx
sall $8,ebx
subl $4,edi /* pre-bias; the loop adds 4 before each load */
orl ecx,ebx
movl ebx,edx
sall $16,ebx
orl edx,ebx
/* loop performing 4 byte mask checking for 0 byte or desired byte */
.p2align 4,,7
L10:
addl $4,edi
movl (edi),ecx
leal -16843009(ecx),edx /* any NUL byte in this dword? */
movl ecx,eax
notl eax
andl eax,edx
testl $-2139062144,edx
jne L9
xorl ebx,ecx /* bytes equal to c become zero ... */
leal -16843009(ecx),edx /* ... and the same test finds them */
notl ecx
andl ecx,edx
testl $-2139062144,edx
je L10
#endif /* not __OPTIMIZE_SIZE__ */
/* loop while (*s && *s++ != c) */
L9:
leal -1(edi),eax /* rescan the hit dword one byte at a time */
.p2align 4,,7
L15:
incl eax
movb (eax),dl
testb dl,dl
je L14
cmpb bl,dl
jne L15
L14:
/* if (*s == c) return address otherwise return NULL */
cmpb bl,(eax)
je L19
xorl eax,eax
L19:
leal -8(ebp),esp
popl ebx
popl edi
leave
ret
#ifndef __OPTIMIZE_SIZE__
/* Special case strchr(p,0): just find the terminating NUL. */
#if 0
/* Hideous performance on modern machines. */
L25:
cld
movl $-1,ecx
xor eax,eax
repnz
scasb
leal -1(edi),eax
jmp L19
#endif
L25:
/* Do byte-wise checks until string is aligned. */
test $3,edi
je L26
movl edi,eax
movb (eax),cl
testb cl,cl
je L19
incl edi
test $3,edi
je L26
movl edi,eax
movb (eax),cl
testb cl,cl
je L19
incl edi
test $3,edi
je L26
movl edi,eax
movb (eax),cl
testb cl,cl
je L19
incl edi
L26:
subl $4,edi
/* loop performing 4 byte mask checking for desired 0 byte */
.p2align 4,,7
L27:
addl $4,edi
movl (edi),ecx
leal -16843009(ecx),edx
movl ecx,eax
notl eax
andl eax,edx
testl $-2139062144,edx
je L27
jmp L9 /* NUL is somewhere in this dword; L9 pinpoints it */
#endif /* !__OPTIMIZE_SIZE__ */
#endif /* __iamcu__ */
|
4ms/metamodule-plugin-sdk
| 2,194
|
plugin-libc/newlib/libc/machine/d30v/setjmp.S
|
; setjmp/longjmp for D30V.
;
; int setjmp (jmp_buf env)
;   env arrives in R2.  Each st2w stores an even/odd register pair
;   (two words) and the @(r2+,r0) addressing mode post-increments R2,
;   so the buffer is filled sequentially with the callee context:
;   r26..r63, accumulator a1, then the system register pairs
;   psw/rpt_c, rpt_s/rpt_e and mod_s/mod_e.
;   A direct call returns 0 (in R2); a matching longjmp resumes here
;   with a non-zero value.
.text
.globl setjmp
.type setjmp,@function
.stabs "setjmp.S",100,0,0,setjmp
.stabs "int:t(0,1)=r(0,1);-2147483648;2147483647;",128,0,0,0
.stabs "setjmp:F(0,1)",36,0,1,setjmp
setjmp:
; Address of jmpbuf is passed in R2. Save the appropriate registers.
st2w r26, @(r2+,r0)
st2w r28, @(r2+,r0)
st2w r30, @(r2+,r0)
st2w r32, @(r2+,r0)
st2w r34, @(r2+,r0)
st2w r36, @(r2+,r0)
st2w r38, @(r2+,r0)
st2w r40, @(r2+,r0)
st2w r42, @(r2+,r0)
st2w r44, @(r2+,r0)
st2w r46, @(r2+,r0)
st2w r48, @(r2+,r0)
st2w r50, @(r2+,r0)
st2w r52, @(r2+,r0)
st2w r54, @(r2+,r0)
st2w r56, @(r2+,r0)
st2w r58, @(r2+,r0)
st2w r60, @(r2+,r0)
st2w r62, @(r2+,r0)
; Stage both halves of accumulator a1 in the r4/r5 pair, then store.
mvfacc r4, a1, 16
mvfacc r5, a1, 0
st2w r4, @(r2+,r0)
; System registers are likewise staged through r4/r5 and stored pairwise.
mvfsys r4, psw
mvfsys r5, rpt_c
st2w r4, @(r2+,r0)
mvfsys r4, rpt_s
mvfsys r5, rpt_e
st2w r4, @(r2+,r0)
mvfsys r4, mod_s
mvfsys r5, mod_e
st2w r4, @(r2+,r0)
; Return 0 to caller
add r2, r0, r0 ; r2 = r0 + r0 produces the 0 return value
jmp link
.Lsetjmp:
.size setjmp,.Lsetjmp-setjmp
.stabs "",36,0,0,.Lsetjmp-setjmp
; void longjmp (jmp_buf env, int val)
;   env in R2, val in R3.  Reloads the register pairs in exactly the
;   order setjmp stored them, merges the saved rp/md bits into the
;   current psw, then resumes after the matching setjmp call with
;   return value val (or 1 if val == 0, as required by the C standard).
.globl longjmp
.type longjmp,@function
.stabs "longjmp:F(0,1)",36,0,1,longjmp
longjmp:
; Address of jmpbuf is in R2. Restore the registers.
ld2w r26, @(r2+,r0)
ld2w r28, @(r2+,r0)
ld2w r30, @(r2+,r0)
ld2w r32, @(r2+,r0)
ld2w r34, @(r2+,r0)
ld2w r36, @(r2+,r0)
ld2w r38, @(r2+,r0)
ld2w r40, @(r2+,r0)
ld2w r42, @(r2+,r0)
ld2w r44, @(r2+,r0)
ld2w r46, @(r2+,r0)
ld2w r48, @(r2+,r0)
ld2w r50, @(r2+,r0)
ld2w r52, @(r2+,r0)
ld2w r54, @(r2+,r0)
ld2w r56, @(r2+,r0)
ld2w r58, @(r2+,r0)
ld2w r60, @(r2+,r0)
ld2w r62, @(r2+,r0)
; Reload the saved accumulator halves into r4/r5 and rebuild a1.
ld2w r4, @(r2+,r0)
mvtacc a1, r4, r5
mvfsys r6, psw
ld2w r4, @(r2+,r0) /* psw, rpt_c */
and r6, r6, 0xfcff /* set rp, md bits from setjmp, leave */
and r4, r4, 0x0300 /* all other psw bits the same */
or r4, r4, r6
mvtsys psw, r4
mvtsys rpt_c, r5
ld2w r4, @(r2+,r0)
mvtsys rpt_s, r4
mvtsys rpt_e, r5
ld2w r4, @(r2+,r0)
mvtsys mod_s, r4
mvtsys mod_e, r5
; Value to return to caller is in R3. If caller attempted to return 0,
; return 1 instead.  The add/tx increment is conditional: it executes
; only when the parallel cmpeq set f0 (i.e. r3 == 0).
cmpeq f0, r3, 0 || add r2, r3, r0
jmp link || add/tx r2, r2, 1
.Llongjmp:
.size longjmp,.Llongjmp-longjmp
.stabs "",36,0,0,.Llongjmp-longjmp
|
4ms/metamodule-plugin-sdk
| 1,762
|
plugin-libc/newlib/libc/machine/aarch64/stpcpy.S
|
/*
stpcpy - copy a string returning pointer to end.
Copyright (c) 2015 ARM Ltd.
All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* This is just a wrapper that uses strcpy code with appropriate
pre-defines. */
#define BUILD_STPCPY
#include "strcpy.S"
|
4ms/metamodule-plugin-sdk
| 7,064
|
plugin-libc/newlib/libc/machine/aarch64/memcpy.S
|
/* Copyright (c) 2012-2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses.
*
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memcpy-stub.c */
#else
#define dstin x0
#define src x1
#define count x2
#define dst x3
#define srcend x4
#define dstend x5
#define A_l x6
#define A_lw w6
#define A_h x7
#define A_hw w7
#define B_l x8
#define B_lw w8
#define B_h x9
#define C_l x10
#define C_h x11
#define D_l x12
#define D_h x13
#define E_l src
#define E_h count
#define F_l srcend
#define F_h dst
#define tmp1 x9
#define L(l) .L ## l
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* Copies are split into 3 main cases: small copies of up to 16 bytes,
medium copies of 17..96 bytes which are fully unrolled. Large copies
of more than 96 bytes align the destination and use an unrolled loop
processing 64 bytes per iteration.
Small and medium copies read all data before writing, allowing any
kind of overlap, and memmove tailcalls memcpy for these cases as
well as non-overlapping copies.
*/
/* void *memcpy (void *dstin, const void *src, size_t count)
   ABI: AAPCS64.  In: x0 = dstin, x1 = src, x2 = count.
   Out: x0 = dstin (x0 is never modified).
   Dispatch: count <= 16 -> L(copy16); 17..96 -> unrolled medium
   copies below; > 96 -> L(copy_long), a 64-bytes-per-iteration loop
   with 16-byte-aligned destination.  Small/medium paths read all
   data before writing, so they tolerate any overlap.  */
def_fn memcpy p2align=6
prfm PLDL1KEEP, [src]
add srcend, src, count
add dstend, dstin, count
cmp count, 16
b.ls L(copy16)
cmp count, 96
b.hi L(copy_long)
/* Medium copies: 17..96 bytes. */
sub tmp1, count, 1
ldp A_l, A_h, [src]
tbnz tmp1, 6, L(copy96) /* bit 6 of count-1 set => count > 64 */
ldp D_l, D_h, [srcend, -16]
tbz tmp1, 5, 1f /* bit 5 clear => count <= 32: head+tail suffice */
ldp B_l, B_h, [src, 16]
ldp C_l, C_h, [srcend, -32]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstend, -32]
1:
stp A_l, A_h, [dstin]
stp D_l, D_h, [dstend, -16]
ret
.p2align 4
/* Small copies: 0..16 bytes. */
L(copy16):
cmp count, 8
b.lo 1f
/* 8..16 bytes: one dword from each end (they may overlap). */
ldr A_l, [src]
ldr A_h, [srcend, -8]
str A_l, [dstin]
str A_h, [dstend, -8]
ret
.p2align 4
1:
tbz count, 2, 1f
/* 4..7 bytes: one word from each end. */
ldr A_lw, [src]
ldr A_hw, [srcend, -4]
str A_lw, [dstin]
str A_hw, [dstend, -4]
ret
/* Copy 0..3 bytes. Use a branchless sequence that copies the same
byte 3 times if count==1, or the 2nd byte twice if count==2. */
1:
cbz count, 2f
lsr tmp1, count, 1
ldrb A_lw, [src]
ldrb A_hw, [srcend, -1]
ldrb B_lw, [src, tmp1]
strb A_lw, [dstin]
strb B_lw, [dstin, tmp1]
strb A_hw, [dstend, -1]
2: ret
.p2align 4
/* Copy 64..96 bytes. Copy 64 bytes from the start and
32 bytes from the end. */
L(copy96):
ldp B_l, B_h, [src, 16]
ldp C_l, C_h, [src, 32]
ldp D_l, D_h, [src, 48]
ldp E_l, E_h, [srcend, -32]
ldp F_l, F_h, [srcend, -16]
stp A_l, A_h, [dstin]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstin, 32]
stp D_l, D_h, [dstin, 48]
stp E_l, E_h, [dstend, -32]
stp F_l, F_h, [dstend, -16]
ret
/* Align DST to 16 byte alignment so that we don't cross cache line
boundaries on both loads and stores. There are at least 96 bytes
to copy, so copy 16 bytes unaligned and then align. The loop
copies 64 bytes per iteration and prefetches one iteration ahead. */
.p2align 4
L(copy_long):
and tmp1, dstin, 15
bic dst, dstin, 15
ldp D_l, D_h, [src]
sub src, src, tmp1 /* keep src and dst misaligned by the same amount */
add count, count, tmp1 /* Count is now 16 too large. */
ldp A_l, A_h, [src, 16]
stp D_l, D_h, [dstin] /* unaligned head store covers the first 16 bytes */
ldp B_l, B_h, [src, 32]
ldp C_l, C_h, [src, 48]
ldp D_l, D_h, [src, 64]!
subs count, count, 128 + 16 /* Test and readjust count. */
b.ls 2f
1:
stp A_l, A_h, [dst, 16]
ldp A_l, A_h, [src, 16]
stp B_l, B_h, [dst, 32]
ldp B_l, B_h, [src, 32]
stp C_l, C_h, [dst, 48]
ldp C_l, C_h, [src, 48]
stp D_l, D_h, [dst, 64]!
ldp D_l, D_h, [src, 64]!
subs count, count, 64
b.hi 1b
/* Write the last full set of 64 bytes. The remainder is at most 64
bytes, so it is safe to always copy 64 bytes from the end even if
there is just 1 byte left. */
2:
ldp E_l, E_h, [srcend, -64]
stp A_l, A_h, [dst, 16]
ldp A_l, A_h, [srcend, -48]
stp B_l, B_h, [dst, 32]
ldp B_l, B_h, [srcend, -32]
stp C_l, C_h, [dst, 48]
ldp C_l, C_h, [srcend, -16]
stp D_l, D_h, [dst, 64]
stp E_l, E_h, [dstend, -64]
stp A_l, A_h, [dstend, -48]
stp B_l, B_h, [dstend, -32]
stp C_l, C_h, [dstend, -16]
ret
.size memcpy, . - memcpy
#endif
|
4ms/metamodule-plugin-sdk
| 2,274
|
plugin-libc/newlib/libc/machine/aarch64/rawmemchr.S
|
/* Copyright (c) 2015-2016, ARM Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses
*
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See rawmemchr-stub.c. */
#else
#define L(l) .L ## l
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* Special case rawmemchr (s, 0) as strlen, otherwise tailcall memchr.
Call strlen without setting up a full frame - it preserves x14/x15.
*/
/* void *rawmemchr (const void *s, int c)
   In: x0 = s, w1 = c.  Out: x0 = pointer to the first byte == c.
   c == 0 is exactly strlen, so that case calls strlen and adds the
   length to s; otherwise tail-call memchr with length -1 (SIZE_MAX,
   i.e. effectively unbounded).  */
def_fn rawmemchr p2align=5
.cfi_startproc
cbz w1, L(do_strlen)
mov x2, -1 /* no length limit for memchr */
b memchr
L(do_strlen):
/* No stack frame: stash the return address in x15 and the original
   pointer in x14 across the strlen call (see note above — strlen
   preserves x14/x15).  */
mov x15, x30
.cfi_return_column x15 /* tell the unwinder the RA now lives in x15 */
mov x14, x0
bl strlen
add x0, x14, x0 /* result = s + strlen (s) */
ret x15
.cfi_endproc
.size rawmemchr, . - rawmemchr
#endif
|
4ms/metamodule-plugin-sdk
| 2,524
|
plugin-libc/newlib/libc/machine/aarch64/setjmp.S
|
/*
Copyright (c) 2011, 2012 ARM Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define GPR_LAYOUT \
REG_PAIR (x19, x20, 0); \
REG_PAIR (x21, x22, 16); \
REG_PAIR (x23, x24, 32); \
REG_PAIR (x25, x26, 48); \
REG_PAIR (x27, x28, 64); \
REG_PAIR (x29, x30, 80); \
REG_ONE (x16, 96)
#define FPR_LAYOUT \
REG_PAIR ( d8, d9, 112); \
REG_PAIR (d10, d11, 128); \
REG_PAIR (d12, d13, 144); \
REG_PAIR (d14, d15, 160);
// int setjmp (jmp_buf)
// In: x0 = jmp_buf.  Saves the AAPCS64 callee-saved context using the
// GPR_LAYOUT/FPR_LAYOUT offset tables above: x19-x28, x29 (FP),
// x30 (LR), the stack pointer (staged through x16 at offset 96), and
// d8-d15.  A direct call returns 0; returning non-zero happens via
// longjmp restoring this context.
.global setjmp
.type setjmp, %function
setjmp:
mov x16, sp // sp cannot be stored directly by stp/str; copy via x16
#define REG_PAIR(REG1, REG2, OFFS) stp REG1, REG2, [x0, OFFS]
#define REG_ONE(REG1, OFFS) str REG1, [x0, OFFS]
GPR_LAYOUT
FPR_LAYOUT
#undef REG_PAIR
#undef REG_ONE
mov w0, #0 // direct invocation returns 0
ret
.size setjmp, .-setjmp
// void longjmp (jmp_buf, int) __attribute__ ((noreturn))
// In: x0 = jmp_buf, w1 = value.  Reloads the registers saved by
// setjmp (same REG_PAIR/REG_ONE layout, now as loads), restores sp
// from the x16 slot, and resumes at the saved x30 — i.e. just after
// the matching setjmp call — returning value, or 1 if value == 0.
.global longjmp
.type longjmp, %function
longjmp:
#define REG_PAIR(REG1, REG2, OFFS) ldp REG1, REG2, [x0, OFFS]
#define REG_ONE(REG1, OFFS) ldr REG1, [x0, OFFS]
GPR_LAYOUT
FPR_LAYOUT
#undef REG_PAIR
#undef REG_ONE
mov sp, x16
cmp w1, #0
cinc w0, w1, eq // w0 = w1, incremented to 1 when w1 == 0
// use br not ret, as ret is guaranteed to mispredict
br x30
.size longjmp, .-longjmp
|
4ms/metamodule-plugin-sdk
| 10,507
|
plugin-libc/newlib/libc/machine/aarch64/strcpy.S
|
/*
strcpy/stpcpy - copy a string returning pointer to start/end.
Copyright (c) 2013, 2014, 2015 ARM Ltd.
All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strchr-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses, min page size 4k.
*/
/* To build as stpcpy, define BUILD_STPCPY before compiling this file.
To test the page crossing code path more thoroughly, compile with
-DSTRCPY_TEST_PAGE_CROSS - this will force all copies through the slower
entry path. This option is not intended for production use. */
/* Arguments and results. */
#define dstin x0
#define srcin x1
/* Locals and temporaries. */
#define src x2
#define dst x3
#define data1 x4
#define data1w w4
#define data2 x5
#define data2w w5
#define has_nul1 x6
#define has_nul2 x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define tmp4 x11
#define zeroones x12
#define data1a x13
#define data2a x14
#define pos x15
#define len x16
#define to_align x17
#ifdef BUILD_STPCPY
#define STRCPY stpcpy
#else
#define STRCPY strcpy
#endif
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* AArch64 systems have a minimum page size of 4k. We can do a quick
page size check for crossing this boundary on entry and if we
do not, then we can short-circuit much of the entry code. We
expect early page-crossing strings to be rare (probability of
16/MIN_PAGE_SIZE ~= 0.4%), so the branch should be quite
predictable, even with random strings.
We don't bother checking for larger page sizes, the cost of setting
up the correct page size is just not worth the extra gain from
a small reduction in the cases taking the slow path. Note that
we only care about whether the first fetch, which may be
misaligned, crosses a page boundary - after that we move to aligned
fetches for the remainder of the string. */
#ifdef STRCPY_TEST_PAGE_CROSS
/* Make everything that isn't Qword aligned look like a page cross. */
#define MIN_PAGE_P2 4
#else
#define MIN_PAGE_P2 12
#endif
#define MIN_PAGE_SIZE (1 << MIN_PAGE_P2)
/* char *strcpy (char *dstin, const char *srcin)
   — or stpcpy when BUILD_STPCPY is defined, which instead returns a
   pointer to the trailing NUL in the destination.
   ABI: AAPCS64.  In: x0 = dstin, x1 = srcin.
   Out: x0 = dstin (strcpy) or dstin + strlen (srcin) (stpcpy).
   Structure: fast path loads 16 bytes from srcin straight away
   (guarded by the MIN_PAGE_SIZE check below); .Lfp_le8/.Lfp_gt8
   handle strings that end within those 16 bytes; .Lbulk_entry /
   .Lmain_loop copy 16 bytes per iteration for longer strings;
   .Lpage_cross recreates the first 16 bytes safely when the initial
   fetch could fault across a page boundary.  */
def_fn STRCPY p2align=6
/* For moderately short strings, the fastest way to do the copy is to
calculate the length of the string in the same way as strlen, then
essentially do a memcpy of the result. This avoids the need for
multiple byte copies and further means that by the time we
reach the bulk copy loop we know we can always use DWord
accesses. We expect strcpy to rarely be called repeatedly
with the same source string, so branch prediction is likely to
always be difficult - we mitigate against this by preferring
conditional select operations over branches whenever this is
feasible. */
and tmp2, srcin, #(MIN_PAGE_SIZE - 1)
mov zeroones, #REP8_01
and to_align, srcin, #15
cmp tmp2, #(MIN_PAGE_SIZE - 16)
neg tmp1, to_align
/* The first fetch will straddle a (possible) page boundary iff
srcin + 15 causes bit[MIN_PAGE_P2] to change value. A 16-byte
aligned string will never fail the page align check, so will
always take the fast path. */
b.gt .Lpage_cross
.Lpage_cross_ok:
ldp data1, data2, [srcin]
#ifdef __AARCH64EB__
/* Because we expect the end to be found within 16 characters
(profiling shows this is the most common case), it's worth
swapping the bytes now to save having to recalculate the
termination syndrome later. We preserve data1 and data2
so that we can re-use the values later on. */
rev tmp2, data1
sub tmp1, tmp2, zeroones
orr tmp2, tmp2, #REP8_7f
bics has_nul1, tmp1, tmp2
b.ne .Lfp_le8
rev tmp4, data2
sub tmp3, tmp4, zeroones
orr tmp4, tmp4, #REP8_7f
#else
/* NUL syndrome: (X - REP8_01) & ~(X | REP8_7f) is non-zero iff
   some byte of X is zero.  */
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
bics has_nul1, tmp1, tmp2
b.ne .Lfp_le8
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
#endif
bics has_nul2, tmp3, tmp4
b.eq .Lbulk_entry
/* The string is short (<=16 bytes). We don't know exactly how
short though, yet. Work out the exact length so that we can
quickly select the optimal copy strategy. */
.Lfp_gt8:
rev has_nul2, has_nul2
clz pos, has_nul2
mov tmp2, #56
add dst, dstin, pos, lsr #3 /* Bits to bytes. */
sub pos, tmp2, pos
#ifdef __AARCH64EB__
lsr data2, data2, pos
#else
lsl data2, data2, pos
#endif
str data2, [dst, #1]
str data1, [dstin]
#ifdef BUILD_STPCPY
add dstin, dst, #8
#endif
ret
.Lfp_le8:
rev has_nul1, has_nul1
clz pos, has_nul1
add dst, dstin, pos, lsr #3 /* Bits to bytes. */
subs tmp2, pos, #24 /* Pos in bits. */
b.lt .Lfp_lt4
#ifdef __AARCH64EB__
mov tmp2, #56
sub pos, tmp2, pos
lsr data2, data1, pos
lsr data1, data1, #32
#else
lsr data2, data1, tmp2
#endif
/* 4->7 bytes to copy. */
str data2w, [dst, #-3]
str data1w, [dstin]
#ifdef BUILD_STPCPY
mov dstin, dst
#endif
ret
.Lfp_lt4:
cbz pos, .Lfp_lt2
/* 2->3 bytes to copy. */
#ifdef __AARCH64EB__
lsr data1, data1, #48
#endif
strh data1w, [dstin]
/* Fall-through, one byte (max) to go. */
.Lfp_lt2:
/* Null-terminated string. Last character must be zero! */
strb wzr, [dst]
#ifdef BUILD_STPCPY
mov dstin, dst
#endif
ret
.p2align 6
/* Aligning here ensures that the entry code and main loop all lies
within one 64-byte cache line. */
.Lbulk_entry:
sub to_align, to_align, #16
stp data1, data2, [dstin]
sub src, srcin, to_align
sub dst, dstin, to_align
b .Lentry_no_page_cross
/* The inner loop deals with two Dwords at a time. This has a
slightly higher start-up cost, but we should win quite quickly,
especially on cores with a high number of issue slots per
cycle, as we get much better parallelism out of the operations. */
.Lmain_loop:
stp data1, data2, [dst], #16
.Lentry_no_page_cross:
ldp data1, data2, [src], #16
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
bic has_nul1, tmp1, tmp2
bics has_nul2, tmp3, tmp4
ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
b.eq .Lmain_loop
/* Since we know we are copying at least 16 bytes, the fastest way
to deal with the tail is to determine the location of the
trailing NUL, then (re)copy the 16 bytes leading up to that. */
cmp has_nul1, #0
#ifdef __AARCH64EB__
/* For big-endian, carry propagation (if the final byte in the
string is 0x01) means we cannot use has_nul directly. The
easiest way to get the correct byte is to byte-swap the data
and calculate the syndrome a second time. */
csel data1, data1, data2, ne
rev data1, data1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
bic has_nul1, tmp1, tmp2
#else
csel has_nul1, has_nul1, has_nul2, ne
#endif
rev has_nul1, has_nul1
clz pos, has_nul1
add tmp1, pos, #72
add pos, pos, #8
csel pos, pos, tmp1, ne
add src, src, pos, lsr #3
add dst, dst, pos, lsr #3
ldp data1, data2, [src, #-32]
stp data1, data2, [dst, #-16]
#ifdef BUILD_STPCPY
sub dstin, dst, #1
#endif
ret
.Lpage_cross:
bic src, srcin, #15
/* Start by loading two words at [srcin & ~15], then forcing the
bytes that precede srcin to 0xff. This means they never look
like termination bytes. */
ldp data1, data2, [src]
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
tst to_align, #7
csetm tmp2, ne
#ifdef __AARCH64EB__
lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#else
lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#endif
orr data1, data1, tmp2
orr data2a, data2, tmp2
cmp to_align, #8
csinv data1, data1, xzr, lt
csel data2, data2, data2a, lt
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
bic has_nul1, tmp1, tmp2
bics has_nul2, tmp3, tmp4
ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
b.eq .Lpage_cross_ok
/* We now need to make data1 and data2 look like they've been
loaded directly from srcin. Do a rotate on the 128-bit value. */
lsl tmp1, to_align, #3 /* Bytes->bits. */
neg tmp2, to_align, lsl #3
#ifdef __AARCH64EB__
lsl data1a, data1, tmp1
lsr tmp4, data2, tmp2
lsl data2, data2, tmp1
orr tmp4, tmp4, data1a
cmp to_align, #8
csel data1, tmp4, data2, lt
rev tmp2, data1
rev tmp4, data2
sub tmp1, tmp2, zeroones
orr tmp2, tmp2, #REP8_7f
sub tmp3, tmp4, zeroones
orr tmp4, tmp4, #REP8_7f
#else
lsr data1a, data1, tmp1
lsl tmp4, data2, tmp2
lsr data2, data2, tmp1
orr tmp4, tmp4, data1a
cmp to_align, #8
csel data1, tmp4, data2, lt
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
#endif
bic has_nul1, tmp1, tmp2
cbnz has_nul1, .Lfp_le8
bic has_nul2, tmp3, tmp4
b .Lfp_gt8
.size STRCPY, . - STRCPY
#endif
|
4ms/metamodule-plugin-sdk
| 5,439
|
plugin-libc/newlib/libc/machine/aarch64/memmove.S
|
/* Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memmove-stub.c */
#else
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* Parameters and result. */
#define dstin x0
#define src x1
#define count x2
#define srcend x3
#define dstend x4
#define tmp1 x5
#define A_l x6
#define A_h x7
#define B_l x8
#define B_h x9
#define C_l x10
#define C_h x11
#define D_l x12
#define D_h x13
#define E_l count
#define E_h tmp1
/* All memmoves up to 96 bytes are done by memcpy as it supports overlaps.
Larger backwards copies are also handled by memcpy. The only remaining
case is forward large copies. The destination is aligned, and an
unrolled loop processes 64 bytes per iteration.
*/
/* void *memmove (void *dstin, const void *src, size_t count)
   ABI: AAPCS64.  In: x0 = dstin, x1 = src, x2 = count.
   Out: x0 = dstin (x0 is never modified).
   Per the note above: small copies (<= 96) and copies where
   dst - src >= count (no forward-overlap hazard) are delegated to
   memcpy; only large forward-overlapping copies are done here, as a
   backward 64-bytes-per-iteration loop with aligned dstend.  */
def_fn memmove, 6
sub tmp1, dstin, src
cmp count, 96
/* If count <= 96, ccmp's false path sets C (NZCV = 2) so b.hs is
   taken; otherwise compare dst - src (unsigned) against count:
   b.hs also taken when the regions don't overlap forward.  */
ccmp tmp1, count, 2, hi
b.hs memcpy
cbz tmp1, 3f /* dst == src: nothing to do */
add dstend, dstin, count
add srcend, src, count
/* Align dstend to 16 byte alignment so that we don't cross cache line
boundaries on both loads and stores. There are at least 96 bytes
to copy, so copy 16 bytes unaligned and then align. The loop
copies 64 bytes per iteration and prefetches one iteration ahead. */
and tmp1, dstend, 15
ldp D_l, D_h, [srcend, -16]
sub srcend, srcend, tmp1
sub count, count, tmp1
ldp A_l, A_h, [srcend, -16]
stp D_l, D_h, [dstend, -16] /* unaligned tail store covers last 16 bytes */
ldp B_l, B_h, [srcend, -32]
ldp C_l, C_h, [srcend, -48]
ldp D_l, D_h, [srcend, -64]!
sub dstend, dstend, tmp1
subs count, count, 128
b.ls 2f
nop
1:
stp A_l, A_h, [dstend, -16]
ldp A_l, A_h, [srcend, -16]
stp B_l, B_h, [dstend, -32]
ldp B_l, B_h, [srcend, -32]
stp C_l, C_h, [dstend, -48]
ldp C_l, C_h, [srcend, -48]
stp D_l, D_h, [dstend, -64]!
ldp D_l, D_h, [srcend, -64]!
subs count, count, 64
b.hi 1b
/* Write the last full set of 64 bytes. The remainder is at most 64
bytes, so it is safe to always copy 64 bytes from the start even if
there is just 1 byte left. */
2:
ldp E_l, E_h, [src, 48]
stp A_l, A_h, [dstend, -16]
ldp A_l, A_h, [src, 32]
stp B_l, B_h, [dstend, -32]
ldp B_l, B_h, [src, 16]
stp C_l, C_h, [dstend, -48]
ldp C_l, C_h, [src]
stp D_l, D_h, [dstend, -64]
stp E_l, E_h, [dstin, 48]
stp A_l, A_h, [dstin, 32]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstin]
3: ret
.size memmove, . - memmove
#endif
|
4ms/metamodule-plugin-sdk
| 5,109
|
plugin-libc/newlib/libc/machine/aarch64/strchrnul.S
|
/*
strchrnul - find a character or nul in a string
Copyright (c) 2014, ARM Limited
All rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strchrnul-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
* Neon Available.
*/
/* Arguments and results. */
#define srcin x0
#define chrin w1
#define result x0
#define src x2
#define tmp1 x3
#define wtmp2 w4
#define tmp3 x5
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_nul1 v3
#define vhas_nul2 v4
#define vhas_chr1 v5
#define vhas_chr2 v6
#define vrepmask v7
#define vend1 v16
/* Core algorithm.
For each 32-byte hunk we calculate a 64-bit syndrome value, with
two bits per byte (LSB is always in bits 0 and 1, for both big
and little-endian systems). For each tuple, bit 0 is set iff
the relevant byte matched the requested character or nul. Since the
bits in the syndrome reflect exactly the order in which things occur
in the original string a count_trailing_zeros() operation will
identify exactly which byte is causing the termination. */
/* Locals and temporaries. */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/*----------------------------------------------------------------------
   char *strchrnul (const char *s, int c)
   AAPCS64: s = x0 (srcin), c = w1 (chrin); result returned in x0.
   Returns a pointer to the first occurrence of c in s, or to the
   terminating NUL if c does not occur.  Never returns NULL.
   Clobbers: x2-x5, v0-v7, v16, NZCV.
   Strategy: scan aligned 32-byte hunks with NEON, building a 64-bit
   syndrome (2 bits/byte) where a set bit marks "byte == c or byte == 0";
   count trailing zeros of the syndrome to locate the terminating byte.
  ----------------------------------------------------------------------*/
def_fn strchrnul
/* Magic constant 0x40100401 to allow us to identify which lane
matches the termination condition. */
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin /* Replicate c into all 16 lanes. */
bic src, srcin, #31 /* Work with aligned 32-byte hunks. */
dup vrepmask.4s, wtmp2
ands tmp1, srcin, #31 /* tmp1 = misalignment within the hunk. */
b.eq .Lloop
/* Input string is not 32-byte aligned. Rather than forcing
the padding bytes to a safe value, we calculate the syndrome
for all the bytes, but then mask off those bits of the
syndrome that are related to the padding. */
ld1 {vdata1.16b, vdata2.16b}, [src], #32
neg tmp1, tmp1
/* Per-byte 0xff lane masks: byte is NUL / byte matches c. */
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
/* Either NUL or a match terminates the scan: fold them together. */
orr vhas_chr1.16b, vhas_chr1.16b, vhas_nul1.16b
orr vhas_chr2.16b, vhas_chr2.16b, vhas_nul2.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
lsl tmp1, tmp1, #1 /* Two syndrome bits per padding byte. */
addp vend1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
mov tmp3, #~0
addp vend1.16b, vend1.16b, vend1.16b // 128->64
lsr tmp1, tmp3, tmp1 /* All-ones mask covering valid bytes only. */
mov tmp3, vend1.2d[0]
bic tmp1, tmp3, tmp1 // Mask padding bits.
cbnz tmp1, .Ltail
.Lloop:
/* Main loop: one aligned 32-byte hunk per iteration. */
ld1 {vdata1.16b, vdata2.16b}, [src], #32
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
/* Use a fast check for the termination condition. */
orr vhas_chr1.16b, vhas_nul1.16b, vhas_chr1.16b
orr vhas_chr2.16b, vhas_nul2.16b, vhas_chr2.16b
orr vend1.16b, vhas_chr1.16b, vhas_chr2.16b
addp vend1.2d, vend1.2d, vend1.2d
mov tmp1, vend1.2d[0]
cbz tmp1, .Lloop
/* Termination condition found. Now need to establish exactly why
we terminated: rebuild the precise per-lane syndrome. */
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
addp vend1.16b, vend1.16b, vend1.16b // 128->64
mov tmp1, vend1.2d[0]
.Ltail:
/* Count the trailing zeros, by bit reversing... */
rbit tmp1, tmp1
/* Re-bias source. */
sub src, src, #32
clz tmp1, tmp1 /* ... and counting the leading zeros. */
/* tmp1 is twice the offset into the fragment. */
add result, src, tmp1, lsr #1
ret
.size strchrnul, . - strchrnul
#endif
|
4ms/metamodule-plugin-sdk
| 8,771
|
plugin-libc/newlib/libc/machine/aarch64/strncmp.S
|
/* Copyright (c) 2013, 2018, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strcmp-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
*/
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define limit x2
#define result x0
/* Internal variables. */
#define data1 x3
#define data1w w3
#define data2 x4
#define data2w w4
#define has_nul x5
#define diff x6
#define syndrome x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define zeroones x11
#define pos x12
#define limit_wd x13
#define mask x14
#define endloop x15
#define count mask
.text
.p2align 6
.rep 7
nop /* Pad so that the loop below fits a cache line. */
.endr
/*----------------------------------------------------------------------
   int strncmp (const char *s1, const char *s2, size_t n)
   AAPCS64: s1 = x0 (src1), s2 = x1 (src2), n = x2 (limit);
   result in x0 (0 / negative / positive; bytes compared as unsigned).
   Clobbers: x3-x15, NZCV.
   NUL detection uses the classic (X - 1) & ~(X | 0x7f) trick per dword;
   a "syndrome" word (diff | has_nul) locates the first relevant byte.
   Misaligned SRC2 is handled by stopping the dword loop before every
   4K page boundary on SRC2 and byte-stepping across it.
  ----------------------------------------------------------------------*/
def_fn strncmp
cbz limit, .Lret0 /* n == 0: equal by definition. */
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7 /* Same alignment mod 8? */
and count, src1, #7
b.ne .Lmisaligned8
cbnz count, .Lmutual_align
/* Calculate the number of full and partial words -1. */
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
/* Start of performance-critical section -- one 64B cache line. */
.Lloop_aligned:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned:
subs limit_wd, limit_wd, #1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
csinv endloop, diff, xzr, pl /* Last Dword or differences. */
bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp endloop, #0, #0, eq
b.eq .Lloop_aligned
/* End of performance-critical section -- one 64B cache line. */
/* Not reached the limit, must have found the end or a diff. */
tbz limit_wd, #63, .Lnot_limit
/* Limit % 8 == 0 => all bytes significant. */
ands limit, limit, #7
b.eq .Lnot_limit
/* Mask off the bytes past the limit so they cannot influence
the result; force a NUL marker there instead. */
lsl limit, limit, #3 /* Bits -> bytes. */
mov mask, #~0
#ifdef __AARCH64EB__
lsr mask, mask, limit
#else
lsl mask, mask, limit
#endif
bic data1, data1, mask
bic data2, data2, mask
/* Make sure that the NUL byte is marked in the syndrome. */
orr has_nul, has_nul, mask
.Lnot_limit:
orr syndrome, diff, has_nul
#ifndef __AARCH64EB__
rev syndrome, syndrome
rev data1, data1
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
clz pos, syndrome
rev data2, data2
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#else
/* For big-endian we cannot use the trick with the syndrome value
as carry-propagation can corrupt the upper bits if the trailing
bytes in the string contain 0x01. */
/* However, if there is no NUL byte in the dword, we can generate
the result directly. We can't just subtract the bytes as the
MSB might be significant. */
cbnz has_nul, 1f
cmp data1, data2
cset result, ne
cneg result, result, lo
ret
1:
/* Re-compute the NUL-byte detection, using a byte-reversed value. */
rev tmp3, data1
sub tmp1, tmp3, zeroones
orr tmp2, tmp3, #REP8_7f
bic has_nul, tmp1, tmp2
rev has_nul, has_nul
orr syndrome, diff, has_nul
clz pos, syndrome
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#endif
.Lmutual_align:
/* Sources are mutually aligned, but are not currently at an
alignment boundary. Round down the addresses and then mask off
the bytes that precede the start point.
We also need to adjust the limit calculations, but without
overflowing if the limit is near ULONG_MAX. */
bic src1, src1, #7
bic src2, src2, #7
ldr data1, [src1], #8
neg tmp3, count, lsl #3 /* 64 - bits(bytes beyond align). */
ldr data2, [src2], #8
mov tmp2, #~0
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp3 /* Shift (count & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp3 /* Shift (count & 63). */
#endif
and tmp3, limit_wd, #7
lsr limit_wd, limit_wd, #3
/* Adjust the limit. Only low 3 bits used, so overflow irrelevant. */
add limit, limit, count
add tmp3, tmp3, count
/* Set the pre-start bytes of both words to 0xff so they compare
equal and contain no NUL marker. */
orr data1, data1, tmp2
orr data2, data2, tmp2
add limit_wd, limit_wd, tmp3, lsr #3
b .Lstart_realigned
.p2align 6
/* Don't bother with dwords for up to 16 bytes. */
.Lmisaligned8:
cmp limit, #16
b.hs .Ltry_misaligned_words
.Lbyte_loop:
/* Perhaps we can do better than this. */
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
subs limit, limit, #1
ccmp data1w, #1, #0, hi /* NZCV = 0b0000. */
ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
b.eq .Lbyte_loop
.Ldone:
sub result, data1, data2
ret
/* Align the SRC1 to a dword by doing a bytewise compare and then do
the dword loop. */
.Ltry_misaligned_words:
lsr limit_wd, limit, #3
cbz count, .Ldo_misaligned
neg count, count
and count, count, #7 /* Bytes needed to align SRC1. */
sub limit, limit, count
lsr limit_wd, limit, #3
.Lpage_end_loop:
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
cmp data1w, #1 /* C set iff byte != 0. */
ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
b.ne .Ldone
subs count, count, #1
b.hi .Lpage_end_loop
.Ldo_misaligned:
/* Prepare ourselves for the next page crossing. Unlike the aligned
loop, we fetch 1 less dword because we risk crossing bounds on
SRC2. */
mov count, #8
subs limit_wd, limit_wd, #1
b.lo .Ldone_loop
.Lloop_misaligned:
/* Stop before SRC2 crosses a 4K page boundary; handle the
crossing bytewise via .Lpage_end_loop. */
and tmp2, src2, #0xff8
eor tmp2, tmp2, #0xff8
cbz tmp2, .Lpage_end_loop
ldr data1, [src1], #8
ldr data2, [src2], #8
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp diff, #0, #0, eq
b.ne .Lnot_limit
subs limit_wd, limit_wd, #1
b.pl .Lloop_misaligned
.Ldone_loop:
/* We found a difference or a NULL before the limit was reached. */
/* NOTE(review): this label is reached when limit_wd is exhausted,
i.e. the dword budget ran out without a diff/NUL -- the comment
above appears inverted; confirm against upstream newlib. */
and limit, limit, #7
cbz limit, .Lnot_limit
/* Read the last word. */
sub src1, src1, 8
sub src2, src2, 8
ldr data1, [src1, limit]
ldr data2, [src2, limit]
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp diff, #0, #0, eq
b.ne .Lnot_limit
.Lret0:
mov result, #0
ret
.size strncmp, . - strncmp
#endif
|
4ms/metamodule-plugin-sdk
| 6,769
|
plugin-libc/newlib/libc/machine/aarch64/memset.S
|
/* Copyright (c) 2012-2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses
*
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memset-stub.c */
#else
#define dstin x0
#define val x1
#define valw w1
#define count x2
#define dst x3
#define dstend x4
#define tmp1 x5
#define tmp1w w5
#define tmp2 x6
#define tmp2w w6
#define zva_len x7
#define zva_lenw w7
#define L(l) .L ## l
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/*----------------------------------------------------------------------
   void *memset (void *dst, int val, size_t count)
   AAPCS64: dst = x0 (dstin), val = w1 (valw), count = x2; returns x0.
   Clobbers: x3-x7, v0, NZCV.
   Small sizes use overlapping stores from both ends; large zeroing
   (val == 0, count >= 256) uses DC ZVA when available, with the ZVA
   block size read from DCZID_EL0 at run time.
  ----------------------------------------------------------------------*/
def_fn memset p2align=6
dup v0.16B, valw /* Replicate fill byte into q0. */
add dstend, dstin, count
cmp count, 96
b.hi L(set_long)
cmp count, 16
b.hs L(set_medium)
mov val, v0.D[0]
/* Set 0..15 bytes: pairs of possibly-overlapping stores from each
end, selected by the bits of count. */
tbz count, 3, 1f
str val, [dstin]
str val, [dstend, -8]
ret
nop
1: tbz count, 2, 2f
str valw, [dstin]
str valw, [dstend, -4]
ret
2: cbz count, 3f
strb valw, [dstin]
tbz count, 1, 3f
strh valw, [dstend, -2]
3: ret
/* Set 17..96 bytes. */
L(set_medium):
str q0, [dstin]
tbnz count, 6, L(set96)
str q0, [dstend, -16]
tbz count, 5, 1f
str q0, [dstin, 16]
str q0, [dstend, -32]
1: ret
.p2align 4
/* Set 64..96 bytes. Write 64 bytes from the start and
32 bytes from the end. */
L(set96):
str q0, [dstin, 16]
stp q0, q0, [dstin, 32]
stp q0, q0, [dstend, -32]
ret
.p2align 3
nop
L(set_long):
and valw, valw, 255
bic dst, dstin, 15 /* 16-byte-aligned working pointer. */
str q0, [dstin]
cmp count, 256
ccmp valw, 0, 0, cs /* ZVA path only if count >= 256 and val == 0. */
b.eq L(try_zva)
L(no_zva):
sub count, dstend, dst /* Count is 16 too large. */
sub dst, dst, 16 /* Dst is biased by -32. */
sub count, count, 64 + 16 /* Adjust count and bias for loop. */
1: stp q0, q0, [dst, 32]
stp q0, q0, [dst, 64]!
L(tail64):
subs count, count, 64
b.hi 1b
/* Final (possibly overlapping) 64 bytes from the end. */
2: stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.p2align 3
L(try_zva):
mrs tmp1, dczid_el0
tbnz tmp1w, 4, L(no_zva) /* DZP bit set: DC ZVA prohibited. */
and tmp1w, tmp1w, 15 /* BS field: log2(words) of ZVA block. */
cmp tmp1w, 4 /* ZVA size is 64 bytes. */
b.ne L(zva_128)
/* Write the first and last 64 byte aligned block using stp rather
than using DC ZVA. This is faster on some cores.
*/
L(zva_64):
str q0, [dst, 16]
stp q0, q0, [dst, 32]
bic dst, dst, 63 /* Round down to the 64-byte ZVA granule. */
stp q0, q0, [dst, 64]
stp q0, q0, [dst, 96]
sub count, dstend, dst /* Count is now 128 too large. */
sub count, count, 128+64+64 /* Adjust count and bias for loop. */
add dst, dst, 128
nop
1: dc zva, dst /* Zero one 64-byte block. */
add dst, dst, 64
subs count, count, 64
b.hi 1b
stp q0, q0, [dst, 0]
stp q0, q0, [dst, 32]
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.p2align 3
L(zva_128):
cmp tmp1w, 5 /* ZVA size is 128 bytes. */
b.ne L(zva_other)
str q0, [dst, 16]
stp q0, q0, [dst, 32]
stp q0, q0, [dst, 64]
stp q0, q0, [dst, 96]
bic dst, dst, 127 /* Round down to the 128-byte ZVA granule. */
sub count, dstend, dst /* Count is now 128 too large. */
sub count, count, 128+128 /* Adjust count and bias for loop. */
add dst, dst, 128
1: dc zva, dst /* Zero one 128-byte block. */
add dst, dst, 128
subs count, count, 128
b.hi 1b
stp q0, q0, [dstend, -128]
stp q0, q0, [dstend, -96]
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
L(zva_other):
/* Generic ZVA block size: zva_len = 4 << BS bytes. */
mov tmp2w, 4
lsl zva_lenw, tmp2w, tmp1w
add tmp1, zva_len, 64 /* Max alignment bytes written. */
cmp count, tmp1
blo L(no_zva) /* Too small to be worth aligning for ZVA. */
sub tmp2, zva_len, 1
add tmp1, dst, zva_len
add dst, dst, 16
subs count, tmp1, dst /* Actual alignment bytes to write. */
bic tmp1, tmp1, tmp2 /* Aligned dc zva start address. */
beq 2f
1: stp q0, q0, [dst], 64
stp q0, q0, [dst, -32]
subs count, count, 64
b.hi 1b
2: mov dst, tmp1
sub count, dstend, tmp1 /* Remaining bytes to write. */
subs count, count, zva_len
b.lo 4f
3: dc zva, dst
add dst, dst, zva_len
subs count, count, zva_len
b.hs 3b
4: add count, count, zva_len
sub dst, dst, 32 /* Bias dst for tail loop. */
b L(tail64)
.size memset, . - memset
#endif
|
4ms/metamodule-plugin-sdk
| 6,488
|
plugin-libc/newlib/libc/machine/aarch64/strrchr.S
|
/*
strrchr - find last instance of a character in a string
Copyright (c) 2014, ARM Limited
All rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strchr-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
* Neon Available.
*/
/* Arguments and results. */
#define srcin x0
#define chrin w1
#define result x0
#define src x2
#define tmp1 x3
#define wtmp2 w4
#define tmp3 x5
#define src_match x6
#define src_offset x7
#define const_m1 x8
#define tmp4 x9
#define nul_match x10
#define chr_match x11
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_nul1 v3
#define vhas_nul2 v4
#define vhas_chr1 v5
#define vhas_chr2 v6
#define vrepmask_0 v7
#define vrepmask_c v16
#define vend1 v17
#define vend2 v18
/* Core algorithm.
For each 32-byte hunk we calculate a 64-bit syndrome value, with
two bits per byte (LSB is always in bits 0 and 1, for both big
and little-endian systems). For each tuple, bit 0 is set iff
the relevant byte matched the requested character; bit 1 is set
iff the relevant byte matched the NUL end of string (we trigger
off bit0 for the special case of looking for NUL). Since the bits
in the syndrome reflect exactly the order in which things occur
in the original string a count_trailing_zeros() operation will
identify exactly which byte is causing the termination, and why. */
/* Locals and temporaries. */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/*----------------------------------------------------------------------
   char *strrchr (const char *s, int c)
   AAPCS64: s = x0 (srcin), c = w1 (chrin); result in x0.
   Returns a pointer to the LAST occurrence of c in s, or NULL if c
   does not occur before the terminating NUL.
   Clobbers: x2-x3, x5-x11, w4, v0-v7, v16-v18, NZCV.
   Tracks, per 32-byte hunk, the most recent hunk containing a match
   (src_match) and its char syndrome (src_offset), until a NUL is seen;
   then picks the topmost match bit at or before the NUL.
  ----------------------------------------------------------------------*/
def_fn strrchr
/* Magic constant 0x40100401 to allow us to identify which lane
matches the requested byte. Magic constant 0x80200802 used
similarly for NUL termination. */
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin
bic src, srcin, #31 /* Work with aligned 32-byte hunks. */
dup vrepmask_c.4s, wtmp2
mov src_offset, #0
ands tmp1, srcin, #31
add vrepmask_0.4s, vrepmask_c.4s, vrepmask_c.4s /* equiv: lsl #1 */
b.eq .Laligned
/* Input string is not 32-byte aligned. Rather than forcing
the padding bytes to a safe value, we calculate the syndrome
for all the bytes, but then mask off those bits of the
syndrome that are related to the padding. */
ld1 {vdata1.16b, vdata2.16b}, [src], #32
neg tmp1, tmp1
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
/* NUL matches use bit 1 of each tuple, char matches bit 0. */
and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b
addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b // 256->128
addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b // 128->64
addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr1.16b // 128->64
mov nul_match, vhas_nul1.2d[0]
lsl tmp1, tmp1, #1 /* Two syndrome bits per padding byte. */
mov const_m1, #~0
mov chr_match, vhas_chr1.2d[0]
lsr tmp3, const_m1, tmp1
bic nul_match, nul_match, tmp3 // Mask padding bits.
bic chr_match, chr_match, tmp3 // Mask padding bits.
cbnz nul_match, .Ltail
.Lloop:
/* Remember the latest hunk that contained any char match. */
cmp chr_match, #0
csel src_match, src, src_match, ne
csel src_offset, chr_match, src_offset, ne
.Laligned:
ld1 {vdata1.16b, vdata2.16b}, [src], #32
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
addp vend1.16b, vhas_nul1.16b, vhas_nul2.16b // 256->128
and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b
addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
addp vend1.16b, vend1.16b, vend1.16b // 128->64
addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr1.16b // 128->64
mov nul_match, vend1.2d[0]
mov chr_match, vhas_chr1.2d[0]
cbz nul_match, .Lloop
/* NUL seen: rebuild its precise syndrome with the bit-1 mask. */
and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b
addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b
mov nul_match, vhas_nul1.2d[0]
.Ltail:
/* Work out exactly where the string ends. */
sub tmp4, nul_match, #1
eor tmp4, tmp4, nul_match /* Ones up to and including first NUL bit. */
ands chr_match, chr_match, tmp4 /* Drop matches past the NUL. */
/* And pick the values corresponding to the last match. */
csel src_match, src, src_match, ne
csel src_offset, chr_match, src_offset, ne
/* Count down from the top of the syndrome to find the last match. */
clz tmp3, src_offset
/* Src_match points beyond the word containing the match, so we can
simply subtract half the bit-offset into the syndrome. Because
we are counting down, we need to go back one more character. */
add tmp3, tmp3, #2
sub result, src_match, tmp3, lsr #1
/* But if the syndrome shows no match was found, then return NULL. */
cmp src_offset, #0
csel result, result, xzr, ne
ret
.size strrchr, . - strrchr
#endif
|
4ms/metamodule-plugin-sdk
| 5,428
|
plugin-libc/newlib/libc/machine/aarch64/memchr.S
|
/*
* memchr - find a character in a memory zone
*
* Copyright (c) 2014, ARM Limited
* All rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the company nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memchr-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
* Neon Available.
*/
/* Arguments and results. */
#define srcin x0
#define chrin w1
#define cntin x2
#define result x0
#define src x3
#define tmp x4
#define wtmp2 w5
#define synd x6
#define soff x9
#define cntrem x10
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_chr1 v3
#define vhas_chr2 v4
#define vrepmask v5
#define vend v6
/*
* Core algorithm:
*
* For each 32-byte chunk we calculate a 64-bit syndrome value, with two bits
* per byte. For each tuple, bit 0 is set if the relevant byte matched the
* requested character and bit 1 is not used (faster than using a 32bit
* syndrome). Since the bits in the syndrome reflect exactly the order in which
* things occur in the original string, counting trailing zeros allows to
* identify exactly which byte has matched.
*/
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/*----------------------------------------------------------------------
   void *memchr (const void *src, int c, size_t n)
   AAPCS64: src = x0 (srcin), c = w1 (chrin), n = x2 (cntin);
   result in x0: pointer to the first matching byte, or NULL.
   Clobbers: x3-x4, x6, x9-x10, w5, v0-v6, NZCV.
   Same 2-bits-per-byte syndrome scheme as strchrnul, but bounded by n:
   both the leading (alignment) and trailing (past-n) syndrome bits are
   masked off before the match position is extracted.
  ----------------------------------------------------------------------*/
def_fn memchr
/* Do not dereference srcin if no bytes to compare. */
cbz cntin, .Lzero_length
/*
* Magic constant 0x40100401 allows us to identify which lane matches
* the requested byte.
*/
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin
/* Work with aligned 32-byte chunks */
bic src, srcin, #31
dup vrepmask.4s, wtmp2
ands soff, srcin, #31 /* soff = misalignment within the chunk. */
and cntrem, cntin, #31 /* cntrem = n mod 32, for the tail mask. */
b.eq .Lloop
/*
* Input string is not 32-byte aligned. We calculate the syndrome
* value for the aligned 32 bytes block containing the first bytes
* and mask the irrelevant part.
*/
ld1 {vdata1.16b, vdata2.16b}, [src], #32
sub tmp, soff, #32
adds cntin, cntin, tmp /* cntin -= bytes consumed; C clear if done. */
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */
mov synd, vend.2d[0]
/* Clear the soff*2 lower bits */
lsl tmp, soff, #1
lsr synd, synd, tmp
lsl synd, synd, tmp
/* The first block can also be the last */
b.ls .Lmasklast
/* Have we found something already? */
cbnz synd, .Ltail
.Lloop:
ld1 {vdata1.16b, vdata2.16b}, [src], #32
subs cntin, cntin, #32
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
/* If we're out of data we finish regardless of the result */
b.ls .Lend
/* Use a fast check for the termination condition */
orr vend.16b, vhas_chr1.16b, vhas_chr2.16b
addp vend.2d, vend.2d, vend.2d
mov synd, vend.2d[0]
/* We're not out of data, loop if we haven't found the character */
cbz synd, .Lloop
.Lend:
/* Termination condition found, let's calculate the syndrome value */
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */
mov synd, vend.2d[0]
/* Only do the clear for the last possible block */
b.hi .Ltail
.Lmasklast:
/* Clear the (32 - ((cntrem + soff) % 32)) * 2 upper bits */
add tmp, cntrem, soff
and tmp, tmp, #31
sub tmp, tmp, #32
neg tmp, tmp, lsl #1
lsl synd, synd, tmp
lsr synd, synd, tmp
.Ltail:
/* Count the trailing zeros using bit reversing */
rbit synd, synd
/* Compensate the last post-increment */
sub src, src, #32
/* Check that we have found a character */
cmp synd, #0
/* And count the leading zeros */
clz synd, synd
/* Compute the potential result */
add result, src, synd, lsr #1
/* Select result or NULL */
csel result, xzr, result, eq
ret
.Lzero_length:
mov result, #0
ret
.size memchr, . - memchr
#endif
|
4ms/metamodule-plugin-sdk
| 5,918
|
plugin-libc/newlib/libc/machine/aarch64/memcmp.S
|
/* memcmp - compare memory
Copyright (c) 2018 Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*
* Copyright (c) 2017 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memcmp-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses.
*/
/* int memcmp (const void *src1, const void *src2, size_t limit)
   ABI:  AArch64 AAPCS64.
   In:   x0 = src1, x1 = src2, x2 = limit (byte count)
   Out:  w0 = <0 / 0 / >0  (first differing byte decides, unsigned)
   Clobbers: x3-x8 and flags; leaf function, no stack usage.  */
#define L(l) .L ## l
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define limit x2
#define result w0
/* Internal variables. */
#define data1 x3
#define data1w w3
#define data1h x4
#define data2 x5
#define data2w w5
#define data2h x6
#define tmp1 x7
#define tmp2 x8
/* Emit a global function \f in .text, aligned to 2^\p2align bytes,
   with ELF %function type.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
def_fn memcmp p2align=6
subs limit, limit, 8
b.lo L(less8)
/* At least 8 bytes: compare the first dword. */
ldr data1, [src1], 8
ldr data2, [src2], 8
cmp data1, data2
b.ne L(return)
subs limit, limit, 8
b.gt L(more16)
/* 8..16 bytes total: compare the final dword, addressed from the
   end via the now non-positive limit (loads may overlap the first
   dword, which has already compared equal). */
ldr data1, [src1, limit]
ldr data2, [src2, limit]
b L(return)
L(more16):
ldr data1, [src1], 8
ldr data2, [src2], 8
cmp data1, data2
bne L(return)
/* Jump directly to comparing the last 16 bytes for 32 byte (or less)
strings. */
subs limit, limit, 16
b.ls L(last_bytes)
/* We overlap loads between 0-32 bytes at either side of SRC1 when we
try to align, so limit it only to strings larger than 128 bytes. */
cmp limit, 96
b.ls L(loop16)
/* Align src1 and adjust src2 with bytes not yet done. */
and tmp1, src1, 15
add limit, limit, tmp1
sub src1, src1, tmp1
sub src2, src2, tmp1
/* Loop performing 16 bytes per iteration using aligned src1.
Limit is pre-decremented by 16 and must be larger than zero.
Exit if <= 16 bytes left to do or if the data is not equal. */
.p2align 4
L(loop16):
ldp data1, data1h, [src1], 16
ldp data2, data2h, [src2], 16
subs limit, limit, 16
ccmp data1, data2, 0, hi        /* If >16 bytes remain, compare low dwords;
                                   otherwise force NZCV=0 (ne) to exit. */
ccmp data1h, data2h, 0, eq      /* If low dwords equal, compare high dwords. */
b.eq L(loop16)
/* Loop exit: either a mismatch or <=16 bytes remain.  Recheck both
   dwords with plain compares to find which (if either) differed. */
cmp data1, data2
bne L(return)
mov data1, data1h
mov data2, data2h
cmp data1, data2
bne L(return)
/* Compare last 1-16 bytes using unaligned access. */
L(last_bytes):
add src1, src1, limit
add src2, src2, limit
ldp data1, data1h, [src1]
ldp data2, data2h, [src2]
cmp data1, data2
bne L(return)
mov data1, data1h
mov data2, data2h
cmp data1, data2
/* Compare data bytes and set return value to 0, -1 or 1. */
L(return):
#ifndef __AARCH64EB__
/* Little-endian: byte-reverse so the first differing byte ends up
   in the most significant position and decides the unsigned order. */
rev data1, data1
rev data2, data2
#endif
cmp data1, data2
L(ret_eq):
cset result, ne                 /* 0 if equal, 1 otherwise. */
cneg result, result, lo         /* Negate when data1 < data2 (unsigned). */
ret
.p2align 4
/* Compare up to 8 bytes.  Limit is [-8..-1]. */
L(less8):
adds limit, limit, 4
b.lo L(less4)
ldr data1w, [src1], 4
ldr data2w, [src2], 4
cmp data1w, data2w
b.ne L(return)
sub limit, limit, 4
L(less4):
adds limit, limit, 4
beq L(ret_eq)                   /* Zero bytes left: return 0 (flags Z set). */
L(byte_loop):
ldrb data1w, [src1], 1
ldrb data2w, [src2], 1
subs limit, limit, 1
ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */
b.eq L(byte_loop)
sub result, data1w, data2w
ret
.size memcmp, . - memcmp
#endif
/* ==== 4ms/metamodule-plugin-sdk : plugin-libc/newlib/libc/machine/aarch64/strlen.S (7,735 bytes) ==== */
/* Copyright (c) 2013-2015, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strlen-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses, min page size 4k.
*/
/* To test the page crossing code path more thoroughly, compile with
-DTEST_PAGE_CROSS - this will force all calls through the slower
entry path. This option is not intended for production use. */
/* size_t strlen (const char *s)
   ABI:  AArch64 AAPCS64.
   In:   x0 = s (srcin)
   Out:  x0 = length in bytes, not counting the terminating NUL (len)
   Clobbers: x1-x8 and flags; leaf function, no stack usage.
   Note: srcin/len and the tmp/has_nul pairs deliberately alias the
   same registers - they are never live at the same time.  */
/* Arguments and results. */
#define srcin x0
#define len x0
/* Locals and temporaries. */
#define src x1
#define data1 x2
#define data2 x3
#define has_nul1 x4
#define has_nul2 x5
#define tmp1 x4
#define tmp2 x5
#define tmp3 x6
#define tmp4 x7
#define zeroones x8
#define L(l) .L ## l
/* Emit a global function \f in .text, aligned to 2^\p2align bytes,
   with ELF %function type.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. A faster check
(X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
false hits for characters 129..255. */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
#ifdef TEST_PAGE_CROSS
# define MIN_PAGE_SIZE 15
#else
# define MIN_PAGE_SIZE 4096
#endif
/* Since strings are short on average, we check the first 16 bytes
of the string for a NUL character. In order to do an unaligned ldp
safely we have to do a page cross check first. If there is a NUL
byte we calculate the length from the 2 8-byte words using
conditional select to reduce branch mispredictions (it is unlikely
strlen will be repeatedly called on strings with the same length).
If the string is longer than 16 bytes, we align src so don't need
further page cross checks, and process 32 bytes per iteration
using the fast NUL check. If we encounter non-ASCII characters,
fallback to a second loop using the full NUL check.
If the page cross check fails, we read 16 bytes from an aligned
address, remove any characters before the string, and continue
in the main loop using aligned loads. Since strings crossing a
page in the first 16 bytes are rare (probability of
16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.
AArch64 systems have a minimum page size of 4k. We don't bother
checking for larger page sizes - the cost of setting up the correct
page size is just not worth the extra gain from a small reduction in
the cases taking the slow path. Note that we only care about
whether the first fetch, which may be misaligned, crosses a page
boundary. */
def_fn strlen p2align=6
/* Would an unaligned 16-byte load at srcin cross a page?  If so,
   take the slow aligned entry path. */
and tmp1, srcin, MIN_PAGE_SIZE - 1
mov zeroones, REP8_01
cmp tmp1, MIN_PAGE_SIZE - 16
b.gt L(page_cross)
ldp data1, data2, [srcin]
#ifdef __AARCH64EB__
/* For big-endian, carry propagation (if the final byte in the
string is 0x01) means we cannot use has_nul1/2 directly.
Since we expect strings to be small and early-exit,
byte-swap the data now so has_null1/2 will be correct. */
rev data1, data1
rev data2, data2
#endif
/* Accurate NUL check over the first 16 bytes:
   has_nulN = (dataN - 0x01..01) & ~(dataN | 0x7f..7f). */
sub tmp1, data1, zeroones
orr tmp2, data1, REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, REP8_7f
bics has_nul1, tmp1, tmp2
bic has_nul2, tmp3, tmp4
ccmp has_nul2, 0, 0, eq         /* Z set iff no NUL in either dword. */
beq L(main_loop_entry)
/* Enter with C = has_nul1 == 0. */
csel has_nul1, has_nul1, has_nul2, cc
mov len, 8
rev has_nul1, has_nul1
clz tmp1, has_nul1
csel len, xzr, len, cc          /* Base 0 or 8 depending on which dword hit. */
add len, len, tmp1, lsr 3       /* Plus byte offset within the dword. */
ret
/* The inner loop processes 32 bytes per iteration and uses the fast
NUL check. If we encounter non-ASCII characters, use a second
loop with the accurate NUL check. */
.p2align 4
L(main_loop_entry):
bic src, srcin, 15
sub src, src, 16                /* Pre-bias so the loop's pre-index lands right. */
L(main_loop):
ldp data1, data2, [src, 32]!
.Lpage_cross_entry:
/* Fast check: ((dataN - 0x01..01) & 0x80..80) != 0 - exact for
   ASCII, may false-positive on bytes 0x81..0xff. */
sub tmp1, data1, zeroones
sub tmp3, data2, zeroones
orr tmp2, tmp1, tmp3
tst tmp2, zeroones, lsl 7       /* zeroones << 7 == REP8_80. */
bne 1f
ldp data1, data2, [src, 16]
sub tmp1, data1, zeroones
sub tmp3, data2, zeroones
orr tmp2, tmp1, tmp3
tst tmp2, zeroones, lsl 7
beq L(main_loop)
add src, src, 16
1:
/* The fast check failed, so do the slower, accurate NUL check. */
orr tmp2, data1, REP8_7f
orr tmp4, data2, REP8_7f
bics has_nul1, tmp1, tmp2
bic has_nul2, tmp3, tmp4
ccmp has_nul2, 0, 0, eq
beq L(nonascii_loop)            /* No NUL - just non-ASCII bytes. */
/* Enter with C = has_nul1 == 0. */
L(tail):
#ifdef __AARCH64EB__
/* For big-endian, carry propagation (if the final byte in the
string is 0x01) means we cannot use has_nul1/2 directly. The
easiest way to get the correct byte is to byte-swap the data
and calculate the syndrome a second time. */
csel data1, data1, data2, cc
rev data1, data1
sub tmp1, data1, zeroones
orr tmp2, data1, REP8_7f
bic has_nul1, tmp1, tmp2
#else
csel has_nul1, has_nul1, has_nul2, cc
#endif
sub len, src, srcin             /* Bytes up to the start of this 16-byte pair. */
rev has_nul1, has_nul1
add tmp2, len, 8
clz tmp1, has_nul1
csel len, len, tmp2, cc         /* +8 if the NUL was in the second dword. */
add len, len, tmp1, lsr 3       /* Plus byte offset within the dword. */
ret
/* Slower 16-bytes/iteration loop using the accurate NUL check,
   for data containing non-ASCII bytes (unrolled twice). */
L(nonascii_loop):
ldp data1, data2, [src, 16]!
sub tmp1, data1, zeroones
orr tmp2, data1, REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, REP8_7f
bics has_nul1, tmp1, tmp2
bic has_nul2, tmp3, tmp4
ccmp has_nul2, 0, 0, eq
bne L(tail)
ldp data1, data2, [src, 16]!
sub tmp1, data1, zeroones
orr tmp2, data1, REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, REP8_7f
bics has_nul1, tmp1, tmp2
bic has_nul2, tmp3, tmp4
ccmp has_nul2, 0, 0, eq
beq L(nonascii_loop)
b L(tail)
/* Load 16 bytes from [srcin & ~15] and force the bytes that precede
srcin to 0x7f, so we ignore any NUL bytes before the string.
Then continue in the aligned loop. */
L(page_cross):
bic src, srcin, 15
ldp data1, data2, [src]
lsl tmp1, srcin, 3              /* Misalignment in bits (shift masks to 6 bits). */
mov tmp4, -1
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsr tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsl tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
#endif
orr tmp1, tmp1, REP8_80         /* Keep high bits so ORN yields 0x7f bytes. */
orn data1, data1, tmp1
orn tmp2, data2, tmp1
tst srcin, 8                    /* Did the string start in the second dword? */
csel data1, data1, tmp4, eq     /* If so, force all of data1 to non-NUL. */
csel data2, data2, tmp2, eq
b L(page_cross_entry)
.size strlen, . - strlen
#endif
/* ==== 4ms/metamodule-plugin-sdk : plugin-libc/newlib/libc/machine/aarch64/strnlen.S (5,909 bytes) ==== */
/* strnlen - calculate the length of a string with limit.
Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strlen-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
*/
/* size_t strnlen (const char *s, size_t limit)
   ABI:  AArch64 AAPCS64.
   In:   x0 = s (srcin), x1 = limit
   Out:  x0 = min(length of s, limit) (len)
   Clobbers: x2-x14 and flags; leaf function, no stack usage.  */
/* Arguments and results. */
#define srcin x0
#define len x0
#define limit x1
/* Locals and temporaries. */
#define src x2
#define data1 x3
#define data2 x4
#define data2a x5
#define has_nul1 x6
#define has_nul2 x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define tmp4 x11
#define zeroones x12
#define pos x13
#define limit_wd x14
/* Emit a global function \f in .text, aligned to 2^\p2align bytes,
   with ELF %function type.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
.text
.p2align 6
.Lstart:
/* Pre-pad to ensure critical loop begins an icache line. */
.rep 7
nop
.endr
/* Put this code here to avoid wasting more space with pre-padding. */
.Lhit_limit:
/* Limit exhausted before a NUL was found: return limit. */
mov len, limit
ret
def_fn strnlen
cbz limit, .Lhit_limit          /* limit == 0: answer is 0 (== limit). */
mov zeroones, #REP8_01
bic src, srcin, #15             /* 16-byte-aligned read pointer. */
ands tmp1, srcin, #15
b.ne .Lmisaligned
/* Calculate the number of full and partial words -1. */
sub limit_wd, limit, #1 /* Limit != 0, so no underflow. */
lsr limit_wd, limit_wd, #4 /* Convert to Qwords. */
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
/* The inner loop deals with two Dwords at a time. This has a
slightly higher start-up cost, but we should win quite quickly,
especially on cores with a high number of issue slots per
cycle, as we get much better parallelism out of the operations. */
/* Start of critial section -- keep to one 64Byte cache line. */
.Lloop:
ldp data1, data2, [src], #16
.Lrealigned:
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
bic has_nul1, tmp1, tmp2        /* Non-zero iff data1 contains a NUL byte. */
bic has_nul2, tmp3, tmp4        /* Non-zero iff data2 contains a NUL byte. */
subs limit_wd, limit_wd, #1
orr tmp1, has_nul1, has_nul2
ccmp tmp1, #0, #0, pl /* NZCV = 0000  */
b.eq .Lloop                     /* Loop while no NUL and Qwords remain. */
/* End of critical section -- keep to one 64Byte cache line. */
orr tmp1, has_nul1, has_nul2
cbz tmp1, .Lhit_limit /* No null in final Qword. */
/* We know there's a null in the final Qword. The easiest thing
to do now is work out the length of the string and return
MIN (len, limit). */
sub len, src, srcin             /* src already advanced past this Qword. */
cbz has_nul1, .Lnul_in_data2
/* NUL is in data1: shift the search window down by one Dword. */
#ifdef __AARCH64EB__
mov data2, data1
#endif
sub len, len, #8
mov has_nul2, has_nul1
.Lnul_in_data2:
#ifdef __AARCH64EB__
/* For big-endian, carry propagation (if the final byte in the
string is 0x01) means we cannot use has_nul directly. The
easiest way to get the correct byte is to byte-swap the data
and calculate the syndrome a second time. */
rev data2, data2
sub tmp1, data2, zeroones
orr tmp2, data2, #REP8_7f
bic has_nul2, tmp1, tmp2
#endif
sub len, len, #8
rev has_nul2, has_nul2
clz pos, has_nul2               /* Bit index of the first NUL byte. */
add len, len, pos, lsr #3 /* Bits to bytes. */
cmp len, limit
csel len, len, limit, ls /* Return the lower value. */
ret
.Lmisaligned:
/* Deal with a partial first word.
We're doing two things in parallel here;
1) Calculate the number of words (but avoiding overflow if
limit is near ULONG_MAX) - to do this we need to work out
limit + tmp1 - 1 as a 65-bit value before shifting it;
2) Load and mask the initial data words - we force the bytes
before the ones we are interested in to 0xff - this ensures
early bytes will not hit any zero detection. */
sub limit_wd, limit, #1
neg tmp4, tmp1
cmp tmp1, #8                    /* Flags reused below by csinv/csel (le). */
and tmp3, limit_wd, #15
lsr limit_wd, limit_wd, #4
mov tmp2, #~0
ldp data1, data2, [src], #16
lsl tmp4, tmp4, #3 /* Bytes beyond alignment -> bits. */
add tmp3, tmp3, tmp1
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp4 /* Shift (tmp1 & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp4 /* Shift (tmp1 & 63). */
#endif
add limit_wd, limit_wd, tmp3, lsr #4  /* Carry from the 65-bit sum. */
orr data1, data1, tmp2
orr data2a, data2, tmp2
csinv data1, data1, xzr, le     /* tmp1 > 8: whole of data1 is padding. */
csel data2, data2, data2a, le   /* tmp1 <= 8: padding is only in data1. */
b .Lrealigned
.size strnlen, . - .Lstart /* Include pre-padding in size. */
#endif
/* ==== 4ms/metamodule-plugin-sdk : plugin-libc/newlib/libc/machine/aarch64/strchr.S (5,783 bytes) ==== */
/*
strchr - find a character in a string
Copyright (c) 2014, ARM Limited
All rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strchr-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
* Neon Available.
*/
/* char *strchr (const char *s, int c)
   ABI:  AArch64 AAPCS64, requires Advanced SIMD (NEON).
   In:   x0 = s (srcin), w1 = c (chrin, used as a byte)
   Out:  x0 = pointer to first occurrence of c, or 0 (NULL) if the
         string ends first (result)
   Clobbers: x2-x5, v0-v7, v16-v18 and flags; leaf, no stack usage.  */
/* Arguments and results. */
#define srcin x0
#define chrin w1
#define result x0
#define src x2
#define tmp1 x3
#define wtmp2 w4
#define tmp3 x5
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_nul1 v3
#define vhas_nul2 v4
#define vhas_chr1 v5
#define vhas_chr2 v6
#define vrepmask_0 v7
#define vrepmask_c v16
#define vend1 v17
#define vend2 v18
/* Core algorithm.
For each 32-byte hunk we calculate a 64-bit syndrome value, with
two bits per byte (LSB is always in bits 0 and 1, for both big
and little-endian systems). For each tuple, bit 0 is set iff
the relevant byte matched the requested character; bit 1 is set
iff the relevant byte matched the NUL end of string (we trigger
off bit0 for the special case of looking for NUL). Since the bits
in the syndrome reflect exactly the order in which things occur
in the original string a count_trailing_zeros() operation will
identify exactly which byte is causing the termination, and why. */
/* Locals and temporaries. */
/* Emit a global function \f in .text, aligned to 2^\p2align bytes,
   with ELF %function type.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
def_fn strchr
/* Magic constant 0x40100401 to allow us to identify which lane
matches the requested byte. Magic constant 0x80200802 used
similarly for NUL termination. */
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin          /* Splat the target byte across 16 lanes. */
bic src, srcin, #31 /* Work with aligned 32-byte hunks. */
dup vrepmask_c.4s, wtmp2
ands tmp1, srcin, #31
add vrepmask_0.4s, vrepmask_c.4s, vrepmask_c.4s /* equiv: lsl #1 */
b.eq .Lloop
/* Input string is not 32-byte aligned. Rather than forcing
the padding bytes to a safe value, we calculate the syndrome
for all the bytes, but then mask off those bits of the
syndrome that are related to the padding. */
ld1 {vdata1.16b, vdata2.16b}, [src], #32
neg tmp1, tmp1
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
/* Reduce each lane's match to its magic-constant bit so the pair
   syndrome can be narrowed to 2 bits per byte. */
and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b
orr vend1.16b, vhas_nul1.16b, vhas_chr1.16b
orr vend2.16b, vhas_nul2.16b, vhas_chr2.16b
lsl tmp1, tmp1, #1              /* 2 syndrome bits per padding byte. */
addp vend1.16b, vend1.16b, vend2.16b // 256->128
mov tmp3, #~0
addp vend1.16b, vend1.16b, vend2.16b // 128->64
lsr tmp1, tmp3, tmp1
mov tmp3, vend1.2d[0]
bic tmp1, tmp3, tmp1 // Mask padding bits.
cbnz tmp1, .Ltail
.Lloop:
ld1 {vdata1.16b, vdata2.16b}, [src], #32
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
/* Use a fast check for the termination condition. */
orr vend1.16b, vhas_nul1.16b, vhas_chr1.16b
orr vend2.16b, vhas_nul2.16b, vhas_chr2.16b
orr vend1.16b, vend1.16b, vend2.16b
addp vend1.2d, vend1.2d, vend1.2d
mov tmp1, vend1.2d[0]
cbz tmp1, .Lloop
/* Termination condition found. Now need to establish exactly why
we terminated. */
and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b
orr vend1.16b, vhas_nul1.16b, vhas_chr1.16b
orr vend2.16b, vhas_nul2.16b, vhas_chr2.16b
addp vend1.16b, vend1.16b, vend2.16b // 256->128
addp vend1.16b, vend1.16b, vend2.16b // 128->64
mov tmp1, vend1.2d[0]
.Ltail:
/* Count the trailing zeros, by bit reversing... */
rbit tmp1, tmp1
/* Re-bias source. */
sub src, src, #32
clz tmp1, tmp1 /* And counting the leading zeros. */
/* Tmp1 is even if the target character was found first. Otherwise
we've found the end of string and we weren't looking for NUL. */
tst tmp1, #1
add result, src, tmp1, lsr #1   /* Syndrome bit index / 2 = byte offset. */
csel result, result, xzr, eq    /* NULL if we hit the NUL terminator. */
ret
.size strchr, . - strchr
#endif
/* ==== 4ms/metamodule-plugin-sdk : plugin-libc/newlib/libc/machine/aarch64/strcmp.S (6,465 bytes) ==== */
/* Copyright (c) 2012-2018, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* Assumptions:
*
* ARMv8-a, AArch64
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strcmp-stub.c */
#else
/* int strcmp (const char *s1, const char *s2)
   ABI:  AArch64 AAPCS64.
   In:   x0 = s1 (src1), x1 = s2 (src2)
   Out:  x0 = <0 / 0 / >0 (result; first differing byte, unsigned)
   Clobbers: x2-x11 and flags; leaf function, no stack usage.  */
/* Emit a global function \f in .text, aligned to 2^\p2align bytes,
   with ELF %function type.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#define L(label) .L ## label
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define result x0
/* Internal variables. */
#define data1 x2
#define data1w w2
#define data2 x3
#define data2w w3
#define has_nul x4
#define diff x5
#define syndrome x6
#define tmp1 x7
#define tmp2 x8
#define tmp3 x9
#define zeroones x10
#define pos x11
/* Start of performance-critical section -- one 64B cache line. */
def_fn strcmp p2align=6
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7                    /* Same alignment mod 8? */
b.ne L(misaligned8)
ands tmp1, src1, #7
b.ne L(mutual_align)
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
L(start_realigned):
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
cbz syndrome, L(loop_aligned)
/* End of performance-critical section -- one 64B cache line. */
L(end):
#ifndef __AARCH64EB__
rev syndrome, syndrome
rev data1, data1
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
clz pos, syndrome
rev data2, data2
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#else
/* For big-endian we cannot use the trick with the syndrome value
as carry-propagation can corrupt the upper bits if the trailing
bytes in the string contain 0x01. */
/* However, if there is no NUL byte in the dword, we can generate
the result directly. We can't just subtract the bytes as the
MSB might be significant. */
cbnz has_nul, 1f
cmp data1, data2
cset result, ne
cneg result, result, lo
ret
1:
/* Re-compute the NUL-byte detection, using a byte-reversed value. */
rev tmp3, data1
sub tmp1, tmp3, zeroones
orr tmp2, tmp3, #REP8_7f
bic has_nul, tmp1, tmp2
rev has_nul, has_nul
orr syndrome, diff, has_nul
clz pos, syndrome
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#endif
L(mutual_align):
/* Sources are mutually aligned, but are not currently at an
alignment boundary. Round down the addresses and then mask off
the bytes that precede the start point. */
bic src1, src1, #7
bic src2, src2, #7
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
ldr data1, [src1], #8
neg tmp1, tmp1 /* Bits to alignment -64. */
ldr data2, [src2], #8
mov tmp2, #~0
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#endif
/* Force the pre-string bytes in both words to non-NUL values;
   identical padding cannot create a false difference either. */
orr data1, data1, tmp2
orr data2, data2, tmp2
b L(start_realigned)
L(misaligned8):
/* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
checking to make sure that we don't access beyond page boundary in
SRC2. */
tst src1, #7
b.eq L(loop_misaligned)
L(do_misaligned):
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
cmp data1w, #1                  /* C clear iff data1w == 0 (NUL). */
ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
b.ne L(done)                    /* NUL hit or bytes differ. */
tst src1, #7
b.ne L(do_misaligned)
L(loop_misaligned):
/* Test if we are within the last dword of the end of a 4K page. If
yes then jump back to the misaligned loop to copy a byte at a time. */
and tmp1, src2, #0xff8
eor tmp1, tmp1, #0xff8
cbz tmp1, L(do_misaligned)
ldr data1, [src1], #8
ldr data2, [src2], #8
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
cbz syndrome, L(loop_misaligned)
b L(end)
L(done):
sub result, data1, data2
ret
.size strcmp, .-strcmp
#endif
/* ==== 4ms/metamodule-plugin-sdk : plugin-libc/newlib/libc/machine/frv/setjmp.S (2,546 bytes) ==== */
# setjmp/longjmp for Frv. The jmpbuf looks like this:
#
# Register jmpbuf offset
# R16-R31 0x0-0x03c
# R48-R63 0x40-0x7c
# FR16-FR31 0x80-0xbc
# FR48-FR63 0xc0-0xfc
# LR 0x100
# SP 0x104
# FP 0x108
#
# R8 contains the pointer to jmpbuf
#include <frv-asm.h>
.text
# int setjmp (jmp_buf env)
# FRV ABI: gr8 = pointer to jmpbuf (see layout comment at top of file);
# returns 0 in gr8 on the direct call.  Saves the callee-saved GPR/FPR
# pairs, LR, SP and FP at the fixed offsets given in the layout.
.global EXT(setjmp)
.type EXT(setjmp),@function
EXT(setjmp):
# R16-R31 at jmpbuf offsets 0x00-0x3c, stored as register pairs (stdi).
stdi gr16, @(gr8,0)
stdi gr18, @(gr8,8)
stdi gr20, @(gr8,16)
stdi gr22, @(gr8,24)
stdi gr24, @(gr8,32)
stdi gr26, @(gr8,40)
stdi gr28, @(gr8,48)
stdi gr30, @(gr8,56)
#if __FRV_GPR__ != 32
# R48-R63 at offsets 0x40-0x7c, only when the target has >32 GPRs.
stdi gr48, @(gr8,64)
stdi gr50, @(gr8,72)
stdi gr52, @(gr8,80)
stdi gr54, @(gr8,88)
stdi gr56, @(gr8,96)
stdi gr58, @(gr8,104)
stdi gr60, @(gr8,112)
stdi gr62, @(gr8,120)
#endif
#if __FRV_FPR__ != 0
# FR16-FR31 at offsets 0x80-0xbc, only when FPRs exist.
stdfi fr16, @(gr8,128)
stdfi fr18, @(gr8,136)
stdfi fr20, @(gr8,144)
stdfi fr22, @(gr8,152)
stdfi fr24, @(gr8,160)
stdfi fr26, @(gr8,168)
stdfi fr28, @(gr8,176)
stdfi fr30, @(gr8,184)
#if __FRV_FPR__ != 32
# FR48-FR63 at offsets 0xc0-0xfc, only when the target has >32 FPRs.
stdfi fr48, @(gr8,192)
stdfi fr50, @(gr8,200)
stdfi fr52, @(gr8,208)
stdfi fr54, @(gr8,216)
stdfi fr56, @(gr8,224)
stdfi fr58, @(gr8,232)
stdfi fr60, @(gr8,240)
stdfi fr62, @(gr8,248)
#endif
#endif
# LR (0x100), SP (0x104) and FP (0x108): LR goes via gr4 since it is
# a special register (movsg = move special to general).
movsg lr, gr4
sti gr4, @(gr8,256)
sti sp, @(gr8,260)
sti fp, @(gr8,264)
# Return 0: gr0 is the hard-wired zero register.
mov gr0,gr8
ret
.Lend1:
.size EXT(setjmp),.Lend1-EXT(setjmp)
# void longjmp (jmp_buf env, int val)
# FRV ABI: gr8 = pointer to jmpbuf, gr9 = val.  Restores everything
# setjmp saved (mirror-image loads at the same offsets), then returns
# through the restored LR so execution resumes at the setjmp call,
# which appears to return val (or 1 if val == 0, as ISO C requires).
.global EXT(longjmp)
.type EXT(longjmp),@function
EXT(longjmp):
# R16-R31 from jmpbuf offsets 0x00-0x3c, loaded as register pairs.
lddi @(gr8,0), gr16
lddi @(gr8,8), gr18
lddi @(gr8,16), gr20
lddi @(gr8,24), gr22
lddi @(gr8,32), gr24
lddi @(gr8,40), gr26
lddi @(gr8,48), gr28
lddi @(gr8,56), gr30
#if __FRV_GPR__ != 32
# R48-R63 from offsets 0x40-0x7c, only when the target has >32 GPRs.
lddi @(gr8,64), gr48
lddi @(gr8,72), gr50
lddi @(gr8,80), gr52
lddi @(gr8,88), gr54
lddi @(gr8,96), gr56
lddi @(gr8,104), gr58
lddi @(gr8,112), gr60
lddi @(gr8,120), gr62
#endif
#if __FRV_FPR__ != 0
# FR16-FR31 from offsets 0x80-0xbc, only when FPRs exist.
lddfi @(gr8,128), fr16
lddfi @(gr8,136), fr18
lddfi @(gr8,144), fr20
lddfi @(gr8,152), fr22
lddfi @(gr8,160), fr24
lddfi @(gr8,168), fr26
lddfi @(gr8,176), fr28
lddfi @(gr8,184), fr30
#if __FRV_FPR__ != 32
# FR48-FR63 from offsets 0xc0-0xfc, only when the target has >32 FPRs.
lddfi @(gr8,192), fr48
lddfi @(gr8,200), fr50
lddfi @(gr8,208), fr52
lddfi @(gr8,216), fr54
lddfi @(gr8,224), fr56
lddfi @(gr8,232), fr58
lddfi @(gr8,240), fr60
lddfi @(gr8,248), fr62
#endif
#endif
# LR (via gr4: movgs = move general to special), SP and FP.
ldi @(gr8,256), gr4
movgs gr4,lr
ldi @(gr8,260), sp
ldi @(gr8,264), fp
# Value to return is in r9. If zero, return 1
cmp gr9, gr0, icc0              # Compare val against zero.
setlos #1, gr8                  # Default return value 1.
ckne icc0, cc4                  # cc4 := (val != 0).
cmov gr9, gr8, cc4, 1           # If val != 0, return val instead.
ret
.Lend2:
.size EXT(longjmp),.Lend2-EXT(longjmp)
|
4ms/metamodule-plugin-sdk
| 2,325
|
plugin-libc/newlib/libc/machine/epiphany/setjmp.S
|
/* setjmp and longjmp
Copyright (c) 2011, Adapteva, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Adapteva nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. */
.file "setjmp.S"
.section .text
.align 4
.global _setjmp
.type _setjmp, %function
;; int setjmp (jmp_buf buf)
;; In:  r0 = buf
;; Out: r0 = 0 on the direct return
;; Saves lr and its pair register plus the callee-saved pairs
;; r4-r11 and r32-r39 with doubleword stores ([r0,N] displacements
;; scale by the access size, so strd slot N is byte offset 8*N),
;; then sp at word slot 18 (byte offset 72).
_setjmp:
strd lr,[r0]
strd r4,[r0,1]
strd r6,[r0,2]
strd r8,[r0,3]
strd r10,[r0,4]
strd r32,[r0,5]
strd r34,[r0,6]
strd r36,[r0,7]
strd r38,[r0,8]
str sp,[r0,18]
mov r0,#0 ; direct call returns 0
rts
.size _setjmp, .-_setjmp
.global setjmp
.set setjmp, _setjmp
.global _longjmp
;; void longjmp (jmp_buf buf, int val)
;; In:  r0 = buf, r1 = val
;; Restores the registers saved by _setjmp, then jumps through the
;; restored lr so the original setjmp call appears to return val
;; (or 1 when val == 0, as C requires).
_longjmp:
ldrd lr,[r0] ; return address / r15
ldrd r4,[r0,1]
ldrd r6,[r0,2]
ldrd r8,[r0,3]
ldrd r10,[r0,4]
ldrd r32,[r0,5]
ldrd r34,[r0,6]
ldrd r36,[r0,7]
ldrd r38,[r0,8]
ldr sp,[r0,18]
sub r1,r1,0 ; subtract 0 just to set the flags from val
mov r0,#1 ; default return value: 1
movne r0,r1 ; val != 0: return val itself
jr lr ; resume after the matching setjmp
.size _longjmp, .-_longjmp
.global longjmp
.set longjmp, _longjmp
|
4ms/metamodule-plugin-sdk
| 2,935
|
plugin-libc/newlib/libc/machine/lm32/setjmp.S
|
/*
* setjmp/longjmp for LatticeMico32.
* Contributed by Jon Beniston <jon@beniston.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.section .text
.align 4
.globl setjmp
.type setjmp,@function
.globl longjmp
.type longjmp,@function
/* setjmp: save all callee-saved state into the jmp_buf.
   In:  r1 - address of jmp_buf (19 words: r11-r25, gp, fp, sp, ra
        at word offsets 0..72)
   Out: r1 - 0 on the direct return (r1 doubles as the return reg)
*/
setjmp:
sw (r1+0), r11
sw (r1+4), r12
sw (r1+8), r13
sw (r1+12), r14
sw (r1+16), r15
sw (r1+20), r16
sw (r1+24), r17
sw (r1+28), r18
sw (r1+32), r19
sw (r1+36), r20
sw (r1+40), r21
sw (r1+44), r22
sw (r1+48), r23
sw (r1+52), r24
sw (r1+56), r25
sw (r1+60), gp
sw (r1+64), fp
sw (r1+68), sp
sw (r1+72), ra
mvi r1, 0
ret
/* longjmp: restore all callee-saved registers from the jmp_buf and
   resume execution after the matching setjmp.
   In: r1 - address of jmp_buf
       r2 - value the setjmp call should appear to return
   Per C99 7.13.2.1, longjmp(env, 0) must make setjmp return 1, so a
   zero value is replaced by 1 before returning (the original code
   returned val unchanged, letting setjmp observe 0).
*/
.global longjmp
.type longjmp,@function
.align 4
longjmp:
lw r11, (r1+0)
lw r12, (r1+4)
lw r13, (r1+8)
lw r14, (r1+12)
lw r15, (r1+16)
lw r16, (r1+20)
lw r17, (r1+24)
lw r18, (r1+28)
lw r19, (r1+32)
lw r20, (r1+36)
lw r21, (r1+40)
lw r22, (r1+44)
lw r23, (r1+48)
lw r24, (r1+52)
lw r25, (r1+56)
lw gp, (r1+60)
lw fp, (r1+64)
lw sp, (r1+68)
lw ra, (r1+72)
mv r1, r2 /* tentatively return val */
bne r2, r0, .Llj_ret /* val != 0: return it unchanged */
mvi r1, 1 /* val == 0: setjmp must see 1 (C99 7.13.2.1) */
.Llj_ret:
ret
|
4ms/metamodule-plugin-sdk
| 1,979
|
plugin-libc/newlib/libc/machine/moxie/setjmp.S
|
/* A setjmp.c for Moxie
Copyright (C) 2009, 2019 Anthony Green
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
# setjmp/longjmp for moxie. The jmpbuf looks like this:
#
# Register jmpbuf offset
# $r6 0x00
# $r7 0x04
# $r8 0x08
# $r9 0x0c
# $r10 0x10
# $sp 0x14
# $fp 0x18
# stack frame fp 0x1c
# stack frame ra 0x20
# stack frame sc 0x24
.text
.global setjmp
.type setjmp,@function
# int setjmp (jmp_buf buf)
# In:  $r0 = buf
# Out: $r0 = 0 on the direct return
# Saves $r6-$r10, then $sp at 0x14 and $fp at 0x18, and copies the
# three words of the current stack frame (0x00/0x04/0x08 off $fp)
# into the buffer at 0x1c/0x20/0x24 using $r1 as scratch.
setjmp:
st.l ($r0), $r6
sto.l 0x04($r0), $r7
sto.l 0x08($r0), $r8
sto.l 0x0c($r0), $r9
sto.l 0x10($r0), $r10
sto.l 0x14($r0), $sp
sto.l 0x18($r0), $fp
ldo.l $r1, 0x00($fp) # frame word 0
sto.l 0x1c($r0), $r1
ldo.l $r1, 0x04($fp) # frame word 1
sto.l 0x20($r0), $r1
ldo.l $r1, 0x08($fp) # frame word 2
sto.l 0x24($r0), $r1
xor $r0, $r0 # return 0
ret
.Lend1:
.size setjmp,.Lend1-setjmp
.global longjmp
.type longjmp,@function
# void longjmp (jmp_buf buf, int val)
# In:  $r0 = buf, $r1 = val
# Restores $r6-$r10, $sp and $fp, rewrites the three saved
# stack-frame words back through $fp, then returns so the original
# setjmp call appears to return val (or 1 when val == 0, as C
# requires).
longjmp:
ldo.l $r6, 0x00($r0)
ldo.l $r7, 0x04($r0)
ldo.l $r8, 0x08($r0)
ldo.l $r9, 0x0c($r0)
ldo.l $r10, 0x10($r0)
ldo.l $sp, 0x14($r0)
ldo.l $fp, 0x18($r0)
ldo.l $r2, 0x1c($r0) # put the three frame words back, $r2 = scratch
sto.l 0x0($fp), $r2
ldo.l $r2, 0x20($r0)
sto.l 0x4($fp), $r2
ldo.l $r2, 0x24($r0)
sto.l 0x8($fp), $r2
ldo.l $r2, 0x08($r0) # NOTE(review): appears dead -- $r2 is zeroed by the xor below before any use; confirm and remove
mov $r0, $r1 # $r0 = val (return value)
xor $r2, $r2 # $r2 = 0
cmp $r0, $r2
beq .Lreturn1 # val == 0: C requires setjmp to return 1
ret
.Lreturn1:
inc $r0, 1 # turn 0 into 1
ret
.Lend2:
.size longjmp,.Lend2-longjmp
|
4ms/metamodule-plugin-sdk
| 2,776
|
plugin-libc/newlib/libc/machine/microblaze/setjmp.S
|
/* Copyright (c) 2001, 2009 Xilinx, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Xilinx nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* setjmp - save stack context for non-local goto
* args - r5 - jmp_buf
*
* jmpbuf frame structure
* ---------------------
*
* +-------------+ + 0
* | r1 |
* +-------------+ + 4
* | r13 |
* | . |
* | . |
* | . |
* | r31 |
* +-------------+ + 80
* | . |
* | . |
*/
.globl setjmp
.section .text
.align 2
.ent setjmp
/* int setjmp (jmp_buf buf)
   In:  r5 = buf
   Out: r3 = 0 on the direct return
   Stores the stack pointer (r1) and registers r13-r31 -- including
   r15, the caller's return address -- at word offsets 0..76 per the
   layout table above, then returns with r3 = 0 from the delay slot
   of rtsd.  */
setjmp:
swi r1, r5, 0
swi r13, r5, 4
swi r14, r5, 8
swi r15, r5, 12
swi r16, r5, 16
swi r17, r5, 20
swi r18, r5, 24
swi r19, r5, 28
swi r20, r5, 32
swi r21, r5, 36
swi r22, r5, 40
swi r23, r5, 44
swi r24, r5, 48
swi r25, r5, 52
swi r26, r5, 56
swi r27, r5, 60
swi r28, r5, 64
swi r29, r5, 68
swi r30, r5, 72
swi r31, r5, 76
rtsd r15, 8 /* return to caller */
or r3, r0, r0 /* delay slot: r3 = 0 */
.end setjmp
|
4ms/metamodule-plugin-sdk
| 2,800
|
plugin-libc/newlib/libc/machine/microblaze/longjmp.S
|
/* Copyright (c) 2001, 2009 Xilinx, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Xilinx nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* longjmp - non-local jump to a saved stack context
* args - r5 - jmp_buf
* r6 - val
*
* jmpbuf frame structure
* ---------------------
*
* +-------------+ + 0
* | r1 |
* +-------------+ + 4
* | r13 |
* | . |
* | . |
* | . |
* | r31 |
* +-------------+ + 80
* | . |
* | . |
*/
/* void longjmp (jmp_buf buf, int val)
   In:  r5 = buf (saved by setjmp), r6 = val
   Restores r1 (stack pointer) and r13-r31 from the jmp_buf, then
   returns through the restored r15 (setjmp's return address) with
   r3 = val.  Per C99 7.13.2.1, longjmp(env, 0) must make setjmp
   return 1, so a zero value is replaced by 1 (the original code
   returned val unchanged, letting setjmp observe 0).  */
.globl longjmp
.section .text
.align 2
.ent longjmp
longjmp:
lwi r1, r5, 0
lwi r13, r5, 4
lwi r14, r5, 8
lwi r15, r5, 12
lwi r16, r5, 16
lwi r17, r5, 20
lwi r18, r5, 24
lwi r19, r5, 28
lwi r20, r5, 32
lwi r21, r5, 36
lwi r22, r5, 40
lwi r23, r5, 44
lwi r24, r5, 48
lwi r25, r5, 52
lwi r26, r5, 56
lwi r27, r5, 60
lwi r28, r5, 64
lwi r29, r5, 68
lwi r30, r5, 72
lwi r31, r5, 76
bnei r6, .Llj_nz /* val != 0: return it as-is */
addik r6, r0, 1 /* val == 0: force setjmp to see 1 */
.Llj_nz:
rtsd r15, 8 /* return to setjmp's caller */
or r3, r0, r6 /* delay slot: r3 = return value */
.end longjmp
|
4ms/metamodule-plugin-sdk
| 6,575
|
plugin-libc/newlib/libc/machine/hppa/memcpy.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision$ */
/*
* memcpy(s1, s2, n)
*
* Copy n characters from s2 to s1; returns s1.
*/
#define d_addr arg0
#define s_addr arg1
#define count arg2
#define tmp5 arg3
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp6 r31
#include "DEFS.h"
/*
 * void *memcpy (void *s1, const void *s2, size_t n)
 * In:   arg0 (d_addr) = s1, arg1 (s_addr) = s2, arg2 (count) = n
 * Out:  ret0 = s1
 * Strategy: when the operands share word alignment, copy 16-byte
 * chunks, then single words, then a masked final store (stbys
 * handles the partial word at each end).  When they do not, words
 * are funnel-shifted through the SAR (cr11) with vshd before being
 * stored.  Small counts fall through to the byte-at-a-time loop.
 * Heavy use of nullification and branch delay slots: read each
 * branch together with the instruction after it.
 */
ENTRY(memcpy)
comib,>= 5,count,byteloop /* If count is <= 6 don't get fancy.*/
movb,=,n d_addr,ret0,done /* The return value is defined to be the value of d_addr. DELAY SLOT */
/* if d_addr is null then exit */
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address. */
extru d_addr,31,2,tmp2 /* Extract the low two bits of the destination address. */
add count,tmp2,count /* pre increment the count to adjust for alignment of s1 */
comb,<> tmp2,tmp1,not_aligned /* see if s1 is aligned w.r.t. s2. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT. */
/* aligned */
/* We will now begin the 16 byte at a time word move if count >= 16 ! */
/* Else we will branch to the 4 byte-at-a time word move ! */
addibt,<,n -16,count,chekchunk /* If count < 16 then we can't move 16 byte chunks ! */
/* actually we can legally move 13 or more bytes on the first loop. */
/* These loads and stores are done so as to prevent processor interlock. */
chunks:
ldwm 16(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 16 */
ldw -12(0,s_addr),tmp2 /* tmp2 = 2nd word */
ldw -8(0,s_addr),tmp3 /* tmp3 = 3rd word */
ldw -4(0,s_addr),tmp4 /* tmp4 = 4th word */
/* Now store the results ! */
stbys,b,m tmp1,4(0,d_addr) /* tmp1 = 1st word stored d_addr += 16 also take care of front porch. */
stwm tmp2,4(0,d_addr) /* tmp2 = 2nd word stored. */
stwm tmp3,4(0,d_addr) /* tmp3 = 3rd word stored. */
addibf,< -16,count,chunks /* If count is still >= 16 do another loop. */
stwm tmp4,4(0,d_addr) /* tmp4 = 4th word stored. DELAY SLOT */
chekchunk:
addibt,<,n 12,count,back_porch /* since the count is already decremented by -16 we're testing */
/* to see if there are at least 4 bytes left ? */
subchunk:
ldws,ma 4(s_addr),tmp1 /* tmp1 = *s_addr++ */
addibf,< -4,count,subchunk /* count -= 4 */
stbys,b,m tmp1,4(d_addr) /* *d_addr++ = tmp1 */
back_porch:
addibt,=,n 4,count,done /* if count = 0 we're, of course, done ! */
ldws 0(s_addr),tmp1 /* load up the back_porch */
add d_addr,count,d_addr/* final store address is +1 too high ! */
bv 0(r2) /* return--were done. */
stbys,e tmp1,0(d_addr) /* masked final store. DELAY SLOT */
/* Begin non_aligned code. (no refrence to politics) */
not_aligned:
sub,>= tmp2,tmp1,tmp3 /* compute the shift quantity again and skip the load if tmp2 > tmp1. */
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++ */
zdep tmp3,28,29,tmp4 /* compute the number of bits to shift based on the number of bytes above. */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register. */
addibt,<,n -16,count,chkchnk2 /* first step in pre adjustment of count for looping. */
chunk2:
ldwm 16(0,s_addr),tmp2 /* get either first or second word . tmp2 = *s_addr++ */
ldw -12(s_addr),tmp3
ldw -8(s_addr),tmp4
ldw -4(s_addr),tmp5
vshd tmp1,tmp2,tmp6 /* position data ! */
stbys,b,m tmp6,4(0,d_addr) /* store ! */
vshd tmp2,tmp3,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store ! */
vshd tmp3,tmp4,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store ! */
vshd tmp4,tmp5,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store the data ! */
addibf,< -16,count,chunk2 /* If count is still >= 16 do another loop. */
copy tmp5,tmp1 /* DELAY SLOT: carry last word into the next iteration */
chkchnk2:
addibt,<,n 12,count,bp_0 /* if we don't have 4 bytes left then do the back porch (bp_0) */
subchnk2:
ldwm 4(0,s_addr),tmp2 /* get next word ! */
vshd tmp1,tmp2,tmp3 /* position data ! */
addibt,< -4,count,bp_1 /* decrement count and when count < 4 goto back_porch (bp_1) */
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get 4th word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
addib,>= -4,count,subchnk2 /* decrement count and when count <= 4 go to back porch (bp_2) */
stbys,b,m tmp3,4(0,d_addr) /* store the data ! */
bp_0: copy tmp1,tmp2 /* switch registers used in the shift process. */
bp_1: addibt,<=,n 4,count,done /* if count = -4 this implies that count = 0 -> done */
add d_addr,count,d_addr /* bump destination address to be +1 too high ! */
mfctl sar,tmp3 /* suppress final ldwm unless result used */
extru tmp3,28,2,tmp3 /* convert bitshift to byteshift */
sub,<= count,tmp3,r0 /* bytes unused if (count - byteshift) <= 0 */
ldwm 4(0,s_addr),tmp1 /* get final word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
bv 0(r2) /* return */
stbys,e tmp3,0(0,d_addr) /* store the data ! */
/* here we do ye old byte-at-a-time moves. */
byteloop:
comb,>=,n 0,count,done
encore:
ldbs,ma 1(s_addr),tmp1
addibf,= -1,count,encore
stbs,ma tmp1,1(d_addr)
done:
EXIT(memcpy)
|
4ms/metamodule-plugin-sdk
| 8,424
|
plugin-libc/newlib/libc/machine/hppa/strncat.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*HPUX_ID: @(#) $Revision$ */
/* strncat(s1,s2,n) : concatenate at most n characters from s2 onto s1 */
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define count r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define tmp6 r31
#define save r1
#define tmp7 ret1 /* source offset-- reset to orig source addr if not aligned */
/*
 * char *strncat (char *s1, const char *s2, size_t n)
 * In:   r26 (d_addr) = s1, r25 (s_addr) = s2, r24 (count) = n
 * Out:  ret0 = s1; a terminating null is always stored at 'done'.
 * Finds the terminating null of s1 a word at a time (uxor,nbz
 * nullifies the branch when no byte is zero), then appends at most
 * n characters from s2 using word copies -- funnel-shifted through
 * the SAR (cr11) with vshd when the strings are not mutually
 * aligned -- and finishes byte-wise once a null or the count limit
 * is found.  Heavy use of nullification and delay slots.
 * NOTE(review): mtctl is written "mtctl save,11" in some places and
 * "mtctl save,r11" in others, both apparently targeting the SAR
 * (cr11); confirm the assembler treats the two spellings the same.
 */
ENTRY(strncat)
comb,= r0,s_addr,quit /* quit if s2=NULL */
copy d_addr,ret0 /* The return value is the value of d_addr. DELAY SLOT*/
/* First look for end of s1 (d_addr) */
extru d_addr,31,2,tmp1 /* Extract the low two bits of the dest address. */
combt,= tmp1,r0,dont_mask
dep 0,31,2,d_addr /*set word alignment */
ldwm 4(d_addr),tmp2
sh3add tmp1,r0,save /* build mask based on tmp1 */
mtctl save,11
zvdepi -2,32,save
or save,tmp2,tmp2 /* force the bytes before s1 to be non-null */
uxor,nbz tmp2,r0,save
search:
b,n found_end /* nullified under uxor conditions above and below */
dont_mask:
ldwm 4(d_addr),tmp2
comib,tr r0,r0,search
uxor,nbz tmp2,r0,save
found_end: /* at this point d_addr points to word */
extru,<> save,7,8,r0 /* following word with null */
addib,tr,n -4,d_addr,begin_copy /*set d_addr to end of s1 */
extru,<> save,15,8,r0
addib,tr,n -3,d_addr,begin_copy
extru,<> save,23,8,r0
addi -1,d_addr,d_addr
addi -1,d_addr,d_addr
begin_copy:
addibt,<,n -4,count,byteloop /* If count is <= 4 don't get fancy.*/
extru s_addr,31,2,tmp4 /* Extract the low two bits of the source address.*/
extru d_addr,31,2,tmp5 /* Extract the low two bits of the destination address.*/
add count,tmp5,count /* pre increment the count by the byte address so that the count is*/
copy s_addr,tmp6 /* save original s_addr in case we find null in first word */
copy s_addr, tmp7 /* save s_addr in case we find null before first store */
comb,<> tmp5,tmp4,not_aligned /* branch if tmp5<>tmp4. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT.*/
/* aligned*/
combt,= tmp5,r0,skip_mask
ldwm 4(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 4 (DELAY SLOT)*/
sh3add tmp5,r0,save /* compute mask in save*/
mtctl save,11
zvdepi -2,32,save
or save,tmp1,tmp1 /* or mask with data*/
uxor,nbz tmp1,r0,save /* check for null*/
b,n null1
addibt,< -4,count,back_porch
stbys,b,m tmp1,4(0,d_addr) /* store word (delay slot)*/
chunks:
ldwm 4(0,s_addr),tmp1 /* get a word*/
skip_mask:
uxor,nbz tmp1,r0,save /* check for null*/
b,n align_null1
addibf,< -4,count,chunks
stbys,b,m tmp1,4(0,d_addr) /* store word (delay slot)*/
back_porch: /* last word to store*/
addibt,=,n 4,count,done /* if count = 0 we're, of course, done !*/
ldws 0(s_addr),tmp1 /* load up the back_porch*/
sh3add count,r0, save /* setup right mask based on count*/
mtctl save,r11
zvdepi -2,32,save /*save now has left-hand mask*/
uaddcm r0,save,save /*form right hand mask */
or tmp1,save,tmp1 /*and insert data*/
uxor,nbz tmp1,r0,save /* check for null*/
b,n null2
add d_addr,count,d_addr/* final store address is +1 too high !*/
b done
stbys,e tmp1,0(d_addr) /* masked final store (delay slot) */
/* Begin non_aligned code. */
not_aligned:
sub,>= tmp5,tmp4,tmp6 /* compute the shift amt.and skip load if tmp5 > tmp4.*/
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++*/
zdep tmp6,28,29,tmp4 /* compute the number of bits to shift */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register.*/
addibt,<,n -4,count,chkchnk2 /* first step in pre adjustment of count for looping.*/
ldwm 4(0,s_addr),tmp2 /* get either first or second word from source. */
combt,= tmp5,r0,skip_mask4 /* don't mask if whole word is valid*/
vshd tmp1,tmp2,tmp3 /* position data ! (delay slot)*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* re-load the shift count into cr11 */
skip_mask4:
uxor,nbz tmp3, r0, save
b,n null4 /* special case for first word */
copy r0, tmp5 /* zero out tmp5 so we don't try to mask again*/
copy r0, tmp7 /* zero out tmp7 so we don't try to use original s_addr anymore */
b continue
stbys,b,m tmp3,4(0,d_addr) /* store ! */
chunk2:
ldwm 4(0,s_addr),tmp2
vshd tmp1,tmp2,tmp3
skip_mask2:
uxor,nbz tmp3, r0, save
b,n null3
stbys,b,m tmp3,4(0,d_addr) /* store ! */
continue:
ldwm 4(0,s_addr),tmp1 /* get 2nd word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
uxor,nbz tmp3, r0, save
b,n null3
addibf,< -8,count,chunk2 /* If count is still >= 8 do another loop.*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
chkchnk2:
addibt,<,n 4,count,bp_0 /* if we don't have 4 bytes left then do the back porch (bp_0)*/
subchnk2: /* we have less than 8 chars to copy*/
ldwm 4(0,s_addr),tmp2 /* get next word !*/
combt,= tmp5,r0,skip_mask3
vshd tmp1,tmp2,tmp3 /* position data !*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* restore shift value again */
skip_mask3:
uxor,nbz tmp3,r0,save
b,n null3
copy r0,tmp5 /* zero out tmp5 so null3 does correct alignment */
copy r0,tmp7 /* zero out tmp7 so we don't use orignal s_addr since no longer valid */
b bp_1 /* we now have less than 4 bytes to move*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
bp_0:
copy tmp1,tmp2 /* switch registers for shift process */
addibt,<=,n 4,count,done /* if count = -4 this implies that count = 0 -> done */
bp_1:
ldwm 4(0,s_addr),tmp1 /* get final word ! */
vshd tmp2,tmp1,tmp3 /* position data !*/
uxor,nbz tmp3,r0,save /* if no-byte-zero */
b,n bp_null /* don't goto no_null-find which null instead */
no_null:
add d_addr,count,d_addr /* set up d_addr for stbys,e */
b done /* were done*/
stbys,e tmp3,0(0,d_addr) /* store the data !*/
/* here we do ye old byte-at-a-time moves.*/
align_null1:
b byteloop
addi -4,s_addr,s_addr
null1:
copy tmp6,s_addr /* restore orig s_addr (aligned only) */
byteloop:
addibt,= 4,count,done
null2:
ldbs,ma 1(s_addr),tmp1
encore:
combt,=,n tmp1,r0, done
stbs,ma tmp1,1(d_addr)
addibf,=,n -1,count,encore
ldbs,ma 1(s_addr),tmp1
b,n done
bp_null:
addi -4,count,count /* fudge count 'cause byteloop will re-increment */
null3: /* not_aligned case reset s_addr and finish byte-wise */
combt,=,n r0,tmp7,null3a /* if tmp7 is not valid address then branch below */
b byteloop /* otherwise reset s_addr to tmp7 and finish */
copy tmp7, s_addr
null3a: /* right shift target */
addibt,<,n 0,tmp6,null3b /* if left shifting */
sub r0,tmp6,tmp6 /* do null3b code */
addi -4,tmp6,tmp6
b byteloop
add tmp6,s_addr,s_addr /* reset s_addr by 4 + shift_amt */
null3b:
subi -8,tmp6,tmp6
add tmp5,tmp6,tmp6 /* adjust by the dest offset if this is our first store */
b byteloop
add tmp6,s_addr,s_addr /* adjust s_addr by (8-shift_amt-dest_off) */
null4:
add,> tmp6,r0,tmp6 /* if left shift */
b,n null3 /* then do null3 */
b byteloop
addi -4,s_addr,s_addr /* adj source only by 4 */
done:
bv 0(r2)
stbs r0,0(d_addr) /* DELAY SLOT: always null-terminate the result */
quit:
EXIT(strncat)
|
4ms/metamodule-plugin-sdk
| 4,191
|
plugin-libc/newlib/libc/machine/hppa/setjmp.S
|
/* Copyright (c) 1995, 2002 Red Hat Incorporated.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* The name of Red Hat Incorporated may not be used to endorse
* or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Note I don't know an easy way to get the FP version into the
traditional C library and the non-FP version into the soft-float
library. Maybe we'll have to have -msoft-float trigger something
like -DSOFT_FLOAT if this issue ever arises. */
#include "DEFS.h"
#if 0
.SPACE $PRIVATE$
.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
.SPACE $TEXT$
.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
.IMPORT $global$,DATA
.IMPORT $$dyncall,MILLICODE
; gcc_compiled.:
#endif
TEXT_SEGMENT
.align 4
; int setjmp (jmp_buf buf)
; In:   %r26 (arg0) = buf
; Out:  %r28 (ret0) = 0 on the direct return
; Stores %r30 (sp), %r2 (rp) and the callee-saved GPRs %r3-%r18
; plus %r27 with post-incrementing stwm; when FP is defined, also
; spills %fr12-%fr15 one word at a time through a scratch slot
; below %sp, because the jmp_buf may be only 4-byte aligned.
.EXPORT setjmp,ENTRY,PRIV_LEV=3,ARGW0=GR,RTNVAL=GR
setjmp
.PROC
.CALLINFO FRAME=64,NO_CALLS,SAVE_SP,ENTRY_GR=3
.ENTRY
stwm %r30,4(%r26) ; stack pointer
stwm %r2,4(%r26) ; return pointer
stwm %r3,4(%r26)
stwm %r4,4(%r26)
stwm %r5,4(%r26)
stwm %r6,4(%r26)
stwm %r7,4(%r26)
stwm %r8,4(%r26)
stwm %r9,4(%r26)
stwm %r10,4(%r26)
stwm %r11,4(%r26)
stwm %r12,4(%r26)
stwm %r13,4(%r26)
stwm %r14,4(%r26)
stwm %r15,4(%r26)
stwm %r16,4(%r26)
stwm %r17,4(%r26)
stwm %r18,4(%r26)
stwm %r27,4(%r26)
#ifdef FP
; jmp_buf may only have a 4 byte alignment, so handle FP stores
; very carefully.
fstds %fr12,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr13,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr14,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr15,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
#endif
bv 0(%r2) ; return to caller
copy %r0,%r28 ; delay slot: ret0 = 0
.EXIT
.PROCEND
.align 4
; void longjmp (jmp_buf buf, int val)
; In:   %r26 (arg0) = buf, %r25 (arg1) = val
; Restores everything setjmp saved (same jmp_buf layout), then
; returns through the restored %r2 so the original setjmp call
; appears to return val, or 1 when val == 0 as C requires.
.EXPORT longjmp,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,RTNVAL=GR
longjmp
.PROC
.CALLINFO FRAME=64,NO_CALLS,SAVE_SP,ENTRY_GR=3
.ENTRY
ldwm 4(%r26),%r30 ; stack pointer
ldwm 4(%r26),%r2 ; return pointer
ldwm 4(%r26),%r3
ldwm 4(%r26),%r4
ldwm 4(%r26),%r5
ldwm 4(%r26),%r6
ldwm 4(%r26),%r7
ldwm 4(%r26),%r8
ldwm 4(%r26),%r9
ldwm 4(%r26),%r10
ldwm 4(%r26),%r11
ldwm 4(%r26),%r12
ldwm 4(%r26),%r13
ldwm 4(%r26),%r14
ldwm 4(%r26),%r15
ldwm 4(%r26),%r16
ldwm 4(%r26),%r17
ldwm 4(%r26),%r18
ldwm 4(%r26),%r27
#ifdef FP
; rebuild each FP double through the scratch slot below %sp,
; mirroring setjmp's word-at-a-time spills
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr12
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr13
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr14
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr15
#endif
comclr,<> %r0,%r25,%r0 ; if val != 0, nullify the next instruction
ldi 1,%r25 ; val == 0: C requires setjmp to return 1
bv 0(%r2) ; return through the restored rp
copy %r25,%r28 ; delay slot: ret0 = val (or 1)
.EXIT
.PROCEND
|
4ms/metamodule-plugin-sdk
| 10,201
|
plugin-libc/newlib/libc/machine/hppa/strcpy.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*
A faster strcpy.
by
Jerry Huck (aligned case)
Daryl Odnert (equal-alignment case)
Edgar Circenis (non-aligned case)
*/
/*
* strcpy(s1, s2)
*
* Copy string s2 to s1. s1 must be large enough.
* return s1
*/
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define tmp6 r24
#define tmp1 r19
#define evenside r19
#define tmp2 r20
#define oddside r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
/*
 * char *strcpy(char *s1, char *s2)
 *
 * In:  d_addr (r26) = destination, s_addr (r25) = source.
 * Out: ret0 = original destination pointer.
 *
 * Strategy: copy a word at a time, detecting an embedded null byte with
 * uxor (unit xor, nullifying on no-byte-zero / some-byte-zero conditions).
 * Three main paths: both pointers word-aligned (fastest, falls through),
 * equal sub-word alignment (byte/halfword prologue then word loop), and
 * unequal alignment (vshd-based shifting, dispatched via the blr table).
 * NOTE(review): heavy use of delay slots and nullification — instruction
 * order below is load-bearing; do not reorder.
 */
ENTRY(strcpy)
/* Do some quick alignment checking on and fast path both word aligned */
extru,<> s_addr,31,2,tmp6 /*Is source word aligned? */
ldwm 4(0,s_addr),oddside /*Assume yes and guess that it
is double-word aligned. */
dep,= d_addr,29,2,tmp6 /*Is target word aligned? */
b case_analysis
copy d_addr,ret0
/* Both are aligned. First source word already loaded assuming that
source was oddword aligned. Fall through (therefore fastest) code
shuffles the registers to join the main loop */
bothaligned:
bb,>= s_addr,29,twoatatime /*Branch if source was odd aligned*/
uxor,nbz oddside,r0,save
/* Even aligned source. save holds that operand.
Do one iteration of the main copy loop juggling the registers to avoid
one copy. */
b,n nullfound
ldwm 4(s_addr),oddside
stwm save,4(d_addr)
uxor,nbz oddside,r0,save
b,n nullfound
ldwm 4(s_addr),evenside
stwm oddside,4(d_addr)
uxor,nbz evenside,r0,save
b,n nullfound
ldwm 4(s_addr),oddside
/* Main loop body. Entry expects evenside still to be stored, oddside
just loaded. */
loop:
stwm evenside,4(d_addr)
uxor,nbz oddside,r0,save
/* mid loop entry */
twoatatime:
b,n nullfound
ldwm 4(s_addr),evenside
stwm oddside,4(d_addr)
uxor,sbz evenside,r0,save
b loop
ldwm 4(s_addr),oddside
/* fall through when null found in evenside. oddside actually loaded */
nullfound: /* adjust d_addr and store final word */
/* save holds the word containing the terminating null; probe its bytes
   left-to-right and store only up to (and including) the null. */
extru,<> save,7,8,r0 /* pick up leftmost byte */
addib,tr,n 1,d_addr,store_final
extru,<> save,15,8,r0
addib,tr,n 2,d_addr,store_final
extru,<> save,23,8,r0
addib,tr 3,d_addr,store_final2
bv 0(rp)
stw save,0(d_addr)
store_final:
bv 0(rp)
store_final2:
stbys,e save,0(d_addr) /* delay slot */
/* Dispatch on the 4-bit (TGT<<2 | SRC) low-alignment code via a branch
   table (blr: branch on register, two instruction slots per entry). */
case_analysis:
blr tmp6,r0
nop
/* NOTE: the delay slots for the non-aligned cases load a */
/* shift quantity which is TGT-SRC into tmp3. */
/* Note also, the case for both strings being word aligned */
/* is already checked before the BLR is executed, so that */
/* case can never occur. */
/* TGT SRC */
nop /* 00 00 can't happen */
nop
b neg_aligned_copy /* 00 01 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 00 10 */
ldi -2,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 00 11 */
ldi -3,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 01 00 */
ldi 1,tmp3 /* load shift quantity. delay slot */
b equal_alignment_1 /* 01 01 */
ldbs,ma 1(s_addr),tmp1
b neg_aligned_copy /* 01 10 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 01 11 */
ldi -2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 10 00 */
ldi 2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 10 01 */
ldi 1,tmp3 /* load shift quantity. delay slot */
b equal_alignment_2 /* 10 10 */
ldhs,ma 2(s_addr),tmp1
b neg_aligned_copy /* 10 11 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 11 00 */
ldi 3,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 11 01 */
ldi 2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 11 10 */
ldi 1,tmp3 /* load shift quantity. delay slot */
/* 11 11 falls through here: copy one byte, then both word-aligned */
ldbs,ma 1(s_addr),tmp1 /* 11 11 */
comiclr,<> r0,tmp1,r0
bv 0(rp) /* return if 1st byte was null */
stbs,ma tmp1,1(d_addr) /* store a byte to dst string */
b bothaligned /* can now goto word_aligned */
ldwm 4(s_addr),oddside /* load next word of source */
equal_alignment_1:
comiclr,<> r0,tmp1,r0 /* nullify next if tmp1 <> 0 */
bv 0(rp) /* return if null byte found */
stbs,ma tmp1,1(d_addr) /* store a byte to dst string */
ldhs,ma 2(s_addr),tmp1 /* load next halfword */
equal_alignment_2:
extru,<> tmp1,23,8,tmp6 /* look at left byte of halfword */
bv 0(rp) /* return if 1st byte was null */
stbs,ma tmp6,1(d_addr)
extru,<> tmp1,31,8,r0
bv 0(rp) /* return if 2nd byte was null */
stbs,ma tmp1,1(d_addr)
b bothaligned
ldwm 4(s_addr),oddside /* load next word */
/* source and destination are not aligned, so we do it the hard way. */
/* target alignment is greater than source alignment */
pos_aligned_copy0:
addi -4,s_addr,s_addr
pos_aligned_copy:
extru d_addr,31,2,tmp6 /* Extract low 2 bits of the dest addr */
extru s_addr,31,2,tmp1 /* Extract low 2 bits of the src addr */
dep r0,31,2,s_addr /* Compute word address of the source. */
sh3add tmp3,r0,tmp4 /* compute shift amt (bytes * 8 = bits) */
ldwm 4(0,s_addr),tmp2 /* get 1st source word */
sh3add tmp1,r0,save /* setup mask shift amount */
mtctl save,r11 /* set-up cr11 (SAR) for mask */
zvdepi -2,32,save /* create mask of 1-bits over garbage bytes */
or save,tmp2,tmp2 /* mask unused bytes in src (make non-null) */
ldi -1,tmp1 /* load tmp1 with 0xffffffff */
mtctl tmp4,r11 /* shift count -> shift count reg */
vshd tmp1,tmp2,tmp3 /* position data ! */
uxor,nbz tmp3,r0,save
b,n first_null
uxor,nbz tmp2,r0,save
b nullfound1
mtctl tmp4,r11 /* re-load shift cnt (delay slot) */
b loop_entry
ldwm 4(0,s_addr),tmp1 /* get next word. delay slot */
neg_aligned_copy:
extru d_addr,31,2,tmp6 /* Extract low 2 bits of the dest addr */
extru s_addr,31,2,tmp2 /* Extract low 2 bits of the src addr */
dep r0,31,2,s_addr /* Compute word address of the source. */
sh3add tmp3,r0,tmp4 /* compute shift amt (bytes * 8 = bits) */
ldwm 4(0,s_addr),tmp1 /* load first word from source. */
/* check to see if next word can be read safely */
sh3add tmp2,r0,save
mtctl save,r11 /* shift count -> shift count reg */
zvdepi -2,32,save
or save, tmp1, tmp1 /* make leading garbage bytes non-null */
uxor,nbz tmp1,r0,save /* any nulls in first word? */
b first_null0
mtctl tmp4,r11
ldwm 4(0,s_addr),tmp2 /* load second word from source */
combt,= tmp6,r0,chunk1 /* don't mask if whole word valid */
vshd tmp1,tmp2,tmp3 /* position data ! */
sh3add tmp6,r0,save /* setup r1 */
mtctl save,r11 /* set-up cr11 for mask */
zvdepi -2,32,save
or save, tmp3, tmp3
uxor,nbz tmp3,r0,save
b,n first_null
uxor,nbz tmp2,r0,save
b nullfound1
mtctl tmp4,r11 /* re-load shift cnt (delay slot) */
b loop_entry
ldwm 4(0,s_addr),tmp1 /* get next word. delay slot */
chunk1:
uxor,nbz tmp2,r0,save
b nullfound0
vshd tmp1,tmp2,tmp3
/* Main non-aligned loop: shift pairs of source words together with vshd
   and store the realigned words, watching each source word for nulls. */
did_mask:
ldwm 4(0,s_addr),tmp1 /* get next word ! */
loop_entry:
stbys,b,m tmp3,4(0,d_addr) /* store ! */
uxor,nbz tmp1, r0, save
b nullfound2
vshd tmp2,tmp1,tmp3 /* position data ! */
ldwm 4(s_addr),tmp2
stwm tmp3,4(d_addr)
uxor,sbz tmp2,r0,save
b did_mask
nullfound0:
vshd tmp1,tmp2,tmp3 /* delay slot */
uxor,nbz tmp3,r0,save
b,n nullfound
nullfound1:
stbys,b,m tmp3,4(0,d_addr)
b nullfound
vshd tmp2,r0,save /* delay slot */
nullfound2:
uxor,nbz tmp3,r0,save
b,n nullfound
stwm tmp3,4(d_addr)
b nullfound
/* notice that delay slot is in next routine */
first_null0: /* null found in first word of non-aligned (wrt d_addr) */
vshd tmp1,r0,save /* delay slot */
combt,= tmp6,r0,check4
extru save,7,8,tmp4
first_null:
addibt,= -1,tmp6,check3 /* check last 3 bytes of word */
extru save,15,8,tmp4
addibt,=,n -1,tmp6,check2 /* check last 2 bytes */
bv 0(rp) /* null in last byte--store and exit */
stbys,b save, 0(d_addr)
check4:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
extru,<> save,15,8,tmp4
check3:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
check2:
extru,<> save,23,8,tmp4
bv 0(rp)
stbs,ma tmp4,1(d_addr)
bv 0(rp)
stbs r0,0(d_addr)
done:
EXIT(strcpy)
|
4ms/metamodule-plugin-sdk
| 7,404
|
plugin-libc/newlib/libc/machine/hppa/strncmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* strncmp(s1, s2, n) */
/* returns integer: < 0 iff s1 lexicographically less than s2 */
/* > 0 iff s1 lexicographically greater than s2 */
/* = 0 iff s1 lexicographically equal to s2 */
/* = 0 iff s1 lexicographically equal to s2 */
/* quit after n characters */
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 29
#define save 1
#define tmp6 23
#define tmp5 28
#define count 24
/*
 * int strncmp(const char *s1, const char *s2, size_t n)
 *
 * In:  s1 (r26), s2 (r25), count (r24) = n.
 * Out: ret0 < 0, == 0, > 0 as s1 compares less than, equal to, or
 *      greater than s2 over at most n characters.
 *
 * Compares a word at a time when possible, masking leading garbage
 * bytes (zvdepi builds the mask via CR11/SAR), and using vshd to
 * realign when s1 and s2 have different sub-word alignment.
 * NOTE(review): delay slots and nullification are pervasive below.
 */
ENTRY(strncmp)
combt,<,n r0,count,search /* N <= 0 yields equality */
bv r0(rp) /* */
copy 0,ret0 /* return 0 (DELAY SLOT) */
search: combf,=,n s1,s2,findout /* s1 != s2? */
bv r0(rp) /* */
copy 0,ret0 /* return 0 (delay slot) */
findout:
comibf,=,n 0,s1,checks1 /* s1 == NULL? */
ldbs 0(0,s2),ret0 /* */
bv r0(rp) /* */
subi 0,ret0,ret0 /* ret0 <- -*s2 */
checks1:
comibf,=,n 0,s2,checkitout /* s2 == NULL? */
bv r0(rp) /* */
ldbs 0(0,s1),28 /* return *s1 */
checkitout:
extru s2,31,2,tmp1 /* Extract the low two bits of the s2. */
extru s1,31,2,tmp5 /* Extract the low two bits of the s1 */
sub,= tmp5,tmp1,tmp3 /* Are s1 & s2 aligned with each other? */
b not_aligned /* It's more complicated (not_aligned) */
dep 0,31,2,s1 /* Compute word address of s1 (DELAY SLOT) */
dep 0,31,2,s2 /* Compute word address of s2 */
ldwm 4(0,s1),s1word /* get next s1 word s1+=4 */
combt,= tmp5,r0,skipmask /* skip masking, if we can */
ldwm 4(0,s2),s2word /* get next s2 word s2+=4 (DELAY SLOT) */
add tmp5,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /* mask s1word (s1) */
or save,s2word,s2word /* mask s2word (s2) */
skipmask:
combt,=,n s1word,s2word,chknulls /* are these words equal? */
/* Words differ (or we fell in from the non-aligned path): compare
   byte by byte to find the first difference or the terminating null. */
checkbyte:
extru s1word,7,8,tmp3 /* get first byte (character) */
ckbyte2: extru s2word,7,8,tmp7 /* get first byte (character) */
combf,= tmp3,tmp7,done /* quit if first byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
comibt,=,n 0,tmp3,done /* have we reached the end of string */
/* if so done ret0 already has zero */
addibt,<=,n -1,count,done /* have we checked N chars? ret0 == 0 */
extru s1word,15,8,tmp3 /* get second byte (character) */
extru s2word,15,8,tmp7 /* get second byte (character) */
combf,= tmp3,tmp7,done /* quit if second byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
comibt,=,n 0,tmp3,done /* have we reached the end of string */
/* if so done ret0 already has zero */
addibt,<=,n -1,count,done /* have we checked N chars? */
extru s1word,23,8,tmp3 /* get third byte (character) */
extru s2word,23,8,tmp7 /* get third byte (character) */
combf,= tmp3,tmp7,done /* done if third byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
comibt,=,n 0,tmp3,done /* have we reached the end of string */
/* if so done ret0 already has zero */
addibt,<=,n -1,count,done /* have we checked N chars? */
extru s1word,31,8,tmp3 /* get last byte (character) */
extru s2word,31,8,tmp7 /* get last byte (character) */
bv r0(rp) /* */
sub tmp3,tmp7,ret0 /* the last characters in the word is */
/* where the difference is, so return */
/* the difference and we're outta here */
chknulls:
addibt,<=,n -4,count,zero /* have we checked N chars? */
uxor,nbz s1word,0,0 /* don't have to check s2 Just quit */
bv r0(rp) /* */
copy 0,28 /* return 0 */
ldwm 4(0,s2),s2word /* get next s2 word s2+=4 */
b skipmask /* keep checking */
ldwm 4(0,s1),s1word /* get next s1 word s1+=4 */
not_aligned:
dep r0,31,2,s2 /* Compute word address of s2 */
combt,<,n r0,tmp3,shifts1 /* Do we shift s1 or s2 */
sh3add tmp3,r0,tmp3 /* eight bits per byte so mul by 8 */
ldwm 4(0,s1),s1word /* get first word of s1 */
ldwm 4(0,s2),s2word /* get first word of s2 */
combt,=,n r0,tmp5,masks2 /* Do we need to mask beginning of s1 */
add tmp5,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /* */
masks2: sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word /* */
mtctl tmp3,11 /* Move shift amount to CR11 */
more: uxor,nbz s2word,r0,r0 /* Is there a null in first word */
b,n chunk1 /* */
ldwm 4(0,s2),tmp7 /* load second word to enable us to shift */
vshd s2word,tmp7,s2word /* */
combf,=,n s1word,s2word,ckbyte2 /* */
extru s1word,7,8,tmp3 /* get first byte (DELAY SLOT) */
addibt,<=,n -4,count,zero /* have we checked N chars? */
uxor,nbz s1word,0,0 /* even though they're equal we could be done */
b,n zero
copy tmp7,s2word /* */
b more /* keep checking */
ldwm 4(0,s1),s1word /* get next s1 (DELAY SLOT) */
chunk1:
vshd s2word,r0,s2word /* */
b ckbyte2 /* */
extru s1word,7,8,tmp3 /* */
shifts1:
sh3add tmp3,r0,tmp3 /* eight bits per byte so mul by 8 */
sub r0,tmp3,tmp3 /* Get negative value for left shift */
ldwm 4(0,s2),s2word /* get first word of s2 */
ldwm 4(0,s1),s1word /* get first word of s1 */
combt,=,n r0,tmp1,masks1 /* Do we need to mask beginning of s2 */
add tmp1,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word /* */
masks1: sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /* */
mtctl tmp3,11 /* Move shift amount to CR11 */
more1: uxor,nbz s1word,r0,r0 /* Is there a null in first byte */
b,n chunk2 /* */
ldwm 4(0,s1),tmp7 /* load second word to enable us to shift */
vshd s1word,tmp7,s1word /* */
combf,=,n s2word,s1word,ckbyte2 /* */
extru s1word,7,8,tmp3 /* get first byte (DELAY SLOT) */
addibt,<=,n -4,count,zero /* have we checked N chars? */
uxor,nbz s2word,0,0 /* even though they're equal we could be done */
b,n zero /* zero ret0 and quit */
copy tmp7,s1word /* */
b more1 /* keep checking */
ldwm 4(0,s2),s2word /* get next s2 (DELAY SLOT) */
chunk2:
vshd s1word,r0,s1word /* */
b ckbyte2 /* */
extru s1word,7,8,tmp3 /* */
zero: copy r0,ret0
done:
EXIT(strncmp)
|
4ms/metamodule-plugin-sdk
| 2,480
|
plugin-libc/newlib/libc/machine/hppa/memset.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* SPECTRUM_ID: @(#)memset.s 37.4 86/08/25 */
/*
* memset(s, c, n)
*
* Sets first n chars in memory area s to value of character c.
* Returns s.
*/
#ifndef _NAMESPACE_CLEAN
#define NOSECDEF /* prevent _memset from being defined as entry */
#endif
#include "DEFS.h"
#define TO arg0
#define FILLCHAR arg1
#define COUNT arg2
#define TMP r31
/*
 * void *memset(void *s, int c, size_t n)
 *
 * In:  TO (arg0) = s, FILLCHAR (arg1) = c, COUNT (arg2) = n.
 * Out: ret0 = s.
 *
 * For n <= 5 fills byte-by-byte; otherwise replicates c into all four
 * bytes of FILLCHAR, uses stbys to handle the ragged first/last words,
 * and fills the aligned middle with word stores (unrolled by four).
 */
ENTRY(memset)
comb,<= COUNT,r0,msexit /* return if count not positive */
copy TO,ret0 /* return value is start of copy */
comibf,<,n 5,COUNT,msbyteloop /* be straightforward */
dep FILLCHAR,23,8,FILLCHAR /* dup low byte */
dep FILLCHAR,15,16,FILLCHAR /* into high bytes */
add TO,COUNT,TMP /* TMP points just past fill area */
stbys,m FILLCHAR,0(TO) /* fill out first word */
/*
 * If we're pointing to high-order byte, no fill will happen,
 * but permissions will be checked. We don't want this (we
 * might be pointing at the beginning of a protected region),
 * so we branch around stbys if neither low bits are set.
 */
bb,<,n TMP,31,filend /* if low bit is set, stbys */
bb,>=,n TMP,30,endfil /* if next lowest bit isn't set */
/* (and lowest isn't, either) */
/* do not stbys */
filend:
stbys,m,e FILLCHAR,0(TMP) /* fill out the last */
endfil:
addi 4, TO, TO
sub TMP,TO,COUNT /* will now divide by 4 */
comb,=,n COUNT,r0,msexit /* If count is zero ret. */
extru,<> COUNT,31,4,r1 /* r1 = leftover words mod 16 bytes */
b msquadloop
depi 0,31,4,COUNT /* will now divide by 16 */
/* store leftover words (fewer than four) one at a time */
mswordloop:
addib,<> -4,r1,mswordloop
stws,ma FILLCHAR,4(TO)
comb,=,n COUNT,r0,msexit /* If count is zero ret. */
/* unrolled loop: four word stores (16 bytes) per iteration */
msquadloop:
stws,ma FILLCHAR,4(TO)
stws,ma FILLCHAR,4(TO)
stws,ma FILLCHAR,4(TO)
addib,<> -16,COUNT,msquadloop
stws,ma FILLCHAR,4(TO)
b,n msexit
/* small count: simple byte-at-a-time fill */
msbyteloop:
addib,<> -1,COUNT,msbyteloop
stbs,ma FILLCHAR,1(TO)
msexit:
EXIT(memset)
|
4ms/metamodule-plugin-sdk
| 1,332
|
plugin-libc/newlib/libc/machine/hppa/memchr.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* SPECTRUM_ID: @(#)memchr.s 37.4 86/04/23 */
/*
* memchr(s, c, n)
*
* returns pointer to first occurrence of char c
* in first n characters of memory area s,
* or null if c does not occur.
*/
#include "DEFS.h"
#define FROM arg0
#define CHAR arg1
#define COUNT arg2
#define TEMP1 r19
/*
 * void *memchr(const void *s, int c, size_t n)
 *
 * In:  FROM (arg0) = s, CHAR (arg1) = c, COUNT (arg2) = n.
 * Out: ret0 = pointer to first occurrence of c in the first n bytes
 *      of s, or 0 (NULL) if not found or n is zero.
 *
 * Simple byte-at-a-time scan with post-incrementing loads.
 */
ENTRY(memchr)
comb,<= COUNT,r0,memchrexit /* return if count is zero */
copy r0,ret0 /* null if c not found in n chars */
depi 0,23,24,CHAR /* make char unsigned (keep low byte only) */
ldbs,ma 1(FROM),TEMP1 /* prime the loop: TEMP1 = *FROM++ */
memchrloop:
comb,=,n TEMP1,CHAR,memchrequal
addib,<> -1,COUNT,memchrloop
ldbs,ma 1(FROM),TEMP1 /* next byte (DELAY SLOT) */
b,n memchrexit
memchrequal:
ldo -1(FROM),ret0 /* FROM was post-incremented; back up one */
memchrexit:
EXIT(memchr)
|
4ms/metamodule-plugin-sdk
| 7,108
|
plugin-libc/newlib/libc/machine/hppa/memcmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* memcmp(s1, s2, n) */
/* returns integer: < 0 iff s1 lexicographically less than s2 */
/* > 0 iff s1 lexicographically greater than s2 */
/* = 0 iff s1 lexicographically equal to s2 */
/* = 0 iff s1 lexicographically equal to s2 */
/* quit after n characters */
#ifndef _NAMESPACE_CLEAN
#define NOSECDEF /* prevents _memcmp from becoming primary entry */
#endif
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 29
#define save 1
#define tmp6 23
#define tmp5 28
#define count 24
/*
 * int memcmp(const void *s1, const void *s2, size_t n)
 *
 * In:  s1 (r26), s2 (r25), count (r24) = n.
 * Out: ret0 < 0, == 0, > 0 as s1 compares less than, equal to, or
 *      greater than s2 over the first n bytes.
 *
 * Same word-at-a-time structure as strncmp, but with no null-byte
 * termination: only the byte count ends the comparison. Leading
 * garbage bytes are masked via zvdepi/CR11; vshd realigns mismatched
 * sub-word alignments. NOTE(review): delay slots are load-bearing.
 */
ENTRY(memcmp)
combt,<,n r0,count,search /*N <= 0 yields equality */
b done /**/
copy 0,ret0 /*return 0 (DELAY SLOT) */
search: combf,=,n s1,s2,findout /*s1 != s2? */
b done
copy 0,ret0 /*return 0 (delay slot) */
findout:
comibf,=,n 0,s1,checks1 /*s1 == NULL? */
ldbs 0(0,s2),ret0 /**/
b done /*quit */
sub 0,ret0,ret0 /*ret0 <- -*s2 */
checks1:
comibf,=,n 0,s2,checkitout /*s2 == NULL? */
b done /* quit */
ldbs 0(0,s1),28 /* return *s1 */
checkitout:
extru s2,31,2,tmp1 /* Extract the low two bits of the s2. */
extru s1,31,2,tmp5 /* Extract the low two bits of the s1 */
sub,= tmp5,tmp1,tmp3 /* Are s1 & s2 aligned with each other? */
b not_aligned /* It's more complicated (not_aligned) */
dep 0,31,2,s1 /* Compute word address of s1 (DELAY SLOT) */
dep 0,31,2,s2 /* Compute word address of s2 */
ldwm 4(0,s1),s1word /* get next s1 word s1+=4 */
combt,= tmp5,r0,skipmask /* skip masking, if we can */
ldwm 4(0,s2),s2word /* get next s2 word s2+=4 (DELAY SLOT) */
add tmp5,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /* mask s1word (s1) */
or save,s2word,s2word /* mask s2word (s2) */
skipmask:
combt,=,n s1word,s2word,checkN /* We may be done */
/* Words differ: locate the first differing byte and return the diff. */
checkbyte:
extru s1word,7,8,tmp3 /* get first byte (character) */
ckbyte2: extru s2word,7,8,tmp7 /* get first byte (character) */
combf,= tmp3,tmp7,done /* quit if first byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
addibt,<=,n -1,count,done /* have we checked N chars? ret0 == 0 */
extru s1word,15,8,tmp3 /* get second byte (character) */
extru s2word,15,8,tmp7 /* get second byte (character) */
combf,= tmp3,tmp7,done /* quit if second byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
addibt,<=,n -1,count,done /* have we checked N chars? */
extru s1word,23,8,tmp3 /* get third byte (character) */
extru s2word,23,8,tmp7 /* get third byte (character) */
combf,= tmp3,tmp7,done /* done if third byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
addibt,<=,n -1,count,done /* have we checked N chars? */
extru s1word,31,8,tmp3 /* get last byte (character) */
extru s2word,31,8,tmp7 /* get last byte (character) */
b done /* if we reach this point we know that */
sub tmp3,tmp7,ret0 /* the last character in the word is */
/* where the difference is, so return */
/* the difference and we're outta here */
checkN:
addibt,<=,n -4,count,zero /* have we checked N chars? */
ldwm 4(0,s2),s2word /* get next s2 word s2+=4 */
b skipmask /* keep checking */
ldwm 4(0,s1),s1word /* get next s1 word s1+=4 */
not_aligned:
dep r0,31,2,s2 /* Compute word address of s2 */
combt,<,n r0,tmp3,shifts1 /* Do we shift s1 or s2 */
sh3add tmp3,r0,tmp3 /* eight bits per byte so mul by 8 */
ldwm 4(0,s1),s1word /* get first word of s1 */
ldwm 4(0,s2),s2word /* get first word of s2 */
combt,=,n r0,tmp5,masks2 /* Do we need to mask beginning of s1 */
add tmp5,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /**/
masks2: sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word /**/
subi 4,tmp1,tmp1 /* tmp1 now has the number of byte that */
/* are valid in s2word before the vshd */
mtctl tmp3,11 /* Move shift amount to CR11 */
more: combt,<=,n count,tmp1,chunk1 /* Can we do the vshd? */
ldwm 4(0,s2),tmp7 /* load second word to enable us to shift */
vshd s2word,tmp7,s2word /**/
combf,=,n s1word,s2word,ckbyte2 /**/
extru s1word,7,8,tmp3 /* get first byte (DELAY SLOT) */
addibt,<=,n -4,count,zero /* have we checked N chars? */
copy tmp7,s2word /**/
b more /* keep checking */
ldwm 4(0,s1),s1word /* get next s1 (DELAY SLOT) */
chunk1:
vshd s2word,r0,s2word /* do an arithmetic shift left to position data */
b ckbyte2 /**/
extru s1word,7,8,tmp3 /**/
shifts1:
sh3add tmp3,r0,tmp3 /* eight bits per byte so mul by 8 */
sub r0,tmp3,tmp3 /* Get negative value for left shift */
dep r0,31,2,s2 /* Compute word address of s2 */
ldwm 4(0,s2),s2word /* get first word of s2 */
ldwm 4(0,s1),s1word /* get first word of s1 */
combt,=,n r0,tmp1,masks1 /*Do we need to mask beginning of s2 */
add tmp1,count,count /*bump count by the number of bytes */
/* we are going to mask */
sh3add tmp1,r0,save /*save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /*load save with proper mask */
or save,s2word,s2word /**/
masks1: sh3add tmp5,r0,save /*save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /*load save with proper mask */
or save,s1word,s1word /**/
subi 4,tmp5,tmp5 /*tmp5 now has the number of byte that */
/*are valid in s1word before the vshd */
mtctl tmp3,11 /*Move shift amount to CR11 */
more1: combt,<=,n count,tmp5,chunk2 /*Can we do the vshd? */
ldwm 4(0,s1),tmp7 /*load second word to enable us to shift */
vshd s1word,tmp7,s1word /**/
combf,=,n s2word,s1word,ckbyte2 /**/
extru s1word,7,8,tmp3 /*get first byte (DELAY SLOT) */
addibt,<=,n -4,count,zero /*have we checked N chars? */
copy tmp7,s1word /**/
b more1 /*keep checking */
ldwm 4(0,s2),s2word /*get next s2 (DELAY SLOT) */
chunk2:
vshd s1word,r0,s1word /**/
b ckbyte2 /**/
extru s1word,7,8,tmp3 /**/
zero: copy r0,ret0
done:
EXIT(memcmp)
|
4ms/metamodule-plugin-sdk
| 3,370
|
plugin-libc/newlib/libc/machine/hppa/strlen.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID = "@(#) $Revision$" */
/* strlen(s): Return length of string s */
#define start arg0
#define end ret0
#define tmp1 arg1
#define tmp2 arg2
#include "DEFS.h"
/*
 * size_t strlen(const char *s)
 *
 * In:  start (arg0) = s.
 * Out: ret0 = length of s (0 if s is NULL, per the movb guard below).
 *
 * Scans a word at a time with uxor null-byte detection. Leading
 * garbage bytes in the first (rounded-down) word are forced non-zero
 * via a shifted vdepi mask so they cannot stop the scan.
 */
ENTRY(strlen)
movb,=,n start,end,$null_ptr
depi 0,31,2,end /* round end down to a word boundary */
comb,<> start,end,$not_aligned
ldws,ma 4(end),tmp1
comib,tr 0,0,$loop /* avoid INDIGO two register interlock */
uxor,nbz 0,tmp1,0
$not_aligned:
/*
; Tricky code. The problem is that the value of the word
; including the start of the string has some garbage bytes that
; may be 0. We don't want them to stop the string scan. So
; we make those bytes non-zero (and any old non-zero value
; will do). Notice that the end pointer has been rounded
; down to a word boundary, and then incremented to the next
; word by the time we get here. Therefore, (start-end) has
; one of the values (-3, -2, or -1). Use uaddcm to do the
; subtraction (instead of sub), and the result will be
; (-4, -3, or -2). Multiply this by 8, and put into the
; shift register (which truncates to the last 5 bits) and
; the value will be (0, 8, or 16). Use this as a bit position,
; and drop a mask down into tmp1. All the garbage bytes will
; have at least 1 bit affected by the vdepi, so all the garbage
; in this first word will be non-zero garbage.
*/
uaddcm start,end,tmp2 /* tmp2 <- { -4, -3, -2 } */
sh3add tmp2,0,tmp2 /* tmp2 <- { -32, -24, -16 } */
mtsar tmp2 /* sar <- { 0, 8, 16 } */
vdepi -1,32,tmp1
uxor,nbz 0,tmp1,0
$loop:
b,n $end_loop
ldws,ma 4(end),tmp1
comib,tr 0,0,$loop /* avoid INDIGO two register interlock */
uxor,nbz 0,tmp1,0
$end_loop:
/* adjust the end pointer to one past the end of the string */
extru,<> tmp1,7,8,0
addib,tr,n -3,end,$out
extru,<> tmp1,15,8,0
addib,tr,n -2,end,$out
extru,<> tmp1,23,8,0
addi -1,end,end
$out:
bv 0(rp)
/*
; tricky code. the end pointer is just beyond the terminating
; null byte, so the length is (end-start-1). use uaddcm
; to do this in 1 instruction
*/
uaddcm end,start,ret0 /* DELAY SLOT: ret0 = end - start - 1 */
$null_ptr:
EXIT(strlen)
|
4ms/metamodule-plugin-sdk
| 5,257
|
plugin-libc/newlib/libc/machine/hppa/strcat.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision$ */
/*
* strcat(s1, s2)
*
* Concatenate s2 on the end of s1. S1's space must be large enough.
* Return s1.
*/
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define tmp6 r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
ENTRY(strcat)
/*
 * d_addr = s1 (dest), s_addr = s2 (source); ret0 carries s1 back.
 * PA-RISC nullification idioms used throughout this file:
 *   uxor,nbz x,r0,save - save=x; the NEXT instruction is nullified
 *     unless some byte of x is zero (it runs only when a null is seen).
 *   extru,<> x,p,8,r0  - extract one byte; the NEXT instruction runs
 *     only when that byte is zero.
 */
comb,= r0,s_addr,done /* quit if s2=NULL */
copy d_addr,ret0 /* The return value is the value of d_addr. DELAY SLOT*/
/* First look for end of s1 (d_addr) */
extru d_addr,31,2,tmp1 /* Extract the low two bits of the dest address. */
combt,= tmp1,r0,dont_mask
dep 0,31,2,d_addr /*set word alignment */
ldwm 4(d_addr),tmp2
sh3add tmp1,r0,save /* build mask based on tmp1 */
mtctl save,11
zvdepi -2,32,save /* left-hand ones mask over the bytes before s1 */
or save,tmp2,tmp2 /* force the leading (garbage) bytes non-zero */
uxor,nbz tmp2,r0,save
search:
b,n found_end /* nullified under uxor conditions above and below */
dont_mask:
ldwm 4(d_addr),tmp2
comib,tr r0,r0,search
uxor,nbz tmp2,r0,save
found_end: /* at this point d_addr points to word */
extru,<> save,7,8,r0 /* following word with null */
addib,tr,n -4,d_addr,begin_copy /*set d_addr to end of s1 */
extru,<> save,15,8,r0
addib,tr,n -3,d_addr,begin_copy
extru,<> save,23,8,r0
addi -1,d_addr,d_addr /* nullified when byte 2 is non-zero (null in byte 3) */
addi -1,d_addr,d_addr /* so: net -2 for null in byte 2, net -1 for byte 3 */
begin_copy:
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address. */
extru d_addr,31,2,tmp6 /* Extract the low two bits of the destination address. */
sub,= tmp6,tmp1,tmp3 /* Compute the shift quantity and don't branch if tmp6=tmp1. */
b not_aligned /* Not_aligned says that shifts will be needed. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT. */
/* aligned */
combt,= tmp6,r0,skip_mask
ldwm 4(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 4 (DELAY SLOT) */
sh3add tmp6,r0,save
mtctl save,r11
zvdepi -2,32,save
or save,tmp1,tmp1
uxor,nbz tmp1,r0,save
b,n first_null /* special case: null in first word */
b,n skip_mask2
chunks:
b,n null_found /* delay slot for uxor below */
skip_mask2:
stbys,b,m tmp1,4(d_addr)
ldwm 4(s_addr),tmp1
skip_mask:
comib,tr 0,0,chunks
uxor,nbz tmp1,r0,save
/* Begin non_aligned code. */
not_aligned:
sh3add,>= tmp3,r0,tmp4 /* compute the shift amt.and skip load if tmp6 > tmp1. */
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++ */
ldwm 4(0,s_addr),tmp2 /* get either first or second word from source. */
combt,= tmp6,r0,chunk2 /* don't mask if whole word is valid */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register. */
vshd tmp1,tmp2,tmp3 /* position data ! (delay slot) */
sh3add tmp6,r0,save /* setup r1 */
mtctl save,r11 /* set-up cr11 for mask */
zvdepi -2,32,save
or save, tmp3, tmp3
uxor,nbz tmp3,r0,save
b,n first_null2
b did_mask
mtctl tmp4,11 /* re-load the shift count into cr11 */
chunk2:
vshd tmp1,tmp2,tmp3
uxor,nbz tmp3, r0, save
b,n null_found
did_mask:
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get next word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
uxor,nbz tmp3, r0, save
b,n null_found
stwm tmp3,4(d_addr)
comib,tr 0,0,chunk2
ldwm 4(s_addr),tmp2
null_found: /* adjust d_addr and store final word */
extru,<> save,7,8,r0
addib,tr,n 1,d_addr,store_final
extru,<> save,15,8,r0
addib,tr,n 2,d_addr,store_final
extru,<> save,23,8,r0
addib,tr 3,d_addr,store_final2
bv 0(r2)
stw save,0(d_addr)
store_final:
bv 0(r2)
store_final2:
stbys,e save,0(d_addr) /* delay slot */
first_null: /* null found in first word of aligned (wrt d_addr) */
addi -4,s_addr,s_addr /* back up over the ldwm post-increment */
ldbx tmp6(s_addr),tmp4
add tmp6,s_addr,s_addr
comib,= 0,tmp4,done
stbs,ma tmp4,1(d_addr)
ldbs 1(s_addr),tmp4
comib,= 0,tmp4,done
stbs,ma tmp4,1(d_addr)
bv 0(r2) /* done */
stbs 0,0(d_addr)
first_null2: /* null found in first word of non-aligned (wrt d_addr) */
addibt,= -1,tmp6,check3 /* check last 3 bytes of word */
extru save,15,8,tmp4
addibt,=,n -1,tmp6,check2 /* check last 2 bytes */
bv 0(r2)
stbys,b save, 0(d_addr)
check3:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
check2:
extru,<> save,23,8,tmp4
bv 0(r2)
stbs,ma tmp4,1(d_addr)
bv 0(r2)
stbs r0,0(d_addr)
done:
EXIT(strcat)
|
4ms/metamodule-plugin-sdk
| 7,312
|
plugin-libc/newlib/libc/machine/hppa/pcc_prefix.s
|
;
; (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;
; To anyone who acknowledges that this file is provided "AS IS"
; without any express or implied warranty:
; permission to use, copy, modify, and distribute this file
; for any purpose is hereby granted without fee, provided that
; the above copyright notice and this notice appears in all
; copies, and that the name of Hewlett-Packard Company not be
; used in advertising or publicity pertaining to distribution
; of the software without specific, written prior permission.
; Hewlett-Packard Company makes no representations about the
; suitability of this software for any purpose.
;
; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
; - fr16-31 added at Utah
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
r0: .equ 0 ; always reads as zero (PA-RISC hardwired zero register)
r1: .equ 1
r2: .equ 2
r3: .equ 3
r4: .equ 4
r5: .equ 5
r6: .equ 6
r7: .equ 7
r8: .equ 8
r9: .equ 9
r10: .equ 10
r11: .equ 11
r12: .equ 12
r13: .equ 13
r14: .equ 14
r15: .equ 15
r16: .equ 16
r17: .equ 17
r18: .equ 18
r19: .equ 19
r20: .equ 20
r21: .equ 21
r22: .equ 22
r23: .equ 23
r24: .equ 24
r25: .equ 25
r26: .equ 26
r27: .equ 27
r28: .equ 28
r29: .equ 29
r30: .equ 30
r31: .equ 31
; Hardware Space Registers
sr0: .equ 0
sr1: .equ 1
sr2: .equ 2
sr3: .equ 3
sr4: .equ 4
sr5: .equ 5
sr6: .equ 6
sr7: .equ 7
; Hardware Floating Point Registers
fr0: .equ 0
fr1: .equ 1
fr2: .equ 2
fr3: .equ 3
fr4: .equ 4
fr5: .equ 5
fr6: .equ 6
fr7: .equ 7
fr8: .equ 8
fr9: .equ 9
fr10: .equ 10
fr11: .equ 11
fr12: .equ 12
fr13: .equ 13
fr14: .equ 14
fr15: .equ 15
fr16: .equ 16
fr17: .equ 17
fr18: .equ 18
fr19: .equ 19
fr20: .equ 20
fr21: .equ 21
fr22: .equ 22
fr23: .equ 23
fr24: .equ 24
fr25: .equ 25
fr26: .equ 26
fr27: .equ 27
fr28: .equ 28
fr29: .equ 29
fr30: .equ 30
fr31: .equ 31
; Hardware Control Registers
cr0: .equ 0
rctr: .equ 0 ; Recovery Counter Register
cr8: .equ 8 ; Protection ID 1
pidr1: .equ 8
cr9: .equ 9 ; Protection ID 2
pidr2: .equ 9
cr10: .equ 10
ccr: .equ 10 ; Coprocessor Configuration Register
cr11: .equ 11
sar: .equ 11 ; Shift Amount Register (used by vshd/zvdepi in the string routines)
cr12: .equ 12
pidr3: .equ 12 ; Protection ID 3
cr13: .equ 13
pidr4: .equ 13 ; Protection ID 4
cr14: .equ 14
iva: .equ 14 ; Interrupt Vector Address
cr15: .equ 15
eiem: .equ 15 ; External Interrupt Enable Mask
cr16: .equ 16
itmr: .equ 16 ; Interval Timer
cr17: .equ 17
pcsq: .equ 17 ; Program Counter Space queue
cr18: .equ 18
pcoq: .equ 18 ; Program Counter Offset queue
cr19: .equ 19
iir: .equ 19 ; Interruption Instruction Register
cr20: .equ 20
isr: .equ 20 ; Interruption Space Register
cr21: .equ 21
ior: .equ 21 ; Interruption Offset Register
cr22: .equ 22
ipsw: .equ 22 ; Interruption Processor Status Word
cr23: .equ 23
eirr: .equ 23 ; External Interrupt Request
cr24: .equ 24
ppda: .equ 24 ; Physical Page Directory Address
tr0: .equ 24 ; Temporary register 0
cr25: .equ 25
hta: .equ 25 ; Hash Table Address
tr1: .equ 25 ; Temporary register 1
cr26: .equ 26
tr2: .equ 26 ; Temporary register 2
cr27: .equ 27
tr3: .equ 27 ; Temporary register 3
cr28: .equ 28
tr4: .equ 28 ; Temporary register 4
cr29: .equ 29
tr5: .equ 29 ; Temporary register 5
cr30: .equ 30
tr6: .equ 30 ; Temporary register 6
cr31: .equ 31
tr7: .equ 31 ; Temporary register 7
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention ~
; Register Definitions for Use with Assembler ~
; version A.08.06
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
rp: .equ r2 ; return pointer
mrp: .equ r31 ; millicode return pointer
ret0: .equ r28 ; return value
ret1: .equ r29 ; return value (high part of double)
sl: .equ r29 ; static link
sp: .equ r30 ; stack pointer
dp: .equ r27 ; data pointer
arg0: .equ r26 ; argument
arg1: .equ r25 ; argument or high part of double argument
arg2: .equ r24 ; argument
arg3: .equ r23 ; argument or high part of double argument
;_____________________________________________________________________________
; Software Architecture Space Registers
; sr0 ; return link from BLE
sret: .equ sr1 ; return value
sarg: .equ sr1 ; argument
; sr4 ; PC SPACE tracker
; sr5 ; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ 64 ; old stack pointer (locates previous frame)
#if 0
; Space/subspace directives below are compiled out; the #else branch
; further down simply selects the plain .data section instead.
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions. version A.08.06
; These are generally suitable for programs on HP_UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
.SPACE $TEXT$, SPNUM=0,SORT=8
; .subspa $FIRST$, QUAD=0,ALIGN=2048,ACCESS=0x2c,SORT=4,FIRST
; .subspa $REAL$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=4,FIRST,LOCK
.subspa $MILLICODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
.subspa $LIT$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
.subspa $CODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
; .subspa $UNWIND$, QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=64
; .subspa $RECOVER$, QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=80
; .subspa $RESERVED$, QUAD=0,ALIGN=8,ACCESS=0x73,SORT=82
; .subspa $GATE$, QUAD=0,ALIGN=8,ACCESS=0x4c,SORT=84,CODE_ONLY
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;
.SPACE $PRIVATE$, SPNUM=1,PRIVATE,SORT=16
.subspa $GLOBAL$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
.import $global$
.subspa $SHORTDATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=24
.subspa $DATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
.subspa $PFA_COUNTER$, QUAD=1,ALIGN=4,ACCESS=0x1f,SORT=8
.subspa $SHORTBSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=80,ZERO
.subspa $BSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO
; .subspa $PCB$, QUAD=1,ALIGN=8,ACCESS=0x10,SORT=82
; .subspa $STACK$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82
; .subspa $HEAP$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82
;;;;;;;;;;;;;;;;
; .SPACE $PFA$, SPNUM=0,PRIVATE,UNLOADABLE,SORT=64
; .subspa $PFA_ADDRESS$, ALIGN=4,ACCESS=0x2c,UNLOADABLE
;;;;;;;;;;;;;;;;
; .SPACE $DEBUG$, SPNUM=2,PRIVATE,UNLOADABLE,SORT=80
; .subspa $HEADER$, ALIGN=4,ACCESS=0,UNLOADABLE,FIRST
; .subspa $GNTT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $LNTT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $SLT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $VT$, ALIGN=4,ACCESS=0,UNLOADABLE
; To satisfy the copyright terms each .o will have a reference
; to the actual copyright. This will force the actual copyright
; message to be brought in from libgloss/hp-milli.s
.space $PRIVATE$
.subspa $DATA$
#else
.data
#endif
.import ___hp_free_copyright,data
L$copyright .word ___hp_free_copyright
|
4ms/metamodule-plugin-sdk
| 9,050
|
plugin-libc/newlib/libc/machine/hppa/strcmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*
strcmp
Jerry Huck
Edgar Circenis
*/
/*
* strcmp(s1, s2)
*
* returns integer: < 0 iff s1 lexicographically less than s2
* > 0 iff s1 lexicographically greater than s2
* = 0 iff s1 lexicographically equal to s2
*/
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 23
#define save 1
#define tmp6 24
#define tmp5 28
ENTRY(strcmp)
/*
 * Word-at-a-time compare. Nullification idioms:
 *   uxor,nbz x,r0,r0 - the NEXT instruction is nullified unless some
 *     byte of x is zero, so the b,n after it is taken only when a word
 *     contains the terminating null.
 * blr tmp1,r0 dispatches into the table at case_analysis, indexed by
 * (s2 alignment << 2) | (s1 alignment); each entry is 2 instructions.
 */
comb,=,n s1,s2,samestring
comib,=,n 0,s1,s1isnull
comib,=,n 0,s2,s2isnull
/* Hope for word alignment. Pick up low two bits of each address */
extru,<> s1,31,2,tmp1
ldwm 4(s1),s1word
dep,= s2,29,2,tmp1
b,n case_analysis
/* Start looping until null is found in s1 or they mis-compare */
loop:
ldwm 4(s2),s2word
loop_plus:
uxor,nbz s1word,r0,r0 /* Null in this? */
b,n nullins1
comb,=,n s1word,s2word,loop
ldwm 4(s1),s1word
/* The words do not compare equal and s1 does not have a null.
Need to treat words as unsigned and generate either a positive
or negative return value */
wordcomparereturn:
comclr,>> s1word,s2word,ret0 /*Set ret0 to 0 and skip if greater*/
ldi -2,ret0 /*Set ret0 to -2 when less */
bv r0(rp)
addi 1,ret0,ret0 /*Fix return value to be -1 or +1 */
/* s1 has a null. s2 has not been checked. */
nullins1:
/*If s2 has no nulls this is simple, but assume that it might
and fix up s1 to allow the word comparison to work by
scanning s1 and duplicating all the bytes in s2 below that byte into
the remainder of s1. A remainder only exists if the zero byte
is found in the upper three bytes */
extru,<> s1word,7,8,r0 /*in the first byte? */
dep,tr s2word,31,24,s1word /*copy low 3 bytes of *s2 into *s1 */
extru,<> s1word,15,8,r0 /*in the second byte? */
dep,tr s2word,31,16,s1word /*copy low 2 bytes of *s2 into *s1 */
extru,<> s1word,23,8,r0 /*in the third byte? */
dep s2word,31,8,s1word /*copy low 1 byte of *s2 into *s1 */
/* Do the normal unsigned compare and return */
comclr,<> s1word,s2word,ret0 /*Set ret0 to 0 and skip if not equal */
bv,n r0(rp)
comclr,>> s1word,s2word,ret0 /*Set ret0 to 0 and skip if greater*/
ldi -2,ret0 /*Set ret0 to -2 when less */
bv r0(rp)
addi 1,ret0,ret0 /*Fix return value to be -1 or +1 */
/* s1 and s2 are the same string and therefore equal */
samestring:
bv r0(rp)
copy r0,ret0
/* s1 is null. Treat as string of nulls. Therefore return
the negative of s2's first byte. s2 cannot be zero. */
s1isnull:
ldbs 0(0,s2),ret0
bv r0(rp)
sub 0,ret0,ret0
/* s2 is null. Treat as string of nulls. Therefore return
s1's first byte. s1 cannot be zero. */
s2isnull:
bv r0(rp)
ldbs 0(0,s1),ret0
case_analysis:
blr tmp1,r0
nop
/*
Case statement for non-aligned cases (we've already
checked the aligned case).
NOTE: for non-aligned cases, the absolute shift value
gets loaded into tmp3.
*/
/* S2 S1 */
nop /* 00 00 can't happen */
nop
b shifts2 /* 00 01 */
ldi 8,tmp3 /* load shift count (delay slot) */
b shifts2 /* 00 10 */
ldi 16,tmp3 /* load shift count (delay slot) */
b shifts2 /* 00 11 */
ldi 24,tmp3 /* load shift count (delay slot) */
b shifts1_0 /* 01 00 */
ldi 8,tmp3 /* load shift count (delay slot) */
b eq_align1 /* 01 01 */
ldbs,ma 1(s1),s1word
b shifts2 /* 01 10 */
ldi 8,tmp3 /* load shift count (delay slot) */
b shifts2 /* 01 11 */
ldi 16,tmp3 /* load shift count (delay slot) */
b shifts1_0 /* 10 00 */
ldi 16,tmp3 /* load shift count (delay slot) */
b shifts1 /* 10 01 */
ldi 8,tmp3 /* load shift count (delay slot) */
b eq_align2 /* 10 10 */
ldhs,ma 2(s1),s1word
b shifts2 /* 10 11 */
ldi 8,tmp3 /* load shift count (delay slot) */
b shifts1_0 /* 11 00 */
ldi 24,tmp3 /* load shift count (delay slot) */
b shifts1 /* 11 01 */
ldi 16,tmp3 /* load shift count (delay slot) */
b shifts1 /* 11 10 */
ldi 8,tmp3 /* load shift count (delay slot) */
ldbs,ma 1(s1),s1word /* 11 11 */
ldbs,ma 1(s2),s2word
sub,= s1word,s2word,ret0 /* if not equal, we can return now */
bv,n r0(rp)
comclr,<> s1word,r0,ret0
bv,n r0(rp)
b loop /* fall into main loop */
ldwm 4(s1),s1word
eq_align1:
ldbs,ma 1(s2),s2word
sub,= s1word,s2word,ret0 /* if not equal, we can return now */
bv,n r0(rp)
comclr,<> s1word,r0,ret0
bv,n r0(rp)
/* fall through to half-word aligned case */
ldhs,ma 2(s1),s1word /* load next halfword */
eq_align2:
ldhs,ma 2(s2),s2word /* load next halfword */
/* form the mask: 0xffff0000 and mask leading nulls in s1word and s2word
so that we can fall into the main loop with word aligned data */
ldi 16,save
mtctl save,r11
zvdepi -2,32,save
or save,s1word,s1word
b loop_plus /* fall into main loop */
or save,s2word,s2word
/* s2's alignment is greater than s1's alignment, so we will shift s1 */
shifts1_0:
addi -4,s1,s1 /* fix up s1 due to earlier read */
shifts1:
extru s1,31,2,tmp1
extru s2,31,2,tmp5
dep r0,31,2,s1 /* Compute word address of s1 */
dep r0,31,2,s2 /* Compute word address of s2 */
ldwm 4(s1),s1word /* get first word of s1 */
ldwm 4(s2),s2word /* get first word of s2 */
combt,=,n r0,tmp1,masks2 /* Do we need to mask beginning of s1 */
sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,r11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word
masks2:
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,r11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word
ldi -1,tmp7 /* load tmp7 with 0xffffffff */
mtctl tmp3,r11 /* Move shift amount to CR11 */
more: uxor,nbz s1word,r0,r0 /* Is there a null in s1? */
b ends1
vshd tmp7,s1word,save
combf,=,n save,s2word,cmps1
ldwm 4(s1),tmp7
ldwm 4(s2),s2word
uxor,nbz tmp7,r0,r0 /* is there a null in s1? */
b ends1_0
vshd s1word,tmp7,save
combf,=,n save,s2word,cmps1
ldwm 4(s1),s1word
b more
ldwm 4(s2),s2word
cmps1: movb,tr save,s1word,wordcomparereturn
nop
ends1_0:
copy tmp7,s1word /* move tmp7 to s1word */
ends1:
combf,=,n save,s2word,nullins1 /* branch if no match */
copy save,s1word /* delay slot */
/* At this point, we know that we've read a null */
/* from s1, so we can't read more from s1 */
uxor,nbz save,r0,r0 /* are the strings equal? */
b,n samestring
vshd s1word,r0,s1word
b nullins1
ldwm 4(s2),s2word
/* s1's alignment is greater than s2's alignment, so we will shift s2 */
shifts2:
extru s1,31,2,tmp1
extru s2,31,2,tmp5
dep r0,31,2,s1 /* Compute word address of s1 */
dep r0,31,2,s2 /* Compute word address of s2 */
ldwm 4(s2),s2word /* get first word of s2 */
ldwm 4(s1),s1word /* get first word of s1 */
combt,=,n r0,tmp5,masks1 /* Do we need to mask beginning of s2 */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,r11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word
masks1:
sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,r11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word
ldi -1,tmp7 /* load tmp7 with 0xffffffff */
mtctl tmp3,r11 /* Move shift amount to CR11 */
more1: uxor,nbz s2word,r0,r0 /* is there a null in s2? */
b ends2
vshd tmp7,s2word,save
combf,=,n s1word,save,cmps2
ldwm 4(s2),tmp7
ldwm 4(s1),s1word
uxor,nbz tmp7,r0,r0 /* is there a null in s2? */
b ends2_0
vshd s2word,tmp7,save
combf,=,n s1word,save,cmps2
ldwm 4(s2),s2word
b more1
ldwm 4(s1),s1word
cmps2: movb,tr save,s2word,wordcomparereturn
nop
ends2_0:
copy tmp7,s2word /* move tmp7 to s2word */
ends2:
combf,=,n s1word,save,nullins1 /* branch if no match */
copy save,s2word /* delay slot */
/* At this point, we know that we've read a null */
/* from s2, so we can't read more from s2 */
uxor,nbz save,r0,r0 /* are the strings equal? */
b,n samestring
vshd s2word,r0,s2word
b nullins1
ldwm 4(s1),s1word
EXIT(strcmp)
|
4ms/metamodule-plugin-sdk
| 8,847
|
plugin-libc/newlib/libc/machine/hppa/strncpy.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision$ */
/*
* strncpy(s1, s2, n)
*
* Copy s2 to s1, truncating or null-padding to always copy n bytes
* return s1
*/
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define count r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
ENTRY(strncpy)
/*
 * d_addr = s1 (dest), s_addr = s2 (source), count = n; ret0 returns s1.
 * Nullification idiom: uxor,nbz x,r0,save sets save=x and the NEXT
 * instruction executes only when some byte of x is zero (null found).
 */
combt,= s_addr,r0,pad_null_bytes1 /* if s2==NULL then pad nulls and exit */
copy d_addr,ret0 /* The return value is defined to be the value of d_addr. DELAY SLOT*/
addibt,<,n -4,count,byteloop /* If count is <= 4 don't get fancy.*/
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address.*/
extru d_addr,31,2,tmp5 /* Extract the low two bits of the destination address.*/
add count,tmp5,count /* pre increment the count by the byte address so that the count is consistent with the word-oriented store loop*/
comb,<> tmp5,tmp1,not_aligned /* branch if tmp5<>tmp1. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT.*/
/* aligned*/
combt,= tmp5,r0,skip_mask
ldwm 4(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 4 (DELAY SLOT)*/
sh3add tmp5,r0,save /* compute mask in save*/
mtctl save,11
zvdepi -2,32,save
b skip_mask /* don't reload tmp1*/
or save,tmp1,tmp1 /* or mask with data*/
chunks:
ldwm 4(0,s_addr),tmp1 /* get a word*/
skip_mask:
uxor,nbz tmp1,r0,save /* check for null*/
b,n null1
addibf,< -4,count,chunks
stbys,b,m tmp1,4(0,d_addr) /* store word (delay slot)*/
/* back_porch last word to store*/
addibt,=,n 4,count,done /* if count = 0 we're, of course, done !*/
ldws 0(s_addr),tmp1 /* load up the back_porch*/
add d_addr,count,d_addr/* final store address is +1 too high !*/
sh3add count,r0, save /* setup right mask based on count*/
mtctl save,r11
zvdepi -2,32,save /*save now has left-hand mask*/
uaddcm r0,save,save /*form right hand mask */
or tmp1,save,tmp1 /*and insert data*/
uxor,nbz tmp1,r0,save /* check for null*/
b,n null2
bv 0(r2)
stbys,e tmp1,0(d_addr) /* done */
/* Begin non_aligned code. */
not_aligned:
sub,>= tmp5,tmp1,tmp3 /* compute the shift amt.and skip load if tmp5 > tmp1.*/
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++*/
zdep tmp3,28,29,tmp4 /* compute the number of bits to shift */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register.*/
addibt,<,n -4,count,chkchnk2 /* first step in pre adjustment of count for looping.*/
ldwm 4(0,s_addr),tmp2 /* get either first or second word from source. */
combt,= tmp5,r0,skip_mask2 /* don't mask if whole word is valid*/
vshd tmp1,tmp2,tmp3 /* position data ! (delay slot)*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* re-load the shift count into cr11 */
b skip_mask2
copy r0, tmp5 /* zero out tmp5 so we don't try to mask again*/
chunk2:
ldwm 4(0,s_addr),tmp2
vshd tmp1,tmp2,tmp3
skip_mask2:
uxor,nbz tmp3, r0, save
b,n null3
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get 2nd word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
uxor,nbz tmp3, r0, save
b,n null4
addibf,< -8,count,chunk2 /* If count is still >= 8 do another loop.*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
chkchnk2:
addibt,<,n 4,count,bp_0 /* if we don't have 4 bytes left then do the back porch (bp_0)*/
subchnk2: /* we have less than 8 chars to copy*/
ldwm 4(0,s_addr),tmp2 /* get next word !*/
combt,= tmp5,r0,skip_mask3
vshd tmp1,tmp2,tmp3 /* position data !*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* restore shift value again */
copy r0, tmp5 /* zero out tmp5 so we don't try to mask again*/
skip_mask3:
uxor,nbz tmp3,r0,save
b,n null4
b bp_1 /* we now have less than 4 bytes to move*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
bp_0:
copy tmp1,tmp2 /* switch registers used in the shift process.*/
addibt,<=,n 4,count,done /* if count = -4 this implies that count = 0 -> done */
bp_1:
ldwm 4(0,s_addr),tmp1 /* get final word ! */
vshd tmp2,tmp1,tmp3 /* position data !*/
uxor,sbz tmp3,r0,save /* if some-byte-zero */
b no_null /* don't goto no_null-find which null instead */
add d_addr,count,d_addr /* get d_addr ready for stbys,e */
extru,<> save,7,8,r0
b found_null5
copy r0, tmp5
extru,<> save,15,8,r0
b found_null5
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null5
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null5:
and tmp3,tmp5,tmp3 /* zero out tmp3 according to mask in tmp5*/
no_null:
bv 0(r2) /* were done*/
stbys,e tmp3,0(0,d_addr) /* store the data !*/
/* here we do ye old byte-at-a-time moves.*/
byteloop:
addibt,=,n 4,count,done
comb,= 0,s_addr,done
stbs r0,0(d_addr) /* store null in case s_addr == NULL */
ldbs,ma 1(s_addr),tmp1
encore:
combt,=,n tmp1,r0, pad_null_bytes1
stbs,ma tmp1,1(d_addr)
addibf,=,n -1,count,encore
ldbs,ma 1(s_addr),tmp1
b,n done
pnb_1:
addibt,=,n 4,count,done /* if count was already 0 then we're done*/
pad_null_bytes1:
combt,=,n count,r0,done /* if count==0 then exit */
pad_null_bytes2:
addibf,= -1,count,pad_null_bytes2
stbs,ma r0,1(d_addr)
b,n done
pad_nulls:
addibf,<=,n -4,count,pad_nulls
stwm r0,4(d_addr)
b,n pnb_1
null1:
extru,<> save,7,8,r0
b found_null1
copy r0, tmp5
extru,<> save,15,8,r0
b found_null1
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null1
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null1:
and tmp1,tmp5,tmp1 /*zero out tmp1 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp1,4(0,d_addr) /* first word (account for alignment)*/
null2: /* back porch case. We have less than 4 bytes to go.*/
extru,<> save,7,8,r0 /* is null in 1st byte? */
b found_null2
copy r0, tmp5
extru,<> save,15,8,r0 /* is null in 2nd byte? */
b found_null2
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
b found_null2 /* null must be in 3rd byte */
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
found_null2:
and tmp1,tmp5,tmp1 /*zero out tmp1 according to mask*/
bv 0(r2) /* we're done*/
stbys,e tmp1,0(0,d_addr) /* last word (back porch)*/
null3: /* not_aligned case where null is found in first of two words--adjust count*/
extru,<> save,7,8,r0
b found_null3
copy r0, tmp5
extru,<> save,15,8,r0
b found_null3
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null3
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null3:
addi 4,count,count /* fix count since null is in first of two words*/
and tmp3,tmp5,tmp3 /*zero out tmp3 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp3,4(d_addr)
null4: /* not_aligned case where null is found in second of two words*/
extru,<> save,7,8,r0
b found_null4
copy r0, tmp5
extru,<> save,15,8,r0
b found_null4
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null4
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null4:
and tmp3,tmp5,tmp3 /*zero out tmp3 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp3,4(d_addr)
done:
EXIT(strncpy)
|
4ms/metamodule-plugin-sdk
| 3,913
|
plugin-libc/newlib/libc/machine/m32c/setjmp.S
|
/*
Copyright (c) 2005 Red Hat Incorporated.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of Red Hat Incorporated may not be used to endorse
or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(__r8c_cpu__) || defined(__m16c_cpu__)
#define A16 1
#endif
/* We implement setjmp/longjmp much like the way gcc implements
exceptions - we create new stack frames, then switch to them and
return. Thus, the two setjmp's below each push all the relevent
registers, then copy the whole frame into the buffer (first $sp is
moved, then smovf copies the frame itself), and the two longjmps
restore $sp, copy the frame back into place, and issue the same
return as the setjmp would have used.
Since the sizes of registers differs between the 16 and 24 bit
models, we provide separate implementations for each rather than
trying to parameterize them.
Jump buffer sizes: 21 bytes for 16 bit, 34 bytes for 24 bit.
*/
.text
#ifdef A16 /* 16 bit versions (R8C/M16C: see #if at top of file) */
.global _setjmp
_setjmp:
enter #0
pushm r1,r2,r3,a0,a1,sb,fb
; At this point, the stack looks like this:
; ... [pc:3] [oldfb:2] <fb> [r1:2] [r2:2] [r3:2] [a0:2] [a1:2] [sb:2] [fb:2] <sp>
mov.w r1,a1 ; a1 is the destination of smovf (jmpbuf pointer arg in r1)
mov.b #0,r1h
stc sp,a0 ; r1h:a0 is the source of smovf
mov.w a0,[a1] ; first jmpbuf slot = saved sp
add.w #2,a1
mov.w #19,r3 ; frame is 19 bytes; plus two for sp = 21-byte buffer
smovf.b
; Return 0 to caller.
mov.w #0,r0
popm r1,r2,r3,a0,a1,sb,fb
exitd
.global _longjmp
_longjmp:
enter #0
mov.w r1,a0 ; pointer to jump buf
mov.w r2,r0 ; setjmp's "new" return value
mov.b #0,r1h ; r1h: a0 is the source, now jmpbuf
mov.w [a0],a1 ; dest is new stack
ldc a1,sp
add.w #2,a0
mov.w #19,r3
smovf.b ; copy saved frame back onto the restored stack
;; now return to our caller with this newly restored frame
popm r1,r2,r3,a0,a1,sb,fb
exitd
#else /* 24 bit versions */
.global _setjmp
_setjmp:
enter #0
pushm r1,r2,r3,a0,a1,sb,fb
; At this point, the stack looks like this:
; ... [jbuf:4] [pc:4] [oldfb:4] <fb> [r1:2] [r2:2] [r3:2] [a0:4] [a1:4] [sb:4] [fb:4] <sp>
mov.l 8[fb],a1 ; a1 is the destination of smovf (jmpbuf passed on stack)
stc sp,a0 ; r1h:a0 is the source of smovf
mov.l a0,[a1] ; first jmpbuf slot = saved sp
add.l #4,a1
mov.w #30,r3 ; frame is 30 bytes; plus four for sp = 34-byte buffer
smovf.b
; Return 0 to caller.
mov.w #0,r0
popm r1,r2,r3,a0,a1,sb,fb
exitd
.global _longjmp
_longjmp:
enter #0
; ... [rv:2] [jbuf:4] [pc:4] [oldfb:4] <fb>
mov.l 8[fb],a0 ; pointer to jump buf
mov.w 12[fb],r0 ; setjmp's "new" return value
mov.l [a0],a1 ; dest is new stack
ldc a1,sp
add.l #4,a0
mov.w #30,r3
smovf.b ; copy saved frame back onto the restored stack
;; now return to our caller with this newly restored frame
popm r1,r2,r3,a0,a1,sb,fb
exitd
#endif
|
4ms/metamodule-plugin-sdk
| 2,426
|
plugin-libc/newlib/libc/machine/z8k/memcpy.S
|
/*
* memcpy routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memcpy(void *dest, const void *src, size_t length);
*/
name "memcpy.S"
.text
even
global _memcpy
global memmove_entry
_memcpy:
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
ldl rr4,rr14(#8)
ldl rr2,rr14(#12)
#else
pushl @rr14,rr6 /* save dest so it can be popped as the return value */
#endif
/* rr2 - length (high word ignored)
* rr4 - src
* rr6 - dest
*/
testl rr2
jr z,finish
memmove_entry: /* external entry point from memmove */
bitb rl7,#0 /* odd destination address? */
jr nz,testsrc
bitb rl5,#0 /* odd source address? */
jr nz,odd_copy /* dest even, src odd: bytewise copy */
jr t,even_copy /* dest even, src even */
testsrc:
bitb rl5,#0
jr z,odd_copy /* src even, dest odd */
ldib @rr6,@rr4,r3 /* both odd: copy one byte to align both */
jr ov,finish /* jump if r3 is zero now */
/* copy words */
even_copy:
ld r2,r3 /* remember length */
srl r3,#1
jr z,no_words
ldir @rr6,@rr4,r3
no_words:
bitb rl2,#0 /* odd length? */
jr z,finish
ldib @rr6,@rr4,r2 /* yes, copy last byte */
jr finish
/* copy bytes */
odd_copy:
ldirb @rr6,@rr4,r3
finish:
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
#else
popl rr2,@rr14 /* return value = original dest */
#endif
#else /* above Z8001, below Z8002 */
unsegm
#ifdef __STD_CALL__
ld r7,r15(#2)
ld r6,r15(#4)
ld r5,r15(#6)
#else
ld r2,r7 /* buffer pointer return value */
#endif
/* r5 - length
* r6 - src
* r7 - dest
*/
test r5
jr z,finish
memmove_entry: /* external entry point from memmove */
bitb rl7,#0 /* odd destination address? */
jr nz,testsrc
bitb rl6,#0 /* odd source address? */
jr nz,odd_copy /* dest even, src odd: bytewise copy */
jr t,even_copy /* dest even, src even */
testsrc:
bitb rl6,#0
jr z,odd_copy /* src even, dest odd */
ldib @r7,@r6,r5 /* both odd: copy one byte to align both */
jr ov,finish /* jump if r5 is zero now */
/* copy words */
even_copy:
ld r4,r5 /* remember length */
srl r5,#1
jr z,no_words
ldir @r7,@r6,r5
no_words:
bitb rl4,#0 /* odd length? */
jr z,finish
ldib @r7,@r6,r4 /* yes, copy last byte */
jr finish
/* copy bytes */
odd_copy:
ldirb @r7,@r6,r5
finish:
#ifdef __STD_CALL__
ld r7,r15(#2)
#endif
#endif /* Z8002 */
ret
.end
|
4ms/metamodule-plugin-sdk
| 1,905
|
plugin-libc/newlib/libc/machine/z8k/setjmp.S
|
.global _setjmp
.global _longjmp
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
_setjmp:
ldl rr6,rr14(#4) ! get argument
ldl rr2,@rr14 ! fetch pc
ldl @rr6,rr2 ! save it
ldl rr6(#16),rr8
ldl rr6(#4),rr10
ldl rr6(#8),rr12 ! remember frame pointer
ldl rr6(#12),rr14 ! remember stack pointer
ldk r7,#0
ret t
_longjmp:
ldl rr4,rr14(#4) ! get first argument
ld r7,rr14(#8) ! get return value
ldl rr8,rr4(#16)
ldl rr10,rr4(#4)
ldl rr12,rr4(#8) ! restore old frame pointer
ldl rr14,rr4(#12) ! restore old stack pointer
ldl rr4,@rr4 ! return address
inc r15,#4
jp @rr4
#else /* above __STD_CALL_, below not */
_setjmp:
ldl rr2,@rr14 ! fetch pc
ldl @rr6,rr2 ! save it
ldl rr6(16),rr8
ldl rr6(4),rr10
ldl rr6(8),rr12 ! and the other special regs
ldl rr6(12),rr14
ldk r2,#0
ret t
_longjmp:
ld r2,r5 ! get return value
ldl rr4,rr6(0)
ldl rr8,rr6(16)
ldl rr10,rr6(4)
ldl rr12,rr6(8)
ldl rr14,rr6(12)
inc r15,#4
jp @rr4
#endif /* not __STD_CALL__ */
#else /* above Z8001, below Z8002 */
unseg
#ifdef __STD_CALL__
_setjmp:
ld r7,r15(#2) ! get argument
ld r2,@r15 ! fetch pc
ld @r7,r2 ! save it
ldl r7(#14),rr8
ldl r7(#2),rr10
ldl r7(#6),rr12 ! remember frame pointer
ldl r7(#10),rr14 ! remember stack pointer
ldk r7,#0
ret t
_longjmp:
ld r4,r15(#2) ! get first argument (jmp_buf)
ld r7,r15(#4) ! get return value
ldl rr8,r4(#14)
ldl rr10,r4(#2)
ldl rr12,r4(#6) ! restore old frame pointer
ldl rr14,r4(#10) ! restore old stack pointer
ld r4,@r4 ! return address
inc r15,#2
jp @r4
#else /* above __STD_CALL_, below not */
_setjmp:
ld r2,@r15 ! fetch pc
ld @r7,r2 ! save it
ldl r7(4),rr10
ldl r7(8),rr12 ! and the other special regs
ldl r7(12),rr14
ldk r2,#0
ret t
_longjmp:
ld r2,r6 ! get return value
ld r4,@r7
ldl rr10,r7(4)
ldl rr12,r7(8)
ldl rr14,r7(12)
inc r15,#2
jp @r4
#endif /* not __STD_CALL__ */
#endif /* Z8002 version */
|
4ms/metamodule-plugin-sdk
| 3,306
|
plugin-libc/newlib/libc/machine/z8k/memmove.S
|
/*
* memmove routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memmove(void *dest, const void *src, size_t length);
*/
name "memmove.S"
.text
even
global _memmove
_memmove:
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
ldl rr4,rr14(#8)
ldl rr2,rr14(#12)
#else
pushl @rr14,rr6
#endif
/* rr2 - length (high word ignored)
* rr4 - src
* rr6 - dest
*/
testl rr2
jr z,finish
/* check for destructive overlap (src < dest && dest < src + length) */
cpl rr6,rr4
jp ule,memmove_entry /* non-destructive, let memcpy do the work */
ldl rr0,rr2
addl rr0,rr4 /* rr0 = src + length */
cpl rr0,rr6
jp ult,memmove_entry /* non-destructive, let memcpy do the work */
/* set-up pointers to copy backwards, add (length - 1) */
addl rr4,rr2 /* src + length */
addl rr6,rr2 /* dest + length */
subl rr4,#1
subl rr6,#1
/* check alignment */
bitb rl7,#0 /* odd destination address? */
jr z,testsrc
bitb rl5,#0 /* odd source address? */
jr z,odd_copy
jr even_copy
testsrc:
bitb rl5,#0
jr nz,odd_copy /* src even, dest odd */
lddb @rr6,@rr4,r3
jr ov,finish /* jump if r5 is zero now */
/* copy words */
even_copy:
ld r2,r3 /* remember length */
srl r3,#1
/* jr z,no_words it cannot be zero here */
dec r5,#1
dec r7,#1
lddr @rr6,@rr4,r3
no_words:
bitb rl2,#0 /* odd length? */
jr z,finish
inc r5,#1
inc r7,#1
lddb @rr6,@rr4,r2 /* yes, copy last byte */
jr finish
/* copy bytes */
odd_copy:
lddrb @rr6,@rr4,r3
finish:
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
#else
popl rr2,@rr14
#endif
#else /* above Z8001, below Z8002 */
unsegm
#ifdef __STD_CALL__
ld r7,r15(#2)
ld r6,r15(#4)
ld r5,r15(#6)
#else
ld r2,r7 /* buffer pointer return value */
#endif
/* r5 - length
* r6 - src
* r7 - dest
*/
test r5
jr z,finish
/* check for destructive overlap (src < dest && dest < src + length) */
cp r7,r6
jp ule,memmove_entry /* non-destructive, let memcpy do the work */
ld r0,r5
add r0,r6 /* r0 = src + length */
cp r0,r7
jp ult,memmove_entry /* non-destructive, let memcpy do the work */
/* set-up pointers to copy backwards, add (length - 1) */
add r6,r5 /* src + length */
add r7,r5 /* dest + length */
dec r6,#1
dec r7,#1
/* check alignment */
bitb rl7,#0 /* odd destination address? */
jr z,testsrc
bitb rl6,#0 /* odd source address? */
jr z,odd_copy
jr even_copy
testsrc:
bitb rl6,#0
jr nz,odd_copy /* src even, dest odd */
lddb @r7,@r6,r5
jr ov,finish /* jump if r5 is zero now */
/* copy words */
even_copy:
ld r4,r5 /* remember length */
srl r5,#1
/* jr z,no_words it cannot be zero here */
dec r6,#1
dec r7,#1
lddr @r7,@r6,r5
no_words:
bitb rl4,#0 /* odd length? */
jr z,finish
inc r6,#1
inc r7,#1
lddb @r7,@r6,r4 /* yes, copy last byte */
jr finish
/* copy bytes */
odd_copy:
lddrb @r7,@r6,r5
finish:
#ifdef __STD_CALL__
ld r7,r15(#2)
#endif
#endif /* Z8002 */
ret
.end
|
4ms/metamodule-plugin-sdk
| 1,908
|
plugin-libc/newlib/libc/machine/z8k/memset.S
|
/*
* memset routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memset(void *buffer, int value, size_t length);
*/
name "memset.S"
.text
even
global _memset
_memset:
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
ld r5,rr14(#8)
ldl rr2,rr14(#10)
#else
pushl @rr14,rr6
#endif
/* rr2 - length
* rl5 - value
* rr6 - buffer
*/
testl rr2
jr z,finish
ldb rh5,rl5
ld r1,r5 /* r1 contains value */
bit r7,#0
jr z,not_odd
ldb @rr6,rl1
inc r7,#1
subl rr2,#1
jr z,finish
not_odd:ld r0,r3 /* remember length */
srl r3,#1
jr z,no_words
ldl rr4,rr6
ld @rr6,r1
inc r7,#2
dec r3,#1
jr z,no_words
ldir @rr6,@rr4,r3 /* fill words */
no_words:
bit r0,#0 /* one byte remaining? */
jr z,finish
ldb @rr6,rl1
finish:
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
#else
popl rr2,@rr14
#endif
#else /* above Z8001, below Z8002 */
unsegm
#ifdef __STD_CALL__
ld r7,r15(#2)
ld r6,r15(#4)
ld r5,r15(#6)
#else
ld r2,r7 /* buffer pointer return value */
#endif
/* r5 - length
* r6 - value
* r7 - buffer
*/
test r5
jr z,finish
ldb rh6,rl6
ld r1,r6 /* r1 contains value */
bit r7,#0
jr z,not_odd
ldb @r7,rl1
inc r7,#1
dec r5,#1
jr z,finish
not_odd:ld r0,r5 /* remember length */
srl r5,#1
jr z,no_words
ld r4,r7
ld @r7,r1
inc r7,#2
dec r5,#1
jr z,no_words
ldir @r7,@r4,r5 /* fill words */
no_words:
bit r0,#0 /* one byte remaining? */
jr z,finish
ldb @r7,rl1
finish:
#ifdef __STD_CALL__
ld r7,r15(#2)
#endif
#endif /* Z8002 */
ret
.end
|
4ms/metamodule-plugin-sdk
| 2,911
|
plugin-libc/newlib/libc/machine/z8k/memcmp.S
|
/*
* memcmp routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* int memcmp(const void *b1, const void *b2, size_t length);
*/
name "memcmp.S"
.text
even
global _memcmp
_memcmp:
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
ldl rr4,rr14(#8)
ldl rr2,rr14(#12)
#endif
/* rr2 - length (high word ignored)
* rr4 - b2
* rr6 - b1
*/
clr r1 /* initialize return value */
testl rr2
jr z,finish
bitb rl7,#0 /* odd b1? */
jr nz,testb2
bitb rl5,#0 /* odd b2? */
jr nz,odd_cmp /* b1 even, b2 odd */
jr t,even_cmp
testb2:
bitb rl5,#0
jr z,odd_cmp /* b2 even, b1 odd */
cpsib @rr6,@rr4,r3,eq
jr z,beq /* bytes are the same */
jr t,byte_diff
beq: jr ov,finish /* jump if r3 is zero now */
/* compare words */
even_cmp:
ld r2,r3 /* remember length */
srl r3,#1
jr z,no_words
cpsir @rr6,@rr4,r3,ne
jr nz,no_words
dec r7,#2
dec r5,#2 /* point to different bytes */
ldk r3,#2
jr t,odd_cmp
no_words:
bitb rl2,#0 /* odd length? */
jr z,finish
cpsib @rr6,@rr4,r3,eq
jr z,finish /* last bytes are the same */
jr t,byte_diff
/* compare bytes */
odd_cmp:
cpsirb @rr6,@rr4,r3,ne
jr nz,finish
byte_diff:
dec r7,#1
dec r5,#1 /* point to different bytes */
ldb rl1,@rr6
clr r0
ldb rl0,@rr4
sub r1,r0
finish: /* set return value */
#ifdef __STD_CALL__
ld r7,r1
#else
ld r2,r1
#endif
#else /* above Z8001, below Z8002 */
unsegm
#ifdef __STD_CALL__
ld r7,r15(#2)
ld r6,r15(#4)
ld r5,r15(#6)
#endif
/* r5 - length
* r6 - b2
* r7 - b1
*/
clr r1 /* initialize return value */
test r5
jr z,finish
bitb rl7,#0 /* odd destination address? */
jr nz,testb2
bitb rl6,#0 /* odd source address? */
jr nz,odd_cmp /* b1 even, b2 odd */
jr t,even_cmp
testb2:
bitb rl6,#0
jr z,odd_cmp /* b2 even, b1 odd */
cpsib @r7,@r6,r5,eq
jr z,beq /* bytes are the same */
jr t,byte_diff
beq: jr ov,finish /* jump if r3 is zero now */
/* compare words */
even_cmp:
ld r4,r5 /* remember length */
srl r5,#1
jr z,no_words
cpsir @r7,@r6,r5,ne
jr nz,no_words
dec r7,#2
dec r6,#2 /* point to different bytes */
ldk r5,#2
jr t,odd_cmp
no_words:
bitb rl4,#0 /* odd length? */
jr z,finish
cpsib @r7,@r6,r4,eq
jr z,finish /* last bytes are the same */
jr t,byte_diff
/* compare bytes */
odd_cmp:
cpsirb @r7,@r6,r5,ne
jr nz,finish
byte_diff:
dec r7,#1
dec r6,#1 /* point to different bytes */
ldb rl1,@r7
clr r0
ldb rl0,@r6
sub r1,r0
finish:
#ifdef __STD_CALL__
ld r7,r1
#else
ld r2,r1
#endif
#endif /* Z8002 */
ret
.end
|
4ms/metamodule-plugin-sdk
| 6,260
|
plugin-libc/newlib/libc/machine/spu/spu_timer_flih.S
|
/*
(C) Copyright IBM Corp. 2008
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of IBM nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* First-level interrupt handler. */
/* The following two convenience macros assist in the coding of the
saving and restoring the volatile register starting from register
2 up to register 79.
saveregs first, last Saves registers from first to the last.
restoreregs first, last Restores registers from last down to first.
Note: first must be less than or equal to last. */
.macro saveregs first, last
stqd $\first, -(STACK_SKIP+\first)*16($SP)
.if \last-\first
saveregs "(\first+1)",\last
.endif
.endm
.macro restoreregs first, last
lqd $\last, (82-\last)*16($SP)
.if \last-\first
restoreregs \first,"(\last-1)"
.endif
.endm
.section .interrupt,"ax"
.align 3
.type spu_flih, @function
spu_flih:
/* Adjust the stack pointer to skip the maximum register save area
(STACK_SKIP quadword registers) in case an interrupt occurred while
executing a leaf function that used the stack area without actually
allocating its own stack frame. */
.set STACK_SKIP, 125
/* Save the current link register on a new stack frame for the
normal spu_flih() version of this file. */
stqd $0, -(STACK_SKIP+80)*16($SP)
stqd $SP, -(STACK_SKIP+82)*16($SP) /* Save back chain pointer. */
saveregs 2, 39
il $2, -(STACK_SKIP+82)*16 /* Stack frame size. */
rdch $3, $SPU_RdEventStat /* Read event status. */
rdch $6, $SPU_RdEventMask /* Read event mask. */
hbrp /* Open a slot for instruction prefetch. */
saveregs 40,59
clz $4, $3 /* Get first slih index. */
stqd $6, -(STACK_SKIP+1)*16($SP) /* Save event mask on stack. */
saveregs 60, 67
/* Do not disable/ack the decrementer event here.
The timer library manages this and expects it
to be enabled upon entry to the SLIH. */
il $7, 0x20
andc $5, $3, $7
andc $7, $6, $5 /* Clear event bits. */
saveregs 68, 69
wrch $SPU_WrEventAck, $3 /* Ack events(s) - include decrementer event. */
wrch $SPU_WrEventMask, $7 /* Disable event(s) - exclude decrementer event. */
saveregs 70, 79
a $SP, $SP, $2 /* Instantiate flih stack frame. */
next_event:
/* Fetch and dispatch the event handler for the first non-zero event. The
dispatch handler is indexed into the __spu_slih_handlers array using the
count of zero off the event status as an index. */
ila $5, __spu_slih_handlers /* Slih array offset. */
shli $4, $4, 2 /* Slih entry offset. */
lqx $5, $4, $5 /* Load slih address. */
rotqby $5, $5, $4 /* Rotate to word 0. */
bisl $0, $5 /* Branch to slih. */
clz $4, $3 /* Get next slih index. */
brnz $3, next_event
lqd $2, 81*16($SP) /* Read event mask from stack. */
restoreregs 40, 79
wrch $SPU_WrEventMask, $2 /* Restore event mask. */
hbrp /* Open a slot for instruction pre-fetch. */
restoreregs 2, 39
/* Restore the link register from the new stack frame for the
normal spu_flih() version of this file. */
lqd $0, 2*16($SP)
lqd $SP, 0*16($SP) /* restore stack pointer from back chain ptr. */
irete /* Return from interrupt and re-enable interrupts. */
.size spu_flih, .-spu_flih
/* spu_slih_handlers[]
Here we initialize 33 default event handlers. The first entry in this array
corresponds to the event handler for the event associated with bit 0 of
Channel 0 (External Event Status). The 32nd entry in this array corresponds
to bit 31 of Channel 0 (DMA Tag Status Update Event). The 33rd entry in
this array is a special case entry to handle "phantom events" which occur
when the channel count for Channel 0 is 1, causing an asynchronous SPU
interrupt, but the value returned for a read of Channel 0 is 0. The index
calculated into this array by spu_flih() for this case is 32, hence the
33rd entry. */
.data
.align 4
.extern __spu_default_slih
.global __spu_slih_handlers
.type __spu_slih_handlers, @object
__spu_slih_handlers:
.rept 33
.long __spu_default_slih
.endr
.size __spu_slih_handlers, .-__spu_slih_handlers
|
4ms/metamodule-plugin-sdk
| 1,585
|
plugin-libc/newlib/libc/machine/spu/sniprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#define snprintf sniprintf
#include "snprintf.S"
|
4ms/metamodule-plugin-sdk
| 2,250
|
plugin-libc/newlib/libc/machine/spu/fprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
.text
.align 4
GLOBL fprintf
.type fprintf, @function
fprintf:
stqd $0, 16($sp) /* save caller address */
il $2, 2 /* number of fixed arguments */
brsl $0, __stack_reg_va /* save register to the stack frame */
brsl $0, __check_init
lqd $3, 16*2($sp) /* $3 <- saved FP on the stack frame */
lqd $2, 0($3) /* FP = fp->_fp */
rotqby $2, $2, $3
stqd $2, 16*2($sp) /* replace FP on the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VFPRINTF
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2)
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size fprintf, .-fprintf
|
4ms/metamodule-plugin-sdk
| 5,817
|
plugin-libc/newlib/libc/machine/spu/stack_reg_va.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file contains code use to construct a PIC, spu side, syscall
* function with variable parameters in accordance with the CBE ABI.
*
* This function is equivalent to constructing a va_list structure and
* calling the va_list form of the function. Therefore, for example,
* a printf function stack frame will look like this:
*
* | Stack | high memory
* | Parms |
* | |
* |------------|
* | Link Reg |
* |------------|
* | Back Chain |<-----. <---- input SP
* |------------| |
* | Reg 74 | |
* |------------| |
* | Reg 73 | |
* |------------| |
* // ... // |
* |------------| |
* | Reg 5 | |
* |------------| |
* | Reg 4 |<--. |
* |------------| | |
* va_list.| call_stack |------'
* |------------| |
* va_list.| next_arg |---'
* |------------|
* | format (r3)| <---- start of parameters
* |------------| |------------|
* | stack | | |
* | code | |(Back Chain)| <---- output SP
* | 1-3 qwords | <---- code_ptr `------------'
* `------------'
* low memory
*
* This was written in assembly so that it is smaller than what would
* be produced by using va_start.
*/
#include "c99ppe.h"
#define parms $2 /* Number of fixed arguments */
#define offset $67
#define flag $68
#define regdec $69
#define link $70
#define code_ptr $71
#define ptr $72
#define inst $73
#define tmp $74
.text
.global __stack_reg_va
.type __stack_reg_va, @function
__stack_reg_va:
/* Save registers 69-74 explicitly so that we have some
* working registers.
*/
stqd $74, 16*(-1)($sp)
stqd $73, 16*(-2)($sp)
stqd $72, 16*(-3)($sp)
stqd $71, 16*(-4)($sp)
stqd $70, 16*(-5)($sp)
stqd $69, 16*(-6)($sp)
/* Construct self-modifying stack code that saves the remaining
* volatile registers onto the stack.
*/
il regdec, -1 /* for decrement register value in save instruction */
shlqbyi regdec, regdec, 12
il tmp, -(SPE_STACK_REGS+2+3)*16
a code_ptr, $sp, tmp
lqr tmp, save_regs_1 /* store stack code */
stqd tmp, 0(code_ptr)
lqr inst, save_regs_2
ai ptr, $sp, 16*(-6)
sync
bisl link, code_ptr /* branch to the constructed stack code */
/* Adjust pointer so that it points to the first variable
* argument on the stack.
*/
ai offset, parms, -1 /* offset = parms - 1 */
mpyi offset, offset, 16 /* offset = offset * 16 */
a ptr, ptr, offset /* ptr = ptr + offset */
/* Store the va_list to the parameter list.
*/
stqd $sp, 16*(-1)(ptr)
stqd ptr, 16*(-2)(ptr)
/* Make $3 store address.
*/
ai offset, parms, 2 /* offset = parms + 2 */
mpyi offset, offset, -16 /* offset = offset * -16 */
a ptr, ptr, offset /* ptr = ptr + offset */
/* Save all the fixed (non-variable arguments on the stack)
*/
ceqi flag, parms, 0x01 /* if(parms==1) flag=0xFFFFFFFF */
brnz flag, reg_3 /* if(flag!=0) jump */
ceqi flag, parms, 0x02 /* if(parms==2) flag=0xFFFFFFFF */
brnz flag, reg_4 /* if(flag!=0) jump */
stqd $5, 16*2(ptr)
reg_4:
stqd $4, 16*1(ptr)
reg_3:
stqd $3, 0(ptr)
il $3, -16*(SPE_STACK_REGS+2+2)
stqx $sp, $3, $sp /* save back chain */
a $sp, $sp, $3
bi $0 /* return to caller */
/***************************** stack code *********************************************/
/* The following code is copied into the stack for re-entract,
* self-modified, code execution. This code copies the volatile
* registers into a va_list parameter array.
*/
.balignl 16, 0
save_regs_1:
stqd inst, 16(code_ptr) /* store instruction */
sync
a inst, inst, regdec /* decrement register number in the instruction */
ceqbi tmp, inst, 3 /* if (reg-num == 3) tmp = 0x000000FF 000..0 */
save_regs_2:
stqd $68, -16(ptr)
ai ptr, ptr, -16
brz tmp, save_regs_1 /* if (tmp == 0) jump */
bi link /* finish to make va_list */
.size __stack_reg_va, .-__stack_reg_va
|
4ms/metamodule-plugin-sdk
| 4,238
|
plugin-libc/newlib/libc/machine/spu/setjmp.S
|
/*
(C) Copyright IBM Corp. 2005, 2006
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of IBM nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Author: Andreas Neukoetter (ti95neuk@de.ibm.com)
*/
/*
int setjmp( jmp_buf env );
*/
.text
.align 2
.global setjmp
.type setjmp, @function
setjmp:
stqd $80, 2*16($3)
stqd $81, 3*16($3)
stqd $82, 4*16($3)
stqd $83, 5*16($3)
stqd $84, 6*16($3)
stqd $85, 7*16($3)
stqd $86, 8*16($3)
stqd $87, 9*16($3)
stqd $88, 10*16($3)
stqd $89, 11*16($3)
stqd $90, 12*16($3)
stqd $91, 13*16($3)
stqd $92, 14*16($3)
stqd $93, 15*16($3)
stqd $94, 16*16($3)
stqd $95, 17*16($3)
stqd $96, 18*16($3)
stqd $97, 19*16($3)
stqd $98, 20*16($3)
stqd $99, 21*16($3)
stqd $100, 22*16($3)
stqd $101, 23*16($3)
stqd $102, 24*16($3)
stqd $103, 25*16($3)
stqd $104, 26*16($3)
stqd $105, 27*16($3)
stqd $106, 28*16($3)
stqd $107, 29*16($3)
stqd $108, 30*16($3)
stqd $109, 31*16($3)
stqd $110, 32*16($3)
stqd $111, 33*16($3)
stqd $112, 34*16($3)
stqd $113, 35*16($3)
stqd $114, 36*16($3)
stqd $115, 37*16($3)
stqd $116, 38*16($3)
stqd $117, 39*16($3)
stqd $118, 40*16($3)
stqd $119, 41*16($3)
hbr setjmp_ret, $0
lnop # pipe1 bubble added for instruction fetch
stqd $120, 42*16($3)
stqd $121, 43*16($3)
stqd $122, 44*16($3)
stqd $123, 45*16($3)
stqd $124, 46*16($3)
stqd $125, 47*16($3)
stqd $126, 48*16($3)
stqd $127, 49*16($3)
stqd $0, 0*16($3)
stqd $1, 1*16($3)
il $3, 0
setjmp_ret:
bi $0
.size setjmp, .-setjmp
/*
int longjmp( jmp_buf env, int val );
*/
.text
.align 2
.global longjmp
.type longjmp, @function
longjmp:
lr $127, $1
lqd $0, 0*16($3)
lqd $1, 1*16($3)
sf $126, $127, $1
rotqbyi $126, $126, 12
fsmbi $127, 0x0F00
and $126, $126, $127
a $1, $1, $126
# restore all the non-volatile registers
lqd $80, 2*16($3)
lqd $81, 3*16($3)
lqd $82, 4*16($3)
lqd $83, 5*16($3)
lqd $84, 6*16($3)
lqd $85, 7*16($3)
lqd $86, 8*16($3)
lqd $87, 9*16($3)
lqd $88, 10*16($3)
lqd $89, 11*16($3)
lqd $90, 12*16($3)
lqd $91, 13*16($3)
lqd $92, 14*16($3)
lqd $93, 15*16($3)
lqd $94, 16*16($3)
lqd $95, 17*16($3)
lqd $96, 18*16($3)
lqd $97, 19*16($3)
lqd $98, 20*16($3)
lqd $99, 21*16($3)
lqd $100, 22*16($3)
lqd $101, 23*16($3)
lqd $102, 24*16($3)
lqd $103, 25*16($3)
lqd $104, 26*16($3)
lqd $105, 27*16($3)
lqd $106, 28*16($3)
lqd $107, 29*16($3)
lqd $108, 30*16($3)
lqd $109, 31*16($3)
hbr longjmp_ret, $0
lqd $110, 32*16($3)
lqd $111, 33*16($3)
lqd $112, 34*16($3)
lqd $113, 35*16($3)
lqd $114, 36*16($3)
lqd $115, 37*16($3)
lqd $116, 38*16($3)
lqd $117, 39*16($3)
lqd $118, 40*16($3)
lqd $119, 41*16($3)
lqd $120, 42*16($3)
lqd $121, 43*16($3)
lqd $122, 44*16($3)
lqd $123, 45*16($3)
lqd $124, 46*16($3)
lqd $125, 47*16($3)
ceqi $5, $4, 0
lqd $126, 48*16($3)
lqd $127, 49*16($3)
sf $3, $5, $4
longjmp_ret:
bi $0
.size longjmp, .-longjmp
|
4ms/metamodule-plugin-sdk
| 1,576
|
plugin-libc/newlib/libc/machine/spu/iscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#define scanf iscanf
#include "scanf.S"
|
4ms/metamodule-plugin-sdk
| 2,244
|
plugin-libc/newlib/libc/machine/spu/fscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"

/*---------------------------------------------------------------------
 * int fscanf(FILE *fp, const char *fmt, ...)
 *
 * SPU-side stub: spills the variadic register arguments to the stack
 * frame (__stack_reg_va), rewrites the saved FILE* slot with fp->_fp,
 * then signals the PPE (SPE_C99_SIGNALCODE / SPE_C99_VFSCANF) to run
 * the actual vfscanf on the SPU's behalf via __send_to_ppe.
 *
 * Clobbers: $0, $2-$5 (plus whatever the called helpers use).
 * Stack:    frame of 16*(SPE_STACK_REGS+2+2) bytes, created inside
 *           __stack_reg_va and released here before returning.
 *-------------------------------------------------------------------*/
	.text
	.align 4
GLOBL fscanf
	.type fscanf, @function
fscanf:
	stqd $0, 16($sp) /* save caller return address ($0 = link reg) */
	il $2, 2 /* number of fixed (non-variadic) arguments: fp, fmt */
	brsl $0, __stack_reg_va /* spill argument registers to the stack frame */
	brsl $0, __check_init /* ensure the C99 PPE-assist machinery is set up */
	lqd $3, 16*2($sp) /* $3 <- quadword holding the saved FILE* (fp) */
	lqd $2, 0($3) /* load quadword containing fp->_fp */
	rotqby $2, $2, $3 /* rotate by the low bytes of the address so the
			     loaded word lands in the preferred slot —
			     standard SPU unaligned-word-load idiom
			     (NOTE(review): assumes _fp is at offset 0) */
	stqd $2, 16*2($sp) /* replace saved FILE* with the PPE-side fp->_fp */
	il $3, SPE_C99_SIGNALCODE /* signal-group code for C99 I/O requests */
	il $4, SPE_C99_VFSCANF /* opcode: PPE should perform vfscanf */
	ai $5, $sp, 16*2 /* $5 = address of the argument block (the $3 save slot) */
	brsl $0, __send_to_ppe /* ship request to PPE; blocks until it completes */
	il $2, 16*(SPE_STACK_REGS+2+2) /* size of the frame __stack_reg_va built */
	a $sp, $sp, $2 /* release the stack frame */
	lqd $0, 16($sp) /* reload caller return address */
	bi $0 /* return to caller */
	.size fscanf, .-fscanf
|
4ms/metamodule-plugin-sdk
| 1,582
|
plugin-libc/newlib/libc/machine/spu/siprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Integer-only sprintf variant: build the full sprintf.S implementation
   with every "sprintf" token renamed to "siprintf" via the preprocessor,
   so the two entry points share one source verbatim.  */
#define sprintf siprintf
#include "sprintf.S"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.