repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
stsp/newlib-ia16
| 2,142
|
newlib/libc/machine/m68hc11/setjmp.S
|
/* setjmp/longjmp routines for M68HC11 & M68HC12.
* Copyright (C) 1999, 2000, 2001, 2002 Stephane Carrez (stcarrez@nerim.fr)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
/* Build-time configuration.
   With 32-bit ints, longjmp's "val" argument starts 4 bytes up the
   stack (past the 2-byte return address, big-endian low word at +4)
   and the INT32() lines — which clear X to zero the high word of the
   32-bit result returned in X:D — are emitted.  With 16-bit ints the
   offset is 2 and INT32() expands to nothing.  */
#if __INT__ == 32
# define val 4
# define INT32(X) X
#else
# define val 2
# define INT32(X)
#endif
/* The gcc soft registers (_.frame, _.d1 .. _.d8) live in the direct
   page on the 68HC11 and are reached with the '*' direct-addressing
   prefix; the 68HC12 code uses plain addressing.  */
#ifdef mc6811
# define REG(X) *X
#else
# define REG(X) X
#endif
.sect .text
.global setjmp
.global longjmp
/* int setjmp (jmp_buf buf);
   buf arrives in D.  Save the caller's return address, stack pointer,
   frame pointer and the gcc soft registers _.d1 .. _.d8 into buf and
   return 0.  Buffer layout (2-byte slots): [0]=return PC, [2]=SP,
   [4]=_.frame, [6]..[20]=_.d1.._.d8.  */
#ifdef mc6811
setjmp:
xgdx /* X = buf (argument was in D) */
tsy /* Y = SP + 1: points at the pushed return PC */
ldd 0,y /* buf[0] = return PC */
std 0,x
sty 2,x /* buf[2] = SP (+1 TSY bias; TXS in longjmp undoes it) */
ldd REG(_.frame) /* save frame pointer and soft registers */
std 4,x
ldd REG(_.d1)
std 6,x
ldd REG(_.d2)
std 8,x
ldd REG(_.d3)
std 10,x
ldd REG(_.d4)
std 12,x
ldd REG(_.d5)
std 14,x
ldd REG(_.d6)
std 16,x
ldd REG(_.d7)
std 18,x
ldd REG(_.d8)
std 20,x
INT32( ldx #0) /* 32-bit int: clear high word of the result */
clra /* return 0 in D */
clrb
rts
#else
setjmp:
xgdx /* X = buf (argument was in D) */
movw 0,sp,2,x+ /* buf[0] = return PC; X advances by 2 */
sts 2,x+ /* buf[2] = SP */
movw _.frame,2,x+ /* buf[4..20] = _.frame, _.d1 .. _.d8 */
movw _.d1,2,x+
movw _.d2,2,x+
movw _.d3,2,x+
movw _.d4,2,x+
movw _.d5,2,x+
movw _.d6,2,x+
movw _.d7,2,x+
movw _.d8,2,x+
INT32( ldx #0) /* 32-bit int: clear high word of the result */
clra /* return 0 in D */
clrb
rts
#endif
/* void longjmp (jmp_buf buf, int val);
   buf arrives in D, val on the stack.  Restore the state saved by
   setjmp, then return to setjmp's caller with D = val (forced to 1
   when val == 0, as the C standard requires).  */
#ifdef mc6811
longjmp:
xgdx /* X = buf */
tsy /* Y = SP + 1 */
ldd val,y /* D = val argument (low word) */
bne do_jump
ldd #1 /* setjmp must not appear to return 0 */
do_jump:
xgdy /* park val in Y while D is reused */
ldd 4,x /* restore frame pointer and soft registers */
std REG(_.frame)
ldd 6,x
std REG(_.d1)
ldd 8,x
std REG(_.d2)
ldd 10,x
std REG(_.d3)
ldd 12,x
std REG(_.d4)
ldd 14,x
std REG(_.d5)
ldd 16,x
std REG(_.d6)
ldd 18,x
std REG(_.d7)
ldd 20,x
std REG(_.d8)
ldd 0,x /* D = saved return PC */
ldx 2,x /* X = saved SP (+1, matching TSY in setjmp) */
txs /* SP = X - 1: back on setjmp's stack */
std 0,x /* put the return PC back on top of that stack */
INT32( ldx #0) /* 32-bit int: clear high word of the result */
xgdy /* D = val again */
rts /* returns to setjmp's caller */
#else
longjmp:
xgdx /* X = buf */
ldy val,sp /* Y = val argument (low word) */
bne do_jump
ldy #1 /* setjmp must not appear to return 0 */
do_jump:
ldd 4,x+ /* D = saved return PC (buf[0]); X -> buf+4 (post-inc 4) */
movw 2,x+,_.frame /* restore frame pointer from buf[4]; X -> buf+6 */
movw 0,x,_.d1 /* restore soft registers from buf[6..20] */
movw 2,x,_.d2
movw 4,x,_.d3
movw 6,x,_.d4
movw 8,x,_.d5
movw 10,x,_.d6
movw 12,x,_.d7
movw 14,x,_.d8
ldx -4,x /* X = saved SP (buf[2]) */
txs /* back on setjmp's stack */
std 0,x /* put the return PC back on top of that stack */
INT32( ldx #0) /* 32-bit int: clear high word of the result */
xgdy /* D = val */
rts /* returns to setjmp's caller */
#endif
|
stsp/newlib-ia16
| 1,899
|
newlib/libc/machine/arm/aeabi_memset-soft.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm_asm.h"
/* ASM_ALIAS new old -- export "new" as a %function symbol aliased to
   "old".  On Thumb targets .thumb_set is needed so the alias keeps
   the Thumb bit of the target symbol; elsewhere a plain .set does.  */
.macro ASM_ALIAS new old
.global \new
.type \new, %function
#if defined (__thumb__)
.thumb_set \new, \old
#else
.set \new, \old
#endif
.endm
/* NOTE: This ifdef MUST match the one in aeabi_memset.c. */
/* Pull in the ISA-specific implementation; when this guard is false
   the routine comes from aeabi_memset.c instead (see the matching
   ifdef there).  */
#if !defined (__SOFTFP__)
# if defined (__thumb2__)
# include "aeabi_memset-thumb2.S"
# elif defined (__thumb__)
# include "aeabi_memset-thumb.S"
# else
# include "aeabi_memset-arm.S"
# endif
#endif
|
stsp/newlib-ia16
| 2,743
|
newlib/libc/machine/arm/aeabi_memset-thumb.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* AEABI memset -- Thumb-1 version.
   void __aeabi_memset (void *dest, size_t n, int c)
   r0 = dest, r1 = n, r2 = fill character (NB: AEABI argument order
   differs from ISO memset, and no return value is required).
   __aeabi_memset4/__aeabi_memset8 (stronger alignment promises) are
   simple aliases.
   Plan: byte-fill up to word alignment, replicate the fill byte
   across a word, store 16 bytes then 4 bytes at a time, and finish
   the tail bytewise.  */
.thumb
.syntax unified
.global __aeabi_memset
.type __aeabi_memset, %function
ASM_ALIAS __aeabi_memset4 __aeabi_memset
ASM_ALIAS __aeabi_memset8 __aeabi_memset
__aeabi_memset:
push {r4, r5, r6, lr}
lsls r3, r0, #30 /* low two address bits zero? */
beq 10f /* dest already word aligned: fast entry */
subs r4, r1, #1 /* r4 = bytes remaining - 1 */
cmp r1, #0
beq 9f /* n == 0: nothing to do */
lsls r5, r2, #24 /* r5 = fill byte, zero-extended */
lsrs r5, r5, #24
movs r3, r0 /* r3 = store cursor */
movs r6, #3
b 2f
1: /* consume one byte of the count; stop if exhausted */
subs r1, r4, #1
cmp r4, #0
beq 9f
movs r4, r1
2: /* store single bytes until the cursor is word aligned */
adds r3, r3, #1
subs r1, r3, #1
strb r5, [r1]
tst r3, r6
bne 1b
3: /* r3 = aligned cursor, r4 = bytes remaining */
cmp r4, #3
bls 7f /* fewer than 4 left: tail bytes only */
movs r5, #255 /* replicate the fill byte into all 4 lanes */
ands r5, r2
lsls r1, r5, #8
orrs r5, r1
lsls r1, r5, #16
orrs r5, r1
cmp r4, #15
bls 5f /* not enough for a 16-byte pass */
movs r6, r4 /* r6 = remaining rounded down to 16 */
subs r6, r6, #16
lsrs r6, r6, #4
adds r6, r6, #1
lsls r6, r6, #4
movs r1, r3
adds r3, r3, r6 /* r3 = end of the 16-byte region */
4: /* unrolled: four word stores per iteration */
str r5, [r1]
str r5, [r1, #4]
str r5, [r1, #8]
str r5, [r1, #12]
adds r1, r1, #16
cmp r3, r1
bne 4b
movs r1, #15
ands r4, r1 /* remaining mod 16 */
cmp r4, #3
bls 7f
5: /* word loop over the rest (r4 is 4..15 here) */
subs r6, r4, #4
lsrs r6, r6, #2
adds r6, r6, #1
lsls r6, r6, #2 /* r6 = remaining rounded down to 4 */
movs r1, r3
adds r3, r3, r6 /* r3 = end of the word region */
6:
stmia r1!, {r5}
cmp r3, r1
bne 6b
movs r1, #3
ands r4, r1 /* remaining mod 4 */
7: /* tail: store the final 0..3 bytes */
cmp r4, #0
beq 9f
lsls r2, r2, #24 /* re-extract the fill byte (r5 holds a word) */
lsrs r2, r2, #24
adds r4, r3, r4 /* r4 = end address */
8:
strb r2, [r3]
adds r3, r3, #1
cmp r4, r3
bne 8b
9: /* epilogue: return via saved lr (bx keeps interworking) */
pop {r4, r5, r6}
pop {r1}
bx r1
10: /* aligned fast entry: skip the byte alignment loop */
movs r3, r0
movs r4, r1
b 3b
.size __aeabi_memset, . - __aeabi_memset
|
stsp/newlib-ia16
| 11,992
|
newlib/libc/machine/arm/strcmp-armv7.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Implementation of strcmp for ARMv7 when DSP instructions are
available. Use ldrd to support wider loads, provided the data
is sufficiently aligned. Use saturating arithmetic to optimize
the compares. */
/* Build Options:
STRCMP_NO_PRECHECK: Don't run a quick pre-check of the first
byte in the string. If comparing completely random strings
the pre-check will save time, since there is a very high
probability of a mismatch in the first character: we save
significant overhead if this is the common case. However,
if strings are likely to be identical (eg because we're
verifying a hit in a hash table), then this check is largely
redundant. */
/* This version uses Thumb-2 code. */
.thumb
.syntax unified
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define tmp1 r4
#define tmp2 r5
#define const_m1 r12
/* Additional internal variables for 64-bit aligned data. */
#define data1a r2
#define data1b r3
#define data2a r6
#define data2b r7
#define syndrome_a tmp1
#define syndrome_b tmp2
/* Additional internal variables for 32-bit aligned data. */
#define data1 r2
#define data2 r3
#define syndrome tmp2
/* Macro to compute and return the result value for word-aligned
cases. */
/* strcmp_epilogue_aligned synd d1 d2 restore_r6:
   \synd marks (bit 7 of the lane set) the first byte lane holding
   either a difference or a NUL.  Extract that byte from \d1 and \d2,
   subtract to form the result, restore the saved registers (r6/r7
   only when \restore_r6 is 1) and return.  */
.macro strcmp_epilogue_aligned synd d1 d2 restore_r6
#ifdef __ARM_BIG_ENDIAN
/* If data1 contains a zero byte, then syndrome will contain a 1 in
bit 7 of that byte. Otherwise, the highest set bit in the
syndrome will highlight the first different bit. It is therefore
sufficient to extract the eight bits starting with the syndrome
bit. */
clz tmp1, \synd
lsl r1, \d2, tmp1
.if \restore_r6
ldrd r6, r7, [sp, #8]
.endif
.cfi_restore 6
.cfi_restore 7
lsl \d1, \d1, tmp1
.cfi_remember_state
lsr result, \d1, #24
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
sub result, result, r1, lsr #24
bx lr
#else
/* To use the big-endian trick we'd have to reverse all three words.
that's slower than this approach. */
rev \synd, \synd
clz tmp1, \synd
bic tmp1, tmp1, #7
lsr r1, \d2, tmp1
.cfi_remember_state
.if \restore_r6
ldrd r6, r7, [sp, #8]
.endif
.cfi_restore 6
.cfi_restore 7
lsr \d1, \d1, tmp1
and result, \d1, #255
and r1, r1, #255
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
sub result, result, r1
bx lr
#endif
.endm
.text
.p2align 5
.Lstrcmp_start_addr:
#ifndef STRCMP_NO_PRECHECK
/* Out-of-line exit for the first-byte pre-check below.  */
.Lfastpath_exit:
sub r0, r2, r3
bx lr
nop
#endif
/* int strcmp (const char *s1, const char *s2)
   src1 = r0, src2 = r1; returns the byte difference at the first
   mismatch or NUL (0 if the strings are equal).  */
def_fn strcmp
#ifndef STRCMP_NO_PRECHECK
/* Compare the first byte pair before saving any registers:
   r2 < 1 means s1 starts with NUL; otherwise require a match.  */
ldrb r2, [src1]
ldrb r3, [src2]
cmp r2, #1
it cs
cmpcs r2, r3
bne .Lfastpath_exit
#endif
.cfi_sections .debug_frame
.cfi_startproc
strd r4, r5, [sp, #-16]!
.cfi_def_cfa_offset 16
.cfi_offset 4, -16
.cfi_offset 5, -12
orr tmp1, src1, src2
strd r6, r7, [sp, #8]
.cfi_offset 6, -8
.cfi_offset 7, -4
mvn const_m1, #0
lsl r2, tmp1, #29 /* both pointers 8-byte aligned? */
cbz r2, .Lloop_aligned8
.Lnot_aligned:
eor tmp1, src1, src2
tst tmp1, #7
bne .Lmisaligned8
/* Deal with mutual misalignment by aligning downwards and then
masking off the unwanted loaded data to prevent a difference. */
and tmp1, src1, #7
bic src1, src1, #7
and tmp2, tmp1, #3
bic src2, src2, #7
lsl tmp2, tmp2, #3 /* Bytes -> bits. */
ldrd data1a, data1b, [src1], #16
tst tmp1, #4
ldrd data2a, data2b, [src2], #16
/* In thumb code we can't use MVN with a register shift, but
we do have ORN. */
S2HI tmp1, const_m1, tmp2
orn data1a, data1a, tmp1 /* force the skipped leading bytes equal */
orn data2a, data2a, tmp1
beq .Lstart_realigned8
orn data1b, data1b, tmp1 /* misalignment >= 4: mask word b */
mov data1a, const_m1 /* and neutralise word a entirely */
orn data2b, data2b, tmp1
mov data2a, const_m1
b .Lstart_realigned8
/* Unwind the inner loop by a factor of 2, giving 16 bytes per
pass. */
.p2align 5,,12 /* Don't start in the tail bytes of a cache line. */
.p2align 2 /* Always word aligned. */
.Lloop_aligned8:
ldrd data1a, data1b, [src1], #16
ldrd data2a, data2b, [src2], #16
.Lstart_realigned8:
/* UADD8 against 0xffffffff sets a GE flag per non-zero byte of
   data1; SEL then keeps the XOR difference in lanes that are
   non-NUL and plants 0xff in any lane holding a NUL, so a
   non-zero syndrome means "difference or end of string".  */
uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */
eor syndrome_a, data1a, data2a
sel syndrome_a, syndrome_a, const_m1
cbnz syndrome_a, .Ldiff_in_a
uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */
eor syndrome_b, data1b, data2b
sel syndrome_b, syndrome_b, const_m1
cbnz syndrome_b, .Ldiff_in_b
ldrd data1a, data1b, [src1, #-8] /* second half of the 16 bytes */
ldrd data2a, data2b, [src2, #-8]
uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */
eor syndrome_a, data1a, data2a
sel syndrome_a, syndrome_a, const_m1
uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */
eor syndrome_b, data1b, data2b
sel syndrome_b, syndrome_b, const_m1
/* Can't use CBZ for backwards branch. */
orrs syndrome_b, syndrome_b, syndrome_a /* Only need if s_a == 0 */
beq .Lloop_aligned8
.Ldiff_found:
cbnz syndrome_a, .Ldiff_in_a
.Ldiff_in_b:
strcmp_epilogue_aligned syndrome_b, data1b, data2b 1
.Ldiff_in_a:
.cfi_restore_state
strcmp_epilogue_aligned syndrome_a, data1a, data2a 1
.cfi_restore_state
.Lmisaligned8: /* pointers differ modulo 8 */
tst tmp1, #3
bne .Lmisaligned4
ands tmp1, src1, #3
bne .Lmutual_align4
/* Unrolled by a factor of 2, to reduce the number of post-increment
operations. */
.Lloop_aligned4: /* both pointers word aligned: 8 bytes per pass */
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned4:
uadd8 syndrome, data1, const_m1 /* Only need GE bits. */
eor syndrome, data1, data2
sel syndrome, syndrome, const_m1
cbnz syndrome, .Laligned4_done
ldr data1, [src1, #-4]
ldr data2, [src2, #-4]
uadd8 syndrome, data1, const_m1
eor syndrome, data1, data2
sel syndrome, syndrome, const_m1
cmp syndrome, #0
beq .Lloop_aligned4
.Laligned4_done:
strcmp_epilogue_aligned syndrome, data1, data2, 0
.Lmutual_align4: /* same offset mod 4, but not word aligned */
.cfi_restore_state
/* Deal with mutual misalignment by aligning downwards and then
masking off the unwanted loaded data to prevent a difference. */
lsl tmp1, tmp1, #3 /* Bytes -> bits. */
bic src1, src1, #3
ldr data1, [src1], #8
bic src2, src2, #3
ldr data2, [src2], #8
/* In thumb code we can't use MVN with a register shift, but
we do have ORN. */
S2HI tmp1, const_m1, tmp1
orn data1, data1, tmp1
orn data2, data2, tmp1
b .Lstart_realigned4
.Lmisaligned4: /* pointers differ modulo 4 */
ands tmp1, src1, #3
beq .Lsrc1_aligned
/* Align src1 downwards and byte-compare the 1..3 leading bytes
   (tmp1's two low bits, moved into Z and C, select the entry).  */
sub src2, src2, tmp1
bic src1, src1, #3
lsls tmp1, tmp1, #31
ldr data1, [src1], #4
beq .Laligned_m2
bcs .Laligned_m1
#ifdef STRCMP_NO_PRECHECK
ldrb data2, [src2, #1]
uxtb tmp1, data1, ror #BYTE1_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
cbz data2, .Lmisaligned_exit
.Laligned_m2:
ldrb data2, [src2, #2]
uxtb tmp1, data1, ror #BYTE2_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
cbz data2, .Lmisaligned_exit
.Laligned_m1:
ldrb data2, [src2, #3]
uxtb tmp1, data1, ror #BYTE3_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
add src2, src2, #4
cbnz data2, .Lsrc1_aligned
#else /* STRCMP_NO_PRECHECK */
/* If we've done the pre-check, then we don't need to check the
first byte again here. */
ldrb data2, [src2, #2]
uxtb tmp1, data1, ror #BYTE2_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
cbz data2, .Lmisaligned_exit
.Laligned_m2:
ldrb data2, [src2, #3]
uxtb tmp1, data1, ror #BYTE3_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
cbnz data2, .Laligned_m1
#endif
.Lmisaligned_exit: /* difference or NUL within the leading bytes */
.cfi_remember_state
mov result, tmp1
ldr r4, [sp], #16 /* only r4 was live; discard the whole frame */
.cfi_restore 4
bx lr
#ifndef STRCMP_NO_PRECHECK
.Laligned_m1:
add src2, src2, #4
#endif
.Lsrc1_aligned:
.cfi_restore_state
/* src1 is word aligned, but src2 has no common alignment
with it. */
ldr data1, [src1], #4
lsls tmp1, src2, #31 /* C=src2[1], Z=src2[0]. */
bic src2, src2, #3
ldr data2, [src2], #4
bhi .Loverlap1 /* C=1, Z=0 => src2[1:0] = 0b11. */
bcs .Loverlap2 /* C=1, Z=1 => src2[1:0] = 0b10. */
/* (overlap3) C=0, Z=0 => src2[1:0] = 0b01. */
/* The three overlap loops compare an aligned src1 word against the
   concatenation of two consecutive aligned src2 words, shifted by
   1, 2 or 3 bytes; numeric labels 4/5/6 are the tail, possible-NUL
   and difference exits respectively.  */
.Loverlap3:
bic tmp1, data1, #MSB
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #8
sel syndrome, syndrome, const_m1
bne 4f
cbnz syndrome, 5f
ldr data2, [src2], #4
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
bics syndrome, syndrome, #MSB
bne .Lstrcmp_done_equal
/* We can only get here if the MSB of data1 contains 0, so
fast-path the exit. */
ldrb result, [src2]
.cfi_remember_state
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 Not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
neg result, result
bx lr
6:
.cfi_restore_state
S2LO data1, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
.p2align 5,,12 /* Ensure at least 3 instructions in cache line. */
.Loverlap2:
and tmp1, data1, const_m1, S2LO #16
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #16
sel syndrome, syndrome, const_m1
bne 4f
cbnz syndrome, 5f
ldr data2, [src2], #4
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
5:
ands syndrome, syndrome, const_m1, S2LO #16
bne .Lstrcmp_done_equal
ldrh data2, [src2]
S2LO data1, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2LO data1, data1, #16
and data2, data2, const_m1, S2LO #16
b .Lstrcmp_tail
.p2align 5,,12 /* Ensure at least 3 instructions in cache line. */
.Loverlap1:
and tmp1, data1, #LSB
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #24
sel syndrome, syndrome, const_m1
bne 4f
cbnz syndrome, 5f
ldr data2, [src2], #4
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
tst syndrome, #LSB
bne .Lstrcmp_done_equal
ldr data2, [src2]
6:
S2LO data1, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
bx lr
.Lstrcmp_tail: /* data1/data2 hold the final, partial word pair */
.cfi_restore_state
#ifndef __ARM_BIG_ENDIAN
rev data1, data1
rev data2, data2
/* Now everything looks big-endian... */
#endif
uadd8 tmp1, data1, const_m1
eor tmp1, data1, data2
sel syndrome, tmp1, const_m1
clz tmp1, syndrome
lsl data1, data1, tmp1
lsl data2, data2, tmp1
lsr result, data1, #24
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
sub result, result, data2, lsr #24
bx lr
.cfi_endproc
.size strcmp, . - .Lstrcmp_start_addr
|
stsp/newlib-ia16
| 2,060
|
newlib/libc/machine/arm/strlen.S
|
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include "acle-compat.h"
/* Select a strlen implementation for the current target; any
   configuration without an assembly version here gets the C fallback
   in strlen-stub.c.  */
#if defined __OPTIMIZE_SIZE__ || defined PREFER_SIZE_OVER_SPEED
#if defined __thumb__ && !defined __thumb2__
#include "strlen-thumb1-Os.S"
#else
#include "strlen-thumb2-Os.S"
#endif
#else /* defined __OPTIMIZE_SIZE__ || defined PREFER_SIZE_OVER_SPEED */
#if defined __thumb__ && ! defined __thumb2__
/* Implemented in strlen-stub.c. */
#elif __ARM_ARCH_ISA_THUMB >= 2 && defined __ARM_FEATURE_DSP
#include "strlen-armv7.S"
#else
/* Implemented in strlen-stub.c. */
#endif
#endif
|
stsp/newlib-ia16
| 11,787
|
newlib/libc/machine/arm/strcmp-armv6.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Implementation of strcmp for ARMv6. Use ldrd to support wider
loads, provided the data is sufficiently aligned. Use
saturating arithmetic to optimize the compares. */
/* Build Options:
STRCMP_NO_PRECHECK: Don't run a quick pre-check of the first
byte in the string. If comparing completely random strings
the pre-check will save time, since there is a very high
probability of a mismatch in the first character: we save
significant overhead if this is the common case. However,
if strings are likely to be identical (eg because we're
verifying a hit in a hash table), then this check is largely
redundant. */
.arm
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define tmp1 r4
#define tmp2 r5
#define const_m1 r12
/* Additional internal variables for 64-bit aligned data. */
#define data1a r2
#define data1b r3
#define data2a r6
#define data2b r7
#define syndrome_a tmp1
#define syndrome_b tmp2
/* Additional internal variables for 32-bit aligned data. */
#define data1 r2
#define data2 r3
#define syndrome tmp2
/* Macro to compute and return the result value for word-aligned
cases. */
/* strcmp_epilogue_aligned synd d1 d2 restore_r6:
   \synd marks (bit 7 of the lane set) the first byte lane holding
   either a difference or a NUL.  Extract that byte from \d1 and \d2,
   subtract to form the result, restore the saved registers (r6/r7
   only when \restore_r6 is 1) and return.  */
.macro strcmp_epilogue_aligned synd d1 d2 restore_r6
#ifdef __ARM_BIG_ENDIAN
/* If data1 contains a zero byte, then syndrome will contain a 1 in
bit 7 of that byte. Otherwise, the highest set bit in the
syndrome will highlight the first different bit. It is therefore
sufficient to extract the eight bits starting with the syndrome
bit. */
clz tmp1, \synd
lsl r1, \d2, tmp1
.if \restore_r6
ldrd r6, r7, [sp, #8]
.endif
.cfi_restore 6
.cfi_restore 7
lsl \d1, \d1, tmp1
.cfi_remember_state
lsr result, \d1, #24
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
sub result, result, r1, lsr #24
bx lr
#else
/* To use the big-endian trick we'd have to reverse all three words.
that's slower than this approach. */
rev \synd, \synd
clz tmp1, \synd
bic tmp1, tmp1, #7
lsr r1, \d2, tmp1
.cfi_remember_state
.if \restore_r6
ldrd r6, r7, [sp, #8]
.endif
.cfi_restore 6
.cfi_restore 7
lsr \d1, \d1, tmp1
and result, \d1, #255
and r1, r1, #255
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
sub result, result, r1
bx lr
#endif
.endm
.text
.p2align 5
.Lstrcmp_start_addr:
#ifndef STRCMP_NO_PRECHECK
/* Out-of-line exit for the first-byte pre-check below.  */
.Lfastpath_exit:
sub r0, r2, r3
bx lr
#endif
/* int strcmp (const char *s1, const char *s2)  -- ARM (ARMv6) version.
   src1 = r0, src2 = r1; returns the byte difference at the first
   mismatch or NUL (0 if the strings are equal).  */
def_fn strcmp
#ifndef STRCMP_NO_PRECHECK
/* Compare the first byte pair before saving any registers:
   r2 < 1 means s1 starts with NUL; otherwise require a match.  */
ldrb r2, [src1]
ldrb r3, [src2]
cmp r2, #1
cmpcs r2, r3
bne .Lfastpath_exit
#endif
.cfi_sections .debug_frame
.cfi_startproc
strd r4, r5, [sp, #-16]!
.cfi_def_cfa_offset 16
.cfi_offset 4, -16
.cfi_offset 5, -12
orr tmp1, src1, src2
strd r6, r7, [sp, #8]
.cfi_offset 6, -8
.cfi_offset 7, -4
mvn const_m1, #0
tst tmp1, #7 /* both pointers 8-byte aligned? */
beq .Lloop_aligned8
.Lnot_aligned:
eor tmp1, src1, src2
tst tmp1, #7
bne .Lmisaligned8
/* Deal with mutual misalignment by aligning downwards and then
masking off the unwanted loaded data to prevent a difference. */
and tmp1, src1, #7
bic src1, src1, #7
and tmp2, tmp1, #3
bic src2, src2, #7
lsl tmp2, tmp2, #3 /* Bytes -> bits. */
ldrd data1a, data1b, [src1], #16
tst tmp1, #4
ldrd data2a, data2b, [src2], #16
/* In ARM code we can't use ORN, but with do have MVN with a
register shift. */
mvn tmp1, const_m1, S2HI tmp2
orr data1a, data1a, tmp1 /* force the skipped leading bytes equal */
orr data2a, data2a, tmp1
beq .Lstart_realigned8
orr data1b, data1b, tmp1 /* misalignment >= 4: mask word b */
mov data1a, const_m1 /* and neutralise word a entirely */
orr data2b, data2b, tmp1
mov data2a, const_m1
b .Lstart_realigned8
/* Unwind the inner loop by a factor of 2, giving 16 bytes per
pass. */
.p2align 5,,12 /* Don't start in the tail bytes of a cache line. */
.p2align 2 /* Always word aligned. */
.Lloop_aligned8:
ldrd data1a, data1b, [src1], #16
ldrd data2a, data2b, [src2], #16
.Lstart_realigned8:
/* UADD8 against 0xffffffff sets a GE flag per non-zero byte of
   data1; SEL then keeps the XOR difference in lanes that are
   non-NUL and plants 0xff in any lane holding a NUL, so a
   non-zero syndrome means "difference or end of string".  */
uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */
eor syndrome_a, data1a, data2a
sel syndrome_a, syndrome_a, const_m1
uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */
eor syndrome_b, data1b, data2b
sel syndrome_b, syndrome_b, const_m1
orrs syndrome_b, syndrome_b, syndrome_a /* Only need if s_a == 0 */
bne .Ldiff_found
ldrd data1a, data1b, [src1, #-8] /* second half of the 16 bytes */
ldrd data2a, data2b, [src2, #-8]
uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */
eor syndrome_a, data1a, data2a
sel syndrome_a, syndrome_a, const_m1
uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */
eor syndrome_b, data1b, data2b
sel syndrome_b, syndrome_b, const_m1
orrs syndrome_b, syndrome_b, syndrome_a /* Only need if s_a == 0 */
beq .Lloop_aligned8
.Ldiff_found:
cmp syndrome_a, #0
bne .Ldiff_in_a
.Ldiff_in_b:
strcmp_epilogue_aligned syndrome_b, data1b, data2b 1
.Ldiff_in_a:
.cfi_restore_state
strcmp_epilogue_aligned syndrome_a, data1a, data2a 1
.cfi_restore_state
.Lmisaligned8: /* pointers differ modulo 8 */
tst tmp1, #3
bne .Lmisaligned4
ands tmp1, src1, #3
bne .Lmutual_align4
/* Unrolled by a factor of 2, to reduce the number of post-increment
operations. */
.Lloop_aligned4: /* both pointers word aligned: 8 bytes per pass */
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned4:
uadd8 syndrome, data1, const_m1 /* Only need GE bits. */
eor syndrome, data1, data2
sel syndrome, syndrome, const_m1
cmp syndrome, #0
bne .Laligned4_done
ldr data1, [src1, #-4]
ldr data2, [src2, #-4]
uadd8 syndrome, data1, const_m1
eor syndrome, data1, data2
sel syndrome, syndrome, const_m1
cmp syndrome, #0
beq .Lloop_aligned4
.Laligned4_done:
strcmp_epilogue_aligned syndrome, data1, data2, 0
.Lmutual_align4: /* same offset mod 4, but not word aligned */
.cfi_restore_state
/* Deal with mutual misalignment by aligning downwards and then
masking off the unwanted loaded data to prevent a difference. */
lsl tmp1, tmp1, #3 /* Bytes -> bits. */
bic src1, src1, #3
ldr data1, [src1], #8
bic src2, src2, #3
ldr data2, [src2], #8
/* In ARM code we can't use ORN, but with do have MVN with a
register shift. */
mvn tmp1, const_m1, S2HI tmp1
orr data1, data1, tmp1
orr data2, data2, tmp1
b .Lstart_realigned4
.Lmisaligned4: /* pointers differ modulo 4 */
ands tmp1, src1, #3
beq .Lsrc1_aligned
/* Align src1 downwards and byte-compare the 1..3 leading bytes
   (tmp1's two low bits, moved into Z and C, select the entry).  */
sub src2, src2, tmp1
bic src1, src1, #3
lsls tmp1, tmp1, #31
ldr data1, [src1], #4
beq .Laligned_m2
bcs .Laligned_m1
#ifdef STRCMP_NO_PRECHECK
ldrb data2, [src2, #1]
uxtb tmp1, data1, ror #BYTE1_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
bne .Lmisaligned_exit
.Laligned_m2:
ldrb data2, [src2, #2]
uxtb tmp1, data1, ror #BYTE2_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
bne .Lmisaligned_exit
.Laligned_m1:
ldrb data2, [src2, #3]
uxtb tmp1, data1, ror #BYTE3_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
beq .Lsrc1_aligned
#else /* STRCMP_NO_PRECHECK */
/* If we've done the pre-check, then we don't need to check the
first byte again here. */
ldrb data2, [src2, #2]
uxtb tmp1, data1, ror #BYTE2_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
bne .Lmisaligned_exit
.Laligned_m2:
ldrb data2, [src2, #3]
uxtb tmp1, data1, ror #BYTE3_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
beq .Laligned_m1
#endif
.Lmisaligned_exit: /* difference or NUL within the leading bytes */
.cfi_remember_state
sub result, tmp1, data2
ldr r4, [sp], #16 /* only r4 was live; discard the whole frame */
.cfi_restore 4
bx lr
#ifndef STRCMP_NO_PRECHECK
.Laligned_m1:
add src2, src2, #4
#endif
.Lsrc1_aligned:
.cfi_restore_state
/* src1 is word aligned, but src2 has no common alignment
with it. */
ldr data1, [src1], #4
lsls tmp1, src2, #31 /* C=src2[1], Z=src2[0]. */
bic src2, src2, #3
ldr data2, [src2], #4
bhi .Loverlap1 /* C=1, Z=0 => src2[1:0] = 0b11. */
bcs .Loverlap2 /* C=1, Z=1 => src2[1:0] = 0b10. */
/* (overlap3) C=0, Z=0 => src2[1:0] = 0b01. */
/* The three overlap loops compare an aligned src1 word against the
   concatenation of two consecutive aligned src2 words, shifted by
   1, 2 or 3 bytes; numeric labels 4/5/6 are the tail, possible-NUL
   and difference exits respectively.  */
.Loverlap3:
bic tmp1, data1, #MSB
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #8
sel syndrome, syndrome, const_m1
bne 4f
cmp syndrome, #0
ldreq data2, [src2], #4
bne 5f
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
bics syndrome, syndrome, #MSB
bne .Lstrcmp_done_equal
/* We can only get here if the MSB of data1 contains 0, so
fast-path the exit. */
ldrb result, [src2]
.cfi_remember_state
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 Not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
neg result, result
bx lr
6:
.cfi_restore_state
S2LO data1, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
.p2align 5,,12 /* Ensure at least 3 instructions in cache line. */
.Loverlap2:
and tmp1, data1, const_m1, S2LO #16
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #16
sel syndrome, syndrome, const_m1
bne 4f
cmp syndrome, #0
ldreq data2, [src2], #4
bne 5f
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
5:
ands syndrome, syndrome, const_m1, S2LO #16
bne .Lstrcmp_done_equal
ldrh data2, [src2]
S2LO data1, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2LO data1, data1, #16
and data2, data2, const_m1, S2LO #16
b .Lstrcmp_tail
.p2align 5,,12 /* Ensure at least 3 instructions in cache line. */
.Loverlap1:
and tmp1, data1, #LSB
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #24
sel syndrome, syndrome, const_m1
bne 4f
cmp syndrome, #0
ldreq data2, [src2], #4
bne 5f
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
tst syndrome, #LSB
bne .Lstrcmp_done_equal
ldr data2, [src2]
6:
S2LO data1, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
bx lr
.Lstrcmp_tail: /* data1/data2 hold the final, partial word pair */
.cfi_restore_state
#ifndef __ARM_BIG_ENDIAN
rev data1, data1
rev data2, data2
/* Now everything looks big-endian... */
#endif
uadd8 tmp1, data1, const_m1
eor tmp1, data1, data2
sel syndrome, tmp1, const_m1
clz tmp1, syndrome
lsl data1, data1, tmp1
lsl data2, data2, tmp1
lsr result, data1, #24
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
sub result, result, data2, lsr #24
bx lr
.cfi_endproc
.size strcmp, . - .Lstrcmp_start_addr
|
stsp/newlib-ia16
| 2,032
|
newlib/libc/machine/arm/aeabi_memmove-thumb2.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.thumb
.syntax unified
.global __aeabi_memmove
.type __aeabi_memmove, %function
ASM_ALIAS __aeabi_memmove4 __aeabi_memmove
ASM_ALIAS __aeabi_memmove8 __aeabi_memmove
/* void __aeabi_memmove (void *dest, const void *src, size_t n)
   AAPCS: r0 = dest, r1 = src, r2 = n.  Unlike ISO memmove the AEABI
   entry point has no return value, so r0 need not be preserved.
   The 4- and 8-byte "aligned" entry points are plain aliases: the
   copy is byte-by-byte in both directions, so alignment is not
   exploited.  Only r4 is callee-saved here and it is pushed/popped.  */
__aeabi_memmove:
cmp r0, r1
push {r4} /* r4 is the byte shuttle register. */
bls 3f /* dest <= src: forward copy is always safe. */
adds r3, r1, r2 /* r3 = src + n (one past end of source). */
cmp r0, r3
bcs 3f /* dest >= src + n: regions disjoint, copy forward. */
/* dest overlaps the tail of src: copy backwards. */
adds r1, r0, r2 /* r1 = dest + n (one past end of destination). */
cbz r2, 2f /* n == 0: nothing to copy. */
subs r2, r3, r2 /* r2 = src, the terminator for the source cursor. */
1:
ldrb r4, [r3, #-1]! /* pre-decrement load from the source end... */
cmp r2, r3
strb r4, [r1, #-1]! /* ...pre-decrement store to the destination end. */
bne 1b /* loop until the source cursor reaches src. */
2:
pop {r4}
bx lr
3:
/* Forward byte copy (no harmful overlap). */
cmp r2, #0
beq 2b /* n == 0: just restore r4 and return. */
add r2, r2, r1 /* r2 = src + n, the loop terminator. */
subs r3, r0, #1 /* r3 = dest - 1, for the pre-increment store below. */
4:
ldrb r4, [r1], #1
cmp r2, r1
strb r4, [r3, #1]!
bne 4b
pop {r4}
bx lr
.size __aeabi_memmove, . - __aeabi_memmove
|
stsp/newlib-ia16
| 5,993
|
newlib/libc/machine/arm/strlen-armv7.S
|
/* Copyright (c) 2010-2011,2013 Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Assumes:
ARMv6T2 or ARMv7E-M, AArch32
*/
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include "arm_asm.h"
/* Emit the .text/.p2align/.global/.type boilerplate and the label
   for a global function \f, aligned to 2^\p2align bytes.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#ifdef __ARMEB__
#define S2LO lsl
#define S2HI lsr
#else
#define S2LO lsr
#define S2HI lsl
#endif
/* This code requires Thumb. */
#if __ARM_ARCH_PROFILE == 'M'
.arch armv7e-m
#else
.arch armv6t2
#endif
.eabi_attribute Tag_ARM_ISA_use, 0
.thumb
.syntax unified
/* Parameters and result. */
#define srcin r0
#define result r0
/* Internal variables. */
#define src r1
#define data1a r2
#define data1b r3
#define const_m1 r12
#define const_0 r4
#define tmp1 r4 /* Overlaps const_0 */
#define tmp2 r5
/* size_t strlen (const char *s)
   In:  r0 (srcin) = s.   Out: r0 (result) = length.
   Clobbers r1-r3, r12 and flags; r4/r5 are spilled to the stack.
   Strategy: round s down to an 8-byte boundary, then scan 32 bytes
   per aligned iteration.  UADD8 of a word with 0xffffffff sets each
   GE<i> flag iff byte lane i is non-zero; SEL then builds a non-zero
   "null found" marker word only when some lane held a NUL byte.  */
def_fn strlen p2align=6
pld [srcin, #0]
strd r4, r5, [sp, #-8]! /* Spill const_0/tmp1 (r4) and tmp2 (r5). */
bic src, srcin, #7 /* src = s rounded down to 8-byte alignment. */
mvn const_m1, #0
ands tmp1, srcin, #7 /* (8 - bytes) to alignment. */
pld [src, #32]
bne.w .Lmisaligned8 /* Mask leading bytes if s was not aligned. */
mov const_0, #0
mov result, #-8 /* Pre-biased: first iteration adds 8 back. */
.Lloop_aligned:
/* Bytes 0-7. */
ldrd data1a, data1b, [src]
pld [src, #64]
add result, result, #8
.Lstart_realigned:
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 8-15. */
ldrd data1a, data1b, [src, #8]
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 16-23. */
ldrd data1a, data1b, [src, #16]
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 24-31. */
ldrd data1a, data1b, [src, #24]
add src, src, #32
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cmp data1b, #0 /* cbnz cannot reach .Lloop_aligned; test + beq. */
beq .Lloop_aligned
.Lnull_found:
/* data1a == 0 means the NUL is in the second word: skip 4 bytes. */
cmp data1a, #0
itt eq
addeq result, result, #4
moveq data1a, data1b
#ifndef __ARMEB__
rev data1a, data1a /* Little-endian: first byte must be most significant. */
#endif
clz data1a, data1a /* 8 leading zero bits per non-NUL leading byte. */
ldrd r4, r5, [sp], #8
add result, result, data1a, lsr #3 /* Bits -> Bytes. */
bx lr
.Lmisaligned8:
/* s was not 8-byte aligned: load the enclosing 8 bytes and force
   every byte *before* s to 0xff so it cannot match NUL.  */
ldrd data1a, data1b, [src]
and tmp2, tmp1, #3
rsb result, tmp1, #0 /* Bias result by -(misalignment). */
lsl tmp2, tmp2, #3 /* Bytes -> bits. */
tst tmp1, #4 /* Does the string start in the second word? */
pld [src, #64]
S2HI tmp2, const_m1, tmp2 /* Mask covering bytes at/after s. */
orn data1a, data1a, tmp2
itt ne
ornne data1b, data1b, tmp2
movne data1a, const_m1 /* First word entirely before s: all-ones. */
mov const_0, #0
b .Lstart_realigned
.size strlen, . - strlen
|
stsp/newlib-ia16
| 1,975
|
newlib/libc/machine/arm/memcpy.S
|
/*
* Copyright (c) 2013-2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* The structure of the following #if #else #endif conditional chain
must match the chain in memcpy-stub.c. */
#include "acle-compat.h"
#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED)
/* Defined in memcpy-stub.c. */
#elif (__ARM_ARCH >= 7 && __ARM_ARCH_PROFILE == 'A' \
&& defined (__ARM_FEATURE_UNALIGNED))
#include "memcpy-armv7a.S"
#elif __ARM_ARCH_ISA_THUMB == 2 && !__ARM_ARCH_ISA_ARM
#include "memcpy-armv7m.S"
#else
/* Defined in memcpy-stub.c. */
#endif
|
stsp/newlib-ia16
| 2,912
|
newlib/libc/machine/arm/strcmp.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Wrapper for the various implementations of strcmp. */
#include "arm_asm.h"
#include "acle-compat.h"
#ifdef __ARM_BIG_ENDIAN
#define S2LO lsl
#define S2LOEQ lsleq
#define S2HI lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not __ARM_BIG_ENDIAN */
#define S2LO lsr
#define S2LOEQ lsreq
#define S2HI lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not __ARM_BIG_ENDIAN */
/* Emit the .text/.p2align/.global/.type boilerplate and the label
   for a global function \f, aligned to 2^\p2align bytes.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) \
|| (__ARM_ARCH_ISA_THUMB == 1 && !__ARM_ARCH_ISA_ARM)
# if defined (__thumb__) && !defined (__thumb2__)
/* Thumb1 only variant. If size is preferred, use strcmp-armv4t.S.
If speed is preferred, the strcmp function in strcmp-armv6m.S
will be used. */
# if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED)
# include "strcmp-armv4t.S"
# else
# include "strcmp-armv6m.S"
# endif
# else
# include "strcmp-arm-tiny.S"
# endif
#elif __ARM_ARCH_ISA_THUMB == 2
# ifdef __ARM_FEATURE_SIMD32
# include "strcmp-armv7.S"
# else
# include "strcmp-armv7m.S"
# endif
#elif __ARM_ARCH >= 6
# include "strcmp-armv6.S"
#else
# include "strcmp-armv4.S"
#endif
|
stsp/newlib-ia16
| 10,375
|
newlib/libc/machine/arm/aeabi_memcpy-armv7a.S
|
/*
* Copyright (c) 2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm_asm.h"
/* NOTE: This ifdef MUST match the one in aeabi_memcpy.c. */
#if defined (__ARM_ARCH_7A__) && defined (__ARM_FEATURE_UNALIGNED) && \
(defined (__ARM_NEON__) || !defined (__SOFTFP__))
.syntax unified
.global __aeabi_memcpy
.type __aeabi_memcpy, %function
/* void __aeabi_memcpy (void *dest, const void *src, size_t n)
   AAPCS: r0 = dest, r1 = src, r2 = n.  No return value is required
   by the AEABI, but r0 is saved/restored around the copy anyway.
   Register roles: r0/r1 = moving dest/src cursors, r2 = remaining
   count (kept pre-biased: the true remainder is r2 plus the bias
   noted at each label), r3 = word shuttle, r4/r5 = doubleword
   shuttle (spilled to the stack), ip (r12) = alignment scratch.
   Shared labels below are also branch targets of __aeabi_memcpy4
   and __aeabi_memcpy8, so their register contracts must not change.  */
__aeabi_memcpy:
/* Assumes that n >= 0, and dst, src are valid pointers.
If there is at least 8 bytes to copy, use LDRD/STRD.
If src and dst are misaligned with different offsets,
first copy byte by byte until dst is aligned,
and then copy using LDRD/STRD and shift if needed.
When less than 8 left, copy a word and then byte by byte. */
/* Save registers (r0 holds the return value):
optimized push {r0, r4, r5, lr}.
To try and improve performance, stack layout changed,
i.e., not keeping the stack looking like users expect
(highest numbered register at highest address). */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Get copying of tiny blocks out of the way first. */
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
/* Check word alignment. */
ands ip, r0, #3 /* ip = last 2 bits of dst. */
bne dst_not_word_aligned /* If dst is not word-aligned. */
/* Get here if dst is word-aligned. */
ands ip, r1, #3 /* ip = last 2 bits of src. */
bne src_not_word_aligned /* If src is not word-aligned. */
word_aligned:
/* Get here if source and dst both are word-aligned.
The number of bytes remaining to copy is r2+4. */
/* Is there is at least 64 bytes to copy? */
subs r2, r2, #60
blt copy_less_than_64 /* If r2 + 4 < 64. */
/* First, align the destination buffer to 8-bytes,
to make sure double loads and stores don't cross cache line boundary,
as they are then more expensive even if the data is in the cache
(require two load/store issue cycles instead of one).
If only one of the buffers is not 8-bytes aligned,
then it's more important to align dst than src,
because there is more penalty for stores
than loads that cross cacheline boundary.
This check and realignment are only worth doing
if there is a lot to copy. */
/* Get here if dst is word aligned,
i.e., the 2 least significant bits are 0.
If dst is not 2w aligned (i.e., the 3rd bit is not set in dst),
then copy 1 word (4 bytes). */
ands r3, r0, #4
beq two_word_aligned /* If dst already two-word aligned. */
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #4
blt copy_less_than_64
two_word_aligned:
/* TODO: Align to cacheline (useful for PLD optimization). */
/* Every loop iteration copies 64 bytes. */
1:
.irp offset, #0, #8, #16, #24, #32, #40, #48, #56
ldrd r4, r5, [r1, \offset]
strd r4, r5, [r0, \offset]
.endr
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 1b /* If there is more to copy. */
copy_less_than_64:
/* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
Restore the count if there is more than 7 bytes to copy. */
adds r2, r2, #56
blt copy_less_than_8
/* Copy 8 bytes at a time. */
2:
ldrd r4, r5, [r1], #8
strd r4, r5, [r0], #8
subs r2, r2, #8
bge 2b /* If there is more to copy. */
copy_less_than_8:
/* Get here if less than 8 bytes to copy, -8 <= r2 < 0.
Check if there is more to copy. */
cmn r2, #8
beq return /* If r2 + 8 == 0. */
/* Restore the count if there is more than 3 bytes to copy. */
adds r2, r2, #4
blt copy_less_than_4
/* Copy 4 bytes. */
ldr r3, [r1], #4
str r3, [r0], #4
copy_less_than_4:
/* Get here if less than 4 bytes to copy, -4 <= r2 < 0. */
/* Restore the count, check if there is more to copy. */
adds r2, r2, #4
beq return /* If r2 == 0. */
/* Get here with r2 is in {1,2,3}={01,10,11}. */
/* Logical shift left r2, insert 0s, update flags. */
lsls r2, r2, #31
/* Copy byte by byte.
Condition ne means the last bit of r2 is 0.
Condition cs means the second to last bit of r2 is set,
i.e., r2 is 1 or 3. */
itt ne
ldrbne r3, [r1], #1
strbne r3, [r0], #1
itttt cs
ldrbcs r4, [r1], #1
ldrbcs r5, [r1]
strbcs r4, [r0], #1
strbcs r5, [r0]
return:
/* Restore registers: optimized pop {r0, r4, r5, pc} */
ldrd r4, r5, [sp], #8
pop {r0, pc} /* This is the only return point of memcpy. */
dst_not_word_aligned:
/* Get here when dst is not aligned and ip has the last 2 bits of dst,
i.e., ip is the offset of dst from word.
The number of bytes that remains to copy is r2 + 4,
i.e., there are at least 4 bytes to copy.
Write a partial word (0 to 3 bytes), such that dst becomes
word-aligned. */
/* If dst is at ip bytes offset from a word (with 0 < ip < 4),
then there are (4 - ip) bytes to fill up to align dst to the next
word. */
rsb ip, ip, #4 /* ip = #4 - ip. */
cmp ip, #2
/* Copy byte by byte with conditionals. */
itt gt
ldrbgt r3, [r1], #1
strbgt r3, [r0], #1
itt ge
ldrbge r4, [r1], #1
strbge r4, [r0], #1
ldrb lr, [r1], #1 /* lr is free scratch here: it was saved above. */
strb lr, [r0], #1
/* Update the count.
ip holds the number of bytes we have just copied. */
subs r2, r2, ip /* r2 = r2 - ip. */
blt copy_less_than_4 /* If r2 < ip. */
/* Get here if there are more than 4 bytes to copy.
Check if src is aligned. If beforehand src and dst were not word
aligned but congruent (same offset), then now they are both
word-aligned, and we can copy the rest efficiently (without
shifting). */
ands ip, r1, #3 /* ip = last 2 bits of src. */
beq word_aligned /* If r1 is word-aligned. */
src_not_word_aligned:
/* Get here when src is not word-aligned, but dst is word-aligned.
The number of bytes that remains to copy is r2+4. */
/* Copy word by word using LDR when alignment can be done in hardware,
i.e., SCTLR.A is set, supporting unaligned access in LDR and STR. */
subs r2, r2, #60
blt 8f
7:
/* Copy 64 bytes in every loop iteration. */
.irp offset, #0, #4, #8, #12, #16, #20, #24, #28, #32, #36, #40, #44, #48, #52, #56, #60
ldr r3, [r1, \offset]
str r3, [r0, \offset]
.endr
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 7b
8:
/* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
Check if there is more than 3 bytes to copy. */
adds r2, r2, #60
blt copy_less_than_4
9:
/* Get here if there is less than 64 but at least 4 bytes to copy,
where the number of bytes to copy is r2+4. */
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #4
bge 9b
b copy_less_than_4
.syntax unified
.global __aeabi_memcpy4
.type __aeabi_memcpy4, %function
/* void __aeabi_memcpy4 (void *dest, const void *src, size_t n)
   Same register contract as __aeabi_memcpy; caller guarantees
   4-byte alignment of both pointers, so the alignment probes of
   the general entry point are skipped.  Note the `bl` clobbers lr,
   but lr was already pushed; the shared tail returns with
   pop {r0, pc}, so control goes back to the original caller.  */
__aeabi_memcpy4:
/* Assumes that both of its arguments are 4-byte aligned. */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
bl word_aligned
.syntax unified
.global __aeabi_memcpy8
.type __aeabi_memcpy8, %function
/* void __aeabi_memcpy8 (void *dest, const void *src, size_t n)
   Same register contract as __aeabi_memcpy; caller guarantees
   8-byte alignment, so both the word-alignment probes and the
   dst-to-8-bytes realignment step can be skipped.  The three subs
   below bias r2 by a total of 64, matching the bias expected by
   two_word_aligned / copy_less_than_64 / copy_less_than_8.
   As in __aeabi_memcpy4, lr is already saved, so the `bl` is
   effectively a branch; the shared tail's pop {r0, pc} returns.  */
__aeabi_memcpy8:
/* Assumes that both of its arguments are 8-byte aligned. */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
/* Is there at least 8 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_8 /* If n < 8. */
/* Is there at least 64 bytes to copy? */
subs r2, r2, #56
blt copy_less_than_64 /* if n + 8 < 64. */
bl two_word_aligned
#endif
|
stsp/newlib-ia16
| 1,934
|
newlib/libc/machine/arm/strlen-thumb1-Os.S
|
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* Emit the .text/.p2align/.global/.type boilerplate and the label
   for a global function \f, aligned to 2^\p2align bytes.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
.arch armv4t
.eabi_attribute Tag_also_compatible_with, "\006\013" /* ARMv6-M. */
.eabi_attribute Tag_ARM_ISA_use, 0
.thumb
.syntax unified
/* size_t strlen (const char *s)
   Minimal size-optimised Thumb-1 byte loop.
   In:  r0 = s.   Out: r0 = length.   Clobbers r2, r3, flags.  */
def_fn strlen p2align=1
movs r3, #0 /* r3 = byte index; ends up counting the NUL too. */
1:
ldrb r2, [r0, r3]
adds r3, r3, #1
cmp r2, #0
bne 1b /* Loop until the NUL byte has been loaded. */
subs r0, r3, #1 /* length = count - 1 (exclude the NUL). */
bx lr
.size strlen, . - strlen
|
stsp/newlib-ia16
| 9,175
|
newlib/libc/machine/arm/strcmp-armv7m.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Very similar to the generic code, but uses Thumb2 as implemented
in ARMv7-M. */
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define data1 r2
#define data2 r3
#define tmp2 r5
#define tmp1 r12
#define syndrome r12 /* Overlaps tmp1 */
.thumb
.syntax unified
/* int strcmp (const char *s1, const char *s2)
   ARMv7-M (Thumb-2) implementation.
   In:  src1 (r0) = s1, src2 (r1) = s2.
   Out: result (r0) = byte difference at the first mismatch,
        0 if the strings are equal.
   Clobbers r2, r3, r12 and flags; r5 (tmp2) is saved on the stack
   only on the mutually-unaligned path.  NUL detection uses the
   classic syndrome (x - 0x01010101) & ~x & 0x80808080, which is
   non-zero iff some byte of x is zero.  */
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
eor tmp1, src1, src2
tst tmp1, #3
/* Strings not at same byte offset from a word boundary. */
bne .Lstrcmp_unaligned
ands tmp1, src1, #3
bic src1, src1, #3
bic src2, src2, #3
ldr data1, [src1], #4
it eq
ldreq data2, [src2], #4
beq 4f
/* Although s1 and s2 have identical initial alignment, they are
not currently word aligned. Rather than comparing bytes,
make sure that any bytes fetched from before the addressed
bytes are forced to 0xff. Then they will always compare
equal. */
eor tmp1, tmp1, #3
mvn data2, #MSB
lsl tmp1, tmp1, #3
S2LO tmp1, data2, tmp1
ldr data2, [src2], #4
orr data1, data1, tmp1
orr data2, data2, tmp1
.p2align 2
/* Critical loop. */
4:
sub syndrome, data1, #0x01010101
cmp data1, data2
/* check for any zero bytes in first word */
itttt eq
biceq syndrome, syndrome, data1
tsteq syndrome, #0x80808080
ldreq data1, [src1], #4
ldreq data2, [src2], #4
beq 4b
2:
/* There's a zero or a different byte in the word */
S2HI result, data1, #24
S2LO data1, data1, #8
cmp result, #1 /* carry clear iff the extracted byte is NUL. */
it cs
cmpcs result, data2, S2HI #24
it eq
S2LOEQ data2, data2, #8
beq 2b
/* On a big-endian machine, RESULT contains the desired byte in bits
0-7; on a little-endian machine they are in bits 24-31. In
both cases the other bits in RESULT are all zero. For DATA2 the
interesting byte is at the other end of the word, but the
other bits are not necessarily zero. We need a signed result
representing the difference in the unsigned bytes, so for the
little-endian case we can't just shift the interesting bits
up. */
#ifdef __ARM_BIG_ENDIAN
sub result, result, data2, lsr #24
#else
and data2, data2, #255
lsrs result, result, #24
subs result, result, data2
#endif
RETURN
#if 0
/* The assembly code below is based on the following algorithm. */
#ifdef __ARM_BIG_ENDIAN
#define RSHIFT <<
#define LSHIFT >>
#else
#define RSHIFT >>
#define LSHIFT <<
#endif
#define body(shift) \
mask = 0xffffffffU RSHIFT shift; \
data1 = *src1++; \
data2 = *src2++; \
do \
{ \
tmp2 = data1 & mask; \
if (__builtin_expect(tmp2 != data2 RSHIFT shift, 0)) \
{ \
data2 RSHIFT= shift; \
break; \
} \
if (__builtin_expect(((data1 - b1) & ~data1) & (b1 << 7), 0)) \
{ \
/* See comment in assembler below re syndrome on big-endian */\
if ((((data1 - b1) & ~data1) & (b1 << 7)) & mask) \
data2 RSHIFT= shift; \
else \
{ \
data2 = *src2; \
tmp2 = data1 RSHIFT (32 - shift); \
data2 = (data2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
} \
break; \
} \
data2 = *src2++; \
tmp2 ^= data1; \
if (__builtin_expect(tmp2 != data2 LSHIFT (32 - shift), 0)) \
{ \
tmp2 = data1 >> (32 - shift); \
data2 = (data2 << (32 - shift)) RSHIFT (32 - shift); \
break; \
} \
data1 = *src1++; \
} while (1)
const unsigned* src1;
const unsigned* src2;
unsigned data1, data2;
unsigned mask;
unsigned shift;
unsigned b1 = 0x01010101;
char c1, c2;
unsigned tmp2;
while (((unsigned) s1) & 3)
{
c1 = *s1++;
c2 = *s2++;
if (c1 == 0 || c1 != c2)
return c1 - (int)c2;
}
src1 = (unsigned*) (((unsigned)s1) & ~3);
src2 = (unsigned*) (((unsigned)s2) & ~3);
tmp2 = ((unsigned) s2) & 3;
if (tmp2 == 1)
{
body(8);
}
else if (tmp2 == 2)
{
body(16);
}
else
{
body (24);
}
do
{
#ifdef __ARM_BIG_ENDIAN
c1 = (char) tmp2 >> 24;
c2 = (char) data2 >> 24;
#else /* not __ARM_BIG_ENDIAN */
c1 = (char) tmp2;
c2 = (char) data2;
#endif /* not __ARM_BIG_ENDIAN */
tmp2 RSHIFT= 8;
data2 RSHIFT= 8;
} while (c1 != 0 && c1 == c2);
return c1 - c2;
#endif /* 0 */
/* First of all, compare bytes until src1(sp1) is word-aligned. */
.Lstrcmp_unaligned:
tst src1, #3
beq 2f
ldrb data1, [src1], #1
ldrb data2, [src2], #1
cmp data1, #1 /* carry clear iff data1 == 0 (end of string). */
it cs
cmpcs data1, data2
beq .Lstrcmp_unaligned
sub result, data1, data2
bx lr
2:
/* src1 is now word-aligned; dispatch on src2's residual offset. */
stmfd sp!, {r5}
.cfi_def_cfa_offset 4
.cfi_offset 5, -4
ldr data1, [src1], #4
and tmp2, src2, #3
bic src2, src2, #3
ldr data2, [src2], #4
cmp tmp2, #2
beq .Loverlap2
bhi .Loverlap1
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
.Loverlap3:
bic tmp2, data1, #MSB
cmp tmp2, data2, S2LO #8
sub syndrome, data1, #0x01010101
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #0xff000000
itt ne
tstne data1, #0x00ff0000
tstne data1, #0x0000ff00
beq .Lstrcmp_done_equal
#else
bics syndrome, syndrome, #0xff000000
bne .Lstrcmp_done_equal
#endif
ldrb data2, [src2]
S2LO tmp2, data1, #24
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #24
#endif
b .Lstrcmp_tail
6:
S2LO tmp2, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
/* Critical inner Loop: Block with 2 bytes initial overlap. */
.p2align 2
.Loverlap2:
S2HI tmp2, data1, #16
sub syndrome, data1, #0x01010101
S2LO tmp2, tmp2, #16
bic syndrome, syndrome, data1
cmp tmp2, data2, S2LO #16
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst data1, #0xff000000
it ne
tstne data1, #0x00ff0000
beq .Lstrcmp_done_equal
#else
lsls syndrome, syndrome, #16
bne .Lstrcmp_done_equal
#endif
ldrh data2, [src2]
S2LO tmp2, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
/* Label 6 falls through into label 4 deliberately. */
S2HI data2, data2, #16
S2LO tmp2, data1, #16
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
/* Critical inner Loop: Block with 1 byte initial overlap. */
.p2align 2
.Loverlap1:
and tmp2, data1, #LSB
cmp tmp2, data2, S2LO #24
sub syndrome, data1, #0x01010101
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #LSB
beq .Lstrcmp_done_equal
ldr data2, [src2], #4
6:
S2LO tmp2, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldmfd sp!, {r5}
.cfi_restore 5
.cfi_def_cfa_offset 0
RETURN
.Lstrcmp_tail:
/* Byte-by-byte comparison of the remaining (shifted) words in
   tmp2/data2 until a NUL or a mismatch yields the result. */
.cfi_restore_state
and r2, tmp2, #LSB
and result, data2, #LSB
cmp result, #1
it cs
cmpcs result, r2
itt eq
S2LOEQ tmp2, tmp2, #8
S2LOEQ data2, data2, #8
beq .Lstrcmp_tail
sub result, r2, result
ldmfd sp!, {r5}
.cfi_restore 5
.cfi_def_cfa_offset 0
RETURN
.cfi_endproc
.size strcmp, . - strcmp
|
stsp/newlib-ia16
| 2,582
|
newlib/libc/machine/arm/aeabi_memset-arm.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.arm
.syntax divided
.global __aeabi_memset
.type __aeabi_memset, %function
ASM_ALIAS __aeabi_memset4 __aeabi_memset
ASM_ALIAS __aeabi_memset8 __aeabi_memset
/* void __aeabi_memset (void *dest, size_t n, int c)
   As used below: r0 = dest (preserved), r1 = byte count, r2 = fill
   value.  Note the AEABI argument order differs from ANSI memset.
   Strategy: byte-fill to a word boundary, then 16-byte and 4-byte
   word runs, then a byte tail.  */
__aeabi_memset:
tst r0, #3 @ is dest already word-aligned?
stmfd sp!, {r4, lr}
beq 10f @ yes: skip the alignment loop
cmp r1, #0
sub r1, r1, #1 @ pre-decrement the count ...
beq 9f @ ... bailing out when it was 0
and ip, r2, #255 @ ip = fill byte
mov r3, r0 @ r3 = write pointer (r0 kept intact)
b 2f
1:
cmp r1, #0
sub r1, r1, #1
beq 9f @ ran out of bytes while aligning
2: @ store single bytes up to alignment
strb ip, [r3], #1
tst r3, #3
bne 1b
3: @ r3 word-aligned; r1 = bytes remaining
cmp r1, #3
bls 7f @ fewer than 4 left: byte tail only
and lr, r2, #255 @ replicate the fill byte ...
orr lr, lr, lr, asl #8 @ ... into all four ...
cmp r1, #15
orr lr, lr, lr, asl #16 @ ... bytes of lr
bls 5f @ fewer than 16 left: 4-byte loop
mov r4, r1
add ip, r3, #16
4: @ fill 16 bytes per iteration
sub r4, r4, #16
cmp r4, #15
str lr, [ip, #-16]
str lr, [ip, #-12]
str lr, [ip, #-8]
str lr, [ip, #-4]
add ip, ip, #16
bhi 4b @ while at least 16 bytes remain
sub ip, r1, #16 @ advance r3 by the amount written:
bic ip, ip, #15 @ ip = ((r1 - 16) & ~15) + 16
and r1, r1, #15 @ r1 = bytes remaining (< 16)
add ip, ip, #16
cmp r1, #3
add r3, r3, ip
bls 7f @ fewer than 4 left: byte tail
5: @ fill 4 bytes per iteration
mov r4, r3
mov ip, r1
6:
sub ip, ip, #4
cmp ip, #3
str lr, [r4], #4
bhi 6b @ while at least 4 bytes remain
sub ip, r1, #4 @ advance r3 past the words written
bic ip, ip, #3
add ip, ip, #4
add r3, r3, ip
and r1, r1, #3 @ r1 = trailing bytes (< 4)
7: @ store the trailing 0-3 bytes
cmp r1, #0
andne r2, r2, #255 @ r2 = fill byte
addne r1, r3, r1 @ r1 = end address
beq 9f
8:
strb r2, [r3], #1
cmp r3, r1
bne 8b
9: @ done; r0 still holds the original dest
ldmfd sp!, {r4, lr}
bx lr
10: @ dest was word-aligned on entry
mov r3, r0
b 3b
.size __aeabi_memset, . - __aeabi_memset
|
stsp/newlib-ia16
| 7,760
|
newlib/libc/machine/arm/memcpy-armv7m.S
|
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* This memcpy routine is optimised for Cortex-M3/M4 cores with/without
unaligned access.
If compiled with GCC, this file should be enclosed within following
pre-processing check:
if defined (__ARM_ARCH_7M__) || defined (__ARM_ARCH_7EM__)
Prototype: void *memcpy (void *dst, const void *src, size_t count);
The job will be done in 5 steps.
Step 1: Align src/dest pointers, copy mis-aligned if fail to align both
Step 2: Repeatedly copy big block size of __OPT_BIG_BLOCK_SIZE
Step 3: Repeatedly copy big block size of __OPT_MID_BLOCK_SIZE
Step 4: Copy word by word
Step 5: Copy byte-to-byte
Tunable options:
__OPT_BIG_BLOCK_SIZE: Size of big block in words. Default to 64.
__OPT_MID_BLOCK_SIZE: Size of big block in words. Default to 16.
*/
/* Unrolling configuration for the memcpy below.
   BEGIN_UNROLL_{BIG,MID}_BLOCK expand to a .irp over the word offsets
   inside one block; END_UNROLL closes the .irp.  Only the block sizes
   enumerated here are supported -- anything else is a build error.  */
#ifndef __OPT_BIG_BLOCK_SIZE
#define __OPT_BIG_BLOCK_SIZE (4 * 16)
#endif
#ifndef __OPT_MID_BLOCK_SIZE
#define __OPT_MID_BLOCK_SIZE (4 * 4)
#endif
#if __OPT_BIG_BLOCK_SIZE == 16
#define BEGIN_UNROLL_BIG_BLOCK \
.irp offset, 0,4,8,12
#elif __OPT_BIG_BLOCK_SIZE == 32
#define BEGIN_UNROLL_BIG_BLOCK \
.irp offset, 0,4,8,12,16,20,24,28
#elif __OPT_BIG_BLOCK_SIZE == 64
#define BEGIN_UNROLL_BIG_BLOCK \
.irp offset, 0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60
#else
#error "Illegal __OPT_BIG_BLOCK_SIZE"
#endif
#if __OPT_MID_BLOCK_SIZE == 8
#define BEGIN_UNROLL_MID_BLOCK \
.irp offset, 0,4
#elif __OPT_MID_BLOCK_SIZE == 16
#define BEGIN_UNROLL_MID_BLOCK \
.irp offset, 0,4,8,12
#else
#error "Illegal __OPT_MID_BLOCK_SIZE"
#endif
#define END_UNROLL .endr
.syntax unified
.text
.align 2
.global memcpy
.thumb
.thumb_func
.type memcpy, %function
/* void *memcpy (void *dst, const void *src, size_t count)
   See the file header for the 5-step strategy.  The original dst is
   preserved in ip (or on the stack) and returned in r0.  */
memcpy:
@ r0: dst
@ r1: src
@ r2: len
#ifdef __ARM_FEATURE_UNALIGNED
/* In case of UNALIGNED access supported, ip is not used in
function body. */
mov ip, r0 @ keep original dst for the return value
#else
push {r0}
#endif
orr r3, r1, r0
ands r3, r3, #3 @ are both src and dst word-aligned?
bne .Lmisaligned_copy
.Lbig_block:
subs r2, __OPT_BIG_BLOCK_SIZE @ bias r2; loop below while no borrow
blo .Lmid_block
/* Kernel loop for big block copy */
.align 2
.Lbig_block_loop:
BEGIN_UNROLL_BIG_BLOCK
#ifdef __ARM_ARCH_7EM__
ldr r3, [r1], #4 @ 7E-M: post-increment addressing
str r3, [r0], #4
END_UNROLL
#else /* __ARM_ARCH_7M__ */
ldr r3, [r1, \offset] @ 7-M: offset addressing, bump the
str r3, [r0, \offset] @ pointers once per block instead
END_UNROLL
adds r0, __OPT_BIG_BLOCK_SIZE
adds r1, __OPT_BIG_BLOCK_SIZE
#endif
subs r2, __OPT_BIG_BLOCK_SIZE
bhs .Lbig_block_loop
.Lmid_block:
adds r2, __OPT_BIG_BLOCK_SIZE - __OPT_MID_BLOCK_SIZE @ re-bias: r2 = len - MID
blo .Lcopy_word_by_word
/* Kernel loop for mid-block copy */
.align 2
.Lmid_block_loop:
BEGIN_UNROLL_MID_BLOCK
#ifdef __ARM_ARCH_7EM__
ldr r3, [r1], #4
str r3, [r0], #4
END_UNROLL
#else /* __ARM_ARCH_7M__ */
ldr r3, [r1, \offset]
str r3, [r0, \offset]
END_UNROLL
adds r0, __OPT_MID_BLOCK_SIZE
adds r1, __OPT_MID_BLOCK_SIZE
#endif
subs r2, __OPT_MID_BLOCK_SIZE
bhs .Lmid_block_loop
.Lcopy_word_by_word:
adds r2, __OPT_MID_BLOCK_SIZE - 4 @ re-bias: r2 = len - 4
blo .Lcopy_less_than_4
/* Kernel loop for small block copy */
.align 2
.Lcopy_word_by_word_loop:
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, #4
bhs .Lcopy_word_by_word_loop
.Lcopy_less_than_4:
adds r2, #4 @ undo the bias: r2 = 0..3 bytes left
beq .Ldone
lsls r2, r2, #31 @ N/Z <- bit 0 (odd byte); C <- bit 1 (halfword)
itt ne
ldrbne r3, [r1], #1 @ copy the odd byte, if any
strbne r3, [r0], #1
bcc .Ldone @ no halfword left
#ifdef __ARM_FEATURE_UNALIGNED
ldrh r3, [r1]
strh r3, [r0]
#else
ldrb r3, [r1] @ may be unaligned: copy as two bytes
strb r3, [r0]
ldrb r3, [r1, #1]
strb r3, [r0, #1]
#endif /* __ARM_FEATURE_UNALIGNED */
.Ldone:
#ifdef __ARM_FEATURE_UNALIGNED
mov r0, ip @ return the original dst
#else
pop {r0}
#endif
bx lr
.align 2
.Lmisaligned_copy:
#ifdef __ARM_FEATURE_UNALIGNED
/* Define label DST_ALIGNED to BIG_BLOCK. It will go to aligned copy
once destination is adjusted to aligned. */
#define Ldst_aligned Lbig_block
/* Copy word by word using LDR when alignment can be done in hardware,
i.e., SCTLR.A is set, supporting unaligned access in LDR and STR. */
cmp r2, #8
blo .Lbyte_copy
/* if src is aligned, just go to the big block loop. */
lsls r3, r1, #30 @ Z <- (src & 3) == 0
beq .Ldst_aligned
#else
/* if len < 12, misalignment adjustment has more overhead than
just byte-to-byte copy. Also, len must >=8 to guarantee code
afterward work correctly. */
cmp r2, #12
blo .Lbyte_copy
#endif /* __ARM_FEATURE_UNALIGNED */
/* Align dst only, not trying to align src. That is the because
handling of aligned src and misaligned dst need more overhead than
otherwise. By doing this the worst case is when initial src is aligned,
additional up to 4 byte additional copy will executed, which is
acceptable. */
ands r3, r0, #3
beq .Ldst_aligned
rsb r3, #4 @ r3 = bytes needed to align dst (1-3)
subs r2, r3
lsls r3, r3, #31 @ same N/C bit trick as the tail above
itt ne
ldrbne r3, [r1], #1
strbne r3, [r0], #1
bcc .Ldst_aligned
#ifdef __ARM_FEATURE_UNALIGNED
ldrh r3, [r1], #2
strh r3, [r0], #2
b .Ldst_aligned
#else
ldrb r3, [r1], #1
strb r3, [r0], #1
ldrb r3, [r1], #1
strb r3, [r0], #1
/* Now that dst is aligned */
.Ldst_aligned:
/* if r1 is aligned now, it means r0/r1 has the same misalignment,
and they are both aligned now. Go aligned copy. */
ands r3, r1, #3
beq .Lbig_block
/* dst is aligned, but src isn't. Misaligned copy. */
push {r4, r5}
subs r2, #4 @ keep one word in hand for the tail
/* Backward r1 by misaligned bytes, to make r1 aligned.
Since we need to restore r1 to unaligned address after the loop,
we need keep the offset bytes to ip and sub it from r1 afterward. */
subs r1, r3
rsb ip, r3, #4
/* Pre-load on word */
ldr r4, [r1], #4
cmp r3, #2
beq .Lmisaligned_copy_2_2
cmp r3, #3
beq .Lmisaligned_copy_3_1
/* Merge two successive aligned words into one dst word: shift the
   held word down and OR in the top of the next.  NOTE(review): the
   lsrs/lsls direction assumes little-endian byte order -- confirm
   before using on a big-endian target.  */
.macro mis_src_copy shift
1:
lsrs r4, r4, \shift
ldr r3, [r1], #4
lsls r5, r3, 32-\shift
orr r4, r4, r5 @ r4 = one re-aligned source word
str r4, [r0], #4
mov r4, r3 @ carry the new word into next round
subs r2, #4
bhs 1b
.endm
.Lmisaligned_copy_1_3:
mis_src_copy shift=8
b .Lsrc_misaligned_tail
.Lmisaligned_copy_3_1:
mis_src_copy shift=24
b .Lsrc_misaligned_tail
.Lmisaligned_copy_2_2:
/* For 2_2 misalignment, ldr is still faster than 2 x ldrh. */
mis_src_copy shift=16
.Lsrc_misaligned_tail:
adds r2, #4 @ undo the bias
subs r1, ip @ restore r1 to its true (unaligned) spot
pop {r4, r5}
#endif /* __ARM_FEATURE_UNALIGNED */
.Lbyte_copy:
subs r2, #4
blo .Lcopy_less_than_4
.Lbyte_copy_loop:
subs r2, #1
ldrb r3, [r1], #1
strb r3, [r0], #1
bhs .Lbyte_copy_loop
ldrb r3, [r1] @ exactly 3 bytes remain here
strb r3, [r0]
ldrb r3, [r1, #1]
strb r3, [r0, #1]
ldrb r3, [r1, #2]
strb r3, [r0, #2]
#ifdef __ARM_FEATURE_UNALIGNED
mov r0, ip @ return the original dst
#else
pop {r0}
#endif
bx lr
.size memcpy, .-memcpy
|
stsp/newlib-ia16
| 7,075
|
newlib/libc/machine/arm/memchr.S
|
/* Copyright (c) 2010-2011, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Written by Dave Gilbert <david.gilbert@linaro.org>
This memchr routine is optimised on a Cortex-A9 and should work on
all ARMv7 processors. It has a fast path for short sizes, and has
an optimised path for large data sets; the worst case is finding the
match early in a large data set. */
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
@ 2011-02-07 david.gilbert@linaro.org
@ Extracted from local git a5b438d861
@ 2011-07-14 david.gilbert@linaro.org
@ Import endianness fix from local git ea786f1b
@ 2011-10-11 david.gilbert@linaro.org
@ Import from cortex-strings bzr rev 63
@ Flip to ldrd (as suggested by Greta Yorsh)
@ Make conditional on CPU type
@ tidy
@ This code requires armv6t2 or later. Uses Thumb2.
.syntax unified
#include "acle-compat.h"
#include "arm_asm.h"
@ NOTE: This ifdef MUST match the one in memchr-stub.c
#if __ARM_ARCH_ISA_THUMB >= 2 && defined (__ARM_FEATURE_DSP)
#if __ARM_ARCH_PROFILE == 'M'
.arch armv7e-m
#else
.arch armv6t2
#endif
@ this lets us check a flag in a 00/ff byte easily in either endianness
#ifdef __ARMEB__
#define CHARTSTMASK(c) 1<<(31-(c*8))
#else
#define CHARTSTMASK(c) 1<<(c*8)
#endif
.text
.thumb
@ ---------------------------------------------------------------------------
.thumb_func
.align 2
.p2align 4,,15
.global memchr
.type memchr,%function
/* void *memchr (const void *s, int c, size_t n)
   Byte loop up to 8-byte alignment, then a UADD8/SEL parallel scan of
   double words, then a byte tail.  */
memchr:
@ r0 = start of memory to scan
@ r1 = character to look for
@ r2 = length
@ returns r0 = pointer to character or NULL if not found
and r1,r1,#0xff @ Don't trust the caller to pass a char
cmp r2,#16 @ If short don't bother with anything clever
blt 20f
tst r0, #7 @ If it's already aligned skip the next bit
beq 10f
@ Work up to an aligned point
5:
ldrb r3, [r0],#1
subs r2, r2, #1
cmp r3, r1
beq 50f @ If it matches exit found
tst r0, #7
cbz r2, 40f @ If we run off the end, exit not found
@ (cbz leaves flags alone: bne below still tests the tst above)
bne 5b @ If not aligned yet then do next byte
10:
@ We are aligned, we know we have at least 8 bytes to work with
push {r4,r5,r6,r7}
orr r1, r1, r1, lsl #8 @ expand the match word across all bytes
orr r1, r1, r1, lsl #16
bic r4, r2, #7 @ Number of double words to work with * 8
mvns r7, #0 @ all F's
movs r3, #0
15:
ldrd r5,r6,[r0],#8
subs r4, r4, #8
eor r5,r5, r1 @ r5,r6 have 00's where bytes match the target
eor r6,r6, r1
uadd8 r5, r5, r7 @ Par add 0xff - sets GE bits for bytes!=0
sel r5, r3, r7 @ bytes are 00 for none-00 bytes,
@ or ff for 00 bytes - NOTE INVERSION
uadd8 r6, r6, r7 @ Par add 0xff - sets GE bits for bytes!=0
sel r6, r5, r7 @ chained....bytes are 00 for none-00 bytes
@ or ff for 00 bytes - NOTE INVERSION
cbnz r6, 60f @ a match somewhere in this double word
bne 15b @ (Flags from the subs above)
pop {r4,r5,r6,r7}
and r1,r1,#0xff @ r1 back to a single character
and r2,r2,#7 @ Leave the count remaining as the number
@ after the double words have been done
20:
cbz r2, 40f @ 0 length or hit the end already then not found
21: @ Post aligned section, or just a short call
ldrb r3,[r0],#1
subs r2,r2,#1
eor r3,r3,r1 @ r3 = 0 if match - doesn't break flags from sub
cbz r3, 50f
bne 21b @ on r2 flags
40:
movs r0,#0 @ not found
bx lr
50:
subs r0,r0,#1 @ found: r0 had already advanced past it
bx lr
60: @ We're here because the fast path found a hit
@ now we have to track down exactly which word it was
@ r0 points to the start of the double word after the one tested
@ r5 has the 00/ff pattern for the first word, r6 has the chained value
cmp r5, #0
itte eq
moveq r5, r6 @ the end is in the 2nd word
subeq r0,r0,#3 @ Points to 2nd byte of 2nd word
subne r0,r0,#7 @ or 2nd byte of 1st word
@ r0 currently points to the 2nd byte of the word containing the hit
tst r5, # CHARTSTMASK(0) @ 1st character
bne 61f
adds r0,r0,#1
tst r5, # CHARTSTMASK(1) @ 2nd character
ittt eq
addeq r0,r0,#1
tsteq r5, # (3<<15) @ 2nd & 3rd character
@ If not the 3rd must be the last one
addeq r0,r0,#1
61:
pop {r4,r5,r6,r7}
subs r0,r0,#1
bx lr
#else
/* Defined in memchr-stub.c. */
#endif
|
stsp/newlib-ia16
| 2,040
|
newlib/libc/machine/arm/aeabi_memmove-thumb.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.thumb
.syntax unified
.global __aeabi_memmove
.type __aeabi_memmove, %function
ASM_ALIAS __aeabi_memmove4 __aeabi_memmove
ASM_ALIAS __aeabi_memmove8 __aeabi_memmove
/* void __aeabi_memmove (void *dest, const void *src, size_t n)
   As used below: r0 = dest (preserved), r1 = src, r2 = n.
   Copies backwards when the regions overlap with dest above src,
   forwards otherwise.  Thumb-1 only.  */
__aeabi_memmove:
push {r4, lr}
cmp r0, r1
bls 3f @ dest <= src: forward copy is safe
adds r4, r1, r2 @ r4 = src + n
cmp r0, r4
bcs 3f @ dest >= src + n: no overlap, forward
subs r3, r2, #1 @ r3 = index of the last byte
cmp r2, #0
beq 2f @ n == 0: nothing to copy
subs r2, r4, r2 @ r2 = src again (r1 is scratch below)
1: @ backward copy, highest byte first
ldrb r1, [r2, r3]
strb r1, [r0, r3]
subs r3, r3, #1
bcs 1b @ until the index wraps below zero
2: @ common exit; r0 still holds dest
pop {r4}
pop {r1}
bx r1 @ return via the saved lr
3: @ forward copy, index r3 counts up
movs r3, #0
cmp r2, #0
beq 2b
4:
ldrb r4, [r1, r3]
strb r4, [r0, r3]
adds r3, r3, #1
cmp r2, r3
bne 4b
b 2b
.size __aeabi_memmove, . - __aeabi_memmove
|
stsp/newlib-ia16
| 1,804
|
newlib/libc/machine/arm/strcmp-arm-tiny.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Tiny version of strcmp in ARM state. Used only when optimizing
for size. Also supports Thumb-2. */
.syntax unified
/* int strcmp (const char *s1, const char *s2): r0 = s1, r1 = s2.
   One byte per iteration; def_fn and RETURN are macros supplied
   earlier in the build (presumably arm_asm.h -- confirm).  */
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
1:
ldrb r2, [r0], #1
ldrb r3, [r1], #1
cmp r2, #1 @ C := (byte from s1 != 0)
it cs
cmpcs r2, r3 @ if not NUL, are the bytes equal?
beq 1b @ equal and not NUL: continue
2:
subs r0, r2, r3 @ *s1 - *s2 at the first difference/NUL
RETURN
.cfi_endproc
.size strcmp, . - strcmp
|
stsp/newlib-ia16
| 1,984
|
newlib/libc/machine/arm/strlen-thumb2-Os.S
|
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include "acle-compat.h"
/* def_fn f [p2align]: emit function \f in .text, aligned to
   2^p2align, exported and typed as a function, then its label.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#if __ARM_ARCH_ISA_THUMB >= 2 && __ARM_ARCH >= 7
.arch armv7
#else
.arch armv6t2
#endif
.eabi_attribute Tag_ARM_ISA_use, 0
.thumb
.syntax unified
/* size_t strlen (const char *s) -- minimal-size Thumb-2 version.
   r0 = s on entry; length returned in r0.  Clobbers r2, r3, flags.  */
def_fn strlen p2align=1
mov r3, r0 @ r3 = scan pointer, r0 keeps the start
1: ldrb.w r2, [r3], #1 @ load one byte, post-increment
cmp r2, #0
bne 1b @ until the NUL terminator
subs r0, r3, r0 @ bytes scanned including the NUL ...
subs r0, #1 @ ... minus one = strlen
bx lr
.size strlen, . - strlen
|
stsp/newlib-ia16
| 3,123
|
newlib/libc/machine/arm/strcmp-armv6m.S
|
/*
* Copyright (c) 2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Implementation of strcmp for ARMv6m. This version is only used in
ARMv6-M when we want an efficient implementation. Otherwize if the
code size is preferred, strcmp-armv4t.S will be used. */
.thumb_func
.syntax unified
.arch armv6-m
/* DoSub n, label: r0/r1 hold corresponding fragments of the source
   words r2/r3, and r4 holds the NUL-byte syndrome.  Compute
   r0 -= r1, fold in the syndrome bits shifted by \n (so a NUL in the
   fragment also triggers), and branch to \label unless both the
   difference and the shifted syndrome are zero.  The shift direction
   selects the string-order end of the word for each endianness.  */
.macro DoSub n, label
subs r0, r0, r1
#ifdef __ARM_BIG_ENDIAN
lsrs r1, r4, \n
#else
lsls r1, r4, \n
#endif
orrs r1, r0
bne \label
.endm
/* Byte_Test n, label: compare the byte selected by shift count \n of
   r2 against the same byte of r3, via DoSub.  */
.macro Byte_Test n, label
lsrs r0, r2, \n
lsrs r1, r3, \n
DoSub \n, \label
.endm
.text
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
/* int strcmp (const char *s1, const char *s2): r0 = s1, r1 = s2.
   Word-at-a-time when both pointers are word-aligned, otherwise a
   simple two-bytes-per-iteration loop (label 6).  Returns the usual
   <0 / 0 / >0 in r0.  */
mov r2, r0
push {r4, r5, r6, lr}
orrs r2, r1 @ merge the alignment bits of both
lsls r2, r2, #30 @ pointers; nonzero => unaligned
bne 6f
ldr r5, =0x01010101 @ NUL-detection constants:
lsls r6, r5, #7 @ r6 = 0x80808080
1: @ main loop: one word from each string
ldmia r0!, {r2}
ldmia r1!, {r3}
subs r4, r2, r5 @ classic (x - 0x01010101) & ~x
bics r4, r2 @ & 0x80808080: r4 gets 0x80 in
ands r4, r6 @ each byte of r2 that is NUL
beq 3f @ no NUL: whole-word compare below
#ifdef __ARM_BIG_ENDIAN
Byte_Test #24, 4f @ NUL present: check byte by byte,
Byte_Test #16, 4f @ leading (string-order) byte first
Byte_Test #8, 4f
b 7f
3:
cmp r2, r3
beq 1b @ words equal: fetch the next pair
@ NOTE(review): flags survive the untaken beq; this cmp looks
@ redundant (kept byte-identical to upstream).
cmp r2, r3 @ big-endian words compare in string order
#else
uxtb r0, r2 @ NUL present: compare byte by byte,
uxtb r1, r3 @ lowest-addressed byte first
DoSub #24, 2f
uxth r0, r2
uxth r1, r3
DoSub #16, 2f
lsls r0, r2, #8 @ isolate bytes 0-2
lsls r1, r3, #8
lsrs r0, r0, #8
lsrs r1, r1, #8
DoSub #8, 2f
lsrs r0, r2, #24 @ final byte
lsrs r1, r3, #24
subs r0, r0, r1
2: @ r0 already holds the byte difference
pop {r4, r5, r6, pc}
3: @ no NUL in this word
cmp r2, r3
beq 1b @ words equal: fetch the next pair
rev r0, r2 @ byte-reverse so an unsigned compare
rev r1, r3 @ gives string order on little-endian
cmp r0, r1
#endif
bls 5f
movs r0, #1 @ s1 > s2
4:
pop {r4, r5, r6, pc}
5:
movs r0, #0
mvns r0, r0 @ r0 = -1: s1 < s2
pop {r4, r5, r6, pc}
6: @ unaligned: two bytes per iteration
ldrb r2, [r0, #0]
ldrb r3, [r1, #0]
adds r0, #1
adds r1, #1
cmp r2, #0 @ end of s1?
beq 7f
cmp r2, r3 @ bytes differ?
bne 7f
ldrb r2, [r0, #0]
ldrb r3, [r1, #0]
adds r0, #1
adds r1, #1
cmp r2, #0
beq 7f
cmp r2, r3
beq 6b
7:
subs r0, r2, r3 @ difference of the terminating bytes
pop {r4, r5, r6, pc}
.cfi_endproc
.size strcmp, . - strcmp
|
stsp/newlib-ia16
| 2,015
|
newlib/libc/machine/arm/aeabi_memmove-arm.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.arm
.syntax divided
.global __aeabi_memmove
.type __aeabi_memmove, %function
ASM_ALIAS __aeabi_memmove4 __aeabi_memmove
ASM_ALIAS __aeabi_memmove8 __aeabi_memmove
/* void __aeabi_memmove (void *dest, const void *src, size_t n)
   As used below: r0 = dest (preserved), r1 = src, r2 = n.
   Overlap-safe: copies backwards when src < dest < src + n,
   forwards otherwise.  ARM state; ip used as byte scratch.  */
__aeabi_memmove:
cmp r0, r1
bls 2f @ dest <= src: forward copy
add r3, r1, r2 @ r3 = src + n
cmp r0, r3
bcs 2f @ regions disjoint: forward copy
cmp r2, #0
add r1, r0, r2 @ r1 = one past the last dest byte
bxeq lr @ n == 0: nothing to do
rsb r2, r2, r3 @ r2 = src (backward-loop stop address)
1: @ backward copy, pre-decrement
ldrb ip, [r3, #-1]!
cmp r2, r3
strb ip, [r1, #-1]!
bne 1b
bx lr
2: @ forward copy
cmp r2, #0
addne r2, r1, r2 @ r2 = one past the last src byte
subne r3, r0, #1 @ r3 = dest - 1 (pre-increment store)
beq 4f @ n == 0: nothing to do
3:
ldrb ip, [r1], #1
cmp r2, r1
strb ip, [r3, #1]!
bne 3b
bx lr
4:
bx lr
.size __aeabi_memmove, . - __aeabi_memmove
|
stsp/newlib-ia16
| 2,028
|
newlib/libc/machine/arm/strcmp-armv4t.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* This version is only used when we want a very basic Thumb1
implementation or for size, otherwise we use the base ARMv4
version. This is also suitable for ARMv6-M. */
.thumb
.syntax unified
.arch armv4t
.eabi_attribute Tag_also_compatible_with, "\006\013" /* ARMv6-M. */
.eabi_attribute Tag_ARM_ISA_use, 0
/* int strcmp (const char *s1, const char *s2): r0 = s1, r1 = s2.
   Plain Thumb-1 byte loop; def_fn is a macro supplied earlier in
   the build.  */
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
1: @ one byte per iteration
ldrb r2, [r0]
ldrb r3, [r1]
cmp r2, #0 @ end of s1?
beq 2f
adds r0, r0, #1
adds r1, r1, #1
cmp r2, r3
beq 1b @ bytes equal: continue
2:
subs r0, r2, r3 @ *s1 - *s2 at first difference or NUL
bx lr
.cfi_endproc
.size strcmp, . - strcmp
|
stsp/newlib-ia16
| 1,903
|
newlib/libc/machine/arm/aeabi_memmove-soft.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm_asm.h"
/* ASM_ALIAS new old
   Declare \new as a global function symbol aliased to \old.
   In Thumb mode .thumb_set must be used so that the alias keeps
   the Thumb bit of the target symbol.  */
.macro ASM_ALIAS new old
.global \new
.type \new, %function
#if defined (__thumb__)
.thumb_set \new, \old
#else
.set \new, \old
#endif
.endm
/* NOTE: This ifdef MUST match the one in aeabi_memmove.c. */
/* Select the ISA-specific implementation at preprocessing time;
   the C fallback (aeabi_memmove.c) covers the __SOFTFP__ case.  */
#if !defined (__SOFTFP__)
# if defined (__thumb2__)
# include "aeabi_memmove-thumb2.S"
# elif defined (__thumb__)
# include "aeabi_memmove-thumb.S"
# else
# include "aeabi_memmove-arm.S"
# endif
#endif
|
stsp/newlib-ia16
| 15,559
|
newlib/libc/machine/arm/memcpy-armv7a.S
|
/* Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This memcpy routine is optimised for Cortex-A15 cores and takes advantage
of VFP or NEON when built with the appropriate flags.
Assumptions:
ARMv6 (ARMv7-a if using Neon)
ARM state
Unaligned accesses
LDRD/STRD support unaligned word accesses
If compiled with GCC, this file should be enclosed within following
pre-processing check:
if defined (__ARM_ARCH_7A__) && defined (__ARM_FEATURE_UNALIGNED)
*/
.syntax unified
/* This implementation requires ARM state. */
.arm
/* Build configuration: pick NEON, VFP, or pure GP-register paths.
   FRAME_SIZE is the stack block used to save tmp2 (and, in the
   non-NEON paths, the B/C/D register pairs at [sp,#8..#24]).  */
#ifdef __ARM_NEON__
.fpu neon
.arch armv7-a
# define FRAME_SIZE 4
# define USE_VFP
# define USE_NEON
#elif !defined (__SOFTFP__)
.arch armv6
.fpu vfpv2
# define FRAME_SIZE 32
# define USE_VFP
#else
.arch armv6
# define FRAME_SIZE 32
#endif
/* Old versions of GAS incorrectly implement the NEON align semantics. */
#ifdef BROKEN_ASM_NEON_ALIGN
#define ALIGN(addr, align) addr,:align
#else
#define ALIGN(addr, align) addr:align
#endif
#define PC_OFFSET 8 /* PC pipeline compensation. */
#define INSN_SIZE 4
/* Call parameters. */
#define dstin r0
#define src r1
#define count r2
/* Locals. */
#define tmp1 r3
#define dst ip
#define tmp2 r10
#ifndef USE_NEON
/* For bulk copies using GP registers. */
#define A_l r2 /* Call-clobbered. */
#define A_h r3 /* Call-clobbered. */
#define B_l r4
#define B_h r5
#define C_l r6
#define C_h r7
#define D_l r8
#define D_h r9
#endif
/* Number of lines ahead to pre-fetch data. If you change this the code
below will need adjustment to compensate. */
#define prefetch_lines 5
#ifdef USE_VFP
/* cpy_line_vfp vreg, base
   Copy one 64-byte cache line at dst/src + \base using VFP
   doubleword registers.  \vreg plus d0-d2 are cycled store-first,
   load-next so each register always holds the data for the NEXT
   iteration; the final load of \vreg fetches prefetch_lines*64
   bytes ahead to act as a software prefetch.  */
.macro cpy_line_vfp vreg, base
vstr \vreg, [dst, #\base]
vldr \vreg, [src, #\base]
vstr d0, [dst, #\base + 8]
vldr d0, [src, #\base + 8]
vstr d1, [dst, #\base + 16]
vldr d1, [src, #\base + 16]
vstr d2, [dst, #\base + 24]
vldr d2, [src, #\base + 24]
vstr \vreg, [dst, #\base + 32]
vldr \vreg, [src, #\base + prefetch_lines * 64 - 32]
vstr d0, [dst, #\base + 40]
vldr d0, [src, #\base + 40]
vstr d1, [dst, #\base + 48]
vldr d1, [src, #\base + 48]
vstr d2, [dst, #\base + 56]
vldr d2, [src, #\base + 56]
.endm
/* cpy_tail_vfp vreg, base
   Same as cpy_line_vfp but for the last iterations: it omits the
   far-ahead reload of \vreg (and the matching reload after the
   #\base + 32 store) so no bytes beyond the requested region are
   read.  */
.macro cpy_tail_vfp vreg, base
vstr \vreg, [dst, #\base]
vldr \vreg, [src, #\base]
vstr d0, [dst, #\base + 8]
vldr d0, [src, #\base + 8]
vstr d1, [dst, #\base + 16]
vldr d1, [src, #\base + 16]
vstr d2, [dst, #\base + 24]
vldr d2, [src, #\base + 24]
vstr \vreg, [dst, #\base + 32]
vstr d0, [dst, #\base + 40]
vldr d0, [src, #\base + 40]
vstr d1, [dst, #\base + 48]
vldr d1, [src, #\base + 48]
vstr d2, [dst, #\base + 56]
vldr d2, [src, #\base + 56]
.endm
#endif
/* def_fn f [p2align=N]
   Emit the standard prologue for a global function \f in .text,
   aligned to 2^N bytes, with an ELF %function type.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* void *memcpy(void *dstin, const void *src, size_t count)
   In:  dstin = r0, src = r1, count = r2 (see #defines above).
   Out: r0 = dstin (preserved; dst = ip is used as the working
        destination pointer).
   Strategy: <64 bytes falls straight into a computed-jump exit
   ladder; otherwise save tmp2 on the stack, align DST to 64 bits
   and use medium/long copy loops (VFP, NEON or LDRD/STRD pairs
   depending on the build configuration).  */
def_fn memcpy p2align=6
mov dst, dstin /* Preserve dstin, we need to return it. */
cmp count, #64
bge .Lcpy_not_short
/* Deal with small copies quickly by dropping straight into the
exit block. */
.Ltail63unaligned:
#ifdef USE_NEON
/* Computed jump: skip forward in the ladder so that exactly
   (count / 8) doubleword copies execute.  PC_OFFSET compensates
   for the ARM pipeline's PC-read-ahead.  */
and tmp1, count, #0x38
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
vld1.8 {d0}, [src]! /* 14 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 12 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 10 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 8 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 6 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 4 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 2 words to go. */
vst1.8 {d0}, [dst]!
tst count, #4
ldrne tmp1, [src], #4
strne tmp1, [dst], #4
#else
/* Copy up to 15 full words of data. May not be aligned. */
/* Cannot use VFP for unaligned data. */
and tmp1, count, #0x3c
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(60 - PC_OFFSET/2 + INSN_SIZE/2)
/* Jump directly into the sequence below at the correct offset. */
add pc, pc, tmp1, lsl #1
ldr tmp1, [src, #-60] /* 15 words to go. */
str tmp1, [dst, #-60]
ldr tmp1, [src, #-56] /* 14 words to go. */
str tmp1, [dst, #-56]
ldr tmp1, [src, #-52]
str tmp1, [dst, #-52]
ldr tmp1, [src, #-48] /* 12 words to go. */
str tmp1, [dst, #-48]
ldr tmp1, [src, #-44]
str tmp1, [dst, #-44]
ldr tmp1, [src, #-40] /* 10 words to go. */
str tmp1, [dst, #-40]
ldr tmp1, [src, #-36]
str tmp1, [dst, #-36]
ldr tmp1, [src, #-32] /* 8 words to go. */
str tmp1, [dst, #-32]
ldr tmp1, [src, #-28]
str tmp1, [dst, #-28]
ldr tmp1, [src, #-24] /* 6 words to go. */
str tmp1, [dst, #-24]
ldr tmp1, [src, #-20]
str tmp1, [dst, #-20]
ldr tmp1, [src, #-16] /* 4 words to go. */
str tmp1, [dst, #-16]
ldr tmp1, [src, #-12]
str tmp1, [dst, #-12]
ldr tmp1, [src, #-8] /* 2 words to go. */
str tmp1, [dst, #-8]
ldr tmp1, [src, #-4]
str tmp1, [dst, #-4]
#endif
/* Final 0-3 bytes: lsls #31 puts count bit1 into C (halfword)
   and bit0 into N (byte).  */
lsls count, count, #31
ldrhcs tmp1, [src], #2
ldrbne src, [src] /* Src is dead, use as a scratch. */
strhcs tmp1, [dst], #2
strbne src, [dst]
bx lr
.Lcpy_not_short:
/* At least 64 bytes to copy, but don't know the alignment yet. */
str tmp2, [sp, #-FRAME_SIZE]!
and tmp2, src, #7
and tmp1, dst, #7
cmp tmp1, tmp2
bne .Lcpy_notaligned
#ifdef USE_VFP
/* Magic dust alert! Force VFP on Cortex-A9. Experiments show
that the FP pipeline is much better at streaming loads and
stores. This is outside the critical loop. */
vmov.f32 s0, s0
#endif
/* SRC and DST have the same mutual 32-bit alignment, but we may
still need to pre-copy some bytes to get to natural alignment.
We bring DST into full 64-bit alignment. */
lsls tmp2, dst, #29
beq 1f
rsbs tmp2, tmp2, #0
sub count, count, tmp2, lsr #29
ldrmi tmp1, [src], #4
strmi tmp1, [dst], #4
lsls tmp2, tmp2, #2
ldrhcs tmp1, [src], #2
ldrbne tmp2, [src], #1
strhcs tmp1, [dst], #2
strbne tmp2, [dst], #1
1:
subs tmp2, count, #64 /* Use tmp2 for count. */
blt .Ltail63aligned
cmp tmp2, #512
bge .Lcpy_body_long
.Lcpy_body_medium: /* Count in tmp2. */
#ifdef USE_VFP
/* 64 bytes per iteration, interleaving d0/d1 loads and stores.  */
1:
vldr d0, [src, #0]
subs tmp2, tmp2, #64
vldr d1, [src, #8]
vstr d0, [dst, #0]
vldr d0, [src, #16]
vstr d1, [dst, #8]
vldr d1, [src, #24]
vstr d0, [dst, #16]
vldr d0, [src, #32]
vstr d1, [dst, #24]
vldr d1, [src, #40]
vstr d0, [dst, #32]
vldr d0, [src, #48]
vstr d1, [dst, #40]
vldr d1, [src, #56]
vstr d0, [dst, #48]
add src, src, #64
vstr d1, [dst, #56]
add dst, dst, #64
bge 1b
tst tmp2, #0x3f
beq .Ldone
.Ltail63aligned: /* Count in tmp2. */
and tmp1, tmp2, #0x38
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
vldr d0, [src, #-56] /* 14 words to go. */
vstr d0, [dst, #-56]
vldr d0, [src, #-48] /* 12 words to go. */
vstr d0, [dst, #-48]
vldr d0, [src, #-40] /* 10 words to go. */
vstr d0, [dst, #-40]
vldr d0, [src, #-32] /* 8 words to go. */
vstr d0, [dst, #-32]
vldr d0, [src, #-24] /* 6 words to go. */
vstr d0, [dst, #-24]
vldr d0, [src, #-16] /* 4 words to go. */
vstr d0, [dst, #-16]
vldr d0, [src, #-8] /* 2 words to go. */
vstr d0, [dst, #-8]
#else
sub src, src, #8
sub dst, dst, #8
1:
ldrd A_l, A_h, [src, #8]
strd A_l, A_h, [dst, #8]
ldrd A_l, A_h, [src, #16]
strd A_l, A_h, [dst, #16]
ldrd A_l, A_h, [src, #24]
strd A_l, A_h, [dst, #24]
ldrd A_l, A_h, [src, #32]
strd A_l, A_h, [dst, #32]
ldrd A_l, A_h, [src, #40]
strd A_l, A_h, [dst, #40]
ldrd A_l, A_h, [src, #48]
strd A_l, A_h, [dst, #48]
ldrd A_l, A_h, [src, #56]
strd A_l, A_h, [dst, #56]
ldrd A_l, A_h, [src, #64]!
strd A_l, A_h, [dst, #64]!
subs tmp2, tmp2, #64
bge 1b
tst tmp2, #0x3f
bne 1f
ldr tmp2,[sp], #FRAME_SIZE
bx lr
1:
add src, src, #8
add dst, dst, #8
.Ltail63aligned: /* Count in tmp2. */
/* Copy up to 7 d-words of data. Similar to Ltail63unaligned, but
we know that the src and dest are 32-bit aligned so we can use
LDRD/STRD to improve efficiency. */
/* TMP2 is now negative, but we don't care about that. The bottom
six bits still tell us how many bytes are left to copy. */
and tmp1, tmp2, #0x38
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
ldrd A_l, A_h, [src, #-56] /* 14 words to go. */
strd A_l, A_h, [dst, #-56]
ldrd A_l, A_h, [src, #-48] /* 12 words to go. */
strd A_l, A_h, [dst, #-48]
ldrd A_l, A_h, [src, #-40] /* 10 words to go. */
strd A_l, A_h, [dst, #-40]
ldrd A_l, A_h, [src, #-32] /* 8 words to go. */
strd A_l, A_h, [dst, #-32]
ldrd A_l, A_h, [src, #-24] /* 6 words to go. */
strd A_l, A_h, [dst, #-24]
ldrd A_l, A_h, [src, #-16] /* 4 words to go. */
strd A_l, A_h, [dst, #-16]
ldrd A_l, A_h, [src, #-8] /* 2 words to go. */
strd A_l, A_h, [dst, #-8]
#endif
tst tmp2, #4
ldrne tmp1, [src], #4
strne tmp1, [dst], #4
lsls tmp2, tmp2, #31 /* Count (tmp2) now dead. */
ldrhcs tmp1, [src], #2
ldrbne tmp2, [src]
strhcs tmp1, [dst], #2
strbne tmp2, [dst]
.Ldone:
ldr tmp2, [sp], #FRAME_SIZE
bx lr
.Lcpy_body_long: /* Count in tmp2. */
/* Long copy. We know that there's at least (prefetch_lines * 64)
bytes to go. */
#ifdef USE_VFP
/* Don't use PLD. Instead, read some data in advance of the current
copy position into a register. This should act like a PLD
operation but we won't have to repeat the transfer. */
vldr d3, [src, #0]
vldr d4, [src, #64]
vldr d5, [src, #128]
vldr d6, [src, #192]
vldr d7, [src, #256]
vldr d0, [src, #8]
vldr d1, [src, #16]
vldr d2, [src, #24]
add src, src, #32
subs tmp2, tmp2, #prefetch_lines * 64 * 2
blt 2f
1:
cpy_line_vfp d3, 0
cpy_line_vfp d4, 64
cpy_line_vfp d5, 128
add dst, dst, #3 * 64
add src, src, #3 * 64
cpy_line_vfp d6, 0
cpy_line_vfp d7, 64
add dst, dst, #2 * 64
add src, src, #2 * 64
subs tmp2, tmp2, #prefetch_lines * 64
bge 1b
2:
/* Drain the in-flight lines without loading ahead, then fall back
   to the medium loop for whatever remains.  */
cpy_tail_vfp d3, 0
cpy_tail_vfp d4, 64
cpy_tail_vfp d5, 128
add src, src, #3 * 64
add dst, dst, #3 * 64
cpy_tail_vfp d6, 0
vstr d7, [dst, #64]
vldr d7, [src, #64]
vstr d0, [dst, #64 + 8]
vldr d0, [src, #64 + 8]
vstr d1, [dst, #64 + 16]
vldr d1, [src, #64 + 16]
vstr d2, [dst, #64 + 24]
vldr d2, [src, #64 + 24]
vstr d7, [dst, #64 + 32]
add src, src, #96
vstr d0, [dst, #64 + 40]
vstr d1, [dst, #64 + 48]
vstr d2, [dst, #64 + 56]
add dst, dst, #128
add tmp2, tmp2, #prefetch_lines * 64
b .Lcpy_body_medium
#else
/* Long copy. Use an SMS style loop to maximize the I/O
bandwidth of the core. We don't have enough spare registers
to synthesise prefetching, so use PLD operations. */
/* Pre-bias src and dst. */
sub src, src, #8
sub dst, dst, #8
pld [src, #8]
pld [src, #72]
subs tmp2, tmp2, #64
pld [src, #136]
ldrd A_l, A_h, [src, #8]
strd B_l, B_h, [sp, #8]
ldrd B_l, B_h, [src, #16]
strd C_l, C_h, [sp, #16]
ldrd C_l, C_h, [src, #24]
strd D_l, D_h, [sp, #24]
pld [src, #200]
ldrd D_l, D_h, [src, #32]!
b 1f
.p2align 6
2:
pld [src, #232]
strd A_l, A_h, [dst, #40]
ldrd A_l, A_h, [src, #40]
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [src, #48]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [src, #56]
strd D_l, D_h, [dst, #64]!
ldrd D_l, D_h, [src, #64]!
subs tmp2, tmp2, #64
1:
strd A_l, A_h, [dst, #8]
ldrd A_l, A_h, [src, #8]
strd B_l, B_h, [dst, #16]
ldrd B_l, B_h, [src, #16]
strd C_l, C_h, [dst, #24]
ldrd C_l, C_h, [src, #24]
strd D_l, D_h, [dst, #32]
ldrd D_l, D_h, [src, #32]
bcs 2b
/* Save the remaining bytes and restore the callee-saved regs. */
strd A_l, A_h, [dst, #40]
add src, src, #40
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [sp, #8]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [sp, #16]
strd D_l, D_h, [dst, #64]
ldrd D_l, D_h, [sp, #24]
add dst, dst, #72
tst tmp2, #0x3f
bne .Ltail63aligned
ldr tmp2, [sp], #FRAME_SIZE
bx lr
#endif
.Lcpy_notaligned:
pld [src]
pld [src, #64]
/* There's at least 64 bytes to copy, but there is no mutual
alignment. */
/* Bring DST to 64-bit alignment. */
lsls tmp2, dst, #29
pld [src, #(2 * 64)]
beq 1f
rsbs tmp2, tmp2, #0
sub count, count, tmp2, lsr #29
ldrmi tmp1, [src], #4
strmi tmp1, [dst], #4
lsls tmp2, tmp2, #2
ldrbne tmp1, [src], #1
ldrhcs tmp2, [src], #2
strbne tmp1, [dst], #1
strhcs tmp2, [dst], #2
1:
pld [src, #(3 * 64)]
subs count, count, #64
ldrmi tmp2, [sp], #FRAME_SIZE
bmi .Ltail63unaligned
pld [src, #(4 * 64)]
#ifdef USE_NEON
/* NEON handles the unaligned loads; only the stores are aligned.  */
vld1.8 {d0-d3}, [src]!
vld1.8 {d4-d7}, [src]!
subs count, count, #64
bmi 2f
1:
pld [src, #(4 * 64)]
vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
vld1.8 {d0-d3}, [src]!
vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
vld1.8 {d4-d7}, [src]!
subs count, count, #64
bpl 1b
2:
vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
ands count, count, #0x3f
#else
/* Use an SMS style loop to maximize the I/O bandwidth. */
sub src, src, #4
sub dst, dst, #8
subs tmp2, count, #64 /* Use tmp2 for count. */
ldr A_l, [src, #4]
ldr A_h, [src, #8]
strd B_l, B_h, [sp, #8]
ldr B_l, [src, #12]
ldr B_h, [src, #16]
strd C_l, C_h, [sp, #16]
ldr C_l, [src, #20]
ldr C_h, [src, #24]
strd D_l, D_h, [sp, #24]
ldr D_l, [src, #28]
ldr D_h, [src, #32]!
b 1f
.p2align 6
2:
pld [src, #(5 * 64) - (32 - 4)]
strd A_l, A_h, [dst, #40]
ldr A_l, [src, #36]
ldr A_h, [src, #40]
strd B_l, B_h, [dst, #48]
ldr B_l, [src, #44]
ldr B_h, [src, #48]
strd C_l, C_h, [dst, #56]
ldr C_l, [src, #52]
ldr C_h, [src, #56]
strd D_l, D_h, [dst, #64]!
ldr D_l, [src, #60]
ldr D_h, [src, #64]!
subs tmp2, tmp2, #64
1:
strd A_l, A_h, [dst, #8]
ldr A_l, [src, #4]
ldr A_h, [src, #8]
strd B_l, B_h, [dst, #16]
ldr B_l, [src, #12]
ldr B_h, [src, #16]
strd C_l, C_h, [dst, #24]
ldr C_l, [src, #20]
ldr C_h, [src, #24]
strd D_l, D_h, [dst, #32]
ldr D_l, [src, #28]
ldr D_h, [src, #32]
bcs 2b
/* Save the remaining bytes and restore the callee-saved regs. */
strd A_l, A_h, [dst, #40]
add src, src, #36
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [sp, #8]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [sp, #16]
strd D_l, D_h, [dst, #64]
ldrd D_l, D_h, [sp, #24]
add dst, dst, #72
ands count, tmp2, #0x3f
#endif
ldr tmp2, [sp], #FRAME_SIZE
bne .Ltail63unaligned
bx lr
.size memcpy, . - memcpy
|
stsp/newlib-ia16
| 2,587
|
newlib/libc/machine/arm/aeabi_memset-thumb2.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.thumb
.syntax unified
/* void __aeabi_memset(void *dest, size_t n, int c)
   AEABI run-time helper: note the argument order differs from ISO
   memset — here r0 = dest, r1 = byte count, r2 = fill value.
   __aeabi_memset4/8 (callers guarantee alignment) simply alias the
   general entry point.  Clobbers r1-r3 and flags; r4-r6 are saved.  */
.global __aeabi_memset
.type __aeabi_memset, %function
ASM_ALIAS __aeabi_memset4 __aeabi_memset
ASM_ALIAS __aeabi_memset8 __aeabi_memset
__aeabi_memset:
push {r4, r5, r6}
lsls r4, r0, #30                /* dest already word-aligned?  */
beq 10f
subs r4, r1, #1
cmp r1, #0
beq 9f                          /* n == 0: nothing to do */
uxtb r5, r2
mov r3, r0
b 2f
/* Byte loop until r3 (cursor) reaches word alignment; r4 counts
   the bytes remaining.  */
1:
subs r1, r4, #1
cbz r4, 9f
mov r4, r1
2:
strb r5, [r3], #1
lsls r1, r3, #30
bne 1b
3:
cmp r4, #3
bls 7f
/* Replicate the fill byte across a word: r5 = c * 0x01010101.  */
uxtb r5, r2
orr r5, r5, r5, lsl #8
cmp r4, #15
orr r5, r5, r5, lsl #16
bls 5f
/* 16-byte-at-a-time store loop.  */
mov r6, r4
add r1, r3, #16
4:
subs r6, r6, #16
cmp r6, #15
str r5, [r1, #-16]
str r5, [r1, #-12]
str r5, [r1, #-8]
str r5, [r1, #-4]
add r1, r1, #16
bhi 4b
sub r1, r4, #16
bic r1, r1, #15
and r4, r4, #15
adds r1, r1, #16
cmp r4, #3
add r3, r3, r1
bls 7f
/* Word-at-a-time loop for the 4..15 remaining bytes.  */
5:
mov r6, r3
mov r1, r4
6:
subs r1, r1, #4
cmp r1, #3
str r5, [r6], #4
bhi 6b
subs r1, r4, #4
bic r1, r1, #3
adds r1, r1, #4
add r3, r3, r1
and r4, r4, #3
/* Trailing 0-3 bytes.  */
7:
cbz r4, 9f
uxtb r2, r2
add r4, r4, r3
8:
strb r2, [r3], #1
cmp r3, r4
bne 8b
9:
pop {r4, r5, r6}
bx lr
/* Entry was already word-aligned: skip the byte pre-loop.  */
10:
mov r4, r1
mov r3, r0
b 3b
.size __aeabi_memset, . - __aeabi_memset
|
stsp/newlib-ia16
| 9,588
|
newlib/libc/machine/arm/strcmp-armv4.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Basic ARM implementation. This should run on anything except
for ARMv6-M, but there are better implementations for later
revisions of the architecture. This version can support ARMv4T
ARM/Thumb interworking. */
/* Register assignments for the ARMv4 strcmp below.  r4/r5 are
   callee-saved and are pushed before use; tmp1 and syndrome share
   r12 (ip) and are never live at the same time.  */
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define data1 r2
#define data2 r3
#define magic1 r4
#define tmp2 r5
#define tmp1 r12
#define syndrome r12 /* Overlaps tmp1 */
.arm
/* int strcmp(const char *s1, const char *s2)
   In:  src1 = r0, src2 = r1.  Out: result (r0) <0 / 0 / >0.
   Word-at-a-time compare.  magic1 holds 0x01010101; a zero byte in
   word X is detected via (X - magic1) & ~X & (magic1 << 7) — the
   classic carry-propagation "syndrome" test.  S2LO/S2HI/MSB/LSB are
   endian-dependent macros defined elsewhere in this file.  */
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
eor tmp1, src1, src2
tst tmp1, #3
/* Strings not at same byte offset from a word boundary. */
bne .Lstrcmp_unaligned
ands tmp1, src1, #3
bic src1, src1, #3
bic src2, src2, #3
ldr data1, [src1], #4
ldreq data2, [src2], #4
beq 1f
/* Although s1 and s2 have identical initial alignment, they are
not currently word aligned. Rather than comparing bytes,
make sure that any bytes fetched from before the addressed
bytes are forced to 0xff. Then they will always compare
equal. */
eor tmp1, tmp1, #3
mvn data2, #MSB
lsl tmp1, tmp1, #3
S2LO tmp1, data2, tmp1
ldr data2, [src2], #4
orr data1, data1, tmp1
orr data2, data2, tmp1
1:
/* Load the 'magic' constant 0x01010101. */
str r4, [sp, #-4]!
.cfi_def_cfa_offset 4
.cfi_offset 4, -4
mov magic1, #1
orr magic1, magic1, magic1, lsl #8
orr magic1, magic1, magic1, lsl #16
.p2align 2
4:
sub syndrome, data1, magic1
cmp data1, data2
/* check for any zero bytes in first word */
biceq syndrome, syndrome, data1
tsteq syndrome, magic1, lsl #7
ldreq data1, [src1], #4
ldreq data2, [src2], #4
beq 4b
2:
/* There's a zero or a different byte in the word */
S2HI result, data1, #24
S2LO data1, data1, #8
cmp result, #1
cmpcs result, data2, S2HI #24
S2LOEQ data2, data2, #8
beq 2b
/* On a big-endian machine, RESULT contains the desired byte in bits
0-7; on a little-endian machine they are in bits 24-31. In
both cases the other bits in RESULT are all zero. For DATA2 the
interesting byte is at the other end of the word, but the
other bits are not necessarily zero. We need a signed result
representing the difference in the unsigned bytes, so for the
little-endian case we can't just shift the interesting bits
up. */
#ifdef __ARM_BIG_ENDIAN
sub result, result, data2, lsr #24
#else
and data2, data2, #255
rsb result, data2, result, lsr #24
#endif
ldr r4, [sp], #4
.cfi_restore 4
.cfi_def_cfa_offset 0
RETURN
#if 0
/* The assembly code below is based on the following algorithm. */
#ifdef __ARM_BIG_ENDIAN
#define RSHIFT <<
#define LSHIFT >>
#else
#define RSHIFT >>
#define LSHIFT <<
#endif
#define body(shift) \
mask = 0xffffffffU RSHIFT shift; \
data1 = *src1++; \
data2 = *src2++; \
do \
{ \
tmp2 = data1 & mask; \
if (__builtin_expect(tmp2 != data2 RSHIFT shift, 0)) \
{ \
data2 RSHIFT= shift; \
break; \
} \
if (__builtin_expect(((data1 - b1) & ~data1) & (b1 << 7), 0)) \
{ \
/* See comment in assembler below re syndrome on big-endian */\
if ((((data1 - b1) & ~data1) & (b1 << 7)) & mask) \
data2 RSHIFT= shift; \
else \
{ \
data2 = *src2; \
tmp2 = data1 RSHIFT (32 - shift); \
data2 = (data2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
} \
break; \
} \
data2 = *src2++; \
tmp2 ^= data1; \
if (__builtin_expect(tmp2 != data2 LSHIFT (32 - shift), 0)) \
{ \
tmp2 = data1 >> (32 - shift); \
data2 = (data2 << (32 - shift)) RSHIFT (32 - shift); \
break; \
} \
data1 = *src1++; \
} while (1)
const unsigned* src1;
const unsigned* src2;
unsigned data1, data2;
unsigned mask;
unsigned shift;
unsigned b1 = 0x01010101;
char c1, c2;
unsigned tmp2;
while (((unsigned) s1) & 3)
{
c1 = *s1++;
c2 = *s2++;
if (c1 == 0 || c1 != c2)
return c1 - (int)c2;
}
src1 = (unsigned*) (((unsigned)s1) & ~3);
src2 = (unsigned*) (((unsigned)s2) & ~3);
tmp2 = ((unsigned) s2) & 3;
if (tmp2 == 1)
{
body(8);
}
else if (tmp2 == 2)
{
body(16);
}
else
{
body (24);
}
do
{
#ifdef __ARM_BIG_ENDIAN
c1 = (char) tmp2 >> 24;
c2 = (char) data2 >> 24;
#else /* not __ARM_BIG_ENDIAN */
c1 = (char) tmp2;
c2 = (char) data2;
#endif /* not __ARM_BIG_ENDIAN */
tmp2 RSHIFT= 8;
data2 RSHIFT= 8;
} while (c1 != 0 && c1 == c2);
return c1 - c2;
#endif /* 0 */
/* First of all, compare bytes until src1(sp1) is word-aligned. */
.Lstrcmp_unaligned:
tst src1, #3
beq 2f
ldrb data1, [src1], #1
ldrb data2, [src2], #1
cmp data1, #1
cmpcs data1, data2
beq .Lstrcmp_unaligned
sub result, data1, data2
RETURN
2:
stmfd sp!, {r4, r5}
.cfi_def_cfa_offset 8
.cfi_offset 4, -8
.cfi_offset 5, -4
mov magic1, #1
orr magic1, magic1, magic1, lsl #8
orr magic1, magic1, magic1, lsl #16
ldr data1, [src1], #4
and tmp2, src2, #3
bic src2, src2, #3
ldr data2, [src2], #4
cmp tmp2, #2
beq .Loverlap2
bhi .Loverlap1
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
.Loverlap3:
bic tmp2, data1, #MSB
cmp tmp2, data2, S2LO #8
sub syndrome, data1, magic1
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #0xff000000
tstne data1, #0x00ff0000
tstne data1, #0x0000ff00
beq .Lstrcmp_done_equal
#else
bics syndrome, syndrome, #0xff000000
bne .Lstrcmp_done_equal
#endif
ldrb data2, [src2]
S2LO tmp2, data1, #24
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #24
#endif
b .Lstrcmp_tail
6:
S2LO tmp2, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
/* Critical inner Loop: Block with 2 bytes initial overlap. */
.p2align 2
.Loverlap2:
S2HI tmp2, data1, #16
sub syndrome, data1, magic1
S2LO tmp2, tmp2, #16
bic syndrome, syndrome, data1
cmp tmp2, data2, S2LO #16
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst data1, #0xff000000
tstne data1, #0x00ff0000
beq .Lstrcmp_done_equal
#else
lsls syndrome, syndrome, #16
bne .Lstrcmp_done_equal
#endif
ldrh data2, [src2]
S2LO tmp2, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2HI data2, data2, #16
S2LO tmp2, data1, #16
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
/* Critical inner Loop: Block with 1 byte initial overlap. */
.p2align 2
.Loverlap1:
and tmp2, data1, #LSB
cmp tmp2, data2, S2LO #24
sub syndrome, data1, magic1
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #LSB
beq .Lstrcmp_done_equal
ldr data2, [src2], #4
6:
S2LO tmp2, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldmfd sp!, {r4, r5}
.cfi_restore 4
.cfi_restore 5
.cfi_def_cfa_offset 0
RETURN
/* Byte-wise comparison of the words in tmp2/data2 until a
   difference or NUL is found; result is the byte difference.  */
.Lstrcmp_tail:
.cfi_restore_state
and r2, tmp2, #LSB
and result, data2, #LSB
cmp result, #1
cmpcs result, r2
S2LOEQ tmp2, tmp2, #8
S2LOEQ data2, data2, #8
beq .Lstrcmp_tail
sub result, r2, result
ldmfd sp!, {r4, r5}
.cfi_restore 4
.cfi_restore 5
.cfi_def_cfa_offset 0
RETURN
.cfi_endproc
.size strcmp, . - strcmp
|
stsp/newlib-ia16
| 5,383
|
newlib/libc/machine/arm/setjmp.S
|
/* This is a simple version of setjmp and longjmp.
Nick Clifton, Cygnus Solutions, 13 June 1997. */
#include "acle-compat.h"
/* ANSI concatenation macros. */
#define CONCAT(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a##b
#ifndef __USER_LABEL_PREFIX__
#error __USER_LABEL_PREFIX__ not defined
#endif
/* SYM(x): apply the target's user-label prefix (e.g. '_') to x.  */
#define SYM(x) CONCAT (__USER_LABEL_PREFIX__, x)
/* TYPE/SIZE emit ELF symbol metadata; no-ops for non-ELF targets.  */
#ifdef __ELF__
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#else
#define TYPE(x)
#define SIZE(x)
#endif
/* Arm/Thumb interworking support:
The interworking scheme expects functions to use a BX instruction
to return control to their parent. Since we need this code to work
in both interworked and non-interworked environments as well as with
older processors which do not have the BX instruction we do the
following:
Test the return address.
If the bottom bit is clear perform an "old style" function exit.
(We know that we are in ARM mode and returning to an ARM mode caller).
Otherwise use the BX instruction to perform the function exit.
We know that we will never attempt to perform the BX instruction on
an older processor, because that kind of processor will never be
interworked, and a return address with the bottom bit set will never
be generated.
In addition, we do not actually assemble the BX instruction as this would
require us to tell the assembler that the processor is an ARM7TDMI and
it would store this information in the binary. We want this binary to be
able to be linked with binaries compiled for older processors however, so
we do not want such information stored there.
If we are running using the APCS-26 convention however, then we never
test the bottom bit, because this is part of the processor status.
Instead we just do a normal return, since we know that we cannot be
returning to a Thumb caller - the Thumb does not support APCS-26.
Function entry is much simpler. If we are compiling for the Thumb we
just switch into ARM mode and then drop through into the rest of the
function. The function exit code will take care of the restore to
Thumb mode.
For Thumb-2 do everything in Thumb mode. */
#if __ARM_ARCH_ISA_THUMB == 1 && !__ARM_ARCH_ISA_ARM
/* ARMv6-M-like has to be implemented in Thumb mode. */
/* int setjmp(jmp_buf) — Thumb-1 / ARMv6-M version.
   r0 points at the jump buffer.  Layout (10 words):
   [0..3] r4-r7, [4..9] r8, r9, r10, fp, sp, lr.
   Returns 0 on the direct call.  */
.thumb
.thumb_func
.globl SYM (setjmp)
TYPE (setjmp)
SYM (setjmp):
/* Save registers in jump buffer. */
stmia r0!, {r4, r5, r6, r7}
/* High registers cannot be stored directly in Thumb-1; stage
   them through r1-r6 (r4-r6 are reloaded below).  */
mov r1, r8
mov r2, r9
mov r3, r10
mov r4, fp
mov r5, sp
mov r6, lr
stmia r0!, {r1, r2, r3, r4, r5, r6}
sub r0, r0, #40                 /* rewind to buffer start (10 words) */
/* Restore callee-saved low regs. */
ldmia r0!, {r4, r5, r6, r7}
/* Return zero. */
mov r0, #0
bx lr
/* void longjmp(jmp_buf, int) — Thumb-1 / ARMv6-M version.
   r0 = buffer saved by setjmp, r1 = return value.
   Restores the saved context and resumes after the matching
   setjmp, which appears to return r1 (or 1 if r1 was 0).  */
.thumb_func
.globl SYM (longjmp)
TYPE (longjmp)
SYM (longjmp):
/* Restore High regs. */
add r0, r0, #16                 /* skip the four low-reg slots */
ldmia r0!, {r2, r3, r4, r5, r6}
mov r8, r2
mov r9, r3
mov r10, r4
mov fp, r5
mov sp, r6
ldmia r0!, {r3} /* lr */
/* Restore low regs. */
sub r0, r0, #40
ldmia r0!, {r4, r5, r6, r7}
/* Return the result argument, or 1 if it is zero. */
mov r0, r1
/* NOTE(review): the bne relies on this 'mov' assembling to the
   flag-setting Thumb-1 form (pre-UAL divided syntax), so the
   flags reflect r1 — confirm if the file is ever switched to
   unified syntax.  */
bne 1f
mov r0, #1
1:
bx r3                           /* jump to the saved lr */
#else
/* RET: function-exit sequence.  APCS-26 restores the PSR bits held
   in lr; Thumb-2 can always 'bx lr'; classic ARM tests the low bit
   of lr and emits a hand-encoded 'bx lr' (as a .word, so the file
   does not acquire an ARMv4T-only architecture attribute) only for
   interworked returns.  */
#ifdef __APCS_26__
#define RET movs pc, lr
#elif defined(__thumb2__)
#define RET bx lr
#else
#define RET tst lr, #1; \
moveq pc, lr ; \
.word 0xe12fff1e /* bx lr */
#endif
/* COND: emit an IT instruction in Thumb-2, nothing elsewhere
   (ARM conditional execution needs no IT).  */
#ifdef __thumb2__
.macro COND where when
i\where \when
.endm
#else
.macro COND where when
.endm
#endif
/* MODE/PROLOGUE: per-ISA function-entry boilerplate.  The Thumb-1
   variant enters via a bx pc trampoline and continues in ARM
   state; Thumb-2 stays in Thumb throughout.  */
#if defined(__thumb2__)
.syntax unified
.macro MODE
.thumb
.thumb_func
.endm
.macro PROLOGUE name
.endm
#elif defined(__thumb__)
#define MODE .thumb_func
.macro PROLOGUE name
.code 16
bx pc
nop
.code 32
SYM (.arm_start_of.\name):
.endm
#else /* Arm */
#define MODE .code 32
.macro PROLOGUE name
.endm
#endif
/* FUNC_START name: open a global function in .text; FUNC_END
   emits RET and the ELF size directive.  */
.macro FUNC_START name
.text
.align 2
MODE
.globl SYM (\name)
TYPE (\name)
SYM (\name):
PROLOGUE \name
.endm
.macro FUNC_END name
RET
SIZE (\name)
.endm
/* --------------------------------------------------------------------
int setjmp (jmp_buf);
-------------------------------------------------------------------- */
/* int setjmp(jmp_buf) — ARM / Thumb-2 version.
   a1 (r0) points at the jump buffer; v1-v7, fp, ip, sp and lr are
   stored ascending.  Thumb-2 cannot put sp in an STM list, so sp
   is staged through ip there.  Returns 0 on the direct call.  */
FUNC_START setjmp
/* Save all the callee-preserved registers into the jump buffer. */
#ifdef __thumb2__
mov ip, sp
stmea a1!, { v1-v7, fp, ip, lr }
#else
stmea a1!, { v1-v7, fp, ip, sp, lr }
#endif
#if 0 /* Simulator does not cope with FP instructions yet. */
#ifndef __SOFTFP__
/* Save the floating point registers. */
sfmea f4, 4, [a1]
#endif
#endif
/* When setting up the jump buffer return 0. */
mov a1, #0
FUNC_END setjmp
/* --------------------------------------------------------------------
volatile void longjmp (jmp_buf, int);
-------------------------------------------------------------------- */
/* void longjmp(jmp_buf, int) — ARM / Thumb-2 version.
   a1 (r0) = buffer saved by setjmp, a2 (r1) = return value.
   Reloads the saved registers (including sp and lr) and falls into
   FUNC_END's RET, which appears to resume after the matching setjmp
   with a1 = value (or 1 if the value was 0).  */
FUNC_START longjmp
/* If we have stack extension code it ought to be handled here. */
/* Restore the registers, retrieving the state when setjmp() was called. */
#ifdef __thumb2__
ldmfd a1!, { v1-v7, fp, ip, lr }
mov sp, ip                      /* sp cannot be in a Thumb-2 LDM list */
#else
ldmfd a1!, { v1-v7, fp, ip, sp, lr }
#endif
#if 0 /* Simulator does not cope with FP instructions yet. */
#ifndef __SOFTFP__
/* Restore floating point registers as well. */
lfmfd f4, 4, [a1]
#endif
#endif
/* Put the return value into the integer result register.
But if it is zero then return 1 instead. */
movs a1, a2                     /* sets Z for the moveq below */
#ifdef __thumb2__
it eq
#endif
moveq a1, #1
FUNC_END longjmp
#endif
|
stsp/newlib-ia16
| 7,262
|
newlib/libc/machine/powerpc/setjmp.S
|
/* This is a simple version of setjmp and longjmp for the PowerPC.
Ian Lance Taylor, Cygnus Support, 9 Feb 1994.
Modified by Jeff Johnston, Red Hat Inc. 2 Oct 2001. */
#include "ppc-asm.h"
/* int setjmp (jmp_buf env);
   In:  r3 = env.
   First rounds r3 up to the buffer's required alignment (16 bytes
   with AltiVec, 8 otherwise), then stores the nonvolatile state:
   r1 (stack pointer), r2, GPRs r13-r31, LR and CR, and — depending
   on configuration — FPRs f14-f31 and AltiVec vrsave/v20-v31.
   SPE (E500) builds store the GPRs as 64-bit values with evstdd.
   The stwu/stfdu forms advance r3 as they go, so the "offset" in
   each trailing comment is relative to the aligned buffer start.
   Returns 0 in r3.  */
FUNC_START(setjmp)
#ifdef __ALTIVEC__
addi 3,3,15 # align Altivec to 16 byte boundary
rlwinm 3,3,0,0,27
#else
addi 3,3,7 # align to 8 byte boundary
rlwinm 3,3,0,0,28
#endif
#if __SPE__
/* If we are E500, then save 64-bit registers. */
evstdd 1,0(3) # offset 0
evstdd 2,8(3) # offset 8
evstdd 13,16(3) # offset 16
evstdd 14,24(3) # offset 24
evstdd 15,32(3) # offset 32
evstdd 16,40(3) # offset 40
evstdd 17,48(3) # offset 48
evstdd 18,56(3) # offset 56
evstdd 19,64(3) # offset 64
evstdd 20,72(3) # offset 72
evstdd 21,80(3) # offset 80
evstdd 22,88(3) # offset 88
evstdd 23,96(3) # offset 96
evstdd 24,104(3) # offset 104
evstdd 25,112(3) # offset 112
evstdd 26,120(3) # offset 120
evstdd 27,128(3) # offset 128
evstdd 28,136(3) # offset 136
evstdd 29,144(3) # offset 144
evstdd 30,152(3) # offset 152
evstdd 31,160(3) # offset 160
/* Add 164 to r3 to account for the amount of data we just
stored. Note that we are not adding 168 because the next
store instruction uses an offset of 4. */
addi 3,3,164
#else
stw 1,0(3) # offset 0
stwu 2,4(3) # offset 4
stwu 13,4(3) # offset 8
stwu 14,4(3) # offset 12
stwu 15,4(3) # offset 16
stwu 16,4(3) # offset 20
stwu 17,4(3) # offset 24
stwu 18,4(3) # offset 28
stwu 19,4(3) # offset 32
stwu 20,4(3) # offset 36
stwu 21,4(3) # offset 40
stwu 22,4(3) # offset 44
stwu 23,4(3) # offset 48
stwu 24,4(3) # offset 52
stwu 25,4(3) # offset 56
stwu 26,4(3) # offset 60
stwu 27,4(3) # offset 64
stwu 28,4(3) # offset 68
stwu 29,4(3) # offset 72
stwu 30,4(3) # offset 76
stwu 31,4(3) # offset 80
#endif
/* From this point on until the end of this function, add 84
to the offset shown if __SPE__. This difference comes from
the fact that we save 21 64-bit registers instead of 21
32-bit registers above. */
# r4 is volatile here, so it is free scratch for LR and CR.
mflr 4
stwu 4,4(3) # offset 84
mfcr 4
stwu 4,4(3) # offset 88
# one word pad to get floating point aligned on 8 byte boundary
/* Check whether we need to save FPRs. Checking __NO_FPRS__
on its own would be enough for GCC 4.1 and above, but older
compilers only define _SOFT_FLOAT, so check both. */
#if !defined (__NO_FPRS__) && !defined (_SOFT_FLOAT)
stfdu 14,8(3) # offset 96
stfdu 15,8(3) # offset 104
stfdu 16,8(3) # offset 112
stfdu 17,8(3) # offset 120
stfdu 18,8(3) # offset 128
stfdu 19,8(3) # offset 136
stfdu 20,8(3) # offset 144
stfdu 21,8(3) # offset 152
stfdu 22,8(3) # offset 160
stfdu 23,8(3) # offset 168
stfdu 24,8(3) # offset 176
stfdu 25,8(3) # offset 184
stfdu 26,8(3) # offset 192
stfdu 27,8(3) # offset 200
stfdu 28,8(3) # offset 208
stfdu 29,8(3) # offset 216
stfdu 30,8(3) # offset 224
stfdu 31,8(3) # offset 232
#endif
/* This requires a total of 21 * 4 + 18 * 8 + 4 + 4 + 4
bytes == 60 * 4 bytes == 240 bytes. */
#ifdef __ALTIVEC__
/* save Altivec vrsave and vr20-vr31 registers */
mfspr 4,256 # vrsave register
stwu 4,16(3) # offset 248
# stvx has no update form, so step r3 manually between stores.
addi 3,3,8
stvx 20,0,3 # offset 256
addi 3,3,16
stvx 21,0,3 # offset 272
addi 3,3,16
stvx 22,0,3 # offset 288
addi 3,3,16
stvx 23,0,3 # offset 304
addi 3,3,16
stvx 24,0,3 # offset 320
addi 3,3,16
stvx 25,0,3 # offset 336
addi 3,3,16
stvx 26,0,3 # offset 352
addi 3,3,16
stvx 27,0,3 # offset 368
addi 3,3,16
stvx 28,0,3 # offset 384
addi 3,3,16
stvx 29,0,3 # offset 400
addi 3,3,16
stvx 30,0,3 # offset 416
addi 3,3,16
stvx 31,0,3 # offset 432
/* This requires a total of 240 + 8 + 8 + 12 * 16 == 448 bytes. */
#endif
# First call of setjmp always returns 0.
li 3,0
blr
FUNC_END(setjmp)
/* void longjmp (jmp_buf env, int val);
   In:  r3 = env, r4 = val.
   Exact mirror of setjmp: re-aligns r3 the same way, then reloads
   every register setjmp stored, at the same offsets and in the same
   order (SPE/FPR/AltiVec variants included).  Finishes by making the
   restored context's setjmp call return val, or 1 if val is 0.  */
FUNC_START(longjmp)
#ifdef __ALTIVEC__
addi 3,3,15 # align Altivec to 16 byte boundary
rlwinm 3,3,0,0,27
#else
addi 3,3,7 # align to 8 byte boundary
rlwinm 3,3,0,0,28
#endif
#if __SPE__
/* If we are E500, then restore 64-bit registers. */
evldd 1,0(3) # offset 0
evldd 2,8(3) # offset 8
evldd 13,16(3) # offset 16
evldd 14,24(3) # offset 24
evldd 15,32(3) # offset 32
evldd 16,40(3) # offset 40
evldd 17,48(3) # offset 48
evldd 18,56(3) # offset 56
evldd 19,64(3) # offset 64
evldd 20,72(3) # offset 72
evldd 21,80(3) # offset 80
evldd 22,88(3) # offset 88
evldd 23,96(3) # offset 96
evldd 24,104(3) # offset 104
evldd 25,112(3) # offset 112
evldd 26,120(3) # offset 120
evldd 27,128(3) # offset 128
evldd 28,136(3) # offset 136
evldd 29,144(3) # offset 144
evldd 30,152(3) # offset 152
evldd 31,160(3) # offset 160
/* Add 164 to r3 to account for the amount of data we just
loaded. Note that we are not adding 168 because the next
load instruction uses an offset of 4. */
addi 3,3,164
#else
lwz 1,0(3) # offset 0
lwzu 2,4(3) # offset 4
lwzu 13,4(3) # offset 8
lwzu 14,4(3) # offset 12
lwzu 15,4(3) # offset 16
lwzu 16,4(3) # offset 20
lwzu 17,4(3) # offset 24
lwzu 18,4(3) # offset 28
lwzu 19,4(3) # offset 32
lwzu 20,4(3) # offset 36
lwzu 21,4(3) # offset 40
lwzu 22,4(3) # offset 44
lwzu 23,4(3) # offset 48
lwzu 24,4(3) # offset 52
lwzu 25,4(3) # offset 56
lwzu 26,4(3) # offset 60
lwzu 27,4(3) # offset 64
lwzu 28,4(3) # offset 68
lwzu 29,4(3) # offset 72
lwzu 30,4(3) # offset 76
lwzu 31,4(3) # offset 80
#endif
/* From this point on until the end of this function, add 84
to the offset shown if __SPE__. This difference comes from
the fact that we restore 21 64-bit registers instead of 21
32-bit registers above. */
# r5 is scratch here; r4 must stay live — it holds the return value.
lwzu 5,4(3) # offset 84
mtlr 5
lwzu 5,4(3) # offset 88
mtcrf 255,5
# one word pad to get floating point aligned on 8 byte boundary
/* Check whether we need to restore FPRs. Checking
__NO_FPRS__ on its own would be enough for GCC 4.1 and
above, but older compilers only define _SOFT_FLOAT, so
check both. */
#if !defined (__NO_FPRS__) && !defined (_SOFT_FLOAT)
lfdu 14,8(3) # offset 96
lfdu 15,8(3) # offset 104
lfdu 16,8(3) # offset 112
lfdu 17,8(3) # offset 120
lfdu 18,8(3) # offset 128
lfdu 19,8(3) # offset 136
lfdu 20,8(3) # offset 144
lfdu 21,8(3) # offset 152
lfdu 22,8(3) # offset 160
lfdu 23,8(3) # offset 168
lfdu 24,8(3) # offset 176
lfdu 25,8(3) # offset 184
lfdu 26,8(3) # offset 192
lfdu 27,8(3) # offset 200
lfdu 28,8(3) # offset 208
lfdu 29,8(3) # offset 216
lfdu 30,8(3) # offset 224
lfdu 31,8(3) # offset 232
#endif
#ifdef __ALTIVEC__
/* restore Altivec vrsave and v20-v31 registers */
lwzu 5,16(3) # offset 248
mtspr 256,5 # vrsave
addi 3,3,8
lvx 20,0,3 # offset 256
addi 3,3,16
lvx 21,0,3 # offset 272
addi 3,3,16
lvx 22,0,3 # offset 288
addi 3,3,16
lvx 23,0,3 # offset 304
addi 3,3,16
lvx 24,0,3 # offset 320
addi 3,3,16
lvx 25,0,3 # offset 336
addi 3,3,16
lvx 26,0,3 # offset 352
addi 3,3,16
lvx 27,0,3 # offset 368
addi 3,3,16
lvx 28,0,3 # offset 384
addi 3,3,16
lvx 29,0,3 # offset 400
addi 3,3,16
lvx 30,0,3 # offset 416
addi 3,3,16
lvx 31,0,3 # offset 432
#endif
# r3 = val, recording whether it is zero in CR0 ...
mr. 3,4
# ... and return straight to the restored LR when val != 0.
bclr+ 4,2
# val was 0: C requires setjmp to appear to return 1 instead.
li 3,1
blr
FUNC_END(longjmp)
|
stsp/newlib-ia16
| 8,847
|
newlib/libc/machine/hppa/strncpy.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision$ */
/*
* strncpy(s1, s2, n)
*
* Copy s2 to s1, truncating or null-padding to always copy n bytes
* return s1
*/
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define count r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
/* char *strncpy(char *s1 /* d_addr */, const char *s2 /* s_addr */,
                 size_t n /* count */)
   Word-at-a-time copy: nulls are detected with uxor,nbz, leading
   garbage bytes of a partial first word are forced non-zero via a
   mask built in the shift register (mtctl .../zvdepi), and the
   destination is padded with nulls out to exactly n bytes.
   Returns s1 in ret0.
   NOTE: nearly every branch here uses the following instruction as
   its delay slot, and several use nullification (,n) — instruction
   order is load-bearing throughout; do not reorder.  */
ENTRY(strncpy)
combt,= s_addr,r0,pad_null_bytes1 /* if s2==NULL then pad nulls and exit */
copy d_addr,ret0 /* The return value is defined to be the value of d_addr. DELAY SLOT*/
addibt,<,n -4,count,byteloop /* If count is <= 4 don't get fancy.*/
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address.*/
extru d_addr,31,2,tmp5 /* Extract the low two bits of the destination address.*/
add count,tmp5,count /* pre increment the count by the byte address so that the count is*/
comb,<> tmp5,tmp1,not_aligned /* branch if tmp5<>tmp1. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT.*/
/* aligned*/
combt,= tmp5,r0,skip_mask
ldwm 4(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 4 (DELAY SLOT)*/
sh3add tmp5,r0,save /* compute mask in save*/
mtctl save,11
zvdepi -2,32,save
b skip_mask /* don't reload tmp1*/
or save,tmp1,tmp1 /* or mask with data*/
/* Main aligned loop: one word per iteration, stopping on null or
   exhausted count.  */
chunks:
ldwm 4(0,s_addr),tmp1 /* get a word*/
skip_mask:
uxor,nbz tmp1,r0,save /* check for null*/
b,n null1
addibf,< -4,count,chunks
stbys,b,m tmp1,4(0,d_addr) /* store word (delay slot)*/
/* back_porch last word to store*/
addibt,=,n 4,count,done /* if count = 0 we're, of course, done !*/
ldws 0(s_addr),tmp1 /* load up the back_porch*/
add d_addr,count,d_addr/* final store address is +1 too high !*/
sh3add count,r0, save /* setup right mask based on count*/
mtctl save,r11
zvdepi -2,32,save /*save now has left-hand mask*/
uaddcm r0,save,save /*form right hand mask */
or tmp1,save,tmp1 /*and insert data*/
uxor,nbz tmp1,r0,save /* check for null*/
b,n null2
bv 0(r2)
stbys,e tmp1,0(d_addr) /* done */
/* Begin non_aligned code.  Source and destination disagree in their
   low two bits, so every stored word is assembled from two source
   words with vshd using a shift count kept in cr11 (the SAR).  */
not_aligned:
sub,>= tmp5,tmp1,tmp3 /* compute the shift amt.and skip load if tmp5 > tmp1.*/
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++*/
zdep tmp3,28,29,tmp4 /* compute the number of bits to shift */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register.*/
addibt,<,n -4,count,chkchnk2 /* first step in pre adjustment of count for looping.*/
ldwm 4(0,s_addr),tmp2 /* get either first or second word from source. */
combt,= tmp5,r0,skip_mask2 /* don't mask if whole word is valid*/
vshd tmp1,tmp2,tmp3 /* position data ! (delay slot)*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* re-load the shift count into cr11 */
b skip_mask2
copy r0, tmp5 /* zero out tmp5 so we don't try to mask again*/
chunk2:
ldwm 4(0,s_addr),tmp2
vshd tmp1,tmp2,tmp3
skip_mask2:
uxor,nbz tmp3, r0, save
b,n null3
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get 2nd word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
uxor,nbz tmp3, r0, save
b,n null4
addibf,< -8,count,chunk2 /* If count is still >= 8 do another loop.*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
chkchnk2:
addibt,<,n 4,count,bp_0 /* if we don't have 4 bytes left then do the back porch (bp_0)*/
subchnk2: /* we have less than 8 chars to copy*/
ldwm 4(0,s_addr),tmp2 /* get next word !*/
combt,= tmp5,r0,skip_mask3
vshd tmp1,tmp2,tmp3 /* position data !*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* restore shift value again */
copy r0, tmp5 /* zero out tmp5 so we don't try to mask again*/
skip_mask3:
uxor,nbz tmp3,r0,save
b,n null4
b bp_1 /* we now have less than 4 bytes to move*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
bp_0:
copy tmp1,tmp2 /* switch registers used in the shift process.*/
addibt,<=,n 4,count,done /* if count = -4 this implies that count = 0 -> done */
bp_1:
ldwm 4(0,s_addr),tmp1 /* get final word ! */
vshd tmp2,tmp1,tmp3 /* position data !*/
uxor,sbz tmp3,r0,save /* if some-byte-zero */
b no_null /* don't goto no_null-find which null instead */
add d_addr,count,d_addr /* get d_addr ready for stbys,e */
/* A null is present in the final word: build a byte mask keeping
   everything up to and including it, then fall into the store.  */
extru,<> save,7,8,r0
b found_null5
copy r0, tmp5
extru,<> save,15,8,r0
b found_null5
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null5
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null5:
and tmp3,tmp5,tmp3 /* zero out tmp5 based on mask in tmp5*/
no_null:
bv 0(r2) /* were done*/
stbys,e tmp3,0(0,d_addr) /* store the data !*/
/* here we do ye old byte-at-a-time moves.*/
byteloop:
addibt,=,n 4,count,done
comb,= 0,s_addr,done
stbs r0,0(d_addr) /* store null in case s_addr == NULL */
ldbs,ma 1(s_addr),tmp1
encore:
combt,=,n tmp1,r0, pad_null_bytes1
stbs,ma tmp1,1(d_addr)
addibf,=,n -1,count,encore
ldbs,ma 1(s_addr),tmp1
b,n done
pnb_1:
addibt,=,n 4,count,done /* if count was already 0 then we're done*/
pad_null_bytes1:
combt,=,n count,r0,done /* if count==0 then exit */
pad_null_bytes2:
addibf,= -1,count,pad_null_bytes2
stbs,ma r0,1(d_addr)
b,n done
/* Word-at-a-time null padding used after a null is found mid-copy.  */
pad_nulls:
addibf,<=,n -4,count,pad_nulls
stwm r0,4(d_addr)
b,n pnb_1
/* null1/null2/null3/null4: a null was detected in an aligned word,
   the back porch, or one of the two words of a misaligned pair.
   Each finds which byte was zero, masks off everything after it,
   stores the truncated word, and (except null2) pads the rest.  */
null1:
extru,<> save,7,8,r0
b found_null1
copy r0, tmp5
extru,<> save,15,8,r0
b found_null1
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null1
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null1:
and tmp1,tmp5,tmp1 /*zero out tmp1 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp1,4(0,d_addr) /* first word (account for alignment)*/
null2: /* back porch case. We have less than 4 bytes to go.*/
extru,<> save,7,8,r0 /* is null in 1st byte? */
b found_null2
copy r0, tmp5
extru,<> save,15,8,r0 /* is null in 2nd byte? */
b found_null2
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
b found_null2 /* null must be in 3rd byte */
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
found_null2:
and tmp1,tmp5,tmp1 /*zero out tmp1 according to mask*/
bv 0(r2) /* we're done*/
stbys,e tmp1,0(0,d_addr) /* last word (back porch)*/
null3: /* not_aligned case where null is found in first of two words--adjust count*/
extru,<> save,7,8,r0
b found_null3
copy r0, tmp5
extru,<> save,15,8,r0
b found_null3
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null3
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null3:
addi 4,count,count /* fix count since null is in first of two words*/
and tmp3,tmp5,tmp3 /*zero out tmp3 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp3,4(d_addr)
null4: /* not_aligned case where null is found in second of two words*/
extru,<> save,7,8,r0
b found_null4
copy r0, tmp5
extru,<> save,15,8,r0
b found_null4
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null4
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null4:
and tmp3,tmp5,tmp3 /*zero out tmp4 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp3,4(d_addr)
done:
EXIT(strncpy)
|
stsp/newlib-ia16
| 5,257
|
newlib/libc/machine/hppa/strcat.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision$ */
/*
* strcat(s1, s2)
*
* Concatenate s2 on the end of s1. S1's space must be large enough.
* Return s1.
*/
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define tmp6 r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
/* char *strcat(char *s1 /* d_addr */, const char *s2 /* s_addr */)
   Two phases: (1) word-scan s1 for its terminating null using
   uxor,nbz, masking garbage bytes of a partial first word; (2) copy
   s2 onto the end, word-at-a-time, with a vshd-based misaligned path
   when the low two address bits of s1's end and s2 differ.
   Returns s1 in ret0.
   NOTE: branch delay slots and ,n nullification are used throughout;
   instruction order is significant — do not reorder.  */
ENTRY(strcat)
comb,= r0,s_addr,done /* quit if s2=NULL */
copy d_addr,ret0 /* The return value is the value of d_addr. DELAY SLOT*/
/* First look for end of s1 (d_addr) */
extru d_addr,31,2,tmp1 /* Extract the low two bits of the dest address. */
combt,= tmp1,r0,dont_mask
dep 0,31,2,d_addr /*set word alignment */
ldwm 4(d_addr),tmp2
sh3add tmp1,r0,save /* build mask based on tmp1 */
mtctl save,11
zvdepi -2,32,save
or save,tmp2,tmp2 /* force garbage bytes before s1 non-zero */
uxor,nbz tmp2,r0,save
search:
b,n found_end /* nullified under uxor conditions above and below */
dont_mask:
ldwm 4(d_addr),tmp2
comib,tr 0,0,search /* unconditional branch; delay slot tests word */
uxor,nbz tmp2,r0,save
found_end: /* at this point d_addr points to word */
extru,<> save,7,8,r0 /* following word with null */
addib,tr,n -4,d_addr,begin_copy /*set d_addr to end of s1 */
extru,<> save,15,8,r0
addib,tr,n -3,d_addr,begin_copy
extru,<> save,23,8,r0
addi -1,d_addr,d_addr
addi -1,d_addr,d_addr
begin_copy:
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address. */
extru d_addr,31,2,tmp6 /* Extract the low two bits of the destination address. */
sub,= tmp6,tmp1,tmp3 /* Compute the shift quantity and don't branch if tmp6=tmp1. */
b not_aligned /* Not_aligned says that shifts Will be needed. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT. */
/* aligned */
combt,= tmp6,r0,skip_mask
ldwm 4(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 4 (DELAY SLOT) */
sh3add tmp6,r0,save
mtctl save,r11
zvdepi -2,32,save
or save,tmp1,tmp1
uxor,nbz tmp1,r0,save
b,n first_null /* special case: null in first word */
b,n skip_mask2
/* Aligned copy loop: store a word, load the next, test for null.  */
chunks:
b,n null_found /* delay slot for uxor below */
skip_mask2:
stbys,b,m tmp1,4(d_addr)
ldwm 4(s_addr),tmp1
skip_mask:
comib,tr 0,0,chunks
uxor,nbz tmp1,r0,save
/* Begin non_aligned code.  Each stored word is assembled from two
   source words with vshd; shift count lives in cr11 (SAR).  */
not_aligned:
sh3add,>= tmp3,r0,tmp4 /* compute the shift amt.and skip load if tmp6 > tmp1. */
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++ */
ldwm 4(0,s_addr),tmp2 /* get either first or second word from source. */
combt,= tmp6,r0,chunk2 /* don't mask if whole word is valid */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register. */
vshd tmp1,tmp2,tmp3 /* position data ! (delay slot) */
sh3add tmp6,r0,save /* setup r1 */
mtctl save,r11 /* set-up cr11 for mask */
zvdepi -2,32,save
or save, tmp3, tmp3
uxor,nbz tmp3,r0,save
b,n first_null2
b did_mask
mtctl tmp4,11 /* re-load the shift count into cr11 */
chunk2:
vshd tmp1,tmp2,tmp3
uxor,nbz tmp3, r0, save
b,n null_found
did_mask:
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get next word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
uxor,nbz tmp3, r0, save
b,n null_found
stwm tmp3,4(d_addr)
comib,tr 0,0,chunk2
ldwm 4(s_addr),tmp2
null_found: /* adjust d_addr and store final word */
extru,<> save,7,8,r0
addib,tr,n 1,d_addr,store_final
extru,<> save,15,8,r0
addib,tr,n 2,d_addr,store_final
extru,<> save,23,8,r0
addib,tr 3,d_addr,store_final2
bv 0(r2)
stw save,0(d_addr)
store_final:
bv 0(r2)
store_final2:
stbys,e save,0(d_addr) /* delay slot */
first_null: /* null found in first word of aligned (wrt d_addr) */
addi -4,s_addr,s_addr
ldbx tmp6(s_addr),tmp4
add tmp6,s_addr,s_addr
comib,= 0,tmp4,done
stbs,ma tmp4,1(d_addr)
ldbs 1(s_addr),tmp4
comib,= 0,tmp4,done
stbs,ma tmp4,1(d_addr)
bv 0(r2) /* done */
stbs 0,0(d_addr)
first_null2: /* null found in first word of non-aligned (wrt d_addr) */
addibt,= -1,tmp6,check3 /* check last 3 bytes of word */
extru save,15,8,tmp4
addibt,=,n -1,tmp6,check2 /* check last 2 bytes */
bv 0(r2)
stbys,b save, 0(d_addr)
check3:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
check2:
extru,<> save,23,8,tmp4
bv 0(r2)
stbs,ma tmp4,1(d_addr)
bv 0(r2)
stbs r0,0(d_addr)
done:
EXIT(strcat)
|
stsp/newlib-ia16
| 3,370
|
newlib/libc/machine/hppa/strlen.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID = "@(#) $Revision$" */
/* strlen(s): Return length of string s */
#define start arg0
#define end ret0
#define tmp1 arg1
#define tmp2 arg2
#include "DEFS.h"
/* size_t strlen(const char *s /* start/arg0 */)
   Word-at-a-time scan: `end` walks word-aligned memory; uxor,nbz
   nullifies the loop-exit branch while no byte of the word is zero.
   A partial first word has its leading garbage bytes forced non-zero
   (see the "Tricky code" comment below).  Returns the length in ret0.
   A NULL argument falls straight through to $null_ptr, returning 0
   (ret0 == end == start there).
   NOTE: branch delay slots are in active use; do not reorder.  */
ENTRY(strlen)
movb,=,n start,end,$null_ptr
depi 0,31,2,end
comb,<> start,end,$not_aligned
ldws,ma 4(end),tmp1
comib,tr 0,0,$loop /* avoid INDIGO two register interlock */
uxor,nbz 0,tmp1,0
$not_aligned:
/*
; Tricky code. The problem is that the value of of the word
; including the start of the string has some garbage bytes that
; may be 0. We don't want them to stop the string scan. So
; we make those bytes non-zero (and any old non-zero value
; will do). Notice that the end pointer has been rounded
; down to a word boundary, and then incremented to the next
; word by the time we get here. Therefore, (start-end) has
; one of the values (-3, -2, or -1). Use uaddcm to do the
; subtraction (instead of sub), and the result will be
; (-4, -3, or -2). Multiply this by 8, and put into the
; shift register (which truncates to the last 5 bits) and
; the value will be (0, 8, or 16). Use this as a bit position,
; and drop a mask down into tmp1. All the garbage bytes will
; have at least 1 bit affected by the vdepi, so all the garbage
; in this first word will be non-zero garbage.
*/
uaddcm start,end,tmp2 /* tmp2 <- { -4, -3, -2 } */
sh3add tmp2,0,tmp2 /* tmp2 <- { -32, -24, -16 } */
mtsar tmp2 /* sar <- { 0, 8, 16 } */
vdepi -1,32,tmp1
uxor,nbz 0,tmp1,0
$loop:
b,n $end_loop /* nullified while no byte of tmp1 is zero */
ldws,ma 4(end),tmp1
comib,tr 0,0,$loop /* avoid INDIGO two register interlock */
uxor,nbz 0,tmp1,0
$end_loop:
/* adjust the end pointer to one past the end of the string */
extru,<> tmp1,7,8,0
addib,tr,n -3,end,$out
extru,<> tmp1,15,8,0
addib,tr,n -2,end,$out
extru,<> tmp1,23,8,0
addi -1,end,end
$out:
bv 0(rp)
/*
; tricky code. the end pointer is just beyond the terminating
; null byte, so the length is (end-start-1). use uaddcm
; to do this in 1 instruction
*/
uaddcm end,start,ret0
$null_ptr:
EXIT(strlen)
|
stsp/newlib-ia16
| 2,480
|
newlib/libc/machine/hppa/memset.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* SPECTRUM_ID: @(#)memset.s 37.4 86/08/25 */
/*
* memset(s, c, n)
*
* Sets first n chars in memory area s to value of character c.
* Returns s.
*/
#ifndef _NAMESPACE_CLEAN
#define NOSECDEF /* prevent _memset from being defined as entry */
#endif
#include "DEFS.h"
#define TO arg0
#define FILLCHAR arg1
#define COUNT arg2
#define TMP r31
/* void *memset(void *s /* TO */, int c /* FILLCHAR */,
                size_t n /* COUNT */)
   For n > 5: replicates c into all four bytes of FILLCHAR, fills the
   ragged first and last words with stbys, then fills whole words —
   16 bytes per iteration in msquadloop with a 4-byte mswordloop to
   consume the remainder.  Small n uses the byte loop.  Returns s.
   NOTE: delay slots are in use (e.g. the stbys after each addib);
   do not reorder instructions.  */
ENTRY(memset)
comb,<= COUNT,r0,msexit /* return if count not positive */
copy TO,ret0 /* return value is start of copy */
comibf,<,n 5,COUNT,msbyteloop /* be straightforward */
dep FILLCHAR,23,8,FILLCHAR /* dup low byte */
dep FILLCHAR,15,16,FILLCHAR /* into high bytes */
add TO,COUNT,TMP /* TMP points just past fill area */
stbys,m FILLCHAR,0(TO) /* fill out first word */
/*
* If we're pointing to high-order byte, no fill will happen,
* but permissions will be checked. We don't want this (we
* might be pointing at the beginning of a protected region),
* so we branch around stbys if neither low bits are set.
*/
bb,<,n TMP,31,filend /* if low bit is set, stbys */
bb,>=,n TMP,30,endfil /* if next lowest bit isn't set */
/* (and lowest isn't, either) */
/* do not stbys */
filend:
stbys,m,e FILLCHAR,0(TMP) /* fill out the last */
endfil:
addi 4, TO, TO
sub TMP,TO,COUNT /* will now divide by 4 */
comb,=,n COUNT,r0,msexit /* If count is zero ret. */
extru,<> COUNT,31,4,r1 /* r1 = residual bytes mod 16 */
b msquadloop
depi 0,31,4,COUNT /* will now divide by 16 */
mswordloop:
addib,<> -4,r1,mswordloop
stws,ma FILLCHAR,4(TO)
comb,=,n COUNT,r0,msexit /* If count is zero ret. */
msquadloop:
stws,ma FILLCHAR,4(TO)
stws,ma FILLCHAR,4(TO)
stws,ma FILLCHAR,4(TO)
addib,<> -16,COUNT,msquadloop
stws,ma FILLCHAR,4(TO)
b,n msexit
msbyteloop:
addib,<> -1,COUNT,msbyteloop
stbs,ma FILLCHAR,1(TO)
msexit:
EXIT(memset)
|
stsp/newlib-ia16
| 6,575
|
newlib/libc/machine/hppa/memcpy.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision$ */
/*
* memcpy(s1, s2, n)
*
* Copy n characters from s2 to s1; returns s1.
*/
#define d_addr arg0
#define s_addr arg1
#define count arg2
#define tmp5 arg3
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp6 r31
#include "DEFS.h"
/* void *memcpy(void *s1 /* d_addr */, const void *s2 /* s_addr */,
                size_t n /* count */)
   For n > 5: if source and destination agree in their low two bits,
   copies 16 bytes per iteration (chunks), then 4 (subchunk), then a
   stbys,e back porch.  Otherwise every stored word is assembled from
   two source words with vshd using a shift count in cr11 (chunk2 /
   subchnk2).  Small n uses the byte loop.  Returns s1 in ret0.
   NOTE: delay slots (the instruction after each branch) are integral
   to the control flow; do not reorder.  */
ENTRY(memcpy)
comib,>= 5,count,byteloop /* If count is <= 6 don't get fancy.*/
movb,=,n d_addr,ret0,done /* The return value is defined to be the value of d_addr. DELAY SLOT */
/* if d_addr is null then exit */
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address. */
extru d_addr,31,2,tmp2 /* Extract the low two bits of the destination address. */
add count,tmp2,count /* pre increment the count to adjust for alignment of s1 */
comb,<> tmp2,tmp1,not_aligned /* see if s1 is aligned w.r.t. s2. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT. */
/* aligned */
/* We will now begin the 16 byte at a time word move if count >= 16 ! */
/* Else we will branch to the 4 byte-at-a time word move ! */
addibt,<,n -16,count,chekchunk /* If count < 16 then we can't move 16 byte chunks ! */
/* actually we can legally move 13 or more bytes on the first loop. */
/* These loads and stores are done so as to prevent processor interlock. */
chunks:
ldwm 16(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 16 */
ldw -12(0,s_addr),tmp2 /* tmp2 = 2nd word */
ldw -8(0,s_addr),tmp3 /* tmp3 = 3rd word */
ldw -4(0,s_addr),tmp4 /* tmp4 = 4th word */
/* Now store the results ! */
stbys,b,m tmp1,4(0,d_addr) /* tmp1 = 1st word stored d_addr += 16 also take care of front porch. */
stwm tmp2,4(0,d_addr) /* tmp2 = 2nd word stored. */
stwm tmp3,4(0,d_addr) /* tmp3 = 3rd word stored. */
addibf,< -16,count,chunks /* If count is still >= 16 do another loop. */
stwm tmp4,4(0,d_addr) /* tmp4 = 4th word stored. DELAY SLOT */
chekchunk:
addibt,<,n 12,count,back_porch /* since the count is already decremented by -16 we're testing */
/* to see if there are at least 4 bytes left ? */
subchunk:
ldws,ma 4(s_addr),tmp1 /* tmp1 = *s_addr++ */
addibf,< -4,count,subchunk /* count -= 4 */
stbys,b,m tmp1,4(d_addr) /* *d_addr++ = tmp1 */
back_porch:
addibt,=,n 4,count,done /* if count = 0 we're, of course, done ! */
ldws 0(s_addr),tmp1 /* load up the back_porch */
add d_addr,count,d_addr/* final store address is +1 too high ! */
bv 0(r2) /* return--were done. */
stbys,e tmp1,0(d_addr) /* kerplunk! whew ! */
/* Begin non_aligned code. (no refrence to politics) */
not_aligned:
sub,>= tmp2,tmp1,tmp3 /* compute the shift quantity again and skip the load if tmp2 > tmp1. */
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++ */
zdep tmp3,28,29,tmp4 /* compute the number of bits to shift based on the number of bytes above. */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register. */
addibt,<,n -16,count,chkchnk2 /* first step in pre adjustment of count for looping. */
chunk2:
ldwm 16(0,s_addr),tmp2 /* get either first or second word . tmp2 = *s_addr++ */
ldw -12(s_addr),tmp3
ldw -8(s_addr),tmp4
ldw -4(s_addr),tmp5
vshd tmp1,tmp2,tmp6 /* position data ! */
stbys,b,m tmp6,4(0,d_addr) /* store ! */
vshd tmp2,tmp3,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store ! */
vshd tmp3,tmp4,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store ! */
vshd tmp4,tmp5,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store the data ! */
addibf,< -16,count,chunk2 /* If count is still >= 16 do another loop. */
copy tmp5,tmp1 /* carry last word into next iteration (DELAY SLOT) */
chkchnk2:
addibt,<,n 12,count,bp_0 /* if we don't have 4 bytes left then do the back porch (bp_0) */
subchnk2:
ldwm 4(0,s_addr),tmp2 /* get next word ! */
vshd tmp1,tmp2,tmp3 /* position data ! */
addibt,< -4,count,bp_1 /* decrement count and when count < 4 goto back_porch (bp_1) */
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get 4th word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
addib,>= -4,count,subchnk2 /* decrement count and when count <= 4 go to back porch (bp_2) */
stbys,b,m tmp3,4(0,d_addr) /* store the data ! */
bp_0: copy tmp1,tmp2 /* switch registers used in the shift process. */
bp_1: addibt,<=,n 4,count,done /* if count = -4 this implies that count = 0 -> done */
add d_addr,count,d_addr /* bump destination address to be +1 too high ! */
mfctl sar,tmp3 /* suppress final ldwm unless result used */
extru tmp3,28,2,tmp3 /* convert bitshift to byteshift */
sub,<= count,tmp3,r0 /* bytes unused if (count-byteshift <= 0*/
ldwm 4(0,s_addr),tmp1 /* get final word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
bv 0(r2) /* return */
stbys,e tmp3,0(0,d_addr) /* store the data ! */
/* here we do ye old byte-at-a-time moves. */
byteloop:
comb,>=,n 0,count,done
encore:
ldbs,ma 1(s_addr),tmp1
addibf,= -1,count,encore
stbs,ma tmp1,1(d_addr)
done:
EXIT(memcpy)
|
stsp/newlib-ia16
| 9,050
|
newlib/libc/machine/hppa/strcmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*
strcmp
Jerry Huck
Edgar Circenis
*/
/*
* strcmp(s1, s2)
*
* returns integer: < 0 iff s1 lexicographically less than s2
* > 0 iff s1 lexicographically greater than s2
* = 0 iff s1 lexicographically equal to s2
*/
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 23
#define save 1
#define tmp6 24
#define tmp5 28
ENTRY(strcmp)
/* int strcmp(const char *s1, const char *s2)
   PA-RISC word-at-a-time string compare.
   In:  s1 = r26, s2 = r25.  Out: ret0 (r28) <0 / 0 / >0.
   NOTE(review): branch delay slots are significant throughout; the
   instruction after every branch executes unless nullified (",n").
   Conditional completers (",<>", ",=", ",tr", ...) nullify the NEXT
   instruction when the condition holds. */
	comb,=,n s1,s2,samestring	/* identical pointers => equal */
	comib,=,n 0,s1,s1isnull		/* NULL s1 handled specially */
	comib,=,n 0,s2,s2isnull		/* NULL s2 handled specially */
/* Hope for word alignment. Pick up low two bits of each address */
	extru,<>	s1,31,2,tmp1	/* tmp1 = s1 & 3; skip next load if s1 unaligned */
	ldwm	4(s1),s1word		/* s1word = *s1, s1 += 4 (aligned case only) */
	dep,=	s2,29,2,tmp1		/* merge s2&3 into tmp1 bits; if both aligned (tmp1==0) skip the case-table branch */
	b,n	case_analysis		/* some operand unaligned: dispatch on the 4-bit case */
/* Start looping until null is found in s1 or they mis-compare */
loop:
	ldwm	4(s2),s2word		/* s2word = *s2, s2 += 4 */
loop_plus:
	uxor,nbz	s1word,r0,r0	/* Null byte in s1word? (nbz: no byte zero) */
	b,n	nullins1		/* taken only when a zero byte was seen */
	comb,=,n	s1word,s2word,loop	/* words equal: keep scanning */
	ldwm	4(s1),s1word		/* (delay slot of fall-through) next s1 word */
/* The words do not compare equal and s1 does not have a null.
   Need to treat words as unsigned and generate either a positive
   or negative return value */
wordcomparereturn:
	comclr,>>	s1word,s2word,ret0	/*Set ret0 to 0 and skip if greater*/
	ldi	-2,ret0			/*Set ret0 to -2 when less */
	bv	r0(rp)
	addi	1,ret0,ret0		/*Fix return value to be -1 or +1 (delay slot) */
/* s1 has a null. s2 has not been checked. */
nullins1:
/*If s2 has no nulls this is simple, but assume that it might
  and fix up s1 to allow the word comparison to work by
  scanning s1 and duplicating all the bytes in s2 below that byte into
  the remainder of s1.  A remainder only exists if the zero byte
  is found in the upper three bytes */
	extru,<>	s1word,7,8,r0	/*null in the first byte? */
	dep,tr	s2word,31,24,s1word	/*copy low 3 bytes of *s2 into *s1 */
	extru,<>	s1word,15,8,r0	/*null in the second byte? */
	dep,tr	s2word,31,16,s1word	/*copy low 2 bytes of *s2 into *s1 */
	extru,<>	s1word,23,8,r0	/*null in the third byte? */
	dep	s2word,31,8,s1word	/*copy low 1 byte of *s2 into *s1 */
/* Do the normal unsigned compare and return */
	comclr,<>	s1word,s2word,ret0	/*Set ret0 to 0 and skip if not equal */
	bv,n	r0(rp)			/* equal: return 0 */
	comclr,>>	s1word,s2word,ret0	/*Set ret0 to 0 and skip if greater*/
	ldi	-2,ret0			/*Set ret0 to -2 when less */
	bv	r0(rp)
	addi	1,ret0,ret0		/*Fix return value to be -1 or +1 (delay slot) */
/* s1 and s2 are the same string and therefore equal */
samestring:
	bv	r0(rp)
	copy	r0,ret0			/* return 0 (delay slot) */
/* s1 is null.  Treat as string of nulls.  Therefore return
   the negative of s2's first byte.  s2 cannot be zero. */
s1isnull:
	ldbs	0(0,s2),ret0
	bv	r0(rp)
	sub	0,ret0,ret0		/* ret0 = -*s2 (delay slot) */
/* s2 is null.  Treat as string of nulls.  Therefore return
   s1's first byte.  s1 cannot be zero. */
s2isnull:
	bv	r0(rp)
	ldbs	0(0,s1),ret0		/* ret0 = *s1 (delay slot) */
case_analysis:
	blr	tmp1,r0			/* branch into the 2-instruction-per-entry table below */
	nop
/*
	Case statement for non-aligned cases (we've already
	checked the aligned case).
	NOTE: for non-aligned cases, the absolute shift value
	gets loaded into tmp3.
*/
/*			   S2 S1 */
	nop			/* 00 00 can't happen */
	nop
	b	shifts2		/* 00 01 */
	ldi	8,tmp3		/* load shift count (delay slot) */
	b	shifts2		/* 00 10 */
	ldi	16,tmp3		/* load shift count (delay slot) */
	b	shifts2		/* 00 11 */
	ldi	24,tmp3		/* load shift count (delay slot) */
	b	shifts1_0	/* 01 00 */
	ldi	8,tmp3		/* load shift count (delay slot) */
	b	eq_align1	/* 01 01 */
	ldbs,ma	1(s1),s1word
	b	shifts2		/* 01 10 */
	ldi	8,tmp3		/* load shift count (delay slot) */
	b	shifts2		/* 01 11 */
	ldi	16,tmp3		/* load shift count (delay slot) */
	b	shifts1_0	/* 10 00 */
	ldi	16,tmp3		/* load shift count (delay slot) */
	b	shifts1		/* 10 01 */
	ldi	8,tmp3		/* load shift count (delay slot) */
	b	eq_align2	/* 10 10 */
	ldhs,ma	2(s1),s1word
	b	shifts2		/* 10 11 */
	ldi	8,tmp3		/* load shift count (delay slot) */
	b	shifts1_0	/* 11 00 */
	ldi	24,tmp3		/* load shift count (delay slot) */
	b	shifts1		/* 11 01 */
	ldi	16,tmp3		/* load shift count (delay slot) */
	b	shifts1		/* 11 10 */
	ldi	8,tmp3		/* load shift count (delay slot) */
	ldbs,ma	1(s1),s1word	/* 11 11 : compare one byte, then word-aligned */
	ldbs,ma	1(s2),s2word
	sub,=	s1word,s2word,ret0	/* if not equal, we can return now */
	bv,n	r0(rp)
	comclr,<>	s1word,r0,ret0	/* byte equal; was it the terminator? */
	bv,n	r0(rp)
	b	loop			/* fall into main loop */
	ldwm	4(s1),s1word		/* (delay slot) first aligned s1 word */
eq_align1:
	ldbs,ma	1(s2),s2word
	sub,=	s1word,s2word,ret0	/* if not equal, we can return now */
	bv,n	r0(rp)
	comclr,<>	s1word,r0,ret0	/* byte equal; was it the terminator? */
	bv,n	r0(rp)
/* fall through to half-word aligned case */
	ldhs,ma	2(s1),s1word	/* load next halfword */
eq_align2:
	ldhs,ma	2(s2),s2word	/* load next halfword */
/* form the mask: 0xffff0000 and mask leading nulls in s1word and s2word
   so that we can fall into the main loop with word aligned data */
	ldi	16,save
	mtctl	save,r11	/* sar = 16 for the variable deposit below */
	zvdepi	-2,32,save	/* save = 0xffff0000 */
	or	save,s1word,s1word
	b	loop_plus	/* fall into main loop */
	or	save,s2word,s2word	/* (delay slot) */
/* s2's alignment is greater than s1's alignment, so we will shift s1 */
shifts1_0:
	addi	-4,s1,s1	/* fix up s1 due to earlier read */
shifts1:
	extru	s1,31,2,tmp1	/* tmp1 = s1 & 3 */
	extru	s2,31,2,tmp5	/* tmp5 = s2 & 3 */
	dep	r0,31,2,s1	/* Compute word address of s1 */
	dep	r0,31,2,s2	/* Compute word address of s2 */
	ldwm	4(s1),s1word	/* get first word of s1 */
	ldwm	4(s2),s2word	/* get first word of s2 */
	combt,=,n	r0,tmp1,masks2	/* Do we need to mask beginning of s1 */
	sh3add	tmp1,r0,save	/* save now has number of bits to mask (bytes*8) */
	mtctl	save,r11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s1word,s1word
masks2:
	sh3add	tmp5,r0,save	/* save now has number of bits to mask (bytes*8) */
	mtctl	save,r11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s2word,s2word
	ldi	-1,tmp7		/* load tmp7 with 0xffffffff */
	mtctl	tmp3,r11	/* Move shift amount to CR11 (sar) */
more:	uxor,nbz	s1word,r0,r0	/* Is there a null in s1? */
	b	ends1
	vshd	tmp7,s1word,save	/* (delay slot) align s1 bytes with s2 */
	combf,=,n	save,s2word,cmps1	/* mismatch: go compute sign */
	ldwm	4(s1),tmp7
	ldwm	4(s2),s2word
	uxor,nbz	tmp7,r0,r0	/* is there a null in s1? */
	b	ends1_0
	vshd	s1word,tmp7,save	/* (delay slot) aligned s1 bytes, other phase */
	combf,=,n	save,s2word,cmps1
	ldwm	4(s1),s1word
	b	more
	ldwm	4(s2),s2word	/* (delay slot) */
cmps1:	movb,tr	save,s1word,wordcomparereturn	/* s1word = shifted s1 bytes; branch always */
	nop
ends1_0:
	copy	tmp7,s1word	/* move tmp7 to s1word */
ends1:
	combf,=,n	save,s2word,nullins1	/* branch if no match */
	copy	save,s1word	/* delay slot */
/* At this point, we know that we've read a null */
/* from s1, so we can't read more from s1 */
	uxor,nbz	save,r0,r0	/* are the strings equal (null already seen)? */
	b,n	samestring
	vshd	s1word,r0,s1word	/* shift remaining s1 bytes into place */
	b	nullins1
	ldwm	4(s2),s2word	/* (delay slot) */
/* s1's alignment is greater than s2's alignment, so we will shift s2 */
shifts2:
	extru	s1,31,2,tmp1	/* tmp1 = s1 & 3 */
	extru	s2,31,2,tmp5	/* tmp5 = s2 & 3 */
	dep	r0,31,2,s1	/* Compute word address of s1 */
	dep	r0,31,2,s2	/* Compute word address of s2 */
	ldwm	4(s2),s2word	/* get first word of s2 */
	ldwm	4(s1),s1word	/* get first word of s1 */
	combt,=,n	r0,tmp5,masks1	/* Do we need to mask beginning of s2 */
	sh3add	tmp5,r0,save	/* save now has number of bits to mask (bytes*8) */
	mtctl	save,r11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s2word,s2word
masks1:
	sh3add	tmp1,r0,save	/* save now has number of bits to mask (bytes*8) */
	mtctl	save,r11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s1word,s1word
	ldi	-1,tmp7		/* load tmp7 with 0xffffffff */
	mtctl	tmp3,r11	/* Move shift amount to CR11 (sar) */
more1:	uxor,nbz	s2word,r0,r0	/* is there a null in s2? */
	b	ends2
	vshd	tmp7,s2word,save	/* (delay slot) align s2 bytes with s1 */
	combf,=,n	s1word,save,cmps2
	ldwm	4(s2),tmp7
	ldwm	4(s1),s1word
	uxor,nbz	tmp7,r0,r0	/* is there a null in s2? */
	b	ends2_0
	vshd	s2word,tmp7,save	/* (delay slot) aligned s2 bytes, other phase */
	combf,=,n	s1word,save,cmps2
	ldwm	4(s2),s2word
	b	more1
	ldwm	4(s1),s1word	/* (delay slot) */
cmps2:	movb,tr	save,s2word,wordcomparereturn	/* s2word = shifted s2 bytes; branch always */
	nop
ends2_0:
	copy	tmp7,s2word	/* move tmp7 to s2word */
ends2:
	combf,=,n	s1word,save,nullins1	/* branch if no match */
	copy	save,s2word	/* delay slot */
/* At this point, we know that we've read a null */
/* from s2, so we can't read more from s2 */
	uxor,nbz	save,r0,r0	/* are the strings equal (null already seen)? */
	b,n	samestring
	vshd	s2word,r0,s2word	/* shift remaining s2 bytes into place */
	b	nullins1
	ldwm	4(s1),s1word	/* (delay slot) */
EXIT(strcmp)
|
stsp/newlib-ia16
| 8,424
|
newlib/libc/machine/hppa/strncat.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*HPUX_ID: @(#) $Revision$ */
/* strncat(s1,s2,n) : concatenate at most n characters from s2 onto s1 */
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define count r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define tmp6 r31
#define save r1
#define tmp7 ret1 /* source offset-- reset to orig source addr if not aligned */
ENTRY(strncat)
/* char *strncat(char *d_addr, const char *s_addr, size_t count)
   In:  d_addr = r26 (dest), s_addr = r25 (src), count = r24 (n).
   Out: ret0 = original d_addr.
   Finds the end of d_addr, then copies up to count bytes from s_addr
   word-at-a-time (with shifting when the strings are mutually
   unaligned), falling back to a byte loop for tails and fix-ups.
   NOTE(review): delay slots after every branch are significant. */
	comb,=	r0,s_addr,quit		/* quit if s2=NULL */
	copy	d_addr,ret0		/* The return value is the value of d_addr.  DELAY SLOT*/
/* First look for end of s1 (d_addr) */
	extru	d_addr,31,2,tmp1	/* Extract the low two bits of the dest address. */
	combt,=	tmp1,r0,dont_mask	/* aligned dest: no masking needed */
	dep	0,31,2,d_addr		/*set word alignment (delay slot) */
	ldwm	4(d_addr),tmp2		/* first (partial) dest word */
	sh3add	tmp1,r0,save		/* build mask based on tmp1 (bytes*8 bits) */
	mtctl	save,11			/* sar = bit count */
	zvdepi	-2,32,save		/* save = leading-ones mask */
	or	save,tmp2,tmp2		/* force bytes before d_addr to non-zero */
	uxor,nbz	tmp2,r0,save	/* null byte in this word? */
search:
	b,n	found_end		/* nullified under uxor conditions above and below */
dont_mask:
	ldwm	4(d_addr),tmp2
	comib,tr	r0,r0,search	/* branch always back to the null test */
	uxor,nbz	tmp2,r0,save	/* (delay slot) null byte in this word? */
found_end:	/* at this point d_addr points to word */
	extru,<>	save,7,8,r0	/* null in first byte of the word? */
	addib,tr,n	-4,d_addr,begin_copy	/*set d_addr to end of s1 */
	extru,<>	save,15,8,r0	/* null in second byte? */
	addib,tr,n	-3,d_addr,begin_copy
	extru,<>	save,23,8,r0	/* null in third byte? */
	addi	-1,d_addr,d_addr
	addi	-1,d_addr,d_addr
begin_copy:
	addibt,<,n	-4,count,byteloop	/* If count is <= 4 don't get fancy.*/
	extru	s_addr,31,2,tmp4	/* Extract the low two bits of the source address.*/
	extru	d_addr,31,2,tmp5	/* Extract the low two bits of the destination address.*/
	add	count,tmp5,count	/* pre increment the count by the byte address */
	copy	s_addr,tmp6	/* save original s_addr in case we find null in first word */
	copy	s_addr, tmp7	/* save s_addr in case we find null before first store */
	comb,<>	tmp5,tmp4,not_aligned	/* branch if tmp5<>tmp4. */
	dep	0,31,2,s_addr	/* Compute the word address of the source.  DELAY SLOT.*/
/* mutually aligned copy */
	combt,=	tmp5,r0,skip_mask
	ldwm	4(0,s_addr),tmp1	/* tmp1 = *s_addr  s_addr += 4 (DELAY SLOT)*/
	sh3add	tmp5,r0,save	/* compute mask in save*/
	mtctl	save,11
	zvdepi	-2,32,save
	or	save,tmp1,tmp1	/* or mask with data*/
	uxor,nbz	tmp1,r0,save	/* check for null*/
	b,n	null1		/* null in first word: redo byte-wise from orig s_addr */
	addibt,<	-4,count,back_porch
	stbys,b,m	tmp1,4(0,d_addr)	/* store word (delay slot)*/
chunks:
	ldwm	4(0,s_addr),tmp1	/* get a word*/
skip_mask:
	uxor,nbz	tmp1,r0,save	/* check for null*/
	b,n	align_null1	/* null seen: back s_addr up and finish by bytes */
	addibf,<	-4,count,chunks	/* loop while >= 4 bytes remain */
	stbys,b,m	tmp1,4(0,d_addr)	/* store word (delay slot)*/
back_porch:	/* last word to store*/
	addibt,=,n	4,count,done	/* if count = 0 we're, of course, done !*/
	ldws	0(s_addr),tmp1	/* load up the back_porch*/
	sh3add	count,r0, save	/* setup right mask based on count*/
	mtctl	save,r11
	zvdepi	-2,32,save	/*save now has left-hand mask*/
	uaddcm	r0,save,save	/*form right hand mask */
	or	tmp1,save,tmp1	/*and insert data*/
	uxor,nbz	tmp1,r0,save	/* check for null*/
	b,n	null2		/* null in tail: store it byte-wise instead */
	add	d_addr,count,d_addr/* final store address is +1 too high !*/
	b	done
	stbys,e	tmp1,0(d_addr)	/* store tail bytes (delay slot) */
/* Begin non_aligned code. */
not_aligned:
	sub,>=	tmp5,tmp4,tmp6	/* compute the shift amt. and skip load if tmp5 > tmp4.*/
	ldwm	4(0,s_addr),tmp1	/* load up the first word from the source.  tmp1 = *s_addr++*/
	zdep	tmp6,28,29,tmp4	/* compute the number of bits to shift */
	mtctl	tmp4,11		/* load the shift count into cr11 = shift count register.*/
	addibt,<,n	-4,count,chkchnk2	/* first step in pre adjustment of count for looping.*/
	ldwm	4(0,s_addr),tmp2	/* get either first or second word from source. */
	combt,=	tmp5,r0,skip_mask4	/* don't mask if whole word is valid*/
	vshd	tmp1,tmp2,tmp3	/* position data ! (delay slot)*/
	sh3add	tmp5,r0,save	/* setup r1*/
	mtctl	save,r11	/* setup mask in save*/
	zvdepi	-2,32,save
	or	save, tmp3, tmp3
	mtctl	tmp4,11		/* re-load the shift count into cr11 */
skip_mask4:
	uxor,nbz	tmp3, r0, save	/* null in first shifted word? */
	b,n	null4		/* special case for first word */
	copy	r0, tmp5	/* zero out tmp5 so we don't try to mask again*/
	copy	r0, tmp7	/* zero out tmp7 so we don't try to use original s_addr anymore */
	b	continue
	stbys,b,m	tmp3,4(0,d_addr)	/* store ! (delay slot) */
chunk2:
	ldwm	4(0,s_addr),tmp2
	vshd	tmp1,tmp2,tmp3
skip_mask2:
	uxor,nbz	tmp3, r0, save	/* null in shifted word? */
	b,n	null3
	stbys,b,m	tmp3,4(0,d_addr)	/* store ! */
continue:
	ldwm	4(0,s_addr),tmp1	/* get 2nd word ! */
	vshd	tmp2,tmp1,tmp3	/* position data ! */
	uxor,nbz	tmp3, r0, save	/* null in shifted word? */
	b,n	null3
	addibf,<	-8,count,chunk2	/* If count is still >= 8 do another loop.*/
	stbys,b,m	tmp3,4(0,d_addr)	/* store ! (delay slot) */
chkchnk2:
	addibt,<,n	4,count,bp_0	/* if we don't have 4 bytes left then do the back porch (bp_0)*/
subchnk2:	/* we have less than 8 chars to copy*/
	ldwm	4(0,s_addr),tmp2	/* get next word !*/
	combt,=	tmp5,r0,skip_mask3
	vshd	tmp1,tmp2,tmp3	/* position data ! (delay slot) */
	sh3add	tmp5,r0,save	/* setup r1*/
	mtctl	save,r11	/* setup mask in save*/
	zvdepi	-2,32,save
	or	save, tmp3, tmp3
	mtctl	tmp4,11		/* restore shift value again */
skip_mask3:
	uxor,nbz	tmp3,r0,save	/* null in shifted word? */
	b,n	null3
	copy	r0,tmp5		/* zero out tmp5 so null3 does correct alignment */
	copy	r0,tmp7		/* zero out tmp7 so we don't use original s_addr since no longer valid */
	b	bp_1		/* we now have less than 4 bytes to move*/
	stbys,b,m	tmp3,4(0,d_addr)	/* store ! (delay slot) */
bp_0:
	copy	tmp1,tmp2	/* switch registers for shift process */
	addibt,<=,n	4,count,done	/* if count = -4 this implies that count = 0 -> done */
bp_1:
	ldwm	4(0,s_addr),tmp1	/* get final word ! */
	vshd	tmp2,tmp1,tmp3	/* position data !*/
	uxor,nbz	tmp3,r0,save	/* if no-byte-zero */
	b,n	bp_null		/* don't goto no_null-find which null instead */
no_null:
	add	d_addr,count,d_addr	/* set up d_addr for stbys,e */
	b	done		/* we're done*/
	stbys,e	tmp3,0(0,d_addr)	/* store the data ! (delay slot) */
/* here we do ye old byte-at-a-time moves.*/
align_null1:
	b	byteloop
	addi	-4,s_addr,s_addr	/* (delay slot) back up over word just loaded */
null1:
	copy	tmp6,s_addr	/* restore orig s_addr (aligned only) */
byteloop:
	addibt,=	4,count,done	/* re-adjust count; done when none left */
null2:
	ldbs,ma	1(s_addr),tmp1
encore:
	combt,=,n	tmp1,r0, done	/* stop at the source terminator */
	stbs,ma	tmp1,1(d_addr)
	addibf,=,n	-1,count,encore	/* loop until count bytes copied */
	ldbs,ma	1(s_addr),tmp1	/* (delay slot) next source byte */
	b,n	done
bp_null:
	addi	-4,count,count	/* fudge count 'cause byteloop will re-increment */
null3:	/* not_aligned case reset s_addr and finish byte-wise */
	combt,=,n	r0,tmp7,null3a	/* if tmp7 is not valid address then branch below */
	b	byteloop	/* otherwise reset s_addr to tmp7 and finish */
	copy	tmp7, s_addr	/* (delay slot) */
null3a:	/* right shift target */
	addibt,<,n	0,tmp6,null3b	/* if left shifting */
	sub	r0,tmp6,tmp6	/* do null3b code */
	addi	-4,tmp6,tmp6
	b	byteloop
	add	tmp6,s_addr,s_addr	/* reset s_addr by 4 + shift_amt (delay slot) */
null3b:
	subi	-8,tmp6,tmp6
	add	tmp5,tmp6,tmp6	/* adjust by the dest offset if this is our first store */
	b	byteloop
	add	tmp6,s_addr,s_addr	/* adjust s_addr by (8-shift_amt-dest_off) (delay slot) */
null4:
	add,>	tmp6,r0,tmp6	/* if left shift */
	b,n	null3		/* then do null3 */
	b	byteloop
	addi	-4,s_addr,s_addr	/* adj source only by 4 (delay slot) */
done:
	bv	0(r2)
	stbs	r0,0(d_addr)	/* terminate the destination string (delay slot) */
quit:
EXIT(strncat)
|
stsp/newlib-ia16
| 7,404
|
newlib/libc/machine/hppa/strncmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* strncmp(s1, s2, n) */
/* returns integer: < 0 iff s1 lexicographically less than s2 */
/* > 0 iff s1 lexicographically greater than s2 */
/* = 0 iff s1 lexicographically equal to s2 */
/* quit after n characters */
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 29
#define save 1
#define tmp6 23
#define tmp5 28
#define count 24
ENTRY(strncmp)
/* int strncmp(const char *s1, const char *s2, size_t count)
   In:  s1 = r26, s2 = r25, count = r24.  Out: ret0 (r28) <0 / 0 / >0.
   Word-at-a-time compare with byte-wise checks inside each word;
   unaligned operands are handled by variable shifts through sar (cr11).
   NOTE(review): delay slots and nullifying completers are significant. */
	combt,<,n	r0,count,search	/* N <= 0 yields equality */
	bv	r0(rp)			/* */
	copy	0,ret0			/* return 0 (DELAY SLOT) */
search: combf,=,n	s1,s2,findout	/* s1 != s2? */
	bv	r0(rp)			/* identical pointers => equal */
	copy	0,ret0			/* return 0 (delay slot) */
findout:
	comibf,=,n	0,s1,checks1	/* s1 == NULL? */
	ldbs	0(0,s2),ret0		/* treat NULL s1 as empty string */
	bv	r0(rp)			/* */
	subi	0,ret0,ret0		/* ret0 <- -*s2 (delay slot) */
checks1:
	comibf,=,n	0,s2,checkitout	/* s2 == NULL? */
	bv	r0(rp)			/* treat NULL s2 as empty string */
	ldbs	0(0,s1),28		/* return *s1 (r28 = ret0, delay slot) */
checkitout:
	extru	s2,31,2,tmp1	/* Extract the low two bits of the s2. */
	extru	s1,31,2,tmp5	/* Extract the low two bits of the s1 */
	sub,=	tmp5,tmp1,tmp3	/* Are s1 & s2 aligned with each other? */
	b	not_aligned	/* It's more complicated (not_aligned) */
	dep	0,31,2,s1	/* Compute word address of s1 (DELAY SLOT) */
	dep	0,31,2,s2	/* Compute word address of s2 */
	ldwm	4(0,s1),s1word	/* get next s1 word s1+=4 */
	combt,=	tmp5,r0,skipmask	/* skip masking, if we can */
	ldwm	4(0,s2),s2word	/* get next s2 word s2+=4 (DELAY SLOT) */
	add	tmp5,count,count	/* bump count by the number of bytes */
				/* we are going to mask */
	sh3add	tmp5,r0,save	/* save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s1word,s1word	/* mask s1word (s1) */
	or	save,s2word,s2word	/* mask s2word (s2) */
skipmask:
	combt,=,n	s1word,s2word,chknulls	/* are these words equal? */
checkbyte:
	extru	s1word,7,8,tmp3	/* get first byte (character) */
ckbyte2: extru	s2word,7,8,tmp7	/* get first byte (character) */
	combf,=	tmp3,tmp7,done	/* quit if first byte is not equal */
	sub	tmp3,tmp7,ret0	/* return difference (delay slot) */
	comibt,=,n	0,tmp3,done	/* have we reached the end of string */
				/* if so done  ret0 already has zero */
	addibt,<=,n	-1,count,done	/* have we checked N chars? ret0 == 0 */
	extru	s1word,15,8,tmp3	/* get second byte (character) */
	extru	s2word,15,8,tmp7	/* get second byte (character) */
	combf,=	tmp3,tmp7,done	/* quit if second byte is not equal */
	sub	tmp3,tmp7,ret0	/* return difference (delay slot) */
	comibt,=,n	0,tmp3,done	/* have we reached the end of string */
				/* if so done  ret0 already has zero */
	addibt,<=,n	-1,count,done	/* have we checked N chars? */
	extru	s1word,23,8,tmp3	/* get third byte (character) */
	extru	s2word,23,8,tmp7	/* get third byte (character) */
	combf,=	tmp3,tmp7,done	/* done if third byte is not equal */
	sub	tmp3,tmp7,ret0	/* return difference (delay slot) */
	comibt,=,n	0,tmp3,done	/* have we reached the end of string */
				/* if so done  ret0 already has zero */
	addibt,<=,n	-1,count,done	/* have we checked N chars? */
	extru	s1word,31,8,tmp3	/* get last byte (character) */
	extru	s2word,31,8,tmp7	/* get last byte (character) */
	bv	r0(rp)		/* */
	sub	tmp3,tmp7,ret0	/* the last characters in the word is */
				/* where the difference is, so return */
				/* the difference and we're outta here */
chknulls:
	addibt,<=,n	-4,count,zero	/* have we checked N chars? */
	uxor,nbz	s1word,0,0	/* don't have to check s2  Just quit */
	bv	r0(rp)		/* null seen and words equal: strings equal */
	copy	0,28		/* return 0 (delay slot) */
	ldwm	4(0,s2),s2word	/* get next s2 word s2+=4 */
	b	skipmask	/* keep checking */
	ldwm	4(0,s1),s1word	/* get next s1 word s1+=4 (delay slot) */
not_aligned:
	dep	r0,31,2,s2	/* Compute word address of s2 */
	combt,<,n	r0,tmp3,shifts1	/* Do we shift s1 or s2 */
	sh3add	tmp3,r0,tmp3	/* eight bits per byte so mul by 8 */
	ldwm	4(0,s1),s1word	/* get first word of s1 */
	ldwm	4(0,s2),s2word	/* get first word of s2 */
	combt,=,n	r0,tmp5,masks2	/* Do we need to mask beginning of s1 */
	add	tmp5,count,count	/* bump count by the number of bytes */
				/* we are going to mask */
	sh3add	tmp5,r0,save	/* save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s1word,s1word	/* */
masks2: sh3add	tmp1,r0,save	/* save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s2word,s2word	/* */
	mtctl	tmp3,11		/* Move shift amount to CR11 */
more:	uxor,nbz	s2word,r0,r0	/* Is there a null in first word */
	b,n	chunk1		/* */
	ldwm	4(0,s2),tmp7	/* load second word to enable us to shift */
	vshd	s2word,tmp7,s2word	/* align s2 bytes with s1 */
	combf,=,n	s1word,s2word,ckbyte2	/* mismatch: resolve byte-wise */
	extru	s1word,7,8,tmp3	/* get first byte (DELAY SLOT) */
	addibt,<=,n	-4,count,zero	/* have we checked N chars? */
	uxor,nbz	s1word,0,0	/* even though they're equal we could be done */
	b,n	zero
	copy	tmp7,s2word	/* carry unshifted word to next iteration */
	b	more		/* keep checking */
	ldwm	4(0,s1),s1word	/* get next s1 (DELAY SLOT) */
chunk1:
	vshd	s2word,r0,s2word	/* shift final s2 bytes into place */
	b	ckbyte2		/* */
	extru	s1word,7,8,tmp3	/* (delay slot) */
shifts1:
	sh3add	tmp3,r0,tmp3	/* eight bits per byte so mul by 8 */
	sub	r0,tmp3,tmp3	/* Get negative value for left shift */
	ldwm	4(0,s2),s2word	/* get first word of s2 */
	ldwm	4(0,s1),s1word	/* get first word of s1 */
	combt,=,n	r0,tmp1,masks1	/* Do we need to mask beginning of s2 */
	add	tmp1,count,count	/* bump count by the number of bytes */
				/* we are going to mask */
	sh3add	tmp1,r0,save	/* save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s2word,s2word	/* */
masks1: sh3add	tmp5,r0,save	/* save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s1word,s1word	/* */
	mtctl	tmp3,11		/* Move shift amount to CR11 */
more1:	uxor,nbz	s1word,r0,r0	/* Is there a null in first byte */
	b,n	chunk2		/* */
	ldwm	4(0,s1),tmp7	/* load second word to enable us to shift */
	vshd	s1word,tmp7,s1word	/* align s1 bytes with s2 */
	combf,=,n	s2word,s1word,ckbyte2	/* mismatch: resolve byte-wise */
	extru	s1word,7,8,tmp3	/* get first byte (DELAY SLOT) */
	addibt,<=,n	-4,count,zero	/* have we checked N chars? */
	uxor,nbz	s2word,0,0	/* even though they're equal we could be done */
	b,n	zero		/* zero ret0 and quit */
	copy	tmp7,s1word	/* carry unshifted word to next iteration */
	b	more1		/* keep checking */
	ldwm	4(0,s2),s2word	/* get next s2 (DELAY SLOT) */
chunk2:
	vshd	s1word,r0,s1word	/* shift final s1 bytes into place */
	b	ckbyte2		/* */
	extru	s1word,7,8,tmp3	/* (delay slot) */
zero:	copy	r0,ret0		/* equal within N chars */
done:
EXIT(strncmp)
|
stsp/newlib-ia16
| 1,332
|
newlib/libc/machine/hppa/memchr.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* SPECTRUM_ID: @(#)memchr.s 37.4 86/04/23 */
/*
* memchr(s, c, n)
*
* returns pointer to first occurrence of char c
* in first n characters of memory area s,
* or null if c does not occur.
*/
#include "DEFS.h"
#define FROM arg0
#define CHAR arg1
#define COUNT arg2
#define TEMP1 r19
ENTRY(memchr)
/* void *memchr(const void *FROM, int CHAR, size_t COUNT)
   In:  FROM = arg0, CHAR = arg1, COUNT = arg2.
   Out: ret0 = pointer to first matching byte, or NULL.
   Simple byte-at-a-time scan; delay slots are significant. */
	comb,<=	COUNT,r0,memchrexit	/* return if count is zero (or negative) */
	copy	r0,ret0		/* null if c not found in n chars (delay slot) */
	depi	0,23,24,CHAR	/* make char unsigned: clear all but low 8 bits */
	ldbs,ma	1(FROM),TEMP1	/* TEMP1 = *FROM++ */
memchrloop:
	comb,=,n	TEMP1,CHAR,memchrequal	/* found a match */
	addib,<>	-1,COUNT,memchrloop	/* loop while bytes remain */
	ldbs,ma	1(FROM),TEMP1	/* (delay slot) TEMP1 = *FROM++ */
	b,n	memchrexit	/* exhausted: ret0 still NULL */
memchrequal:
	ldo	-1(FROM),ret0	/* FROM was post-incremented; back up one */
memchrexit:
EXIT(memchr)
|
stsp/newlib-ia16
| 7,108
|
newlib/libc/machine/hppa/memcmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* memcmp(s1, s2, n) */
/* returns integer: < 0 iff s1 lexicographically less than s2 */
/* > 0 iff s1 lexicographically greater than s2 */
/* = 0 iff s1 lexicographically equal to s2 */
/* quit after n characters */
#ifndef _NAMESPACE_CLEAN
#define NOSECDEF /* prevents _memcmp from becoming primary entry */
#endif
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 29
#define save 1
#define tmp6 23
#define tmp5 28
#define count 24
ENTRY(memcmp)
/* int memcmp(const void *s1, const void *s2, size_t count)
   In:  s1 = r26, s2 = r25, count = r24.  Out: ret0 (r28) <0 / 0 / >0.
   Word-at-a-time compare; unlike strncmp there is no null-terminator
   check in the aligned fast path, only the count.
   NOTE(review): delay slots and nullifying completers are significant. */
	combt,<,n	r0,count,search	/*N <= 0 yields equality */
	b	done		/**/
	copy	0,ret0		/*return 0 (DELAY SLOT) */
search: combf,=,n	s1,s2,findout	/*s1 != s2? */
	b	done		/* identical pointers => equal */
	copy	0,ret0		/*return 0 (delay slot) */
findout:
	comibf,=,n	0,s1,checks1	/*s1 == NULL? */
	ldbs	0(0,s2),ret0	/* treat NULL s1 as all-zero area */
	b	done		/*quit */
	sub	0,ret0,ret0	/*ret0 <- -*s2 (delay slot) */
checks1:
	comibf,=,n	0,s2,checkitout	/*s2 == NULL? */
	b	done		/* quit */
	ldbs	0(0,s1),28	/* return *s1 (r28 = ret0, delay slot) */
checkitout:
	extru	s2,31,2,tmp1	/* Extract the low two bits of the s2. */
	extru	s1,31,2,tmp5	/* Extract the low two bits of the s1 */
	sub,=	tmp5,tmp1,tmp3	/* Are s1 & s2 aligned with each other? */
	b	not_aligned	/* It's more complicated (not_aligned) */
	dep	0,31,2,s1	/* Compute word address of s1 (DELAY SLOT) */
	dep	0,31,2,s2	/* Compute word address of s2 */
	ldwm	4(0,s1),s1word	/* get next s1 word s1+=4 */
	combt,=	tmp5,r0,skipmask	/* skip masking, if we can */
	ldwm	4(0,s2),s2word	/* get next s2 word s2+=4 (DELAY SLOT) */
	add	tmp5,count,count	/* bump count by the number of bytes */
				/* we are going to mask */
	sh3add	tmp5,r0,save	/* save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s1word,s1word	/* mask s1word (s1) */
	or	save,s2word,s2word	/* mask s2word (s2) */
skipmask:
	combt,=,n	s1word,s2word,checkN	/* We may be done */
checkbyte:
	extru	s1word,7,8,tmp3	/* get first byte (character) */
ckbyte2: extru	s2word,7,8,tmp7	/* get first byte (character) */
	combf,=	tmp3,tmp7,done	/* quit if first byte is not equal */
	sub	tmp3,tmp7,ret0	/* return difference (delay slot) */
	addibt,<=,n	-1,count,done	/* have we checked N chars? ret0 == 0 */
	extru	s1word,15,8,tmp3	/* get second byte (character) */
	extru	s2word,15,8,tmp7	/* get second byte (character) */
	combf,=	tmp3,tmp7,done	/* quit if second byte is not equal */
	sub	tmp3,tmp7,ret0	/* return difference (delay slot) */
	addibt,<=,n	-1,count,done	/* have we checked N chars? */
	extru	s1word,23,8,tmp3	/* get third byte (character) */
	extru	s2word,23,8,tmp7	/* get third byte (character) */
	combf,=	tmp3,tmp7,done	/* done if third byte is not equal */
	sub	tmp3,tmp7,ret0	/* return difference (delay slot) */
	addibt,<=,n	-1,count,done	/* have we checked N chars? */
	extru	s1word,31,8,tmp3	/* get last byte (character) */
	extru	s2word,31,8,tmp7	/* get last byte (character) */
	b	done		/* if we reach this point we know that */
	sub	tmp3,tmp7,ret0	/* the last character in the word is */
				/* where the difference is, so return */
				/* the difference and we're outta here */
checkN:
	addibt,<=,n	-4,count,zero	/* have we checked N chars? */
	ldwm	4(0,s2),s2word	/* get next s2 word s2+=4 */
	b	skipmask	/* keep checking */
	ldwm	4(0,s1),s1word	/* get next s1 word s1+=4 (delay slot) */
not_aligned:
	dep	r0,31,2,s2	/* Compute word address of s2 */
	combt,<,n	r0,tmp3,shifts1	/* Do we shift s1 or s2 */
	sh3add	tmp3,r0,tmp3	/* eight bits per byte so mul by 8 */
	ldwm	4(0,s1),s1word	/* get first word of s1 */
	ldwm	4(0,s2),s2word	/* get first word of s2 */
	combt,=,n	r0,tmp5,masks2	/* Do we need to mask beginning of s1 */
	add	tmp5,count,count	/* bump count by the number of bytes */
				/* we are going to mask */
	sh3add	tmp5,r0,save	/* save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s1word,s1word	/**/
masks2: sh3add	tmp1,r0,save	/* save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/* load save with proper mask */
	or	save,s2word,s2word	/**/
	subi	4,tmp1,tmp1	/* tmp1 now has the number of bytes that */
				/* are valid in s2word before the vshd */
	mtctl	tmp3,11		/* Move shift amount to CR11 */
more:	combt,<=,n	count,tmp1,chunk1	/* Can we do the vshd? */
	ldwm	4(0,s2),tmp7	/* load second word to enable us to shift */
	vshd	s2word,tmp7,s2word	/* align s2 bytes with s1 */
	combf,=,n	s1word,s2word,ckbyte2	/* mismatch: resolve byte-wise */
	extru	s1word,7,8,tmp3	/* get first byte (DELAY SLOT) */
	addibt,<=,n	-4,count,zero	/* have we checked N chars? */
	copy	tmp7,s2word	/* carry unshifted word to next iteration */
	b	more		/* keep checking */
	ldwm	4(0,s1),s1word	/* get next s1 (DELAY SLOT) */
chunk1:
	vshd	s2word,r0,s2word	/* shift final s2 bytes into position */
	b	ckbyte2		/**/
	extru	s1word,7,8,tmp3	/* (delay slot) */
shifts1:
	sh3add	tmp3,r0,tmp3	/* eight bits per byte so mul by 8 */
	sub	r0,tmp3,tmp3	/* Get negative value for left shift */
	dep	r0,31,2,s2	/* Compute word address of s2 */
				/* NOTE(review): s2 was already word-aligned at not_aligned; this looks redundant but harmless */
	ldwm	4(0,s2),s2word	/* get first word of s2 */
	ldwm	4(0,s1),s1word	/* get first word of s1 */
	combt,=,n	r0,tmp1,masks1	/*Do we need to mask beginning of s2 */
	add	tmp1,count,count	/*bump count by the number of bytes */
				/* we are going to mask */
	sh3add	tmp1,r0,save	/*save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/*load save with proper mask */
	or	save,s2word,s2word	/**/
masks1: sh3add	tmp5,r0,save	/*save now has number of bits to mask */
	mtctl	save,11
	zvdepi	-2,32,save	/*load save with proper mask */
	or	save,s1word,s1word	/**/
	subi	4,tmp5,tmp5	/*tmp5 now has the number of bytes that */
				/*are valid in s1word before the vshd */
	mtctl	tmp3,11		/*Move shift amount to CR11 */
more1:	combt,<=,n	count,tmp5,chunk2	/*Can we do the vshd? */
	ldwm	4(0,s1),tmp7	/*load second word to enable us to shift */
	vshd	s1word,tmp7,s1word	/* align s1 bytes with s2 */
	combf,=,n	s2word,s1word,ckbyte2	/* mismatch: resolve byte-wise */
	extru	s1word,7,8,tmp3	/*get first byte (DELAY SLOT) */
	addibt,<=,n	-4,count,zero	/*have we checked N chars? */
	copy	tmp7,s1word	/* carry unshifted word to next iteration */
	b	more1		/*keep checking */
	ldwm	4(0,s2),s2word	/*get next s2 (DELAY SLOT) */
chunk2:
	vshd	s1word,r0,s1word	/* shift final s1 bytes into position */
	b	ckbyte2		/**/
	extru	s1word,7,8,tmp3	/* (delay slot) */
zero:	copy	r0,ret0		/* equal within N chars */
done:
EXIT(memcmp)
|
stsp/newlib-ia16
| 7,312
|
newlib/libc/machine/hppa/pcc_prefix.s
|
;
; (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;
; To anyone who acknowledges that this file is provided "AS IS"
; without any express or implied warranty:
; permission to use, copy, modify, and distribute this file
; for any purpose is hereby granted without fee, provided that
; the above copyright notice and this notice appears in all
; copies, and that the name of Hewlett-Packard Company not be
; used in advertising or publicity pertaining to distribution
; of the software without specific, written prior permission.
; Hewlett-Packard Company makes no representations about the
; suitability of this software for any purpose.
;
; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
; - fr16-31 added at Utah
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
r0: .equ 0
r1: .equ 1
r2: .equ 2
r3: .equ 3
r4: .equ 4
r5: .equ 5
r6: .equ 6
r7: .equ 7
r8: .equ 8
r9: .equ 9
r10: .equ 10
r11: .equ 11
r12: .equ 12
r13: .equ 13
r14: .equ 14
r15: .equ 15
r16: .equ 16
r17: .equ 17
r18: .equ 18
r19: .equ 19
r20: .equ 20
r21: .equ 21
r22: .equ 22
r23: .equ 23
r24: .equ 24
r25: .equ 25
r26: .equ 26
r27: .equ 27
r28: .equ 28
r29: .equ 29
r30: .equ 30
r31: .equ 31
; Hardware Space Registers
sr0: .equ 0
sr1: .equ 1
sr2: .equ 2
sr3: .equ 3
sr4: .equ 4
sr5: .equ 5
sr6: .equ 6
sr7: .equ 7
; Hardware Floating Point Registers
fr0: .equ 0
fr1: .equ 1
fr2: .equ 2
fr3: .equ 3
fr4: .equ 4
fr5: .equ 5
fr6: .equ 6
fr7: .equ 7
fr8: .equ 8
fr9: .equ 9
fr10: .equ 10
fr11: .equ 11
fr12: .equ 12
fr13: .equ 13
fr14: .equ 14
fr15: .equ 15
fr16: .equ 16
fr17: .equ 17
fr18: .equ 18
fr19: .equ 19
fr20: .equ 20
fr21: .equ 21
fr22: .equ 22
fr23: .equ 23
fr24: .equ 24
fr25: .equ 25
fr26: .equ 26
fr27: .equ 27
fr28: .equ 28
fr29: .equ 29
fr30: .equ 30
fr31: .equ 31
; Hardware Control Registers
cr0: .equ 0
rctr: .equ 0 ; Recovery Counter Register
cr8: .equ 8 ; Protection ID 1
pidr1: .equ 8
cr9: .equ 9 ; Protection ID 2
pidr2: .equ 9
cr10: .equ 10
ccr: .equ 10 ; Coprocessor Configuration Register
cr11: .equ 11
sar: .equ 11 ; Shift Amount Register
cr12: .equ 12
pidr3: .equ 12 ; Protection ID 3
cr13: .equ 13
pidr4: .equ 13 ; Protection ID 4
cr14: .equ 14
iva: .equ 14 ; Interrupt Vector Address
cr15: .equ 15
eiem: .equ 15 ; External Interrupt Enable Mask
cr16: .equ 16
itmr: .equ 16 ; Interval Timer
cr17: .equ 17
pcsq: .equ 17 ; Program Counter Space queue
cr18: .equ 18
pcoq: .equ 18 ; Program Counter Offset queue
cr19: .equ 19
iir: .equ 19 ; Interruption Instruction Register
cr20: .equ 20
isr: .equ 20 ; Interruption Space Register
cr21: .equ 21
ior: .equ 21 ; Interruption Offset Register
cr22: .equ 22
ipsw: .equ 22 ; Interruption Processor Status Word
cr23: .equ 23
eirr: .equ 23 ; External Interrupt Request
cr24: .equ 24
ppda: .equ 24 ; Physical Page Directory Address
tr0: .equ 24 ; Temporary register 0
cr25: .equ 25
hta: .equ 25 ; Hash Table Address
tr1: .equ 25 ; Temporary register 1
cr26: .equ 26
tr2: .equ 26 ; Temporary register 2
cr27: .equ 27
tr3: .equ 27 ; Temporary register 3
cr28: .equ 28
tr4: .equ 28 ; Temporary register 4
cr29: .equ 29
tr5: .equ 29 ; Temporary register 5
cr30: .equ 30
tr6: .equ 30 ; Temporary register 6
cr31: .equ 31
tr7: .equ 31 ; Temporary register 7
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention ~
; Register Definitions for Use with Assembler ~
; version A.08.06
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
rp: .equ r2 ; return pointer
mrp: .equ r31 ; millicode return pointer
ret0: .equ r28 ; return value
ret1: .equ r29 ; return value (high part of double)
sl: .equ r29 ; static link
sp: .equ r30 ; stack pointer
dp: .equ r27 ; data pointer
arg0: .equ r26 ; argument
arg1: .equ r25 ; argument or high part of double argument
arg2: .equ r24 ; argument
arg3: .equ r23 ; argument or high part of double argument
;_____________________________________________________________________________
; Software Architecture Space Registers
; sr0 ; return link from BLE
sret: .equ sr1 ; return value
sarg: .equ sr1 ; argument
; sr4 ; PC SPACE tracker
; sr5 ; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ 64 ; old stack pointer (locates previous frame)
#if 0
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions. version A.08.06
; These are generally suitable for programs on HP_UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
.SPACE $TEXT$, SPNUM=0,SORT=8
; .subspa $FIRST$, QUAD=0,ALIGN=2048,ACCESS=0x2c,SORT=4,FIRST
; .subspa $REAL$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=4,FIRST,LOCK
.subspa $MILLICODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
.subspa $LIT$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
.subspa $CODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
; .subspa $UNWIND$, QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=64
; .subspa $RECOVER$, QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=80
; .subspa $RESERVED$, QUAD=0,ALIGN=8,ACCESS=0x73,SORT=82
; .subspa $GATE$, QUAD=0,ALIGN=8,ACCESS=0x4c,SORT=84,CODE_ONLY
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;
.SPACE $PRIVATE$, SPNUM=1,PRIVATE,SORT=16
.subspa $GLOBAL$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
.import $global$
.subspa $SHORTDATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=24
.subspa $DATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
.subspa $PFA_COUNTER$, QUAD=1,ALIGN=4,ACCESS=0x1f,SORT=8
.subspa $SHORTBSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=80,ZERO
.subspa $BSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO
; .subspa $PCB$, QUAD=1,ALIGN=8,ACCESS=0x10,SORT=82
; .subspa $STACK$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82
; .subspa $HEAP$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82
;;;;;;;;;;;;;;;;
; .SPACE $PFA$, SPNUM=0,PRIVATE,UNLOADABLE,SORT=64
; .subspa $PFA_ADDRESS$, ALIGN=4,ACCESS=0x2c,UNLOADABLE
;;;;;;;;;;;;;;;;
; .SPACE $DEBUG$, SPNUM=2,PRIVATE,UNLOADABLE,SORT=80
; .subspa $HEADER$, ALIGN=4,ACCESS=0,UNLOADABLE,FIRST
; .subspa $GNTT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $LNTT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $SLT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $VT$, ALIGN=4,ACCESS=0,UNLOADABLE
; To satisfy the copyright terms each .o will have a reference
; to the actual copyright. This will force the actual copyright
; message to be brought in from libgloss/hp-milli.s
.space $PRIVATE$
.subspa $DATA$
#else
.data
#endif
.import ___hp_free_copyright,data
L$copyright .word ___hp_free_copyright
/* ===== file boundary: stsp/newlib-ia16 — newlib/libc/machine/hppa/strcpy.S (10,201 bytes) ===== */
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*
A faster strcpy.
by
Jerry Huck (aligned case)
Daryl Odnert (equal-alignment case)
Edgar Circenis (non-aligned case)
*/
/*
* strcpy(s1, s2)
*
* Copy string s2 to s1. s1 must be large enough.
* return s1
*/
#include "DEFS.h"
/* Register aliases.  PA-RISC SOM calling convention: r26 = arg0 (dst),
   r25 = arg1 (src), r28 = ret0 (return value), r1 is a scratch reg.
   tmp1/evenside and tmp2/oddside deliberately alias the same registers:
   the "tmpN" names are used in the unaligned paths, the side names in
   the word-aligned fast path. */
#define d_addr r26
#define s_addr r25
#define tmp6 r24
#define tmp1 r19
#define evenside r19
#define tmp2 r20
#define oddside r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
/*-----------------------------------------------------------------------
 * char *strcpy(char *s1, const char *s2)
 * In:   d_addr (r26) = s1 destination, s_addr (r25) = s2 source
 * Out:  ret0 (r28) = original s1
 * Copies word-at-a-time where alignment allows; a zero byte inside a
 * word is detected with the "unit xor" (uxor) byte-condition completers
 * (nbz = no byte zero, sbz = some byte zero), which nullify the
 * following instruction accordingly.
 *---------------------------------------------------------------------*/
ENTRY(strcpy)
/* Do some quick alignment checking, and fast-path the case where both
   pointers are word aligned */
extru,<> s_addr,31,2,tmp6 /*Is source word aligned? */
ldwm 4(0,s_addr),oddside /*Assume yes and guess that it
is double-word aligned. */
dep,= d_addr,29,2,tmp6 /*Is target word aligned? */
b case_analysis
copy d_addr,ret0
/* Both are aligned. First source word already loaded assuming that
source was oddword aligned. Fall through (therefore fastest) code
shuffles the registers to join the main loop */
bothaligned:
bb,>= s_addr,29,twoatatime /*Branch if source was odd aligned*/
uxor,nbz oddside,r0,save
/* Even aligned source. save holds that operand.
Do one iteration of the main copy loop juggling the registers to avoid
one copy. */
b,n nullfound
ldwm 4(s_addr),oddside
stwm save,4(d_addr)
uxor,nbz oddside,r0,save
b,n nullfound
ldwm 4(s_addr),evenside
stwm oddside,4(d_addr)
uxor,nbz evenside,r0,save
b,n nullfound
ldwm 4(s_addr),oddside
/* Main loop body. Entry expects evenside still to be stored, oddside
just loaded. */
loop:
stwm evenside,4(d_addr)
uxor,nbz oddside,r0,save
/* mid loop entry */
twoatatime:
b,n nullfound
ldwm 4(s_addr),evenside
stwm oddside,4(d_addr)
uxor,sbz evenside,r0,save
b loop
ldwm 4(s_addr),oddside
/* fall through when null found in evenside. oddside actually loaded */
nullfound: /* adjust d_addr and store final word */
/* save holds the word containing the terminator; probe its bytes
   left-to-right and advance d_addr to cover the valid prefix. */
extru,<> save,7,8,r0 /* pick up leftmost byte */
addib,tr,n 1,d_addr,store_final
extru,<> save,15,8,r0
addib,tr,n 2,d_addr,store_final
extru,<> save,23,8,r0
addib,tr 3,d_addr,store_final2
bv 0(rp)
stw save,0(d_addr)
store_final:
bv 0(rp)
store_final2:
stbys,e save,0(d_addr) /* delay slot */
/* blr indexes a table of 16 two-instruction slots by
   (target alignment << 2) | source alignment. */
case_analysis:
blr tmp6,r0
nop
/* NOTE: the delay slots for the non-aligned cases load a */
/* shift quantity which is TGT-SRC into tmp3. */
/* Note also, the case for both strings being word aligned */
/* is already checked before the BLR is executed, so that */
/* case can never occur. */
/* TGT SRC */
nop /* 00 00 can't happen */
nop
b neg_aligned_copy /* 00 01 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 00 10 */
ldi -2,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 00 11 */
ldi -3,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 01 00 */
ldi 1,tmp3 /* load shift quantity. delay slot */
b equal_alignment_1 /* 01 01 */
ldbs,ma 1(s_addr),tmp1
b neg_aligned_copy /* 01 10 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 01 11 */
ldi -2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 10 00 */
ldi 2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 10 01 */
ldi 1,tmp3 /* load shift quantity. delay slot */
b equal_alignment_2 /* 10 10 */
ldhs,ma 2(s_addr),tmp1
b neg_aligned_copy /* 10 11 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 11 00 */
ldi 3,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 11 01 */
ldi 2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 11 10 */
ldi 1,tmp3 /* load shift quantity. delay slot */
ldbs,ma 1(s_addr),tmp1 /* 11 11 */
comiclr,<> r0,tmp1,r0
bv 0(rp) /* return if 1st byte was null */
stbs,ma tmp1,1(d_addr) /* store a byte to dst string */
b bothaligned /* can now goto word_aligned */
ldwm 4(s_addr),oddside /* load next word of source */
/* Equal (but non-word) alignment: copy one byte / one halfword to
   bring both pointers to a word boundary, then join the fast path. */
equal_alignment_1:
comiclr,<> r0,tmp1,r0 /* nullify next if tmp1 <> 0 */
bv 0(rp) /* return if null byte found */
stbs,ma tmp1,1(d_addr) /* store a byte to dst string */
ldhs,ma 2(s_addr),tmp1 /* load next halfword */
equal_alignment_2:
extru,<> tmp1,23,8,tmp6 /* look at left byte of halfword */
bv 0(rp) /* return if 1st byte was null */
stbs,ma tmp6,1(d_addr)
extru,<> tmp1,31,8,r0
bv 0(rp) /* return if 2nd byte was null */
stbs,ma tmp1,1(d_addr)
b bothaligned
ldwm 4(s_addr),oddside /* load next word */
/* source and destination are not aligned, so we do it the hard way. */
/* target alignment is greater than source alignment */
/* The cr11 (SAR) based vshd merges adjacent source words into an
   aligned destination word; zvdepi builds a byte mask so bytes read
   from before the start of the string cannot fake a terminator. */
pos_aligned_copy0:
addi -4,s_addr,s_addr
pos_aligned_copy:
extru d_addr,31,2,tmp6 /* Extract low 2 bits of the dest addr */
extru s_addr,31,2,tmp1 /* Extract low 2 bits of the src addr */
dep r0,31,2,s_addr /* Compute word address of the source. */
sh3add tmp3,r0,tmp4 /* compute shift amt */
ldwm 4(0,s_addr),tmp2 /* get 1st source word */
sh3add tmp1,r0,save /* setup mask shift amount */
mtctl save,r11 /* set-up cr11 for mask */
zvdepi -2,32,save /* create mask */
or save,tmp2,tmp2 /* mask unused bytes in src */
ldi -1,tmp1 /* load tmp1 with 0xffffffff */
mtctl tmp4,r11 /* shift count -> shift count reg */
vshd tmp1,tmp2,tmp3 /* position data ! */
uxor,nbz tmp3,r0,save
b,n first_null
uxor,nbz tmp2,r0,save
b nullfound1
mtctl tmp4,r11 /* re-load shift cnt (delay slot) */
b loop_entry
ldwm 4(0,s_addr),tmp1 /* get next word. delay slot */
neg_aligned_copy:
extru d_addr,31,2,tmp6 /* Extract low 2 bits of the dest addr */
extru s_addr,31,2,tmp2 /* Extract low 2 bits of the src addr */
dep r0,31,2,s_addr /* Compute word address of the source. */
sh3add tmp3,r0,tmp4 /* compute shift amt */
ldwm 4(0,s_addr),tmp1 /* load first word from source. */
/* check to see if next word can be read safely */
sh3add tmp2,r0,save
mtctl save,r11 /* shift count -> shift count reg */
zvdepi -2,32,save
or save, tmp1, tmp1
uxor,nbz tmp1,r0,save /* any nulls in first word? */
b first_null0
mtctl tmp4,r11
ldwm 4(0,s_addr),tmp2 /* load second word from source */
combt,= tmp6,r0,chunk1 /* don't mask if whole word valid */
vshd tmp1,tmp2,tmp3 /* position data ! */
sh3add tmp6,r0,save /* setup r1 */
mtctl save,r11 /* set-up cr11 for mask */
zvdepi -2,32,save
or save, tmp3, tmp3
uxor,nbz tmp3,r0,save
b,n first_null
uxor,nbz tmp2,r0,save
b nullfound1
mtctl tmp4,r11 /* re-load shift cnt (delay slot) */
b loop_entry
ldwm 4(0,s_addr),tmp1 /* get next word. delay slot */
chunk1:
uxor,nbz tmp2,r0,save
b nullfound0
vshd tmp1,tmp2,tmp3
/* Unaligned main loop: alternate tmp1/tmp2 as the freshly-loaded word,
   vshd-merge into tmp3, store with stbys/stwm. */
did_mask:
ldwm 4(0,s_addr),tmp1 /* get next word ! */
loop_entry:
stbys,b,m tmp3,4(0,d_addr) /* store ! */
uxor,nbz tmp1, r0, save
b nullfound2
vshd tmp2,tmp1,tmp3 /* position data ! */
ldwm 4(s_addr),tmp2
stwm tmp3,4(d_addr)
uxor,sbz tmp2,r0,save
b did_mask
nullfound0:
vshd tmp1,tmp2,tmp3 /* delay slot */
uxor,nbz tmp3,r0,save
b,n nullfound
nullfound1:
stbys,b,m tmp3,4(0,d_addr)
b nullfound
vshd tmp2,r0,save /* delay slot */
nullfound2:
uxor,nbz tmp3,r0,save
b,n nullfound
stwm tmp3,4(d_addr)
b nullfound
/* notice that delay slot is in next routine */
first_null0: /* null found in first word of non-aligned (wrt d_addr) */
vshd tmp1,r0,save /* delay slot */
combt,= tmp6,r0,check4
extru save,7,8,tmp4
first_null:
addibt,= -1,tmp6,check3 /* check last 3 bytes of word */
extru save,15,8,tmp4
addibt,=,n -1,tmp6,check2 /* check last 2 bytes */
bv 0(rp) /* null in last byte--store and exit */
stbys,b save, 0(d_addr)
/* Byte-by-byte tail: store non-null bytes of save until the
   terminator, then store the final zero byte and return. */
check4:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
extru,<> save,15,8,tmp4
check3:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
check2:
extru,<> save,23,8,tmp4
bv 0(rp)
stbs,ma tmp4,1(d_addr)
bv 0(rp)
stbs r0,0(d_addr)
done:
EXIT(strcpy)
/* ===== file boundary: stsp/newlib-ia16 — newlib/libc/machine/hppa/setjmp.S (4,191 bytes) ===== */
/* Copyright (c) 1995, 2002 Red Hat Incorporated.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* The name of Red Hat Incorporated may not be used to endorse
* or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Note I don't know an easy way to get the FP version into the
traditional C library and the non-FP version into the soft-float
library. Maybe we'll have to have -msoft-float trigger something
like -DSOFT_FLOAT if this issue ever arises. */
#include "DEFS.h"
#if 0
.SPACE $PRIVATE$
.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
.SPACE $TEXT$
.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
.IMPORT $global$,DATA
.IMPORT $$dyncall,MILLICODE
; gcc_compiled.:
#endif
;-----------------------------------------------------------------------
; int setjmp(jmp_buf env)
; In:   %r26 (arg0) = env
; Out:  %r28 (ret0) = 0
; Saves %r30 (sp), %r2 (rp), the callee-saved GRs %r3-%r18, %r27 (dp)
; and, when FP is defined, %fr12-%fr15 into successive words of env
; using post-modify stores (stwm bumps %r26 by 4 each time).
;-----------------------------------------------------------------------
TEXT_SEGMENT
.align 4
.EXPORT setjmp,ENTRY,PRIV_LEV=3,ARGW0=GR,RTNVAL=GR
setjmp
.PROC
.CALLINFO FRAME=64,NO_CALLS,SAVE_SP,ENTRY_GR=3
.ENTRY
stwm %r30,4(%r26) ; stack pointer
stwm %r2,4(%r26) ; return pointer (resume pc for longjmp)
stwm %r3,4(%r26) ; callee-saved %r3..%r18 follow
stwm %r4,4(%r26)
stwm %r5,4(%r26)
stwm %r6,4(%r26)
stwm %r7,4(%r26)
stwm %r8,4(%r26)
stwm %r9,4(%r26)
stwm %r10,4(%r26)
stwm %r11,4(%r26)
stwm %r12,4(%r26)
stwm %r13,4(%r26)
stwm %r14,4(%r26)
stwm %r15,4(%r26)
stwm %r16,4(%r26)
stwm %r17,4(%r26)
stwm %r18,4(%r26)
stwm %r27,4(%r26) ; data pointer (dp)
#ifdef FP
; jmp_buf may only have a 4 byte alignment, so handle FP stores
; very carefully.
; Each 8-byte FP reg is bounced through an aligned stack slot
; (-16(%r30)) and copied to env as two word stores.
fstds %fr12,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr13,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr14,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr15,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
#endif
bv 0(%r2) ; return to caller
copy %r0,%r28 ; (delay slot) return value 0
.EXIT
.PROCEND
;-----------------------------------------------------------------------
; void longjmp(jmp_buf env, int val)
; In:   %r26 (arg0) = env, %r25 (arg1) = val
; Reloads every register saved by setjmp (same order, post-modify
; loads), then returns val — forced to 1 when val == 0, as required by
; the C standard — from the original setjmp call site via the saved rp.
;-----------------------------------------------------------------------
.align 4
.EXPORT longjmp,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,RTNVAL=GR
longjmp
.PROC
.CALLINFO FRAME=64,NO_CALLS,SAVE_SP,ENTRY_GR=3
.ENTRY
ldwm 4(%r26),%r30 ; stack pointer
ldwm 4(%r26),%r2 ; return pointer
ldwm 4(%r26),%r3 ; callee-saved %r3..%r18 follow
ldwm 4(%r26),%r4
ldwm 4(%r26),%r5
ldwm 4(%r26),%r6
ldwm 4(%r26),%r7
ldwm 4(%r26),%r8
ldwm 4(%r26),%r9
ldwm 4(%r26),%r10
ldwm 4(%r26),%r11
ldwm 4(%r26),%r12
ldwm 4(%r26),%r13
ldwm 4(%r26),%r14
ldwm 4(%r26),%r15
ldwm 4(%r26),%r16
ldwm 4(%r26),%r17
ldwm 4(%r26),%r18
ldwm 4(%r26),%r27 ; data pointer (dp)
#ifdef FP
; Reverse of setjmp: rebuild each double in the aligned stack slot,
; then fldds it back into the FP register.
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr12
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr13
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr14
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr15
#endif
comclr,<> %r0,%r25,%r0 ; if val != 0, nullify next insn
ldi 1,%r25 ; val == 0 -> return 1 instead
bv 0(%r2) ; resume after the setjmp call
copy %r25,%r28 ; (delay slot) return value
.EXIT
.PROCEND
/* ===== file boundary: stsp/newlib-ia16 — newlib/libc/machine/mep/setjmp.S (2,078 bytes) ===== */
#
# Setjmp/longjmp for MeP
#
# DJ Delorie, Red Hat Inc.
#
# 19 32-bit words in the jmpbuf:
# $0
# $1
# ...
# $15
# $pc
# $hi
# $lo
#
# Note that $0 is saved but not restored. It can't be restored
# as it's the return value of setjmp, but we save it in case
# some application wants to see it in the jmp_buf. Ideally,
# we should not need to save anything that is call-clobbered,
# but you never know what the user is going to tell gcc with -f
# options.
.noregerr
.text
.globl setjmp
.type setjmp,@function
# int setjmp(jmp_buf env)
# In:  $1 = env.  Out: $0 = 0.
# Saves $0-$15 at offsets 0..60, $lp (resume pc) at 64, and — when the
# $opt bits indicate the unit is present — $hi at 68 and $lo at 72.
setjmp:
# $1 is the address of the buffer. We return 0 in $0.
sw $0, ($1)
sw $1, 4($1)
sw $2, 8($1)
sw $3, 12($1)
sw $4, 16($1)
sw $5, 20($1)
sw $6, 24($1)
sw $7, 28($1)
sw $8, 32($1)
sw $9, 36($1)
sw $10, 40($1)
sw $11, 44($1)
sw $12, 48($1)
sw $13, 52($1)
sw $14, 56($1)
sw $15, 60($1)
ldc $0, $lp
sw $0, 64($1)
# Probe $opt bits [25:24]; nonzero presumably means $hi/$lo exist on
# this core configuration — TODO confirm against the MeP config used.
ldc $0, $opt
sra $0, 24
and3 $0, $0, 3
beqz $0, sj_skip_hilo
ldc $0, $hi
sw $0, 68($1)
ldc $0, $lo
sw $0, 72($1)
sj_skip_hilo:
mov $0, 0
ret
.globl longjmp
.type longjmp,@function
# void longjmp(jmp_buf env, int val)
# In:  $1 = env, $2 = val (forced to 1 when zero, per the C standard).
# Restores the registers saved by setjmp and returns val from the
# original setjmp call site via the saved $lp.
longjmp:
# $1 is the address of the buffer. $2 is the value setjmp
# returns. We do not faithfully restore $0 or $lp, because
# the act of calling setjmp clobbered those anyway.
bnez $2, rv_not_zero
mov $2, 1
rv_not_zero:
# We restore $sp first so we can save the return value there,
# otherwise we'd need to have another unrestored register.
lw $15, 60($1)
add3 $sp, $sp, -4
sw $2, ($sp)
# Now restore the general registers.
lw $2, 8($1)
lw $3, 12($1)
lw $4, 16($1)
lw $5, 20($1)
lw $6, 24($1)
lw $7, 28($1)
lw $8, 32($1)
lw $9, 36($1)
lw $10, 40($1)
lw $11, 44($1)
lw $12, 48($1)
lw $13, 52($1)
lw $14, 56($1)
# We restore $pc's value to $lp so that we can just ret later.
lw $0, 64($1)
stc $0, $lp
ldc $0, $opt
sra $0, 24
and3 $0, $0, 3
beqz $0, lj_skip_hilo
lw $0, 68($1)
stc $0, $hi
lw $0, 72($1)
stc $0, $lo
lj_skip_hilo:
# Restore $1 from its slot at offset 4 — setjmp stored $1 at 4($1).
# BUG FIX: this previously read 8($1), which is the slot holding the
# saved $2, leaving $1 corrupted after longjmp.
lw $1, 4($1)
# Get the return value off the stack, and restore the stack.
lw $0, ($sp)
add3 $sp, $sp, 4
ret
/* ===== file boundary: stsp/newlib-ia16 — newlib/libc/machine/tic4x/setjmp.S (1,462 bytes) ===== */
/* setjmp/longjmp routines.
*
* Written by Michael Hayes <m.hayes@elec.canterbury.ac.nz>.
*
* The author hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
.sect .text
.global setjmp
.global longjmp
/*
 * int setjmp(jmp_buf env)
 * Saves the preserved registers (r4/r5, FP regs r6/r7, r8 on C4x,
 * ar3-ar7), the caller's return address (popped into r1) and the
 * stack pointer into env, then returns 0 in r0.
 * In the stack-args ABI the env pointer is fetched from the stack;
 * with _REGPARM it presumably already arrives in ar2 — TODO confirm.
 */
setjmp:
pop r1
ldi sp, ar0
#ifndef _REGPARM
ldi *ar0, ar2
#endif
sti r4, *ar2++
sti r5, *ar2++
stf r6, *ar2++
stf r7, *ar2++
#ifdef _TMS320C4x
sti r8, *ar2++
#endif
sti ar3, *ar2++
sti ar4, *ar2++
sti ar5, *ar2++
sti ar6, *ar2++
sti ar7, *ar2++
/* Delayed branch back to the caller: the next three instructions
   (store return address, store sp snapshot, zero the return value)
   execute before the branch takes effect. */
bd r1
sti r1, *ar2++
sti ar0, *ar2
ldi 0, r0
/*
 * void longjmp(jmp_buf env, int val)
 * Reloads the registers saved by setjmp, restores sp, and resumes at
 * the saved return address with r0 = val.  The ldi of val sets the
 * status flags, so "ldiz 1, r0" replaces a zero val with 1 as the C
 * standard requires.
 */
longjmp:
#ifndef _REGPARM
ldi sp, ar0
ldi *-ar0(1), ar2
ldi *-ar0(2), r0
ldiz 1, r0
#else
ldi r2, r0
ldiz 1, r0
#endif
ldi *ar2++, r4
ldi *ar2++, r5
ldf *ar2++, r6
ldf *ar2++, r7
#ifdef _TMS320C4x
ldi *ar2++, r8
#endif
ldi *ar2++, ar3
ldi *ar2++, ar4
ldi *ar2++, ar5
ldi *ar2++, ar6
ldi *ar2++, ar7
ldi *ar2++, r1
/* Restore the stack pointer last, then jump to the saved return
   address (r1) — control resumes just after the setjmp call. */
ldi *ar2, sp
b r1
.end
/* ===== file boundary: stsp/newlib-ia16 — newlib/libc/machine/arc/strncpy.S (3,718 bytes) ===== */
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strncpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch. */
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
; char *strncpy(char *dst, const char *src, size_t n)
; In:  r0 = dst, r1 = src, r2 = n.  Out: r0 = dst.
; Word-at-a-time copy when both pointers are word aligned; a zero byte
; inside word w is detected via (w - 0x01010101) & ~w & 0x80808080
; (r8 holds 0x01010101, r11 its rotation 0x80808080).  When the
; terminator is found before n bytes are written, control tail-calls
; __strncpy_bzero to zero-fill the remainder, as strncpy requires.
; BRand: and a with b, then branch to l if the result is nonzero
; (i.e. some byte of the probed word was zero).
#define BRand(a,b,l) and a,a,b ` brne_s a,0,l
ENTRY (strncpy)
cmp_s r2,8
or r12,r0,r1
bmsk.cc.f r12,r12,1 ; alignment check only taken when n >= 8
brne.d r12,0,.Lbytewise
mov_s r10,r0 ; r10 = running dst cursor; r0 stays the return value
ld_s r3,[r1,0]
mov r8,0x01010101
add r6,r0,r2
sub r6,r6,8 ; r6 = dst limit for the unrolled loop
bbit0.d r1,2,.Loop_start
ror r11,r8 ; r11 = 0x80808080
sub r12,r3,r8
bic_l r12,r12,r3
BRand (r12,r11,.Lr3z)
mov_s r4,r3
ld.a r3,[r1,4]
st.ab r4,[r10,4]
.balign 4
.Loop_start:
brhs r10,r6,.Loop_end
; Main loop: two words (r3, r4) per iteration, each probed for a
; zero byte before being stored.
1:
ld.a r4,[r1,4]
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z2)
st.ab r3,[r10,8]
sub r12,r4,r8
bic r12,r12,r4
BRand (r12,r11,.Lr4z)
ld.a r3,[r1,4]
brlo.d r10,r6,1b
st r4,[r10,-4]
.Loop_end:
add r6,r6,4
brhs r10,r6,.Lastword
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z)
add_s r1,r1,4
st.ab r3,[r10,4]
.Lastword:
; At most a few bytes remain; finish with the byte loop.
sub_s r2,r2,1
b.d .Lstart_charloop
bmsk.f r2,r2,1
.balign 4
nop_s
.Lr3z2: sub_s r1,r1,4
.Lr4z:
.Lr3z:
; A word containing the terminator was found: copy byte-by-byte up to
; and including the NUL, then zero-fill the rest.
.balign 4
.Lr3z_loop:
ldb.ab r3,[r1,1]
brne.d r3,0,.Lr3z_loop
stb.ab r3,[r10,1]
.Lzero_rest:
; __strncpy_bzero requires:
; return value in r0
; zeroing length in r2
; zeroing start address in r3
mov_s r3,r10
add_s r2,r2,r0
b.d __strncpy_bzero
sub_s r2,r2,r3
.balign 4
.Lbytewise:
; Short or unaligned case: plain byte loop.
sub.f r2,r2,1
jcs [blink]
.Lstart_charloop:
mov_s r3,r10
.Lcharloop:
ldb.ab r12,[r1,1]
beq.d .Last_byte
sub.f r2,r2,1
brne.d r12,0,.Lcharloop
stb.ab r12,[r3,1]
b.d __strncpy_bzero
stb.ab r12,[r3,1]
.Last_byte:
j_s.d [blink]
stb_s r12,[r3]
ENDFUNC (strncpy)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
/* ===== file boundary: stsp/newlib-ia16 — newlib/libc/machine/arc/strlen.S (4,143 bytes) ===== */
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined(__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
/* This code is optimized for the ARC601 pipeline without barrel shifter. */
; size_t strlen(const char *s)
; In:  r0 = s.  Out: r0 = length.
; Scans two aligned words per iteration (r2 = even word, r6 = odd
; word).  Zero-byte test: (w - 0x01010101) & ~w, masked by r5 =
; 0x80808080.  The prologue masks off the bytes of the first aligned
; word(s) that lie before s, so they cannot produce false matches.
ENTRY (strlen)
or r3,r0,7 ; r3 = address of last byte of s's first 8-byte block
ld r2,[r3,-7]
ld.a r6,[r3,-3]
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
bmsk.f 0,r0,1
mov_s r1,31
add3_s r1,r1,r0
bmsk r7,r4,r1 ; r7 = 0x01010101 restricted to in-string bytes
xor.ne r7,r7,r4
btst_s r0,2
ror r5,r4 ; r5 = 0x80808080
sub r1,r2,r7
bic_s r1,r1,r2
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#else /* BIG ENDIAN */
add.f r1,r4,30 ; r1 mod 31 := -1; clear carry
ror r5,r4 ; r5 = 0x80808080
sub3 r7,r1,r0
btst_s r0,2
sub r1,r2,r4
bic_s r1,r1,r2
bmsk r1,r1,r7 ; drop matches from bytes before the string start
sub r12,r6,r4
bic r12,r12,r6
bmsk.ne r12,r12,r7
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#endif /* ENDIAN */
; Steady state: 8 bytes per iteration, loop while neither word has a
; zero byte.
.Loop:
ld_s r2,[r3,4]
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4
bic_s r1,r1,r2
sub r12,r6,r4
bic r12,r12,r6
or_s r12,r12,r1
and r12,r12,r5
breq_s r12,0,.Loop
; Epilogue: decide which of the two words held the NUL, then which
; byte within it, and convert r3 back into a length.
.Lend:
and.f r1,r1,r5
sub.ne r3,r3,4
#ifdef __LITTLE_ENDIAN__
mov.eq r1,r12
btst_s r1,7
sub r0,r3,r0
add.eq r0,r0,1
bmsk.f 0,r1,15
add.eq r0,r0,1
bmsk.f 0,r1,23
j_s.d [blink]
add.eq r0,r0,1
#else /* BIG ENDIAN */
#ifdef __OPTIMIZE_SIZE__
1: ldb_s r1,[r3]
breq_s r1,0,0f
ldb.a r1,[r3,1]
breq_s r1,0,0f
ldb.a r1,[r3,1]
breq_s r1,0,0f
add_s r3,r3,1
0: j_s.d [blink]
sub r0,r3,r0
#define SPECIAL_EARLY_END
.Learly_end:
mov_s r3,r0
b_s 1b
#elif 0 /* Need more information about pipeline to assess if this is faster. */
mov.eq r2,r6
and r2,r2,r5
sub1 r2,r4,r2
mov.eq r1,r12
bic.f r1,r1,r2
sub r0,r3,r0
add.pl r0,r0,1
btst.pl r1,23
add.eq r0,r0,1
btst.eq r1,15
j_s.d [blink]
add.eq r0,r0,1
#else /* !__OPTIMIZE_SIZE__ */
/* Need carry clear here. */
mov.eq r2,r6
1: bmsk r1,r2,23
breq r1,r2,0f
bmsk r2,r1,15
breq.d r1,r2,0f
add_s r3,r3,1
cmp r2,0x100
add_s r3,r3,2
0: j_s.d [blink]
sbc r0,r3,r0
#define SPECIAL_EARLY_END
.Learly_end:
; NUL found in the very first (partial) word pair: resynthesize the
; word contents so the shared .Lend epilogue computes the length.
sub_s.ne r1,r1,r1
mov_s r12,0
bset r12,r12,r7
sub1 r2,r2,r12
b.d .Lend
sub1.ne r6,r6,r12
#endif /* !__OPTIMIZE_SIZE__ */
#endif /* ENDIAN */
#ifndef SPECIAL_EARLY_END
.balign 4
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1
#endif /* !SPECIAL_EARLY_END */
ENDFUNC (strlen)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__*/
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
/* ===== file boundary: stsp/newlib-ia16 — newlib/libc/machine/arc/strchr.S (5,236 bytes) ===== */
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* In order to search for a zero in a W, we calculate
X := (W - 0x01010101) & ~W & 0x80808080;
In the little endian case:
If no byte in W is zero, X will be zero; otherwise, the least significant
byte of X which is nonzero indicates the least significant byte of W that
is zero.
In the big endian case:
X will be zero iff no byte in W is zero.
If X is nonzero, to find out which is the most significant zero byte
in W, we calculate:
Y := ~(((W | 0x80808080) - 0x01010101) | W) & 0x80808080;
Each byte in Y is 0x80 if the corresponding byte in
W is zero, otherwise that byte of Y is 0. */
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
; char *strchr(const char *s, int c)
; In:  r0 = s, r1 = c.  Out: r0 = pointer to first occurrence of c
;      (including the terminating NUL when c == 0), or 0 if absent.
; Replicates c into all four bytes of r5 — built with add2/add3 chains
; because this variant targets cores without a barrel shifter — then
; scans word-at-a-time.  Each word r2 is probed both for a zero byte
; and, via r6 = r2 ^ r5, for a byte equal to c (a matching byte makes
; the corresponding byte of r6 zero).  r3 = 0x01010101, r4 = 0x80808080.
ENTRY (strchr)
bmsk.f r2,r0,1 ; r2 = misalignment of s within its word
mov_s r3,0x01010101
extb_s r1,r1
mov r8,0
add3 r5,r8,r1
add3 r5,r8,r5
add2 r5,r1,r5
add3 r4,r8,r5
add3 r4,r8,r4
add3 r4,r8,r4
add3 r4,r8,r4
beq.d .Laligned
add3 r4,r8,r4
; Unaligned prologue: round s down to a word boundary and build a
; mask (r7) that keeps bytes before the string start from matching.
sub_s r0,r0,r2
#ifdef __LITTLE_ENDIAN__
add3.f r2,-1,r2
bmsk r7,r3,r2
rsub.pl r7,r7,r3
#else
mov_s r12,31
sub3 r2,r12,r2
bmsk r7,r3,r2
#endif
ld_s r2,[r0]
add1 r5,r5,r4
ror r4,r3
sub r12,r2,r7
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0_ua
xor r6,r2,r5
ld.a r2,[r0,4]
sub r12,r6,r7
bic r12,r12,r6
#ifdef __LITTLE_ENDIAN__
and.f r7,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop
and r12,r12,r4
b.d .Lfound_char_ua
btst r7,7
#else
and.f r8,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop
and r12,r12,r4
bic r12,r7,r6
bic r2,r3,r12
sub1 r2,r3,r2
sub_s r0,r0,4
b.d .Lfound_char_ua
bic.f r2,r8,r2
#endif
.balign 4
.Laligned:
ld_s r2,[r0]
add1 r5,r5,r4
ror r4,r3
sub r12,r2,r3
bic_s r12,r12,r2
and r12,r12,r4
; Main loop: leave when the current word holds a zero byte (r12 != 0
; at loop top) or a byte equal to c (the and.f of r7 below).
.Loop:
brne.d r12,0,.Lfound0
xor r6,r2,r5
ld.a r2,[r0,4]
sub r12,r6,r3
bic r12,r12,r6
and.f r7,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop
and r12,r12,r4
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
btst r7,7
.Lfound_char_ua:
sub_s r0,r0,4
add.eq r0,r0,1
btst.eq r7,15
add.eq r0,r0,1
btst.eq r7,23
j_s.d [blink]
add.eq r0,r0,1
.balign 4
.Lfound0_ua:
mov_l r3,r7
.Lfound0:
; A zero byte was seen; decide whether a match for c occurs at or
; before it, and compute the exact byte address (or return 0).
sub r2,r6,r3
bic r2,r2,r6
and r2,r2,r4
or r3,r12,r2
sub_s r12,r3,1
xor_s r3,r3,r12
cmp 0xffff,r3
; cmp uses limm ; ARC600 would do: asl.f 0,r3,9
tst_s r2,r3
mov.eq r0,0
add.mi r0,r0,1
btst.ne r3,15
j_s.d [blink]
adc.ne r0,r0,1
#else /* BIG ENDIAN */
.Lfound_char:
and r2,r6,r3
sub1 r2,r3,r2
sub_s r0,r0,4
bic.f r2,r7,r2
.Lfound_char_ua:
add.pl r0,r0,1
jmi.d [blink]
btst_s r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1
; N.B. if we searched for a char zero and found it in the MSB,
; and ignored matches are identical, we will take the early exit
; like for an ordinary found zero - except for the extra stalls at jhi -
; but still compute the right result.
.Lfound0_ua:
mov_s r3,r7
.Lfound0:
and_s r2,r2,r3
sub1 r2,r3,r2
or r7,r6,r4
bic_s r12,r12,r2
sub r2,r7,r3
or r2,r2,r6
bic r2,r4,r2
cmp_s r12,r2
mov.hi r0,0
btst.ls r2,31
jhi.d [blink]
add.eq r0,r0,1
btst.eq r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
/* ===== file boundary: stsp/newlib-ia16 — newlib/libc/machine/arc/strchr-bs.S (5,286 bytes) ===== */
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* In order to search for a zero in a W, we calculate
X := (W - 0x01010101) & ~W & 0x80808080;
In the little endian case:
If no byte in W is zero, X will be zero; otherwise, the least significant
byte of X which is nonzero indicates the least significant byte of W that
is zero.
In the big endian case:
X will be zero iff no byte in W is zero.
If X is nonzero, to find out which is the most significant zero byte
in W, we calculate:
Y := ~(((W | 0x80808080) - 0x01010101) | W) & 0x80808080;
Each byte in Y is 0x80 if the the corresponding byte in
W is zero, otherwise that byte of Y is 0. */
#if defined (__ARC_BARREL_SHIFTER__) && \
(defined (__ARC600__) || (!defined (__ARC_NORM__) && !defined (__ARC601__)))
;-----------------------------------------------------------------------
; char *strchr (const char *s /* r0 */, int c /* r1 */)
; ARC variant for cores with a barrel shifter (see #if guard above).
; Scans a word at a time using the zero-detect formula documented at the
; top of this file:  X := (W - 0x01010101) & ~W & 0x80808080.
; Register roles (established below):
;   r3  = 0x01010101        r4 = r3 rotated right by one = 0x80808080
;   r5  = c replicated into all four bytes
;   r2  = current word      r6 = r2 ^ r5 (has a zero byte where r2 == c)
;   r12 = zero-byte detector value
; ARC syntax notes: ".d" = branch with delay slot (next insn executes
; either way), ".f" = set flags, "_s"/"_l" = short/long encodings,
; conditionalized ops (.eq/.hi/...) execute only when the flags match.
; Returns the match pointer in r0, or 0; return address in blink.
;-----------------------------------------------------------------------
ENTRY (strchr)
bmsk.f r2,r0,1                  ; r2 = s & 3 (byte misalignment), set Z
mov_s r3,0x01010101
extb_s r1,r1                    ; c &= 0xff
asl r5,r1,8
or r5,r5,r1                     ; r5 = c in both low bytes
beq.d .Laligned                 ; aligned pointer: skip the prologue
asl r4,r5,16                    ; (delay slot) r4 = c in the two high bytes
; --- unaligned prologue: round s down to a word boundary and mask off
; --- the bytes that precede the true start of the string.
sub_s r0,r0,r2                  ; align r0 downward
asl_s r2,r2,3                   ; r2 = misalignment in bits
#ifdef __LITTLE_ENDIAN__
asl r7,r3,r2                    ; r7 = 0x01010101 with leading bytes cleared
#else
lsr r7,r3,r2
#endif
ld_s r2,[r0]                    ; first (partial) word
or r5,r5,r4                     ; r5 = c replicated in all four bytes
ror r4,r3                       ; r4 = 0x80808080
sub r12,r2,r7                   ; zero-detect with the masked-constant r7 so
bic_s r12,r12,r2                ; bytes before the string start are ignored
and r12,r12,r4
brne.d r12,0,.Lfound0_ua        ; NUL in the first word
xor r6,r2,r5                    ; (delay slot) r6 = match detector input
ld.a r2,[r0,4]                  ; pre-load next word, advance r0
sub r12,r6,r7                   ; char-match detect on the first word
bic r12,r12,r6
#ifdef __LITTLE_ENDIAN__
and.f r7,r12,r4                 ; r7 = match bits of first word
sub r12,r2,r3                   ; start zero-detect on second word
bic_s r12,r12,r2
beq.d .Loop                     ; no match in first word: enter main loop
and r12,r12,r4                  ; (delay slot)
b.d .Lfound_char_ua
btst r7,7                       ; (delay slot) test lowest match byte
#else
and.f r8,r12,r4                 ; r8 = match bits of first word
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop                     ; no match in first word: enter main loop
and r12,r12,r4                  ; (delay slot)
bic r12,r7,r6                   ; big endian: adjust for carry-propagation
asl_s r12,r12,7                 ; (see the "N.B." note further down)
and.f r2,r8,r12
b.d .Lfound_char_ua
sub_s r0,r0,4                   ; (delay slot) undo the ld.a advance
#endif
.balign 4
.Laligned:
ld_s r2,[r0]
or r5,r5,r4                     ; r5 = c in all four bytes
ror r4,r3                       ; r4 = 0x80808080
sub r12,r2,r3                   ; zero-detect on first word
bic_s r12,r12,r2
and r12,r12,r4
; --- main loop: r12 = zero detector of the word just checked;
; --- r2 is the next word (loaded one iteration ahead).
.Loop:
brne.d r12,0,.Lfound0           ; NUL byte in current word
xor r6,r2,r5                    ; (delay slot) match detector input
ld.a r2,[r0,4]                  ; pre-load next word, advance r0
sub r12,r6,r3                   ; char-match detect (zero byte of r6)
bic r12,r12,r6
and.f r7,r12,r4
sub r12,r2,r3                   ; zero-detect for the next iteration
bic_s r12,r12,r2
beq.d .Loop                     ; loop while no char match
and r12,r12,r4                  ; (delay slot)
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
btst r7,7                       ; is byte 0 the match?
.Lfound_char_ua:
sub_s r0,r0,4                   ; back to the word that held the match
add.eq r0,r0,1                  ; walk past non-matching low bytes:
btst.eq r7,15                   ; each btst.eq tests the next byte only if
add.eq r0,r0,1                  ; all previous ones failed
btst.eq r7,23
j_s.d [blink]
add.eq r0,r0,1                  ; (delay slot)
.balign 4
.Lfound0_ua:
mov_l r3,r7                     ; unaligned entry: r3 = prologue constant
.Lfound0:
; NUL seen; check whether c occurs at or before it in the same word.
sub r2,r6,r3                    ; r2 = match detector bits
bic r2,r2,r6
and r2,r2,r4
or r3,r12,r2                    ; combine NUL bits and match bits
sub_s r12,r3,1
xor_s r3,r3,r12                 ; r3 = mask up to first NUL-or-match bit
tst_s r2,r3                     ; Z clear iff the first hit is a match
lsr r2,r3,31                    ; compute byte index of the hit from the
lsr r12,r3,16                   ; mask (branch-free arithmetic below)
jeq.d [blink]
mov.eq r0,0                     ; (delay slot) NUL first -> return NULL
lsr r3,r3,8
sub_s r2,r2,r12
sub_s r2,r2,r3
bmsk_s r2,r2,1                  ; r2 = byte offset of the match
j_s.d [blink]
add_s r0,r0,r2                  ; (delay slot) r0 = &word + offset
#else /* BIG ENDIAN */
.Lfound_char:
asl r6,r6,7                     ; compensate borrow-propagation: a byte is a
sub_s r0,r0,4                   ; true match only if its own bit 0 was clear
bic.f r2,r7,r6
.Lfound_char_ua:
add.pl r0,r0,1                  ; scan from the most significant byte down
jmi.d [blink]
btst_s r2,23                    ; (delay slot)
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1                  ; (delay slot)
; N.B. if we searched for a char zero and found it in the MSB,
; and ignored matches are identical, we will take the early exit
; like for an ordinary found zero - except for the extra stalls at jhi -
; but still compute the right result.
.Lfound0_ua:
mov_s r3,r7                     ; unaligned entry: r3 = prologue constant
.Lfound0:
; NUL seen; decide (big endian) whether c appears before it.
asl_s r2,r2,7
or r7,r6,r4
bic_s r12,r12,r2                ; exact NUL indicator (borrow-corrected)
sub r2,r7,r3
or r2,r2,r6
bic r2,r4,r2                    ; exact match indicator
cmp r12,r2                      ; NUL before match?
mov.hi r0,0                     ; yes -> return NULL
btst.ls r2,31                   ; else locate match byte, MSB first
jhi.d [blink]
add.eq r0,r0,1                  ; (delay slot)
btst.eq r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1                  ; (delay slot)
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* __ARC_BARREL_SHIFTER__ &&
(__ARC600__ || (!__ARC_NORM__ && !__ARC601__)) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,419
|
newlib/libc/machine/arc/memset.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) \
|| (!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
/* To deal with alignment/loop issues, SMALL must be at least 2. */
#define SMALL 8 /* Even faster if aligned. */
.global __strncpy_bzero
.hidden __strncpy_bzero
/* __strncpy_bzero provides the following interface to strncpy:
r0: return value
r2: zeroing length
r3: zeroing start address
No attempt is made here for __strncpy_memset to speed up aligned
cases, because the copying of a string presumably leaves start address
and length alignment for the zeroing randomly distributed. */
;-----------------------------------------------------------------------
; void *memset (void *dst /* r0 */, int c /* r1 */, size_t n /* r2 */)
; Variant for ARC601 / cores without barrel shifter (see guard above).
; Also exports __strncpy_bzero (r3 = start, r2 = length, r1 forced to 0,
; r0 = caller's return value) as documented in the comment block above.
; Register roles: r3 = running store pointer, r6 = end pointer,
; r1 = fill byte, later widened to a full fill word via stack stores.
; ".d" = branch with delay slot; ".f" = set flags; "_s" = short insn.
; Returns dst in r0 (never clobbered after the mov_s below).
;-----------------------------------------------------------------------
ENTRY (memset)
brls.d r2,SMALL,.Ltiny          ; n <= SMALL: use the store ladder
mov_s r3,r0                     ; (delay slot) r3 = working copy of dst
or r12,r0,r2
bmsk.f r12,r12,1                ; Z set iff dst and n are both 4-aligned
breq_s r1,0,.Lbzero             ; zero fill: no byte replication needed
mov r4,0
; Replicate the fill byte into a 32-bit word by storing it four times
; into a scratch slot just below sp and reloading it as a word.
stb.a r1,[sp,-4]
stb r1,[sp,1]
stb r1,[sp,2]
stb r1,[sp,3]
ld.ab r1,[sp,4]                 ; r1 = fill byte in all four bytes; sp restored
.Lbzero:
beq.d .Laligned                 ; fully aligned: straight to the word loop
.Lbzero2:
add r6,r2,r3                    ; (delay slot) r6 = one past the end
.Lnot_tiny:
; Unaligned case: patch both edges with byte/halfword stores, then let
; the aligned word loop cover the middle (overlapping stores are fine).
stb r1,[r6,-1]                  ; last byte
bclr r12,r6,0
stw r1,[r12,-2]                 ; last aligned halfword
stb.ab r1,[r3,1]                ; first byte, advance r3
bclr_s r3,r3,0
stw.ab r1,[r3,2]                ; first halfword, advance r3
bclr_s r3,r3,1                  ; r3 now 4-byte aligned
.Laligned: ; This code address should be aligned for speed.
sub r6,r6,8
brlo.d r6,r3,.Loop_end          ; fewer than 8 bytes left for the loop
sub r6,r6,8                     ; (delay slot) r6 = end - 16
3:
st_l r1,[r3,4]                  ; two words per iteration
brhs.d r6,r3,3b
st.ab r1,[r3,8]                 ; (delay slot) store + advance r3 by 8
.Loop_end:
bic r12,r6,3
j_s.d [blink]
st_s r1,[r12,12]                ; (delay slot) final word at aligned end
.balign 4
__strncpy_bzero:
brhi.d r2,8,.Lbzero2            ; long tail: reuse the unaligned path
mov_s r1,0                      ; (delay slot) fill value is zero
.Ltiny:
; Computed branch into the stb_s ladder below: skip as many of the
; short stores as exceed n (each stb_s is presumably a 2-byte insn,
; matching the sub1 scaling by 2 -- see the SMALL note above).
sub_s r2,r2,11
sub1 r12,pcl,r2
j_s [r12]
stb_s r1,[r3,7]
stb_s r1,[r3,6]
stb_s r1,[r3,5]
stb_s r1,[r3,4]
stb_s r1,[r3,3]
stb_s r1,[r3,2]
stb_s r1,[r3,1]
stb_s r1,[r3]
j_s [blink]
ENDFUNC (memset)
#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,261
|
newlib/libc/machine/arc/memcpy.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || \
(!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
/* Adapted from memcpy-bs.S. */
/* We assume that most sources and destinations are aligned, and
that also lengths are mostly a multiple of four, although to a lesser
extent. */
;-----------------------------------------------------------------------
; void *memcpy (void *dst /* r0 */, const void *src /* r1 */,
;               size_t n /* r2 */)
; Variant for ARC601 / no barrel shifter, no ARCHS (see guard above);
; adapted from memcpy-bs.S.  Requires n > 0 handling (checked) and that
; dst/src share alignment for the fast path (bmsk test below).
; Register roles: r5 = running dst (r0 preserved as return value),
; r6 = loop limit, r12/r3 = data pipeline (load runs one ahead of store).
; ".d" = branch with delay slot; "ld.a"/"st.ab" = address write-back.
;-----------------------------------------------------------------------
ENTRY (memcpy)
or r3,r0,r1
bmsk.f 0,r3,1                   ; Z set iff dst and src are both 4-aligned
breq_s r2,0,.Lnil               ; n == 0: nothing to do
mov_s r5,r0                     ; r5 = working dst; keep r0 as return value
bne.d .Lcopy_bytewise           ; unaligned: byte loop
add r6,r0,r2                    ; (delay slot) r6 = dst end
sub_s r3,r2,1
ld_s r12,[r1,0]                 ; prime the load pipeline
bbit0.d r3,2,.Lnox4             ; (n-1) bit2 clear: skip the odd word
sub r6,r6,8                     ; (delay slot) adjust loop limit
st.ab r12,[r5,4]                ; copy one word to make the count even
ld.a r12,[r1,4]
.Lnox4:
brlo r2,9,.Lendloop             ; too short for the unrolled loop
.Lnox4a:
ld_s r3,[r1,4]                  ; two words per iteration, loads run
st.ab r12,[r5,8]                ; one word ahead of the stores
ld.a r12,[r1,8]
brlo.d r5,r6,.Lnox4a
st r3,[r5,-4]                   ; (delay slot)
.Lendloop:
; Final (possibly partial) word: merge the pending word in r12 with the
; bytes already at [r5] beyond n, using a bmsk-selected byte mask.
#ifdef __LITTLE_ENDIAN__
ld r3,[r5,0]
add3 r2,-1,r2                   ; r2 = valid-bit count - 1 for bmsk
; uses long immediate
xor_s r12,r12,r3                ; classic masked-merge:
bmsk r12,r12,r2                 ; keep low (valid) bytes from r12,
xor_s r12,r12,r3                ; high bytes from the old memory word
#else /* BIG ENDIAN */
bmsk_s r2,r2,1                  ; r2 = n & 3
breq_s r2,0,.Last_store         ; whole word valid: store as-is
ld r3,[r5,0]
sub3 r2,31,r2                   ; mask position counted from the top
; uses long immediate
xor_s r3,r3,r12
bmsk r3,r3,r2
xor_s r12,r12,r3
#endif /* ENDIAN */
.Last_store:
j_s.d [blink]
st r12,[r5,0]                   ; (delay slot) write the merged word
.Lnil:
j_s [blink]                     ; n == 0: return dst unchanged
.balign 4
.Lcopy_bytewise:
; Unaligned path: same pipelined structure, one byte at a time.
ldb_s r12,[r1,0]
bbit1.d r2,0,.Lnox1             ; odd n: count already balanced
sub r6,r6,2                     ; (delay slot)
stb.ab r12,[r5,1]               ; copy one byte to make the count odd
ldb.a r12,[r1,1]
.Lnox1:
brlo r2,3,.Lendbloop
.Lnox1a:
ldb_s r3,[r1,1]                 ; two bytes per iteration
stb.ab r12,[r5,2]
ldb.a r12,[r1,2]
brlo.d r5,r6,.Lnox1a
stb r3,[r5,-1]                  ; (delay slot)
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0]                  ; (delay slot) final pending byte
ENDFUNC (memcpy)
#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 4,404
|
newlib/libc/machine/arc/strcmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* This is optimized primarily for the ARC700.
It would be possible to speed up the loops by one cycle / word
respective one cycle / byte by forcing double source 1 alignment, unrolling
by a factor of two, and speculatively loading the second word / byte of
source 1; however, that would increase the overhead for loop setup / finish,
and strcmp might often terminate early. */
#ifndef __ARCHS__
;-----------------------------------------------------------------------
; int strcmp (const char *s1 /* r0 */, const char *s2 /* r1 */)
; Optimized primarily for ARC700 (see note above).  Word-at-a-time
; compare when both pointers are 4-aligned; byte loop otherwise.
; The word path returns the *sign* of the difference (0, 1, or a
; negative value built with bset bit 31), not the exact byte delta;
; the byte loop returns r2 - r3 directly.  Both satisfy the strcmp
; contract (<0, 0, >0).
; Register roles in the word loop: r12 = 0x01010101, r5 = 0x80808080,
; r2/r3 = current words of s1/s2, r4 = NUL detector of r2.
;-----------------------------------------------------------------------
ENTRY (strcmp)
or r2,r0,r1
bmsk_s r2,r2,1                  ; low two bits of both pointers
brne_l r2,0,.Lcharloop          ; any misalignment: byte loop
mov_s r12,0x01010101
ror r5,r12                      ; r5 = 0x01010101 ror 1 = 0x80808080
.Lwordloop:
ld.ab r2,[r0,4]
ld.ab r3,[r1,4]
nop_s
sub r4,r2,r12                   ; NUL detect on the s1 word:
bic r4,r4,r2                    ; (W - 0x01010101) & ~W & 0x80808080
and r4,r4,r5
brne_l r4,0,.Lfound0            ; s1 word contains a NUL
breq r2,r3,.Lwordloop           ; words equal: keep scanning
; Words differ, with no NUL in the s1 word.
#ifdef __LITTLE_ENDIAN__
xor r0,r2,r3 ; mask for difference
sub_s r1,r0,1
bic_s r0,r0,r1 ; mask for least significant difference bit
sub r1,r5,r0
xor r0,r5,r1 ; mask for least significant difference byte
and_s r2,r2,r0                  ; isolate the first differing byte so the
and_s r3,r3,r0                  ; compare below ignores later bytes
#endif /* LITTLE ENDIAN */
cmp_s r2,r3
mov_s r0,1                      ; r2 > r3 -> +1
j_s.d [blink]
bset.lo r0,r0,31                ; (delay slot) r2 < r3 -> negative
.balign 4
#ifdef __LITTLE_ENDIAN__
.Lfound0:
; NUL in the s1 word: fold the NUL indicator into the difference mask
; so the terminator itself participates in the comparison.
xor r0,r2,r3 ; mask for difference
or r0,r0,r4 ; or in zero indicator
sub_s r1,r0,1
bic_s r0,r0,r1 ; mask for least significant difference bit
sub r1,r5,r0
xor r0,r5,r1 ; mask for least significant difference byte
and_s r2,r2,r0
and_s r3,r3,r0
sub.f r0,r2,r3                  ; 0 if equal through the NUL
mov.hi r0,1
j_s.d [blink]
bset.lo r0,r0,31                ; (delay slot)
#else /* BIG ENDIAN */
/* The zero-detection above can mis-detect 0x01 bytes as zeroes
because of carry-propagateion from a lower significant zero byte.
We can compensate for this by checking that bit0 is zero.
This compensation is not necessary in the step where we
get a low estimate for r2, because in any affected bytes
we already have 0x00 or 0x01, which will remain unchanged
when bit 7 is cleared. */
.balign 4
.Lfound0:
#ifndef __ARC601__
lsr r0,r4,8
lsr_s r1,r2
bic_s r2,r2,r0 ; get low estimate for r2 and get ...
bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
cmp_s r3,r2 ; ... be independent of trailing garbage
or_s r2,r2,r0 ; likewise for r3 > r2
bic_s r3,r3,r0
rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
cmp_s r2,r3
j_s.d [blink]
bset.lo r0,r0,31                ; (delay slot) set sign for r2 < r3
#else /* __ARC601__ */
/* Fall through to .Lcharloop. */
sub_s r0,r0,4                   ; rewind both pointers to the word that
sub_s r1,r1,4                   ; held the NUL and recompare bytewise
#endif /* __ARC601__ */
#endif /* ENDIAN */
.balign 4
.Lcharloop:
; Byte-at-a-time compare; terminates on NUL in s1 or first mismatch.
ldb.ab r2,[r0,1]
ldb.ab r3,[r1,1]
nop_s
breq_l r2,0,.Lcmpend
breq r2,r3,.Lcharloop
.Lcmpend:
j_s.d [blink]
sub r0,r2,r3                    ; (delay slot) exact byte difference
ENDFUNC (strcmp)
#endif /* !__ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 4,537
|
newlib/libc/machine/arc/strchr-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
/* ARC700 has a relatively long pipeline and branch prediction, so we want
to avoid branches that are hard to predict. On the other hand, the
presence of the norm instruction makes it easier to operate on whole
words branch-free. */
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
;-----------------------------------------------------------------------
; char *strchr (const char *s /* r0 */, int c /* r1 */)
; Variant for ARC700/EM/HS with norm + barrel shifter (see guard above):
; uses the norm (count-leading-redundant-sign-bits) instruction to find
; the hit byte branch-free instead of the btst chains of strchr-bs.S.
; Register roles: r3 = 0x01010101, r4 = 0x80808080 (r3 ror 1),
; r5 = c replicated into all four bytes, r2 = current word,
; r6 = r2 ^ r5 (zero byte where r2 == c), r12/r7 = detector values.
; ".d" = delay-slot branch; the odd code-alignment remarks below are
; deliberate (see the original inline comments).
;-----------------------------------------------------------------------
ENTRY (strchr)
extb_s r1,r1                    ; c &= 0xff
asl r5,r1,8
bmsk r2,r0,1                    ; r2 = s rounded down... (low bits kept)
or r5,r5,r1                     ; r5 = c in both low bytes
mov_s r3,0x01010101
breq.d r2,r0,.Laligned          ; pointer already 4-aligned?
asl r4,r5,16                    ; (delay slot) c in the two high bytes
; --- unaligned prologue: align r0 down, mask bytes before the start.
sub_s r0,r0,r2
asl r7,r2,3                     ; misalignment in bits
ld_s r2,[r0]
#ifdef __LITTLE_ENDIAN__
asl r7,r3,r7                    ; r7 = 0x01010101 minus the leading bytes
#else
lsr r7,r3,r7
#endif
or r5,r5,r4                     ; r5 = c replicated in all four bytes
ror r4,r3                       ; r4 = 0x80808080
sub r12,r2,r7                   ; NUL detect with masked constant
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0_ua        ; NUL in first word
xor r6,r2,r5                    ; (delay slot) match detector input
ld.a r2,[r0,4]                  ; pre-load next word, advance r0
sub r12,r6,r7                   ; match detect on first word
bic r12,r12,r6
#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b_l .Lfound_char ; Likewise this one.
#else
and r12,r12,r4
breq_l r12,0,.Loop ; For speed, we want this branch to be unaligned.
lsr_s r12,r12,7                 ; big endian borrow-propagation fixup
bic r2,r7,r6
b.d .Lfound_char_b
and_s r2,r2,r12                 ; (delay slot)
#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
or r5,r5,r4                     ; r5 = c in all four bytes
ror r4,r3                       ; r4 = 0x80808080
; /* ... so that this code address is aligned, for itself and ... */
.Loop:
sub r12,r2,r3                   ; NUL detect on current word
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0
xor r6,r2,r5                    ; (delay slot) match detector input
ld.a r2,[r0,4]                  ; pre-load next word, advance r0
sub r12,r6,r3                   ; match detect
bic r12,r12,r6
and r7,r12,r4
breq r7,0,.Loop /* ... so that this branch is unaligned. */
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
sub r3,r7,1                     ; isolate lowest set match bit
bic r3,r3,r7
norm r2,r3                      ; leading-redundant-bit count -> bit index
sub_s r0,r0,1
asr_s r2,r2,3                   ; bit index -> byte count from the top
j_l.d [blink]
sub_s r0,r0,r2                  ; (delay slot) r0 = match address
.balign 4
.Lfound0_ua:
mov_l r3,r7                     ; unaligned entry: r3 = prologue constant
.Lfound0:
; NUL found; decide branch-free whether c occurs at or before it.
sub r3,r6,r3                    ; match detector bits
bic r3,r3,r6
and r2,r3,r4
or_s r12,r12,r2                 ; NUL bits | match bits
sub_s r3,r12,1
bic_s r3,r3,r12                 ; lowest hit bit
norm r3,r3                      ; position of that hit
add_s r0,r0,3
asr_s r12,r3,3
asl.f 0,r2,r3                   ; N flag: was the first hit a match?
sub_s r0,r0,r12
j_s.d [blink]
mov.pl r0,0                     ; (delay slot) NUL first -> NULL
#else /* BIG ENDIAN */
.Lfound_char:
lsr r7,r7,7                     ; borrow-propagation fixup (bit0 check)
bic r2,r7,r6
.Lfound_char_b:
norm r2,r2                      ; index of most significant match byte
sub_s r0,r0,4
asr_s r2,r2,3
j_l.d [blink]
add_s r0,r0,r2                  ; (delay slot) r0 = match address
.Lfound0_ua:
mov_s r3,r7                     ; unaligned entry: r3 = prologue constant
.Lfound0:
asl_s r2,r2,7
or r7,r6,r4
bic_s r12,r12,r2                ; exact NUL indicator
sub r2,r7,r3
or r2,r2,r6
bic r12,r2,r12
bic.f r3,r4,r12                 ; exact match indicator
norm r3,r3
add.pl r3,r3,1
asr_s r12,r3,3                  ; byte index of the first hit
asl.f 0,r2,r3                   ; N flag: match or NUL first?
add_s r0,r0,r12
j_s.d [blink]
mov.mi r0,0                     ; (delay slot) NUL first -> NULL
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
&& __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 6,775
|
newlib/libc/machine/arc/memcpy-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARCHS__)
#ifdef __LITTLE_ENDIAN__
# define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; <<
# define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >>
# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM
# define MERGE_2(RX,RY,IMM)
# define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM
#else
# define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >>
# define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; <<
# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; <<
# define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; <<
# define EXTRACT_1(RX,RY,IMM) lsr RX, RY, IMM
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08
#endif
#ifdef __ARC_LL64__
# define PREFETCH_READ(RX) prefetch [RX, 56]
# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
# define ZOLSHFT 5
# define ZOLAND 0x1F
#else
# define PREFETCH_READ(RX) prefetch [RX, 28]
# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
# define LOADX(DST,RX) ld.ab DST, [RX, 4]
# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
# define ZOLSHFT 4
# define ZOLAND 0xF
#endif
;-----------------------------------------------------------------------
; void *memcpy (void *dst /* r0 */, const void *src /* r1 */,
;               size_t n /* r2 */)
; ARC HS (ARCv2) variant using zero-overhead loops (lp_count / lpnz)
; and prefetch.  Strategy: byte-copy until dst is 4-aligned, then
; dispatch on src alignment: CASE 0 copies in LOADX/STOREX units
; (64-bit pairs with __ARC_LL64__, else words); CASES 1-3 read aligned
; words from src and realign them into dst words with the endian-aware
; SHIFT_*/MERGE_*/EXTRACT_* macros defined above.  r3 is the working
; dst pointer so r0 survives as the return value.  "lpnz @label" runs
; the following block lp_count times (skipped when lp_count is 0);
; "jz.d" = jump-if-zero with delay slot.
;-----------------------------------------------------------------------
ENTRY (memcpy)
prefetch [r1] ; Prefetch the read location
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
; if size <= 8
cmp r2, 8
bls.d @.Lsmallchunk
mov.f lp_count, r2
and.f r4, r0, 0x03              ; r4 = dst misalignment
rsub lp_count, r4, 4            ; bytes needed to reach alignment
lpnz @.Laligndestination
; LOOP BEGIN
ldb.ab r5, [r1,1]
sub r2, r2, 1
stb.ab r5, [r3,1]
.Laligndestination:
; Check the alignment of the source
and.f r4, r1, 0x03
bnz.d @.Lsourceunaligned
; CASE 0: Both source and destination are 32bit aligned
; Convert len to Dwords, unfold x4
lsr.f lp_count, r2, ZOLSHFT
lpnz @.Lcopy32_64bytes
; LOOP START
LOADX (r6, r1)
PREFETCH_READ (r1)
PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
STOREX (r6, r3)
STOREX (r8, r3)
STOREX (r10, r3)
STOREX (r4, r3)
.Lcopy32_64bytes:
and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes
.Lsmallchunk:
; Shared byte-copy tail (also the whole body when n <= 8).
lpnz @.Lcopyremainingbytes
; LOOP START
ldb.ab r5, [r1,1]
stb.ab r5, [r3,1]
.Lcopyremainingbytes:
j [blink]
; END CASE 0
.Lsourceunaligned:
cmp r4, 2
beq.d @.LunalignedOffby2
sub r2, r2, 1                   ; delay slot: executed for all three cases
bhi.d @.LunalignedOffby3
ldb.ab r5, [r1, 1]              ; delay slot: leading byte for cases 1 and 3
; CASE 1: The source is unaligned, off by 1
; Hence I need to read 1 byte for a 16bit alignment
; and 2bytes to reach 32bit alignment
ldh.ab r6, [r1, 2]
sub r2, r2, 2
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
MERGE_1 (r6, r6, 8)             ; combine the 1+2 leading bytes into r5,
MERGE_2 (r5, r5, 24)            ; positioned per endianness
or r5, r5, r6
; Both src and dst are aligned
lpnz @.Lcopy8bytes_1
; LOOP START
; r5 carries the partial word left over from the previous iteration;
; each aligned src word is split between two dst words.
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 24)
or r7, r7, r5
SHIFT_2 (r5, r6, 8)
SHIFT_1 (r9, r8, 24)
or r9, r9, r5
SHIFT_2 (r5, r8, 8)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_1:
; Write back the remaining 16bits
EXTRACT_1 (r6, r5, 16)
sth.ab r6, [r3, 2]
; Write back the remaining 8bits
EXTRACT_2 (r5, r5, 16)
stb.ab r5, [r3, 1]
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_1
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_1:
j [blink]
.LunalignedOffby2:
; CASE 2: The source is unaligned, off by 2
ldh.ab r5, [r1, 2]              ; leading halfword reaches 32-bit alignment
sub r2, r2, 1
; Both src and dst are aligned
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
#ifdef __BIG_ENDIAN__
asl.nz r5, r5, 16               ; pre-position the carry halfword
#endif
lpnz @.Lcopy8bytes_2
; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 16)
or r7, r7, r5
SHIFT_2 (r5, r6, 16)
SHIFT_1 (r9, r8, 16)
or r9, r9, r5
SHIFT_2 (r5, r8, 16)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_2:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 16
#endif
sth.ab r5, [r3, 2]              ; flush the pending halfword
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_2
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_2:
j [blink]
.LunalignedOffby3:
; CASE 3: The source is unaligned, off by 3
; Hence, I need to read 1byte for achieve the 32bit alignment
; (that byte was already loaded into r5 in the bhi.d delay slot above)
; Both src and dst are aligned
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
#ifdef __BIG_ENDIAN__
asl.ne r5, r5, 24               ; pre-position the carry byte
#endif
lpnz @.Lcopy8bytes_3
; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
SHIFT_2 (r5, r6, 24)
SHIFT_1 (r9, r8, 8)
or r9, r9, r5
SHIFT_2 (r5, r8, 24)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_3:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 24
#endif
stb.ab r5, [r3, 1]              ; flush the pending byte
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_3
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_3:
j [blink]
ENDFUNC (memcpy)
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,840
|
newlib/libc/machine/arc/memset-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#ifdef __ARCHS__
#ifdef USE_PREFETCH
#define PREWRITE(A,B) prefetchw [(A),(B)]
#else
#define PREWRITE(A,B) prealloc [(A),(B)]
#endif
;-----------------------------------------------------------------------
; void *memset (void *dst /* r0 */, int c /* r1 */, size_t n /* r2 */)
; ARC HS (ARCv2) variant using zero-overhead loops and write prefetch
; (PREWRITE is prefetchw or prealloc per the USE_PREFETCH switch above).
; Strategy: byte-store until r3 (working dst) is 4-aligned, replicate c
; into a fill word, then blast 64-byte chunks, 32-byte chunks, and a
; byte tail.  r0 is preserved as the return value.
; "lpnz label" runs the following block lp_count times (skipped if 0).
;-----------------------------------------------------------------------
ENTRY (memset)
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
; if length < 8
brls.d.nt r2, 8, .Lsmallchunk
mov.f lp_count,r2
and.f r4, r0, 0x03              ; r4 = dst misalignment
rsub lp_count, r4, 4            ; bytes to reach alignment (note: 4 when
                                ; r4 == 0 -- harmless extra byte stores)
lpnz @.Laligndestination
; LOOP BEGIN
stb.ab r1, [r3,1]
sub r2, r2, 1
.Laligndestination:
; Destination is aligned
and r1, r1, 0xFF                ; replicate the fill byte into a word:
asl r4, r1, 8
or r4, r4, r1
asl r5, r4, 16
or r5, r5, r4
mov r4, r5                      ; r4 = r5 = fill word (pair for std.ab)
; Split r2 into: lp_count = full 64-byte chunks, r2 = remainder for the
; 32-byte/byte tails (the hi/ls conditions come from the cmp below).
sub3 lp_count, r2, 8
cmp r2, 64
bmsk.hi r2, r2, 5
mov.ls lp_count, 0
add3.hi r2, r2, 8
; Convert len to Dwords, unfold x8
lsr.f lp_count, lp_count, 6
lpnz @.Lset64bytes
; LOOP START
PREWRITE (r3, 64) ;Prefetch the next write location
#ifdef __ARC_LL64__
std.ab r4, [r3, 8]              ; 64 bytes per iteration as 8 x 64-bit
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]               ; 64 bytes per iteration as 16 x 32-bit
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset64bytes:
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
#ifdef __ARC_LL64__
std.ab r4, [r3, 8]              ; 32 bytes per iteration
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset32bytes:
and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
.Lsmallchunk:
; Byte tail; also the entire body when n < 8.
lpnz .Lcopy3bytes
; LOOP START
stb.ab r1, [r3, 1]
.Lcopy3bytes:
j [blink]
ENDFUNC (memset)
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,337
|
newlib/libc/machine/arc/memcpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if !defined (__ARC601__) && !defined (__ARCHS__) \
&& defined (__ARC_BARREL_SHIFTER__)
/* Mostly optimized for ARC700, but not bad for ARC600 either. */
/* This memcpy implementation does not support objects of 1GB or larger -
the check for alignment does not work then. */
/* We assume that most sources and destinations are aligned, and
that also lengths are mostly a multiple of four, although to a lesser
extent. */
;-----------------------------------------------------------------------
; void *memcpy (void *dst /* r0 */, const void *src /* r1 */,
;               size_t n /* r2 */)
; Barrel-shifter variant (ARC700/ARC600; see guard above).  Uses a
; zero-overhead loop (lp_count) with the loads pipelined one ahead of
; the stores, and a masked merge for the final partial word.
; The asl-by-30 alignment trick below is why objects >= 1GB are
; unsupported (see the file comment above): the shifted alignment bits
; are compared against the length itself.
; r5 = working dst (r0 preserved as return value), r12/r3 = pipeline.
;-----------------------------------------------------------------------
ENTRY (memcpy)
or r3,r0,r1
asl_s r3,r3,30                  ; alignment bits of dst|src into the top
mov_s r5,r0                     ; keep r0 intact for the return value
brls.d r2,r3,.Lcopy_bytewise    ; unaligned (r3 huge) or tiny n
sub.f r3,r2,1                   ; (delay slot) r3 = n-1; C set iff n == 0
ld_s r12,[r1,0]                 ; prime the load pipeline
asr.f lp_count,r3,3             ; iterations of the 2-words-per-pass loop
bbit0.d r3,2,.Lnox4             ; odd word count? copy one word first
bmsk_s r2,r2,1                  ; (delay slot) r2 = n & 3 (tail bytes)
st.ab r12,[r5,4]
ld.a r12,[r1,4]
.Lnox4:
lppnz .Lendloop                 ; zero-overhead loop over the body below
ld_s r3,[r1,4]                  ; loads run one word ahead of stores
st.ab r12,[r5,4]
ld.a r12,[r1,8]
st.ab r3,[r5,4]
.Lendloop:
breq_l r2,0,.Last_store         ; no tail bytes: store pending word whole
ld r3,[r5,0]                    ; merge pending word with bytes past n
#ifdef __LITTLE_ENDIAN__
add3 r2,-1,r2                   ; bmsk bit count for the valid low bytes
; uses long immediate
xor_s r12,r12,r3                ; xor/bmsk/xor masked-merge idiom
bmsk r12,r12,r2
xor_s r12,r12,r3
#else /* BIG ENDIAN */
sub3 r2,31,r2                   ; mask position counted from the top
; uses long immediate
xor_s r3,r3,r12
bmsk r3,r3,r2
xor_s r12,r12,r3
#endif /* ENDIAN */
.Last_store:
j_s.d [blink]
st r12,[r5,0]                   ; (delay slot) final (merged) word
.balign 4
.Lcopy_bytewise:
jcs [blink]                     ; C from "sub.f r3,r2,1": n == 0, done
ldb_s r12,[r1,0]
lsr.f lp_count,r3               ; (n-1)/2 pairs; C = odd byte pending
bcc_s .Lnox1
stb.ab r12,[r5,1]               ; copy one byte to balance the count
ldb.a r12,[r1,1]
.Lnox1:
lppnz .Lendbloop                ; two bytes per pass, pipelined as above
ldb_s r3,[r1,1]
stb.ab r12,[r5,1]
ldb.a r12,[r1,2]
stb.ab r3,[r5,1]
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0]                  ; (delay slot) final pending byte
ENDFUNC (memcpy)
#endif /* !__ARC601__ && !__ARCHS__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,969
|
newlib/libc/machine/arc/memcmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || !defined (__ARC_NORM__) \
|| !defined (__ARC_BARREL_SHIFTER__)
/* Addresses are unsigned, and at 0 is the vector table, so it's OK to assume
that we can subtract 8 from a source end address without underflow. */
;-----------------------------------------------------------------------
; int memcmp (const void *s1 /* r0 */, const void *s2 /* r1 */,
;             size_t n /* r2 */)
; Returns <0, 0 or >0 in r0 (0 immediately when n == 0).
; r3 = s1 + n (end pointer).  Word-compare path only when both pointers
; are 4-byte aligned; on big endian it is currently disabled entirely
; (see comment below) and everything goes through the bytewise loop.
;-----------------------------------------------------------------------
ENTRY (memcmp)
or r12,r0,r1
tst r12,3
breq r2,0,.Lnil
add_s r3,r0,r2
/* This algorithm for big endian targets sometimes works incorrectly
when sources are aligned. To be precise the last step is omitted.
Just use a simple bytewise variant until the algorithm is reviewed
and fixed. */
#ifdef __LITTLE_ENDIAN__
bne_s .Lbytewise
#else /* BIG ENDIAN */
b_s .Lbytewise
#endif /* ENDIAN */
; Word loop: r4/r5 hold the current word pair, r3/r12 the next pair
; (one word of lookahead); r6 = end - 8 bounds the loop.
sub r6,r3,8
ld r4,[r0,0]
ld r5,[r1,0]
2:
brhs r0,r6,.Loop_end
ld_s r3,[r0,4]
ld_s r12,[r1,4]
brne r4,r5,.Leven
ld.a r4,[r0,8]
breq.d r3,r12,2b
ld.a r5,[r1,8]
#ifdef __LITTLE_ENDIAN__
mov_s r4,r3
b.d .Lodd
mov_s r5,r12
#else /* BIG ENDIAN */
cmp_s r3,r12
j_s.d [blink]
rrc r0,2
#endif /* ENDIAN */
.balign 4
.Loop_end:
sub r3,r0,r6
brhs r3,4,.Last_cmp
brne r4,r5,.Leven
ld r4,[r0,4]
ld r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
.balign 4
; .Last_cmp: force a difference above the last valid byte (bset) so
; bytes beyond n cannot influence the result, then fall into the
; differing-word resolution below.
.Last_cmp:
mov_l r0,24
add3 r2,r0,r2
xor r0,r4,r5
b.d .Leven_cmp
bset r0,r0,r2
.Lodd:
.Leven:
xor r0,r4,r5
; Isolate the lowest differing byte of the word pair (lowest set bit of
; the xor, widened to a byte mask via the 0x80808080 borrow trick), then
; compare only those bytes.  NOTE(review): sign of the final subtract
; relies on this masking -- verify against the algorithm description.
.Leven_cmp:
mov_s r1,0x80808080
; uses long immediate
sub_s r12,r0,1
bic_s r0,r0,r12
sub r0,r1,r0
xor_s r0,r0,r1
and r1,r5,r0
and r0,r4,r0
#else /* BIG ENDIAN */
.Last_cmp:
mov_s r3,0
sub3 r2,r3,r2
sub_s r3,r3,1
bclr r3,r3,r2
add_l r3,r3,1
and r0,r4,r3
and r1,r5,r3
.Leven:
#endif /* ENDIAN */
xor.f 0,r0,r1
sub_s r0,r0,r1
j_s.d [blink]
mov.mi r0,r1
.balign 4
; Bytewise loop, same two-ahead structure as the word loop.
.Lbytewise:
ldb r4,[r0,0]
ldb r5,[r1,0]
sub r6,r3,2
3:
brhs r0,r6,.Lbyte_end
ldb_s r3,[r0,1]
ldb_s r12,[r1,1]
brne r4,r5,.Lbyte_even
ldb.a r4,[r0,2]
breq.d r3,r12,3b
ldb.a r5,[r1,2]
.Lbyte_odd:
j_s.d [blink]
sub r0,r3,r12
.balign 4
.Lbyte_end:
bbit1 r2,0,.Lbyte_even
brne r4,r5,.Lbyte_even
ldb r4,[r0,1]
ldb r5,[r1,1]
.Lbyte_even:
j_s.d [blink]
sub r0,r4,r5
.Lnil:
j_s.d [blink]
mov_s r0,0
ENDFUNC (memcmp)
#endif /* __ARC601__ || !__ARC_NORM__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 4,627
|
newlib/libc/machine/arc/strncpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strncpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
The beq_s to r3z could be made unaligned & long to avoid a stall
there, but it is not likely to be taken often, and it
would also be likely to cost an unaligned mispredict at the next call. */
#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
;-----------------------------------------------------------------------
; char *strncpy (char *dst /* r0 */, const char *src /* r1 */,
;                size_t n /* r2 */)
; Returns dst (r0 unmodified; r10 is the write cursor).
; Word-copy path when both pointers are 4-byte aligned and n >= 8
; (cmp_s r2,8 sets carry for n < 8; bmsk.cc.f then tests alignment).
; Per-word NUL detection uses (x - 0x01010101) & ~x & 0x80808080 with
; r8 = 0x01010101 and r11 = ror(r8) = 0x80808080.  When src ends before
; n bytes, the remainder is zero-filled by tail-calling __strncpy_bzero
; (a secondary entry point in memset).
; Fix: the fast BRand selector was misspelled "__ARC700___" (three
; trailing underscores), so ARC700 builds silently fell back to the
; generic and+brne_s form; the same condition is spelled "__ARC700__"
; correctly at the .Lr3z dispatch below.
;-----------------------------------------------------------------------
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
#define BRand(a,b,l) tst a,b ` bne_l l
#else
#define BRand(a,b,l) and a,a,b ` brne_s a,0,l
#endif
ENTRY (strncpy)
cmp_s r2,8
or r12,r0,r1
bmsk.cc.f r12,r12,1
brne.d r12,0,.Lbytewise
mov_s r10,r0
ld_s r3,[r1,0]
mov r8,0x01010101
sub lp_count,r2,1
; If src is 4- but not 8-byte aligned, consume one word first so the
; main loop can process 8 bytes per iteration.
bbit0.d r1,2,.Loop_start
ror r11,r8
sub r12,r3,r8
bic_l r12,r12,r3
BRand (r12,r11,.Lr3z)
mov_s r4,r3
ld.a r3,[r1,4]
sub lp_count,lp_count,4
st.ab r4,[r10,4]
.balign 4
.Loop_start:
lsr.f lp_count,lp_count,3
lpne .Loop_end
ld.a r4,[r1,4]
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z)
st.ab r3,[r10,4]
sub r12,r4,r8
bic r12,r12,r4
BRand (r12,r11,.Lr4z)
ld.a r3,[r1,4]
st.ab r4,[r10,4]
.Loop_end:
bcc_s .Lastword
ld.a r4,[r1,4]
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z)
st.ab r3,[r10,4]
mov_s r3,r4
; Store the final 1..4 bytes of the last word, one byte per iteration.
.Lastword:
and.f lp_count,r2,3
mov.eq lp_count,4
lp .Last_byte_end
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7
lsr.ne r3,r3,8
#else
lsr.f r1,r3,24
asl.ne r3,r3,8
#endif
stb.ab r1,[r10,1]
.Last_byte_end:
j_s [blink]
.balign 4
; NUL found inside r4 / r3: store the bytes up to and including the NUL,
; then zero-fill the rest.
.Lr4z:
mov_l r3,r4
.Lr3z:
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7
lsr_s r3,r3,8
#else
lsr.f r1,r3,24
asl_s r3,r3,8
#endif
bne.d .Lr3z
stb.ab r1,[r10,1]
#else /* ! __ARC700__ */
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7
.Lr3z_loop:
lsr_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop
bmsk.f r1,r3,7
#else
lsr.f r1,r3,24
.Lr3z_loop:
asl_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop
lsr.f r1,r3,24
#endif /* ENDIAN */
#endif /* ! __ARC700__ */
.Lzero_rest:
; __strncpy_bzero requires:
; return value in r0
; zeroing length in r2
; zeroing start address in r3
mov_s r3,r10
add_s r2,r2,r0
b.d __strncpy_bzero
sub_s r2,r2,r3
.balign 4
; Bytewise path for short or unaligned copies.
.Lbytewise:
sub.f r2,r2,1
mov_l r3,r0
jcs [blink]
.Lcharloop:
ldb.ab r12,[r1,1]
beq.d .Last_byte
sub.f r2,r2,1
brne.d r12,0,.Lcharloop
stb.ab r12,[r3,1]
b.d __strncpy_bzero
stb.ab r12,[r3,1]
.Last_byte:
j_s.d [blink]
stb_l r12,[r3]
ENDFUNC (strncpy)
#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,322
|
newlib/libc/machine/arc/strcpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
The beq_s to r3z could be made unaligned & long to avoid a stall
there, but it is not likely to be taken often, and it
would also be likely to cost an unaligned mispredict at the next call. */
;-----------------------------------------------------------------------
; char *strcpy (char *dst /* r0 */, const char *src /* r1 */)
; Returns dst (r0 unmodified; r10 is the write cursor).
; Word-copy path when both pointers are 4-byte aligned, two words per
; loop iteration with one word of lookahead; per-word NUL detection via
; (x - 0x01010101) & ~x & 0x80808080 (r8 = 0x01010101, r12 = ror(r8)
; = 0x80808080).  The word containing the NUL is stored byte-by-byte at
; r3z.  Unaligned input uses the bytewise charloop.
;-----------------------------------------------------------------------
ENTRY (strcpy)
or r2,r0,r1
bmsk_s r2,r2,1
brne.d r2,0,charloop
mov_s r10,r0
ld_s r3,[r1,0]
mov r8,0x01010101
; If src is 4- but not 8-byte aligned, consume one word first.
bbit0.d r1,2,loop_start
ror r12,r8
sub r2,r3,r8
bic_s r2,r2,r3
tst_s r2,r12
bne_l r3z
mov_s r4,r3
.balign 4
loop:
ld.a r3,[r1,4]
st.ab r4,[r10,4]
loop_start:
ld.a r4,[r1,4]
sub r2,r3,r8
bic_s r2,r2,r3
tst_l r2,r12
bne_l r3z
st.ab r3,[r10,4]
sub r2,r4,r8
bic r2,r2,r4
tst_l r2,r12
beq_l loop
mov_s r3,r4
; NUL found in r3: emit its bytes one at a time until the NUL is stored.
#ifdef __LITTLE_ENDIAN__
r3z: bmsk.f r1,r3,7
lsr_s r3,r3,8
#else
r3z: lsr.f r1,r3,24
asl_s r3,r3,8
#endif
bne.d r3z
stb.ab r1,[r10,1]
j_s [blink]
.balign 4
charloop:
ldb.ab r3,[r1,1]
brne.d r3,0,charloop
stb.ab r3,[r10,1]
j [blink]
ENDFUNC (strcpy)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,269
|
newlib/libc/machine/arc/strlen-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
;-----------------------------------------------------------------------
; size_t strlen (const char *s /* r0 */)
; Returns the length in r0.  NORM-instruction variant.
; Reads two words per loop iteration (r2, r6) from a base rounded down
; to 8 (r3 = s | 7 with negative load offsets); the prologue masks bytes
; that precede the start of the string out of the first NUL test.
; NUL detection: (x - 0x01010101) & ~x & 0x80808080, with
; r4 = 0x01010101 and r5 = ror(r4) = 0x80808080.  The position of the
; NUL byte inside the final word is found with NORM.
;-----------------------------------------------------------------------
ENTRY (strlen)
or r3,r0,7
ld r2,[r3,-7]
ld.a r6,[r3,-3]
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
asl_s r1,r0,3
btst_s r0,2
asl r7,r4,r1
ror r5,r4
sub r1,r2,r7
bic_s r1,r1,r2
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#else /* BIG ENDIAN */
ror r5,r4
btst_s r0,2
mov_s r1,31
sub3 r7,r1,r0
sub r1,r2,r4
bic_s r1,r1,r2
bmsk r1,r1,r7
sub r12,r6,r4
bic r12,r12,r6
bmsk.ne r12,r12,r7
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#endif /* ENDIAN */
; Main loop: test both words of each 8-byte chunk at once; fall out of
; the loop when either contains a NUL indicator.
.Loop:
ld_s r2,[r3,4]
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4
bic_s r1,r1,r2
sub r12,r6,r4
bic r12,r12,r6
or_l r12,r12,r1
and r12,r12,r5
breq_l r12,0,.Loop
; Epilogue: decide which of the two words holds the NUL, then convert
; its bit position (via NORM) into a byte count.
.Lend:
and.f r1,r1,r5
sub.ne r3,r3,4
mov.eq r1,r12
#ifdef __LITTLE_ENDIAN__
sub_s r2,r1,1
bic_s r2,r2,r1
norm r1,r2
sub_s r0,r0,3
lsr_s r1,r1,3
sub r0,r3,r0
j_s.d [blink]
sub_l r0,r0,r1
#else /* BIG ENDIAN */
lsr_s r1,r1,7
mov.eq r2,r6
bic_s r1,r1,r2
norm r1,r1
sub r0,r3,r0
lsr_s r1,r1,3
j_s.d [blink]
add_l r0,r0,r1
#endif /* ENDIAN */
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1
ENDFUNC (strlen)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
&& __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,353
|
newlib/libc/machine/arc/strlen-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC600__) || !defined (__ARC_NORM__)) && !defined (__ARC601__) \
&& defined (__ARC_BARREL_SHIFTER__)
/* This code is optimized for the ARC600 pipeline. */
;-----------------------------------------------------------------------
; size_t strlen (const char *s /* r0 */)
; Returns the length in r0.  ARC600-scheduled variant (no NORM):
; the NUL byte's position inside the final word is resolved with flag
; tests (asr.f / bmsk.f / asl.f) and conditional adds instead of NORM.
; Reads two words per loop iteration (r2, r6) from a base rounded down
; to 8 (r3 = s | 7 with negative load offsets); the prologue masks bytes
; that precede the start of the string out of the first NUL test.
; NUL detection: (x - 0x01010101) & ~x & 0x80808080, with
; r4 = 0x01010101 and r5 = ror(r4) = 0x80808080.
;-----------------------------------------------------------------------
ENTRY (strlen)
or r3,r0,7
ld r2,[r3,-7]
ld.a r6,[r3,-3]
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
asl_s r1,r0,3
btst_s r0,2
asl r7,r4,r1
ror r5,r4
sub r1,r2,r7
bic_l r1,r1,r2
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#else /* BIG ENDIAN */
ror r5,r4
btst_s r0,2
mov_s r1,31
sub3 r7,r1,r0
sub r1,r2,r4
bic_l r1,r1,r2
bmsk r1,r1,r7
sub r12,r6,r4
bic r12,r12,r6
bmsk.ne r12,r12,r7
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#endif /* ENDIAN */
; Main loop: test both words of each 8-byte chunk at once; fall out of
; the loop when either contains a NUL indicator.
.Loop:
ld_s r2,[r3,4]
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4
bic_s r1,r1,r2
sub r12,r6,r4
bic r12,r12,r6
or_s r12,r12,r1
and r12,r12,r5
breq_s r12,0,.Loop
; Epilogue: decide which word holds the NUL, then turn the indicator
; bits into a byte offset using flag arithmetic.
.Lend:
and.f r1,r1,r5
sub.ne r3,r3,4
#ifdef __LITTLE_ENDIAN__
mov.eq r1,r12
asr.f 0,r1,8
bmsk.f 0,r1,15
sub r0,r3,r0
add.cc r0,r0,1
jne.d [blink]
asl.f 0,r1,9
j_s.d [blink]
sbc r0,r0,-2
#else /* BIG ENDIAN */
mov.eq r2,r6
asl_s r2,r2,7
mov.eq r1,r12
bic_s r1,r1,r2
asr.f 0,r1,16
sub r0,r3,r0
add.pl r0,r0,1
jne.d [blink]
add.eq r0,r0,1
j_s.d [blink]
add.cc r0,r0,1
#endif /* ENDIAN */
.balign 4
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1
ENDFUNC (strlen)
#endif /* (__ARC600__ || !__ARC_NORM__) && !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 5,138
|
newlib/libc/machine/arc/memcmp-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if !defined (__ARC601__) && defined (__ARC_NORM__) \
&& defined (__ARC_BARREL_SHIFTER__)
#ifdef __LITTLE_ENDIAN__
#define WORD2 r2
#define SHIFT r3
#else /* BIG ENDIAN */
#define WORD2 r3
#define SHIFT r2
#endif
;-----------------------------------------------------------------------
; int memcmp (const void *s1 /* r0 */, const void *s2 /* r1 */,
;             size_t n /* r2 */)
; Returns <0, 0 or >0 in r0.  Barrel-shifter + NORM variant.
; Alignment test: (s1|s2) << 30 puts misalignment bits in bits 31:30,
; so unaligned or very large n falls to the bytewise loop (same trick
; as memcpy).  The word loop compares two word pairs per zero-overhead
; loop iteration with one pair of lookahead (r4/r5 current, WORD2/r12
; next); WORD2 and SHIFT are the endian-dependent aliases of r2/r3
; defined just above this function.
;-----------------------------------------------------------------------
ENTRY (memcmp)
or r12,r0,r1
asl_s r12,r12,30
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
sub_l r3,r2,1
brls r2,r12,.Lbytewise
#else
brls.d r2,r12,.Lbytewise
sub_s r3,r2,1
#endif
ld r4,[r0,0]
ld r5,[r1,0]
lsr.f lp_count,r3,3
#ifdef __ARCEM__
/* A branch can't be the last instruction in a zero overhead loop.
So we move the branch to the start of the loop, duplicate it
after the end, and set up r12 so that the branch isn't taken
initially. */
mov_s r12,WORD2
lpne .Loop_end
brne WORD2,r12,.Lodd
ld WORD2,[r0,4]
#else
lpne .Loop_end
ld_s WORD2,[r0,4]
#endif
ld_s r12,[r1,4]
brne r4,r5,.Leven
ld.a r4,[r0,8]
ld.a r5,[r1,8]
#ifdef __ARCEM__
.Loop_end:
brne WORD2,r12,.Lodd
#else
brne WORD2,r12,.Lodd
#ifdef __ARCHS__
nop
#endif
.Loop_end:
#endif
; SHIFT = 8 * (n & 7): bit position just above the last valid byte.
asl_s SHIFT,SHIFT,3
bcc_s .Last_cmp
brne r4,r5,.Leven
ld r4,[r0,4]
ld r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
nop_s
; one more load latency cycle
; .Last_cmp: bset forces a difference above the last valid byte so
; bytes beyond n cannot affect the result.
.Last_cmp:
xor r0,r4,r5
bset r0,r0,SHIFT
sub_s r1,r0,1
bic_s r1,r1,r0
norm r1,r1
b.d .Leven_cmp
and r1,r1,24
.Leven:
xor r0,r4,r5
sub_s r1,r0,1
bic_s r1,r1,r0
norm r1,r1
; slow track insn
and r1,r1,24
; Shift both words so the first differing byte becomes most significant,
; then subtract (the lsr by 1 keeps the subtract from overflowing).
.Leven_cmp:
asl r2,r4,r1
asl r12,r5,r1
lsr_s r2,r2,1
lsr_s r12,r12,1
j_s.d [blink]
sub r0,r2,r12
.balign 4
.Lodd:
xor r0,WORD2,r12
sub_s r1,r0,1
bic_s r1,r1,r0
norm r1,r1
; slow track insn
and r1,r1,24
asl_s r2,r2,r1
asl_s r12,r12,r1
lsr_s r2,r2,1
lsr_s r12,r12,1
j_s.d [blink]
sub r0,r2,r12
#else /* !__ARC700__ */
.balign 4
.Last_cmp:
xor r0,r4,r5
b.d .Leven_cmp
bset r0,r0,SHIFT
.Lodd:
mov_s r4,WORD2
mov_s r5,r12
.Leven:
xor r0,r4,r5
; Isolate the lowest differing byte via the 0x80808080 borrow trick,
; then compare just those bytes.
.Leven_cmp:
mov_s r1,0x80808080
; uses long immediate
sub_s r12,r0,1
bic_s r0,r0,r12
sub r0,r1,r0
xor_s r0,r0,r1
and r1,r5,r0
and r0,r4,r0
xor.f 0,r0,r1
sub_s r0,r0,r1
j_s.d [blink]
mov.mi r0,r1
#endif /* !__ARC700__ */
#else /* BIG ENDIAN */
.Last_cmp:
neg_s SHIFT,SHIFT
lsr r4,r4,SHIFT
lsr r5,r5,SHIFT
; slow track insn
.Leven:
sub.f r0,r4,r5
mov.ne r0,1
j_s.d [blink]
bset.cs r0,r0,31
.Lodd:
cmp_s WORD2,r12
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
mov_s r0,1
j_s.d [blink]
bset.cs r0,r0,31
#else
j_s.d [blink]
rrc r0,2
#endif /* __ARC700__ || __ARCEM__ || __ARCHS__ */
#endif /* ENDIAN */
.balign 4
; Bytewise path, same lookahead structure one byte at a time.
.Lbytewise:
breq r2,0,.Lnil
ldb r4,[r0,0]
ldb r5,[r1,0]
lsr.f lp_count,r3
#ifdef __ARCEM__
mov r12,r3
lpne .Lbyte_end
brne r3,r12,.Lbyte_odd
#else
lpne .Lbyte_end
#endif
ldb_s r3,[r0,1]
ldb_l r12,[r1,1]
brne r4,r5,.Lbyte_even
ldb.a r4,[r0,2]
ldb.a r5,[r1,2]
#ifdef __ARCEM__
.Lbyte_end:
brne r3,r12,.Lbyte_odd
#else
brne r3,r12,.Lbyte_odd
#ifdef __ARCHS__
nop
#endif
.Lbyte_end:
#endif
bcc_l .Lbyte_even
brne r4,r5,.Lbyte_even
ldb_s r3,[r0,1]
ldb_s r12,[r1,1]
.Lbyte_odd:
j_s.d [blink]
sub r0,r3,r12
.Lbyte_even:
j_s.d [blink]
sub r0,r4,r5
.Lnil:
j_s.d [blink]
mov_l r0,0
ENDFUNC (memcmp)
#endif /* !__ARC601__ && __ARC_NORM__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 4,560
|
newlib/libc/machine/arc/memset-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* ARC HS has its own implementation of memset, yet we want this function
still to be compiled under "__dummy_memset" disguise, because strncpy
function uses __strncpy_bzero as a second entry point into memset. Would be
better to add __strncpy_bzero label to memset for ARC HS though, and even
better would be to avoid a second entry point into function. ARC HS always
has barrel-shifter, so this implementation will be always used for this
purpose. */
#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
/* To deal with alignment/loop issues, SMALL must be at least 2. */
#define SMALL 7
.global __strncpy_bzero
.hidden __strncpy_bzero
/* __strncpy_bzero provides the following interface to strncpy:
r0: return value
r2: zeroing length
r3: zeroing start address
No attempt is made here for __strncpy_bzero to speed up aligned
cases, because the copying of a string presumably leaves start address
and length alignment for the zeroing randomly distributed. */
;-----------------------------------------------------------------------
; void *memset (void *dst /* r0 */, int c /* r1 */, size_t n /* r2 */)
; Returns dst (r0 unmodified; r3 is the write cursor).  On ARC HS the
; function is assembled as __dummy_memset (see comment above) but still
; exposes __strncpy_bzero, the secondary entry used by strncpy with:
;   r0 = return value, r2 = zeroing length, r3 = zeroing start address.
; Strategy: replicate c into all four bytes of r1, write an unaligned
; head and tail (byte + halfword at each end), then word stores in a
; zero-overhead loop; lengths <= SMALL take the .Ltiny path.
;-----------------------------------------------------------------------
#ifdef __ARCHS__
ENTRY (__dummy_memset)
#else
ENTRY (memset)
#endif
#if !defined (__ARC700__) && !defined (__ARCEM__)
#undef SMALL
#define SMALL 8 /* Even faster if aligned. */
brls.d r2,SMALL,.Ltiny
#endif
mov_s r3,r0
or r12,r0,r2
bmsk.f r12,r12,1
extb_s r1,r1
asl r12,r1,8
beq.d .Laligned
or_s r1,r1,r12
#if defined (__ARC700__) || defined (__ARCEM__)
brls r2,SMALL,.Ltiny
#endif
; Unaligned head/tail: store a byte and a halfword at each end, adjust
; r2/r3 so the middle can be filled with aligned word stores.
.Lnot_tiny:
add_s r12,r2,r0
stb r1,[r12,-1]
bclr_l r12,r12,0
stw r1,[r12,-2]
bmsk.f r12,r3,1
add_s r2,r2,r12
sub.ne r2,r2,4
stb.ab r1,[r3,1]
bclr_s r3,r3,0
stw.ab r1,[r3,2]
bclr_s r3,r3,1
.Laligned: ; This code address should be aligned for speed.
#if defined (__ARC700__) || defined (__ARCEM__)
asl r12,r1,16
lsr.f lp_count,r2,2
or_s r1,r1,r12
lpne .Loop_end
st.ab r1,[r3,4]
.Loop_end:
j_s [blink]
#else /* !__ARC700 */
lsr.f lp_count,r2,3
asl r12,r1,16
or_s r1,r1,r12
lpne .Loop_end
st.ab r1,[r3,4]
st.ab r1,[r3,4]
.Loop_end:
jcc [blink]
j_s.d [blink]
st_s r1,[r3]
#endif /* !__ARC700 */
#if defined (__ARC700__) || defined (__ARCEM__)
.balign 4
__strncpy_bzero:
brhi.d r2,17,.Lnot_tiny
mov_l r1,0
.Ltiny:
mov.f lp_count,r2
lpne .Ltiny_end
stb.ab r1,[r3,1]
.Ltiny_end:
j_s [blink]
#else /* !__ARC700__ */
#if SMALL > 8
FIXME
#endif
.balign 4
__strncpy_bzero:
brhi.d r2,8,.Lnot_tiny
mov_s r1,0
; Computed branch into the stb_s ladder below: stores exactly r2
; (<= 8) bytes without a loop.
.Ltiny:
sub_s r2,r2,11
sub1 r12,pcl,r2
j_s [r12]
stb_s r1,[r3,7]
stb_s r1,[r3,6]
stb_s r1,[r3,5]
stb_s r1,[r3,4]
stb_s r1,[r3,3]
stb_s r1,[r3,2]
stb_s r1,[r3,1]
stb_s r1,[r3]
j_s [blink]
#endif /* !__ARC700 */
#ifdef __ARCHS__
ENDFUNC (__dummy_memset)
#else
ENDFUNC (memset)
#endif
#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 2,969
|
newlib/libc/machine/arc/strcmp-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#ifdef __ARCHS__
;-----------------------------------------------------------------------
; int strcmp (const char *s1 /* r0 */, const char *s2 /* r1 */)
; Returns <0, 0 or >0 in r0.  ARC HS variant.
; Word loop when both pointers are word aligned; NUL in s1 is detected
; with (x - 0x01010101) & ~x & 0x80808080 (r12 = 0x01010101,
; r11 = ror(r12) = 0x80808080).  Byte order is normalized with SWAPE
; before the final compares so the sign of the result is correct on
; either endianness.  Unaligned input uses the bytewise .Lcharloop.
;-----------------------------------------------------------------------
ENTRY (strcmp)
or r2, r0, r1
bmsk_s r2, r2, 1
brne r2, 0, @.Lcharloop
; s1 and s2 are word aligned
ld.ab r2, [r0, 4]
mov_s r12, 0x01010101
ror r11, r12
.align 4
.LwordLoop:
ld.ab r3, [r1, 4]
; Detect NULL char in str1
sub r4, r2, r12
ld.ab r5, [r0, 4]
bic r4, r4, r2
and r4, r4, r11
brne.d.nt r4, 0, .LfoundNULL
; Check if the read locations are the same
cmp r2, r3
beq.d .LwordLoop
mov.eq r2, r5
; A match is found, spot it out
#ifdef __LITTLE_ENDIAN__
swape r3, r3
mov_s r0, 1
swape r2, r2
#else
mov_s r0, 1
#endif
cmp_s r2, r3
j_s.d [blink]
bset.lo r0, r0, 31
.align 4
; NUL seen in the s1 word: mask both words down to the bytes up to and
; including the NUL, then compare.
.LfoundNULL:
#ifdef __BIG_ENDIAN__
swape r4, r4
swape r2, r2
swape r3, r3
#endif
; Find null byte
ffs r0, r4
bmsk r2, r2, r0
bmsk r3, r3, r0
swape r2, r2
swape r3, r3
; make the return value
sub.f r0, r2, r3
mov.hi r0, 1
j_s.d [blink]
bset.lo r0, r0, 31
.align 4
.Lcharloop:
ldb.ab r2, [r0, 1]
ldb.ab r3, [r1, 1]
nop
breq r2, 0, .Lcmpend
breq r2, r3, .Lcharloop
.align 4
.Lcmpend:
j_s.d [blink]
sub r0, r2, r3
ENDFUNC (strcmp)
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 3,445
|
newlib/libc/machine/arc/strcpy-bs-arc600.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC600__) && defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
This version is optimized for the ARC600 pipeline. */
/* char *strcpy (char *dst, char *src) -- ARC600 with barrel shifter.
   Copies 8 bytes per loop iteration once src is 8-byte aligned, using
   the ((x - 0x01010101) & ~x) & 0x80808080 trick to detect a NUL byte
   inside a word.  r10 is the running dst pointer; r0 (the original
   dst) is left untouched as the return value.  Branch delay slots
   (.d forms) are filled deliberately -- do not reorder.  */
ENTRY (strcpy)
	or	r2,r0,r1
	bmsk.f	0,r2,1		; low 2 bits of dst|src: 4-byte alignment test
	mov	r8,0x01010101	; NUL-detect magic constant
	bne.d	.Lcharloop	; unaligned: fall back to byte copy
	mov_s	r10,r0		; delay slot: r10 = running dst pointer
	ld_l	r3,[r1,0]	; NOTE(review): "ld_l" is unusual -- confirm against upstream (ld_s?)
	bbit0.d	r1,2,.Loop_setup ; bit 2 of src clear -> already 8-byte aligned
	ror	r12,r8		; delay slot: r12 = 0x80808080
	; src == 4 (mod 8): consume one word so the loop reads 8-aligned.
	sub	r2,r3,r8
	bic_s	r2,r2,r3
	and_s	r2,r2,r12	; nonzero iff r3 contains a NUL byte
	brne_s	r2,0,.Lr3z
	st.ab	r3,[r10,4]
	ld.a	r3,[r1,4]
.Loop_setup:
	ld.a	r4,[r1,4]
	sub	r2,r3,r8
	and.f	r2,r2,r12
	sub	r5,r4,r8
	and.eq.f r5,r5,r12
	b.d	.Loop_start
	mov_s	r6,r3		; delay slot: r6 = oldest not-yet-stored word
	.align 4
.Loop:
	ld.a	r3,[r1,4]
	st	r4,[r10,4]
	ld.a	r4,[r1,4]
	sub	r2,r3,r8
	and.f	r2,r2,r12
	sub	r5,r4,r8
	and.eq.f r5,r5,r12
	st.ab	r6,[r10,8]	; store oldest word, advance dst by 8
	mov	r6,r3
.Loop_start:
	beq.d	.Loop		; neither word triggered the fast test
	bic_s	r2,r2,r3	; delay slot: refine the r3 syndrome
	brne.d	r2,0,.Lr3z	; NUL somewhere in r3: finish bytewise
	and	r5,r5,r12	; delay slot: refine the r4 syndrome
	bic	r5,r5,r4
	breq.d	r5,0,.Loop	; no NUL in r4 either: keep going
	mov_s	r3,r4		; delay slot: if falling through, NUL word -> r3
	st.ab	r6,[r10,4]	; flush the pending word before the tail
#ifdef __LITTLE_ENDIAN__
	; Store the final word byte by byte, up to and including the NUL.
.Lr3z:	bmsk.f	r1,r3,7
.Lr3z_loop:
	lsr_s	r3,r3,8
	stb.ab	r1,[r10,1]
	bne.d	.Lr3z_loop
	bmsk.f	r1,r3,7
	j_s	[blink]
#else
	; Big-endian: the first byte lives in the top of the word.
.Lr3z:	lsr.f	r1,r3,24
.Lr3z_loop:
	asl_s	r3,r3,8
	stb.ab	r1,[r10,1]
	bne.d	.Lr3z_loop
	lsr.f	r1,r3,24
	j_s	[blink]
#endif
	.align 4
	; Fallback: unaligned operands, copy one byte at a time.
.Lcharloop:
	ldb.ab	r3,[r1,1]
	brne.d	r3,0,.Lcharloop
	stb.ab	r3,[r10,1]	; delay slot: store byte (incl. the final NUL)
	j	[blink]
ENDFUNC (strcpy)
#endif /* __ARC600__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 2,941
|
newlib/libc/machine/arc/strcpy.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
This version is a compromise between speed for the 601 pipeline and code
size. */
/* char *strcpy (char *dst, char *src) -- ARC601 / no barrel shifter.
   Word-at-a-time copy using the ((x - 0x01010101) & ~x) & 0x80808080
   NUL-detection trick; falls back to a byte loop both for unaligned
   operands and for the word that contains the NUL.  r10 tracks the
   dst write position; r0 is returned unchanged.  */
ENTRY (strcpy)
	or	r2,r0,r1
	bmsk.f	0,r2,1		; low 2 bits of dst|src: 4-byte alignment test
	mov	r8,0x01010101	; NUL-detect magic constant
	bne.d	.Lcharloop	; unaligned: byte-at-a-time copy
	mov_s	r10,r0		; delay slot: r10 = running dst pointer
	ld_s	r3,[r1]
	bbit0.d	r1,2,.Loop_start ; bit 2 of src clear -> already 8-byte aligned
	ror	r12,r8		; delay slot: r12 = 0x80808080
	; src == 4 (mod 8): consume one word so the loop reads 8-aligned.
	sub	r2,r3,r8
	bic_s	r2,r2,r3
	and_s	r2,r2,r12	; nonzero iff r3 contains a NUL byte
	brne_s	r2,0,.Lr3z
	mov	r4,r3
	sub_s	r1,r1,4
	.balign	4
.Loop:
	ld.a	r3,[r1,8]
	st.ab	r4,[r10,4]
.Loop_start:
	ld	r4,[r1,4]
	sub	r2,r3,r8
	bic_s	r2,r2,r3
	tst_s	r2,r12		; Z clear iff r3 holds a NUL byte
	sub	r5,r4,r8
	bic	r5,r5,r4
	bne_s	.Lr3z		; NUL in r3: re-read its word bytewise (r1 points at it)
	and	r5,r5,r12
	breq.d	r5,0,.Loop	; no NUL in r4 either: keep copying words
	st.ab	r3,[r10,4]	; delay slot: store r3 (executed on both paths)
	;mov_s	r3,r4
	add_s	r1,r1,4		; NUL in r4: advance so the byte loop reads that word
	.balign	4
.Lr3z:
.Lcharloop:
	ldb.ab	r3,[r1,1]
	brne.d	r3,0,.Lcharloop
	stb.ab	r3,[r10,1]	; delay slot: store byte (incl. the final NUL)
	j_s	[blink]
ENDFUNC (strcpy)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
stsp/newlib-ia16
| 4,244
|
newlib/libc/machine/arc/setjmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* ABI interface file
these are the stack mappings for the registers
as stored in the ABI for ARC */
.file "setjmp.S"
; jmp_buf layout: byte offset of each saved item, in the order the
; setjmp/longjmp code below stores them.
ABIr13 = 0
ABIr14 = ABIr13 + 4
ABIr15 = ABIr14 + 4
ABIr16 = ABIr15 + 4
ABIr17 = ABIr16 + 4
ABIr18 = ABIr17 + 4
ABIr19 = ABIr18 + 4
ABIr20 = ABIr19 + 4
ABIr21 = ABIr20 + 4
ABIr22 = ABIr21 + 4
ABIr23 = ABIr22 + 4
ABIr24 = ABIr23 + 4
ABIr25 = ABIr24 + 4
ABIr26 = ABIr25 + 4
ABIr27 = ABIr26 + 4
ABIr28 = ABIr27 + 4
ABIr29 = ABIr28 + 4
ABIr30 = ABIr29 + 4
ABIr31 = ABIr30 + 4	; blink is stored in this slot
ABIlpc = ABIr31 + 4	; lp_count
ABIlps = ABIlpc + 4	; lp_start aux register
ABIlpe = ABIlps + 4	; lp_end aux register
ABIflg = ABIlpe + 4	; status32
ABImlo = ABIflg + 4	; mul64 low result (currently unused, see code below)
ABImhi = ABImlo + 4	; mul64 high result (currently unused)
.text
.align 4
.global setjmp
.type setjmp,@function
; int setjmp (jmp_buf env); env arrives in r0.
; Saves the callee-saved registers r13-r30, the return address
; (blink), lp_count and the lp_start/lp_end aux registers -- plus
; status32 where the #if below is active -- into env at the ABIxxx
; offsets defined above, then returns 0.
setjmp:
	st r13, [r0, ABIr13]
	st r14, [r0, ABIr14]
	st r15, [r0, ABIr15]
	st r16, [r0, ABIr16]
	st r17, [r0, ABIr17]
	st r18, [r0, ABIr18]
	st r19, [r0, ABIr19]
	st r20, [r0, ABIr20]
	st r21, [r0, ABIr21]
	st r22, [r0, ABIr22]
	st r23, [r0, ABIr23]
	st r24, [r0, ABIr24]
	st r25, [r0, ABIr25]
	st r26, [r0, ABIr26]
	st r27, [r0, ABIr27]
	st r28, [r0, ABIr28]
	st r29, [r0, ABIr29]
	st r30, [r0, ABIr30]
	st blink, [r0, ABIr31]	; return address goes in the r31 slot
	st lp_count, [r0, ABIlpc]
	lr r2, [lp_start]	; loop aux registers are read via lr
	lr r3, [lp_end]
	st r2, [r0, ABIlps]
	st r3, [r0, ABIlpe]
#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
; Till the configure changes are decided, and implemented, the code working on
; mlo/mhi and using mul64 should be disabled.
;	st mlo, [r0, ABImlo]
;	st mhi, [r0, ABImhi]
	lr r2, [status32]
	st r2, [r0, ABIflg]
#endif
	j.d [blink]
	mov r0,0		; delay slot: direct setjmp call returns 0
.Lfe1:
	.size setjmp,.Lfe1-setjmp
.align 4
.global longjmp
.type longjmp,@function
; void longjmp (jmp_buf env, int val); env in r0, val in r1.
; Restores the context saved by setjmp and resumes at the saved
; return address, making setjmp appear to return val (or 1 when val
; is 0, as ISO C requires).
longjmp:
	; load registers
	ld r13, [r0, ABIr13]
	ld r14, [r0, ABIr14]
	ld r15, [r0, ABIr15]
	ld r16, [r0, ABIr16]
	ld r17, [r0, ABIr17]
	ld r18, [r0, ABIr18]
	ld r19, [r0, ABIr19]
	ld r20, [r0, ABIr20]
	ld r21, [r0, ABIr21]
	ld r22, [r0, ABIr22]
	ld r23, [r0, ABIr23]
	ld r24, [r0, ABIr24]
	ld r25, [r0, ABIr25]
	ld r26, [r0, ABIr26]
	ld r27, [r0, ABIr27]
	ld r28, [r0, ABIr28]
	ld r3, [r0, ABIr29]	; r29/r30/lp_count are restored through a
	mov r29, r3		; scratch register rather than loaded directly
	ld r3, [r0, ABIr30]
	mov r30, r3
	ld blink, [r0, ABIr31]
	ld r3, [r0, ABIlpc]
	mov lp_count, r3
	ld r2, [r0, ABIlps]
	ld r3, [r0, ABIlpe]
	sr r2, [lp_start]	; loop aux registers are written via sr
	sr r3, [lp_end]
#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
	ld r2, [r0, ABImlo]
	ld r3, [r0, ABImhi]
	; We do not support restoring of mulhi and mlo registers, yet.
	; mulu64 0,r2,1	; restores mlo
	; mov 0,mlo	; force multiply to finish
	; sr r3, [mulhi]
	ld r2, [r0, ABIflg]
	flag r2	; restore "status32" register
#endif
	mov.f r1, r1	; to avoid return 0 from longjmp
	mov.eq r1, 1
	j.d [blink]
	mov r0,r1	; delay slot: value observed at the setjmp call site
.Lfe2:
	.size longjmp,.Lfe2-longjmp
|
stsp/newlib-ia16
| 2,336
|
newlib/libc/machine/moxie/setjmp.S
|
/* A setjmp.c for Moxie
Copyright (C) 2009 Anthony Green
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
# setjmp/longjmp for moxie. The jmpbuf looks like this:
#
# Register jmpbuf offset
# $r0 0x00
# $r1 0x04
# $r2 0x08
# $r3 0x0c
# $r4 0x10
# $r5 0x14
# $r6 0x18
# $r7 0x1c
# $r8 0x20
# $r9 0x24
# $r10 0x28
# $r11 0x2c
# $r12 0x30
# $r13 0x34
#   $sp       0x38
#   $fp       0x3c
.text
.global setjmp
.type setjmp,@function
setjmp:
	# $r0 = jmp_buf pointer.  Save $r0 itself at offset 0, then
	# $r1..$r13; $sp is stored at 0x38 and $fp at 0x3c (longjmp
	# restores from those same slots).
	st.l	($r0), $r0
	sto.l	0x04($r0), $r1
	sto.l	0x08($r0), $r2
	sto.l	0x0c($r0), $r3
	sto.l	0x10($r0), $r4
	sto.l	0x14($r0), $r5
	sto.l	0x18($r0), $r6
	sto.l	0x1c($r0), $r7
	sto.l	0x20($r0), $r8
	sto.l	0x24($r0), $r9
	sto.l	0x28($r0), $r10
	sto.l	0x2c($r0), $r11
	sto.l	0x30($r0), $r12
	sto.l	0x34($r0), $r13
	sto.l	0x38($r0), $sp
	sto.l	0x3c($r0), $fp
	# A direct call to setjmp returns 0.
	xor	$r0, $r0
	ret
.Lend1:
	.size setjmp,.Lend1-setjmp
.global longjmp
.type longjmp,@function
longjmp:
	# $r0 = jmp_buf, $r1 = val.  ISO C requires returning 1 when
	# val is 0, hence the duplicated restore paths below.
	# NOTE(review): on the val != 0 path, $r1 (val) is overwritten
	# by the restore from offset 0x04 before "mov $r0, $r1" -- the
	# value returned may not be the longjmp argument; confirm.
	ldi.l	$r2, 0x00
	cmp	$r1, $r2
	beq	.Lreturn1
	# val != 0: restore every saved register and return via $r0.
	ldo.l	$r1, 0x04($r0)
	ldo.l	$r2, 0x08($r0)
	ldo.l	$r3, 0x0c($r0)
	ldo.l	$r4, 0x10($r0)
	ldo.l	$r5, 0x14($r0)
	ldo.l	$r6, 0x18($r0)
	ldo.l	$r7, 0x1c($r0)
	ldo.l	$r8, 0x20($r0)
	ldo.l	$r9, 0x24($r0)
	ldo.l	$r10, 0x28($r0)
	ldo.l	$r11, 0x2c($r0)
	ldo.l	$r12, 0x30($r0)
	ldo.l	$r13, 0x34($r0)
	ldo.l	$sp, 0x38($r0)
	ldo.l	$fp, 0x3c($r0)
	mov	$r0, $r1
	ret
.Lreturn1:
	# val == 0: restore everything and return 1 instead.
	ldo.l	$r1, 0x04($r0)
	ldo.l	$r2, 0x08($r0)
	ldo.l	$r3, 0x0c($r0)
	ldo.l	$r4, 0x10($r0)
	ldo.l	$r5, 0x14($r0)
	ldo.l	$r6, 0x18($r0)
	ldo.l	$r7, 0x1c($r0)
	ldo.l	$r8, 0x20($r0)
	ldo.l	$r9, 0x24($r0)
	ldo.l	$r10, 0x28($r0)
	ldo.l	$r11, 0x2c($r0)
	ldo.l	$r12, 0x30($r0)
	ldo.l	$r13, 0x34($r0)
	ldo.l	$sp, 0x38($r0)
	ldo.l	$fp, 0x3c($r0)
	ldi.l	$r0, 0x01
	ret
.Lend2:
	.size longjmp,.Lend2-longjmp
|
stsp/newlib-ia16
| 3,956
|
newlib/libc/machine/tic6x/setjmp.S
|
;******************************************************************************
;* SETJMP v7.2.0I10181 *
;* *
;* Copyright (c) 1996-2010 Texas Instruments Incorporated *
;* http://www.ti.com/ *
;* *
;* Redistribution and use in source and binary forms, with or without *
;* modification, are permitted provided that the following conditions *
;* are met: *
;* *
;* Redistributions of source code must retain the above copyright *
;* notice, this list of conditions and the following disclaimer. *
;* *
;* Redistributions in binary form must reproduce the above copyright *
;* notice, this list of conditions and the following disclaimer in *
;* the documentation and/or other materials provided with the *
;* distribution. *
;* *
;* Neither the name of Texas Instruments Incorporated nor the names *
;* of its contributors may be used to endorse or promote products *
;* derived from this software without specific prior written *
;* permission. *
;* *
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *
;* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *
;* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *
;* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *
;* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
;* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *
;* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *
;* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
;* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
;* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
;* *
;******************************************************************************
.text
.globl setjmp
.type setjmp,%function
; int setjmp (jmp_buf);  A4 = jmp_buf pointer (mirrored into B4).
; Saves B3 (the return address) at offset 48 and the callee-saved
; register pairs A10-A15 / B10-B15 at offsets 0..44.  The RET is
; issued early so the remaining store packets execute in its branch
; delay slots; A4 is zeroed in the final packet so setjmp returns 0.
setjmp:
	MV	.L2X	A4, B4
||	STW	.D1T2	B3, *+A4(48)		; save return address
	STW	.D1T1	A10, *+A4(0)
||	STW	.D2T2	B10, *+B4(4)
||	RET	.S2	B3			; branch now, fill delay slots below
	STW	.D1T1	A11, *+A4(8)
||	STW	.D2T2	B11, *+B4(12)
	STW	.D1T1	A12, *+A4(16)
||	STW	.D2T2	B12, *+B4(20)
	STW	.D1T1	A13, *+A4(24)
||	STW	.D2T2	B13, *+B4(28)
	STW	.D1T1	A14, *+A4(32)
||	STW	.D2T2	B14, *+B4(36)
	STW	.D1T1	A15, *+A4(40)
||	STW	.D2T2	B15, *+B4(44)
||	ZERO	.S1	A4			; return value 0
	.size setjmp, . - setjmp
.globl longjmp
.type longjmp,%function
; void longjmp (jmp_buf, int);  A4 = jmp_buf, B4 = val.
; Reloads the saved return address into A3, restores the callee-saved
; pairs A10-A15 / B10-B15, and sets the return value with a predicated
; pair: [B2] (val != 0) A4 = val, [!B2] A4 = 1.  The trailing loads
; and the NOP 5 fill the RET branch delay slots.
longjmp:
	LDW	.D1T1	*+A4(48), A3		; saved return address
	MV	.L2X	A4, B6
||	MV	.S1	A4, A6
||	MV	.D2	B4, B2			; B2 = val, used as predicate
	LDW	.D1T1	*+A6(0), A10
||	LDW	.D2T2	*+B6(4), B10
||	[B2]	MV	.L1X	B4, A4		; val != 0: return val
||	[!B2]	MVK	.S1	1, A4		; val == 0: return 1
	LDW	.D1T1	*+A6(8), A11
||	LDW	.D2T2	*+B6(12), B11
	LDW	.D1T1	*+A6(16), A12
||	LDW	.D2T2	*+B6(20), B12
	LDW	.D1T1	*+A6(24), A13
||	LDW	.D2T2	*+B6(28), B13
	LDW	.D1T1	*+A6(32), A14
||	LDW	.D2T2	*+B6(36), B14
	LDW	.D1T1	*+A6(40), A15
||	LDW	.D2T2	*+B6(44), B15
||	RET	.S2X	A3			; branch back to setjmp's caller
	NOP	5				; remaining branch delay slots
	.size longjmp, . - longjmp
|
stsp/newlib-ia16
| 3,945
|
newlib/libc/machine/xscale/setjmp.S
|
/* This is a simple version of setjmp and longjmp.
Nick Clifton, Cygnus Solutions, 13 June 1997. */
/* ANSI concatenation macros. */
#define CONCAT(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a##b
#ifndef __USER_LABEL_PREFIX__
#error __USER_LABEL_PREFIX__ not defined
#endif
#define SYM(x) CONCAT (__USER_LABEL_PREFIX__, x)
#ifdef __ELF__
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#else
#define TYPE(x)
#define SIZE(x)
#endif
/* Arm/Thumb interworking support:
The interworking scheme expects functions to use a BX instruction
to return control to their parent. Since we need this code to work
in both interworked and non-interworked environments as well as with
older processors which do not have the BX instruction we do the
following:
Test the return address.
If the bottom bit is clear perform an "old style" function exit.
(We know that we are in ARM mode and returning to an ARM mode caller).
Otherwise use the BX instruction to perform the function exit.
We know that we will never attempt to perform the BX instruction on
an older processor, because that kind of processor will never be
interworked, and a return address with the bottom bit set will never
be generated.
In addition, we do not actually assemble the BX instruction as this would
require us to tell the assembler that the processor is an ARM7TDMI and
it would store this information in the binary. We want this binary to be
able to be linked with binaries compiled for older processors however, so
we do not want such information stored there.
If we are running using the APCS-26 convention however, then we never
test the bottom bit, because this is part of the processor status.
Instead we just do a normal return, since we know that we cannot be
returning to a Thumb caller - the Thumb does not support APCS-26.
Function entry is much simpler. If we are compiling for the Thumb we
just switch into ARM mode and then drop through into the rest of the
function. The function exit code will take care of the restore to
Thumb mode. */
#ifdef __APCS_26__
#define RET movs pc, lr
#else
#define RET tst lr, #1; \
moveq pc, lr ; \
.word 0xe12fff1e /* bx lr */
#endif
#ifdef __thumb__
#define MODE .thumb_func
.macro PROLOGUE name
.code 16
bx pc
nop
.code 32
SYM (.arm_start_of.\name):
.endm
#else
#define MODE .code 32
.macro PROLOGUE name
.endm
#endif
/* FUNC_START name: switch to .text, align, declare and define the
   global symbol `name`, then run the Thumb->ARM mode-switch prologue
   (a no-op for pure ARM builds -- see PROLOGUE above).  */
.macro FUNC_START name
	.text
	.align 2
	MODE
	.globl SYM (\name)
	TYPE (\name)
SYM (\name):
	PROLOGUE \name
.endm
/* FUNC_END name: emit the interworking-aware return sequence (RET)
   and record the ELF symbol size.  */
.macro FUNC_END name
	RET
	SIZE (\name)
.endm
/* --------------------------------------------------------------------
int setjmp (jmp_buf);
-------------------------------------------------------------------- */
FUNC_START setjmp
	/* a1 = jmp_buf pointer; stmea post-increments it past the
	   saved words.  */
	/* Save all the callee-preserved registers into the jump buffer.  */
	stmea	a1!, { v1-v7, fp, ip, sp, lr }
#if 0	/* Simulator does not cope with FP instructions yet.  */
#ifndef __SOFTFP__
	/* Save the floating point registers.  */
	sfmea	f4, 4, [a1]
#endif
#endif
	/* When setting up the jump buffer return 0.  */
	mov	a1, #0
	FUNC_END setjmp
/* --------------------------------------------------------------------
volatile void longjmp (jmp_buf, int);
-------------------------------------------------------------------- */
FUNC_START longjmp
	/* a1 = jmp_buf pointer, a2 = value to return from setjmp.  */
	/* If we have stack extension code it ought to be handled here.  */
	/* Restore the registers, retrieving the state when setjmp() was called.  */
	ldmfd	a1!, { v1-v7, fp, ip, sp, lr }
#if 0	/* Simulator does not cope with FP instructions yet.  */
#ifndef __SOFTFP__
	/* Restore floating point registers as well.  */
	lfmfd	f4, 4, [a1]
#endif
#endif
	/* Put the return value into the integer result register.
	   But if it is zero then return 1 instead.  */
	movs	a1, a2
	moveq	a1, #1
	/* FUNC_END's RET goes through the lr just reloaded, i.e. back
	   to the original setjmp call site.  */
	FUNC_END longjmp
|
stsp/newlib-ia16
| 1,098
|
newlib/libc/machine/m88k/setjmp.S
|
/* This is a simple version of setjmp and longjmp.
Ian Lance Taylor, Cygnus Support, 15 July 1993. */
/* We need to save the address of the return instruction, which is in
r1, as well as general register r14 through r25. If we are
compiling for the 88110 with the extended register file, we also
need to save registers x22 through x29. The jmp_buf should be 52
bytes long in the one case, 84 bytes in the other. */
/* int setjmp (jmp_buf); */
/* int setjmp (jmp_buf);  r2 = jmp_buf.
   Saves the return address (r1) at offset 0 and the callee-saved
   registers r14-r25 pairwise at offsets 4..44; the 88110 extended
   registers x22-x29 would follow at 52..76.
   NOTE(review): the jmp_buf pointer in r2 is left untouched and no
   register is explicitly zeroed before the return -- confirm the
   "setjmp returns 0" contract against the m88k ABI / original code.
   Directive normalized to `global' to match _longjmp below (was the
   inconsistent `globl').  */
	global _setjmp
_setjmp:
	st r1,r2,0		/* save return address */
	st.d r14,r2,4
	st.d r16,r2,12
	st.d r18,r2,20
	st.d r20,r2,28
	st.d r22,r2,36
	st.d r24,r2,44
#ifdef __m88110__
	/* These instructions are just a guess, and gas doesn't
	   support them anyhow.  */
	st.d x22,r2,52
	st.d x24,r2,60
	st.d x26,r2,68
	st.d x28,r2,76
#endif
	jmp r1
/* void longjmp (jmp_buf, int);  r2 = jmp_buf, r3 = val.
   Reloads the registers saved by _setjmp and jumps back through the
   saved return address.
   NOTE(review): val (r3) is never copied into a return register, so
   the setjmp call site does not observe the longjmp value -- this is
   the file's documented "simple version"; confirm before relying on
   the returned value.  */
	global _longjmp
_longjmp:
	ld r1,r2,0		/* reload the saved return address */
	ld.d r14,r2,4
	ld.d r16,r2,12
	ld.d r18,r2,20
	ld.d r20,r2,28
	ld.d r22,r2,36
	ld.d r24,r2,44
#ifdef __m88110__
	/* These instructions are just a guess, and gas doesn't
	   support them anyhow.  */
	ld.d x22,r2,52
	ld.d x24,r2,60
	ld.d x26,r2,68
	ld.d x28,r2,76
#endif
	jmp r1
|
stsp/newlib-ia16
| 7,735
|
newlib/libc/machine/aarch64/strlen.S
|
/* Copyright (c) 2013-2015, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strlen-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses, min page size 4k.
*/
/* To test the page crossing code path more thoroughly, compile with
-DTEST_PAGE_CROSS - this will force all calls through the slower
entry path. This option is not intended for production use. */
/* Arguments and results. */
#define srcin x0
#define len x0
/* Locals and temporaries. */
#define src x1
#define data1 x2
#define data2 x3
#define has_nul1 x4
#define has_nul2 x5
#define tmp1 x4
#define tmp2 x5
#define tmp3 x6
#define tmp4 x7
#define zeroones x8
#define L(l) .L ## l
/* def_fn f p2align=0: switch to .text, align to 2^p2align, and
   define the global function symbol \f.  */
.macro def_fn f p2align=0
	.text
	.p2align \p2align
	.global \f
	.type \f, %function
\f:
.endm
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. A faster check
(X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
false hits for characters 129..255. */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
#ifdef TEST_PAGE_CROSS
# define MIN_PAGE_SIZE 15
#else
# define MIN_PAGE_SIZE 4096
#endif
/* Since strings are short on average, we check the first 16 bytes
of the string for a NUL character. In order to do an unaligned ldp
safely we have to do a page cross check first. If there is a NUL
byte we calculate the length from the 2 8-byte words using
conditional select to reduce branch mispredictions (it is unlikely
strlen will be repeatedly called on strings with the same length).
If the string is longer than 16 bytes, we align src so don't need
further page cross checks, and process 32 bytes per iteration
using the fast NUL check. If we encounter non-ASCII characters,
fallback to a second loop using the full NUL check.
If the page cross check fails, we read 16 bytes from an aligned
address, remove any characters before the string, and continue
in the main loop using aligned loads. Since strings crossing a
page in the first 16 bytes are rare (probability of
16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.
AArch64 systems have a minimum page size of 4k. We don't bother
checking for larger page sizes - the cost of setting up the correct
page size is just not worth the extra gain from a small reduction in
the cases taking the slow path. Note that we only care about
whether the first fetch, which may be misaligned, crosses a page
boundary. */
/* size_t strlen (const char *s)  -- srcin/len alias x0.
   Checks the first 16 bytes with an unaligned ldp (guarded by the
   page-cross test), then processes 32 bytes per iteration using the
   fast ASCII-only test, falling back to the exact
   (x - 0x01..01) & ~x & 0x80..80 NUL test for non-ASCII data.
   Consistency fix: the loop re-entry label is now spelled with the
   L() macro, matching its branch `b L(page_cross_entry)` and every
   other label in this function (expands to the same .L symbol).  */
def_fn strlen p2align=6
	and	tmp1, srcin, MIN_PAGE_SIZE - 1
	mov	zeroones, REP8_01
	cmp	tmp1, MIN_PAGE_SIZE - 16
	b.gt	L(page_cross)		/* First 16-byte fetch would cross a page.  */
	ldp	data1, data2, [srcin]
#ifdef __AARCH64EB__
	/* For big-endian, carry propagation (if the final byte in the
	   string is 0x01) means we cannot use has_nul1/2 directly.
	   Since we expect strings to be small and early-exit,
	   byte-swap the data now so has_nul1/2 will be correct.  */
	rev	data1, data1
	rev	data2, data2
#endif
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(main_loop_entry)
	/* Enter with C = has_nul1 == 0.  */
	csel	has_nul1, has_nul1, has_nul2, cc
	mov	len, 8
	rev	has_nul1, has_nul1
	clz	tmp1, has_nul1
	csel	len, xzr, len, cc
	add	len, len, tmp1, lsr 3
	ret

	/* The inner loop processes 32 bytes per iteration and uses the fast
	   NUL check.  If we encounter non-ASCII characters, use a second
	   loop with the accurate NUL check.  */
	.p2align 4
L(main_loop_entry):
	bic	src, srcin, 15
	sub	src, src, 16
L(main_loop):
	ldp	data1, data2, [src, 32]!
L(page_cross_entry):
	sub	tmp1, data1, zeroones
	sub	tmp3, data2, zeroones
	orr	tmp2, tmp1, tmp3
	tst	tmp2, zeroones, lsl 7
	bne	1f
	ldp	data1, data2, [src, 16]
	sub	tmp1, data1, zeroones
	sub	tmp3, data2, zeroones
	orr	tmp2, tmp1, tmp3
	tst	tmp2, zeroones, lsl 7
	beq	L(main_loop)
	add	src, src, 16
1:
	/* The fast check failed, so do the slower, accurate NUL check.  */
	orr	tmp2, data1, REP8_7f
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(nonascii_loop)
	/* Enter with C = has_nul1 == 0.  */
L(tail):
#ifdef __AARCH64EB__
	/* For big-endian, carry propagation (if the final byte in the
	   string is 0x01) means we cannot use has_nul1/2 directly.  The
	   easiest way to get the correct byte is to byte-swap the data
	   and calculate the syndrome a second time.  */
	csel	data1, data1, data2, cc
	rev	data1, data1
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	bic	has_nul1, tmp1, tmp2
#else
	csel	has_nul1, has_nul1, has_nul2, cc
#endif
	sub	len, src, srcin
	rev	has_nul1, has_nul1
	add	tmp2, len, 8
	clz	tmp1, has_nul1
	csel	len, len, tmp2, cc
	add	len, len, tmp1, lsr 3
	ret

L(nonascii_loop):
	ldp	data1, data2, [src, 16]!
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	bne	L(tail)
	ldp	data1, data2, [src, 16]!
	sub	tmp1, data1, zeroones
	orr	tmp2, data1, REP8_7f
	sub	tmp3, data2, zeroones
	orr	tmp4, data2, REP8_7f
	bics	has_nul1, tmp1, tmp2
	bic	has_nul2, tmp3, tmp4
	ccmp	has_nul2, 0, 0, eq
	beq	L(nonascii_loop)
	b	L(tail)

	/* Load 16 bytes from [srcin & ~15] and force the bytes that precede
	   srcin to 0x7f, so we ignore any NUL bytes before the string.
	   Then continue in the aligned loop.  */
L(page_cross):
	bic	src, srcin, 15
	ldp	data1, data2, [src]
	lsl	tmp1, srcin, 3
	mov	tmp4, -1
#ifdef __AARCH64EB__
	/* Big-endian.  Early bytes are at MSB.  */
	lsr	tmp1, tmp4, tmp1	/* Shift (tmp1 & 63).  */
#else
	/* Little-endian.  Early bytes are at LSB.  */
	lsl	tmp1, tmp4, tmp1	/* Shift (tmp1 & 63).  */
#endif
	orr	tmp1, tmp1, REP8_80
	orn	data1, data1, tmp1
	orn	tmp2, data2, tmp1
	tst	srcin, 8
	csel	data1, data1, tmp4, eq
	csel	data2, data2, tmp2, eq
	b	L(page_cross_entry)

	.size	strlen, . - strlen
#endif
|
stsp/newlib-ia16
| 5,783
|
newlib/libc/machine/aarch64/strchr.S
|
/*
strchr - find a character in a string
Copyright (c) 2014, ARM Limited
All rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strchr-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
* Neon Available.
*/
/* Arguments and results. */
#define srcin x0
#define chrin w1
#define result x0
#define src x2
#define tmp1 x3
#define wtmp2 w4
#define tmp3 x5
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_nul1 v3
#define vhas_nul2 v4
#define vhas_chr1 v5
#define vhas_chr2 v6
#define vrepmask_0 v7
#define vrepmask_c v16
#define vend1 v17
#define vend2 v18
/* Core algorithm.
For each 32-byte hunk we calculate a 64-bit syndrome value, with
two bits per byte (LSB is always in bits 0 and 1, for both big
and little-endian systems). For each tuple, bit 0 is set iff
the relevant byte matched the requested character; bit 1 is set
iff the relevant byte matched the NUL end of string (we trigger
off bit0 for the special case of looking for NUL). Since the bits
in the syndrome reflect exactly the order in which things occur
in the original string a count_trailing_zeros() operation will
identify exactly which byte is causing the termination, and why. */
/* Locals and temporaries. */
/* def_fn f p2align=0: switch to .text, align to 2^p2align, and
   define the global function symbol \f.  */
.macro def_fn f p2align=0
	.text
	.p2align \p2align
	.global \f
	.type \f, %function
\f:
.endm
/* char *strchr (const char *s, int c) -- srcin = s, chrin = c.
   Returns a pointer to the first occurrence of (char) c, or NULL
   (xzr selected below) if the NUL terminator is reached first.  */
def_fn strchr
	/* Magic constant 0x40100401 to allow us to identify which lane
	   matches the requested byte.  Magic constant 0x80200802 used
	   similarly for NUL termination.  */
	mov	wtmp2, #0x0401
	movk	wtmp2, #0x4010, lsl #16
	dup	vrepchr.16b, chrin
	bic	src, srcin, #31		/* Work with aligned 32-byte hunks.  */
	dup	vrepmask_c.4s, wtmp2
	ands	tmp1, srcin, #31
	add	vrepmask_0.4s, vrepmask_c.4s, vrepmask_c.4s	/* equiv: lsl #1 */
	b.eq	.Lloop
	/* Input string is not 32-byte aligned.  Rather than forcing
	   the padding bytes to a safe value, we calculate the syndrome
	   for all the bytes, but then mask off those bits of the
	   syndrome that are related to the padding.  */
	ld1	{vdata1.16b, vdata2.16b}, [src], #32
	neg	tmp1, tmp1
	cmeq	vhas_nul1.16b, vdata1.16b, #0
	cmeq	vhas_chr1.16b, vdata1.16b, vrepchr.16b
	cmeq	vhas_nul2.16b, vdata2.16b, #0
	cmeq	vhas_chr2.16b, vdata2.16b, vrepchr.16b
	and	vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
	and	vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
	and	vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
	and	vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b
	orr	vend1.16b, vhas_nul1.16b, vhas_chr1.16b
	orr	vend2.16b, vhas_nul2.16b, vhas_chr2.16b
	lsl	tmp1, tmp1, #1
	addp	vend1.16b, vend1.16b, vend2.16b		// 256->128
	mov	tmp3, #~0
	addp	vend1.16b, vend1.16b, vend2.16b		// 128->64
	lsr	tmp1, tmp3, tmp1
	mov	tmp3, vend1.2d[0]
	bic	tmp1, tmp3, tmp1	// Mask padding bits.
	cbnz	tmp1, .Ltail
.Lloop:
	ld1	{vdata1.16b, vdata2.16b}, [src], #32
	cmeq	vhas_nul1.16b, vdata1.16b, #0
	cmeq	vhas_chr1.16b, vdata1.16b, vrepchr.16b
	cmeq	vhas_nul2.16b, vdata2.16b, #0
	cmeq	vhas_chr2.16b, vdata2.16b, vrepchr.16b
	/* Use a fast check for the termination condition.  */
	orr	vend1.16b, vhas_nul1.16b, vhas_chr1.16b
	orr	vend2.16b, vhas_nul2.16b, vhas_chr2.16b
	orr	vend1.16b, vend1.16b, vend2.16b
	addp	vend1.2d, vend1.2d, vend1.2d
	mov	tmp1, vend1.2d[0]
	cbz	tmp1, .Lloop
	/* Termination condition found.  Now need to establish exactly why
	   we terminated.  */
	and	vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
	and	vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
	and	vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
	and	vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b
	orr	vend1.16b, vhas_nul1.16b, vhas_chr1.16b
	orr	vend2.16b, vhas_nul2.16b, vhas_chr2.16b
	addp	vend1.16b, vend1.16b, vend2.16b		// 256->128
	addp	vend1.16b, vend1.16b, vend2.16b		// 128->64
	mov	tmp1, vend1.2d[0]
.Ltail:
	/* Count the trailing zeros, by bit reversing...  */
	rbit	tmp1, tmp1
	/* Re-bias source.  */
	sub	src, src, #32
	clz	tmp1, tmp1	/* And counting the leading zeros.  */
	/* Tmp1 is even if the target character was found first.  Otherwise
	   we've found the end of string and we weren't looking for NUL.  */
	tst	tmp1, #1
	add	result, src, tmp1, lsr #1
	csel	result, result, xzr, eq		/* NUL hit first -> return NULL.  */
	ret
	.size	strchr, . - strchr
#endif
|
stsp/newlib-ia16
| 6,689
|
newlib/libc/machine/aarch64/memset.S
|
/* Copyright (c) 2012-2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses
*
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memset-stub.c */
#else
#define dstin x0
#define val x1
#define valw w1
#define count x2
#define dst x3
#define dstend x4
#define tmp1 x5
#define tmp1w w5
#define tmp2 x6
#define tmp2w w6
#define zva_len x7
#define zva_lenw w7
#define L(l) .L ## l
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
def_fn memset p2align=6
dup v0.16B, valw
add dstend, dstin, count
cmp count, 96
b.hi L(set_long)
cmp count, 16
b.hs L(set_medium)
mov val, v0.D[0]
/* Set 0..15 bytes. */
tbz count, 3, 1f
str val, [dstin]
str val, [dstend, -8]
ret
nop
1: tbz count, 2, 2f
str valw, [dstin]
str valw, [dstend, -4]
ret
2: cbz count, 3f
strb valw, [dstin]
tbz count, 1, 3f
strh valw, [dstend, -2]
3: ret
/* Set 17..96 bytes. */
L(set_medium):
str q0, [dstin]
tbnz count, 6, L(set96)
str q0, [dstend, -16]
tbz count, 5, 1f
str q0, [dstin, 16]
str q0, [dstend, -32]
1: ret
.p2align 4
/* Set 64..96 bytes. Write 64 bytes from the start and
32 bytes from the end. */
L(set96):
str q0, [dstin, 16]
stp q0, q0, [dstin, 32]
stp q0, q0, [dstend, -32]
ret
.p2align 3
nop
L(set_long):
and valw, valw, 255
bic dst, dstin, 15
str q0, [dstin]
cmp count, 256
ccmp valw, 0, 0, cs
b.eq L(try_zva)
L(no_zva):
sub count, dstend, dst /* Count is 16 too large. */
add dst, dst, 16
sub count, count, 64 + 16 /* Adjust count and bias for loop. */
1: stp q0, q0, [dst], 64
stp q0, q0, [dst, -32]
L(tail64):
subs count, count, 64
b.hi 1b
2: stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.p2align 3
L(try_zva):
mrs tmp1, dczid_el0
tbnz tmp1w, 4, L(no_zva)
and tmp1w, tmp1w, 15
cmp tmp1w, 4 /* ZVA size is 64 bytes. */
b.ne L(zva_128)
/* Write the first and last 64 byte aligned block using stp rather
than using DC ZVA. This is faster on some cores.
*/
L(zva_64):
str q0, [dst, 16]
stp q0, q0, [dst, 32]
bic dst, dst, 63
stp q0, q0, [dst, 64]
stp q0, q0, [dst, 96]
sub count, dstend, dst /* Count is now 128 too large. */
sub count, count, 128+64+64 /* Adjust count and bias for loop. */
add dst, dst, 128
nop
1: dc zva, dst
add dst, dst, 64
subs count, count, 64
b.hi 1b
stp q0, q0, [dst, 0]
stp q0, q0, [dst, 32]
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.p2align 3
L(zva_128):
cmp tmp1w, 5 /* ZVA size is 128 bytes. */
b.ne L(zva_other)
str q0, [dst, 16]
stp q0, q0, [dst, 32]
stp q0, q0, [dst, 64]
stp q0, q0, [dst, 96]
bic dst, dst, 127
sub count, dstend, dst /* Count is now 128 too large. */
sub count, count, 128+128 /* Adjust count and bias for loop. */
add dst, dst, 128
1: dc zva, dst
add dst, dst, 128
subs count, count, 128
b.hi 1b
stp q0, q0, [dstend, -128]
stp q0, q0, [dstend, -96]
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
L(zva_other):
mov tmp2w, 4
lsl zva_lenw, tmp2w, tmp1w
add tmp1, zva_len, 64 /* Max alignment bytes written. */
cmp count, tmp1
blo L(no_zva)
sub tmp2, zva_len, 1
add tmp1, dst, zva_len
add dst, dst, 16
subs count, tmp1, dst /* Actual alignment bytes to write. */
bic tmp1, tmp1, tmp2 /* Aligned dc zva start address. */
beq 2f
1: stp q0, q0, [dst], 64
stp q0, q0, [dst, -32]
subs count, count, 64
b.hi 1b
2: mov dst, tmp1
sub count, dstend, tmp1 /* Remaining bytes to write. */
subs count, count, zva_len
b.lo 4f
3: dc zva, dst
add dst, dst, zva_len
subs count, count, zva_len
b.hs 3b
4: add count, count, zva_len
b L(tail64)
.size memset, . - memset
#endif
|
stsp/newlib-ia16
| 7,064
|
newlib/libc/machine/aarch64/memcpy.S
|
/* Copyright (c) 2012-2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses.
*
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memcpy-stub.c */
#else
#define dstin x0
#define src x1
#define count x2
#define dst x3
#define srcend x4
#define dstend x5
#define A_l x6
#define A_lw w6
#define A_h x7
#define A_hw w7
#define B_l x8
#define B_lw w8
#define B_h x9
#define C_l x10
#define C_h x11
#define D_l x12
#define D_h x13
#define E_l src
#define E_h count
#define F_l srcend
#define F_h dst
#define tmp1 x9
#define L(l) .L ## l
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* Copies are split into 3 main cases: small copies of up to 16 bytes,
medium copies of 17..96 bytes which are fully unrolled. Large copies
of more than 96 bytes align the destination and use an unrolled loop
processing 64 bytes per iteration.
Small and medium copies read all data before writing, allowing any
kind of overlap, and memmove tailcalls memcpy for these cases as
well as non-overlapping copies.
*/
def_fn memcpy p2align=6
prfm PLDL1KEEP, [src]
add srcend, src, count
add dstend, dstin, count
cmp count, 16
b.ls L(copy16)
cmp count, 96
b.hi L(copy_long)
/* Medium copies: 17..96 bytes. */
sub tmp1, count, 1
ldp A_l, A_h, [src]
tbnz tmp1, 6, L(copy96)
ldp D_l, D_h, [srcend, -16]
tbz tmp1, 5, 1f
ldp B_l, B_h, [src, 16]
ldp C_l, C_h, [srcend, -32]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstend, -32]
1:
stp A_l, A_h, [dstin]
stp D_l, D_h, [dstend, -16]
ret
.p2align 4
/* Small copies: 0..16 bytes. */
L(copy16):
cmp count, 8
b.lo 1f
ldr A_l, [src]
ldr A_h, [srcend, -8]
str A_l, [dstin]
str A_h, [dstend, -8]
ret
.p2align 4
1:
tbz count, 2, 1f
ldr A_lw, [src]
ldr A_hw, [srcend, -4]
str A_lw, [dstin]
str A_hw, [dstend, -4]
ret
/* Copy 0..3 bytes. Use a branchless sequence that copies the same
byte 3 times if count==1, or the 2nd byte twice if count==2. */
1:
cbz count, 2f
lsr tmp1, count, 1
ldrb A_lw, [src]
ldrb A_hw, [srcend, -1]
ldrb B_lw, [src, tmp1]
strb A_lw, [dstin]
strb B_lw, [dstin, tmp1]
strb A_hw, [dstend, -1]
2: ret
.p2align 4
/* Copy 64..96 bytes. Copy 64 bytes from the start and
32 bytes from the end. */
L(copy96):
ldp B_l, B_h, [src, 16]
ldp C_l, C_h, [src, 32]
ldp D_l, D_h, [src, 48]
ldp E_l, E_h, [srcend, -32]
ldp F_l, F_h, [srcend, -16]
stp A_l, A_h, [dstin]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstin, 32]
stp D_l, D_h, [dstin, 48]
stp E_l, E_h, [dstend, -32]
stp F_l, F_h, [dstend, -16]
ret
/* Align DST to 16 byte alignment so that we don't cross cache line
boundaries on both loads and stores. There are at least 96 bytes
to copy, so copy 16 bytes unaligned and then align. The loop
copies 64 bytes per iteration and prefetches one iteration ahead. */
.p2align 4
L(copy_long):
and tmp1, dstin, 15
bic dst, dstin, 15
ldp D_l, D_h, [src]
sub src, src, tmp1
add count, count, tmp1 /* Count is now 16 too large. */
ldp A_l, A_h, [src, 16]
stp D_l, D_h, [dstin]
ldp B_l, B_h, [src, 32]
ldp C_l, C_h, [src, 48]
ldp D_l, D_h, [src, 64]!
subs count, count, 128 + 16 /* Test and readjust count. */
b.ls 2f
1:
stp A_l, A_h, [dst, 16]
ldp A_l, A_h, [src, 16]
stp B_l, B_h, [dst, 32]
ldp B_l, B_h, [src, 32]
stp C_l, C_h, [dst, 48]
ldp C_l, C_h, [src, 48]
stp D_l, D_h, [dst, 64]!
ldp D_l, D_h, [src, 64]!
subs count, count, 64
b.hi 1b
/* Write the last full set of 64 bytes. The remainder is at most 64
bytes, so it is safe to always copy 64 bytes from the end even if
there is just 1 byte left. */
2:
ldp E_l, E_h, [srcend, -64]
stp A_l, A_h, [dst, 16]
ldp A_l, A_h, [srcend, -48]
stp B_l, B_h, [dst, 32]
ldp B_l, B_h, [srcend, -32]
stp C_l, C_h, [dst, 48]
ldp C_l, C_h, [srcend, -16]
stp D_l, D_h, [dst, 64]
stp E_l, E_h, [dstend, -64]
stp A_l, A_h, [dstend, -48]
stp B_l, B_h, [dstend, -32]
stp C_l, C_h, [dstend, -16]
ret
.size memcpy, . - memcpy
#endif
|
stsp/newlib-ia16
| 6,488
|
newlib/libc/machine/aarch64/strrchr.S
|
/*
strrchr - find last instance of a character in a string
Copyright (c) 2014, ARM Limited
All rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strchr-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
* Neon Available.
*/
/* Arguments and results. */
#define srcin x0
#define chrin w1
#define result x0
#define src x2
#define tmp1 x3
#define wtmp2 w4
#define tmp3 x5
#define src_match x6
#define src_offset x7
#define const_m1 x8
#define tmp4 x9
#define nul_match x10
#define chr_match x11
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_nul1 v3
#define vhas_nul2 v4
#define vhas_chr1 v5
#define vhas_chr2 v6
#define vrepmask_0 v7
#define vrepmask_c v16
#define vend1 v17
#define vend2 v18
/* Core algorithm.
For each 32-byte hunk we calculate a 64-bit syndrome value, with
two bits per byte (LSB is always in bits 0 and 1, for both big
and little-endian systems). For each tuple, bit 0 is set iff
the relevant byte matched the requested character; bit 1 is set
iff the relevant byte matched the NUL end of string (we trigger
off bit0 for the special case of looking for NUL). Since the bits
in the syndrome reflect exactly the order in which things occur
in the original string a count_trailing_zeros() operation will
identify exactly which byte is causing the termination, and why. */
/* Locals and temporaries. */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
def_fn strrchr
/* Magic constant 0x40100401 to allow us to identify which lane
matches the requested byte. Magic constant 0x80200802 used
similarly for NUL termination. */
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin
bic src, srcin, #31 /* Work with aligned 32-byte hunks. */
dup vrepmask_c.4s, wtmp2
mov src_offset, #0
ands tmp1, srcin, #31
add vrepmask_0.4s, vrepmask_c.4s, vrepmask_c.4s /* equiv: lsl #1 */
b.eq .Laligned
/* Input string is not 32-byte aligned. Rather than forcing
the padding bytes to a safe value, we calculate the syndrome
for all the bytes, but then mask off those bits of the
syndrome that are related to the padding. */
ld1 {vdata1.16b, vdata2.16b}, [src], #32
neg tmp1, tmp1
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b
addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b // 256->128
addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b // 128->64
addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr1.16b // 128->64
mov nul_match, vhas_nul1.2d[0]
lsl tmp1, tmp1, #1
mov const_m1, #~0
mov chr_match, vhas_chr1.2d[0]
lsr tmp3, const_m1, tmp1
bic nul_match, nul_match, tmp3 // Mask padding bits.
bic chr_match, chr_match, tmp3 // Mask padding bits.
cbnz nul_match, .Ltail
.Lloop:
cmp chr_match, #0
csel src_match, src, src_match, ne
csel src_offset, chr_match, src_offset, ne
.Laligned:
ld1 {vdata1.16b, vdata2.16b}, [src], #32
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
addp vend1.16b, vhas_nul1.16b, vhas_nul2.16b // 256->128
and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b
addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
addp vend1.16b, vend1.16b, vend1.16b // 128->64
addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr1.16b // 128->64
mov nul_match, vend1.2d[0]
mov chr_match, vhas_chr1.2d[0]
cbz nul_match, .Lloop
and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b
and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b
addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b
addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b
mov nul_match, vhas_nul1.2d[0]
.Ltail:
/* Work out exactly where the string ends. */
sub tmp4, nul_match, #1
eor tmp4, tmp4, nul_match
ands chr_match, chr_match, tmp4
/* And pick the values corresponding to the last match. */
csel src_match, src, src_match, ne
csel src_offset, chr_match, src_offset, ne
/* Count down from the top of the syndrome to find the last match. */
clz tmp3, src_offset
/* Src_match points beyond the word containing the match, so we can
simply subtract half the bit-offset into the syndrome. Because
we are counting down, we need to go back one more character. */
add tmp3, tmp3, #2
sub result, src_match, tmp3, lsr #1
/* But if the syndrome shows no match was found, then return NULL. */
cmp src_offset, #0
csel result, result, xzr, ne
ret
.size strrchr, . - strrchr
#endif
|
stsp/newlib-ia16
| 5,664
|
newlib/libc/machine/aarch64/strcmp.S
|
/* Copyright (c) 2012-2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* Assumptions:
*
* ARMv8-a, AArch64
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strcmp-stub.c */
#else
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define result x0
/* Internal variables. */
#define data1 x2
#define data1w w2
#define data2 x3
#define data2w w3
#define has_nul x4
#define diff x5
#define syndrome x6
#define tmp1 x7
#define tmp2 x8
#define tmp3 x9
#define zeroones x10
#define pos x11
/* Start of performance-critical section -- one 64B cache line. */
def_fn strcmp p2align=6
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
b.ne .Lmisaligned8
ands tmp1, src1, #7
b.ne .Lmutual_align
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
.Lloop_aligned:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned:
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
cbz syndrome, .Lloop_aligned
/* End of performance-critical section -- one 64B cache line. */
#ifndef __AARCH64EB__
rev syndrome, syndrome
rev data1, data1
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
clz pos, syndrome
rev data2, data2
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#else
/* For big-endian we cannot use the trick with the syndrome value
as carry-propagation can corrupt the upper bits if the trailing
bytes in the string contain 0x01. */
/* However, if there is no NUL byte in the dword, we can generate
the result directly. We can't just subtract the bytes as the
MSB might be significant. */
cbnz has_nul, 1f
cmp data1, data2
cset result, ne
cneg result, result, lo
ret
1:
/* Re-compute the NUL-byte detection, using a byte-reversed value. */
rev tmp3, data1
sub tmp1, tmp3, zeroones
orr tmp2, tmp3, #REP8_7f
bic has_nul, tmp1, tmp2
rev has_nul, has_nul
orr syndrome, diff, has_nul
clz pos, syndrome
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#endif
.Lmutual_align:
/* Sources are mutually aligned, but are not currently at an
alignment boundary. Round down the addresses and then mask off
the bytes that preceed the start point. */
bic src1, src1, #7
bic src2, src2, #7
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
ldr data1, [src1], #8
neg tmp1, tmp1 /* Bits to alignment -64. */
ldr data2, [src2], #8
mov tmp2, #~0
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#endif
orr data1, data1, tmp2
orr data2, data2, tmp2
b .Lstart_realigned
.Lmisaligned8:
/* We can do better than this. */
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
cmp data1w, #1
ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
b.eq .Lmisaligned8
sub result, data1, data2
ret
.size strcmp, .-strcmp
#endif
|
stsp/newlib-ia16
| 5,909
|
newlib/libc/machine/aarch64/strnlen.S
|
/* strnlen - calculate the length of a string with limit.
Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strlen-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
*/
/* Arguments and results. */
#define srcin x0
#define len x0
#define limit x1
/* Locals and temporaries. */
#define src x2
#define data1 x3
#define data2 x4
#define data2a x5
#define has_nul1 x6
#define has_nul2 x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define tmp4 x11
#define zeroones x12
#define pos x13
#define limit_wd x14
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
.text
.p2align 6
.Lstart:
/* Pre-pad to ensure critical loop begins an icache line. */
.rep 7
nop
.endr
/* Put this code here to avoid wasting more space with pre-padding. */
.Lhit_limit:
mov len, limit
ret
def_fn strnlen
cbz limit, .Lhit_limit
mov zeroones, #REP8_01
bic src, srcin, #15
ands tmp1, srcin, #15
b.ne .Lmisaligned
/* Calculate the number of full and partial words -1. */
sub limit_wd, limit, #1 /* Limit != 0, so no underflow. */
lsr limit_wd, limit_wd, #4 /* Convert to Qwords. */
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
/* The inner loop deals with two Dwords at a time. This has a
slightly higher start-up cost, but we should win quite quickly,
especially on cores with a high number of issue slots per
cycle, as we get much better parallelism out of the operations. */
/* Start of critial section -- keep to one 64Byte cache line. */
.Lloop:
ldp data1, data2, [src], #16
.Lrealigned:
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
bic has_nul1, tmp1, tmp2
bic has_nul2, tmp3, tmp4
subs limit_wd, limit_wd, #1
orr tmp1, has_nul1, has_nul2
ccmp tmp1, #0, #0, pl /* NZCV = 0000 */
b.eq .Lloop
/* End of critical section -- keep to one 64Byte cache line. */
orr tmp1, has_nul1, has_nul2
cbz tmp1, .Lhit_limit /* No null in final Qword. */
/* We know there's a null in the final Qword. The easiest thing
to do now is work out the length of the string and return
MIN (len, limit). */
sub len, src, srcin
cbz has_nul1, .Lnul_in_data2
#ifdef __AARCH64EB__
mov data2, data1
#endif
sub len, len, #8
mov has_nul2, has_nul1
.Lnul_in_data2:
#ifdef __AARCH64EB__
/* For big-endian, carry propagation (if the final byte in the
string is 0x01) means we cannot use has_nul directly. The
easiest way to get the correct byte is to byte-swap the data
and calculate the syndrome a second time. */
rev data2, data2
sub tmp1, data2, zeroones
orr tmp2, data2, #REP8_7f
bic has_nul2, tmp1, tmp2
#endif
sub len, len, #8
rev has_nul2, has_nul2
clz pos, has_nul2
add len, len, pos, lsr #3 /* Bits to bytes. */
cmp len, limit
csel len, len, limit, ls /* Return the lower value. */
ret
.Lmisaligned:
/* Deal with a partial first word.
We're doing two things in parallel here;
1) Calculate the number of words (but avoiding overflow if
limit is near ULONG_MAX) - to do this we need to work out
limit + tmp1 - 1 as a 65-bit value before shifting it;
2) Load and mask the initial data words - we force the bytes
before the ones we are interested in to 0xff - this ensures
early bytes will not hit any zero detection. */
sub limit_wd, limit, #1
neg tmp4, tmp1
cmp tmp1, #8
and tmp3, limit_wd, #15
lsr limit_wd, limit_wd, #4
mov tmp2, #~0
ldp data1, data2, [src], #16
lsl tmp4, tmp4, #3 /* Bytes beyond alignment -> bits. */
add tmp3, tmp3, tmp1
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp4 /* Shift (tmp1 & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp4 /* Shift (tmp1 & 63). */
#endif
add limit_wd, limit_wd, tmp3, lsr #4
orr data1, data1, tmp2
orr data2a, data2, tmp2
csinv data1, data1, xzr, le
csel data2, data2, data2a, le
b .Lrealigned
.size strnlen, . - .Lstart /* Include pre-padding in size. */
#endif
|
stsp/newlib-ia16
| 1,762
|
newlib/libc/machine/aarch64/stpcpy.S
|
/*
stpcpy - copy a string returning pointer to end.
Copyright (c) 2015 ARM Ltd.
All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* This is just a wrapper that uses strcpy code with appropriate
pre-defines. */
#define BUILD_STPCPY
#include "strcpy.S"
/* ---- repo stsp/newlib-ia16 -- file newlib/libc/machine/aarch64/strncmp.S (7,066 bytes) ---- */
/* Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strcmp-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
*/
/* Emit the standard prologue for a global function: switch to .text,
   align to 2^\p2align bytes, and define the global symbol \f.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* Byte-replicated constants for the parallel NUL-byte test:
   (X - 1) & ~(X | 0x7f) is non-zero iff some byte of X is zero.  */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define limit x2
#define result x0
/* Internal variables. */
#define data1 x3
#define data1w w3
#define data2 x4
#define data2w w4
#define has_nul x5
#define diff x6
#define syndrome x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define zeroones x11
#define pos x12
#define limit_wd x13
#define mask x14
#define endloop x15
.text
.p2align 6
.rep 7
nop /* Pad so that the loop below fits a cache line. */
.endr
/* int strncmp (const char *src1, const char *src2, size_t limit)
   AAPCS64: src1 = x0, src2 = x1, limit = x2; result returned in x0.
   Compares at most 'limit' bytes of two NUL-terminated strings a
   Dword (8 bytes) at a time when the sources are mutually 8-byte
   aligned, falling back to a byte loop otherwise.  */
def_fn strncmp
cbz limit, .Lret0 /* limit == 0: strings compare equal. */
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7 /* Mutually 8-byte alignable? */
b.ne .Lmisaligned8
ands tmp1, src1, #7 /* tmp1 = bytes past an 8-byte boundary. */
b.ne .Lmutual_align
/* Calculate the number of full and partial words -1. */
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
/* Start of performance-critical section -- one 64B cache line. */
.Lloop_aligned:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned:
subs limit_wd, limit_wd, #1 /* PL clear (MI) once the limit is hit. */
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
csinv endloop, diff, xzr, pl /* Last Dword or differences. */
bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp endloop, #0, #0, eq
b.eq .Lloop_aligned
/* End of performance-critical section -- one 64B cache line. */
/* Not reached the limit, must have found the end or a diff. */
tbz limit_wd, #63, .Lnot_limit /* limit_wd >= 0: exited early. */
/* Limit % 8 == 0 => all bytes significant. */
ands limit, limit, #7
b.eq .Lnot_limit
lsl limit, limit, #3 /* Bytes -> bits. */
mov mask, #~0 /* Mask for the insignificant bytes. */
#ifdef __AARCH64EB__
lsr mask, mask, limit
#else
lsl mask, mask, limit
#endif
bic data1, data1, mask /* Discard bytes beyond the limit. */
bic data2, data2, mask
/* Make sure that the NUL byte is marked in the syndrome. */
orr has_nul, has_nul, mask
.Lnot_limit:
orr syndrome, diff, has_nul
#ifndef __AARCH64EB__
rev syndrome, syndrome
rev data1, data1
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
clz pos, syndrome
rev data2, data2
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#else
/* For big-endian we cannot use the trick with the syndrome value
as carry-propagation can corrupt the upper bits if the trailing
bytes in the string contain 0x01. */
/* However, if there is no NUL byte in the dword, we can generate
the result directly. We can't just subtract the bytes as the
MSB might be significant. */
cbnz has_nul, 1f
cmp data1, data2
cset result, ne /* result = (data1 != data2) ? 1 : 0 ... */
cneg result, result, lo /* ... negated when data1 < data2. */
ret
1:
/* Re-compute the NUL-byte detection, using a byte-reversed value. */
rev tmp3, data1
sub tmp1, tmp3, zeroones
orr tmp2, tmp3, #REP8_7f
bic has_nul, tmp1, tmp2
rev has_nul, has_nul
orr syndrome, diff, has_nul
clz pos, syndrome
/* The MS-non-zero bit of the syndrome marks either the first bit
that is different, or the top bit of the first zero byte.
Shifting left now will bring the critical information into the
top bits. */
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
#endif
.Lmutual_align:
/* Sources are mutually aligned, but are not currently at an
alignment boundary. Round down the addresses and then mask off
the bytes that precede the start point.
We also need to adjust the limit calculations, but without
overflowing if the limit is near ULONG_MAX. */
bic src1, src1, #7
bic src2, src2, #7
ldr data1, [src1], #8
neg tmp3, tmp1, lsl #3 /* 64 - bits(bytes beyond align). */
ldr data2, [src2], #8
mov tmp2, #~0
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp3 /* Shift (tmp1 & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp3 /* Shift (tmp1 & 63). */
#endif
and tmp3, limit_wd, #7
lsr limit_wd, limit_wd, #3
/* Adjust the limit. Only low 3 bits used, so overflow irrelevant. */
add limit, limit, tmp1
add tmp3, tmp3, tmp1
orr data1, data1, tmp2 /* Force pre-start bytes to 0xff so they */
orr data2, data2, tmp2 /* never match NUL and always compare equal. */
add limit_wd, limit_wd, tmp3, lsr #3
b .Lstart_realigned
.Lret0:
mov result, #0
ret
.p2align 6
.Lmisaligned8:
sub limit, limit, #1 /* Bias so CS below means bytes remain. */
1:
/* Perhaps we can do better than this. */
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
subs limit, limit, #1
ccmp data1w, #1, #0, cs /* NZCV = 0b0000. */
ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
b.eq 1b /* Loop while in-limit, non-NUL and equal. */
sub result, data1, data2
ret
.size strncmp, . - strncmp
#endif
/* ---- repo stsp/newlib-ia16 -- file newlib/libc/machine/aarch64/memchr.S (5,428 bytes) ---- */
/*
* memchr - find a character in a memory zone
*
* Copyright (c) 2014, ARM Limited
* All rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the company nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memchr-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
* Neon Available.
*/
/* Arguments and results. */
#define srcin x0
#define chrin w1
#define cntin x2
#define result x0
#define src x3
#define tmp x4
#define wtmp2 w5
#define synd x6
#define soff x9
#define cntrem x10
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_chr1 v3
#define vhas_chr2 v4
#define vrepmask v5
#define vend v6
/*
 * Core algorithm:
 *
 * For each 32-byte chunk we calculate a 64-bit syndrome value, with two bits
 * per byte. For each tuple, bit 0 is set if the relevant byte matched the
 * requested character and bit 1 is not used (faster than using a 32bit
 * syndrome). Since the bits in the syndrome reflect exactly the order in which
 * things occur in the original string, counting trailing zeros allows to
 * identify exactly which byte has matched.
 */
/* Emit the standard prologue for a global function: switch to .text,
   align to 2^\p2align bytes, and define the global symbol \f.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* void *memchr (const void *srcin, int chrin, size_t cntin)
   AAPCS64: srcin = x0, chrin = w1, cntin = x2; result in x0.
   Scans 32 bytes per iteration with Neon compares, returning a pointer
   to the first byte equal to (unsigned char) chrin, or NULL.  */
def_fn memchr
/* Do not dereference srcin if no bytes to compare. */
cbz cntin, .Lzero_length
/*
 * Magic constant 0x40100401 allows us to identify which lane matches
 * the requested byte.
 */
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin /* Replicate the target byte to all lanes. */
/* Work with aligned 32-byte chunks */
bic src, srcin, #31
dup vrepmask.4s, wtmp2
ands soff, srcin, #31 /* soff = offset into the first chunk. */
and cntrem, cntin, #31
b.eq .Lloop
/*
 * Input string is not 32-byte aligned. We calculate the syndrome
 * value for the aligned 32 bytes block containing the first bytes
 * and mask the irrelevant part.
 */
ld1 {vdata1.16b, vdata2.16b}, [src], #32
sub tmp, soff, #32
adds cntin, cntin, tmp /* cntin -= (32 - soff) bytes just consumed. */
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */
mov synd, vend.2d[0]
/* Clear the soff*2 lower bits */
lsl tmp, soff, #1
lsr synd, synd, tmp
lsl synd, synd, tmp
/* The first block can also be the last */
b.ls .Lmasklast
/* Have we found something already? */
cbnz synd, .Ltail
.Lloop:
ld1 {vdata1.16b, vdata2.16b}, [src], #32
subs cntin, cntin, #32
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
/* If we're out of data we finish regardless of the result */
b.ls .Lend
/* Use a fast check for the termination condition */
orr vend.16b, vhas_chr1.16b, vhas_chr2.16b
addp vend.2d, vend.2d, vend.2d
mov synd, vend.2d[0]
/* We're not out of data, loop if we haven't found the character */
cbz synd, .Lloop
.Lend:
/* Termination condition found, let's calculate the syndrome value */
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b /* 256->128 */
addp vend.16b, vend.16b, vend.16b /* 128->64 */
mov synd, vend.2d[0]
/* Only do the clear for the last possible block */
b.hi .Ltail
.Lmasklast:
/* Clear the (32 - ((cntrem + soff) % 32)) * 2 upper bits */
add tmp, cntrem, soff
and tmp, tmp, #31
sub tmp, tmp, #32
neg tmp, tmp, lsl #1
lsl synd, synd, tmp
lsr synd, synd, tmp
.Ltail:
/* Count the trailing zeros using bit reversing */
rbit synd, synd
/* Compensate the last post-increment */
sub src, src, #32
/* Check that we have found a character */
cmp synd, #0
/* And count the leading zeros */
clz synd, synd
/* Compute the potential result */
add result, src, synd, lsr #1 /* Two syndrome bits per byte. */
/* Select result or NULL */
csel result, xzr, result, eq
ret
.Lzero_length:
mov result, #0
ret
.size memchr, . - memchr
#endif
/* ---- repo stsp/newlib-ia16 -- file newlib/libc/machine/aarch64/memcmp.S (4,878 bytes) ---- */
/* memcmp - compare memory
Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memcmp-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
*/
/* Emit the standard prologue for a global function: switch to .text,
   align to 2^\p2align bytes, and define the global symbol \f.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define limit x2
#define result x0
/* Internal variables. */
#define data1 x3
#define data1w w3
#define data2 x4
#define data2w w4
#define has_nul x5
#define diff x6
#define endloop x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define pos x11
#define limit_wd x12
#define mask x13
/* int memcmp (const void *src1, const void *src2, size_t limit)
   AAPCS64: src1 = x0, src2 = x1, limit = x2; result in x0.
   Compares a Dword (8 bytes) at a time when the sources are mutually
   8-byte aligned, one byte at a time otherwise.  */
def_fn memcmp p2align=6
cbz limit, .Lret0 /* limit == 0: buffers compare equal. */
eor tmp1, src1, src2
tst tmp1, #7 /* Mutually 8-byte alignable? */
b.ne .Lmisaligned8
ands tmp1, src1, #7 /* tmp1 = bytes past an 8-byte boundary. */
b.ne .Lmutual_align
add limit_wd, limit, #7
lsr limit_wd, limit_wd, #3 /* Round the limit up to Dwords. */
/* Start of performance-critical section -- one 64B cache line. */
.Lloop_aligned:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned:
subs limit_wd, limit_wd, #1
eor diff, data1, data2 /* Non-zero if differences found. */
csinv endloop, diff, xzr, ne /* Last Dword or differences. */
cbz endloop, .Lloop_aligned
/* End of performance-critical section -- one 64B cache line. */
/* Not reached the limit, must have found a diff. */
cbnz limit_wd, .Lnot_limit
/* Limit % 8 == 0 => all bytes significant. */
ands limit, limit, #7
b.eq .Lnot_limit
lsl limit, limit, #3 /* Bytes -> bits. */
mov mask, #~0 /* Mask for the insignificant bytes. */
#ifdef __AARCH64EB__
lsr mask, mask, limit
#else
lsl mask, mask, limit
#endif
bic data1, data1, mask /* Discard bytes beyond the limit. */
bic data2, data2, mask
orr diff, diff, mask
.Lnot_limit:
#ifndef __AARCH64EB__
rev diff, diff
rev data1, data1
rev data2, data2
#endif
/* The MS-non-zero bit of DIFF marks either the first bit
that is different, or the end of the significant data.
Shifting left now will bring the critical information into the
top bits. */
clz pos, diff
lsl data1, data1, pos
lsl data2, data2, pos
/* But we need to zero-extend (char is unsigned) the value and then
perform a signed 32-bit subtraction. */
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
.Lmutual_align:
/* Sources are mutually aligned, but are not currently at an
alignment boundary. Round down the addresses and then mask off
the bytes that precede the start point. */
bic src1, src1, #7
bic src2, src2, #7
add limit, limit, tmp1 /* Adjust the limit for the extra. */
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
ldr data1, [src1], #8
neg tmp1, tmp1 /* Bits to alignment -64. */
ldr data2, [src2], #8
mov tmp2, #~0
#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#else
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#endif
add limit_wd, limit, #7
orr data1, data1, tmp2 /* Force pre-start bytes to 0xff so they */
orr data2, data2, tmp2 /* always compare equal. */
lsr limit_wd, limit_wd, #3
b .Lstart_realigned
.Lret0:
mov result, #0
ret
.p2align 6
.Lmisaligned8:
sub limit, limit, #1 /* Bias so CS below means bytes remain. */
1:
/* Perhaps we can do better than this. */
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
subs limit, limit, #1
ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
b.eq 1b /* Loop while in-limit and equal. */
sub result, data1, data2
ret
.size memcmp, . - memcmp
#endif
/* ---- repo stsp/newlib-ia16 -- file newlib/libc/machine/aarch64/strchrnul.S (5,109 bytes) ---- */
/*
strchrnul - find a character or nul in a string
Copyright (c) 2014, ARM Limited
All rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strchrnul-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64
* Neon Available.
*/
/* Arguments and results. */
#define srcin x0
#define chrin w1
#define result x0
#define src x2
#define tmp1 x3
#define wtmp2 w4
#define tmp3 x5
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_nul1 v3
#define vhas_nul2 v4
#define vhas_chr1 v5
#define vhas_chr2 v6
#define vrepmask v7
#define vend1 v16
/* Core algorithm.
For each 32-byte hunk we calculate a 64-bit syndrome value, with
two bits per byte (LSB is always in bits 0 and 1, for both big
and little-endian systems). For each tuple, bit 0 is set iff
the relevant byte matched the requested character or nul. Since the
bits in the syndrome reflect exactly the order in which things occur
in the original string a count_trailing_zeros() operation will
identify exactly which byte is causing the termination. */
/* Locals and temporaries. */
/* Emit the standard prologue for a global function: switch to .text,
   align to 2^\p2align bytes, and define the global symbol \f.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* char *strchrnul (const char *srcin, int chrin)
   AAPCS64: srcin = x0, chrin = w1; result in x0.
   Returns a pointer to the first occurrence of (char) chrin or to the
   terminating NUL, whichever comes first; never returns NULL.  */
def_fn strchrnul
/* Magic constant 0x40100401 to allow us to identify which lane
matches the termination condition. */
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin /* Replicate the target byte to all lanes. */
bic src, srcin, #31 /* Work with aligned 32-byte hunks. */
dup vrepmask.4s, wtmp2
ands tmp1, srcin, #31 /* tmp1 = offset into the first hunk. */
b.eq .Lloop
/* Input string is not 32-byte aligned. Rather than forcing
the padding bytes to a safe value, we calculate the syndrome
for all the bytes, but then mask off those bits of the
syndrome that are related to the padding. */
ld1 {vdata1.16b, vdata2.16b}, [src], #32
neg tmp1, tmp1
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
orr vhas_chr1.16b, vhas_chr1.16b, vhas_nul1.16b
orr vhas_chr2.16b, vhas_chr2.16b, vhas_nul2.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
lsl tmp1, tmp1, #1 /* Two syndrome bits per padding byte. */
addp vend1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
mov tmp3, #~0
addp vend1.16b, vend1.16b, vend1.16b // 128->64
lsr tmp1, tmp3, tmp1
mov tmp3, vend1.2d[0]
bic tmp1, tmp3, tmp1 // Mask padding bits.
cbnz tmp1, .Ltail
.Lloop:
ld1 {vdata1.16b, vdata2.16b}, [src], #32
cmeq vhas_nul1.16b, vdata1.16b, #0
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_nul2.16b, vdata2.16b, #0
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
/* Use a fast check for the termination condition. */
orr vhas_chr1.16b, vhas_nul1.16b, vhas_chr1.16b
orr vhas_chr2.16b, vhas_nul2.16b, vhas_chr2.16b
orr vend1.16b, vhas_chr1.16b, vhas_chr2.16b
addp vend1.2d, vend1.2d, vend1.2d
mov tmp1, vend1.2d[0]
cbz tmp1, .Lloop
/* Termination condition found. Now need to establish exactly why
we terminated. */
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
addp vend1.16b, vend1.16b, vend1.16b // 128->64
mov tmp1, vend1.2d[0]
.Ltail:
/* Count the trailing zeros, by bit reversing... */
rbit tmp1, tmp1
/* Re-bias source. */
sub src, src, #32
clz tmp1, tmp1 /* ... and counting the leading zeros. */
/* tmp1 is twice the offset into the fragment. */
add result, src, tmp1, lsr #1
ret
.size strchrnul, . - strchrnul
#endif
/* ---- repo stsp/newlib-ia16 -- file newlib/libc/machine/aarch64/strcpy.S (10,507 bytes) ---- */
/*
strcpy/stpcpy - copy a string returning pointer to start/end.
Copyright (c) 2013, 2014, 2015 ARM Ltd.
All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the company nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See strchr-stub.c */
#else
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses, min page size 4k.
*/
/* To build as stpcpy, define BUILD_STPCPY before compiling this file.
To test the page crossing code path more thoroughly, compile with
-DSTRCPY_TEST_PAGE_CROSS - this will force all copies through the slower
entry path. This option is not intended for production use. */
/* Arguments and results. */
#define dstin x0
#define srcin x1
/* Locals and temporaries. */
#define src x2
#define dst x3
#define data1 x4
#define data1w w4
#define data2 x5
#define data2w w5
#define has_nul1 x6
#define has_nul2 x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define tmp4 x11
#define zeroones x12
#define data1a x13
#define data2a x14
#define pos x15
#define len x16
#define to_align x17
#ifdef BUILD_STPCPY
#define STRCPY stpcpy
#else
#define STRCPY strcpy
#endif
/* Emit the standard prologue for a global function: switch to .text,
   align to 2^\p2align bytes, and define the global symbol \f.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* NUL detection works on the principle that (X - 1) & (~X) & 0x80
(=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
can be done in parallel across the entire word. */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* AArch64 systems have a minimum page size of 4k. We can do a quick
page size check for crossing this boundary on entry and if we
do not, then we can short-circuit much of the entry code. We
expect early page-crossing strings to be rare (probability of
16/MIN_PAGE_SIZE ~= 0.4%), so the branch should be quite
predictable, even with random strings.
We don't bother checking for larger page sizes, the cost of setting
up the correct page size is just not worth the extra gain from
a small reduction in the cases taking the slow path. Note that
we only care about whether the first fetch, which may be
misaligned, crosses a page boundary - after that we move to aligned
fetches for the remainder of the string. */
#ifdef STRCPY_TEST_PAGE_CROSS
/* Make everything that isn't Qword aligned look like a page cross. */
#define MIN_PAGE_P2 4
#else
#define MIN_PAGE_P2 12
#endif
#define MIN_PAGE_SIZE (1 << MIN_PAGE_P2)
/* char *strcpy (char *dstin, const char *srcin)  -- or, with
   BUILD_STPCPY defined, char *stpcpy (char *dstin, const char *srcin).
   AAPCS64: dstin = x0, srcin = x1; result in x0 (dstin for strcpy,
   pointer to the terminating NUL for stpcpy).  */
def_fn STRCPY p2align=6
/* For moderately short strings, the fastest way to do the copy is to
calculate the length of the string in the same way as strlen, then
essentially do a memcpy of the result. This avoids the need for
multiple byte copies and further means that by the time we
reach the bulk copy loop we know we can always use DWord
accesses. We expect strcpy to rarely be called repeatedly
with the same source string, so branch prediction is likely to
always be difficult - we mitigate against this by preferring
conditional select operations over branches whenever this is
feasible. */
and tmp2, srcin, #(MIN_PAGE_SIZE - 1)
mov zeroones, #REP8_01
and to_align, srcin, #15
cmp tmp2, #(MIN_PAGE_SIZE - 16)
neg tmp1, to_align
/* The first fetch will straddle a (possible) page boundary iff
srcin + 15 causes bit[MIN_PAGE_P2] to change value. A 16-byte
aligned string will never fail the page align check, so will
always take the fast path. */
b.gt .Lpage_cross
.Lpage_cross_ok:
ldp data1, data2, [srcin] /* Safe: fetch does not cross a page. */
#ifdef __AARCH64EB__
/* Because we expect the end to be found within 16 characters
(profiling shows this is the most common case), it's worth
swapping the bytes now to save having to recalculate the
termination syndrome later. We preserve data1 and data2
so that we can re-use the values later on. */
rev tmp2, data1
sub tmp1, tmp2, zeroones
orr tmp2, tmp2, #REP8_7f
bics has_nul1, tmp1, tmp2
b.ne .Lfp_le8
rev tmp4, data2
sub tmp3, tmp4, zeroones
orr tmp4, tmp4, #REP8_7f
#else
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
bics has_nul1, tmp1, tmp2 /* Non-zero iff NUL in the first Dword. */
b.ne .Lfp_le8
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
#endif
bics has_nul2, tmp3, tmp4 /* Non-zero iff NUL in the second Dword. */
b.eq .Lbulk_entry
/* The string is short (<=16 bytes). We don't know exactly how
short though, yet. Work out the exact length so that we can
quickly select the optimal copy strategy. */
.Lfp_gt8:
/* NUL lies in the second Dword: 9..16 bytes to copy in total. */
rev has_nul2, has_nul2
clz pos, has_nul2
mov tmp2, #56
add dst, dstin, pos, lsr #3 /* Bits to bytes. */
sub pos, tmp2, pos
#ifdef __AARCH64EB__
lsr data2, data2, pos
#else
lsl data2, data2, pos
#endif
str data2, [dst, #1] /* Overlapping store; final byte is the NUL. */
str data1, [dstin]
#ifdef BUILD_STPCPY
add dstin, dst, #8 /* stpcpy returns &dst[strlen(src)]. */
#endif
ret
.Lfp_le8:
/* NUL lies in the first Dword: at most 8 bytes to copy. */
rev has_nul1, has_nul1
clz pos, has_nul1
add dst, dstin, pos, lsr #3 /* Bits to bytes. */
subs tmp2, pos, #24 /* Pos in bits. */
b.lt .Lfp_lt4
#ifdef __AARCH64EB__
mov tmp2, #56
sub pos, tmp2, pos
lsr data2, data1, pos
lsr data1, data1, #32
#else
lsr data2, data1, tmp2
#endif
/* 4->7 bytes to copy. */
str data2w, [dst, #-3] /* Overlapping word stores cover the tail. */
str data1w, [dstin]
#ifdef BUILD_STPCPY
mov dstin, dst
#endif
ret
.Lfp_lt4:
cbz pos, .Lfp_lt2 /* pos == 0: the very first byte is NUL. */
/* 2->3 bytes to copy. */
#ifdef __AARCH64EB__
lsr data1, data1, #48
#endif
strh data1w, [dstin]
/* Fall-through, one byte (max) to go. */
.Lfp_lt2:
/* Null-terminated string. Last character must be zero! */
strb wzr, [dst]
#ifdef BUILD_STPCPY
mov dstin, dst
#endif
ret
.p2align 6
/* Aligning here ensures that the entry code and main loop all lies
within one 64-byte cache line. */
.Lbulk_entry:
sub to_align, to_align, #16
stp data1, data2, [dstin]
sub src, srcin, to_align /* Advance to the next 16-byte boundary. */
sub dst, dstin, to_align
b .Lentry_no_page_cross
/* The inner loop deals with two Dwords at a time. This has a
slightly higher start-up cost, but we should win quite quickly,
especially on cores with a high number of issue slots per
cycle, as we get much better parallelism out of the operations. */
.Lmain_loop:
stp data1, data2, [dst], #16
.Lentry_no_page_cross:
ldp data1, data2, [src], #16
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
bic has_nul1, tmp1, tmp2
bics has_nul2, tmp3, tmp4
ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
b.eq .Lmain_loop /* Loop while no NUL in either Dword. */
/* Since we know we are copying at least 16 bytes, the fastest way
to deal with the tail is to determine the location of the
trailing NUL, then (re)copy the 16 bytes leading up to that. */
cmp has_nul1, #0
#ifdef __AARCH64EB__
/* For big-endian, carry propagation (if the final byte in the
string is 0x01) means we cannot use has_nul directly. The
easiest way to get the correct byte is to byte-swap the data
and calculate the syndrome a second time. */
csel data1, data1, data2, ne
rev data1, data1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
bic has_nul1, tmp1, tmp2
#else
csel has_nul1, has_nul1, has_nul2, ne
#endif
rev has_nul1, has_nul1
clz pos, has_nul1
add tmp1, pos, #72 /* NUL in data2: 8 extra bytes past data1. */
add pos, pos, #8
csel pos, pos, tmp1, ne
add src, src, pos, lsr #3
add dst, dst, pos, lsr #3
ldp data1, data2, [src, #-32] /* Re-copy the final 16 bytes... */
stp data1, data2, [dst, #-16] /* ...so the stores end on the NUL. */
#ifdef BUILD_STPCPY
sub dstin, dst, #1
#endif
ret
.Lpage_cross:
bic src, srcin, #15
/* Start by loading two words at [srcin & ~15], then forcing the
bytes that precede srcin to 0xff. This means they never look
like termination bytes. */
ldp data1, data2, [src]
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
tst to_align, #7
csetm tmp2, ne
#ifdef __AARCH64EB__
lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#else
lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
#endif
orr data1, data1, tmp2
orr data2a, data2, tmp2
cmp to_align, #8
csinv data1, data1, xzr, lt /* Select which Dword the 0xff fill */
csel data2, data2, data2a, lt /* mask actually applies to. */
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
bic has_nul1, tmp1, tmp2
bics has_nul2, tmp3, tmp4
ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
b.eq .Lpage_cross_ok /* No NUL: safe to re-fetch from srcin. */
/* We now need to make data1 and data2 look like they've been
loaded directly from srcin. Do a rotate on the 128-bit value. */
lsl tmp1, to_align, #3 /* Bytes->bits. */
neg tmp2, to_align, lsl #3
#ifdef __AARCH64EB__
lsl data1a, data1, tmp1
lsr tmp4, data2, tmp2
lsl data2, data2, tmp1
orr tmp4, tmp4, data1a
cmp to_align, #8
csel data1, tmp4, data2, lt
rev tmp2, data1
rev tmp4, data2
sub tmp1, tmp2, zeroones
orr tmp2, tmp2, #REP8_7f
sub tmp3, tmp4, zeroones
orr tmp4, tmp4, #REP8_7f
#else
lsr data1a, data1, tmp1
lsl tmp4, data2, tmp2
lsr data2, data2, tmp1
orr tmp4, tmp4, data1a
cmp to_align, #8
csel data1, tmp4, data2, lt
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
#endif
bic has_nul1, tmp1, tmp2
cbnz has_nul1, .Lfp_le8
bic has_nul2, tmp3, tmp4
b .Lfp_gt8
.size STRCPY, . - STRCPY
#endif
|
stsp/newlib-ia16
| 5,439
|
newlib/libc/machine/aarch64/memmove.S
|
/* Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* ARMv8-a, AArch64, unaligned accesses
*/
#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
/* See memmove-stub.c */
#else
/* def_fn F [p2align]: emit a global function symbol F in .text,
   aligned on a 2^p2align byte boundary, with ELF type %function.  */
	.macro def_fn f p2align=0
	.text
	.p2align \p2align
	.global \f
	.type \f, %function
\f:
	.endm
/* Parameters and result. */
#define dstin x0
#define src x1
#define count x2
#define srcend x3
#define dstend x4
#define tmp1 x5
#define A_l x6
#define A_h x7
#define B_l x8
#define B_h x9
#define C_l x10
#define C_h x11
#define D_l x12
#define D_h x13
#define E_l count
#define E_h tmp1
/* All memmoves up to 96 bytes are done by memcpy as it supports overlaps.
Larger backwards copies are also handled by memcpy. The only remaining
case is forward large copies. The destination is aligned, and an
unrolled loop processes 64 bytes per iteration.
*/
/* void *memmove (void *dstin, const void *src, size_t count)
   AAPCS64: x0 = dstin, x1 = src, x2 = count; returns x0 = dstin.
   Small copies (<= 96 bytes) and copies with no forward overlap are
   delegated to memcpy; the remaining case (large, forward-overlapping)
   is copied backwards from the end, 64 bytes per iteration.  */
def_fn memmove, 6
	sub	tmp1, dstin, src	/* tmp1 = dst - src (mod 2^64).  */
	cmp	count, 96
	ccmp	tmp1, count, 2, hi	/* if count > 96, test dst-src vs count.  */
	b.hs	memcpy			/* small copy or no forward overlap.  */
	cbz	tmp1, 3f		/* dst == src: nothing to do.  */
	add	dstend, dstin, count
	add	srcend, src, count

	/* Align dstend to 16 byte alignment so that we don't cross cache line
	   boundaries on both loads and stores.  There are at least 96 bytes
	   to copy, so copy 16 bytes unaligned and then align.  The loop
	   copies 64 bytes per iteration and prefetches one iteration ahead.  */

	and	tmp1, dstend, 15	/* bytes above 16-byte alignment.  */
	ldp	D_l, D_h, [srcend, -16]
	sub	srcend, srcend, tmp1
	sub	count, count, tmp1
	ldp	A_l, A_h, [srcend, -16]
	stp	D_l, D_h, [dstend, -16]	/* unaligned store of the tail.  */
	ldp	B_l, B_h, [srcend, -32]
	ldp	C_l, C_h, [srcend, -48]
	ldp	D_l, D_h, [srcend, -64]!
	sub	dstend, dstend, tmp1	/* dstend is now 16-byte aligned.  */
	subs	count, count, 128	/* two iterations' worth in flight.  */
	b.ls	2f
	nop
1:	/* Main loop: store the 64 bytes loaded on the previous iteration
	   while loading the next 64, moving backwards through the buffer.  */
	stp	A_l, A_h, [dstend, -16]
	ldp	A_l, A_h, [srcend, -16]
	stp	B_l, B_h, [dstend, -32]
	ldp	B_l, B_h, [srcend, -32]
	stp	C_l, C_h, [dstend, -48]
	ldp	C_l, C_h, [srcend, -48]
	stp	D_l, D_h, [dstend, -64]!
	ldp	D_l, D_h, [srcend, -64]!
	subs	count, count, 64
	b.hi	1b

	/* Write the last full set of 64 bytes.  The remainder is at most 64
	   bytes, so it is safe to always copy 64 bytes from the start even if
	   there is just 1 byte left.  */
2:
	ldp	E_l, E_h, [src, 48]
	stp	A_l, A_h, [dstend, -16]
	ldp	A_l, A_h, [src, 32]
	stp	B_l, B_h, [dstend, -32]
	ldp	B_l, B_h, [src, 16]
	stp	C_l, C_h, [dstend, -48]
	ldp	C_l, C_h, [src]
	stp	D_l, D_h, [dstend, -64]
	stp	E_l, E_h, [dstin, 48]	/* head stores from registers loaded above.  */
	stp	A_l, A_h, [dstin, 32]
	stp	B_l, B_h, [dstin, 16]
	stp	C_l, C_h, [dstin]
3:	ret

	.size	memmove, . - memmove
#endif
|
stsp/newlib-ia16
| 2,524
|
newlib/libc/machine/aarch64/setjmp.S
|
/*
Copyright (c) 2011, 2012 ARM Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define GPR_LAYOUT \
REG_PAIR (x19, x20, 0); \
REG_PAIR (x21, x22, 16); \
REG_PAIR (x23, x24, 32); \
REG_PAIR (x25, x26, 48); \
REG_PAIR (x27, x28, 64); \
REG_PAIR (x29, x30, 80); \
REG_ONE (x16, 96)
#define FPR_LAYOUT \
REG_PAIR ( d8, d9, 112); \
REG_PAIR (d10, d11, 128); \
REG_PAIR (d12, d13, 144); \
REG_PAIR (d14, d15, 160);
// int setjmp (jmp_buf)
.global setjmp
.type setjmp, %function
setjmp:
	// Copy sp into x16 first: sp cannot be the source of a plain
	// str/stp, and GPR_LAYOUT saves x16 at offset 96 for longjmp.
	mov	x16, sp
#define REG_PAIR(REG1, REG2, OFFS)	stp REG1, REG2, [x0, OFFS]
#define REG_ONE(REG1, OFFS)		str REG1, [x0, OFFS]
	// Save the AAPCS64 callee-saved state into the jmp_buf at x0:
	// x19-x28, fp/lr (x29/x30), sp (via x16), and d8-d15.
	GPR_LAYOUT
	FPR_LAYOUT
#undef REG_PAIR
#undef REG_ONE
	mov	w0, #0			// a direct call to setjmp returns 0
	ret
	.size	setjmp, .-setjmp
// void longjmp (jmp_buf, int) __attribute__ ((noreturn))
.global longjmp
.type longjmp, %function
longjmp:
#define REG_PAIR(REG1, REG2, OFFS)	ldp REG1, REG2, [x0, OFFS]
#define REG_ONE(REG1, OFFS)		ldr REG1, [x0, OFFS]
	// Reload everything setjmp saved from the jmp_buf at x0,
	// including x30 (the resume address) and x16 (the saved sp).
	GPR_LAYOUT
	FPR_LAYOUT
#undef REG_PAIR
#undef REG_ONE
	mov	sp, x16
	// Return value seen at the setjmp site: w1, or 1 if w1 == 0
	// (ISO C forbids longjmp making setjmp appear to return 0).
	cmp	w1, #0
	cinc	w0, w1, eq
	// use br not ret, as ret is guaranteed to mispredict
	br	x30
	.size	longjmp, .-longjmp
|
stsp/newlib-ia16
| 1,046
|
newlib/libc/machine/ft32/memset.S
|
/* A memset.c for FT32
Copyright (C) 2014 FTDI (support@ftdichip.com)
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
.text
.global memset
.type memset,@function
# void *memset (void *dst, int c, size_t n)
# In:  $r0 = dst, $r1 = fill byte, $r2 = n.
# Out: $r0 = dst (the ORIGINAL pointer, as ISO C requires).
# The FT32 memset.b instruction fills at most 32764 bytes per use, so
# large fills are done in 32764-byte chunks with the pointer advanced
# between chunks.
memset:
	move.l	$r4,$r0		# BUG FIX: remember dst.  The chunk loop below
				# advances $r0, so the old code returned
				# dst + k*32764 for n > 32764 instead of dst.
				# $r4 is call-clobbered here (setjmp preserves
				# only $r5..$r31) -- confirm against the ABI.
	ldk	$r3,32764	# max byte count of one memset.b
1:
	cmp	$r2,$r3
	jmpc	lte,2f		# remaining <= one chunk: do the final fill
	memset.b $r0,$r1,$r3	# fill one full chunk
	add	$r0,$r0,$r3
	sub	$r2,$r2,$r3
	jmp	1b
2:
	memset.b $r0,$r1,$r2	# fill the remainder
	move.l	$r0,$r4		# return value = original dst
	return
.Lend2:
	.size	memset,.Lend2-memset
|
stsp/newlib-ia16
| 1,074
|
newlib/libc/machine/ft32/memcpy.S
|
/* A memcpy.c for FT32
Copyright (C) 2014 FTDI (support@ftdichip.com)
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
.text
.global memcpy
.type memcpy,@function
# void *memcpy (void *dst, const void *src, size_t n)
# In:  $r0 = dst, $r1 = src, $r2 = n.
# Out: $r0 = dst (the ORIGINAL pointer, as ISO C requires).
# The FT32 memcpy.b instruction copies at most 32764 bytes per use, so
# large copies are done in 32764-byte chunks with both pointers advanced
# between chunks.
memcpy:
	move.l	$r4,$r0		# BUG FIX: remember dst.  The chunk loop below
				# advances $r0, so the old code returned
				# dst + k*32764 for n > 32764 instead of dst.
				# $r4 is call-clobbered here (setjmp preserves
				# only $r5..$r31) -- confirm against the ABI.
	ldk	$r3,32764	# max byte count of one memcpy.b
1:
	cmp	$r2,$r3
	jmpc	lte,2f		# remaining <= one chunk: do the final copy
	memcpy.b $r0,$r1,$r3	# copy one full chunk
	add	$r0,$r0,$r3
	add	$r1,$r1,$r3
	sub	$r2,$r2,$r3
	jmp	1b
2:
	memcpy.b $r0,$r1,$r2	# copy the remainder
	move.l	$r0,$r4		# return value = original dst
	return
.Lend2:
	.size	memcpy,.Lend2-memcpy
|
stsp/newlib-ia16
| 2,761
|
newlib/libc/machine/ft32/setjmp.S
|
/* A setjmp.c for FT32
Copyright (C) 2014 FTDI (support@ftdichip.com)
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
# setjmp/longjmp for FT32.
# Total jumpbuf size is 108 bytes, or 27 words.
#
.text
.global setjmp
.type setjmp,@function
# int setjmp (jmp_buf env)
# $r0 = env.  The call pushed the return address on the stack; pop it
# into $r5 and save it at env[0], then save $r6..$r31 (which includes
# sp/fp on FT32) at offsets 4..104.  Returns 0 on the direct call.
setjmp:
	pop.l	$r5	# return address in $r5
	sti.l	$r0,0,$r5	# env[0] = resume address
	sti.l	$r0,4,$r6
	sti.l	$r0,8,$r7
	sti.l	$r0,12,$r8
	sti.l	$r0,16,$r9
	sti.l	$r0,20,$r10
	sti.l	$r0,24,$r11
	sti.l	$r0,28,$r12
	sti.l	$r0,32,$r13
	sti.l	$r0,36,$r14
	sti.l	$r0,40,$r15
	sti.l	$r0,44,$r16
	sti.l	$r0,48,$r17
	sti.l	$r0,52,$r18
	sti.l	$r0,56,$r19
	sti.l	$r0,60,$r20
	sti.l	$r0,64,$r21
	sti.l	$r0,68,$r22
	sti.l	$r0,72,$r23
	sti.l	$r0,76,$r24
	sti.l	$r0,80,$r25
	sti.l	$r0,84,$r26
	sti.l	$r0,88,$r27
	sti.l	$r0,92,$r28
	sti.l	$r0,96,$r29
	sti.l	$r0,100,$r30
	sti.l	$r0,104,$r31
	ldk.l	$r0,0	# direct call returns 0
	jmpi	$r5	# return via the popped address
.Lend1:
	.size	setjmp,.Lend1-setjmp
.global longjmp
.type longjmp,@function
# void longjmp (jmp_buf env, int val)
# $r0 = env, $r1 = value for setjmp to return.  Restores the registers
# saved by setjmp (resume address into $r5, then $r6..$r31) and jumps
# to the saved resume address.  A zero val is forced to 1, as ISO C
# forbids longjmp making setjmp appear to return 0.
longjmp:
	cmp.l	$r1,0
	jmpc	nz,.nonz
	ldk.l	$r1,1	# val == 0 -> return 1 instead
.nonz:
	ldi.l	$r5,$r0,0	# resume address saved by setjmp
	ldi.l	$r6,$r0,4
	ldi.l	$r7,$r0,8
	ldi.l	$r8,$r0,12
	ldi.l	$r9,$r0,16
	ldi.l	$r10,$r0,20
	ldi.l	$r11,$r0,24
	ldi.l	$r12,$r0,28
	ldi.l	$r13,$r0,32
	ldi.l	$r14,$r0,36
	ldi.l	$r15,$r0,40
	ldi.l	$r16,$r0,44
	ldi.l	$r17,$r0,48
	ldi.l	$r18,$r0,52
	ldi.l	$r19,$r0,56
	ldi.l	$r20,$r0,60
	ldi.l	$r21,$r0,64
	ldi.l	$r22,$r0,68
	ldi.l	$r23,$r0,72
	ldi.l	$r24,$r0,76
	ldi.l	$r25,$r0,80
	ldi.l	$r26,$r0,84
	ldi.l	$r27,$r0,88
	ldi.l	$r28,$r0,92
	ldi.l	$r29,$r0,96
	ldi.l	$r30,$r0,100
	ldi.l	$r31,$r0,104
	move.l	$r0,$r1	# setjmp's apparent return value
	jmpi	$r5	# resume after the original setjmp call
.Lend2:
	.size	longjmp,.Lend2-longjmp
|
stsp/newlib-ia16
| 2,394
|
newlib/libc/machine/msp430/setjmp.S
|
/* Copyright (c) 2013 Red Hat, Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the BSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses. Any Red Hat trademarks that are
incorporated in the source code or documentation are not subject to
the BSD License and may only be used or replicated with the express
permission of Red Hat, Inc.
*/
# setjmp/longjmp for msp430. The jmpbuf looks like this:
#
# Register Jmpbuf offset
# small large
# r0 (pc) 0x00 0x00
# r1 (sp) 0x02 0x04
# r4 0x04 0x08
# r5 0x06 0x0c
# r6 0x08 0x10
# r7 0x0a 0x14
# r8 0x0c 0x18
# r9 0x0e 0x1c
# r10 0x10 0x20
.text
.global setjmp
setjmp:
	; Upon entry r12 points to the jump buffer.
	; Returns 0 to caller.
	; The return address is still on the stack (not popped), so it is
	; read through r1 and stored in the pc slot; the saved sp therefore
	; still includes that return word, which longjmp pops again.
#if defined __MSP430X_LARGE__
	mova	@r1, r13	; 20-bit return address from the stack
	mova	r13, 0(r12)	; save pc
	mova	r1, 4(r12)	; save sp
	mova	r4, 8(r12)	; save callee-saved r4..r10
	mova	r5, 12(r12)
	mova	r6, 16(r12)
	mova	r7, 20(r12)
	mova	r8, 24(r12)
	mova	r9, 28(r12)
	mova	r10, 32(r12)
	clr	r12		; direct call returns 0
	reta
#else
	;; Get the return address off the stack
	mov.w	@r1, r13
	mov.w	r13, 0(r12)	; save pc
	mov.w	r1, 2(r12)	; save sp
	mov.w	r4, 4(r12)	; save callee-saved r4..r10
	mov.w	r5, 6(r12)
	mov.w	r6, 8(r12)
	mov.w	r7, 10(r12)
	mov.w	r8, 12(r12)
	mov.w	r9, 14(r12)
	mov.w	r10, 16(r12)
	clr	r12		; direct call returns 0
	ret
#endif
	.size	setjmp , . - setjmp
.global longjmp
longjmp:
	; Upon entry r12 points to the jump buffer and
	; r13 contains the value to be returned by setjmp.
	; Registers are reloaded in jmpbuf order: pc into r14 (scratch),
	; then sp and r4..r10.
#if defined __MSP430X_LARGE__
	mova	@r12+, r14	; saved pc
	mova	@r12+, r1	; saved sp
	mova	@r12+, r4
	mova	@r12+, r5
	mova	@r12+, r6
	mova	@r12+, r7
	mova	@r12+, r8
	mova	@r12+, r9
	mova	@r12+, r10
#else
	mov.w	@r12+, r14	; saved pc
	mov.w	@r12+, r1	; saved sp
	mov.w	@r12+, r4
	mov.w	@r12+, r5
	mov.w	@r12+, r6
	mov.w	@r12+, r7
	mov.w	@r12+, r8
	mov.w	@r12+, r9
	mov.w	@r12+, r10
#endif
	; If caller attempts to return 0, return 1 instead.
	cmp.w	#0, r13
	jne	.Lnot_zero
	mov.w	#1, r13
.Lnot_zero:
	mov.w	r13, r12	; r12 = setjmp's apparent return value
#if defined __MSP430X_LARGE__
	adda	#4, r1		; pop setjmp's return word from the restored stack
	mova	r14, r0		; writing r0 (pc) performs the jump
#else
	add.w	#2, r1		; pop setjmp's return word from the restored stack
	mov.w	r14, r0		; writing r0 (pc) performs the jump
#endif
	.size	longjmp , . - longjmp
|
stsp/newlib-ia16
| 2,731
|
newlib/libc/machine/necv70/fastmath.S
|
/* NEC V70 fast math primitives (circular trig, abs, sqrt, inverse
   trig, cosh).  Each routine applies one FPU instruction in place to
   the argument at [ap] -- ".l" forms operate on doubles, ".s" forms on
   floats -- then loads the result into r0 (mov.d for a double, mov.w
   for a float) and returns popping no argument bytes (ret #0).  */
	.globl	_fast_sin
_fast_sin:
	fsin.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_sinf
_fast_sinf:
	fsin.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_cos
_fast_cos:
	fcos.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_cosf
_fast_cosf:
	fcos.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_tan
_fast_tan:
	ftan.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_tanf
_fast_tanf:
	ftan.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_fabs
_fast_fabs:
	fabs.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_fabsf
_fast_fabsf:
	fabs.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_sqrt
_fast_sqrt:
	fsqrt.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_sqrtf
_fast_sqrtf:
	fsqrt.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_acos
_fast_acos:
	facos.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_acosf
_fast_acosf:
	facos.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_asin
_fast_asin:
	fasin.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_asinf
_fast_asinf:
	fasin.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_atan
_fast_atan:
	fatan.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_atanf
_fast_atanf:
	fatan.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_cosh
_fast_cosh:
	fcosh.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_coshf
_fast_coshf:
	fcosh.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
/* double fast_sinh (double x): hyperbolic sine, in place on [ap].
   BUG FIX: this previously executed fsin.l (circular sine), which is
   a different function entirely.  The hyperbolic siblings here use the
   hyperbolic opcodes (fcosh.l, ftanh.l, fatanh.l), so sinh must use
   fsinh.l.  */
	.globl	_fast_sinh
_fast_sinh:
	fsinh.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
/* float fast_sinhf (float x): single-precision hyperbolic sine.
   BUG FIX: this previously executed fsin.s (circular sine); use the
   hyperbolic opcode fsinh.s, matching _fast_coshf/_fast_tanhf.  */
	.globl	_fast_sinhf
_fast_sinhf:
	fsinh.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
/* NEC V70 fast math primitives, continued (hyperbolic tan/atanh,
   exponentials base 2/10/e, logarithms base 2/10/e).  Same pattern as
   above: one in-place FPU instruction on the argument at [ap], result
   loaded into r0, ret #0.  */
	.globl	_fast_tanh
_fast_tanh:
	ftanh.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_tanhf
_fast_tanhf:
	ftanh.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_atanh
_fast_atanh:
	fatanh.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_atanhf
_fast_atanhf:
	fatanh.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_exp2
_fast_exp2:
	fexp2.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_exp2f
_fast_exp2f:
	fexp2.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_exp10
_fast_exp10:
	fexp10.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_exp10f
_fast_exp10f:
	fexp10.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_expe
_fast_expe:
	fexpe.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_expef
_fast_expef:
	fexpe.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_log2
_fast_log2:
	flog2.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_log2f
_fast_log2f:
	flog2.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_log10
_fast_log10:
	flog10.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_log10f
_fast_log10f:
	flog10.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
	.globl	_fast_loge
_fast_loge:
	floge.l	[ap],[ap]
	mov.d	[ap],r0
	ret	#0
	.globl	_fast_logef
_fast_logef:
	floge.s	[ap],[ap]
	mov.w	[ap],r0
	ret	#0
|
stsp/newlib-ia16
| 5,675
|
newlib/libc/machine/i960/strncpy.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strncpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncpy (optimized assembler version for the 80960K Series)
dest_addr = strncpy (dest_addr, src_addr, max_bytes)
copy the null terminated string pointed to by src_addr to the
string pointed to by dest_addr. Return the original dest_addr.
If the source string is shorter than max_bytes, then null-pad
the destination string. If it is longer than max_bytes, the
copy stops at max_bytes bytes (and no terminating null appears
in the destination string).
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strncpy fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
Fetch and store the strings by words and go to a character move loop
as soon as a null byte is encountered. If max_bytes is exhausted
first, then terminate after moving only max_bytes (with the last
0, 1, 2, or 3 bytes moved as single bytes, not as a word).
Otherwise, the character move loop moves the last bytes or the
source string, and then null-pads the destination string until
max_bytes is exhausted.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment.
2) When the null byte is encountered in a source word, null out the
higher-numbered bytes in that word, store the word in the destination,
and go to the word null-padder, which may eventually go to the byte
null-padder.
*/
.globl _strncpy
.globl __strncpy
.leafproc _strncpy,__strncpy
.align 2
# char *strncpy (char *dest, const char *src, int max_bytes)
# i960 leafproc: g0 = dest, g1 = src, g2 = max_bytes; returns g0 = dest.
# Copies by whole words, dropping to a byte loop near the limit or when
# a null byte is seen; null-pads dest when src is shorter than max_bytes.
# Register map: g5 = dest cursor, g6 = src limit (then word-pad limit),
# g2 = dest limit, g3 = 0xff byte mask, g4/g7 = current/prefetched src
# words, g13 = return address, g14 = extracted byte (zeroed at exit).
_strncpy:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strncpy:
	mov	g14, g13
	cmpibge	0,g2,Lexit	# quit early if max_bytes <= 0
	ld	(g1), g7	# fetch the first word of the source
	mov	g0, g5
	lda	0xff, g3	# byte extraction mask
	addo	g1, g2, g6	# g6 = src + max_bytes (source limit)
	addo	g2, g5, g2	# g2 = dest + max_bytes (dest limit)
Lwloop:				# word copying loop
	addo	4, g1, g1	# post-increment source ptr
	cmpo	g6, g1		# max_bytes < 4 ?
	mov	g7, g4		# keep a copy of source word
	bl	Lcloop.a	# if less than four bytes to go, go to char loop
	scanbyte 0, g4		# null byte found?
	ld	(g1), g7	# pre-fetch next word of the source
	be	Lcloop.c	# go to char loop if null encountered
	st	g4, (g5)	# store current word
	addo	4, g5, g5	# post-increment destination ptr
	b	Lwloop
Lcloop.a:			# character copying loop (max_bytes < 3)
	and	g3, g4, g14	# extract byte
Lcloop.b:
	cmpo	g2, g5		# max_bytes <= 0 ?
	shro	8, g4, g4	# position word to extract next byte
	be	Lexit		# exit if max_bytes exhausted
	cmpo	0, g14		# is it null?
	stob	g14, (g5)	# store it
	addo	1, g5, g5	# post-increment dest ptr
	bne	Lcloop.a	# branch if we are NOT null padding
	b	Lcloop.b	# branch if we are null padding
Lexit:
	mov	0, g14		# leafproc linkage requires g14 = 0 on return
	bx	(g13)		# g0 = dest string address; g14 = 0
Lrett:
	ret
Lcloop.c:			# null seen: locate it within the word g4
	and	g3, g4, g14	# extract byte (mask g3 walks up the word)
	cmpo	0, g14		# is it null?
	mov	g3, g7		# save mask
	shlo	8, g3, g3	# shift mask to next byte position
	bne	Lcloop.c	# loop until null found
	subo	1, g7, g3	# mask to null pad after null byte
	and	g3, g4, g4	# null-out stuff after null byte
	st	g4, (g5)	# store last part of src and first of null-pad
	subo	8,g2,g6		# adjust max_byte counter for word padding
Lzwloop:			# pad dest with whole zero words (g14 == 0 here)
	cmpo	g5, g6		# max_bytes < 4 ?
	addo	4, g5, g5
	bg	Lcloop.b	# if so, goto character loop
	st	g14, (g5)	# store four null bytes
	b	Lzwloop
/* end of strncpy */
|
stsp/newlib-ia16
| 5,234
|
newlib/libc/machine/i960/strchr_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strch_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strchr (optimized assembler version for the CA)
src_addr = strchr (src_addr, char)
return a pointer to the first byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because, in several cases, strchr
will fetch ahead. Disallowing the fetch ahead would impose a severe
performance penalty.
This program handles two cases:
1) the argument starts on a word boundary
2) the argument doesn't start on a word boundary
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 src ptr; upon return it is a pointer to the matching byte, or null
g1 char to seek
g2 mask to avoid unimportant bytes in first word
g3 char to seek, broadcast to all four bytes
g4 word of the source string
g5 copy of the word
g6 extracted character
g7 byte extraction mask
g13 return address
g14
*/
.globl _strchr
.globl __strchr
.leafproc _strchr, __strchr
.align 2
# char *strchr (const char *src, int c) -- i960CA-tuned version.
# Leafproc: g0 = src, g1 = c; returns g0 = address of the first
# occurrence of the low byte of c in src, or 0 if the terminating null
# comes first.  The char is broadcast to all four bytes (g3) so
# scanbyte can test a whole word at once; c == 0xff needs the special
# path below because the first-word masking fills unused bytes with
# 0xff, which would false-match.
_strchr:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strchr:
	lda	0xff,g7		# byte extraction mask
	and	g1,g7,g1	# make char an 8-bit ordinal
	shlo	8,g1,g2		# broadcast the char to four bytes
	or	g1,g2,g2
	shlo	16,g2,g4
	cmpo	g1,g7		# is char being sought 0xff?
	or	g4,g2,g3
	lda	(g14),g13	# preserve return address
	notand	g0,3,g5		# extract word addr of start of src
	lda	0,g14		# conform to register linkage standard
	and	g0,3,g6		# extract byte offset of src
	ld	(g5),g4		# fetch word containing at least first byte
	shlo	3,g6,g6		# get shift count for making mask for first word
	lda	4(g5),g0	# post-increment src word pointer
	subi	1,0,g5		# mask initially all ones
#if	__i960_BIG_ENDIAN__
	shro	g6,g5,g5	# get mask for bytes needed from first word
#else
	shlo	g6,g5,g5	# get mask for bytes needed from first word
#endif
	notor	g4,g5,g4	# set unneeded bytes to all ones
	be.f	Lsearch_for_0xff	# branch if seeking 0xff
Lsearch_for_word_with_char_or_null:
	scanbyte g3,g4		# check for byte with char
	lda	(g4),g5		# copy word
	ld	(g0),g4		# fetch next word of src
	bo.f	Lsearch_for_char	# branch if char found in this word
	scanbyte 0,g5		# check for null byte
	lda	4(g0),g0	# post-increment src word pointer
	bno.t	Lsearch_for_word_with_char_or_null	# branch if not null
Lnot_found:
	mov	0,g0		# char not found.  Return null
Lexit_code:
	bx	(g13)		# g0 = addr of char in src (or null); g14 = 0
Lrett:
	ret
Lsearch_for_char:
	subo	5,g0,g0		# back up the byte pointer
Lsearch_for_char.a:
#if	__i960_BIG_ENDIAN__
	rotate	8,g5,g5		# shift word to position next byte
#endif
	and	g5,g7,g6	# extract byte
	cmpo	g1,g6		# is it char?
	lda	1(g0),g0	# bump src byte ptr
#if	! __i960_BIG_ENDIAN__
	shro	8,g5,g5		# shift word to position next byte
#endif
	be.f	Lexit_code
	cmpobne.t 0,g6,Lsearch_for_char.a	# quit if null comes before char
	b	Lnot_found
Lsearch_for_0xff:
	lda	0xf0f0f0f0,g2	# make first comparison mask for char=-1 case.
	or	g5,g2,g2
	and	g4,g2,g4	# make unimportant bytes of first word 0x0f
	b	Lsearch_for_word_with_char_or_null
/* end of strchr */
|
stsp/newlib-ia16
| 4,046
|
newlib/libc/machine/i960/strlen.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strlen.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strlen (optimized assembler version for the 80960K series)
src_addr = strlen (src_addr)
return the number of bytes that precede the null byte in the
string pointed to by src_addr.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last four words of the program's
allocated memory space. This is so because strlen fetches ahead
several words. Disallowing the fetch ahead would impose a severe
performance penalty.
Strategy:
Fetch the source array by long-words and scanbyte the words for the
null byte until found. Examine the word in which the null byte is
found, to determine its actual position, and return the length.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that many source strings will be word
aligned to begin with.
*/
.globl _strlen
.globl __strlen
.leafproc _strlen, __strlen
.align 2
# size_t strlen (const char *src)
# i960 leafproc: g0 = src; returns g0 = number of bytes before the
# terminating null.  Scans two words at a time (ldl) with scanbyte,
# then walks the word containing the null byte by byte.  g14 ends as 0
# (the extracted null byte), satisfying the leafproc linkage.
_strlen:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strlen:
	mov	g14,g13		# preserve return address
	ldl	(g0),g4		# fetch first two words
	addo	8,g0,g2		# post-increment src word pointer
	lda	0xff,g3		# byte extraction mask
Lsearch_for_word_with_null_byte:
	scanbyte 0,g4		# check for null byte in first word of pair
	mov	g5,g7		# copy second word
	bo.f	Lsearch_for_null	# branch if null found
	scanbyte 0,g7		# check for null byte in second word
	ldl	(g2),g4		# fetch next pair of word of src
	addo	8,g2,g2		# post-increment src word pointer
	bno	Lsearch_for_word_with_null_byte	# branch if null not found yet
	subo	4,g2,g2		# back up the byte pointer
	mov	g7,g4		# move word with null to search word
Lsearch_for_null:
	subo	9,g2,g2		# back up the byte pointer
Lsearch_for_null.a:
	and	g4,g3,g14	# extract byte
	cmpo	0,g14		# is it null?
	addo	1,g2,g2		# bump src byte ptr
	shro	8,g4,g4		# shift word to position next byte
	bne	Lsearch_for_null.a
Lexit_code:
	subo	g0,g2,g0	# calculate string length
	bx	(g13)		# g0 = string length; g14 = 0 (last byte was null)
Lrett:
	ret
/* end of strlen */
|
stsp/newlib-ia16
| 4,364
|
newlib/libc/machine/i960/strchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strchr.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strchr (optimized assembler version for the 80960K series)
src_addr = strchr (src_addr, char)
return a pointer to the first byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because strchr fetches ahead.
Disallowing the fetch ahead would impose a severe performance penalty.
Strategy:
Fetch the source string by words and scanbyte the words for the
char until either a word with the byte is found or the null byte is
encountered. In the former case, move through the word to find the
matching byte and return its memory address. In the latter case,
return zero (null).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
*/
	.globl	_strchr
	.globl	__strchr
	.leafproc	_strchr, __strchr
	.align	2
/*
   Register usage:
	g0   src pointer (advanced a word at a time, then a byte at a
	     time); holds the return value (addr of char, or null) at exit
	g1   the char, masked to an 8-bit ordinal
	g2   scratch while broadcasting the char
	g3   the char replicated into all four bytes of a word
	g4   current word of src (fetched one word ahead)
	g5   working copy of the current word / broadcast scratch
	g6   byte extracted from the current word
	g7   0xff byte-extraction mask
	g13  saved return address
	g14  0 at exit, per the leafproc register-linkage convention
*/
_strchr:			# "call" entry: g14 = address of ret at Lrett
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strchr:			# "bal" entry point
	ld	(g0),g4		# fetch first word
	lda	0xff,g7		# byte extraction mask
	and	g1,g7,g1	# make char an 8-bit ordinal
	shlo	8,g1,g2		# broadcast the char to four bytes
	or	g1,g2,g2	# g2 = char in bytes 0 and 1
	shlo	16,g2,g5	# g5 = char in bytes 2 and 3
	or	g2,g5,g3	# g3 = char in all four byte positions
	mov	g14,g13		# preserve return address
	addo	4,g0,g0		# post-increment src pointer
	mov	0,g14		# conform to register linkage standard
Lsearch_for_word_with_char_or_null:
	mov	g4,g5		# copy word
	scanbyte g3,g5		# check for byte with char
	ld	(g0),g4		# fetch next word of src
	bo	Lsearch_for_char # branch if char found
	scanbyte 0,g5		# check for null byte
	addo	4,g0,g0		# post-increment src pointer
	bno	Lsearch_for_word_with_char_or_null # branch if not null
Lnot_found:
	mov	0,g0		# char not found. Return null
Lexit_code:
	bx	(g13)		# g0 = addr of char in src (or null); g14 = 0
Lrett:
	ret			# target of the "call" entry's return branch
Lsearch_for_char:		# word in g5 holds the char; locate its byte
	subo	5,g0,g0		# back up the byte pointer
Lsearch_for_char.a:
	and	g5,g7,g6	# extract byte
	cmpo	g1,g6		# is it char?  (addo below leaves cc intact)
	addo	1,g0,g0		# bump src byte ptr; g0 = addr of byte tested
	shro	8,g5,g5		# shift word to position next byte
	be	Lexit_code	# found: g0 is the address of the char
	cmpobne	0,g6,Lsearch_for_char.a # quit if null comes before char
	b	Lnot_found	# hit the terminating null first
/* end of strchr */
|
stsp/newlib-ia16
| 8,544
|
newlib/libc/machine/i960/strncmp_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "sncmp_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncmp (optimized assembler version for the CA)
result = strncmp (src1_addr, src2_addr, max_bytes)
compare the null terminated string pointed to by src1_addr to
the string space pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexically less than src2_addr, and 1
if it is lexically greater. Do not compare more than max_bytes bytes.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strncmp
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source1 is word aligned, source2 is not
4) source2 is word aligned, source1 is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g14 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers are sufficient to implement the routine.
The registers are used as follows:
g0 original src1 ptr; extracted word; return result
g1 src2 ptr; 0xff -- byte extraction mask
g2 maximum number of bytes to compare
g3 src2 word ptr
Little endian:
g4 lsw of src1
g5 msw of src1
g6 src2 word
g7 src1 word ptr
Big endian:
g4 msw of src1
g5 lsw of src1
g6 src1 word ptr
g7 src2 word
g13 return address
g14 shift count
*/
#if __i960_BIG_ENDIAN__
#define MSW g4
#define LSW g5
#define SRC1 g6
#define SRC2 g7
#else
#define LSW g4
#define MSW g5
#define SRC2 g6
#define SRC1 g7
#endif
	.globl	_strncmp
	.globl	__strncmp
	.leafproc	_strncmp, __strncmp
	.align	2
/*
   Register roles are documented in the block comment above; g13 holds
   the saved return address and g14 doubles as the eshro shift count,
   then must be 0 at exit per the leafproc linkage convention.
*/
_strncmp:			# "call" entry: g14 = address of ret at Lrett
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strncmp:			# "bal" entry point
Lrestart:			# also re-entered from Lsrc2_unaligned below,
				# with pointers re-shifted so src2 is aligned
	notand	g0,3,SRC1	# extract word addr of start of src1
	lda	(g14),g13	# preserve return address
	cmpibge.f 0,g2,Lequal_exit # return equality if number of bytes to
				/* compare is none. */
#if	__i960_BIG_ENDIAN__
	cmpo	g0,SRC1		# check alignment of src1
#endif
	ld	(SRC1),LSW	# fetch word with at least first byte of src1
	notand	g1,3,g3		# extract word addr of start of src2
	ld	4(SRC1),MSW	# fetch second word of src1
#if	__i960_BIG_ENDIAN__
	bne	Lsrc1_unaligned	# branch if src1 is unaligned
	cmpo	g3,g1		# check alignment of src2
	ld	(g3),SRC2	# fetch word with at least first byte of src2
	shlo	3,g0,g14	# compute shift count for src1
	subo	g14,0,g14	# adjust shift count for big endian
	lda	8(SRC1),SRC1	# advance src1 word addr
	bne.f	Lsrc2_unaligned	# branch if src2 is NOT word aligned
	/* src2 is word aligned */
	mov	LSW,g0		# first word of src1 (already aligned)
Lwloop2:			# word comparing loop
	cmpo	SRC2,g0		# compare src1 and src2 words
	lda	0xff000000,g1	# byte extraction mask
	mov	MSW,LSW		# move msw of src1 to lsw
	ld	(SRC1),MSW	# pre-fetch next msw of src1
	addo	4,SRC1,SRC1	# post-increment src1 addr
	lda	4(g3),g3	# pre-increment src2 addr
	bne.f	Lcloop		# branch if src1 and src2 unequal
	scanbyte 0,g0		# check for null byte in src1 word
	ld	(g3),SRC2	# pre-fetch next word of src2
	mov	LSW,g0		# extract word of src1
	subi	4,g2,g2		# decrement maximum byte count
	bo.f	Lequal_exit	# branch if null byte encountered
	cmpibl.t 0,g2,Lwloop2	# branch if max_bytes not reached yet
	b	Lequal_exit	# strings were equal up through max_bytes
Lsrc1_unaligned:
#endif
	cmpo	g3,g1		# check alignment of src2
	ld	(g3),SRC2	# fetch word with at least first byte of src2
	shlo	3,g0,g14	# compute shift count for src1
#if	__i960_BIG_ENDIAN__
	subo	g14,0,g14	# adjust shift count for big endian
#endif
	eshro	g14,g4,LSW	# extract word of src1
	lda	8(SRC1),SRC1	# advance src1 word addr
	bne.f	Lsrc2_unaligned	# branch if src2 is NOT word aligned
	/* at least src2 is word aligned */
	mov	LSW,g0		# extracted (realigned) word of src1
Lwloop:				# word comparing loop
	cmpo	SRC2,g0		# compare src1 and src2 words
#if	__i960_BIG_ENDIAN__
	lda	0xff000000,g1	# byte extraction mask
#else
	lda	0xff,g1		# byte extraction mask
#endif
	mov	MSW,LSW		# move msw of src1 to lsw
	ld	(SRC1),MSW	# pre-fetch next msw of src1
	addo	4,SRC1,SRC1	# post-increment src1 addr
	lda	4(g3),g3	# pre-increment src2 addr
	bne.f	Lcloop		# branch if src1 and src2 unequal
	scanbyte 0,g0		# check for null byte in src1 word
	ld	(g3),SRC2	# pre-fetch next word of src2
	eshro	g14,g4,g0	# extract word of src1
	subi	4,g2,g2		# decrement maximum byte count
	bo.f	Lequal_exit	# branch if null byte encountered
	cmpibl.t 0,g2,Lwloop	# branch if max_bytes not reached yet
	b	Lequal_exit	# strings were equal up through max_bytes
Lcloop_setup:			# setup for coming from Lsrc2_unaligned
	mov	LSW,g0		# restore extracted src1 word
#if	__i960_BIG_ENDIAN__
	lda	0xff000000,g1	# byte extraction mask
#else
	lda	0xff,g1		# byte extraction mask
#endif
Lcloop:				# character comparing loop
	and	SRC2,g1,g3	# extract next char of src2
	and	g0,g1,LSW	# extract next char of src1
	cmpobne.f LSW,g3,.diff	# check for equality
	cmpo	0,LSW		# check for null byte
#if	__i960_BIG_ENDIAN__
	shro	8,g1,g1		# shift mask for next byte
#else
	shlo	8,g1,g1		# shift mask for next byte
#endif
	subi	1,g2,g2		# decrement character counter
	bne.t	Lcloop		# branch if null not reached
	/* words are equal up thru null byte */
Lequal_exit:
	mov	0,g14		# conform to register conventions
	lda	0,g0		# return zero, indicating equality
	bx	(g13)		# return
Lrett:
	ret			# target of the "call" entry's return branch
.diff:				# bytes differ; cc still reflects LSW vs g3
	mov	0,g14		# conform to register conventions
	bl	Lless_than_exit	# src1 byte < src2 byte
Lgreater_than_exit:
	cmpibge.f 0,g2,Lequal_exit # branch if difference is beyond max_bytes
	mov	1,g0		# src1 > src2
	bx	(g13)		# g0 = 1 (src1 > src2)
Lless_than_exit:
	cmpibge.f 0,g2,Lequal_exit # branch if difference is beyond max_bytes
	subi	1,0,g0		# g0 = -1
	bx	(g13)		# g0 = -1 (src1 < src2)
Lsrc2_unaligned:
	notor	g1,3,g14	# first step in computing new src1 ptr
	ld	4(g3),SRC1	# fetch second word of src2
	shlo	3,g1,MSW	# compute shift count for src2
#if	__i960_BIG_ENDIAN__
	subo	MSW,0,MSW	# adjust shift count for big endian
#endif
	eshro	MSW,g6,SRC2	# extract word of src2
	cmpo	LSW,SRC2	# compare src1 and src2 words
	lda	4(g3),g1	# set new src2 ptr
	bne.f	Lcloop_setup	# first four bytes differ
	scanbyte 0,LSW		# check for null byte
	subo	g14,g0,g0	# second (final) step in computing new src1 ptr
	addi	g14,g2,g2	# compute new max_bytes too
	lda	(g13),g14	# prepare return pointer for Lrestart
	bno.t	Lrestart	# if null byte not encountered, continue
				/* with both string fetches shifted such that*/
				/* src2 is now word aligned.*/
	mov	0,g14		# conform to register conventions.
	lda	0,g0		# return indicator of equality.
	bx	(g13)
|
stsp/newlib-ia16
| 3,969
|
newlib/libc/machine/i960/memset.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memset.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure memset (optimized assembler version: 80960K series, 80960CA)
dest_addr = memset (dest_addr, char, len)
Fill len bytes pointed to by dest_addr with the value of char.
Return the original address of dest_addr.
This program avoids performing unaligned accesses. It stores
from zero to seven bytes, and then stores aligned longwords,
and then stores from zero to seven bytes, as necessary to
store len bytes starting at dest_addr.
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine.
*/
	.globl	_memset
	.globl	__memset
	.leafproc	_memset, __memset
	.align	2
/*
   Register usage:
	g0     original dest_ptr (never modified; the return value)
	g1     the fill char (low byte used by stob)
	g2     len, counted down
	g3     byte-before-next-8-byte-boundary of dest (alignment target)
	g4,g5  even/odd register pair holding 8 bytes of replicated char
	       (stl requires an even-numbered pair)
	g6     running dest pointer
	g13    saved return address
	g14    0, per register conventions
*/
_memset:			# "call" entry: g14 = address of ret at Lrett
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memset:			# "bal" entry point
	cmpo	7,g2		# are there fewer than seven characters to move?
	lda	(g14),g13	# save return address
	notand	g0,7,g3		# test for non-aligned dest_ptr
	lda	0,g14		# conform to register conventions
	shlo	24,g1,g4	# prepare word of char (also masks to 8 bits)
	lda	(g0),g6		# preserve dest_ptr for return
	shro	8,g4,g5		# char now in bytes 3 and 2 of g4/g5
	bge.f	Lcloop_setup	# short fill: bytes only (cc from cmpo 7,g2)
	cmpo	g3,g0		# is dest longword aligned
	lda	7(g3),g3	# bump dest_ptr to next longword boundary
	or	g4,g5,g4	# char in top two bytes of g4
	be.t	Lwloop_setup	# already aligned: go store longwords
Lbgn_cloop:			# store bytes up to the longword boundary;
				# compare precedes the store, so the byte at
				# g3 (boundary - 1) is the last one stored
	cmpo	g6,g3		# Have we reached longword boundary?
	stob	g1,(g6)		# store one byte of char
	subo	1,g2,g2		# decrement len
	lda	1(g6),g6	# increment dest_ptr
	bne.t	Lbgn_cloop	# loop if more bytes to store before longword
	cmpobge.f 7,g2,Lcloop	# fewer than 8 bytes left: finish by bytes
Lwloop_setup:
	shro	16,g4,g5	# replicate char into remaining bytes...
	or	g4,g5,g4	# ...g4 = char in all four bytes
	mov	g4,g5		# now have a longword of char
Lwloop:
	cmpo	15,g2		# Do we have to store more longwords?
	stl	g4,(g6)		# Store longword of char
	subo	8,g2,g2		# Decrement len
	lda	8(g6),g6	# Increment dest_ptr
	bl.t	Lwloop		# loop if more longwords to store
Lcloop_setup:
	cmpobge.t 0,g2,Lexit	# nothing left to store
Lcloop:				# trailing-byte loop (1..7 bytes)
	cmpo	1,g2		# Is len exhausted?
	stob	g1,(g6)		# Store byte
	subo	1,g2,g2		# Decrement len
	lda	1(g6),g6	# Increment dest_ptr
	bne.t	Lcloop		# loop if more bytes to store
Lexit:
	bx	(g13)		# g0 = original dest_ptr; g14 = 0
Lrett:
	ret			# target of the "call" entry's return branch
/* end of memset */
|
stsp/newlib-ia16
| 5,853
|
newlib/libc/machine/i960/memcpy.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memmove (optimized assembler version for the 80960K series)
procedure memcpy (optimized assembler version for the 80960K series)
dest_addr = memmove (dest_addr, src_addr, len)
dest_addr = memcpy (dest_addr, src_addr, len)
copy len bytes pointed to by src_addr to the space pointed to by
dest_addr. Return the original dest_addr.
These routines will work even if the arrays overlap. The standard
requires this of memmove, but memcpy is allowed to fail if overlap
is present. Nevertheless, it is implemented the same as memmove
because the overhead is trifling.
Undefined behavior will occur if the end of the source array is in
the last two words of the program's allocated memory space. This
is so because the routine fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Strategy:
Fetch the source array by words and store them by words to the
destination array, until there are fewer than three bytes left
to copy. Then, using the last word of the source (the one that
contains the remaining 0, 1, 2, or 3 bytes to be copied), store
	a byte at a time until done.
Tactics:
1) Do NOT try to fetch and store the words in a word aligned manner
because, in my judgement, the performance degradation experienced due
to non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that most source and
destination arrays (even more true of most big source arrays) will
be word aligned to begin with.
2) For non-overlapping arrays, rather than decrementing len to zero,
I calculate the address of the byte after the last byte of the
destination array, and quit when the destination byte pointer passes
that.
3) For overlapping arrays where the source starts at a lower address
than the destination the move is performed in reverse order.
4) Overlapping arrays where the source starts at a higher address
	are treated like the non-overlapping case. Where the two arrays exactly
	coincide, the routine is short-circuited; no move is done at all.
This costs only one cycle.
*/
	.globl	_memcpy, _memmove
	.globl	__memcpy, __memmove
	.leafproc	_memmove, __memmove
	.leafproc	_memcpy, __memcpy
	.align	2
/*
   Register usage:
	g0   dest_addr (never modified; the return value)
	g1   running src pointer (forward copy)
	g2   len
	g3   src end pointer (src+len); running src ptr in backward copy
	g4   copy of the current source word being stored (forward)
	g5   running dest pointer (forward copy)
	g6   dest end pointer (dest+len); running dest ptr in backward copy
	g7   fetched-ahead source word
	g13  saved return address
	g14  0 at exit, per the leafproc register-linkage convention
*/
_memmove:
_memcpy:			# "call" entry: g14 = address of ret at Lrett
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memmove:
__memcpy:			# "bal" entry point
	mov	g14, g13	# preserve return address
	cmpibge	0,g2,Lexit	# exit if number of bytes to move is <= zero.
	cmpo	g0,g1		# does start of dest overlap end of src?
	addo	g2,g1,g3	# g3 = one past end of src
	be	Lexit		# no move necessary if src and dest are same
	concmpo	g3,g0		# conditional compare: overlap test
	addo	g2, g0, g6	# g6 = one past end of dest
	bg	Lbackwards	# if overlap, then do move backwards
	ld	(g1), g7	# fetch first word of source
	mov	g0, g5		# g5 = running dest pointer
	b	Lwloop_b
Lwloop_a:
	ld	(g1), g7	# fetch ahead next word of source
	st	g4, (g5)	# store word to dest
	addo	4, g5, g5	# post-increment dest pointer
Lwloop_b:			# word copying loop
	addo	4, g1, g1	# pre-increment src pointer
	cmpo	g3, g1		# is len <= 3 ?
	mov	g7, g4		# keep a copy of the current word
	bge	Lwloop_a	# loop if more than 3 bytes to move
	cmpobe	g6, g5, Lexit	# quit if no more bytes to move
Lcloop_a:			# character copying loop (len < 3)
	stob	g4, (g5)	# store a byte
	shro	8, g4, g4	# position next byte for storing
	addo	1, g5, g5	# bump dest byte pointer
	cmpobne	g6, g5, Lcloop_a # loop while more bytes to move
Lexit:
	mov	0, g14		# conform to register conventions
	bx	(g13)		# g0 = dest array address; g14 = 0
Lrett:
	ret			# target of the "call" entry's return branch
Lwloop.a:
	subo	4, g6, g6	# pre-decrement dest pointer
	st	g7, (g6)	# store word to dest
Lbackwards:			# word copying loop (high to low addresses)
	subo	4, g3, g3	# pre-decrement src pointer
	cmpo	g1, g3		# is len <= 3?
	ld	(g3), g7	# fetch ahead next word of source
	ble	Lwloop.a	# loop if more than 3 bytes to move
	cmpobe	g6, g0, Lexit	# quit if no more bytes to move
Lcloop.a:			# trailing-byte loop, highest byte first
	subo	1, g6, g6	# pre-decrement dest byte pointer
	rotate	8, g7, g7	# position byte for storing
	stob	g7, (g6)	# store byte
	cmpobne	g6, g0, Lcloop.a # loop while more bytes to move
	b	Lexit
/* end of memmove */
|
stsp/newlib-ia16
| 5,015
|
newlib/libc/machine/i960/strrchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strrchr.s"
#ifdef __i960_BIG_ENDIAN__
#error "This does not work in big-endian"
#endif
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strrchr (optimized assembler version for the 80960K series)
src_addr = strrchr (src_addr, char)
return a pointer to the last byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because strrchr fetches ahead.
Disallowing the fetch ahead would impose a severe performance penalty.
Strategy:
Fetch the source string by words and scanbyte the words for the
char until either a word with the byte is found or the null byte is
encountered. In the former case, move through the word to find the
matching byte and save its memory address, then continue the search.
In the latter case, return the saved address, or zero (null) if none
was ever found to save.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
*/
	.globl	_strrchr
	.globl	__strrchr
	.leafproc	_strrchr, __strrchr
	.align	2
/*
   Register usage:
	g0   candidate result + 1; initialized to 1 so the final
	     "subo 1" yields null when the char was never seen
	g1   the char, masked to an 8-bit ordinal
	g2   running src pointer
	g3   the char replicated into all four bytes of a word
	g4   current word of src (fetched one word ahead)
	g5   working copy of the current word / extracted byte
	g6   last word seen that contains the char; initialized to the
	     all-char word g3 so the final scan short-circuits when the
	     char was never seen (and also after an exact-byte match)
	g7   0xff byte-extraction mask
	g13  saved return address
	g14  scratch in Lfind_null; loop exits only when it is 0,
	     which satisfies the leafproc linkage convention at exit
*/
_strrchr:			# "call" entry: g14 = address of ret at Lrett
#ifdef	__PIC
	lda	Lrett-(.+8)(ip),g14
#else
	lda	Lrett,g14
#endif
__strrchr:			# "bal" entry point
	ld	(g0),g4		# fetch first word
	lda	0xff,g7		# byte extraction mask
	and	g1,g7,g1	# make char an 8-bit ordinal
	shlo	8,g1,g2		# broadcast the char to four bytes
	or	g1,g2,g2	# g2 = char in bytes 0 and 1
	shlo	16,g2,g5	# g5 = char in bytes 2 and 3
	or	g2,g5,g3	# g3 = char in all four byte positions
	mov	g14,g13		# preserve return address
	addo	4,g0,g2		# post-increment src pointer
	mov	1,g0		# prepare to return null pointer
	mov	g3,g6		# prepare to return null pointer
Lsearch_for_word_with_char_or_null:
	mov	g4,g5		# copy word
	scanbyte 0,g5		# check for null byte
	ld	(g2),g4		# fetch next word of src
	bo	Lword_has_null	# branch if null found
	scanbyte g3,g5		# check for byte with char
	addo	4,g2,g2		# post-increment src pointer
	bno	Lsearch_for_word_with_char_or_null # branch if no copy of char
	mov	g5,g6		# save word that has char in it (at least once)
	subo	4,g2,g0		# save addr of byte after word with char
	b	Lsearch_for_word_with_char_or_null
Lword_has_null:
	subo	4,g2,g2		# move src pointer back to word with null
Lfind_null:			# byte-scan the final word up to its null
	addo	1,g2,g2		# advance src pointer to byte after current
	and	g7,g5,g14	# extract next byte
	cmpo	g1,g14		# is current byte char?
	shro	8,g5,g5		# position next byte for extraction
	bne	1f		# skip if not char sought after
	mov	g2,g0		# save addr of byte after char
	mov	g3,g6		# save word of all char to short circuit search
1:	cmpobne	0,g14,Lfind_null # is current byte null?
Lfind_last_char:		# locate the last char within the saved word
	rotate	8,g6,g6		# position next highest byte
	and	g7,g6,g5	# extract byte
	subo	1,g0,g0		# move pointer to that byte (or nullify)
	cmpobne	g5,g1,Lfind_last_char # branch if not at char
	bx	(g13)		# g0 = addr of char in src (or null); g14 = 0
Lrett:
	ret			# target of the "call" entry's return branch
/* end of strrchr */
|
stsp/newlib-ia16
| 4,396
|
newlib/libc/machine/i960/strcmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcmp.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcmp (optimized assembler version for the 80960K Series)
result = strcmp (src1_addr, src2_addr)
compare the null terminated string pointed to by src1_addr to
the string pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicographically less than src2_addr,
and 1 if it is lexicographically greater.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strcmp fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
Fetch the source strings by words and compare the words until either
differing words are found or the null byte is encountered. In either
case, move through the word until either the differing byte if found,
in which case return -1 or 1 appropriately; or the null byte is
encountered, in which case, return zero (equality).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that many source
strings will be word aligned to begin with.
*/
	.globl	_strcmp
	.globl	__strcmp
	.leafproc	_strcmp,__strcmp
	.align	2
/*
   Register usage:
	g0   source_1 ptr; return result (0, 1, or -1)
	g1   source_2 ptr
	g2   copy of the current source_1 word
	g3   current source_2 word
	g4   0xff byte-extraction mask, shifted up one byte per pass
	g5   fetched-ahead source_1 word / extracted source_1 byte
	g6   extracted source_2 byte
	g7   saved return address
	g14  0, per register conventions
*/
_strcmp:			# "call" entry: g14 = address of ret at .Lrett
#ifndef	__PIC
	lda	.Lrett,g14
#else
	lda	.Lrett-(.+8)(ip),g14
#endif
__strcmp:			# "bal" entry point
	ld	(g0), g5	# fetch first word of source_1
	mov	g14,g7		# preserve return address
	ldconst	0,g14		# conform to register conventions
	ldconst	0xff,g4		# byte extraction mask
.Lwloop:
	addo	4,g0,g0		# post-increment source_1 byte ptr
	ld	(g1), g3	# fetch word of source_2
	scanbyte 0,g5		# does word have a null byte?
	mov	g5,g2		# save a copy of the source_1 word
	be	.Lcloop		# branch if null byte encountered
	cmpo	g2,g3		# are the source words the same?
	addo	4,g1,g1		# post-increment source_2 byte ptr
	ld	(g0), g5	# fetch ahead next word of source_1
	be	.Lwloop		# loop if equal; fall thru if words unequal
.Lcloop: and	g4,g2,g5	# extract and compare individual bytes
	and	g4,g3,g6	# extract corresponding source_2 byte
	cmpobne	g5,g6,.diff	# if they differ, go return 1 or -1
	cmpo	0,g6		# they are the same. Are they null?
	shlo	8,g4,g4		# position mask for next extraction
	bne	.Lcloop		# loop if null not encountered
	mov	0,g0		# return equality
	bx	(g7)
.Lrett:
	ret			# target of the "call" entry's return branch
.diff:	bl	.neg		# cc from the byte compare: src1 < src2?
	mov	1,g0		# src1 > src2
	bx	(g7)
.neg:	subi	1,0,g0		# src1 < src2: return -1
.Lexit:
	bx	(g7)
|
stsp/newlib-ia16
| 8,008
|
newlib/libc/machine/i960/strcmp_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcm_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcmp (optimized assembler version for the CA)
result = strcmp (src1_addr, src2_addr)
compare the null terminated string pointed to by src1_addr to
the string space pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicly less than src2_addr, and 1
if it is lexicly greater.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strcmp
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source1 is word aligned, source2 is not
4) source2 is word aligned, source1 is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g14 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers are sufficient to implement the routine.
The registers are used as follows:
g0 original src1 ptr; return result
g1 src2 ptr; 0xff -- byte extraction mask
g2 src1 word ptr
g3 src2 word ptr
Little endian:
g4 lsw of src1
g5 msw of src1
g6 src2 word
g7 extracted src1
Big endian:
g4 msw of src1
g5 lsw of src1
g6 extracted src1
g7 src2 word
g13 return address
g14 shift count
*/
#if __i960_BIG_ENDIAN__
#define MSW g4
#define LSW g5
#define SRC1 g6
#define SRC2 g7
#else
#define LSW g4
#define MSW g5
#define SRC2 g6
#define SRC1 g7
#endif
.globl _strcmp
.globl __strcmp
.leafproc _strcmp, __strcmp
.align 2
/*-----------------------------------------------------------------------
 * strcmp for the i960CA (leafproc entry pair).
 * C equivalent:  int strcmp (const char *src1, const char *src2)
 * In:    g0 = src1, g1 = src2
 * Out:   g0 = 0 if equal, 1 if src1 > src2, -1 if src1 < src2
 * Uses:  g2-g7 scratch, g13 = saved return address, g14 = shift count
 *        (g14 is cleared before returning, per leafproc convention)
 * NOTE:  may read up to one word beyond a string's terminating null
 *        (see the fetch-ahead caveat in the file header comment).
 *---------------------------------------------------------------------*/
_strcmp:
#ifndef __PIC
	lda Lrett,g14 # g14 = return address for call-style entry
#else
	lda Lrett-(.+8)(ip),g14 # PIC: compute Lrett address ip-relative
#endif
__strcmp:
Lrestart: # re-entered from Lsrc2_unaligned with adjusted pointers
	notand g0,3,g2 # extract word addr of start of src1
	lda (g14),g13 # preserve return address
#if __i960_BIG_ENDIAN__
	cmpo g0,g2 # check alignment of src1
#endif
	ld (g2),LSW # fetch word with at least first byte of src1
	notand g1,3,g3 # extract word addr of start of src2
	ld 4(g2),MSW # fetch second word of src1
#if __i960_BIG_ENDIAN__
	bne Lsrc1_unaligned # branch if src1 is unaligned
	cmpo g3,g1 # check alignment of src2
	ld (g3),SRC2 # fetch word with at least first byte of src2
	mov LSW,SRC1 # extract word of src1
	lda 8(g2),g2 # advance src1 word addr
	bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
/* both src1 and src2 are word aligned */
Lwloop2: # word comparing loop
	cmpo SRC2,SRC1 # compare src1 and src2 words
	lda 0xff000000,g1 # byte extraction mask (big-endian: msb first)
	mov MSW,LSW # move msw of src1 to lsw
	ld (g2),MSW # pre-fetch next msw of src1
	addo 4,g2,g2 # post-increment src1 addr
	lda 4(g3),g3 # pre-increment src2 addr
	bne.f Lcloop # branch if src1 and src2 unequal
	scanbyte 0,SRC1 # check for null byte in src1 word
	ld (g3),SRC2 # pre-fetch next word of src2
	mov LSW,SRC1 # extract word of src1
	lda 0,g0 # prepare to return zero, indicating equality
	bno.t Lwloop2 # branch if null byte not encountered
	/* words were equal and contained null byte */
	mov 0,g14 # conform to register conventions
	bx (g13) # return g0 = 0 (src1 == src2)
Lsrc1_unaligned:
#endif
	cmpo g3,g1 # check alignment of src2
	ld (g3),SRC2 # fetch word with at least first byte of src2
	shlo 3,g0,g14 # compute shift count for src1 (low addr bits * 8)
#if __i960_BIG_ENDIAN__
	subo g14,0,g14 # 32 - shift count for big endian.
#endif
	eshro g14,g4,SRC1 # extract aligned word of src1 from g4:g5 pair
	lda 8(g2),g2 # advance src1 word addr
	bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
/* at least src2 is word aligned */
Lwloop: # word comparing loop
	cmpo SRC2,SRC1 # compare src1 and src2 words
#if __i960_BIG_ENDIAN__
	lda 0xff000000,g1 # byte extraction mask (msb first)
#else
	lda 0xff,g1 # byte extraction mask (lsb first)
#endif
	mov MSW,LSW # move msw of src1 to lsw
	ld (g2),MSW # pre-fetch next msw of src1
	addo 4,g2,g2 # post-increment src1 addr
	lda 4(g3),g3 # pre-increment src2 addr
	bne.f Lcloop # branch if src1 and src2 unequal
	scanbyte 0,SRC1 # check for null byte in src1 word
	ld (g3),SRC2 # pre-fetch next word of src2
	eshro g14,g4,SRC1 # extract next aligned word of src1
	lda 0,g0 # prepare to return zero, indicating equality
	bno.t Lwloop # branch if null byte not encountered
	/* words were equal and contained null byte */
	mov 0,g14 # conform to register conventions
	bx (g13) # return g0 = 0 (src1 == src2)
Lcloop_setup: # setup for coming from Lsrc2_unaligned
	mov LSW,SRC1 # restore extracted src1 word
#if __i960_BIG_ENDIAN__
	lda 0xff000000,g1 # byte extraction mask
#else
	lda 0xff,g1 # byte extraction mask
#endif
Lcloop: # character comparing loop: words differ or hold a null
	and SRC2,g1,g3 # extract next char of src2
	and SRC1,g1,g0 # extract next char of src1
	cmpobne.f g0,g3,.diff # check for equality
	cmpo 0,g0 # check for null byte
#if __i960_BIG_ENDIAN__
	shro 8,g1,g1 # shift mask for next byte
#else
	shlo 8,g1,g1 # shift mask for next byte
#endif
	bne.t Lcloop # branch if null not reached
	/* words are equal up thru null byte */
	mov 0,g14 # conform to register conventions
	bx (g13) # g0 = 0 (src1 == src2)
Lrett: # call-style entry returns through here
	ret
.diff: # bytes differ; condition code still set by cmpobne above
	mov 0,g14 # conform to register conventions
	bl Lless_than_exit # branch-if-less: src1 byte < src2 byte
Lgreater_than_exit:
	mov 1,g0
	bx (g13) # g0 = 1 (src1 > src2)
Lless_than_exit:
	subi 1,0,g0 # g0 = 0 - 1 = -1
	bx (g13) # g0 = -1 (src1 < src2)
Lsrc2_unaligned: # slowest path: realign so src2 becomes word aligned
	mov SRC1,LSW # retain src1 extracted word
	ld 4(g3),SRC1 # fetch second word of src2
	shlo 3,g1,MSW # compute shift count for src2
#if __i960_BIG_ENDIAN__
	subo MSW,0,MSW # 32 - shift count for big endian.
#endif
	eshro MSW,g6,SRC2 # extract word of src2 from g6:g7 pair
	cmpo LSW,SRC2 # compare src1 and src2 words
	notor g1,3,MSW # first step in computing new src1 ptr
	lda 4(g3),g1 # set new src2 ptr
	bne.f Lcloop_setup # first four bytes differ
	scanbyte 0,LSW # check for null byte
	lda (g13),g14 # prepare return pointer for Lrestart
	subo MSW,g0,g0 # second (final) step in computing new src1 ptr
	bno.t Lrestart # if null byte not encountered, continue
	/* with both string fetches shifted such that */
	/* src2 is now word aligned. */
	mov 0,g14 # conform to register conventions.
	lda 0,g0 # return indicator of equality.
	bx (g13)
/* ==== newlib/libc/machine/i960/strpbrk.S (stsp/newlib-ia16) ==== */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure strpbrk (optimized assembler version: 80960K series, 80960CA)
char_addr = strpbrk (string, brkset_string)
	Return the address of the first character in string that is also
	present in brkset_string.  Return NULL if none exists.
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine.
This routine stays out of g3 and g4 altogether. They may be used by
the strtok routine, which calls this routine in an incestuous way.
*/
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
.file "strprk.s"
.globl _strpbrk
.globl __strpbrk
.leafproc _strpbrk, __strpbrk
.align 2
/*-----------------------------------------------------------------------
 * strpbrk for the i960 (leafproc entry pair).
 * C equivalent:  char *strpbrk (const char *string, const char *brkset)
 * In:    g0 = string, g1 = brkset
 * Out:   g0 = address of the first char of string that also appears in
 *        brkset, or 0 (NULL) if string ends before any match
 * Uses:  g2, g5-g7, g13 scratch; deliberately avoids g3/g4, which may
 *        be live in strtok (it calls this routine -- see file header)
 *---------------------------------------------------------------------*/
_strpbrk:
#ifdef __PIC
	lda Lrett-(.+8)(ip),g14 # PIC: g14 = ip-relative address of Lrett
	b __strpbrk
#else
	lda Lrett,g14 # g14 = return address (leafproc convention)
	b __strpbrk
#endif
Lrett:	ret
__strpbrk:
Lnext_char_strpbrk: # outer loop: one iteration per string char
	addo 1,g1,g2 # g2 will be the brkset ptr
	ldob (g0),g7 # fetch next character of string
	ldob (g1),g6 # fetch first character of brkset
	cmpobe.f 0,g7,Lexit_char_not_found # quit if at end of string
Lscan_set_strpbrk: # inner loop: scan brkset for current string char
	cmpo g6,g7 # is brkset char equal to string char?
	ldob (g2),g5 # fetch next brkset char
	addo 1,g2,g2 # bump brkset ptr
	be.f Lexit_char_found # match: g0 already points at the string char
	cmpo g6,0 # is brkset_string exhausted?
	lda (g5),g6 # g6 = prefetched brkset char (lda as register move)
	bne.t Lscan_set_strpbrk # check next character of brkset
	addo 1,g0,g0 # check next character of string
	b Lnext_char_strpbrk
Lexit_char_not_found:
	mov 0,g0 # return NULL: string ended with no brkset char found
Lexit_char_found: # g0 = address of matching char (or NULL from above)
	mov g14,g13 # save return address
	lda 0,g14 # conform to register conventions
	bx (g13)
/* end of strpbrk */
/* (end of extracted sources) */