repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
4ms/metamodule-plugin-sdk
| 2,055
|
plugin-libc/newlib/libc/machine/spu/sprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
/* int sprintf(char *str, const char *fmt, ...) -- SPU-side stub.
   The SPU does no formatting itself: it spills the variadic register
   arguments to the stack (__stack_reg_va), then signals the PPE
   (__send_to_ppe) with the SPE_C99_VSPRINTF opcode to do the work.
   $0 = link register, $sp = stack pointer (SPU ABI). */
.text
.align 4
GLOBL sprintf
.type sprintf, @function
sprintf:
stqd $0, 16($sp) /* save caller address ($0 = link register) */
il $2, 2 /* number of fixed arguments before "..." (str, fmt) */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VSPRINTF /* which C99 call the PPE should run */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2) /* size of the frame __stack_reg_va built (see c99ppe.h) */
a $sp, $sp, $2 /* pop that frame */
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size sprintf, .-sprintf
|
4ms/metamodule-plugin-sdk
| 1,579
|
plugin-libc/newlib/libc/machine/spu/iprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build iprintf by assembling printf.S with the entry point renamed;
   by newlib convention the "i" variant is the integer-only printf
   (presumably identical here since the PPE does the formatting --
   TODO confirm). */
#define printf iprintf
#include "printf.S"
|
4ms/metamodule-plugin-sdk
| 1,579
|
plugin-libc/newlib/libc/machine/spu/fiscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build fiscanf by assembling fscanf.S with the entry point renamed
   (newlib's integer-only fscanf variant, by naming convention). */
#define fscanf fiscanf
#include "fscanf.S"
|
4ms/metamodule-plugin-sdk
| 1,579
|
plugin-libc/newlib/libc/machine/spu/siscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build siscanf by assembling sscanf.S with the entry point renamed
   (newlib's integer-only sscanf variant, by naming convention). */
#define sscanf siscanf
#include "sscanf.S"
|
4ms/metamodule-plugin-sdk
| 2,049
|
plugin-libc/newlib/libc/machine/spu/printf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
.text
.align 4
GLOBL printf
.type printf, @function
printf:
stqd $0, 16($sp) /* save caller address */
il $2, 1 /* number of fixed arguments */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VPRINTF
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2)
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size printf, .-printf
|
4ms/metamodule-plugin-sdk
| 1,582
|
plugin-libc/newlib/libc/machine/spu/fiprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build fiprintf by assembling fprintf.S with the entry point renamed
   (newlib's integer-only fprintf variant, by naming convention). */
#define fprintf fiprintf
#include "fprintf.S"
|
4ms/metamodule-plugin-sdk
| 2,043
|
plugin-libc/newlib/libc/machine/spu/scanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
.text
.align 4
GLOBL scanf
.type scanf, @function
scanf:
stqd $0, 16($sp) /* save caller address */
il $2, 1 /* number of fixed arguments */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VSCANF
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2)
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size scanf, .-scanf
|
4ms/metamodule-plugin-sdk
| 2,061
|
plugin-libc/newlib/libc/machine/spu/snprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
.text
.align 4
GLOBL snprintf
.type snprintf, @function
snprintf:
stqd $0, 16($sp) /* save caller address */
il $2, 3 /* number of fixed arguments */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VSNPRINTF
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2)
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size snprintf, .-snprintf
|
4ms/metamodule-plugin-sdk
| 2,049
|
plugin-libc/newlib/libc/machine/spu/sscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
.text
.align 4
GLOBL sscanf
.type sscanf, @function
sscanf:
stqd $0, 16($sp) /* save caller address */
il $2, 2 /* number of fixed arguments */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VSSCANF
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2)
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size sscanf, .-sscanf
|
4ms/metamodule-plugin-sdk
| 3,694
|
plugin-libc/newlib/libc/machine/spu/spu-mcount.S
|
/*
(C) Copyright IBM Corp. 2008
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of IBM nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Author: Ken Werner <ken.werner@de.ibm.com>
*/
/* _mcount extracts the address of the function just entered and the address
of the caller of that function and then calls __mcount_internal. The
prologue calls mcount without saving any registers. The return address is
stored in $75. The _mcount function has to:
- create a new stack frame
- save registers $2 to $75 on the stack
- copy the two addresses ($0 and $75) into the argument registers $3 and $4
- call __mcount_internal
- restore registers
- return to $75 */
/* The following two convenience macros assist in the coding of the
saving and restoring the register.
saveregs first, last Saves registers from first to the last.
restoreregs first, last Restores registers from last down to first.
Note: first must be less than or equal to last. */
/* Recursive assembler macro: stores $first at offset first*16 off $SP,
   then re-expands itself with first+1 until first == last. */
.macro saveregs first, last
stqd $\first, \first*16($SP)
.if \last-\first
saveregs "(\first+1)",\last
.endif
.endm
/* Mirror image: restores $last from offset last*16, recursing down
   until last == first. */
.macro restoreregs first, last
lqd $\last, \last*16($SP)
.if \last-\first
restoreregs \first,"(\last-1)"
.endif
.endm
/* _mcount needs to be resident since the overlay manager uses the scratch
registers too. */
.text
.align 3 /* 8 byte alignment. */
.global _mcount
.type _mcount, @function
/* Profiling hook: gcc prologues call this via "brsl $75, _mcount".
   See the block comment above for the full contract. */
_mcount:
stqd $lr, 16($sp) /* Save link register in the callers stack frame. */
stqd $lr, -1216($sp) /* Store back pointer.  NOTE(review): this stores
                        $lr, but a back-chain pointer is normally the
                        old $sp -- confirm against upstream newlib. */
il $lr, -1216 /* Push a new stack frame. */
a $sp, $sp, $lr /* Frame size: 16 * (74 + 2) = 1216. */
/* Save registers $2 to $75 on the stack. */
saveregs 2, 75
/* Bring the __mcount_internal arguments in place. */
lqd $3, 1232($sp) /* frompc (the link register). */
ori $4, $75, 0 /* selfpc (the gcc prologue puts "brsl $75, _mcount" in
front of every function). */
brsl $lr, __mcount_internal
/* Restore register $2 to $75 from the stack. */
restoreregs 2, 75
il $lr, 1216 /* undo the il/a pair from the prologue */
a $sp, $sp, $lr /* Pop the stack frame. */
lqd $lr, 16($sp) /* Restore link register. */
bi $75 /* Branch to the called function. */
|
4ms/metamodule-plugin-sdk
| 5,119
|
plugin-libc/newlib/libc/machine/sparc/setjmp.S
|
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* Modified for incorporation into newlib by Joel Sherrill
* (joel@OARcorp.com), On-Line Applications Research, 1995.
* Did the following:
* + merged in DEFS.h
* + removed error check since it prevented using this setjmp
* to "context switch"
* + added the support for the "user label" and "register" prefix
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: $Header$
*/
#if defined(LIBC_SCCS) && !defined(lint)
.asciz "@(#)_setjmp.s 8.1 (Berkeley) 6/4/93"
#endif /* LIBC_SCCS and not lint */
/*
* Recent versions of GNU cpp define variables which indicate the
* need for underscores and percents. If not using GNU cpp or
* the version does not support this, then you will obviously
* have to define these as appropriate.
*/
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/*********************************************************************
*********************************************************************
* Contents of DEFS.h *
*********************************************************************
*********************************************************************/
/* ENTRY(x): align, export and open a global procedure.  The profiled
   (PROF) build also allocates a per-function counter word in .data and
   calls mcount with its address in %o0 before the body runs. */
#ifdef PROF
#define ENTRY(x) \
.align 4; .globl SYM(x); .proc 1; SYM(x):; .data; .align 4; 1: .long 0; \
.text; save %sp,-96,%sp; sethi %hi(1b),%o0; call mcount; \
or %lo(1b),%o0,%o0; restore
#else
#define ENTRY(x) \
.align 4; .globl SYM(x); .proc 1; SYM(x):
#endif
/*********************************************************************
*********************************************************************
* END of DEFS.h *
*********************************************************************
*********************************************************************/
/*
* C library -- _setjmp, _longjmp
*
* _longjmp(a,v)
* will generate a "return(v?v:1)" from
* the last call to
* _setjmp(a)
* by unwinding the call stack.
* The previous signal state is NOT restored.
*/
/* #include "DEFS.h" */
/* setjmp/_setjmp: record the caller's %sp, return pc (%i7), frame
   pointer and our own return address (%o7) into the jmp_buf at %o0,
   then return 0.  No error checking, so it can also be used for
   context switching (see header comment). */
ENTRY(setjmp)
ENTRY(_setjmp)
st %sp, [%o0] /* caller's stack pointer */
st %i7, [%o0+4] /* caller's return pc */
st %fp, [%o0+8] /* store caller's frame pointer */
st %o7, [%o0+12] /* our own return address (retl target) */
retl
clr %o0 ! return 0 (executes in the branch delay slot)
/* longjmp/_longjmp(a, v): unwind to the matching _setjmp(a) and make
   it return v ? v : 1.  Signal state is NOT restored. */
ENTRY(longjmp)
ENTRY(_longjmp)
ta 0x03 /* trap: flush register windows to memory so the
           target window can be reloaded from the stack */
addcc %o1, %g0, %g1 ! compute v ? v : 1 in a global register
be,a 0f ! v == 0?  (",a" annuls the delay slot when not taken)
mov 1, %g1 ! delay slot: %g1 = 1, executed only if v == 0
0:
ld [%o0], %sp /* caller's stack pointer */
ldd [%sp], %l0 ! reload %l0-%l7 and %i0-%i5 from the register
ldd [%sp+8], %l2 ! window the flushed setjmp caller saved at %sp
ldd [%sp+16], %l4
ldd [%sp+24], %l6
ldd [%sp+32], %i0
ldd [%sp+40], %i2
ldd [%sp+48], %i4
ld [%o0+4], %i7 /* caller's return pc */
ld [%o0+8], %fp /* caller's frame pointer */
ld [%o0+12], %o7 /* setjmp's saved return address */
jmp %o7 + 8 ! success, return %g1 (resume just past the call) 
mov %g1, %o0 ! delay slot: return value
|
4ms/metamodule-plugin-sdk
| 1,462
|
plugin-libc/newlib/libc/machine/tic4x/setjmp.S
|
/* setjmp/longjmp routines.
*
* Written by Michael Hayes <m.hayes@elec.canterbury.ac.nz>.
*
* The author hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
.sect .text
.global setjmp
.global longjmp
/* int setjmp(jmp_buf env): save the preserved registers, the return
   address and the stack pointer into *env, return 0.
   _REGPARM selects the register calling convention; otherwise env is
   fetched from the stack. */
setjmp:
pop r1 /* r1 = our return address */
ldi sp, ar0
#ifndef _REGPARM
ldi *ar0, ar2 /* stack convention: env pointer is on the stack */
#endif
sti r4, *ar2++
sti r5, *ar2++
stf r6, *ar2++ /* r6/r7 saved with float stores */
stf r7, *ar2++
#ifdef _TMS320C4x
sti r8, *ar2++ /* C4x has an extra preserved register */
#endif
sti ar3, *ar2++
sti ar4, *ar2++
sti ar5, *ar2++
sti ar6, *ar2++
sti ar7, *ar2++
bd r1 /* delayed branch back to the caller: the next three instructions still execute */
sti r1, *ar2++ /* save return address */
sti ar0, *ar2 /* save stack pointer */
ldi 0, r0 /* setjmp returns 0 */
/* void longjmp(jmp_buf env, int val): restore the context saved by
   setjmp and resume there, making setjmp return val (1 if val == 0). */
longjmp:
#ifndef _REGPARM
ldi sp, ar0
ldi *-ar0(1), ar2 /* env (from the stack) */
ldi *-ar0(2), r0 /* val; ldi sets the status flags */
ldiz 1, r0 /* conditional load: val == 0 -> return 1 */
#else
ldi r2, r0
ldiz 1, r0 /* conditional load: val == 0 -> return 1 */
#endif
ldi *ar2++, r4
ldi *ar2++, r5
ldf *ar2++, r6
ldf *ar2++, r7
#ifdef _TMS320C4x
ldi *ar2++, r8
#endif
ldi *ar2++, ar3
ldi *ar2++, ar4
ldi *ar2++, ar5
ldi *ar2++, ar6
ldi *ar2++, ar7
ldi *ar2++, r1 /* saved return address */
ldi *ar2, sp /* saved stack pointer */
b r1 /* resume just after the setjmp call */
.end
|
4ms/metamodule-plugin-sdk
| 2,731
|
plugin-libc/newlib/libc/machine/necv70/fastmath.S
|
/* NEC V70 "fast" math wrappers, part 1: trig, abs, sqrt, inverse trig,
   cosh.  Each routine applies one hardware FP instruction in place to
   the argument at [ap] and copies the result out.  ".l" forms are
   64-bit doubles (moved with mov.d), ".s" forms 32-bit floats (mov.w).
   NOTE(review): assumes [ap] addresses the caller's argument area and
   r0 (r0:r1 for doubles) is the return register -- confirm V70 ABI. */
.globl _fast_sin
_fast_sin: /* double fast_sin(double) */
fsin.l [ap],[ap] /* compute in place on the argument slot */
mov.d [ap],r0 /* move the 64-bit result to the return registers */
ret #0
.globl _fast_sinf
_fast_sinf: /* float fast_sinf(float) */
fsin.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_cos
_fast_cos:
fcos.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_cosf
_fast_cosf:
fcos.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_tan
_fast_tan:
ftan.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_tanf
_fast_tanf:
ftan.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_fabs
_fast_fabs:
fabs.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_fabsf
_fast_fabsf:
fabs.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_sqrt
_fast_sqrt:
fsqrt.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_sqrtf
_fast_sqrtf:
fsqrt.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_acos
_fast_acos:
facos.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_acosf
_fast_acosf:
facos.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_asin
_fast_asin:
fasin.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_asinf
_fast_asinf:
fasin.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_atan
_fast_atan:
fatan.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_atanf
_fast_atanf:
fatan.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_cosh
_fast_cosh:
fcosh.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_coshf
_fast_coshf:
fcosh.s [ap],[ap]
mov.w [ap],r0
ret #0
/* Hyperbolic sine.  The neighbouring hyperbolic wrappers use their
   dedicated opcodes (fcosh, ftanh, fatanh); these two used fsin, which
   computes the circular sine sin(x), not sinh(x).  Fixed to fsinh so
   fast_sinh/fast_sinhf return the hyperbolic sine. */
.globl _fast_sinh
_fast_sinh: /* double fast_sinh(double) */
fsinh.l [ap],[ap] /* was fsin.l: computed sin(), not sinh() */
mov.d [ap],r0
ret #0
.globl _fast_sinhf
_fast_sinhf: /* float fast_sinhf(float) */
fsinh.s [ap],[ap] /* was fsin.s */
mov.w [ap],r0
ret #0
/* NEC V70 "fast" math wrappers, part 2: hyperbolic tan, exponentials
   and logarithms in base 2, base 10 and base e (exp2/exp10/expe,
   log2/log10/loge).  Same pattern as part 1: one FP instruction
   applied in place at [ap], result moved to the return register(s). */
.globl _fast_tanh
_fast_tanh:
ftanh.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_tanhf
_fast_tanhf:
ftanh.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_atanh
_fast_atanh:
fatanh.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_atanhf
_fast_atanhf:
fatanh.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_exp2
_fast_exp2: /* 2**x */
fexp2.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_exp2f
_fast_exp2f:
fexp2.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_exp10
_fast_exp10: /* 10**x */
fexp10.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_exp10f
_fast_exp10f:
fexp10.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_expe
_fast_expe: /* e**x */
fexpe.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_expef
_fast_expef:
fexpe.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_log2
_fast_log2:
flog2.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_log2f
_fast_log2f:
flog2.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_log10
_fast_log10:
flog10.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_log10f
_fast_log10f:
flog10.s [ap],[ap]
mov.w [ap],r0
ret #0
.globl _fast_loge
_fast_loge: /* natural log */
floge.l [ap],[ap]
mov.d [ap],r0
ret #0
.globl _fast_logef
_fast_logef:
floge.s [ap],[ap]
mov.w [ap],r0
ret #0
|
4ms/metamodule-plugin-sdk
| 1,098
|
plugin-libc/newlib/libc/machine/m88k/setjmp.S
|
/* This is a simple version of setjmp and longjmp.
Ian Lance Taylor, Cygnus Support, 15 July 1993. */
/* We need to save the address of the return instruction, which is in
r1, as well as general register r14 through r25. If we are
compiling for the 88110 with the extended register file, we also
need to save registers x22 through x29. The jmp_buf should be 52
bytes long in the one case, 84 bytes in the other. */
/* int setjmp (jmp_buf); */
/* _setjmp: jmp_buf pointer arrives in r2 (first argument register).
   Saves the return address (r1) at offset 0 and r14-r25 in
   double-word pairs.  NOTE(review): directive is spelled "globl" here
   but "global" at _longjmp below -- confirm which this assembler
   accepts, they should match. */
globl _setjmp
_setjmp:
st r1,r2,0 /* jmp_buf[0] = return address */
st.d r14,r2,4 /* save r14-r25, two registers per store */
st.d r16,r2,12
st.d r18,r2,20
st.d r20,r2,28
st.d r22,r2,36
st.d r24,r2,44
#ifdef __m88110__
/* These instructions are just a guess, and gas doesn't
support them anyhow. */
st.d x22,r2,52
st.d x24,r2,60
st.d x26,r2,68
st.d x28,r2,76
#endif
jmp r1 /* NOTE(review): nothing zeroes the return register
          before returning, so setjmp does not return 0 as
          declared above -- upstream sets it in a delay slot;
          confirm against newlib mainline. */
global _longjmp
/* _longjmp: restore r1 and r14-r25 from the jmp_buf at r2, then jump
   back to the saved return address. */
_longjmp:
ld r1,r2,0
ld.d r14,r2,4
ld.d r16,r2,12
ld.d r18,r2,20
ld.d r20,r2,28
ld.d r22,r2,36
ld.d r24,r2,44
#ifdef __m88110__
/* These instructions are just a guess, and gas doesn't
support them anyhow. */
ld.d x22,r2,52
ld.d x24,r2,60
ld.d x26,r2,68
ld.d x28,r2,76
#endif
jmp r1 /* NOTE(review): the "val" argument is never copied into
          the return register, so the resumed setjmp's return
          value is undefined here -- confirm against upstream. */
|
4ms/metamodule-plugin-sdk
| 1,790
|
plugin-libc/newlib/libc/machine/mn10300/memcpy.S
|
.file "memcpy.S"
.section .text
.global _memcpy
.type _memcpy,@function
# void *_memcpy(void *dst, const void *src, size_t n)
# dst arrives in d0, src in d1; n is the third argument, read from the
# stack at (28,sp) after the register save below.  Returns dst in a0.
_memcpy:
movm [d2,d3,a2,a3],(sp) # save callee-saved registers
.Lend_of_prologue:
mov d0,d2 # d2 = dst, kept for the return value
mov d1,a0 # a0 = src cursor
mov d2,a1 # a1 = dst cursor
mov (28,sp),d1 # d1 = remaining byte count
#ifndef __OPTIMIZE_SIZE__
mov a0,d0
or d2,d0 # test src|dst alignment in one btst
btst 3,d0
bne .L37 # not 4-byte aligned: byte loop only
cmp 15,d1
bls .L34 # fewer than 16 bytes: skip the unrolled loop
setlb # hardware loop: 16 bytes per iteration
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
add -16,d1
cmp 15,d1
lhi # loop while count > 15 (unsigned "higher")
.L34:
cmp 3,d1
bls .L37
setlb # word loop: 4 bytes per iteration
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
add -4,d1
cmp 3,d1
lhi # loop while count > 3
.L37:
#endif
cmp 0,d1
beq .L36 # nothing (left) to copy
setlb # byte tail loop
movbu (a0),d0
movbu d0,(a1)
inc a0
inc a1
sub 1,d1
lne # loop while count != 0
.L36:
mov d2,a0 # return dst
.Lepilogue:
ret [d2,d3,a2,a3],16 # restore saved registers and return
.Lend_of_memcpy:
.size _memcpy, .Lend_of_memcpy - _memcpy
# Hand-written DWARF unwind info describing the movm prologue above.
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _memcpy # FDE initial location
.4byte .Lend_of_memcpy - _memcpy # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _memcpy
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
4ms/metamodule-plugin-sdk
| 1,328
|
plugin-libc/newlib/libc/machine/mn10300/setjmp.S
|
# MN10300 setjmp: int setjmp(jmp_buf env); env in d0, returns 0 in d0.
# jmp_buf layout written here: d2@0, d3@4, mdr@8, a2@12, a3@16, sp@20;
# on AM33 parts r4-r7 follow at offset 24, and on AM33/2 the FP
# registers fs4..fs19 follow those.
.file "setjmp.S"
.section .text
.align 1
.global _setjmp
#ifdef __AM33__
#ifdef __AM33_2__
.am33_2
#else
.am33
#endif
#endif
_setjmp:
mov d0,a0
# Save the callee-saved data registers.
mov d2,(0,a0)
mov d3,(4,a0)
# NOTE(review): mdr is assumed to hold the return link at this point
# so that longjmp's retf can return to setjmp's caller -- confirm
# against the MN10300 calling convention.
mov mdr,d1
mov d1,(8,a0)
mov a2,(12,a0)
mov a3,(16,a0)
mov sp,a1
mov a1,(20,a0)
#ifdef __AM33__
# AM33 parts have extra callee-saved extended registers r4-r7.
add 24,a0
mov r4,(a0+)
mov r5,(a0+)
mov r6,(a0+)
mov r7,(a0+)
#ifdef __AM33_2__
# AM33/2 additionally saves the FPU registers fs4..fs19.
fmov fs4,(a0+)
fmov fs5,(a0+)
fmov fs6,(a0+)
fmov fs7,(a0+)
fmov fs8,(a0+)
fmov fs9,(a0+)
fmov fs10,(a0+)
fmov fs11,(a0+)
fmov fs12,(a0+)
fmov fs13,(a0+)
fmov fs14,(a0+)
fmov fs15,(a0+)
fmov fs16,(a0+)
fmov fs17,(a0+)
fmov fs18,(a0+)
fmov fs19,(a0+)
#endif
#endif
# A direct call of setjmp returns 0.
sub d0,d0
rets
# MN10300 longjmp: void longjmp(jmp_buf env, int val); env in d0,
# val in d1.  Restores the context saved by _setjmp above and makes
# setjmp appear to return val (forced to 1 when val == 0, per ISO C).
.global _longjmp
_longjmp:
mov d0,a0
# Restore mdr (saved return link) first, then the remaining regs.
mov (8,a0),d2
mov d2,mdr
mov (0,a0),d2
mov (4,a0),d3
mov (12,a0),a2
mov (16,a0),a3
mov (20,a0),a1
mov a1,sp
#ifdef __AM33__
add 24,a0
mov (a0+),r4
mov (a0+),r5
mov (a0+),r6
mov (a0+),r7
#ifdef __AM33_2__
fmov (a0+),fs4
fmov (a0+),fs5
fmov (a0+),fs6
fmov (a0+),fs7
fmov (a0+),fs8
fmov (a0+),fs9
fmov (a0+),fs10
fmov (a0+),fs11
fmov (a0+),fs12
fmov (a0+),fs13
fmov (a0+),fs14
fmov (a0+),fs15
fmov (a0+),fs16
fmov (a0+),fs17
fmov (a0+),fs18
fmov (a0+),fs19
#endif
#endif
# ISO C: longjmp must never make setjmp return 0.
cmp 0,d1
bne L1
mov 1,d1
L1:
mov d1,d0
# NOTE(review): retf is assumed to return through the link restored
# into mdr above -- confirm against the ISA manual.
retf [],0
|
4ms/metamodule-plugin-sdk
| 1,655
|
plugin-libc/newlib/libc/machine/mn10300/strcpy.S
|
# char *strcpy(char *dst, const char *src) -- MN10300.
# In:  d0 = dst, d1 = src.  Out: a0 = original dst.
# Fast path copies word-at-a-time when both pointers are 4-aligned,
# using the classic NUL-detect trick:
#   (X - 0x01010101) & ~X & 0x80808080  is nonzero iff X has a 0 byte
# (-16843009 == 0xFEFEFEFF == -0x01010101, -2139062144 == 0x80808080).
.file "strcpy.S"
.section .text
.global _strcpy
.type _strcpy,@function
_strcpy:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
# Register roles: d3 = saved dst (return value), a0 = src cursor,
# a1 = dst cursor.
mov d0,d3
mov d1,a0
mov d3,a1
mov a0,d0
#ifndef __OPTIMIZE_SIZE__
or d3,d0
btst 3,d0
bne .L2
# Pre-check the first source word for a NUL byte.
mov (a0),d0
mov -16843009,a2
mov a2,d1
add d0,d1
not d0
and d0,d1
mov -2139062144,d2
btst -2139062144,d1
bne .L2
# Word loop: copy the current word, then test the next one; loop
# (leq) while the NUL-detect result is zero.
setlb
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
mov (a0),d0
mov a2,d1
add d0,d1
not d0
and d0,d1
and d2,d1
leq
.L2:
#endif
# Byte loop: copies up to and including the terminating NUL.
setlb
movbu (a0),d0
movbu d0,(a1)
inc a0
inc a1
cmp 0,d0
lne
mov d3,a0
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_strcpy:
.size _strcpy, .Lend_of_strcpy - _strcpy
# DWARF .debug_frame unwind information (CIE + FDE) for _strcpy.
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _strcpy # FDE initial location
.4byte .Lend_of_strcpy - _strcpy # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _strcpy
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
4ms/metamodule-plugin-sdk
| 1,826
|
plugin-libc/newlib/libc/machine/mn10300/memset.S
|
# void *memset(void *s, int c, size_t n) -- MN10300.
# In:  d0 = s, d1 = c; n is the third argument at (28,sp) after the
#      movm push.  Out: a0 = original s.
.file "memset.S"
.section .text
.global _memset
.type _memset,@function
_memset:
movm [d2,d3,a2,a3], (sp)
.Lend_of_prologue:
# Register roles: d3 = saved s (return value), d2 = fill byte,
# a1 = bytes remaining, a0 = store cursor.
mov d0, d3
mov d1, d2
mov (28, sp),a1
mov d3, a0
#ifndef __OPTIMIZE_SIZE__
# Word fast path: needs at least 4 bytes and a 4-aligned pointer.
cmp 3, a1
bls .L41
btst 3, d3
bne .L41
# Replicate the low byte of c into all four bytes of d1
# (000000ab -> 0000abab -> abababab).
extbu d2
mov d2, d1
asl 8, d1
or d2, d1
mov d1, d0
asl 16, d0
or d0, d1
cmp 15, a1
bls .L36
# Hardware loop: store 16 bytes per iteration.
setlb
mov d1, (a0)
inc4 a0
mov d1, (a0)
inc4 a0
mov d1, (a0)
inc4 a0
mov d1, (a0)
inc4 a0
add -16, a1
cmp 15, a1
lhi
.L36:
# Store remaining whole words.
cmp 3, a1
bls .L41
setlb
mov d1, (a0)
inc4 a0
add -4, a1
cmp 3, a1
lhi
.L41:
#endif
# Byte-wise tail.
cmp 0, a1
beq .Lepilogue
setlb
movbu d2, (a0)
inc a0
sub 1, a1
lne
.Lepilogue:
mov d3,a0
ret [d2,d3,a2,a3], 16
.Lend_of_memset:
.size _memset, .Lend_of_memset - _memset
# DWARF .debug_frame unwind information (CIE + FDE) for _memset.
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _memset # FDE initial location
.4byte .Lend_of_memset - _memset # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _memset
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
4ms/metamodule-plugin-sdk
| 1,863
|
plugin-libc/newlib/libc/machine/mn10300/memchr.S
|
# void *memchr(const void *s, int c, size_t n) -- MN10300.
# In:  d0 = s, d1 = c; n at (28,sp) after the movm push.
# Out: a0 = pointer to first matching byte, or 0 if none.
# Fast path XORs each word with c replicated ×4 and applies the
# NUL-detect formula (X - 0x01010101) & ~X & 0x80808080 to the result,
# which is nonzero iff some byte equals c.
.file "memchr.S"
.section .text
.global _memchr
.type _memchr,@function
_memchr:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
# Register roles: a0 = byte-scan cursor, d2 = target byte,
# a1 = bytes remaining, a2 = word-scan cursor, a3 = c replicated ×4.
mov d0,a0
mov d1,d2
mov (28,sp),a1
#ifndef __OPTIMIZE_SIZE__
cmp 3,a1
bls .L44
mov a0,d3
btst 3,d3
bne .L44
mov a0,a2
mov 0,a3
clr d1
# Build a3 = c repeated in all 4 byte lanes (a3 = a3<<8 + c, ×4).
setlb
mov a3,d0
asl 8,d0
mov d2,a3
add d0,a3
inc d1
cmp 3,d1
lls
cmp 3,a1
bls .L48
.L33:
# Test one word for any byte equal to c.
mov (a2),d0
mov a3,d3
xor d3,d0
mov d0,d1
not d1
add -16843009,d0
and d1,d0
btst -2139062144,d0
beq .L34
# Candidate word: byte-scan its (up to) 4 bytes for the match.
mov a2,a0
clr d1
setlb
movbu (a0),d0
cmp d2,d0
beq .Lepilogue
inc a0
inc d1
cmp 3,d1
lls
.L34:
add -4,a1
inc4 a2
cmp 3,a1
bhi .L33
.L48:
mov a2,a0
.L44:
#endif
# Byte-wise tail scan of the remaining a1 bytes.
cmp 0,a1
beq .L50
setlb
movbu (a0),d0
cmp d2,d0
beq .Lepilogue
inc a0
sub 1,a1
lne
.L50:
# Not found: return NULL.
mov 0,a0
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_memchr:
.size _memchr, .Lend_of_memchr - _memchr
# DWARF .debug_frame unwind information (CIE + FDE) for _memchr.
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _memchr # FDE initial location
.4byte .Lend_of_memchr - _memchr # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _memchr
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
4ms/metamodule-plugin-sdk
| 1,618
|
plugin-libc/newlib/libc/machine/mn10300/memcmp.S
|
# int memcmp(const void *s1, const void *s2, size_t n) -- MN10300.
# In:  d0 = s1, d1 = s2; n at (28,sp) after the movm push.
# Out: d0 = 0 if equal, else (unsigned) *s1-byte - *s2-byte at the
#      first difference.
.file "memcmp.S"
.section .text
.global _memcmp
.type _memcmp,@function
_memcmp:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
# Register roles: a0 = s1 cursor, a1 = s2 cursor, a2 = bytes left.
mov d0,a0
mov d1,a1
mov (28,sp),a2
#ifndef __OPTIMIZE_SIZE__
# Word fast path: both pointers 4-aligned and at least 4 bytes left.
cmp 3,a2
bls .L22
mov a1,d2
or d2,d0
btst 3,d0
bne .L22
# Compare word-at-a-time; on the first differing word fall through
# to the byte loop to locate the differing byte.
setlb
mov (a0),d1
mov (a1),d0
cmp d0,d1
bne .L22
inc4 a0
inc4 a1
add -4,a2
cmp 3,a2
lhi
.L22:
#endif
# Byte-wise compare of the remaining a2 bytes.
cmp 0,a2
beq .L24
setlb
movbu (a0),d3
movbu (a1),d2
cmp d2,d3
beq .L23
# First difference: return byte difference.
mov d3,d0
sub d2,d0
jmp .Lepilogue
.L23:
inc a0
inc a1
sub 1,a2
lne
.L24:
# All bytes equal.
clr d0
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_func:
.size _memcmp, .Lend_of_func - _memcmp
# DWARF .debug_frame unwind information (CIE + FDE) for _memcmp.
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _memcmp # FDE initial location
.4byte .Lend_of_func - _memcmp # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _memcmp
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
4ms/metamodule-plugin-sdk
| 1,592
|
plugin-libc/newlib/libc/machine/mn10300/strlen.S
|
# size_t strlen(const char *s) -- MN10300.
# In:  d0 = s.  Out: d0 = length (bytes before the NUL terminator).
# Fast path scans word-at-a-time using the NUL-detect formula
# (X - 0x01010101) & ~X & 0x80808080 (constants -16843009 and
# -2139062144 below).
.file "strlen.S"
.section .text
.global _strlen
.type _strlen,@function
_strlen:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
# Register roles: a0 = scan cursor, a2 = saved start of string.
mov d0,a0
mov a0,a2
#ifndef __OPTIMIZE_SIZE__
# Word scan only when s is 4-byte aligned.
btst 3,d0
bne .L21
# Pre-check the first word; if it holds a NUL, byte-scan from here.
mov (a0),d0
mov -16843009,a1
mov a1,d1
add d0,d1
not d0
and d0,d1
mov -2139062144,d2
btst -2139062144,d1
bne .L21
# Advance a word at a time (leq loops while no NUL detected), then
# byte-scan the final word to find the exact terminator.
setlb
inc4 a0
mov (a0),d0
mov a1,d1
add d0,d1
not d0
and d0,d1
and d2,d1
leq
jmp .L21
#endif
.L19:
inc a0
.L21:
# Byte scan until the NUL byte.
movbu (a0),d3
cmp 0,d3
bne .L19
# Length = terminator address - start address.
sub a2,a0
mov a0,d0
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_strlen:
.size _strlen, .Lend_of_strlen - _strlen
# DWARF .debug_frame unwind information (CIE + FDE) for _strlen.
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _strlen # FDE initial location
.4byte .Lend_of_strlen - _strlen # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _strlen
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
4ms/metamodule-plugin-sdk
| 2,065
|
plugin-libc/newlib/libc/machine/mn10300/strchr.S
|
# char *strchr(const char *s, int c) -- MN10300.
# In:  d0 = s, d1 = c (only the low byte is used).
# Out: a0 = pointer to first occurrence of c, or to the terminator if
#      c == 0, or NULL if c does not occur.
# A 12-byte stack frame is used: (0,sp) spills the 0x80808080 mask,
# (7,sp) holds the target byte, (8,sp) holds the byte replicated ×4.
.file "strchr.S"
.section .text
.global _strchr
.type _strchr,@function
_strchr:
movm [d2,d3,a2,a3],(sp)
add -12,sp
.Lend_of_prologue:
mov d0,a1
movbu d1,(7,sp)
#ifndef __OPTIMIZE_SIZE__
# Word scan only when s is 4-byte aligned.
btst 3,d0
bne .L20
# Replicate the target byte into the 4 bytes at (8,sp):
# each iteration stores it at sp+8+d0, d0 = 0..3.
clr d0
setlb
mov sp,a2
mov d0,d3
add d3,a2
mov a2,a0
add 12,a0
movbu (7,sp),d3
movbu d3,(-4,a0)
inc d0
cmp 3,d0
lls
# First word: check for a NUL byte with the detect formula
# (X - 0x01010101) & ~X & 0x80808080 (a1 = -16843009 = -0x01010101).
mov a1,a0
mov -16843009,a1
mov (a0),d2
mov a1,d1
add d2,d1
mov d2,d0
not d0
and d0,d1
mov -2139062144,d3
mov d3,(0,sp)
btst -2139062144,d1
bne .L27
jmp .L38
.L28:
# Next word: test for a NUL byte.
inc4 a0
mov (a0),d2
mov a1,d1
add d2,d1
mov d2,d0
not d0
and d0,d1
mov (0,sp),d3
and d3,d1
bne .L27
.L38:
# Test the same word for the target byte: XOR with the replicated
# pattern at (8,sp), then apply the detect formula to the result.
mov (8,sp),d0
xor d2,d0
mov a1,d1
add d0,d1
not d0
and d0,d1
and d3,d1
beq .L28
.L27:
# Candidate word (holds a NUL or the target): byte-scan from here.
mov a0,a1
.L20:
#endif
# Byte scan until the target byte or the terminator.
movbu (a1),d0
cmp 0,d0
beq .L32
movbu (7,sp),d1
setlb
cmp d1,d0
beq .L36
inc a1
movbu (a1),d0
cmp 0,d0
lne
.L32:
# At the terminator: strchr(s, 0) returns a pointer to it.
movbu (7,sp),d0
movbu (a1),d3
cmp d0,d3
beq .L36
mov 0,a0
jmp .Lepilogue
.L36:
mov a1,a0
.Lepilogue:
# Pop 12 bytes of locals + 16 bytes of saved registers.
ret [d2,d3,a2,a3],28
.Lend_of_strchr:
.size _strchr, .Lend_of_strchr - _strchr
# DWARF .debug_frame unwind information (CIE + FDE) for _strchr.
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _strchr # FDE initial location
.4byte .Lend_of_strchr - _strchr # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _strchr
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
4ms/metamodule-plugin-sdk
| 1,669
|
plugin-libc/newlib/libc/machine/mn10300/strcmp.S
|
# int strcmp(const char *s1, const char *s2) -- MN10300.
# In:  d0 = s1, d1 = s2.
# Out: d0 = 0 if equal, else the (unsigned) byte difference at the
#      first mismatch.
.file "strcmp.S"
.section .text
.global _strcmp
.type _strcmp,@function
_strcmp:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
# Register roles: a0 = s1 cursor, a1 = s2 cursor.
mov d0,a0
mov d1,a1
#ifndef __OPTIMIZE_SIZE__
# Word fast path: both pointers 4-aligned and first words equal.
or d1,d0
btst 3,d0
bne .L11
mov (a0),d1
mov (a1),d0
cmp d0,d1
bne .L11
# Loop invariant: current words are equal.  Test the s1 word for a
# NUL byte via (X - 0x01010101) & ~X & 0x80808080; if present the
# strings are equal, so return 0.  Otherwise advance and compare the
# next word pair (leq loops while they match).
mov -16843009,d3
setlb
mov (a0),d0
mov d3,d1
add d0,d1
not d0
and d0,d1
and -2139062144,d1
beq .L6
clr d0
jmp .Lepilogue
.L6:
inc4 a0
inc4 a1
mov (a0),d1
mov (a1),d0
cmp d0,d1
leq
.L11:
#endif
# Byte loop: stop at a NUL or the first differing byte (lra loops
# unconditionally back to the setlb).
setlb
movbu (a1),d2
movbu (a0),d0
cmp 0,d0
beq .L9
cmp d2,d0
bne .L9
inc a0
inc a1
lra
.L9:
sub d2,d0
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_strcmp:
.size _strcmp, .Lend_of_strcmp - _strcmp
# DWARF .debug_frame unwind information (CIE + FDE) for _strcmp.
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _strcmp # FDE initial location
.4byte .Lend_of_strcmp - _strcmp # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _strcmp
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
4ms/metamodule-plugin-sdk
| 3,291
|
plugin-libc/newlib/libc/machine/h8300/memcpy.S
|
#include "setarch.h"
#include "defines.h"
#ifdef __H8300SX__
; void *memcpy(void *dst, const void *src, size_t n) -- H8SX variant.
; Uses the block-move instructions movmd.l/movmd.b, which take the
; destination in er6, the source in er5 and the count in r4.
; NOTE(review): LEN(x) comes from defines.h and is assumed to select
; the 16- or 32-bit view of the register as appropriate -- confirm.
.global _memcpy
_memcpy:
stm.l er4-er6,@-er7
; Set up source and destination pointers for movmd.
mov.l er0,er6
mov.l er1,er5
; See whether the copy is long enough to use the movmd.l code.
; Although the code can handle anything longer than 6 bytes,
; it can be more expensive than movmd.b for small moves.
; It's better to use a higher threshold to account for this.
;
; Note that the exact overhead of the movmd.l checks depends on
; the alignments of the length and pointers. They are faster when
; er0 & 3 == er1 & 3 == er2 & 3, faster still when these values
; are 0. This threshold is a compromise between the various cases.
cmp #16,LEN(r2)
blo simple
; movmd.l only works for even addresses. If one of the addresses
; is odd and the other is not, fall back on a simple move.
bld #0,r5l
bxor #0,r6l
bcs simple
; Make the addresses even.
bld #0,r5l
bcc word_aligned
mov.b @er5+,@er6+
sub #1,LEN(r2)
word_aligned:
; See if copying one word would make the first operand longword
; aligned. Although this is only really worthwhile if it aligns
; the second operand as well, it's no worse if doesn't, so it
; hardly seems worth the overhead of a "band" check.
bld #1,r6l
bcc fast_copy
mov.w @er5+,@er6+
sub #2,LEN(r2)
fast_copy:
; Set (e)r4 to the number of longwords to copy.
mov LEN(r2),LEN(r4)
shlr #2,LEN(r4)
#ifdef __NORMAL_MODE__
; 16-bit pointers and size_ts: one movmd.l is enough. This code
; is never reached with r4 == 0.
movmd.l
and.w #3,r2
simple:
mov.w r2,r4
beq quit
movmd.b
quit:
rts/l er4-er6
#else
; Skip the first iteration if the number of longwords is divisible
; by 0x10000.
mov.w r4,r4
beq fast_loop_next
; This loop copies r4 (!= 0) longwords the first time round and 65536
; longwords on each iteration after that.
fast_loop:
movmd.l
fast_loop_next:
sub.w #1,e4
bhs fast_loop
; Mop up any left-over bytes. We could just fall through to the
; simple code after the "and" but the version below is quicker
; and only takes 10 more bytes.
and.w #3,r2
beq quit
mov.w r2,r4
movmd.b
quit:
rts/l er4-er6
simple:
; Simple bytewise copy. We need to handle all lengths, including zero.
mov.w r2,r4
beq simple_loop_next
simple_loop:
movmd.b
simple_loop_next:
sub.w #1,e2
bhs simple_loop
rts/l er4-er6
#endif
#else
; Generic H8/300 variant: copies BACKWARDS, from the end of the
; buffers toward the front, word-at-a-time when every address and
; the length are even, else byte-at-a-time.  Register-name macros
; (A0P, A1P, ... / MOVP, ADDP, CMPP) come from defines.h.
.global _memcpy
_memcpy:
; MOVP @(2/4,r7),A0P ; dst
; MOVP @(4/8,r7),A1P ; src
; MOVP @(6/12,r7),A2P ; len
MOVP A0P,A3P ; keep copy of final dst
ADDP A2P,A0P ; point to end of dst
CMPP A0P,A3P ; see if anything to do
beq quit
ADDP A2P,A1P ; point to end of src
; lets see if we can do this in words
or A0L,A2L ; or in the dst address
or A3L,A2L ; or the length
or A1L,A2L ; or the src address
btst #0,A2L ; see if the lsb is zero
bne byteloop
wordloop:
#ifdef __NORMAL_MODE__
sub #2,A1P
#else
subs #2,A1P ; point to word
#endif
mov.w @A1P,A2 ; get word
mov.w A2,@-A0P ; save word
CMPP A0P,A3P ; at the front again ?
bne wordloop
rts
byteloop:
#ifdef __NORMAL_MODE__
sub #1,A1P
#else
subs #1,A1P ; point to byte
#endif
mov.b @A1P,A2L ; get byte
mov.b A2L,@-A0P ; save byte
CMPP A0P,A3P ; at the front again ?
bne byteloop
; return with A0 pointing to dst
quit: rts
#endif
|
4ms/metamodule-plugin-sdk
| 1,037
|
plugin-libc/newlib/libc/machine/h8300/setjmp.S
|
#include "setarch.h"
; H8/300 setjmp/longjmp.
; setjmp:  r0/er0 = env (jmp_buf pointer); returns 0.
; longjmp: r0/er0 = env, r1/er1 = val; returns val (1 if val == 0).
; jmp_buf holds the callee-saved registers (e)r4..(e)r7 plus the
; return address read from the top of stack (@sp).
.file "setjmp.S"
.section .text
.align 2
.global _setjmp
_setjmp:
#if defined(__H8300SX__)
; SX: auto-increment stores; layout er7, er6, er5, er4, retaddr.
mov.l er7,@er0+
mov.l er6,@er0+
mov.l er5,@er0+
mov.l er4,@er0+
; Copy the caller's return address memory-to-memory.
mov.l @sp,@er0
sub.l er0,er0
#elif defined(__H8300H__) || defined(__H8300S__)
; H/S: 32-bit registers at fixed 4-byte offsets.
mov.l er7,@er0
mov.l er6,@(4,er0)
mov.l er5,@(8,er0)
mov.l er4,@(12,er0)
mov.l @sp,er1
mov.l er1,@(16,er0)
sub.l er0,er0
#else
; Plain H8/300: 16-bit registers at 2-byte offsets.
mov.w r7,@r0
mov.w r6,@(2,r0)
mov.w r5,@(4,r0)
mov.w r4,@(6,r0)
mov.w @sp,r1
mov.w r1,@(8,r0)
sub.w r0,r0
#endif
rts
.global _longjmp
_longjmp:
#if defined(__H8300H__) || defined (__H8300S__) || defined (__H8300SX__)
; Restore callee-saved registers (r7 first restores the stack
; pointer) and patch the return address back onto the stack.
mov.l @er0+,er7
mov.l @er0+,er6
mov.l @er0+,er5
mov.l @er0+,er4
#if defined(__H8300SX__)
mov.l @er0,@sp
#else
mov.l @er0,er2
mov.l er2,@sp
#endif
#if (__INT_MAX__ <= 32767)
mov.w r1,r0
#else
mov.l er1,er0
#endif
; The mov of val set the Z flag: skip the force-to-1 if val != 0
; (ISO C: longjmp must never make setjmp return 0).
bne .L1
sub er0,er0
adds #1,er0
#else
mov.w @r0+,r7
mov.w @r0+,r6
mov.w @r0+,r5
mov.w @r0+,r4
mov.w @r0,r2
mov.w r2,@sp
mov.w r1,r0
bne .L1
mov.w #1,r0
#endif
.L1:
rts
|
4ms/metamodule-plugin-sdk
| 1,640
|
plugin-libc/newlib/libc/machine/h8300/memset.S
|
#include "setarch.h"
#include "defines.h"
#if defined (__H8300SX__)
; void *memset(void *s, int c, size_t n) -- H8SX variant.
; In: er0 = s, r1l = c, r2 = n.  er0 is returned unchanged.
; NOTE(review): LEN(x) comes from defines.h and is assumed to select
; the 16- or 32-bit view of the register as appropriate -- confirm.
.global _memset
_memset:
; Use er3 is a temporary since er0 must remain unchanged on exit.
mov.l er0,er3
; Fill er1 with the byte to copy.
mov.b r1l,r1h
mov.w r1,e1
; Account for any excess bytes and words that will be copied after
; the main loop. r2 >= 0 if there is a longword to copy.
sub #4,LEN(r2)
blo longs_done
; Copy one byte if doing so will make er3 word-aligned.
; This isn't needed for correctness but it makes the main loop
; slightly faster.
bld #0,r3l
bcc word_aligned
mov.b r1l,@er3+
sub #1,LEN(r2)
blo longs_done
word_aligned:
; Likewise one word for longword alignment.
bld #1,r3l
bcc long_copy
mov.w r1,@er3+
sub #2,LEN(r2)
blo longs_done
long_copy:
; Copy longwords.
mov.l er1,@er3+
sub #4,LEN(r2)
bhs long_copy
longs_done:
; At this point, we need to copy r2 & 3 bytes. Copy a word
; if necessary.
bld #1,r2l
bcc words_done
mov.w r1,@er3+
words_done:
; Copy a byte.
bld #0,r2l
bcc bytes_done
mov.b r1l,@er3+
bytes_done:
rts
#else
; Generic H8/300 variant: fills BACKWARDS from the end of the buffer,
; word-at-a-time when the addresses allow, else byte-at-a-time.
; Register-name macros (A0P, A1, ... / MOVP, ADDP, CMPP) come from
; defines.h.
; A0P pointer to cursor
; A1P thing to copy
.global _memset
_memset:
; MOVP @(2/4,r7),A2P ; dst
; MOVP @(4/8,r7),A1 ; src thing
; MOVP @(6/12,r7),A3P ; len
MOVP A2P,A2P
beq quit
; A3 points to the end of the area
MOVP A0P,A3P
ADDP A2P,A3P
; see if we can do it in words
; by oring in the start of the buffer to the end address
or A0L,A2L
btst #0,A2L
bne byteloop
; we can do it a word at a time
mov.b A1L,A1H
wordloop:
mov.w A1,@-A3P
CMPP A3P,A0P
bne wordloop
quit: rts
byteloop:
mov.b A1L,@-A3P
CMPP A3P,A0P
bne byteloop
rts
#endif
|
4ms/metamodule-plugin-sdk
| 2,731
|
plugin-libc/newlib/libc/machine/nds32/memcpy.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Function:
memcpy - copy memory regions
Syntax:
void *memcpy(void *s1, const void *s2, size_t n);
Description:
The memcpy function copies n characters from the object pointed to
by s2 into the object pointed to by s1. If copying takes place
between objects that overlap, the behavior is undefined.
Return value:
The memcpy function returns the value of s1.
*/
/* Register roles: $r0 = s1/dst (return value, kept intact),
   $r1 = s2/src cursor, $r5 = dst cursor, $r3 = whole words to copy,
   $r2 = leftover bytes (n & 3), $r4 = data scratch.  */
.text
.align 2
.globl memcpy
.type memcpy, @function
memcpy:
/* Corner cases. If *s1 equals *s2
or size_t is zero, just go return. */
beq $r0, $r1, .Lend_memcpy
beqz $r2, .Lend_memcpy
/* Keep *s1 as return value.
Set $r3 as how many words to copy.
Set $r2 as how many bytes are less than a word. */
move $r5, $r0
srli $r3, $r2, 2
andi $r2, $r2, 3
beqz $r3, .Lbyte_copy
.Lword_copy:
/* Do the word copy $r3 times. Then, do the byte copy $r2 times.
   lmw.bim/smw.bim with an $r4..$r4 range move one word and
   post-increment the address register.  */
lmw.bim $r4, [$r1], $r4, 0
addi $r3, $r3, -1
smw.bim $r4, [$r5], $r4, 0
bnez $r3, .Lword_copy /* Loop again ? */
beqz $r2, .Lend_memcpy /* Fall THRU or go return ? */
.Lbyte_copy:
/* Do the byte copy $r2 times. */
lbi.bi $r4, [$r1], 1
addi $r2, $r2, -1
sbi.bi $r4, [$r5], 1
bnez $r2, .Lbyte_copy /* Loop again ? */
.Lend_memcpy:
ret
.size memcpy, .-memcpy
|
4ms/metamodule-plugin-sdk
| 4,974
|
plugin-libc/newlib/libc/machine/nds32/setjmp.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The setjmp/longjmp for nds32.
The usage of thirty-two 32-bit General Purpose Registers (GPR):
$r28 : $fp
$r29 : $gp
$r30 : $lp
$r31 : $sp
caller-save registers: $r0 ~ $r5, $r16 ~ $r23
callee-save registers: $r6 ~ $r10, $r11 ~ $r14
reserved for assembler : $r15
reserved for other use : $r24, $r25, $r26, $r27
Save all callee-save registers and $fp, $gp, $lp and $sp is enough in theory.
For debugging issue, the layout of jum_buf in here should be in sync with GDB.
The $r16 ~ $r19 are used to store D0/D1, keep them for backward-compatible.
*/
/* int setjmp(jmp_buf env); */
/* $r0 = env.  Saves the callee-saved GPRs ($r6.. plus $fp/$gp/$lp/$sp)
   and, when an FPU is configured, the callee-saved FP registers
   selected by $fpcfg.freg.  Returns 0.  */
.text
.align 2
.global setjmp
.type setjmp, @function
setjmp:
#if __NDS32_REDUCED_REGS__
smw.bim $r6, [$r0], $r10, #0b0000
addi $r0, $r0, #32 /* Leave room to keep jum_buf all the same. */
smw.bim $r31, [$r0], $r31, #0b1111
#else
smw.bim $r6, [$r0], $r14, #0b0000
smw.bim $r16, [$r0], $r19, #0b1111
#endif
#if __NDS32_EXT_FPU_SP__ || __NDS32_EXT_FPU_DP__
/* Extract $fpcfg.freg (b[3:2]), then save into jmp_buf. */
fmfcfg $r2
slli $r2, $r2, #28
srli $r2, $r2, #30
swi.bi $r2, [$r0], #4
/* Make sure $r0 is double-word-aligned. */
addi $r0, $r0, #7
bitci $r0, $r0, #7
/* Case switch according to $fpcfg.freg.  The cases fall through so
   that larger configurations save a superset of the registers. */
beqz $r2, .LCFG0_save /* Branch if $fpcfg.freg = 0b00. */
xori $r15, $r2, #0b10
beqz $r15, .LCFG2_save /* Branch $fpcfg.freg = 0b10. */
srli $r2, $r2, #0b01
beqz $r2, .LCFG1_save /* Branch if $fpcfg.freg = 0b01. */
/* Fall-through if $fpcfg.freg = 0b11. */
.LCFG3_save:
fsdi.bi $fd31, [$r0], #8
fsdi.bi $fd29, [$r0], #8
fsdi.bi $fd27, [$r0], #8
fsdi.bi $fd25, [$r0], #8
fsdi.bi $fd23, [$r0], #8
fsdi.bi $fd21, [$r0], #8
fsdi.bi $fd19, [$r0], #8
fsdi.bi $fd17, [$r0], #8
.LCFG2_save:
fsdi.bi $fd15, [$r0], #8
fsdi.bi $fd13, [$r0], #8
fsdi.bi $fd11, [$r0], #8
fsdi.bi $fd9, [$r0], #8
.LCFG1_save:
fsdi.bi $fd7, [$r0], #8
fsdi.bi $fd5, [$r0], #8
.LCFG0_save:
fsdi.bi $fd3, [$r0], #8
#endif
/* Set return value to zero. */
movi $r0, 0
ret
.size setjmp, .-setjmp
/* void longjmp(jmp_buf env, int val); */
/* $r0 = env, $r1 = val.  Restores the context saved by setjmp above
   (same layout, including the FP registers selected by $fpcfg.freg)
   and makes setjmp appear to return val, or 1 if val is 0.  */
.text
.align 2
.global longjmp
.type longjmp, @function
longjmp:
#if __NDS32_REDUCED_REGS__
lmw.bim $r6, [$r0], $r10, #0b0000
addi $r0, $r0, #32
lmw.bim $r31, [$r0], $r31, #0b1111
#else
lmw.bim $r6, [$r0], $r14, #0b0000
lmw.bim $r16, [$r0], $r19, #0b1111
#endif
#if __NDS32_EXT_FPU_SP__ || __NDS32_EXT_FPU_DP__
/* Restore value of $fpcfg.freg (b[3:2]). */
lwi.bi $r2, [$r0], #4
/* Make sure $r0 is double-word-aligned. */
addi $r0, $r0, #7
bitci $r0, $r0, #7
/* Case switch according to $fpcfg.freg -- mirrors the save order. */
beqz $r2, .LCFG0_restore /* Branch if $fpcfg.freg = 0b00. */
xori $r15, $r2, #0b10
beqz $r15, .LCFG2_restore /* Branch $fpcfg.freg = 0b10. */
srli $r2, $r2, #0b01
beqz $r2, .LCFG1_restore /* Branch if $fpcfg.freg = 0b01. */
/* Fall-through if $fpcfg.freg = 0b11. */
.LCFG3_restore:
fldi.bi $fd31, [$r0], #8
fldi.bi $fd29, [$r0], #8
fldi.bi $fd27, [$r0], #8
fldi.bi $fd25, [$r0], #8
fldi.bi $fd23, [$r0], #8
fldi.bi $fd21, [$r0], #8
fldi.bi $fd19, [$r0], #8
fldi.bi $fd17, [$r0], #8
.LCFG2_restore:
fldi.bi $fd15, [$r0], #8
fldi.bi $fd13, [$r0], #8
fldi.bi $fd11, [$r0], #8
fldi.bi $fd9, [$r0], #8
.LCFG1_restore:
fldi.bi $fd7, [$r0], #8
fldi.bi $fd5, [$r0], #8
.LCFG0_restore:
fldi.bi $fd3, [$r0], #8
#endif
/* Set val as return value. If the value val is 0, 1 will be returned
instead. */
movi $r0, 1
cmovn $r0, $r1, $r1 /* r0=(r1!=0)? r1: r0 */
ret
.size longjmp, .-longjmp
|
4ms/metamodule-plugin-sdk
| 2,863
|
plugin-libc/newlib/libc/machine/nds32/strcpy.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Function:
strcpy - copy a string.
Syntax:
char *strcpy(char *dest, const char *src);
Description:
This function copies the string pointed to by src into the array
point to by dest (include the teminating null character).
Return value:
strcpy returns the dest as given.
*/
/* Register roles: $r0 = dest (returned unchanged), $r1 = src cursor,
   $r3 = dest cursor, $r2/$r4/$r5 = scratch for the NUL-detect test.  */
.text
.align 2
.globl strcpy
.type strcpy, @function
strcpy:
move $r3, $r0 /* Keep r0 as reture value. */
/* If SRC or DEST is unaligned, then copy bytes. */
or $r2, $r1, $r0
andi $r2, $r2, #3
bnez $r2, .Lbyte_mode
.Lword_mode:
/* SRC and DEST are both "long int" aligned, try to do "long int"
sized copies. */
/* #define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
DETECTNULL returns nonzero if (long)X contains a NULL byte.
Note 0xFEFEFEFF below is -0x01010101, so the add subtracts. */
lwi $r2, [$r1+(0)] /* r2 is X */
sethi $r4, hi20(0xFEFEFEFF)
ori $r4, $r4, lo12(0xFEFEFEFF)
add $r4, $r2, $r4 /* r4 = ((X) - 0x01010101) */
nor $r5, $r2, $r2 /* r5 = ~(X) */
and $r4, $r5, $r4 /* r4 = ~(X) & ((X) - 0x01010101) */
sethi $r5, hi20(0x80808080)
ori $r5, $r5, lo12(0x80808080)
and $r4, $r4, $r5 /* r4 = r4 & 0x80808080 */
bnez $r4, .Lbyte_mode /* Contains a NULL byte. */
swi.bi $r2, [$r3], #4
addi $r1, $r1, #4
b .Lword_mode
.Lbyte_mode:
/* Byte copy finishes the string, including the terminating NUL. */
lbi.bi $r4, [$r1], #1 /* r4 <- *src++ */
sbi.bi $r4, [$r3], #1 /* r4 -> *dest++ */
bnez $r4, .Lbyte_mode
ret
.size strcpy, .-strcpy
|
4ms/metamodule-plugin-sdk
| 3,027
|
plugin-libc/newlib/libc/machine/nds32/memset.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Function:
memset - fill memory with a constant byte
Syntax:
void *memset(void *s, int c, size_t n);
Description:
The memset function copies the value of c (converted to an unsigned char)
into each of the first n characters of the object pointed to by s.
Return value:
The memset function returns the value of s.
*/
/*-----------------------------------------------------------------------
 * void *memset(void *s, int c, size_t n)
 * ABI (nds32): $r0 = s, $r1 = c, $r2 = n; returns s in $r0.
 * Strategy: fill n/4 words with the replicated byte pattern, then the
 * remaining n%4 bytes one at a time.  Clobbers $r1-$r5 and flags.
 *---------------------------------------------------------------------*/
	.text
	.align	2
	.globl	memset
	.type	memset, @function
memset:
	/* Corner case. If n is zero, just go return. */
	beqz	$r2, .Lend_memset
	/* Keep $r0 as return value.
	   Set $r4 as how many words to copy.
	   Set $r2 as how many bytes are less than a word. */
	move	$r5, $r0		/* $r5 = working dest ptr; $r0 preserved for return. */
	srli	$r4, $r2, 2		/* $r4 = n / 4 (whole words). */
	andi	$r2, $r2, 3		/* $r2 = n % 4 (trailing bytes). */
	beqz	$r4, .Lbyte_set		/* n < 4: byte loop only. */
	/* Set $r1 a word-size pattern composed of the value of c
	   (converted to an unsigned char). Convert ??????ab to abababab. */
	andi	$r1, $r1, 0xff		/* Set $r1 = 000000ab. */
	slli	$r3, $r1, 8		/* Set $r3 = 0000ab00. */
	or	$r1, $r1, $r3		/* Set $r1 = 0000abab. */
	slli	$r3, $r1, 16		/* Set $r3 = abab0000. */
	or	$r1, $r1, $r3		/* Set $r1 = abababab. */
.Lword_set:
	/* Do the word set $r4 times. Then, do the byte set $r2 times. */
	addi	$r4, $r4, -1
	smw.bim	$r1, [$r5], $r1		/* Set a word-size. */
	bnez	$r4, .Lword_set		/* Loop again ? */
	beqz	$r2, .Lend_memset	/* Fall THRU or go return ? */
.Lbyte_set:
	/* Do the byte set $r2 times. */
	addi	$r2, $r2, -1
	sbi.bi	$r1, [$r5], 1		/* Set a byte-size.  FIX: post-increment
					   byte store is "sbi.bi" (as in the
					   sibling strcpy/strcmp); ".p" is not
					   a valid nds32 suffix. */
	bnez	$r2, .Lbyte_set		/* Loop again ? */
.Lend_memset:
	ret
	.size	memset, .-memset
|
4ms/metamodule-plugin-sdk
| 3,494
|
plugin-libc/newlib/libc/machine/nds32/strcmp.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Function:
strcmp - compare two strings.
Syntax:
int strcmp(const char *s1, const char *s2);
Description:
This function compares the two strings s1 and s2. It returns an
integer less than, equal to, or greater than zero if s1 is found,
respectively, to be less than, to match, or be greater than s2.
Return value:
strcmp returns an integer less than, equal to, or greater than
zero if s1 (or the first n bytes thereof) is found, respectively,
to be less than, to match, or be greater than s2.
*/
/*-----------------------------------------------------------------------
 * int strcmp(const char *s1, const char *s2)
 * ABI (nds32): $r0 = s1, $r1 = s2; returns the byte difference in $r0
 * (<0, 0, >0).  If both pointers are word-aligned the strings are
 * compared a word at a time, falling back to a byte loop on the first
 * differing or NUL-containing word.  Clobbers $r1-$r5 and flags.
 *---------------------------------------------------------------------*/
	.text
	.align	2
	.globl	strcmp
	.type	strcmp, @function
strcmp:
	/* If s1 or s2 are unaligned, then compare bytes. */
	or	$r5, $r1, $r0
	andi	$r5, $r5, #3
	bnez	$r5, .Lbyte_mode
	/* If s1 and s2 are word-aligned, compare them a word at a time. */
	lwi	$r5, [$r0+(0)]
	lwi	$r3, [$r1+(0)]
	bne	$r5, $r3, .Lbyte_mode	/* A difference was detected, so
					   search bytewise. */
	/* It's more efficient to set bit mask outside the word_mode loop. */
	sethi	$r4, hi20(0xFEFEFEFF)	/* Set $r4 as -0x01010101. */
	ori	$r4, $r4, lo12(0xFEFEFEFF)
	sethi	$r2, hi20(0x80808080)
	ori	$r2, $r2, lo12(0x80808080)
	b	.Ldetect_null
	.align	2
.Lword_mode:
	lmw.aim	$r5, [$r0], $r5		/* Load next word of s1, advance $r0. */
	lmw.aim	$r3, [$r1], $r3		/* Load next word of s2, advance $r1. */
	bne	$r5, $r3, .Lbyte_mode
.Ldetect_null:
	/* #define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
	   DETECTNULL returns nonzero if (long)X contains a NULL byte. */
	nor	$r3, $r5, $r5		/* r3 = ~(X) */
	add	$r5, $r5, $r4		/* r5 = ((X) - 0x01010101) */
	and	$r5, $r5, $r3		/* r5 = ~(X) & ((X) - 0x01010101) */
	and	$r5, $r5, $r2		/* r5 = r5 & 0x80808080 */
	beqz	$r5, .Lword_mode	/* No NULL byte, compare next word. */
	/* To get here, *a1 == *a2, thus if we find a null in *a1,
	   then the strings must be equal, so return zero. */
	movi	$r0, #0
	ret
.Lbyte_mode:
	/* Byte-mode compare. */
	lbi.bi	$r5, [$r0], #1
	lbi.bi	$r3, [$r1], #1
	bne	$r5, $r3, 1f		/* Mismatch, done. */
	bnez	$r5, .Lbyte_mode	/* Not NUL yet: keep comparing. */
1:
	sub	$r0, $r5, $r3		/* Return difference of the two bytes. */
	ret
	.size	strcmp, .-strcmp
|
4ms/metamodule-plugin-sdk
| 8,107
|
plugin-libc/newlib/libc/machine/i960/memccpy_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "mccpy_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure memccpy (optimized assembler version for the 80960CA)
dest_addr = memccpy (dest_addr, src_addr, char, len)
copy len bytes pointed to by src_addr to the space pointed to by
dest_addr, stopping if char is copied. If char is copied,
return address of byte after char in dest string; else null.
Undefined behavior will occur if the end of the source array is in
the last two words of the program's allocated memory space. This
is so because the routine fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Undefined behavior will also occur if the source and destination
strings overlap.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
/*-----------------------------------------------------------------------
 * void *memccpy(void *dest, const void *src, int c, size_t len)
 * i960 leafproc.  In: g0 = dest, g1 = src, g2 = c, g3 = len.
 * Out: g0 = byte after c in dest if c was copied, else NULL.
 * Uses g0-g7, g13 and g14 only (leafproc constraints; see header
 * comment above).  Fetches ahead one word — see the caveat in the
 * file description.
 *---------------------------------------------------------------------*/
	.globl	_memccpy
	.leafproc	_memccpy, __memccpy
	.align	2
_memccpy:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memccpy:
	notand	g1,3,g5		# extract word addr of start of src
	lda	(g14),g13	# preserve return address
	cmpibge.f 0,g3,Lexit_char_not_found # Lexit if # of bytes to move is <= 0
	cmpo	g5,g1		# check alignment of src
	ld	(g5),LSW	# fetch word containing at least first byte
	notand	g0,3,g4		# extract word addr of start of dest
	lda	4(g5),g5	# advance src word addr
	shlo	24,g2,g2	# reduce char to single byte
	bne.f	Lcase_245	# branch if src is NOT word aligned
Lcase_13:
	cmpobe.t g0,g4,Lcase_1_setup	# branch if dest word aligned
Lcase_3:			# src is word aligned; dest is not
	mov	LSW,MSW		# make copy of first word of src
	addo	4,g4,g1		# move dest word ptr to first word boundary
	lda	32,g14		# initialize shift count to zero
Lcase_25:
Lcase_3_cloop_at_start:		# character copying loop for start of dest str
	cmpdeci	0,g3,g3		# is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
	lda	-8(g14),g14	# augment the shift counter
#else
	lda	8(g14),g14	# augment the shift counter
#endif
	be.f	Lexit_char_not_found	# Lexit if max_bytes is exhausted
#if __i960_BIG_ENDIAN__
	rotate	8,MSW,MSW	# move next byte into position for extraction
#endif
	shlo	24,MSW,g4
	stob	MSW,(g0)	# store the byte in dest
	cmpo	g4,g2
	lda	1(g0),g0	# post-increment dest ptr
#if ! __i960_BIG_ENDIAN__
	shro	8,MSW,MSW	# move next byte into position for extraction
#endif
	be.f	Lexit_char_found	# Lexit if char found
	cmpobne.t g1,g0,Lcase_3_cloop_at_start # loop until dest reaches word boundary
	ld	(g5),MSW	# fetch msw of operand for double shift
Lcase_4:
	shro	8,g2,g4		# build word with c replicated in all 4 bytes
	or	g4,g2,g1	# (used by scanbyte to spot c in a word)
	shro	16,g1,g4
	or	g4,g1,g4
#if __i960_BIG_ENDIAN__
	cmpobne	0,g14,Lcase_3_wloop
Lcase_3_wloop2:
	cmpi	g3,4		# less than four bytes to move?
	lda	4(g5),g5	# post-increment src word addr
	mov	LSW,g1		# extract 4 bytes of src
	bl.f	Lcase_13_cloop_setup	# branch if < four bytes left to move
	scanbyte g4,g1		# branch if word has char in it
	bo.f	Lcase_13_cloop_setup
	mov	MSW,LSW		# move msw to lsw
	ld	(g5),MSW	# pre-fetch msw of operand for double shift
	subi	4,g3,g3		# decrease max_byte count by the 4 bytes moved
	st	g1,(g0)		# store 4 bytes to dest
	addo	4,g0,g0		# post-increment dest ptr
	b	Lcase_3_wloop2
#endif
Lcase_3_wloop:
	cmpi	g3,4		# less than four bytes to move?
	lda	4(g5),g5	# post-increment src word addr
	eshro	g14,g6,g1	# extract 4 bytes of src
	bl.f	Lcase_13_cloop_setup	# branch if < four bytes left to move
	scanbyte g4,g1		# branch if word has char in it
	bo.f	Lcase_13_cloop_setup
	mov	MSW,LSW		# move msw to lsw
	ld	(g5),MSW	# pre-fetch msw of operand for double shift
	subi	4,g3,g3		# decrease max_byte count by the 4 bytes moved
	st	g1,(g0)		# store 4 bytes to dest
	addo	4,g0,g0		# post-increment dest ptr
	b	Lcase_3_wloop
Lcase_1_setup:
	subo	4,g0,g0		# store is pre-incrementing; back up dest addr
	shro	8,g2,g4		# build word with c replicated in all 4 bytes
	or	g4,g2,MSW
	shro	16,MSW,g4
	or	g4,MSW,g4
	b	Lcase_1
Lcase_1_wloop:			# word copying loop
	subi	4,g3,g3		# decrease max_byte count by the 4 bytes moved
	ld	(g5),LSW	# pre-fetch next word of src
	addo	4,g5,g5		# post-increment src addr
	st	g1,(g0)		# store word in dest string
Lcase_1:			# src and dest are word aligned
	cmpi	g3,4		# check for fewer than four bytes to move
	addo	4,g0,g0		# pre-increment dest addr
	lda	(LSW),g1	# keep a copy of the src word
	bl.f	Lcase_13_cloop_setup	# branch if less than four bytes to copy
	scanbyte LSW,g4		# branch if char is not in foursome
	bno.t	Lcase_1_wloop
Lcase_13_cloop_setup:
	cmpibe.f 0,g3,Lexit_char_not_found	# Lexit if max_bytes is exhausted
Lcase_1_cloop:
#if __i960_BIG_ENDIAN__
	rotate	8,g1,g1		# move next byte into position for extraction
#endif
	shlo	24,g1,g4
	stob	g1,(g0)		# store the byte in dest
	cmpo	g4,g2
	lda	1(g0),g0	# post-increment dest byte addr
	subi	1,g3,g3
	be.f	Lexit_char_found	# Lexit if char reached
	cmpi	0,g3
#if ! __i960_BIG_ENDIAN__
	shro	8,g1,g1		# move next byte into position for extraction
#endif
	bne.t	Lcase_1_cloop	# continue if len not exhausted
Lexit_char_not_found:
	mov	0,g0		# return NULL: c was never copied
Lexit_char_found:
	lda	0,g14
	bx	(g13)		# g0 = dest array address; g14 = 0
Lrett:
	ret
Lcase_245:
	cmpo	g0,g4		# check alignment of dest
	ld	(g5),MSW	# pre-fetch second half
	and	3,g1,g1		# compute shift count
	shlo	3,g1,g14
#if __i960_BIG_ENDIAN__
	subo	g14,0,g14	# adjust shift count for big endian
#endif
	be.t	Lcase_4		# branch if dest is word aligned
	or	g4,g1,g1	# is src earlier in word, later, or sync w/ dst
	cmpo	g0,g1		# < indicates first word of dest has more bytes
				/* than first word of source. */
	eshro	g14,g6,g4	# extract four bytes
	lda	4(g0),g1	# move dest word addr to first word boundary
#if __i960_BIG_ENDIAN__
	bge.f	1f
#else
	bg.f	1f
#endif
	mov	MSW,LSW
	lda	4(g5),g5	# move src word addr to second word boundary
1:
	notand	g1,3,g1
	mov	g4,MSW
	b	Lcase_25
	/* end of memccpy */
|
4ms/metamodule-plugin-sdk
| 5,853
|
plugin-libc/newlib/libc/machine/i960/memcpy.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memmove (optimized assembler version for the 80960K series)
procedure memcpy (optimized assembler version for the 80960K series)
dest_addr = memmove (dest_addr, src_addr, len)
dest_addr = memcpy (dest_addr, src_addr, len)
copy len bytes pointed to by src_addr to the space pointed to by
dest_addr. Return the original dest_addr.
These routines will work even if the arrays overlap. The standard
requires this of memmove, but memcpy is allowed to fail if overlap
is present. Nevertheless, it is implemented the same as memmove
because the overhead is trifling.
Undefined behavior will occur if the end of the source array is in
the last two words of the program's allocated memory space. This
is so because the routine fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Strategy:
Fetch the source array by words and store them by words to the
destination array, until there are fewer than three bytes left
to copy. Then, using the last word of the source (the one that
contains the remaining 0, 1, 2, or 3 bytes to be copied), store
a byte at a time until Ldone.
Tactics:
1) Do NOT try to fetch and store the words in a word aligned manner
because, in my judgement, the performance degradation experienced due
to non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that most source and
destination arrays (even more true of most big source arrays) will
be word aligned to begin with.
2) For non-overlapping arrays, rather than decrementing len to zero,
I calculate the address of the byte after the last byte of the
destination array, and quit when the destination byte pointer passes
that.
3) For overlapping arrays where the source starts at a lower address
than the destination the move is performed in reverse order.
4) Overlapping arrays where the source starts at a higher address
are treated like non-overlapping case. Where the two arrays exactly
coincide, the routine is short-circuited; no move is Ldone at all.
This costs only one cycle.
*/
/*-----------------------------------------------------------------------
 * void *memmove(void *dest, const void *src, size_t len)
 * void *memcpy (void *dest, const void *src, size_t len)
 * i960 K-series leafproc.  In: g0 = dest, g1 = src, g2 = len.
 * Out: g0 = dest (unchanged).  Copies forward by words; if the
 * destination overlaps the tail of the source the copy is done
 * backwards instead (see the Strategy comment above).  Fetches ahead
 * one word past the source.
 *---------------------------------------------------------------------*/
	.globl	_memcpy, _memmove
	.globl	__memcpy, __memmove
	.leafproc	_memmove, __memmove
	.leafproc	_memcpy, __memcpy
	.align	2
_memmove:
_memcpy:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memmove:
__memcpy:
	mov	g14, g13	# preserve return address
	cmpibge	0,g2,Lexit	# exit if number of bytes to move is <= zero.
	cmpo	g0,g1		# does start of dest overlap end of src?
	addo	g2,g1,g3	# g3 = addr of byte after end of src
	be	Lexit		# no move necessary if src and dest are same
	concmpo	g3,g0
	addo	g2, g0, g6	# g6 = addr of byte after end of dest
	bg	Lbackwards	# if overlap, then do move backwards
	ld	(g1), g7	# fetch first word of source
	mov	g0, g5		# g5 = working dest ptr; g0 preserved for return
	b	Lwloop_b
Lwloop_a:
	ld	(g1), g7	# fetch ahead next word of source
	st	g4, (g5)	# store word to dest
	addo	4, g5, g5	# post-increment dest pointer
Lwloop_b:			# word copying loop
	addo	4, g1, g1	# pre-increment src pointer
	cmpo	g3, g1		# is len <= 3 ?
	mov	g7, g4		# keep a copy of the current word
	bge	Lwloop_a	# loop if more than 3 bytes to move
	cmpobe	g6, g5, Lexit	# quit if no more bytes to move
Lcloop_a:			# character copying loop (len < 3)
	stob	g4, (g5)	# store a byte
	shro	8, g4, g4	# position next byte for storing
	addo	1, g5, g5
	cmpobne	g6, g5, Lcloop_a	# loop while bytes remain
Lexit:
	mov	0, g14
	bx	(g13)		# g0 = dest array address; g14 = 0
Lrett:
	ret
Lwloop.a:
	subo	4, g6, g6	# pre-decrement dest pointer
	st	g7, (g6)	# store word to dest
Lbackwards:			# word copying loop
	subo	4, g3, g3	# pre-decrement src pointer
	cmpo	g1, g3		# is len <= 3?
	ld	(g3), g7	# fetch ahead next word of source
	ble	Lwloop.a	# loop if more than 3 bytes to move
	cmpobe	g6, g0, Lexit	# quit if no more bytes to move
Lcloop.a:
	subo	1, g6, g6
	rotate	8, g7, g7	# position byte for storing
	stob	g7, (g6)	# store byte
	cmpobne	g6, g0, Lcloop.a	# loop while bytes remain
	b	Lexit
	/* end of memmove */
|
4ms/metamodule-plugin-sdk
| 12,226
|
plugin-libc/newlib/libc/machine/i960/memcpy_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcp_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1992,1993 Intel Corp., all rights reserved
*/
/*
procedure memmove (optimized assembler version for the CA)
procedure memcpy (optimized assembler version for the CA)
dest_addr = memmove (dest_addr, src_addr, len)
dest_addr = memcpy (dest_addr, src_addr, len)
copy len bytes pointed to by src_addr to the space pointed to by
dest_addr. Return the original dest_addr.
Memcpy will fail if the source and destination string overlap
(in particular, if the end of the source is overlapped by the
beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Memmove will not fail if overlap exists.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, the
routine will fetch ahead one word. Disallowing the fetch ahead would
impose a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 dest ptr; not modified, so that it may be returned
g1 src ptr; shift count
g2 len
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 -4 for Lbackwards move
Little endian
g6 lsw of double word for extraction of 4 bytes
g7 msw of double word for extraction of 4 bytes
Big endian
g6 msw of double word for extraction of 4 bytes
g7 lsw of double word for extraction of 4 bytes
g13 return address
g14 byte extracted.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
/*-----------------------------------------------------------------------
 * void *memmove(void *dest, const void *src, size_t len)
 * void *memcpy (void *dest, const void *src, size_t len)
 * i960CA leafproc.  In: g0 = dest, g1 = src, g2 = len.
 * Out: g0 = dest (unchanged).  Register roles are documented in the
 * file header above.  Forward copy handles the five alignment cases
 * via eshro double-shift extraction; overlapping moves with dest
 * after src run backwards from the end.  Fetches ahead one word.
 *---------------------------------------------------------------------*/
	.globl	_memmove, _memcpy
	.globl	__memmove, __memcpy
	.leafproc	_memmove, __memmove
	.leafproc	_memcpy, __memcpy
	.align	2
_memcpy:
_memmove:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memcpy:
__memmove:
	cmpibge.f 0,g2,Lquick_exit	# Lexit if number of bytes to move is <= zero.
	cmpo	g0,g1		# if dest starts earlier than src ...
	lda	(g14),g13	# preserve return address
	addo	g2,g1,g5	# compute addr of byte after last byte of src
	be.f	Lexit_code	# no move necessary if src and dest are same
	concmpo	g5,g0		# ... or if dest starts after end of src ...
	notand	g1,3,g3		# extract word addr of start of src
	bg.f	Lbackwards	# ... then drop thru, else do move backwards
	cmpo	g3,g1		# check alignment of src
	ld	(g3),LSW	# fetch word containing at least first byte
	notand	g0,3,g4		# extract word addr of start of dest
	lda	4(g3),g3	# advance src word addr
	bne.f	Lcase_245	# branch if src is NOT word aligned
Lcase_13:
	cmpo	g0,g4		# check alignment of dest
	subo	4,g4,g4		# store is pre-incrementing; back up dest addr
	be.t	Lcase_1		# branch if dest word aligned
Lcase_3:			# src is word aligned; dest is not
	addo	8,g4,g4		# move dest word ptr to first word boundary
	lda	(g0),g1		# copy dest byte ptr
	mov	LSW,MSW		# make copy of first word of src
	lda	32,g14		# initialize shift count to zero (mod 32)
Lcase_25:
Lcase_3_cloop_at_start:		# character copying loop for start of dest str
	cmpdeci	0,g2,g2		# is max_bytes exhausted?
	be.f	Lexit_code	# Lexit if max_bytes is exhausted
#if __i960_BIG_ENDIAN__
	rotate	8,MSW,MSW	# move next byte into position for extraction
	subo	8,g14,g14	# augment the shift counter
	stob	MSW,(g1)	# store the byte in dest
#else
	addo	8,g14,g14	# augment the shift counter
	stob	MSW,(g1)	# store the byte in dest
	shro	8,MSW,MSW	# move next byte into position for extraction
#endif
	lda	1(g1),g1	# post-increment dest ptr
	cmpobne.t g1,g4,Lcase_3_cloop_at_start	# loop until dest reaches word boundary
	ld	(g3),MSW	# fetch msw of operand for double shift
Lcase_4:
Lcase_3_wloop:
	cmpi	g2,4		# less than four bytes to move?
	lda	4(g3),g3	# post-increment src word addr
	eshro	g14,g6,g1	# extract 4 bytes of src
	bl.f	Lcase_3_cloop	# branch if < four bytes left to move
	mov	MSW,LSW		# move msw to lsw
	ld	(g3),MSW	# pre-fetch msw of operand for double shift
	subi	4,g2,g2		# decrease max_byte count by the 4 bytes moved
	st	g1,(g4)		# store 4 bytes to dest
	addo	4,g4,g4		# post-increment dest ptr
	b	Lcase_3_wloop
Lcase_1_wloop:			# word copying loop
	subi	4,g2,g2		# decrease max_byte count by the 4 bytes moved
	ld	(g3),LSW	# pre-fetch next word of src
	addo	4,g3,g3		# post-increment src addr
	st	g1,(g4)		# store word in dest string
Lcase_1:			# src and dest are word aligned
	cmpi	g2,4		# check for fewer than four bytes to move
	addo	4,g4,g4		# pre-increment dest addr
	lda	(LSW),g1	# keep a copy of the src word
	bge.t	Lcase_1_wloop	# branch if at least four bytes to copy
Lcase_3_cloop:
	cmpibe.f 0,g2,Lexit_code	# Lexit if max_bytes is exhausted
Lcase_1_cloop:
#if __i960_BIG_ENDIAN__
	rotate	8,g1,g1		# move next byte into position for extraction
#endif
	subi	1,g2,g2
	stob	g1,(g4)		# store the byte in dest
	cmpi	0,g2
	lda	1(g4),g4	# post-increment dest byte addr
#if ! __i960_BIG_ENDIAN__
	shro	8,g1,g1		# move next byte into position for extraction
#endif
	bne.t	Lcase_1_cloop	# loop while bytes remain
Lexit_code:
	mov	0,g14		# conform to register conventions
	bx	(g13)		# g0 = addr of dest; g14 = 0
Lrett:
	ret
Lcase_245:
	cmpo	g0,g4		# check alignment of dest
	ld	(g3),MSW	# pre-fetch second half
	and	3,g1,g1		# compute shift count
	shlo	3,g1,g14
#if __i960_BIG_ENDIAN__
	subo	g14,0,g14	# adjust shift count for big endian
#endif
	be.t	Lcase_4		# branch if dest is word aligned
	or	g4,g1,g1	# is src earlier in word, later, or sync w/ dst
	cmpo	g0,g1		# < indicates first word of dest has more bytes
	lda	4(g4),g4	# move dest word addr to first word boundary
	eshro	g14,g6,g5	# extract four bytes
	lda	(g0),g1
#if __i960_BIG_ENDIAN__
	bge.f	1f
#else
	bg.f	1f
#endif
	mov	MSW,LSW
	lda	4(g3),g3	# move src word addr to second word boundary
1:
	mov	g5,MSW
	b	Lcase_25
Lbackwards:
	notand	g5,3,MSW	# extract word addr of byte after end of src
	cmpo	MSW,g5		# check alignment of end of src
	subo	4,MSW,g3	# retreat src word addr
	addo	g2,g0,g1	# compute addr of byte after end of dest
	notand	g1,3,g4		# extract word addr of start of dest
	bne.f	Lcase.245	# branch if src is NOT word aligned
Lcase.13:
	cmpo	g1,g4		# check alignment of dest
	ld	(g3),MSW	# fetch last word of src
	subo	4,g3,g3		# retreat src word addr
	be.t	Lcase.1		# branch if dest word aligned
Lcase.3:			# src is word aligned; dest is not
	mov	MSW,LSW		# make copy of first word of src
	lda	32,g14		# initialize shift count to zero (mod 32)
Lcase.25:
Lcase.3_cloop_at_start:		# character copying loop for start of dest str
	cmpdeci	0,g2,g2		# is max.bytes exhausted?
	be.f	Lexit_code	# Lexit if max_bytes is exhausted
#if ! __i960_BIG_ENDIAN__
	rotate	8,LSW,LSW	# move next byte into position for storing
#endif
	lda	-1(g1),g1	# pre-decrement dest ptr
	cmpo	g1,g4		# have we reached word boundary in dest yet?
	stob	LSW,(g1)	# store the byte in dest
#if __i960_BIG_ENDIAN__
	shro	8,LSW,LSW	# move next byte into position for storing
	addo	8,g14,g14	# augment the shift counter
#else
	subo	8,g14,g14	# augment the shift counter
#endif
	bne.t	Lcase.3_cloop_at_start	# loop until dest word boundary reached
	ld	(g3),LSW	# fetch lsw of operand for double shift
#if __i960_BIG_ENDIAN__
	cmpobne	0,g14,Lcase.3_wloop
Lcase.3_wloop2:
	cmpi	g2,4		# less than four bytes to move?
	lda	-4(g3),g3	# post-decrement src word addr
	mov	MSW,g1		# extract 4 bytes of src
	lda	(LSW),MSW	# move lsw to msw
	subo	4,g4,g4		# pre-decrement dest ptr
	bl.f	Lcase.3_cloop	# branch if < four bytes left to move
	ld	(g3),LSW	# pre-fetch lsw of operand for double shift
	subi	4,g2,g2		# decrease max.byte count by the 4 bytes moved
	st	g1,(g4)		# store 4 bytes to dest
	b	Lcase.3_wloop2
#endif
Lcase.4:
Lcase.3_wloop:
	cmpi	g2,4		# less than four bytes to move?
	lda	-4(g3),g3	# post-decrement src word addr
	eshro	g14,g6,g1	# extract 4 bytes of src
	lda	(LSW),MSW	# move lsw to msw
	subo	4,g4,g4		# pre-decrement dest ptr
	bl.f	Lcase.3_cloop	# branch if < four bytes left to move
	ld	(g3),LSW	# pre-fetch lsw of operand for double shift
	subi	4,g2,g2		# decrease max.byte count by the 4 bytes moved
	st	g1,(g4)		# store 4 bytes to dest
	b	Lcase.3_wloop
Lcase.1_wloop:			# word copying loop
	subi	4,g2,g2		# decrease max.byte count by the 4 bytes moved
	ld	(g3),MSW	# pre-fetch next word of src
	subo	4,g3,g3		# post-decrement src addr
	st	g1,(g4)		# store word in dest string
Lcase.1:			# src and dest are word aligned
	cmpi	g2,4		# check for fewer than four bytes to move
	subo	4,g4,g4		# pre-decrement dest addr
	lda	(MSW),g1	# keep a copy of the src word
	bge.t	Lcase.1_wloop	# branch if at least four bytes to copy
Lcase.3_cloop:
	cmpibe.f 0,g2,Lexit_code	# Lexit if max_bytes is exhausted
#if ! __i960_BIG_ENDIAN__
	rotate	8,g1,g1		# move next byte into position for storing
#endif
	lda	4(g4),g4	# pre-decremented dest addr 4 too much
Lcase.1_cloop:
	subi	1,g4,g4		# pre-decrement dest byte addr
	cmpi	g4,g0		# has dest ptr reached beginning of dest?
	stob	g1,(g4)		# store the byte in dest
#if __i960_BIG_ENDIAN__
	shro	8,g1,g1		# move next byte into position for storing
#else
	rotate	8,g1,g1		# move next byte into position for storing
#endif
	bne.t	Lcase.1_cloop	# loop until start of dest reached
	b	Lexit_code
Lcase.245:
	cmpo	g1,g4		# check alignment of dest
	ld	(MSW),MSW	# pre-fetch word with at least last byte
	and	3,g5,g5		# compute shift count
	ld	(g3),LSW	# pre-fetch second to last word
	shlo	3,g5,g14
#if __i960_BIG_ENDIAN__
	subo	g14,0,g14	# adjust shift count for big endian
#endif
	be.t	Lcase.4		# branch if dest is word aligned
	or	g4,g5,g5	# is src earlier in word, later, or sync w/ dst
	cmpo	g1,g5		# < indicates last word of dest has less bytes
	eshro	g14,g6,g5	# extract four bytes
	bl.t	1f
	mov	LSW,MSW
#if ! __i960_BIG_ENDIAN__
	be.t	1f
#endif
	subo	4,g3,g3		# move src word addr to second word boundary
1:
	mov	g5,LSW
	b	Lcase.25
Lquick_exit:
	mov	g14,g13
	b	Lexit_code
	/* end of memmove */
|
4ms/metamodule-plugin-sdk
| 3,435
|
plugin-libc/newlib/libc/machine/i960/strpbrk.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure strpbrk (optimized assembler version: 80960K series, 80960CA)
char_addr = strpbrk (string, brkset_string)
Return the address of the first character in string that is also
in the brkset_string. Return NULL if none exists.
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine.
This routine stays out of g3 and g4 altogether. They may be used by
the strtok routine, which calls this routine in an incestuous way.
*/
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
.file "strprk.s"
.globl _strpbrk
.globl __strpbrk
.leafproc _strpbrk, __strpbrk
.align 2
_strpbrk:
# char *strpbrk(const char *string, const char *brkset)
# In:   g0 = string, g1 = brkset
# Out:  g0 = address of first char of string that appears in brkset,
#       or NULL (0) if no such character exists.
# Uses: g2 (brkset scan ptr), g5/g6/g7 (byte scratch), g13 (return addr),
#       g14 (leafproc linkage; zeroed before return per convention).
# NOTE(review): g3/g4 are deliberately untouched — strtok appears to rely
# on them surviving a call into this routine (per file header).
#ifdef __PIC
	lda Lrett-(.+8)(ip),g14	# PIC: ip-relative address of return stub
	b __strpbrk
#else
	lda Lrett,g14		# non-PIC: absolute address of return stub
	b __strpbrk
#endif
Lrett: ret			# stub taken when invoked via normal call (not bal)
__strpbrk:
Lnext_char_strpbrk:
	addo 1,g1,g2		# g2 = brkset+1 (ptr to second brkset char)
	ldob (g0),g7		# g7 = current character of string
	ldob (g1),g6		# g6 = first character of brkset
	cmpobe.f 0,g7,Lexit_char_not_found # end of string: no match, return NULL
Lscan_set_strpbrk:
	cmpo g6,g7		# does this brkset char match the string char?
	ldob (g2),g5		# pre-fetch next brkset char (overlaps compare)
	addo 1,g2,g2		# bump brkset ptr
	be.f Lexit_char_found	# match: g0 already points at the string char
	cmpo g6,0		# was that the brkset terminator?
	lda (g5),g6		# g6 = pre-fetched brkset char
	bne.t Lscan_set_strpbrk	# brkset not exhausted: keep scanning it
	addo 1,g0,g0		# no brkset char matched: advance string ptr
	b Lnext_char_strpbrk
Lexit_char_not_found:
	mov 0,g0		# return NULL: no string char found in brkset
Lexit_char_found:
	mov g14,g13		# save return address
	lda 0,g14		# g14 = 0 to conform to register conventions
	bx (g13)		# return; g0 = result
/* end of strpbrk */
|
4ms/metamodule-plugin-sdk
| 6,881
|
plugin-libc/newlib/libc/machine/i960/memcmp_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcm_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1992,1993 Intel Corp., all rights reserved
*/
/*
procedure memcmp (optimized assembler version for the CA)
result = memcmp (src1_addr, src2_addr, max_bytes)
compare the byte array pointed to by src1_addr to the byte array
pointed to by src2_addr. Return 0 iff the arrays are equal, -1 if
src1_addr is lexicly less than src2_addr, and 1 if it is lexicly
greater. Do not compare more than max_bytes bytes.
Undefined behavior will occur if the end of either source array
is in the last word of the program's allocated memory space. This
is so because, in several cases, memcmp will fetch ahead one word.
Disallowing the fetch ahead would impose a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source1 is word aligned, source2 is not
4) source2 is word aligned, source1 is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g14 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers are sufficient to implement the routine.
The registers are used as follows:
g0 original src1 ptr; extracted word; return result
g1 src2 ptr; byt extraction mask
g2 maximum number of bytes to compare
g3 src2 word ptr
Little endian
g4 lsw of src1
g5 msw of src1
g6 src2 word
g7 src1 word ptr
Big endian
g4 msw of src1
g5 lsw of src1
g6 src1 word ptr
g7 src2 word
g13 return address
g14 shift count
*/
#if __i960_BIG_ENDIAN__
#define MSW g4
#define LSW g5
#define SRC1 g6
#define SRC2 g7
#else
#define LSW g4
#define MSW g5
#define SRC2 g6
#define SRC1 g7
#endif
.globl _memcmp
.globl __memcmp
.leafproc _memcmp, __memcmp
.align 2
_memcmp:
# int memcmp(const void *src1, const void *src2, size_t max_bytes)
# In:   g0 = src1, g1 = src2, g2 = max_bytes
# Out:  g0 = 0 if equal through max_bytes, 1 if src1 > src2, -1 if src1 < src2
# Strategy: extract src1 a word at a time via eshro from a two-word window
# (g4/g5) so src1 need not be aligned; compare word-at-a-time when src2 is
# word aligned, else byte-at-a-time (Lcloop).  Fetches ahead one word, so
# the arrays must not end in the last word of addressable memory (see header).
# g13 = return address; g14 doubles as src1 shift count inside the loops
# and is zeroed at exit per leafproc convention.
#ifndef __PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memcmp:
Lrestart:
#if __i960_BIG_ENDIAN__
	subo	1,g0,SRC1
	notand	SRC1,3,SRC1	# extract word addr of start of src1
#else
	notand	g0,3,SRC1	# extract word addr of start of src1
#endif
	lda	(g14),g13	# preserve return address
	cmpibge.f 0,g2,Lequal_exit # max_bytes <= 0: arrays compare equal
	notand	g1,3,g3		# extract word addr of start of src2
	ld	(SRC1),LSW	# fetch word with at least first byte of src1
	cmpo	g3,g1		# check alignment of src2
	ld	4(SRC1),MSW	# fetch second word of src1 (two-word window)
	shlo	3,g0,g14	# shift count (bits) = 8 * (src1 byte offset)
#if __i960_BIG_ENDIAN__
	subo	g14,0,g14	# negate shift count for big-endian eshro
#endif
	ld	(g3),SRC2	# fetch word with at least first byte of src2
	eshro	g14,g4,LSW	# extract aligned word of src1 from window
	lda	8(SRC1),SRC1	# advance src1 word addr past the window
	bne.f	Lsrc2_unaligned	# branch if src2 is NOT word aligned
	mov	LSW,g0		# src2 is word aligned: g0 = current src1 word
	lda	0xff,g1		# byte extraction mask (src2 ptr no longer needed)
Lwloop:				# word comparing loop
	cmpo	SRC2,g0		# compare src1 and src2 words
	lda	4(g3),g3	# pre-increment src2 addr
	mov	MSW,LSW		# slide window: msw of src1 becomes lsw
	ld	(SRC1),MSW	# pre-fetch next msw of src1
	subi	4,g2,g2		# decrement remaining byte count
	bne.f	Lcloop		# words differ: find the differing byte
	cmpi	0,g2
	ld	(g3),SRC2	# pre-fetch next word of src2
	eshro	g14,g4,g0	# extract next word of src1
	lda	4(SRC1),SRC1	# post-increment src1 addr
	bl.t	Lwloop		# loop while bytes remain (0 < g2)
	b	Lequal_exit	# equal through max_bytes
Lcloop_setup:			# entry from Lsrc2_unaligned first-word miss
	mov	LSW,g0		# restore extracted src1 word
	subo	4,g2,g2		# pre-decrement: Lcloop/.diff re-add 4
	lda	0xff,g1		# byte extraction mask
Lcloop:				# byte comparing loop (words known to differ)
#if __i960_BIG_ENDIAN__
	rotate	24,g1,g1	# rotate mask so bytes are tested MSB-first
#endif
	and	SRC2,g1,g3	# extract next byte of src2
	and	g0,g1,LSW	# extract next byte of src1
	cmpobne.f LSW,g3,.diff	# bytes differ: decide ordering
#if ! __i960_BIG_ENDIAN__
	shlo	8,g1,g1		# shift mask up to next byte (LSB-first)
#endif
	subi	1,g2,g2		# one more byte consumed
	b	Lcloop		# a differing byte is guaranteed within this word
Lequal_exit:			# arrays equal through max_bytes
	mov	0,g14		# conform to register conventions
	lda	0,g0		# return 0, indicating equality
	bx	(g13)		# return
Lrett:
	ret
.diff:				# flags still hold LSW vs g3 (addo/lda/bl don't touch cc)
	addo	4,g2,g2		# undo Lwloop's over-decrement of the count
	lda	0,g14		# conform to register conventions
	bl	Lless_than_exit	# src1 byte < src2 byte (unsigned)
Lgreater_than_exit:
	cmpibge.f 0,g2,Lequal_exit # difference lies beyond max_bytes: equal
	mov	1,g0
	bx	(g13)		# g0 = 1 (src1 > src2)
Lless_than_exit:
	cmpibge.f 0,g2,Lequal_exit # difference lies beyond max_bytes: equal
	subi	1,0,g0
	bx	(g13)		# g0 = -1 (src1 < src2)
Lsrc2_unaligned:		# both streams need extraction; realign to src2
	notor	g1,3,g14	# first step in computing new src1 ptr
	ld	4(g3),SRC1	# fetch second word of src2
	shlo	3,g1,MSW	# shift count (bits) for src2
#if __i960_BIG_ENDIAN__
	subo	MSW,0,MSW	# negate shift count for big-endian eshro
#endif
	eshro	MSW,g6,SRC2	# extract aligned word of src2
	cmpo	LSW,SRC2	# compare first src1 and src2 words
	lda	4(g3),g1	# new src2 ptr (word aligned from here on)
	bne.f	Lcloop_setup	# first four bytes differ: byte compare
	subo	g14,g0,g0	# second (final) step: advance src1 ptr too
	addi	g14,g2,g2	# adjust max_bytes for the bytes already matched
	lda	(g13),g14	# restore return addr for Lrestart's re-save
	b	Lrestart	# re-enter with src2 now word aligned (case 1/4)
|
4ms/metamodule-plugin-sdk
| 5,781
|
plugin-libc/newlib/libc/machine/i960/strncat.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strncat.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncat (optimized assembler version for the 80960K Series)
dest_addr = strncat (dest_addr, src_addr, max_bytes)
append the null terminated string pointed to by src_addr to the null
terminated string pointed to by dest_addr. Return the original
dest_addr. If the source string is longer than max_bytes, then
append only max_bytes bytes, and tack on a null byte on the end.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strncat fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
First, skip to the null byte in the destination string. Then
fetch the source string by words and store them by words to the
destination string, until there are fewer than three bytes left
to copy. Then, using the last word of the source (the one that
contains the remaining 0, 1, 2, or 3 bytes to be copied), store
	a byte at a time until done.
If, before exhausting the max_byte count, the null byte is encountered
in the source string, then just copy up thru the null byte.
Tactics:
1) Do NOT try to fetch and store the words in a word aligned manner
because, in my judgement, the performance degradation experienced due
to non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment.
*/
.globl _strncat
.globl __strncat
.leafproc _strncat,__strncat
.align 2
_strncat:
# char *strncat(char *dest, const char *src, size_t max_bytes)
# In:   g0 = dest, g1 = src, g2 = max_bytes (signed check: <=0 is a no-op)
# Out:  g0 = original dest (never modified), with at most max_bytes chars
#       of src appended after dest's terminating null, then a null byte.
# Regs: g3 = byte mask, g4 = word being stored, g5 = dest scan/store ptr,
#       g6 = return address, g7 = current src word, g14 = byte scratch
#       (holds 0 at exit, satisfying the leafproc convention).
# Fetches ahead one word of src (see file-header caveat).
#ifndef __PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strncat:
	mov	g14,g6			# preserve return address
	cmpibge	0, g2, Lno_operation	# exit early if max_bytes <= 0
	mov	g0, g5			# g5 = working dest ptr (g0 kept for return)
Lskip_word_loop:			# find the word of dest holding the null
	ld	(g5), g7		# fetch word of dest string
	addo	4, g5, g5		# post-increment dest ptr
	scanbyte 0, g7			# does it contain a null byte?
	bno	Lskip_word_loop		# if not, keep scanning dest
	subo	5, g5, g5		# back up so byte loop lands on that word
	lda	0xff, g3		# byte extraction mask = 0xff
Lskip_byte_loop:			# locate the exact null byte within the word
	and	g7, g3, g14		# extract next byte of dest's last word
	cmpo	0, g14			# is it the null?
	addo	1, g5, g5		# advance dest ptr
	shro	8, g7, g7		# position next byte for extraction
	bne	Lskip_byte_loop		# loop until null found; g5 ends on the null
	ld	(g1), g7		# fetch first word of source string
Lwloop:					# word copying loop
	cmpo	4, g2			# at most 4 bytes left to copy?
	addo	4, g1, g1		# post-increment source ptr
	bge	Lcloop.a		# if so, switch to counted byte copy
	scanbyte 0, g7			# does this src word contain the null?
	mov	g7, g4			# keep a copy of the source word
	be	Lcloop			# null reached: copy its bytes one by one
	ld	(g1), g7		# pre-fetch next word of source
	subo	4, g2, g2		# reduce remaining byte budget
	st	g4, (g5)		# store current word into dest
	addo	4, g5, g5		# post-increment destination ptr
	b	Lwloop
Lcloop.b:
	addo	1, g5, g5		# post-increment destination ptr
	shro	8, g7, g7		# position next byte for extraction
Lcloop:					# byte copy of src's final word (budget > 3)
	and	g3, g7, g4		# extract character
	cmpo	0, g4			# is it the null?
	stob	g4, (g5)		# store it (null included)
	bne	Lcloop.b		# loop until the null has been stored
	bx	(g6)			# g0 = dest string address; g14 = 0
Lrett:
	ret
Lcloop.c:
	addo	1, g5, g5		# post-increment destination ptr
	shro	8, g7, g7		# position next byte for extraction
Lcloop.a:				# counted byte copy (budget <= 4 bytes)
	cmpdeco	0,g2,g2			# budget exhausted? (also decrements g2)
	and	g3, g7, g4		# extract character
	be	Ldone			# budget spent: store trailing null and exit
	cmpo	0, g4			# is it the null?
	stob	g4, (g5)		# store it
	bne	Lcloop.c		# loop until null stored or budget spent
Ldone:	stob	g14, (g5)		# store trailing null (g14 == 0 here)
	bx	(g6)			# g0 = dest string address; g14 = 0
Lno_operation: mov	0, g14		# conform to register conventions
	bx	(g6)			# no-op: return original dest unchanged
/* end of strncat */
|
4ms/metamodule-plugin-sdk
| 3,917
|
plugin-libc/newlib/libc/machine/i960/setjmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
/******************************************************************************/
/* */
/* setjmp(), longjmp() */
/* */
/******************************************************************************/
.file "setjmp.as"
.text
/* .link_pix */
.align 4
.globl _setjmp
_setjmp:
# int setjmp(jmp_buf env)   -- g0 = env
# Saves enough state to resume execution in setjmp's *caller*.
# jmp_buf layout (offsets from g0):
#   0x00-0x3f  caller-frame local regs (pfp,sp,rip,r3 .. r12-r15)
#   0x40-0x4f  g8-g11   0x50 g12   0x54 g14   0x58 caller's frame pointer
# Returns 0 on the direct call (longjmp later makes it "return" nonzero).
	flushreg			# spill register cache so frames are in memory
	andnot	0xf,pfp,g1		# get pfp, mask out return status bits
	st	g1, 0x58(g0)		# save fp of caller
	/* save globals not killed by the calling convention */
	stq	g8, 0x40(g0)		# save g8-g11
	st	g12, 0x50(g0)		# save g12
	st	g14, 0x54(g0)		# save g14
	/* save previous frame local registers */
	ldq	(g1), g4		# get previous frame pfp, sp, rip, r3
	stq	g4, (g0)		# save pfp, sp, rip, r3
	ldq	0x10(g1), g4		# get previous frame r4-r7
	stq	g4, 0x10(g0)		# save r4-r7
	ldq	0x20(g1), g4		# get previous frame r8-r11
	stq	g4, 0x20(g0)		# save r8-r11
	ldq	0x30(g1), g4		# get previous frame r12-r15
	stq	g4, 0x30(g0)		# save r12-r15
	mov	0, g0			# direct call returns 0
	ret
/*
* fake a return to the place that called the corresponding _setjmp
*/
.align 4
.globl _longjmp
_longjmp:
# void longjmp(jmp_buf env, int val)   -- g0 = env, g1 = val
# Fakes a return to the site of the matching setjmp by rewriting the
# frame of the function that originally called setjmp, then restoring
# the saved globals.  setjmp appears to return `val` (forced to 1 if 0).
	call	0f			# ensure there is at least one stack frame
0:
	flushreg			# spill register cache before swapping stacks
	ld	0x58(g0), pfp		# get fp of caller of setjmp
	/* restore local registers
	 * the following code modifies the frame of the function which originally
	 * called setjmp.
	 */
	ldq	(g0), g4		# get saved pfp, sp, rip, r3
	stq	g4, (pfp)		# restore pfp, sp, rip, r3
	ldq	0x10(g0), g4		# get saved r4-r7
	stq	g4, 0x10(pfp)		# restore r4-r7
	ldq	0x20(g0), g4		# get saved r8-r11
	stq	g4, 0x20(pfp)		# restore r8-r11
	ldq	0x30(g0), g4		# get saved r12-r15
	stq	g4, 0x30(pfp)		# restore r12-r15
	/* restore global registers */
	ldq	0x40(g0), g8		# restore g8-g11
	ld	0x50(g0), g12		# restore g12
	ld	0x54(g0), g14		# restore g14
	mov	g1, g0			# g0 = val, setjmp's apparent return value
	cmpo	g0, 0			# longjmp must never make setjmp return 0
	bne	0f
	mov	1, g0			# substitute 1 when val == 0 (per C standard)
0:
	ret				# "return" from setjmp in the original caller
|
4ms/metamodule-plugin-sdk
| 6,476
|
plugin-libc/newlib/libc/machine/i960/strcpy.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcpy (optimized assembler version for the 80960K series)
procedure strcat (optimized assembler version for the 80960K series)
dest_addr = strcpy (dest_addr, src_addr)
copy the null terminated string pointed to by src_addr to
the string space pointed to by dest_addr. Return the original
dest_addr.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strcpy fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
Fetch the source string and store the destination string by words
until the null byte is encountered. When the word with the null
byte is reached, store it by bytes up through the null byte only.
Tactics:
1) Do NOT try to fetch and store the words in a word aligned manner
because, in my judgement, the performance degradation experienced due
to non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that most source and
destination strings will be word aligned to begin with.
procedure strcat
dest_addr = strcat (dest_addr, src_addr)
Appends the string pointed to by src_addr to the string pointed
to by dest_addr. The first character of the source string is
copied to the location initially occupied by the trailing null
byte of the destination string. Thereafter, characters are copied
from the source to the destination up thru the null byte that
trails the source string.
See the strcpy routine, above, for its caveats, as they apply here too.
Strategy:
Skip to the end (null byte) of the destination string, and then drop
into the strcpy code.
Tactics:
	Skipping to the null byte is done by reading the destination string
in long-words and scanbyte'ing them, then examining the bytes of the
word that contains the null byte, until the address of the null byte is
known. Then we drop into the strcpy routine. It is probable (approx.
three out of four times) that the destination string as strcpy sees
it will NOT be word aligned (i.e. that the null byte won't be the
last byte of a word). But it is not worth the complication to that
	routine to force word aligned memory accesses to be guaranteed.
*/
.globl _strcpy, _strcat
.globl __strcpy, __strcat
.leafproc _strcpy,__strcpy
.leafproc _strcat,__strcat
.align 2
_strcat:
# char *strcat(char *dest, const char *src)   -- g0 = dest, g1 = src
# Skips to dest's terminating null, then falls through into the strcpy
# copy loop at Lend_of_dest_found.  Returns original dest in g0.
# NOTE(review): the word/long scans read dest at its given alignment;
# assumes unaligned loads are tolerated on the target -- TODO confirm.
#ifndef __PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strcat:
	mov	g14,g13			# preserve return address
	ldl	(g0),g4			# fetch first two words of *dest*
	addo	8,g0,g2			# g2 = dest word ptr, past the pair
	lda	0xff,g3			# byte extraction mask
Lsearch_for_word_with_null_byte:	# scan dest two words at a time
	scanbyte 0,g4			# null byte in first word of pair?
	mov	g5,g7			# copy second word of pair
	bo.f	Lsearch_for_null	# branch if null is in the first word
	scanbyte 0,g7			# null byte in second word of pair?
	ldl	(g2),g4			# pre-fetch next pair of dest words
	addo	8,g2,g2			# post-increment dest word pointer
	bno	Lsearch_for_word_with_null_byte	# loop while no null seen
	subo	4,g2,g2			# null in 2nd word: back ptr up one word
	mov	g7,g4			# make it the word to search byte-wise
Lsearch_for_null:
	subo	9,g2,g5			# g5 = byte before the word holding null
Lsearch_for_null.a:			# find exact null byte within the word
	and	g4,g3,g6		# extract byte
	cmpo	0,g6			# is it the null?
	addo	1,g5,g5			# bump dest byte ptr
	shro	8,g4,g4			# position next byte for extraction
	bne	Lsearch_for_null.a	# loop; g5 ends pointing at the null
	b	Lend_of_dest_found	# append src starting at the null
_strcpy:
# char *strcpy(char *dest, const char *src)   -- g0 = dest, g1 = src
# Copies src (including its null) to dest a word at a time, finishing
# the last word byte-by-byte.  Returns original dest in g0.
# Fetches ahead one word of src (see file-header caveat).
#ifndef __PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strcpy:
	mov	g0, g5			# g5 = working dest ptr (g0 kept for return)
Lend_of_dest_found:			# shared tail: g5 = store ptr, g1 = src
	ld	(g1), g2		# fetch first word of source
	mov	g14,g6			# preserve return address
	lda	0xff, g3		# byte extraction mask = 0xff
Lwloop:					# word copying loop
	addo	4, g1, g1		# post-increment source ptr
	scanbyte 0, g2			# does source word contain the null byte?
	mov	g2, g4			# save a copy of the source word
	be	Lcloop			# null present: copy its bytes one by one
	ld	(g1), g2		# pre-fetch next word of source
	st	g4, (g5)		# store current word
	addo	4, g5, g5		# post-increment dest ptr
	b	Lwloop
Lcloop:					# byte copy of the final source word
	and	g3, g4, g14		# extract next char
	shro	8, g4, g4		# position word for next byte extraction
	cmpo	0, g14			# is it the null?
	stob	g14, (g5)		# store the byte (null included)
	addo	1, g5, g5		# post-increment dest ptr
	bne	Lcloop			# loop until the null has been stored
	bx	(g6)			# g0 = dest string address; g14 = 0
Lrett:
	ret
|
4ms/metamodule-plugin-sdk
| 3,179
|
plugin-libc/newlib/libc/machine/i960/strcspn.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcspn.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure strcspn (optimized assembler version: 80960K series, 80960CA)
len = strcspn (string, charset)
Return the number of characters in the maximum leading segment
of string which consists solely of characters NOT from charset.
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine.
*/
.globl _strcspn
.globl __strcspn
.leafproc _strcspn, __strcspn
.align 2
_strcspn:
# size_t strcspn(const char *string, const char *charset)
# In:   g0 = string, g1 = charset
# Out:  g0 = length of the maximal leading segment of string containing
#       no character from charset.
# Regs: g2 (charset scan ptr), g3 (string scan ptr), g5/g6/g7 (byte
#       scratch), g13 (return addr), g14 zeroed per leafproc convention.
#ifndef __PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strcspn:
	mov	g14,g13		# save return address
	lda	(g0),g3		# g3 = scan ptr; g0 kept to compute length
	mov	0,g14		# conform to register conventions
Lnext_char:
	ldob	(g3),g7		# g7 = current character of string
	addo	1,g1,g2		# g2 = charset+1 (ptr to second charset char)
	ldob	(g1),g6		# g6 = first character of charset
	cmpobe.f 0,g7,Lexit	# end of string: whole string is the segment
Lscan_set:
	cmpo	g6,g7		# does this charset char match the string char?
	ldob	(g2),g5		# pre-fetch next charset char (overlaps compare)
	addo	1,g2,g2		# bump charset ptr
	be.f	Lexit		# match: segment ends at current string char
	cmpo	g6,0		# was that the charset terminator?
	lda	(g5),g6		# g6 = pre-fetched charset char
	bne.t	Lscan_set	# charset not exhausted: keep scanning it
	addo	1,g3,g3		# no charset char matched: advance string ptr
	b	Lnext_char
Lexit:
	subo	g0,g3,g0	# length = scan ptr - original string ptr
	bx	(g13)		# return; g0 = length, g14 = 0
Lrett:
	ret
/* end of strcspn */
|
4ms/metamodule-plugin-sdk
| 9,947
|
plugin-libc/newlib/libc/machine/i960/strcpy_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcp_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcpy (optimized assembler version for the CA)
dest_addr = strcpy (dest_addr, src_addr)
copy the null terminated string pointed to by src_addr to
the string space pointed to by dest_addr. Return the original
dest_addr.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strcpy
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original dest ptr; not modified, so that it may be returned.
g1 src ptr; shift count
g2 dest ptr; 4 bytes of src
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 0xff -- byte extraction mask
g6 lsw of double word for extraction of 4 bytes (little endian)
msw of double word for extraction of 4 bytes (big endian)
g7 msw of double word for extraction of 4 bytes (little endian)
lsw of double word for extraction of 4 bytes (big endian)
g13 return address
g14 byte extracted. When reaches null byte, which is zero, we will
be in conformance with register conventions, and can return to
the caller with a clear conscience.
procedure strcat
dest_addr = strcat (dest_addr, src_addr)
Appends the string pointed to by src_addr to the string pointed
to by dest_addr. The first character of the source string is
copied to the location initially occupied by the trailing null
byte of the destination string. Thereafter, characters are copied
from the source to the destination up thru the null byte that
trails the source string.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
.globl _strcpy, _strcat
.globl __strcpy, __strcat
.leafproc _strcpy, __strcpy
.leafproc _strcat, __strcat
.align 2
/*
 * char *strcat (char *dest, const char *src)
 * Scans dest a word at a time (scanbyte) for its trailing null byte,
 * then joins the strcpy machinery below (Lcase_14 / Lcase_235.a) to
 * append src starting at that position.
 * Leafproc linkage: g13 holds the return address; g14 is 0 on exit.
 * Returns the original dest in g0 (g0 is never modified).
 */
_strcat:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strcat:
notand g0,3,g4 # extract word addr of start of dest
lda (g14),g13 # preserve return address
and g0,3,LSW # extract byte offset of dest
ld (g4),MSW # fetch word containing at least first byte
shlo 3,LSW,g14 # get shift count for making mask for first word
subi 1,0,LSW # mask initially all ones
#if __i960_BIG_ENDIAN__
shro g14,LSW,LSW # get mask for bytes needed from first word
lda 0xff000000,g5 # byte extraction mask
#else
shlo g14,LSW,LSW # get mask for bytes needed from first word
lda 0xff,g5 # byte extraction mask
#endif
notor MSW,LSW,MSW # set unneeded bytes to all ones
Lsearch_for_word_with_null:
scanbyte 0,MSW # check for null byte
lda 4(g4),g4 # post-increment dest word pointer
mov MSW,LSW # keep a copy of current word
ld (g4),MSW # fetch next word of dest
bno.t Lsearch_for_word_with_null # branch if null not found yet
and g5,LSW,g14 # extract byte
cmpo 0,g14 # branch if null is first byte of word
subo 4,g4,g4 # move dest word ptr back to word with null
notand g1,3,g3 # extract word addr of start of src
lda (g4),g2 # set dest byte ptr to 1st byte of word w/ null
be.f Lcase_14
Lsearch_for_null: # byte loop: locate null within the word
#if __i960_BIG_ENDIAN__
shro 8,g5,g5 # move mask down to next byte
#else
shlo 8,g5,g5 # move mask up to next byte
#endif
lda 1(g2),g2 # move dest byte ptr to next byte
and g5,LSW,g14 # extract byte
cmpobne.t 0,g14,Lsearch_for_null # branch if null is not yet found
lda 0xff,g5 # byte extraction mask
b Lcase_235.a # join strcpy with g2 = addr of dest's null byte
/*
 * char *strcpy (char *dest, const char *src)
 * Copies the null-terminated string at src to dest, handling each
 * combination of word alignment of src and dest (cases 1-5 per the
 * file header). Word-aligned spans are moved with word loads/stores;
 * misaligned src is re-packed with eshro double shifts.
 * Leafproc linkage: g13 holds the return address; g14 is 0 on exit.
 * Returns the original dest in g0 (g0 is never modified).
 */
_strcpy:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strcpy:
notand g0,3,g4 # extract word addr of start of dest
lda (g14),g13 # preserve return address
cmpo g0,g4 # check alignment of dest
lda 0xff,g5 # load mask for byte extraction
notand g1,3,g3 # extract word addr of start of src
bne.f Lcase_235 # branch if dest is NOT word aligned
Lcase_14:
cmpo g3,g1 # check alignment of src
ld (g3),LSW # fetch word containing at least first byte
shlo 3,g1,g1 # compute shift count
lda 4(g3),g3 # advance src word addr
#if __i960_BIG_ENDIAN__
lda 0xff,g5 # byte extraction mask
#endif
bne.f Lcase_4 # branch if src is NOT word aligned
Lcase_1: # src and dest are word aligned
subo 4,g4,g4 # store is pre-incrementing; back up dest addr
Lcase_1_wloop: # word copying loop
scanbyte 0,LSW # check for null byte in src word
lda (LSW),g2 # keep a copy of the src word
addo 4,g4,g4 # pre-increment dest addr
ld (g3),LSW # pre-fetch next word of src
addo 4,g3,g3 # post-increment src addr
bo.f Lcase_1_cloop # branch if word contains null byte
st g2,(g4) # store word in dest string
b Lcase_1_wloop
Lcase_3_cloop:
Lcase_1_cloop: # character copying loop (final partial word)
#if __i960_BIG_ENDIAN__
rotate 8,g2,g2 # move next byte into position for extraction
and g5,g2,g14 # extract next char
#else
and g5,g2,g14 # extract next char
shro 8,g2,g2 # move next byte into position for extraction
#endif
cmpo 0,g14 # check for null byte
stob g14,(g4) # store the byte in dest
lda 1(g4),g4 # post-increment dest byte addr
bne.t Lcase_1_cloop # branch if null not reached
Lexit_code:
bx (g13) # g0 = addr of dest; g14 = 0
Lrett:
ret
Lcase_3: # src is word aligned; dest is not
addo 4,g4,g4 # move dest word ptr to first word boundary
mov LSW,MSW # make copy of first word of src
lda 0,g1 # initialize shift count to zero
Lcase_25:
Lcase_3_cloop_at_start: # character copying loop for start of dest str
#if __i960_BIG_ENDIAN__
rotate 8,MSW,MSW # move next byte into position for extraction
and g5,MSW,g14 # extract next char
#else
and g5,MSW,g14 # extract next char
shro 8,MSW,MSW # move next byte into position for extraction
#endif
cmpo 0,g14 # check for null byte
stob g14,(g2) # store the byte in dest
lda 1(g2),g2 # post-increment dest ptr
be.f Lexit_code # branch if null byte reached
cmpo g2,g4 # have we reached word boundary in dest?
lda 8(g1),g1 # augment the shift counter
bne.t Lcase_3_cloop_at_start
Lcase_4: # dest now word aligned; src is offset by g1 bits
ld (g3),MSW # fetch msw of operand for double shift
#if __i960_BIG_ENDIAN__
subo g1,0,g1 # Adjust shift count for big endian.
#endif
Lcase_3_wloop: # word loop re-packing misaligned src via eshro
eshro g1,g6,g2 # extract 4 bytes of src
lda 4(g3),g3 # post-increment src word addr
scanbyte 0,g2 # check for null byte
lda (MSW),LSW # move msw to lsw
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop # branch if word contains null byte
st g2,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop
Lcase_235: # dest not word aligned (strcat joins at .a)
lda (g0),g2 # copy dest ptr
Lcase_235.a:
cmpo g3,g1 # check alignment of src
ld (g3),LSW # fetch word containing at least first byte
and 3,g1,g14 # compute shift count
lda 4(g3),g3 # advance src word addr
shlo 3,g14,g1
be.t Lcase_3 # branch if src is word aligned
or g4,g14,g14 # is src earlier in word, later, or sync w/ dst
ld (g3),MSW # pre-fetch second half
cmpo g2,g14 # < indicates first word of dest has more bytes
lda 4(g4),g4 # move dest word addr to first word boundary
/* than first word of source. */
#if __i960_BIG_ENDIAN__
subo g1,0,g14 # Adjust shift count for big endian.
eshro g14,g6,g14 # extract four bytes
bge.f 1f
#else
eshro g1,g6,g14 # extract four bytes
bg.f 1f
#endif
mov MSW,LSW
lda 4(g3),g3 # move src word addr to second word boundary
1:
mov g14,MSW
b Lcase_25
/* end of strcpy */
/* ---- concatenated-source boundary ----
   next file: plugin-libc/newlib/libc/machine/i960/strlen_ca.S
   (repo: 4ms/metamodule-plugin-sdk, 5,074 bytes) */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strle_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strlen (optimized assembler version for the CA)
src_addr = strlen (src_addr)
return the number of bytes that precede the null byte in the
string pointed to by src_addr.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last four words of the program's
allocated memory space. This is so because, in several cases, strlen
will fetch ahead several words. Disallowing the fetch ahead would
impose a severe performance penalty.
This program handles two cases:
1) the argument starts on a word boundary
2) the argument doesn't start on a word boundary
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original src ptr; upon return it is the byte count.
g1
g2 src ptr
g3 mask
g4 even word of the source string
g5 odd word of the source string
g6 copy of even word, shift count
g7 copy of odd word
g13 return address
g14 byte extracted.
*/
/*
 * size_t strlen (const char *src) -- 80960CA version.
 * Scans pairwise (ldl of aligned double words, scanbyte against 0)
 * until a word containing the null byte is found, then walks that
 * word a byte at a time. The first word is padded with 0xff in the
 * bytes before src so they can never match null.
 * Leafproc linkage: g13 holds the return address; g14 is 0 on exit.
 */
.globl _strlen
.globl __strlen
.leafproc _strlen, __strlen
.align 2
_strlen:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strlen:
notand g0,3,g2 # extract word addr of start of src
lda (g14),g13 # preserve return address
and g0,3,g7 # extract byte offset of src
ld (g2),g5 # fetch word containing at least first byte
shlo 3,g7,g7 # get shift count for making mask for first word
lda 4(g2),g2 # post-increment src word pointer
subi 1,0,g3 # mask initially all ones
chkbit 2,g2 # are we on an even word boundary or an odd one?
#if __i960_BIG_ENDIAN__
shro g7,g3,g3 # get mask for bytes needed from first word
notor g5,g3,g7 # set unneeded bytes to all ones
lda 0xff000000,g3 # byte extraction mask
#else
shlo g7,g3,g3 # get mask for bytes needed from first word
notor g5,g3,g7 # set unneeded bytes to all ones
lda 0xff,g3 # byte extraction mask
#endif
bno.f Lodd_word # branch if first word is odd
mov g7,g4 # move first word to copy thereof
ld (g2),g5 # load odd word
lda 4(g2),g2 # post-increment src word pointer
Leven_word:
scanbyte 0,g4 # check for null byte
movl g4,g6 # copy both words
Lodd_word: # trickery! if we branch here, following branch
/* instruction will fall thru, as we want, */
/* effecting the load of g4 and g5 only. */
ldl (g2),g4 # fetch next pair of word of src
bo.f Lsearch_for_null # branch if null found
scanbyte 0,g7 # check for null byte
lda 8(g2),g2 # post-increment src word pointer
bno.t Leven_word # branch if null not found yet
subo 4,g2,g2 # back up the byte pointer
lda (g7),g6 # move odd word to search word
Lsearch_for_null:
subo 9,g2,g2 # back up the byte pointer
Lsearch_for_null.a: # byte loop: locate null within the word in g6
and g6,g3,g14 # extract byte
cmpo 0,g14 # is it null?
lda 1(g2),g2 # bump src byte ptr
#if __i960_BIG_ENDIAN__
shlo 8,g6,g6 # shift word to position next byte
#else
shro 8,g6,g6 # shift word to position next byte
#endif
bne.t Lsearch_for_null.a
Lexit_code:
subo g0,g2,g0 # calculate string length
bx (g13) # g0 = string length; g14 = 0
Lrett:
ret
/* end of strlen */
/* ---- concatenated-source boundary ----
   next file: plugin-libc/newlib/libc/machine/i960/strdup.S
   (repo: 4ms/metamodule-plugin-sdk, 2,480 bytes) */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strdup.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure strdup (optimized assembler version: 80960K series, 80960CA)
dest_addr = strdup (src_addr)
Allocate memory and copy thereto the string pointed to by src_addr.
Return the address of the copy, or null if unable to perform the
operation.
*/
/*
 * char *strdup (const char *src)
 * Allocates strlen(src)+1 bytes with _malloc and copies src into them.
 * Returns the copy in g0, or null if the allocation failed.
 * _strdup has its own register frame, so r3 (a local register)
 * survives the calls below; callj lets the linker substitute bal for
 * leafproc targets where possible.
 */
.text
.align 2
.globl _strdup
_strdup:
mov g0,r3 # Keep a copy of the original string addr
callj _strlen # Determine how much to allocate
addo 1,g0,g0 # Add one byte for the null byte at end
callj _malloc # Allocate the storage
cmpo 0,g0 # did malloc return null?
mov r3,g1 # Original string addr is now src for copy
bne.t _strcpy # Tail-branch: strcpy copies, then its exit ret
# returns from strdup's frame with g0 = copy
ret # Return the null ptr otherwise
/* end of strdup */
/* ---- concatenated-source boundary ----
   next file: plugin-libc/newlib/libc/machine/i960/memccpy.S
   (repo: 4ms/metamodule-plugin-sdk, 4,835 bytes) */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memccpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure memccpy (optimized assembler version for the 80960K series)
dest_addr = memccpy (dest_addr, src_addr, char, len)
copy len bytes pointed to by src_addr to the space pointed to by
dest_addr, stopping if char is copied. If char is copied,
return address of byte after char in dest string; else null.
Undefined behavior will occur if the end of the source array is in
the last two words of the program's allocated memory space. This
is so because the routine fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Undefined behavior will also occur if the source and destination
strings overlap.
Strategy:
Fetch the source array by words and store them by words to the
destination array, until there are fewer than three bytes left
to copy. Then, using the last word of the source (the one that
contains the remaining 0, 1, 2, or 3 bytes to be copied), store
a byte at a time until Ldone.
Tactics:
1) Do NOT try to fetch and store the words in a word aligned manner
because, in my judgement, the performance degradation experienced due
to non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that most source and
destination arrays (even more true of most big source arrays) will
be word aligned to begin with.
2) Rather than decrementing len to zero,
I calculate the address of the byte after the last byte of the
destination array, and quit when the destination byte pointer passes
that.
*/
/*
 * void *memccpy (void *dest, const void *src, int c, size_t len)
 * Copies at most len bytes from src to dest, stopping after the first
 * byte equal to c is copied. Returns the address of the byte after c
 * in dest, or null if c was not found within len bytes.
 * Word loop copies 4 bytes at a time, using scanbyte against a word
 * with c broadcast to all four bytes; the remaining 0-3 bytes (and any
 * word known to contain c) are finished byte by byte.
 * Leafproc linkage: g13 holds the return address; g14 is 0 on exit.
 */
.globl _memccpy
.globl __memccpy # bal-style entry, consistent with the other leafprocs
.leafproc _memccpy, __memccpy
.align 2
_memccpy:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__memccpy:
mov g14, g13 # preserve return address
cmpibge 0,g3,Lexit_char_not_found # len <= 0: nothing to copy
addo g3,g1,g3 # compute beyond end of src
ld (g1), g7 # fetch first word of source
lda 0xff,g5 # mask for char
and g5,g2,g2 # extract only char
shlo 8,g2,g6
or g2,g6,g6
shlo 16,g6,g4
or g6,g4,g6 # word with char broadcast to all four bytes
b Lwloop_b
Lwloop_a:
ld (g1), g7 # fetch ahead next word of source
st g4, (g0) # store word to dest
addo 4, g0, g0 # post-increment dest pointer
Lwloop_b: # word copying loop
addo 4, g1, g1 # pre-increment src pointer
cmpo g3, g1 # is len <= 3 ?
mov g7, g4 # keep a copy of the current word
bl Lcloop_setup # quit word loop if less than 4 bytes
scanbyte g6, g7 # check for char
bno Lwloop_a # continue word loop if char not found.
Lcloop_setup:
subo 4, g1, g1 # back down src pointer
cmpobe g1, g3, Lexit_char_not_found # no bytes remain
Lcloop_a: # byte copying loop (fewer than 4 bytes, or word contains char)
and g5,g4,g7 # check the byte against char
cmpo g7,g2
stob g7,(g0) # store the byte
addo 1, g0, g0
be Lexit_char_found # stop once char has been copied
addo 1,g1,g1
cmpo g1,g3 # reached end of src?
shro 8,g4,g4 # position next byte
bne Lcloop_a
Lexit_char_not_found:
mov 0, g0 # char never seen: return null
Lexit_char_found:
lda 0,g14
bx (g13) # g0 = byte after char in dest, or null; g14 = 0
Lrett:
ret
/* end of memccpy */
/* ---- concatenated-source boundary ----
   next file: plugin-libc/newlib/libc/machine/i960/memchr_ca.S
   (repo: 4ms/metamodule-plugin-sdk, 5,373 bytes) */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memchr_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memchr (optimized assembler version for the CA)
src_addr = memchr (src_addr, char, max_bytes)
searching from src_addr for max_bytes bytes, return a pointer to the
first byte that contains the indicated byte in the source string.
Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because, in several cases, memchr
will fetch ahead. Disallowing the fetch ahead would impose a severe
performance penalty.
This program handles two cases:
1) the argument starts on a word boundary
2) the argument doesn't start on a word boundary
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 src ptr; upon return it is a pointer to the matching byte, or null
g1 char to seek
g2 maximum number of bytes to check
g3 char to seek, broadcast to all four bytes
g4 word of the source string
g5 copy of the word
g6 mask to avoid unimportant bytes in first word
g7 byte extraction mask
g13 return address
g14
*/
/*
 * void *memchr (const void *src, int c, size_t max_bytes) -- CA version.
 * Scans word-at-a-time with scanbyte against a word of c broadcast to
 * all four bytes, then isolates the matching byte. Leading bytes of a
 * misaligned first word are forced to 0xff so they cannot match; the
 * c == 0xff case takes a special path (Lsearch_for_0xff) for the same
 * reason. Returns pointer to the first match, or null.
 * Leafproc linkage: g13 holds the return address; g14 is 0 on exit.
 */
.globl _memchr
.globl __memchr
.leafproc _memchr, __memchr
.align 2
_memchr:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__memchr:
mov g14,g13 # preserve return address
lda 0xff,g7 # byte extraction mask
and g1,g7,g1 # make char an 8-bit ordinal
lda 0,g14 # conform to register linkage standard
cmpibge.f 0,g2,Lnot_found # do nothing if max_bytes <= 0
addo g0,g2,g2 # compute ending address from start and len
and g0,3,g6 # extract byte offset of src
notand g0,3,g0 # extract word addr of start of src
shlo 8,g1,g3 # broadcast the char to four bytes
ld (g0),g4 # fetch word containing at least first byte
or g1,g3,g3
shlo 16,g3,g5
cmpo g1,g7 # is char being sought 0xff?
or g5,g3,g3
shlo 3,g6,g6 # get shift count for making mask for first word
subi 1,0,g5 # mask initially all ones
#if __i960_BIG_ENDIAN__
shro g6,g5,g5 # get mask for bytes needed from first word
#else
shlo g6,g5,g5 # get mask for bytes needed from first word
#endif
notor g4,g5,g4 # set unneeded bytes to all ones
be.f Lsearch_for_0xff # branch if seeking 0xff
Lsearch_for_word_with_char:
scanbyte g3,g4 # check for byte with char
lda 4(g0),g0 # pre-increment src word pointer
mov g4,g5 # keep a copy of word
ld (g0),g4 # fetch next word of src
bo.f Lsearch_for_char # branch if char found in this word
cmpoble.t g0,g2,Lsearch_for_word_with_char # loop while in bounds
Lnot_found:
mov 0,g0 # char not found. Return null
Lexit_code:
bx (g13) # g0 = addr of char in src (or null); g14 = 0
Lrett:
ret
Lsearch_for_char:
subo 4,g0,g0 # back up the byte pointer
Lsearch_for_char.a: # byte loop: locate char within the word in g5
cmpobe.f g0,g2,Lnot_found # quit if max_bytes exhausted
#if __i960_BIG_ENDIAN__
rotate 8,g5,g5 # shift word to position next byte
#endif
and g5,g7,g6 # extract byte
cmpo g1,g6 # is it char?
lda 1(g0),g0 # bump src byte ptr
#if ! __i960_BIG_ENDIAN__
shro 8,g5,g5 # shift word to position next byte
#endif
bne.t Lsearch_for_char.a
subo 1,g0,g0 # back up the byte pointer
b Lexit_code
Lsearch_for_0xff: # c == 0xff: padding bytes are 0xff too, so remap
lda 0xf0f0f0f0,g6 # make first comparison mask for char=-1 case.
or g6,g5,g6
and g4,g6,g4 # make unimportant bytes of first word 0x0f
b Lsearch_for_word_with_char
/* ---- concatenated-source boundary ----
   next file: plugin-libc/newlib/libc/machine/i960/strncmp.S
   (repo: 4ms/metamodule-plugin-sdk, 4,578 bytes) */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strncmp.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncmp (optimized assembler version for the 80960K Series)
result = strncmp (src1_addr, src2_addr, max_bytes)
compare the null terminated string pointed to by src1_addr to
the string pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicographically less than src2_addr,
and 1 if it is lexicographically greater. Do not compare more than
max_bytes bytes.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strncmp
will fetch ahead. Disallowing the fetch ahead would impose
a severe performance penalty.
Strategy:
Fetch and compare the strings by words and go to a character
comparison loop as soon as a pair of words differ. If the
words are equal up through either the exhaustion of max_bytes
or the presence of the null byte, return 0 (equality). Otherwise,
the character comparator will return -1 or 1 for inequality, or
0 if the differing byte is after the null byte or after the
exhaustion of max_bytes.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment.
*/
/*
 * int strncmp (const char *s1, const char *s2, size_t max_bytes)
 * Compares words until a mismatch or a null byte in s1 appears, then
 * re-compares that word byte by byte (little-endian: the 0xff mask
 * starts at the low byte and is shifted up, so the first differing
 * byte decides). Returns 0, -1, or 1.
 * Leafproc linkage: g13 holds the return address; g14 is 0 on exit.
 */
.globl _strncmp
.globl __strncmp
.leafproc _strncmp,__strncmp
.align 2
_strncmp:
#ifndef __PIC
lda .Lrett,g14
#else
lda .Lrett-(.+8)(ip),g14
#endif
__strncmp:
mov g14,g13
ldconst 0,g14
cmpibge 0,g2,Lequal_exit # Lexit early if max_bytes <= 0
addo g2,g0,g2
.Lwloop:
cmpo g0,g2 # are max_bytes exhausted?
ld (g0), g5 # fetch word of source_1
bge Lequal_exit # Lexit (equality) if max_bytes exhausted
ld (g1), g3 # fetch word of source_2
addo 4,g0,g0 # post-increment source_1 ptr
scanbyte 0,g5 # is a null byte present?
addo 4,g1,g1 # post-increment source_2 ptr
be .Lcloop.a # perform char comparator if null byte found
cmpobe g5,g3,.Lwloop # continue word loop if words are equal;
# fall through to char comparator otherwise
.Lcloop.a: subo 4,g0,g0 # back source_1 ptr up to the word just compared
ldconst 0xff,g4 # byte extraction mask
.Lcloop: and g4,g5,g7 # compare individual bytes
and g4,g3,g6
cmpobne g7,g6,.diff # if different, return -1 or 1
cmpo 0,g6 # they are equal. are they null?
shlo 8,g4,g4 # position mask to extract next byte
be Lequal_exit # if they are null, Lexit (equality)
addo 1,g0,g0 # is max_bytes exhausted?
cmpobl g0,g2,.Lcloop # if not, loop. if so, Lexit (equality)
Lequal_exit:
mov 0,g0
bx (g13)
.Lrett:
ret
.diff: bl .neg # flags from the cmpobne above still apply
mov 1,g0
bx (g13)
.neg: subi 1,0,g0
.Lexit:
bx (g13)
/* end of strncmp */
/* ---- concatenated-source boundary ----
   next file: plugin-libc/newlib/libc/machine/i960/memset.S
   (repo: 4ms/metamodule-plugin-sdk, 3,969 bytes) */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memset.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure memset (optimized assembler version: 80960K series, 80960CA)
dest_addr = memset (dest_addr, char, len)
Fill len bytes pointed to by dest_addr with the value of char.
Return the original address of dest_addr.
This program avoids performing unaligned accesses. It stores
from zero to seven bytes, and then stores aligned longwords,
and then stores from zero to seven bytes, as necessary to
store len bytes starting at dest_addr.
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine.
*/
/*
 * void *memset (void *dest, int c, size_t len)
 * Fills len bytes at dest with c and returns dest. Stores bytes up to
 * the next longword boundary, then aligned longword pairs (stl), then
 * the trailing 0-7 bytes. g0 is never modified, so it is the return
 * value. g6 is the working pointer; g4:g5 is a longword of c.
 * Leafproc linkage: g13 holds the return address; g14 is 0 on exit.
 */
.globl _memset
.globl __memset
.leafproc _memset, __memset
.align 2
_memset:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__memset:
cmpo 7,g2 # seven or fewer bytes to move?
lda (g14),g13 # save return address
notand g0,7,g3 # test for non-aligned dest_ptr
lda 0,g14 # conform to register conventions
shlo 24,g1,g4 # prepare word of char
lda (g0),g6 # preserve dest_ptr for return
shro 8,g4,g5
bge.f Lcloop_setup # byte loop only, if len <= 7
cmpo g3,g0 # is dest longword aligned
lda 7(g3),g3 # bump dest_ptr to next longword boundary
or g4,g5,g4
be.t Lwloop_setup
Lbgn_cloop: # byte loop up to the longword boundary
cmpo g6,g3 # Have we reached longword boundary?
stob g1,(g6) # store one byte of char
subo 1,g2,g2 # decrement len
lda 1(g6),g6 # increment dest_ptr
bne.t Lbgn_cloop # loop if more bytes to store before longword
cmpobge.f 7,g2,Lcloop # skip longword loop if len <= 7 now
Lwloop_setup:
shro 16,g4,g5
or g4,g5,g4
mov g4,g5 # now have a longword of char
Lwloop: # aligned 8-byte stores
cmpo 15,g2 # Do we have to store more longwords?
stl g4,(g6) # Store longword of char
subo 8,g2,g2 # Decrement len
lda 8(g6),g6 # Increment dest_ptr
bl.t Lwloop # loop if more longwords to store
Lcloop_setup:
cmpobge.t 0,g2,Lexit # done if no bytes remain
Lcloop: # trailing byte loop
cmpo 1,g2 # Is len exhausted?
stob g1,(g6) # Store byte
subo 1,g2,g2 # Decrement len
lda 1(g6),g6 # Increment dest_ptr
bne.t Lcloop # loop if more bytes to store
Lexit:
bx (g13) # g0 = original dest_ptr; g14 = 0
Lrett:
ret
/* end of memset */
/* ---- concatenated-source boundary ----
   next file: plugin-libc/newlib/libc/machine/i960/strncat_ca.S
   (repo: 4ms/metamodule-plugin-sdk, 10,423 bytes) */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "sncat_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncat (optimized assembler version for the CA)
dest_addr = strncat (dest_addr, src_addr, max_bytes)
append the null terminated string pointed to by src_addr to the null
terminated string pointed to by dest_addr. Return the original
dest_addr. If the source string is longer than max_bytes, then
append only max_bytes bytes, and tack on a null byte on the end
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strncat
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original dest ptr; not modified, so that it may be returned.
g1 src ptr; shift count
g2 max_bytes
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 0xff -- byte extraction mask
Little endian:
g6 lsw of double word for extraction of 4 bytes
g7 msw of double word for extraction of 4 bytes
Big endian:
g6 msw of double word for extraction of 4 bytes
g7 lsw of double word for extraction of 4 bytes
g13 return address
g14 byte extracted.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
.globl _strncat
.globl __strncat
.leafproc _strncat, __strncat
.align 2
/*
 * Dual leafproc entry: _strncat (system "call" entry) loads the address
 * of Lrett -- a real ret instruction -- into g14 and falls through;
 * __strncat (bal entry) is reached with the caller's return address
 * already in g14.  The return address is copied to g13 and every exit
 * goes through bx (g13) with g14 zeroed, per the leafproc conventions.
 */
_strncat:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strncat:
notand g0,3,g4 # extract word addr of start of dest
lda (g14),g13 # preserve return address
cmpibge.f 0,g2,Lexit_code # Lexit if number of bytes to move is <= zero.
and g0,3,LSW # extract byte offset of dest
ld (g4),MSW # fetch word containing at least first byte
shlo 3,LSW,g14 # get shift count for making mask for first word
subi 1,0,LSW # mask initially all ones
#if __i960_BIG_ENDIAN__
shro g14,LSW,LSW # get mask for bytes needed from first word
#else
shlo g14,LSW,LSW # get mask for bytes needed from first word
#endif
notor MSW,LSW,MSW # set unneeded bytes to all ones
lda 0xff,g5 # byte extraction mask
Lsearch_for_word_with_null:
scanbyte 0,MSW # check for null byte
lda 4(g4),g4 # post-increment dest word pointer
mov MSW,LSW # keep a copy of current word
ld (g4),MSW # fetch next word of dest
bno.t Lsearch_for_word_with_null # branch if null not found yet
#if __i960_BIG_ENDIAN__
shro 24,LSW,g14 # extract byte
#else
and g5,LSW,g14 # extract byte
#endif
cmpo 0,g14 # branch if null is first byte of word
subo 4,g4,g4 # move dest word ptr to word with null
notand g1,3,g3 # extract word addr of start of src
bne.t Lsearch_for_null # null not in first byte; locate it within the word
Lcase_14:
cmpo g1,g3 # check alignment of source
ld (g3),LSW # fetch first word of source
shlo 3,g1,g14 # compute shift count
lda 4(g3),g3 # post-increment src addr
bne.f Lcase_4 # branch if source is unaligned
Lcase_1:
Lcase_1_wloop: # word copying loop
cmpi g2,4 # check for fewer than four bytes to move
lda (LSW),g1 # keep a copy of the src word
bl.f Lcase_1_cloop # branch if fewer than four bytes to copy
scanbyte 0,g1 # check for null byte in src word
ld (g3),LSW # pre-fetch next word of src
addo 4,g3,g3 # post-increment src addr
bo.f Lcase_1_cloop # branch if word contains null byte
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
st g1,(g4) # store word in dest string
addo 4,g4,g4 # post-increment dest addr
b Lcase_1_wloop # continue word-at-a-time copy
Lcase_3_cloop:
Lcase_1_cloop: # character copying loop (max_bytes <= 3)
cmpdeci 0,g2,g2 # is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
rotate 8,g1,g1 # move next byte into position for extraction
#endif
and g5,g1,g14 # extract next char
be.f Lstore_null # if max_bytes is exhausted, store null and quit
cmpo 0,g14 # check for null byte
stob g14,(g4) # store the byte in dest
#if ! __i960_BIG_ENDIAN__
shro 8,g1,g1 # move next byte into position for extraction
#endif
lda 1(g4),g4 # post-increment dest byte addr
bne.t Lcase_1_cloop # branch if null not reached
bx (g13) # Lexit (g14 == 0)
Lstore_null:
mov 0,g14 # store null, and set g14 to zero
stob g14,(g4) # terminate dest with the null byte
bx (g13) # return; g0 still holds original dest
Lsearch_for_null:
#if __i960_BIG_ENDIAN__
shlo 8,LSW,LSW # check next byte
shro 24,LSW,g14 # extract byte
#else
shlo 8,g5,g5 # move mask up to next byte
and g5,LSW,g14 # extract byte
#endif
lda 1(g4),g4 # move dest byte ptr to next byte
cmpobne.t 0,g14,Lsearch_for_null # branch if null is not yet found
Lcase_235:
cmpo g1,g3 # check alignment of src
ld (g3),LSW # pre-fetch word with start of src
and 3,g1,g1 # compute shift count
lda 0xff,g5 # load mask for byte extraction
shlo 3,g1,g14 # shift count in bits = 8 * src byte offset
lda 4(g3),g3 # post-increment src word counter
be.t Lcase_3 # branch if src is word aligned
and g4,3,MSW # extract byte offset for dest string
cmpo MSW,g1 # < indicates first word of dest has more bytes
/* than first word of source. */
ld (g3),MSW # fetch second word of src
#if __i960_BIG_ENDIAN__
subo g14,0,g14 # adjust shift count for big endian
#endif
eshro g14,g6,g5 # extract four bytes
#if __i960_BIG_ENDIAN__
bge.f 1f
#else
bg.f 1f
#endif
mov MSW,LSW # second src word becomes low word for extraction
lda 4(g3),g3 # move src word addr to second word boundary
1:
mov g5,MSW # extracted bytes become the current work word
lda 0xff,g5 # restore byte extraction mask
b Lcase_25 # join common byte-copy loop
Lcase_3: # src is word aligned; dest is not
mov LSW,MSW # make copy of first word of src
lda 32,g14 # initialize shift count to zero (mod 32)
Lcase_25:
Lcase_3_cloop_at_start: # character copying loop for start of dest str
cmpdeci 0,g2,g2 # is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
shro 24,MSW,g5 # extract next char
#else
and g5,MSW,g5 # extract next char
#endif
be.f Lstore_null # Lexit if max_bytes is exhausted
cmpo 0,g5 # check for null byte
stob g5,(g4) # store the byte in dest
addo 1,g4,g4 # post-increment dest ptr
lda 0xff,g5 # re-initialize byte extraction mask
notand g4,3,g1 # extract word address
be.t Lexit_code # Lexit if null byte reached
cmpo g1,g4 # have we reached word boundary in dest yet?
#if __i960_BIG_ENDIAN__
lda -8(g14),g14 # augment the shift counter
rotate 8,MSW,MSW # move next byte into position for extraction
#else
lda 8(g14),g14 # augment the shift counter
shro 8,MSW,MSW # move next byte into position for extraction
#endif
bne.t Lcase_3_cloop_at_start # branch if reached word boundary?
#if __i960_BIG_ENDIAN__
cmpo 0,g14 # shift count zero => src now effectively word aligned
ld (g3),MSW # fetch msw of operand for double shift
bne Lcase_3_wloop # branch if src is still unaligned.
Lcase_3_wloop2:
cmpi g2,4 # less than four bytes to move?
mov LSW,g1 # extract 4 bytes of src
lda 4(g3),g3 # post-increment src word addr
bl.f Lcase_3_cloop # branch if < four bytes left to move
scanbyte 0,g1 # check for null byte
mov MSW,LSW # move msw to lsw
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop # branch if word contains null byte
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop2 # continue aligned word copy
Lcase_4:
subo g14,0,g14 # adjust shift count for big endian
#else
Lcase_4:
#endif
ld (g3),MSW # fetch msw of operand for double shift
Lcase_3_wloop:
cmpi g2,4 # less than four bytes to move?
eshro g14,g6,g1 # extract 4 bytes of src
lda 4(g3),g3 # post-increment src word addr
bl.f Lcase_3_cloop # branch if < four bytes left to move
scanbyte 0,g1 # check for null byte
mov MSW,LSW # move msw to lsw
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop # branch if word contains null byte
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop # continue shifted word copy
Lexit_code:
mov 0,g14 # conform to register conventions
bx (g13) # g0 = addr of dest; g14 = 0
Lrett:
ret # call-entry return path (see leafproc note above)
/* end of strncat */
|
4ms/metamodule-plugin-sdk
| 5,234
|
plugin-libc/newlib/libc/machine/i960/strchr_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strch_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strchr (optimized assembler version for the CA)
src_addr = strchr (src_addr, char)
return a pointer to the first byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because, in several cases, strchr
will fetch ahead. Disallowing the fetch ahead would impose a severe
performance penalty.
This program handles two cases:
1) the argument starts on a word boundary
2) the argument doesn't start on a word boundary
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 src ptr; upon return it is a pointer to the matching byte, or null
g1 char to seek
g2 mask to avoid unimportant bytes in first word
g3 char to seek, broadcast to all four bytes
g4 word of the source string
g5 copy of the word
g6 extracted character
g7 byte extraction mask
g13 return address
g14
*/
.globl _strchr
.globl __strchr
.leafproc _strchr, __strchr
.align 2
/*
 * Dual leafproc entry: _strchr (system "call" entry) loads the address
 * of Lrett -- a real ret instruction -- into g14 and falls through;
 * __strchr (bal entry) is reached with the caller's return address
 * already in g14.  The return address is copied to g13 and every exit
 * goes through bx (g13) with g14 zeroed, per the leafproc conventions.
 */
_strchr:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strchr:
lda 0xff,g7 # byte extraction mask
and g1,g7,g1 # make char an 8-bit ordinal
shlo 8,g1,g2 # broadcast the char to four bytes
or g1,g2,g2 # g2 = char in low two bytes
shlo 16,g2,g4 # copy the pair into the upper half
cmpo g1,g7 # is char being sought 0xff?
or g4,g2,g3 # g3 = char replicated in all four bytes
lda (g14),g13 # preserve return address
notand g0,3,g5 # extract word addr of start of src
lda 0,g14 # conform to register linkage standard
and g0,3,g6 # extract byte offset of src
ld (g5),g4 # fetch word containing at least first byte
shlo 3,g6,g6 # get shift count for making mask for first word
lda 4(g5),g0 # post-increment src word pointer
subi 1,0,g5 # mask initially all ones
#if __i960_BIG_ENDIAN__
shro g6,g5,g5 # get mask for bytes needed from first word
#else
shlo g6,g5,g5 # get mask for bytes needed from first word
#endif
notor g4,g5,g4 # set unneeded bytes to all ones
be.f Lsearch_for_0xff # branch if seeking 0xff
Lsearch_for_word_with_char_or_null:
scanbyte g3,g4 # check for byte with char
lda (g4),g5 # copy word
ld (g0),g4 # fetch next word of src
bo.f Lsearch_for_char # branch if char byte found in this word
scanbyte 0,g5 # check for null byte
lda 4(g0),g0 # post-increment src word pointer
bno.t Lsearch_for_word_with_char_or_null # branch if not null
Lnot_found:
mov 0,g0 # char not found. Return null
Lexit_code:
bx (g13) # g0 = addr of char in src (or null); g14 = 0
Lrett:
ret # call-entry return path (see leafproc note above)
Lsearch_for_char:
subo 5,g0,g0 # back up the byte pointer
Lsearch_for_char.a:
#if __i960_BIG_ENDIAN__
rotate 8,g5,g5 # shift word to position next byte
#endif
and g5,g7,g6 # extract byte
cmpo g1,g6 # is it char?
lda 1(g0),g0 # bump src byte ptr
#if ! __i960_BIG_ENDIAN__
shro 8,g5,g5 # shift word to position next byte
#endif
be.f Lexit_code # char found; g0 now points at it
cmpobne.t 0,g6,Lsearch_for_char.a # quit if null comes before char
b Lnot_found # null terminator reached before char
Lsearch_for_0xff:
lda 0xf0f0f0f0,g2 # make first comparison mask for char=-1 case.
or g5,g2,g2 # fold in the first-word byte mask
and g4,g2,g4 # make unimportant bytes of first word 0x0f
b Lsearch_for_word_with_char_or_null # rejoin the common scan loop
|
4ms/metamodule-plugin-sdk
| 8,008
|
plugin-libc/newlib/libc/machine/i960/strcmp_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcm_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcmp (optimized assembler version for the CA)
result = strcmp (src1_addr, src2_addr)
compare the null terminated string pointed to by src1_addr to
the string space pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicly less than src2_addr, and 1
if it is lexicly greater.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strcmp
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source1 is word aligned, source2 is not
4) source2 is word aligned, source1 is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g14 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers are sufficient to implement the routine.
The registers are used as follows:
g0 original src1 ptr; return result
g1 src2 ptr; 0xff -- byte extraction mask
g2 src1 word ptr
g3 src2 word ptr
Little endian:
g4 lsw of src1
g5 msw of src1
g6 src2 word
g7 extracted src1
Big endian:
g4 msw of src1
g5 lsw of src1
g6 extracted src1
g7 src2 word
g13 return address
g14 shift count
*/
#if __i960_BIG_ENDIAN__
#define MSW g4
#define LSW g5
#define SRC1 g6
#define SRC2 g7
#else
#define LSW g4
#define MSW g5
#define SRC2 g6
#define SRC1 g7
#endif
.globl _strcmp
.globl __strcmp
.leafproc _strcmp, __strcmp
.align 2
/*
 * Dual leafproc entry: _strcmp (system "call" entry) loads the address
 * of Lrett -- a real ret instruction -- into g14 and falls through;
 * __strcmp (bal entry) is reached with the caller's return address
 * already in g14.  The return address is copied to g13 and every exit
 * goes through bx (g13) with g14 zeroed, per the leafproc conventions.
 * Note the Lrestart trick: when neither string is word aligned, the
 * pointers are adjusted so src2 becomes word aligned and the routine
 * re-enters itself at Lrestart.
 */
_strcmp:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strcmp:
Lrestart:
notand g0,3,g2 # extract word addr of start of src1
lda (g14),g13 # preserve return address
#if __i960_BIG_ENDIAN__
cmpo g0,g2 # check alignment of src1
#endif
ld (g2),LSW # fetch word with at least first byte of src1
notand g1,3,g3 # extract word addr of start of src2
ld 4(g2),MSW # fetch second word of src1
#if __i960_BIG_ENDIAN__
bne Lsrc1_unaligned # branch if src1 is unaligned
cmpo g3,g1 # check alignment of src2
ld (g3),SRC2 # fetch word with at least first byte of src2
mov LSW,SRC1 # extract word of src1
lda 8(g2),g2 # advance src1 word addr
bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
/* src2 is word aligned */
Lwloop2: # word comparing loop
cmpo SRC2,SRC1 # compare src1 and src2 words
lda 0xff000000,g1 # byte extraction mask
mov MSW,LSW # move msw of src1 to lsw
ld (g2),MSW # pre-fetch next msw of src1
addo 4,g2,g2 # post-increment src1 addr
lda 4(g3),g3 # pre-increment src2 addr
bne.f Lcloop # branch if src1 and src2 unequal
scanbyte 0,SRC1 # check for null byte in src1 word
ld (g3),SRC2 # pre-fetch next word of src2
mov LSW,SRC1 # extract word of src1
lda 0,g0 # prepare to return zero, indicating equality
bno.t Lwloop2 # branch if null byte not encountered
/* words were equal and contained null byte */
mov 0,g14 # conform to register conventions
bx (g13) # return
Lsrc1_unaligned:
#endif
cmpo g3,g1 # check alignment of src2
ld (g3),SRC2 # fetch word with at least first byte of src2
shlo 3,g0,g14 # compute shift count for src1
#if __i960_BIG_ENDIAN__
subo g14,0,g14 # 32 - shift count for big endian.
#endif
eshro g14,g4,SRC1 # extract word of src1
lda 8(g2),g2 # advance src1 word addr
bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
/* at least src2 is word aligned */
Lwloop: # word comparing loop
cmpo SRC2,SRC1 # compare src1 and src2 words
#if __i960_BIG_ENDIAN__
lda 0xff000000,g1 # byte extraction mask
#else
lda 0xff,g1 # byte extraction mask
#endif
mov MSW,LSW # move msw of src1 to lsw
ld (g2),MSW # pre-fetch next msw of src1
addo 4,g2,g2 # post-increment src1 addr
lda 4(g3),g3 # pre-increment src2 addr
bne.f Lcloop # branch if src1 and src2 unequal
scanbyte 0,SRC1 # check for null byte in src1 word
ld (g3),SRC2 # pre-fetch next word of src2
eshro g14,g4,SRC1 # extract word of src1
lda 0,g0 # prepare to return zero, indicating equality
bno.t Lwloop # branch if null byte not encountered
/* words were equal and contained null byte */
mov 0,g14 # conform to register conventions
bx (g13) # return
Lcloop_setup: # setup for coming from Lsrc2_unaligned
mov LSW,SRC1 # restore extracted src1 word
#if __i960_BIG_ENDIAN__
lda 0xff000000,g1 # byte extraction mask
#else
lda 0xff,g1 # byte extraction mask
#endif
Lcloop: # character comparing loop
and SRC2,g1,g3 # extract next char of src2
and SRC1,g1,g0 # extract next char of src1
cmpobne.f g0,g3,.diff # check for equality
cmpo 0,g0 # check for null byte
#if __i960_BIG_ENDIAN__
shro 8,g1,g1 # shift mask for next byte
#else
shlo 8,g1,g1 # shift mask for next byte
#endif
bne.t Lcloop # branch if null not reached
/* words are equal up thru null byte */
mov 0,g14 # conform to register conventions
bx (g13) # g0 = 0 (src1 == src2)
Lrett:
ret # call-entry return path (see leafproc note above)
.diff:
mov 0,g14 # conform to register conventions
bl Lless_than_exit # condition code from the byte compare still set
Lgreater_than_exit:
mov 1,g0
bx (g13) # g0 = 1 (src1 > src2)
Lless_than_exit:
subi 1,0,g0
bx (g13) # g0 = -1 (src1 < src2)
Lsrc2_unaligned:
mov SRC1,LSW # retain src1 extracted word
ld 4(g3),SRC1 # fetch second word of src2
shlo 3,g1,MSW # compute shift count for src2
#if __i960_BIG_ENDIAN__
subo MSW,0,MSW # 32 - shift count for big endian.
#endif
eshro MSW,g6,SRC2 # extract word of src2
cmpo LSW,SRC2 # compare src1 and src2 words
notor g1,3,MSW # first step in computing new src1 ptr
lda 4(g3),g1 # set new src2 ptr
bne.f Lcloop_setup # first four bytes differ
scanbyte 0,LSW # check for null byte
lda (g13),g14 # prepare return pointer for Lrestart
subo MSW,g0,g0 # second (final) step in computing new src1 ptr
bno.t Lrestart # if null byte not encountered, continue
/* with both string fetches shifted such that */
/* src2 is now word aligned. */
mov 0,g14 # conform to register conventions.
lda 0,g0 # return indicator of equality.
bx (g13) # g0 = 0; g14 = 0
|
4ms/metamodule-plugin-sdk
| 10,205
|
plugin-libc/newlib/libc/machine/i960/strncpy_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "sncpy_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncpy (optimized assembler version for the CA)
dest_addr = strncpy (dest_addr, src_addr, max_bytes)
copy the null terminated string pointed to by src_addr to
the string space pointed to by dest_addr. Return the original
dest_addr. If the source string is shorter than max_bytes,
then null-pad the destination string.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strcpy
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original dest ptr; not modified, so that it may be returned.
g1 src ptr; shift count
g2 max_bytes
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 0xff -- byte extraction mask
Little endian:
g6 lsw of double word for extraction of 4 bytes
g7 msw of double word for extraction of 4 bytes
Big endian:
g6 msw of double word for extraction of 4 bytes
g7 lsw of double word for extraction of 4 bytes
g13 return address
g14 byte extracted.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
.globl _strncpy
.globl __strncpy
.leafproc _strncpy, __strncpy
.align 2
/*
 * Dual leafproc entry: _strncpy (system "call" entry) loads the address
 * of Lrett -- a real ret instruction -- into g14 and falls through;
 * __strncpy (bal entry) is reached with the caller's return address
 * already in g14.  The return address is copied to g13 and every exit
 * goes through bx (g13) with g14 zeroed, per the leafproc conventions.
 */
_strncpy:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strncpy:
notand g1,3,g3 # extract word addr of start of src
lda (g14),g13 # preserve return address
cmpibge.f 0,g2,Lexit_code # Lexit if number of bytes to move is <= zero.
cmpo g3,g1 # check alignment of src
ld (g3),LSW # fetch word containing at least first byte
notand g0,3,g4 # extract word addr of start of dest
lda 4(g3),g3 # advance src word addr
bne.f Lcase_245 # branch if src is NOT word aligned
Lcase_13:
cmpo g0,g4 # check alignment of dest
lda 0xff,g5 # load mask for byte extraction
subo 4,g4,g4 # store is pre-incrementing; back up dest addr
bne.f Lcase_3 # branch if dest not word aligned
Lcase_1: # src and dest are word aligned
Lcase_1_wloop: # word copying loop
cmpi g2,4 # check for fewer than four bytes to move
lda (LSW),g1 # keep a copy of the src word
addo 4,g4,g4 # pre-increment dest addr
bl.f Lcase_1_cloop.a # branch if fewer than four bytes to copy
scanbyte 0,g1 # check for null byte in src word
ld (g3),LSW # pre-fetch next word of src
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
bo.f Lcase_1_cloop.c # branch if word contains null byte
addo 4,g3,g3 # post-increment src addr
st g1,(g4) # store word in dest string
b Lcase_1_wloop # continue word-at-a-time copy
Lcase_3_cloop.a:
Lcase_1_cloop.a: # character copying loop (max_bytes <= 3)
#if __i960_BIG_ENDIAN__
rotate 8,g1,g1 # move next byte into position for extraction
#endif
and g5,g1,g14 # extract next char
Lcase_1_cloop.b:
cmpdeci 0,g2,g2 # is max_bytes exhausted?
be.f Lexit_code # Lexit if max_bytes is exhausted
cmpo 0,g14 # check for null byte
stob g14,(g4) # store the byte in dest
#if ! __i960_BIG_ENDIAN__
shro 8,g1,g1 # move next byte into position for extraction
#endif
lda 1(g4),g4 # post-increment dest byte addr
bne.t Lcase_1_cloop.a # branch if null not reached
b Lcase_1_cloop.b # null seen: keep storing nulls (g14 == 0) to pad
Lexit_code:
mov 0,g14 # conform to register conventions
bx (g13) # g0 = addr of dest; g14 = 0
Lrett:
ret # call-entry return path (see leafproc note above)
Lcase_1_cloop.c:
Lcase_3_cloop.c:
#if __i960_BIG_ENDIAN__
rotate 24,g5,g5 # move mask into position for testing next byte
#endif
and g5,g1,g14 # extract next char
cmpo 0,g14 # check for null byte
#if ! __i960_BIG_ENDIAN__
lda (g5),LSW # keep a copy of the current mask
shlo 8,g5,g5 # move mask into position for testing next byte
#endif
bne.t Lcase_1_cloop.c # branch if null not reached
#if __i960_BIG_ENDIAN__
subo 1,g5,g5 # null pad.
andnot g5,g1,g1 # last bytes to copy, and null pad rest of word
#else
subo 1,LSW,g5 # mask to get last bytes to copy, and null pad
and g5,g1,g1 # last bytes to copy, and null pad rest of word
#endif
st g1,(g4) # store final src bytes plus null padding
Lcase_1_zwloop: # zero word loop
cmpi g2,4 # check for fewer than four bytes to move
addo 4,g4,g4 # pre-increment dest addr
bl.f Lcase_1_cloop.b # branch if fewer than four bytes to copy
subo 4,g2,g2 # decrease max_byte count by the 4 bytes moved
st g14,(g4) # store word in dest string
b Lcase_1_zwloop # continue null-padding by words
Lcase_3: # src is word aligned; dest is not
addo 8,g4,g4 # move dest word ptr to first word boundary
lda (g0),g1 # copy dest byte ptr
mov LSW,MSW # make copy of first word of src
lda 32,g14 # initialize shift count to zero (mod 32)
Lcase_25:
Lcase_3_cloop_at_start: # character copying loop for start of dest str
cmpdeci 0,g2,g2 # is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
shro 24,MSW,g5 # extract next char
#else
and g5,MSW,g5 # extract next char
#endif
be.f Lexit_code # Lexit if max_bytes is exhausted
cmpo 0,g5 # check for null byte
stob g5,(g1) # store the byte in dest
addo 1,g1,g1 # post-increment dest ptr
lda 0xff,g5 # re-initialize byte extraction mask
bne.t 1f # drop thru if null byte reached (to pad)
movl 0,g6 # blank out remainder of input buffer
1:
cmpo g1,g4 # have we reached word boundary in dest yet?
#if __i960_BIG_ENDIAN__
lda -8(g14),g14 # augment the shift counter
rotate 8,MSW,MSW # move next byte into position for extraction
#else
lda 8(g14),g14 # augment the shift counter
shro 8,MSW,MSW # move next byte into position for extraction
#endif
bne.t Lcase_3_cloop_at_start # branch if reached word boundary?
ld (g3),MSW # fetch msw of operand for double shift
Lcase_4:
#if __i960_BIG_ENDIAN__
cmpobne 0,g14,Lcase_3_wloop # branch if src is still unaligned.
Lcase_3_wloop2:
cmpi g2,4 # less than four bytes to move?
lda (LSW),g1 # extract 4 bytes of src
lda 4(g3),g3 # post-increment src word addr
bl.f Lcase_3_cloop.a # branch if < four bytes left to move
scanbyte 0,g1 # check for null byte
lda (MSW),LSW # move msw to lsw
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop.c # branch if word contains null byte
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop2 # continue aligned word copy
#endif
Lcase_3_wloop:
cmpi g2,4 # less than four bytes to move?
eshro g14,g6,g1 # extract 4 bytes of src
lda 4(g3),g3 # post-increment src word addr
bl.f Lcase_3_cloop.a # branch if < four bytes left to move
scanbyte 0,g1 # check for null byte
lda (MSW),LSW # move msw to lsw
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop.c # branch if word contains null byte
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop # continue shifted word copy
Lcase_245:
cmpo g0,g4 # check alignment of dest
ld (g3),MSW # pre-fetch second half
and 3,g1,g1 # compute shift count
lda 0xff,g5 # load mask for byte extraction
#if __i960_BIG_ENDIAN__
subo g1,4,g14 # adjust shift count for big endian.
shlo 3,g14,g14 # shift count in bits
#else
shlo 3,g1,g14 # shift count in bits = 8 * src byte offset
#endif
be.t Lcase_4 # branch if dest is word aligned
or g4,g1,g1 # is src earlier in word, later, or sync w/ dst
cmpo g0,g1 # < indicates first word of dest has more bytes
/* than first word of source. */
lda 4(g4),g4 # move dest word addr to first word boundary
eshro g14,g6,g5 # extract four bytes
lda (g0),g1 # g1 = dest byte ptr
bg.f 1f
mov MSW,LSW # second src word becomes low word for extraction
lda 4(g3),g3 # move src word addr to second word boundary
1:
mov g5,MSW # extracted bytes become the current work word
lda 0xff,g5 # restore byte extraction mask
b Lcase_25 # join common byte-copy loop
/* end of strncpy */
|
4ms/metamodule-plugin-sdk
| 5,015
|
plugin-libc/newlib/libc/machine/i960/strrchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strrchr.s"
#ifdef __i960_BIG_ENDIAN__
#error "This does not work in big-endian"
#endif
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strrchr (optimized assembler version for the 80960K series)
src_addr = strrchr (src_addr, char)
return a pointer to the last byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because strrchr fetches ahead.
Disallowing the fetch ahead would impose a severe performance penalty.
Strategy:
Fetch the source string by words and scanbyte the words for the
char until either a word with the byte is found or the null byte is
encountered. In the former case, move through the word to find the
matching byte and save its memory address, then continue the search.
In the latter case, return the saved address, or zero (null) if none
was ever found to save.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
*/
	.globl	_strrchr
	.globl	__strrchr
	.leafproc	_strrchr, __strrchr
	.align	2
_strrchr:
	# char *strrchr(const char *s /*g0*/, int c /*g1*/) -- call entry point
#ifdef	__PIC
	lda	Lrett-(.+8)(ip),g14	# PIC: ip-relative address of return stub
#else
	lda	Lrett,g14		# address of return stub for "call" entry
#endif
__strrchr:
	ld	(g0),g4		# fetch first word of src
	lda	0xff,g7		# byte extraction mask
	and	g1,g7,g1	# make char an 8-bit ordinal
	shlo	8,g1,g2		# broadcast the char to all four bytes of g3
	or	g1,g2,g2
	shlo	16,g2,g5
	or	g2,g5,g3
	mov	g14,g13		# preserve return address
	addo	4,g0,g2		# post-increment src pointer (fetch-ahead)
	mov	1,g0		# preload g0=1: the final "subo 1" below
				# yields 0 (null) if char is never found
	mov	g3,g6		# preload g6=all-char word: the final byte scan
				# then terminates on its first iteration
Lsearch_for_word_with_char_or_null:
	mov	g4,g5		# copy word
	scanbyte 0,g5		# check for null byte
	ld	(g2),g4		# fetch ahead next word of src
	bo	Lword_has_null	# branch if null found
	scanbyte g3,g5		# check for byte with char
	addo	4,g2,g2		# post-increment src pointer
	bno	Lsearch_for_word_with_char_or_null # branch if no copy of char
	mov	g5,g6		# save word that has char in it (at least once)
	subo	4,g2,g0		# save addr of byte after word with char
	b	Lsearch_for_word_with_char_or_null
Lword_has_null:
	subo	4,g2,g2		# move src pointer back to word with null
Lfind_null:
	addo	1,g2,g2		# advance src pointer to byte after current
	and	g7,g5,g14	# extract next byte
	cmpo	g1,g14		# is current byte char?
	shro	8,g5,g5		# position next byte for extraction
	bne	1f		# skip if not char sought after
	mov	g2,g0		# save addr of byte after char
	mov	g3,g6		# save word of all char to short circuit search
1:	cmpobne	0,g14,Lfind_null # loop until current byte is null
Lfind_last_char:
	# g6 holds the last word seen containing char; scan it from the
	# high byte down, decrementing g0 until the char is located.
	rotate	8,g6,g6		# position next highest byte
	and	g7,g6,g5	# extract byte
	subo	1,g0,g0		# move pointer to that byte (or nullify)
	cmpobne	g5,g1,Lfind_last_char	# branch if not at char
	bx	(g13)		# g0 = addr of last char in src (or null); g14 = 0
Lrett:
	ret
/* end of strrchr */
|
4ms/metamodule-plugin-sdk
| 8,544
|
plugin-libc/newlib/libc/machine/i960/strncmp_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "sncmp_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncmp (optimized assembler version for the CA)
result = strncmp (src1_addr, src2_addr, max_bytes)
compare the null terminated string pointed to by src1_addr to
the string space pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicly less than src2_addr, and 1
if it is lexicly greater. Do not compare more than max_bytes bytes.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strncmp
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source1 is word aligned, source2 is not
4) source2 is word aligned, source1 is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g14 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers are sufficient to implement the routine.
The registers are used as follows:
g0 original src1 ptr; extracted word; return result
g1 src2 ptr; 0xff -- byte extraction mask
g2 maximum number of bytes to compare
g3 src2 word ptr
Little endian:
g4 lsw of src1
g5 msw of src1
g6 src2 word
g7 src1 word ptr
Big endian:
g4 msw of src1
g5 lsw of src1
g6 src1 word ptr
g7 src2 word
g13 return address
g14 shift count
*/
#if __i960_BIG_ENDIAN__
#define MSW g4
#define LSW g5
#define SRC1 g6
#define SRC2 g7
#else
#define LSW g4
#define MSW g5
#define SRC2 g6
#define SRC1 g7
#endif
	.globl	_strncmp
	.globl	__strncmp
	.leafproc	_strncmp, __strncmp
	.align	2
_strncmp:
	# int strncmp(const char *s1 /*g0*/, const char *s2 /*g1*/,
	#             size_t n /*g2*/) -- call entry point
#ifndef	__PIC
	lda	Lrett,g14		# address of return stub for "call" entry
#else
	lda	Lrett-(.+8)(ip),g14	# PIC: ip-relative address of return stub
#endif
__strncmp:
Lrestart:	# re-entered from Lsrc2_unaligned with adjusted g0/g1/g2
	notand	g0,3,SRC1	# extract word addr of start of src1
	lda	(g14),g13	# preserve return address
	cmpibge.f 0,g2,Lequal_exit	# return equality if max_bytes <= 0
#if	__i960_BIG_ENDIAN__
	cmpo	g0,SRC1		# check alignment of src1
#endif
	ld	(SRC1),LSW	# fetch word with at least first byte of src1
	notand	g1,3,g3		# extract word addr of start of src2
	ld	4(SRC1),MSW	# fetch second word of src1
#if	__i960_BIG_ENDIAN__
	bne	Lsrc1_unaligned	# branch if src1 is unaligned
	cmpo	g3,g1		# check alignment of src2
	ld	(g3),SRC2	# fetch word with at least first byte of src2
	shlo	3,g0,g14	# compute bit shift count for src1
	subo	g14,0,g14	# adjust shift count for big endian
	lda	8(SRC1),SRC1	# advance src1 word addr
	bne.f	Lsrc2_unaligned	# branch if src2 is NOT word aligned
	/* src2 is word aligned */
	mov	LSW,g0
Lwloop2:	# word comparing loop (both sources aligned, big endian)
	cmpo	SRC2,g0		# compare src1 and src2 words
	lda	0xff000000,g1	# byte extraction mask
	mov	MSW,LSW		# move msw of src1 to lsw
	ld	(SRC1),MSW	# pre-fetch next msw of src1
	addo	4,SRC1,SRC1	# post-increment src1 addr
	lda	4(g3),g3	# pre-increment src2 addr
	bne.f	Lcloop		# branch if src1 and src2 unequal
	scanbyte 0,g0		# check for null byte in src1 word
	ld	(g3),SRC2	# pre-fetch next word of src2
	mov	LSW,g0		# extract word of src1
	subi	4,g2,g2		# decrement maximum byte count
	bo.f	Lequal_exit	# branch if null byte encountered
	cmpibl.t 0,g2,Lwloop2	# branch if max_bytes not reached yet
	b	Lequal_exit	# strings were equal up through max_bytes
Lsrc1_unaligned:
#endif
	cmpo	g3,g1		# check alignment of src2
	ld	(g3),SRC2	# fetch word with at least first byte of src2
	shlo	3,g0,g14	# compute bit shift count for src1
#if	__i960_BIG_ENDIAN__
	subo	g14,0,g14	# adjust shift count for big endian
#endif
	eshro	g14,g4,LSW	# extract (realign) word of src1
	lda	8(SRC1),SRC1	# advance src1 word addr
	bne.f	Lsrc2_unaligned	# branch if src2 is NOT word aligned
	/* at least src2 is word aligned */
	mov	LSW,g0
Lwloop:	# word comparing loop (src2 aligned; src1 realigned via eshro)
	cmpo	SRC2,g0		# compare src1 and src2 words
#if	__i960_BIG_ENDIAN__
	lda	0xff000000,g1	# byte extraction mask
#else
	lda	0xff,g1		# byte extraction mask
#endif
	mov	MSW,LSW		# move msw of src1 to lsw
	ld	(SRC1),MSW	# pre-fetch next msw of src1
	addo	4,SRC1,SRC1	# post-increment src1 addr
	lda	4(g3),g3	# pre-increment src2 addr
	bne.f	Lcloop		# branch if src1 and src2 unequal
	scanbyte 0,g0		# check for null byte in src1 word
	ld	(g3),SRC2	# pre-fetch next word of src2
	eshro	g14,g4,g0	# extract (realign) next word of src1
	subi	4,g2,g2		# decrement maximum byte count
	bo.f	Lequal_exit	# branch if null byte encountered
	cmpibl.t 0,g2,Lwloop	# branch if max_bytes not reached yet
	b	Lequal_exit	# strings were equal up through max_bytes
Lcloop_setup:	# setup for coming from Lsrc2_unaligned
	mov	LSW,g0		# restore extracted src1 word
#if	__i960_BIG_ENDIAN__
	lda	0xff000000,g1	# byte extraction mask
#else
	lda	0xff,g1		# byte extraction mask
#endif
Lcloop:	# character comparing loop: locate differing/null byte in words
	and	SRC2,g1,g3	# extract next char of src2
	and	g0,g1,LSW	# extract next char of src1
	cmpobne.f LSW,g3,.diff	# branch if bytes differ
	cmpo	0,LSW		# check for null byte
#if	__i960_BIG_ENDIAN__
	shro	8,g1,g1		# shift mask for next byte
#else
	shlo	8,g1,g1		# shift mask for next byte
#endif
	subi	1,g2,g2		# decrement character counter
	bne.t	Lcloop		# branch if null not reached
	/* words are equal up thru null byte */
Lequal_exit:
	mov	0,g14		# conform to register conventions
	lda	0,g0		# return zero, indicating equality
	bx	(g13)		# return
Lrett:
	ret
.diff:
	mov	0,g14		# conform to register conventions
	bl	Lless_than_exit	# branch if src1 byte < src2 byte
Lgreater_than_exit:
	cmpibge.f 0,g2,Lequal_exit # equal if difference lies beyond max_bytes
	mov	1,g0
	bx	(g13)		# g0 = 1 (src1 > src2)
Lless_than_exit:
	cmpibge.f 0,g2,Lequal_exit # equal if difference lies beyond max_bytes
	subi	1,0,g0
	bx	(g13)		# g0 = -1 (src1 < src2)
Lsrc2_unaligned:
	notor	g1,3,g14	# first step in computing new src1 ptr
	ld	4(g3),SRC1	# fetch second word of src2
	shlo	3,g1,MSW	# compute bit shift count for src2
#if	__i960_BIG_ENDIAN__
	subo	MSW,0,MSW	# adjust shift count for big endian
#endif
	eshro	MSW,g6,SRC2	# extract (realign) word of src2
	cmpo	LSW,SRC2	# compare src1 and src2 words
	lda	4(g3),g1	# set new src2 ptr
	bne.f	Lcloop_setup	# first four bytes differ
	scanbyte 0,LSW		# check for null byte
	subo	g14,g0,g0	# second (final) step in computing new src1 ptr
	addi	g14,g2,g2	# compute new max_bytes too
	lda	(g13),g14	# prepare return pointer for Lrestart
	bno.t	Lrestart	# if null byte not encountered, restart
	/* with both string fetches shifted such that*/
	/* src2 is now word aligned.*/
	mov	0,g14		# conform to register conventions.
	lda	0,g0		# return indicator of equality.
	bx	(g13)
|
4ms/metamodule-plugin-sdk
| 4,835
|
plugin-libc/newlib/libc/machine/i960/memchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memchr.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memchr (optimized assembler version for the 80960K series)
src_addr = memchr (src_addr, char, max_bytes)
searching from src_addr for a span of max_bytes bytes, return a
pointer to the first byte in the source array that contains the
indicated char. Return null if the char is not found.
Undefined behavior will occur if the last byte of the source array
is in the last two words of the program's allocated memory space.
This is so because memchr fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Strategy:
Fetch the source array by words and scanbyte the words for the
char until either a word with the byte is found or max_bytes is
exhausted. In the former case, move through the word to find the
matching byte and return its memory address. In the latter case,
return zero (null).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
2) Rather than decrementing max_bytes to zero, I calculate the
address of the byte after the last byte of the source array, and
quit when the source byte pointer passes that. Refining, actually
I calculate the address of the fifth byte after the last byte of
the source array, because the source byte pointer is ahead of the
actual examination point due to fetch ahead.
*/
	.globl	_memchr
	.globl	__memchr
	.leafproc	_memchr, __memchr
	.align	2
_memchr:
	# void *memchr(const void *s /*g0*/, int c /*g1*/,
	#              size_t n /*g2*/) -- call entry point
#ifndef	__PIC
	lda	Lrett,g14		# address of return stub for "call" entry
#else
	lda	Lrett-(.+8)(ip),g14	# PIC: ip-relative address of return stub
#endif
__memchr:
	mov	g14,g13		# preserve return address
	lda	0xff,g7		# byte extraction mask
	and	g1,g7,g1	# make char an 8-bit ordinal
	mov	0,g14		# conform to register linkage standard
	cmpibge	0,g2,Lnot_found	# do nothing if max_bytes <= 0
	addo	4,g0,g6		# post-increment src word pointer (fetch-ahead)
	addo	g2,g6,g2	# ending address, biased +4 for the fetch-ahead
	ld	(g0),g4		# fetch first word
	shlo	8,g1,g3		# broadcast the char to four bytes
	or	g1,g3,g3
	shlo	16,g3,g5
	or	g3,g5,g3
Lsearch_for_word_with_char:
	mov	g4,g5		# keep a copy of word
	scanbyte g3,g5		# check for byte with char
	ld	(g6),g4		# fetch next word of src
	bo	Lsearch_for_char # branch if char found in this word
	addo	4,g6,g6		# post-increment src word pointer
	cmpobge	g2,g6,Lsearch_for_word_with_char # loop while bytes remain
Lnot_found:
	mov	0,g0		# char not found. Return null
	bx	(g13)		# g0 = addr of char in src (or null); g14 = 0
Lrett:
	ret
Lsearch_for_char:
	# word in g5 contains char; scan its bytes low-to-high, still
	# honoring the max_bytes limit (g6 runs 5 ahead of the byte examined)
	cmpobe.f g6,g2,Lnot_found # quit if max_bytes exhausted
	and	g5,g7,g0	# extract byte
	cmpo	g1,g0		# is it char?
	addo	1,g6,g6		# bump src byte ptr
	shro	8,g5,g5		# shift word to position next byte
	bne.t	Lsearch_for_char
	subo	5,g6,g0		# undo the 5-byte fetch-ahead bias
	bx	(g13)
/* end of memchr */
|
4ms/metamodule-plugin-sdk
| 4,628
|
plugin-libc/newlib/libc/machine/i960/memcmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcmp.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memcmp (optimized assembler version for the 80960K series)
result = memcmp (src1_addr, src2_addr, max_bytes)
compare the byte array pointed to by src1_addr to the byte array
pointed to by src2_addr. Return 0 iff the arrays are equal, -1 iff
src1_addr is lexicographically less than src2_addr, and 1 iff it is
lexicographically greater. Do not compare more than max_bytes bytes.
Undefined behavior will occur if the end of either source array
is in the last two words of the program's allocated memory space.
This is so because memcmp fetches ahead. Disallowing the fetch ahead
would impose a severe performance penalty.
Strategy:
Fetch the source strings by words and compare the words until either
a differing word is found or max_bytes is exhausted. In the former
case, move through the words to find the differing byte and return
plus or minus one, appropriately. In the latter case, return zero
(equality).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
2) Rather than decrementing max_bytes to zero, I calculate the
address of the byte after the last byte of the source_1 array, and
quit when the source byte pointer passes that.
*/
	.globl	_memcmp
	.globl	__memcmp
	.leafproc	_memcmp,__memcmp
	.align	2
_memcmp:
	# int memcmp(const void *s1 /*g0*/, const void *s2 /*g1*/,
	#            size_t n /*g2*/) -- call entry point
#ifndef	__PIC
	lda	.Lrett,g14		# address of return stub for "call" entry
#else
	lda	.Lrett-(.+8)(ip),g14	# PIC: ip-relative address of return stub
#endif
__memcmp:
	mov	g14,g13		# preserve return address
	ldconst	0,g14		# conform to register conventions
	cmpibge	0,g2,Lequal_exit # quit (equal) if max_bytes <= 0
	addo	g0,g2,g2	# g2 = addr of byte after last byte in src1
.Lwloop:
	cmpo	g0,g2		# any bytes left to compare?
	ld	(g0), g5	# fetch word of source_1
	bge	Lequal_exit	# quit (equal) if max_bytes exhausted
	ld	(g1), g3	# fetch word of source_2
	addo	4,g0,g0		# post-increment source_1 byte ptr
	addo	4,g1,g1		# post-increment source_2 byte ptr
	cmpobe	g5,g3,.Lwloop	# branch if source words are equal
	ldconst	0xff,g4		# byte extraction mask
	subo	4,g0,g0		# back up src1 ptr to the differing word
.Lcloop: and	g4,g5,g7	# extract and compare individual bytes
	and	g4,g3,g6
	cmpobne	g7,g6,.diff	# branch if they are different
	shlo	8,g4,g4		# position mask for next extraction
	addo	1,g0,g0		# advance byte position
	cmpobl	g0,g2,.Lcloop	# quit (equal) if max_bytes is exhausted
Lequal_exit:
	mov	0,g0		# arrays equal through max_bytes: return 0
	bx	(g13)
.Lrett:
	ret
.diff:	bl	.neg		# arrays differ at current byte.
	/* return 1 or -1 appropriately */
	mov	1,g0		# src1 byte > src2 byte: return 1
	bx	(g13)
.neg:	subi	1,0,g0		# src1 byte < src2 byte: return -1
.Lexit:
	bx	(g13)
/* end of memcmp */
|
4ms/metamodule-plugin-sdk
| 4,046
|
plugin-libc/newlib/libc/machine/i960/strlen.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strlen.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strlen (optimized assembler version for the 80960K series)
src_addr = strlen (src_addr)
return the number of bytes that precede the null byte in the
string pointed to by src_addr.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last four words of the program's
allocated memory space. This is so because strlen fetches ahead
several words. Disallowing the fetch ahead would impose a severe
performance penalty.
Strategy:
Fetch the source array by long-words and scanbyte the words for the
null byte until found. Examine the word in which the null byte is
found, to determine its actual position, and return the length.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that many source strings will be word
aligned to begin with.
*/
	.globl	_strlen
	.globl	__strlen
	.leafproc	_strlen, __strlen
	.align	2
_strlen:
	# size_t strlen(const char *s /*g0*/) -- call entry point
#ifndef	__PIC
	lda	Lrett,g14		# address of return stub for "call" entry
#else
	lda	Lrett-(.+8)(ip),g14	# PIC: ip-relative address of return stub
#endif
__strlen:
	mov	g14,g13		# preserve return address
	ldl	(g0),g4		# fetch first two words (g4,g5)
	addo	8,g0,g2		# post-increment src word pointer
	lda	0xff,g3		# byte extraction mask
Lsearch_for_word_with_null_byte:
	scanbyte 0,g4		# check first word for null byte
	mov	g5,g7		# copy second word
	bo.f	Lsearch_for_null # branch if null found in first word
	scanbyte 0,g7		# check second word for null byte
	ldl	(g2),g4		# fetch ahead next pair of words of src
	addo	8,g2,g2		# post-increment src word pointer
	bno	Lsearch_for_word_with_null_byte	# branch if null not found yet
	subo	4,g2,g2		# null in 2nd word: bias ptr back one word
	mov	g7,g4		# move word with null to search word
Lsearch_for_null:
	subo	9,g2,g2		# rewind (ptr was 9 past first byte of word)
Lsearch_for_null.a:
	and	g4,g3,g14	# extract byte
	cmpo	0,g14		# is it null?
	addo	1,g2,g2		# bump src byte ptr
	shro	8,g4,g4		# shift word to position next byte
	bne	Lsearch_for_null.a
Lexit_code:
	subo	g0,g2,g0	# length = addr of null byte - start addr
	bx	(g13)		# g0 = string length; g14 = 0
Lrett:
	ret
/* end of strlen */
|
4ms/metamodule-plugin-sdk
| 4,364
|
plugin-libc/newlib/libc/machine/i960/strchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strchr.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strchr (optimized assembler version for the 80960K series)
src_addr = strchr (src_addr, char)
return a pointer to the first byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because strchr fetches ahead.
Disallowing the fetch ahead would impose a severe performance penalty.
Strategy:
Fetch the source string by words and scanbyte the words for the
char until either a word with the byte is found or the null byte is
encountered. In the former case, move through the word to find the
matching byte and return its memory address. In the latter case,
return zero (null).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
*/
	.globl	_strchr
	.globl	__strchr
	.leafproc	_strchr, __strchr
	.align	2
_strchr:
	# char *strchr(const char *s /*g0*/, int c /*g1*/) -- call entry point
#ifndef	__PIC
	lda	Lrett,g14		# address of return stub for "call" entry
#else
	lda	Lrett-(.+8)(ip),g14	# PIC: ip-relative address of return stub
#endif
__strchr:
	ld	(g0),g4		# fetch first word
	lda	0xff,g7		# byte extraction mask
	and	g1,g7,g1	# make char an 8-bit ordinal
	shlo	8,g1,g2		# broadcast the char to all four bytes of g3
	or	g1,g2,g2
	shlo	16,g2,g5
	or	g2,g5,g3
	mov	g14,g13		# preserve return address
	addo	4,g0,g0		# post-increment src pointer (fetch-ahead)
	mov	0,g14		# conform to register linkage standard
Lsearch_for_word_with_char_or_null:
	mov	g4,g5		# copy word
	scanbyte g3,g5		# check for byte with char
	ld	(g0),g4		# fetch next word of src
	bo	Lsearch_for_char # branch if char found
	scanbyte 0,g5		# check for null byte
	addo	4,g0,g0		# post-increment src pointer
	bno	Lsearch_for_word_with_char_or_null # branch if not null
Lnot_found:
	mov	0,g0		# char not found. Return null
Lexit_code:
	bx	(g13)		# g0 = addr of char in src (or null); g14 = 0
Lrett:
	ret
Lsearch_for_char:
	# word in g5 contains char and/or null; scan bytes low-to-high
	subo	5,g0,g0		# undo the 5-byte fetch-ahead bias
Lsearch_for_char.a:
	and	g5,g7,g6	# extract byte
	cmpo	g1,g6		# is it char?
	addo	1,g0,g0		# bump src byte ptr
	shro	8,g5,g5		# shift word to position next byte
	be	Lexit_code	# found: g0 already points at the char
	cmpobne	0,g6,Lsearch_for_char.a	# quit if null comes before char
	b	Lnot_found
/* end of strchr */
|
4ms/metamodule-plugin-sdk
| 4,396
|
plugin-libc/newlib/libc/machine/i960/strcmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcmp.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcmp (optimized assembler version for the 80960K Series)
result = strcmp (src1_addr, src2_addr)
compare the null terminated string pointed to by src1_addr to
the string pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicographically less than src2_addr,
and 1 if it is lexicographically greater.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strcmp fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
Fetch the source strings by words and compare the words until either
differing words are found or the null byte is encountered. In either
case, move through the word until either the differing byte if found,
in which case return -1 or 1 appropriately; or the null byte is
encountered, in which case, return zero (equality).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that many source
strings will be word aligned to begin with.
*/
	.globl	_strcmp
	.globl	__strcmp
	.leafproc	_strcmp,__strcmp
	.align	2
_strcmp:
	# int strcmp(const char *s1 /*g0*/, const char *s2 /*g1*/)
	# -- call entry point
#ifndef	__PIC
	lda	.Lrett,g14		# address of return stub for "call" entry
#else
	lda	.Lrett-(.+8)(ip),g14	# PIC: ip-relative address of return stub
#endif
__strcmp:
	ld	(g0), g5	# fetch first word of source_1
	mov	g14,g7		# preserve return address
	ldconst	0,g14		# conform to register conventions
	ldconst	0xff,g4		# byte extraction mask
.Lwloop:
	addo	4,g0,g0		# post-increment source_1 byte ptr
	ld	(g1), g3	# fetch word of source_2
	scanbyte 0,g5		# does word have a null byte?
	mov	g5,g2		# save a copy of the source_1 word
	be	.Lcloop		# branch if null byte encountered
	cmpo	g2,g3		# are the source words the same?
	addo	4,g1,g1		# post-increment source_2 byte ptr
	ld	(g0), g5	# fetch ahead next word of source_1
	be	.Lwloop		# loop if words equal; fall thru on mismatch
.Lcloop: and	g4,g2,g5	# extract and compare individual bytes
	and	g4,g3,g6
	cmpobne	g5,g6,.diff	# if they differ, go return 1 or -1
	cmpo	0,g6		# they are the same. Are they null?
	shlo	8,g4,g4		# position mask for next extraction
	bne	.Lcloop		# loop if null not encountered
	mov	0,g0		# strings equal: return 0
	bx	(g7)
.Lrett:
	ret
.diff:	bl	.neg		# branch if src1 byte < src2 byte
	mov	1,g0		# src1 byte > src2 byte: return 1
	bx	(g7)
.neg:	subi	1,0,g0		# src1 byte < src2 byte: return -1
.Lexit:
	bx	(g7)
/* end of strcmp */
|
4ms/metamodule-plugin-sdk
| 5,675
|
plugin-libc/newlib/libc/machine/i960/strncpy.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strncpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncpy (optimized assembler version for the 80960K Series)
dest_addr = strncpy (dest_addr, src_addr, max_bytes)
copy the null terminated string pointed to by src_addr to the
string pointed to by dest_addr. Return the original dest_addr.
If the source string is shorter than max_bytes, then null-pad
the destination string. If it is longer than max_bytes, the
copy stops at max_bytes bytes (and no terminating null appears
in the destination string).
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strncpy fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
Fetch and store the strings by words and go to a character move loop
as soon as a null byte is encountered. If max_bytes is exhausted
first, then terminate after moving only max_bytes (with the last
0, 1, 2, or 3 bytes moved as single bytes, not as a word).
Otherwise, the character move loop moves the last bytes or the
source string, and then null-pads the destination string until
max_bytes is exhausted.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment.
2) When the null byte is encountered in a source word, null out the
higher-numbered bytes in that word, store the word in the destination,
and go to the word null-padder, which may eventually go to the byte
null-padder.
*/
/*
 dest_addr = strncpy (dest_addr, src_addr, max_bytes)
 i960 leaf procedure (bal entry at __strncpy, call entry at _strncpy).
 In:   g0 = dest, g1 = src, g2 = max_bytes, g14 = return address
 Out:  g0 = original dest (never modified); g14 = 0
 Uses: g3 = byte mask, g4 = current source word, g5 = running dest ptr,
       g6 = src limit for word loop, g7 = prefetched source word (later:
       saved mask), g13 = saved return address, g14 = extracted byte.
*/
.globl _strncpy
.globl __strncpy
.leafproc _strncpy,__strncpy
.align 2
_strncpy:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strncpy:
mov g14, g13 # preserve return address
cmpibge 0,g2,Lexit # quit early if max_bytes <= 0
ld (g1), g7 # fetch the first word of the source
mov g0, g5 # g5 = running dest ptr; g0 stays the return value
lda 0xff, g3 # byte extraction mask
addo g1, g2, g6 # g6 = src + max_bytes (limit for word loop)
addo g2, g5, g2 # g2 = dest + max_bytes (limit for char loop)
Lwloop: # word copying loop
addo 4, g1, g1 # post-increment source ptr
cmpo g6, g1 # max_bytes < 4 ?
mov g7, g4 # keep a copy of source word
bl Lcloop.a # if less than four bytes to go, go to char loop
scanbyte 0, g4 # null byte found?
ld (g1), g7 # pre-fetch next word of the source
be Lcloop.c # go to char loop if null encountered
st g4, (g5) # store current word
addo 4, g5, g5 # post-increment destination ptr
b Lwloop
Lcloop.a: # character copying loop (fewer than 4 bytes remain)
and g3, g4, g14 # extract byte
Lcloop.b: # byte store / null-pad loop (g14 = byte to store)
cmpo g2, g5 # max_bytes <= 0 ?
shro 8, g4, g4 # position word to extract next byte
be Lexit # exit if max_bytes exhausted
cmpo 0, g14 # is it null?
stob g14, (g5) # store it
addo 1, g5, g5 # post-increment dest ptr
bne Lcloop.a # branch if we are NOT null padding
b Lcloop.b # branch if we are null padding (g14 stays 0)
Lexit:
mov 0, g14 # leaf-proc convention: g14 must be 0 on return
bx (g13) # g0 = dest string address; g14 = 0
Lrett:
ret
Lcloop.c: # scan word for the null byte, build pad mask
and g3, g4, g14 # extract byte
cmpo 0, g14 # is it null?
mov g3, g7 # save mask
shlo 8, g3, g3 # shift mask to next byte position
bne Lcloop.c # loop until null found
subo 1, g7, g3 # mask of bytes BELOW the null byte
and g3, g4, g4 # null-out stuff after (and including) null byte
st g4, (g5) # store last part of src and first of null-pad
subo 8,g2,g6 # adjust max_byte counter
Lzwloop: # word-at-a-time null padding loop
cmpo g5, g6 # max_bytes < 4 ?
addo 4, g5, g5
bg Lcloop.b # if so, goto character loop (g14 = 0 here)
st g14, (g5) # store four null bytes
b Lzwloop
/* end of strncpy */
|
4ms/metamodule-plugin-sdk
| 3,261
|
plugin-libc/newlib/libc/machine/arc/memcpy.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || \
(!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
/* Adapted from memcpy-bs.S. */
/* We assume that most sources and destinations are aligned, and
that also lengths are mostly a multiple of four, although to a lesser
extent. */
; void *memcpy (void *dst, const void *src, size_t n)  -- ARC601 / no
; barrel-shifter variant.
; In:  r0 = dst, r1 = src, r2 = n.  Out: r0 = dst (left untouched).
; r5 = running dst ptr, r6 = loop limit, r12/r3 = data words in flight.
; Word path taken only when both pointers are 4-byte aligned; the tail
; partial word is merged into the destination with an xor/bmsk/xor
; read-modify-write so only (n mod 4) bytes are overwritten.
ENTRY (memcpy)
or r3,r0,r1
bmsk.f 0,r3,1 ; test low 2 bits of (dst | src): any misalignment?
breq_s r2,0,.Lnil
mov_s r5,r0
bne.d .Lcopy_bytewise
add r6,r0,r2 ; delay slot: r6 = dst + n
sub_s r3,r2,1
ld_s r12,[r1,0]
bbit0.d r3,2,.Lnox4 ; odd number of words? copy one up front
sub r6,r6,8
st.ab r12,[r5,4]
ld.a r12,[r1,4]
.Lnox4:
brlo r2,9,.Lendloop
.Lnox4a: ; main loop: two words per iteration, software-pipelined
ld_s r3,[r1,4]
st.ab r12,[r5,8]
ld.a r12,[r1,8]
brlo.d r5,r6,.Lnox4a
st r3,[r5,-4] ; delay slot: store second word of the pair
.Lendloop:
#ifdef __LITTLE_ENDIAN__
ld r3,[r5,0] ; read existing dst word for the tail merge
add3 r2,-1,r2 ; r2 = 8*(n mod 8 bits) - 1: highest bit to keep
; uses long immediate
xor_s r12,r12,r3 ; merge: dst = (src & mask) | (dst & ~mask)
bmsk r12,r12,r2
xor_s r12,r12,r3
#else /* BIG ENDIAN */
bmsk_s r2,r2,1
breq_s r2,0,.Last_store ; no partial tail: store full word
ld r3,[r5,0]
sub3 r2,31,r2
; uses long immediate
xor_s r3,r3,r12 ; same xor/bmsk/xor merge, big-endian bit numbering
bmsk r3,r3,r2
xor_s r12,r12,r3
#endif /* ENDIAN */
.Last_store:
j_s.d [blink]
st r12,[r5,0] ; delay slot: final (possibly merged) word
.Lnil:
j_s [blink] ; n == 0: nothing to do
.balign 4
.Lcopy_bytewise: ; unaligned path, two bytes per iteration
ldb_s r12,[r1,0]
bbit1.d r2,0,.Lnox1 ; odd n? skip the up-front extra byte
sub r6,r6,2
stb.ab r12,[r5,1]
ldb.a r12,[r1,1]
.Lnox1:
brlo r2,3,.Lendbloop
.Lnox1a:
ldb_s r3,[r1,1]
stb.ab r12,[r5,2]
ldb.a r12,[r1,2]
brlo.d r5,r6,.Lnox1a
stb r3,[r5,-1] ; delay slot: store second byte of the pair
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0] ; delay slot: final byte
ENDFUNC (memcpy)
#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,337
|
plugin-libc/newlib/libc/machine/arc/memcpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if !defined (__ARC601__) && !defined (__ARCHS__) \
&& defined (__ARC_BARREL_SHIFTER__)
/* Mostly optimized for ARC700, but not bad for ARC600 either. */
/* This memcpy implementation does not support objects of 1GB or larger -
the check for alignment does not work then. */
/* We assume that most sources and destinations are aligned, and
that also lengths are mostly a multiple of four, although to a lesser
extent. */
; void *memcpy (void *dst, const void *src, size_t n)  -- barrel-shifter
; variant (ARC600/ARC700 class), uses zero-overhead loops (lp_count).
; In:  r0 = dst, r1 = src, r2 = n.  Out: r0 = dst (left untouched).
; r5 = running dst ptr, r12/r3 = data words in flight.
; NOTE: objects >= 1 GB unsupported -- the asl-by-30 alignment test
; folds the size into the comparison (see header comment above).
ENTRY (memcpy)
or r3,r0,r1
asl_s r3,r3,30 ; r3 = 0 iff both 4-byte aligned, else huge
mov_s r5,r0
brls.d r2,r3,.Lcopy_bytewise ; unaligned (or n == 0): go bytewise
sub.f r3,r2,1 ; delay slot; also sets C for n == 0
ld_s r12,[r1,0]
asr.f lp_count,r3,3 ; lp_count = number of 2-word iterations
bbit0.d r3,2,.Lnox4 ; odd word count? copy one word up front
bmsk_s r2,r2,1 ; delay slot: r2 = n mod 4 (tail byte count)
st.ab r12,[r5,4]
ld.a r12,[r1,4]
.Lnox4:
lppnz .Lendloop ; zero-overhead loop: 2 words per pass
ld_s r3,[r1,4]
st.ab r12,[r5,4]
ld.a r12,[r1,8]
st.ab r3,[r5,4]
.Lendloop:
breq_l r2,0,.Last_store ; no partial tail word
ld r3,[r5,0] ; read existing dst word for the merge
#ifdef __LITTLE_ENDIAN__
add3 r2,-1,r2 ; bit index of last byte to keep
; uses long immediate
xor_s r12,r12,r3 ; merge: dst = (src & mask) | (dst & ~mask)
bmsk r12,r12,r2
xor_s r12,r12,r3
#else /* BIG ENDIAN */
sub3 r2,31,r2
; uses long immediate
xor_s r3,r3,r12 ; same xor/bmsk/xor merge, big-endian bit numbering
bmsk r3,r3,r2
xor_s r12,r12,r3
#endif /* ENDIAN */
.Last_store:
j_s.d [blink]
st r12,[r5,0] ; delay slot: final (possibly merged) word
.balign 4
.Lcopy_bytewise:
jcs [blink] ; C set means n == 0: return immediately
ldb_s r12,[r1,0]
lsr.f lp_count,r3 ; lp_count = (n-1)/2; C = odd residue flag
bcc_s .Lnox1
stb.ab r12,[r5,1]
ldb.a r12,[r1,1]
.Lnox1:
lppnz .Lendbloop ; zero-overhead loop: 2 bytes per pass
ldb_s r3,[r1,1]
stb.ab r12,[r5,1]
ldb.a r12,[r1,2]
stb.ab r3,[r5,1]
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0] ; delay slot: final byte
ENDFUNC (memcpy)
#endif /* !__ARC601__ && !__ARCHS__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,244
|
plugin-libc/newlib/libc/machine/arc/setjmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* ABI interface file
these are the stack mappings for the registers
as stored in the ABI for ARC */
.file "setjmp.S"
ABIr13 = 0
ABIr14 = ABIr13 + 4
ABIr15 = ABIr14 + 4
ABIr16 = ABIr15 + 4
ABIr17 = ABIr16 + 4
ABIr18 = ABIr17 + 4
ABIr19 = ABIr18 + 4
ABIr20 = ABIr19 + 4
ABIr21 = ABIr20 + 4
ABIr22 = ABIr21 + 4
ABIr23 = ABIr22 + 4
ABIr24 = ABIr23 + 4
ABIr25 = ABIr24 + 4
ABIr26 = ABIr25 + 4
ABIr27 = ABIr26 + 4
ABIr28 = ABIr27 + 4
ABIr29 = ABIr28 + 4
ABIr30 = ABIr29 + 4
ABIr31 = ABIr30 + 4
ABIlpc = ABIr31 + 4
ABIlps = ABIlpc + 4
ABIlpe = ABIlps + 4
ABIflg = ABIlpe + 4
ABImlo = ABIflg + 4
ABImhi = ABImlo + 4
.text
.align 4
.global setjmp
.type setjmp,@function
; int setjmp (jmp_buf env)
; In:  r0 = env (buffer laid out per the ABI offsets defined above).
; Out: r0 = 0 (direct return); a later longjmp resumes here returning
;      its val argument.
; Saves the callee-saved context: r13-r30, blink, lp_count,
; lp_start/lp_end, and (on cores other than ARC700/EM/HS) status32.
; mlo/mhi saving is disabled pending configure support (see below).
setjmp:
st r13, [r0, ABIr13]
st r14, [r0, ABIr14]
st r15, [r0, ABIr15]
st r16, [r0, ABIr16]
st r17, [r0, ABIr17]
st r18, [r0, ABIr18]
st r19, [r0, ABIr19]
st r20, [r0, ABIr20]
st r21, [r0, ABIr21]
st r22, [r0, ABIr22]
st r23, [r0, ABIr23]
st r24, [r0, ABIr24]
st r25, [r0, ABIr25]
st r26, [r0, ABIr26]
st r27, [r0, ABIr27]
st r28, [r0, ABIr28]
st r29, [r0, ABIr29]
st r30, [r0, ABIr30]
st blink, [r0, ABIr31] ; return address doubles as longjmp target
st lp_count, [r0, ABIlpc]
lr r2, [lp_start] ; loop aux registers need lr to read
lr r3, [lp_end]
st r2, [r0, ABIlps]
st r3, [r0, ABIlpe]
#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
; Till the configure changes are decided, and implemented, the code working on
; mlo/mhi and using mul64 should be disabled.
; st mlo, [r0, ABImlo]
; st mhi, [r0, ABImhi]
lr r2, [status32]
st r2, [r0, ABIflg]
#endif
j.d [blink]
mov r0,0 ; delay slot: direct call returns 0
.Lfe1:
.size setjmp,.Lfe1-setjmp
.align 4
.global longjmp
.type longjmp,@function
; void longjmp (jmp_buf env, int val)
; In:  r0 = env (filled by setjmp), r1 = val.
; Restores the saved context and jumps to the saved blink, making the
; matching setjmp appear to return val (forced to 1 when val == 0, as
; required by the C standard).
longjmp:
; load registers
ld r13, [r0, ABIr13]
ld r14, [r0, ABIr14]
ld r15, [r0, ABIr15]
ld r16, [r0, ABIr16]
ld r17, [r0, ABIr17]
ld r18, [r0, ABIr18]
ld r19, [r0, ABIr19]
ld r20, [r0, ABIr20]
ld r21, [r0, ABIr21]
ld r22, [r0, ABIr22]
ld r23, [r0, ABIr23]
ld r24, [r0, ABIr24]
ld r25, [r0, ABIr25]
ld r26, [r0, ABIr26]
ld r27, [r0, ABIr27]
ld r28, [r0, ABIr28]
ld r3, [r0, ABIr29]
mov r29, r3
ld r3, [r0, ABIr30]
mov r30, r3
ld blink, [r0, ABIr31] ; resume address = setjmp's return address
ld r3, [r0, ABIlpc]
mov lp_count, r3
ld r2, [r0, ABIlps]
ld r3, [r0, ABIlpe]
sr r2, [lp_start] ; loop aux registers need sr to write
sr r3, [lp_end]
#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
; NOTE(review): setjmp's ABImlo/ABImhi stores are commented out, so
; these loads read slots setjmp never wrote; the values are unused
; below while the mlo/mhi restore stays disabled.
ld r2, [r0, ABImlo]
ld r3, [r0, ABImhi]
; We do not support restoring of mulhi and mlo registers, yet.
; mulu64 0,r2,1 ; restores mlo
; mov 0,mlo ; force multiply to finish
; sr r3, [mulhi]
ld r2, [r0, ABIflg]
flag r2 ; restore "status32" register
#endif
mov.f r1, r1 ; to avoid return 0 from longjmp
mov.eq r1, 1
j.d [blink]
mov r0,r1 ; delay slot: setjmp "returns" val
.Lfe2:
.size longjmp,.Lfe2-longjmp
|
4ms/metamodule-plugin-sdk
| 4,627
|
plugin-libc/newlib/libc/machine/arc/strncpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strncpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
The beq_s to r3z could be made unaligned & long to avoid a stall
there, but it is not likely to be taken often, and it
would also be likely to cost an unaligned mispredict at the next call. */
#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
; BRand(a,b,l): branch to l if (a & b) != 0.  On cores with compact
; tst/bne forms (ARC700/EM/HS) keep `a` intact; elsewhere fold the
; mask into `a` (a scratch register) and compare-branch.
; Fix: the ARC700 test previously read "__ARC700___" (three trailing
; underscores) -- a typo inconsistent with every other use of
; __ARC700__ in this file -- so ARC700 builds silently fell through
; to the slower non-ARC700 variant.
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
#define BRand(a,b,l) tst a,b ` bne_l l
#else
#define BRand(a,b,l) and a,a,b ` brne_s a,0,l
#endif
; char *strncpy (char *dst, const char *src, size_t n)  -- barrel-shifter
; variant, word-at-a-time with zero-overhead loops.
; In:  r0 = dst, r1 = src, r2 = n.  Out: r0 = dst (left untouched).
; r10 = running dst ptr, r3/r4 = src words in flight,
; r8  = 0x01010101, r11 = r8 rotated right one bit (0x80808080),
; r12 = zero-byte detector scratch, lp_count = loop counter.
; Zero-byte test is the classic idiom: (x - 0x01010101) & ~x
; & 0x80808080 is nonzero iff x contains a zero byte (via BRand).
; On finding the terminator it tail-calls __strncpy_bzero to null-pad
; the remainder of the destination.
ENTRY (strncpy)
cmp_s r2,8
or r12,r0,r1
bmsk.cc.f r12,r12,1 ; alignment test only when n >= 8
brne.d r12,0,.Lbytewise ; small or unaligned: bytewise path
mov_s r10,r0
ld_s r3,[r1,0]
mov r8,0x01010101
sub lp_count,r2,1
bbit0.d r1,2,.Loop_start ; src already 8-byte aligned?
ror r11,r8 ; delay slot: r11 = 0x80808080
sub r12,r3,r8 ; one word up front to 8-align the src
bic_l r12,r12,r3
BRand (r12,r11,.Lr3z) ; zero byte in first word?
mov_s r4,r3
ld.a r3,[r1,4]
sub lp_count,lp_count,4
st.ab r4,[r10,4]
.balign 4
.Loop_start:
lsr.f lp_count,lp_count,3 ; iterations = remaining/8; C = tail flag
lpne .Loop_end ; zero-overhead loop: 2 words per pass
ld.a r4,[r1,4]
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z) ; zero byte in r3?
st.ab r3,[r10,4]
sub r12,r4,r8
bic r12,r12,r4
BRand (r12,r11,.Lr4z) ; zero byte in r4?
ld.a r3,[r1,4]
st.ab r4,[r10,4]
.Loop_end:
bcc_s .Lastword
ld.a r4,[r1,4] ; odd trailing word
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z)
st.ab r3,[r10,4]
mov_s r3,r4
.Lastword: ; no terminator seen: copy last n mod 4 bytes
and.f lp_count,r2,3
mov.eq lp_count,4 ; n multiple of 4: whole last word
lp .Last_byte_end ; zero-overhead loop, byte at a time
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7 ; extract lowest byte
lsr.ne r3,r3,8
#else
lsr.f r1,r3,24 ; extract highest byte
asl.ne r3,r3,8
#endif
stb.ab r1,[r10,1]
.Last_byte_end:
j_s [blink]
.balign 4
.Lr4z:
mov_l r3,r4
.Lr3z: ; r3 holds the word containing the terminator
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7 ; copy bytes up to and including the NUL
lsr_s r3,r3,8
#else
lsr.f r1,r3,24
asl_s r3,r3,8
#endif
bne.d .Lr3z
stb.ab r1,[r10,1] ; delay slot: store byte (NUL stored last)
#else /* ! __ARC700__ */
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7
.Lr3z_loop:
lsr_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop
bmsk.f r1,r3,7
#else
lsr.f r1,r3,24
.Lr3z_loop:
asl_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop
lsr.f r1,r3,24
#endif /* ENDIAN */
#endif /* ! __ARC700__ */
.Lzero_rest: ; tail-call the shared null-padding helper
; __strncpy_bzero requires:
; return value in r0
; zeroing length in r2
; zeroing start address in r3
mov_s r3,r10
add_s r2,r2,r0 ; r2 = (dst + n) - current dst = bytes to zero
b.d __strncpy_bzero
sub_s r2,r2,r3 ; delay slot: finish the length computation
.balign 4
.Lbytewise: ; small / unaligned copies, one byte at a time
sub.f r2,r2,1
mov_l r3,r0
jcs [blink] ; n == 0: return immediately
.Lcharloop:
ldb.ab r12,[r1,1]
beq.d .Last_byte ; n exhausted
sub.f r2,r2,1
brne.d r12,0,.Lcharloop
stb.ab r12,[r3,1] ; delay slot: store byte
b.d __strncpy_bzero ; hit NUL: pad the rest (r2/r3 already set)
stb.ab r12,[r3,1]
.Last_byte:
j_s.d [blink]
stb_l r12,[r3] ; delay slot: final byte, no pointer bump
ENDFUNC (strncpy)
#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 2,941
|
plugin-libc/newlib/libc/machine/arc/strcpy.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
This version is a compromise between speed for the 601 pipeline and code
size. */
; char *strcpy (char *dst, const char *src)  -- ARC601 / no
; barrel-shifter variant.
; In:  r0 = dst, r1 = src.  Out: r0 = dst (left untouched).
; r10 = running dst ptr, r3/r4 = src words in flight,
; r8  = 0x01010101, r12 = r8 rotated right one bit (0x80808080),
; r2/r5 = zero-byte detector scratch.
; Zero-byte test: (x - 0x01010101) & ~x & 0x80808080 != 0 iff x has a
; zero byte.  On a hit the byte loop re-reads and copies the tail
; byte-by-byte, including the terminator.
ENTRY (strcpy)
or r2,r0,r1
bmsk.f 0,r2,1 ; any pointer not 4-byte aligned?
mov r8,0x01010101
bne.d .Lcharloop
mov_s r10,r0 ; delay slot: running dst ptr
ld_s r3,[r1]
bbit0.d r1,2,.Loop_start ; src already 8-byte aligned?
ror r12,r8 ; delay slot: r12 = 0x80808080
sub r2,r3,r8 ; one word up front to 8-align the src
bic_s r2,r2,r3
and_s r2,r2,r12
brne_s r2,0,.Lr3z ; zero byte in first word?
mov r4,r3
sub_s r1,r1,4 ; bias src so the loop's offsets line up
.balign 4
.Loop: ; two words per iteration, software-pipelined
ld.a r3,[r1,8]
st.ab r4,[r10,4]
.Loop_start:
ld r4,[r1,4]
sub r2,r3,r8 ; zero-byte test on r3
bic_s r2,r2,r3
tst_s r2,r12
sub r5,r4,r8 ; start zero-byte test on r4 before branching
bic r5,r5,r4
bne_s .Lr3z ; r3 contained a zero byte
and r5,r5,r12
breq.d r5,0,.Loop
st.ab r3,[r10,4] ; delay slot: r3 is clean, store it
;mov_s r3,r4
add_s r1,r1,4 ; r4 had the zero byte: re-read it bytewise
.balign 4
.Lr3z:
.Lcharloop: ; copy bytes through (and including) the NUL
ldb.ab r3,[r1,1]
brne.d r3,0,.Lcharloop
stb.ab r3,[r10,1] ; delay slot: store byte
j_s [blink]
ENDFUNC (strcpy)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,269
|
plugin-libc/newlib/libc/machine/arc/strlen-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
; size_t strlen (const char *s)  -- ARC700/EM/HS variant using norm and
; the barrel shifter for a branch-light word-at-a-time scan.
; In:  r0 = s.  Out: r0 = length.
; r3 = aligned scan ptr, r2/r6 = double-word in flight,
; r4 = 0x01010101, r5 = r4 rotated right one bit (0x80808080),
; r1/r12 = zero-byte detector results, r7 = mask for the bytes before
; s within the first aligned double word (so they cannot fake a hit).
; Zero-byte test: (x - 0x01010101) & ~x & 0x80808080 != 0 iff x has a
; zero byte; norm then locates the first hit without a byte loop.
ENTRY (strlen)
or r3,r0,7 ; r3 -> last byte of first aligned 8-byte chunk
ld r2,[r3,-7] ; aligned load of the chunk containing s
ld.a r6,[r3,-3]
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
asl_s r1,r0,3 ; bit offset of s within its word
btst_s r0,2 ; which word of the pair does s start in?
asl r7,r4,r1 ; r7 = 0x01010101 shifted past the pre-s bytes
ror r5,r4 ; r5 = 0x80808080
sub r1,r2,r7
bic_s r1,r1,r2
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6
or.eq r12,r12,r1 ; fold first-word hits in when s is in word 0
and r12,r12,r5
brne r12,0,.Learly_end ; terminator inside the first double word
#else /* BIG ENDIAN */
ror r5,r4 ; r5 = 0x80808080
btst_s r0,2
mov_s r1,31
sub3 r7,r1,r0 ; bit index masking out the pre-s bytes
sub r1,r2,r4
bic_s r1,r1,r2
bmsk r1,r1,r7
sub r12,r6,r4
bic r12,r12,r6
bmsk.ne r12,r12,r7
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end ; terminator inside the first double word
#endif /* ENDIAN */
.Loop: ; scan two words per iteration
ld_s r2,[r3,4]
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4 ; zero-byte test on r2
bic_s r1,r1,r2
sub r12,r6,r4 ; zero-byte test on r6
bic r12,r12,r6
or_l r12,r12,r1
and r12,r12,r5
breq_l r12,0,.Loop
.Lend: ; locate which word / which byte hit
and.f r1,r1,r5
sub.ne r3,r3,4 ; hit was in the first word of the pair
mov.eq r1,r12
#ifdef __LITTLE_ENDIAN__
sub_s r2,r1,1 ; isolate lowest set detector bit
bic_s r2,r2,r1
norm r1,r2 ; leading-zero count -> byte position
sub_s r0,r0,3
lsr_s r1,r1,3
sub r0,r3,r0
j_s.d [blink]
sub_l r0,r0,r1 ; delay slot: final length in r0
#else /* BIG ENDIAN */
lsr_s r1,r1,7
mov.eq r2,r6
bic_s r1,r1,r2 ; filter false hits from 0x01 bytes
norm r1,r1 ; leading-zero count -> byte position
sub r0,r3,r0
lsr_s r1,r1,3
j_s.d [blink]
add_l r0,r0,r1 ; delay slot: final length in r0
#endif /* ENDIAN */
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1 ; delay slot: flag "hit was in second word"
ENDFUNC (strlen)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
&& __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,537
|
plugin-libc/newlib/libc/machine/arc/strchr-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
/* ARC700 has a relatively long pipeline and branch prediction, so we want
to avoid branches that are hard to predict. On the other hand, the
presence of the norm instruction makes it easier to operate on whole
words branch-free. */
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
; char *strchr (const char *s, int c)  -- ARC700/EM/HS variant using
; norm and the barrel shifter for branch-light word-at-a-time search.
; In:  r0 = s, r1 = c.  Out: r0 = pointer to first c in s (also finds
; the terminating NUL when c == 0), or 0 if not found.
; r5 = c replicated into all four bytes, r3 = 0x01010101,
; r4 = r3 rotated right one bit (0x80808080), r2 = current word,
; r6 = word xor r5 (bytes equal to c become zero), r7/r12 = zero-byte
; detector results.  xor-ing with the replicated char lets one
; zero-byte idiom, (x - 0x01010101) & ~x & 0x80808080, detect both the
; NUL terminator (on r2) and the searched character (on r6).
ENTRY (strchr)
extb_s r1,r1 ; c as an unsigned byte
asl r5,r1,8 ; start replicating c into all 4 bytes of r5
bmsk r2,r0,1 ; low 2 bits of s: misalignment amount
or r5,r5,r1
mov_s r3,0x01010101
breq.d r2,r0,.Laligned
asl r4,r5,16 ; delay slot: continue replication
sub_s r0,r0,r2 ; round s down to the aligned word
asl r7,r2,3 ; bit count of the pre-s bytes
ld_s r2,[r0]
#ifdef __LITTLE_ENDIAN__
asl r7,r3,r7 ; r7 = 0x01010101 with pre-s bytes masked off
#else
lsr r7,r3,r7
#endif
or r5,r5,r4 ; r5 = c in all four byte lanes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r7 ; NUL test on first (partial) word
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0_ua
xor r6,r2,r5 ; delay slot: bytes == c become zero in r6
ld.a r2,[r0,4]
sub r12,r6,r7 ; c test on first (partial) word
bic r12,r12,r6
#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b_l .Lfound_char ; Likewise this one.
#else
and r12,r12,r4
breq_l r12,0,.Loop ; For speed, we want this branch to be unaligned.
lsr_s r12,r12,7
bic r2,r7,r6
b.d .Lfound_char_b
and_s r2,r2,r12
#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
or r5,r5,r4 ; r5 = c in all four byte lanes
ror r4,r3 ; r4 = 0x80808080
; /* ... so that this code address is aligned, for itself and ... */
.Loop: ; one word per iteration, tests NUL and c together
sub r12,r2,r3 ; NUL test on r2
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0
xor r6,r2,r5 ; delay slot: bytes == c become zero in r6
ld.a r2,[r0,4]
sub r12,r6,r3 ; c test (zero-byte test on r6)
bic r12,r12,r6
and r7,r12,r4
breq r7,0,.Loop /* ... so that this branch is unaligned. */
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
sub r3,r7,1 ; isolate lowest detector bit
bic r3,r3,r7
norm r2,r3 ; leading zeros -> byte index of the match
sub_s r0,r0,1
asr_s r2,r2,3
j_l.d [blink]
sub_s r0,r0,r2 ; delay slot: back up to the matching byte
.balign 4
.Lfound0_ua: ; NUL hit in the unaligned first word
mov_l r3,r7
.Lfound0: ; NUL found; decide whether c came first
sub r3,r6,r3
bic r3,r3,r6
and r2,r3,r4 ; r2 = c-match detector bits
or_s r12,r12,r2 ; merged NUL + c hits
sub_s r3,r12,1
bic_s r3,r3,r12
norm r3,r3 ; position of the earliest hit
add_s r0,r0,3
asr_s r12,r3,3
asl.f 0,r2,r3 ; was that earliest hit a c match?
sub_s r0,r0,r12
j_s.d [blink]
mov.pl r0,0 ; delay slot: NUL came first -> return NULL
#else /* BIG ENDIAN */
.Lfound_char:
lsr r7,r7,7
bic r2,r7,r6
.Lfound_char_b:
norm r2,r2 ; leading zeros -> byte index of the match
sub_s r0,r0,4
asr_s r2,r2,3
j_l.d [blink]
add_s r0,r0,r2 ; delay slot: address of the matching byte
.Lfound0_ua: ; NUL hit in the unaligned first word
mov_s r3,r7
.Lfound0: ; NUL found; decide whether c came first
asl_s r2,r2,7
or r7,r6,r4
bic_s r12,r12,r2
sub r2,r7,r3
or r2,r2,r6
bic r12,r2,r12
bic.f r3,r4,r12
norm r3,r3 ; position of the earliest hit
add.pl r3,r3,1
asr_s r12,r3,3
asl.f 0,r2,r3 ; was that earliest hit a c match?
add_s r0,r0,r12
j_s.d [blink]
mov.mi r0,0 ; delay slot: NUL came first -> return NULL
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
&& __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,560
|
plugin-libc/newlib/libc/machine/arc/memset-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* ARC HS has it's own implementation of memset, yet we want this function
still to be compiled under "__dummy_memset" disguise, because strncpy
function uses __strncpy_bzero as a second entry point into memset. Would be
better to add __strncpy_bzero label to memset for ARC HS though, and even
better would be to avoid a second entry point into function. ARC HS always
has barrel-shifter, so this implementation will be always used for this
purpose. */
#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
/* To deal with alignment/loop issues, SMALL must be at least 2. */
#define SMALL 7
.global __strncpy_bzero
.hidden __strncpy_bzero
/* __strncpy_bzero provides the following interface to strncpy:
r0: return value
r2: zeroing length
r3: zeroing start address
No attempt is made here for __strncpy_memset to speed up aligned
cases, because the copying of a string presumably leaves start address
and length alignment for the zeroing randomly distributed. */
; void *memset (void *s /*r0*/, int c /*r1*/, size_t n /*r2*/)
; Fills n bytes at s with the low byte of c; returns s (r0 is never written).
; Clobbers r1, r2, r3, r12, lp_count and flags.
; On ARC HS this body is assembled as __dummy_memset (HS has its own memset)
; and is kept only to provide the __strncpy_bzero entry point for strncpy.
#ifdef __ARCHS__
ENTRY (__dummy_memset)
#else
ENTRY (memset)
#endif
#if !defined (__ARC700__) && !defined (__ARCEM__)
#undef SMALL
#define SMALL 8 /* Even faster if aligned. */
brls.d r2,SMALL,.Ltiny          ; short fills: take the bytewise .Ltiny path
#endif
mov_s r3,r0                     ; r3 = working dst; r0 stays the return value
or r12,r0,r2
bmsk.f r12,r12,1                ; Z <- dst and length are both multiples of 4
extb_s r1,r1                    ; c &= 0xff
asl r12,r1,8
beq.d .Laligned
or_s r1,r1,r12                  ; (delay slot) replicate byte into low 16 bits
#if defined (__ARC700__) || defined (__ARCEM__)
brls r2,SMALL,.Ltiny
#endif
.Lnot_tiny:
; Unaligned case: pre-store up to three bytes at each end (these stores may
; overlap the word loop) and round r2 / r3 to word alignment.
add_s r12,r2,r0                 ; r12 = one past the end of the region
stb r1,[r12,-1]                 ; last byte
bclr_l r12,r12,0
stw r1,[r12,-2]                 ; last halfword (halfword aligned)
bmsk.f r12,r3,1                 ; r12 = dst misalignment (0..3), Z if aligned
add_s r2,r2,r12                 ; extend the count over the head bytes ...
sub.ne r2,r2,4                  ; ... minus the word already covered above
stb.ab r1,[r3,1]                ; first byte, advance dst
bclr_s r3,r3,0
stw.ab r1,[r3,2]                ; first halfword, advance dst
bclr_s r3,r3,1                  ; r3 is now word aligned
.Laligned: ; This code address should be aligned for speed.
#if defined (__ARC700__) || defined (__ARCEM__)
asl r12,r1,16
lsr.f lp_count,r2,2             ; word count for the zero-overhead loop
or_s r1,r1,r12                  ; replicate halfword into a full word
lpne .Loop_end
st.ab r1,[r3,4]                 ; one word per iteration
.Loop_end:
j_s [blink]
#else /* !__ARC700 */
lsr.f lp_count,r2,3             ; 8 bytes/iteration; C <- odd word left over
asl r12,r1,16
or_s r1,r1,r12                  ; replicate halfword into a full word
lpne .Loop_end
st.ab r1,[r3,4]
st.ab r1,[r3,4]                 ; two words per iteration
.Loop_end:
jcc [blink]                     ; carry clear: no odd word remains
j_s.d [blink]
st_s r1,[r3]                    ; (delay slot) store the remaining word
#endif /* !__ARC700 */
#if defined (__ARC700__) || defined (__ARCEM__)
.balign 4
__strncpy_bzero:                ; tail-clear entry used by strncpy (fill = 0)
brhi.d r2,17,.Lnot_tiny
mov_l r1,0                      ; (delay slot) fill value = 0
.Ltiny:
mov.f lp_count,r2               ; simple bytewise loop for short lengths
lpne .Ltiny_end
stb.ab r1,[r3,1]
.Ltiny_end:
j_s [blink]
#else /* !__ARC700__ */
#if SMALL > 8
FIXME
#endif
.balign 4
__strncpy_bzero:                ; tail-clear entry used by strncpy (fill = 0)
brhi.d r2,8,.Lnot_tiny
mov_s r1,0                      ; (delay slot) fill value = 0
.Ltiny:
; Computed jump into the stb_s ladder below so that exactly r2 byte stores
; execute; relies on the fixed 2-byte encodings of the stb_s instructions.
sub_s r2,r2,11
sub1 r12,pcl,r2                 ; r12 = pcl - 2*(r2 - 11)
j_s [r12]
stb_s r1,[r3,7]
stb_s r1,[r3,6]
stb_s r1,[r3,5]
stb_s r1,[r3,4]
stb_s r1,[r3,3]
stb_s r1,[r3,2]
stb_s r1,[r3,1]
stb_s r1,[r3]
j_s [blink]
#endif /* !__ARC700 */
#ifdef __ARCHS__
ENDFUNC (__dummy_memset)
#else
ENDFUNC (memset)
#endif
#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 7,794
|
plugin-libc/newlib/libc/machine/arc/memcpy-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARCHS__)
; Helper macros: SHIFT_*/MERGE_*/EXTRACT_* abstract the endian-dependent bit
; packing used when the source is not word aligned; LOADX/STOREX select
; 64-bit (LL64) or 32-bit moves; ZOLSHFT/ZOLAND express the main-loop chunk
; size (32 bytes with LL64, 16 bytes without) as a shift and a mask.
#ifdef __LITTLE_ENDIAN__
# define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; <<
# define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >>
# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM
# define MERGE_2(RX,RY,IMM)
# define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM
#else
# define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >>
# define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; <<
# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; <<
# define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; <<
# define EXTRACT_1(RX,RY,IMM) lsr RX, RY, IMM
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08
#endif
#ifdef __ARC_LL64__
# define PREFETCH_READ(RX) prefetch [RX, 56]
# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
# define ZOLSHFT 5
# define ZOLAND 0x1F
#else
# define PREFETCH_READ(RX) prefetch [RX, 28]
# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
# define LOADX(DST,RX) ld.ab DST, [RX, 4]
# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
# define ZOLSHFT 4
# define ZOLAND 0xF
#endif
#ifdef __ARC_ALIGNED_ACCESS__
; void *memcpy (void *dst /*r0*/, const void *src /*r1*/, size_t n /*r2*/)
; Variant for configurations that must not issue unaligned accesses:
; byte-copies until dst is word aligned, then dispatches on the remaining
; source misalignment (cases 0..3) and stitches words together with the
; SHIFT_*/MERGE_* macros above. Returns dst (r0); r3 is the advancing copy.
ENTRY (memcpy)
prefetch [r1] ; Prefetch the read location
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
; if size <= 8
cmp r2, 8
bls.d @.Lsmallchunk
mov.f lp_count, r2
and.f r4, r0, 0x03              ; dst misalignment; Z if already aligned
rsub lp_count, r4, 4            ; bytes needed to word-align dst
lpnz @.Laligndestination        ; loop is skipped entirely when Z is set
; LOOP BEGIN
ldb.ab r5, [r1,1]
sub r2, r2, 1
stb.ab r5, [r3,1]
.Laligndestination:
; Check the alignment of the source
and.f r4, r1, 0x03
bnz.d @.Lsourceunaligned
; CASE 0: Both source and destination are 32bit aligned
; Convert len to Dwords, unfold x4
lsr.f lp_count, r2, ZOLSHFT     ; (also the delay slot of bnz.d above)
lpnz @.Lcopy32_64bytes
; LOOP START
LOADX (r6, r1)
PREFETCH_READ (r1)
PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
STOREX (r6, r3)
STOREX (r8, r3)
STOREX (r10, r3)
STOREX (r4, r3)
.Lcopy32_64bytes:
and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes
.Lsmallchunk:
lpnz @.Lcopyremainingbytes      ; bytewise cleanup loop
; LOOP START
ldb.ab r5, [r1,1]
stb.ab r5, [r3,1]
.Lcopyremainingbytes:
j [blink]
; END CASE 0
.Lsourceunaligned:
cmp r4, 2
beq.d @.LunalignedOffby2
sub r2, r2, 1                   ; (delay slot) account for the byte read below
bhi.d @.LunalignedOffby3
ldb.ab r5, [r1, 1]              ; (delay slot) leading byte of the carry word
; CASE 1: The source is unaligned, off by 1
; Hence I need to read 1 byte for a 16bit alignment
; and 2bytes to reach 32bit alignment
ldh.ab r6, [r1, 2]
sub r2, r2, 2
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
MERGE_1 (r6, r6, 8)
MERGE_2 (r5, r5, 24)
or r5, r5, r6                   ; r5 = carry word holding the 3 head bytes
; Both src and dst are aligned
lpnz @.Lcopy8bytes_1
; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 24)
or r7, r7, r5                   ; combine carry word with new bytes
SHIFT_2 (r5, r6, 8)             ; keep the leftover bytes for next round
SHIFT_1 (r9, r8, 24)
or r9, r9, r5
SHIFT_2 (r5, r8, 8)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_1:
; Write back the remaining 16bits
EXTRACT_1 (r6, r5, 16)
sth.ab r6, [r3, 2]
; Write back the remaining 8bits
EXTRACT_2 (r5, r5, 16)
stb.ab r5, [r3, 1]
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_1
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_1:
j [blink]
.LunalignedOffby2:
; CASE 2: The source is unaligned, off by 2
ldh.ab r5, [r1, 2]              ; carry halfword
sub r2, r2, 1
; Both src and dst are aligned
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
#ifdef __BIG_ENDIAN__
asl.nz r5, r5, 16
#endif
lpnz @.Lcopy8bytes_2
; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 16)
or r7, r7, r5
SHIFT_2 (r5, r6, 16)
SHIFT_1 (r9, r8, 16)
or r9, r9, r5
SHIFT_2 (r5, r8, 16)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_2:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 16
#endif
sth.ab r5, [r3, 2]              ; flush the carried halfword
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_2
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_2:
j [blink]
.LunalignedOffby3:
; CASE 3: The source is unaligned, off by 3
; Hence, I need to read 1byte for achieve the 32bit alignment
; (the byte is already in r5, loaded in the delay slot of bhi.d above)
; Both src and dst are aligned
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
#ifdef __BIG_ENDIAN__
asl.ne r5, r5, 24
#endif
lpnz @.Lcopy8bytes_3
; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
SHIFT_2 (r5, r6, 24)
SHIFT_1 (r9, r8, 8)
or r9, r9, r5
SHIFT_2 (r5, r8, 24)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_3:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 24
#endif
stb.ab r5, [r3, 1]              ; flush the carried byte
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_3
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_3:
j [blink]
ENDFUNC (memcpy)
#else
; void *memcpy (void *dst /*r0*/, const void *src /*r1*/, size_t n /*r2*/)
; ARC HS variant for cores with hardware unaligned access: no software
; alignment fixup, just an unrolled prefetching main loop (LOADX/STOREX,
; ZOLSHFT bytes per iteration) plus LL64 and bytewise cleanup loops.
; Returns dst (r0); r3 is the advancing destination copy.
ENTRY(memcpy)
prefetch [r1] ; Prefetch the read location
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
;;; if size <= 8
cmp r2, 8
bls.d @.Lsmallchunk
mov.f lp_count, r2              ; (delay slot) byte count for .Lsmallchunk
;;; Convert len to Dwords, unfold x4
lsr.f lp_count, r2, ZOLSHFT
lpnz @.Lcopyfast
;; LOOP START
LOADX (r6, r1)
PREFETCH_READ (r1)
PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
STOREX (r6, r3)
STOREX (r8, r3)
STOREX (r10, r3)
STOREX (r4, r3)
.Lcopyfast:
#ifdef __ARC_LL64__
and r2, r2, ZOLAND ;Remaining 31 bytes
lsr.f lp_count, r2, 3 ;Convert to 64-bit words.
lpnz @.Lcopy64b
;; LOOP START
ldd.ab r6,[r1,8]                ; 64-bit load/store pair per iteration
std.ab r6,[r3,8]
.Lcopy64b:
and.f lp_count, r2, 0x07 ; Last 7 bytes
#else
and.f lp_count, r2, ZOLAND
#endif
.Lsmallchunk:
lpnz @.Lcopyremainingbytes      ; bytewise cleanup
;; LOOP START
ldb.ab r5, [r1,1]
stb.ab r5, [r3,1]
.Lcopyremainingbytes:
j [blink]
ENDFUNC(memcpy)
#endif
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,862
|
plugin-libc/newlib/libc/machine/arc/memset-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#ifdef __ARCHS__
#define USE_PREFETCH
#ifdef USE_PREFETCH
#define PREWRITE(A,B) prefetchw [(A),(B)]
#else
#define PREWRITE(A,B) prealloc [(A),(B)]
#endif
; void *memset (void *s /*r0*/, int c /*r1*/, size_t n /*r2*/)
; ARC HS variant: byte-aligns the destination, then stores 64-byte chunks,
; then 32-byte chunks, then single bytes. Returns s (r0 untouched);
; r3 is the advancing destination copy.
ENTRY (memset)
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
; if length < 8
brls.d.nt r2, 8, .Lsmallchunk
mov.f lp_count,r2               ; (delay slot) byte count for .Lsmallchunk
and.f r4, r0, 0x03              ; dst misalignment; Z if already aligned
rsub lp_count, r4, 4            ; bytes needed to word-align dst
lpnz @.Laligndestination        ; loop is skipped entirely when Z is set
; LOOP BEGIN
stb.ab r1, [r3,1]
sub r2, r2, 1
.Laligndestination:
; Destination is aligned
and r1, r1, 0xFF
asl r4, r1, 8
or r4, r4, r1                   ; fill byte replicated into a halfword
asl r5, r4, 16
or r5, r5, r4                   ; fill byte replicated into a word
mov r4, r5                      ; r4:r5 pair for the LL64 std.ab stores
; Split the length: lp_count counts 64-byte chunks, r2 keeps the remainder.
; NOTE(review): the sub3/bmsk/add3 sequence appears to keep the remainder
; below 128 so the 32-byte loop plus byte loop can finish it -- confirm
; against the upstream newlib memset-archs history before changing.
sub3 lp_count, r2, 8            ; lp_count = r2 - 64
cmp r2, 64
bmsk.hi r2, r2, 5               ; r2 > 64: keep the low 6 bits as remainder
mov.ls lp_count, 0              ; r2 <= 64: no 64-byte chunks
add3.hi r2, r2, 8               ; r2 > 64: remainder += 64
; Convert len to Dwords, unfold x8
lsr.f lp_count, lp_count, 6
lpnz @.Lset64bytes
; LOOP START
PREWRITE (r3, 64) ;Prefetch the next write location
#ifdef __ARC_LL64__
std.ab r4, [r3, 8]              ; 8 x 8 = 64 bytes per iteration
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]               ; 16 x 4 = 64 bytes per iteration
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset64bytes:
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
#ifdef __ARC_LL64__
std.ab r4, [r3, 8]              ; 4 x 8 = 32 bytes per iteration
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]               ; 8 x 4 = 32 bytes per iteration
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset32bytes:
and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
.Lsmallchunk:
lpnz .Lcopy3bytes               ; bytewise cleanup
; LOOP START
stb.ab r1, [r3, 1]
.Lcopy3bytes:
j [blink]
ENDFUNC (memset)
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,419
|
plugin-libc/newlib/libc/machine/arc/memset.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) \
|| (!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
/* To deal with alignment/loop issues, SMALL must be at least 2. */
#define SMALL 8 /* Even faster if aligned. */
.global __strncpy_bzero
.hidden __strncpy_bzero
/* __strncpy_bzero provides the following interface to strncpy:
r0: return value
r2: zeroing length
r3: zeroing start address
No attempt is made here for __strncpy_memset to speed up aligned
cases, because the copying of a string presumably leaves start address
and length alignment for the zeroing randomly distributed. */
; void *memset (void *s /*r0*/, int c /*r1*/, size_t n /*r2*/)
; Variant for cores without a barrel shifter: the fill byte is replicated
; into a word through a scratch stack slot (four stb + one ld) instead of
; shifts. Returns s (r0 is never written).
ENTRY (memset)
brls.d r2,SMALL,.Ltiny
mov_s r3,r0                     ; (delay slot) r3 = working dst
or r12,r0,r2
bmsk.f r12,r12,1                ; Z <- dst and length both multiples of 4
breq_s r1,0,.Lbzero             ; zero fill: skip replication (Z survives)
mov r4,0
; Replicate the fill byte into a full word via the stack.
stb.a r1,[sp,-4]
stb r1,[sp,1]
stb r1,[sp,2]
stb r1,[sp,3]
ld.ab r1,[sp,4]                 ; r1 = c repeated in all four bytes
.Lbzero:
beq.d .Laligned
.Lbzero2:
add r6,r2,r3                    ; (delay slot of beq.d) r6 = end address
.Lnot_tiny:
; Pre-store up to three bytes at each end (may overlap the word loop)
; and word-align r3.
stb r1,[r6,-1]                  ; last byte
bclr r12,r6,0
stw r1,[r12,-2]                 ; last halfword (halfword aligned)
stb.ab r1,[r3,1]                ; first byte, advance dst
bclr_s r3,r3,0
stw.ab r1,[r3,2]                ; first halfword, advance dst
bclr_s r3,r3,1                  ; r3 is now word aligned
.Laligned: ; This code address should be aligned for speed.
sub r6,r6,8
brlo.d r6,r3,.Loop_end
sub r6,r6,8                     ; (delay slot) r6 = end - 16
3:
st_l r1,[r3,4]
brhs.d r6,r3,3b
st.ab r1,[r3,8]                 ; (delay slot) two words per iteration
.Loop_end:
bic r12,r6,3
j_s.d [blink]
st_s r1,[r12,12]                ; (delay slot) final word near the end
.balign 4
__strncpy_bzero:                ; tail-clear entry used by strncpy (fill = 0)
brhi.d r2,8,.Lbzero2
mov_s r1,0                      ; (delay slot) fill value = 0
.Ltiny:
; Computed jump into the stb_s ladder below so that exactly r2 byte stores
; execute; relies on the fixed 2-byte encodings of the stb_s instructions.
sub_s r2,r2,11
sub1 r12,pcl,r2                 ; r12 = pcl - 2*(r2 - 11)
j_s [r12]
stb_s r1,[r3,7]
stb_s r1,[r3,6]
stb_s r1,[r3,5]
stb_s r1,[r3,4]
stb_s r1,[r3,3]
stb_s r1,[r3,2]
stb_s r1,[r3,1]
stb_s r1,[r3]
j_s [blink]
ENDFUNC (memset)
#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 2,969
|
plugin-libc/newlib/libc/machine/arc/strcmp-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#ifdef __ARCHS__
; int strcmp (const char *s1 /*r0*/, const char *s2 /*r1*/)
; ARC HS variant: word-at-a-time compare when both pointers are 4-byte
; aligned, using the classic (x - 0x01010101) & ~x & 0x80808080 NUL
; detector (r12 = 0x01010101, r11 = 0x80808080); bytewise otherwise.
; Returns negative / 0 / positive (aligned path returns +1 or a value with
; bit 31 set, not the byte difference).
ENTRY (strcmp)
or r2, r0, r1
bmsk_s r2, r2, 1                ; low two bits of s1 | s2
brne r2, 0, @.Lcharloop         ; any misalignment -> bytewise loop
; s1 and s2 are word aligned
ld.ab r2, [r0, 4]
mov_s r12, 0x01010101
ror r11, r12                    ; r11 = 0x80808080
.align 4
.LwordLoop:
ld.ab r3, [r1, 4]
; Detect NULL char in str1
sub r4, r2, r12
ld.ab r5, [r0, 4]               ; read ahead the next word of s1
bic r4, r4, r2
and r4, r4, r11                 ; nonzero iff some byte of r2 is NUL
brne.d.nt r4, 0, .LfoundNULL
; Check if the read locations are the same
cmp r2, r3
beq.d .LwordLoop
mov.eq r2, r5                   ; (delay slot) advance to the read-ahead word
; The words differ: locate the first differing byte and produce the sign
#ifdef __LITTLE_ENDIAN__
swape r3, r3                    ; byte-swap so string order matches bit order
mov_s r0, 1
swape r2, r2
#else
mov_s r0, 1
#endif
cmp_s r2, r3
j_s.d [blink]
bset.lo r0, r0, 31              ; (delay slot) +1, or bit31 set => negative
.align 4
.LfoundNULL:
#ifdef __BIG_ENDIAN__
swape r4, r4
swape r2, r2
swape r3, r3
#endif
; Find null byte
ffs r0, r4                      ; bit index of the first NUL marker
bmsk r2, r2, r0                 ; keep bytes up to and including the NUL
bmsk r3, r3, r0
swape r2, r2                    ; restore string byte order for the compare
swape r3, r3
; make the return value
sub.f r0, r2, r3
mov.hi r0, 1                    ; unsigned higher => positive result
j_s.d [blink]
bset.lo r0, r0, 31              ; (delay slot) unsigned lower => negative
.align 4
.Lcharloop:
ldb.ab r2, [r0, 1]
ldb.ab r3, [r1, 1]
nop
breq r2, 0, .Lcmpend            ; end of s1 (difference resolved below)
breq r2, r3, .Lcharloop
.align 4
.Lcmpend:
j_s.d [blink]
sub r0, r2, r3                  ; (delay slot) byte difference is the result
ENDFUNC (strcmp)
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 5,138
|
plugin-libc/newlib/libc/machine/arc/memcmp-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if !defined (__ARC601__) && defined (__ARC_NORM__) \
&& defined (__ARC_BARREL_SHIFTER__)
; WORD2/SHIFT map the two roles of the r2/r3 pair depending on endianness:
; WORD2 holds the second read-ahead word, SHIFT the length-derived shift.
#ifdef __LITTLE_ENDIAN__
#define WORD2 r2
#define SHIFT r3
#else /* BIG ENDIAN */
#define WORD2 r3
#define SHIFT r2
#endif
; int memcmp (const void *s1 /*r0*/, const void *s2 /*r1*/, size_t n /*r2*/)
; Optimized variant for cores with norm and a barrel shifter. When both
; pointers are word aligned it compares two words per zero-overhead-loop
; iteration; otherwise (or for n == 0) it falls back to a bytewise loop.
ENTRY (memcmp)
or r12,r0,r1
asl_s r12,r12,30                ; alignment bits of s1|s2 moved to the top
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
sub_l r3,r2,1
brls r2,r12,.Lbytewise          ; misaligned (r12 huge) or n == 0
#else
brls.d r2,r12,.Lbytewise
sub_s r3,r2,1                   ; (delay slot) r3 = n - 1
#endif
ld r4,[r0,0]
ld r5,[r1,0]
lsr.f lp_count,r3,3             ; (n-1)/8 iterations; C used by bcc_s below
#ifdef __ARCEM__
/* A branch can't be the last instruction in a zero overhead loop.
So we move the branch to the start of the loop, duplicate it
after the end, and set up r12 so that the branch isn't taken
initially. */
mov_s r12,WORD2
lpne .Loop_end
brne WORD2,r12,.Lodd
ld WORD2,[r0,4]
#else
lpne .Loop_end
ld_s WORD2,[r0,4]
#endif
ld_s r12,[r1,4]
brne r4,r5,.Leven               ; first word pair differs
ld.a r4,[r0,8]
ld.a r5,[r1,8]
#ifdef __ARCEM__
.Loop_end:
brne WORD2,r12,.Lodd
#else
brne WORD2,r12,.Lodd            ; second word pair differs
#ifdef __ARCHS__
nop
#endif
.Loop_end:
#endif
asl_s SHIFT,SHIFT,3             ; length-derived bit shift for the last word
bcc_s .Last_cmp
brne r4,r5,.Leven
ld r4,[r0,4]
ld r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
nop_s
; one more load latency cycle
.Last_cmp:
xor r0,r4,r5
bset r0,r0,SHIFT                ; force a difference no later than the
                                ; lowest bit of the last valid byte
sub_s r1,r0,1
bic_s r1,r1,r0                  ; isolate the lowest difference bit
norm r1,r1
b.d .Leven_cmp
and r1,r1,24                    ; (delay slot) round shift to a byte boundary
.Leven:
xor r0,r4,r5
sub_s r1,r0,1
bic_s r1,r1,r0                  ; isolate the lowest difference bit
norm r1,r1
; slow track insn
and r1,r1,24                    ; round shift to a byte boundary
.Leven_cmp:
asl r2,r4,r1                    ; move the first differing byte to the top
asl r12,r5,r1
lsr_s r2,r2,1                   ; halve so the subtract cannot overflow
lsr_s r12,r12,1
j_s.d [blink]
sub r0,r2,r12                   ; (delay slot) signed result
.balign 4
.Lodd:
xor r0,WORD2,r12
sub_s r1,r0,1
bic_s r1,r1,r0                  ; isolate the lowest difference bit
norm r1,r1
; slow track insn
and r1,r1,24
asl_s r2,r2,r1
asl_s r12,r12,r1
lsr_s r2,r2,1
lsr_s r12,r12,1
j_s.d [blink]
sub r0,r2,r12                   ; (delay slot) signed result
#else /* !__ARC700__ */
.balign 4
.Last_cmp:
xor r0,r4,r5
b.d .Leven_cmp
bset r0,r0,SHIFT                ; (delay slot) difference no later than the
                                ; lowest bit of the last valid byte
.Lodd:
mov_s r4,WORD2
mov_s r5,r12
.Leven:
xor r0,r4,r5
.Leven_cmp:
mov_s r1,0x80808080
; uses long immediate
sub_s r12,r0,1
bic_s r0,r0,r12                 ; isolate the lowest difference bit
sub r0,r1,r0
xor_s r0,r0,r1                  ; 0x80 mask at the first differing byte
and r1,r5,r0
and r0,r4,r0
xor.f 0,r0,r1                   ; N from the masked compare
sub_s r0,r0,r1
j_s.d [blink]
mov.mi r0,r1                    ; (delay slot) sign fix when top bits differ
#endif /* !__ARC700__ */
#else /* BIG ENDIAN */
.Last_cmp:
neg_s SHIFT,SHIFT
lsr r4,r4,SHIFT                 ; drop the bytes beyond n
lsr r5,r5,SHIFT
; slow track insn
.Leven:
sub.f r0,r4,r5
mov.ne r0,1                     ; difference => start from +1 ...
j_s.d [blink]
bset.cs r0,r0,31                ; (delay slot) ... negative if r4 < r5
.Lodd:
cmp_s WORD2,r12
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
mov_s r0,1
j_s.d [blink]
bset.cs r0,r0,31                ; (delay slot) negative if WORD2 < r12
#else
j_s.d [blink]
rrc r0,2                        ; (delay slot) r0 = 1 or 0x80000001 via carry
#endif /* __ARC700__ || __ARCEM__ || __ARCHS__ */
#endif /* ENDIAN */
.balign 4
.Lbytewise:
breq r2,0,.Lnil                 ; n == 0 -> equal
ldb r4,[r0,0]
ldb r5,[r1,0]
lsr.f lp_count,r3               ; (n-1)/2 iterations; C = odd byte left over
#ifdef __ARCEM__
mov r12,r3
lpne .Lbyte_end
brne r3,r12,.Lbyte_odd
#else
lpne .Lbyte_end
#endif
ldb_s r3,[r0,1]
ldb_l r12,[r1,1]
brne r4,r5,.Lbyte_even          ; first byte pair differs
ldb.a r4,[r0,2]
ldb.a r5,[r1,2]
#ifdef __ARCEM__
.Lbyte_end:
brne r3,r12,.Lbyte_odd
#else
brne r3,r12,.Lbyte_odd          ; second byte pair differs
#ifdef __ARCHS__
nop
#endif
.Lbyte_end:
#endif
bcc_l .Lbyte_even               ; carry clear: no odd trailing byte
brne r4,r5,.Lbyte_even
ldb_s r3,[r0,1]
ldb_s r12,[r1,1]
.Lbyte_odd:
j_s.d [blink]
sub r0,r3,r12                   ; (delay slot) byte difference
.Lbyte_even:
j_s.d [blink]
sub r0,r4,r5                    ; (delay slot) byte difference
.Lnil:
j_s.d [blink]
mov_l r0,0                      ; (delay slot) equal
ENDFUNC (memcmp)
#endif /* !__ARC601__ && __ARC_NORM__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,322
|
plugin-libc/newlib/libc/machine/arc/strcpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
The beq_s to r3z could be made unaligned & long to avoid a stall
there, but it is not likely to be taken often, and it
would also be likely to cost an unaligned mispredict at the next call. */
; char *strcpy (char *dst /*r0*/, const char *src /*r1*/)
; Returns dst (r0). r8 = 0x01010101 and r12 = 0x80808080 are the
; word-at-a-time NUL-detector constants; r10 is the advancing dst pointer.
ENTRY (strcpy)
or r2,r0,r1
bmsk_s r2,r2,1
brne.d r2,0,charloop            ; misaligned operands -> bytewise copy
mov_s r10,r0                    ; (delay slot) r10 = working dst
ld_s r3,[r1,0]                  ; first source word
mov r8,0x01010101
bbit0.d r1,2,loop_start         ; src already 8-byte aligned? skip the fixup
ror r12,r8                      ; (delay slot) r12 = 0x80808080
sub r2,r3,r8
bic_s r2,r2,r3
tst_s r2,r12                    ; NUL byte in the first word?
bne_l r3z
mov_s r4,r3
.balign 4
loop:
ld.a r3,[r1,4]
st.ab r4,[r10,4]
loop_start:
ld.a r4,[r1,4]                  ; read ahead the next source word
sub r2,r3,r8
bic_s r2,r2,r3
tst_l r2,r12                    ; NUL in r3?
bne_l r3z
st.ab r3,[r10,4]
sub r2,r4,r8
bic r2,r2,r4
tst_l r2,r12                    ; NUL in r4?
beq_l loop
mov_s r3,r4                     ; fall through with the terminating word
#ifdef __LITTLE_ENDIAN__
r3z: bmsk.f r1,r3,7             ; r1 = lowest byte of r3; Z iff it is NUL
lsr_s r3,r3,8
#else
r3z: lsr.f r1,r3,24             ; r1 = highest byte of r3; Z iff it is NUL
asl_s r3,r3,8
#endif
bne.d r3z                       ; copy the final word a byte at a time
stb.ab r1,[r10,1]               ; (delay slot) stores the NUL itself too
j_s [blink]
.balign 4
charloop:
ldb.ab r3,[r1,1]
brne.d r3,0,charloop
stb.ab r3,[r10,1]               ; (delay slot) also stores the final NUL
j [blink]
ENDFUNC (strcpy)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_BARREL_SHIFTER__ */
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,969
|
plugin-libc/newlib/libc/machine/arc/memcmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || !defined (__ARC_NORM__) \
|| !defined (__ARC_BARREL_SHIFTER__)
/* Addresses are unsigned, and at 0 is the vector table, so it's OK to assume
that we can subtract 8 from a source end address without underflow. */
;-----------------------------------------------------------------------
; int memcmp (const void *s1, const void *s2, size_t n)
; In:   r0 = s1, r1 = s2, r2 = n
; Out:  r0 = negative / zero / positive comparison result
; Uses: blink = return address; ".d" branches execute their delay slot.
; Compares one 32-bit word per half-iteration when both pointers are
; 4-byte aligned, reading ahead by up to 8 bytes; otherwise (and, per
; the note below, always for big endian) falls back to a bytewise loop.
;-----------------------------------------------------------------------
ENTRY (memcmp)
or r12,r0,r1 ; OR the pointers so one alignment test covers both
tst r12,3 ; Z set iff both s1 and s2 are word-aligned
breq r2,0,.Lnil ; n == 0 -> return 0
add_s r3,r0,r2 ; r3 = s1 + n (one past the end of s1)
/* This algorithm for big endian targets sometimes works incorrectly
when sources are aligned. To be precise the last step is omitted.
Just use a simple bytewise variant until the algorithm is reviewed
and fixed. */
#ifdef __LITTLE_ENDIAN__
bne_s .Lbytewise ; unaligned -> bytewise loop
#else /* BIG ENDIAN */
b_s .Lbytewise ; big endian: always bytewise (see note above)
#endif /* ENDIAN */
sub r6,r3,8 ; r6 = bound: safe to read 8 bytes while r0 < r6
ld r4,[r0,0] ; prime the pipeline with the first word pair
ld r5,[r1,0]
2:
brhs r0,r6,.Loop_end ; fewer than 8 bytes left -> tail handling
ld_s r3,[r0,4] ; read-ahead of the next word pair ...
ld_s r12,[r1,4]
brne r4,r5,.Leven ; current pair differs -> resolve result
ld.a r4,[r0,8] ; ... advance and refill for the next round
breq.d r3,r12,2b
ld.a r5,[r1,8] ; delay slot: executed on both paths
; fall through: the word pair loaded into r3/r12 differs
#ifdef __LITTLE_ENDIAN__
mov_s r4,r3
b.d .Lodd
mov_s r5,r12 ; delay slot: r4/r5 := differing pair
#else /* BIG ENDIAN */
cmp_s r3,r12
j_s.d [blink]
rrc r0,2 ; delay slot: derive sign of result from carry
#endif /* ENDIAN */
.balign 4
.Loop_end:
sub r3,r0,r6 ; r3 = how far past the bound we are
brhs r3,4,.Last_cmp ; only the already-loaded pair remains
brne r4,r5,.Leven
ld r4,[r0,4] ; one more full word pair to compare
ld r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
.balign 4
.Last_cmp:
mov_l r0,24
add3 r2,r0,r2 ; bit index derived from n (taken mod 32 by bset)
xor r0,r4,r5
b.d .Leven_cmp
bset r0,r0,r2 ; NOTE(review): sentinel difference bit just past
; the last valid byte -- verify index math
.Lodd:
.Leven:
xor r0,r4,r5 ; r0 = difference mask of the mismatching pair
.Leven_cmp:
mov_s r1,0x80808080
; uses long immediate
sub_s r12,r0,1
bic_s r0,r0,r12 ; isolate least significant difference bit
sub r0,r1,r0
xor_s r0,r0,r1 ; mask covering the least significant diff byte
and r1,r5,r0 ; extract that byte from each operand word
and r0,r4,r0
#else /* BIG ENDIAN */
.Last_cmp:
mov_s r3,0
sub3 r2,r3,r2
sub_s r3,r3,1
bclr r3,r3,r2
add_l r3,r3,1 ; build mask selecting only the valid bytes
and r0,r4,r3
and r1,r5,r3
.Leven:
#endif /* ENDIAN */
xor.f 0,r0,r1 ; set N if the subtraction below would mislead
sub_s r0,r0,r1
j_s.d [blink]
mov.mi r0,r1 ; delay slot: fix up sign of the result
.balign 4
.Lbytewise:
ldb r4,[r0,0] ; prime with the first byte pair
ldb r5,[r1,0]
sub r6,r3,2 ; bound for the 2-bytes-per-iteration loop
3:
brhs r0,r6,.Lbyte_end
ldb_s r3,[r0,1] ; read-ahead of the next byte pair
ldb_s r12,[r1,1]
brne r4,r5,.Lbyte_even
ldb.a r4,[r0,2]
breq.d r3,r12,3b
ldb.a r5,[r1,2] ; delay slot: refill for next round
.Lbyte_odd:
j_s.d [blink]
sub r0,r3,r12 ; delay slot: result from differing read-ahead pair
.balign 4
.Lbyte_end:
bbit1 r2,0,.Lbyte_even ; odd n: the pair in r4/r5 is the last one
brne r4,r5,.Lbyte_even
ldb r4,[r0,1] ; even n: one final byte pair
ldb r5,[r1,1]
.Lbyte_even:
j_s.d [blink]
sub r0,r4,r5 ; delay slot: byte difference is the result
.Lnil:
j_s.d [blink]
mov_s r0,0 ; delay slot: n == 0 -> equal
ENDFUNC (memcmp)
#endif /* __ARC601__ || !__ARC_NORM__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,143
|
plugin-libc/newlib/libc/machine/arc/strlen.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined(__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
/* This code is optimized for the ARC601 pipeline without barrel shifter. */
;-----------------------------------------------------------------------
; size_t strlen (const char *s)
; In:   r0 = s
; Out:  r0 = length
; Uses: blink = return address; ".d" branches execute their delay slot.
; Reads two aligned words per iteration (8 bytes) and detects a zero
; byte in word W via (W - 0x01010101) & ~W & 0x80808080 (r4 holds the
; 0x01... constant, r5 = ror(r4) = 0x80808080).  The first, possibly
; misaligned, word pair is loaded from the rounded-down address and the
; leading garbage bytes are masked out before the zero test.
;-----------------------------------------------------------------------
ENTRY (strlen)
or r3,r0,7 ; r3 = last byte of the first aligned 8-byte group
ld r2,[r3,-7] ; first aligned word (may precede s)
ld.a r6,[r3,-3] ; second word; r3 now points into the group
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
bmsk.f 0,r0,1 ; test s & 3 (word misalignment)
mov_s r1,31
add3_s r1,r1,r0 ; bit index covering the leading garbage bytes
bmsk r7,r4,r1
xor.ne r7,r7,r4 ; r7 = 0x01... with leading-garbage bytes cleared
btst_s r0,2 ; which word of the pair does s start in?
ror r5,r4 ; r5 = 0x80808080
sub r1,r2,r7
bic_s r1,r1,r2 ; zero-detect on first word, garbage masked
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6 ; zero-detect on second word
or.eq r12,r12,r1 ; include first word only if s starts there
and r12,r12,r5
brne r12,0,.Learly_end ; zero byte within the first group
#else /* BIG ENDIAN */
add.f r1,r4,30 ; r1 mod 31 := -1; clear carry
ror r5,r4 ; r5 = 0x80808080
sub3 r7,r1,r0 ; bit index below which bytes are garbage
btst_s r0,2 ; which word of the pair does s start in?
sub r1,r2,r4
bic_s r1,r1,r2
bmsk r1,r1,r7 ; zero-detect on first word, garbage masked
sub r12,r6,r4
bic r12,r12,r6
bmsk.ne r12,r12,r7 ; zero-detect on second word (masked if s there)
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end ; zero byte within the first group
#endif /* ENDIAN */
.Loop:
ld_s r2,[r3,4] ; next word pair
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4
bic_s r1,r1,r2 ; zero-detect first word of the pair
sub r12,r6,r4
bic r12,r12,r6 ; zero-detect second word
or_s r12,r12,r1
and r12,r12,r5
breq_s r12,0,.Loop ; no zero byte in 8 bytes -> keep scanning
.Lend:
and.f r1,r1,r5 ; did the FIRST word contain the zero?
sub.ne r3,r3,4 ; yes -> step r3 back to that word
#ifdef __LITTLE_ENDIAN__
mov.eq r1,r12 ; else use the second word's detect mask
btst_s r1,7 ; locate the lowest 0x80 marker byte ...
sub r0,r3,r0 ; base length = word address - s
add.eq r0,r0,1
bmsk.f 0,r1,15
add.eq r0,r0,1
bmsk.f 0,r1,23
j_s.d [blink]
add.eq r0,r0,1 ; delay slot: final byte adjustment
#else /* BIG ENDIAN */
#ifdef __OPTIMIZE_SIZE__
/* NOTE(review): dead branch -- the enclosing #if at file top excludes
__OPTIMIZE_SIZE__, so this bytewise tail can never be compiled in. */
1: ldb_s r1,[r3]
breq_s r1,0,0f
ldb.a r1,[r3,1]
breq_s r1,0,0f
ldb.a r1,[r3,1]
breq_s r1,0,0f
add_s r3,r3,1
0: j_s.d [blink]
sub r0,r3,r0
#define SPECIAL_EARLY_END
.Learly_end:
mov_s r3,r0
b_s 1b
#elif 0 /* Need more information about pipeline to assess if this is faster. */
mov.eq r2,r6
and r2,r2,r5
sub1 r2,r4,r2
mov.eq r1,r12
bic.f r1,r1,r2
sub r0,r3,r0
add.pl r0,r0,1
btst.pl r1,23
add.eq r0,r0,1
btst.eq r1,15
j_s.d [blink]
add.eq r0,r0,1
#else /* !__OPTIMIZE_SIZE__ */
/* Need carry clear here. */
mov.eq r2,r6 ; word containing the zero byte
1: bmsk r1,r2,23 ; find the most significant zero byte ...
breq r1,r2,0f
bmsk r2,r1,15
breq.d r1,r2,0f
add_s r3,r3,1
cmp r2,0x100
add_s r3,r3,2
0: j_s.d [blink]
sbc r0,r3,r0 ; delay slot: length, adjusted via carry
#define SPECIAL_EARLY_END
.Learly_end:
sub_s.ne r1,r1,r1
mov_s r12,0
bset r12,r12,r7 ; plant a sentinel zero in the garbage region
sub1 r2,r2,r12
b.d .Lend
sub1.ne r6,r6,r12 ; delay slot
#endif /* !__OPTIMIZE_SIZE__ */
#endif /* ENDIAN */
#ifndef SPECIAL_EARLY_END
.balign 4
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1 ; delay slot: clear r1 if zero was in 2nd word
#endif /* !SPECIAL_EARLY_END */
ENDFUNC (strlen)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__*/
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 5,286
|
plugin-libc/newlib/libc/machine/arc/strchr.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* In order to search for a zero in a W, we calculate
X := (W - 0x01010101) & ~W & 0x80808080;
In the little endian case:
If no byte in W is zero, X will be zero; otherwise, the least significant
byte of X which is nonzero indicates the least significant byte of W that
is zero.
In the big endian case:
X will be zero iff no byte in W is zero.
If X is nonzero, to find out which is the most significant zero byte
in W, we calculate:
Y := ~(((W | 0x80808080) - 0x01010101) | W) & 0x80808080;
Each byte in Y is 0x80 if the corresponding byte in
W is zero, otherwise that byte of Y is 0. */
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
;-----------------------------------------------------------------------
; char *strchr (const char *s, int c)
; In:   r0 = s, r1 = c
; Out:  r0 = pointer to first occurrence of c (or the terminating NUL
;       when c == 0), or 0 if not found
; Uses: blink = return address; ".d" branches execute their delay slot.
; Word-at-a-time scan: r3 = 0x01010101, r4 = ror(r3) = 0x80808080,
; r5 = c replicated into all four bytes.  Zero bytes in word W are
; detected via (W - 0x01010101) & ~W & 0x80808080; occurrences of c are
; found by applying the same test to W ^ r5.  No barrel shifter is
; available on this target, so the replication of c and the
; misalignment masks are built with add2/add3 shift-and-add steps.
;-----------------------------------------------------------------------
ENTRY (strchr)
bmsk.f r2,r0,1 ; r2 = s & 3; Z set when s is word-aligned
mov_s r3,0x01010101
extb_s r1,r1 ; c as unsigned byte
mov r8,0
add3 r5,r8,r1 ; replicate c via shift-by-3 add steps ...
add3 r5,r8,r5
add2 r5,r1,r5 ; ... r5 = c in the low two bytes
add3 r4,r8,r5
add3 r4,r8,r4
add3 r4,r8,r4
add3 r4,r8,r4
beq.d .Laligned
add3 r4,r8,r4 ; delay slot: r4 = c in the high two bytes
; unaligned entry: round s down and mask the leading garbage bytes
sub_s r0,r0,r2
#ifdef __LITTLE_ENDIAN__
add3.f r2,-1,r2
bmsk r7,r3,r2
rsub.pl r7,r7,r3 ; r7 = 0x01... with garbage bytes cleared
#else
mov_s r12,31
sub3 r2,r12,r2
bmsk r7,r3,r2 ; r7 = 0x01... restricted to valid bytes
#endif
ld_s r2,[r0] ; first (aligned) word
add1 r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r7
bic_s r12,r12,r2 ; zero-detect, garbage masked
and r12,r12,r4
brne.d r12,0,.Lfound0_ua
xor r6,r2,r5 ; delay slot: r6 = word XOR replicated c
ld.a r2,[r0,4] ; read ahead the next word
sub r12,r6,r7
bic r12,r12,r6 ; c-detect on the first word, garbage masked
#ifdef __LITTLE_ENDIAN__
and.f r7,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no c in first word -> enter main loop
and r12,r12,r4 ; delay slot: zero-detect on read-ahead word
b.d .Lfound_char_ua
btst r7,7 ; delay slot: start locating the match byte
#else
and.f r8,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no c in first word -> enter main loop
and r12,r12,r4 ; delay slot: zero-detect on read-ahead word
bic r12,r7,r6
bic r2,r3,r12
sub1 r2,r3,r2
sub_s r0,r0,4
b.d .Lfound_char_ua
bic.f r2,r8,r2 ; delay slot: discard matches after a NUL
#endif
.balign 4
.Laligned:
ld_s r2,[r0] ; first word
add1 r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r3
bic_s r12,r12,r2
and r12,r12,r4 ; zero-detect mask for the first word
.Loop:
brne.d r12,0,.Lfound0 ; word contains a NUL byte
xor r6,r2,r5 ; delay slot: r6 = word XOR replicated c
ld.a r2,[r0,4] ; read ahead the next word
sub r12,r6,r3
bic r12,r12,r6
and.f r7,r12,r4 ; r7 = c-detect mask for the current word
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no c found -> keep scanning
and r12,r12,r4 ; delay slot: zero-detect on read-ahead word
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
btst r7,7 ; match in byte 0?
.Lfound_char_ua:
sub_s r0,r0,4 ; undo the read-ahead advance
add.eq r0,r0,1 ; step past non-matching low bytes ...
btst.eq r7,15
add.eq r0,r0,1
btst.eq r7,23
j_s.d [blink]
add.eq r0,r0,1 ; delay slot: final byte step
.balign 4
.Lfound0_ua:
mov_l r3,r7 ; use the garbage-masked constant below
.Lfound0:
; word contains a NUL; check whether c occurs at or before it
sub r2,r6,r3
bic r2,r2,r6
and r2,r2,r4 ; r2 = c-detect mask for this word
or r3,r12,r2 ; combined NUL/c markers
sub_s r12,r3,1
xor_s r3,r3,r12 ; isolate lowest marker (NUL or c)
cmp 0xffff,r3
; cmp uses limm ; ARC600 would do: asl.f 0,r3,9
tst_s r2,r3 ; was the lowest marker a c match?
mov.eq r0,0 ; no -> NUL first -> return NULL
add.mi r0,r0,1 ; otherwise compute &match from marker byte
btst.ne r3,15
j_s.d [blink]
adc.ne r0,r0,1 ; delay slot
#else /* BIG ENDIAN */
.Lfound_char:
and r2,r6,r3
sub1 r2,r3,r2
sub_s r0,r0,4 ; undo the read-ahead advance
bic.f r2,r7,r2 ; adjusted match mask (see strcmp note on 0x01)
.Lfound_char_ua:
add.pl r0,r0,1 ; step past non-matching high bytes ...
jmi.d [blink]
btst_s r2,23 ; delay slot
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1 ; delay slot: final byte step
; N.B. if we searched for a char zero and found it in the MSB,
; and ignored matches are identical, we will take the early exit
; like for an ordinary found zero - except for the extra stalls at jhi -
; but still compute the right result.
.Lfound0_ua:
mov_s r3,r7 ; use the garbage-masked constant below
.Lfound0:
; word contains a NUL; check whether c occurs at or before it
and_s r2,r2,r3
sub1 r2,r3,r2
or r7,r6,r4
bic_s r12,r12,r2
sub r2,r7,r3
or r2,r2,r6
bic r2,r4,r2 ; exact NUL-position mask (Y formula above)
cmp_s r12,r2 ; c marker before the first NUL?
mov.hi r0,0 ; no -> return NULL
btst.ls r2,31 ; else locate the NUL/c byte position ...
jhi.d [blink]
add.eq r0,r0,1 ; delay slot
btst.eq r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1 ; delay slot: final byte step
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,353
|
plugin-libc/newlib/libc/machine/arc/strlen-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC600__) || !defined (__ARC_NORM__)) && !defined (__ARC601__) \
&& defined (__ARC_BARREL_SHIFTER__)
/* This code is optimized for the ARC600 pipeline. */
;-----------------------------------------------------------------------
; size_t strlen (const char *s)   -- barrel-shifter (ARC600) variant
; In:   r0 = s
; Out:  r0 = length
; Uses: blink = return address; ".d" branches execute their delay slot.
; Same 8-bytes-per-iteration algorithm as the non-BS variant: zero
; bytes in word W are detected via (W - 0x01010101) & ~W & 0x80808080
; (r4 = 0x01010101, r5 = ror(r4) = 0x80808080); here the misalignment
; masks and the final byte location use real shift instructions.
;-----------------------------------------------------------------------
ENTRY (strlen)
or r3,r0,7 ; r3 = last byte of the first aligned 8-byte group
ld r2,[r3,-7] ; first aligned word (may precede s)
ld.a r6,[r3,-3] ; second word; r3 now points into the group
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
asl_s r1,r0,3 ; (s & 31) * 8 = bit offset of the first valid byte
btst_s r0,2 ; which word of the pair does s start in?
asl r7,r4,r1 ; r7 = 0x01... with leading-garbage bytes cleared
ror r5,r4 ; r5 = 0x80808080
sub r1,r2,r7
bic_l r1,r1,r2 ; zero-detect on first word, garbage masked
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6 ; zero-detect on second word
or.eq r12,r12,r1 ; include first word only if s starts there
and r12,r12,r5
brne r12,0,.Learly_end ; zero byte within the first group
#else /* BIG ENDIAN */
ror r5,r4 ; r5 = 0x80808080
btst_s r0,2 ; which word of the pair does s start in?
mov_s r1,31
sub3 r7,r1,r0 ; bit index below which bytes are garbage
sub r1,r2,r4
bic_l r1,r1,r2
bmsk r1,r1,r7 ; zero-detect on first word, garbage masked
sub r12,r6,r4
bic r12,r12,r6
bmsk.ne r12,r12,r7 ; zero-detect on second word (masked if s there)
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end ; zero byte within the first group
#endif /* ENDIAN */
.Loop:
ld_s r2,[r3,4] ; next word pair
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4
bic_s r1,r1,r2 ; zero-detect first word of the pair
sub r12,r6,r4
bic r12,r12,r6 ; zero-detect second word
or_s r12,r12,r1
and r12,r12,r5
breq_s r12,0,.Loop ; no zero byte in 8 bytes -> keep scanning
.Lend:
and.f r1,r1,r5 ; did the FIRST word contain the zero?
sub.ne r3,r3,4 ; yes -> step r3 back to that word
#ifdef __LITTLE_ENDIAN__
mov.eq r1,r12 ; else use the second word's detect mask
asr.f 0,r1,8 ; probe marker bytes via shifted flag tests
bmsk.f 0,r1,15
sub r0,r3,r0 ; base length = word address - s
add.cc r0,r0,1
jne.d [blink]
asl.f 0,r1,9 ; delay slot: sets carry for the sbc below
j_s.d [blink]
sbc r0,r0,-2 ; delay slot: final byte adjustment
#else /* BIG ENDIAN */
mov.eq r2,r6 ; word containing the zero byte
asl_s r2,r2,7 ; compensate 0x01-byte false positives
mov.eq r1,r12
bic_s r1,r1,r2
asr.f 0,r1,16 ; probe marker bytes via shifted flag tests
sub r0,r3,r0 ; base length = word address - s
add.pl r0,r0,1
jne.d [blink]
add.eq r0,r0,1 ; delay slot
j_s.d [blink]
add.cc r0,r0,1 ; delay slot: final byte adjustment
#endif /* ENDIAN */
.balign 4
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1 ; delay slot: clear r1 if zero was in 2nd word
ENDFUNC (strlen)
#endif /* (__ARC600__ || !__ARC_NORM__) && !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,439
|
plugin-libc/newlib/libc/machine/arc/strcmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* This is optimized primarily for the ARC700.
It would be possible to speed up the loops by one cycle / word
respective one cycle / byte by forcing double source 1 alignment, unrolling
by a factor of two, and speculatively loading the second word / byte of
source 1; however, that would increase the overhead for loop setup / finish,
and strcmp might often terminate early. */
#ifndef __ARCHS__
;-----------------------------------------------------------------------
; int strcmp (const char *s1, const char *s2)
; In:   r0 = s1, r1 = s2
; Out:  r0 = negative / zero / positive comparison result
; Uses: blink = return address; ".d" branches execute their delay slot.
; When both strings are word-aligned, compares one word per iteration,
; detecting a NUL in word W via (W - 0x01010101) & ~W & 0x80808080
; (r12 = 0x01010101, r5 = ror(r12) = 0x80808080); otherwise falls back
; to a bytewise loop.
;-----------------------------------------------------------------------
ENTRY (strcmp)
or r2,r0,r1 ; OR the pointers so one alignment test covers both
bmsk_s r2,r2,1
brne_l r2,0,.Lcharloop ; unaligned -> bytewise loop
mov_s r12,0x01010101
ror r5,r12 ; r5 = 0x80808080
.Lwordloop:
ld.ab r2,[r0,4] ; next word of each string, post-increment
ld.ab r3,[r1,4]
nop_s
sub r4,r2,r12
bic r4,r4,r2
and r4,r4,r5 ; r4 = zero-byte detect mask for the s1 word
brne_l r4,0,.Lfound0 ; s1 word contains a NUL
breq r2,r3,.Lwordloop ; words equal, no NUL -> keep scanning
; words differ and contain no NUL
#ifdef __LITTLE_ENDIAN__
xor r0,r2,r3 ; mask for difference
sub_s r1,r0,1
bic_s r0,r0,r1 ; mask for least significant difference bit
sub r1,r5,r0
xor r0,r5,r1 ; mask for least significant difference byte
and_s r2,r2,r0 ; reduce both words to that single byte
and_s r3,r3,r0
#endif /* LITTLE ENDIAN */
cmp_s r2,r3 ; (big endian compares the whole words directly)
mov_s r0,1
j_s.d [blink]
bset.lo r0,r0,31 ; delay slot: make result negative if s1 < s2
.balign 4
#ifdef __LITTLE_ENDIAN__
.Lfound0:
xor r0,r2,r3 ; mask for difference
or r0,r0,r4 ; or in zero indicator
sub_s r1,r0,1
bic_s r0,r0,r1 ; mask for least significant difference bit
sub r1,r5,r0
xor r0,r5,r1 ; mask for least significant difference byte
and_s r2,r2,r0 ; reduce both words to that single byte
and_s r3,r3,r0
sub.f r0,r2,r3
mov.hi r0,1
j_s.d [blink]
bset.lo r0,r0,31 ; delay slot: make result negative if s1 < s2
#else /* BIG ENDIAN */
/* The zero-detection above can mis-detect 0x01 bytes as zeroes
because of carry-propagation from a lower significant zero byte.
We can compensate for this by checking that bit0 is zero.
This compensation is not necessary in the step where we
get a low estimate for r2, because in any affected bytes
we already have 0x00 or 0x01, which will remain unchanged
when bit 7 is cleared. */
.balign 4
.Lfound0:
#ifdef __ARC_BARREL_SHIFTER__
lsr r0,r4,8
lsr_s r1,r2
bic_s r2,r2,r0 ; get low estimate for r2 and get ...
bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
cmp_s r3,r2 ; ... be independent of trailing garbage
or_s r2,r2,r0 ; likewise for r3 > r2
bic_s r3,r3,r0
rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
cmp_s r2,r3
j_s.d [blink]
bset.lo r0,r0,31 ; delay slot: make result negative if s1 < s2
#else /* __ARC_BARREL_SHIFTER__ */
/* Fall through to .Lcharloop. */
sub_s r0,r0,4 ; rewind to re-compare the NUL word bytewise
sub_s r1,r1,4
#endif /* __ARC_BARREL_SHIFTER__ */
#endif /* ENDIAN */
.balign 4
.Lcharloop:
ldb.ab r2,[r0,1] ; one byte from each string, post-increment
ldb.ab r3,[r1,1]
nop_s
breq_l r2,0,.Lcmpend ; end of s1 -> compare last byte pair
breq r2,r3,.Lcharloop ; equal, not NUL -> keep scanning
.Lcmpend:
j_s.d [blink]
sub r0,r2,r3 ; delay slot: byte difference is the result
ENDFUNC (strcmp)
#endif /* !__ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,445
|
plugin-libc/newlib/libc/machine/arc/strcpy-bs-arc600.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC600__) && defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
This version is optimized for the ARC600 pipeline. */
;-----------------------------------------------------------------------
; char *strcpy (char *dst, const char *src)    -- ARC600 + BS variant
; In:   r0 = dst, r1 = src
; Out:  r0 = dst (preserved; r10 is used as the write cursor)
; Uses: blink = return address; ".d" branches execute their delay slot.
; When both pointers are word-aligned, copies 8 bytes per iteration
; with one word of read-ahead; a NUL in word W is detected via
; (W - 0x01010101) & ~W & 0x80808080 (r8 = 0x01010101, r12 = ror(r8)).
; An extra word is copied first when src is 4- but not 8-byte aligned
; (see the file-header comment).  Unaligned input takes .Lcharloop.
;-----------------------------------------------------------------------
ENTRY (strcpy)
or r2,r0,r1 ; OR the pointers so one alignment test covers both
bmsk.f 0,r2,1
mov r8,0x01010101
bne.d .Lcharloop ; unaligned -> bytewise copy
mov_s r10,r0 ; delay slot: r10 = write cursor, r0 stays = dst
ld_l r3,[r1,0] ; first source word
bbit0.d r1,2,.Loop_setup ; already 8-byte aligned?
ror r12,r8 ; delay slot: r12 = 0x80808080
; src is 4- but not 8-byte aligned: copy one word to align it
sub r2,r3,r8
bic_s r2,r2,r3
and_s r2,r2,r12 ; zero-detect on the first word
brne_s r2,0,.Lr3z ; NUL already in word 0 -> byte tail
st.ab r3,[r10,4]
ld.a r3,[r1,4]
.Loop_setup:
ld.a r4,[r1,4] ; read ahead the second word
sub r2,r3,r8
and.f r2,r2,r12 ; partial zero-detect on word 1
sub r5,r4,r8
and.eq.f r5,r5,r12 ; and on word 2 (only if word 1 looked clean)
b.d .Loop_start
mov_s r6,r3 ; delay slot: r6 = word pending store
.balign 4
.Loop:
ld.a r3,[r1,4] ; next word pair (read-ahead)
st r4,[r10,4]
ld.a r4,[r1,4]
sub r2,r3,r8
and.f r2,r2,r12 ; partial zero-detect on word 1
sub r5,r4,r8
and.eq.f r5,r5,r12 ; and on word 2
st.ab r6,[r10,8]
mov r6,r3 ; r6 = word pending store
.Loop_start:
beq.d .Loop ; neither word can hold a NUL -> continue
bic_s r2,r2,r3 ; delay slot: complete the word-1 detect
brne.d r2,0,.Lr3z ; NUL in word 1 -> byte tail
and r5,r5,r12 ; delay slot: complete the word-2 detect ...
bic r5,r5,r4
breq.d r5,0,.Loop ; false alarm -> continue
mov_s r3,r4 ; delay slot: tail word is word 2
st.ab r6,[r10,4] ; flush the pending word before the byte tail
; store the bytes of r3 up to and including the terminating NUL
#ifdef __LITTLE_ENDIAN__
.Lr3z: bmsk.f r1,r3,7 ; extract lowest byte; Z set when it is NUL
.Lr3z_loop:
lsr_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop
bmsk.f r1,r3,7 ; delay slot: next byte + NUL test
j_s [blink]
#else
.Lr3z: lsr.f r1,r3,24 ; extract highest byte; Z set when it is NUL
.Lr3z_loop:
asl_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop
lsr.f r1,r3,24 ; delay slot: next byte + NUL test
j_s [blink]
#endif
.balign 4
.Lcharloop:
ldb.ab r3,[r1,1] ; plain byte copy, NUL included
brne.d r3,0,.Lcharloop
stb.ab r3,[r10,1] ; delay slot: store runs on both paths
j [blink]
ENDFUNC (strcpy)
#endif /* __ARC600__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,718
|
plugin-libc/newlib/libc/machine/arc/strncpy.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strncpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch. */
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
#define BRand(a,b,l) and a,a,b ` brne_s a,0,l
;-----------------------------------------------------------------------
; char *strncpy (char *dst, const char *src, size_t n)
; In:   r0 = dst, r1 = src, r2 = n
; Out:  r0 = dst (r10 is used as the write cursor)
; Uses: blink = return address; ".d" branches execute their delay slot;
;       tail-calls __strncpy_bzero to zero-fill the remainder (its
;       register contract is documented at .Lzero_rest below).
; For n >= 8 with both pointers word-aligned, copies 8 bytes per
; iteration; a NUL in word W is detected via the BRand macro applied
; to (W - 0x01010101) & ~W, masked with r11 = ror(r8) = 0x80808080.
; Small or unaligned cases take the bytewise path at .Lbytewise.
;-----------------------------------------------------------------------
ENTRY (strncpy)
cmp_s r2,8 ; carry set when n < 8
or r12,r0,r1 ; OR the pointers: one alignment test for both
bmsk.cc.f r12,r12,1 ; alignment only matters when n >= 8
brne.d r12,0,.Lbytewise ; small n or unaligned -> bytewise copy
mov_s r10,r0 ; delay slot: r10 = write cursor, r0 stays = dst
ld_s r3,[r1,0] ; first source word
mov r8,0x01010101
add r6,r0,r2
sub r6,r6,8 ; r6 = bound: 8-byte copies safe while r10 < r6
bbit0.d r1,2,.Loop_start ; src already 8-byte aligned?
ror r11,r8 ; delay slot: r11 = 0x80808080
; src is 4- but not 8-byte aligned: copy one word to align it
sub r12,r3,r8
bic_l r12,r12,r3
BRand (r12,r11,.Lr3z) ; NUL already in word 0 -> byte tail
mov_s r4,r3
ld.a r3,[r1,4]
st.ab r4,[r10,4]
.balign 4
.Loop_start:
brhs r10,r6,.Loop_end ; fewer than 8 bytes of budget left
1:
ld.a r4,[r1,4] ; read ahead the second word
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z2) ; NUL in word 1 -> byte tail (rewind src)
st.ab r3,[r10,8]
sub r12,r4,r8
bic r12,r12,r4
BRand (r12,r11,.Lr4z) ; NUL in word 2 -> byte tail
ld.a r3,[r1,4]
brlo.d r10,r6,1b
st r4,[r10,-4] ; delay slot: store word 2 on both paths
.Loop_end:
add r6,r6,4
brhs r10,r6,.Lastword ; room for one more whole word?
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z)
add_s r1,r1,4
st.ab r3,[r10,4]
.Lastword:
sub_s r2,r2,1
b.d .Lstart_charloop ; copy the remaining 0-3 bytes bytewise
bmsk.f r2,r2,1 ; delay slot: r2 = remaining count, Z if none
.balign 4
nop_s
.Lr3z2: sub_s r1,r1,4 ; word 1 was stored early: rewind src
.Lr4z:
.Lr3z:
; NUL found in the current word: re-copy it bytewise up to the NUL,
; then zero-fill the rest of the n-byte destination
.balign 4
.Lr3z_loop:
ldb.ab r3,[r1,1]
brne.d r3,0,.Lr3z_loop
stb.ab r3,[r10,1] ; delay slot: store runs on both paths
.Lzero_rest:
; __strncpy_bzero requires:
; return value in r0
; zeroing length in r2
; zeroing start address in r3
mov_s r3,r10
add_s r2,r2,r0 ; length = (dst + n) - cursor
b.d __strncpy_bzero ; tail call; blink still holds our caller
sub_s r2,r2,r3 ; delay slot
.balign 4
.Lbytewise:
sub.f r2,r2,1
jcs [blink] ; n == 0 -> nothing to do
.Lstart_charloop:
mov_s r3,r10 ; r3 = byte write cursor
.Lcharloop:
ldb.ab r12,[r1,1]
beq.d .Last_byte ; budget exhausted -> store final byte
sub.f r2,r2,1 ; delay slot: decrement remaining count
brne.d r12,0,.Lcharloop
stb.ab r12,[r3,1] ; delay slot: store runs on both paths
b.d __strncpy_bzero ; NUL copied -> zero-fill the remainder
stb.ab r12,[r3,1] ; delay slot: store the NUL itself
.Last_byte:
j_s.d [blink]
stb_s r12,[r3] ; delay slot: last byte, no zero-fill needed
ENDFUNC (strncpy)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 5,236
|
plugin-libc/newlib/libc/machine/arc/strchr-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* In order to search for a zero in a W, we calculate
X := (W - 0x01010101) & ~W & 0x80808080;
In the little endian case:
If no byte in W is zero, X will be zero; otherwise, the least significant
byte of X which is nonzero indicates the least significant byte of W that
is zero.
In the big endian case:
X will be zero iff no byte in W is zero.
If X is nonzero, to find out which is the most significant zero byte
in W, we calculate:
Y := ~(((W | 0x80808080) - 0x01010101) | W) & 0x80808080;
Each byte in Y is 0x80 if the corresponding byte in
W is zero, otherwise that byte of Y is 0. */
#if defined (__ARC_BARREL_SHIFTER__) && \
(defined (__ARC600__) || (!defined (__ARC_NORM__) && !defined (__ARC601__)))
;-----------------------------------------------------------------------
; char *strchr (const char *s, int c)    -- barrel-shifter variant
; In:   r0 = s, r1 = c
; Out:  r0 = pointer to first occurrence of c (or the terminating NUL
;       when c == 0), or 0 if not found
; Uses: blink = return address; ".d" branches execute their delay slot.
; Word-at-a-time scan: r3 = 0x01010101, r4 = ror(r3) = 0x80808080,
; r5 = c replicated into all four bytes (built with asl/or here, since
; a barrel shifter is available).  Zero bytes in word W are detected
; via (W - 0x01010101) & ~W & 0x80808080; occurrences of c are found
; by applying the same test to W ^ r5.
;-----------------------------------------------------------------------
ENTRY (strchr)
bmsk.f r2,r0,1 ; r2 = s & 3; Z set when s is word-aligned
mov_s r3,0x01010101
extb_s r1,r1 ; c as unsigned byte
asl r5,r1,8
or r5,r5,r1 ; r5 = c in the low two bytes
beq.d .Laligned
asl r4,r5,16 ; delay slot: r4 = c in the high two bytes
; unaligned entry: round s down and mask the leading garbage bytes
sub_s r0,r0,r2
asl_s r2,r2,3 ; misalignment in bits
#ifdef __LITTLE_ENDIAN__
asl r7,r3,r2 ; r7 = 0x01... with garbage bytes cleared
#else
lsr r7,r3,r2 ; r7 = 0x01... restricted to valid bytes
#endif
ld_s r2,[r0] ; first (aligned) word
or r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r7
bic_s r12,r12,r2 ; zero-detect, garbage masked
and r12,r12,r4
brne.d r12,0,.Lfound0_ua
xor r6,r2,r5 ; delay slot: r6 = word XOR replicated c
ld.a r2,[r0,4] ; read ahead the next word
sub r12,r6,r7
bic r12,r12,r6 ; c-detect on the first word, garbage masked
#ifdef __LITTLE_ENDIAN__
and.f r7,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no c in first word -> enter main loop
and r12,r12,r4 ; delay slot: zero-detect on read-ahead word
b.d .Lfound_char_ua
btst r7,7 ; delay slot: start locating the match byte
#else
and.f r8,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no c in first word -> enter main loop
and r12,r12,r4 ; delay slot: zero-detect on read-ahead word
bic r12,r7,r6
asl_s r12,r12,7
and.f r2,r8,r12 ; discard matches after a NUL
b.d .Lfound_char_ua
sub_s r0,r0,4 ; delay slot: undo the read-ahead advance
#endif
.balign 4
.Laligned:
ld_s r2,[r0] ; first word
or r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r3
bic_s r12,r12,r2
and r12,r12,r4 ; zero-detect mask for the first word
.Loop:
brne.d r12,0,.Lfound0 ; word contains a NUL byte
xor r6,r2,r5 ; delay slot: r6 = word XOR replicated c
ld.a r2,[r0,4] ; read ahead the next word
sub r12,r6,r3
bic r12,r12,r6
and.f r7,r12,r4 ; r7 = c-detect mask for the current word
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no c found -> keep scanning
and r12,r12,r4 ; delay slot: zero-detect on read-ahead word
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
btst r7,7 ; match in byte 0?
.Lfound_char_ua:
sub_s r0,r0,4 ; undo the read-ahead advance
add.eq r0,r0,1 ; step past non-matching low bytes ...
btst.eq r7,15
add.eq r0,r0,1
btst.eq r7,23
j_s.d [blink]
add.eq r0,r0,1 ; delay slot: final byte step
.balign 4
.Lfound0_ua:
mov_l r3,r7 ; use the garbage-masked constant below
.Lfound0:
; word contains a NUL; check whether c occurs at or before it
sub r2,r6,r3
bic r2,r2,r6
and r2,r2,r4 ; r2 = c-detect mask for this word
or r3,r12,r2 ; combined NUL/c markers
sub_s r12,r3,1
xor_s r3,r3,r12 ; isolate lowest marker (NUL or c)
tst_s r2,r3 ; was the lowest marker a c match?
lsr r2,r3,31
lsr r12,r3,16
jeq.d [blink]
mov.eq r0,0 ; delay slot: NUL first -> return NULL
lsr r3,r3,8
sub_s r2,r2,r12 ; compute byte offset from marker position
sub_s r2,r2,r3
bmsk_s r2,r2,1
j_s.d [blink]
add_s r0,r0,r2 ; delay slot: &match
#else /* BIG ENDIAN */
.Lfound_char:
asl r6,r6,7 ; compensate 0x01-byte false positives
sub_s r0,r0,4 ; undo the read-ahead advance
bic.f r2,r7,r6
.Lfound_char_ua:
add.pl r0,r0,1 ; step past non-matching high bytes ...
jmi.d [blink]
btst_s r2,23 ; delay slot
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1 ; delay slot: final byte step
; N.B. if we searched for a char zero and found it in the MSB,
; and ignored matches are identical, we will take the early exit
; like for an ordinary found zero - except for the extra stalls at jhi -
; but still compute the right result.
.Lfound0_ua:
mov_s r3,r7 ; use the garbage-masked constant below
.Lfound0:
; word contains a NUL; check whether c occurs at or before it
asl_s r2,r2,7 ; compensate 0x01-byte false positives
or r7,r6,r4
bic_s r12,r12,r2
sub r2,r7,r3
or r2,r2,r6
bic r2,r4,r2 ; exact NUL-position mask (Y formula above)
cmp r12,r2 ; c marker before the first NUL?
mov.hi r0,0 ; no -> return NULL
btst.ls r2,31 ; else locate the NUL/c byte position ...
jhi.d [blink]
add.eq r0,r0,1 ; delay slot
btst.eq r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1 ; delay slot: final byte step
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* __ARC_BARREL_SHIFTER__ &&
(__ARC600__ || (!__ARC_NORM__ && !__ARC601__)) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 2,589
|
plugin-libc/newlib/libc/machine/riscv/setjmp.S
|
/* Copyright (c) 2017 SiFive Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the FreeBSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses.
*/
#include <sys/asm.h>
/* int setjmp (jmp_buf); */
# int setjmp (jmp_buf env)
# In:  a0 = env
# Out: a0 = 0 (the direct return; longjmp later makes this call site
#      appear to return a nonzero value)
# Saves ra, the callee-saved integer registers, and sp into env; unless
# built for a soft-float ABI, also saves the callee-saved FP registers
# fs0-fs11 after the 14 integer slots.  RV32E (__riscv_32e) only has
# s0/s1 callee-saved, so its buffer layout is shorter (sp at slot 3).
# The longjmp below must load from exactly the same offsets.
.globl setjmp
.type setjmp, @function
setjmp:
REG_S ra, 0*SZREG(a0) # slot 0: return address
REG_S s0, 1*SZREG(a0)
REG_S s1, 2*SZREG(a0)
#ifndef __riscv_32e
REG_S s2, 3*SZREG(a0)
REG_S s3, 4*SZREG(a0)
REG_S s4, 5*SZREG(a0)
REG_S s5, 6*SZREG(a0)
REG_S s6, 7*SZREG(a0)
REG_S s7, 8*SZREG(a0)
REG_S s8, 9*SZREG(a0)
REG_S s9, 10*SZREG(a0)
REG_S s10,11*SZREG(a0)
REG_S s11,12*SZREG(a0)
REG_S sp, 13*SZREG(a0) # slot 13: stack pointer
#else
REG_S sp, 3*SZREG(a0) # RV32E layout: sp directly after s0/s1
#endif
#ifndef __riscv_float_abi_soft
# FP slots start after the 14 integer register slots
FREG_S fs0, 14*SZREG+ 0*SZFREG(a0)
FREG_S fs1, 14*SZREG+ 1*SZFREG(a0)
FREG_S fs2, 14*SZREG+ 2*SZFREG(a0)
FREG_S fs3, 14*SZREG+ 3*SZFREG(a0)
FREG_S fs4, 14*SZREG+ 4*SZFREG(a0)
FREG_S fs5, 14*SZREG+ 5*SZFREG(a0)
FREG_S fs6, 14*SZREG+ 6*SZFREG(a0)
FREG_S fs7, 14*SZREG+ 7*SZFREG(a0)
FREG_S fs8, 14*SZREG+ 8*SZFREG(a0)
FREG_S fs9, 14*SZREG+ 9*SZFREG(a0)
FREG_S fs10,14*SZREG+10*SZFREG(a0)
FREG_S fs11,14*SZREG+11*SZFREG(a0)
#endif
li a0, 0 # direct call returns 0
ret
.size setjmp, .-setjmp
/* volatile void longjmp (jmp_buf, int); */
/* Restore the state saved by setjmp from the jmp_buf at a0 and resume
   execution after the original setjmp call (by restoring ra and sp along
   with the other callee-saved registers and returning).  The revived
   setjmp call returns a1, except that a requested value of 0 is forced
   to 1 as the C standard requires.  */
.globl longjmp
.type longjmp, @function
longjmp:
REG_L ra, 0*SZREG(a0)
REG_L s0, 1*SZREG(a0)
REG_L s1, 2*SZREG(a0)
#ifndef __riscv_32e
REG_L s2, 3*SZREG(a0)
REG_L s3, 4*SZREG(a0)
REG_L s4, 5*SZREG(a0)
REG_L s5, 6*SZREG(a0)
REG_L s6, 7*SZREG(a0)
REG_L s7, 8*SZREG(a0)
REG_L s8, 9*SZREG(a0)
REG_L s9, 10*SZREG(a0)
REG_L s10,11*SZREG(a0)
REG_L s11,12*SZREG(a0)
REG_L sp, 13*SZREG(a0)
#else
# RV32E layout: sp is stored right after s0/s1 (see setjmp).
REG_L sp, 3*SZREG(a0)
#endif
#ifndef __riscv_float_abi_soft
FREG_L fs0, 14*SZREG+ 0*SZFREG(a0)
FREG_L fs1, 14*SZREG+ 1*SZFREG(a0)
FREG_L fs2, 14*SZREG+ 2*SZFREG(a0)
FREG_L fs3, 14*SZREG+ 3*SZFREG(a0)
FREG_L fs4, 14*SZREG+ 4*SZFREG(a0)
FREG_L fs5, 14*SZREG+ 5*SZFREG(a0)
FREG_L fs6, 14*SZREG+ 6*SZFREG(a0)
FREG_L fs7, 14*SZREG+ 7*SZFREG(a0)
FREG_L fs8, 14*SZREG+ 8*SZFREG(a0)
FREG_L fs9, 14*SZREG+ 9*SZFREG(a0)
FREG_L fs10,14*SZREG+10*SZFREG(a0)
FREG_L fs11,14*SZREG+11*SZFREG(a0)
#endif
seqz a0, a1
add a0, a0, a1 # a0 = (a1 == 0) ? 1 : a1
ret
.size longjmp, .-longjmp
|
4ms/metamodule-plugin-sdk
| 1,981
|
plugin-libc/newlib/libc/machine/riscv/memset.S
|
/* Copyright (c) 2017 SiFive Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the FreeBSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses.
*/
.text
# void *memset(void *s, int c, size_t n)
# In:  a0 = s, a1 = fill byte, a2 = n.  Out: a0 = s (unchanged).
.global memset
.type memset, @function
memset:
#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
# Size-optimized variant: plain byte-at-a-time store loop.
mv t1, a0
beqz a2, 2f
1:
sb a1, 0(t1)
add a2, a2, -1
add t1, t1, 1
bnez a2, 1b
2:
ret
#else
# Speed-optimized variant: align the destination, broadcast the fill
# byte across a register, store 16 bytes per iteration, and finish the
# tail by jumping into a table of byte stores (one 4-byte slot each,
# hence the `sll ...,2` scaling and the .option norvc to keep slots
# fixed-size).
li t1, 15
move a4, a0
bleu a2, t1, .Ltiny
and a5, a4, 15
bnez a5, .Lmisaligned
.Laligned:
bnez a1, .Lwordify
.Lwordified:
# a3 = end of the 16-byte-aligned portion; a2 = leftover byte count.
and a3, a2, ~15
and a2, a2, 15
add a3, a3, a4
#if __riscv_xlen == 64
1:sd a1, 0(a4)
sd a1, 8(a4)
#else
1:sw a1, 0(a4)
sw a1, 4(a4)
sw a1, 8(a4)
sw a1, 12(a4)
#endif
add a4, a4, 16
bltu a4, a3, 1b
bnez a2, .Ltiny
ret
.Ltiny:
# Jump into .Ltable so that exactly a2 trailing bytes get stored.
sub a3, t1, a2
sll a3, a3, 2
1:auipc t0, %pcrel_hi(.Ltable)
add a3, a3, t0
.option push
.option norvc
.Ltable_misaligned:
jr a3, %pcrel_lo(1b)
.Ltable:
sb a1,14(a4)
sb a1,13(a4)
sb a1,12(a4)
sb a1,11(a4)
sb a1,10(a4)
sb a1, 9(a4)
sb a1, 8(a4)
sb a1, 7(a4)
sb a1, 6(a4)
sb a1, 5(a4)
sb a1, 4(a4)
sb a1, 3(a4)
sb a1, 2(a4)
sb a1, 1(a4)
sb a1, 0(a4)
.option pop
ret
.Lwordify:
# Replicate the fill byte into every byte of a1.
and a1, a1, 0xFF
sll a3, a1, 8
or a1, a1, a3
sll a3, a1, 16
or a1, a1, a3
#if __riscv_xlen == 64
sll a3, a1, 32
or a1, a1, a3
#endif
j .Lwordified
.Lmisaligned:
# Store the first (16 - misalignment) bytes via the table as a
# subroutine call, then fall through to the aligned path.
sll a3, a5, 2
1:auipc t0, %pcrel_hi(.Ltable_misaligned)
add a3, a3, t0
mv t0, ra
jalr a3, %pcrel_lo(1b)
mv ra, t0
add a5, a5, -16
sub a4, a4, a5
add a2, a2, a5
bleu a2, t1, .Ltiny
j .Laligned
#endif
.size memset, .-memset
|
4ms/metamodule-plugin-sdk
| 3,647
|
plugin-libc/newlib/libc/machine/riscv/strcmp.S
|
/* Copyright (c) 2017 SiFive Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the FreeBSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses.
*/
#include <sys/asm.h>
.text
# int strcmp(const char *s1, const char *s2)
# In:  a0 = s1, a1 = s2.  Out: a0 <0/0/>0 per the usual strcmp contract.
.globl strcmp
.type strcmp, @function
strcmp:
#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
# Size-optimized variant: byte-at-a-time compare.
1:
lbu a2, 0(a0)
lbu a3, 0(a1)
add a0, a0, 1
add a1, a1, 1
bne a2, a3, 2f
bnez a2, 1b
2:
sub a0, a2, a3
ret
.size strcmp, .-strcmp
#else
# Speed-optimized variant: once both pointers are SZREG-aligned, compare
# a word at a time, using ((x & 0x7f..7f) + 0x7f..7f) | x | 0x7f..7f to
# detect a zero byte within a word (result != all-ones iff a zero byte).
or a4, a0, a1
li t2, -1
and a4, a4, SZREG-1
bnez a4, .Lmisaligned
#if SZREG == 4
li a5, 0x7f7f7f7f
#else
ld a5, mask
#endif
# check_one_word i n: compare word \i of an \n-word unrolled group;
# branches to .Lnull\i on an embedded NUL, .Lmismatch on inequality.
.macro check_one_word i n
REG_L a2, \i*SZREG(a0)
REG_L a3, \i*SZREG(a1)
and t0, a2, a5
or t1, a2, a5
add t0, t0, a5
or t0, t0, t1
bne t0, t2, .Lnull\i
.if \i+1-\n
bne a2, a3, .Lmismatch
.else
add a0, a0, \n*SZREG
add a1, a1, \n*SZREG
beq a2, a3, .Lloop
# fall through to .Lmismatch
.endif
.endm
# foundnull i n: landing pad for a NUL found in word \i; equal words
# mean equal strings (return 0), otherwise fall back to byte compare.
.macro foundnull i n
.ifne \i
.Lnull\i:
add a0, a0, \i*SZREG
add a1, a1, \i*SZREG
.ifeq \i-1
.Lnull0:
.endif
bne a2, a3, .Lmisaligned
li a0, 0
ret
.endif
.endm
.Lloop:
# examine full words at a time, favoring strings of a couple dozen chars
#if __riscv_xlen == 32
check_one_word 0 5
check_one_word 1 5
check_one_word 2 5
check_one_word 3 5
check_one_word 4 5
#else
check_one_word 0 3
check_one_word 1 3
check_one_word 2 3
#endif
# backwards branch to .Lloop contained above
.Lmismatch:
# words don't match, but a2 has no null byte.
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# Little endian: locate the first differing 16-bit half by shifting the
# lower halves up, then order the differing bytes.
#if __riscv_xlen == 64
sll a4, a2, 48
sll a5, a3, 48
bne a4, a5, .Lmismatch_upper
sll a4, a2, 32
sll a5, a3, 32
bne a4, a5, .Lmismatch_upper
#endif
sll a4, a2, 16
sll a5, a3, 16
bne a4, a5, .Lmismatch_upper
srl a4, a2, 8*SZREG-16
srl a5, a3, 8*SZREG-16
sub a0, a4, a5
and a1, a0, 0xff
bnez a1, 1f
ret
.Lmismatch_upper:
srl a4, a4, 8*SZREG-16
srl a5, a5, 8*SZREG-16
sub a0, a4, a5
and a1, a0, 0xff
bnez a1, 1f
ret
1:and a4, a4, 0xff
and a5, a5, 0xff
sub a0, a4, a5
ret
#else
# Big endian: the first differing byte is the most significant one.
#if __riscv_xlen == 64
srl a4, a2, 48
srl a5, a3, 48
bne a4, a5, .Lmismatch_lower
srl a4, a2, 32
srl a5, a3, 32
bne a4, a5, .Lmismatch_lower
#endif
srl a4, a2, 16
srl a5, a3, 16
bne a4, a5, .Lmismatch_lower
srl a4, a2, 8
srl a5, a3, 8
bne a4, a5, 1f
and a4, a2, 0xff
and a5, a3, 0xff
1:sub a0, a4, a5
ret
.Lmismatch_lower:
srl a2, a4, 8
srl a3, a5, 8
bne a2, a3, 1f
and a2, a4, 0xff
and a3, a5, 0xff
1:sub a0, a2, a3
ret
#endif
.Lmisaligned:
# misaligned
lbu a2, 0(a0)
lbu a3, 0(a1)
add a0, a0, 1
add a1, a1, 1
bne a2, a3, 1f
bnez a2, .Lmisaligned
1:
sub a0, a2, a3
ret
# cases in which a null byte was detected
#if __riscv_xlen == 32
foundnull 0 5
foundnull 1 5
foundnull 2 5
foundnull 3 5
foundnull 4 5
#else
foundnull 0 3
foundnull 1 3
foundnull 2 3
#endif
.size strcmp, .-strcmp
#if SZREG == 8
.section .srodata.cst8,"aM",@progbits,8
.align 3
mask:
.dword 0x7f7f7f7f7f7f7f7f
#endif
#endif
|
4ms/metamodule-plugin-sdk
| 7,726
|
plugin-libc/newlib/libc/machine/sh/memcpy.S
|
!
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
! SH5 code Copyright 2002 SuperH Ltd.
!
! Entry: ARG0: destination pointer
! ARG1: source pointer
! ARG3: byte count
!
! Exit: RESULT: destination pointer
! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatenate bytes
! into a longword on the SH, so this does a longword read and small
! writes.
!
! This implementation makes two assumptions about how it is called:
!
! 1.: If the byte count is nonzero, the address of the last byte to be
! copied is unsigned greater than the address of the first byte to
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to be copied, the rest of the word can be read
! without side effects.
! This could be easily changed by increasing the minimum size of
! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
! however, this would cost a few extra cycles on average.
! For SHmedia, the assumption is that any quadword can be read in its
! entirety if at least one byte is included in the copy.
!
#include "asm.h"
! void *memcpy(void *dst, const void *src, size_t n) -- returns dst.
! See the entry/exit notes in the file header for register conventions
! and the two calling assumptions this implementation depends on.
ENTRY(memcpy)
#if __SHMEDIA__
// Unaligned quad/long load+store helper macro pairs.
#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
// Sizes 0..24 dispatch through a computed branch into the size-specific
// stubs between L1 and Large; larger copies go to Large.
ld.b r3,0,r63
pta/l Large,tr0
movi 25,r0
bgeu/u r4,r0,tr0
nsb r4,r0
shlli r0,5,r0
movi (L1-L0+63*32 + 1) & 0xffff,r1
sub r1, r0, r0
L0: ptrel r0,tr0
add r2,r4,r5
ptabs r18,tr1
add r3,r4,r6
blink tr0,r63
.balign 8
L1:
/* 0 byte memcpy */
blink tr1,r63
L4_7: /* 4..7 byte memcpy cntd. */
stlo.l r2, 0, r0
or r6, r7, r6
sthi.l r5, -1, r6
stlo.l r5, -4, r6
blink tr1,r63
L2_3: /* 2 or 3 byte memcpy cntd. */
st.b r5,-1,r6
blink tr1,r63
/* 1 byte memcpy */
ld.b r3,0,r0
st.b r2,0,r0
blink tr1,r63
L8_15: /* 8..15 byte memcpy cntd. */
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
/* 2 or 3 byte memcpy */
ld.b r3,0,r0
ld.b r2,0,r63
ld.b r3,1,r1
st.b r2,0,r0
pta/l L2_3,tr0
ld.b r6,-1,r6
st.b r2,1,r1
blink tr0, r63
/* 4 .. 7 byte memcpy */
LDUAL (r3, 0, r0, r1)
pta L4_7, tr0
ldlo.l r6, -4, r7
or r0, r1, r0
sthi.l r2, 3, r0
ldhi.l r6, -1, r6
blink tr0, r63
/* 8 .. 15 byte memcpy */
LDUAQ (r3, 0, r0, r1)
pta L8_15, tr0
ldlo.q r6, -8, r7
or r0, r1, r0
sthi.q r2, 7, r0
ldhi.q r6, -1, r6
blink tr0, r63
/* 16 .. 24 byte memcpy */
LDUAQ (r3, 0, r0, r1)
LDUAQ (r3, 8, r8, r9)
or r0, r1, r0
sthi.q r2, 7, r0
or r8, r9, r8
sthi.q r2, 15, r8
ldlo.q r6, -8, r7
ldhi.q r6, -1, r6
stlo.q r2, 8, r8
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
Large:
// Large copy: align on quadwords, cache-line loop (alloco) when the
// count warrants it, then an unaligned-quad tail loop.
ld.b r2, 0, r63
pta/l Loop_ua, tr1
ori r3, -8, r7
sub r2, r7, r22
sub r3, r2, r6
add r2, r4, r5
ldlo.q r3, 0, r0
addi r5, -16, r5
movi 64+8, r27 // could subtract r7 from that.
stlo.q r2, 0, r0
sthi.q r2, 7, r0
ldx.q r22, r6, r0
bgtu/l r27, r4, tr1
addi r5, -48, r27
pta/l Loop_line, tr0
addi r6, 64, r36
addi r6, -24, r19
addi r6, -16, r20
addi r6, -8, r21
Loop_line:
ldx.q r22, r36, r63
alloco r22, 32
addi r22, 32, r22
ldx.q r22, r19, r23
sthi.q r22, -25, r0
ldx.q r22, r20, r24
ldx.q r22, r21, r25
stlo.q r22, -32, r0
ldx.q r22, r6, r0
sthi.q r22, -17, r23
sthi.q r22, -9, r24
sthi.q r22, -1, r25
stlo.q r22, -24, r23
stlo.q r22, -16, r24
stlo.q r22, -8, r25
bgeu r27, r22, tr0
Loop_ua:
addi r22, 8, r22
sthi.q r22, -1, r0
stlo.q r22, -8, r0
ldx.q r22, r6, r0
bgtu/l r5, r22, tr1
add r3, r4, r7
ldlo.q r7, -8, r1
sthi.q r22, 7, r0
ldhi.q r7, -1, r7
ptabs r18,tr1
stlo.q r22, 0, r0
or r1, r7, r1
sthi.q r5, 15, r1
stlo.q r5, 8, r1
blink tr1, r63
#else /* ! SHMEDIA, i.e. SH1 .. SH4 / SHcompact */
#ifdef __SH5__
#define DST r2
#define SRC r3
#define COUNT r4
#define TMP0 r5
#define TMP1 r6
#define RESULT r2
#else
#define DST r4
#define SRC r5
#define COUNT r6
#define TMP0 r2
#define TMP1 r3
#define RESULT r0
#endif
#ifdef __LITTLE_ENDIAN__
! Little endian version copies with increasing addresses.
mov DST,TMP1 ! Save return value
mov #11,r0 ! Check if small number of bytes
cmp/hs r0,COUNT
! COUNT becomes src end address
SL(bf, L_small, add SRC,COUNT)
mov #1,r1
tst r1,SRC ! check if source even
SL(bt, L_even, mov COUNT,r7)
mov.b @SRC+,r0 ! no, make it even.
mov.b r0,@DST
add #1,DST
L_even: tst r1,DST ! check if destination is even
add #-3,r7
SL(bf, L_odddst, mov #2,r1)
tst r1,DST ! check if destination is 4-byte aligned
mov DST,r0
SL(bt, L_al4dst, sub SRC,r0)
mov.w @SRC+,TMP0
mov.w TMP0,@DST
! add #2,DST DST is dead here.
L_al4dst:
tst r1,SRC
bt L_al4both
mov.w @SRC+,r1
swap.w r1,r1
add #-6,r0
add #-6,r7 ! r7 := src end address minus 9.
.align 2
L_2l_loop:
mov.l @SRC+,TMP0 ! Read & write two longwords per iteration
xtrct TMP0,r1
mov.l r1,@(r0,SRC)
cmp/hs r7,SRC
mov.l @SRC+,r1
xtrct r1,TMP0
mov.l TMP0,@(r0,SRC)
bf L_2l_loop
add #-2,SRC
bra L_cleanup
add #5,r0
L_al4both:
add #-4,r0
.align 2
L_al4both_loop:
mov.l @SRC+,DST ! Read longword, write longword per iteration
cmp/hs r7,SRC
SL(bf, L_al4both_loop, mov.l DST,@(r0,SRC))
bra L_cleanup
add #3,r0
L_odddst:
tst r1,SRC
SL(bt, L_al4src, add #-1,DST)
mov.w @SRC+,r0
mov.b r0,@(1,DST)
shlr8 r0
mov.b r0,@(2,DST)
add #2,DST
L_al4src:
.align 2
L_odd_loop:
mov.l @SRC+,r0 ! Read longword, write byte, word, byte per iteration
cmp/hs r7,SRC
mov.b r0,@(1,DST)
shlr8 r0
mov.w r0,@(2,DST)
shlr16 r0
mov.b r0,@(4,DST)
SL(bf, L_odd_loop, add #4,DST)
.align 2 ! avoid nop in more frequently executed code.
L_cleanup2:
mov DST,r0
sub SRC,r0
L_cleanup:
cmp/eq COUNT,SRC
bt L_ready
.align 2
L_cleanup_loop:
mov.b @SRC+,r1
cmp/eq COUNT,SRC
mov.b r1,@(r0,SRC)
bf L_cleanup_loop
L_ready:
rts
mov TMP1,RESULT
L_small:
bra L_cleanup2
add #-1,DST
#else /* ! __LITTLE_ENDIAN__ */
! Big endian version copies with decreasing addresses.
mov DST,r0
add COUNT,r0
sub DST,SRC
mov #11,r1
cmp/hs r1,COUNT
SL(bf, L_small, add #-1,SRC)
mov SRC,TMP1
add r0,TMP1
shlr TMP1
SL(bt, L_even,
mov DST,r7)
mov.b @(r0,SRC),TMP0
add #-1,TMP1
mov.b TMP0,@-r0
L_even:
tst #1,r0
add #-1,SRC
SL(bf, L_odddst, add #8,r7)
tst #2,r0
bt L_al4dst
add #-1,TMP1
mov.w @(r0,SRC),r1
mov.w r1,@-r0
L_al4dst:
shlr TMP1
bt L_al4both
mov.w @(r0,SRC),r1
swap.w r1,r1
add #4,r7
add #-4,SRC
.align 2
L_2l_loop:
mov.l @(r0,SRC),TMP0
xtrct TMP0,r1
mov.l r1,@-r0
cmp/hs r7,r0
mov.l @(r0,SRC),r1
xtrct r1,TMP0
mov.l TMP0,@-r0
bt L_2l_loop
bra L_cleanup
add #5,SRC
nop ! avoid nop in executed code.
L_al4both:
add #-2,SRC
.align 2
L_al4both_loop:
mov.l @(r0,SRC),r1
cmp/hs r7,r0
SL(bt, L_al4both_loop,
mov.l r1,@-r0)
bra L_cleanup
add #3,SRC
nop ! avoid nop in executed code.
L_odddst:
shlr TMP1
bt L_al4src
mov.w @(r0,SRC),r1
mov.b r1,@-r0
shlr8 r1
mov.b r1,@-r0
L_al4src:
add #-2,SRC
.align 2
L_odd_loop:
mov.l @(r0,SRC),TMP0
cmp/hs r7,r0
mov.b TMP0,@-r0
shlr8 TMP0
mov.w TMP0,@-r0
shlr16 TMP0
mov.b TMP0,@-r0
bt L_odd_loop
add #3,SRC
L_cleanup:
L_small:
cmp/eq DST,r0
bt L_ready
add #1,DST
.align 2
L_cleanup_loop:
mov.b @(r0,SRC),TMP0
cmp/eq DST,r0
mov.b TMP0,@-r0
bf L_cleanup_loop
L_ready:
rts
mov r0,RESULT
#endif /* ! __LITTLE_ENDIAN__ */
#endif /* ! SHMEDIA */
|
4ms/metamodule-plugin-sdk
| 3,695
|
plugin-libc/newlib/libc/machine/sh/setjmp.S
|
/* We want to pretend we're in SHmedia mode, even when assembling for
SHcompact. */
#if __SH5__ == 32 && ! __SHMEDIA__
# undef __SHMEDIA__
# define __SHMEDIA__ 1
#endif
#if __SHMEDIA__
.mode SHmedia
#endif
#include "asm.h"
! int setjmp (jmp_buf buf)
! Saves the callee-saved register state into buf and returns 0; a later
! longjmp on the same buf resumes here with a nonzero return value.
! SH5 variant: buf in r2; SHcompact variant: buf in r4 (stores run
! top-down with pre-decrement, so r4 is first advanced past the buffer).
ENTRY(setjmp)
#if __SH5__
ptabs r18, tr0
gettr tr5, r5
gettr tr6, r6
gettr tr7, r7
st.q r2, 0*8, r18
st.q r2, 1*8, r10
st.q r2, 2*8, r11
st.q r2, 3*8, r12
st.q r2, 4*8, r13
st.q r2, 5*8, r14
st.q r2, 6*8, r15
st.q r2, 7*8, r28
st.q r2, 8*8, r29
st.q r2, 9*8, r30
st.q r2, 10*8, r31
st.q r2, 11*8, r32
st.q r2, 12*8, r33
st.q r2, 13*8, r34
st.q r2, 14*8, r35
st.q r2, 15*8, r44
st.q r2, 16*8, r45
st.q r2, 17*8, r46
st.q r2, 18*8, r47
st.q r2, 19*8, r48
st.q r2, 20*8, r49
st.q r2, 21*8, r50
st.q r2, 22*8, r51
st.q r2, 23*8, r52
st.q r2, 24*8, r53
st.q r2, 25*8, r54
st.q r2, 26*8, r55
st.q r2, 27*8, r56
st.q r2, 28*8, r57
st.q r2, 29*8, r58
st.q r2, 30*8, r59
st.q r2, 31*8, r5
st.q r2, 32*8, r6
st.q r2, 33*8, r7
#if ! __SH4_NOFPU__
fst.d r2, 34*8, dr12
fst.d r2, 35*8, dr14
fst.d r2, 36*8, dr36
fst.d r2, 37*8, dr38
fst.d r2, 38*8, dr40
fst.d r2, 39*8, dr42
fst.d r2, 40*8, dr44
fst.d r2, 41*8, dr46
fst.d r2, 42*8, dr48
fst.d r2, 43*8, dr50
fst.d r2, 44*8, dr52
fst.d r2, 45*8, dr54
fst.d r2, 46*8, dr56
fst.d r2, 47*8, dr58
fst.d r2, 48*8, dr60
fst.d r2, 49*8, dr62
#endif
movi 0, r2
blink tr0, r63
#else
! SHcompact: point r4 past the end of the buffer, then push registers.
#if defined (__SH2E__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__)
add #(13*4),r4
#else
add #(9*4),r4
#endif
sts.l pr,@-r4
#if defined (__SH2E__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__)
fmov.s fr15,@-r4 ! call saved floating point registers
fmov.s fr14,@-r4
fmov.s fr13,@-r4
fmov.s fr12,@-r4
#endif
mov.l r15,@-r4 ! call saved integer registers
mov.l r14,@-r4
mov.l r13,@-r4
mov.l r12,@-r4
mov.l r11,@-r4
mov.l r10,@-r4
mov.l r9,@-r4
mov.l r8,@-r4
rts
mov #0,r0 ! direct call returns 0 (delay slot)
#endif /* __SH5__ */
! void longjmp (jmp_buf buf, int val)
! Restores the register state saved by setjmp and resumes after the
! original setjmp call, which then returns val (0 is forced to 1).
! SH5 variant: buf in r2, val in r3; SHcompact: buf in r4, val in r5.
ENTRY(longjmp)
#if __SH5__
ld.q r2, 0*8, r18
ptabs r18, tr0
ld.q r2, 1*8, r10
ld.q r2, 2*8, r11
ld.q r2, 3*8, r12
ld.q r2, 4*8, r13
ld.q r2, 5*8, r14
ld.q r2, 6*8, r15
ld.q r2, 7*8, r28
ld.q r2, 8*8, r29
ld.q r2, 9*8, r30
ld.q r2, 10*8, r31
ld.q r2, 11*8, r32
ld.q r2, 12*8, r33
ld.q r2, 13*8, r34
ld.q r2, 14*8, r35
ld.q r2, 15*8, r44
ld.q r2, 16*8, r45
ld.q r2, 17*8, r46
ld.q r2, 18*8, r47
ld.q r2, 19*8, r48
ld.q r2, 20*8, r49
ld.q r2, 21*8, r50
ld.q r2, 22*8, r51
ld.q r2, 23*8, r52
ld.q r2, 24*8, r53
ld.q r2, 25*8, r54
ld.q r2, 26*8, r55
ld.q r2, 27*8, r56
ld.q r2, 28*8, r57
ld.q r2, 29*8, r58
ld.q r2, 30*8, r59
ld.q r2, 31*8, r5
ld.q r2, 32*8, r6
ld.q r2, 33*8, r7
ptabs r5, tr5
ptabs r6, tr6
ptabs r7, tr7
#if ! __SH4_NOFPU__
fld.d r2, 34*8, dr12
fld.d r2, 35*8, dr14
fld.d r2, 36*8, dr36
fld.d r2, 37*8, dr38
fld.d r2, 38*8, dr40
fld.d r2, 39*8, dr42
fld.d r2, 40*8, dr44
fld.d r2, 41*8, dr46
fld.d r2, 42*8, dr48
fld.d r2, 43*8, dr50
fld.d r2, 44*8, dr52
fld.d r2, 45*8, dr54
fld.d r2, 46*8, dr56
fld.d r2, 47*8, dr58
fld.d r2, 48*8, dr60
fld.d r2, 49*8, dr62
#endif
! Return value: 1 if val (r3) is zero, else val.
movi 1, r2
cmvne r3, r3, r2
blink tr0, r63
#else
mov.l @r4+,r8
mov.l @r4+,r9
mov.l @r4+,r10
mov.l @r4+,r11
mov.l @r4+,r12
mov.l @r4+,r13
mov.l @r4+,r14
mov.l @r4+,r15
#if defined (__SH2E__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__)
fmov.s @r4+,fr12 ! call saved floating point registers
fmov.s @r4+,fr13
fmov.s @r4+,fr14
fmov.s @r4+,fr15
#endif
lds.l @r4+,pr
! r0 = (r5 != 0) ? r5 : 1 -- movt stores the T bit (1 from tst) on zero.
mov r5,r0
tst r0,r0
bf retr4
movt r0
retr4: rts
nop
#endif /* __SH5__ */
|
4ms/metamodule-plugin-sdk
| 2,524
|
plugin-libc/newlib/libc/machine/sh/strcpy.S
|
! Entry: arg0: destination
! arg1: source
! Exit: result: destination
!
! SH5 code Copyright 2002 SuperH Ltd.
#include "asm.h"
! char *strcpy(char *dst, const char *src) -- returns dst.
! SHmedia variant: quadword-at-a-time with mcmpeq.b zero-byte scan;
! SHcompact variant: longword loop once aligned, byte loop otherwise.
ENTRY(strcpy)
#if __SHMEDIA__
pta/l shortstring,tr1
ldlo.q r3,0,r4
ptabs r18,tr4
shlli r3,3,r7
addi r2, 8, r0
mcmpeq.b r4,r63,r6
SHHI r6,r7,r6
bnei/u r6,0,tr1 // shortstring
pta/l no_lddst, tr2
ori r3,-8,r23
sub r2, r23, r0
sub r3, r2, r21
addi r21, 8, r20
ldx.q r0, r21, r5
pta/l loop, tr0
ori r2,-8,r22
mcmpeq.b r5, r63, r6
bgt/u r22, r23, tr2 // no_lddst
// r22 < r23 : Need to do a load from the destination.
// r22 == r23 : Doesn't actually need to load from destination,
// but still can be handled here.
ldlo.q r2, 0, r9
movi -1, r8
SHLO r8, r7, r8
mcmv r4, r8, r9
stlo.q r2, 0, r9
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
blink tr1, r63 // shortstring
no_lddst:
// r22 > r23: note that for r22 == r23 the sthi.q would clobber
// bytes before the destination region.
stlo.q r2, 0, r4
SHHI r4, r7, r4
sthi.q r0, -1, r4
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
shortstring:
#ifndef __LITTLE_ENDIAN__
pta/l shortstring2,tr1
byterev r4,r4
#endif
shortstring2:
// Store the remaining quad a byte at a time until the NUL is written.
st.b r0,-8,r4
andi r4,0xff,r5
shlri r4,8,r4
addi r0,1,r0
bnei/l r5,0,tr1
blink tr4,r63 // return
.balign 8
loop:
stlo.q r0, 0, r5
ldx.q r0, r20, r4
addi r0, 16, r0
sthi.q r0, -9, r5
mcmpeq.b r4, r63, r6
bnei/u r6, 0, tr1 // shortstring
ldx.q r0, r21, r5
stlo.q r0, -8, r4
sthi.q r0, -1, r4
mcmpeq.b r5, r63, r6
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
blink tr1, r63 // shortstring
#else /* ! __SHMEDIA__, i.e. SH 1..4 / SHcompact */
#ifdef __SH5__
#define DST r2
#define SRC r3
#define TMP r4
#define RESULT R2
! r0,r1,r3,r4: clobbered
#else
#define DST r4
#define SRC r5
#define TMP r2
#define RESULT r0
! r1-r2,r5: clobbered
#endif
! If both pointers are longword-aligned, copy 4 bytes at a time using
! cmp/str to spot a zero byte; otherwise copy byte by byte.
mov DST,r0
or SRC,r0
tst #3,r0
SL(bf, L_setup_char_loop, mov DST,r0)
mov.l @SRC+,r1
mov #0,TMP
cmp/str TMP,r1
SL(bt, Longword_loop_end, sub SRC,r0)
.align 2
Longword_loop:
mov.l r1,@(r0,SRC)
mov.l @SRC+,r1
cmp/str TMP,r1
bt Longword_loop_end
mov.l r1,@(r0,SRC)
mov.l @SRC+,r1
cmp/str TMP,r1
bf Longword_loop
Longword_loop_end:
add #-4,SRC
add #3,r0
.align 2
L_char_loop:
mov.b @SRC+,r1
L_char_loop_start:
tst r1,r1
SL(bf, L_char_loop, mov.b r1,@(r0,SRC))
rts
mov DST,RESULT
L_setup_char_loop:
mov.b @SRC+,r1
bra L_char_loop_start
sub SRC,r0
#endif /* ! __SHMEDIA__ */
|
4ms/metamodule-plugin-sdk
| 3,191
|
plugin-libc/newlib/libc/machine/sh/memset.S
|
!
! Fast SH memset
!
! by Toshiyasu Morita (tm@netcom.com)
!
! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
! Copyright 2002 SuperH Ltd.
!
#include "asm.h"
! void *memset(void *s, int c, size_t n) -- returns s.
! SHmedia variant: fill pattern broadcast with mshflo.b/mperm.w, stored
! a quadword (or cache line) at a time; SHcompact variant: align, then
! store two longwords per iteration, byte loop for head/tail.
ENTRY(memset)
#if __SHMEDIA__
pta/l multiquad, tr0
ptabs r18, tr2
andi r2, -8, r25
add r2, r4, r5
addi r5, -1, r20 // calculate end address.
andi r20, -8, r20
cmveq r4, r25, r20
bne/u r25, r20, tr0 // multiquad
! This sequence could clobber volatile objects that are in the same
! quadword as a very short char array.
! ldlo.q r2, 0, r7
! shlli r4, 2, r4
! movi -1, r8
! SHHI r8, r4, r8
! SHHI r8, r4, r8
! mcmv r7, r8, r3
! stlo.q r2, 0, r3
pta/l setlongs, tr0
movi 4, r8
bgeu/u r4, r8, tr0
pta/l endset, tr0
beqi/u r4, 0, tr0
st.b r2, 0, r3
beqi/u r4, 1, tr0
nop
st.b r2, 1, r3
beqi/l r4, 2, tr0
st.b r2,2,r3
endset: blink tr2, r63
setlongs:
mshflo.b r3, r3, r3
mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
stlo.l r2, 0, r3
nop
nop
sthi.l r5, -1, r3
blink tr2, r63
multiquad:
mshflo.b r3, r3, r3
mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
pta/l lastquad, tr0
stlo.q r2, 0, r3
sub r20, r25, r24
movi 64, r9
beqi/u r24, 8, tr0 // lastquad
pta/l loop, tr1
addi r20, -7*8, r8 // loop end address; This might overflow, so we need
// to use a different test before we start the loop
bgeu/u r24, r9, tr1// loop
st.q r25, 8, r3
shlri r24, 4, r24
st.q r20, -8, r3
beqi/u r24, 1, tr0 // lastquad
st.q r25, 16, r3
st.q r20, -16, r3
beqi/u r24, 2, tr0 // lastquad
st.q r25, 24, r3
st.q r20, -24, r3
lastquad:
sthi.q r5, -1, r3
blink tr2,r63
loop:
alloco r25, 32
st.q r25, 8, r3
st.q r25, 16, r3
st.q r25, 24, r3
st.q r25, 32, r3
addi r25, 32, r25
bgeu/l r8, r25, tr1 // loop
st.q r20, -40, r3
st.q r20, -32, r3
st.q r20, -24, r3
st.q r20, -16, r3
st.q r20, -8, r3
sthi.q r5, -1, r3
blink tr2,r63
#else /* ! SHMEDIA, i.e. SH1 .. SH4 / SHcompact */
! Entry: r4: destination pointer
! r5: fill value
! r6: byte count
!
! Exit: r0-r3: trashed
!
! This assumes that the first four bytes of the address space (0..3) are
! reserved - usually by the linker script. Otherwise, we would have had
! to check for the case of objects of the size 12..15 at address 0..3 .
#ifdef __SH5__
#define DST r2
#define VAL r3
#define CNT r4
#define TMP r5
#else
#define DST r4
#define VAL r5
#define CNT r6
#define TMP r2
#endif
mov #12,r0 ! Check for small number of bytes
cmp/gt CNT,r0
mov DST,r0
SL(bt, L_store_byte_loop_check0, add DST,CNT)
tst #3,r0 ! Align destination
SL(bt, L_dup_bytes, extu.b r5,r5)
.balignw 4,0x0009
L_align_loop:
mov.b VAL,@r0
add #1,r0
tst #3,r0
bf L_align_loop
L_dup_bytes:
swap.b VAL,TMP ! Duplicate bytes across longword
or TMP,VAL
swap.w VAL,TMP
or TMP,VAL
add #-16,CNT
.balignw 4,0x0009
L_store_long_loop:
mov.l VAL,@r0 ! Store double longs to memory
cmp/hs CNT,r0
mov.l VAL,@(4,r0)
SL(bf, L_store_long_loop, add #8,r0)
add #16,CNT
L_store_byte_loop_check0:
cmp/eq CNT,r0
bt L_exit
.balignw 4,0x0009
L_store_byte_loop:
mov.b VAL,@r0 ! Store bytes to memory
add #1,r0
cmp/eq CNT,r0
bf L_store_byte_loop
L_exit:
rts
mov r4,r0
#endif /* ! SHMEDIA */
|
4ms/metamodule-plugin-sdk
| 1,370
|
plugin-libc/newlib/libc/machine/sh/strlen.S
|
! Entry: arg0: string start address
! Exit: result: length
!
! Copyright 2002 SuperH Ltd.
#include "asm.h"
! size_t strlen(const char *s) -- returns the string length.
! SHmedia variant: scans a quadword at a time with mcmpeq.b, locating
! the first zero byte via msad.ubq (LE) or nsb (BE).
! SHcompact variant: cmp/str longword scan once aligned, byte scan to
! align and to pin down the exact terminator position.
ENTRY(strlen)
#if __SHMEDIA__
ldlo.q r2,0,r3
ptabs/l r18,tr0
pta/l loop,tr1
andi r2,-8,r0
shlli r2,3,r1
mcmpeq.b r3,r63,r3
SHHI r3,r1,r4
beqi/u r4,0,tr1 // loop
#ifdef __LITTLE_ENDIAN__
movi -1,r2
addi r3,-1,r4
msad.ubq r3,r4,r2
#else
shlri r3,1,r3
nsb r3,r3
shlri r3,3,r2
#endif
blink tr0,r63
loop:
ldlo.q r0,8,r3
addi r0,8,r0
ldlo.q r0,8,r63
mcmpeq.b r3,r63,r3
beqi/l r3,0,tr1 // loop
sub r0,r2,r2
#ifdef __LITTLE_ENDIAN__
addi r3,-1,r4
addi r2,-1,r2
msad.ubq r3,r4,r2
#else
shlri r3,1,r3
nsb r3,r3
shlri r3,3,r3
add r2,r3,r2
#endif
blink tr0,r63
#else /* ! __SHMEDIA__, i.e. SH 1..4 / SHcompact */
#ifdef __SH5__
#define STR_INIT r2
#define STR_ORIG r0
#define STR_COPY STR_ORIG
#define MASK r1
#define TMP r3
#define RESULT r2
! r0,r1,r3: clobbered
#else
#define STR_INIT r4
#define STR_ORIG STR_INIT
#define STR_COPY STR
#define MASK r1
#define TMP r3
#define RESULT r0
! r1,r3: clobbered
#endif
#define STR RESULT
mov #3,MASK
and STR_INIT,MASK
tst MASK,MASK
SL(bf, L_char_loop, mov STR_INIT, STR_COPY)
L_word_loop:
mov.l @STR+,TMP
cmp/str MASK,TMP
bf L_word_loop
add #-4,STR
L_char_loop:
mov.b @STR+,TMP
tst TMP,TMP
bf L_char_loop
add #-1,STR
rts
sub STR_ORIG,STR
#endif /* ! __SHMEDIA__ */
|
4ms/metamodule-plugin-sdk
| 4,092
|
plugin-libc/newlib/libc/machine/sh/strcmp.S
|
! SH5 code Copyright 2002 SuperH Ltd.
#include "asm.h"
! int strcmp(const char *s1, const char *s2) -- <0/0/>0 ordering result.
! SHmedia variant: compares the first 8 bytes individually, then
! quadword-at-a-time (aligned and cross-aligned paths); SHcompact
! variant: longword loop with cmp/str when aligned, byte loop otherwise.
ENTRY(strcmp)
#if __SHMEDIA__
ld.ub r2,0,r4
pt/l quickret0,tr0
ld.ub r3,0,r5
ptabs r18,tr2
beqi/u r4,0,tr0
ld.ub r2,1,r6
bne/u r4,r5,tr0
pt/l quickret1,tr1
ld.ub r3,1,r7
beqi/u r6,0,tr1
ld.ub r2,2,r4
bne/u r6,r7,tr1
ld.ub r3,2,r5
beqi/u r4,0,tr0
ld.ub r2,3,r6
bne/u r4,r5,tr0
ld.ub r3,3,r7
beqi/u r6,0,tr1
ld.ub r2,4,r4
bne/u r6,r7,tr1
ld.ub r3,4,r5
beqi/u r4,0,tr0
ld.ub r2,5,r6
bne/u r4,r5,tr0
ld.ub r3,5,r7
beqi/u r6,0,tr1
ld.ub r2,6,r4
bne/u r6,r7,tr1
ld.ub r3,6,r5
beqi/u r4,0,tr0
ld.ub r2,7,r6
bne/u r4,r5,tr0
ld.ub r3,7,r7
beqi/u r6,0,tr1
sub r3,r2,r3
bne/u r6,r7,tr1
! First 8 bytes matched; switch to quadword compares, keeping r3 as
! the (possibly unaligned) offset of s2 relative to the aligned s1.
andi r2,-8,r2
add r3,r2,r3
ldlo.q r3,8,r23
pt r23_zero,tr0
shlli r3,3,r22
sub r63,r22,r20
movi 0x101,r6
mperm.w r6,r63,r6
SHLO r6,r22,r7
msubs.ub r7,r23,r8
pt loop,tr1
bnei/u r8,0,tr0 // r23_zero
pt found_zero,tr0
addi r3,15,r3
andi r3,-8,r3
sub r3,r2,r3
bne/l r7,r6,tr1 // loop
/* The strings are aligned to each other. */
/* It is possible to have a loop with six cycles / iteration
by re-ordering the exit conditions, but then it needs extra
time and/or code to sort out the r4 != r5 case. */
pt al_loop,tr1
pt al_found_zero,tr0
al_loop:
ld.q r2,8,r4
ldx.q r2,r3,r5
addi r2,8,r2
mcmpeq.b r63,r4,r8
pt cmp_quad,tr3
bnei/u r8,0,tr0 // al_found_zero
beq/l r4,r5,tr1 // al_loop
blink tr3,r63 // cmp_quad
.balign 8
quickret0:
sub r4,r5,r2
blink tr2,r63
quickret1:
sub r6,r7,r2
blink tr2,r63
loop:
ld.q r2,8,r4
ldx.q r2,r3,r19
addi r2,8,r2
msubs.ub r6,r4,r8
mcmpeq.b r63,r19,r9
SHHI r19,r20,r21
or r21,r23,r5
SHLO r19,r22,r23
bne/u r8,r9,tr0 // found_zero
beq/l r4,r5,tr1 // loop
cmp_quad:
#ifdef __LITTLE_ENDIAN__
byterev r4,r4
byterev r5,r5
#endif
cmpgtu r4,r5,r6
cmpgtu r5,r4,r7
sub r6,r7,r2
blink tr2,r63
found_zero:
pt zero_now,tr0
pt cmp_quad,tr1
SHHI r9,r20,r7
bne/u r8,r7,tr0 // zero_now
bne/u r4,r5,tr1 // cmp_quad
SHLO r9,r22,r8
r23_zero:
ld.q r2,8,r4
add r23,r63,r5
zero_now:
al_found_zero:
/* We know that one of the values has at least one zero, and r8 holds
an 0x01 or 0xff mask for every zero found in one of the operands.
If both operands have the first zero in the same place, this mask
allows us to truncate the comparison to the valid bytes in the
strings. If the first zero is in different places, it doesn't
matter if some invalid bytes are included, since the comparison
of the zero with the non-zero will determine the outcome. */
#ifdef __LITTLE_ENDIAN__
shlli r8,8,r8
addi r8,-1,r9
andc r9,r8,r8
and r8,r4,r4
and r8,r5,r5
#else
shlri r8,1,r8
nsb r8,r8
addi r8,8,r8
andi r8,56,r8
sub r63,r8,r8
shlrd r4,r8,r4
shlrd r5,r8,r5
#endif
#ifdef __LITTLE_ENDIAN__
byterev r4,r4
byterev r5,r5
#endif
cmpgtu r4,r5,r6
cmpgtu r5,r4,r7
sub r6,r7,r2
blink tr2,r63
#else /* ! __SHMEDIA__, i.e. SH 1..4 / SHcompact */
#ifdef __SH5__
#define STR1 r2
#define STR2 r3
#define RESULT r2
#define TMP r4
#else
! Entry: r4: string1
! r5: string2
! Exit: r0: result
! r1-r2,r4-r5: clobbered
#define STR1 r4
#define STR2 r5
#define RESULT r0
#define TMP r2
#endif /* __SH5__ */
mov STR1,r0
or STR2,r0
tst #3,r0
bf L_setup_char_loop
mov #0,r0
#ifdef DELAYED_BRANCHES
mov.l @STR1+,r1
.align 2
Longword_loop:
mov.l @STR2+,TMP
cmp/str r0,r1
bt Longword_loop_end
cmp/eq r1,TMP
bt.s Longword_loop
mov.l @STR1+,r1
add #-4, STR1
Longword_loop_end:
add #-4, STR1
add #-4, STR2
L_setup_char_loop:
mov.b @STR1+,r0
.align 2
L_char_loop:
mov.b @STR2+,r1
tst r0,r0
bt L_return
cmp/eq r0,r1
bt.s L_char_loop
mov.b @STR1+,r0
add #-2,STR1
mov.b @STR1,r0
#else /* ! DELAYED_BRANCHES */
.align 2
Longword_loop:
mov.l @r4+,r1
mov.l @r5+,r2
cmp/str r0,r1
bt Longword_loop_end
cmp/eq r1,r2
bt Longword_loop
Longword_loop_end:
add #-4, r4
add #-4, r5
.align 2
L_setup_char_loop:
L_char_loop:
mov.b @r4+,r0
mov.b @r5+,r1
tst r0,r0
bt L_return
cmp/eq r0,r1
bt L_char_loop
#endif
L_return:
! Compare as unsigned chars, per the C library contract.
extu.b r0,RESULT
extu.b r1,r1
rts
sub r1,RESULT
#endif /* ! __SHMEDIA__ */
|
4ms/metamodule-plugin-sdk
| 4,945
|
plugin-libc/newlib/libc/machine/sh/strncpy.S
|
/* Copyright 2003 SuperH Ltd. */
#include "asm.h"
#ifdef __SH5__
#if __SHMEDIA__
#ifdef __LITTLE_ENDIAN__
#define ZPAD_MASK(src, dst) addi src, -1, dst
#else
#define ZPAD_MASK(src, dst) \
byterev src, dst; addi dst, -1, dst; byterev dst, dst
#endif
/* We assume that the destination is not in the first 16 bytes of memory.
A typical linker script will put the text section first, and as
this code is longer that 16 bytes, you have to get out of your way
to put data there. */
ENTRY(strncpy)
pt L_small, tr2
ldlo.q r3, 0, r0
shlli r3, 3, r19
mcmpeq.b r0, r63, r1
SHHI r1, r19, r7
add r2, r4, r20
addi r20, -8, r5
/* If the size is greater than 8, we know we can read beyond the first
(possibly partial) quadword, and write out a full first and last
(possibly unaligned and/or overlapping) quadword. */
bge/u r2, r5, tr2 // L_small
pt L_found0, tr0
addi r2, 8, r22
bnei/u r7, 0, tr0 // L_found0
ori r3, -8, r38
pt L_end_early, tr1
sub r2, r38, r22
stlo.q r2, 0, r0
sthi.q r2, 7, r0
sub r3, r2, r6
ldx.q r22, r6, r0
/* Before each iteration, check that we can store in full the next quad we
are about to fetch. */
addi r5, -8, r36
bgtu/u r22, r36, tr1 // L_end_early
pt L_scan0, tr1
L_scan0:
addi r22, 8, r22
mcmpeq.b r0, r63, r1
stlo.q r22, -8, r0
bnei/u r1, 0, tr0 // L_found0
sthi.q r22, -1, r0
ldx.q r22, r6, r0
bgeu/l r36, r22, tr1 // L_scan0
L_end:
// At end; we might re-read a few bytes when we fetch the last quad.
// branch mispredict, so load is ready now.
mcmpeq.b r0, r63, r1
addi r22, 8, r22
bnei/u r1, 0, tr0 // L_found0
add r3, r4, r7
ldlo.q r7, -8, r1
ldhi.q r7, -1, r7
ptabs r18, tr0
stlo.q r22, -8, r0
or r1, r7, r1
mcmpeq.b r1, r63, r7
sthi.q r22, -1, r0
ZPAD_MASK (r7, r7)
and r1, r7, r1 // mask out non-zero bytes after first zero byte
stlo.q r20, -8, r1
sthi.q r20, -1, r1
blink tr0, r63
L_end_early:
/* Check if we can store the current quad in full. */
pt L_end, tr1
add r3, r4, r7
bgtu/u r5, r22, tr1 // L_end // Not really unlikely, but gap is short.
/* If not, that means we can just proceed to process the last quad.
Two pipeline stalls are unavoidable, as we don't have enough ILP. */
ldlo.q r7, -8, r1
ldhi.q r7, -1, r7
ptabs r18, tr0
or r1, r7, r1
mcmpeq.b r1, r63, r7
ZPAD_MASK (r7, r7)
and r1, r7, r1 // mask out non-zero bytes after first zero byte
stlo.q r20, -8, r1
sthi.q r20, -1, r1
blink tr0, r63
L_found0:
// r0: string to store, not yet zero-padding normalized.
// r1: result of mcmpeq.b r0, r63, r1.
// r22: store address plus 8. I.e. address where zero padding beyond the
// string in r0 goes.
// r20: store end address.
// r5: store end address minus 8.
pt L_write0_multiquad, tr0
ZPAD_MASK (r1, r1)
and r0, r1, r0 // mask out non-zero bytes after first zero byte
stlo.q r22, -8, r0
sthi.q r22, -1, r0
andi r22, -8, r1 // Check if zeros to write fit in one quad word.
bgtu/l r5, r1, tr0 // L_write0_multiquad
ptabs r18, tr1
sub r20, r22, r1
shlli r1, 2, r1 // Do shift in two steps so that 64 bit case is
SHLO r0, r1, r0 // handled correctly.
SHLO r0, r1, r0
sthi.q r20, -1, r0
blink tr1, r63
L_write0_multiquad:
pt L_write0_loop, tr0
ptabs r18, tr1
stlo.q r22, 0, r63
sthi.q r20, -1, r63
addi r1, 8, r1
bgeu/l r5, r1, tr0 // L_write0_loop
blink tr1, r63
L_write0_loop:
st.q r1, 0 ,r63
addi r1, 8, r1
bgeu/l r5, r1, tr0 // L_write0_loop
blink tr1, r63
L_small:
// r0: string to store, not yet zero-padding normalized.
// r1: result of mcmpeq.b r0, r63, r1.
// r7: nonzero indicates relevant zero found r0.
// r2: store address.
// r3: read address.
// r4: size, max 8
// r20: store end address.
// r5: store end address minus 8.
pt L_nohi, tr0
pt L_small_storelong, tr1
ptabs r18, tr2
sub r63, r4, r23
bnei/u r7, 0, tr0 // L_nohi
ori r3, -8, r7
bge/l r23, r7, tr0 // L_nohi
ldhi.q r3, 7, r1
or r0, r1, r0
mcmpeq.b r0, r63, r1
L_nohi:
ZPAD_MASK (r1, r1)
and r0, r1, r0
movi 4, r19
bge/u r4, r19, tr1 // L_small_storelong
pt L_small_end, tr0
#ifndef __LITTLE_ENDIAN__
byterev r0, r0
#endif
beqi/u r4, 0, tr0 // L_small_end
st.b r2, 0, r0
beqi/u r4, 1, tr0 // L_small_end
shlri r0, 8, r0
st.b r2, 1, r0
beqi/u r4, 2, tr0 // L_small_end
shlri r0, 8, r0
st.b r2, 2, r0
L_small_end:
blink tr2, r63
L_small_storelong:
shlli r23, 3, r7
SHHI r0, r7, r1
#ifdef __LITTLE_ENDIAN__
shlri r1, 32, r1
#else
shlri r0, 32, r0
#endif
stlo.l r2, 0, r0
sthi.l r2, 3, r0
stlo.l r20, -4, r1
sthi.l r20, -1, r1
blink tr2, r63
#else /* SHcompact */
/* This code is optimized for size. Instruction selection is SH5 specific.
SH4 should use a different version. */
/* char *strncpy (char *dst (r2), const char *src (r3), size_t n (r4)).
   SHcompact version, optimized for size (see note above).  dst (r2)
   is never modified, so it is implicitly the return value.  Copies at
   most n bytes; once a NUL has been copied, the load is skipped and
   the NUL still held in r1 is re-stored, zero-padding the remainder.  */
ENTRY(strncpy)
mov #0, r6 /* r6 = 0, compared against n and each copied byte */
cmp/eq r4, r6 /* T = (n == 0) */
bt return /* nothing to copy */
mov r2, r5
add #-1, r5 /* r5 = dst - 1 (store pointer, pre-incremented in the loop) */
add r5, r4 /* r4 = dst - 1 + n = address of the last byte to store */
loop:
/* T here means "the previously copied byte was NUL".  On the first
   iteration T is known false, because the "bt return" above fell
   through.  */
bt/s found0 /* NUL already copied: skip the load, keep storing it */
add #1, r5 /* (delay slot) advance the store pointer */
mov.b @r3+, r1 /* fetch the next source byte */
found0:
cmp/eq r5,r4 /* T = reached the last store address */
mov.b r1, @r5 /* store data byte or NUL padding */
bf/s loop /* not at the end yet */
cmp/eq r1, r6 /* (delay slot) T = (stored byte == 0), for the next pass */
return:
rts
nop
#endif /* SHcompact */
#endif /* __SH5__ */
|
4ms/metamodule-plugin-sdk
| 3,123
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv6m.S
|
/*
* Copyright (c) 2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Implementation of strcmp for ARMv6m. This version is only used in
ARMv6-M when we want an efficient implementation. Otherwize if the
code size is preferred, strcmp-armv4t.S will be used. */
.thumb_func
.syntax unified
.arch armv6-m
/* DoSub: compare the byte fields previously extracted into r0 (from
   s1's word, r2) and r1 (from s2's word, r3); branch to \label if the
   fields differ or a NUL lies among the bytes examined so far.
   r4 holds the per-byte NUL flags for s1's word, computed in the main
   loop as (w - 0x01010101) & ~w & 0x80808080; the shift by \n keeps
   only the flags belonging to the bytes already compared.
   Clobbers r0, r1 and the flags.  */
.macro DoSub n, label
subs r0, r0, r1 /* r0 = difference of the extracted byte fields */
#ifdef __ARM_BIG_ENDIAN
lsrs r1, r4, \n /* big-endian: keep flags of the leading (high) bytes */
#else
lsls r1, r4, \n /* little-endian: keep flags of the low bytes seen so far */
#endif
orrs r1, r0 /* nonzero iff bytes differ or a NUL was present */
bne \label
.endm
/* Byte_Test (big-endian path): extract the byte fields of r2/r3 that
   start at bit \n, then compare them with DoSub, branching to \label
   on mismatch or NUL.  Clobbers r0, r1 and the flags.  */
.macro Byte_Test n, label
lsrs r0, r2, \n /* r0 = high bytes of s1's word, from bit \n up */
lsrs r1, r3, \n /* r1 = the same bytes of s2's word */
DoSub \n, \label
.endm
.text
/* int strcmp (const char *s1 (r0), const char *s2 (r1)).
   ARMv6-M version: when both pointers are word-aligned, compares a
   word at a time, using the classic (w - 0x01010101) & ~w & 0x80808080
   test to spot NUL bytes; otherwise falls back to a byte-at-a-time
   loop (label 6).  Returns <0, 0 or >0 in r0.  */
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
mov r2, r0
push {r4, r5, r6, lr}
orrs r2, r1 /* r2 = s1 | s2: combined alignment probe */
lsls r2, r2, #30 /* nonzero iff either pointer is not word-aligned */
bne 6f /* unaligned: byte loop */
ldr r5, =0x01010101 /* per-byte LSB mask for NUL detection */
lsls r6, r5, #7 /* r6 = 0x80808080 (per-byte MSB mask) */
1:
/* Main loop: fetch one aligned word from each string. */
ldmia r0!, {r2} /* r2 = next word of s1 */
ldmia r1!, {r3} /* r3 = next word of s2 */
subs r4, r2, r5
bics r4, r2
ands r4, r6 /* r4 != 0 iff s1's word contains a NUL byte */
beq 3f /* no NUL: compare the words wholesale */
#ifdef __ARM_BIG_ENDIAN
/* NUL present: compare byte fields from most to least significant;
   Byte_Test/DoSub branch to 4f with the difference in r0 (0 when
   the strings are equal up to the NUL). */
Byte_Test #24, 4f
Byte_Test #16, 4f
Byte_Test #8, 4f
b 7f /* only the lowest byte remains undecided */
3:
cmp r2, r3
beq 1b /* words equal, no NUL: keep scanning */
/* Big-endian: native word order matches memory (string) order, so
   an unsigned word compare decides the result directly. */
cmp r2, r3
#else
/* NUL present (little-endian): test bytes 0, 0-1, 0-2, then byte 3;
   DoSub branches to 2f with the difference in r0. */
uxtb r0, r2 /* byte 0 of s1's word */
uxtb r1, r3 /* byte 0 of s2's word */
DoSub #24, 2f
uxth r0, r2 /* bytes 0-1 */
uxth r1, r3
DoSub #16, 2f
lsls r0, r2, #8 /* bytes 0-2 (drop the top byte) */
lsls r1, r3, #8
lsrs r0, r0, #8
lsrs r1, r1, #8
DoSub #8, 2f
lsrs r0, r2, #24 /* byte 3 */
lsrs r1, r3, #24
subs r0, r0, r1 /* last byte decides */
2:
pop {r4, r5, r6, pc} /* return r0 (difference, or 0 at matching NUL) */
3:
cmp r2, r3
beq 1b /* words equal, no NUL: keep scanning */
/* Words differ, no NUL: byte-reverse so an unsigned word compare
   reflects memory (string) order. */
rev r0, r2
rev r1, r3
cmp r0, r1
#endif
bls 5f /* s1's word lower: return -1 */
movs r0, #1 /* s1 > s2 */
4:
pop {r4, r5, r6, pc} /* return r0 (set above or by a Byte_Test exit) */
5:
movs r0, #0
mvns r0, r0 /* r0 = -1: s1 < s2 */
pop {r4, r5, r6, pc}
6:
/* Unaligned inputs: byte-at-a-time compare, unrolled twice. */
ldrb r2, [r0, #0]
ldrb r3, [r1, #0]
adds r0, #1
adds r1, #1
cmp r2, #0 /* end of s1? */
beq 7f
cmp r2, r3
bne 7f
ldrb r2, [r0, #0]
ldrb r3, [r1, #0]
adds r0, #1
adds r1, #1
cmp r2, #0
beq 7f
cmp r2, r3
beq 6b
7:
subs r0, r2, r3 /* difference of the decisive bytes */
pop {r4, r5, r6, pc}
.cfi_endproc
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 6,243
|
plugin-libc/newlib/libc/machine/arm/strlen-armv7.S
|
/* Copyright (c) 2010-2011,2013 Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Assumes:
ARMv6T2 or ARMv7E-M, AArch32
*/
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include "arm-acle-compat.h"
#include "arm_asm.h"
/* def_fn f [p2align]: define a global function symbol \f in .text,
   aligned to 2^\p2align bytes, with ELF type %function.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#ifdef __ARMEB__
#define S2LO lsl
#define S2HI lsr
#else
#define S2LO lsr
#define S2HI lsl
#endif
/* This code requires Thumb. */
#if __ARM_ARCH_PROFILE == 'M'
#if __ARM_ARCH >= 8
/* keep config inherited from -march=. */
#else
.arch armv7e-m
#endif /* if __ARM_ARCH >= 8 */
#else
.arch armv6t2
#endif
.eabi_attribute Tag_ARM_ISA_use, 0
.thumb
.syntax unified
/* Parameters and result. */
#define srcin r0
#define result r0
/* Internal variables. */
#define src r1
#define data1a r2
#define data1b r3
#define const_m1 r12
#define const_0 r4
#define tmp1 r4 /* Overlaps const_0 */
#define tmp2 r5
/* size_t strlen (const char *s); s in r0 (srcin), result in r0.
   Scans 8 bytes per step (32 per unrolled loop pass), using
   UADD8/SEL to detect NUL bytes: uadd8 of a byte with 0xff carries
   (sets the per-byte GE flag) iff the byte is non-zero, so the
   following sel builds a word that is non-zero iff a NUL was seen.  */
def_fn strlen p2align=6
.fnstart
.cfi_startproc
prologue 4 5 push_ip=HAVE_PAC_LEAF
pld [srcin, #0]
bic src, srcin, #7 /* src = s rounded down to 8-byte alignment */
mvn const_m1, #0 /* const_m1 = 0xffffffff */
ands tmp1, srcin, #7 /* (8 - bytes) to alignment. */
pld [src, #32]
bne.w .Lmisaligned8 /* not 8-byte aligned: mask leading bytes first */
mov const_0, #0
mov result, #-8 /* biased: each 8-byte step below adds 8 */
.Lloop_aligned:
/* Bytes 0-7. */
ldrd data1a, data1b, [src]
pld [src, #64]
add result, result, #8
.Lstart_realigned:
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 8-15. */
ldrd data1a, data1b, [src, #8]
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 16-23. */
ldrd data1a, data1b, [src, #16]
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 24-31. */
ldrd data1a, data1b, [src, #24]
add src, src, #32 /* advance to the next 32-byte chunk */
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cmp data1b, #0 /* cbnz cannot reach back this far; use cmp/beq */
beq .Lloop_aligned
.Lnull_found:
/* Here data1a/data1b hold 0xff for each NUL source byte, 0x00
   otherwise; result = offset of the current doubleword.  */
.cfi_remember_state
cmp data1a, #0 /* NUL in the high word (bytes 4-7)? */
itt eq
addeq result, result, #4
moveq data1a, data1b
#ifndef __ARMEB__
rev data1a, data1a /* put the lowest-addressed byte on top */
#endif
clz data1a, data1a /* 8 * index of the first NUL byte */
add result, result, data1a, lsr #3 /* Bits -> Bytes. */
epilogue 4 5 push_ip=HAVE_PAC_LEAF
.Lmisaligned8:
.cfi_restore_state
/* Load the enclosing aligned doubleword and force the bytes that
   precede the string to non-zero so they cannot match as NUL. */
ldrd data1a, data1b, [src]
and tmp2, tmp1, #3
rsb result, tmp1, #0 /* bias result by the bytes counted before s */
lsl tmp2, tmp2, #3 /* Bytes -> bits. */
tst tmp1, #4 /* misaligned by 4+: all of data1a is padding */
pld [src, #64]
S2HI tmp2, const_m1, tmp2 /* mask covering the leading padding bytes */
orn data1a, data1a, tmp2
itt ne
ornne data1b, data1b, tmp2
movne data1a, const_m1 /* whole low word is padding: force non-zero */
mov const_0, #0
b .Lstart_realigned
.cfi_endproc
.cantunwind
.fnend
.size strlen, . - strlen
|
4ms/metamodule-plugin-sdk
| 1,999
|
plugin-libc/newlib/libc/machine/arm/memcpy.S
|
/*
* Copyright (c) 2013-2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* The structure of the following #if #else #endif conditional chain
must match the chain in memcpy-stub.c. */
#include "../../../../include/arm-acle-compat.h"
#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED)
/* Defined in memcpy-stub.c. */
#elif (__ARM_ARCH >= 7 && __ARM_ARCH_PROFILE == 'A' \
&& defined (__ARM_FEATURE_UNALIGNED))
#include "memcpy-armv7a.S"
#elif __ARM_ARCH_ISA_THUMB == 2 && !__ARM_ARCH_ISA_ARM
#include "memcpy-armv7m.S"
#else
/* Defined in memcpy-stub.c. */
#endif
|
4ms/metamodule-plugin-sdk
| 1,934
|
plugin-libc/newlib/libc/machine/arm/strlen-thumb1-Os.S
|
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* def_fn f [p2align]: define a global function symbol \f in .text,
   aligned to 2^\p2align bytes, with ELF type %function.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
.arch armv4t
.eabi_attribute Tag_also_compatible_with, "\006\013" /* ARMv6-M. */
.eabi_attribute Tag_ARM_ISA_use, 0
.thumb
.syntax unified
/* size_t strlen (const char *s); s in r0, result in r0.
   Size-optimized Thumb-1 byte loop: walk s[r3] until the NUL, then
   the length is the NUL's index (r3 - 1, since r3 was already
   post-incremented).  Clobbers r2, r3 and the flags only.  */
def_fn strlen p2align=1
movs r3, #0 /* r3 = index into s */
1:
ldrb r2, [r0, r3] /* r2 = s[r3] */
adds r3, r3, #1
cmp r2, #0
bne 1b /* loop until the NUL byte */
subs r0, r3, #1 /* length = index of the NUL */
bx lr
.size strlen, . - strlen
|
4ms/metamodule-plugin-sdk
| 7,156
|
plugin-libc/newlib/libc/machine/arm/setjmp.S
|
/* This is a simple version of setjmp and longjmp.
Nick Clifton, Cygnus Solutions, 13 June 1997. */
#include "../../../../include/arm-acle-compat.h"
/* ANSI concatenation macros. */
#define CONCAT(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a##b
#ifndef __USER_LABEL_PREFIX__
#error __USER_LABEL_PREFIX__ not defined
#endif
#define SYM(x) CONCAT (__USER_LABEL_PREFIX__, x)
#ifdef __ELF__
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#else
#define TYPE(x)
#define SIZE(x)
#endif
/* Jump buffer allocation sizes. */
#define JUMPBUF_CORE_REGS_SIZE (10 * 4)
#define JUMPBUF_FP_REGS_SIZE (8 * 8)
#define JUMPBUF_PAC (JUMPBUF_CORE_REGS_SIZE + JUMPBUF_FP_REGS_SIZE + 0)
/* Arm/Thumb interworking support:
The interworking scheme expects functions to use a BX instruction
to return control to their parent. Since we need this code to work
in both interworked and non-interworked environments as well as with
older processors which do not have the BX instruction we do the
following:
Test the return address.
If the bottom bit is clear perform an "old style" function exit.
(We know that we are in ARM mode and returning to an ARM mode caller).
Otherwise use the BX instruction to perform the function exit.
We know that we will never attempt to perform the BX instruction on
an older processor, because that kind of processor will never be
interworked, and a return address with the bottom bit set will never
be generated.
In addition, we do not actually assemble the BX instruction as this would
require us to tell the assembler that the processor is an ARM7TDMI and
it would store this information in the binary. We want this binary to be
able to be linked with binaries compiled for older processors however, so
we do not want such information stored there.
If we are running using the APCS-26 convention however, then we never
test the bottom bit, because this is part of the processor status.
Instead we just do a normal return, since we know that we cannot be
returning to a Thumb caller - the Thumb does not support APCS-26.
Function entry is much simpler. If we are compiling for the Thumb we
just switch into ARM mode and then drop through into the rest of the
function. The function exit code will take care of the restore to
Thumb mode.
For Thumb-2 do everything in Thumb mode. */
.syntax unified
/* GCC 12.1 and later will tell the assembler exactly which floating
point (or MVE) unit is required and we don't want to override
that. Conversely, older versions of the compiler don't pass this
information so we need to enable the VFP version that is most
appropriate. The choice here should support all suitable VFP
versions that the older toolchains can handle. */
#if __GNUC__ && __GNUC__ < 12
/* Ensure that FPU instructions are correctly compiled and, likewise,
the appropriate build attributes are added to the resulting object
file. Check whether the MVE extension is present and whether
we have support for hardware floating point-operations. VFPxd
covers all the cases we need in this file for hardware
floating-point and should be compatible with all required FPUs
that we need to support. */
# if __ARM_FP
.fpu vfpxd
# endif
# if __ARM_FEATURE_MVE
.arch_extension mve
# endif
#endif
#if __ARM_ARCH_ISA_THUMB == 1 && !__ARM_ARCH_ISA_ARM
/* ARMv6-M-like has to be implemented in Thumb mode. */
.thumb
.thumb_func
.globl SYM (setjmp)
TYPE (setjmp)
/* int setjmp (jmp_buf env); env in r0.  ARMv6-M (Thumb-1) version.
   Buffer layout (10 words): env[0..3] = r4-r7,
   env[4..9] = r8, r9, r10, fp, sp, lr.  Returns 0 on the direct
   call; longjmp later resumes here with a non-zero r0.  */
SYM (setjmp):
/* Save registers in jump buffer. */
stmia r0!, {r4, r5, r6, r7}
mov r1, r8 /* Thumb-1 stmia only takes low regs: stage high regs */
mov r2, r9
mov r3, r10
mov r4, fp
mov r5, sp
mov r6, lr
stmia r0!, {r1, r2, r3, r4, r5, r6}
subs r0, r0, #40 /* rewind to the start of the buffer (10 words) */
/* Restore callee-saved low regs (r4-r6 were used as staging above). */
ldmia r0!, {r4, r5, r6, r7}
/* Return zero. */
movs r0, #0
bx lr
.thumb_func
.globl SYM (longjmp)
TYPE (longjmp)
/* void longjmp (jmp_buf env, int val); env in r0, val in r1.
   ARMv6-M (Thumb-1) version.  Restores the context saved by setjmp
   above and resumes at the saved lr with r0 = val (or 1 if val is
   0, as C requires).  */
SYM (longjmp):
/* Restore High regs. */
adds r0, r0, #16 /* skip the r4-r7 slots for now */
ldmia r0!, {r2, r3, r4, r5, r6} /* saved r8, r9, r10, fp, sp */
mov r8, r2
mov r9, r3
mov r10, r4
mov fp, r5
mov sp, r6
ldmia r0!, {r3} /* lr */
/* Restore low regs. */
subs r0, r0, #40 /* back to the start of the buffer */
ldmia r0!, {r4, r5, r6, r7}
/* Return the result argument, or 1 if it is zero. */
movs r0, r1
bne 1f
movs r0, #1
1:
bx r3 /* resume at the address saved by setjmp */
#else
#ifdef __APCS_26__
#define RET movs pc, lr
#elif defined(__thumb2__)
#define RET bx lr
#else
#define RET tst lr, #1; \
moveq pc, lr ; \
.inst 0xe12fff1e /* bx lr */
#endif
/* COND where when: in Thumb-2, emits an IT-family instruction
   ("i\where \when", e.g. "it eq"); in ARM state conditional
   execution needs no IT, so it expands to nothing.  */
#ifdef __thumb2__
.macro COND where when
i\where \when
.endm
#else
.macro COND where when
.endm
#endif
/* MODE: directives placed before each function label to select the
   instruction set.  PROLOGUE name: per-function entry shim — only
   Thumb-1 needs one, switching to ARM state via "bx pc" so the rest
   of the body can be shared ARM code.  */
#if defined(__thumb2__)
.macro MODE
.thumb
.thumb_func
.endm
.macro PROLOGUE name
.endm
#elif defined(__thumb__)
#define MODE .thumb_func
.macro PROLOGUE name
.code 16
bx pc /* pc reads 4 ahead here, landing on the ARM code below */
nop
.code 32
SYM (.arm_start_of.\name):
.endm
#else /* Arm */
#define MODE .code 32
.macro PROLOGUE name
.endm
#endif
/* FUNC_START name: open a function — place it in .text, align,
   select the instruction set (MODE), export and type the symbol,
   start the unwind/CFI regions, and emit any mode-switch shim
   (PROLOGUE).  */
.macro FUNC_START name
.text
.align 2
MODE
.globl SYM (\name)
.fnstart
.cfi_startproc
TYPE (\name)
SYM (\name):
PROLOGUE \name
.endm
/* FUNC_END name: emit the interworking-aware return (RET), close the
   CFI/unwind regions, and record the symbol size.  */
.macro FUNC_END name
RET
.cfi_endproc
.fnend
SIZE (\name)
.endm
/* --------------------------------------------------------------------
int setjmp (jmp_buf);
-------------------------------------------------------------------- */
/* int setjmp (jmp_buf env); env in r0.  ARM / Thumb-2 version.
   Saves r4-r10, fp, sp, lr, plus d8-d15 when an FPU or MVE is
   present, and the PAC code (at offset JUMPBUF_PAC) when pointer
   authentication is enabled.  Returns 0 on the direct call; the
   actual return instruction comes from FUNC_END.  */
FUNC_START setjmp
#if __ARM_FEATURE_PAC_DEFAULT
# if __ARM_FEATURE_BTI_DEFAULT
pacbti ip, lr, sp /* BTI landing pad + compute the PAC into ip */
# else
pac ip, lr, sp
# endif /* __ARM_FEATURE_BTI_DEFAULT */
mov r3, ip
str r3, [r0, #JUMPBUF_PAC] /* stash the PAC for longjmp to re-check */
.cfi_register 143, 12 /* return-address auth state lives in ip (r12) */
#else
# if __ARM_FEATURE_BTI_DEFAULT
bti
# endif /* __ARM_FEATURE_BTI_DEFAULT */
#endif /* __ARM_FEATURE_PAC_DEFAULT */
/* Save all the callee-preserved registers into the jump buffer. */
#ifdef __thumb2__
mov ip, sp /* Thumb-2 stmia cannot store sp directly */
stmia r0!, { r4-r10, fp, ip, lr }
#else
stmia r0!, { r4-r10, fp, sp, lr }
#endif
#if defined __ARM_FP || defined __ARM_FEATURE_MVE
vstm r0, { d8-d15 } /* callee-saved FP/MVE registers */
#endif
/* When setting up the jump buffer return 0. */
mov r0, #0
#if __ARM_FEATURE_PAC_DEFAULT
mov ip, r3
aut ip, lr, sp /* authenticate the return address before RET */
#endif /* __ARM_FEATURE_PAC_DEFAULT */
FUNC_END setjmp
/* --------------------------------------------------------------------
volatile void longjmp (jmp_buf, int);
-------------------------------------------------------------------- */
/* volatile void longjmp (jmp_buf env, int val); env in r0, val in r1.
   ARM / Thumb-2 version.  Restores the context saved by setjmp and
   "returns" at the saved call site with r0 = val (or 1 if val is 0).
   With PAC enabled, re-authenticates the saved return address using
   the PAC code stored in the buffer.  The return itself comes from
   FUNC_END.  */
FUNC_START longjmp
#if __ARM_FEATURE_BTI_DEFAULT
bti
#endif /* __ARM_FEATURE_BTI_DEFAULT */
#if __ARM_FEATURE_PAC_DEFAULT
/* Keep original jmpbuf address for retrieving pac-code
   for authentication. */
mov r2, r0 /* r0 is advanced by the ldmia below */
#endif /* __ARM_FEATURE_PAC_DEFAULT */
/* If we have stack extension code it ought to be handled here. */
/* Restore the registers, retrieving the state when setjmp() was called. */
#ifdef __thumb2__
ldmia r0!, { r4-r10, fp, ip, lr } /* Thumb-2 ldmia cannot load sp */
mov sp, ip
#else
ldmia r0!, { r4-r10, fp, sp, lr }
#endif
#if defined __ARM_FP || defined __ARM_FEATURE_MVE
vldm r0, { d8-d15 } /* callee-saved FP/MVE registers */
#endif
/* Put the return value into the integer result register.
   But if it is zero then return 1 instead. */
movs r0, r1
it eq
moveq r0, #1
#if __ARM_FEATURE_PAC_DEFAULT
ldr ip, [r2, #JUMPBUF_PAC]
aut ip, lr, sp /* verify the restored lr against the saved PAC */
#endif /* __ARM_FEATURE_PAC_DEFAULT */
FUNC_END longjmp
#endif
|
4ms/metamodule-plugin-sdk
| 15,559
|
plugin-libc/newlib/libc/machine/arm/memcpy-armv7a.S
|
/* Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This memcpy routine is optimised for Cortex-A15 cores and takes advantage
of VFP or NEON when built with the appropriate flags.
Assumptions:
ARMv6 (ARMv7-a if using Neon)
ARM state
Unaligned accesses
LDRD/STRD support unaligned word accesses
If compiled with GCC, this file should be enclosed within following
pre-processing check:
if defined (__ARM_ARCH_7A__) && defined (__ARM_FEATURE_UNALIGNED)
*/
.syntax unified
/* This implementation requires ARM state. */
.arm
#ifdef __ARM_NEON__
.fpu neon
.arch armv7-a
# define FRAME_SIZE 4
# define USE_VFP
# define USE_NEON
#elif !defined (__SOFTFP__)
.arch armv6
.fpu vfpv2
# define FRAME_SIZE 32
# define USE_VFP
#else
.arch armv6
# define FRAME_SIZE 32
#endif
/* Old versions of GAS incorrectly implement the NEON align semantics. */
#ifdef BROKEN_ASM_NEON_ALIGN
#define ALIGN(addr, align) addr,:align
#else
#define ALIGN(addr, align) addr:align
#endif
#define PC_OFFSET 8 /* PC pipeline compensation. */
#define INSN_SIZE 4
/* Call parameters. */
#define dstin r0
#define src r1
#define count r2
/* Locals. */
#define tmp1 r3
#define dst ip
#define tmp2 r10
#ifndef USE_NEON
/* For bulk copies using GP registers. */
#define A_l r2 /* Call-clobbered. */
#define A_h r3 /* Call-clobbered. */
#define B_l r4
#define B_h r5
#define C_l r6
#define C_h r7
#define D_l r8
#define D_h r9
#endif
/* Number of lines ahead to pre-fetch data. If you change this the code
below will need adjustment to compensate. */
#define prefetch_lines 5
#ifdef USE_VFP
/* cpy_line_vfp vreg, base: copy one 64-byte line at [dst+\base] /
   [src+\base], interleaving VFP stores with the loads for the next
   use of each register (\vreg plus d0-d2 form a software pipeline:
   each register is stored before being refilled).  The final refill
   of \vreg comes from `prefetch_lines` cache lines ahead, acting as
   a prefetch whose data is actually kept.  */
.macro cpy_line_vfp vreg, base
vstr \vreg, [dst, #\base]
vldr \vreg, [src, #\base]
vstr d0, [dst, #\base + 8]
vldr d0, [src, #\base + 8]
vstr d1, [dst, #\base + 16]
vldr d1, [src, #\base + 16]
vstr d2, [dst, #\base + 24]
vldr d2, [src, #\base + 24]
vstr \vreg, [dst, #\base + 32]
vldr \vreg, [src, #\base + prefetch_lines * 64 - 32] /* look-ahead load */
vstr d0, [dst, #\base + 40]
vldr d0, [src, #\base + 40]
vstr d1, [dst, #\base + 48]
vldr d1, [src, #\base + 48]
vstr d2, [dst, #\base + 56]
vldr d2, [src, #\base + 56]
.endm
/* cpy_tail_vfp vreg, base: same 64-byte line copy as cpy_line_vfp,
   but without the look-ahead refill of \vreg — used to drain the
   software pipeline at the end of the long-copy loop.  */
.macro cpy_tail_vfp vreg, base
vstr \vreg, [dst, #\base]
vldr \vreg, [src, #\base]
vstr d0, [dst, #\base + 8]
vldr d0, [src, #\base + 8]
vstr d1, [dst, #\base + 16]
vldr d1, [src, #\base + 16]
vstr d2, [dst, #\base + 24]
vldr d2, [src, #\base + 24]
vstr \vreg, [dst, #\base + 32] /* \vreg is not reloaded here */
vstr d0, [dst, #\base + 40]
vldr d0, [src, #\base + 40]
vstr d1, [dst, #\base + 48]
vldr d1, [src, #\base + 48]
vstr d2, [dst, #\base + 56]
vldr d2, [src, #\base + 56]
.endm
#endif
/* def_fn f [p2align]: define a global function symbol \f in .text,
   aligned to 2^\p2align bytes, with ELF type %function.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
def_fn memcpy p2align=6
mov dst, dstin /* Preserve dstin, we need to return it. */
cmp count, #64
bge .Lcpy_not_short
/* Deal with small copies quickly by dropping straight into the
exit block. */
.Ltail63unaligned:
#ifdef USE_NEON
and tmp1, count, #0x38
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
vld1.8 {d0}, [src]! /* 14 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 12 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 10 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 8 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 6 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 4 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 2 words to go. */
vst1.8 {d0}, [dst]!
tst count, #4
ldrne tmp1, [src], #4
strne tmp1, [dst], #4
#else
/* Copy up to 15 full words of data. May not be aligned. */
/* Cannot use VFP for unaligned data. */
and tmp1, count, #0x3c
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(60 - PC_OFFSET/2 + INSN_SIZE/2)
/* Jump directly into the sequence below at the correct offset. */
add pc, pc, tmp1, lsl #1
ldr tmp1, [src, #-60] /* 15 words to go. */
str tmp1, [dst, #-60]
ldr tmp1, [src, #-56] /* 14 words to go. */
str tmp1, [dst, #-56]
ldr tmp1, [src, #-52]
str tmp1, [dst, #-52]
ldr tmp1, [src, #-48] /* 12 words to go. */
str tmp1, [dst, #-48]
ldr tmp1, [src, #-44]
str tmp1, [dst, #-44]
ldr tmp1, [src, #-40] /* 10 words to go. */
str tmp1, [dst, #-40]
ldr tmp1, [src, #-36]
str tmp1, [dst, #-36]
ldr tmp1, [src, #-32] /* 8 words to go. */
str tmp1, [dst, #-32]
ldr tmp1, [src, #-28]
str tmp1, [dst, #-28]
ldr tmp1, [src, #-24] /* 6 words to go. */
str tmp1, [dst, #-24]
ldr tmp1, [src, #-20]
str tmp1, [dst, #-20]
ldr tmp1, [src, #-16] /* 4 words to go. */
str tmp1, [dst, #-16]
ldr tmp1, [src, #-12]
str tmp1, [dst, #-12]
ldr tmp1, [src, #-8] /* 2 words to go. */
str tmp1, [dst, #-8]
ldr tmp1, [src, #-4]
str tmp1, [dst, #-4]
#endif
lsls count, count, #31
ldrhcs tmp1, [src], #2
ldrbne src, [src] /* Src is dead, use as a scratch. */
strhcs tmp1, [dst], #2
strbne src, [dst]
bx lr
.Lcpy_not_short:
/* At least 64 bytes to copy, but don't know the alignment yet. */
str tmp2, [sp, #-FRAME_SIZE]!
and tmp2, src, #7
and tmp1, dst, #7
cmp tmp1, tmp2
bne .Lcpy_notaligned
#ifdef USE_VFP
/* Magic dust alert! Force VFP on Cortex-A9. Experiments show
that the FP pipeline is much better at streaming loads and
stores. This is outside the critical loop. */
vmov.f32 s0, s0
#endif
/* SRC and DST have the same mutual 32-bit alignment, but we may
still need to pre-copy some bytes to get to natural alignment.
We bring DST into full 64-bit alignment. */
lsls tmp2, dst, #29
beq 1f
rsbs tmp2, tmp2, #0
sub count, count, tmp2, lsr #29
ldrmi tmp1, [src], #4
strmi tmp1, [dst], #4
lsls tmp2, tmp2, #2
ldrhcs tmp1, [src], #2
ldrbne tmp2, [src], #1
strhcs tmp1, [dst], #2
strbne tmp2, [dst], #1
1:
subs tmp2, count, #64 /* Use tmp2 for count. */
blt .Ltail63aligned
cmp tmp2, #512
bge .Lcpy_body_long
.Lcpy_body_medium: /* Count in tmp2. */
#ifdef USE_VFP
1:
vldr d0, [src, #0]
subs tmp2, tmp2, #64
vldr d1, [src, #8]
vstr d0, [dst, #0]
vldr d0, [src, #16]
vstr d1, [dst, #8]
vldr d1, [src, #24]
vstr d0, [dst, #16]
vldr d0, [src, #32]
vstr d1, [dst, #24]
vldr d1, [src, #40]
vstr d0, [dst, #32]
vldr d0, [src, #48]
vstr d1, [dst, #40]
vldr d1, [src, #56]
vstr d0, [dst, #48]
add src, src, #64
vstr d1, [dst, #56]
add dst, dst, #64
bge 1b
tst tmp2, #0x3f
beq .Ldone
.Ltail63aligned: /* Count in tmp2. */
and tmp1, tmp2, #0x38
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
vldr d0, [src, #-56] /* 14 words to go. */
vstr d0, [dst, #-56]
vldr d0, [src, #-48] /* 12 words to go. */
vstr d0, [dst, #-48]
vldr d0, [src, #-40] /* 10 words to go. */
vstr d0, [dst, #-40]
vldr d0, [src, #-32] /* 8 words to go. */
vstr d0, [dst, #-32]
vldr d0, [src, #-24] /* 6 words to go. */
vstr d0, [dst, #-24]
vldr d0, [src, #-16] /* 4 words to go. */
vstr d0, [dst, #-16]
vldr d0, [src, #-8] /* 2 words to go. */
vstr d0, [dst, #-8]
#else
sub src, src, #8
sub dst, dst, #8
1:
ldrd A_l, A_h, [src, #8]
strd A_l, A_h, [dst, #8]
ldrd A_l, A_h, [src, #16]
strd A_l, A_h, [dst, #16]
ldrd A_l, A_h, [src, #24]
strd A_l, A_h, [dst, #24]
ldrd A_l, A_h, [src, #32]
strd A_l, A_h, [dst, #32]
ldrd A_l, A_h, [src, #40]
strd A_l, A_h, [dst, #40]
ldrd A_l, A_h, [src, #48]
strd A_l, A_h, [dst, #48]
ldrd A_l, A_h, [src, #56]
strd A_l, A_h, [dst, #56]
ldrd A_l, A_h, [src, #64]!
strd A_l, A_h, [dst, #64]!
subs tmp2, tmp2, #64
bge 1b
tst tmp2, #0x3f
bne 1f
ldr tmp2,[sp], #FRAME_SIZE
bx lr
1:
add src, src, #8
add dst, dst, #8
.Ltail63aligned: /* Count in tmp2. */
/* Copy up to 7 d-words of data. Similar to Ltail63unaligned, but
we know that the src and dest are 32-bit aligned so we can use
LDRD/STRD to improve efficiency. */
/* TMP2 is now negative, but we don't care about that. The bottom
six bits still tell us how many bytes are left to copy. */
and tmp1, tmp2, #0x38
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1
ldrd A_l, A_h, [src, #-56] /* 14 words to go. */
strd A_l, A_h, [dst, #-56]
ldrd A_l, A_h, [src, #-48] /* 12 words to go. */
strd A_l, A_h, [dst, #-48]
ldrd A_l, A_h, [src, #-40] /* 10 words to go. */
strd A_l, A_h, [dst, #-40]
ldrd A_l, A_h, [src, #-32] /* 8 words to go. */
strd A_l, A_h, [dst, #-32]
ldrd A_l, A_h, [src, #-24] /* 6 words to go. */
strd A_l, A_h, [dst, #-24]
ldrd A_l, A_h, [src, #-16] /* 4 words to go. */
strd A_l, A_h, [dst, #-16]
ldrd A_l, A_h, [src, #-8] /* 2 words to go. */
strd A_l, A_h, [dst, #-8]
#endif
tst tmp2, #4
ldrne tmp1, [src], #4
strne tmp1, [dst], #4
lsls tmp2, tmp2, #31 /* Count (tmp2) now dead. */
ldrhcs tmp1, [src], #2
ldrbne tmp2, [src]
strhcs tmp1, [dst], #2
strbne tmp2, [dst]
.Ldone:
ldr tmp2, [sp], #FRAME_SIZE
bx lr
.Lcpy_body_long: /* Count in tmp2. */
/* Long copy. We know that there's at least (prefetch_lines * 64)
bytes to go. */
#ifdef USE_VFP
/* Don't use PLD. Instead, read some data in advance of the current
copy position into a register. This should act like a PLD
operation but we won't have to repeat the transfer. */
vldr d3, [src, #0]
vldr d4, [src, #64]
vldr d5, [src, #128]
vldr d6, [src, #192]
vldr d7, [src, #256]
vldr d0, [src, #8]
vldr d1, [src, #16]
vldr d2, [src, #24]
add src, src, #32
subs tmp2, tmp2, #prefetch_lines * 64 * 2
blt 2f
1:
cpy_line_vfp d3, 0
cpy_line_vfp d4, 64
cpy_line_vfp d5, 128
add dst, dst, #3 * 64
add src, src, #3 * 64
cpy_line_vfp d6, 0
cpy_line_vfp d7, 64
add dst, dst, #2 * 64
add src, src, #2 * 64
subs tmp2, tmp2, #prefetch_lines * 64
bge 1b
2:
cpy_tail_vfp d3, 0
cpy_tail_vfp d4, 64
cpy_tail_vfp d5, 128
add src, src, #3 * 64
add dst, dst, #3 * 64
cpy_tail_vfp d6, 0
vstr d7, [dst, #64]
vldr d7, [src, #64]
vstr d0, [dst, #64 + 8]
vldr d0, [src, #64 + 8]
vstr d1, [dst, #64 + 16]
vldr d1, [src, #64 + 16]
vstr d2, [dst, #64 + 24]
vldr d2, [src, #64 + 24]
vstr d7, [dst, #64 + 32]
add src, src, #96
vstr d0, [dst, #64 + 40]
vstr d1, [dst, #64 + 48]
vstr d2, [dst, #64 + 56]
add dst, dst, #128
add tmp2, tmp2, #prefetch_lines * 64
b .Lcpy_body_medium
#else
/* Long copy. Use an SMS style loop to maximize the I/O
bandwidth of the core. We don't have enough spare registers
to synthesise prefetching, so use PLD operations. */
/* Pre-bias src and dst. */
sub src, src, #8
sub dst, dst, #8
pld [src, #8]
pld [src, #72]
subs tmp2, tmp2, #64
pld [src, #136]
ldrd A_l, A_h, [src, #8]
strd B_l, B_h, [sp, #8]
ldrd B_l, B_h, [src, #16]
strd C_l, C_h, [sp, #16]
ldrd C_l, C_h, [src, #24]
strd D_l, D_h, [sp, #24]
pld [src, #200]
ldrd D_l, D_h, [src, #32]!
b 1f
.p2align 6
2:
pld [src, #232]
strd A_l, A_h, [dst, #40]
ldrd A_l, A_h, [src, #40]
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [src, #48]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [src, #56]
strd D_l, D_h, [dst, #64]!
ldrd D_l, D_h, [src, #64]!
subs tmp2, tmp2, #64
1:
strd A_l, A_h, [dst, #8]
ldrd A_l, A_h, [src, #8]
strd B_l, B_h, [dst, #16]
ldrd B_l, B_h, [src, #16]
strd C_l, C_h, [dst, #24]
ldrd C_l, C_h, [src, #24]
strd D_l, D_h, [dst, #32]
ldrd D_l, D_h, [src, #32]
bcs 2b
/* Save the remaining bytes and restore the callee-saved regs. */
strd A_l, A_h, [dst, #40]
add src, src, #40
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [sp, #8]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [sp, #16]
strd D_l, D_h, [dst, #64]
ldrd D_l, D_h, [sp, #24]
add dst, dst, #72
tst tmp2, #0x3f
bne .Ltail63aligned
ldr tmp2, [sp], #FRAME_SIZE
bx lr
#endif
.Lcpy_notaligned:
pld [src]
pld [src, #64]
/* There's at least 64 bytes to copy, but there is no mutual
alignment. */
/* Bring DST to 64-bit alignment. */
lsls tmp2, dst, #29
pld [src, #(2 * 64)]
beq 1f
rsbs tmp2, tmp2, #0
sub count, count, tmp2, lsr #29
ldrmi tmp1, [src], #4
strmi tmp1, [dst], #4
lsls tmp2, tmp2, #2
ldrbne tmp1, [src], #1
ldrhcs tmp2, [src], #2
strbne tmp1, [dst], #1
strhcs tmp2, [dst], #2
1:
pld [src, #(3 * 64)]
subs count, count, #64
ldrmi tmp2, [sp], #FRAME_SIZE
bmi .Ltail63unaligned
pld [src, #(4 * 64)]
#ifdef USE_NEON
vld1.8 {d0-d3}, [src]!
vld1.8 {d4-d7}, [src]!
subs count, count, #64
bmi 2f
1:
pld [src, #(4 * 64)]
vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
vld1.8 {d0-d3}, [src]!
vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
vld1.8 {d4-d7}, [src]!
subs count, count, #64
bpl 1b
2:
vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
ands count, count, #0x3f
#else
/* Use an SMS style loop to maximize the I/O bandwidth. */
sub src, src, #4
sub dst, dst, #8
subs tmp2, count, #64 /* Use tmp2 for count. */
ldr A_l, [src, #4]
ldr A_h, [src, #8]
strd B_l, B_h, [sp, #8]
ldr B_l, [src, #12]
ldr B_h, [src, #16]
strd C_l, C_h, [sp, #16]
ldr C_l, [src, #20]
ldr C_h, [src, #24]
strd D_l, D_h, [sp, #24]
ldr D_l, [src, #28]
ldr D_h, [src, #32]!
b 1f
.p2align 6
2:
pld [src, #(5 * 64) - (32 - 4)]
strd A_l, A_h, [dst, #40]
ldr A_l, [src, #36]
ldr A_h, [src, #40]
strd B_l, B_h, [dst, #48]
ldr B_l, [src, #44]
ldr B_h, [src, #48]
strd C_l, C_h, [dst, #56]
ldr C_l, [src, #52]
ldr C_h, [src, #56]
strd D_l, D_h, [dst, #64]!
ldr D_l, [src, #60]
ldr D_h, [src, #64]!
subs tmp2, tmp2, #64
1:
strd A_l, A_h, [dst, #8]
ldr A_l, [src, #4]
ldr A_h, [src, #8]
strd B_l, B_h, [dst, #16]
ldr B_l, [src, #12]
ldr B_h, [src, #16]
strd C_l, C_h, [dst, #24]
ldr C_l, [src, #20]
ldr C_h, [src, #24]
strd D_l, D_h, [dst, #32]
ldr D_l, [src, #28]
ldr D_h, [src, #32]
bcs 2b
/* Save the remaining bytes and restore the callee-saved regs. */
strd A_l, A_h, [dst, #40]
add src, src, #36
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [sp, #8]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [sp, #16]
strd D_l, D_h, [dst, #64]
ldrd D_l, D_h, [sp, #24]
add dst, dst, #72
ands count, tmp2, #0x3f
#endif
ldr tmp2, [sp], #FRAME_SIZE
bne .Ltail63unaligned
bx lr
.size memcpy, . - memcpy
|
4ms/metamodule-plugin-sdk
| 9,453
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv7m.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Very similar to the generic code, but uses Thumb2 as implemented
in ARMv7-M. */
#include "arm_asm.h"
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define data1 r2
#define data2 r3
#define tmp2 r5
#define tmp1 r12
#define syndrome r12 /* Overlaps tmp1 */
.thumb
.syntax unified
/* int strcmp (const char *s1, const char *s2)  -- ARMv7-M / Thumb-2.
   Returns <0, 0 or >0 as s1 sorts before, equal to or after s2.
   In:    src1 (r0), src2 (r1) -- NUL-terminated strings.
   Out:   result (r0); overlaps src1.
   Uses:  data1 (r2), data2 (r3), tmp1/syndrome (r12); tmp2 (r5) is
   saved/restored on the stack around the mutually-misaligned paths
   that need it.
   S2LO/S2HI and MSB/LSB are endianness-dependent helpers -- presumably
   defined in arm_asm.h or earlier in this file; TODO confirm.  */
def_fn strcmp
.fnstart
.cfi_sections .debug_frame
.cfi_startproc
prologue push_ip=HAVE_PAC_LEAF
/* The two pointers can share word alignment only if their low two
   bits agree; otherwise take the byte/overlap path.  */
eor tmp1, src1, src2
tst tmp1, #3
/* Strings not at same byte offset from a word boundary. */
bne .Lstrcmp_unaligned
ands tmp1, src1, #3
bic src1, src1, #3
bic src2, src2, #3
ldr data1, [src1], #4
it eq
ldreq data2, [src2], #4
beq 4f
/* Although s1 and s2 have identical initial alignment, they are
not currently word aligned. Rather than comparing bytes,
make sure that any bytes fetched from before the addressed
bytes are forced to 0xff. Then they will always compare
equal. */
eor tmp1, tmp1, #3
mvn data2, #MSB
lsl tmp1, tmp1, #3
S2LO tmp1, data2, tmp1
ldr data2, [src2], #4
orr data1, data1, tmp1
orr data2, data2, tmp1
.p2align 2
/* Critical loop. */
4:
/* (x - 0x01010101) & ~x & 0x80808080 is non-zero iff word x
   contains a NUL byte (classic zero-byte detection trick).  */
sub syndrome, data1, #0x01010101
cmp data1, data2
/* check for any zero bytes in first word */
itttt eq
biceq syndrome, syndrome, data1
tsteq syndrome, #0x80808080
ldreq data1, [src1], #4
ldreq data2, [src2], #4
beq 4b
2:
.cfi_remember_state
/* There's a zero or a different byte in the word */
/* Peel one byte per iteration: CS after "cmp result, #1" means the
   s1 byte is non-NUL, so also compare it with the s2 byte.  */
S2HI result, data1, #24
S2LO data1, data1, #8
cmp result, #1
it cs
cmpcs result, data2, S2HI #24
it eq
S2LOEQ data2, data2, #8
beq 2b
/* On a big-endian machine, RESULT contains the desired byte in bits
0-7; on a little-endian machine they are in bits 24-31. In
both cases the other bits in RESULT are all zero. For DATA2 the
interesting byte is at the other end of the word, but the
other bits are not necessarily zero. We need a signed result
representing the difference in the unsigned bytes, so for the
little-endian case we can't just shift the interesting bits
up. */
#ifdef __ARM_BIG_ENDIAN
sub result, result, data2, lsr #24
#else
and data2, data2, #255
lsrs result, result, #24
subs result, result, data2
#endif
epilogue push_ip=HAVE_PAC_LEAF
#if 0
/* The assembly code below is based on the following algorithm. */
#ifdef __ARM_BIG_ENDIAN
#define RSHIFT <<
#define LSHIFT >>
#else
#define RSHIFT >>
#define LSHIFT <<
#endif
#define body(shift) \
mask = 0xffffffffU RSHIFT shift; \
data1 = *src1++; \
data2 = *src2++; \
do \
{ \
tmp2 = data1 & mask; \
if (__builtin_expect(tmp2 != data2 RSHIFT shift, 0)) \
{ \
data2 RSHIFT= shift; \
break; \
} \
if (__builtin_expect(((data1 - b1) & ~data1) & (b1 << 7), 0)) \
{ \
/* See comment in assembler below re syndrome on big-endian */\
if ((((data1 - b1) & ~data1) & (b1 << 7)) & mask) \
data2 RSHIFT= shift; \
else \
{ \
data2 = *src2; \
tmp2 = data1 RSHIFT (32 - shift); \
data2 = (data2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
} \
break; \
} \
data2 = *src2++; \
tmp2 ^= data1; \
if (__builtin_expect(tmp2 != data2 LSHIFT (32 - shift), 0)) \
{ \
tmp2 = data1 >> (32 - shift); \
data2 = (data2 << (32 - shift)) RSHIFT (32 - shift); \
break; \
} \
data1 = *src1++; \
} while (1)
const unsigned* src1;
const unsigned* src2;
unsigned data1, data2;
unsigned mask;
unsigned shift;
unsigned b1 = 0x01010101;
char c1, c2;
unsigned tmp2;
while (((unsigned) s1) & 3)
{
c1 = *s1++;
c2 = *s2++;
if (c1 == 0 || c1 != c2)
return c1 - (int)c2;
}
src1 = (unsigned*) (((unsigned)s1) & ~3);
src2 = (unsigned*) (((unsigned)s2) & ~3);
tmp2 = ((unsigned) s2) & 3;
if (tmp2 == 1)
{
body(8);
}
else if (tmp2 == 2)
{
body(16);
}
else
{
body (24);
}
do
{
#ifdef __ARM_BIG_ENDIAN
c1 = (char) tmp2 >> 24;
c2 = (char) data2 >> 24;
#else /* not __ARM_BIG_ENDIAN */
c1 = (char) tmp2;
c2 = (char) data2;
#endif /* not __ARM_BIG_ENDIAN */
tmp2 RSHIFT= 8;
data2 RSHIFT= 8;
} while (c1 != 0 && c1 == c2);
return c1 - c2;
#endif /* 0 */
/* First of all, compare bytes until src1(sp1) is word-aligned. */
.Lstrcmp_unaligned:
.cfi_restore_state
tst src1, #3
beq 2f
.cfi_remember_state
ldrb data1, [src1], #1
ldrb data2, [src2], #1
cmp data1, #1
it cs
cmpcs data1, data2
beq .Lstrcmp_unaligned
sub result, data1, data2
epilogue push_ip=HAVE_PAC_LEAF
2:
.cfi_restore_state
/* src1 is now word aligned; src2 is misaligned by 1, 2 or 3 bytes.
   tmp2 (r5) is callee-saved, so spill it for the overlap loops.  */
stmfd sp!, {r5}
.cfi_adjust_cfa_offset 4
.cfi_rel_offset 5, 0
ldr data1, [src1], #4
and tmp2, src2, #3
bic src2, src2, #3
ldr data2, [src2], #4
cmp tmp2, #2
beq .Loverlap2
bhi .Loverlap1
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
.Loverlap3:
bic tmp2, data1, #MSB
cmp tmp2, data2, S2LO #8
sub syndrome, data1, #0x01010101
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #0xff000000
itt ne
tstne data1, #0x00ff0000
tstne data1, #0x0000ff00
beq .Lstrcmp_done_equal
#else
bics syndrome, syndrome, #0xff000000
bne .Lstrcmp_done_equal
#endif
ldrb data2, [src2]
S2LO tmp2, data1, #24
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #24
#endif
b .Lstrcmp_tail
6:
S2LO tmp2, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
/* Critical inner Loop: Block with 2 bytes initial overlap. */
.p2align 2
.Loverlap2:
S2HI tmp2, data1, #16
sub syndrome, data1, #0x01010101
S2LO tmp2, tmp2, #16
bic syndrome, syndrome, data1
cmp tmp2, data2, S2LO #16
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst data1, #0xff000000
it ne
tstne data1, #0x00ff0000
beq .Lstrcmp_done_equal
#else
lsls syndrome, syndrome, #16
bne .Lstrcmp_done_equal
#endif
ldrh data2, [src2]
S2LO tmp2, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2HI data2, data2, #16
S2LO tmp2, data1, #16
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
/* Critical inner Loop: Block with 1 byte initial overlap. */
.p2align 2
.Loverlap1:
and tmp2, data1, #LSB
cmp tmp2, data2, S2LO #24
sub syndrome, data1, #0x01010101
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #LSB
beq .Lstrcmp_done_equal
ldr data2, [src2], #4
6:
S2LO tmp2, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldmfd sp!, {r5}
.cfi_restore 5
.cfi_adjust_cfa_offset -4
epilogue push_ip=HAVE_PAC_LEAF
/* Byte-wise finish: r2 holds the current s1 byte, result the
   current s2 byte; loop while both equal and non-NUL, then
   return (s1 byte - s2 byte).  */
.Lstrcmp_tail:
.cfi_restore_state
and r2, tmp2, #LSB
and result, data2, #LSB
cmp result, #1
it cs
cmpcs result, r2
itt eq
S2LOEQ tmp2, tmp2, #8
S2LOEQ data2, data2, #8
beq .Lstrcmp_tail
sub result, r2, result
ldmfd sp!, {r5}
.cfi_restore 5
.cfi_adjust_cfa_offset -4
epilogue push_ip=HAVE_PAC_LEAF
.cfi_endproc
.cantunwind
.fnend
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 9,931
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv4.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Basic ARM implementation. This should run on anything except
for ARMv6-M, but there are better implementations for later
revisions of the architecture. This version can support ARMv4T
ARM/Thumb interworking. */
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define data1 r2
#define data2 r3
#define magic1 r4
#define tmp2 r5
#define tmp1 r12
#define syndrome r12 /* Overlaps tmp1 */
/* For armv4t and newer, toolchains will transparently convert
'bx lr' to 'mov pc, lr' if needed. GCC has deprecated support
for anything older than armv4t, but this should handle that
corner case in case anyone needs it anyway */
.macro RETURN
#if __ARM_ARCH <= 4 && __ARM_ARCH_ISA_THUMB == 0
/* Pre-v4T cores without Thumb have no BX; return via MOV (no
   interworking possible or needed there).  */
mov pc, lr
#else
bx lr
#endif
.endm
.arm
/* int strcmp (const char *s1, const char *s2)  -- basic ARM-state
   version (ARMv4T-compatible, returns via the RETURN macro above).
   In:    src1 (r0), src2 (r1) -- NUL-terminated strings.
   Out:   result (r0); overlaps src1.
   Uses:  data1 (r2), data2 (r3), tmp1/syndrome (r12); magic1 (r4)
   and tmp2 (r5) are callee-saved and spilled where needed.
   Unlike the Thumb-2 variant, ARMv4 has no wide immediates, so the
   0x01010101 zero-detect constant is built in magic1 at run time.  */
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
/* Mutual word alignment is only possible if the low two bits of the
   two pointers agree.  */
eor tmp1, src1, src2
tst tmp1, #3
/* Strings not at same byte offset from a word boundary. */
bne .Lstrcmp_unaligned
ands tmp1, src1, #3
bic src1, src1, #3
bic src2, src2, #3
ldr data1, [src1], #4
ldreq data2, [src2], #4
beq 1f
/* Although s1 and s2 have identical initial alignment, they are
not currently word aligned. Rather than comparing bytes,
make sure that any bytes fetched from before the addressed
bytes are forced to 0xff. Then they will always compare
equal. */
eor tmp1, tmp1, #3
mvn data2, #MSB
lsl tmp1, tmp1, #3
S2LO tmp1, data2, tmp1
ldr data2, [src2], #4
orr data1, data1, tmp1
orr data2, data2, tmp1
1:
/* Load the 'magic' constant 0x01010101. */
str r4, [sp, #-4]!
.cfi_def_cfa_offset 4
.cfi_offset 4, -4
mov magic1, #1
orr magic1, magic1, magic1, lsl #8
orr magic1, magic1, magic1, lsl #16
.p2align 2
4:
/* (x - 0x01010101) & ~x & 0x80808080 is non-zero iff word x
   contains a NUL byte.  */
sub syndrome, data1, magic1
cmp data1, data2
/* check for any zero bytes in first word */
biceq syndrome, syndrome, data1
tsteq syndrome, magic1, lsl #7
ldreq data1, [src1], #4
ldreq data2, [src2], #4
beq 4b
2:
/* There's a zero or a different byte in the word */
/* Peel one byte per iteration: CS after "cmp result, #1" means the
   s1 byte is non-NUL, so also compare it with the s2 byte.  */
S2HI result, data1, #24
S2LO data1, data1, #8
cmp result, #1
cmpcs result, data2, S2HI #24
S2LOEQ data2, data2, #8
beq 2b
/* On a big-endian machine, RESULT contains the desired byte in bits
0-7; on a little-endian machine they are in bits 24-31. In
both cases the other bits in RESULT are all zero. For DATA2 the
interesting byte is at the other end of the word, but the
other bits are not necessarily zero. We need a signed result
representing the difference in the unsigned bytes, so for the
little-endian case we can't just shift the interesting bits
up. */
#ifdef __ARM_BIG_ENDIAN
sub result, result, data2, lsr #24
#else
and data2, data2, #255
rsb result, data2, result, lsr #24
#endif
ldr r4, [sp], #4
.cfi_restore 4
.cfi_def_cfa_offset 0
RETURN
#if 0
/* The assembly code below is based on the following algorithm. */
#ifdef __ARM_BIG_ENDIAN
#define RSHIFT <<
#define LSHIFT >>
#else
#define RSHIFT >>
#define LSHIFT <<
#endif
#define body(shift) \
mask = 0xffffffffU RSHIFT shift; \
data1 = *src1++; \
data2 = *src2++; \
do \
{ \
tmp2 = data1 & mask; \
if (__builtin_expect(tmp2 != data2 RSHIFT shift, 0)) \
{ \
data2 RSHIFT= shift; \
break; \
} \
if (__builtin_expect(((data1 - b1) & ~data1) & (b1 << 7), 0)) \
{ \
/* See comment in assembler below re syndrome on big-endian */\
if ((((data1 - b1) & ~data1) & (b1 << 7)) & mask) \
data2 RSHIFT= shift; \
else \
{ \
data2 = *src2; \
tmp2 = data1 RSHIFT (32 - shift); \
data2 = (data2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
} \
break; \
} \
data2 = *src2++; \
tmp2 ^= data1; \
if (__builtin_expect(tmp2 != data2 LSHIFT (32 - shift), 0)) \
{ \
tmp2 = data1 >> (32 - shift); \
data2 = (data2 << (32 - shift)) RSHIFT (32 - shift); \
break; \
} \
data1 = *src1++; \
} while (1)
const unsigned* src1;
const unsigned* src2;
unsigned data1, data2;
unsigned mask;
unsigned shift;
unsigned b1 = 0x01010101;
char c1, c2;
unsigned tmp2;
while (((unsigned) s1) & 3)
{
c1 = *s1++;
c2 = *s2++;
if (c1 == 0 || c1 != c2)
return c1 - (int)c2;
}
src1 = (unsigned*) (((unsigned)s1) & ~3);
src2 = (unsigned*) (((unsigned)s2) & ~3);
tmp2 = ((unsigned) s2) & 3;
if (tmp2 == 1)
{
body(8);
}
else if (tmp2 == 2)
{
body(16);
}
else
{
body (24);
}
do
{
#ifdef __ARM_BIG_ENDIAN
c1 = (char) tmp2 >> 24;
c2 = (char) data2 >> 24;
#else /* not __ARM_BIG_ENDIAN */
c1 = (char) tmp2;
c2 = (char) data2;
#endif /* not __ARM_BIG_ENDIAN */
tmp2 RSHIFT= 8;
data2 RSHIFT= 8;
} while (c1 != 0 && c1 == c2);
return c1 - c2;
#endif /* 0 */
/* First of all, compare bytes until src1(sp1) is word-aligned. */
.Lstrcmp_unaligned:
tst src1, #3
beq 2f
ldrb data1, [src1], #1
ldrb data2, [src2], #1
cmp data1, #1
cmpcs data1, data2
beq .Lstrcmp_unaligned
sub result, data1, data2
RETURN
2:
/* src1 is now word aligned; src2 is misaligned by 1, 2 or 3 bytes.
   Spill the callee-saved registers used by the overlap loops.  */
stmfd sp!, {r4, r5}
.cfi_def_cfa_offset 8
.cfi_offset 4, -8
.cfi_offset 5, -4
mov magic1, #1
orr magic1, magic1, magic1, lsl #8
orr magic1, magic1, magic1, lsl #16
ldr data1, [src1], #4
and tmp2, src2, #3
bic src2, src2, #3
ldr data2, [src2], #4
cmp tmp2, #2
beq .Loverlap2
bhi .Loverlap1
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
.Loverlap3:
bic tmp2, data1, #MSB
cmp tmp2, data2, S2LO #8
sub syndrome, data1, magic1
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #0xff000000
tstne data1, #0x00ff0000
tstne data1, #0x0000ff00
beq .Lstrcmp_done_equal
#else
bics syndrome, syndrome, #0xff000000
bne .Lstrcmp_done_equal
#endif
ldrb data2, [src2]
S2LO tmp2, data1, #24
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #24
#endif
b .Lstrcmp_tail
6:
S2LO tmp2, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
/* Critical inner Loop: Block with 2 bytes initial overlap. */
.p2align 2
.Loverlap2:
S2HI tmp2, data1, #16
sub syndrome, data1, magic1
S2LO tmp2, tmp2, #16
bic syndrome, syndrome, data1
cmp tmp2, data2, S2LO #16
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst data1, #0xff000000
tstne data1, #0x00ff0000
beq .Lstrcmp_done_equal
#else
lsls syndrome, syndrome, #16
bne .Lstrcmp_done_equal
#endif
ldrh data2, [src2]
S2LO tmp2, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2HI data2, data2, #16
S2LO tmp2, data1, #16
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
/* Critical inner Loop: Block with 1 byte initial overlap. */
.p2align 2
.Loverlap1:
and tmp2, data1, #LSB
cmp tmp2, data2, S2LO #24
sub syndrome, data1, magic1
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #LSB
beq .Lstrcmp_done_equal
ldr data2, [src2], #4
6:
S2LO tmp2, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldmfd sp!, {r4, r5}
.cfi_restore 4
.cfi_restore 5
.cfi_def_cfa_offset 0
RETURN
/* Byte-wise finish: r2 holds the current s1 byte, result the
   current s2 byte; loop while both equal and non-NUL, then
   return (s1 byte - s2 byte).  */
.Lstrcmp_tail:
.cfi_restore_state
and r2, tmp2, #LSB
and result, data2, #LSB
cmp result, #1
cmpcs result, r2
S2LOEQ tmp2, tmp2, #8
S2LOEQ data2, data2, #8
beq .Lstrcmp_tail
sub result, r2, result
ldmfd sp!, {r4, r5}
.cfi_restore 4
.cfi_restore 5
.cfi_def_cfa_offset 0
RETURN
.cfi_endproc
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 2,147
|
plugin-libc/newlib/libc/machine/arm/aeabi_memmove-thumb2.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm_asm.h"
.thumb
.syntax unified
.global __aeabi_memmove
.type __aeabi_memmove, %function
/* The 4- and 8-byte-aligned AEABI entry points only promise stronger
   alignment; this byte-wise implementation handles any alignment, so
   both can alias the generic routine.  */
ASM_ALIAS __aeabi_memmove4 __aeabi_memmove
ASM_ALIAS __aeabi_memmove8 __aeabi_memmove
/* void __aeabi_memmove (void *dst, const void *src, size_t n)
   In:    r0 = dst, r1 = src, r2 = n (bytes).
   r0 is never modified, so it still holds dst on return.
   Clobbers: r1-r4 (r4 via the prologue/epilogue spill), flags.
   Copies backwards only when the ranges overlap with dst above src;
   otherwise a forward byte copy is safe.  */
__aeabi_memmove:
.fnstart
.cfi_startproc
prologue 4
/* dst <= src: forward copy is always safe.  */
cmp r0, r1
bls 3f
/* r3 = src + n; if dst >= src + n the ranges don't overlap.  */
adds r3, r1, r2
cmp r0, r3
bcs 3f
/* Overlap with dst inside (src, src+n): copy backwards.
   r1 = dst + n (write cursor), r3 = src + n (read cursor),
   r2 rewound to src as the loop-termination bound.  */
adds r1, r0, r2
cbz r2, 2f
subs r2, r3, r2
1:
ldrb r4, [r3, #-1]!
cmp r2, r3
strb r4, [r1, #-1]!
bne 1b
2:
.cfi_remember_state
epilogue 4
3:
.cfi_restore_state
/* Forward byte copy: r1 walks src up to r2 = src + n,
   r3 = dst - 1 is pre-incremented before each store.  */
cmp r2, #0
beq 2b
add r2, r2, r1
subs r3, r0, #1
4:
ldrb r4, [r1], #1
cmp r2, r1
strb r4, [r3, #1]!
bne 4b
epilogue 4
.cfi_endproc
.cantunwind
.fnend
.size __aeabi_memmove, . - __aeabi_memmove
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.