| repo_id (string, len 5–115) | size (int64, 590–5.01M) | file_path (string, len 4–212) | content (string, len 590–5.01M) |
|---|---|---|---|
stsp/newlib-ia16
| 10,205
|
newlib/libc/machine/i960/strncpy_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "sncpy_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncpy (optimized assembler version for the CA)
dest_addr = strncpy (dest_addr, src_addr, max_bytes)
copy the null terminated string pointed to by src_addr to
the string space pointed to by dest_addr. Return the original
dest_addr. If the source string is shorter than max_bytes,
then null-pad the destination string.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strcpy
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original dest ptr; not modified, so that it may be returned.
g1 src ptr; shift count
g2 max_bytes
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 0xff -- byte extraction mask
Little endian:
g6 lsw of double word for extraction of 4 bytes
g7 msw of double word for extraction of 4 bytes
Big endian:
g6 msw of double word for extraction of 4 bytes
g7 lsw of double word for extraction of 4 bytes
g13 return address
g14 byte extracted.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
.globl _strncpy
.globl __strncpy
.leafproc _strncpy, __strncpy
.align 2
/*
char *strncpy(char *dest, const char *src, size_t n) -- i960CA leafproc.
Register roles are listed in the block comment above; g2 counts the
remaining max_bytes down, and the code fetches one word ahead of the
bytes actually consumed.
*/
_strncpy:
#ifndef __PIC
lda Lrett,g14 # non-PIC: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute stub address ip-relative
#endif
__strncpy:
notand g1,3,g3 # extract word addr of start of src
lda (g14),g13 # preserve return address
cmpibge.f 0,g2,Lexit_code # Lexit if number of bytes to move is <= zero.
cmpo g3,g1 # check alignment of src
ld (g3),LSW # fetch word containing at least first byte
notand g0,3,g4 # extract word addr of start of dest
lda 4(g3),g3 # advance src word addr
bne.f Lcase_245 # branch if src is NOT word aligned
Lcase_13:
cmpo g0,g4 # check alignment of dest
lda 0xff,g5 # load mask for byte extraction
subo 4,g4,g4 # store is pre-incrementing; back up dest addr
bne.f Lcase_3 # branch if dest not word aligned
Lcase_1: # src and dest are word aligned
Lcase_1_wloop: # word copying loop
cmpi g2,4 # check for fewer than four bytes to move
lda (LSW),g1 # keep a copy of the src word
addo 4,g4,g4 # pre-increment dest addr
bl.f Lcase_1_cloop.a # branch if fewer than four bytes to copy
scanbyte 0,g1 # check for null byte in src word
ld (g3),LSW # pre-fetch next word of src
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
bo.f Lcase_1_cloop.c # branch if word contains null byte
addo 4,g3,g3 # post-increment src addr
st g1,(g4) # store word in dest string
b Lcase_1_wloop
Lcase_3_cloop.a:
Lcase_1_cloop.a: # character copying loop (max_bytes <= 3)
#if __i960_BIG_ENDIAN__
rotate 8,g1,g1 # move next byte into position for extraction
#endif
and g5,g1,g14 # extract next char
Lcase_1_cloop.b:
cmpdeci 0,g2,g2 # is max_bytes exhausted?
be.f Lexit_code # Lexit if max_bytes is exhausted
cmpo 0,g14 # check for null byte
stob g14,(g4) # store the byte in dest
#if ! __i960_BIG_ENDIAN__
shro 8,g1,g1 # move next byte into position for extraction
#endif
lda 1(g4),g4 # post-increment dest byte addr
bne.t Lcase_1_cloop.a # branch if null not reached
b Lcase_1_cloop.b # null seen: keep storing zeroes until n runs out
Lexit_code:
mov 0,g14 # conform to register conventions
bx (g13) # g0 = addr of dest; g14 = 0
Lrett:
ret
Lcase_1_cloop.c: # null byte lies somewhere in the current word
Lcase_3_cloop.c:
#if __i960_BIG_ENDIAN__
rotate 24,g5,g5 # move mask into position for testing next byte
#endif
and g5,g1,g14 # extract next char
cmpo 0,g14 # check for null byte
#if ! __i960_BIG_ENDIAN__
lda (g5),LSW # keep a copy of the current mask
shlo 8,g5,g5 # move mask into position for testing next byte
#endif
bne.t Lcase_1_cloop.c # branch if null not reached
#if __i960_BIG_ENDIAN__
subo 1,g5,g5 # null pad.
andnot g5,g1,g1 # last bytes to copy, and null pad rest of word
#else
subo 1,LSW,g5 # mask to get last bytes to copy, and null pad
and g5,g1,g1 # last bytes to copy, and null pad rest of word
#endif
st g1,(g4)
Lcase_1_zwloop: # zero word loop (null-pad dest a word at a time)
cmpi g2,4 # check for fewer than four bytes to move
addo 4,g4,g4 # pre-increment dest addr
bl.f Lcase_1_cloop.b # branch if fewer than four bytes to copy
subo 4,g2,g2 # decrease max_byte count by the 4 bytes moved
st g14,(g4) # store word in dest string (g14 == 0 here)
b Lcase_1_zwloop
Lcase_3: # src is word aligned; dest is not
addo 8,g4,g4 # move dest word ptr to first word boundary
lda (g0),g1 # copy dest byte ptr
mov LSW,MSW # make copy of first word of src
lda 32,g14 # initialize shift count to zero (mod 32)
Lcase_25:
Lcase_3_cloop_at_start: # character copying loop for start of dest str
cmpdeci 0,g2,g2 # is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
shro 24,MSW,g5 # extract next char
#else
and g5,MSW,g5 # extract next char
#endif
be.f Lexit_code # Lexit if max_bytes is exhausted
cmpo 0,g5 # check for null byte
stob g5,(g1) # store the byte in dest
addo 1,g1,g1 # post-increment dest ptr
lda 0xff,g5 # re-initialize byte extraction mask
bne.t 1f # drop thru if null byte reached (to pad)
movl 0,g6 # blank out remainder of input buffer
1:
cmpo g1,g4 # have we reached word boundary in dest yet?
#if __i960_BIG_ENDIAN__
lda -8(g14),g14 # augment the shift counter
rotate 8,MSW,MSW # move next byte into position for extraction
#else
lda 8(g14),g14 # augment the shift counter
shro 8,MSW,MSW # move next byte into position for extraction
#endif
bne.t Lcase_3_cloop_at_start # loop until dest byte ptr hits word boundary
ld (g3),MSW # fetch msw of operand for double shift
Lcase_4: # dest is word aligned; src is not
#if __i960_BIG_ENDIAN__
cmpobne 0,g14,Lcase_3_wloop # branch if src is still unaligned.
Lcase_3_wloop2: # word loop for the shift-count-zero case
cmpi g2,4 # less than four bytes to move?
lda (LSW),g1 # extract 4 bytes of src
lda 4(g3),g3 # post-increment src word addr
bl.f Lcase_3_cloop.a # branch if < four bytes left to move
scanbyte 0,g1 # check for null byte
lda (MSW),LSW # move msw to lsw
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop.c # branch if word contains null byte
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop2
#endif
Lcase_3_wloop:
cmpi g2,4 # less than four bytes to move?
eshro g14,g6,g1 # extract 4 bytes of src across the g6:g7 pair
lda 4(g3),g3 # post-increment src word addr
bl.f Lcase_3_cloop.a # branch if < four bytes left to move
scanbyte 0,g1 # check for null byte
lda (MSW),LSW # move msw to lsw
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop.c # branch if word contains null byte
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop
Lcase_245: # src not word aligned; dest alignment still unknown
cmpo g0,g4 # check alignment of dest
ld (g3),MSW # pre-fetch second half
and 3,g1,g1 # compute shift count
lda 0xff,g5 # load mask for byte extraction
#if __i960_BIG_ENDIAN__
subo g1,4,g14 # adjust shift count for big endian.
shlo 3,g14,g14
#else
shlo 3,g1,g14 # shift count in bits = byte offset * 8
#endif
be.t Lcase_4 # branch if dest is word aligned
or g4,g1,g1 # is src earlier in word, later, or sync w/ dst
cmpo g0,g1 # < indicates first word of dest has more bytes
/* than first word of source. */
lda 4(g4),g4 # move dest word addr to first word boundary
eshro g14,g6,g5 # extract four bytes
lda (g0),g1
bg.f 1f
mov MSW,LSW
lda 4(g3),g3 # move src word addr to second word boundary
1:
mov g5,MSW
lda 0xff,g5
b Lcase_25
/* end of strncpy */
|
stsp/newlib-ia16
| 9,947
|
newlib/libc/machine/i960/strcpy_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcp_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcpy (optimized assembler version for the CA)
dest_addr = strcpy (dest_addr, src_addr)
copy the null terminated string pointed to by src_addr to
the string space pointed to by dest_addr. Return the original
dest_addr.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strcpy
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original dest ptr; not modified, so that it may be returned.
g1 src ptr; shift count
g2 dest ptr; 4 bytes of src
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 0xff -- byte extraction mask
g6 lsw of double word for extraction of 4 bytes (little endian)
msw of double word for extraction of 4 bytes (big endian)
g7 msw of double word for extraction of 4 bytes (little endian)
lsw of double word for extraction of 4 bytes (big endian)
g13 return address
g14 byte extracted. When reaches null byte, which is zero, we will
be in conformance with register conventions, and can return to
the caller with a clear conscience.
procedure strcat
dest_addr = strcat (dest_addr, src_addr)
Appends the string pointed to by src_addr to the string pointed
to by dest_addr. The first character of the source string is
copied to the location initially occupied by the trailing null
byte of the destination string. Thereafter, characters are copied
from the source to the destination up thru the null byte that
trails the source string.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
.globl _strcpy, _strcat
.globl __strcpy, __strcat
.leafproc _strcpy, __strcpy
.leafproc _strcat, __strcat
.align 2
/*
char *strcat(char *dest, const char *src) -- i960CA leafproc.
Skips to the trailing null of dest a word at a time, then joins the
shared strcpy copying code below with g2/g4 positioned at that null.
*/
_strcat:
#ifndef __PIC
lda Lrett,g14 # non-PIC: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute stub address ip-relative
#endif
__strcat:
notand g0,3,g4 # extract word addr of start of dest
lda (g14),g13 # preserve return address
and g0,3,LSW # extract byte offset of dest
ld (g4),MSW # fetch word containing at least first byte
shlo 3,LSW,g14 # get shift count for making mask for first word
subi 1,0,LSW # mask initially all ones
#if __i960_BIG_ENDIAN__
shro g14,LSW,LSW # get mask for bytes needed from first word
lda 0xff000000,g5 # byte extraction mask
#else
shlo g14,LSW,LSW # get mask for bytes needed from first word
lda 0xff,g5 # byte extraction mask
#endif
notor MSW,LSW,MSW # set unneeded bytes to all ones (never look null)
Lsearch_for_word_with_null:
scanbyte 0,MSW # check for null byte
lda 4(g4),g4 # post-increment dest word pointer
mov MSW,LSW # keep a copy of current word
ld (g4),MSW # fetch next word of dest
bno.t Lsearch_for_word_with_null # branch if null not found yet
and g5,LSW,g14 # extract byte
cmpo 0,g14 # branch if null is first byte of word
subo 4,g4,g4 # move dest word ptr back to word with null
notand g1,3,g3 # extract word addr of start of src
lda (g4),g2 # set dest byte ptr to 1st byte of word w/ null
be.f Lcase_14 # null on word boundary: dest acts word aligned
Lsearch_for_null:
#if __i960_BIG_ENDIAN__
shro 8,g5,g5 # move mask down to next byte
#else
shlo 8,g5,g5 # move mask up to next byte
#endif
lda 1(g2),g2 # move dest byte ptr to next byte
and g5,LSW,g14 # extract byte
cmpobne.t 0,g14,Lsearch_for_null # branch if null is not yet found
lda 0xff,g5 # byte extraction mask
b Lcase_235.a # join strcpy; g2 points at dest's null byte
/*
char *strcpy(char *dest, const char *src) -- i960CA leafproc.
*/
_strcpy:
#ifndef __PIC
lda Lrett,g14 # non-PIC: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute stub address ip-relative
#endif
__strcpy:
notand g0,3,g4 # extract word addr of start of dest
lda (g14),g13 # preserve return address
cmpo g0,g4 # check alignment of dest
lda 0xff,g5 # load mask for byte extraction
notand g1,3,g3 # extract word addr of start of src
bne.f Lcase_235 # branch if dest is NOT word aligned
Lcase_14:
cmpo g3,g1 # check alignment of src
ld (g3),LSW # fetch word containing at least first byte
shlo 3,g1,g1 # compute shift count
lda 4(g3),g3 # advance src word addr
#if __i960_BIG_ENDIAN__
lda 0xff,g5 # byte extraction mask
#endif
bne.f Lcase_4 # branch if src is NOT word aligned
Lcase_1: # src and dest are word aligned
subo 4,g4,g4 # store is pre-incrementing; back up dest addr
Lcase_1_wloop: # word copying loop
scanbyte 0,LSW # check for null byte in src word
lda (LSW),g2 # keep a copy of the src word
addo 4,g4,g4 # pre-increment dest addr
ld (g3),LSW # pre-fetch next word of src
addo 4,g3,g3 # post-increment src addr
bo.f Lcase_1_cloop # branch if word contains null byte
st g2,(g4) # store word in dest string
b Lcase_1_wloop
Lcase_3_cloop:
Lcase_1_cloop: # character copying loop
#if __i960_BIG_ENDIAN__
rotate 8,g2,g2 # move next byte into position for extraction
and g5,g2,g14 # extract next char
#else
and g5,g2,g14 # extract next char
shro 8,g2,g2 # move next byte into position for extraction
#endif
cmpo 0,g14 # check for null byte
stob g14,(g4) # store the byte in dest
lda 1(g4),g4 # post-increment dest byte addr
bne.t Lcase_1_cloop # branch if null not reached
Lexit_code:
bx (g13) # g0 = addr of dest; g14 = 0
Lrett:
ret
Lcase_3: # src is word aligned; dest is not
addo 4,g4,g4 # move dest word ptr to first word boundary
mov LSW,MSW # make copy of first word of src
lda 0,g1 # initialize shift count to zero
Lcase_25:
Lcase_3_cloop_at_start: # character copying loop for start of dest str
#if __i960_BIG_ENDIAN__
rotate 8,MSW,MSW # move next byte into position for extraction
and g5,MSW,g14 # extract next char
#else
and g5,MSW,g14 # extract next char
shro 8,MSW,MSW # move next byte into position for extraction
#endif
cmpo 0,g14 # check for null byte
stob g14,(g2) # store the byte in dest
lda 1(g2),g2 # post-increment dest ptr
be.f Lexit_code # branch if null byte reached
cmpo g2,g4 # have we reached word boundary in dest?
lda 8(g1),g1 # augment the shift counter
bne.t Lcase_3_cloop_at_start # loop until dest ptr hits word boundary
Lcase_4: # dest is word aligned; src is not
ld (g3),MSW # fetch msw of operand for double shift
#if __i960_BIG_ENDIAN__
subo g1,0,g1 # Adjust shift count for big endian.
#endif
Lcase_3_wloop:
eshro g1,g6,g2 # extract 4 bytes of src across the g6:g7 pair
lda 4(g3),g3 # post-increment src word addr
scanbyte 0,g2 # check for null byte
lda (MSW),LSW # move msw to lsw
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop # branch if word contains null byte
st g2,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop
Lcase_235: # dest is not word aligned
lda (g0),g2 # copy dest ptr
Lcase_235.a: # strcat enters here with g2 at dest's null byte
cmpo g3,g1 # check alignment of src
ld (g3),LSW # fetch word containing at least first byte
and 3,g1,g14 # compute shift count
lda 4(g3),g3 # advance src word addr
shlo 3,g14,g1 # shift count in bits = byte offset * 8
be.t Lcase_3 # branch if src IS word aligned (case 3)
or g4,g14,g14 # is src earlier in word, later, or sync w/ dst
ld (g3),MSW # pre-fetch second half
cmpo g2,g14 # < indicates first word of dest has more bytes
lda 4(g4),g4 # move dest word addr to first word boundary
/* than first word of source. */
#if __i960_BIG_ENDIAN__
subo g1,0,g14 # Adjust shift count for big endian.
eshro g14,g6,g14 # extract four bytes
bge.f 1f
#else
eshro g1,g6,g14 # extract four bytes
bg.f 1f
#endif
mov MSW,LSW
lda 4(g3),g3 # move src word addr to second word boundary
1:
mov g14,MSW
b Lcase_25
/* end of strcpy */
|
stsp/newlib-ia16
| 5,781
|
newlib/libc/machine/i960/strncat.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strncat.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncat (optimized assembler version for the 80960K Series)
dest_addr = strncat (dest_addr, src_addr, max_bytes)
append the null terminated string pointed to by src_addr to the null
terminated string pointed to by dest_addr. Return the original
dest_addr. If the source string is longer than max_bytes, then
append only max_bytes bytes, and tack on a null byte on the end.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strncat fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
First, skip to the null byte in the destination string. Then
fetch the source string by words and store them by words to the
destination string, until there are fewer than three bytes left
to copy. Then, using the last word of the source (the one that
contains the remaining 0, 1, 2, or 3 bytes to be copied), store
a byte at a time until Ldone.
If, before exhausting the max_byte count, the null byte is encountered
in the source string, then just copy up thru the null byte.
Tactics:
1) Do NOT try to fetch and store the words in a word aligned manner
because, in my judgement, the performance degradation experienced due
to non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment.
*/
.globl _strncat
.globl __strncat
.leafproc _strncat,__strncat
.align 2
/*
char *strncat(char *dest, const char *src, size_t n) -- i960 leafproc.
Register roles: g0 = dest (returned unchanged), g1 = src ptr,
g2 = max_bytes, g3 = 0xff byte mask, g4 = extracted byte / word copy,
g5 = working dest ptr, g6 = saved return linkage, g7 = current word,
g14 = scratch (holds 0 on every exit path, per leafproc convention).
*/
_strncat:
#ifndef __PIC
lda Lrett,g14 # non-PIC: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute stub address ip-relative
#endif
__strncat:
mov g14,g6 # preserve return address
cmpibge 0, g2, Lno_operation # Lexit early if max_bytes <= 0
mov g0, g5 # g5 = working dest ptr
Lskip_word_loop: # find the dest word containing the null
ld (g5), g7 # fetch word of dest string
addo 4, g5, g5 # post-increment dest ptr
scanbyte 0, g7 # does it contain null byte?
bno Lskip_word_loop # if not, loop
subo 5, g5, g5 # back up: 4 for post-inc, 1 for loop's pre-inc
lda 0xff, g3 # byte extraction mask = 0xff;
Lskip_byte_loop: # find the exact byte offset of the null
and g7, g3, g14 # extract byte of last word of dest string
cmpo 0, g14 # is it null?
addo 1, g5, g5 # adjust dest ptr
shro 8, g7, g7 # position next byte for extraction
bne Lskip_byte_loop # loop if null not found yet
ld (g1), g7 # fetch first word of source string
Lwloop: # word copying loop
cmpo 4, g2 # compare 4 with max_bytes
addo 4, g1, g1 # post-increment source ptr
bge Lcloop.a # branch if at most 4 bytes (g2 <= 4) remain
scanbyte 0, g7 # is null byte reached yet?
mov g7, g4 # keep a copy of the source word
be Lcloop # branch if null byte reached
ld (g1), g7 # pre-fetch next word of source
subo 4, g2, g2 # reduce max_byte counter
st g4, (g5) # store current word
addo 4, g5, g5 # post-increment destination ptr
b Lwloop
Lcloop.b:
addo 1, g5, g5 # post-increment destination ptr
shro 8, g7, g7 # position next byte for extraction
Lcloop: # character copying loop (max_byte > 3)
and g3, g7, g4 # extract character
cmpo 0, g4 # is it null?
stob g4, (g5) # store it
bne Lcloop.b # loop if null not encountered yet
bx (g6) # g0 = dest string address; g14 = 0
Lrett:
ret
Lcloop.c:
addo 1, g5, g5 # post-increment destination ptr
shro 8, g7, g7 # position next byte for extraction
Lcloop.a: # character copying loop (max_byte <= 3)
cmpdeco 0,g2,g2 # max_byte == 0? (then decrement it)
and g3, g7, g4 # extract character
be Ldone # store null and Lexit if max_byte exhausted
cmpo 0, g4 # is it null?
stob g4, (g5) # store it
bne Lcloop.c # loop if null not encountered yet
Ldone: stob g14, (g5) # store trailing null (g14 == 0 here)
bx (g6) # g0 = dest string address; g14 = 0
Lno_operation: mov 0, g14 # conform to register conventions
bx (g6)
/* end of strncat */
|
stsp/newlib-ia16
| 4,578
|
newlib/libc/machine/i960/strncmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strncmp.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncmp (optimized assembler version for the 80960K Series)
result = strncmp (src1_addr, src2_addr, max_bytes)
compare the null terminated string pointed to by src1_addr to
the string pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicographically less than src2_addr,
and 1 if it is lexicographically greater. Do not compare more than
max_bytes bytes.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strncmp
will fetch ahead. Disallowing the fetch ahead would impose
a severe performance penalty.
Strategy:
Fetch and compare the strings by words and go to a character
comparison loop as soon as a pair of words differ. If the
words are equal up through either the exhaustion of max_bytes
or the presence of the null byte, return 0 (equality). Otherwise,
the character comparator will return -1 or 1 for inequality, or
0 if the differing byte is after the null byte or after the
exhaustion of max_bytes.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment.
*/
.globl _strncmp
.globl __strncmp
.leafproc _strncmp,__strncmp
.align 2
/*
int strncmp(const char *s1, const char *s2, size_t n) -- i960 leafproc.
Returns 0 / -1 / 1. Instead of counting n down, g2 is turned into the
end address s1 + n and the s1 pointer (g0) is compared against it.
*/
_strncmp:
#ifndef __PIC
lda .Lrett,g14 # non-PIC: g14 = address of the ret stub
#else
lda .Lrett-(.+8)(ip),g14 # PIC: compute stub address ip-relative
#endif
__strncmp:
mov g14,g13 # preserve return address
ldconst 0,g14 # conform to register conventions
cmpibge 0,g2,Lequal_exit # Lexit early if max_bytes <= 0
addo g2,g0,g2 # g2 = address just past last byte of src1
.Lwloop:
cmpo g0,g2 # are max_bytes exhausted?
ld (g0), g5 # fetch word of source_1
bge Lequal_exit # Lexit (equality) if max_bytes exhausted
ld (g1), g3 # fetch word of source_2
addo 4,g0,g0 # post-increment source_1 ptr
scanbyte 0,g5 # is a null byte present?
addo 4,g1,g1 # post-increment source_2 ptr
be .Lcloop.a # perform char comparator if null byte found
cmpobe g5,g3,.Lwloop # words equal: loop; unequal: fall into char cmp
.Lcloop.a: subo 4,g0,g0 # back up src1 ptr to start of this word
ldconst 0xff,g4 # byte extraction mask
.Lcloop: and g4,g5,g7 # compare individual bytes
and g4,g3,g6
cmpobne g7,g6,.diff # if different, return -1 or 1
cmpo 0,g6 # they are equal. are they null?
shlo 8,g4,g4 # position mask to extract next byte
be Lequal_exit # if they are null, Lexit (equality)
addo 1,g0,g0 # advance src1 byte counter toward g2 limit
cmpobl g0,g2,.Lcloop # if not exhausted, loop. if so, Lexit (equality)
Lequal_exit:
mov 0,g0
bx (g13)
.Lrett:
ret
.diff: bl .neg # flags from cmpobne: byte of s1 < byte of s2?
mov 1,g0
bx (g13)
.neg: subi 1,0,g0 # g0 = -1
.Lexit:
bx (g13)
/* end of strncmp */
|
stsp/newlib-ia16
| 4,835
|
newlib/libc/machine/i960/memchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memchr.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memchr (optimized assembler version for the 80960K series)
src_addr = memchr (src_addr, char, max_bytes)
searching from src_addr for a span of max_bytes bytes, return a
pointer to the first byte in the source array that contains the
indicated char. Return null if the char is not found.
Undefined behavior will occur if the last byte of the source array
is in the last two words of the program's allocated memory space.
This is so because memchr fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Strategy:
Fetch the source array by words and scanbyte the words for the
char until either a word with the byte is found or max_bytes is
exhausted. In the former case, move through the word to find the
matching byte and return its memory address. In the latter case,
return zero (null).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
2) Rather than decrementing max_bytes to zero, I calculate the
address of the byte after the last byte of the source array, and
quit when the source byte pointer passes that. Refining, actually
I calculate the address of the fifth byte after the last byte of
the source array, because the source byte pointer is ahead of the
actual examination point due to fetch ahead.
*/
/*
 * Leafproc entry points.  _memchr is the "call" entry, which loads the
 * return address into g14; __memchr is the "bal" entry used when the
 * caller branch-and-links directly (g14 already holds the return address).
 * In:  g0 = src, g1 = char, g2 = max_bytes
 * Out: g0 = address of first matching byte, or 0 if not found; g14 = 0
 */
	.globl	_memchr
	.globl	__memchr
	.leafproc	_memchr, __memchr
	.align	2
_memchr:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memchr:
	mov	g14,g13		# preserve return address
	lda	0xff,g7		# byte extraction mask
	and	g1,g7,g1	# make char an 8-bit ordinal
	mov	0,g14		# conform to register linkage standard
	cmpibge	0,g2,Lnot_found	# do nothing if max_bytes <= 0
	addo	4,g0,g6		# post-increment src word pointer
	addo	g2,g6,g2	# compute ending address from start and len
	ld	(g0),g4		# fetch first word
	shlo	8,g1,g3		# broadcast the char to four bytes
	or	g1,g3,g3	# g3 = char in bytes 0 and 1
	shlo	16,g3,g5	# copy the pair up into bytes 2 and 3
	or	g3,g5,g3	# g3 = char replicated in all four bytes
Lsearch_for_word_with_char:
	mov	g4,g5		# keep a copy of word
	scanbyte g3,g5		# check for byte with char
	ld	(g6),g4		# fetch ahead next word of src
	bo	Lsearch_for_char	# branch if a byte of this word matched char
	addo	4,g6,g6		# post-increment src word pointer
	cmpobge	g2,g6,Lsearch_for_word_with_char # branch while within max_bytes
Lnot_found:
	mov	0,g0		# char not found.  Return null
	bx	(g13)		# g0 = addr of char in src (or null); g14 = 0
Lrett:
	ret
Lsearch_for_char:	# g5 = word containing a match; g6 = addr 4 past that word
	cmpobe.f g6,g2,Lnot_found	# quit if max_bytes exhausted
	and	g5,g7,g0	# extract byte
	cmpo	g1,g0		# is it char?
	addo	1,g6,g6		# bump src byte ptr
	shro	8,g5,g5		# shift word to position next byte
	bne.t	Lsearch_for_char	# keep scanning until the matching byte
	subo	5,g6,g0		# back up the byte pointer to the match
	bx	(g13)		# g0 = addr of char in src; g14 = 0
/* end of memchr */
|
stsp/newlib-ia16
| 4,628
|
newlib/libc/machine/i960/memcmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcmp.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memcmp (optimized assembler version for the 80960K series)
result = memcmp (src1_addr, src2_addr, max_bytes)
compare the byte array pointed to by src1_addr to the byte array
pointed to by src2_addr. Return 0 iff the arrays are equal, -1 iff
src1_addr is lexicographically less than src2_addr, and 1 iff it is
lexicographically greater. Do not compare more than max_bytes bytes.
Undefined behavior will occur if the end of either source array
is in the last two words of the program's allocated memory space.
This is so because memcmp fetches ahead. Disallowing the fetch ahead
would impose a severe performance penalty.
Strategy:
Fetch the source strings by words and compare the words until either
a differing word is found or max_bytes is exhausted. In the former
case, move through the words to find the differing byte and return
plus or minus one, appropriately. In the latter case, return zero
(equality).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
2) Rather than decrementing max_bytes to zero, I calculate the
address of the byte after the last byte of the source_1 array, and
quit when the source byte pointer passes that.
*/
/*
 * Leafproc entry points.  _memcmp is the "call" entry, which loads the
 * return address into g14; __memcmp is the "bal" entry (g14 already set).
 * In:  g0 = src1, g1 = src2, g2 = max_bytes
 * Out: g0 = 0 (equal), -1 (src1 < src2), or 1 (src1 > src2); g14 = 0
 *
 * Local labels use the .L prefix so they stay out of the symbol table;
 * the former ".diff"/".neg" spellings followed neither convention used
 * elsewhere in this routine and have been renamed .Ldiff/.Lneg.
 */
	.globl	_memcmp
	.globl	__memcmp
	.leafproc	_memcmp,__memcmp
	.align	2
_memcmp:
#ifndef	__PIC
	lda	.Lrett,g14
#else
	lda	.Lrett-(.+8)(ip),g14
#endif
__memcmp:
	mov	g14,g13			# preserve return address
	ldconst	0,g14			# conform to register conventions
	cmpibge	0,g2,Lequal_exit	# quit if max_bytes <= 0
	addo	g0,g2,g2	# calculate byte addr of byte after last in src1
.Lwloop:
	cmpo	g0,g2
	ld	(g0), g5	# fetch word of source_1
	bge	Lequal_exit	# quit (equal) if max_bytes exhausted
	ld	(g1), g3	# fetch word of source_2
	addo	4,g0,g0		# post-increment source_1 byte ptr
	addo	4,g1,g1		# post-increment source_2 byte ptr
	cmpobe	g5,g3,.Lwloop	# branch if source words are equal
	ldconst	0xff,g4		# byte extraction mask
	subo	4,g0,g0		# back up src1 pointer to the differing word
.Lcloop:
	and	g4,g5,g7	# extract and compare individual bytes
	and	g4,g3,g6
	cmpobne	g7,g6,.Ldiff	# branch if they are different
	shlo	8,g4,g4		# position mask for next extraction
	addo	1,g0,g0
	cmpobl	g0,g2,.Lcloop	# continue while max_bytes not exhausted
Lequal_exit:
	mov	0,g0		# arrays compare equal within max_bytes
	bx	(g13)
.Lrett:
	ret
.Ldiff:
	bl	.Lneg		# arrays differ at current byte; the unsigned
				# compare in cmpobne still sets the condition code
	/* return 1 or -1 appropriately */
	mov	1,g0		# src1 byte > src2 byte
	bx	(g13)
.Lneg:
	subi	1,0,g0		# src1 byte < src2 byte: return -1
.Lexit:
	bx	(g13)
/* end of memcmp */
|
stsp/newlib-ia16
| 4,835
|
newlib/libc/machine/i960/memccpy.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memccpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure memccpy (optimized assembler version for the 80960K series)
dest_addr = memccpy (dest_addr, src_addr, char, len)
copy len bytes pointed to by src_addr to the space pointed to by
dest_addr, stopping if char is copied. If char is copied,
return address of byte after char in dest string; else null.
Undefined behavior will occur if the end of the source array is in
the last two words of the program's allocated memory space. This
is so because the routine fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Undefined behavior will also occur if the source and destination
strings overlap.
Strategy:
Fetch the source array by words and store them by words to the
destination array, until there are fewer than three bytes left
to copy. Then, using the last word of the source (the one that
contains the remaining 0, 1, 2, or 3 bytes to be copied), store
	a byte at a time until done.
Tactics:
1) Do NOT try to fetch and store the words in a word aligned manner
because, in my judgement, the performance degradation experienced due
to non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that most source and
destination arrays (even more true of most big source arrays) will
be word aligned to begin with.
2) Rather than decrementing len to zero,
I calculate the address of the byte after the last byte of the
destination array, and quit when the destination byte pointer passes
that.
*/
/*
 * Leafproc entry points.  _memccpy is the "call" entry, which loads the
 * return address into g14; __memccpy is the "bal" entry (g14 already set).
 * In:  g0 = dest, g1 = src, g2 = char, g3 = len
 * Out: g0 = address of byte after char in dest, or 0 if char not copied
 */
	.globl	_memccpy
	.globl	__memccpy	# export bal entry, as the sibling leafprocs do
	.leafproc	_memccpy, __memccpy
	.align	2
_memccpy:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memccpy:
	mov	g14, g13	# preserve return address
	cmpibge	0,g3,Lexit_char_not_found	# nothing to do if len <= 0
	addo	g3,g1,g3	# compute beyond end of src
	ld	(g1), g7	# fetch first word of source
	lda	0xff,g5		# mask for char
	and	g5,g2,g2	# extract only char
	shlo	8,g2,g6		# broadcast char to all four bytes of a word
	or	g2,g6,g6
	shlo	16,g6,g4
	or	g6,g4,g6	# word of char
	b	Lwloop_b
Lwloop_a:
	ld	(g1), g7	# fetch ahead next word of source
	st	g4, (g0)	# store word to dest
	addo	4, g0, g0	# post-increment dest pointer
Lwloop_b:			# word copying loop
	addo	4, g1, g1	# pre-increment src pointer
	cmpo	g3, g1		# fewer than four bytes left?
	mov	g7, g4		# keep a copy of the current word
	bl	Lcloop_setup	# quit word loop if less than 4 bytes
	scanbyte g6, g7		# check for char
	bno	Lwloop_a	# continue word loop if char not found.
Lcloop_setup:
	subo	4, g1, g1	# back down src pointer
	cmpobe	g1, g3, Lexit_char_not_found	# done if no bytes remain
Lcloop_a:	# byte copying loop (tail bytes, or word that holds char)
	and	g5,g4,g7	# extract next byte and check it against char
	cmpo	g7,g2
	stob	g7,(g0)		# store the byte
	addo	1, g0, g0
	be	Lexit_char_found	# stop once char has been copied
	addo	1,g1,g1
	cmpo	g1,g3		# any bytes left to copy?
	shro	8,g4,g4		# position next byte
	bne	Lcloop_a
Lexit_char_not_found:
	mov	0, g0		# char never copied: return null
Lexit_char_found:
	lda	0,g14		# conform to register conventions
	bx	(g13)		# g0 = result; g14 = 0
Lrett:
	ret
/* end of memccpy */
|
stsp/newlib-ia16
| 2,480
|
newlib/libc/machine/i960/strdup.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strdup.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure strdup (optimized assembler version: 80960K series, 80960CA)
dest_addr = strdup (src_addr)
Allocate memory and copy thereto the string pointed to by src_addr.
Return the address of the copy, or null if unable to perform the
operation.
*/
	.text
	.align	2
	.globl	_strdup
/*
 * _strdup:  g0 = source string address on entry.
 * Returns (in g0) the address of a freshly malloc'd copy of the string,
 * or null if the allocation fails.  The copy of the source pointer lives
 * in local register r3, which survives the calls via the i960
 * register-frame mechanism.
 */
_strdup:
	mov	g0,r3		# Keep a copy of the original string addr
	callj	_strlen		# Determine how much to allocate
	addo	1,g0,g0		# Add one byte for the null byte at end
	callj	_malloc		# Allocate the storage
	cmpo	0,g0		# did malloc return null?
	mov	r3,g1		# Original string addr is now src for copy
	bne.t	_strcpy		# Tail-branch to strcpy if allocation succeeded
	ret			# Return the null ptr otherwise
/* end of strdup */
|
stsp/newlib-ia16
| 5,373
|
newlib/libc/machine/i960/memchr_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memchr_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memchr (optimized assembler version for the CA)
src_addr = memchr (src_addr, char, max_bytes)
searching from src_addr for max_bytes bytes, return a pointer to the
first byte that contains the indicated byte in the source string.
Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because, in several cases, memchr
will fetch ahead. Disallowing the fetch ahead would impose a severe
performance penalty.
This program handles two cases:
1) the argument starts on a word boundary
2) the argument doesn't start on a word boundary
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 src ptr; upon return it is a pointer to the matching byte, or null
g1 char to seek
g2 maximum number of bytes to check
g3 char to seek, broadcast to all four bytes
g4 word of the source string
g5 copy of the word
g6 mask to avoid unimportant bytes in first word
g7 byte extraction mask
g13 return address
g14
*/
/*
 * Leafproc entry points.  _memchr is the "call" entry, which loads the
 * return address into g14; __memchr is the "bal" entry (g14 already set).
 * In:  g0 = src, g1 = char, g2 = max_bytes
 * Out: g0 = address of first matching byte, or 0 if not found; g14 = 0
 */
	.globl	_memchr
	.globl	__memchr
	.leafproc	_memchr, __memchr
	.align	2
_memchr:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__memchr:
	mov	g14,g13		# preserve return address
	lda	0xff,g7		# byte extraction mask
	and	g1,g7,g1	# make char an 8-bit ordinal
	lda	0,g14		# conform to register linkage standard
	cmpibge.f 0,g2,Lnot_found	# do nothing if max_bytes <= 0
	addo	g0,g2,g2	# compute ending address from start and len
	and	g0,3,g6		# extract byte offset of src
	notand	g0,3,g0		# extract word addr of start of src
	shlo	8,g1,g3		# broadcast the char to four bytes
	ld	(g0),g4		# fetch word containing at least first byte
	or	g1,g3,g3	# g3 = char in bytes 0 and 1
	shlo	16,g3,g5	# copy the pair up into bytes 2 and 3
	cmpo	g1,g7		# is char being sought 0xff?
	or	g5,g3,g3	# g3 = char replicated in all four bytes
	shlo	3,g6,g6		# get shift count for making mask for first word
	subi	1,0,g5		# mask initially all ones
#if __i960_BIG_ENDIAN__
	shro	g6,g5,g5	# get mask for bytes needed from first word
#else
	shlo	g6,g5,g5	# get mask for bytes needed from first word
#endif
	notor	g4,g5,g4	# set unneeded bytes to all ones
	be.f	Lsearch_for_0xff	# branch if seeking 0xff
Lsearch_for_word_with_char:
	scanbyte g3,g4		# check for byte with char
	lda	4(g0),g0	# pre-increment src word pointer
	mov	g4,g5		# keep a copy of word
	ld	(g0),g4		# fetch ahead next word of src
	bo.f	Lsearch_for_char	# branch if a byte of this word matched char
	cmpoble.t g0,g2,Lsearch_for_word_with_char	# branch while within max_bytes
Lnot_found:
	mov	0,g0		# char not found.  Return null
Lexit_code:
	bx	(g13)		# g0 = addr of char in src (or null); g14 = 0
Lrett:
	ret
Lsearch_for_char:
	subo	4,g0,g0		# back up to the word that contained the match
Lsearch_for_char.a:
	cmpobe.f g0,g2,Lnot_found	# quit if max_bytes exhausted
#if __i960_BIG_ENDIAN__
	rotate	8,g5,g5		# shift word to position next byte
#endif
	and	g5,g7,g6	# extract byte
	cmpo	g1,g6		# is it char?
	lda	1(g0),g0	# bump src byte ptr
#if ! __i960_BIG_ENDIAN__
	shro	8,g5,g5		# shift word to position next byte
#endif
	bne.t	Lsearch_for_char.a	# keep scanning until the matching byte
	subo	1,g0,g0		# back up the byte pointer to the match
	b	Lexit_code
Lsearch_for_0xff:	# char == 0xff: the leading bytes were forced to all
			# ones above and would falsely match, so force them
			# to 0xf0 instead (cannot equal 0xff)
	lda	0xf0f0f0f0,g6	# make first comparison mask for char=-1 case.
	or	g6,g5,g6
	and	g4,g6,g4	# make unimportant bytes of first word 0xf0
	b	Lsearch_for_word_with_char
/* end of memchr */
|
stsp/newlib-ia16
| 10,423
|
newlib/libc/machine/i960/strncat_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "sncat_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncat (optimized assembler version for the CA)
dest_addr = strncat (dest_addr, src_addr, max_bytes)
append the null terminated string pointed to by src_addr to the null
terminated string pointed to by dest_addr. Return the original
dest_addr. If the source string is longer than max_bytes, then
append only max_bytes bytes, and tack on a null byte on the end
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strncat
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original dest ptr; not modified, so that it may be returned.
g1 src ptr; shift count
g2 max_bytes
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 0xff -- byte extraction mask
Little endian:
g6 lsw of double word for extraction of 4 bytes
g7 msw of double word for extraction of 4 bytes
Big endian:
g6 msw of double word for extraction of 4 bytes
g7 lsw of double word for extraction of 4 bytes
g13 return address
g14 byte extracted.
*/
#if __i960_BIG_ENDIAN__
#define	MSW	g6
#define	LSW	g7
#else
#define	LSW	g6
#define	MSW	g7
#endif
/*
 * Leafproc entry points.  _strncat is the "call" entry, which loads the
 * return address into g14; __strncat is the "bal" entry (g14 already set).
 * In:  g0 = dest, g1 = src, g2 = max_bytes
 * Out: g0 = original dest (unchanged); g14 = 0
 */
	.globl	_strncat
	.globl	__strncat
	.leafproc	_strncat, __strncat
	.align	2
_strncat:
#ifndef	__PIC
	lda	Lrett,g14
#else
	lda	Lrett-(.+8)(ip),g14
#endif
__strncat:
	notand	g0,3,g4		# extract word addr of start of dest
	lda	(g14),g13	# preserve return address
	cmpibge.f 0,g2,Lexit_code	# exit if number of bytes to move is <= zero.
	and	g0,3,LSW	# extract byte offset of dest
	ld	(g4),MSW	# fetch word containing at least first byte
	shlo	3,LSW,g14	# get shift count for making mask for first word
	subi	1,0,LSW		# mask initially all ones
#if __i960_BIG_ENDIAN__
	shro	g14,LSW,LSW	# get mask for bytes needed from first word
#else
	shlo	g14,LSW,LSW	# get mask for bytes needed from first word
#endif
	notor	MSW,LSW,MSW	# set unneeded bytes to all ones (cannot be null)
	lda	0xff,g5		# byte extraction mask
Lsearch_for_word_with_null:	# find the word of dest holding the terminator
	scanbyte 0,MSW		# check for null byte
	lda	4(g4),g4	# post-increment dest word pointer
	mov	MSW,LSW		# keep a copy of current word
	ld	(g4),MSW	# fetch ahead next word of dest
	bno.t	Lsearch_for_word_with_null	# branch if null not found yet
#if __i960_BIG_ENDIAN__
	shro	24,LSW,g14	# extract byte
#else
	and	g5,LSW,g14	# extract byte
#endif
	cmpo	0,g14		# is null the first byte of the word?
	subo	4,g4,g4		# move dest word ptr to word with null
	notand	g1,3,g3		# extract word addr of start of src
	bne.t	Lsearch_for_null	# branch if null is not the first byte
Lcase_14:	# dest terminator lands on a word boundary
	cmpo	g1,g3		# check alignment of source
	ld	(g3),LSW	# fetch first word of source
	shlo	3,g1,g14	# compute shift count
	lda	4(g3),g3	# post-increment src addr
	bne.f	Lcase_4		# branch if source is unaligned
Lcase_1:	# src and dest both word aligned from here on
Lcase_1_wloop:			# word copying loop
	cmpi	g2,4		# check for fewer than four bytes to move
	lda	(LSW),g1	# keep a copy of the src word
	bl.f	Lcase_1_cloop	# branch if fewer than four bytes to copy
	scanbyte 0,g1		# check for null byte in src word
	ld	(g3),LSW	# pre-fetch next word of src
	addo	4,g3,g3		# post-increment src addr
	bo.f	Lcase_1_cloop	# branch if word contains null byte
	subi	4,g2,g2		# decrease max_byte count by the 4 bytes moved
	st	g1,(g4)		# store word in dest string
	addo	4,g4,g4		# post-increment dest addr
	b	Lcase_1_wloop
Lcase_3_cloop:
Lcase_1_cloop:		# byte copying loop for the tail (max_bytes <= 3
			# or the word in g1 contains the src terminator)
	cmpdeci	0,g2,g2		# is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
	rotate	8,g1,g1		# move next byte into position for extraction
#endif
	and	g5,g1,g14	# extract next char
	be.f	Lstore_null	# if max_bytes is exhausted, store null and quit
	cmpo	0,g14		# check for null byte
	stob	g14,(g4)	# store the byte in dest
#if ! __i960_BIG_ENDIAN__
	shro	8,g1,g1		# move next byte into position for extraction
#endif
	lda	1(g4),g4	# post-increment dest byte addr
	bne.t	Lcase_1_cloop	# branch if null not reached
	bx	(g13)		# exit (the stored null left g14 == 0)
Lstore_null:
	mov	0,g14		# store null, and set g14 to zero
	stob	g14,(g4)
	bx	(g13)		# exit; g0 still holds the original dest
Lsearch_for_null:	# walk the bytes of LSW until the terminator is found
#if __i960_BIG_ENDIAN__
	shlo	8,LSW,LSW	# check next byte
	shro	24,LSW,g14
#else
	shlo	8,g5,g5		# move mask up to next byte
	and	g5,LSW,g14	# extract byte
#endif
	lda	1(g4),g4	# move dest byte ptr to next byte
	cmpobne.t 0,g14,Lsearch_for_null	# branch if null is not yet found
Lcase_235:	# dest terminator is not on a word boundary
	cmpo	g1,g3		# check alignment of src
	ld	(g3),LSW	# pre-fetch word with start of src
	and	3,g1,g1		# compute shift count
	lda	0xff,g5		# load mask for byte extraction
	shlo	3,g1,g14
	lda	4(g3),g3	# post-increment src word counter
	be.t	Lcase_3		# branch if src is word aligned
	and	g4,3,MSW	# extract byte offset for dest string
	cmpo	MSW,g1		# < indicates first word of dest has more bytes
				/* than first word of source. */
	ld	(g3),MSW	# fetch second word of src
#if __i960_BIG_ENDIAN__
	subo	g14,0,g14	# adjust shift count for big endian
#endif
	eshro	g14,g6,g5	# extract four bytes
#if __i960_BIG_ENDIAN__
	bge.f	1f
#else
	bg.f	1f
#endif
	mov	MSW,LSW
	lda	4(g3),g3	# move src word addr to second word boundary
1:
	mov	g5,MSW
	lda	0xff,g5		# re-load byte extraction mask
	b	Lcase_25
Lcase_3:		# src is word aligned; dest is not
	mov	LSW,MSW		# make copy of first word of src
	lda	32,g14		# initialize shift count to zero (mod 32)
Lcase_25:
Lcase_3_cloop_at_start:		# character copying loop for start of dest str
	cmpdeci	0,g2,g2		# is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
	shro	24,MSW,g5	# extract next char
#else
	and	g5,MSW,g5	# extract next char
#endif
	be.f	Lstore_null	# exit if max_bytes is exhausted
	cmpo	0,g5		# check for null byte
	stob	g5,(g4)		# store the byte in dest
	addo	1,g4,g4		# post-increment dest ptr
	lda	0xff,g5		# re-initialize byte extraction mask
	notand	g4,3,g1		# extract word address
	be.t	Lexit_code	# exit if null byte reached
	cmpo	g1,g4		# have we reached word boundary in dest yet?
#if __i960_BIG_ENDIAN__
	lda	-8(g14),g14	# augment the shift counter
	rotate	8,MSW,MSW	# move next byte into position for extraction
#else
	lda	8(g14),g14	# augment the shift counter
	shro	8,MSW,MSW	# move next byte into position for extraction
#endif
	bne.t	Lcase_3_cloop_at_start	# branch if not yet at word boundary
#if __i960_BIG_ENDIAN__
	cmpo	0,g14
	ld	(g3),MSW	# fetch msw of operand for double shift
	bne	Lcase_3_wloop	# branch if src is still unaligned.
Lcase_3_wloop2:			# word loop, src now aligned (big endian only)
	cmpi	g2,4		# less than four bytes to move?
	mov	LSW,g1		# extract 4 bytes of src
	lda	4(g3),g3	# post-increment src word addr
	bl.f	Lcase_3_cloop	# branch if < four bytes left to move
	scanbyte 0,g1		# check for null byte
	mov	MSW,LSW		# move msw to lsw
	ld	(g3),MSW	# pre-fetch msw of operand for double shift
	bo.f	Lcase_3_cloop	# branch if word contains null byte
	subi	4,g2,g2		# decrease max_byte count by the 4 bytes moved
	st	g1,(g4)		# store 4 bytes to dest
	addo	4,g4,g4		# post-increment dest ptr
	b	Lcase_3_wloop2
Lcase_4:
	subo	g14,0,g14	# adjust shift count for big endian
#else
Lcase_4:
#endif
	ld	(g3),MSW	# fetch msw of operand for double shift
Lcase_3_wloop:			# word loop extracting across a word boundary
	cmpi	g2,4		# less than four bytes to move?
	eshro	g14,g6,g1	# extract 4 bytes of src
	lda	4(g3),g3	# post-increment src word addr
	bl.f	Lcase_3_cloop	# branch if < four bytes left to move
	scanbyte 0,g1		# check for null byte
	mov	MSW,LSW		# move msw to lsw
	ld	(g3),MSW	# pre-fetch msw of operand for double shift
	bo.f	Lcase_3_cloop	# branch if word contains null byte
	subi	4,g2,g2		# decrease max_byte count by the 4 bytes moved
	st	g1,(g4)		# store 4 bytes to dest
	addo	4,g4,g4		# post-increment dest ptr
	b	Lcase_3_wloop
Lexit_code:
	mov	0,g14		# conform to register conventions
	bx	(g13)		# g0 = addr of dest; g14 = 0
Lrett:
	ret
/* end of strncat */
|
stsp/newlib-ia16
| 6,881
|
newlib/libc/machine/i960/memcmp_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcm_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1992,1993 Intel Corp., all rights reserved
*/
/*
procedure memcmp (optimized assembler version for the CA)
result = memcmp (src1_addr, src2_addr, max_bytes)
compare the byte array pointed to by src1_addr to the byte array
pointed to by src2_addr. Return 0 iff the arrays are equal, -1 if
src1_addr is lexicly less than src2_addr, and 1 if it is lexicly
greater. Do not compare more than max_bytes bytes.
Undefined behavior will occur if the end of either source array
is in the last word of the program's allocated memory space. This
is so because, in several cases, memcmp will fetch ahead one word.
Disallowing the fetch ahead would impose a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source1 is word aligned, source2 is not
4) source2 is word aligned, source1 is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g14 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers are sufficient to implement the routine.
The registers are used as follows:
g0 original src1 ptr; extracted word; return result
g1 src2 ptr; byt extraction mask
g2 maximum number of bytes to compare
g3 src2 word ptr
Little endian
g4 lsw of src1
g5 msw of src1
g6 src2 word
g7 src1 word ptr
Big endian
g4 msw of src1
g5 lsw of src1
g6 src1 word ptr
g7 src2 word
g13 return address
g14 shift count
*/
#if __i960_BIG_ENDIAN__
#define MSW g4
#define LSW g5
#define SRC1 g6
#define SRC2 g7
#else
#define LSW g4
#define MSW g5
#define SRC2 g6
#define SRC1 g7
#endif
#-----------------------------------------------------------------------
# int memcmp (const void *src1, const void *src2, size_t max_bytes)
# i960 CA leafproc entry points (_memcmp = call entry, __memcmp = bal entry).
#   In:    g0 = src1, g1 = src2, g2 = max byte count
#   Out:   g0 = 0 (equal), 1 (src1 > src2), -1 (src1 < src2); g14 = 0
#   Uses:  g0-g7, g13, g14 only (see register map in file header)
# NOTE(review): fetches ahead one word past each source array (see file
# header); sources must not end in the program's last allocated word.
#-----------------------------------------------------------------------
.globl _memcmp
.globl __memcmp
.leafproc _memcmp, __memcmp
.align 2
_memcmp:
#ifndef __PIC
lda Lrett,g14               # call-style entry: point g14 at the ret stub
#else
lda Lrett-(.+8)(ip),g14     # PIC form of the same return-address load
#endif
__memcmp:
Lrestart:                   # re-entered from Lsrc2_unaligned with rebiased ptrs
#if __i960_BIG_ENDIAN__
subo 1,g0,SRC1              # back up one byte before masking (big-endian bias)
notand SRC1,3,SRC1 # extract word addr of start of src1
#else
notand g0,3,SRC1 # extract word addr of start of src1
#endif
lda (g14),g13 # preserve return address
cmpibge.f 0,g2,Lequal_exit # return equality if number bytes 0
notand g1,3,g3 # extract word addr of start of src2
ld (SRC1),LSW # fetch word with at least first byte of src1
cmpo g3,g1 # check alignment of src2
ld 4(SRC1),MSW # fetch second word of src1
shlo 3,g0,g14 # compute shift count for src1
#if __i960_BIG_ENDIAN__
subo g14,0,g14 # adjust shift count for big endian.
#endif
ld (g3),SRC2 # fetch word with at least first byte of src2
eshro g14,g4,LSW # extract word of src1
lda 8(SRC1),SRC1 # advance src1 word addr
bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
mov LSW,g0 # at least src2 is word aligned
lda 0xff,g1                 # byte extraction mask for Lcloop
Lwloop: # word comparing loop
cmpo SRC2,g0 # compare src1 and src2 words
lda 4(g3),g3 # pre-increment src2 addr
mov MSW,LSW # move msw of src1 to lsw
ld (SRC1),MSW # pre-fetch next msw of src1
subi 4,g2,g2 # decrement maximum byte count
bne.f Lcloop # branch if src1 and src2 unequal
cmpi 0,g2                   # test remaining count (consumed by bl.t below)
ld (g3),SRC2 # pre-fetch next word of src2
eshro g14,g4,g0 # extract word of src1
lda 4(SRC1),SRC1 # post-increment src1 addr
bl.t Lwloop # branch if max_bytes not reached yet
b Lequal_exit # strings were equal up through max_bytes
Lcloop_setup: # setup for coming from Lsrc2_unaligned
mov LSW,g0 # restore extracted src1 word
subo 4,g2,g2 # make up for later re-incrementing
lda 0xff,g1 # byte extraction mask
Lcloop: # character comparing loop
#if __i960_BIG_ENDIAN__
rotate 24,g1,g1 # shift mask for next byte
#endif
and SRC2,g1,g3 # extract next char of src2
and g0,g1,LSW # extract next char of src1
cmpobne.f LSW,g3,.diff # check for equality
#if ! __i960_BIG_ENDIAN__
shlo 8,g1,g1 # shift mask for next byte
#endif
subi 1,g2,g2 # decrement character counter
b Lcloop # branch if null not reached
Lequal_exit: # words are equal up thru null byte
mov 0,g14 # conform to register conventions
lda 0,g0 # return zero, indicating equality
bx (g13) # return
Lrett:
ret
.diff:                      # differing bytes found; decide ordering
addo 4,g2,g2 # to make up for extra decrement in loop
lda 0,g14                   # conform to leafproc register conventions
bl Lless_than_exit          # ordering decided by the cmpobne above
Lgreater_than_exit:
cmpibge.f 0,g2,Lequal_exit # branch if difference is beyond max_bytes
mov 1,g0                    # return value: 1
bx (g13) # g0 = 1 (src1 > src2)
Lless_than_exit:
cmpibge.f 0,g2,Lequal_exit # branch if difference is beyond max_bytes
subi 1,0,g0                 # g0 = -1
bx (g13) # g0 = -1 (src1 < src2)
Lsrc2_unaligned:
notor g1,3,g14 # first step in computing new src1 ptr
ld 4(g3),SRC1 # fetch second word of src2
shlo 3,g1,MSW # compute shift count for src2
#if __i960_BIG_ENDIAN__
subo MSW,0,MSW              # negate shift count for big endian
#endif
eshro MSW,g6,SRC2 # extract word of src2
cmpo LSW,SRC2 # compare src1 and src2 words
lda 4(g3),g1 # set new src2 ptr
bne.f Lcloop_setup # first four bytes differ
subo g14,g0,g0 # second (final) step in computing new src1 ptr
addi g14,g2,g2 # compute new max_bytes too
lda (g13),g14 # prepare return pointer for Lrestart
b Lrestart # continue with both string fetches shifted
/* ==== file boundary (concatenation artifact): newlib/libc/machine/i960/memcpy_ca.S ==== */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcp_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1992,1993 Intel Corp., all rights reserved
*/
/*
procedure memmove (optimized assembler version for the CA)
procedure memcpy (optimized assembler version for the CA)
dest_addr = memmove (dest_addr, src_addr, len)
dest_addr = memcpy (dest_addr, src_addr, len)
copy len bytes pointed to by src_addr to the space pointed to by
dest_addr. Return the original dest_addr.
Memcpy will fail if the source and destination string overlap
(in particular, if the end of the source is overlapped by the
beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Memmove will not fail if overlap exists.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, the
routine will fetch ahead one word. Disallowing the fetch ahead would
impose a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 dest ptr; not modified, so that it may be returned
g1 src ptr; shift count
g2 len
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 -4 for Lbackwards move
Little endian
g6 lsw of double word for extraction of 4 bytes
g7 msw of double word for extraction of 4 bytes
Big endian
g6 msw of double word for extraction of 4 bytes
g7 lsw of double word for extraction of 4 bytes
g13 return address
g14 byte extracted.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
#-----------------------------------------------------------------------
# void *memcpy  (void *dest, const void *src, size_t len)
# void *memmove (void *dest, const void *src, size_t len)
# i960 CA leafproc entry points (call entries _memcpy/_memmove; bal
# entries __memcpy/__memmove).
#   In:    g0 = dest, g1 = src, g2 = len
#   Out:   g0 = original dest (never modified); g14 = 0
#   Uses:  g0-g7, g13, g14 (see register map in file header)
# Forward copy is used when dest is outside [src, src+len); otherwise
# the Lbackwards path copies from the high end down, so memmove is safe
# for overlapping regions.  Labels with '_' belong to the forward copy;
# the '.' variants (Lcase.N) are the backward-copy mirrors.
# NOTE(review): fetches ahead one word past the source (see file header).
#-----------------------------------------------------------------------
.globl _memmove, _memcpy
.globl __memmove, __memcpy
.leafproc _memmove, __memmove
.leafproc _memcpy, __memcpy
.align 2
_memcpy:
_memmove:
#ifndef __PIC
lda Lrett,g14               # call-style entry: point g14 at the ret stub
#else
lda Lrett-(.+8)(ip),g14     # PIC form of the same return-address load
#endif
__memcpy:
__memmove:
cmpibge.f 0,g2,Lquick_exit # Lexit if number of bytes to move is <= zero.
cmpo g0,g1 # if dest starts earlier than src ...
lda (g14),g13 # preserve return address
addo g2,g1,g5 # compute addr of byte after last byte of src
be.f Lexit_code # no move necessary if src and dest are same
concmpo g5,g0 # ... or if dest starts after end of src ...
notand g1,3,g3 # extract word addr of start of src
bg.f Lbackwards # ... then drop thru, else do move backwards
cmpo g3,g1 # check alignment of src
ld (g3),LSW # fetch word containing at least first byte
notand g0,3,g4 # extract word addr of start of dest
lda 4(g3),g3 # advance src word addr
bne.f Lcase_245 # branch if src is NOT word aligned
Lcase_13:
cmpo g0,g4 # check alignment of dest
subo 4,g4,g4 # store is pre-incrementing; back up dest addr
be.t Lcase_1 # branch if dest word aligned
Lcase_3: # src is word aligned; dest is not
addo 8,g4,g4 # move dest word ptr to first word boundary
lda (g0),g1 # copy dest byte ptr
mov LSW,MSW # make copy of first word of src
lda 32,g14 # initialize shift count to zero (mod 32)
Lcase_25:
Lcase_3_cloop_at_start: # character copying loop for start of dest str
cmpdeci 0,g2,g2 # is max_bytes exhausted?
be.f Lexit_code # Lexit if max_bytes is exhausted
#if __i960_BIG_ENDIAN__
rotate 8,MSW,MSW # move next byte into position for extraction
subo 8,g14,g14 # augment the shift counter
stob MSW,(g1) # store the byte in dest
#else
addo 8,g14,g14 # augment the shift counter
stob MSW,(g1) # store the byte in dest
shro 8,MSW,MSW # move next byte into position for extraction
#endif
lda 1(g1),g1 # post-increment dest ptr
cmpobne.t g1,g4,Lcase_3_cloop_at_start # branch if reached word boundary
ld (g3),MSW # fetch msw of operand for double shift
Lcase_4:
Lcase_3_wloop:
cmpi g2,4 # less than four bytes to move?
lda 4(g3),g3 # post-increment src word addr
eshro g14,g6,g1 # extract 4 bytes of src
bl.f Lcase_3_cloop # branch if < four bytes left to move
mov MSW,LSW # move msw to lsw
ld (g3),MSW # pre-fetch msw of operand for double shift
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop              # continue word-at-a-time loop
Lcase_1_wloop: # word copying loop
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
ld (g3),LSW # pre-fetch next word of src
addo 4,g3,g3 # post-increment src addr
st g1,(g4) # store word in dest string
Lcase_1: # src and dest are word aligned
cmpi g2,4 # check for fewer than four bytes to move
addo 4,g4,g4 # pre-increment dest addr
lda (LSW),g1 # keep a copy of the src word
bge.t Lcase_1_wloop # branch if at least four bytes to copy
Lcase_3_cloop:               # trailing-byte copy (< 4 bytes left)
cmpibe.f 0,g2,Lexit_code # Lexit if max_bytes is exhausted
Lcase_1_cloop:
#if __i960_BIG_ENDIAN__
rotate 8,g1,g1 # move next byte into position for extraction
#endif
subi 1,g2,g2                # decrement remaining byte count
stob g1,(g4) # store the byte in dest
cmpi 0,g2                   # any bytes left?
lda 1(g4),g4 # post-increment dest byte addr
#if ! __i960_BIG_ENDIAN__
shro 8,g1,g1 # move next byte into position for extraction
#endif
bne.t Lcase_1_cloop # Lexit if max_bytes is exhausted
Lexit_code:
mov 0,g14 # conform to register conventions
bx (g13) # g0 = addr of dest; g14 = 0
Lrett:
ret
Lcase_245:                   # forward copy, src not word aligned
cmpo g0,g4 # check alignment of dest
ld (g3),MSW # pre-fetch second half
and 3,g1,g1 # compute shift count
shlo 3,g1,g14               # shift count in bits = byte offset * 8
#if __i960_BIG_ENDIAN__
subo g14,0,g14 # adjust shift count for big endian
#endif
be.t Lcase_4 # branch if dest is word aligned
or g4,g1,g1 # is src earlier in word, later, or sync w/ dst
cmpo g0,g1 # < indicates first word of dest has more bytes
lda 4(g4),g4 # move dest word addr to first word boundary
eshro g14,g6,g5 # extract four bytes
lda (g0),g1                 # copy dest byte ptr
#if __i960_BIG_ENDIAN__
bge.f 1f                    # skip word advance when dest has more bytes
#else
bg.f 1f                     # skip word advance when dest has more bytes
#endif
mov MSW,LSW                 # move msw to lsw
lda 4(g3),g3 # move src word addr to second word boundary
1:
mov g5,MSW                  # extracted bytes become msw for the cloop
b Lcase_25                  # copy leading bytes, then word loop
Lbackwards:                  # overlapping move: copy from high end down
notand g5,3,MSW # extract word addr of byte after end of src
cmpo MSW,g5 # check alignment of end of src
subo 4,MSW,g3 # retreat src word addr
addo g2,g0,g1 # compute addr of byte after end of dest
notand g1,3,g4 # extract word addr of start of dest
bne.f Lcase.245 # branch if src is NOT word aligned
Lcase.13:
cmpo g1,g4 # check alignment of dest
ld (g3),MSW # fetch last word of src
subo 4,g3,g3 # retreat src word addr
be.t Lcase.1 # branch if dest word aligned
Lcase.3: # src is word aligned; dest is not
mov MSW,LSW # make copy of first word of src
lda 32,g14 # initialize shift count to zero (mod 32)
Lcase.25:
Lcase.3_cloop_at_start: # character copying loop for start of dest str
cmpdeci 0,g2,g2 # is max.bytes exhausted?
be.f Lexit_code # Lexit if max_bytes is exhausted
#if ! __i960_BIG_ENDIAN__
rotate 8,LSW,LSW # move next byte into position for storing
#endif
lda -1(g1),g1 # pre-decrement dest ptr
cmpo g1,g4 # have we reached word boundary in dest yet?
stob LSW,(g1) # store the byte in dest
#if __i960_BIG_ENDIAN__
shro 8,LSW,LSW # move next byte into position for storing
addo 8,g14,g14 # augment the shift counter
#else
subo 8,g14,g14 # augment the shift counter
#endif
bne.t Lcase.3_cloop_at_start # branch if reached word boundary?
ld (g3),LSW # fetch lsw of operand for double shift
#if __i960_BIG_ENDIAN__
cmpobne 0,g14,Lcase.3_wloop  # nonzero shift count: need eshro loop
Lcase.3_wloop2:              # zero-shift special case (whole words)
cmpi g2,4 # less than four bytes to move?
lda -4(g3),g3 # post-decrement src word addr
mov MSW,g1 # extract 4 bytes of src
lda (LSW),MSW # move lsw to msw
subo 4,g4,g4 # pre-decrement dest ptr
bl.f Lcase.3_cloop # branch if < four bytes left to move
ld (g3),LSW # pre-fetch lsw of operand for double shift
subi 4,g2,g2 # decrease max.byte count by the 4 bytes moved
st g1,(g4) # store 4 bytes to dest
b Lcase.3_wloop2
#endif
Lcase.4:
Lcase.3_wloop:
cmpi g2,4 # less than four bytes to move?
lda -4(g3),g3 # post-decrement src word addr
eshro g14,g6,g1 # extract 4 bytes of src
lda (LSW),MSW # move lsw to msw
subo 4,g4,g4 # pre-decrement dest ptr
bl.f Lcase.3_cloop # branch if < four bytes left to move
ld (g3),LSW # pre-fetch lsw of operand for double shift
subi 4,g2,g2 # decrease max.byte count by the 4 bytes moved
st g1,(g4) # store 4 bytes to dest
b Lcase.3_wloop
Lcase.1_wloop: # word copying loop
subi 4,g2,g2 # decrease max.byte count by the 4 bytes moved
ld (g3),MSW # pre-fetch next word of src
subo 4,g3,g3 # post-decrement src addr
st g1,(g4) # store word in dest string
Lcase.1: # src and dest are word aligned
cmpi g2,4 # check for fewer than four bytes to move
subo 4,g4,g4 # pre-decrement dest addr
lda (MSW),g1 # keep a copy of the src word
bge.t Lcase.1_wloop # branch if at least four bytes to copy
Lcase.3_cloop:               # trailing-byte copy (< 4 bytes left)
cmpibe.f 0,g2,Lexit_code # Lexit if max_bytes is exhausted
#if ! __i960_BIG_ENDIAN__
rotate 8,g1,g1 # move next byte into position for storing
#endif
lda 4(g4),g4 # pre-decremented dest addr 4 too much
Lcase.1_cloop:
subi 1,g4,g4 # pre-decrement dest byte addr
cmpi g4,g0 # has dest ptr reached beginning of dest?
stob g1,(g4) # store the byte in dest
#if __i960_BIG_ENDIAN__
shro 8,g1,g1 # move next byte into position for storing
#else
rotate 8,g1,g1 # move next byte into position for storing
#endif
bne.t Lcase.1_cloop # Lexit if move is completed
b Lexit_code                 # done: return dest in g0
Lcase.245:                   # backward copy, src not word aligned
cmpo g1,g4 # check alignment of dest
ld (MSW),MSW # pre-fetch word with at least last byte
and 3,g5,g5 # compute shift count
ld (g3),LSW # pre-fetch second to last word
shlo 3,g5,g14               # shift count in bits = byte offset * 8
#if __i960_BIG_ENDIAN__
subo g14,0,g14 # adjust shift count for big endian
#endif
be.t Lcase.4 # branch if dest is word aligned
or g4,g5,g5 # is src earlier in word, later, or sync w/ dst
cmpo g1,g5 # < indicates last word of dest has less bytes
eshro g14,g6,g5 # extract four bytes
bl.t 1f                     # skip word retreat when dest has fewer bytes
mov LSW,MSW                 # move lsw to msw
#if ! __i960_BIG_ENDIAN__
be.t 1f                     # offsets match: no word retreat needed
#endif
subo 4,g3,g3 # move src word addr to second word boundary
1:
mov g5,LSW                  # extracted bytes become lsw for the cloop
b Lcase.25                  # copy trailing bytes, then word loop
Lquick_exit:                 # len <= 0: nothing to do
mov g14,g13                 # return addr still in g14; exit path uses g13
b Lexit_code
/* end of memmove */
/* ==== file boundary (concatenation artifact): newlib/libc/machine/i960/memccpy_ca.S ==== */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "mccpy_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure memccpy (optimized assembler version for the 80960CA)
dest_addr = memccpy (dest_addr, src_addr, char, len)
copy len bytes pointed to by src_addr to the space pointed to by
dest_addr, stopping if char is copied. If char is copied,
return address of byte after char in dest string; else null.
Undefined behavior will occur if the end of the source array is in
the last two words of the program's allocated memory space. This
is so because the routine fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Undefined behavior will also occur if the source and destination
strings overlap.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
#-----------------------------------------------------------------------
# void *memccpy (void *dest, const void *src, int char, size_t len)
# i960 CA leafproc entry points (_memccpy = call entry, __memccpy = bal).
#   In:    g0 = dest, g1 = src, g2 = char to stop at, g3 = max bytes
#   Out:   g0 = addr of byte after char in dest, or 0 if char not
#          copied within len bytes; g14 = 0
#   Uses:  g0-g7, g13, g14
# NOTE(review): unlike the sibling routines in this directory, there is
# no ".globl __memccpy" here, only the .leafproc declaration — confirm
# this is intentional.
# NOTE(review): fetches ahead past the source (see file header); src
# must not end in the last two words of allocated memory.
#-----------------------------------------------------------------------
.globl _memccpy
.leafproc _memccpy, __memccpy
.align 2
_memccpy:
#ifndef __PIC
lda Lrett,g14               # call-style entry: point g14 at the ret stub
#else
lda Lrett-(.+8)(ip),g14     # PIC form of the same return-address load
#endif
__memccpy:
notand g1,3,g5 # extract word addr of start of src
lda (g14),g13 # preserve return address
cmpibge.f 0,g3,Lexit_char_not_found # Lexit if # of bytes to move is <= 0
cmpo g5,g1 # check alignment of src
ld (g5),LSW # fetch word containing at least first byte
notand g0,3,g4 # extract word addr of start of dest
lda 4(g5),g5 # advance src word addr
shlo 24,g2,g2 # reduce char to single byte
bne.f Lcase_245 # branch if src is NOT word aligned
Lcase_13:
cmpobe.t g0,g4,Lcase_1_setup # branch if dest word aligned
Lcase_3: # src is word aligned; dest is not
mov LSW,MSW # make copy of first word of src
addo 4,g4,g1 # move dest word ptr to first word boundary
lda 32,g14 # initialize shift count to zero
Lcase_25:
Lcase_3_cloop_at_start: # character copying loop for start of dest str
cmpdeci 0,g3,g3 # is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
lda -8(g14),g14 # augment the shift counter
#else
lda 8(g14),g14 # augment the shift counter
#endif
be.f Lexit_char_not_found # Lexit if max_bytes is exhausted
#if __i960_BIG_ENDIAN__
rotate 8,MSW,MSW # move next byte into position for extraction
#endif
shlo 24,MSW,g4              # position current byte for compare (g2 = char<<24)
stob MSW,(g0) # store the byte in dest
cmpo g4,g2                  # is this byte the search char?
lda 1(g0),g0 # post-increment dest ptr
#if ! __i960_BIG_ENDIAN__
shro 8,MSW,MSW # move next byte into position for extraction
#endif
be.f Lexit_char_found # Lexit if char found
cmpobne.t g1,g0,Lcase_3_cloop_at_start # branch if reached word boundary
ld (g5),MSW # fetch msw of operand for double shift
Lcase_4:
shro 8,g2,g4                # replicate char into all 4 bytes of g4 ...
or g4,g2,g1                 # ... (two copies) ...
shro 16,g1,g4               # ...
or g4,g1,g4                 # ... four copies, for scanbyte below
#if __i960_BIG_ENDIAN__
cmpobne 0,g14,Lcase_3_wloop  # nonzero shift count: need eshro loop
Lcase_3_wloop2:              # zero-shift special case (whole words)
cmpi g3,4 # less than four bytes to move?
lda 4(g5),g5 # post-increment src word addr
mov LSW,g1 # extract 4 bytes of src
bl.f Lcase_13_cloop_setup # branch if < four bytes left to move
scanbyte g4,g1 # branch if word has char in it
bo.f Lcase_13_cloop_setup
mov MSW,LSW # move msw to lsw
ld (g5),MSW # pre-fetch msw of operand for double shift
subi 4,g3,g3 # decrease max_byte count by the 4 bytes moved
st g1,(g0) # store 4 bytes to dest
addo 4,g0,g0 # post-increment dest ptr
b Lcase_3_wloop2
#endif
Lcase_3_wloop:
cmpi g3,4 # less than four bytes to move?
lda 4(g5),g5 # post-increment src word addr
eshro g14,g6,g1 # extract 4 bytes of src
bl.f Lcase_13_cloop_setup # branch if < four bytes left to move
scanbyte g4,g1 # branch if word has char in it
bo.f Lcase_13_cloop_setup
mov MSW,LSW # move msw to lsw
ld (g5),MSW # pre-fetch msw of operand for double shift
subi 4,g3,g3 # decrease max_byte count by the 4 bytes moved
st g1,(g0) # store 4 bytes to dest
addo 4,g0,g0 # post-increment dest ptr
b Lcase_3_wloop
Lcase_1_setup:
subo 4,g0,g0 # store is pre-incrementing; back up dest addr
shro 8,g2,g4                # replicate char into all 4 bytes of g4 ...
or g4,g2,MSW                # ... (two copies) ...
shro 16,MSW,g4              # ...
or g4,MSW,g4                # ... four copies, for scanbyte below
b Lcase_1
Lcase_1_wloop: # word copying loop
subi 4,g3,g3 # decrease max_byte count by the 4 bytes moved
ld (g5),LSW # pre-fetch next word of src
addo 4,g5,g5 # post-increment src addr
st g1,(g0) # store word in dest string
Lcase_1: # src and dest are word aligned
cmpi g3,4 # check for fewer than four bytes to move
addo 4,g0,g0 # pre-increment dest addr
lda (LSW),g1 # keep a copy of the src word
bl.f Lcase_13_cloop_setup # branch if less than four bytes to copy
scanbyte LSW,g4 # branch if char is not in foursome
bno.t Lcase_1_wloop
Lcase_13_cloop_setup:        # byte-at-a-time tail (char near, or < 4 left)
cmpibe.f 0,g3,Lexit_char_not_found # Lexit if max_bytes is exhausted
Lcase_1_cloop:
#if __i960_BIG_ENDIAN__
rotate 8,g1,g1 # move next byte into position for extraction
#endif
shlo 24,g1,g4               # position current byte for compare (g2 = char<<24)
stob g1,(g0) # store the byte in dest
cmpo g4,g2                  # is this byte the search char?
lda 1(g0),g0 # post-increment dest byte addr
subi 1,g3,g3                # decrement remaining byte count
be.f Lexit_char_found # Lexit if char reached
cmpi 0,g3                   # count exhausted?
#if ! __i960_BIG_ENDIAN__
shro 8,g1,g1 # move next byte into position for extraction
#endif
bne.t Lcase_1_cloop # continue if len not exhausted
Lexit_char_not_found:
mov 0,g0                    # char never copied: return NULL
Lexit_char_found:
lda 0,g14                   # conform to leafproc register conventions
bx (g13) # g0 = dest array address; g14 = 0
Lrett:
ret
Lcase_245:                   # src not word aligned
cmpo g0,g4 # check alignment of dest
ld (g5),MSW # pre-fetch second half
and 3,g1,g1 # compute shift count
shlo 3,g1,g14               # shift count in bits = byte offset * 8
#if __i960_BIG_ENDIAN__
subo g14,0,g14 # adjust shift count for big endian
#endif
be.t Lcase_4 # branch if dest is word aligned
or g4,g1,g1 # is src earlier in word, later, or sync w/ dst
cmpo g0,g1 # < indicates first word of dest has more bytes
/* than first word of source. */
eshro g14,g6,g4 # extract four bytes
lda 4(g0),g1 # move dest word addr to first word boundary
#if __i960_BIG_ENDIAN__
bge.f 1f                    # skip word advance when dest has more bytes
#else
bg.f 1f                     # skip word advance when dest has more bytes
#endif
mov MSW,LSW                 # move msw to lsw
lda 4(g5),g5 # move src word addr to second word boundary
1:
notand g1,3,g1              # word-align the dest boundary ptr
mov g4,MSW                  # extracted bytes become msw for the cloop
b Lcase_25                  # copy leading bytes, then word loop
/* end of memccpy */
/* ==== file boundary (concatenation artifact): newlib/libc/machine/i960/strcpy.S ==== */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcpy (optimized assembler version for the 80960K series)
procedure strcat (optimized assembler version for the 80960K series)
dest_addr = strcpy (dest_addr, src_addr)
copy the null terminated string pointed to by src_addr to
the string space pointed to by dest_addr. Return the original
dest_addr.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strcpy fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
Fetch the source string and store the destination string by words
until the null byte is encountered. When the word with the null
byte is reached, store it by bytes up through the null byte only.
Tactics:
1) Do NOT try to fetch and store the words in a word aligned manner
because, in my judgement, the performance degradation experienced due
to non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that most source and
destination strings will be word aligned to begin with.
procedure strcat
dest_addr = strcat (dest_addr, src_addr)
Appends the string pointed to by src_addr to the string pointed
to by dest_addr. The first character of the source string is
copied to the location initially occupied by the trailing null
byte of the destination string. Thereafter, characters are copied
from the source to the destination up thru the null byte that
trails the source string.
See the strcpy routine, above, for its caveats, as they apply here too.
Strategy:
Skip to the end (null byte) of the destination string, and then drop
into the strcpy code.
Tactics:
Skipping to the null byte is Ldone by reading the destination string
in long-words and scanbyte'ing them, then examining the bytes of the
word that contains the null byte, until the address of the null byte is
known. Then we drop into the strcpy routine. It is probable (approx.
three out of four times) that the destination string as strcpy sees
it will NOT be word aligned (i.e. that the null byte won't be the
last byte of a word). But it is not worth the complication to that
routine to force word aligned memory accesses to be gaurenteed.
*/
#-----------------------------------------------------------------------
# char *strcpy (char *dest, const char *src)
# char *strcat (char *dest, const char *src)
# i960 leafproc entry points (80960K-series version; call entries
# _strcpy/_strcat, bal entries __strcpy/__strcat).
#   In:    g0 = dest, g1 = src
#   Out:   g0 = original dest; g14 = 0
# strcat first scans dest in double words for its trailing null, then
# falls into the strcpy copy loop at Lend_of_dest_found.
# NOTE(review): words are fetched/stored without alignment guarantees
# and the routine fetches ahead of the source null (see file header).
#-----------------------------------------------------------------------
.globl _strcpy, _strcat
.globl __strcpy, __strcat
.leafproc _strcpy,__strcpy
.leafproc _strcat,__strcat
.align 2
_strcat:
#ifndef __PIC
lda Lrett,g14               # call-style entry: point g14 at the ret stub
#else
lda Lrett-(.+8)(ip),g14     # PIC form of the same return-address load
#endif
__strcat:
mov g14,g13 # preserve return address
ldl (g0),g4 # fetch first two words
addo 8,g0,g2 # post-increment src word pointer
lda 0xff,g3 # byte extraction mask
Lsearch_for_word_with_null_byte:
scanbyte 0,g4 # check for null byte
mov g5,g7 # copy second word
bo.f Lsearch_for_null # branch if null found
scanbyte 0,g7 # check for null byte
ldl (g2),g4 # fetch next pair of word of src
addo 8,g2,g2 # post-increment src word pointer
bno Lsearch_for_word_with_null_byte # branch if null not found yet
subo 4,g2,g2 # back up the byte pointer
mov g7,g4 # move word with null to search word
Lsearch_for_null:            # g4 holds the word containing the null
subo 9,g2,g5 # back up the byte pointer
Lsearch_for_null.a:
and g4,g3,g6 # extract byte
cmpo 0,g6 # is it null?
addo 1,g5,g5 # bump src byte ptr
shro 8,g4,g4 # shift word to position next byte
bne Lsearch_for_null.a      # loop until the null byte is located
b Lend_of_dest_found        # g5 addresses dest's null; copy src there
_strcpy:
#ifndef __PIC
lda Lrett,g14               # call-style entry: point g14 at the ret stub
#else
lda Lrett-(.+8)(ip),g14     # PIC form of the same return-address load
#endif
__strcpy:
mov g0, g5                  # copy runs via g5 so g0 survives as return value
Lend_of_dest_found:
ld (g1), g2 # fetch first word of source
mov g14,g6 # preserve return address
lda 0xff, g3 # byte extraction mask = 0xff;
Lwloop: # word copying loop
addo 4, g1, g1 # post-increment source ptr
scanbyte 0, g2 # does source word contain null byte?
mov g2, g4 # save a copy of the source word
be Lcloop # branch if null present
ld (g1), g2 # pre-fetch next word of source
st g4, (g5) # store current word
addo 4, g5, g5 # post-increment dest ptr
b Lwloop                    # continue copying whole words
Lcloop: # character copying loop
and g3, g4, g14 # extract next char
shro 8, g4, g4 # position word for next byte extraction
cmpo 0, g14 # is it null?
stob g14, (g5) # store the byte
addo 1, g5, g5 # post-increment dest ptr
bne Lcloop # continue until the null byte has been stored
bx (g6) # g0 = dest string address; g14 = 0
Lrett:
ret
/* ==== file boundary (concatenation artifact): newlib/libc/machine/i960/strlen_ca.S ==== */
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strle_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strlen (optimized assembler version for the CA)
src_addr = strlen (src_addr)
return the number of bytes that precede the null byte in the
string pointed to by src_addr.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last four words of the program's
allocated memory space. This is so because, in several cases, strlen
will fetch ahead several words. Disallowing the fetch ahead would
impose a severe performance penalty.
This program handles two cases:
1) the argument starts on a word boundary
2) the argument doesn't start on a word boundary
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original src ptr; upon return it is the byte count.
g1
g2 src ptr
g3 mask
g4 even word of the source string
g5 odd word of the source string
g6 copy of even word, shift count
g7 copy of odd word
g13 return address
g14 byte extracted.
*/
	.globl _strlen
	.globl __strlen
	.leafproc _strlen, __strlen
	.align 2
/*
 * _strlen: conventional (call) entry.  Loads g14 with the address of
 * the trailing "ret" (Lrett) so the leaf body can return either by
 * branching through g13 (bal entry) or by falling into a real ret.
 */
_strlen:
#ifndef __PIC
	lda Lrett,g14
#else
	lda Lrett-(.+8)(ip),g14
#endif
/*
 * __strlen: leaf entry.  In: g0 = src pointer.  Out: g0 = length.
 * Plan: round src down to a word boundary, force the bytes that
 * precede the string to non-zero (so they can't match as null), then
 * scan two words at a time with scanbyte; once a word containing a
 * null is found, locate the exact byte with a byte-mask loop.
 */
__strlen:
	notand g0,3,g2		# extract word addr of start of src
	lda (g14),g13		# preserve return address
	and g0,3,g7		# extract byte offset of src
	ld (g2),g5		# fetch word containing at least first byte
	shlo 3,g7,g7		# get shift count for making mask for first word
	lda 4(g2),g2		# post-increment src word pointer
	subi 1,0,g3		# mask initially all ones
	chkbit 2,g2		# are we on an even word boundary or an odd one?
#if __i960_BIG_ENDIAN__
	shro g7,g3,g3		# get mask for bytes needed from first word
	notor g5,g3,g7		# set unneeded bytes to all ones
	lda 0xff000000,g3	# byte extraction mask
#else
	shlo g7,g3,g3		# get mask for bytes needed from first word
	notor g5,g3,g7		# set unneeded bytes to all ones
	lda 0xff,g3		# byte extraction mask
#endif
	bno.f Lodd_word		# branch if first word is odd
	mov g7,g4		# move first word to copy thereof
	ld (g2),g5		# load odd word
	lda 4(g2),g2		# post-increment src word pointer
/* Main scan: g4/g5 hold the current word pair; g6/g7 a working copy. */
Leven_word:
	scanbyte 0,g4		# check for null byte
	movl g4,g6		# copy both words
Lodd_word:			# trickery! if we branch here, following branch
	/* instruction will fall thru, as we want, */
	/* effecting the load of g4 and g5 only. */
	ldl (g2),g4		# fetch next pair of word of src
	bo.f Lsearch_for_null	# branch if null found
	scanbyte 0,g7		# check for null byte
	lda 8(g2),g2		# post-increment src word pointer
	bno.t Leven_word	# branch if null not found yet
	subo 4,g2,g2		# back up the byte pointer
	lda (g7),g6		# move odd word to search word
/* A word in g6 contains the terminating null; find the exact byte. */
Lsearch_for_null:
	subo 9,g2,g2		# back up the byte pointer
Lsearch_for_null.a:
	and g6,g3,g14		# extract byte
	cmpo 0,g14		# is it null?
	lda 1(g2),g2		# bump src byte ptr
#if __i960_BIG_ENDIAN__
	shlo 8,g6,g6		# shift word to position next byte
#else
	shro 8,g6,g6		# shift word to position next byte
#endif
	bne.t Lsearch_for_null.a
Lexit_code:
	subo g0,g2,g0		# calculate string length
	bx (g13)		# g0 = addr of src; g14 = 0
Lrett:
	ret
/* end of strlen */
|
stsp/newlib-ia16
| 3,179
|
newlib/libc/machine/i960/strcspn.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcspn.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1989,1993 Intel Corp., all rights reserved
*/
/*
procedure strcspn (optimized assembler version: 80960K series, 80960CA)
len = strcspn (string, charset)
Return the number of characters in the maximum leading segment
of string which consists solely of characters NOT from charset.
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine.
*/
	.globl _strcspn
	.globl __strcspn
	.leafproc _strcspn, __strcspn
	.align 2
/*
 * _strcspn: conventional (call) entry; set g14 to the trailing ret so
 * the leaf body can exit through either return path.
 */
_strcspn:
#ifndef __PIC
	lda Lrett,g14
#else
	lda Lrett-(.+8)(ip),g14
#endif
/*
 * __strcspn: leaf entry.
 * In:  g0 = string, g1 = charset.  Out: g0 = length of the leading
 * segment of string containing no character from charset.
 * Straightforward O(len(string) * len(charset)) double loop:
 * for each string char (g7), rescan charset from the start.
 */
__strcspn:
	mov g14,g13		# save return address
	lda (g0),g3		# copy string pointer
	mov 0,g14		# conform to register conventions
Lnext_char:
	ldob (g3),g7		# fetch next character of string
	addo 1,g1,g2		# g2 will be the charset ptr
	ldob (g1),g6		# fetch first character of charset
	cmpobe.f 0,g7,Lexit	# quit if at end of string
Lscan_set:
	cmpo g6,g7		# is charset char same as string char?
	ldob (g2),g5		# fetch next charset char
	addo 1,g2,g2		# bump charset ptr
	be.f Lexit		# string char is in charset: segment ends here
	cmpo g6,0		# is charset exhausted?
	lda (g5),g6		# promote prefetched charset char for next test
	bne.t Lscan_set		# check next character of charset
	addo 1,g3,g3		# check next character of string
	b Lnext_char
Lexit:
	subo g0,g3,g0		# compute string length
	bx (g13)
Lrett:
	ret
/* end of strcspn */
|
stsp/newlib-ia16
| 3,917
|
newlib/libc/machine/i960/setjmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
/******************************************************************************/
/* */
/* setjmp(), longjmp() */
/* */
/******************************************************************************/
.file "setjmp.as"
.text
/* .link_pix */
.align 4
	.globl _setjmp
/*
 * int setjmp (jmp_buf buf)		-- buf in g0
 * Saves enough state to resume in setjmp's caller: the caller's local
 * register frame (read back from memory after flushreg) plus the
 * globals the calling convention preserves.  Returns 0.
 * jmp_buf layout used here:
 *   0x00-0x3f  caller frame pfp/sp/rip/r3..r15 (four quads)
 *   0x40-0x4f  g8-g11,  0x50 g12,  0x54 g14,  0x58 caller's fp
 */
_setjmp:
	flushreg			/* spill stacked frames so caller's frame is in memory */
	andnot 0xf,pfp,g1	/* get pfp, mask out return status bits */
	st	g1, 0x58(g0)	/* save fp of caller*/
	/* save globals not killed by the calling convention */
	stq	g8, 0x40(g0)	/* save g8-g11*/
	st	g12, 0x50(g0)	/* save g12*/
	st	g14, 0x54(g0)	/* save g14*/
	/* save previous frame local registers */
	ldq	(g1), g4	/* get previous frame pfp, sp, rip, r3 */
	stq	g4, (g0)	/* save pfp, sp, rip, r3 */
	ldq	0x10(g1), g4	/* get previous frame r4-r7 */
	stq	g4, 0x10(g0)	/* save r4-r7 */
	ldq	0x20(g1), g4	/* get previous frame r8-r11 */
	stq	g4, 0x20(g0)	/* save r8-r11 */
	ldq	0x30(g1), g4	/* get previous frame r12-r15 */
	stq	g4, 0x30(g0)	/* save r12-r15 */
	mov	0, g0		/* return 0 */
	ret
/*
 * fake a return to the place that called the corresponding _setjmp
 *
 * void longjmp (jmp_buf buf, int val)	-- buf in g0, val in g1
 * Rewrites the saved caller frame in memory (located through the fp
 * stored at buf+0x58), restores the preserved globals, then returns;
 * the ret unwinds into the frame that originally called setjmp.
 * A zero val is forced to 1, per the C standard's longjmp contract.
 */
	.align 4
	.globl _longjmp
_longjmp:
	call 0f			/* ensure there is at least one stack frame */
0:
	flushreg		/* do this before swapping stack */
	ld	0x58(g0), pfp	/* get fp of caller of setjmp */
	/* restore local registers
	 * the following code modifies the frame of the function which originally
	 * called setjmp.
	 */
	ldq	(g0), g4	/* get pfp, sp, rip, r3 */
	stq	g4, (pfp)	/* restore pfp, sp, rip, r3 */
	ldq	0x10(g0), g4	/* get r4-r7 */
	stq	g4, 0x10(pfp)	/* restore r4-r7 */
	ldq	0x20(g0), g4	/* get r8-r11 */
	stq	g4, 0x20(pfp)	/* restore r8-r11 */
	ldq	0x30(g0), g4	/* get r12-r15 */
	stq	g4, 0x30(pfp)	/* restore r12-r15 */
	/* restore global registers */
	ldq	0x40(g0), g8	/* get old g8-g11 values */
	ld	0x50(g0), g12	/* get old g12 value */
	ld	0x54(g0), g14	/* get old g14 value */
	mov	g1, g0		/* get return value */
	cmpo	g0, 0		/* make sure it is not zero */
	bne	0f
	mov	1, g0		/* return 1 by default */
0:
	ret			/* return to caller of _setjmp */
|
stsp/newlib-ia16
| 2,619
|
newlib/libc/machine/h8500/psi.S
|
/* convert psi to si inplace
Note that `fp' below isn't a segment register.
It's r6, the frame pointer. */
#if __CODE__==32
#define RET prts
#else
#define RET rts
#endif
/*
 * EXTPSISI_SN: widen a pointer (segment reg `sp' + 16-bit reg r_msw)
 * into the 32-bit pair r_msw:r_lsw, in place.  The low word moves to
 * r_lsw and the segment register value becomes the high word.
 */
#define EXTPSISI_SN(r_msw,r_lsw,sp) ; \
.global __extpsisi##r_msw ; \
__extpsisi##r_msw: ; \
	mov r_msw,r_lsw ; \
	stc sp,r_msw ; \
	RET
EXTPSISI_SN(r2,r3,dp)
EXTPSISI_SN(r4,r5,ep)
/*
 * ADDPSI_AR_RN: add the pointer (sr:an) into the 32-bit value in
 * r_msw:r_lsw.  The segment register is pushed and added in via the
 * carry-propagating addx.
 */
#define ADDPSI_AR_RN(sr,an,r_msw,r_lsw) \
.global __addpsi##an##r_msw ; \
__addpsi##an##r_msw: ; \
	stc sr,@-sp ; \
	add an,r_lsw ; \
	addx @sp+,r_msw ; \
	RET
ADDPSI_AR_RN(dp,r2,r0,r1)
ADDPSI_AR_RN(dp,r2,r3,r4)
ADDPSI_AR_RN(ep,r4,r0,r1)
ADDPSI_AR_RN(ep,r4,r1,r2)
ADDPSI_AR_RN(ep,r4,r3,r4)
ADDPSI_AR_RN(ep,r4,r5,fp)
ADDPSI_AR_RN(tp,fp,r0,r1)
/*
 * ADDPSI_RN_AR: add the 32-bit value r_msw:r_lsw into the pointer
 * (sr:an).  t_msw/t_lsw are scratch registers, saved on and restored
 * from the stack around the 32-bit add.
 */
#define ADDPSI_RN_AR(r_msw,r_lsw,sr,an,t_msw,t_lsw) \
.global __addpsi##r_msw##an ; \
__addpsi##r_msw##an: ; \
	mov.w t_msw,@-sp ; \
	mov.w t_lsw,@-sp ; \
	stc sr,t_msw ; \
	mov an,t_lsw ; \
	add r_lsw,t_lsw ; \
	addx r_msw,t_msw ; \
	ldc t_msw,sr ; \
	mov.w t_lsw,an ; \
	mov.w @sp+,t_lsw ; \
	mov.w @sp+,t_msw ; \
	RET
ADDPSI_RN_AR(r0,r1,dp,r2,r4,r5)
ADDPSI_RN_AR(r0,r1,ep,r4,r2,r3)
/*
 * EXTPSIHI_RN_RN: zero-extend the 16-bit value in rm into the 32-bit
 * pair r_msw:r_lsw.
 */
#define EXTPSIHI_RN_RN(rm,r_msw,r_lsw) ; \
.global __extpsihi##rm##r_msw ; \
__extpsihi##rm##r_msw: ; \
	mov rm,r_lsw ; \
	clr.w r_msw ; \
	RET
EXTPSIHI_RN_RN(r3,r0,r1)
EXTPSIHI_RN_RN(r4,r0,r1)
EXTPSIHI_RN_RN(r5,r0,r1)
EXTPSIHI_RN_RN(r2,r0,r1)
/* ifdefed out, because gcc doesn't like the # character in the above
   macro.  The macro expands into an assembly languange comment anyways,
   so it serves no useful purpose. */
#if 0
#define EXTPSIHI_RN_SN(rm,r_msw,r_lsw) ; \
.global __extpsihi##rm##r_lsw ; \
__extpsihi##rm##r_lsw: ; \
	mov rm,r_lsw ; \
	ldc \#0,r_msw ; \
	RET
EXTPSIHI_RN_SN(r0,dp,r2)
EXTPSIHI_RN_SN(r0,ep,r4)
EXTPSIHI_RN_SN(r1,dp,r2)
EXTPSIHI_RN_SN(r1,ep,r4)
EXTPSIHI_RN_SN(r3,dp,r2)
EXTPSIHI_RN_SN(r3,ep,r4)
EXTPSIHI_RN_SN(r5,dp,r2)
EXTPSIHI_RN_SN(r5,ep,r4)
EXTPSIHI_RN_SN(r2,ep,r4)
#endif
/*
 * EXTPSISI_RN: r0:r1 already holds the value in the right place, so
 * the "conversion" is a no-op; just return.
 */
#define EXTPSISI_RN(r_msw,r_lsw) ; \
.global __extpsisi##r_msw ; \
__extpsisi##r_msw: ; \
	RET
EXTPSISI_RN(r0,r1)
/*
 * ADDPSI_SA_SB: add pointer (sa:ra) into pointer (sb:rb), using r0/r1
 * as stack-saved scratch for the segment-register arithmetic.
 */
#define ADDPSI_SA_SB(sa,ra,sb,rb) ; \
.global __addpsi##ra##rb ; \
__addpsi##ra##rb: ; \
	mov.w r0,@-sp ; \
	mov.w r1,@-sp ; \
	stc sa,r0 ; \
	stc sb,r1 ; \
	add.w ra,rb ; \
	addx r0,r1 ; \
	ldc r1,sb ; \
	mov.w @sp+,r1 ; \
	mov.w @sp+,r0 ; \
	RET
ADDPSI_SA_SB(dp,r2,ep,r4)
ADDPSI_SA_SB(ep,r4,dp,r2)
ADDPSI_SA_SB(tp,fp,dp,r2)
ADDPSI_SA_SB(tp,fp,ep,r4)
ADDPSI_SA_SB(dp,r2,dp,r2)
/* Double the 32-bit value in r0:r1 (shift left by one with carry). */
.global __addpsir0r0
__addpsir0r0:
	add.w r1,r1
	addx r0,r0
	RET
|
stsp/newlib-ia16
| 1,421
|
newlib/libc/machine/i386/strlen.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
	.global SYM (strlen)
	SOTYPE_FUNCTION(strlen)
/*
 * size_t strlen (const char *str)	-- AT&T syntax, cdecl.
 * In:  8(ebp) = str.  Out: eax = length.  Preserves edi (callee-saved
 * here via push/pop); edx/ecx are scratch.
 */
SYM (strlen):
	pushl ebp
	movl esp,ebp
	pushl edi
	movl 8(ebp),edx			/* edx = original str, kept for the final subtract */
#ifdef __OPTIMIZE_SIZE__
	/* Small build: scan for the 0 byte with repnz scasb; edi ends one
	   past the null, which L15 compensates for. */
	cld
	movl edx,edi
	movl $4294967295,ecx		/* "unbounded" count (2^32-1) */
	xor eax,eax
	repnz
	scasb
#else
	/* Modern x86 hardware is much faster at double-word
	   manipulation than with bytewise repnz scasb.  */
	/* Do byte-wise checks until string is aligned.  */
	movl edx,edi
	test $3,edi
	je L5
	movb (edi),cl
	incl edi
	testb cl,cl
	je L15
	test $3,edi
	je L5
	movb (edi),cl
	incl edi
	testb cl,cl
	je L15
	test $3,edi
	je L5
	movb (edi),cl
	incl edi
	testb cl,cl
	je L15
L5:
	subl $4,edi
	/* loop performing 4 byte mask checking for desired 0 byte.
	   (word - 0x01010101) & ~word & 0x80808080 is nonzero iff some
	   byte of word is 0; -16843009 == -0x01010101 and
	   -2139062144 == 0x80808080 as a signed immediate. */
	.p2align 4,,7
L10:
	addl $4,edi
	movl (edi),ecx
	leal -16843009(ecx),eax
	notl ecx
	andl ecx,eax
	testl $-2139062144,eax
	je L10
	/* Find which of four bytes is 0 (ecx un-inverted, tested low to high). */
	notl ecx
	incl edi
	testb cl,cl
	je L15
	incl edi
	shrl $8,ecx
	testb cl,cl
	je L15
	incl edi
	shrl $8,ecx
	testb cl,cl
	je L15
	incl edi
#endif
L15:
	/* Both paths reach here with edi one past the null byte. */
	subl edx,edi
	leal -1(edi),eax
	leal -4(ebp),esp
	popl edi
	leave
	ret
|
stsp/newlib-ia16
| 2,523
|
newlib/libc/machine/i386/strchr.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
	.global SYM (strchr)
	SOTYPE_FUNCTION(strchr)
/*
 * char *strchr (const char *str, int c)	-- AT&T syntax, cdecl.
 * In:  8(ebp) = str, 12(ebp) = c (only the low byte, kept in bl).
 * Out: eax = pointer to first occurrence of c (including the
 * terminating null when c == 0), or NULL if not found.
 */
SYM (strchr):
	pushl ebp
	movl esp,ebp
	pushl edi
	pushl ebx
	xorl ebx,ebx
	movl 8(ebp),edi
	addb 12(ebp),bl			/* bl = c; sets ZF when c == 0 */
#ifndef __OPTIMIZE_SIZE__
	/* Special case strchr(p,0).  */
	je L25
	/* Do byte-wise checks until string is aligned.  */
	test $3,edi
	je L5
	movl edi,eax
	movb (eax),cl
	testb cl,cl
	je L14
	cmpb bl,cl
	je L19
	incl edi
	test $3,edi
	je L5
	movl edi,eax
	movb (eax),cl
	testb cl,cl
	je L14
	cmpb bl,cl
	je L19
	incl edi
	test $3,edi
	je L5
	movl edi,eax
	movb (eax),cl
	testb cl,cl
	je L14
	cmpb bl,cl
	je L19
	incl edi
	/* create 4 byte mask which is just the desired byte repeated 4 times */
L5:
	movl ebx,ecx
	sall $8,ebx
	subl $4,edi
	orl ecx,ebx
	movl ebx,edx
	sall $16,ebx
	orl edx,ebx
	/* loop performing 4 byte mask checking for 0 byte or desired byte.
	   (w - 0x01010101) & ~w & 0x80808080 != 0 iff some byte of w is 0;
	   xoring with the replicated-c mask first detects c the same way. */
	.p2align 4,,7
L10:
	addl $4,edi
	movl (edi),ecx
	leal -16843009(ecx),edx
	movl ecx,eax
	notl eax
	andl eax,edx
	testl $-2139062144,edx
	jne L9
	xorl ebx,ecx
	leal -16843009(ecx),edx
	notl ecx
	andl ecx,edx
	testl $-2139062144,edx
	je L10
#endif /* not __OPTIMIZE_SIZE__ */
	/* loop while (*s && *s++ != c); the word at edi is known to
	   contain either a 0 or c, so this terminates within 4 bytes
	   in the fast path (it is also the whole algorithm for -Os). */
L9:
	leal -1(edi),eax
	.p2align 4,,7
L15:
	incl eax
	movb (eax),dl
	testb dl,dl
	je L14
	cmpb bl,dl
	jne L15
L14:
	/* if (*s == c) return address otherwise return NULL */
	cmpb bl,(eax)
	je L19
	xorl eax,eax
L19:
	leal -8(ebp),esp
	popl ebx
	popl edi
	leave
	ret
#ifndef __OPTIMIZE_SIZE__
	/* Special case strchr(p,0): find the terminating null word-at-a-time,
	   then fall into L9 to pin down the exact byte. */
#if 0
	/* Hideous performance on modern machines.  */
L25:
	cld
	movl $-1,ecx
	xor eax,eax
	repnz
	scasb
	leal -1(edi),eax
	jmp L19
#endif
L25:
	/* Do byte-wise checks until string is aligned.  */
	test $3,edi
	je L26
	movl edi,eax
	movb (eax),cl
	testb cl,cl
	je L19
	incl edi
	test $3,edi
	je L26
	movl edi,eax
	movb (eax),cl
	testb cl,cl
	je L19
	incl edi
	test $3,edi
	je L26
	movl edi,eax
	movb (eax),cl
	testb cl,cl
	je L19
	incl edi
L26:
	subl $4,edi
	/* loop performing 4 byte mask checking for desired 0 byte */
	.p2align 4,,7
L27:
	addl $4,edi
	movl (edi),ecx
	leal -16843009(ecx),edx
	movl ecx,eax
	notl eax
	andl eax,edx
	testl $-2139062144,edx
	je L27
	jmp L9
#endif /* !__OPTIMIZE_SIZE__ */
|
stsp/newlib-ia16
| 1,431
|
newlib/libc/machine/i386/memset.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
	.global SYM (memset)
	SOTYPE_FUNCTION(memset)
/*
 * void *memset (void *dst, int c, size_t n)	-- AT&T syntax, cdecl.
 * In:  8(ebp) = dst, 12(ebp) = c (low byte used), 16(ebp) = n.
 * Out: eax = dst.  Fast path fills dwords with `rep stosl' after
 * aligning dst to 8 bytes; remainder is filled bytewise.
 */
SYM (memset):
	pushl ebp
	movl esp,ebp
	pushl edi
	movl 8(ebp),edi
	movzbl 12(ebp),eax		/* eax = fill byte, zero-extended */
	movl 16(ebp),ecx
	cld
#ifndef __OPTIMIZE_SIZE__
	/* Less than 16 bytes won't benefit from the 'rep stosl' loop.  */
	cmpl $16,ecx
	jbe .L19
	testl $7,edi
	je .L10
	/* It turns out that 8-byte aligned 'rep stosl' outperforms
	   4-byte aligned on some x86 platforms.
	   Store up to 7 single bytes until edi % 8 == 0; each step
	   re-tests alignment so we jump out as soon as it is reached. */
	movb al,(edi)
	incl edi
	decl ecx
	testl $7,edi
	je .L10
	movb al,(edi)
	incl edi
	decl ecx
	testl $7,edi
	je .L10
	movb al,(edi)
	incl edi
	decl ecx
	testl $7,edi
	je .L10
	movb al,(edi)
	incl edi
	decl ecx
	testl $7,edi
	je .L10
	movb al,(edi)
	incl edi
	decl ecx
	testl $7,edi
	je .L10
	movb al,(edi)
	incl edi
	decl ecx
	testl $7,edi
	je .L10
	movb al,(edi)
	incl edi
	decl ecx
	/* At this point, ecx>8 and edi%8==0.  */
.L10:
	/* Replicate the fill byte into all four bytes of eax, then
	   store ecx/4 dwords; the low 2 bits of the count go to .L19. */
	movb al,ah
	movl eax,edx
	sall $16,edx
	orl edx,eax
	movl ecx,edx
	shrl $2,ecx
	andl $3,edx
	rep
	stosl
	movl edx,ecx
#endif /* not __OPTIMIZE_SIZE__ */
.L19:
	rep
	stosb
	movl 8(ebp),eax			/* return original dst */
	leal -4(ebp),esp
	popl edi
	leave
	ret
|
stsp/newlib-ia16
| 1,087
|
newlib/libc/machine/i386/memcpy.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
	.global SYM (memcpy)
	SOTYPE_FUNCTION(memcpy)
/*
 * void *memcpy (void *dst, const void *src, size_t n)	-- AT&T, cdecl.
 * In:  8(ebp) = dst, 12(ebp) = src, 16(ebp) = n.  Out: eax = dst.
 * Forward copy only (no overlap handling -- that is memmove's job).
 * Fast path: align dst to 4 bytes, bulk-copy with `rep movsl', then
 * copy the remaining 0-3 bytes with `rep movsb'.
 */
SYM (memcpy):
	pushl ebp
	movl esp,ebp
	pushl esi
	pushl edi
	pushl ebx
	movl 8(ebp),edi
	movl 16(ebp),ecx
	movl 12(ebp),esi
	cld
#ifndef __OPTIMIZE_SIZE__
	cmpl $8,ecx
	jbe .L3				/* tiny copy: bytewise is fine */
	/* move any preceding bytes until destination address is long word aligned */
	movl edi,edx
	movl ecx,ebx			/* ebx = running total count */
	andl $3,edx
	jz .L11
	movl $4,ecx
	subl edx,ecx
	andl $3,ecx			/* ecx = bytes to reach alignment */
	subl ecx,ebx
	rep
	movsb
	mov ebx,ecx
	/* move bytes a long word at a time */
.L11:
	shrl $2,ecx
	.p2align 2
	rep
	movsl
	movl ebx,ecx
	andl $3,ecx			/* 0-3 trailing bytes left */
#endif /* !__OPTIMIZE_SIZE__ */
	/* handle any remaining bytes */
.L3:
	rep
	movsb
.L5:
	movl 8(ebp),eax			/* return original dst */
	leal -12(ebp),esp
	popl ebx
	popl edi
	popl esi
	leave
	ret
|
stsp/newlib-ia16
| 1,503
|
newlib/libc/machine/i386/memchr.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
	.global SYM (memchr)
	SOTYPE_FUNCTION(memchr)
/*
 * void *memchr (const void *buf, int c, size_t n)	-- AT&T, cdecl.
 * In:  8(ebp) = buf, 12(ebp) = c (low byte), 16(ebp) = n.
 * Out: eax = pointer to first matching byte, or NULL.
 * edx carries the found/not-found flag: it stays 0 on the not-found
 * paths and becomes -1 at L15 (decl), so `and edi,edx' yields either
 * the match pointer or 0.
 */
SYM (memchr):
	pushl ebp
	movl esp,ebp
	pushl edi
	movzbl 12(ebp),eax
	movl 16(ebp),ecx
	movl 8(ebp),edi
	xorl edx,edx
	testl ecx,ecx
	jz L20				/* n == 0: return NULL */
#ifdef __OPTIMIZE_SIZE__
	cld
	repnz
	scasb
	setnz dl			/* dl = 1 if not found (so L15 computes 0) */
	decl edi			/* back up to the matching byte */
#else /* !__OPTIMIZE_SIZE__ */
	/* Do byte-wise checks until string is aligned.  */
	testl $3,edi
	je L5
	cmpb (edi),al
	je L15
	incl edi
	decl ecx
	je L20
	testl $3,edi
	je L5
	cmpb (edi),al
	je L15
	incl edi
	decl ecx
	je L20
	testl $3,edi
	je L5
	cmpb (edi),al
	je L15
	incl edi
	decl ecx
	je L20
	/* Create a mask, then check a word at a time.
	   After xor with the replicated byte, a zero byte marks a match;
	   (w - 0x01010101) & ~w & 0x80808080 detects it. */
L5:
	movb al,ah
	movl eax,edx
	sall $16,edx
	orl edx,eax
	pushl ebx
	.p2align 4,,7
L8:
	subl $4,ecx
	jc L9				/* fewer than 4 bytes left */
	movl (edi),edx
	addl $4,edi
	xorl eax,edx
	leal -16843009(edx),ebx
	notl edx
	andl edx,ebx
	testl $-2139062144,ebx
	je L8
	subl $4,edi			/* word contains a match: rewind to rescan it */
L9:
	popl ebx
	xorl edx,edx
	addl $4,ecx			/* restore residual count (0-4) */
	je L20
	/* Final byte-wise checks.  */
	.p2align 4,,7
L10:
	cmpb (edi),al
	je L15
	incl edi
	decl ecx
	jne L10
	xorl edi,edi			/* exhausted: force NULL result */
#endif /* !__OPTIMIZE_SIZE__ */
L15:
	decl edx			/* edx: 0 -> -1 (found) or 1 -> 0 (-Os not-found) */
	andl edi,edx
L20:
	movl edx,eax
	leal -4(ebp),esp
	popl edi
	leave
	ret
|
stsp/newlib-ia16
| 1,347
|
newlib/libc/machine/i386/memcmp.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
	.global SYM (memcmp)
	SOTYPE_FUNCTION(memcmp)
/*
 * int memcmp (const void *s1, const void *s2, size_t n)  -- AT&T, cdecl.
 * In:  8(ebp) = s1, 12(ebp) = s2, 16(ebp) = n.
 * Out: eax < 0 / == 0 / > 0 as s1 compares to s2 over n bytes
 * (difference of the first mismatching bytes, unsigned).
 */
SYM (memcmp):
	pushl ebp
	movl esp,ebp
	subl $16,esp
	pushl ebx
	pushl edi
	pushl esi
	movl 8(ebp),edi
	movl 12(ebp),esi
	movl 16(ebp),ecx
	cld
	/* check if length is zero in which case just return 0 */
	xorl eax,eax
	testl ecx,ecx
	jz L4
#ifndef __OPTIMIZE_SIZE__
	/* if aligned on long boundary, compare doublewords at a time first */
	movl edi,eax
	orl esi,eax
	testb $3,al			/* both pointers 4-aligned? */
	jne BYTECMP
	movl ecx,ebx
	shrl $2,ecx	/* calculate number of long words to compare */
	repz
	cmpsl
	jz L5
	/* dwords differed: rewind one dword and recompare it bytewise */
	subl $4,esi
	subl $4,edi
	movl $4,ecx
	jmp BYTECMP
L5:
	andl $3,ebx	/* calculate number of remaining bytes */
	movl ebx,ecx
#endif /* not __OPTIMIZE_SIZE__ */
BYTECMP: /* compare any unaligned bytes or remainder bytes */
	repz
	cmpsb
	/* set output to be < 0 if less than, 0 if equal, or > 0 if greater than */
L3:
	/* esi/edi point one past the last byte compared; re-read those bytes.
	   If all bytes matched, the difference below is 0. */
	xorl edx,edx
	movb -1(esi),dl
	xorl eax,eax
	movb -1(edi),al
	subl edx,eax
L4:
	leal -28(ebp),esp		/* -16 locals - 12 pushed regs */
	popl esi
	popl edi
	popl ebx
	leave
	ret
|
stsp/newlib-ia16
| 2,022
|
newlib/libc/machine/i386/memmove.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
	.global SYM (memmove)
	SOTYPE_FUNCTION(memmove)
/*
 * void *memmove (void *dst, const void *src, size_t n)  -- AT&T, cdecl.
 * In:  8(ebp) = dst, 12(ebp) = src, 16(ebp) = n.  Out: eax = dst.
 * Overlap-safe: copies backwards (DF=1) when src < dst < src+n,
 * forwards otherwise.  Both directions align dst and bulk-copy with
 * `rep movsl', finishing the 0-3 leftover bytes with `rep movsb'.
 */
SYM (memmove):
	pushl ebp
	movl esp,ebp
	pushl esi
	pushl edi
	pushl ebx
	movl 8(ebp),edi
	movl 16(ebp),ecx
	movl 12(ebp),esi
	/* check for destructive overlap (src < dst && dst < src + length) */
	cld
	cmpl edi,esi
	jae .L2
	leal -1(ecx,esi),ebx		/* ebx = src + n - 1 (last src byte) */
	cmpl ebx,edi
	ja .L2
	/* IF: destructive overlap, must copy backwards */
	addl ecx,esi
	addl ecx,edi			/* point both one past their last byte */
	std				/* string ops now run high-to-low */
#ifndef __OPTIMIZE_SIZE__
	cmpl $8,ecx
	jbe .L13
.L18:
	/* move trailing bytes in reverse until destination address is long word aligned */
	movl edi,edx
	movl ecx,ebx
	andl $3,edx
	jz .L21
	movl edx,ecx
	decl esi
	decl edi
	subl ecx,ebx
	rep
	movsb
	mov ebx,ecx
	incl esi
	incl edi
.L21:
	/* move bytes in reverse, a long word at a time */
	shrl $2,ecx
	subl $4,esi
	subl $4,edi			/* movsl with DF=1 needs ptrs at last dword */
	rep
	movsl
	addl $4,esi
	addl $4,edi
	movl ebx,ecx
	andl $3,ecx
#endif /* !__OPTIMIZE_SIZE__ */
	/* handle any remaining bytes not on a long word boundary */
.L13:
	decl esi
	decl edi
.L15:
	rep
	movsb
	jmp .L5
	.p2align 4,,7
	/* ELSE: no destructive overlap so we copy forwards */
.L2:
#ifndef __OPTIMIZE_SIZE__
	cmpl $8,ecx
	jbe .L3
	/* move any preceding bytes until destination address is long word aligned */
	movl edi,edx
	movl ecx,ebx
	andl $3,edx
	jz .L11
	movl $4,ecx
	subl edx,ecx
	andl $3,ecx
	subl ecx,ebx
	rep
	movsb
	mov ebx,ecx
	/* move bytes a long word at a time */
.L11:
	shrl $2,ecx
	.p2align 2
	rep
	movsl
	movl ebx,ecx
	andl $3,ecx
#endif /* !__OPTIMIZE_SIZE__ */
	/* handle any remaining bytes */
.L3:
	rep
	movsb
.L5:
	movl 8(ebp),eax			/* return original dst */
	cld				/* restore DF=0 per the ABI */
	leal -12(ebp),esp
	popl ebx
	popl edi
	popl esi
	leave
	ret
|
stsp/newlib-ia16
| 1,696
|
newlib/libc/machine/i386/setjmp.S
|
/* This is file is a merger of SETJMP.S and LONGJMP.S */
/*
* This file was modified to use the __USER_LABEL_PREFIX__ and
* __REGISTER_PREFIX__ macros defined by later versions of GNU cpp by
* Joel Sherrill (joel@OARcorp.com)
* Slight change: now includes i386mach.h for this (Werner Almesberger)
*
* Copyright (C) 1991 DJ Delorie
* All rights reserved.
*
* Redistribution, modification, and use in source and binary forms is permitted
* provided that the above copyright notice and following paragraph are
* duplicated in all such forms.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
** jmp_buf:
** eax ebx ecx edx esi edi ebp esp eip
** 0 4 8 12 16 20 24 28 32
*/
#include "i386mach.h"
	.global SYM (setjmp)
	.global SYM (longjmp)
	SOTYPE_FUNCTION(setjmp)
	SOTYPE_FUNCTION(longjmp)
/*
 * int setjmp (jmp_buf j)	-- j at 8(ebp), layout per the table above:
 * eax ebx ecx edx esi edi ebp esp eip at offsets 0,4,...,32.
 * Saves the register state as it will be in the caller after setjmp
 * returns (edi/ebp/esp/eip are reconstructed from the stack frame),
 * then returns 0.
 */
SYM (setjmp):
	pushl ebp
	movl esp,ebp
	pushl edi
	movl 8 (ebp),edi		/* edi = jmp_buf pointer */
	movl eax,0 (edi)
	movl ebx,4 (edi)
	movl ecx,8 (edi)
	movl edx,12 (edi)
	movl esi,16 (edi)
	movl -4 (ebp),eax		/* caller's edi (pushed above) */
	movl eax,20 (edi)
	movl 0 (ebp),eax		/* caller's ebp (saved frame ptr) */
	movl eax,24 (edi)
	movl esp,eax
	addl $12,eax			/* caller's esp after this function returns */
	movl eax,28 (edi)
	movl 4 (ebp),eax		/* return address = resume eip */
	movl eax,32 (edi)
	popl edi
	movl $0,eax			/* direct call to setjmp returns 0 */
	leave
	ret
/*
 * void longjmp (jmp_buf j, int val)	-- j at 8(ebp), val at 12(ebp).
 * Forces val to 1 if it is 0, stashes it in j->eax, restores the saved
 * ebp/esp, pushes the saved eip and restores the remaining registers,
 * then `ret' transfers to just after the original setjmp call with
 * eax = val.  Interrupts are masked (__CLI/__STI) around the stack
 * switch so no handler runs on a half-switched stack.
 */
SYM (longjmp):
	pushl ebp
	movl esp,ebp
	movl 8(ebp),edi	 /* get jmp_buf */
	movl 12(ebp),eax	/* store retval in j->eax */
	testl eax,eax
	jne 0f
	incl eax			/* longjmp(.., 0) must deliver 1 */
0:
	movl eax,0(edi)
	movl 24(edi),ebp
	__CLI
	movl 28(edi),esp
	pushl 32(edi)			/* saved eip becomes the return address */
	movl 0(edi),eax
	movl 4(edi),ebx
	movl 8(edi),ecx
	movl 12(edi),edx
	movl 16(edi),esi
	movl 20(edi),edi		/* restore edi last; jmp_buf ptr no longer needed */
	__STI
	ret
|
stsp/newlib-ia16
| 1,640
|
newlib/libc/machine/h8300/memset.S
|
#include "setarch.h"
#include "defines.h"
#if defined (__H8300SX__)
	.global _memset
; void *memset (void *dst, int c, size_t n)
; er0 = dst (returned unchanged), r1l = fill byte, r2 = n.
; Fills bytewise/wordwise until er3 is longword aligned, then in
; longwords, then mops up the final word/byte indicated by r2 & 3.
_memset:
	; Use er3 is a temporary since er0 must remain unchanged on exit.
	mov.l	er0,er3
	; Fill er1 with the byte to copy.
	mov.b	r1l,r1h
	mov.w	r1,e1
	; Account for any excess bytes and words that will be copied after
	; the main loop.  r2 >= 0 if there is a longword to copy.
	sub	#4,LEN(r2)
	blo	longs_done
	; Copy one byte if doing so will make er3 word-aligned.
	; This isn't needed for correctness but it makes the main loop
	; slightly faster.
	bld	#0,r3l
	bcc	word_aligned
	mov.b	r1l,@er3+
	sub	#1,LEN(r2)
	blo	longs_done
word_aligned:
	; Likewise one word for longword alignment.
	bld	#1,r3l
	bcc	long_copy
	mov.w	r1,@er3+
	sub	#2,LEN(r2)
	blo	longs_done
long_copy:
	; Copy longwords.
	mov.l	er1,@er3+
	sub	#4,LEN(r2)
	bhs	long_copy
longs_done:
	; At this point, we need to copy r2 & 3 bytes.  Copy a word
	; if necessary.
	bld	#1,r2l
	bcc	words_done
	mov.w	r1,@er3+
words_done:
	; Copy a byte.
	bld	#0,r2l
	bcc	bytes_done
	mov.b	r1l,@er3+
bytes_done:
	rts
#else
; Non-SX variant: fill backwards from the end of the buffer.
; A0P pointer to cursor
; A1P thing to copy
	.global _memset
_memset:
	; Register-argument layout (stack layout kept for reference):
	; MOVP	@(2/4,r7),A2P	; dst
	; MOVP	@(4/8,r7),A1	; src thing
	; MOVP	@(6/12,r7),A3P	; len
	MOVP	A2P,A2P		; len == 0?
	beq	quit
	; A3 points to the end of the area
	MOVP	A0P,A3P
	ADDP	A2P,A3P
	; see if we can do it in words
	; by oring in the start of the buffer to the end address
	or	A0L,A2L
	btst	#0,A2L		; any odd address or odd length forces bytes
	bne	byteloop
	; we can do it a word at a time
	mov.b	A1L,A1H		; replicate fill byte into both halves
wordloop:
	mov.w	A1,@-A3P
	CMPP	A3P,A0P
	bne	wordloop
quit:	rts
byteloop:
	mov.b	A1L,@-A3P
	CMPP	A3P,A0P
	bne	byteloop
	rts
#endif
|
stsp/newlib-ia16
| 3,291
|
newlib/libc/machine/h8300/memcpy.S
|
#include "setarch.h"
#include "defines.h"
#ifdef __H8300SX__
	.global _memcpy
; void *memcpy (void *dst, const void *src, size_t n)
; er0 = dst (returned unchanged), er1 = src, r2 = n.
; Uses the SX block-move instructions: movmd.l moves er4 longwords
; from @er5+ to @er6+, movmd.b moves r4 bytes likewise.
_memcpy:
	stm.l	er4-er6,@-er7
	; Set up source and destination pointers for movmd.
	mov.l	er0,er6
	mov.l	er1,er5
	; See whether the copy is long enough to use the movmd.l code.
	; Although the code can handle anything longer than 6 bytes,
	; it can be more expensive than movmd.b for small moves.
	; It's better to use a higher threshold to account for this.
	;
	; Note that the exact overhead of the movmd.l checks depends on
	; the alignments of the length and pointers.  They are faster when
	; er0 & 3 == er1 & 3 == er2 & 3, faster still when these values
	; are 0.  This threshold is a compromise between the various cases.
	cmp	#16,LEN(r2)
	blo	simple
	; movmd.l only works for even addresses.  If one of the addresses
	; is odd and the other is not, fall back on a simple move.
	bld	#0,r5l
	bxor	#0,r6l
	bcs	simple
	; Make the addresses even.
	bld	#0,r5l
	bcc	word_aligned
	mov.b	@er5+,@er6+
	sub	#1,LEN(r2)
word_aligned:
	; See if copying one word would make the first operand longword
	; aligned.  Although this is only really worthwhile if it aligns
	; the second operand as well, it's no worse if doesn't, so it
	; hardly seems worth the overhead of a "band" check.
	bld	#1,r6l
	bcc	fast_copy
	mov.w	@er5+,@er6+
	sub	#2,LEN(r2)
fast_copy:
	; Set (e)r4 to the number of longwords to copy.
	mov	LEN(r2),LEN(r4)
	shlr	#2,LEN(r4)
#ifdef __NORMAL_MODE__
	; 16-bit pointers and size_ts: one movmd.l is enough.  This code
	; is never reached with r4 == 0.
	movmd.l
	and.w	#3,r2
simple:
	mov.w	r2,r4
	beq	quit
	movmd.b
quit:
	rts/l	er4-er6
#else
	; Skip the first iteration if the number of longwords is divisible
	; by 0x10000.
	mov.w	r4,r4
	beq	fast_loop_next
	; This loop copies r4 (!= 0) longwords the first time round and 65536
	; longwords on each iteration after that.
fast_loop:
	movmd.l
fast_loop_next:
	sub.w	#1,e4
	bhs	fast_loop
	; Mop up any left-over bytes.  We could just fall through to the
	; simple code after the "and" but the version below is quicker
	; and only takes 10 more bytes.
	and.w	#3,r2
	beq	quit
	mov.w	r2,r4
	movmd.b
quit:
	rts/l	er4-er6
simple:
	; Simple bytewise copy.  We need to handle all lengths, including zero.
	mov.w	r2,r4
	beq	simple_loop_next
simple_loop:
	movmd.b
simple_loop_next:
	sub.w	#1,e2
	bhs	simple_loop
	rts/l	er4-er6
#endif
#else
	.global _memcpy
; Non-SX variant: copy backwards from the end, words when everything
; (dst, src, len) is even, else bytes.
_memcpy:
	; Register-argument layout (old stack layout kept for reference):
	; MOVP	@(2/4,r7),A0P	; dst
	; MOVP	@(4/8,r7),A1P	; src
	; MOVP	@(6/12,r7),A2P	; len
	MOVP	A0P,A3P		; keep copy of final dst
	ADDP	A2P,A0P		; point to end of dst
	CMPP	A0P,A3P		; see if anything to do
	beq	quit
	ADDP	A2P,A1P		; point to end of src
	; lets see if we can do this in words
	or	A0L,A2L		; or in the dst address
	or	A3L,A2L		; or the length
	or	A1L,A2L		; or the src address
	btst	#0,A2L		; see if the lsb is zero
	bne	byteloop
wordloop:
#ifdef __NORMAL_MODE__
	sub	#2,A1P
#else
	subs	#2,A1P		; point to word
#endif
	mov.w	@A1P,A2		; get word
	mov.w	A2,@-A0P	; save word
	CMPP	A0P,A3P		; at the front again ?
	bne	wordloop
	rts
byteloop:
#ifdef __NORMAL_MODE__
	sub	#1,A1P
#else
	subs	#1,A1P		; point to byte
#endif
	mov.b	@A1P,A2L	; get byte
	mov.b	A2L,@-A0P	; save byte
	CMPP	A0P,A3P		; at the front again ?
	bne	byteloop
	; return with A0 pointing to dst
quit:	rts
#endif
|
stsp/newlib-ia16
| 1,037
|
newlib/libc/machine/h8300/setjmp.S
|
#include "setarch.h"
.file "setjmp.S"
.section .text
.align 2
	.global _setjmp
; int setjmp (jmp_buf buf)	-- buf in (e)r0.
; Stores the callee-saved registers (r4-r7; r7 is the stack pointer)
; and the return address (read from @sp) into buf, then returns 0.
; Three variants by CPU mode: SX (post-increment stores), H8/300H / S
; (32-bit regs, fixed offsets), and plain H8/300 (16-bit regs).
_setjmp:
#if defined(__H8300SX__)
	mov.l	er7,@er0+
	mov.l	er6,@er0+
	mov.l	er5,@er0+
	mov.l	er4,@er0+
	mov.l	@sp,@er0	; return address
	sub.l	er0,er0		; return value 0
#elif defined(__H8300H__) || defined(__H8300S__)
	mov.l	er7,@er0
	mov.l	er6,@(4,er0)
	mov.l	er5,@(8,er0)
	mov.l	er4,@(12,er0)
	mov.l	@sp,er1		; return address
	mov.l	er1,@(16,er0)
	sub.l	er0,er0		; return value 0
#else
	mov.w	r7,@r0
	mov.w	r6,@(2,r0)
	mov.w	r5,@(4,r0)
	mov.w	r4,@(6,r0)
	mov.w	@sp,r1		; return address
	mov.w	r1,@(8,r0)
	sub.w	r0,r0		; return value 0
#endif
	rts
	.global _longjmp
; void longjmp (jmp_buf buf, int val)	-- buf in (e)r0, val in (e)r1.
; Restores r4-r7 (r7 = stack pointer) from buf, overwrites the word at
; @sp with the saved return address, moves val into the return
; register, and substitutes 1 if val was 0; rts then "returns" to the
; original setjmp call site.
_longjmp:
#if defined(__H8300H__) || defined (__H8300S__) || defined (__H8300SX__)
	mov.l	@er0+,er7
	mov.l	@er0+,er6
	mov.l	@er0+,er5
	mov.l	@er0+,er4
#if defined(__H8300SX__)
	mov.l	@er0,@sp	; saved return address onto new stack
#else
	mov.l	@er0,er2
	mov.l	er2,@sp		; saved return address onto new stack
#endif
#if (__INT_MAX__ <= 32767)
	mov.w	r1,r0		; sets Z flag when val == 0
#else
	mov.l	er1,er0		; sets Z flag when val == 0
#endif
	bne	.L1
	sub	er0,er0
	adds	#1,er0		; val was 0: return 1 instead
#else
	mov.w	@r0+,r7
	mov.w	@r0+,r6
	mov.w	@r0+,r5
	mov.w	@r0+,r4
	mov.w	@r0,r2
	mov.w	r2,@sp		; saved return address onto new stack
	mov.w	r1,r0		; sets Z flag when val == 0
	bne	.L1
	mov.w	#1,r0		; val was 0: return 1 instead
#endif
.L1:
	rts
|
stsp/newlib-ia16
| 2,993
|
newlib/libc/machine/or1k/setjmp.S
|
/*
Copyright (c) 2014, Hesham ALMatary
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.align 4
.global setjmp
.type setjmp,@function
setjmp:
l.sw 4(r3), r1
l.sw 8(r3), r2
/* Skip r3-r8 as they are not preserved across function calls */
l.sw 36(r3), r9
/* Skip r10 as it's preserved to be used by TLS */
/* Skip r11, setjmp always set it to 0 */
/* The following set if registers are preserved across function calls */
l.sw 52(r3), r14
l.sw 60(r3), r16
l.sw 68(r3), r18
l.sw 76(r3), r20
l.sw 84(r3), r22
l.sw 92(r3), r24
l.sw 100(r3), r26
l.sw 108(r3), r28
l.sw 116(r3), r30
/* Save Status Register */
l.mfspr r13, r0, 17
l.sw 124(r3), r13
/* Set result register to 0 and jump */
// Different cases for optional delay slot
#if defined(__OR1K_NODELAY__)
l.addi r11, r0, 0
l.jr r9
#elif defined(__OR1K_DELAY__)
l.jr r9
l.addi r11, r0, 0
#else
l.addi r11, r0, 0
l.jr r9
l.nop
#endif
.align 4
.global longjmp
.type longjmp,@function
longjmp:
/* If the second argument to longjmp is zero, set return address to 1,
otherwise set it to the value of the second argument */
l.addi r11, r0, 1
l.sfne r4, r0
l.bf 1f
l.nop
l.addi r11, r4, 0
/* Load status register */
1:
l.lwz r15, 124(r3)
l.mtspr r0, r15, 17
l.lwz r1, 4(r3)
l.lwz r2, 8(r3)
/* Skip r3-r8 as they are not preserved across function calls */
l.lwz r9, 36(r3)
/* Skip r11 as it's always set by longjmp */
l.lwz r14, 52(r3)
l.lwz r16, 60(r3)
l.lwz r18, 68(r3)
l.lwz r20, 76(r3)
l.lwz r22, 84(r3)
l.lwz r24, 92(r3)
l.lwz r26, 100(r3)
l.lwz r28, 108(r3)
// Different cases for optional delay slot
#if defined(__OR1K_NODELAY__)
l.lwz r30, 116(r3)
l.jr r9
#elif defined(__OR1K_DELAY__)
l.jr r9
l.lwz r30, 116(r3)
#else
l.lwz r30, 116(r3)
l.jr r9
l.nop
#endif
|
stsp/newlib-ia16
| 3,119
|
newlib/libc/machine/cr16/setjmp.S
|
##############################################################################
# setjmp.S -- CR16 setjmp routine #
# #
# Copyright (c) 2004 National Semiconductor Corporation #
# #
# The authors hereby grant permission to use, copy, modify, distribute, #
# and license this software and its documentation for any purpose, provided #
# that existing copyright notices are retained in all copies and that this #
# notice is included verbatim in any distributions. No written agreement, #
# license, or royalty fee is required for any of the authorized uses. #
# Modifications to this software may be copyrighted by their authors #
# and need not follow the licensing terms described here, provided that #
# the new terms are clearly indicated on the first page of each file where #
# they apply. #
# #
# C library -- setjmp, longjmp #
# longjmp(a,v) #
# will generate a "return(v)" #
# from the last call to #
# setjmp(a) #
# by restoring r7-ra, sp, #
# and pc from 'a' #
# and doing a return. (Makes sure that longjmp never returns 0). #
##############################################################################
.text
.file "setjmp.s"
.align 4
.globl _setjmp
.align 4
_setjmp:
#r3, r2: .blkw
storw r7, 0(r3,r2)
addd $2, (r3,r2)
storw r8, 0(r3,r2)
addd $2, (r3,r2)
storw r9, 0(r3,r2)
addd $2, (r3,r2)
storw r10, 0(r3,r2)
addd $2, (r3,r2)
storw r11, 0(r3,r2)
addd $2, (r3,r2)
stord (r12), 0(r3,r2)
addd $4, (r3,r2)
stord (r13), 0(r3,r2)
addd $4, (r3,r2)
stord (ra), 0(r3,r2)
addd $4, (r3,r2)
stord (sp), 0(r3,r2)
movd $0,(r1,r0)
jump (ra)
.globl _longjmp
_longjmp:
#r3, r2: .blkw # pointer save area
#r5, r4: .blkw # ret vlaue
loadw 0(r3,r2), r7
addd $2, (r3,r2)
loadw 0(r3,r2), r8
addd $2, (r3,r2)
loadw 0(r3,r2), r9
addd $2, (r3,r2)
loadw 0(r3,r2), r10
addd $2, (r3,r2)
loadw 0(r3,r2), r11
addd $2, (r3,r2)
loadd 0(r3,r2), (r12)
addd $4, (r3,r2)
loadd 0(r3,r2), (r13)
addd $4, (r3,r2)
loadd 0(r3,r2), (ra)
addd $4, (r3,r2)
loadd 0(r3,r2), (sp)
#ifdef __INT32__
movd (r5,r4), (r1,r0)
cmpd $0, (r5,r4)
bne end1
movd $1, (r1,r0)
#else
movw r4, r0
cmpw $0, r4
bne end1
movw $1, r0
#endif
end1:
jump (ra)
.align 4
|
stsp/newlib-ia16
| 2,346
|
newlib/libc/machine/visium/setjmp.S
|
/* setjmp/longjmp for the Visium processor.
Copyright (c) 2015 Rolls-Royce Controls and Data Services Limited.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Rolls-Royce Controls and Data Services Limited nor
the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE. */
.text
.globl setjmp
.type setjmp, @function
setjmp:
write.l 0(r1),r11
write.l 1(r1),r12
write.l 2(r1),r13
write.l 3(r1),r14
write.l 4(r1),r15
write.l 5(r1),r16
write.l 6(r1),r17
write.l 7(r1),r18
write.l 8(r1),r19
write.l 9(r1),r21
write.l 10(r1),r22
write.l 11(r1),r23
bra tr,r21,r0
moviq r1,0
.size setjmp, .-setjmp
.globl longjmp
.type longjmp, @function
longjmp:
read.l r11,0(r1)
read.l r12,1(r1)
read.l r13,2(r1)
read.l r14,3(r1)
read.l r15,4(r1)
read.l r16,5(r1)
read.l r17,6(r1)
read.l r18,7(r1)
read.l r19,8(r1)
read.l r21,9(r1)
read.l r22,10(r1)
read.l r23,11(r1)
bra tr,r21,r0
move.l r1,r2
.size longjmp, .-longjmp
|
stsp/newlib-ia16
| 3,913
|
newlib/libc/machine/m32c/setjmp.S
|
/*
Copyright (c) 2005 Red Hat Incorporated.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of Red Hat Incorporated may not be used to endorse
or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(__r8c_cpu__) || defined(__m16c_cpu__)
#define A16 1
#endif
/* We implement setjmp/longjmp much like the way gcc implements
exceptions - we create new stack frames, then switch to them and
return. Thus, the two setjmp's below each push all the relevent
registers, then copy the whole frame into the buffer (first $sp is
moved, then smovf copies the frame itself), and the two longjmps
restore $sp, copy the frame back into place, and issue the same
return as the setjmp would have used.
Since the sizes of registers differs between the 16 and 24 bit
models, we provide separate implementations for each rather than
trying to parameterize them.
Jump buffer sizes: 21 bytes for 16 bit, 34 bytes for 24 bit.
*/
.text
#ifdef A16 /* 16 bit versions */
.global _setjmp
_setjmp:
enter #0
pushm r1,r2,r3,a0,a1,sb,fb
; At this point, the stack looks like this:
; ... [pc:3] [oldfb:2] <fb> [r1:2] [r2:2] [r3:2] [a0:2] [a1:2] [sb:2] [fb:2] <sp> */
mov.w r1,a1 ; a1 is the destination of smovf
mov.b #0,r1h
stc sp,a0 ; r1h:a0 is the source of smovf
mov.w a0,[a1]
add.w #2,a1
mov.w #19,r3 ; plus two for sp later
smovf.b
; Return 0 to caller.
mov.w #0,r0
popm r1,r2,r3,a0,a1,sb,fb
exitd
.global _longjmp
_longjmp:
enter #0
mov.w r1,a0 ; pointer to jump buf
mov.w r2,r0 ; setjmp's "new" return value
mov.b #0,r1h ; r1h: a0 is the source, now jmpbuf
mov.w [a0],a1 ; dest is new stack
ldc a1,sp
add.w #2,a0
mov.w #19,r3
smovf.b
;; now return to our caller with this newly restored frame
popm r1,r2,r3,a0,a1,sb,fb
exitd
#else /* 24 bit versions */
.global _setjmp
_setjmp:
enter #0
pushm r1,r2,r3,a0,a1,sb,fb
; At this point, the stack looks like this:
; ... [jbuf:4] [pc:4] [oldfb:4] <fb> [r1:2] [r2:2] [r3:2] [a0:4] [a1:4] [sb:4] [fb:4] <sp> */
mov.l 8[fb],a1 ; a1 is the destination of smovf
stc sp,a0 ; r1h:a0 is the source of smovf
mov.l a0,[a1]
add.l #4,a1
mov.w #30,r3 ; plus two for sp later
smovf.b
; Return 0 to caller.
mov.w #0,r0
popm r1,r2,r3,a0,a1,sb,fb
exitd
.global _longjmp
_longjmp:
enter #0
; ... [rv:2] [jbuf:4] [pc:4] [oldfb:4] <fb>
mov.l 8[fb],a0 ; pointer to jump buf
mov.w 12[fb],r0 ; setjmp's "new" return value
mov.l [a0],a1 ; dest is new stack
ldc a1,sp
add.l #4,a0
mov.w #30,r3
smovf.b
;; now return to our caller with this newly restored frame
popm r1,r2,r3,a0,a1,sb,fb
exitd
#endif
|
stsp/newlib-ia16
| 2,426
|
newlib/libc/machine/crx/setjmp.S
|
##############################################################################
# setjmp.S -- CRX setjmp routine #
# #
# Copyright (c) 2004 National Semiconductor Corporation #
# #
# The authors hereby grant permission to use, copy, modify, distribute, #
# and license this software and its documentation for any purpose, provided #
# that existing copyright notices are retained in all copies and that this #
# notice is included verbatim in any distributions. No written agreement, #
# license, or royalty fee is required for any of the authorized uses. #
# Modifications to this software may be copyrighted by their authors #
# and need not follow the licensing terms described here, provided that #
# the new terms are clearly indicated on the first page of each file where #
# they apply. #
# #
# C library -- setjmp, longjmp #
# longjmp(a,v) #
# will generate a "return(v)" #
# from the last call to #
# setjmp(a) #
# by restoring r7-ra, sp, #
# and pc from 'a' #
# and doing a return. (Makes sure that longjmp never returns 0). #
##############################################################################
.text
.file "setjmp.s"
.align 4
.globl _setjmp
.align 4
_setjmp:
#r2: .blkw
storm r2,{r7,r8,r9,r10,r11,r12,r13,r14}
stord sp,0(r2)
movd $0,r0
jump ra
.globl _longjmp
_longjmp:
#r2: .blkw # pointer save area
#r3: .blkw # ret vlaue
loadm r2, {r7,r8,r9,r10,r11,r12,r13,ra}
loadd 0(r2), sp
movd r3, r0
cmpd $0, r3
bne end1
movd $1, r0
end1:
jump ra
.align 4
|
stsp/newlib-ia16
| 1,908
|
newlib/libc/machine/z8k/memset.S
|
/*
* memset routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memset(void *buffer, int value, size_t length);
*/
name "memset.S"
.text
even
global _memset
_memset:
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
ld r5,rr14(#8)
ldl rr2,rr14(#10)
#else
pushl @rr14,rr6
#endif
/* rr2 - length
* rl5 - value
* rr6 - buffer
*/
testl rr2
jr z,finish
ldb rh5,rl5
ld r1,r5 /* r1 contains value */
bit r7,#0
jr z,not_odd
ldb @rr6,rl1
inc r7,#1
subl rr2,#1
jr z,finish
not_odd:ld r0,r3 /* remember length */
srl r3,#1
jr z,no_words
ldl rr4,rr6
ld @rr6,r1
inc r7,#2
dec r3,#1
jr z,no_words
ldir @rr6,@rr4,r3 /* fill words */
no_words:
bit r0,#0 /* one byte remaining? */
jr z,finish
ldb @rr6,rl1
finish:
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
#else
popl rr2,@rr14
#endif
#else /* above Z8001, below Z8002 */
unsegm
#ifdef __STD_CALL__
ld r7,r15(#2)
ld r6,r15(#4)
ld r5,r15(#6)
#else
ld r2,r7 /* buffer pointer return value */
#endif
/* r5 - length
* r6 - value
* r7 - buffer
*/
test r5
jr z,finish
ldb rh6,rl6
ld r1,r6 /* r1 contains value */
bit r7,#0
jr z,not_odd
ldb @r7,rl1
inc r7,#1
dec r5,#1
jr z,finish
not_odd:ld r0,r5 /* remember length */
srl r5,#1
jr z,no_words
ld r4,r7
ld @r7,r1
inc r7,#2
dec r5,#1
jr z,no_words
ldir @r7,@r4,r5 /* fill words */
no_words:
bit r0,#0 /* one byte remaining? */
jr z,finish
ldb @r7,rl1
finish:
#ifdef __STD_CALL__
ld r7,r15(#2)
#endif
#endif /* Z8002 */
ret
.end
|
stsp/newlib-ia16
| 2,426
|
newlib/libc/machine/z8k/memcpy.S
|
/*
* memcpy routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memcpy(void *dest, const void *src, size_t length);
*/
name "memcpy.S"
.text
even
global _memcpy
global memmove_entry
_memcpy:
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
ldl rr4,rr14(#8)
ldl rr2,rr14(#12)
#else
pushl @rr14,rr6
#endif
/* rr2 - length (high word ignored)
* rr4 - src
* rr6 - dest
*/
testl rr2
jr z,finish
memmove_entry: /* external entry point from memmove */
bitb rl7,#0 /* odd destination address? */
jr nz,testsrc
bitb rl5,#0 /* odd source address? */
jr nz,odd_copy
jr t,even_copy /* dest even, src odd */
testsrc:
bitb rl5,#0
jr z,odd_copy /* src even, dest odd */
ldib @rr6,@rr4,r3
jr ov,finish /* jump if r3 is zero now */
/* copy words */
even_copy:
ld r2,r3 /* remember length */
srl r3,#1
jr z,no_words
ldir @rr6,@rr4,r3
no_words:
bitb rl2,#0 /* odd length? */
jr z,finish
ldib @rr6,@rr4,r2 /* yes, copy last byte */
jr finish
/* copy bytes */
odd_copy:
ldirb @rr6,@rr4,r3
finish:
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
#else
popl rr2,@rr14
#endif
#else /* above Z8001, below Z8002 */
unsegm
#ifdef __STD_CALL__
ld r7,r15(#2)
ld r6,r15(#4)
ld r5,r15(#6)
#else
ld r2,r7 /* buffer pointer return value */
#endif
/* r5 - length
* r6 - src
* r7 - dest
*/
test r5
jr z,finish
memmove_entry: /* external entry point from memmove */
bitb rl7,#0 /* odd destination address? */
jr nz,testsrc
bitb rl6,#0 /* odd source address? */
jr nz,odd_copy
jr t,even_copy /* dest even, src odd */
testsrc:
bitb rl6,#0
jr z,odd_copy /* src even, dest odd */
ldib @r7,@r6,r5
jr ov,finish /* jump if r5 is zero now */
/* copy words */
even_copy:
ld r4,r5 /* remember length */
srl r5,#1
jr z,no_words
ldir @r7,@r6,r5
no_words:
bitb rl4,#0 /* odd length? */
jr z,finish
ldib @r7,@r6,r4 /* yes, copy last byte */
jr finish
/* copy bytes */
odd_copy:
ldirb @r7,@r6,r5
finish:
#ifdef __STD_CALL__
ld r7,r15(#2)
#endif
#endif /* Z8002 */
ret
.end
|
stsp/newlib-ia16
| 2,911
|
newlib/libc/machine/z8k/memcmp.S
|
/*
* memcmp routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* int memcmp(const void *b1, const void *b2, size_t length);
*/
name "memcmp.S"
.text
even
global _memcmp
_memcmp:
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
ldl rr4,rr14(#8)
ldl rr2,rr14(#12)
#endif
/* rr2 - length (high word ignored)
* rr4 - b2
* rr6 - b1
*/
clr r1 /* initialize return value */
testl rr2
jr z,finish
bitb rl7,#0 /* odd b1? */
jr nz,testb2
bitb rl5,#0 /* odd b2? */
jr nz,odd_cmp /* b1 even, b2 odd */
jr t,even_cmp
testb2:
bitb rl5,#0
jr z,odd_cmp /* b2 even, b1 odd */
cpsib @rr6,@rr4,r3,eq
jr z,beq /* bytes are the same */
jr t,byte_diff
beq: jr ov,finish /* jump if r3 is zero now */
/* compare words */
even_cmp:
ld r2,r3 /* remember length */
srl r3,#1
jr z,no_words
cpsir @rr6,@rr4,r3,ne
jr nz,no_words
dec r7,#2
dec r5,#2 /* point to different bytes */
ldk r3,#2
jr t,odd_cmp
no_words:
bitb rl2,#0 /* odd length? */
jr z,finish
cpsib @rr6,@rr4,r3,eq
jr z,finish /* last bytes are the same */
jr t,byte_diff
/* compare bytes */
odd_cmp:
cpsirb @rr6,@rr4,r3,ne
jr nz,finish
byte_diff:
dec r7,#1
dec r5,#1 /* point to different bytes */
ldb rl1,@rr6
clr r0
ldb rl0,@rr4
sub r1,r0
finish: /* set return value */
#ifdef __STD_CALL__
ld r7,r1
#else
ld r2,r1
#endif
#else /* above Z8001, below Z8002 */
unsegm
#ifdef __STD_CALL__
ld r7,r15(#2)
ld r6,r15(#4)
ld r5,r15(#6)
#endif
/* r5 - length
* r6 - b2
* r7 - b1
*/
clr r1 /* initialize return value */
test r5
jr z,finish
bitb rl7,#0 /* odd destination address? */
jr nz,testb2
bitb rl6,#0 /* odd source address? */
jr nz,odd_cmp /* b1 even, b2 odd */
jr t,even_cmp
testb2:
bitb rl6,#0
jr z,odd_cmp /* b2 even, b1 odd */
cpsib @r7,@r6,r5,eq
jr z,beq /* bytes are the same */
jr t,byte_diff
beq: jr ov,finish /* jump if r3 is zero now */
/* compare words */
even_cmp:
ld r4,r5 /* remember length */
srl r5,#1
jr z,no_words
cpsir @r7,@r6,r5,ne
jr nz,no_words
dec r7,#2
dec r6,#2 /* point to different bytes */
ldk r5,#2
jr t,odd_cmp
no_words:
bitb rl4,#0 /* odd length? */
jr z,finish
cpsib @r7,@r6,r4,eq
jr z,finish /* last bytes are the same */
jr t,byte_diff
/* compare bytes */
odd_cmp:
cpsirb @r7,@r6,r5,ne
jr nz,finish
byte_diff:
dec r7,#1
dec r6,#1 /* point to different bytes */
ldb rl1,@r7
clr r0
ldb rl0,@r6
sub r1,r0
finish:
#ifdef __STD_CALL__
ld r7,r1
#else
ld r2,r1
#endif
#endif /* Z8002 */
ret
.end
|
stsp/newlib-ia16
| 3,306
|
newlib/libc/machine/z8k/memmove.S
|
/*
* memmove routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memmove(void *dest, const void *src, size_t length);
*/
name "memmove.S"
.text
even
global _memmove
_memmove:
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
ldl rr4,rr14(#8)
ldl rr2,rr14(#12)
#else
pushl @rr14,rr6
#endif
/* rr2 - length (high word ignored)
* rr4 - src
* rr6 - dest
*/
testl rr2
jr z,finish
/* check for destructive overlap (src < dest && dest < src + length) */
cpl rr6,rr4
jp ule,memmove_entry /* non-destructive, let memcpy do the work */
ldl rr0,rr2
addl rr0,rr4 /* rr0 = src + length */
cpl rr0,rr6
jp ult,memmove_entry /* non-destructive, let memcpy do the work */
/* set-up pointers to copy backwards, add (length - 1) */
addl rr4,rr2 /* src + length */
addl rr6,rr2 /* dest + length */
subl rr4,#1
subl rr6,#1
/* check alignment */
bitb rl7,#0 /* odd destination address? */
jr z,testsrc
bitb rl5,#0 /* odd source address? */
jr z,odd_copy
jr even_copy
testsrc:
bitb rl5,#0
jr nz,odd_copy /* src even, dest odd */
lddb @rr6,@rr4,r3
jr ov,finish /* jump if r5 is zero now */
/* copy words */
even_copy:
ld r2,r3 /* remember length */
srl r3,#1
/* jr z,no_words it cannot be zero here */
dec r5,#1
dec r7,#1
lddr @rr6,@rr4,r3
no_words:
bitb rl2,#0 /* odd length? */
jr z,finish
inc r5,#1
inc r7,#1
lddb @rr6,@rr4,r2 /* yes, copy last byte */
jr finish
/* copy bytes */
odd_copy:
lddrb @rr6,@rr4,r3
finish:
#ifdef __STD_CALL__
ldl rr6,rr14(#4)
#else
popl rr2,@rr14
#endif
#else /* above Z8001, below Z8002 */
unsegm
#ifdef __STD_CALL__
ld r7,r15(#2)
ld r6,r15(#4)
ld r5,r15(#6)
#else
ld r2,r7 /* buffer pointer return value */
#endif
/* r5 - length
* r6 - src
* r7 - dest
*/
test r5
jr z,finish
/* check for destructive overlap (src < dest && dest < src + length) */
cp r7,r6
jp ule,memmove_entry /* non-destructive, let memcpy do the work */
ld r0,r5
add r0,r6 /* r0 = src + length */
cp r0,r7
jp ult,memmove_entry /* non-destructive, let memcpy do the work */
/* set-up pointers to copy backwards, add (length - 1) */
add r6,r5 /* src + length */
add r7,r5 /* dest + length */
dec r6,#1
dec r7,#1
/* check alignment */
bitb rl7,#0 /* odd destination address? */
jr z,testsrc
bitb rl6,#0 /* odd source address? */
jr z,odd_copy
jr even_copy
testsrc:
bitb rl6,#0
jr nz,odd_copy /* src even, dest odd */
lddb @r7,@r6,r5
jr ov,finish /* jump if r5 is zero now */
/* copy words */
even_copy:
ld r4,r5 /* remember length */
srl r5,#1
/* jr z,no_words it cannot be zero here */
dec r6,#1
dec r7,#1
lddr @r7,@r6,r5
no_words:
bitb rl4,#0 /* odd length? */
jr z,finish
inc r6,#1
inc r7,#1
lddb @r7,@r6,r4 /* yes, copy last byte */
jr finish
/* copy bytes */
odd_copy:
lddrb @r7,@r6,r5
finish:
#ifdef __STD_CALL__
ld r7,r15(#2)
#endif
#endif /* Z8002 */
ret
.end
|
stsp/newlib-ia16
| 1,905
|
newlib/libc/machine/z8k/setjmp.S
|
.global _setjmp
.global _longjmp
#ifdef __Z8001__
segm
#ifdef __STD_CALL__
_setjmp:
ldl rr6,rr14(#4) ! get argument
ldl rr2,@rr14 ! fetch pc
ldl @rr6,rr2 ! save it
ldl rr6(#16),rr8
ldl rr6(#4),rr10
ldl rr6(#8),rr12 ! remember frame pointer
ldl rr6(#12),rr14 ! remember stack pointer
ldk r7,#0
ret t
_longjmp:
ldl rr4,rr14(#4) ! get first argument
ld r7,rr14(#8) ! get return value
ldl rr8,rr4(#16)
ldl rr10,rr4(#4)
ldl rr12,rr4(#8) ! restore old frame pointer
ldl rr14,rr4(#12) ! restore old stack pointer
ldl rr4,@rr4 ! return address
inc r15,#4
jp @rr4
#else /* above __STD_CALL_, below not */
_setjmp:
ldl rr2,@rr14 ! fetch pc
ldl @rr6,rr2 ! save it
ldl rr6(16),rr8
ldl rr6(4),rr10
ldl rr6(8),rr12 ! and the other special regs
ldl rr6(12),rr14
ldk r2,#0
ret t
_longjmp:
ld r2,r5 ! get return value
ldl rr4,rr6(0)
ldl rr8,rr6(16)
ldl rr10,rr6(4)
ldl rr12,rr6(8)
ldl rr14,rr6(12)
inc r15,#4
jp @rr4
#endif /* not __STD_CALL__ */
#else /* above Z8001, below Z8002 */
unseg
#ifdef __STD_CALL__
_setjmp:
ld r7,r15(#2) ! get argument
ld r2,@r15 ! fetch pc
ld @r7,r2 ! save it
ldl r7(#14),rr8
ldl r7(#2),rr10
ldl r7(#6),rr12 ! remember frame pointer
ldl r7(#10),rr14 ! remember stack pointer
ldk r7,#0
ret t
_longjmp:
ld r4,r15(#2) ! get first argument (jmp_buf)
ld r7,r15(#4) ! get return value
ldl rr8,r4(#14)
ldl rr10,r4(#2)
ldl rr12,r4(#6) ! restore old frame pointer
ldl rr14,r4(#10) ! restore old stack pointer
ld r4,@r4 ! return address
inc r15,#2
jp @r4
#else /* above __STD_CALL_, below not */
_setjmp:
ld r2,@r15 ! fetch pc
ld @r7,r2 ! save it
ldl r7(4),rr10
ldl r7(8),rr12 ! and the other special regs
ldl r7(12),rr14
ldk r2,#0
ret t
_longjmp:
ld r2,r6 ! get return value
ld r4,@r7
ldl rr10,r7(4)
ldl rr12,r7(8)
ldl rr14,r7(12)
inc r15,#2
jp @r4
#endif /* not __STD_CALL__ */
#endif /* Z8002 version */
|
stsp/newlib-ia16
| 2,194
|
newlib/libc/machine/d30v/setjmp.S
|
; setjmp/longjmp for D30V.
.text
.globl setjmp
.type setjmp,@function
.stabs "setjmp.S",100,0,0,setjmp
.stabs "int:t(0,1)=r(0,1);-2147483648;2147483647;",128,0,0,0
.stabs "setjmp:F(0,1)",36,0,1,setjmp
setjmp:
; Address of jmpbuf is passed in R2. Save the appropriate registers.
st2w r26, @(r2+,r0)
st2w r28, @(r2+,r0)
st2w r30, @(r2+,r0)
st2w r32, @(r2+,r0)
st2w r34, @(r2+,r0)
st2w r36, @(r2+,r0)
st2w r38, @(r2+,r0)
st2w r40, @(r2+,r0)
st2w r42, @(r2+,r0)
st2w r44, @(r2+,r0)
st2w r46, @(r2+,r0)
st2w r48, @(r2+,r0)
st2w r50, @(r2+,r0)
st2w r52, @(r2+,r0)
st2w r54, @(r2+,r0)
st2w r56, @(r2+,r0)
st2w r58, @(r2+,r0)
st2w r60, @(r2+,r0)
st2w r62, @(r2+,r0)
mvfacc r4, a1, 16
mvfacc r5, a1, 0
st2w r4, @(r2+,r0)
mvfsys r4, psw
mvfsys r5, rpt_c
st2w r4, @(r2+,r0)
mvfsys r4, rpt_s
mvfsys r5, rpt_e
st2w r4, @(r2+,r0)
mvfsys r4, mod_s
mvfsys r5, mod_e
st2w r4, @(r2+,r0)
; Return 0 to caller
add r2, r0, r0
jmp link
.Lsetjmp:
.size setjmp,.Lsetjmp-setjmp
.stabs "",36,0,0,.Lsetjmp-setjmp
.globl longjmp
.type longjmp,@function
.stabs "longjmp:F(0,1)",36,0,1,longjmp
longjmp:
; Address of jmpbuf is in R2. Restore the registers.
ld2w r26, @(r2+,r0)
ld2w r28, @(r2+,r0)
ld2w r30, @(r2+,r0)
ld2w r32, @(r2+,r0)
ld2w r34, @(r2+,r0)
ld2w r36, @(r2+,r0)
ld2w r38, @(r2+,r0)
ld2w r40, @(r2+,r0)
ld2w r42, @(r2+,r0)
ld2w r44, @(r2+,r0)
ld2w r46, @(r2+,r0)
ld2w r48, @(r2+,r0)
ld2w r50, @(r2+,r0)
ld2w r52, @(r2+,r0)
ld2w r54, @(r2+,r0)
ld2w r56, @(r2+,r0)
ld2w r58, @(r2+,r0)
ld2w r60, @(r2+,r0)
ld2w r62, @(r2+,r0)
ld2w r4, @(r2+,r0)
mvtacc a1, r4, r5
mvfsys r6, psw
ld2w r4, @(r2+,r0) /* psw, rpt_c */
and r6, r6, 0xfcff /* set rp, md bits from setjmp, leave */
and r4, r4, 0x0300 /* all other psw bits the same */
or r4, r4, r6
mvtsys psw, r4
mvtsys rpt_c, r5
ld2w r4, @(r2+,r0)
mvtsys rpt_s, r4
mvtsys rpt_e, r5
ld2w r4, @(r2+,r0)
mvtsys mod_s, r4
mvtsys mod_e, r5
; Value to return to caller is in R3. If caller attemped to return 0,
; return 1 instead.
cmpeq f0, r3, 0 || add r2, r3, r0
jmp link || add/tx r2, r2, 1
.Llongjmp:
.size longjmp,.Llongjmp-longjmp
.stabs "",36,0,0,.Llongjmp-longjmp
|
stsp/newlib-ia16
| 1,064
|
newlib/libc/machine/ia16/memmove.S
|
#include "call-cvt.h"
.arch i8086,jumps
.code16
.att_syntax prefix
TEXT_ (memmove.S.NEWLIB)
.global memmove
memmove:
ENTER_BX_(6)
pushw %si
pushw %di
pushw %es
movw %ds, %si
movw %si, %es
MOV_ARG0W_BX_(%di)
MOV_ARG2W_BX_(%si)
MOV_ARG4W_BX_(%cx)
movw %di, %ax
cmpw %di, %si
ja .L1
# dest > src => copy backwards
addw %cx, %si
addw %cx, %di
decw %di
std
cmpw %si, %di
jne .L2
# dest == src + 1 => copy backwards a byte at a time
decw %si
rep movsb
cld
popw %es
popw %di
popw %si
RET_(6)
.L2:
# dest > src + 1 => copy backwards two bytes at a time
decw %di
decw %si
decw %si
shrw $1, %cx
rep movsw
jnc .L3
incw %si
incw %di
movsb
.L3:
cld
popw %es
popw %di
popw %si
RET_(6)
.L1:
# dest < src => copy forwards
incw %di
cmpw %si, %di
jne .L4
# dest == src - 1 => copy forwards a byte at a time
decw %di
rep movsb
popw %es
popw %di
popw %si
RET_(6)
.L4:
# dest < src - 1 => copy forwards two bytes at a time
decw %di
shrw $1, %cx
rep movsw
adcw %cx, %cx
rep movsb
popw %es
popw %di
popw %si
RET_(6)
|
stsp/newlib-ia16
| 2,090
|
newlib/libc/machine/ia16/setjmp.S
|
# setjmp/longjmp for ia16. The jmpbuf looks like this:
#
# Register jmpbuf offset
# ax 0x00
# bx 0x02
# cx 0x04
# dx 0x06
# si 0x08
# di 0x0a
# bp 0x0c
# sp 0x0e
# es 0x10
# ip 0x12
#include "call-cvt.h"
.arch i8086,jumps
.code16
.att_syntax prefix
#ifdef __ELKS__
.text
.global _setjmp
_setjmp:
#else
TEXT_ (setjmp.S.NEWLIB)
.global setjmp
setjmp:
#endif
# %cs (if medium) # 6(%bp)
# %ip # 4(%bp)
pushw %bx # 2(%bp)
pushw %bp # 0(%bp)
movw %sp, %bp
#ifndef __IA16_CALLCVT_REGPARMCALL
# ifndef __IA16_CMODEL_IS_FAR_TEXT
movw 6(%bp), %bx # %bx points to jumpbuf.
# else
movw 8(%bp), %bx
# endif
#else
movw %ax, %bx
#endif
movw %ax, (%bx) # Save %ax.
movw 2(%bp), %ax
movw %ax, 2(%bx) # Save %bx.
movw %cx, 4(%bx) # Save %cx.
movw %dx, 6(%bx) # Save %dx.
movw %si, 8(%bx) # Save %si.
movw %di, 10(%bx) # Save %di.
movw 0(%bp), %ax
movw %ax, 12(%bx) # Save %bp.
#ifndef __IA16_CMODEL_IS_FAR_TEXT
leaw 6(%bp), %ax
#else
leaw 8(%bp), %ax
#endif
movw %ax, 14(%bx) # Save %sp.
movw 4(%bp), %ax
movw %es, 16(%bx) # Save %es.
movw %ax, 18(%bx) # Save %ip.
#ifdef __IA16_CMODEL_IS_FAR_TEXT
movw 6(%bp), %ax
movw %ax, 20(%bx) # Save %cs.
#endif
# Return 0 to caller.
xorw %ax, %ax
popw %bp
popw %bx
RET_(2)
#ifdef __ELKS__
.global _longjmp
_longjmp:
#else
.global longjmp
longjmp:
#endif
movw %sp, %bp
#ifndef __IA16_CALLCVT_REGPARMCALL
# ifndef __IA16_CMODEL_IS_FAR_TEXT
movw 4(%bp), %ax # Get retval.
movw 2(%bp), %bp # Get jmpbuf.
# else
movw 6(%bp), %ax
movw 4(%bp), %bp
# endif
#else
movw %ax, %bp
movw %dx, %ax
#endif
movw %ax, 0(%bp) # Store retval in jmpbuf[0].
lesw 14(%bp),%sp # Restore %sp and %es.
#ifdef __IA16_CMODEL_IS_FAR_TEXT
pushw 20(%bp)
#endif
pushw 18(%bp) # Restore %ip.
movw 10(%bp),%di # Restore %di.
movw 8(%bp), %si # Restore %si.
movw 6(%bp), %dx # Restore %dx.
movw 4(%bp), %cx # Restore %cx.
movw 2(%bp), %bx # Restore %bx.
movw 0(%bp), %ax # Restore %ax.
movw 12(%bp),%bp # Restore %bp.
# If caller attempted to return 0, return 1 instead.
cmpw $1, %ax
adcw $0, %ax
RET_(2)
|
stsp/newlib-ia16
| 1,479
|
newlib/libc/machine/mt/setjmp.S
|
# setjmp/longjmp for mt.
#
# The jmpbuf looks like this:
#
# Register jmpbuf offset
# R0 --- --
# R1 0x4 4
# R2 0x8 8
# R3 0xc 12
# R4 0x10 16
# R5 0x14 20
# R6 0x18 24
# R7 0x1c 28
# R8 0x20 32
# R9 ---- --
# R10 ---- --
# R11 0x2c 44
# R12 0x30 48
# R13 0x34 52
# R14 0x38 56
# R15 0x3c 60
#
# R1 contains the pointer to jmpbuf
.text
.global setjmp
.type setjmp ,@function
setjmp:
stw r1, r1, #4
or r0, r0, r0
stw r2, r1, #8
or r0, r0, r0
stw r3, r1, #12
or r0, r0, r0
stw r4, r1, #16
or r0, r0, r0
stw r5, r1, #20
or r0, r0, r0
stw r6, r1, #24
or r0, r0, r0
stw r7, r1, #28
or r0, r0, r0
stw r8, r1, #32
or r0, r0, r0
stw r11, r1, #44
or r0, r0, r0
stw r12, r1, #48
or r0, r0, r0
stw r13, r1, #52
or r0, r0, r0
stw r14, r1, #56
or r0, r0, r0
stw r15, r1, #60
jal r0, r14
addi r11, r0, #0
.Lend1:
.size setjmp,.Lend1-setjmp
.global longjmp
.type longjmp,@function
longjmp:
or r9, r1, r1
or r11, r2, r2
ldw r1, r1, #4
or r0, r0, r0
ldw r2, r1, #8
or r0, r0, r0
ldw r3, r1, #12
or r0, r0, r0
ldw r4, r1, #16
or r0, r0, r0
ldw r5, r1, #20
or r0, r0, r0
ldw r6, r1, #24
or r0, r0, r0
ldw r7, r1, #28
or r0, r0, r0
ldw r8, r1, #32
or r0, r0, r0
ldw r12, r1, #48
or r0, r0, r0
ldw r13, r1, #52
or r0, r0, r0
ldw r14, r1, #56
or r0, r0, r0
ldw r15, r1, #60
brne r0, r11, .L01
or r0, r0, r0
addi r11, r0, #1
.L01:
jal r0, r14
or r0, r0, r0
.Lend2:
.size longjmp,.Lend2-longjmp2
|
stsp/newlib-ia16
| 2,546
|
newlib/libc/machine/frv/setjmp.S
|
# setjmp/longjmp for Frv. The jmpbuf looks like this:
#
# Register jmpbuf offset
# R16-R31 0x0-0x03c
# R48-R63 0x40-0x7c
# FR16-FR31 0x80-0xbc
# FR48-FR63 0xc0-0xfc
# LR 0x100
# SP 0x104
# FP 0x108
#
# R8 contains the pointer to jmpbuf
#include <frv-asm.h>
	.text
	.global EXT(setjmp)
	.type EXT(setjmp),@function

/* int setjmp (jmp_buf buf)

   buf arrives in gr8 (see the offset table in the header comment).
   Stores the even/odd callee-saved GPR pairs (GR16-GR31, and
   GR48-GR63 on targets with more than 32 GPRs), the corresponding
   FPR pairs when the target has FPRs, and finally LR, SP and FP.
   Returns 0 in gr8.  */
EXT(setjmp):
	stdi gr16, @(gr8,0)		/* GR16..GR31 -> buf[0x00..0x3c] */
	stdi gr18, @(gr8,8)
	stdi gr20, @(gr8,16)
	stdi gr22, @(gr8,24)
	stdi gr24, @(gr8,32)
	stdi gr26, @(gr8,40)
	stdi gr28, @(gr8,48)
	stdi gr30, @(gr8,56)
#if __FRV_GPR__ != 32
	stdi gr48, @(gr8,64)		/* GR48..GR63 -> buf[0x40..0x7c] */
	stdi gr50, @(gr8,72)
	stdi gr52, @(gr8,80)
	stdi gr54, @(gr8,88)
	stdi gr56, @(gr8,96)
	stdi gr58, @(gr8,104)
	stdi gr60, @(gr8,112)
	stdi gr62, @(gr8,120)
#endif
#if __FRV_FPR__ != 0
	stdfi fr16, @(gr8,128)		/* FR16..FR31 -> buf[0x80..0xbc] */
	stdfi fr18, @(gr8,136)
	stdfi fr20, @(gr8,144)
	stdfi fr22, @(gr8,152)
	stdfi fr24, @(gr8,160)
	stdfi fr26, @(gr8,168)
	stdfi fr28, @(gr8,176)
	stdfi fr30, @(gr8,184)
#if __FRV_FPR__ != 32
	stdfi fr48, @(gr8,192)		/* FR48..FR63 -> buf[0xc0..0xfc] */
	stdfi fr50, @(gr8,200)
	stdfi fr52, @(gr8,208)
	stdfi fr54, @(gr8,216)
	stdfi fr56, @(gr8,224)
	stdfi fr58, @(gr8,232)
	stdfi fr60, @(gr8,240)
	stdfi fr62, @(gr8,248)
#endif
#endif
	movsg lr, gr4			/* LR is a special reg: stage it via gr4 */
	sti gr4, @(gr8,256)		/* buf[0x100] = LR */
	sti sp, @(gr8,260)		/* buf[0x104] = SP */
	sti fp, @(gr8,264)		/* buf[0x108] = FP */
	mov gr0,gr8			/* return value gr8 = 0 */
	ret
.Lend1:
	.size EXT(setjmp),.Lend1-EXT(setjmp)
	.global EXT(longjmp)
	.type EXT(longjmp),@function

/* void longjmp (jmp_buf buf, int val)

   buf arrives in gr8, val in gr9.  Restores everything EXT(setjmp)
   saved and returns through the restored LR, delivering val as the
   apparent setjmp return value -- except that val == 0 is replaced
   by 1, as ISO C requires.  */
EXT(longjmp):
	lddi @(gr8,0), gr16		/* buf[0x00..0x3c] -> GR16..GR31 */
	lddi @(gr8,8), gr18
	lddi @(gr8,16), gr20
	lddi @(gr8,24), gr22
	lddi @(gr8,32), gr24
	lddi @(gr8,40), gr26
	lddi @(gr8,48), gr28
	lddi @(gr8,56), gr30
#if __FRV_GPR__ != 32
	lddi @(gr8,64), gr48		/* buf[0x40..0x7c] -> GR48..GR63 */
	lddi @(gr8,72), gr50
	lddi @(gr8,80), gr52
	lddi @(gr8,88), gr54
	lddi @(gr8,96), gr56
	lddi @(gr8,104), gr58
	lddi @(gr8,112), gr60
	lddi @(gr8,120), gr62
#endif
#if __FRV_FPR__ != 0
	lddfi @(gr8,128), fr16		/* buf[0x80..0xbc] -> FR16..FR31 */
	lddfi @(gr8,136), fr18
	lddfi @(gr8,144), fr20
	lddfi @(gr8,152), fr22
	lddfi @(gr8,160), fr24
	lddfi @(gr8,168), fr26
	lddfi @(gr8,176), fr28
	lddfi @(gr8,184), fr30
#if __FRV_FPR__ != 32
	lddfi @(gr8,192), fr48		/* buf[0xc0..0xfc] -> FR48..FR63 */
	lddfi @(gr8,200), fr50
	lddfi @(gr8,208), fr52
	lddfi @(gr8,216), fr54
	lddfi @(gr8,224), fr56
	lddfi @(gr8,232), fr58
	lddfi @(gr8,240), fr60
	lddfi @(gr8,248), fr62
#endif
#endif
	ldi @(gr8,256), gr4		/* restore return address via gr4 */
	movgs gr4,lr
	ldi @(gr8,260), sp
	ldi @(gr8,264), fp
	# Value to return is in r9. If zero, return 1
	cmp gr9, gr0, icc0		/* icc0: val compared with 0 */
	setlos #1, gr8			/* default return value: 1 */
	ckne icc0, cc4			/* cc4 true when val != 0 */
	cmov gr9, gr8, cc4, 1		/* val != 0: return val instead */
	ret
.Lend2:
	.size EXT(longjmp),.Lend2-EXT(longjmp)
|
stsp/newlib-ia16
| 2,494
|
newlib/libc/machine/m68k/memset.S
|
/* a-memset.s -- memset, optimised for fido asm
*
* Copyright (c) 2007 mocom software GmbH & Co KG)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "m68kasm.h"
	.text
	.align 4
	.globl SYM(memset)
	.type SYM(memset), @function

| void *memset (void *dest, int value, size_t len)
| C calling convention: dest at 4(sp), value at 8(sp), len at 12(sp).
| Returns the original dest pointer in d0.
|
| memset, optimised
|
| strategy:
| - no argument testing (the original memset from the GNU lib does
|   no checking either)
| - make sure the destination pointer (the write pointer) is long word
|   aligned.  This is the best you can do, because writing to unaligned
|   addresses can be the most costly thing one could do.
| - we fill long word wise if possible
|
| VG, 2006
|
| bugfixes:
| - distribution of byte value improved - in cases someone gives
|   non-byte value
| - residue byte transfer was not working
|
| VG, April 2007
|
SYM(memset):
	move.l 4(sp),a0 | dest ptr
	move.l 8(sp),d0 | value
	move.l 12(sp),d1 | len
	cmp.l #16,d1
	blo .Lbset | fewer than 16 bytes: plain byte fills
|
	move.l d2,-(sp) | need a register
	move.b d0,d2 | replicate the low byte into all 4 bytes of d0
	lsl.l #8,d0
	move.b d2,d0
	move.w d0,d2
	swap d0 | rotate 16
	move.w d2,d0
|
	move.l a0,d2 | copy of dest
	neg.l d2 | misalignment 1 2 3 ==> bytes needed 3 2 1
	and.l #3,d2
	beq 2f | already long-aligned
|
	sub.l d2,d1 | fix length
	lsr.l #1,d2 | word align needed?  (bit 0 -> carry)
	bcc 1f
	move.b d0,(a0)+ | fill byte
1:
	lsr.l #1,d2 | long align needed?  (bit 1 -> carry)
	bcc 2f
	move.w d0,(a0)+ | fill word
2:
	move.l d1,d2 | number of long transfers (at least 3)
	lsr.l #2,d2
	subq.l #1,d2 | pre-decrement for the dbra-style loop below
1:
	move.l d0,(a0)+ | fill long words
.Llset:
#if !defined (__mcoldfire__)
	dbra d2,1b | dbra counts on the low word only ...
	sub.l #0x10000,d2 | ... so step the upper word by hand
#else
	subq.l #1,d2
#endif
	bpl 1b
	and.l #3,d1 | residue byte count (0..3)
	move.l (sp)+,d2 | restore d2
	bra .Lbset | enter the byte loop at its decrement
1:
	move.b d0,(a0)+ | fill residue bytes
.Lbset:
#if !defined (__mcoldfire__)
	dbra d1,1b | loop until done
#else
	subq.l #1,d1
	bpl 1b
#endif
	move.l 4(sp),d0 | return value: the original dest pointer
	rts
|
stsp/newlib-ia16
| 2,827
|
newlib/libc/machine/m68k/memcpy.S
|
/* a-memcpy.s -- memcpy, optimised for m68k asm
*
* Copyright (c) 2007 mocom software GmbH & Co KG)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "m68kasm.h"
#if defined (__mcoldfire__) || defined (__mc68010__) || defined (__mc68020__) || defined (__mc68030__) || defined (__mc68040__) || defined (__mc68060__)
# define MISALIGNED_OK 1
#else
# define MISALIGNED_OK 0
#endif
	.text
	.align 4
	.globl SYM(memcpy)
	.type SYM(memcpy), @function
/* void *memcpy (void *dest, const void *src, size_t len)
 * C calling convention: dest at 4(sp), src at 8(sp), len at 12(sp).
 * Returns the original dest pointer in d0.
 *
 * memcpy, optimised
 *
 * strategy:
 * - no argument testing (the original memcpy from the GNU lib does
 *   no checking either)
 * - make sure the destination pointer (the write pointer) is long word
 *   aligned.  This is the best you can do, because writing to unaligned
 *   addresses can be the most costly thing you could do.
 *   (On cores without misaligned-access support -- MISALIGNED_OK == 0 --
 *   any misalignment of src or dest falls back to the byte loop.)
 * - Once you have figured that out, we do a little loop unrolling
 *   to further improve speed.
 */
SYM(memcpy):
	move.l 4(sp),a0 | dest ptr
	move.l 8(sp),a1 | src ptr
	move.l 12(sp),d1 | len
	cmp.l #8,d1 | if fewer than 8 bytes to transfer,
	blo .Lresidue | do not optimise
#if !MISALIGNED_OK
	/* Goto .Lresidue if either dest or src is not 4-byte aligned */
	move.l a0,d0
	and.l #3,d0
	bne .Lresidue
	move.l a1,d0
	and.l #3,d0
	bne .Lresidue
#else /* MISALIGNED_OK */
	/* align dest */
	move.l a0,d0 | copy of dest
	neg.l d0
	and.l #3,d0 | look for the lower two bits only
	beq 2f | already aligned?
	sub.l d0,d1
	lsr.l #1,d0 | word align needed?
	bcc 1f
	move.b (a1)+,(a0)+
1:
	lsr.l #1,d0 | long align needed?
	bcc 2f
	move.w (a1)+,(a0)+
2:
#endif /* !MISALIGNED_OK */
	/* long word transfers */
	move.l d1,d0
	and.l #3,d1 | byte residue (0..3), done at .Lresidue
	lsr.l #3,d0 | d0 = 8-byte units; carry = odd long word
	bcc 1f | no 4-byte residue
	move.l (a1)+,(a0)+
1:
	lsr.l #1,d0 | d0 = 16-byte transfers; carry = odd 8-byte unit
	bcc .Lcopy | no 8-byte residue: enter loop at its counter
	bra .Lcopy8 | copy the odd 8 bytes first
1:
	move.l (a1)+,(a0)+ | 16-byte iteration: four long moves
	move.l (a1)+,(a0)+
.Lcopy8:
	move.l (a1)+,(a0)+
	move.l (a1)+,(a0)+
.Lcopy:
#if !defined (__mcoldfire__)
	dbra d0,1b | dbra counts on the low word only ...
	sub.l #0x10000,d0 | ... so step the upper word by hand
#else
	subq.l #1,d0
#endif
	bpl 1b
	bra .Lresidue
1:
	move.b (a1)+,(a0)+ | move residue bytes
.Lresidue:
#if !defined (__mcoldfire__)
	dbra d1,1b | loop until done
#else
	subq.l #1,d1
	bpl 1b
#endif
	move.l 4(sp),d0 | return value: the original dest pointer
	rts
|
stsp/newlib-ia16
| 2,246
|
newlib/libc/machine/epiphany/setjmp.S
|
/* setjmp and longjmp
Copyright (c) 2011, Adapteva, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Adapteva nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. */
.file "setjmp.S"
	.section .text
	.align 4
	.global _setjmp
	.type _setjmp, %function

; int _setjmp (jmp_buf env)
; env arrives in r0.  strd stores a register pair at a doubleword
; index, so slots 0..8 hold {lr,r15} and the callee-saved pairs
; r4..r11 and r32..r39 (bytes 0..71); str uses a word index, so
; slot 18 (byte 72) holds sp, just past the pairs.  Returns 0.
_setjmp:
	strd lr,[r0]       ; {lr, r15} -> env slot 0
	strd r4,[r0,1]
	strd r6,[r0,2]
	strd r8,[r0,3]
	strd r10,[r0,4]
	strd r32,[r0,5]
	strd r34,[r0,6]
	strd r36,[r0,7]
	strd r38,[r0,8]
	str sp,[r0,18]     ; stack pointer -> word slot 18 (byte 72)
	mov r0,#0          ; return 0
	rts
	.size _setjmp, .-_setjmp
	.global _longjmp

; void _longjmp (jmp_buf env, int val)
; env arrives in r0, val in r1.  Restores the register pairs and sp
; saved by _setjmp above, then returns through the restored lr with
; r0 = val, or r0 = 1 when val == 0 (ISO C requirement).
_longjmp:
	ldrd lr,[r0] ; return address / r15
	ldrd r4,[r0,1]
	ldrd r6,[r0,2]
	ldrd r8,[r0,3]
	ldrd r10,[r0,4]
	ldrd r32,[r0,5]
	ldrd r34,[r0,6]
	ldrd r36,[r0,7]
	ldrd r38,[r0,8]
	ldr sp,[r0,18]
	sub r1,r1,0  ; set condition flags from val (value unchanged)
	mov r0,#1    ; default return value: 1
	movne r0,r1  ; val != 0: return val instead
	jr lr
	.size _longjmp, .-_longjmp
|
stsp/newlib-ia16
| 2,800
|
newlib/libc/machine/microblaze/longjmp.S
|
/* Copyright (c) 2001, 2009 Xilinx, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Xilinx nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* longjmp - non-local jump to a saved stack context
* args - r5 - jmp_buf
* r6 - val
*
* jmpbuf frame structure
* ---------------------
*
* +-------------+ + 0
* | r1 |
* +-------------+ + 4
* | r13 |
* | . |
* | . |
* | . |
* | r31 |
* +-------------+ + 80
* | . |
* | . |
*/
	.globl longjmp
	.section .text
	.align 2
	.ent longjmp

/* void longjmp (jmp_buf env, int val)
   env arrives in r5, val in r6 (per the args note in the header).
   Reloads r1 (the MicroBlaze stack pointer) and r13..r31 from the
   jmpbuf laid out in the header diagram, then returns through the
   restored r15 with the return value r3 = val.
   NOTE(review): val is passed through unchanged, so longjmp(env, 0)
   makes setjmp appear to return 0 here; ISO C requires 1.  Confirm
   whether a wrapper elsewhere handles that case.  */
longjmp:
	lwi r1, r5, 0		/* restore stack pointer */
	lwi r13, r5, 4
	lwi r14, r5, 8
	lwi r15, r5, 12		/* return address used by rtsd below */
	lwi r16, r5, 16
	lwi r17, r5, 20
	lwi r18, r5, 24
	lwi r19, r5, 28
	lwi r20, r5, 32
	lwi r21, r5, 36
	lwi r22, r5, 40
	lwi r23, r5, 44
	lwi r24, r5, 48
	lwi r25, r5, 52
	lwi r26, r5, 56
	lwi r27, r5, 60
	lwi r28, r5, 64
	lwi r29, r5, 68
	lwi r30, r5, 72
	lwi r31, r5, 76
	rtsd r15, 8		/* return to saved r15 + 8 */
	or r3, r0, r6		/* delay slot: return value r3 = val */
	.end longjmp
|
stsp/newlib-ia16
| 2,776
|
newlib/libc/machine/microblaze/setjmp.S
|
/* Copyright (c) 2001, 2009 Xilinx, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Xilinx nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* setjmp - save stack context for non-local goto
* args - r5 - jmp_buf
*
* jmpbuf frame structure
* ---------------------
*
* +-------------+ + 0
* | r1 |
* +-------------+ + 4
* | r13 |
* | . |
* | . |
* | . |
* | r31 |
* +-------------+ + 80
* | . |
* | . |
*/
	.globl setjmp
	.section .text
	.align 2
	.ent setjmp

/* int setjmp (jmp_buf env)
   env arrives in r5.  Stores r1 (the MicroBlaze stack pointer) and
   r13..r31 -- including r15, the return address -- at the fixed
   offsets shown in the header diagram, then returns 0 in r3.  */
setjmp:
	swi r1, r5, 0		/* env[0] = stack pointer */
	swi r13, r5, 4
	swi r14, r5, 8
	swi r15, r5, 12		/* return address, reused by longjmp */
	swi r16, r5, 16
	swi r17, r5, 20
	swi r18, r5, 24
	swi r19, r5, 28
	swi r20, r5, 32
	swi r21, r5, 36
	swi r22, r5, 40
	swi r23, r5, 44
	swi r24, r5, 48
	swi r25, r5, 52
	swi r26, r5, 56
	swi r27, r5, 60
	swi r28, r5, 64
	swi r29, r5, 68
	swi r30, r5, 72
	swi r31, r5, 76
	rtsd r15, 8		/* return to caller */
	or r3, r0, r0		/* delay slot: return value r3 = 0 */
	.end setjmp
|
stsp/newlib-ia16
| 1,579
|
newlib/libc/machine/spu/fiscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build fiscanf by compiling fscanf.S with every occurrence of the
   name "fscanf" renamed to "fiscanf" -- presumably the integer-only
   variant in the style of newlib's iprintf/iscanf family; the
   implementation is otherwise identical.  */
#define fscanf fiscanf
#include "fscanf.S"
|
stsp/newlib-ia16
| 1,579
|
newlib/libc/machine/spu/siscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build siscanf by compiling sscanf.S with every occurrence of the
   name "sscanf" renamed to "siscanf" -- presumably the integer-only
   variant in the style of newlib's iprintf/iscanf family; the
   implementation is otherwise identical.  */
#define sscanf siscanf
#include "sscanf.S"
|
stsp/newlib-ia16
| 1,582
|
newlib/libc/machine/spu/fiprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build fiprintf by compiling fprintf.S with every occurrence of the
   name "fprintf" renamed to "fiprintf" -- presumably the integer-only
   variant in the style of newlib's iprintf family; the implementation
   is otherwise identical.  */
#define fprintf fiprintf
#include "fprintf.S"
|
stsp/newlib-ia16
| 5,817
|
newlib/libc/machine/spu/stack_reg_va.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file contains code use to construct a PIC, spu side, syscall
* function with variable parameters in accordance with the CBE ABI.
*
* This function is equivalent to constructing a va_list structure and
* calling the va_list form of the function. Therefore, for example,
* a printf function stack frame will look like this:
*
* | Stack | high memory
* | Parms |
* | |
* |------------|
* | Link Reg |
* |------------|
* | Back Chain |<-----. <---- input SP
* |------------| |
* | Reg 74 | |
* |------------| |
* | Reg 73 | |
* |------------| |
* // ... // |
* |------------| |
* | Reg 5 | |
* |------------| |
* | Reg 4 |<--. |
* |------------| | |
* va_list.| call_stack |------'
* |------------| |
* va_list.| next_arg |---'
* |------------|
* | format (r3)| <---- start of parameters
* |------------| |------------|
* | stack | | |
* | code | |(Back Chain)| <---- output SP
* | 1-3 qwords | <---- code_ptr `------------'
* `------------'
* low memory
*
* This was written in assembly so that it is smaller than what would
* be produced by using va_start.
*/
#include "c99ppe.h"
/* Register aliases used by the stack-code generator below.  */
#define parms $2 /* Number of fixed arguments */
#define offset $67
#define flag $68
#define regdec $69
#define link $70
#define code_ptr $71
#define ptr $72
#define inst $73
#define tmp $74
	.text
	.global __stack_reg_va
	.type __stack_reg_va, @function

/* __stack_reg_va -- spill the argument registers and build a va_list
   on the stack, laid out as pictured in the file header comment.
   In:   $2 ($parms) = number of fixed (named) arguments; 1..3 handled
         $0          = caller's return address (callers do "brsl $0, ...")
         $3..$74     = the argument registers to be spilled
   Out:  $sp lowered by 16*(SPE_STACK_REGS+2+2) with a back chain
         stored at the new base; the va_list pair (call_stack /
         next_arg) and the register save area filled in above it.
   Note: the register spill itself runs from code written onto the
   stack at run time (see "stack code" below), which stores one
   register and then rewrites its own store instruction to target the
   next-lower register until it reaches $3.  */
__stack_reg_va:
	/* Save registers 69-74 explicitly so that we have some
	 * working registers.
	 */
	stqd $74, 16*(-1)($sp)
	stqd $73, 16*(-2)($sp)
	stqd $72, 16*(-3)($sp)
	stqd $71, 16*(-4)($sp)
	stqd $70, 16*(-5)($sp)
	stqd $69, 16*(-6)($sp)
	/* Construct self-modifying stack code that saves the remaining
	 * volatile registers onto the stack.
	 */
	il regdec, -1 /* decrement for the register field of the store insn */
	shlqbyi regdec, regdec, 12
	il tmp, -(SPE_STACK_REGS+2+3)*16
	a code_ptr, $sp, tmp /* code_ptr = where the stack code will live */
	lqr tmp, save_regs_1 /* copy the stack-code template ... */
	stqd tmp, 0(code_ptr) /* ... below the register save area */
	lqr inst, save_regs_2 /* seed instruction: "stqd $68, -16(ptr)" */
	ai ptr, $sp, 16*(-6) /* ptr = next free save slot */
	sync /* flush before executing freshly written code */
	bisl link, code_ptr /* branch to the constructed stack code */
	/* Adjust pointer so that it points to the first variable
	 * argument on the stack.
	 */
	ai offset, parms, -1 /* offset = parms - 1 */
	mpyi offset, offset, 16 /* offset = offset * 16 */
	a ptr, ptr, offset /* ptr = ptr + offset */
	/* Store the va_list to the parameter list.
	 */
	stqd $sp, 16*(-1)(ptr) /* va_list.call_stack */
	stqd ptr, 16*(-2)(ptr) /* va_list.next_arg */
	/* Make $3 store address.
	 */
	ai offset, parms, 2 /* offset = parms + 2 */
	mpyi offset, offset, -16 /* offset = offset * -16 */
	a ptr, ptr, offset /* ptr = ptr + offset */
	/* Save all the fixed (non-variable) arguments on the stack;
	 * fall through to store only as many of $3..$5 as parms says.
	 */
	ceqi flag, parms, 0x01 /* if(parms==1) flag=0xFFFFFFFF */
	brnz flag, reg_3 /* if(flag!=0) jump */
	ceqi flag, parms, 0x02 /* if(parms==2) flag=0xFFFFFFFF */
	brnz flag, reg_4 /* if(flag!=0) jump */
	stqd $5, 16*2(ptr)
reg_4:
	stqd $4, 16*1(ptr)
reg_3:
	stqd $3, 0(ptr)
	il $3, -16*(SPE_STACK_REGS+2+2)
	stqx $sp, $3, $sp /* save back chain */
	a $sp, $sp, $3 /* push the new frame */
	bi $0 /* return to caller */
/***************************** stack code *********************************************/
/* The following code is copied into the stack for re-entrant,
 * self-modifying code execution.  Each pass stores one volatile
 * register, then decrements the register number encoded in its own
 * store instruction, stopping after register $3.
 */
	.balignl 16, 0
save_regs_1:
	stqd inst, 16(code_ptr) /* (re)write the store instruction below */
	sync
	a inst, inst, regdec /* decrement register number in the instruction */
	ceqbi tmp, inst, 3 /* if (reg-num == 3) tmp = 0x000000FF 000..0 */
save_regs_2:
	stqd $68, -16(ptr) /* template: store current register, then step down */
	ai ptr, ptr, -16
	brz tmp, save_regs_1 /* if (tmp == 0) jump: more registers to save */
	bi link /* done: return to __stack_reg_va proper */
	.size __stack_reg_va, .-__stack_reg_va
|
stsp/newlib-ia16
| 2,250
|
newlib/libc/machine/spu/fprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
	.text
	.align 4
	GLOBL fprintf
	.type fprintf, @function

/* int fprintf (FILE *fp, const char *fmt, ...)
   Spills the argument registers and builds a va_list via
   __stack_reg_va (2 fixed args), replaces the saved FILE* with the
   dereferenced fp->_fp, then signals the PPE with SPE_C99_VFPRINTF
   to do the actual formatting on our behalf.  */
fprintf:
	stqd $0, 16($sp) /* save caller address */
	il $2, 2 /* number of fixed arguments */
	brsl $0, __stack_reg_va /* save registers to the stack frame */
	brsl $0, __check_init
	lqd $3, 16*2($sp) /* $3 <- saved FP on the stack frame */
	lqd $2, 0($3) /* FP = fp->_fp */
	rotqby $2, $2, $3 /* rotate the loaded quadword so the word at
	                     address $3 lands in the preferred slot */
	stqd $2, 16*2($sp) /* replace FP on the stack frame */
	il $3, SPE_C99_SIGNALCODE
	il $4, SPE_C99_VFPRINTF
	ai $5, $sp, 16*2 /* data ($3 save address) */
	brsl $0, __send_to_ppe
	il $2, 16*(SPE_STACK_REGS+2+2)
	a $sp, $sp, $2 /* pop the frame __stack_reg_va pushed */
	lqd $0, 16($sp) /* load caller address */
	bi $0 /* return to caller */
	.size fprintf, .-fprintf
|
stsp/newlib-ia16
| 3,694
|
newlib/libc/machine/spu/spu-mcount.S
|
/*
(C) Copyright IBM Corp. 2008
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of IBM nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Author: Ken Werner <ken.werner@de.ibm.com>
*/
/* _mcount extracts the address of the function just entered and the address
of the caller of that function and then calls __mcount_internal. The
prologue calls mcount without saving any registers. The return address is
stored in $75. The _mcount function has to:
- create a new stack frame
- save registers $2 to $75 on the stack
- copy the two addresses ($0 and $75) into the argument registers $3 and $4
- call __mcount_internal
- restore registers
- return to $75 */
/* The following two convenience macros assist in the coding of the
saving and restoring the register.
saveregs first, last Saves registers from first to the last.
restoreregs first, last Restores registers from last down to first.
Note: first must be less than or equal to last. */
/* saveregs first,last: recursively emit stqd for registers
   $first..$last into 16-byte slots of the current frame.
   restoreregs first,last: the matching lqd sequence, emitted from
   $last down to $first.  first must be <= last in both.  */
	.macro saveregs first, last
	stqd $\first, \first*16($SP)
	.if \last-\first
	saveregs "(\first+1)",\last
	.endif
	.endm
	.macro restoreregs first, last
	lqd $\last, \last*16($SP)
	.if \last-\first
	restoreregs \first,"(\last-1)"
	.endif
	.endm
/* _mcount needs to be resident since the overlay manager uses the scratch
   registers too. */
	.text
	.align 3 /* 8 byte alignment. */
	.global _mcount
	.type _mcount, @function

/* Profiling hook; see the file header for the full contract.
   In: $0 = caller's caller (frompc), $75 = return address into the
   instrumented function (selfpc).  Calls __mcount_internal(frompc,
   selfpc) with all of $2..$75 preserved.  */
_mcount:
	stqd $lr, 16($sp) /* Save link register in the callers stack frame. */
	stqd $lr, -1216($sp) /* Store back pointer.  NOTE(review): this stores
	                        $lr, not $sp, as the back chain -- confirm
	                        this is intended. */
	il $lr, -1216 /* Push a new stack frame. */
	a $sp, $sp, $lr /* Frame size: 16 * (74 + 2) = 1216. */
	/* Save registers $2 to $75 on the stack. */
	saveregs 2, 75
	/* Bring the __mcount_internal arguments in place. */
	lqd $3, 1232($sp) /* frompc (the link register). */
	ori $4, $75, 0 /* selfpc (the gcc prologue puts "brsl $75, _mcount" in
	front of every function). */
	brsl $lr, __mcount_internal
	/* Restore registers $2 to $75 from the stack. */
	restoreregs 2, 75
	il $lr, 1216
	a $sp, $sp, $lr /* Pop the stack frame. */
	lqd $lr, 16($sp) /* Restore link register. */
	bi $75 /* Branch back into the instrumented function. */
|
stsp/newlib-ia16
| 2,043
|
newlib/libc/machine/spu/scanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
	.text
	.align 4
	GLOBL scanf
	.type scanf, @function

/* int scanf (const char *fmt, ...)
   Spills the argument registers and builds a va_list via
   __stack_reg_va (1 fixed arg), then signals the PPE with
   SPE_C99_VSCANF to perform the conversion on our behalf.  */
scanf:
	stqd $0, 16($sp) /* save caller address */
	il $2, 1 /* number of fixed arguments */
	brsl $0, __stack_reg_va /* save registers to the stack frame */
	il $3, SPE_C99_SIGNALCODE
	il $4, SPE_C99_VSCANF
	ai $5, $sp, 16*2 /* data ($3 save address) */
	brsl $0, __send_to_ppe
	il $2, 16*(SPE_STACK_REGS+2+2)
	a $sp, $sp, $2 /* pop the frame __stack_reg_va pushed */
	lqd $0, 16($sp) /* load caller address */
	bi $0 /* return to caller */
	.size scanf, .-scanf
|
stsp/newlib-ia16
| 2,244
|
newlib/libc/machine/spu/fscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* int fscanf(FILE *fp, const char *fmt, ...)  --  SPU-side stub.
   Like scanf, but first ensures stdio is initialized (__check_init)
   and translates the SPU-side FILE* into the PPE-side handle stored in
   the structure's first word before forwarding to the PPE
   (op SPE_C99_VFSCANF). */
#include "c99ppe.h"
.text
.align 4
GLOBL fscanf
.type fscanf, @function
fscanf:
stqd $0, 16($sp) /* save caller address */
il $2, 2 /* number of fixed arguments (fp, format) */
brsl $0, __stack_reg_va /* save register to the stack frame */
brsl $0, __check_init
lqd $3, 16*2($sp) /* $3 <- saved FP on the stack frame */
lqd $2, 0($3) /* FP = fp->_fp */
rotqby $2, $2, $3 /* rotate loaded quadword so fp->_fp lands in the preferred slot */
stqd $2, 16*2($sp) /* replace FP on the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VFSCANF /* PPE operation selector */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2) /* pop frame built by __stack_reg_va */
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size fscanf, .-fscanf
|
stsp/newlib-ia16
| 2,061
|
newlib/libc/machine/spu/snprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* int snprintf(char *str, size_t size, const char *fmt, ...)
   SPU-side stub: spills the variadic argument registers to the stack
   and forwards the formatted-print request to the PPE
   (op SPE_C99_VSNPRINTF). */
#include "c99ppe.h"
.text
.align 4
GLOBL snprintf
.type snprintf, @function
snprintf:
stqd $0, 16($sp) /* save caller address */
il $2, 3 /* number of fixed arguments (str, size, format) */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VSNPRINTF /* PPE operation selector */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2) /* pop frame built by __stack_reg_va */
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size snprintf, .-snprintf
|
stsp/newlib-ia16
| 6,260
|
newlib/libc/machine/spu/spu_timer_flih.S
|
/*
(C) Copyright IBM Corp. 2008
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of IBM nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* First-level interrupt handler. */
/* The following two convenience macros assist in the coding of the
saving and restoring the volatile register starting from register
2 up to register 79.
saveregs first, last Saves registers from first to the last.
restoreregs first, last Restores registers from last down to first.
Note: first must be less than or equal to last. */
/* saveregs runs BEFORE the flih stack frame is allocated, so it stores
   through negative offsets from the incoming $SP; restoreregs runs
   AFTER "a $SP, $SP, $2" (frame size -(STACK_SKIP+82)*16), so it loads
   through positive offsets.  The two agree: a register saved at
   -(STACK_SKIP+n)*16 from the old $SP sits at (82-n)*16 from the new
   one.  Both macros expand recursively, one stqd/lqd per register. */
.macro saveregs first, last
stqd $\first, -(STACK_SKIP+\first)*16($SP)
.if \last-\first
saveregs "(\first+1)",\last
.endif
.endm
.macro restoreregs first, last
lqd $\last, (82-\last)*16($SP)
.if \last-\first
restoreregs \first,"(\last-1)"
.endif
.endm
.section .interrupt,"ax"
.align 3
.type spu_flih, @function
/* First-level interrupt handler.  Saves the volatile registers, acks
   the pending events, then dispatches each pending event (highest bit
   first) to its second-level handler from __spu_slih_handlers.  The
   register saves/restores are interleaved with channel and mask
   arithmetic, apparently to overlap the two SPU pipelines -- keep the
   instruction order as is. */
spu_flih:
/* Adjust the stack pointer to skip the maximum register save area
(STACK_SKIP quadword registers) in case an interrupt occurred while
executing a leaf function that used the stack area without actually
allocating its own stack frame. */
.set STACK_SKIP, 125
/* Save the current link register on a new stack frame for the
normal spu_flih() version of this file. */
stqd $0, -(STACK_SKIP+80)*16($SP)
stqd $SP, -(STACK_SKIP+82)*16($SP) /* Save back chain pointer. */
saveregs 2, 39
il $2, -(STACK_SKIP+82)*16 /* Stack frame size. */
rdch $3, $SPU_RdEventStat /* Read event status. */
rdch $6, $SPU_RdEventMask /* Read event mask. */
hbrp /* Open a slot for instruction prefetch. */
saveregs 40,59
clz $4, $3 /* Get first slih index (count of leading zeros = bit number). */
stqd $6, -(STACK_SKIP+1)*16($SP) /* Save event mask on stack (reloaded below at 81*16). */
saveregs 60, 67
/* Do not disable/ack the decrementer event here.
The timer library manages this and expects it
to be enabled upon entry to the SLIH. */
il $7, 0x20 /* 0x20 = decrementer event bit */
andc $5, $3, $7 /* pending events minus the decrementer */
andc $7, $6, $5 /* Clear event bits. */
saveregs 68, 69
wrch $SPU_WrEventAck, $3 /* Ack events(s) - include decrementer event. */
wrch $SPU_WrEventMask, $7 /* Disable event(s) - exclude decrementer event. */
saveregs 70, 79
a $SP, $SP, $2 /* Instantiate flih stack frame. */
next_event:
/* Fetch and dispatch the event handler for the first non-zero event. The
dispatch handler is indexed into the __spu_slih_handlers array using the
count of zero off the event status as an index. */
ila $5, __spu_slih_handlers /* Slih array offset. */
shli $4, $4, 2 /* Slih entry offset (index * sizeof(void*)). */
lqx $5, $4, $5 /* Load slih address. */
rotqby $5, $5, $4 /* Rotate to word 0. */
bisl $0, $5 /* Branch to slih; slih returns remaining events in $3. */
clz $4, $3 /* Get next slih index. */
brnz $3, next_event
lqd $2, 81*16($SP) /* Read event mask from stack (saved at -(STACK_SKIP+1)*16 pre-frame). */
restoreregs 40, 79
wrch $SPU_WrEventMask, $2 /* Restore event mask. */
hbrp /* Open a slot for instruction pre-fetch. */
restoreregs 2, 39
/* Restore the link register from the new stack frame for the
normal spu_flih() version of this file. */
lqd $0, 2*16($SP) /* matches the -(STACK_SKIP+80)*16 save above */
lqd $SP, 0*16($SP) /* restore stack pointer from back chain ptr. */
irete /* Return from interrupt and re-enable interrupts. */
.size spu_flih, .-spu_flih
/* spu_slih_handlers[]
Here we initialize 33 default event handlers. The first entry in this array
corresponds to the event handler for the event associated with bit 0 of
Channel 0 (External Event Status). The 32nd entry in this array corresponds
to bit 31 of Channel 0 (DMA Tag Status Update Event). The 33rd entry in
this array is a special case entry to handle "phantom events" which occur
when the channel count for Channel 0 is 1, causing an asynchronous SPU
interrupt, but the value returned for a read of Channel 0 is 0. The index
calculated into this array by spu_flih() for this case is 32, hence the
33rd entry. */
.data
.align 4
.extern __spu_default_slih
.global __spu_slih_handlers
.type __spu_slih_handlers, @object
/* Every entry starts out as __spu_default_slih; the timer library (or
   user code) patches in real handlers at run time. */
__spu_slih_handlers:
.rept 33
.long __spu_default_slih
.endr
.size __spu_slih_handlers, .-__spu_slih_handlers
|
stsp/newlib-ia16
| 2,055
|
newlib/libc/machine/spu/sprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* int sprintf(char *str, const char *fmt, ...)
   SPU-side stub: spills the variadic argument registers to the stack
   and forwards the formatted-print request to the PPE
   (op SPE_C99_VSPRINTF). */
#include "c99ppe.h"
.text
.align 4
GLOBL sprintf
.type sprintf, @function
sprintf:
stqd $0, 16($sp) /* save caller address */
il $2, 2 /* number of fixed arguments (str, format) */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VSPRINTF /* PPE operation selector */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2) /* pop frame built by __stack_reg_va */
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size sprintf, .-sprintf
|
stsp/newlib-ia16
| 1,576
|
newlib/libc/machine/spu/iscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* iscanf: integer-only alias of scanf.  Re-assembling scanf.S with the
   symbol renamed reuses the identical PPE-assisted stub (the PPE still
   services it through SPE_C99_VSCANF). */
#define scanf iscanf
#include "scanf.S"
|
stsp/newlib-ia16
| 1,585
|
newlib/libc/machine/spu/sniprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* sniprintf: integer-only alias of snprintf.  Re-assembling snprintf.S
   with the symbol renamed reuses the identical PPE-assisted stub. */
#define snprintf sniprintf
#include "snprintf.S"
|
stsp/newlib-ia16
| 1,582
|
newlib/libc/machine/spu/siprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* siprintf: integer-only alias of sprintf.  Re-assembling sprintf.S
   with the symbol renamed reuses the identical PPE-assisted stub. */
#define sprintf siprintf
#include "sprintf.S"
|
stsp/newlib-ia16
| 2,049
|
newlib/libc/machine/spu/printf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* int printf(const char *fmt, ...)
   SPU-side stub: spills the variadic argument registers to the stack
   and forwards the formatted-print request to the PPE
   (op SPE_C99_VPRINTF). */
#include "c99ppe.h"
.text
.align 4
GLOBL printf
.type printf, @function
printf:
stqd $0, 16($sp) /* save caller address */
il $2, 1 /* number of fixed arguments (just the format string) */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VPRINTF /* PPE operation selector */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2) /* pop frame built by __stack_reg_va */
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size printf, .-printf
|
stsp/newlib-ia16
| 1,579
|
newlib/libc/machine/spu/iprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* iprintf: integer-only alias of printf.  Re-assembling printf.S with
   the symbol renamed reuses the identical PPE-assisted stub. */
#define printf iprintf
#include "printf.S"
|
stsp/newlib-ia16
| 2,049
|
newlib/libc/machine/spu/sscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* int sscanf(const char *str, const char *fmt, ...)
   SPU-side stub: spills the variadic argument registers to the stack
   and forwards the scan request to the PPE (op SPE_C99_VSSCANF). */
#include "c99ppe.h"
.text
.align 4
GLOBL sscanf
.type sscanf, @function
sscanf:
stqd $0, 16($sp) /* save caller address */
il $2, 2 /* number of fixed arguments (str, format) */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VSSCANF /* PPE operation selector */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe
il $2, 16*(SPE_STACK_REGS+2+2) /* pop frame built by __stack_reg_va */
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size sscanf, .-sscanf
|
stsp/newlib-ia16
| 4,238
|
newlib/libc/machine/spu/setjmp.S
|
/*
(C) Copyright IBM Corp. 2005, 2006
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of IBM nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Author: Andreas Neukoetter (ti95neuk@de.ibm.com)
*/
/*
int setjmp( jmp_buf env );
*/
.text
.align 2
.global setjmp
.type setjmp, @function
/* jmp_buf layout (one quadword slot per register):
     slot 0      = $0   (link register / return address)
     slot 1      = $1   (stack pointer)
     slots 2..49 = $80..$127 (the non-volatile registers)
   Returns 0 on the direct call; longjmp later resumes at the saved $0. */
setjmp:
stqd $80, 2*16($3)
stqd $81, 3*16($3)
stqd $82, 4*16($3)
stqd $83, 5*16($3)
stqd $84, 6*16($3)
stqd $85, 7*16($3)
stqd $86, 8*16($3)
stqd $87, 9*16($3)
stqd $88, 10*16($3)
stqd $89, 11*16($3)
stqd $90, 12*16($3)
stqd $91, 13*16($3)
stqd $92, 14*16($3)
stqd $93, 15*16($3)
stqd $94, 16*16($3)
stqd $95, 17*16($3)
stqd $96, 18*16($3)
stqd $97, 19*16($3)
stqd $98, 20*16($3)
stqd $99, 21*16($3)
stqd $100, 22*16($3)
stqd $101, 23*16($3)
stqd $102, 24*16($3)
stqd $103, 25*16($3)
stqd $104, 26*16($3)
stqd $105, 27*16($3)
stqd $106, 28*16($3)
stqd $107, 29*16($3)
stqd $108, 30*16($3)
stqd $109, 31*16($3)
stqd $110, 32*16($3)
stqd $111, 33*16($3)
stqd $112, 34*16($3)
stqd $113, 35*16($3)
stqd $114, 36*16($3)
stqd $115, 37*16($3)
stqd $116, 38*16($3)
stqd $117, 39*16($3)
stqd $118, 40*16($3)
stqd $119, 41*16($3)
hbr setjmp_ret, $0 # hint the upcoming indirect branch through $0
lnop # pipe1 bubble added for instruction fetch
stqd $120, 42*16($3)
stqd $121, 43*16($3)
stqd $122, 44*16($3)
stqd $123, 45*16($3)
stqd $124, 46*16($3)
stqd $125, 47*16($3)
stqd $126, 48*16($3)
stqd $127, 49*16($3)
stqd $0, 0*16($3) # save return address last
stqd $1, 1*16($3) # save stack pointer
il $3, 0 # direct call returns 0
setjmp_ret:
bi $0
.size setjmp, .-setjmp
/*
int longjmp( jmp_buf env, int val );
*/
.text
.align 2
.global longjmp
.type longjmp, @function
/* Restores the register image saved by setjmp($3) and resumes there
   with return value $4 (forced to 1 if the caller passed 0, per the C
   standard).  The block after the $1 reload patches word 1 of the
   restored stack-pointer register -- NOTE(review): this looks like the
   SPU ABI's "available stack space" word being adjusted by the SP
   delta between the setjmp and longjmp frames; confirm against the
   SPU ABI spec. */
longjmp:
lr $127, $1 # $127 = current stack pointer
lqd $0, 0*16($3) # reload saved return address
lqd $1, 1*16($3) # reload saved stack pointer
sf $126, $127, $1 # $126 = saved_sp - current_sp (sf: rt = rb - ra)
rotqbyi $126, $126, 12 # move the delta (word 0) into word 1 position
fsmbi $127, 0x0F00 # byte mask selecting word 1 only
and $126, $126, $127
a $1, $1, $126 # apply delta to word 1 of $1
# restore all the non-volatile registers
lqd $80, 2*16($3)
lqd $81, 3*16($3)
lqd $82, 4*16($3)
lqd $83, 5*16($3)
lqd $84, 6*16($3)
lqd $85, 7*16($3)
lqd $86, 8*16($3)
lqd $87, 9*16($3)
lqd $88, 10*16($3)
lqd $89, 11*16($3)
lqd $90, 12*16($3)
lqd $91, 13*16($3)
lqd $92, 14*16($3)
lqd $93, 15*16($3)
lqd $94, 16*16($3)
lqd $95, 17*16($3)
lqd $96, 18*16($3)
lqd $97, 19*16($3)
lqd $98, 20*16($3)
lqd $99, 21*16($3)
lqd $100, 22*16($3)
lqd $101, 23*16($3)
lqd $102, 24*16($3)
lqd $103, 25*16($3)
lqd $104, 26*16($3)
lqd $105, 27*16($3)
lqd $106, 28*16($3)
lqd $107, 29*16($3)
lqd $108, 30*16($3)
lqd $109, 31*16($3)
hbr longjmp_ret, $0 # hint the final indirect branch through $0
lqd $110, 32*16($3)
lqd $111, 33*16($3)
lqd $112, 34*16($3)
lqd $113, 35*16($3)
lqd $114, 36*16($3)
lqd $115, 37*16($3)
lqd $116, 38*16($3)
lqd $117, 39*16($3)
lqd $118, 40*16($3)
lqd $119, 41*16($3)
lqd $120, 42*16($3)
lqd $121, 43*16($3)
lqd $122, 44*16($3)
lqd $123, 45*16($3)
lqd $124, 46*16($3)
lqd $125, 47*16($3)
ceqi $5, $4, 0 # $5 = -1 if val == 0, else 0
lqd $126, 48*16($3)
lqd $127, 49*16($3)
sf $3, $5, $4 # $3 = val - $5  =>  val, or 1 when val was 0
longjmp_ret:
bi $0 # resume at the instruction after setjmp's return
.size longjmp, .-longjmp
|
stsp/newlib-ia16
| 3,027
|
newlib/libc/machine/nds32/memset.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Function:
memset - fill memory with a constant byte
Syntax:
void *memset(void *s, int c, size_t n);
Description:
The memset function copies the value of c (converted to an unsigned char)
into each of the first n characters of the object pointed to by s.
Return value:
The memset function returns the value of s.
*/
.text
.align 2
.globl memset
.type memset, @function
/* void *memset(void *s, int c, size_t n)
   ABI (nds32): $r0 = s, $r1 = c, $r2 = n; returns s in $r0.
   Strategy: replicate c into a full word, store word-at-a-time
   (n / 4 stores), then finish the trailing n % 4 bytes. */
memset:
/* Corner case. If n is zero, just go return. */
beqz $r2, .Lend_memset
/* Keep $r0 as return value.
Set $r4 as how many words to copy.
Set $r2 as how many bytes are less than a word. */
move $r5, $r0
srli $r4, $r2, 2
andi $r2, $r2, 3
beqz $r4, .Lbyte_set
/* Set $r1 a word-size pattern composed of the value of c
(converted to an unsigned char). Convert ??????ab to abababab. */
andi $r1, $r1, 0xff /* Set $r1 = 000000ab. */
slli $r3, $r1, 8 /* Set $r3 = 0000ab00. */
or $r1, $r1, $r3 /* Set $r1 = 0000abab. */
slli $r3, $r1, 16 /* Set $r3 = abab0000. */
or $r1, $r1, $r3 /* Set $r1 = abababab. */
.Lword_set:
/* Do the word set $r4 times. Then, do the byte set $r2 times. */
addi $r4, $r4, -1
smw.bim $r1, [$r5], $r1 /* Set a word-size (store $r1..$r1, post-increment $r5). */
bnez $r4, .Lword_set /* Loop again ? */
beqz $r2, .Lend_memset /* Fall THRU or go return ? */
.Lbyte_set:
/* Do the byte set $r2 times. */
addi $r2, $r2, -1
sbi.p $r1, [$r5], 1 /* Set a byte-size. */
bnez $r2, .Lbyte_set /* Loop again ? */
.Lend_memset:
ret /* $r0 still holds the original s. */
.size memset, .-memset
|
stsp/newlib-ia16
| 2,731
|
newlib/libc/machine/nds32/memcpy.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Function:
memcpy - copy memory regions
Syntax:
void *memcpy(void *s1, const void *s2, size_t n);
Description:
The memcpy function copies n characters from the object pointed to
by s2 into the object pointed to by s1. If copying takes place
between objects that overlap, the behavior is undefined.
Return value:
The memcpy function returns the value of s1.
*/
.text
.align 2
.globl memcpy
.type memcpy, @function
/* void *memcpy(void *s1, const void *s2, size_t n)
   ABI (nds32): $r0 = s1, $r1 = s2, $r2 = n; returns s1 in $r0.
   Strategy: copy n / 4 words through $r4, then the trailing
   n % 4 bytes.  No alignment handling -- relies on the core
   tolerating the unaligned word accesses it may generate
   (NOTE(review): confirm against the target's alignment rules). */
memcpy:
/* Corner cases. If *s1 equals *s2
or size_t is zero, just go return. */
beq $r0, $r1, .Lend_memcpy
beqz $r2, .Lend_memcpy
/* Keep *s1 as return value.
Set $r3 as how many words to copy.
Set $r2 as how many bytes are less than a word. */
move $r5, $r0
srli $r3, $r2, 2
andi $r2, $r2, 3
beqz $r3, .Lbyte_copy
.Lword_copy:
/* Do the word copy $r3 times. Then, do the byte copy $r2 times. */
lmw.bim $r4, [$r1], $r4, 0 /* Load one word, post-increment $r1. */
addi $r3, $r3, -1
smw.bim $r4, [$r5], $r4, 0 /* Store one word, post-increment $r5. */
bnez $r3, .Lword_copy /* Loop again ? */
beqz $r2, .Lend_memcpy /* Fall THRU or go return ? */
.Lbyte_copy:
/* Do the byte copy $r2 times. */
lbi.bi $r4, [$r1], 1
addi $r2, $r2, -1
sbi.bi $r4, [$r5], 1
bnez $r2, .Lbyte_copy /* Loop again ? */
.Lend_memcpy:
ret /* $r0 still holds the original s1. */
.size memcpy, .-memcpy
|
stsp/newlib-ia16
| 3,494
|
newlib/libc/machine/nds32/strcmp.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Function:
strcmp - compare two strings.
Syntax:
int strcmp(const char *s1, const char *s2);
Description:
This function compares the two strings s1 and s2. It returns an
integer less than, equal to, or greater than zero if s1 is found,
respectively, to be less than, to match, or be greater than s2.
Return value:
strcmp returns an integer less than, equal to, or greater than
zero if s1 (or the first n bytes thereof) is found, respectively,
to be less than, to match, or be greater than s2.
*/
.text
.align 2
.globl strcmp
.type strcmp, @function
/*
 * int strcmp(const char *s1, const char *s2)  --  NDS32.
 * In:  $r0 = s1, $r1 = s2
 * Out: $r0 = byte difference at the first mismatch (0 if the strings match)
 * Clobbers: $r2, $r3, $r4, $r5.
 * Strategy: compare word-at-a-time while both pointers are 4-byte
 * aligned and the words match, using the classic DETECTNULL bit trick
 * to stop at the terminator; fall back to byte compares otherwise.
 */
strcmp:
/* If s1 or s2 are unaligned, then compare bytes. */
or $r5, $r1, $r0
andi $r5, $r5, #3
bnez $r5, .Lbyte_mode
/* If s1 and s2 are word-aligned, compare them a word at a time. */
lwi $r5, [$r0+(0)]
lwi $r3, [$r1+(0)]
bne $r5, $r3, .Lbyte_mode /* A difference was detected, so
search bytewise. */
/* It's more efficient to set bit mask outside the word_mode loop. */
sethi $r4, hi20(0xFEFEFEFF) /* Set $r4 as -0x01010101. */
ori $r4, $r4, lo12(0xFEFEFEFF)
sethi $r2, hi20(0x80808080)
ori $r2, $r2, lo12(0x80808080)
b .Ldetect_null
.align 2
.Lword_mode:
/* Load the next word from each string with post-increment. */
lmw.aim $r5, [$r0], $r5
lmw.aim $r3, [$r1], $r3
bne $r5, $r3, .Lbyte_mode
.Ldetect_null:
/* #define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
DETECTNULL returns nonzero if (long)X contains a NULL byte. */
nor $r3, $r5, $r5 /* r3 = ~(X) */
add $r5, $r5, $r4 /* r2 = ((X) - 0x01010101) */
and $r5, $r5, $r3 /* r2 = ~(X) & ((X) - 0x01010101) */
and $r5, $r5, $r2 /* r2= r2 & 0x80808080 */
beqz $r5, .Lword_mode /* No NULL byte, compare next word. */
/* To get here, *a1 == *a2, thus if we find a null in *a1,
then the strings must be equal, so return zero. */
movi $r0, #0
ret
.Lbyte_mode:
/* Byte-mode compare: one byte per iteration until mismatch or NUL. */
lbi.bi $r5, [$r0], #1
lbi.bi $r3, [$r1], #1
bne $r5, $r3, 1f /* Mismatch, done. */
bnez $r5, .Lbyte_mode
1:
/* Return the signed difference of the last bytes compared. */
sub $r0, $r5, $r3
ret
.size strcmp, .-strcmp
|
stsp/newlib-ia16
| 2,863
|
newlib/libc/machine/nds32/strcpy.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Function:
strcpy - copy a string.
Syntax:
char *strcpy(char *dest, const char *src);
Description:
This function copies the string pointed to by src into the array
point to by dest (include the teminating null character).
Return value:
strcpy returns the dest as given.
*/
.text
.align 2
.globl strcpy
.type strcpy, @function
/*
 * char *strcpy(char *dest, const char *src)  --  NDS32.
 * In:  $r0 = dest, $r1 = src
 * Out: $r0 = dest (unchanged)
 * Clobbers: $r2, $r3, $r4, $r5.
 * Copies word-at-a-time while both pointers are 4-byte aligned and the
 * loaded word contains no NUL byte (DETECTNULL trick); the terminator
 * and any unaligned portion are copied bytewise.
 */
strcpy:
move $r3, $r0 /* Keep r0 as return value. */
/* If SRC or DEST is unaligned, then copy bytes. */
or $r2, $r1, $r0
andi $r2, $r2, #3
bnez $r2, .Lbyte_mode
.Lword_mode:
/* SRC and DEST are both "long int" aligned, try to do "long int"
sized copies. */
/* #define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080)
DETECTNULL returns nonzero if (long)X contains a NULL byte. */
lwi $r2, [$r1+(0)] /* r2 is X */
sethi $r4, hi20(0xFEFEFEFF)
ori $r4, $r4, lo12(0xFEFEFEFF)
add $r4, $r2, $r4 /* r4 = ((X) - 0x01010101) */
nor $r5, $r2, $r2 /* r5 = ~(X) */
and $r4, $r5, $r4 /* r4 = ~(X) & ((X) - 0x01010101) */
sethi $r5, hi20(0x80808080)
ori $r5, $r5, lo12(0x80808080)
and $r4, $r4, $r5 /* r4 = r4 & 0x80808080 */
bnez $r4, .Lbyte_mode /* Contains a NULL byte. */
swi.bi $r2, [$r3], #4
addi $r1, $r1, #4
b .Lword_mode
.Lbyte_mode:
/* Copy bytes up to and including the terminating NUL. */
lbi.bi $r4, [$r1], #1 /* r4 <- *src++ */
sbi.bi $r4, [$r3], #1 /* r4 -> *dest++ */
bnez $r4, .Lbyte_mode
ret
.size strcpy, .-strcpy
|
stsp/newlib-ia16
| 4,974
|
newlib/libc/machine/nds32/setjmp.S
|
/*
Copyright (c) 2013 Andes Technology Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of the company may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The setjmp/longjmp for nds32.
The usage of thirty-two 32-bit General Purpose Registers (GPR):
$r28 : $fp
$r29 : $gp
$r30 : $lp
$r31 : $sp
caller-save registers: $r0 ~ $r5, $r16 ~ $r23
callee-save registers: $r6 ~ $r10, $r11 ~ $r14
reserved for assembler : $r15
reserved for other use : $r24, $r25, $r26, $r27
Save all callee-save registers and $fp, $gp, $lp and $sp is enough in theory.
For debugging issue, the layout of jum_buf in here should be in sync with GDB.
The $r16 ~ $r19 are used to store D0/D1, keep them for backward-compatible.
*/
/* int setjmp(jmp_buf env); */
.text
.align 2
.global setjmp
.type setjmp, @function
/*
 * int setjmp(jmp_buf env)  --  NDS32.
 * In:  $r0 = env buffer.
 * Out: $r0 = 0 on the direct return.
 * Saves the callee-saved GPRs (plus $fp/$gp/$lp/$sp via the smw store
 * mask) into env; when an FPU is configured, also records $fpcfg.freg
 * and the callee-saved FP register pairs appropriate for that
 * configuration.  The jmp_buf layout must stay in sync with GDB
 * (see the file-header comment).
 */
setjmp:
#if __NDS32_REDUCED_REGS__
smw.bim $r6, [$r0], $r10, #0b0000
addi $r0, $r0, #32 /* Leave room to keep jum_buf all the same. */
smw.bim $r31, [$r0], $r31, #0b1111
#else
smw.bim $r6, [$r0], $r14, #0b0000
smw.bim $r16, [$r0], $r19, #0b1111
#endif
#if __NDS32_EXT_FPU_SP__ || __NDS32_EXT_FPU_DP__
/* Extract $fpcfg.freg (b[3:2]), then save into jmp_buf. */
fmfcfg $r2
slli $r2, $r2, #28
srli $r2, $r2, #30
swi.bi $r2, [$r0], #4
/* Make sure $r0 is double-word-aligned. */
addi $r0, $r0, #7
bitci $r0, $r0, #7
/* Case switch according to $fpcfg.freg */
beqz $r2, .LCFG0_save /* Branch if $fpcfg.freg = 0b00. */
xori $r15, $r2, #0b10
beqz $r15, .LCFG2_save /* Branch $fpcfg.freg = 0b10. */
srli $r2, $r2, #0b01
beqz $r2, .LCFG1_save /* Branch if $fpcfg.freg = 0b01. */
/* Fall-through if $fpcfg.freg = 0b11.
   Larger configurations save a superset: each case falls through
   into the smaller one below it. */
.LCFG3_save:
fsdi.bi $fd31, [$r0], #8
fsdi.bi $fd29, [$r0], #8
fsdi.bi $fd27, [$r0], #8
fsdi.bi $fd25, [$r0], #8
fsdi.bi $fd23, [$r0], #8
fsdi.bi $fd21, [$r0], #8
fsdi.bi $fd19, [$r0], #8
fsdi.bi $fd17, [$r0], #8
.LCFG2_save:
fsdi.bi $fd15, [$r0], #8
fsdi.bi $fd13, [$r0], #8
fsdi.bi $fd11, [$r0], #8
fsdi.bi $fd9, [$r0], #8
.LCFG1_save:
fsdi.bi $fd7, [$r0], #8
fsdi.bi $fd5, [$r0], #8
.LCFG0_save:
fsdi.bi $fd3, [$r0], #8
#endif
/* Set return value to zero. */
movi $r0, 0
ret
.size setjmp, .-setjmp
/* void longjmp(jmp_buf env, int val); */
.text
.align 2
.global longjmp
.type longjmp, @function
/*
 * void longjmp(jmp_buf env, int val)  --  NDS32.
 * In:  $r0 = env buffer previously filled by setjmp, $r1 = val.
 * Restores the registers saved by setjmp (mirror of the save order),
 * then returns to the setjmp call site with $r0 = val, substituting 1
 * when val is 0 as required by the C standard.
 */
longjmp:
#if __NDS32_REDUCED_REGS__
lmw.bim $r6, [$r0], $r10, #0b0000
addi $r0, $r0, #32
lmw.bim $r31, [$r0], $r31, #0b1111
#else
lmw.bim $r6, [$r0], $r14, #0b0000
lmw.bim $r16, [$r0], $r19, #0b1111
#endif
#if __NDS32_EXT_FPU_SP__ || __NDS32_EXT_FPU_DP__
/* Restore value of $fpcfg.freg (b[3:2]). */
lwi.bi $r2, [$r0], #4
/* Make sure $r0 is double-word-aligned. */
addi $r0, $r0, #7
bitci $r0, $r0, #7
/* Case switch according to $fpcfg.freg */
beqz $r2, .LCFG0_restore /* Branch if $fpcfg.freg = 0b00. */
xori $r15, $r2, #0b10
beqz $r15, .LCFG2_restore /* Branch $fpcfg.freg = 0b10. */
srli $r2, $r2, #0b01
beqz $r2, .LCFG1_restore /* Branch if $fpcfg.freg = 0b01. */
/* Fall-through if $fpcfg.freg = 0b11.
   Cases fall through from larger to smaller FP configurations,
   mirroring the save path in setjmp. */
.LCFG3_restore:
fldi.bi $fd31, [$r0], #8
fldi.bi $fd29, [$r0], #8
fldi.bi $fd27, [$r0], #8
fldi.bi $fd25, [$r0], #8
fldi.bi $fd23, [$r0], #8
fldi.bi $fd21, [$r0], #8
fldi.bi $fd19, [$r0], #8
fldi.bi $fd17, [$r0], #8
.LCFG2_restore:
fldi.bi $fd15, [$r0], #8
fldi.bi $fd13, [$r0], #8
fldi.bi $fd11, [$r0], #8
fldi.bi $fd9, [$r0], #8
.LCFG1_restore:
fldi.bi $fd7, [$r0], #8
fldi.bi $fd5, [$r0], #8
.LCFG0_restore:
fldi.bi $fd3, [$r0], #8
#endif
/* Set val as return value. If the value val is 0, 1 will be returned
instead. */
movi $r0, 1
cmovn $r0, $r1, $r1 /* r0=(r1!=0)? r1: r0 */
ret
.size longjmp, .-longjmp
|
stsp/newlib-ia16
| 2,935
|
newlib/libc/machine/lm32/setjmp.S
|
/*
* setjmp/longjmp for LatticeMico32.
* Contributed by Jon Beniston <jon@beniston.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.section .text
.align 4
.globl setjmp
.type setjmp,@function
.globl longjmp
.type longjmp,@function
/* setjmp: save all callee saves into jmp_buf
r1 - Address of jmp_buf
*/
/*
 * int setjmp(jmp_buf env)  --  LatticeMico32.
 * In:  r1 = env buffer.
 * Out: r1 = 0 on the direct return.
 * Stores the callee-saved registers r11-r25 plus gp/fp/sp/ra at fixed
 * 4-byte offsets in env (76 bytes total).
 */
setjmp:
sw (r1+0), r11
sw (r1+4), r12
sw (r1+8), r13
sw (r1+12), r14
sw (r1+16), r15
sw (r1+20), r16
sw (r1+24), r17
sw (r1+28), r18
sw (r1+32), r19
sw (r1+36), r20
sw (r1+40), r21
sw (r1+44), r22
sw (r1+48), r23
sw (r1+52), r24
sw (r1+56), r25
sw (r1+60), gp
sw (r1+64), fp
sw (r1+68), sp
sw (r1+72), ra
mvi r1, 0 /* Direct invocation returns 0. */
ret
/* longjmp: restore all callee saves from jmp_buf
r1 - Address of jmb_buf
r2 - Value to return with
*/
.global longjmp
.type longjmp,@function
.align 4
/*
 * void longjmp(jmp_buf env, int val)  --  LatticeMico32.
 * In:  r1 = env buffer filled by setjmp, r2 = return value.
 * Restores the registers saved by setjmp (same offsets) and returns to
 * the saved ra with r1 = val.
 * NOTE(review): val == 0 is passed through unchanged here; the C
 * standard substitution of 1 for 0 is not performed in this routine --
 * presumably handled (or accepted) elsewhere; verify against callers.
 */
longjmp:
lw r11, (r1+0)
lw r12, (r1+4)
lw r13, (r1+8)
lw r14, (r1+12)
lw r15, (r1+16)
lw r16, (r1+20)
lw r17, (r1+24)
lw r18, (r1+28)
lw r19, (r1+32)
lw r20, (r1+36)
lw r21, (r1+40)
lw r22, (r1+44)
lw r23, (r1+48)
lw r24, (r1+52)
lw r25, (r1+56)
lw gp, (r1+60)
lw fp, (r1+64)
lw sp, (r1+68)
lw ra, (r1+72)
mv r1, r2 /* Return val from the restored setjmp frame. */
ret
|
stsp/newlib-ia16
| 1,774
|
newlib/libc/machine/x86_64/memset.S
|
/*
* ====================================================
* Copyright (C) 2007 by Ellips BV. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "x86_64mach.h"
.global SYM (memset)
SOTYPE_FUNCTION(memset)
/*
 * void *memset(void *dest, int c, size_t n)  --  x86-64 SysV.
 * In:  rdi = dest, rsi = c, rdx = n.   Out: rax = dest.
 * Small fills (< 16 bytes) use rep stosb directly.  Larger fills align
 * rdi to 8 bytes, replicate c into all 8 bytes of rax, then store
 * quadwords; fills >= 256 bytes use non-temporal movnti stores, 128
 * bytes per iteration, to minimise cache pollution.
 */
SYM (memset):
movq rdi, r9 /* Save return value */
movq rsi, rax
movq rdx, rcx
cmpq $16, rdx
jb byte_set
movq rdi, r8 /* Align on quad word boundary */
andq $7, r8
jz quadword_aligned
movq $8, rcx /* rcx = bytes needed to reach alignment */
subq r8, rcx
subq rcx, rdx
rep stosb /* al already holds c (low byte of rsi) */
movq rdx, rcx
quadword_aligned:
/* Replicate the fill byte into every byte of rax. */
movabs $0x0101010101010101, r8
movzbl sil, eax
imul r8, rax
cmpq $256, rdx
jb quadword_set
shrq $7, rcx /* Store 128 bytes at a time with minimum cache polution */
.p2align 4
loop:
movntiq rax, (rdi)
movntiq rax, 8 (rdi)
movntiq rax, 16 (rdi)
movntiq rax, 24 (rdi)
movntiq rax, 32 (rdi)
movntiq rax, 40 (rdi)
movntiq rax, 48 (rdi)
movntiq rax, 56 (rdi)
movntiq rax, 64 (rdi)
movntiq rax, 72 (rdi)
movntiq rax, 80 (rdi)
movntiq rax, 88 (rdi)
movntiq rax, 96 (rdi)
movntiq rax, 104 (rdi)
movntiq rax, 112 (rdi)
movntiq rax, 120 (rdi)
leaq 128 (rdi), rdi
dec rcx
jnz loop
sfence /* Order the non-temporal stores before returning. */
movq rdx, rcx
andq $127, rcx /* Fill the < 128-byte tail bytewise. */
rep stosb
movq r9, rax
ret
byte_set:
rep stosb
movq r9, rax
ret
quadword_set:
shrq $3, rcx /* n / 8 quadword stores */
.p2align 4
rep stosq
movq rdx, rcx
andq $7, rcx
rep stosb /* Store the remaining bytes */
movq r9, rax
ret
|
stsp/newlib-ia16
| 2,255
|
newlib/libc/machine/x86_64/memcpy.S
|
/*
* ====================================================
* Copyright (C) 2007 by Ellips BV. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "x86_64mach.h"
.global SYM (memcpy)
SOTYPE_FUNCTION(memcpy)
/*
 * void *memcpy(void *dest, const void *src, size_t n)  --  x86-64 SysV.
 * In:  rdi = dest, rsi = src, rdx = n.   Out: rax = dest.
 * Small copies (< 16 bytes) use rep movsb.  Larger copies align the
 * destination to 8 bytes, then copy quadwords; copies >= 256 bytes use
 * prefetched loads plus non-temporal movnti stores, 128 bytes per
 * iteration.  Callee-saved r12-r14 are pushed around the big loop.
 */
SYM (memcpy):
movq rdi, rax /* Store destination in return value */
cmpq $16, rdx
jb byte_copy
movq rdi, r8 /* Align destination on quad word boundary */
andq $7, r8
jz quadword_aligned
movq $8, rcx
subq r8, rcx /* rcx = bytes to reach alignment */
subq rcx, rdx
rep movsb
quadword_aligned:
cmpq $256, rdx
jb quadword_copy
/* Big-copy path: rax is reused as scratch, so preserve the return
   value on the stack alongside the callee-saved registers. */
pushq rax
pushq r12
pushq r13
pushq r14
movq rdx, rcx /* Copy 128 bytes at a time with minimum cache polution */
shrq $7, rcx
.p2align 4
loop:
prefetchnta 768 (rsi)
prefetchnta 832 (rsi)
movq (rsi), rax
movq 8 (rsi), r8
movq 16 (rsi), r9
movq 24 (rsi), r10
movq 32 (rsi), r11
movq 40 (rsi), r12
movq 48 (rsi), r13
movq 56 (rsi), r14
movntiq rax, (rdi)
movntiq r8 , 8 (rdi)
movntiq r9 , 16 (rdi)
movntiq r10, 24 (rdi)
movntiq r11, 32 (rdi)
movntiq r12, 40 (rdi)
movntiq r13, 48 (rdi)
movntiq r14, 56 (rdi)
movq 64 (rsi), rax
movq 72 (rsi), r8
movq 80 (rsi), r9
movq 88 (rsi), r10
movq 96 (rsi), r11
movq 104 (rsi), r12
movq 112 (rsi), r13
movq 120 (rsi), r14
movntiq rax, 64 (rdi)
movntiq r8 , 72 (rdi)
movntiq r9 , 80 (rdi)
movntiq r10, 88 (rdi)
movntiq r11, 96 (rdi)
movntiq r12, 104 (rdi)
movntiq r13, 112 (rdi)
movntiq r14, 120 (rdi)
leaq 128 (rsi), rsi
leaq 128 (rdi), rdi
dec rcx
jnz loop
sfence /* Order non-temporal stores before the tail copy. */
movq rdx, rcx
andq $127, rcx /* Copy the < 128-byte tail bytewise. */
rep movsb
popq r14
popq r13
popq r12
popq rax /* Recover the original dest for the return value. */
ret
byte_copy:
movq rdx, rcx
rep movsb
ret
quadword_copy:
movq rdx, rcx
shrq $3, rcx
.p2align 4
rep movsq
movq rdx, rcx
andq $7, rcx
rep movsb /* Copy the remaining bytes */
ret
|
stsp/newlib-ia16
| 1,083
|
newlib/libc/machine/x86_64/setjmp.S
|
/*
* ====================================================
* Copyright (C) 2007 by Ellips BV. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
/*
** jmp_buf:
** rbx rbp r12 r13 r14 r15 rsp rip
** 0 8 16 24 32 40 48 56
*/
#include "x86_64mach.h"
.global SYM (setjmp)
.global SYM (longjmp)
SOTYPE_FUNCTION(setjmp)
SOTYPE_FUNCTION(longjmp)
/*
 * int setjmp(jmp_buf env)  --  x86-64 SysV.
 * In:  rdi = env.   Out: rax = 0 on the direct return.
 * Saves the callee-saved registers rbx/rbp/r12-r15, the caller's rsp
 * (as it will be after this ret, hence rsp+8), and the return address
 * (rip) into env at the offsets documented above.
 */
SYM (setjmp):
movq rbx, 0 (rdi)
movq rbp, 8 (rdi)
movq r12, 16 (rdi)
movq r13, 24 (rdi)
movq r14, 32 (rdi)
movq r15, 40 (rdi)
leaq 8 (rsp), rax /* rsp after return, i.e. with return addr popped */
movq rax, 48 (rdi)
movq (rsp), rax /* saved rip = our return address */
movq rax, 56 (rdi)
movq $0, rax
ret
/*
 * void longjmp(jmp_buf env, int val)  --  x86-64 SysV.
 * In:  rdi = env, rsi = val (returned in rax at the setjmp site).
 * Restores the registers saved by setjmp and returns through the saved
 * rip.  Interrupts are masked (__CLI/__STI macros) while rsp is being
 * switched so no handler runs on a half-restored stack.
 * NOTE(review): val == 0 is returned as 0 here; the ISO C substitution
 * of 1 for 0 is not performed in this routine -- verify against the
 * C-level wrapper.
 */
SYM (longjmp):
movq rsi, rax /* Return value */
movq 8 (rdi), rbp
__CLI
movq 48 (rdi), rsp /* switch to the saved stack */
pushq 56 (rdi) /* push saved rip so ret jumps to it */
movq 0 (rdi), rbx
movq 16 (rdi), r12
movq 24 (rdi), r13
movq 32 (rdi), r14
movq 40 (rdi), r15
__STI
ret
|
stsp/newlib-ia16
| 1,592
|
newlib/libc/machine/mn10300/strlen.S
|
.file "strlen.S"
.section .text
.global _strlen
.type _strlen,@function
/*
 * size_t strlen(const char *s)  --  MN10300.
 * In:  d0 = s.   Out: d0 = length (bytes before the NUL).
 * When optimizing for speed and s is 4-byte aligned, scans a word at a
 * time with the DETECTNULL bit trick ((X - 0x01010101) & ~X &
 * 0x80808080) inside a setlb/leq hardware loop, then finishes with a
 * byte scan from the word that contained the NUL.
 */
_strlen:
movm [d2,d3,a2,a3],(sp) /* save callee-saved regs */
.Lend_of_prologue:
mov d0,a0 /* a0 = scan pointer */
mov a0,a2 /* a2 = original s, for the final subtraction */
#ifndef __OPTIMIZE_SIZE__
btst 3,d0
bne .L21 /* unaligned: byte scan */
mov (a0),d0
mov -16843009,a1 /* a1 = -0x01010101 */
mov a1,d1
add d0,d1
not d0
and d0,d1
mov -2139062144,d2 /* d2 = 0x80808080 */
btst -2139062144,d1
bne .L21 /* first word holds a NUL: byte scan */
setlb
inc4 a0
mov (a0),d0
mov a1,d1
add d0,d1
not d0
and d0,d1
and d2,d1
leq /* loop while no NUL byte detected */
jmp .L21
#endif
.L19:
inc a0
.L21:
/* Byte scan until the terminating NUL. */
movbu (a0),d3
cmp 0,d3
bne .L19
sub a2,a0 /* length = end - start */
mov a0,d0
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_strlen:
.size _strlen, .Lend_of_strlen - _strlen
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _strlen # FDE initial location
.4byte .Lend_of_strlen - _strlen # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _strlen
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
stsp/newlib-ia16
| 2,065
|
newlib/libc/machine/mn10300/strchr.S
|
.file "strchr.S"
.section .text
.global _strchr
.type _strchr,@function
/*
 * char *strchr(const char *s, int c)  --  MN10300.
 * In:  d0 = s, d1 = c.   Out: a0 = pointer to first c in s, or 0.
 * The search character is kept at (7,sp).  In the fast path (aligned
 * s, not optimizing for size) it first builds a word with c replicated
 * in each byte at (8,sp), then scans word-at-a-time using DETECTNULL
 * both on the raw word (to find the terminator) and on word XOR
 * replicated-c (to find c), before finishing bytewise.
 */
_strchr:
movm [d2,d3,a2,a3],(sp)
add -12,sp /* 12 bytes of locals: mask, replicated c */
.Lend_of_prologue:
mov d0,a1 /* a1 = scan pointer */
movbu d1,(7,sp) /* stash the search byte */
#ifndef __OPTIMIZE_SIZE__
btst 3,d0
bne .L20 /* unaligned: byte scan */
/* Replicate the search byte into the 4 bytes at (8,sp). */
clr d0
setlb
mov sp,a2
mov d0,d3
add d3,a2
mov a2,a0
add 12,a0
movbu (7,sp),d3
movbu d3,(-4,a0) /* store byte at sp+8+d0 */
inc d0
cmp 3,d0
lls
mov a1,a0
mov -16843009,a1 /* a1 = -0x01010101 */
mov (a0),d2
mov a1,d1
add d2,d1
mov d2,d0
not d0
and d0,d1
mov -2139062144,d3 /* 0x80808080 mask, kept at (0,sp) */
mov d3,(0,sp)
btst -2139062144,d1
bne .L27 /* word contains NUL: finish bytewise */
jmp .L38
.L28:
inc4 a0
mov (a0),d2
mov a1,d1
add d2,d1
mov d2,d0
not d0
and d0,d1
mov (0,sp),d3
and d3,d1
bne .L27 /* NUL detected in this word */
.L38:
/* DETECTNULL on (word ^ replicated c) finds the search byte. */
mov (8,sp),d0
xor d2,d0
mov a1,d1
add d0,d1
not d0
and d0,d1
and d3,d1
beq .L28 /* neither NUL nor c: next word */
.L27:
mov a0,a1
.L20:
#endif
/* Bytewise scan from a1 for c or the terminating NUL. */
movbu (a1),d0
cmp 0,d0
beq .L32
movbu (7,sp),d1
setlb
cmp d1,d0
beq .L36 /* found c */
inc a1
movbu (a1),d0
cmp 0,d0
lne /* loop until NUL */
.L32:
/* At the NUL: c may itself be '\0', so compare once more. */
movbu (7,sp),d0
movbu (a1),d3
cmp d0,d3
beq .L36
mov 0,a0 /* not found */
jmp .Lepilogue
.L36:
mov a1,a0 /* found: return the pointer */
.Lepilogue:
ret [d2,d3,a2,a3],28
.Lend_of_strchr:
.size _strchr, .Lend_of_strchr - _strchr
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _strchr # FDE initial location
.4byte .Lend_of_strchr - _strchr # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _strchr
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
stsp/newlib-ia16
| 1,826
|
newlib/libc/machine/mn10300/memset.S
|
.file "memset.S"
.section .text
.global _memset
.type _memset,@function
/*
 * void *memset(void *dest, int c, size_t n)  --  MN10300.
 * In:  d0 = dest, d1 = c, (28,sp) = n.   Out: a0 = dest.
 * Fast path (aligned dest, n > 3, not optimizing for size): replicate
 * c into all 4 bytes of a word, store 16 bytes per setlb iteration,
 * then 4 bytes per iteration; the tail is filled bytewise.
 */
_memset:
movm [d2,d3,a2,a3], (sp)
.Lend_of_prologue:
mov d0, d3 /* d3 = dest, kept for the return value */
mov d1, d2 /* d2 = fill byte */
mov (28, sp),a1 /* a1 = count */
mov d3, a0 /* a0 = store pointer */
#ifndef __OPTIMIZE_SIZE__
cmp 3, a1
bls .L41 /* tiny fill: bytewise */
btst 3, d3
bne .L41 /* unaligned dest: bytewise */
/* Replicate the fill byte into every byte of d1. */
extbu d2
mov d2, d1
asl 8, d1
or d2, d1
mov d1, d0
asl 16, d0
or d0, d1
cmp 15, a1
bls .L36
/* 16 bytes per iteration. */
setlb
mov d1, (a0)
inc4 a0
mov d1, (a0)
inc4 a0
mov d1, (a0)
inc4 a0
mov d1, (a0)
inc4 a0
add -16, a1
cmp 15, a1
lhi
.L36:
/* 4 bytes per iteration. */
cmp 3, a1
bls .L41
setlb
mov d1, (a0)
inc4 a0
add -4, a1
cmp 3, a1
lhi
.L41:
#endif
/* Bytewise tail (or the whole fill in the small/unaligned case). */
cmp 0, a1
beq .Lepilogue
setlb
movbu d2, (a0)
inc a0
sub 1, a1
lne
.Lepilogue:
mov d3,a0 /* return dest */
ret [d2,d3,a2,a3], 16
.Lend_of_memset:
.size _memset, .Lend_of_memset - _memset
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _memset # FDE initial location
.4byte .Lend_of_memset - _memset # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _memset
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
stsp/newlib-ia16
| 1,790
|
newlib/libc/machine/mn10300/memcpy.S
|
.file "memcpy.S"
.section .text
.global _memcpy
.type _memcpy,@function
/*
 * void *memcpy(void *dest, const void *src, size_t n)  --  MN10300.
 * In:  d0 = dest, d1 = src, (28,sp) = n.   Out: a0 = dest.
 * Fast path (both pointers aligned, not optimizing for size): copy 16
 * bytes per setlb iteration, then 4 bytes per iteration; the tail (or
 * an unaligned copy) is done bytewise.
 */
_memcpy:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
mov d0,d2 /* d2 = dest, kept for the return value */
mov d1,a0 /* a0 = src pointer */
mov d2,a1 /* a1 = dest pointer */
mov (28,sp),d1 /* d1 = count */
#ifndef __OPTIMIZE_SIZE__
/* Only the aligned/aligned case gets the word loops. */
mov a0,d0
or d2,d0
btst 3,d0
bne .L37
cmp 15,d1
bls .L34
/* 16 bytes per iteration. */
setlb
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
add -16,d1
cmp 15,d1
lhi
.L34:
/* 4 bytes per iteration. */
cmp 3,d1
bls .L37
setlb
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
add -4,d1
cmp 3,d1
lhi
.L37:
#endif
/* Bytewise tail (or the whole copy in the unaligned case). */
cmp 0,d1
beq .L36
setlb
movbu (a0),d0
movbu d0,(a1)
inc a0
inc a1
sub 1,d1
lne
.L36:
mov d2,a0 /* return dest */
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_memcpy:
.size _memcpy, .Lend_of_memcpy - _memcpy
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _memcpy # FDE initial location
.4byte .Lend_of_memcpy - _memcpy # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _memcpy
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
stsp/newlib-ia16
| 1,669
|
newlib/libc/machine/mn10300/strcmp.S
|
.file "strcmp.S"
.section .text
.global _strcmp
.type _strcmp,@function
/*
 * int strcmp(const char *s1, const char *s2)  --  MN10300.
 * In:  d0 = s1, d1 = s2.   Out: d0 = byte difference at first
 * mismatch (0 if the strings are equal).
 * Fast path (both aligned, not optimizing for size): compare a word at
 * a time while the words match, using the DETECTNULL bit trick to
 * return 0 as soon as a matching word contains the terminator; drop to
 * the bytewise loop on any word mismatch.
 */
_strcmp:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
mov d0,a0 /* a0 = s1 */
mov d1,a1 /* a1 = s2 */
#ifndef __OPTIMIZE_SIZE__
or d1,d0
btst 3,d0
bne .L11 /* either pointer unaligned: byte loop */
mov (a0),d1
mov (a1),d0
cmp d0,d1
bne .L11 /* first words differ: byte loop */
mov -16843009,d3 /* d3 = -0x01010101 */
setlb
mov (a0),d0
mov d3,d1
add d0,d1
not d0
and d0,d1
and -2139062144,d1 /* DETECTNULL mask 0x80808080 */
beq .L6
/* Matching word contains a NUL: strings are equal. */
clr d0
jmp .Lepilogue
.L6:
inc4 a0
inc4 a1
mov (a0),d1
mov (a1),d0
cmp d0,d1
leq /* loop while the words stay equal */
.L11:
#endif
/* Bytewise compare until mismatch or NUL. */
setlb
movbu (a1),d2
movbu (a0),d0
cmp 0,d0
beq .L9
cmp d2,d0
bne .L9
inc a0
inc a1
lra
.L9:
sub d2,d0 /* difference of the last bytes compared */
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_strcmp:
.size _strcmp, .Lend_of_strcmp - _strcmp
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _strcmp # FDE initial location
.4byte .Lend_of_strcmp - _strcmp # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _strcmp
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
stsp/newlib-ia16
| 1,863
|
newlib/libc/machine/mn10300/memchr.S
|
.file "memchr.S"
.section .text
.global _memchr
.type _memchr,@function
/*
 * void *memchr(const void *s, int c, size_t n)  --  MN10300.
 * In:  d0 = s, d1 = c, (28,sp) = n.   Out: a0 = pointer to first c,
 * or 0 if c does not occur in the first n bytes.
 * Fast path (aligned s, n > 3, not optimizing for size): replicate c
 * into all 4 bytes of a word, then scan a word at a time applying the
 * DETECTNULL trick to (word ^ replicated-c); when a candidate word is
 * found, locate the exact byte with a short byte loop.
 */
_memchr:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
mov d0,a0 /* a0 = scan pointer */
mov d1,d2 /* d2 = search byte */
mov (28,sp),a1 /* a1 = count */
#ifndef __OPTIMIZE_SIZE__
cmp 3,a1
bls .L44
mov a0,d3
btst 3,d3
bne .L44 /* unaligned: byte scan */
mov a0,a2
/* Replicate the search byte into all 4 bytes of a3. */
mov 0,a3
clr d1
setlb
mov a3,d0
asl 8,d0
mov d2,a3
add d0,a3
inc d1
cmp 3,d1
lls
cmp 3,a1
bls .L48
.L33:
/* DETECTNULL on (word ^ replicated c): nonzero => c in this word. */
mov (a2),d0
mov a3,d3
xor d3,d0
mov d0,d1
not d1
add -16843009,d0
and d1,d0
btst -2139062144,d0
beq .L34
/* Candidate word: check its 4 bytes individually. */
mov a2,a0
clr d1
setlb
movbu (a0),d0
cmp d2,d0
beq .Lepilogue
inc a0
inc d1
cmp 3,d1
lls
.L34:
add -4,a1
inc4 a2
cmp 3,a1
bhi .L33
.L48:
mov a2,a0
.L44:
#endif
/* Bytewise scan of the remaining a1 bytes. */
cmp 0,a1
beq .L50
setlb
movbu (a0),d0
cmp d2,d0
beq .Lepilogue
inc a0
sub 1,a1
lne
.L50:
mov 0,a0 /* not found */
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_memchr:
.size _memchr, .Lend_of_memchr - _memchr
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _memchr # FDE initial location
.4byte .Lend_of_memchr - _memchr # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _memchr
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
stsp/newlib-ia16
| 1,618
|
newlib/libc/machine/mn10300/memcmp.S
|
.file "memcmp.S"
.section .text
.global _memcmp
.type _memcmp,@function
/*
 * int memcmp(const void *s1, const void *s2, size_t n)  --  MN10300.
 * In:  d0 = s1, d1 = s2, (28,sp) = n.   Out: d0 = byte difference at
 * the first mismatch, 0 if the first n bytes are equal.
 * Fast path (both aligned, n > 3, not optimizing for size): compare a
 * word at a time while equal, then locate the differing byte with the
 * bytewise loop.
 */
_memcmp:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
mov d0,a0 /* a0 = s1 */
mov d1,a1 /* a1 = s2 */
mov (28,sp),a2 /* a2 = count */
#ifndef __OPTIMIZE_SIZE__
cmp 3,a2
bls .L22
mov a1,d2
or d2,d0
btst 3,d0
bne .L22 /* either pointer unaligned: byte loop */
setlb
mov (a0),d1
mov (a1),d0
cmp d0,d1
bne .L22 /* words differ: find the byte below */
inc4 a0
inc4 a1
add -4,a2
cmp 3,a2
lhi
.L22:
#endif
/* Bytewise compare of the remaining a2 bytes. */
cmp 0,a2
beq .L24
setlb
movbu (a0),d3
movbu (a1),d2
cmp d2,d3
beq .L23
mov d3,d0
sub d2,d0 /* difference of the mismatching bytes */
jmp .Lepilogue
.L23:
inc a0
inc a1
sub 1,a2
lne
.L24:
clr d0 /* all n bytes equal */
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_func:
.size _memcmp, .Lend_of_func - _memcmp
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _memcmp # FDE initial location
.4byte .Lend_of_func - _memcmp # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _memcmp
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
stsp/newlib-ia16
| 1,655
|
newlib/libc/machine/mn10300/strcpy.S
|
.file "strcpy.S"
.section .text
.global _strcpy
.type _strcpy,@function
/*
 * char *strcpy(char *dest, const char *src)  --  MN10300.
 * In:  d0 = dest, d1 = src.   Out: a0 = dest.
 * Fast path (both aligned, not optimizing for size): copy a word at a
 * time while the loaded word contains no NUL byte (DETECTNULL trick);
 * the terminator and any unaligned copy are handled bytewise.
 */
_strcpy:
movm [d2,d3,a2,a3],(sp)
.Lend_of_prologue:
mov d0,d3 /* d3 = dest, kept for the return value */
mov d1,a0 /* a0 = src */
mov d3,a1 /* a1 = dest */
mov a0,d0
#ifndef __OPTIMIZE_SIZE__
or d3,d0
btst 3,d0
bne .L2 /* either pointer unaligned: byte loop */
mov (a0),d0
mov -16843009,a2 /* a2 = -0x01010101 */
mov a2,d1
add d0,d1
not d0
and d0,d1
mov -2139062144,d2 /* d2 = 0x80808080 */
btst -2139062144,d1
bne .L2 /* first word holds a NUL: byte loop */
setlb
mov (a0),d0
mov d0,(a1)
inc4 a0
inc4 a1
mov (a0),d0
mov a2,d1
add d0,d1
not d0
and d0,d1
and d2,d1
leq /* loop while the next word has no NUL */
.L2:
#endif
/* Copy bytes up to and including the terminating NUL. */
setlb
movbu (a0),d0
movbu d0,(a1)
inc a0
inc a1
cmp 0,d0
lne
mov d3,a0 /* return dest */
.Lepilogue:
ret [d2,d3,a2,a3],16
.Lend_of_strcpy:
.size _strcpy, .Lend_of_strcpy - _strcpy
.section .debug_frame,"",@progbits
.Lstart_of_debug_frame:
# Common Information Entry (CIE)
.4byte .Lend_of_CIE - .Lstart_of_CIE # CIE Length
.Lstart_of_CIE:
.4byte 0xffffffff # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "\0" # CIE Augmentation
.uleb128 0x1 # CIE Code Alignment Factor
.sleb128 -4 # CIE Data Alignment Factor
.byte 0x32 # CIE RA Column
.byte 0xc # DW_CFA_def_cfa
.uleb128 0x9
.uleb128 0x0
.byte 0xb2 # DW_CFA_offset, column 0x32
.uleb128 0x0
.align 2
.Lend_of_CIE:
# Frame Description Entry (FDE)
.4byte .Lend_of_FDE - .Lstart_of_FDE # FDE Length
.Lstart_of_FDE:
.4byte .Lstart_of_debug_frame # FDE CIE offset
.4byte _strcpy # FDE initial location
.4byte .Lend_of_strcpy - _strcpy # FDE address range
.byte 0x4 # DW_CFA_advance_loc4
.4byte .Lend_of_prologue - _strcpy
.byte 0xe # DW_CFA_def_cfa_offset
.uleb128 0x4
.byte 0x87 # DW_CFA_offset, column 0x7
.uleb128 0x1
.align 2
.Lend_of_FDE:
|
stsp/newlib-ia16
| 1,328
|
newlib/libc/machine/mn10300/setjmp.S
|
.file "setjmp.S"
.section .text
.align 1
.global _setjmp
#ifdef __AM33__
#ifdef __AM33_2__
.am33_2
#else
.am33
#endif
#endif
/*
 * int setjmp(jmp_buf env)  --  MN10300 / AM33.
 * In:  d0 = env.   Out: d0 = 0 on the direct return.
 * Saves d2/d3, the MDR register, a2/a3 and sp at fixed offsets; AM33
 * builds additionally save r4-r7, and AM33-2 builds the FP registers
 * fs4-fs19, with post-increment stores starting at offset 24.
 */
_setjmp:
mov d0,a0 /* a0 = env */
mov d2,(0,a0)
mov d3,(4,a0)
mov mdr,d1 /* save the MDR (multiply/divide) register */
mov d1,(8,a0)
mov a2,(12,a0)
mov a3,(16,a0)
mov sp,a1
mov a1,(20,a0)
#ifdef __AM33__
add 24,a0
mov r4,(a0+)
mov r5,(a0+)
mov r6,(a0+)
mov r7,(a0+)
#ifdef __AM33_2__
fmov fs4,(a0+)
fmov fs5,(a0+)
fmov fs6,(a0+)
fmov fs7,(a0+)
fmov fs8,(a0+)
fmov fs9,(a0+)
fmov fs10,(a0+)
fmov fs11,(a0+)
fmov fs12,(a0+)
fmov fs13,(a0+)
fmov fs14,(a0+)
fmov fs15,(a0+)
fmov fs16,(a0+)
fmov fs17,(a0+)
fmov fs18,(a0+)
fmov fs19,(a0+)
#endif
#endif
sub d0,d0 /* return 0 */
rets
.global _longjmp
/*
 * void longjmp(jmp_buf env, int val)  --  MN10300 / AM33.
 * In:  d0 = env filled by setjmp, d1 = val.
 * Restores the registers saved by _setjmp (mirror of the save order),
 * then returns to the setjmp call site with d0 = val, substituting 1
 * when val is 0 as required by the C standard.
 */
_longjmp:
mov d0,a0 /* a0 = env */
mov (8,a0),d2
mov d2,mdr /* restore the MDR register */
mov (0,a0),d2
mov (4,a0),d3
mov (12,a0),a2
mov (16,a0),a3
mov (20,a0),a1
mov a1,sp /* switch back to the saved stack */
#ifdef __AM33__
add 24,a0
mov (a0+),r4
mov (a0+),r5
mov (a0+),r6
mov (a0+),r7
#ifdef __AM33_2__
fmov (a0+),fs4
fmov (a0+),fs5
fmov (a0+),fs6
fmov (a0+),fs7
fmov (a0+),fs8
fmov (a0+),fs9
fmov (a0+),fs10
fmov (a0+),fs11
fmov (a0+),fs12
fmov (a0+),fs13
fmov (a0+),fs14
fmov (a0+),fs15
fmov (a0+),fs16
fmov (a0+),fs17
fmov (a0+),fs18
fmov (a0+),fs19
#endif
#endif
/* Return val, or 1 if val was 0. */
cmp 0,d1
bne L1
mov 1,d1
L1:
mov d1,d0
retf [],0
|
stsp/newlib-ia16
| 2,487
|
newlib/libc/machine/nios2/setjmp.s
|
;/*
; * C library -- _setjmp, _longjmp
; *
; * _longjmp(a,v)
; * will generate a "return(v?v:1)" from
; * the last call to
; * _setjmp(a)
; * by unwinding the call stack.
; * The previous signal state is NOT restored.
; *
; *
; * Copyright (c) 2003 Altera Corporation
; * All rights reserved.
; *
; * Redistribution and use in source and binary forms, with or without
; * modification, are permitted provided that the following conditions
; * are met:
; *
; * o Redistributions of source code must retain the above copyright
; * notice, this list of conditions and the following disclaimer.
; * o Redistributions in binary form must reproduce the above copyright
; * notice, this list of conditions and the following disclaimer in the
; * documentation and/or other materials provided with the distribution.
; * o Neither the name of Altera Corporation nor the names of its
; * contributors may be used to endorse or promote products derived from
; * this software without specific prior written permission.
; *
; * THIS SOFTWARE IS PROVIDED BY ALTERA CORPORATION, THE COPYRIGHT HOLDER,
; * AND ITS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
; * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
; * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
; * THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
; * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
; * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
; * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
; * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
; * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
; * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
; */
.section .text
.align 3
.globl setjmp
.type setjmp,@function
.globl longjmp
.type longjmp,@function
# int setjmp (jmp_buf env)   -- env pointer in r4.
# Stores the callee-saved registers r16-r23 plus gp, sp, fp and ra
# into consecutive words of the buffer, then returns 0 in r2.
setjmp:
stw r16, 0(r4)
stw r17, 4(r4)
stw r18, 8(r4)
stw r19, 12(r4)
stw r20, 16(r4)
stw r21, 20(r4)
stw r22, 24(r4)
stw r23, 28(r4)
stw gp, 32(r4)
stw sp, 36(r4)
stw fp, 40(r4)
stw ra, 44(r4)
mov r2, zero	# return value = 0
ret
# void longjmp (jmp_buf env, int val)   -- env in r4, val in r5.
# Reloads the registers saved by setjmp above and returns val in r2
# (forced to 1 when val == 0); the final ret transfers control through
# the restored ra, resuming after the original setjmp call.
longjmp:
ldw r16, 0(r4)
ldw r17, 4(r4)
ldw r18, 8(r4)
ldw r19, 12(r4)
ldw r20, 16(r4)
ldw r21, 20(r4)
ldw r22, 24(r4)
ldw r23, 28(r4)
ldw gp, 32(r4)
ldw sp, 36(r4)
ldw fp, 40(r4)
ldw ra, 44(r4)
mov r2, r5	# proposed return value
bne r2, zero, 1f
movi r2, 1	# setjmp must never appear to return 0
1:
ret
|
stsp/newlib-ia16
| 3,629
|
newlib/libc/machine/rl78/setjmp.S
|
/*
Copyright (c) 2011 Red Hat Incorporated.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of Red Hat Incorporated may not be used to endorse
or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef __RL78_G10__
; clobberable
r8 = 0xffec8
r9 = 0xffec9
r10 = 0xffeca
r11 = 0xffecb
r12 = 0xffecc
r13 = 0xffecd
r14 = 0xffece
r15 = 0xffecf
; preserved
r16 = 0xffed0
r17 = 0xffed1
r18 = 0xffed2
r19 = 0xffed3
r20 = 0xffed4
r21 = 0xffed5
r22 = 0xffed6
r23 = 0xffed7
#else
; clobberable
r8 = 0xffef0
r9 = 0xffef1
r10 = 0xffef2
r11 = 0xffef3
r12 = 0xffef4
r13 = 0xffef5
r14 = 0xffef6
r15 = 0xffef7
; preserved
r16 = 0xffee8
r17 = 0xffee9
r18 = 0xffeea
r19 = 0xffeeb
r20 = 0xffeec
r21 = 0xffeed
r22 = 0xffeee
r23 = 0xffeef
#endif
/* The jump buffer has the following structure:
R0 .. R23 3*8 bytes
SP 2 bytes
ES 1 byte
CS 1 byte
PC 4 bytes
*/
;; Helper: store the byte register \reg at [hl + \ofs] (clobbers a).
.macro _saveb ofs,reg
mov a,\reg
mov [hl+\ofs],a
.endm
;; Helper: store the word register \reg at [hl + \ofs] (clobbers ax).
.macro _save ofs,reg
movw ax,\reg
movw [hl+\ofs],ax
.endm
.global _setjmp
.type _setjmp, @function
_setjmp:
;; R8 = setjmp (jmp_buf *[sp+4].w)
;; must return zero !!
push ax		; copy #1 of AX: restored by the final pop before ret
push hl		; caller's HL: stored into the buffer at offset 6 below
push ax		; copy #2 of AX: reloaded right after HL is fetched
movw ax, [sp+10]	; fetch jmp_buf pointer (3 pushes moved sp by 6)
movw hl, ax	; hl = jmp_buf
pop ax		; recover AX (copy #2)
movw [hl], ax	; slot 0: AX
_save 2, bc	; slot 2: BC
_save 4, de	; slot 4: DE
pop ax		; pops the saved HL value
movw [hl+6], ax	; slot 6: HL
_save 8, r8	; slots 8..22: the memory-mapped virtual registers
_save 10, r10
_save 12, r12
_save 14, r14
_save 16, r16
_save 18, r18
_save 20, r20
_save 22, r22
;; The sp we have now includes one more pushed reg, plus $PC
movw ax, sp
addw ax, #6	; undo remaining push (2 bytes) + return PC (4 bytes)
movw [hl+24], ax	; slot 24: caller's SP
_saveb 26, es	; slot 26: ES
_saveb 27, cs	; slot 27: CS
_save 28, [sp+2]	; slot 28: low word of the 4-byte return PC
_save 30, [sp+4]	; slot 30: high word of the return PC
clrw ax
movw r8, ax	; return value = 0 in R8
pop ax		; restore AX (copy #1)
ret
.size _setjmp, . - _setjmp
;; Helper: load the byte at [hl + \ofs] into \reg (clobbers a).
.macro _loadb ofs,reg
mov a,[hl+\ofs]
mov \reg,a
.endm
;; Helper: load the word at [hl + \ofs] into \reg (clobbers ax).
.macro _load ofs,reg
movw ax,[hl+\ofs]
movw \reg,ax
.endm
;; Helper: push the word stored at [hl + \ofs] (clobbers ax).
.macro _push ofs
movw ax,[hl+\ofs]
push ax
.endm
.global _longjmp
.type _longjmp, @function
_longjmp:
;; noreturn longjmp (jmp_buf *[sp+4].w, int [sp+6].w)
movw ax, [sp+6]	; ax = requested return value
cmpw ax,#0
sknz		; skip the next insn when ax != 0
onew ax		; force 0 -> 1: setjmp must not return 0
movw r8, ax	; R8 = value the resumed setjmp will return
movw ax, [sp+4]
movw hl, ax	; hl = jmp_buf
movw ax, [hl+24]
movw sp, ax ; this is the *new* stack
_push 30 ; high half of PC
_push 28 ; low half of PC
_push 6 ; HL
_push 0 ; AX
_load 2, bc
_load 4, de
;; slot 8 / r8 is deliberately not reloaded: it carries the return value
_load 10, r10
_load 12, r12
_load 14, r14
_load 16, r16
_load 18, r18
_load 20, r20
_load 22, r22
_loadb 26, es
_loadb 27, cs
pop ax		; restore AX (pushed from slot 0 above)
pop hl		; restore HL (pushed from slot 6 above)
ret ; pops PC (4 bytes)
.size _longjmp, . - _longjmp
|
stsp/newlib-ia16
| 1,089
|
newlib/libc/machine/d10v/setjmp.S
|
; setjmp/longjmp for D10V. The jmpbuf looks like this:
;
; Register jmpbuf offset
; R6 0x00
; R7 0x02
; R8 0x04
; R9 0x06
; R10 0x08
; R11 0x0a
; R13 (return address) 0x0c
; R15 (SP) 0x0E
.text
.globl setjmp
.type setjmp,@function
.stabs "setjmp.S",100,0,0,setjmp
.stabs "int:t(0,1)=r(0,1);-65536;65535;",128,0,0,0
.stabs "setjmp:F(0,1)",36,0,1,setjmp
; int setjmp (jmp_buf buf) -- buf in R0; layout documented above.
setjmp:
; Address of jmpbuf is passed in R0. Save the appropriate registers.
st2w r6, @r0+	; r6/r7 (st2w stores a register pair)
st2w r8, @r0+	; r8/r9
st2w r10, @r0+	; r10/r11
st r13, @r0+	; return address
st r15, @r0+	; stack pointer
; Return 0 to caller
ldi r0, 0
jmp r13
.Lsetjmp:
.size setjmp,.Lsetjmp-setjmp
.stabs "",36,0,0,.Lsetjmp-setjmp
.globl longjmp
.type longjmp,@function
.stabs "longjmp:F(0,1)",36,0,1,longjmp
; void longjmp (jmp_buf buf, int val) -- buf in R0, val in R1.
; Restores the registers saved by setjmp and jumps back through the
; saved return address, making setjmp return val (or 1 if val == 0).
longjmp:
; Address of jmpbuf is in R0. Restore the registers.
ld2w r6, @r0+	; r6/r7
ld2w r8, @r0+	; r8/r9
ld2w r10, @r0+	; r10/r11
ld r13, @r0+	; return address
ld r15, @r0+	; stack pointer
; Value to return to caller is in R1. If caller attempted to return 0,
; return 1 instead.
mv r0, r1
cmpeqi r0, 0	; f0 = (value == 0)
exef0t || ldi r0,1	; load 1 in parallel, executed only when f0 is set
jmp r13
.Llongjmp:
.size longjmp,.Llongjmp-longjmp
.stabs "",36,0,0,.Llongjmp-longjmp
|
stsp/newlib-ia16
| 1,050
|
newlib/libc/machine/rx/strcat.S
|
.file "strcat.S"
.section .text
;; char *strcat (char *dest, const char *src) -- dest in r1, src in r2.
;; Appends the NUL-terminated string at src to the one at dest and
;; returns the original dest pointer in r1.
.global _strcat
.type _strcat,@function
_strcat:
;; On entry: r1 => Destination
;; r2 => Source
#ifdef __RX_DISALLOW_STRING_INSNS__
mov r1, r4 ; Save a copy of the dest pointer.
1: mov.b [r4+], r5 ; Find the NUL byte at the end of R4.
cmp #0, r5
bne 1b
sub #1, r4 ; Move R4 back to point at the NUL byte.
2: mov.b [r2+], r5 ; Copy bytes from R2 to R4 until we reach a NUL byte.
mov.b r5, [r4+]
cmp #0, r5
bne 2b
rts
#else
;; String-instruction version: SUNTIL.B scans for the byte in r2,
;; SMOVU copies bytes up to and including the terminating NUL.
mov r1, r4 ; Save a copy of the dest pointer.
mov r2, r5 ; Save a copy of the source pointer.
mov #0, r2 ; Search for the NUL byte.
mov #-1, r3 ; Limit on the number of bytes examined.
suntil.b ; Find the end of the destination string.
sub #1, r1 ; suntil.b leaves r1 pointing to the byte beyond the match.
mov #-1, r3 ; Set a limit on the number of bytes copied.
mov r5, r2 ; Restore the source pointer.
smovu ; Copy source to destination
mov r4, r1 ; Return the original dest pointer.
rts
#endif
.size _strcat, . - _strcat
|
stsp/newlib-ia16
| 1,855
|
newlib/libc/machine/rx/strncat.S
|
.file "strncat.S"
.section .text
;; char *strncat (char *dest, const char *src, size_t n)
;; dest in r1, src in r2, n in r3.  Appends at most n bytes of src to
;; the string at dest, always NUL-terminates the result (per the C
;; standard) and returns the original dest pointer.
;;
;; Fixed: the __RX_DISALLOW_STRING_INSNS__ path previously returned
;; without storing a terminating NUL when the n-byte limit was reached
;; before the source's NUL, leaving dest unterminated.  The string-insn
;; path already handled this case explicitly.
.global _strncat
.type _strncat,@function
_strncat:
;; On entry: r1 => Destination
;; r2 => Source
;; r3 => Max number of bytes to copy
#ifdef __RX_DISALLOW_STRING_INSNS__
cmp #0, r3 ; If max is zero we have nothing to do.
beq 2f
mov r1, r4 ; Leave the destination pointer intact for the return value.
1: mov.b [r4+], r5 ; Find the NUL byte at the end of the destination.
cmp #0, r5
bne 1b
sub #1, r4
3: mov.b [r2+], r5 ; Copy bytes from the source into the destination ...
mov.b r5, [r4+]
cmp #0, r5 ; ... until we reach a NUL byte ...
beq 2f
sub #1, r3
bne 3b ; ... or we have copied N bytes.
mov.b #0, [r4] ; Copy stopped at the limit: NUL-terminate the result.
2: rts
#else
;; String-insn version: SUNTIL.B finds the destination's NUL, SMOVU
;; copies, and a NUL is stored by hand if the copy was cut short.
mov r1, r4 ; Save a copy of the dest pointer.
mov r2, r5 ; Save a copy of the source pointer.
mov r3, r14 ; Save a copy of the byte count.
mov #0, r2 ; Search for the NUL byte.
mov #-1, r3 ; Search until we run out of memory.
suntil.b ; Find the end of the destination string.
sub #1, r1 ; suntil.b leaves r1 pointing to the byte beyond the NUL.
mov r14, r3 ; Restore the limit on the number of bytes copied.
mov r5, r2 ; Restore the source pointer.
mov r1, r5 ; Save a copy of the dest pointer.
smovu ; Copy source to destination.
add #0, r14, r3 ; Restore the number of bytes to copy (again), but this time set the Z flag as well.
beq 1f ; If we copied 0 bytes then we already know that the dest string is NUL terminated, so we do not have to do anything.
mov #0, r2 ; Otherwise we must check to see if a NUL byte
mov r5, r1 ; was included in the bytes that were copied.
suntil.b
beq 1f ; Z flag is set if a match was found.
add r14, r5 ; Point at byte after end of copied bytes.
mov.b #0, [r5] ; Store a NUL there.
1:
mov r4, r1 ; Return the original dest pointer.
rts
#endif
.size _strncat, . - _strncat
|
stsp/newlib-ia16
| 1,142
|
newlib/libc/machine/rx/memmove.S
|
.file "memmove.S"
.section .text
;; void *memmove (void *dest, const void *src, size_t n)
;; dest in r1, src in r2, n in r3.  Overlap-safe copy: copies forwards
;; when SRC >= DEST, backwards otherwise.  Returns dest.
.global _memmove
.type _memmove,@function
_memmove:
;; R1: DEST
;; R2: SRC
;; R3: COUNT
#ifdef __RX_DISALLOW_STRING_INSNS__
/* Do not use the string instructions - they might prefetch
bytes from outside of valid memory. This is particularly
dangerous in I/O space. */
cmp #0, r3 ; If the count is zero, do nothing
beq 4f
cmp r1, r2
blt 3f ; If SRC < DEST copy backwards
mov r1, r14 ; Save a copy of DEST
5: mov.b [r2+], r5 ; forward byte-copy loop
mov.b r5, [r14+]
sub #1, r3
bne 5b
4: rts
3: add r3, r1 ; point both pointers one past the last byte ...
add r3, r2
6: mov.b [-r2], r5 ; ... and copy backwards with pre-decrement
mov.b r5, [-r1]
sub #1, r3
bne 6b
rts
#else
mov r1, r4 ; Save a copy of DEST
cmp r1, r2
blt 2f ; If SRC (r2) is less than DEST (r1) then copy backwards
smovf
1:
mov r4, r1 ; Return DEST
rts
2:
add r3, r1 ; The SMOVB instructions requires the DEST in r1 and the
add r3, r2 ; SRC in r2 but it needs them to point the last bytes of
sub #1, r2 ; the regions involved not the first bytes, hence these
sub #1, r1 ; additions and subtractions.
smovb
bra 1b
#endif /* SMOVF allowed. */
.size _memmove, . - _memmove
|
stsp/newlib-ia16
| 1,937
|
newlib/libc/machine/rx/setjmp.S
|
# setjmp/longjmp for Renesas RX.
#
# The jmpbuf looks like this:
#
# Register jmpbuf offset
# R0 0x0
# R1 0x4
# R2 0x8
# R3 0xc
# R4 0x10
# R5 0x14
# R6 0x18
# R7 0x1c
# R8 0x20
# R9 0x24
# R10 0x28
# R11 0x2c
# R12 0x30
# R13 0x34
# R14 0x38
# R15 0x3c
# PC 0x40
#
# R1 contains the pointer to jmpbuf:
#
# int R1 = setjmp (jmp_buf R1)
# void longjmp (jmp_buf R1, int R2)
#
# The ABI allows for R1-R5 to be clobbered by functions. We must be
# careful to always leave the stack in a usable state in case an
# interrupt happens.
.text
;; int setjmp (jmp_buf buf) -- buf in r1 (layout documented above).
;; Saves r0-r15 plus the caller's return PC (read from the top of the
;; stack; r0 is the RX stack pointer) into buf, then returns 0.
.global _setjmp
.type _setjmp, @function
_setjmp:
mov.l r0, [r1] ; save all the general registers
mov.l r1, 0x4[r1] ; longjmp won't use this, but someone else might.
mov.l r2, 0x8[r1]
mov.l r3, 0xc[r1]
mov.l r4, 0x10[r1]
mov.l r5, 0x14[r1]
mov.l r6, 0x18[r1]
mov.l r7, 0x1c[r1]
mov.l r8, 0x20[r1]
mov.l r9, 0x24[r1]
mov.l r10, 0x28[r1]
mov.l r11, 0x2c[r1]
mov.l r12, 0x30[r1]
mov.l r13, 0x34[r1]
mov.l r14, 0x38[r1]
mov.l r15, 0x3c[r1]
mov.l [r0], r2 ; get return address off the stack
mov.l r2, 0x40[r1] ; PC
mov #0, r1 ; Return 0.
rts
.Lend1:
.size _setjmp, .Lend1 - _setjmp
;; void longjmp (jmp_buf buf, int val) -- buf in r1, val in r2.
;; Patches the saved return address onto the restored stack, reloads
;; every register from buf, and "returns" val (1 if val was 0) from
;; the original setjmp call site.
.global _longjmp
.type _longjmp, @function
_longjmp:
tst r2, r2 ; Set the Z flag if r2 is 0.
stz #1, r2 ; If the Z flag was set put 1 into the return register.
; NOTE(review): the next mov has no .l suffix, unlike its neighbours --
; presumably the assembler defaults to .l here; confirm against gas.
mov r2, 4[r1] ; Put r2 (our return value) into the setjmp buffer as r1.
mov.l [r1], r0 ; Restore the stack - there's a slot for PC
mov.l 0x40[r1], r2 ; Get the saved PC
mov.l r2, [r0] ; Overwrite the old return address
mov.l 0x3c[r1], r15
mov.l 0x38[r1], r14
mov.l 0x34[r1], r13
mov.l 0x30[r1], r12
mov.l 0x2c[r1], r11
mov.l 0x28[r1], r10
mov.l 0x24[r1], r9
mov.l 0x20[r1], r8
mov.l 0x1c[r1], r7
mov.l 0x18[r1], r6
mov.l 0x14[r1], r5
mov.l 0x10[r1], r4
mov.l 0xc[r1], r3
mov.l 0x8[r1], r2
mov.l 0x4[r1], r1 ; This sets up the new return value
rts
.Lend2:
.size _longjmp, .Lend2 - _longjmp
|
stsp/newlib-ia16
| 5,366
|
newlib/libc/machine/sparc/setjmp.S
|
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* Modified for incorporation into newlib by Joel Sherrill
* (joel@OARcorp.com), On-Line Applications Research, 1995.
* Did the following:
* + merged in DEFS.h
* + removed error check since it prevented using this setjmp
* to "context switch"
* + added the support for the "user label" and "register" prefix
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: $Header$
*/
#if defined(LIBC_SCCS) && !defined(lint)
.asciz "@(#)_setjmp.s 8.1 (Berkeley) 6/4/93"
#endif /* LIBC_SCCS and not lint */
/*
* Recent versions of GNU cpp define variables which indicate the
* need for underscores and percents. If not using GNU cpp or
* the version does not support this, then you will obviously
* have to define these as appropriate.
*/
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/*********************************************************************
*********************************************************************
* Contents of DEFS.h *
*********************************************************************
*********************************************************************/
#ifdef PROF
#define ENTRY(x) \
.align 4; .globl SYM(x); .proc 1; SYM(x):; .data; .align 4; 1: .long 0; \
.text; save %sp,-96,%sp; sethi %hi(1b),%o0; call mcount; \
or %lo(1b),%o0,%o0; restore
#else
#define ENTRY(x) \
.align 4; .globl SYM(x); .proc 1; SYM(x):
#endif
/*********************************************************************
*********************************************************************
* END of DEFS.h *
*********************************************************************
*********************************************************************/
/*
* C library -- _setjmp, _longjmp
*
* _longjmp(a,v)
* will generate a "return(v?v:1)" from
* the last call to
* _setjmp(a)
* by unwinding the call stack.
* The previous signal state is NOT restored.
*/
/* #include "DEFS.h" */
/*
 * int setjmp (jmp_buf env) / int _setjmp (jmp_buf env)
 * SPARC: env pointer arrives in %o0.  Only four words are saved --
 * %sp, the caller's %i7, %fp and our own return address %o7; the
 * register windows themselves are flushed to the stack by longjmp
 * and reloaded from there.  Returns 0.
 */
ENTRY(setjmp)
ENTRY(_setjmp)
st %sp, [%o0] /* caller's stack pointer */
st %i7, [%o0+4] /* caller's return pc */
st %fp, [%o0+8] /* store caller's frame pointer */
st %o7, [%o0+12] /* our own return address */
retl
clr %o0 ! return 0 (executes in retl's delay slot)
/*
 * void longjmp (jmp_buf env, int v) / _longjmp
 * Flushes all register windows to memory (ta 0x03), restores %sp from
 * the buffer, reloads the locals and ins from the window-save area of
 * the restored frame, then returns v (1 if v == 0) at saved %o7 + 8.
 */
ENTRY(longjmp)
ENTRY(_longjmp)
ta 0x03 /* flush registers */
addcc %o1, %g0, %g1 ! compute v ? v : 1 in a global register
be,a 0f
mov 1, %g1 ! annulled delay slot: executed only when v == 0
0:
ld [%o0], %sp /* caller's stack pointer */
ldd [%sp], %l0 /* reload locals/ins from the window-save area */
ldd [%sp+8], %l2
ldd [%sp+16], %l4
ldd [%sp+24], %l6
ldd [%sp+32], %i0
ldd [%sp+40], %i2
ldd [%sp+48], %i4
ld [%o0+4], %i7 /* caller's return pc */
ld [%o0+8], %fp /* caller's frame pointer */
ld [%o0+12], %o7
jmp %o7 + 8 ! success, return %g1
mov %g1, %o0 ! delay slot: return value into %o0
|
stsp/newlib-ia16
| 2,106
|
winsup/cygwin/mcountFunc.S
|
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "mcountFunc.S"
.text
#ifdef _WIN64
.align 8
#else
.align 4
#endif
/* gcc always assumes the mcount public symbol has a single leading underscore
for our target. See gcc/config/i386.h; it isn't overridden in
config/i386/cygming.h or any other places for mingw */
/* Profiling hook called by gcc -pg prologues (after the frame is set
   up).  Gathers selfpc (an address inside the profiled function, i.e.
   our own return address) and frompc (the profiled function's return
   address, read through the saved frame pointer) and forwards both to
   _mcount_private.  All volatile integer registers are preserved so
   the instrumented function is undisturbed.  */
.globl _mcount
.def _mcount; .scl 2; .type 32; .endef
_mcount:
#ifndef _WIN64
push %ebp
mov %esp, %ebp
push %eax
push %ecx
push %edx
movl 4(%ebp),%edx /* selfpc: our return address, inside the profiled fn */
movl (%ebp),%eax /* caller's saved %ebp */
movl 4(%eax),%eax /* frompc: return address of the profiled function */
push %edx
push %eax
call __MINGW_USYMBOL(_mcount_private)
add $8, %esp /* drop the two cdecl arguments */
pop %edx
pop %ecx
pop %eax
leave
ret
#else
push %rbp
mov %rsp, %rbp
push %rax
push %rcx
push %rdx
push %r8
push %r9
push %r10
push %r11 /* save every volatile integer register */
movq 8(%rbp),%rdx /* selfpc */
movq (%rbp),%rax /* caller's saved %rbp */
movq 8(%rax),%rcx /* frompc */
sub $40, %rsp /* 32-byte shadow space + 8 keeps rsp 16-aligned */
call __MINGW_USYMBOL(_mcount_private)
add $40, %rsp
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdx
pop %rcx
pop %rax
leave
ret
#endif
/* gcc always assumes the mcount public symbol has a single leading underscore
for our target. See gcc/config/i386.h; it isn't overridden in
config/i386/cygming.h or any other places for mingw. This is the entry
for new prologue mechanism required for x64 seh calling convention. */
/* __fentry__: profiling hook used by -mfentry / SEH-compatible
   instrumentation.  Unlike _mcount it is called at the very top of the
   profiled function, before its prologue, so the stack layout differs:
   the word just above our frame is the profiled function's own return
   address (frompc).  Both selfpc and frompc are forwarded to
   _mcount_private; all volatile integer registers are preserved.

   Fixed: the COFF debug record below previously named the nonexistent
   symbol `_mcount_top` instead of the symbol actually defined here,
   `__fentry__` (compare the matching `.def _mcount` pairing above).  */
.globl __fentry__
.def __fentry__; .scl 2; .type 32; .endef
__fentry__:
#ifndef _WIN64
push %ebp
mov %esp, %ebp
push %eax
push %ecx
push %edx
movl 4(%ebp),%edx /* selfpc: return address into the profiled function */
movl 8(%ebp),%eax /* frompc: the profiled function's return address */
push %edx
push %eax
call __MINGW_USYMBOL(_mcount_private)
add $8, %esp /* drop the two cdecl arguments */
pop %edx
pop %ecx
pop %eax
leave
ret
#else
push %rbp
mov %rsp, %rbp
push %rax
push %rcx
push %rdx
push %r8
push %r9
push %r10
push %r11 /* save every volatile integer register */
movq 8(%rbp),%rdx /* selfpc */
movq 16(%rbp),%rcx /* frompc */
sub $40, %rsp /* 32-byte shadow space + 8 keeps rsp 16-aligned */
call __MINGW_USYMBOL(_mcount_private)
add $40, %rsp
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdx
pop %rcx
pop %rax
leave
ret
#endif
|
stsp/newlib-ia16
| 1,127
|
winsup/cygwin/math/ilogbl.S
|
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "ilogbl.S"
.text
#ifdef __x86_64__
.align 8
#else
.align 4
#endif
/* int ilogbl (long double x)
   Returns the unbiased binary exponent of x via FXTRACT, or INT_MAX
   (0x7fffffff) for +/-Inf.  x86-64 (Microsoft x64 ABI): the long
   double is passed by reference in %rcx; result in %eax.  x86: x is
   on the stack at 4(%esp).  */
.globl __MINGW_USYMBOL(ilogbl)
.def __MINGW_USYMBOL(ilogbl); .scl 2; .type 32; .endef
__MINGW_USYMBOL(ilogbl):
#ifdef __x86_64__
fldt (%rcx)
fxam /* Is NaN or +-Inf? */
fstsw %ax
movb $0x45, %dh
andb %ah, %dh /* isolate the C0/C2/C3 condition bits */
cmpb $0x05, %dh
je 1f /* Is +-Inf, jump. */
fxtract /* st0 = significand, st1 = exponent */
pushq %rax
fstp %st /* drop the significand */
fistpl (%rsp) /* store the exponent as a 32-bit int */
fwait
popq %rax
ret
1: fstp %st
movl $0x7fffffff, %eax /* INT_MAX for +/-Inf */
ret
#else
fldt 4(%esp)
/* I added the following ugly construct because ilogb(+-Inf) is
required to return INT_MAX in ISO C99.
-- jakub@redhat.com. */
fxam /* Is NaN or +-Inf? */
fstsw %ax
movb $0x45, %dh
andb %ah, %dh /* isolate the C0/C2/C3 condition bits */
cmpb $0x05, %dh
je 1f /* Is +-Inf, jump. */
fxtract /* st0 = significand, st1 = exponent */
pushl %eax
fstp %st
fistpl (%esp)
fwait
popl %eax
ret
1: fstp %st
movl $0x7fffffff, %eax /* INT_MAX for +/-Inf */
ret
#endif
|
stsp/newlib-ia16
| 1,914
|
winsup/cygwin/math/log10l.S
|
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "log10l.S"
.text
#ifdef __x86_64__
.align 8
#else
.align 4
#endif
one: .double 1.0 /* used to form x-1 for the fyl2xp1 path */
/* It is not important that this constant is precise. It is only
a value which is known to be on the safe side for using the
fyl2xp1 instruction. */
limit: .double 0.29
.text
#ifdef __x86_64__
.align 8
#else
.align 4
#endif
/* long double log10l (long double x)
   Computes log10(x) = log10(2) * log2(x).  For x near 1
   (|x - 1| < limit) the more accurate FYL2XP1 (log2(1+z)) form is
   used; NaN/Inf take a pass-through path.  x86-64 (Microsoft x64
   ABI): argument by reference in %rdx, 80-bit result stored through
   %rcx, which is also echoed in %rax.  x86: argument on the stack,
   result in %st(0).  */
.globl __MINGW_USYMBOL(log10l)
.def __MINGW_USYMBOL(log10l); .scl 2; .type 32; .endef
__MINGW_USYMBOL(log10l):
#ifdef __x86_64__
fldlg2 // log10(2)
fldt (%rdx) // x : log10(2)
fxam // classify x (NaN/Inf detection)
fnstsw
fld %st // x : x : log10(2)
sahf
jc 3f // in case x is NaN or Inf
4: fsubl one(%rip) // x-1 : x : log10(2)
fld %st // x-1 : x-1 : x : log10(2)
fabs // |x-1| : x-1 : x : log10(2)
fcompl limit(%rip) // x-1 : x : log10(2)
fnstsw // x-1 : x : log10(2)
andb $0x45, %ah
jz 2f // |x-1| >= limit: use the plain fyl2x path
fstp %st(1) // x-1 : log10(2)
fyl2xp1 // log10(x), accurate near x == 1
movq %rcx,%rax // MS ABI: return the hidden result pointer
movq $0,8(%rcx) // zero the padding of the 16-byte result slot
fstpt (%rcx)
ret
2: fstp %st(0) // x : log10(2)
fyl2x // log10(x)
movq %rcx,%rax
movq $0,8(%rcx)
fstpt (%rcx)
ret
3: jp 4b // in case x is Inf
fstp %st(1) // NaN: drop the extra copies, pass the NaN through
fstp %st(1)
movq %rcx,%rax
movq $0,8(%rcx)
fstpt (%rcx)
ret
#else
fldlg2 // log10(2)
fldt 4(%esp) // x : log10(2)
fxam // classify x (NaN/Inf detection)
fnstsw
fld %st // x : x : log10(2)
sahf
jc 3f // in case x is NaN or Inf
4: fsubl one // x-1 : x : log10(2)
fld %st // x-1 : x-1 : x : log10(2)
fabs // |x-1| : x-1 : x : log10(2)
fcompl limit // x-1 : x : log10(2)
fnstsw // x-1 : x : log10(2)
andb $0x45, %ah
jz 2f // |x-1| >= limit: use the plain fyl2x path
fstp %st(1) // x-1 : log10(2)
fyl2xp1 // log10(x), accurate near x == 1
ret
2: fstp %st(0) // x : log10(2)
fyl2x // log10(x)
ret
3: jp 4b // in case x is Inf
fstp %st(1) // NaN: drop the extra copies, pass the NaN through
fstp %st(1)
ret
#endif
|
stsp/newlib-ia16
| 2,918
|
winsup/cygwin/math/ceilf.S
|
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "ceilf.S"
.text
.align 4
/* float ceilf (float x)
   x86-64: rounds toward +Inf by manipulating the IEEE-754 single bits
   directly (arg/result in %xmm0).  ARM: converts with the FPSCR
   rounding mode forced to round-to-+Inf.  x86: x87 FRNDINT with a
   temporarily modified control word.  */
.globl __MINGW_USYMBOL(ceilf)
.def __MINGW_USYMBOL(ceilf); .scl 2; .type 32; .endef
#ifdef __x86_64__
.seh_proc __MINGW_USYMBOL(ceilf)
#endif
__MINGW_USYMBOL(ceilf):
#if defined(_AMD64_) || defined(__x86_64__)
subq $24, %rsp
.seh_stackalloc 24
.seh_endprologue
movd %xmm0, 12(%rsp)
movl 12(%rsp), %eax /* eax = raw IEEE-754 bits of x */
movl %eax, %ecx
movl %eax, %edx
sarl $23, %ecx
andl $255, %ecx
subl $127, %ecx /* ecx = unbiased exponent */
cmpl $22, %ecx
jg .l4 /* exponent > 22: no fraction bits (or Inf/NaN) */
testl %ecx, %ecx
js .l5 /* exponent < 0: |x| < 1 */
movl $8388607, %r8d /* 0x7fffff: full mantissa mask */
sarl %cl, %r8d /* r8d = mask of the fractional mantissa bits */
testl %eax, %r8d
je .l3 /* no fraction bits set: x is already integral */
addss .hugeval(%rip), %xmm0 /* huge + x: raises the inexact flag */
ucomiss .zeroval(%rip), %xmm0
jbe .l2
testl %eax, %eax
jle .l1 /* x < 0: dropping the fraction already rounds up */
movl $8388608, %eax /* 0x800000: one unit of the integer part */
sarl %cl, %eax
addl %eax, %edx /* positive x: bump toward +infinity */
.l1:
movl %r8d, %eax
notl %eax
andl %edx, %eax /* clear the fractional bits */
.l2:
movl %eax, 8(%rsp)
movss 8(%rsp), %xmm0
.l3:
addq $24, %rsp
ret
.p2align 4,,10
.l4:
addl $-128, %ecx
jne .l3 /* large finite value: already integral, return x */
addss %xmm0, %xmm0 /* Inf/NaN: x + x quiets signalling NaNs */
addq $24, %rsp
ret
.p2align 4,,10
.l5:
/* |x| < 1: the result is -0, +0 or 1 depending on sign/zeroness. */
addss .hugeval(%rip), %xmm0
ucomiss .zeroval(%rip), %xmm0
jbe .islesseqzero
testl %eax, %eax
js .l6 /* negative fraction: result is -0 */
movl $1065353216, %edx /* bit pattern of 1.0f */
cmovne %edx, %eax /* 0 < x < 1 -> 1.0f; +0 stays +0 */
.islesseqzero:
movl %eax, 8(%rsp)
movss 8(%rsp), %xmm0
addq $24, %rsp
ret
.p2align 4,,10
.l6:
movl $-2147483648, 8(%rsp) /* bit pattern of -0.0f */
movss 8(%rsp), %xmm0
addq $24, %rsp
ret
.seh_endproc
.section .rdata,"dr"
.align 4
.hugeval:
.long 1900671690 /* 0x7149f2ca ~ 1e30f: big enough to raise inexact */
.align 4
.zeroval:
.long 0
#elif defined(_ARM_) || defined(__arm__)
vmrs r1, fpscr /* save the FP status/control register */
bic r0, r1, #0x00c00000
orr r0, r0, #0x00400000 /* Round towards Plus Infinity */
vmsr fpscr, r0
vcvt.s32.f32 s0, s0 /* round to integer in the forced mode */
vcvt.f32.s32 s0, s0 /* and convert back to float */
vmsr fpscr, r1 /* restore the original rounding mode */
bx lr
#elif defined(_X86_) || defined(__i386__)
flds 4(%esp)
subl $8,%esp
fstcw 4(%esp) /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
movl $0x0800,%edx /* round towards +oo */
orl 4(%esp),%edx
andl $0xfbff,%edx /* RC field (bits 10-11) := 0b10, round up */
movl %edx,(%esp)
fldcw (%esp) /* load modified control word */
frndint /* round */
fldcw 4(%esp) /* restore original control word */
addl $8,%esp
ret
#endif
|
stsp/newlib-ia16
| 1,451
|
winsup/cygwin/math/ceill.S
|
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "ceill.S"
.text
#ifdef __x86_64__
.align 8
#else
.align 4
#endif
/* long double ceill (long double x)
   Rounds x up to an integral value using FRNDINT with the x87
   rounding-control field temporarily forced to round-toward-+Inf.
   x86-64 (Microsoft x64 ABI): argument by reference in %rdx, 80-bit
   result stored through %rcx (echoed in %rax).  ARM: via FPSCR.
   x86: argument on the stack, result in %st(0).  */
.globl __MINGW_USYMBOL(ceill)
.def __MINGW_USYMBOL(ceill); .scl 2; .type 32; .endef
__MINGW_USYMBOL(ceill):
#if defined(_AMD64_) || defined(__x86_64__)
fldt (%rdx)
subq $24,%rsp
fstcw 8(%rsp) /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
movl $0x0800,%edx /* round towards +oo */
orl 8(%rsp),%edx
andl $0xfbff,%edx /* RC field (bits 10-11) := 0b10, round up */
movl %edx,(%rsp)
fldcw (%rsp) /* load modified control word */
frndint /* round */
fldcw 8(%rsp) /* restore original control word */
addq $24,%rsp
movq %rcx,%rax /* MS ABI: return the hidden result pointer */
movq $0,8(%rcx) /* zero the padding of the 16-byte result slot */
fstpt (%rcx)
ret
#elif defined(_ARM_) || defined(__arm__)
vmrs r1, fpscr /* save the FP status/control register */
bic r0, r1, #0x00c00000
orr r0, r0, #0x00400000 /* Round towards Plus Infinity */
vmsr fpscr, r0
vcvtr.s32.f64 s0, d0 /* round to integer in the forced mode */
vcvt.f64.s32 d0, s0 /* and convert back to double */
vmsr fpscr, r1 /* restore the original rounding mode */
bx lr
#elif defined(_X86_) || defined(__i386__)
fldt 4(%esp)
subl $8,%esp
fstcw 4(%esp) /* store fpu control word */
movl $0x0800,%edx /* round towards +oo */
orl 4(%esp),%edx
andl $0xfbff,%edx /* RC field (bits 10-11) := 0b10, round up */
movl %edx,(%esp)
fldcw (%esp) /* load modified control word */
frndint /* round */
fldcw 4(%esp) /* restore original control word */
addl $8,%esp
ret
#endif
|
stsp/newlib-ia16
| 1,765
|
winsup/cygwin/math/floorl.S
|
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "floorl.S"
.text
#ifdef __x86_64__
.align 8
#else
.align 4
#endif
/* long double floorl (long double x)
   Rounds x down to an integral value using FRNDINT with the x87
   rounding-control field temporarily forced to round-toward--Inf.
   x86-64 (Microsoft x64 ABI): argument by reference in %rdx, 80-bit
   result stored through %rcx (echoed in %rax).  ARM: via FPSCR.
   x86: argument on the stack, result in %st(0).  */
.globl __MINGW_USYMBOL(floorl)
.def __MINGW_USYMBOL(floorl); .scl 2; .type 32; .endef
__MINGW_USYMBOL(floorl):
#if defined(_AMD64_) || defined(__x86_64__)
fldt (%rdx)
subq $24,%rsp
fstcw 8(%rsp) /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
movl $0x400,%edx /* round towards -oo */
orl 8(%rsp),%edx
andl $0xf7ff,%edx /* RC field (bits 10-11) := 0b01, round down */
movl %edx,(%rsp)
fldcw (%rsp) /* load modified control word */
frndint /* round */
fldcw 8(%rsp) /* restore original control word */
addq $24,%rsp
movq %rcx,%rax /* MS ABI: return the hidden result pointer */
movq $0,8(%rcx) /* zero the padding of the 16-byte result slot */
fstpt (%rcx)
ret
#elif defined(_ARM_) || defined(__arm__)
vmrs r1, fpscr /* save the FP status/control register */
bic r0, r1, #0x00c00000
orr r0, r0, #0x00800000 /* Round towards Minus Infinity */
vmsr fpscr, r0
vcvtr.s32.f64 s0, d0 /* round to integer in the forced mode */
vcvt.f64.s32 d0, s0 /* and convert back to double */
vmsr fpscr, r1 /* restore the original rounding mode */
bx lr
#elif defined(_X86_) || defined(__i386__)
fldt 4(%esp)
subl $8,%esp
fstcw 4(%esp) /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
movl $0x400,%edx /* round towards -oo */
orl 4(%esp),%edx
andl $0xf7ff,%edx /* RC field (bits 10-11) := 0b01, round down */
movl %edx,(%esp)
fldcw (%esp) /* load modified control word */
frndint /* round */
fldcw 4(%esp) /* restore original control word */
addl $8,%esp
ret
#endif
|
stsp/newlib-ia16
| 1,541
|
winsup/cygwin/math/remquol.S
|
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "remquol.S"
.text
#ifdef __x86_64__
.align 8
#else
.align 4
#endif
/* long double remquol (long double x, long double y, int *quo)
   IEEE remainder of x/y via FPREM1; additionally stores the low three
   bits of the quotient, carrying the sign of x/y, through quo.
   x86-64 (Microsoft x64 ABI): %rcx = hidden return slot, %rdx = &x,
   %r8 = &y, %r9 = quo.  x86: both long doubles and quo on the stack,
   result in %st(0).  */
.globl __MINGW_USYMBOL(remquol)
__MINGW_USYMBOL(remquol):
#ifdef __x86_64__
pushq %rcx /* preserve the return-slot pointer across the FPU work */
fldt (%r8) /* st1 = y */
fldt (%rdx) /* st0 = x */
1: fprem1 /* partial remainder; loop until reduction completes */
fstsw %ax
sahf
jp 1b /* C2 (mapped to PF) set means "not finished" */
fstp %st(1) /* drop y, keep the remainder */
movl %eax, %ecx
shrl $8, %eax
shrl $12, %ecx
andl $4, %ecx /* C3 -> bit 2 of the table index */
andl $3, %eax /* C0/C1 -> bits 0/1 */
orl %eax, %ecx
movl $0xef2a60, %eax /* packed table: flag combo -> 3 quotient bits */
leal (%ecx,%ecx,2),%ecx /* index * 3 (3 bits per table entry) */
shrl %cl, %eax
andl $7, %eax /* eax = |quotient| & 7 */
movl 8(%rdx), %edx /* sign/exponent halfword of x */
xorl 8(%r8), %edx /* XOR with y's: sign bit of the quotient */
testl $0x8000, %edx
jz 1f
negl %eax /* quotient is negative */
1: movl %eax, (%r9) /* *quo = signed low quotient bits */
popq %rcx
movq %rcx,%rax /* MS ABI: return the hidden result pointer */
movq $0,8(%rcx) /* zero the padding of the 16-byte result slot */
fstpt (%rcx)
ret
#else
fldt 4 +12(%esp) /* st1 = y */
fldt 4(%esp) /* st0 = x */
1: fprem1 /* partial remainder; loop until reduction completes */
fstsw %ax
sahf
jp 1b /* C2 (mapped to PF) set means "not finished" */
fstp %st(1) /* drop y, keep the remainder */
movl %eax, %ecx
shrl $8, %eax
shrl $12, %ecx
andl $4, %ecx /* C3 -> bit 2 of the table index */
andl $3, %eax /* C0/C1 -> bits 0/1 */
orl %eax, %ecx
movl $0xef2a60, %eax /* packed table: flag combo -> 3 quotient bits */
leal (%ecx,%ecx,2),%ecx /* index * 3 (3 bits per table entry) */
shrl %cl, %eax
andl $7, %eax /* eax = |quotient| & 7 */
movl 4 +12 +12(%esp), %ecx /* ecx = quo pointer */
movl 4 +8(%esp), %edx /* sign/exponent halfword of x */
xorl 4 +12 +8(%esp), %edx /* XOR with y's: sign bit of the quotient */
testl $0x8000, %edx
jz 1f
negl %eax /* quotient is negative */
1: movl %eax, (%ecx) /* *quo = signed low quotient bits */
ret
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.