| repo_id (string, len 5–115) | size (int64, 590–5.01M) | file_path (string, len 4–212) | content (string, len 590–5.01M) |
|---|---|---|---|
4ms/metamodule-plugin-sdk
| 5,234
|
plugin-libc/newlib/libc/machine/i960/strchr_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strch_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strchr (optimized assembler version for the CA)
src_addr = strchr (src_addr, char)
return a pointer to the first byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because, in several cases, strchr
will fetch ahead. Disallowing the fetch ahead would impose a severe
performance penalty.
This program handles two cases:
1) the argument starts on a word boundary
2) the argument doesn't start on a word boundary
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 src ptr; upon return it is a pointer to the matching byte, or null
g1 char to seek
g2 mask to avoid unimportant bytes in first word
g3 char to seek, broadcast to all four bytes
g4 word of the source string
g5 copy of the word
g6 extracted character
g7 byte extraction mask
g13 return address
g14
*/
.globl _strchr
.globl __strchr
.leafproc _strchr, __strchr
.align 2
/* Leafproc entry points: _strchr is the call-style entry (loads the
   address of the shared return stub Lrett into g14); __strchr is the
   bal-style entry, which expects g14 already set by the caller. */
_strchr:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strchr:
lda 0xff,g7 # byte extraction mask
and g1,g7,g1 # make char an 8-bit ordinal
shlo 8,g1,g2 # broadcast the char to four bytes
or g1,g2,g2 # g2 = char in bytes 0 and 1
shlo 16,g2,g4 # g4 = that byte pair shifted to bytes 2 and 3
cmpo g1,g7 # is char being sought 0xff?
or g4,g2,g3 # g3 = char broadcast to all four bytes
lda (g14),g13 # preserve return address
notand g0,3,g5 # extract word addr of start of src
lda 0,g14 # conform to register linkage standard
and g0,3,g6 # extract byte offset of src
ld (g5),g4 # fetch word containing at least first byte
shlo 3,g6,g6 # get shift count for making mask for first word
lda 4(g5),g0 # post-increment src word pointer
subi 1,0,g5 # mask initially all ones
#if __i960_BIG_ENDIAN__
shro g6,g5,g5 # get mask for bytes needed from first word
#else
shlo g6,g5,g5 # get mask for bytes needed from first word
#endif
notor g4,g5,g4 # set unneeded bytes to all ones
be.f Lsearch_for_0xff # branch if seeking 0xff
Lsearch_for_word_with_char_or_null:
scanbyte g3,g4 # check for byte with char
lda (g4),g5 # copy word
ld (g0),g4 # fetch next word of src
bo.f Lsearch_for_char # branch if a byte matching char was found
scanbyte 0,g5 # check for null byte
lda 4(g0),g0 # post-increment src word pointer
bno.t Lsearch_for_word_with_char_or_null # branch if not null
Lnot_found:
mov 0,g0 # char not found. Return null
Lexit_code:
bx (g13) # g0 = addr of char in src (or null); g14 = 0
Lrett:
ret
Lsearch_for_char: # word in g5 holds a candidate byte; scan it bytewise
subo 5,g0,g0 # back up the byte pointer
Lsearch_for_char.a:
#if __i960_BIG_ENDIAN__
rotate 8,g5,g5 # shift word to position next byte
#endif
and g5,g7,g6 # extract byte
cmpo g1,g6 # is it char?
lda 1(g0),g0 # bump src byte ptr
#if ! __i960_BIG_ENDIAN__
shro 8,g5,g5 # shift word to position next byte
#endif
be.f Lexit_code # return; g0 = address of the matching byte
cmpobne.t 0,g6,Lsearch_for_char.a # quit if null comes before char
b Lnot_found
Lsearch_for_0xff: # scanbyte cannot distinguish 0xff from the null test, so
# the first word is conditioned specially for this case
lda 0xf0f0f0f0,g2 # make first comparison mask for char=-1 case.
or g5,g2,g2 # merge with the first-word byte mask in g5
and g4,g2,g4 # make unimportant bytes of first word 0x0f
b Lsearch_for_word_with_char_or_null
/* end of strchr */
|
4ms/metamodule-plugin-sdk
| 8,008
|
plugin-libc/newlib/libc/machine/i960/strcmp_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcm_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcmp (optimized assembler version for the CA)
result = strcmp (src1_addr, src2_addr)
compare the null terminated string pointed to by src1_addr to
the string space pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicly less than src2_addr, and 1
if it is lexicly greater.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strcmp
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source1 is word aligned, source2 is not
4) source2 is word aligned, source1 is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g14 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers are sufficient to implement the routine.
The registers are used as follows:
g0 original src1 ptr; return result
g1 src2 ptr; 0xff -- byte extraction mask
g2 src1 word ptr
g3 src2 word ptr
Little endian:
g4 lsw of src1
g5 msw of src1
g6 src2 word
g7 extracted src1
Big endian:
g4 msw of src1
g5 lsw of src1
g6 extracted src1
g7 src2 word
g13 return address
g14 shift count
*/
#if __i960_BIG_ENDIAN__
#define MSW g4
#define LSW g5
#define SRC1 g6
#define SRC2 g7
#else
#define LSW g4
#define MSW g5
#define SRC2 g6
#define SRC1 g7
#endif
.globl _strcmp
.globl __strcmp
.leafproc _strcmp, __strcmp
.align 2
/* Leafproc entry points: _strcmp is the call-style entry (loads the
   address of the shared return stub Lrett into g14); __strcmp is the
   bal-style entry, which expects g14 already set by the caller. */
_strcmp:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strcmp:
Lrestart: # also re-entered from Lsrc2_unaligned with pointers adjusted
# so that src2 becomes word aligned
notand g0,3,g2 # extract word addr of start of src1
lda (g14),g13 # preserve return address
#if __i960_BIG_ENDIAN__
cmpo g0,g2 # check alignment of src1
#endif
ld (g2),LSW # fetch word with at least first byte of src1
notand g1,3,g3 # extract word addr of start of src2
ld 4(g2),MSW # fetch second word of src1
#if __i960_BIG_ENDIAN__
bne Lsrc1_unaligned # branch if src1 is unaligned
cmpo g3,g1 # check alignment of src2
ld (g3),SRC2 # fetch word with at least first byte of src2
mov LSW,SRC1 # extract word of src1
lda 8(g2),g2 # advance src1 word addr
bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
/* src2 is word aligned */
Lwloop2: # word comparing loop
cmpo SRC2,SRC1 # compare src1 and src2 words
lda 0xff000000,g1 # byte extraction mask
mov MSW,LSW # move msw of src1 to lsw
ld (g2),MSW # pre-fetch next msw of src1
addo 4,g2,g2 # post-increment src1 addr
lda 4(g3),g3 # pre-increment src2 addr
bne.f Lcloop # branch if src1 and src2 unequal
scanbyte 0,SRC1 # check for null byte in src1 word
ld (g3),SRC2 # pre-fetch next word of src2
mov LSW,SRC1 # extract word of src1
lda 0,g0 # prepare to return zero, indicating equality
bno.t Lwloop2 # branch if null byte not encountered
/* words were equal and contained null byte */
mov 0,g14 # conform to register conventions
bx (g13) # return
Lsrc1_unaligned:
#endif
cmpo g3,g1 # check alignment of src2
ld (g3),SRC2 # fetch word with at least first byte of src2
shlo 3,g0,g14 # compute shift count for src1
#if __i960_BIG_ENDIAN__
subo g14,0,g14 # 32 - shift count for big endian.
#endif
eshro g14,g4,SRC1 # extract word of src1
lda 8(g2),g2 # advance src1 word addr
bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
/* at least src2 is word aligned */
Lwloop: # word comparing loop
cmpo SRC2,SRC1 # compare src1 and src2 words
#if __i960_BIG_ENDIAN__
lda 0xff000000,g1 # byte extraction mask
#else
lda 0xff,g1 # byte extraction mask
#endif
mov MSW,LSW # move msw of src1 to lsw
ld (g2),MSW # pre-fetch next msw of src1
addo 4,g2,g2 # post-increment src1 addr
lda 4(g3),g3 # pre-increment src2 addr
bne.f Lcloop # branch if src1 and src2 unequal
scanbyte 0,SRC1 # check for null byte in src1 word
ld (g3),SRC2 # pre-fetch next word of src2
eshro g14,g4,SRC1 # extract word of src1
lda 0,g0 # prepare to return zero, indicating equality
bno.t Lwloop # branch if null byte not encountered
/* words were equal and contained null byte */
mov 0,g14 # conform to register conventions
bx (g13) # return
Lcloop_setup: # setup for coming from Lsrc2_unaligned
mov LSW,SRC1 # restore extracted src1 word
#if __i960_BIG_ENDIAN__
lda 0xff000000,g1 # byte extraction mask
#else
lda 0xff,g1 # byte extraction mask
#endif
Lcloop: # character comparing loop
and SRC2,g1,g3 # extract next char of src2
and SRC1,g1,g0 # extract next char of src1
cmpobne.f g0,g3,.diff # chars differ; go decide ordering
cmpo 0,g0 # check for null byte
#if __i960_BIG_ENDIAN__
shro 8,g1,g1 # shift mask for next byte
#else
shlo 8,g1,g1 # shift mask for next byte
#endif
bne.t Lcloop # branch if null not reached
/* words are equal up thru null byte */
mov 0,g14
bx (g13) # g0 = 0 (src1 == src2)
Lrett:
ret
.diff: # condition code still holds the cmpobne comparison result
mov 0,g14
bl Lless_than_exit # taken when src1 char < src2 char
Lgreater_than_exit:
mov 1,g0
bx (g13) # g0 = 1 (src1 > src2)
Lless_than_exit:
subi 1,0,g0
bx (g13) # g0 = -1 (src1 < src2)
Lsrc2_unaligned:
mov SRC1,LSW # retain src1 extracted word
ld 4(g3),SRC1 # fetch second word of src2
shlo 3,g1,MSW # compute shift count for src2
#if __i960_BIG_ENDIAN__
subo MSW,0,MSW # 32 - shift count for big endian.
#endif
eshro MSW,g6,SRC2 # extract word of src2
cmpo LSW,SRC2 # compare src1 and src2 words
notor g1,3,MSW # first step in computing new src1 ptr
lda 4(g3),g1 # set new src2 ptr
bne.f Lcloop_setup # first four bytes differ
scanbyte 0,LSW # check for null byte
lda (g13),g14 # prepare return pointer for Lrestart
subo MSW,g0,g0 # second (final) step in computing new src1 ptr
bno.t Lrestart # if null byte not encountered, continue
/* with both string fetches shifted such that */
/* src2 is now word aligned. */
mov 0,g14 # conform to register conventions.
lda 0,g0 # return indicator of equality.
bx (g13)
|
4ms/metamodule-plugin-sdk
| 10,205
|
plugin-libc/newlib/libc/machine/i960/strncpy_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "sncpy_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncpy (optimized assembler version for the CA)
dest_addr = strncpy (dest_addr, src_addr, max_bytes)
copy the null terminated string pointed to by src_addr to
the string space pointed to by dest_addr. Return the original
dest_addr. If the source string is shorter than max_bytes,
then null-pad the destination string.
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strcpy
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source is word aligned, destination is not
4) destination is word aligned, source is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g13 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers, plus tricky use of g14 are sufficient
to implement the routine. The registers are used as follows:
g0 original dest ptr; not modified, so that it may be returned.
g1 src ptr; shift count
g2 max_bytes
g3 src ptr (word aligned)
g4 dest ptr (word aligned)
g5 0xff -- byte extraction mask
Little endian:
g6 lsw of double word for extraction of 4 bytes
g7 msw of double word for extraction of 4 bytes
Big endian:
g6 msw of double word for extraction of 4 bytes
g7 lsw of double word for extraction of 4 bytes
g13 return address
g14 byte extracted.
*/
#if __i960_BIG_ENDIAN__
#define MSW g6
#define LSW g7
#else
#define LSW g6
#define MSW g7
#endif
.globl _strncpy
.globl __strncpy
.leafproc _strncpy, __strncpy
.align 2
/* Leafproc entry points: _strncpy is the call-style entry (loads the
   address of the shared return stub Lrett into g14); __strncpy is the
   bal-style entry, which expects g14 already set by the caller. */
_strncpy:
#ifndef __PIC
lda Lrett,g14
#else
lda Lrett-(.+8)(ip),g14
#endif
__strncpy:
notand g1,3,g3 # extract word addr of start of src
lda (g14),g13 # preserve return address
cmpibge.f 0,g2,Lexit_code # Lexit if number of bytes to move is <= zero.
cmpo g3,g1 # check alignment of src
ld (g3),LSW # fetch word containing at least first byte
notand g0,3,g4 # extract word addr of start of dest
lda 4(g3),g3 # advance src word addr
bne.f Lcase_245 # branch if src is NOT word aligned
Lcase_13: # src is word aligned (cases 1 and 3)
cmpo g0,g4 # check alignment of dest
lda 0xff,g5 # load mask for byte extraction
subo 4,g4,g4 # store is pre-incrementing; back up dest addr
bne.f Lcase_3 # branch if dest not word aligned
Lcase_1: # src and dest are word aligned
Lcase_1_wloop: # word copying loop
cmpi g2,4 # check for fewer than four bytes to move
lda (LSW),g1 # keep a copy of the src word
addo 4,g4,g4 # pre-increment dest addr
bl.f Lcase_1_cloop.a # branch if fewer than four bytes to copy
scanbyte 0,g1 # check for null byte in src word
ld (g3),LSW # pre-fetch next word of src
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
bo.f Lcase_1_cloop.c # branch if word contains null byte
addo 4,g3,g3 # post-increment src addr
st g1,(g4) # store word in dest string
b Lcase_1_wloop
Lcase_3_cloop.a:
Lcase_1_cloop.a: # character copying loop (max_bytes <= 3)
#if __i960_BIG_ENDIAN__
rotate 8,g1,g1 # move next byte into position for extraction
#endif
and g5,g1,g14 # extract next char
Lcase_1_cloop.b:
cmpdeci 0,g2,g2 # is max_bytes exhausted?
be.f Lexit_code # Lexit if max_bytes is exhausted
cmpo 0,g14 # check for null byte
stob g14,(g4) # store the byte in dest
#if ! __i960_BIG_ENDIAN__
shro 8,g1,g1 # move next byte into position for extraction
#endif
lda 1(g4),g4 # post-increment dest byte addr
bne.t Lcase_1_cloop.a # branch if null not reached
b Lcase_1_cloop.b # null hit: keep storing zero bytes (g14 stays 0)
Lexit_code:
mov 0,g14 # conform to register conventions
bx (g13) # g0 = addr of dest; g14 = 0
Lrett:
ret
Lcase_1_cloop.c: # current word (g1) contains the terminating null
Lcase_3_cloop.c:
#if __i960_BIG_ENDIAN__
rotate 24,g5,g5 # move mask into position for testing next byte
#endif
and g5,g1,g14 # extract next char
cmpo 0,g14 # check for null byte
#if ! __i960_BIG_ENDIAN__
lda (g5),LSW # keep a copy of the current mask
shlo 8,g5,g5 # move mask into position for testing next byte
#endif
bne.t Lcase_1_cloop.c # branch if null not reached
#if __i960_BIG_ENDIAN__
subo 1,g5,g5 # null pad.
andnot g5,g1,g1 # last bytes to copy, and null pad rest of word
#else
subo 1,LSW,g5 # mask to get last bytes to copy, and null pad
and g5,g1,g1 # last bytes to copy, and null pad rest of word
#endif
st g1,(g4) # store final word: copied bytes plus zero padding
Lcase_1_zwloop: # zero word loop (null padding; g14 = 0 here)
cmpi g2,4 # check for fewer than four bytes to move
addo 4,g4,g4 # pre-increment dest addr
bl.f Lcase_1_cloop.b # branch if fewer than four bytes to copy
subo 4,g2,g2 # decrease max_byte count by the 4 bytes moved
st g14,(g4) # store word in dest string
b Lcase_1_zwloop
Lcase_3: # src is word aligned; dest is not
addo 8,g4,g4 # move dest word ptr to first word boundary
lda (g0),g1 # copy dest byte ptr
mov LSW,MSW # make copy of first word of src
lda 32,g14 # initialize shift count to zero (mod 32)
Lcase_25:
Lcase_3_cloop_at_start: # character copying loop for start of dest str
cmpdeci 0,g2,g2 # is max_bytes exhausted?
#if __i960_BIG_ENDIAN__
shro 24,MSW,g5 # extract next char
#else
and g5,MSW,g5 # extract next char
#endif
be.f Lexit_code # Lexit if max_bytes is exhausted
cmpo 0,g5 # check for null byte
stob g5,(g1) # store the byte in dest
addo 1,g1,g1 # post-increment dest ptr
lda 0xff,g5 # re-initialize byte extraction mask
bne.t 1f # drop thru if null byte reached (to pad)
movl 0,g6 # blank out remainder of input buffer
1:
cmpo g1,g4 # have we reached word boundary in dest yet?
#if __i960_BIG_ENDIAN__
lda -8(g14),g14 # augment the shift counter
rotate 8,MSW,MSW # move next byte into position for extraction
#else
lda 8(g14),g14 # augment the shift counter
shro 8,MSW,MSW # move next byte into position for extraction
#endif
bne.t Lcase_3_cloop_at_start # loop until dest reaches word boundary
ld (g3),MSW # fetch msw of operand for double shift
Lcase_4: # dest is (now) word aligned; src may not be
#if __i960_BIG_ENDIAN__
cmpobne 0,g14,Lcase_3_wloop # branch if src is still unaligned.
Lcase_3_wloop2:
cmpi g2,4 # less than four bytes to move?
lda (LSW),g1 # extract 4 bytes of src
lda 4(g3),g3 # post-increment src word addr
bl.f Lcase_3_cloop.a # branch if < four bytes left to move
scanbyte 0,g1 # check for null byte
lda (MSW),LSW # move msw to lsw
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop.c # branch if word contains null byte
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop2
#endif
Lcase_3_wloop:
cmpi g2,4 # less than four bytes to move?
eshro g14,g6,g1 # extract 4 bytes of src
lda 4(g3),g3 # post-increment src word addr
bl.f Lcase_3_cloop.a # branch if < four bytes left to move
scanbyte 0,g1 # check for null byte
lda (MSW),LSW # move msw to lsw
subi 4,g2,g2 # decrease max_byte count by the 4 bytes moved
ld (g3),MSW # pre-fetch msw of operand for double shift
bo.f Lcase_3_cloop.c # branch if word contains null byte
st g1,(g4) # store 4 bytes to dest
addo 4,g4,g4 # post-increment dest ptr
b Lcase_3_wloop
Lcase_245: # src is NOT word aligned (cases 2, 4, 5)
cmpo g0,g4 # check alignment of dest
ld (g3),MSW # pre-fetch second half
and 3,g1,g1 # compute shift count
lda 0xff,g5 # load mask for byte extraction
#if __i960_BIG_ENDIAN__
subo g1,4,g14 # adjust shift count for big endian.
shlo 3,g14,g14
#else
shlo 3,g1,g14
#endif
be.t Lcase_4 # branch if dest is word aligned
or g4,g1,g1 # is src earlier in word, later, or sync w/ dst
cmpo g0,g1 # < indicates first word of dest has more bytes
/* than first word of source. */
lda 4(g4),g4 # move dest word addr to first word boundary
eshro g14,g6,g5 # extract four bytes
lda (g0),g1 # g1 = dest byte ptr
bg.f 1f
mov MSW,LSW
lda 4(g3),g3 # move src word addr to second word boundary
1:
mov g5,MSW
lda 0xff,g5 # restore byte extraction mask
b Lcase_25
/* end of strncpy */
|
4ms/metamodule-plugin-sdk
| 5,015
|
plugin-libc/newlib/libc/machine/i960/strrchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strrchr.s"
#ifdef __i960_BIG_ENDIAN__
#error "This does not work in big-endian"
#endif
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strrchr (optimized assembler version for the 80960K series)
src_addr = strrchr (src_addr, char)
return a pointer to the last byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because strrchr fetches ahead.
Disallowing the fetch ahead would impose a severe performance penalty.
Strategy:
Fetch the source string by words and scanbyte the words for the
char until either a word with the byte is found or the null byte is
encountered. In the former case, move through the word to find the
matching byte and save its memory address, then continue the search.
In the latter case, return the saved address, or zero (null) if none
was ever found to save.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
*/
.globl _strrchr
.globl __strrchr
.leafproc _strrchr, __strrchr
.align 2
/* Leafproc entry points: _strrchr is the call-style entry (loads the
   address of the shared return stub Lrett into g14); __strrchr is the
   bal-style entry, which expects g14 already set by the caller. */
_strrchr:
#ifdef __PIC
lda Lrett-(.+8)(ip),g14
#else
lda Lrett,g14
#endif
__strrchr:
ld (g0),g4 # fetch first word
lda 0xff,g7 # byte extraction mask
and g1,g7,g1 # make char an 8-bit ordinal
shlo 8,g1,g2 # broadcast the char to four bytes
or g1,g2,g2 # g2 = char in bytes 0 and 1
shlo 16,g2,g5 # g5 = that byte pair shifted to bytes 2 and 3
or g2,g5,g3 # g3 = char broadcast to all four bytes
mov g14,g13 # preserve return address
addo 4,g0,g2 # post-increment src pointer
mov 1,g0 # prepare to return null pointer
mov g3,g6 # prepare to return null pointer
Lsearch_for_word_with_char_or_null:
mov g4,g5 # copy word
scanbyte 0,g5 # check for null byte
ld (g2),g4 # fetch next word of src
bo Lword_has_null # branch if null found
scanbyte g3,g5 # check for byte with char
addo 4,g2,g2 # post-increment src pointer
bno Lsearch_for_word_with_char_or_null # branch if no copy of char
mov g5,g6 # save word that has char in it (at least once)
subo 4,g2,g0 # save addr of byte after word with char
b Lsearch_for_word_with_char_or_null
Lword_has_null:
subo 4,g2,g2 # move src pointer back to word with null
Lfind_null: # scan the final word a byte at a time up to the null
addo 1,g2,g2 # advance src pointer to byte after current
and g7,g5,g14 # extract next byte
cmpo g1,g14 # is current byte char?
shro 8,g5,g5 # position next byte for extraction
bne 1f # skip if not char sought after
mov g2,g0 # save addr of byte after char
mov g3,g6 # save word of all char to short circuit search
1: cmpobne 0,g14,Lfind_null # is current byte null?
Lfind_last_char: # g6 = last saved word containing char; g14 = 0 here,
# which satisfies the leaf return convention below
rotate 8,g6,g6 # position next highest byte
and g7,g6,g5 # extract byte
subo 1,g0,g0 # move pointer to that byte (or nullify)
cmpobne g5,g1,Lfind_last_char # branch if not at char
bx (g13) # g0 = addr of char in src (or null); g14 = 0
Lrett:
ret
/* end of strrchr */
|
4ms/metamodule-plugin-sdk
| 8,544
|
plugin-libc/newlib/libc/machine/i960/strncmp_ca.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "sncmp_ca.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncmp (optimized assembler version for the CA)
result = strncmp (src1_addr, src2_addr, max_bytes)
compare the null terminated string pointed to by src1_addr to
the string space pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicly less than src2_addr, and 1
if it is lexicly greater. Do not compare more than max_bytes bytes.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last word of the program's
allocated memory space. This is so because, in several cases, strncmp
will fetch ahead one word. Disallowing the fetch ahead would impose
a severe performance penalty.
This program handles five cases:
1) both arguments start on a word boundary
2) neither are word aligned, but they are offset by the same amount
3) source1 is word aligned, source2 is not
4) source2 is word aligned, source1 is not
5) neither is word aligned, and they are offset by differing amounts
At the time of this writing, only g0 thru g7 and g14 are available
for use in this leafproc; other registers would have to be saved and
restored. These nine registers are sufficient to implement the routine.
The registers are used as follows:
g0 original src1 ptr; extracted word; return result
g1 src2 ptr; 0xff -- byte extraction mask
g2 maximum number of bytes to compare
g3 src2 word ptr
Little endian:
g4 lsw of src1
g5 msw of src1
g6 src2 word
g7 src1 word ptr
Big endian:
g4 msw of src1
g5 lsw of src1
g6 src1 word ptr
g7 src2 word
g13 return address
g14 shift count
*/
#if __i960_BIG_ENDIAN__
#define MSW g4
#define LSW g5
#define SRC1 g6
#define SRC2 g7
#else
#define LSW g4
#define MSW g5
#define SRC2 g6
#define SRC1 g7
#endif
/*
 * result = strncmp (src1_addr, src2_addr, max_bytes)
 * in:  g0 = src1, g1 = src2, g2 = max_bytes
 * out: g0 = 0 (equal), -1 (src1 < src2) or 1 (src1 > src2)
 * leafproc: return address arrives in g14 (bal) or is loaded at _strncmp (call)
 */
.globl _strncmp
.globl __strncmp
.leafproc _strncmp, __strncmp
.align 2
_strncmp:
#ifndef __PIC
lda Lrett,g14 # call entry: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute the ret stub address ip-relative
#endif
__strncmp:
Lrestart:
notand g0,3,SRC1 # extract word addr of start of src1
lda (g14),g13 # preserve return address
cmpibge.f 0,g2,Lequal_exit # return equality if number of bytes to
/* compare is none. */
#if __i960_BIG_ENDIAN__
cmpo g0,SRC1 # check alignment of src1
#endif
ld (SRC1),LSW # fetch word with at least first byte of src1
notand g1,3,g3 # extract word addr of start of src2
ld 4(SRC1),MSW # fetch second word of src1
#if __i960_BIG_ENDIAN__
bne Lsrc1_unaligned # branch if src1 is unaligned
cmpo g3,g1 # check alignment of src2
ld (g3),SRC2 # fetch word with at least first byte of src2
shlo 3,g0,g14 # compute shift count for src1
subo g14,0,g14 # adjust shift count for big endian
lda 8(SRC1),SRC1 # advance src1 word addr
bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
/* src2 is word aligned */
mov LSW,g0 # g0 = current word of src1
Lwloop2: # word comparing loop
cmpo SRC2,g0 # compare src1 and src2 words
lda 0xff000000,g1 # byte extraction mask
mov MSW,LSW # move msw of src1 to lsw
ld (SRC1),MSW # pre-fetch next msw of src1
addo 4,SRC1,SRC1 # post-increment src1 addr
lda 4(g3),g3 # pre-increment src2 addr
bne.f Lcloop # branch if src1 and src2 unequal
scanbyte 0,g0 # check for null byte in src1 word
ld (g3),SRC2 # pre-fetch next word of src2
mov LSW,g0 # extract word of src1
subi 4,g2,g2 # decrement maximum byte count
bo.f Lequal_exit # branch if null byte encountered
cmpibl.t 0,g2,Lwloop2 # branch if max_bytes not reached yet
b Lequal_exit # strings were equal up through max_bytes
Lsrc1_unaligned:
#endif
cmpo g3,g1 # check alignment of src2
ld (g3),SRC2 # fetch word with at least first byte of src2
shlo 3,g0,g14 # compute shift count for src1
#if __i960_BIG_ENDIAN__
subo g14,0,g14 # adjust shift count for big endian
#endif
eshro g14,g4,LSW # extract word of src1 (double-reg shift of g4:g5)
lda 8(SRC1),SRC1 # advance src1 word addr
bne.f Lsrc2_unaligned # branch if src2 is NOT word aligned
/* at least src2 is word aligned */
mov LSW,g0 # g0 = current extracted word of src1
Lwloop: # word comparing loop
cmpo SRC2,g0 # compare src1 and src2 words
#if __i960_BIG_ENDIAN__
lda 0xff000000,g1 # byte extraction mask
#else
lda 0xff,g1 # byte extraction mask
#endif
mov MSW,LSW # move msw of src1 to lsw
ld (SRC1),MSW # pre-fetch next msw of src1
addo 4,SRC1,SRC1 # post-increment src1 addr
lda 4(g3),g3 # pre-increment src2 addr
bne.f Lcloop # branch if src1 and src2 unequal
scanbyte 0,g0 # check for null byte in src1 word
ld (g3),SRC2 # pre-fetch next word of src2
eshro g14,g4,g0 # extract word of src1
subi 4,g2,g2 # decrement maximum byte count
bo.f Lequal_exit # branch if null byte encountered
cmpibl.t 0,g2,Lwloop # branch if max_bytes not reached yet
b Lequal_exit # strings were equal up through max_bytes
Lcloop_setup: # setup for coming from Lsrc2_unaligned
mov LSW,g0 # restore extracted src1 word
#if __i960_BIG_ENDIAN__
lda 0xff000000,g1 # byte extraction mask
#else
lda 0xff,g1 # byte extraction mask
#endif
Lcloop: # character comparing loop
and SRC2,g1,g3 # extract next char of src2
and g0,g1,LSW # extract next char of src1
cmpobne.f LSW,g3,.diff # branch if the bytes differ
cmpo 0,LSW # check for null byte
#if __i960_BIG_ENDIAN__
shro 8,g1,g1 # shift mask for next byte
#else
shlo 8,g1,g1 # shift mask for next byte
#endif
subi 1,g2,g2 # decrement character counter
bne.t Lcloop # branch if null not reached
/* words are equal up thru null byte */
Lequal_exit:
mov 0,g14 # conform to register conventions
lda 0,g0 # return zero, indicating equality
bx (g13) # return
Lrett:
ret
.diff:
mov 0,g14 # conform to register conventions
bl Lless_than_exit # flags still set from the byte compare above
Lgreater_than_exit:
cmpibge.f 0,g2,Lequal_exit # branch if difference is beyond max_bytes
mov 1,g0
bx (g13) # g0 = 1 (src1 > src2)
Lless_than_exit:
cmpibge.f 0,g2,Lequal_exit # branch if difference is beyond max_bytes
subi 1,0,g0
bx (g13) # g0 = -1 (src1 < src2)
Lsrc2_unaligned:
notor g1,3,g14 # first step in computing new src1 ptr
ld 4(g3),SRC1 # fetch second word of src2
shlo 3,g1,MSW # compute shift count for src2
#if __i960_BIG_ENDIAN__
subo MSW,0,MSW # adjust shift count for big endian
#endif
eshro MSW,g6,SRC2 # extract word of src2
cmpo LSW,SRC2 # compare src1 and src2 words
lda 4(g3),g1 # set new src2 ptr
bne.f Lcloop_setup # first four bytes differ
scanbyte 0,LSW # check for null byte
subo g14,g0,g0 # second (final) step in computing new src1 ptr
addi g14,g2,g2 # compute new max_bytes too
lda (g13),g14 # prepare return pointer for Lrestart
bno.t Lrestart # if null byte not encountered, continue
/* with both string fetches shifted such that*/
/* src2 is now word aligned.*/
mov 0,g14 # conform to register conventions.
lda 0,g0 # return indicator of equality.
bx (g13) # return equality (null hit in first four bytes)
|
4ms/metamodule-plugin-sdk
| 4,835
|
plugin-libc/newlib/libc/machine/i960/memchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memchr.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memchr (optimized assembler version for the 80960K series)
src_addr = memchr (src_addr, char, max_bytes)
searching from src_addr for a span of max_bytes bytes, return a
pointer to the first byte in the source array that contains the
indicated char. Return null if the char is not found.
Undefined behavior will occur if the last byte of the source array
is in the last two words of the program's allocated memory space.
This is so because memchr fetches ahead. Disallowing the fetch
ahead would impose a severe performance penalty.
Strategy:
Fetch the source array by words and scanbyte the words for the
char until either a word with the byte is found or max_bytes is
exhausted. In the former case, move through the word to find the
matching byte and return its memory address. In the latter case,
return zero (null).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
2) Rather than decrementing max_bytes to zero, I calculate the
address of the byte after the last byte of the source array, and
quit when the source byte pointer passes that. Refining, actually
I calculate the address of the fifth byte after the last byte of
the source array, because the source byte pointer is ahead of the
actual examination point due to fetch ahead.
*/
/*
 * ptr = memchr (src_addr, char, max_bytes)
 * in:  g0 = src, g1 = char, g2 = max_bytes
 * out: g0 = address of first byte equal to char, or 0 if not found
 */
.globl _memchr
.globl __memchr
.leafproc _memchr, __memchr
.align 2
_memchr:
#ifndef __PIC
lda Lrett,g14 # call entry: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute the ret stub address ip-relative
#endif
__memchr:
mov g14,g13 # preserve return address
lda 0xff,g7 # byte extraction mask
and g1,g7,g1 # make char an 8-bit ordinal
mov 0,g14 # conform to register linkage standard
cmpibge 0,g2,Lnot_found # do nothing if max_bytes <= 0
addo 4,g0,g6 # post-increment src word pointer
addo g2,g6,g2 # compute ending address from start and len
/* (biased up by 4 to match the fetch-ahead pointer g6) */
ld (g0),g4 # fetch first word
shlo 8,g1,g3 # broadcast the char to four bytes
or g1,g3,g3
shlo 16,g3,g5
or g3,g5,g3
Lsearch_for_word_with_char:
mov g4,g5 # keep a copy of word (g5 gets shifted below)
scanbyte g3,g5 # check for byte with char
ld (g6),g4 # fetch next word of src
bo Lsearch_for_char # branch if a byte matched char
addo 4,g6,g6 # post-increment src word pointer
cmpobge g2,g6,Lsearch_for_word_with_char # loop while end addr not passed
Lnot_found:
mov 0,g0 # char not found. Return null
bx (g13) # g0 = addr of char in src (or null); g14 = 0
Lrett:
ret
Lsearch_for_char:
cmpobe.f g6,g2,Lnot_found # quit if max_bytes exhausted
and g5,g7,g0 # extract byte
cmpo g1,g0 # is it char?
addo 1,g6,g6 # bump src byte ptr
shro 8,g5,g5 # shift word to position next byte
bne.t Lsearch_for_char
subo 5,g6,g0 # back up: g6 is 4 ahead (fetch-ahead) + 1 (post-bump)
bx (g13)
/* end of memchr */
|
4ms/metamodule-plugin-sdk
| 4,628
|
plugin-libc/newlib/libc/machine/i960/memcmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "memcmp.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure memcmp (optimized assembler version for the 80960K series)
result = memcmp (src1_addr, src2_addr, max_bytes)
compare the byte array pointed to by src1_addr to the byte array
pointed to by src2_addr. Return 0 iff the arrays are equal, -1 iff
src1_addr is lexicographically less than src2_addr, and 1 iff it is
lexicographically greater. Do not compare more than max_bytes bytes.
Undefined behavior will occur if the end of either source array
is in the last two words of the program's allocated memory space.
This is so because memcmp fetches ahead. Disallowing the fetch ahead
would impose a severe performance penalty.
Strategy:
Fetch the source strings by words and compare the words until either
a differing word is found or max_bytes is exhausted. In the former
case, move through the words to find the differing byte and return
plus or minus one, appropriately. In the latter case, return zero
(equality).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
2) Rather than decrementing max_bytes to zero, I calculate the
address of the byte after the last byte of the source_1 array, and
quit when the source byte pointer passes that.
*/
/*
 * result = memcmp (src1_addr, src2_addr, max_bytes)
 * in:  g0 = src1, g1 = src2, g2 = max_bytes
 * out: g0 = 0 (equal), -1 (src1 < src2) or 1 (src1 > src2)
 */
.globl _memcmp
.globl __memcmp
.leafproc _memcmp,__memcmp
.align 2
_memcmp:
#ifndef __PIC
lda .Lrett,g14 # call entry: g14 = address of the ret stub
#else
lda .Lrett-(.+8)(ip),g14 # PIC: compute the ret stub address ip-relative
#endif
__memcmp:
mov g14,g13 # preserve return address
ldconst 0,g14 # conform to register conventions
cmpibge 0,g2,Lequal_exit # quit if max_bytes <= 0
addo g0,g2,g2 # calculate byte addr of byte after last in src1
.Lwloop:
cmpo g0,g2 # compare src1 ptr to end address
ld (g0), g5 # fetch word of source_1
bge Lequal_exit # quit (equal) if max_bytes exhausted
ld (g1), g3 # fetch word of source_2
addo 4,g0,g0 # post-increment source_1 byte ptr
addo 4,g1,g1 # post-increment source_2 byte ptr
cmpobe g5,g3,.Lwloop # branch if source words are equal
ldconst 0xff,g4 # byte extraction mask
subo 4,g0,g0 # back up src1 pointer
.Lcloop: and g4,g5,g7 # extract and compare individual bytes
and g4,g3,g6
cmpobne g7,g6,.diff # branch if they are different
shlo 8,g4,g4 # position mask for next extraction
addo 1,g0,g0
cmpobl g0,g2,.Lcloop # loop while max_bytes not exhausted
Lequal_exit:
mov 0,g0 # return zero, indicating equality
bx (g13)
.Lrett:
ret
.diff: bl .neg # arrays differ; flags still set from byte compare
/* return 1 or -1 appropriately */
mov 1,g0
bx (g13) # g0 = 1 (src1 > src2)
.neg: subi 1,0,g0 # g0 = -1 (src1 < src2)
.Lexit:
bx (g13)
/* end of memcmp */
|
4ms/metamodule-plugin-sdk
| 4,046
|
plugin-libc/newlib/libc/machine/i960/strlen.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strlen.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strlen (optimized assembler version for the 80960K series)
src_addr = strlen (src_addr)
return the number of bytes that precede the null byte in the
string pointed to by src_addr.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last four words of the program's
allocated memory space. This is so because strlen fetches ahead
several words. Disallowing the fetch ahead would impose a severe
performance penalty.
Strategy:
Fetch the source array by long-words and scanbyte the words for the
null byte until found. Examine the word in which the null byte is
found, to determine its actual position, and return the length.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that many source strings will be word
aligned to begin with.
*/
/*
 * length = strlen (src_addr)
 * in:  g0 = src
 * out: g0 = number of bytes preceding the terminating null
 */
.globl _strlen
.globl __strlen
.leafproc _strlen, __strlen
.align 2
_strlen:
#ifndef __PIC
lda Lrett,g14 # call entry: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute the ret stub address ip-relative
#endif
__strlen:
mov g14,g13 # preserve return address
ldl (g0),g4 # fetch first two words into g4:g5
addo 8,g0,g2 # post-increment src word pointer
lda 0xff,g3 # byte extraction mask
Lsearch_for_word_with_null_byte:
scanbyte 0,g4 # check for null byte in first word of pair
mov g5,g7 # copy second word
bo.f Lsearch_for_null # branch if null found in first word
scanbyte 0,g7 # check for null byte in second word
ldl (g2),g4 # fetch next pair of word of src
addo 8,g2,g2 # post-increment src word pointer
bno Lsearch_for_word_with_null_byte # branch if null not found yet
subo 4,g2,g2 # back up: null is in the second word of the pair
mov g7,g4 # move word with null to search word
Lsearch_for_null:
subo 9,g2,g2 # back up: g2 was 8 past the word with the null;
/* after this, g2 points one byte before that word */
Lsearch_for_null.a:
and g4,g3,g14 # extract byte
cmpo 0,g14 # is it null?
addo 1,g2,g2 # bump src byte ptr
shro 8,g4,g4 # shift word to position next byte
bne Lsearch_for_null.a
Lexit_code:
subo g0,g2,g0 # calculate string length
bx (g13) # g0 = length; g14 = 0 (the null byte just extracted)
Lrett:
ret
/* end of strlen */
|
4ms/metamodule-plugin-sdk
| 4,364
|
plugin-libc/newlib/libc/machine/i960/strchr.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strchr.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strchr (optimized assembler version for the 80960K series)
src_addr = strchr (src_addr, char)
return a pointer to the first byte that contains the indicated
byte in the source string. Return null if the byte is not found.
Undefined behavior will occur if the end of the source string (i.e.
the terminating null byte) is in the last two words of the program's
allocated memory space. This is so because strchr fetches ahead.
Disallowing the fetch ahead would impose a severe performance penalty.
Strategy:
Fetch the source string by words and scanbyte the words for the
char until either a word with the byte is found or the null byte is
encountered. In the former case, move through the word to find the
matching byte and return its memory address. In the latter case,
return zero (null).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble that would be necessary to assure alignment. This
is supported by the intuition that most source arrays (even more
true of most big source arrays) will be word aligned to begin with.
*/
/*
 * ptr = strchr (src_addr, char)
 * in:  g0 = src, g1 = char
 * out: g0 = address of first occurrence of char in src, or 0 if the
 *      null terminator is reached first
 */
.globl _strchr
.globl __strchr
.leafproc _strchr, __strchr
.align 2
_strchr:
#ifndef __PIC
lda Lrett,g14 # call entry: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute the ret stub address ip-relative
#endif
__strchr:
ld (g0),g4 # fetch first word
lda 0xff,g7 # byte extraction mask
and g1,g7,g1 # make char an 8-bit ordinal
shlo 8,g1,g2 # broadcast the char to four bytes
or g1,g2,g2
shlo 16,g2,g5
or g2,g5,g3
mov g14,g13 # preserve return address
addo 4,g0,g0 # post-increment src pointer
mov 0,g14 # conform to register linkage standard
Lsearch_for_word_with_char_or_null:
mov g4,g5 # copy word (g5 gets shifted in the byte loop)
scanbyte g3,g5 # check for byte with char
ld (g0),g4 # fetch next word of src
bo Lsearch_for_char # branch if char found
scanbyte 0,g5 # check for null byte
addo 4,g0,g0 # post-increment src pointer
bno Lsearch_for_word_with_char_or_null # branch if not null
Lnot_found:
mov 0,g0 # char not found. Return null
Lexit_code:
bx (g13) # g0 = addr of char in src (or null); g14 = 0
Lrett:
ret
Lsearch_for_char:
subo 5,g0,g0 # back up: g0 was 4 past the matching word; point
/* one byte before it (loop below pre-increments) */
Lsearch_for_char.a:
and g5,g7,g6 # extract byte
cmpo g1,g6 # is it char?
addo 1,g0,g0 # bump src byte ptr
shro 8,g5,g5 # shift word to position next byte
be Lexit_code # char found; g0 = its address
cmpobne 0,g6,Lsearch_for_char.a # quit if null comes before char
b Lnot_found
/* end of strchr */
|
4ms/metamodule-plugin-sdk
| 4,396
|
plugin-libc/newlib/libc/machine/i960/strcmp.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strcmp.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strcmp (optimized assembler version for the 80960K Series)
result = strcmp (src1_addr, src2_addr)
compare the null terminated string pointed to by src1_addr to
the string pointed to by src2_addr. Return 0 iff the strings
are equal, -1 if src1_addr is lexicographically less than src2_addr,
and 1 if it is lexicographically greater.
Undefined behavior will occur if the end of either source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strcmp fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
Fetch the source strings by words and compare the words until either
differing words are found or the null byte is encountered. In either
case, move through the word until either the differing byte is found,
in which case return -1 or 1 appropriately; or the null byte is
encountered, in which case, return zero (equality).
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment. This is supported by the intuition that many source
strings will be word aligned to begin with.
*/
/*
 * result = strcmp (src1_addr, src2_addr)
 * in:  g0 = src1, g1 = src2
 * out: g0 = 0 (equal), -1 (src1 < src2) or 1 (src1 > src2)
 */
.globl _strcmp
.globl __strcmp
.leafproc _strcmp,__strcmp
.align 2
_strcmp:
#ifndef __PIC
lda .Lrett,g14 # call entry: g14 = address of the ret stub
#else
lda .Lrett-(.+8)(ip),g14 # PIC: compute the ret stub address ip-relative
#endif
__strcmp:
ld (g0), g5 # fetch first word of source_1
mov g14,g7 # preserve return address
ldconst 0,g14 # conform to register conventions
ldconst 0xff,g4 # byte extraction mask
.Lwloop:
addo 4,g0,g0 # post-increment source_1 byte ptr
ld (g1), g3 # fetch word of source_2
scanbyte 0,g5 # does word have a null byte?
mov g5,g2 # save a copy of the source_1 word
be .Lcloop # branch if null byte encountered
cmpo g2,g3 # are the source words the same?
addo 4,g1,g1 # post-increment source_2 byte ptr
ld (g0), g5 # fetch ahead next word of source_1
be .Lwloop # loop if words equal; fall into byte loop if not
.Lcloop: and g4,g2,g5 # extract and compare individual bytes
and g4,g3,g6
cmpobne g5,g6,.diff # if they differ, go return 1 or -1
cmpo 0,g6 # they are the same. Are they null?
shlo 8,g4,g4 # position mask for next extraction
bne .Lcloop # loop if null not encountered
mov 0,g0 # return equality
bx (g7)
.Lrett:
ret
.diff: bl .neg # flags still set from the byte compare above
mov 1,g0
bx (g7) # g0 = 1 (src1 > src2)
.neg: subi 1,0,g0
.Lexit:
bx (g7) # g0 = -1 (src1 < src2)
/* end of strcmp */
|
4ms/metamodule-plugin-sdk
| 5,675
|
plugin-libc/newlib/libc/machine/i960/strncpy.S
|
/*******************************************************************************
*
* Copyright (c) 1993 Intel Corporation
*
* Intel hereby grants you permission to copy, modify, and distribute this
* software and its documentation. Intel grants this permission provided
* that the above copyright notice appears in all copies and that both the
* copyright notice and this permission notice appear in supporting
* documentation. In addition, Intel grants this permission provided that
* you prominently mark as "not part of the original" any modifications
* made to this software or documentation, and that the name of Intel
* Corporation not be used in advertising or publicity pertaining to
* distribution of the software or the documentation without specific,
* written prior permission.
*
* Intel Corporation provides this AS IS, WITHOUT ANY WARRANTY, EXPRESS OR
* IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY
* OR FITNESS FOR A PARTICULAR PURPOSE. Intel makes no guarantee or
* representations regarding the use of, or the results of the use of,
* the software and documentation in terms of correctness, accuracy,
* reliability, currentness, or otherwise; and you rely on the software,
* documentation and results solely at your own risk.
*
* IN NO EVENT SHALL INTEL BE LIABLE FOR ANY LOSS OF USE, LOSS OF BUSINESS,
* LOSS OF PROFITS, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES
* OF ANY KIND. IN NO EVENT SHALL INTEL'S TOTAL LIABILITY EXCEED THE SUM
* PAID TO INTEL FOR THE PRODUCT LICENSED HEREUNDER.
*
******************************************************************************/
.file "strncpy.s"
#ifdef __PIC
.pic
#endif
#ifdef __PID
.pid
#endif
/*
* (c) copyright 1988,1993 Intel Corp., all rights reserved
*/
/*
procedure strncpy (optimized assembler version for the 80960K Series)
dest_addr = strncpy (dest_addr, src_addr, max_bytes)
copy the null terminated string pointed to by src_addr to the
string pointed to by dest_addr. Return the original dest_addr.
If the source string is shorter than max_bytes, then null-pad
the destination string. If it is longer than max_bytes, the
copy stops at max_bytes bytes (and no terminating null appears
in the destination string).
This routine will fail if the source and destination string
overlap (in particular, if the end of the source is overlapped
by the beginning of the destination). The behavior is undefined.
This is acceptable according to the draft C standard.
Undefined behavior will also occur if the end of the source string
(i.e. the terminating null byte) is in the last two words of the
program's allocated memory space. This is so because strncpy fetches
ahead. Disallowing the fetch ahead would impose a severe performance
penalty.
Strategy:
Fetch and store the strings by words and go to a character move loop
as soon as a null byte is encountered. If max_bytes is exhausted
first, then terminate after moving only max_bytes (with the last
0, 1, 2, or 3 bytes moved as single bytes, not as a word).
Otherwise, the character move loop moves the last bytes of the
source string, and then null-pads the destination string until
max_bytes is exhausted.
Tactics:
1) Do NOT try to fetch the words in a word aligned manner because,
in my judgement, the performance degradation experienced due to
non-aligned accesses does NOT outweigh the time and complexity added
by the preamble and convoluted body that would be necessary to assure
alignment.
2) When the null byte is encountered in a source word, null out the
higher-numbered bytes in that word, store the word in the destination,
and go to the word null-padder, which may eventually go to the byte
null-padder.
*/
/*
 * dest_addr = strncpy (dest_addr, src_addr, max_bytes)
 * in:  g0 = dest, g1 = src, g2 = max_bytes
 * out: g0 = original dest (g0 is never modified)
 */
.globl _strncpy
.globl __strncpy
.leafproc _strncpy,__strncpy
.align 2
_strncpy:
#ifndef __PIC
lda Lrett,g14 # call entry: g14 = address of the ret stub
#else
lda Lrett-(.+8)(ip),g14 # PIC: compute the ret stub address ip-relative
#endif
__strncpy:
mov g14, g13 # preserve return address
cmpibge 0,g2,Lexit # quit early if max_bytes <= 0
ld (g1), g7 # fetch the first word of the source
mov g0, g5 # g5 = working destination pointer
lda 0xff, g3 # byte extraction mask
addo g1, g2, g6 # g6 = addr just past last source byte
addo g2, g5, g2 # g2 = addr just past last dest byte
Lwloop: # word copying loop
addo 4, g1, g1 # post-increment source ptr
cmpo g6, g1 # max_bytes < 4 ?
mov g7, g4 # keep a copy of source word
bl Lcloop.a # if less than four bytes to go, go to char loop
scanbyte 0, g4 # null byte found?
ld (g1), g7 # pre-fetch next word of the source
be Lcloop.c # go to char loop if null encountered
st g4, (g5) # store current word
addo 4, g5, g5 # post-increment destination ptr
b Lwloop
Lcloop.a: # character copying loop (fewer than 4 bytes remain)
and g3, g4, g14 # extract byte
Lcloop.b:
cmpo g2, g5 # max_bytes <= 0 ?
shro 8, g4, g4 # position word to extract next byte
be Lexit # exit if max_bytes exhausted
cmpo 0, g14 # is it null?
stob g14, (g5) # store it
addo 1, g5, g5 # post-increment dest ptr
bne Lcloop.a # branch if we are NOT null padding
b Lcloop.b # branch if we are null padding
Lexit:
mov 0, g14 # conform to register conventions
bx (g13) # g0 = dest string address; g14 = 0
Lrett:
ret
Lcloop.c: # character copying loop
and g3, g4, g14 # extract byte
cmpo 0, g14 # is it null?
mov g3, g7 # save mask
shlo 8, g3, g3 # shift mask to next byte position
bne Lcloop.c # loop until null found
subo 1, g7, g3 # mask to null pad after null byte
and g3, g4, g4 # null-out stuff after null byte
st g4, (g5) # store last part of src and first of null-pad
subo 8,g2,g6 # g6 = dest end - 8: limit for whole-word padding
Lzwloop: # word null-padding loop
cmpo g5, g6 # max_bytes < 4 ?
addo 4, g5, g5
bg Lcloop.b # if so, goto character loop (byte padding)
st g14, (g5) # store four null bytes (g14 = 0 here)
b Lzwloop
/* end of strncpy */
|
4ms/metamodule-plugin-sdk
| 3,261
|
plugin-libc/newlib/libc/machine/arc/memcpy.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || \
(!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
/* Adapted from memcpy-bs.S. */
/* We assume that most sources and destinations are aligned, and
that also lengths are mostly a multiple of four, although to a lesser
extent. */
ENTRY (memcpy)
; void *memcpy (void *dst [r0], const void *src [r1], size_t n [r2])
; Variant for cores without a barrel shifter (ARC601 etc.): copies word
; pairs when dst and src are both 4-byte aligned, otherwise byte pairs.
; r0 is never modified after entry, so it is the return value throughout.
or r3,r0,r1 ; merge alignment bits of dst and src
bmsk.f 0,r3,1 ; Z iff both pointers are 4-byte aligned
breq_s r2,0,.Lnil ; n == 0: nothing to do
mov_s r5,r0 ; r5 = running destination pointer
bne.d .Lcopy_bytewise ; unaligned -> byte loop (delay slot still runs)
add r6,r0,r2 ; delay slot: r6 = dst + n
sub_s r3,r2,1 ; r3 = n - 1
ld_s r12,[r1,0] ; read ahead the first source word
bbit0.d r3,2,.Lnox4 ; skip pre-copy unless bit 2 of n-1 is set
sub r6,r6,8 ; delay slot: r6 = dst + n - 8 (loop bound)
st.ab r12,[r5,4] ; pre-copy one word so the rest is word pairs
ld.a r12,[r1,4] ; read ahead the next word
.Lnox4:
brlo r2,9,.Lendloop ; < 9 bytes: no full 8-byte iteration
.Lnox4a: ; main loop: 8 bytes/iteration, one word of read-ahead
ld_s r3,[r1,4]
st.ab r12,[r5,8]
ld.a r12,[r1,8]
brlo.d r5,r6,.Lnox4a ; while r5 < dst + n - 8
st r3,[r5,-4] ; delay slot: second word of the pair
.Lendloop:
; Tail: splice the last 1..4 bytes (held in r12) over the word already
; at [r5] via xor/bmsk/xor, so one full-word store finishes the copy
; without writing past dst + n.
#ifdef __LITTLE_ENDIAN__
ld r3,[r5,0] ; current destination word
add3 r2,-1,r2 ; r2 = 8*n - 1 (bit index of last byte to keep, mod 32)
; uses long immediate
xor_s r12,r12,r3
bmsk r12,r12,r2 ; keep the difference only in the n%4 low bytes
xor_s r12,r12,r3 ; r12 = new low bytes | old high bytes
#else /* BIG ENDIAN */
bmsk_s r2,r2,1 ; r2 = n & 3
breq_s r2,0,.Last_store ; whole word remains: store r12 as-is
ld r3,[r5,0] ; current destination word
sub3 r2,31,r2 ; r2 = 31 - 8*(n & 3)
; uses long immediate
xor_s r3,r3,r12
bmsk r3,r3,r2 ; keep only the bytes NOT to overwrite
xor_s r12,r12,r3 ; r12 = new high bytes | old low bytes
#endif /* ENDIAN */
.Last_store:
j_s.d [blink]
st r12,[r5,0] ; delay slot: final word store
.Lnil:
j_s [blink]
.balign 4
.Lcopy_bytewise: ; same structure as above, two bytes per iteration
ldb_s r12,[r1,0] ; read ahead the first byte
bbit1.d r2,0,.Lnox1 ; odd n: pairs plus the final byte suffice
sub r6,r6,2 ; delay slot: r6 = dst + n - 2 (loop bound)
stb.ab r12,[r5,1] ; even n: pre-copy one byte
ldb.a r12,[r1,1]
.Lnox1:
brlo r2,3,.Lendbloop ; < 3 bytes: no full 2-byte iteration
.Lnox1a:
ldb_s r3,[r1,1]
stb.ab r12,[r5,2]
ldb.a r12,[r1,2]
brlo.d r5,r6,.Lnox1a ; while r5 < dst + n - 2
stb r3,[r5,-1] ; delay slot: second byte of the pair
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0] ; delay slot: final byte
ENDFUNC (memcpy)
#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,337
|
plugin-libc/newlib/libc/machine/arc/memcpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if !defined (__ARC601__) && !defined (__ARCHS__) \
&& defined (__ARC_BARREL_SHIFTER__)
/* Mostly optimized for ARC700, but not bad for ARC600 either. */
/* This memcpy implementation does not support objects of 1GB or larger -
the check for alignment does not work then. */
/* We assume that most sources and destinations are aligned, and
that also lengths are mostly a multiple of four, although to a lesser
extent. */
ENTRY (memcpy)
; void *memcpy (void *dst [r0], const void *src [r1], size_t n [r2])
; Barrel-shifter variant: zero-overhead loop over word pairs when both
; pointers are 4-byte aligned.  r0 is preserved as the return value.
; The asl-by-30 alignment test is why objects must be < 1GB (see the
; file comment above).
or r3,r0,r1
asl_s r3,r3,30 ; alignment bits -> top of r3
mov_s r5,r0 ; r5 = running destination pointer
brls.d r2,r3,.Lcopy_bytewise ; unaligned (r3 >= 2^30 >= n) or n == 0
sub.f r3,r2,1 ; delay slot: r3 = n - 1; C set iff n == 0
ld_s r12,[r1,0] ; read ahead the first source word
asr.f lp_count,r3,3 ; iterations of the 8-byte loop
bbit0.d r3,2,.Lnox4 ; skip pre-copy unless bit 2 of n-1 is set
bmsk_s r2,r2,1 ; delay slot: r2 = n & 3 (tail byte count)
st.ab r12,[r5,4] ; pre-copy one word so the rest is word pairs
ld.a r12,[r1,4] ; read ahead the next word
.Lnox4:
lppnz .Lendloop ; zero-overhead loop: 8 bytes per iteration
ld_s r3,[r1,4]
st.ab r12,[r5,4]
ld.a r12,[r1,8]
st.ab r3,[r5,4]
.Lendloop:
breq_l r2,0,.Last_store ; no tail bytes: store the last word whole
ld r3,[r5,0] ; current destination word
; Splice the last 1..3 bytes (in r12) over r3 via xor/bmsk/xor so one
; full-word store finishes the copy without writing past dst + n.
#ifdef __LITTLE_ENDIAN__
add3 r2,-1,r2 ; r2 = 8*(n&3) - 1 (bit index of last byte to keep)
; uses long immediate
xor_s r12,r12,r3
bmsk r12,r12,r2 ; keep the difference only in the tail bytes
xor_s r12,r12,r3 ; r12 = new low bytes | old high bytes
#else /* BIG ENDIAN */
sub3 r2,31,r2 ; r2 = 31 - 8*(n&3)
; uses long immediate
xor_s r3,r3,r12
bmsk r3,r3,r2 ; keep only the bytes NOT to overwrite
xor_s r12,r12,r3 ; r12 = new high bytes | old low bytes
#endif /* ENDIAN */
.Last_store:
j_s.d [blink]
st r12,[r5,0] ; delay slot: final word store
.balign 4
.Lcopy_bytewise:
jcs [blink] ; n was 0 (carry from the sub.f above)
ldb_s r12,[r1,0] ; read ahead the first byte
lsr.f lp_count,r3 ; 2-byte iterations; C = bit 0 of n-1
bcc_s .Lnox1 ; odd n: pairs plus the final byte suffice
stb.ab r12,[r5,1] ; even n: pre-copy one byte
ldb.a r12,[r1,1]
.Lnox1:
lppnz .Lendbloop ; zero-overhead loop: 2 bytes per iteration
ldb_s r3,[r1,1]
stb.ab r12,[r5,1]
ldb.a r12,[r1,2]
stb.ab r3,[r5,1]
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0] ; delay slot: final byte
ENDFUNC (memcpy)
#endif /* !__ARC601__ && !__ARCHS__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,244
|
plugin-libc/newlib/libc/machine/arc/setjmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* ABI interface file
these are the stack mappings for the registers
as stored in the ABI for ARC */
.file "setjmp.S"
; jmp_buf layout: byte offset of each saved item, 4 bytes apiece.
ABIr13 = 0
ABIr14 = ABIr13 + 4
ABIr15 = ABIr14 + 4
ABIr16 = ABIr15 + 4
ABIr17 = ABIr16 + 4
ABIr18 = ABIr17 + 4
ABIr19 = ABIr18 + 4
ABIr20 = ABIr19 + 4
ABIr21 = ABIr20 + 4
ABIr22 = ABIr21 + 4
ABIr23 = ABIr22 + 4
ABIr24 = ABIr23 + 4
ABIr25 = ABIr24 + 4
ABIr26 = ABIr25 + 4
ABIr27 = ABIr26 + 4
ABIr28 = ABIr27 + 4
ABIr29 = ABIr28 + 4
ABIr30 = ABIr29 + 4
ABIr31 = ABIr30 + 4 ; holds blink (return address)
ABIlpc = ABIr31 + 4 ; lp_count
ABIlps = ABIlpc + 4 ; lp_start (aux register)
ABIlpe = ABIlps + 4 ; lp_end (aux register)
ABIflg = ABIlpe + 4 ; status32 (only used on non-ARC700/EM/HS cores)
ABImlo = ABIflg + 4 ; mlo slot (save/restore currently disabled below)
ABImhi = ABImlo + 4 ; mhi slot (save/restore currently disabled below)
.text
.align 4
.global setjmp
.type setjmp,@function
; int setjmp (jmp_buf env)  -- env in r0.
; Saves the ABI callee-saved registers r13-r30, the return address
; (blink), and the zero-overhead-loop state (lp_count, lp_start,
; lp_end) into env at the ABIxxx offsets defined above; on cores other
; than ARC700/EM/HS it also saves status32.  Returns 0.
setjmp:
st r13, [r0, ABIr13]
st r14, [r0, ABIr14]
st r15, [r0, ABIr15]
st r16, [r0, ABIr16]
st r17, [r0, ABIr17]
st r18, [r0, ABIr18]
st r19, [r0, ABIr19]
st r20, [r0, ABIr20]
st r21, [r0, ABIr21]
st r22, [r0, ABIr22]
st r23, [r0, ABIr23]
st r24, [r0, ABIr24]
st r25, [r0, ABIr25]
st r26, [r0, ABIr26]
st r27, [r0, ABIr27]
st r28, [r0, ABIr28]
st r29, [r0, ABIr29]
st r30, [r0, ABIr30]
st blink, [r0, ABIr31] ; return address
st lp_count, [r0, ABIlpc]
lr r2, [lp_start] ; loop bounds are aux registers: read via lr
lr r3, [lp_end]
st r2, [r0, ABIlps]
st r3, [r0, ABIlpe]
#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
; Till the configure changes are decided, and implemented, the code working on
; mlo/mhi and using mul64 should be disabled.
; st mlo, [r0, ABImlo]
; st mhi, [r0, ABImhi]
lr r2, [status32]
st r2, [r0, ABIflg]
#endif
j.d [blink]
mov r0,0 ; delay slot: direct return value is 0
.Lfe1:
.size setjmp,.Lfe1-setjmp
.align 4
.global longjmp
.type longjmp,@function
; void longjmp (jmp_buf env, int val)  -- env in r0, val in r1.
; Restores the state saved by setjmp from env and returns val from the
; original setjmp call site; val == 0 is converted to 1 as the C
; standard requires.
longjmp:
; load registers
ld r13, [r0, ABIr13]
ld r14, [r0, ABIr14]
ld r15, [r0, ABIr15]
ld r16, [r0, ABIr16]
ld r17, [r0, ABIr17]
ld r18, [r0, ABIr18]
ld r19, [r0, ABIr19]
ld r20, [r0, ABIr20]
ld r21, [r0, ABIr21]
ld r22, [r0, ABIr22]
ld r23, [r0, ABIr23]
ld r24, [r0, ABIr24]
ld r25, [r0, ABIr25]
ld r26, [r0, ABIr26]
ld r27, [r0, ABIr27]
ld r28, [r0, ABIr28]
ld r3, [r0, ABIr29] ; r29/r30/lp_count restored via r3 rather than
mov r29, r3 ; loaded directly
ld r3, [r0, ABIr30]
mov r30, r3
ld blink, [r0, ABIr31] ; return address of the setjmp call
ld r3, [r0, ABIlpc]
mov lp_count, r3
ld r2, [r0, ABIlps]
ld r3, [r0, ABIlpe]
sr r2, [lp_start] ; loop bounds are aux registers: write via sr
sr r3, [lp_end]
#if (!defined (__ARC700__) && !defined (__ARCEM__) && !defined (__ARCHS__))
ld r2, [r0, ABImlo] ; loaded but not restored -- see comment below
ld r3, [r0, ABImhi]
; We do not support restoring of mulhi and mlo registers, yet.
; mulu64 0,r2,1 ; restores mlo
; mov 0,mlo ; force multiply to finish
; sr r3, [mulhi]
ld r2, [r0, ABIflg]
flag r2 ; restore "status32" register
#endif
mov.f r1, r1 ; to avoid return 0 from longjmp
mov.eq r1, 1 ; val == 0 -> return 1
j.d [blink]
mov r0,r1 ; delay slot: setjmp's (second) return value
.Lfe2:
.size longjmp,.Lfe2-longjmp
|
4ms/metamodule-plugin-sdk
| 4,627
|
plugin-libc/newlib/libc/machine/arc/strncpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strncpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
The beq_s to r3z could be made unaligned & long to avoid a stall
there, but it is not likely to be taken often, and it
would also be likely to cost an unaligned mispredict at the next call. */
#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
/* BRand (a, b, l): branch to l iff (a & b) != 0.  On ARC700/EM/HS use
   tst + bne, leaving a intact; elsewhere use and + brne_s, which
   clobbers a (always a scratch register at the call sites below).
   Both forms are behaviorally equivalent.
   NB: the guard previously tested "__ARC700___" (three trailing
   underscores), which is never defined, making the tst variant dead
   on ARC700; the macro name matches the guards used later in this
   file (e.g. around .Lr3z).  */
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
#define BRand(a,b,l) tst a,b ` bne_l l
#else
#define BRand(a,b,l) and a,a,b ` brne_s a,0,l
#endif
ENTRY (strncpy)
; char *strncpy (char *dst [r0], const char *src [r1], size_t n [r2])
; Word-at-a-time copy when n >= 8 and dst|src are 4-byte aligned.
; r8 = 0x01010101, r11 = 0x80808080: (x - r8) & ~x & r11 is nonzero
; iff word x contains a NUL byte.  Once the NUL is found, the rest of
; the n bytes is cleared through __strncpy_bzero.  Returns dst (r0,
; never modified after entry).
cmp_s r2,8
or r12,r0,r1
bmsk.cc.f r12,r12,1 ; n >= 8: test combined alignment of dst and src
brne.d r12,0,.Lbytewise ; n < 8 or unaligned -> byte loop
mov_s r10,r0 ; r10 = running destination pointer
ld_s r3,[r1,0] ; read ahead the first source word
mov r8,0x01010101
sub lp_count,r2,1
bbit0.d r1,2,.Loop_start ; src already 8-byte aligned: enter loop
ror r11,r8 ; delay slot: r11 = 0x80808080
sub r12,r3,r8 ; NUL test on the first word
bic_l r12,r12,r3
BRand (r12,r11,.Lr3z) ; NUL present -> finish bytewise + zero-fill
mov_s r4,r3
ld.a r3,[r1,4] ; copy one word to make src 8-byte aligned
sub lp_count,lp_count,4
st.ab r4,[r10,4]
.balign 4
.Loop_start:
lsr.f lp_count,lp_count,3 ; 8-byte iterations; C = odd word pending
lpne .Loop_end
ld.a r4,[r1,4]
sub r12,r3,r8 ; NUL test on the word in r3
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z)
st.ab r3,[r10,4]
sub r12,r4,r8 ; NUL test on the word in r4
bic r12,r12,r4
BRand (r12,r11,.Lr4z)
ld.a r3,[r1,4]
st.ab r4,[r10,4]
.Loop_end:
bcc_s .Lastword ; no extra word pending (C from the lsr above)
ld.a r4,[r1,4]
sub r12,r3,r8 ; NUL test on the pending word
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z)
st.ab r3,[r10,4]
mov_s r3,r4
.Lastword:
and.f lp_count,r2,3 ; 1..4 trailing bytes of the last word (r3)
mov.eq lp_count,4
lp .Last_byte_end ; store them one byte at a time
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7 ; lowest byte first
lsr.ne r3,r3,8
#else
lsr.f r1,r3,24 ; highest byte first
asl.ne r3,r3,8
#endif
stb.ab r1,[r10,1]
.Last_byte_end:
j_s [blink]
.balign 4
.Lr4z: ; NUL was in r4: make that the current word
mov_l r3,r4
.Lr3z: ; store r3's bytes up to and including the NUL, then zero-fill
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7
lsr_s r3,r3,8
#else
lsr.f r1,r3,24
asl_s r3,r3,8
#endif
bne.d .Lr3z ; loop until the extracted byte was the NUL
stb.ab r1,[r10,1]
#else /* ! __ARC700__ */
#ifdef __LITTLE_ENDIAN__
bmsk.f r1,r3,7
.Lr3z_loop:
lsr_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop ; loop until the stored byte was the NUL
bmsk.f r1,r3,7
#else
lsr.f r1,r3,24
.Lr3z_loop:
asl_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop ; loop until the stored byte was the NUL
lsr.f r1,r3,24
#endif /* ENDIAN */
#endif /* ! __ARC700__ */
.Lzero_rest:
; __strncpy_bzero requires:
; return value in r0
; zeroing length in r2
; zeroing start address in r3
mov_s r3,r10 ; start of the area still to clear
add_s r2,r2,r0
b.d __strncpy_bzero ; tail call
sub_s r2,r2,r3 ; delay slot: r2 = n - bytes already written
.balign 4
.Lbytewise:
sub.f r2,r2,1
mov_l r3,r0 ; r3 = running destination pointer
jcs [blink] ; n was 0
.Lcharloop:
ldb.ab r12,[r1,1]
beq.d .Last_byte ; count exhausted: this is the last byte
sub.f r2,r2,1
brne.d r12,0,.Lcharloop
stb.ab r12,[r3,1] ; delay slot: store the byte (runs either way)
b.d __strncpy_bzero ; NUL copied: clear the remaining bytes
stb.ab r12,[r3,1] ; delay slot: one extra 0 byte balances the count
.Last_byte:
j_s.d [blink]
stb_l r12,[r3] ; delay slot: final byte, no zero-fill needed
ENDFUNC (strncpy)
#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 2,941
|
plugin-libc/newlib/libc/machine/arc/strcpy.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
This version is a compromise between speed for the 601 pipeline and code
size. */
ENTRY (strcpy)
; char *strcpy (char *dst [r0], const char *src [r1])
; Word-at-a-time copy with one word of read-ahead when both pointers
; are 4-byte aligned.  r8 = 0x01010101, r12 = 0x80808080:
; (x - r8) & ~x & r12 is nonzero iff word x contains a NUL byte.
; Returns dst (r0, never modified after entry).
or r2,r0,r1
bmsk.f 0,r2,1 ; Z iff dst and src are both 4-byte aligned
mov r8,0x01010101
bne.d .Lcharloop ; unaligned -> byte loop
mov_s r10,r0 ; r10 = running destination pointer
ld_s r3,[r1] ; read ahead the first source word
bbit0.d r1,2,.Loop_start ; src already 8-byte aligned: enter loop
ror r12,r8 ; delay slot: r12 = 0x80808080
sub r2,r3,r8 ; NUL test on the first word
bic_s r2,r2,r3
and_s r2,r2,r12
brne_s r2,0,.Lr3z ; NUL in the first word -> byte loop
mov r4,r3
sub_s r1,r1,4 ; bias src so .Loop's ld.a lands on the next word
.balign 4
; Loop invariant at .Loop_start: r3 = unstored word at [r1],
; r4 about to become the unstored word at [r1,4].
.Loop:
ld.a r3,[r1,8]
st.ab r4,[r10,4]
.Loop_start:
ld r4,[r1,4]
sub r2,r3,r8 ; NUL test on r3
bic_s r2,r2,r3
tst_s r2,r12
sub r5,r4,r8 ; NUL test on r4 (started before the branch)
bic r5,r5,r4
bne_s .Lr3z ; NUL in r3: byte loop re-reads it from [r1]
and r5,r5,r12
breq.d r5,0,.Loop ; neither word has a NUL: keep going
st.ab r3,[r10,4] ; delay slot: store r3 (runs either way)
;mov_s r3,r4
add_s r1,r1,4 ; NUL in r4: point src at its word for the byte loop
.balign 4
.Lr3z:
.Lcharloop: ; finish byte by byte, copying the NUL too
ldb.ab r3,[r1,1]
brne.d r3,0,.Lcharloop
stb.ab r3,[r10,1] ; delay slot: store the byte (runs either way)
j_s [blink]
ENDFUNC (strcpy)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,269
|
plugin-libc/newlib/libc/machine/arc/strlen-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
ENTRY (strlen)
; size_t strlen (const char *s [r0])
; Scans two words per iteration, starting from the enclosing 8-byte-
; aligned block; NUL-test results from bytes before s are masked out.
; r4 = 0x01010101, r5 = 0x80808080: (x - r4) & ~x & r5 is nonzero iff
; word x contains a NUL byte.
or r3,r0,7 ; r3 = address of the last byte of the first aligned block
ld r2,[r3,-7] ; first word of the block
ld.a r6,[r3,-3] ; second word; r3 now points at it
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
asl_s r1,r0,3 ; shift amount 8*(s&3) (shifts use the low 5 bits)
btst_s r0,2 ; Z iff s lies in the first word of the block
asl r7,r4,r1 ; 0x01010101 with the pre-s byte positions cleared
ror r5,r4 ; r5 = 0x80808080
sub r1,r2,r7 ; NUL test on word 1, ignoring bytes before s
bic_s r1,r1,r2
mov.eq r7,r4 ; s in word 1: word 2 uses the unmasked constant
sub r12,r6,r7 ; NUL test on word 2
bic r12,r12,r6
or.eq r12,r12,r1 ; include word 1's result only if s is in word 1
and r12,r12,r5
brne r12,0,.Learly_end
#else /* BIG ENDIAN */
ror r5,r4 ; r5 = 0x80808080
btst_s r0,2 ; Z iff s lies in the first word of the block
mov_s r1,31
sub3 r7,r1,r0 ; r7 = 31 - 8*(s&3) (mask width; bmsk uses low bits)
sub r1,r2,r4 ; NUL test on word 1
bic_s r1,r1,r2
bmsk r1,r1,r7 ; drop markers from bytes before s
sub r12,r6,r4 ; NUL test on word 2
bic r12,r12,r6
bmsk.ne r12,r12,r7 ; s in word 2: mask its leading bytes instead
or.eq r12,r12,r1 ; include word 1's result only if s is in word 1
and r12,r12,r5
brne r12,0,.Learly_end
#endif /* ENDIAN */
.Loop: ; two words per iteration; r1/r12 hold per-word test results
ld_s r2,[r3,4]
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4 ; NUL test on the first word of the pair
bic_s r1,r1,r2
sub r12,r6,r4 ; NUL test on the second word
bic r12,r12,r6
or_l r12,r12,r1
and r12,r12,r5
breq_l r12,0,.Loop
.Lend:
and.f r1,r1,r5 ; NUL already in the first word of the pair?
sub.ne r3,r3,4 ; yes: step r3 back to that word
mov.eq r1,r12 ; no: use the second word's result
#ifdef __LITTLE_ENDIAN__
sub_s r2,r1,1 ; isolate the lowest marker bit (first NUL byte)
bic_s r2,r2,r1
norm r1,r2 ; locate that bit
sub_s r0,r0,3
lsr_s r1,r1,3 ; bit position -> byte offset
sub r0,r3,r0
j_s.d [blink]
sub_l r0,r0,r1 ; delay slot: final length
#else /* BIG ENDIAN */
lsr_s r1,r1,7 ; move 0x80 markers down to the 0x01 positions
mov.eq r2,r6 ; NUL in second word: its data is in r6
bic_s r1,r1,r2 ; drop false markers from borrow propagation
norm r1,r1 ; locate the first (most significant) marker
sub r0,r3,r0
lsr_s r1,r1,3 ; bit position -> byte offset
j_s.d [blink]
add_l r0,r0,r1 ; delay slot: final length
#endif /* ENDIAN */
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1 ; delay slot: s not in word 1 (Z from btst clear):
; discard word 1's result so .Lend picks r12
ENDFUNC (strlen)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
&& __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,537
|
plugin-libc/newlib/libc/machine/arc/strchr-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
/* ARC700 has a relatively long pipeline and branch prediction, so we want
to avoid branches that are hard to predict. On the other hand, the
presence of the norm instruction makes it easier to operate on whole
words branch-free. */
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__)
ENTRY (strchr)
; char *strchr (const char *s [r0], int c [r1])
; Word-at-a-time scan.  r5 is built up to c replicated into all four
; bytes, so r6 = word ^ r5 has a zero byte exactly where the word
; matches c; the (x - 0x01010101) & ~x & 0x80808080 zero-byte test is
; applied both to the word itself (NUL search) and to r6 (c search).
; r3 = 0x01010101, r4 = 0x80808080 once the prologue is done.
; FIX(review): the aligned-entry dispatch previously read
; "breq.d r2,r0,.Laligned", comparing r2 (= s & 3) against s itself,
; so the tuned aligned path was only reachable for s < 4; aligned
; strings took the unaligned path (correct, since it degenerates
; cleanly at r2 == 0, but slower).  The intended test is r2 == 0.
extb_s r1,r1 ; c as an unsigned byte
asl r5,r1,8
bmsk r2,r0,1 ; r2 = s & 3
or r5,r5,r1 ; c in the two low bytes
mov_s r3,0x01010101
breq.d r2,0,.Laligned ; s 4-byte aligned: take the aligned entry
asl r4,r5,16 ; delay slot: c in the two high bytes
sub_s r0,r0,r2 ; round s down to the enclosing word
asl r7,r2,3 ; 8 * (s & 3)
ld_s r2,[r0] ; word containing the first string bytes
#ifdef __LITTLE_ENDIAN__
asl r7,r3,r7 ; 0x01010101 with pre-s byte positions cleared
#else
lsr r7,r3,r7 ; 0x01010101 with pre-s byte positions cleared
#endif
or r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r7 ; NUL test, ignoring bytes before s
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0_ua ; NUL in the first word
xor r6,r2,r5 ; delay slot: zero byte in r6 <=> match for c
ld.a r2,[r0,4] ; read ahead; r0 -> next word
sub r12,r6,r7 ; c test, ignoring bytes before s
bic r12,r12,r6
#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b_l .Lfound_char ; Likewise this one.
#else
and r12,r12,r4
breq_l r12,0,.Loop ; For speed, we want this branch to be unaligned.
lsr_s r12,r12,7
bic r2,r7,r6
b.d .Lfound_char_b
and_s r2,r2,r12
#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
or r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
; /* ... so that this code address is aligned, for itself and ... */
.Loop: ; one word per iteration; NUL test then match test
sub r12,r2,r3
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0 ; NUL byte in this word
xor r6,r2,r5 ; delay slot: zero byte in r6 <=> match for c
ld.a r2,[r0,4] ; read ahead; r0 -> next word
sub r12,r6,r3
bic r12,r12,r6
and r7,r12,r4
breq r7,0,.Loop /* ... so that this branch is unaligned. */
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
sub r3,r7,1 ; isolate the lowest marker bit
bic r3,r3,r7
norm r2,r3 ; locate it
sub_s r0,r0,1
asr_s r2,r2,3 ; bit position -> byte offset
j_l.d [blink]
sub_s r0,r0,r2 ; delay slot: address of the matching byte
.balign 4
.Lfound0_ua: ; NUL in the first (partial) word: r7 is the masked 0x01s
mov_l r3,r7
.Lfound0: ; word has a NUL; decide whether c appears before it
sub r3,r6,r3 ; match test on r6 (bytes before s already masked)
bic r3,r3,r6
and r2,r3,r4
or_s r12,r12,r2 ; combine NUL markers and match markers
sub_s r3,r12,1 ; isolate the lowest (earliest) marker
bic_s r3,r3,r12
norm r3,r3
add_s r0,r0,3
asr_s r12,r3,3
asl.f 0,r2,r3 ; sign = was the earliest marker a c match?
sub_s r0,r0,r12
j_s.d [blink]
mov.pl r0,0 ; NUL came first: return NULL
#else /* BIG ENDIAN */
.Lfound_char:
lsr r7,r7,7 ; markers down to the 0x01 positions
bic r2,r7,r6 ; drop false markers from borrow propagation
.Lfound_char_b:
norm r2,r2 ; locate the first (most significant) marker
sub_s r0,r0,4
asr_s r2,r2,3 ; bit position -> byte offset
j_l.d [blink]
add_s r0,r0,r2 ; delay slot: address of the matching byte
.Lfound0_ua: ; NUL in the first (partial) word: r7 is the masked 0x01s
mov_s r3,r7
.Lfound0: ; word has a NUL; decide whether c appears before it
asl_s r2,r2,7
or r7,r6,r4
bic_s r12,r12,r2
sub r2,r7,r3
or r2,r2,r6
bic r12,r2,r12
bic.f r3,r4,r12
norm r3,r3
add.pl r3,r3,1
asr_s r12,r3,3
asl.f 0,r2,r3 ; sign decides NUL-first vs. match-first
add_s r0,r0,r12
j_s.d [blink]
mov.mi r0,0 ; NUL came first: return NULL
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_NORM__
&& __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,560
|
plugin-libc/newlib/libc/machine/arc/memset-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* ARC HS has its own implementation of memset, yet we want this function
still to be compiled under "__dummy_memset" disguise, because strncpy
function uses __strncpy_bzero as a second entry point into memset. Would be
better to add __strncpy_bzero label to memset for ARC HS though, and even
better would be to avoid a second entry point into function. ARC HS always
has barrel-shifter, so this implementation will be always used for this
purpose. */
#if !defined (__ARC601__) && defined (__ARC_BARREL_SHIFTER__)
/* To deal with alignment/loop issues, SMALL must be at least 2. */
#define SMALL 7
.global __strncpy_bzero
.hidden __strncpy_bzero
/* __strncpy_bzero provides the following interface to strncpy:
r0: return value
r2: zeroing length
r3: zeroing start address
No attempt is made here for __strncpy_memset to speed up aligned
cases, because the copying of a string presumably leaves start address
and length alignment for the zeroing randomly distributed. */
#ifdef __ARCHS__
ENTRY (__dummy_memset)
#else
ENTRY (memset)
#endif
; void *memset (void *dst [r0], int c [r1], size_t n [r2])
; Also defines the __strncpy_bzero entry point described above
; (fill byte forced to 0, r3/r2 preset by the caller).
#if !defined (__ARC700__) && !defined (__ARCEM__)
#undef SMALL
#define SMALL 8 /* Even faster if aligned. */
brls.d r2,SMALL,.Ltiny ; small n: computed-jump byte stores
#endif
mov_s r3,r0 ; r3 = running store pointer (r0 kept as return value)
or r12,r0,r2
bmsk.f r12,r12,1 ; Z iff dst and n are both multiples of 4
extb_s r1,r1 ; fill value as an unsigned byte
asl r12,r1,8
beq.d .Laligned ; aligned case: straight to the word loop
or_s r1,r1,r12 ; delay slot: fill byte replicated into low 16 bits
#if defined (__ARC700__) || defined (__ARCEM__)
brls r2,SMALL,.Ltiny ; small n: simple byte loop
#endif
.Lnot_tiny:
; Unaligned head/tail: unconditionally paint the last byte+halfword
; and the first byte+halfword, then round r3 up and adjust r2 so the
; word loop covers the rest.
add_s r12,r2,r0 ; r12 = end address
stb r1,[r12,-1] ; last byte
bclr_l r12,r12,0
stw r1,[r12,-2] ; last halfword (2-byte aligned)
bmsk.f r12,r3,1
add_s r2,r2,r12
sub.ne r2,r2,4 ; account for the head bytes written below
stb.ab r1,[r3,1] ; first byte
bclr_s r3,r3,0
stw.ab r1,[r3,2] ; first halfword
bclr_s r3,r3,1 ; r3 now 4-byte aligned
.Laligned: ; This code address should be aligned for speed.
#if defined (__ARC700__) || defined (__ARCEM__)
asl r12,r1,16
lsr.f lp_count,r2,2 ; one word per iteration
or_s r1,r1,r12 ; fill byte replicated into all four bytes
lpne .Loop_end
st.ab r1,[r3,4]
.Loop_end:
j_s [blink]
#else /* !__ARC700 */
lsr.f lp_count,r2,3 ; two words per iteration; C = odd word pending
asl r12,r1,16
or_s r1,r1,r12 ; fill byte replicated into all four bytes
lpne .Loop_end
st.ab r1,[r3,4]
st.ab r1,[r3,4]
.Loop_end:
jcc [blink] ; no odd word: done
j_s.d [blink]
st_s r1,[r3] ; delay slot: final word
#endif /* !__ARC700 */
#if defined (__ARC700__) || defined (__ARCEM__)
.balign 4
__strncpy_bzero: ; second entry point: zero r2 bytes at r3, return r0
brhi.d r2,17,.Lnot_tiny
mov_l r1,0 ; delay slot: fill value 0
.Ltiny:
mov.f lp_count,r2 ; byte loop for small counts
lpne .Ltiny_end
stb.ab r1,[r3,1]
.Ltiny_end:
j_s [blink]
#else /* !__ARC700__ */
#if SMALL > 8
FIXME
#endif
.balign 4
__strncpy_bzero: ; second entry point: zero r2 bytes at r3, return r0
brhi.d r2,8,.Lnot_tiny
mov_s r1,0 ; delay slot: fill value 0
.Ltiny:
; Computed jump into the stb_s ladder below so that exactly r2 of the
; eight byte-stores execute (each stb_s encodes in 2 bytes).
sub_s r2,r2,11
sub1 r12,pcl,r2 ; r12 = pcl - 2*(r2 - 11)
j_s [r12]
stb_s r1,[r3,7]
stb_s r1,[r3,6]
stb_s r1,[r3,5]
stb_s r1,[r3,4]
stb_s r1,[r3,3]
stb_s r1,[r3,2]
stb_s r1,[r3,1]
stb_s r1,[r3]
j_s [blink]
#endif /* !__ARC700 */
#ifdef __ARCHS__
ENDFUNC (__dummy_memset)
#else
ENDFUNC (memset)
#endif
#endif /* !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 7,794
|
plugin-libc/newlib/libc/machine/arc/memcpy-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARCHS__)
#ifdef __LITTLE_ENDIAN__
# define SHIFT_1(RX,RY,IMM) asl RX, RY, IMM ; <<
# define SHIFT_2(RX,RY,IMM) lsr RX, RY, IMM ; >>
# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM
# define MERGE_2(RX,RY,IMM)
# define EXTRACT_1(RX,RY,IMM) and RX, RY, 0xFFFF
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, IMM
#else
# define SHIFT_1(RX,RY,IMM) lsr RX, RY, IMM ; >>
# define SHIFT_2(RX,RY,IMM) asl RX, RY, IMM ; <<
# define MERGE_1(RX,RY,IMM) asl RX, RY, IMM ; <<
# define MERGE_2(RX,RY,IMM) asl RX, RY, IMM ; <<
# define EXTRACT_1(RX,RY,IMM) lsr RX, RY, IMM
# define EXTRACT_2(RX,RY,IMM) lsr RX, RY, 0x08
#endif
#ifdef __ARC_LL64__
# define PREFETCH_READ(RX) prefetch [RX, 56]
# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
# define ZOLSHFT 5
# define ZOLAND 0x1F
#else
# define PREFETCH_READ(RX) prefetch [RX, 28]
# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
# define LOADX(DST,RX) ld.ab DST, [RX, 4]
# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
# define ZOLSHFT 4
# define ZOLAND 0xF
#endif
#ifdef __ARC_ALIGNED_ACCESS__
;-----------------------------------------------------------------------
; void *memcpy (void *dst /*r0*/, const void *src /*r1*/, size_t n /*r2*/)
; ARC HS variant built when __ARC_ALIGNED_ACCESS__ is defined, i.e. the
; core cannot perform unaligned word accesses.  Strategy: byte-copy until
; dst is word aligned, then dispatch on src alignment (mod 4) to one of
; four cases; the unaligned cases read whole words and reassemble bytes
; with the SHIFT_*/MERGE_*/EXTRACT_* endian macros defined above.
; Returns the original dst in r0 (r3 is used as the working dst pointer).
; Uses zero-overhead loops (lp_count) and explicit prefetch hints.
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY (memcpy)
prefetch [r1] ; Prefetch the read location
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
; if size <= 8
cmp r2, 8
bls.d @.Lsmallchunk
mov.f lp_count, r2
; Byte-copy 0..3 bytes so that r3 (dst) becomes word aligned.
and.f r4, r0, 0x03
rsub lp_count, r4, 4
lpnz @.Laligndestination
; LOOP BEGIN
ldb.ab r5, [r1,1]
sub r2, r2, 1
stb.ab r5, [r3,1]
.Laligndestination:
; Check the alignment of the source
and.f r4, r1, 0x03
bnz.d @.Lsourceunaligned
; CASE 0: Both source and destination are 32bit aligned
; Convert len to Dwords, unfold x4
lsr.f lp_count, r2, ZOLSHFT
lpnz @.Lcopy32_64bytes
; LOOP START
LOADX (r6, r1)
PREFETCH_READ (r1)
PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
STOREX (r6, r3)
STOREX (r8, r3)
STOREX (r10, r3)
STOREX (r4, r3)
.Lcopy32_64bytes:
and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes
.Lsmallchunk:
lpnz @.Lcopyremainingbytes
; LOOP START
ldb.ab r5, [r1,1]
stb.ab r5, [r3,1]
.Lcopyremainingbytes:
j [blink]
; END CASE 0
.Lsourceunaligned:
; r4 = src & 3 is 1, 2 or 3 here; dispatch on it.  The sub in the
; delay slot pre-adjusts the remaining count for the alignment read.
cmp r4, 2
beq.d @.LunalignedOffby2
sub r2, r2, 1
bhi.d @.LunalignedOffby3
ldb.ab r5, [r1, 1]
; CASE 1: The source is unaligned, off by 1
; Hence I need to read 1 byte for a 16bit alignment
; and 2bytes to reach 32bit alignment
ldh.ab r6, [r1, 2]
sub r2, r2, 2
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
MERGE_1 (r6, r6, 8)
MERGE_2 (r5, r5, 24)
or r5, r5, r6
; Both src and dst are aligned
; Main loop: each iteration moves 8 bytes, carrying 3 leftover bytes
; of the previous word in r5 across iterations.
lpnz @.Lcopy8bytes_1
; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 24)
or r7, r7, r5
SHIFT_2 (r5, r6, 8)
SHIFT_1 (r9, r8, 24)
or r9, r9, r5
SHIFT_2 (r5, r8, 8)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_1:
; Write back the remaining 16bits
EXTRACT_1 (r6, r5, 16)
sth.ab r6, [r3, 2]
; Write back the remaining 8bits
EXTRACT_2 (r5, r5, 16)
stb.ab r5, [r3, 1]
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_1
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_1:
j [blink]
.LunalignedOffby2:
; CASE 2: The source is unaligned, off by 2
; One halfword read brings src to word alignment; r5 carries the
; leftover halfword between loop iterations.
ldh.ab r5, [r1, 2]
sub r2, r2, 1
; Both src and dst are aligned
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
#ifdef __BIG_ENDIAN__
asl.nz r5, r5, 16
#endif
lpnz @.Lcopy8bytes_2
; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 16)
or r7, r7, r5
SHIFT_2 (r5, r6, 16)
SHIFT_1 (r9, r8, 16)
or r9, r9, r5
SHIFT_2 (r5, r8, 16)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_2:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 16
#endif
sth.ab r5, [r3, 2]
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_2
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_2:
j [blink]
.LunalignedOffby3:
; CASE 3: The source is unaligned, off by 3
; Hence, I need to read 1byte for achieve the 32bit alignment
; (r5 was already loaded with that byte in the dispatch above.)
; Both src and dst are aligned
; Convert to words, unfold x2
lsr.f lp_count, r2, 3
#ifdef __BIG_ENDIAN__
asl.ne r5, r5, 24
#endif
lpnz @.Lcopy8bytes_3
; LOOP START
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5
SHIFT_2 (r5, r6, 24)
SHIFT_1 (r9, r8, 8)
or r9, r9, r5
SHIFT_2 (r5, r8, 24)
st.ab r7, [r3, 4]
st.ab r9, [r3, 4]
.Lcopy8bytes_3:
#ifdef __BIG_ENDIAN__
lsr.nz r5, r5, 24
#endif
stb.ab r5, [r3, 1]
and.f lp_count, r2, 0x07 ;Last 8bytes
lpnz @.Lcopybytewise_3
; LOOP START
ldb.ab r6, [r1,1]
stb.ab r6, [r3,1]
.Lcopybytewise_3:
j [blink]
ENDFUNC (memcpy)
#else
;-----------------------------------------------------------------------
; void *memcpy (void *dst /*r0*/, const void *src /*r1*/, size_t n /*r2*/)
; ARC HS variant for cores that can perform unaligned accesses (the
; !__ARC_ALIGNED_ACCESS__ branch): no alignment fix-up is needed, so the
; copy is simply an unrolled word/doubleword loop plus a byte tail.
; Returns the original dst in r0; r3 is the working dst pointer.
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY(memcpy)
prefetch [r1] ; Prefetch the read location
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
;;; if size <= 8
cmp r2, 8
bls.d @.Lsmallchunk
mov.f lp_count, r2
;;; Convert len to Dwords, unfold x4
;;; (each ZOL iteration moves 16 or 32 bytes depending on LL64)
lsr.f lp_count, r2, ZOLSHFT
lpnz @.Lcopyfast
;; LOOP START
LOADX (r6, r1)
PREFETCH_READ (r1)
PREFETCH_WRITE (r3)
LOADX (r8, r1)
LOADX (r10, r1)
LOADX (r4, r1)
STOREX (r6, r3)
STOREX (r8, r3)
STOREX (r10, r3)
STOREX (r4, r3)
.Lcopyfast:
#ifdef __ARC_LL64__
and r2, r2, ZOLAND ;Remaining 31 bytes
lsr.f lp_count, r2, 3 ;Convert to 64-bit words.
lpnz @.Lcopy64b
;; LOOP START
ldd.ab r6,[r1,8]
std.ab r6,[r3,8]
.Lcopy64b:
and.f lp_count, r2, 0x07 ; Last 7 bytes
#else
and.f lp_count, r2, ZOLAND
#endif
.Lsmallchunk:
lpnz @.Lcopyremainingbytes
;; LOOP START
ldb.ab r5, [r1,1]
stb.ab r5, [r3,1]
.Lcopyremainingbytes:
j [blink]
ENDFUNC(memcpy)
#endif
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,862
|
plugin-libc/newlib/libc/machine/arc/memset-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#ifdef __ARCHS__
#define USE_PREFETCH
#ifdef USE_PREFETCH
#define PREWRITE(A,B) prefetchw [(A),(B)]
#else
#define PREWRITE(A,B) prealloc [(A),(B)]
#endif
;-----------------------------------------------------------------------
; void *memset (void *dst /*r0*/, int c /*r1*/, size_t n /*r2*/)
; ARC HS variant.  Byte-fills until dst is word aligned, replicates the
; fill byte across a 32-bit word (r4/r5), then stores in unrolled
; 64-byte and 32-byte zero-overhead loops with prefetch-for-write
; hints, finishing with a byte tail loop.
; Returns the original dst in r0; r3 is the working dst pointer.
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY (memset)
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
; if length < 8
brls.d.nt r2, 8, .Lsmallchunk
mov.f lp_count,r2
; Byte-fill 0..3 bytes so r3 becomes word aligned.
and.f r4, r0, 0x03
rsub lp_count, r4, 4
lpnz @.Laligndestination
; LOOP BEGIN
stb.ab r1, [r3,1]
sub r2, r2, 1
.Laligndestination:
; Destination is aligned
; Replicate the fill byte into all four bytes of r4 and r5.
and r1, r1, 0xFF
asl r4, r1, 8
or r4, r4, r1
asl r5, r4, 16
or r5, r5, r4
mov r4, r5
; Split the length between the 64-byte loop and the remainder:
; for n > 64, r2 keeps (n mod 32) + 8 bytes for the tail loops and
; lp_count gets the rest; for n <= 64 the 64-byte loop is skipped.
sub3 lp_count, r2, 8
cmp r2, 64
bmsk.hi r2, r2, 5
mov.ls lp_count, 0
add3.hi r2, r2, 8
; Convert len to Dwords, unfold x8
lsr.f lp_count, lp_count, 6
lpnz @.Lset64bytes
; LOOP START
PREWRITE (r3, 64) ;Prefetch the next write location
#ifdef __ARC_LL64__
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset64bytes:
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
#ifdef __ARC_LL64__
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset32bytes:
and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
.Lsmallchunk:
lpnz .Lcopy3bytes
; LOOP START
stb.ab r1, [r3, 1]
.Lcopy3bytes:
j [blink]
ENDFUNC (memset)
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,419
|
plugin-libc/newlib/libc/machine/arc/memset.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memset.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) \
|| (!defined (__ARC_BARREL_SHIFTER__) && !defined (__ARCHS__))
/* To deal with alignment/loop issues, SMALL must be at least 2. */
#define SMALL 8 /* Even faster if aligned. */
.global __strncpy_bzero
.hidden __strncpy_bzero
/* __strncpy_bzero provides the following interface to strncpy:
r0: return value
r2: zeroing length
r3: zeroing start address
No attempt is made here for __strncpy_memset to speed up aligned
cases, because the copying of a string presumably leaves start address
and length alignment for the zeroing randomly distributed. */
;-----------------------------------------------------------------------
; void *memset (void *dst /*r0*/, int c /*r1*/, size_t n /*r2*/)
; ARC601 / no-barrel-shifter variant.  Small fills (n <= SMALL) jump
; into a computed store ladder at .Ltiny; larger fills replicate the
; byte through a scratch word built on the stack, write unaligned head
; and tail bytes, then store words/doublewords in the aligned middle.
; Also exports __strncpy_bzero (r2 = length, r3 = start address), a
; zero-fill entry used by strncpy — see the comment above ENTRY.
; Returns the original dst in r0.
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY (memset)
brls.d r2,SMALL,.Ltiny
mov_s r3,r0
; r12 low bits record whether either dst or len is misaligned (mod 4).
or r12,r0,r2
bmsk.f r12,r12,1
breq_s r1,0,.Lbzero
; Replicate the fill byte to a full word via four byte stores below sp
; (red-zone style scratch), then reload it as one word into r1.
mov r4,0
stb.a r1,[sp,-4]
stb r1,[sp,1]
stb r1,[sp,2]
stb r1,[sp,3]
ld.ab r1,[sp,4]
.Lbzero:
beq.d .Laligned
.Lbzero2:
add r6,r2,r3 ; r6 = one past the end of the fill region
.Lnot_tiny:
; Unaligned head/tail fix-up: store the last byte/halfword and the
; first byte/halfword, advancing r3 to a 4-byte-aligned position.
stb r1,[r6,-1]
bclr r12,r6,0
stw r1,[r12,-2]
stb.ab r1,[r3,1]
bclr_s r3,r3,0
stw.ab r1,[r3,2]
bclr_s r3,r3,1
.Laligned: ; This code address should be aligned for speed.
sub r6,r6,8
brlo.d r6,r3,.Loop_end
sub r6,r6,8
3:
st_l r1,[r3,4]
brhs.d r6,r3,3b
st.ab r1,[r3,8]
.Loop_end:
bic r12,r6,3
j_s.d [blink]
st_s r1,[r12,12]
.balign 4
__strncpy_bzero:
; Zero-fill entry for strncpy: r2 = length, r3 = start address.
brhi.d r2,8,.Lbzero2
mov_s r1,0
.Ltiny:
; Computed jump into the store ladder: executes exactly r2 of the
; eight byte stores below (entry offset derived from pcl and r2).
sub_s r2,r2,11
sub1 r12,pcl,r2
j_s [r12]
stb_s r1,[r3,7]
stb_s r1,[r3,6]
stb_s r1,[r3,5]
stb_s r1,[r3,4]
stb_s r1,[r3,3]
stb_s r1,[r3,2]
stb_s r1,[r3,1]
stb_s r1,[r3]
j_s [blink]
ENDFUNC (memset)
#endif /* __ARC601__ || (!__ARC_BARREL_SHIFTER__ && !__ARCHS__) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 2,969
|
plugin-libc/newlib/libc/machine/arc/strcmp-archs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#ifdef __ARCHS__
;-----------------------------------------------------------------------
; int strcmp (const char *s1 /*r0*/, const char *s2 /*r1*/)
; ARC HS variant.  If both pointers are word aligned, compares a word
; at a time, detecting an embedded NUL with the classic
; (w - 0x01010101) & ~w & 0x80808080 trick; otherwise falls back to a
; simple byte loop.  Returns <0 / 0 / >0 in r0 (negative results are
; produced by setting bit 31, positive ones as 1 — sign is what
; callers rely on, not magnitude).
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY (strcmp)
or r2, r0, r1
bmsk_s r2, r2, 1
brne r2, 0, @.Lcharloop
; s1 and s2 are word aligned
ld.ab r2, [r0, 4]
mov_s r12, 0x01010101
ror r11, r12 ; r11 = 0x80808080
.align 4
.LwordLoop:
ld.ab r3, [r1, 4]
; Detect NULL char in str1
sub r4, r2, r12
ld.ab r5, [r0, 4] ; prefetch next word of s1 while testing
bic r4, r4, r2
and r4, r4, r11
brne.d.nt r4, 0, .LfoundNULL
; Check if the read locations are the same
cmp r2, r3
beq.d .LwordLoop
mov.eq r2, r5
; A match is found, spot it out
; (words differ and neither contains NUL; byte-swap on LE so an
; unsigned word compare orders by the first differing byte)
#ifdef __LITTLE_ENDIAN__
swape r3, r3
mov_s r0, 1
swape r2, r2
#else
mov_s r0, 1
#endif
cmp_s r2, r3
j_s.d [blink]
bset.lo r0, r0, 31
.align 4
.LfoundNULL:
#ifdef __BIG_ENDIAN__
swape r4, r4
swape r2, r2
swape r3, r3
#endif
; Find null byte
ffs r0, r4
; Mask off the bytes past the NUL before comparing.
bmsk r2, r2, r0
bmsk r3, r3, r0
swape r2, r2
swape r3, r3
; make the return value
sub.f r0, r2, r3
mov.hi r0, 1
j_s.d [blink]
bset.lo r0, r0, 31
.align 4
.Lcharloop:
; Unaligned fallback: byte-at-a-time compare until NUL or mismatch.
ldb.ab r2, [r0, 1]
ldb.ab r3, [r1, 1]
nop
breq r2, 0, .Lcmpend
breq r2, r3, .Lcharloop
.align 4
.Lcmpend:
j_s.d [blink]
sub r0, r2, r3
ENDFUNC (strcmp)
#endif /* __ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 5,138
|
plugin-libc/newlib/libc/machine/arc/memcmp-bs-norm.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if !defined (__ARC601__) && defined (__ARC_NORM__) \
&& defined (__ARC_BARREL_SHIFTER__)
#ifdef __LITTLE_ENDIAN__
#define WORD2 r2
#define SHIFT r3
#else /* BIG ENDIAN */
#define WORD2 r3
#define SHIFT r2
#endif
;-----------------------------------------------------------------------
; int memcmp (const void *s1 /*r0*/, const void *s2 /*r1*/, size_t n /*r2*/)
; Variant for cores with NORM and a barrel shifter (and not ARC601).
; Word-aligned inputs are compared two words per iteration in a
; zero-overhead loop with one word of read-ahead; the first differing
; word pair is resolved to a byte-accurate result via norm (LE) or a
; shift mask (BE).  Unaligned or short inputs use the bytewise loop.
; Returns <0 / 0 / >0 in r0 (sign-significant only).
; WORD2/SHIFT map r2/r3 per endianness (see #defines above).
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY (memcmp)
or r12,r0,r1
asl_s r12,r12,30 ; nonzero iff s1|s2 has low alignment bits set
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
sub_l r3,r2,1
brls r2,r12,.Lbytewise
#else
brls.d r2,r12,.Lbytewise
sub_s r3,r2,1
#endif
ld r4,[r0,0]
ld r5,[r1,0]
lsr.f lp_count,r3,3 ; iterations of the 8-bytes-per-pass loop
#ifdef __ARCEM__
/* A branch can't be the last instruction in a zero overhead loop.
So we move the branch to the start of the loop, duplicate it
after the end, and set up r12 so that the branch isn't taken
initially. */
mov_s r12,WORD2
lpne .Loop_end
brne WORD2,r12,.Lodd
ld WORD2,[r0,4]
#else
lpne .Loop_end
ld_s WORD2,[r0,4]
#endif
ld_s r12,[r1,4]
brne r4,r5,.Leven
ld.a r4,[r0,8]
ld.a r5,[r1,8]
#ifdef __ARCEM__
.Loop_end:
brne WORD2,r12,.Lodd
#else
brne WORD2,r12,.Lodd
#ifdef __ARCHS__
nop
#endif
.Loop_end:
#endif
; Loop fell through: at most one word pair (r4/r5) plus a partial
; word remain.  SHIFT*8 = number of trailing bits to ignore.
asl_s SHIFT,SHIFT,3
bcc_s .Last_cmp
brne r4,r5,.Leven
ld r4,[r0,4]
ld r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
nop_s
; one more load latency cycle
.Last_cmp:
; Find the lowest differing byte with norm over (x & -x), after
; planting a sentinel bit just past the valid length.
xor r0,r4,r5
bset r0,r0,SHIFT
sub_s r1,r0,1
bic_s r1,r1,r0
norm r1,r1
b.d .Leven_cmp
and r1,r1,24
.Leven:
xor r0,r4,r5
sub_s r1,r0,1
bic_s r1,r1,r0
norm r1,r1
; slow track insn
and r1,r1,24
.Leven_cmp:
; Shift the differing byte to the top, then compare unsigned.
asl r2,r4,r1
asl r12,r5,r1
lsr_s r2,r2,1
lsr_s r12,r12,1
j_s.d [blink]
sub r0,r2,r12
.balign 4
.Lodd:
xor r0,WORD2,r12
sub_s r1,r0,1
bic_s r1,r1,r0
norm r1,r1
; slow track insn
and r1,r1,24
asl_s r2,r2,r1
asl_s r12,r12,r1
lsr_s r2,r2,1
lsr_s r12,r12,1
j_s.d [blink]
sub r0,r2,r12
#else /* !__ARC700__ */
.balign 4
.Last_cmp:
xor r0,r4,r5
b.d .Leven_cmp
bset r0,r0,SHIFT
.Lodd:
mov_s r4,WORD2
mov_s r5,r12
.Leven:
xor r0,r4,r5
.Leven_cmp:
; Isolate the lowest differing bit's byte with the 0x80808080 mask
; trick, then compare just those bytes of r4/r5.
mov_s r1,0x80808080
; uses long immediate
sub_s r12,r0,1
bic_s r0,r0,r12
sub r0,r1,r0
xor_s r0,r0,r1
and r1,r5,r0
and r0,r4,r0
xor.f 0,r0,r1
sub_s r0,r0,r1
j_s.d [blink]
mov.mi r0,r1
#endif /* !__ARC700__ */
#else /* BIG ENDIAN */
.Last_cmp:
; BE: discard the trailing bytes beyond n by shifting them out.
neg_s SHIFT,SHIFT
lsr r4,r4,SHIFT
lsr r5,r5,SHIFT
; slow track insn
.Leven:
; BE words compare in memory order, so a plain unsigned word
; compare gives the right sign directly.
sub.f r0,r4,r5
mov.ne r0,1
j_s.d [blink]
bset.cs r0,r0,31
.Lodd:
cmp_s WORD2,r12
#if defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)
mov_s r0,1
j_s.d [blink]
bset.cs r0,r0,31
#else
j_s.d [blink]
rrc r0,2 ; r0 = +1 or sign-bit-set depending on carry
#endif /* __ARC700__ || __ARCEM__ || __ARCHS__ */
#endif /* ENDIAN */
.balign 4
.Lbytewise:
; Unaligned/short fallback: two bytes per iteration with read-ahead,
; mirroring the word loop's structure.
breq r2,0,.Lnil
ldb r4,[r0,0]
ldb r5,[r1,0]
lsr.f lp_count,r3
#ifdef __ARCEM__
mov r12,r3
lpne .Lbyte_end
brne r3,r12,.Lbyte_odd
#else
lpne .Lbyte_end
#endif
ldb_s r3,[r0,1]
ldb_l r12,[r1,1]
brne r4,r5,.Lbyte_even
ldb.a r4,[r0,2]
ldb.a r5,[r1,2]
#ifdef __ARCEM__
.Lbyte_end:
brne r3,r12,.Lbyte_odd
#else
brne r3,r12,.Lbyte_odd
#ifdef __ARCHS__
nop
#endif
.Lbyte_end:
#endif
bcc_l .Lbyte_even
brne r4,r5,.Lbyte_even
ldb_s r3,[r0,1]
ldb_s r12,[r1,1]
.Lbyte_odd:
j_s.d [blink]
sub r0,r3,r12
.Lbyte_even:
j_s.d [blink]
sub r0,r4,r5
.Lnil:
j_s.d [blink]
mov_l r0,0
ENDFUNC (memcmp)
#endif /* !__ARC601__ && __ARC_NORM__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,322
|
plugin-libc/newlib/libc/machine/arc/strcpy-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC700__) || defined (__ARCEM__) || defined (__ARCHS__)) \
&& defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
The beq_s to r3z could be made unaligned & long to avoid a stall
there, but the it is not likely to be taken often, and it
would also be likey to cost an unaligned mispredict at the next call. */
;-----------------------------------------------------------------------
; char *strcpy (char *dst /*r0*/, const char *src /*r1*/)
; Barrel-shifter variant (ARC700/EM/HS).  When both pointers are word
; aligned, copies a word at a time with one word of read-ahead, using
; (w - 0x01010101) & ~w & 0x80808080 (r8/r12) to detect a NUL inside a
; word; the final partial word is emitted byte by byte at r3z.
; Unaligned inputs use the byte loop at charloop.
; Returns the original dst in r0 (r10 is the working dst pointer).
; See the block comment above ENTRY for scheduling rationale.
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY (strcpy)
or r2,r0,r1
bmsk_s r2,r2,1
brne.d r2,0,charloop
mov_s r10,r0
ld_s r3,[r1,0]
mov r8,0x01010101
bbit0.d r1,2,loop_start
ror r12,r8 ; r12 = 0x80808080
; src was only 4-byte (not 8-byte) aligned: check and copy one word
; first so the main loop can read ahead safely.
sub r2,r3,r8
bic_s r2,r2,r3
tst_s r2,r12
bne_l r3z
mov_s r4,r3
.balign 4
loop:
ld.a r3,[r1,4]
st.ab r4,[r10,4]
loop_start:
ld.a r4,[r1,4]
; Test word r3 for an embedded NUL before storing it.
sub r2,r3,r8
bic_s r2,r2,r3
tst_l r2,r12
bne_l r3z
st.ab r3,[r10,4]
; Same test for the read-ahead word r4.
sub r2,r4,r8
bic r2,r2,r4
tst_l r2,r12
beq_l loop
mov_s r3,r4
; r3z: the word in r3 contains the terminating NUL — store its bytes
; one at a time (in memory order) until the zero byte is written.
#ifdef __LITTLE_ENDIAN__
r3z: bmsk.f r1,r3,7
lsr_s r3,r3,8
#else
r3z: lsr.f r1,r3,24
asl_s r3,r3,8
#endif
bne.d r3z
stb.ab r1,[r10,1]
j_s [blink]
.balign 4
charloop:
; Unaligned fallback: copy bytes until (and including) the NUL.
ldb.ab r3,[r1,1]
brne.d r3,0,charloop
stb.ab r3,[r10,1]
j [blink]
ENDFUNC (strcpy)
#endif /* (__ARC700__ || __ARCEM__ || __ARCHS__) && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,969
|
plugin-libc/newlib/libc/machine/arc/memcmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/memcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC601__) || !defined (__ARC_NORM__) \
|| !defined (__ARC_BARREL_SHIFTER__)
/* Addresses are unsigned, and at 0 is the vector table, so it's OK to assume
that we can subtract 8 from a source end address without underflow. */
;-----------------------------------------------------------------------
; int memcmp (const void *s1 /*r0*/, const void *s2 /*r1*/, size_t n /*r2*/)
; Generic variant (ARC601, or no NORM / no barrel shifter).  Aligned
; inputs are compared a word pair at a time with one word of
; read-ahead; unaligned inputs (and, per the comment below, all BE
; inputs) fall back to a bytewise loop of the same shape.
; Returns <0 / 0 / >0 in r0 (sign-significant only).
; See the comment above ENTRY about the safe end-address subtraction.
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY (memcmp)
or r12,r0,r1
tst r12,3
breq r2,0,.Lnil
add_s r3,r0,r2 ; r3 = one past the end of s1
/* This algorithm for big endian targets sometimes works incorrectly
when sources are aligned. To be precise the last step is omitted.
Just use a simple bytewise variant until the algorithm is reviewed
and fixed. */
#ifdef __LITTLE_ENDIAN__
bne_s .Lbytewise
#else /* BIG ENDIAN */
b_s .Lbytewise
#endif /* ENDIAN */
sub r6,r3,8 ; loop bound; safe, see comment above ENTRY
ld r4,[r0,0]
ld r5,[r1,0]
2:
; Word loop: r4/r5 hold the current pair, r3/r12 the read-ahead pair.
brhs r0,r6,.Loop_end
ld_s r3,[r0,4]
ld_s r12,[r1,4]
brne r4,r5,.Leven
ld.a r4,[r0,8]
breq.d r3,r12,2b
ld.a r5,[r1,8]
#ifdef __LITTLE_ENDIAN__
mov_s r4,r3
b.d .Lodd
mov_s r5,r12
#else /* BIG ENDIAN */
cmp_s r3,r12
j_s.d [blink]
rrc r0,2
#endif /* ENDIAN */
.balign 4
.Loop_end:
; 1..8 bytes remain (in r4/r5 and possibly one more word pair).
sub r3,r0,r6
brhs r3,4,.Last_cmp
brne r4,r5,.Leven
ld r4,[r0,4]
ld r5,[r1,4]
#ifdef __LITTLE_ENDIAN__
.balign 4
.Last_cmp:
; Plant a sentinel difference bit just past the valid bytes so the
; lowest-set-bit isolation below ignores the tail garbage.
mov_l r0,24
add3 r2,r0,r2
xor r0,r4,r5
b.d .Leven_cmp
bset r0,r0,r2
.Lodd:
.Leven:
xor r0,r4,r5
.Leven_cmp:
; Isolate the lowest differing byte via the 0x80808080 mask trick
; and compare just those bytes of r4/r5.
mov_s r1,0x80808080
; uses long immediate
sub_s r12,r0,1
bic_s r0,r0,r12
sub r0,r1,r0
xor_s r0,r0,r1
and r1,r5,r0
and r0,r4,r0
#else /* BIG ENDIAN */
.Last_cmp:
; BE: build a mask that keeps only the first n-valid bytes.
mov_s r3,0
sub3 r2,r3,r2
sub_s r3,r3,1
bclr r3,r3,r2
add_l r3,r3,1
and r0,r4,r3
and r1,r5,r3
.Leven:
#endif /* ENDIAN */
xor.f 0,r0,r1
sub_s r0,r0,r1
j_s.d [blink]
mov.mi r0,r1
.balign 4
.Lbytewise:
; Byte loop with one byte of read-ahead, mirroring the word loop.
ldb r4,[r0,0]
ldb r5,[r1,0]
sub r6,r3,2
3:
brhs r0,r6,.Lbyte_end
ldb_s r3,[r0,1]
ldb_s r12,[r1,1]
brne r4,r5,.Lbyte_even
ldb.a r4,[r0,2]
breq.d r3,r12,3b
ldb.a r5,[r1,2]
.Lbyte_odd:
j_s.d [blink]
sub r0,r3,r12
.balign 4
.Lbyte_end:
bbit1 r2,0,.Lbyte_even
brne r4,r5,.Lbyte_even
ldb r4,[r0,1]
ldb r5,[r1,1]
.Lbyte_even:
j_s.d [blink]
sub r0,r4,r5
.Lnil:
j_s.d [blink]
mov_s r0,0
ENDFUNC (memcmp)
#endif /* __ARC601__ || !__ARC_NORM__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,143
|
plugin-libc/newlib/libc/machine/arc/strlen.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined(__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
/* This code is optimized for the ARC601 pipeline without barrel shifter. */
;-----------------------------------------------------------------------
; size_t strlen (const char *s /*r0*/)
; ARC601 / no-barrel-shifter variant.  Reads two words per iteration
; from the aligned address below s, masking off bytes before s on the
; first pass, and detects a NUL byte with
; (w - 0x01010101) & ~w & 0x80808080 (r4 = 0x01010101, r5 = ror r4 =
; 0x80808080).  The tail code then locates the exact NUL byte within
; the last word pair (r1/r12) and returns the length in r0.
; NOTE(review): the inner #ifdef __OPTIMIZE_SIZE__ branch is dead code
; here, since the whole file is guarded by !__OPTIMIZE_SIZE__ — kept
; as-is; confirm intent upstream.
; NOTE(review): comments only — the instruction stream is unchanged.
;-----------------------------------------------------------------------
ENTRY (strlen)
or r3,r0,7 ; r3 = aligned cursor (last byte of first dword)
ld r2,[r3,-7]
ld.a r6,[r3,-3]
mov r4,0x01010101
; uses long immediate
#ifdef __LITTLE_ENDIAN__
; First iteration: mask out bytes that precede s within the first
; word(s), based on s & 7.
bmsk.f 0,r0,1
mov_s r1,31
add3_s r1,r1,r0
bmsk r7,r4,r1
xor.ne r7,r7,r4
btst_s r0,2
ror r5,r4 ; r5 = 0x80808080
sub r1,r2,r7
bic_s r1,r1,r2
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#else /* BIG ENDIAN */
add.f r1,r4,30 ; r1 mod 31 := -1; clear carry
ror r5,r4 ; r5 = 0x80808080
sub3 r7,r1,r0
btst_s r0,2
sub r1,r2,r4
bic_s r1,r1,r2
bmsk r1,r1,r7
sub r12,r6,r4
bic r12,r12,r6
bmsk.ne r12,r12,r7
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#endif /* ENDIAN */
.Loop:
; Steady state: scan two words (r2, r6) per iteration for a NUL.
ld_s r2,[r3,4]
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4
bic_s r1,r1,r2
sub r12,r6,r4
bic r12,r12,r6
or_s r12,r12,r1
and r12,r12,r5
breq_s r12,0,.Loop
.Lend:
; r1 nonzero => NUL is in the first word of the pair; back r3 up.
and.f r1,r1,r5
sub.ne r3,r3,4
#ifdef __LITTLE_ENDIAN__
; Count how many leading bytes of the word are non-NUL.
mov.eq r1,r12
btst_s r1,7
sub r0,r3,r0
add.eq r0,r0,1
bmsk.f 0,r1,15
add.eq r0,r0,1
bmsk.f 0,r1,23
j_s.d [blink]
add.eq r0,r0,1
#else /* BIG ENDIAN */
#ifdef __OPTIMIZE_SIZE__
1: ldb_s r1,[r3]
breq_s r1,0,0f
ldb.a r1,[r3,1]
breq_s r1,0,0f
ldb.a r1,[r3,1]
breq_s r1,0,0f
add_s r3,r3,1
0: j_s.d [blink]
sub r0,r3,r0
#define SPECIAL_EARLY_END
.Learly_end:
mov_s r3,r0
b_s 1b
#elif 0 /* Need more information about pipeline to assess if this is faster. */
mov.eq r2,r6
and r2,r2,r5
sub1 r2,r4,r2
mov.eq r1,r12
bic.f r1,r1,r2
sub r0,r3,r0
add.pl r0,r0,1
btst.pl r1,23
add.eq r0,r0,1
btst.eq r1,15
j_s.d [blink]
add.eq r0,r0,1
#else /* !__OPTIMIZE_SIZE__ */
/* Need carry clear here. */
mov.eq r2,r6
1: bmsk r1,r2,23
breq r1,r2,0f
bmsk r2,r1,15
breq.d r1,r2,0f
add_s r3,r3,1
cmp r2,0x100
add_s r3,r3,2
0: j_s.d [blink]
sbc r0,r3,r0
#define SPECIAL_EARLY_END
.Learly_end:
; NUL found in the (masked) first word pair: undo the sentinel bit
; planted at bit r7 and rejoin the common tail at .Lend.
sub_s.ne r1,r1,r1
mov_s r12,0
bset r12,r12,r7
sub1 r2,r2,r12
b.d .Lend
sub1.ne r6,r6,r12
#endif /* !__OPTIMIZE_SIZE__ */
#endif /* ENDIAN */
#ifndef SPECIAL_EARLY_END
.balign 4
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1
#endif /* !SPECIAL_EARLY_END */
ENDFUNC (strlen)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__*/
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 5,286
|
plugin-libc/newlib/libc/machine/arc/strchr.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* In order to search for a zero in a W, we calculate
X := (W - 0x01010101) & ~W & 0x80808080;
In the little endian case:
If no byte in W is zero, X will be zero; otherwise, the least significant
byte of X which is nonzero indicates the least significant byte of W that
is zero.
In the big endian case:
X will be zero iff no byte in W is zero.
If X is nonzero, to find out which is the most significant zero byte
in W, we calculate:
Y := ~(((W | 0x80808080) - 0x01010101) | W) & 0x80808080;
Each byte in Y is 0x80 if the corresponding byte in
W is zero, otherwise that byte of Y is 0. */
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
; char *strchr (const char *s, int c)
; In:  r0 = s, r1 = c.
; Out: r0 = pointer to the first occurrence of c in s (including the
;      terminating NUL if c == 0), or 0 if c does not occur.
; Variant for ARC601 / cores without a barrel shifter: the byte
; replication of c is done with shift-and-add (add2/add3) instructions.
ENTRY (strchr)
bmsk.f r2,r0,1 ; r2 = s & 3 (misalignment); Z set if already word-aligned
mov_s r3,0x01010101 ; zero-detection constant
extb_s r1,r1 ; c &= 0xff
mov r8,0
; Replicate c into all four bytes without a shifter:
; r5 = c * 0x101, r4 = r5 << 15, so that the later "add1 r5,r5,r4"
; yields r5 = c * 0x01010101.
add3 r5,r8,r1
add3 r5,r8,r5
add2 r5,r1,r5
add3 r4,r8,r5
add3 r4,r8,r4
add3 r4,r8,r4
add3 r4,r8,r4
beq.d .Laligned
add3 r4,r8,r4
; Unaligned head: back up to the enclosing word and build r7, a copy of
; 0x01010101 with the bytes before the string start cleared, so those
; bytes are ignored by the zero/match tests.
sub_s r0,r0,r2
#ifdef __LITTLE_ENDIAN__
add3.f r2,-1,r2
bmsk r7,r3,r2
rsub.pl r7,r7,r3
#else
mov_s r12,31
sub3 r2,r12,r2
bmsk r7,r3,r2
#endif
ld_s r2,[r0] ; first (partial) word
add1 r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r7 ; NUL test, ignoring pre-string bytes
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0_ua
xor r6,r2,r5 ; r6 has a zero byte wherever r2 matches c
ld.a r2,[r0,4] ; read ahead the next word
sub r12,r6,r7 ; match test on the head word
bic r12,r12,r6
#ifdef __LITTLE_ENDIAN__
and.f r7,r12,r4 ; r7 = per-byte match indicator for the head word
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no match in the head word: enter the main loop
and r12,r12,r4
b.d .Lfound_char_ua
btst r7,7
#else
and.f r8,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no match in the head word: enter the main loop
and r12,r12,r4
bic r12,r7,r6
bic r2,r3,r12
sub1 r2,r3,r2
sub_s r0,r0,4
b.d .Lfound_char_ua
bic.f r2,r8,r2
#endif
.balign 4
.Laligned:
ld_s r2,[r0]
add1 r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r3
bic_s r12,r12,r2
and r12,r12,r4 ; r12 != 0 iff the word contains a NUL byte
; Main loop, one word per iteration: r2 = current word, r12 = its NUL
; indicator, r6 = r2 ^ r5 (zero byte where the word matches c).
.Loop:
brne.d r12,0,.Lfound0
xor r6,r2,r5
ld.a r2,[r0,4]
sub r12,r6,r3
bic r12,r12,r6
and.f r7,r12,r4 ; r7 != 0 iff the previous word contained c
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop
and r12,r12,r4
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
btst r7,7
.Lfound_char_ua:
; Locate the matching byte: step past one byte for each clear
; per-byte flag below the match (conditions chained via btst.eq).
sub_s r0,r0,4
add.eq r0,r0,1
btst.eq r7,15
add.eq r0,r0,1
btst.eq r7,23
j_s.d [blink]
add.eq r0,r0,1
.balign 4
.Lfound0_ua:
mov_l r3,r7
.Lfound0:
; A NUL was seen; decide whether c occurs at or before it.
sub r2,r6,r3
bic r2,r2,r6
and r2,r2,r4 ; r2 = match indicator for the word holding the NUL
or r3,r12,r2
sub_s r12,r3,1
xor_s r3,r3,r12 ; r3 = mask up to the first NUL-or-match byte
cmp 0xffff,r3
; cmp uses limm ; ARC600 would do: asl.f 0,r3,9
tst_s r2,r3 ; Z: the NUL comes first -> no match
mov.eq r0,0 ; not found: return NULL
add.mi r0,r0,1 ; otherwise derive the byte offset from the flags
btst.ne r3,15
j_s.d [blink]
adc.ne r0,r0,1
#else /* BIG ENDIAN */
.Lfound_char:
and r2,r6,r3
sub1 r2,r3,r2
sub_s r0,r0,4
bic.f r2,r7,r2
.Lfound_char_ua:
; Locate the matching byte from the per-byte flags in r2.
add.pl r0,r0,1
jmi.d [blink]
btst_s r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1
; N.B. if we searched for a char zero and found it in the MSB,
; and ignored matches are identical, we will take the early exit
; like for an ordinary found zero - except for the extra stalls at jhi -
; but still compute the right result.
.Lfound0_ua:
mov_s r3,r7
.Lfound0:
; A NUL was seen; decide whether c occurs at or before it.
and_s r2,r2,r3
sub1 r2,r3,r2
or r7,r6,r4
bic_s r12,r12,r2
sub r2,r7,r3
or r2,r2,r6
bic r2,r4,r2
cmp_s r12,r2
mov.hi r0,0 ; the NUL comes first -> return NULL
btst.ls r2,31
jhi.d [blink]
add.eq r0,r0,1 ; otherwise step to the matching byte
btst.eq r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,353
|
plugin-libc/newlib/libc/machine/arc/strlen-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strlen.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if (defined (__ARC600__) || !defined (__ARC_NORM__)) && !defined (__ARC601__) \
&& defined (__ARC_BARREL_SHIFTER__)
/* This code is optimized for the ARC600 pipeline. */
; size_t strlen (const char *s)
; In:  r0 = s.  Out: r0 = length of s.
; Scans two words (8 bytes) per loop iteration using the
; (W - 0x01010101) & ~W & 0x80808080 zero-byte trick; requires the
; barrel shifter and is scheduled for the ARC600 pipeline (see guard).
ENTRY (strlen)
or r3,r0,7 ; r3 = last byte of the 8-byte block containing s
ld r2,[r3,-7] ; first word of that (aligned) block
ld.a r6,[r3,-3] ; second word; write-back leaves r3 at its address
mov r4,0x01010101 ; zero-detection constant
; uses long immediate
#ifdef __LITTLE_ENDIAN__
asl_s r1,r0,3 ; bit offset of s within its word
btst_s r0,2 ; Z: s lies in the first word of the block
asl r7,r4,r1 ; constant shifted to ignore bytes before s
ror r5,r4 ; r5 = 0x80808080
sub r1,r2,r7
bic_l r1,r1,r2 ; NUL test on word 1, pre-string bytes masked out
mov.eq r7,r4
sub r12,r6,r7
bic r12,r12,r6 ; NUL test on word 2
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#else /* BIG ENDIAN */
ror r5,r4 ; r5 = 0x80808080
btst_s r0,2 ; Z: s lies in the first word of the block
mov_s r1,31
sub3 r7,r1,r0 ; highest relevant bit index for the first word
sub r1,r2,r4
bic_l r1,r1,r2
bmsk r1,r1,r7 ; NUL test on word 1, pre-string bytes masked out
sub r12,r6,r4
bic r12,r12,r6 ; NUL test on word 2
bmsk.ne r12,r12,r7
or.eq r12,r12,r1
and r12,r12,r5
brne r12,0,.Learly_end
#endif /* ENDIAN */
; Main loop: 8 bytes per iteration.  r1 accumulates the NUL indicator
; of the first word of the pair, r12 that of both words.
.Loop:
ld_s r2,[r3,4]
ld.a r6,[r3,8]
; stall for load result
sub r1,r2,r4
bic_s r1,r1,r2
sub r12,r6,r4
bic r12,r12,r6
or_s r12,r12,r1
and r12,r12,r5
breq_s r12,0,.Loop
; A NUL was found; decide in which word of the pair and in which byte.
.Lend:
and.f r1,r1,r5 ; NUL in the first word of the pair?
sub.ne r3,r3,4
#ifdef __LITTLE_ENDIAN__
mov.eq r1,r12
asr.f 0,r1,8
bmsk.f 0,r1,15
sub r0,r3,r0 ; base length from the word address ...
add.cc r0,r0,1 ; ... adjusted by the byte position in the word
jne.d [blink]
asl.f 0,r1,9
j_s.d [blink]
sbc r0,r0,-2
#else /* BIG ENDIAN */
mov.eq r2,r6
asl_s r2,r2,7
mov.eq r1,r12
bic_s r1,r1,r2
asr.f 0,r1,16
sub r0,r3,r0 ; base length from the word address ...
add.pl r0,r0,1 ; ... adjusted by the byte position in the word
jne.d [blink]
add.eq r0,r0,1
j_s.d [blink]
add.cc r0,r0,1
#endif /* ENDIAN */
.balign 4
.Learly_end:
; NUL already in the first 8-byte block: reuse the .Lend decode.
b.d .Lend
sub_s.ne r1,r1,r1
ENDFUNC (strlen)
#endif /* (__ARC600__ || !__ARC_NORM__) && !__ARC601__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 4,439
|
plugin-libc/newlib/libc/machine/arc/strcmp.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcmp.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* This is optimized primarily for the ARC700.
It would be possible to speed up the loops by one cycle / word
respective one cycle / byte by forcing double source 1 alignment, unrolling
by a factor of two, and speculatively loading the second word / byte of
source 1; however, that would increase the overhead for loop setup / finish,
and strcmp might often terminate early. */
#ifndef __ARCHS__
; int strcmp (const char *s1, const char *s2)
; In:  r0 = s1, r1 = s2.
; Out: r0 = 0 if the strings are equal, otherwise a positive or
;      negative value reflecting their order (sign-significant only;
;      the word paths return +1 or 1|0x80000000, not byte differences).
ENTRY (strcmp)
or r2,r0,r1
bmsk_s r2,r2,1
brne_l r2,0,.Lcharloop ; either pointer unaligned: byte-wise compare
mov_s r12,0x01010101 ; zero-detection constant
ror r5,r12 ; r5 = 0x80808080
; Main loop: compare one aligned word of each string per iteration.
.Lwordloop:
ld.ab r2,[r0,4]
ld.ab r3,[r1,4]
nop_s ; scheduling: cover the load latency
sub r4,r2,r12
bic r4,r4,r2
and r4,r4,r5 ; r4 != 0 iff s1's word contains a NUL byte
brne_l r4,0,.Lfound0
breq r2,r3,.Lwordloop
; Words differ and contain no NUL: isolate the first differing byte.
#ifdef __LITTLE_ENDIAN__
xor r0,r2,r3 ; mask for difference
sub_s r1,r0,1
bic_s r0,r0,r1 ; mask for least significant difference bit
sub r1,r5,r0
xor r0,r5,r1 ; mask for least significant difference byte
and_s r2,r2,r0
and_s r3,r3,r0
#endif /* LITTLE ENDIAN */
cmp_s r2,r3
mov_s r0,1 ; s1 > s2: return +1 ...
j_s.d [blink]
bset.lo r0,r0,31 ; ... s1 < s2: make the result negative
.balign 4
#ifdef __LITTLE_ENDIAN__
.Lfound0:
; s1's word has a NUL: fold the NUL indicator into the difference
; mask so the comparison stops at the terminator.
xor r0,r2,r3 ; mask for difference
or r0,r0,r4 ; or in zero indicator
sub_s r1,r0,1
bic_s r0,r0,r1 ; mask for least significant difference bit
sub r1,r5,r0
xor r0,r5,r1 ; mask for least significant difference byte
and_s r2,r2,r0
and_s r3,r3,r0
sub.f r0,r2,r3
mov.hi r0,1
j_s.d [blink]
bset.lo r0,r0,31
#else /* BIG ENDIAN */
/* The zero-detection above can mis-detect 0x01 bytes as zeroes
because of carry-propagation from a lower significant zero byte.
We can compensate for this by checking that bit0 is zero.
This compensation is not necessary in the step where we
get a low estimate for r2, because in any affected bytes
we already have 0x00 or 0x01, which will remain unchanged
when bit 7 is cleared. */
.balign 4
.Lfound0:
#ifdef __ARC_BARREL_SHIFTER__
lsr r0,r4,8
lsr_s r1,r2
bic_s r2,r2,r0 ; get low estimate for r2 and get ...
bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
cmp_s r3,r2 ; ... be independent of trailing garbage
or_s r2,r2,r0 ; likewise for r3 > r2
bic_s r3,r3,r0
rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
cmp_s r2,r3
j_s.d [blink]
bset.lo r0,r0,31
#else /* __ARC_BARREL_SHIFTER__ */
/* Fall through to .Lcharloop. */
sub_s r0,r0,4 ; rewind both pointers to re-examine the word ...
sub_s r1,r1,4 ; ... containing the NUL, byte by byte
#endif /* __ARC_BARREL_SHIFTER__ */
#endif /* ENDIAN */
.balign 4
; Byte-wise compare: used for unaligned inputs and as a fallback.
.Lcharloop:
ldb.ab r2,[r0,1]
ldb.ab r3,[r1,1]
nop_s ; scheduling: cover the load latency
breq_l r2,0,.Lcmpend
breq r2,r3,.Lcharloop
.Lcmpend:
j_s.d [blink]
sub r0,r2,r3 ; difference of the first differing (or NUL) bytes
ENDFUNC (strcmp)
#endif /* !__ARCHS__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,445
|
plugin-libc/newlib/libc/machine/arc/strcpy-bs-arc600.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strcpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
#if defined (__ARC600__) && defined (__ARC_BARREL_SHIFTER__)
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch.
Note that short and long instructions have been scheduled to avoid
branch stalls.
This version is optimized for the ARC600 pipeline. */
; char *strcpy (char *dst, const char *src)
; In:  r0 = dst, r1 = src.  Out: r0 = dst.
; Word-at-a-time copy with one word of read-ahead when both pointers
; are 4-byte aligned (see file comment above); byte loop otherwise.
; Scheduled for the ARC600 pipeline.
ENTRY (strcpy)
or r2,r0,r1
bmsk.f 0,r2,1 ; Z: both pointers 4-byte aligned
mov r8,0x01010101 ; zero-detection constant
bne.d .Lcharloop
mov_s r10,r0 ; r10 = write pointer; r0 is preserved as return value
ld_l r3,[r1,0]
bbit0.d r1,2,.Loop_setup
ror r12,r8 ; r12 = 0x80808080
; src is 4- but not 8-byte aligned: copy one word to 8-align it.
sub r2,r3,r8
bic_s r2,r2,r3
and_s r2,r2,r12
brne_s r2,0,.Lr3z ; NUL already in the first word: finish byte-wise
st.ab r3,[r10,4]
ld.a r3,[r1,4]
.Loop_setup:
ld.a r4,[r1,4]
sub r2,r3,r8
and.f r2,r2,r12
sub r5,r4,r8
and.eq.f r5,r5,r12
b.d .Loop_start
mov_s r6,r3
.balign 4
; Main loop: keep two words in flight -- r6 holds the word pending
; store while r3/r4 are being tested for a NUL byte.
.Loop:
ld.a r3,[r1,4]
st r4,[r10,4]
ld.a r4,[r1,4]
sub r2,r3,r8
and.f r2,r2,r12
sub r5,r4,r8
and.eq.f r5,r5,r12
st.ab r6,[r10,8]
mov r6,r3
.Loop_start:
beq.d .Loop
bic_s r2,r2,r3
brne.d r2,0,.Lr3z ; NUL detected in r3
and r5,r5,r12
bic r5,r5,r4
breq.d r5,0,.Loop
mov_s r3,r4 ; NUL is in r4: treat it as the tail word
st.ab r6,[r10,4]
; Tail: r3 holds the word with the terminating NUL; store its bytes
; one at a time, up to and including the NUL.
#ifdef __LITTLE_ENDIAN__
.Lr3z: bmsk.f r1,r3,7
.Lr3z_loop:
lsr_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop ; loop while the byte just stored was nonzero
bmsk.f r1,r3,7
j_s [blink]
#else
.Lr3z: lsr.f r1,r3,24
.Lr3z_loop:
asl_s r3,r3,8
stb.ab r1,[r10,1]
bne.d .Lr3z_loop ; loop while the byte just stored was nonzero
lsr.f r1,r3,24
j_s [blink]
#endif
.balign 4
; Unaligned inputs: simple byte-by-byte copy.
.Lcharloop:
ldb.ab r3,[r1,1]
brne.d r3,0,.Lcharloop
stb.ab r3,[r10,1]
j [blink]
ENDFUNC (strcpy)
#endif /* __ARC600__ && __ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 3,718
|
plugin-libc/newlib/libc/machine/arc/strncpy.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strncpy.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
it 8 byte aligned. Thus, we can do a little read-ahead, without
dereferencing a cache line that we should not touch. */
#if defined (__ARC601__) || !defined (__ARC_BARREL_SHIFTER__)
; BRand(a,b,l): a &= b, then branch to l if a is nonzero -- shorthand
; for the per-word zero-byte test used below.
#define BRand(a,b,l) and a,a,b ` brne_s a,0,l
; char *strncpy (char *dst, const char *src, size_t n)
; In:  r0 = dst, r1 = src, r2 = n.  Out: r0 = dst.
; Copies at most n bytes; if src is shorter than n, the remainder of
; dst is zero-filled via the external helper __strncpy_bzero.
ENTRY (strncpy)
cmp_s r2,8
or r12,r0,r1
bmsk.cc.f r12,r12,1 ; if n < 8, keep r12 nonzero to force .Lbytewise
brne.d r12,0,.Lbytewise
mov_s r10,r0 ; r10 = write pointer; r0 is preserved as return value
ld_s r3,[r1,0]
mov r8,0x01010101 ; zero-detection constant
add r6,r0,r2
sub r6,r6,8 ; r6 = dst + n - 8: limit for the 8-byte loop
bbit0.d r1,2,.Loop_start
ror r11,r8 ; r11 = 0x80808080
; src is 4- but not 8-byte aligned: copy one word to 8-align it.
sub r12,r3,r8
bic_l r12,r12,r3
BRand (r12,r11,.Lr3z)
mov_s r4,r3
ld.a r3,[r1,4]
st.ab r4,[r10,4]
.balign 4
.Loop_start:
brhs r10,r6,.Loop_end
; Main loop: copy 8 bytes per iteration while no NUL byte is seen
; and at least 8 bytes of room remain.
1:
ld.a r4,[r1,4]
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z2)
st.ab r3,[r10,8]
sub r12,r4,r8
bic r12,r12,r4
BRand (r12,r11,.Lr4z)
ld.a r3,[r1,4]
brlo.d r10,r6,1b
st r4,[r10,-4]
.Loop_end:
; At most one more whole word fits before the byte-wise tail.
add r6,r6,4
brhs r10,r6,.Lastword
sub r12,r3,r8
bic_s r12,r12,r3
BRand (r12,r11,.Lr3z)
add_s r1,r1,4
st.ab r3,[r10,4]
.Lastword:
sub_s r2,r2,1
b.d .Lstart_charloop
bmsk.f r2,r2,1 ; r2 = (n-1) & 3: tail byte count for the char loop
.balign 4
nop_s
.Lr3z2: sub_s r1,r1,4 ; undo the read-ahead before the byte loop
.Lr4z:
.Lr3z:
.balign 4
; A NUL was seen: copy the rest of the current word byte-wise ...
.Lr3z_loop:
ldb.ab r3,[r1,1]
brne.d r3,0,.Lr3z_loop
stb.ab r3,[r10,1]
.Lzero_rest:
; ... then zero-fill the remainder of dst.
; __strncpy_bzero requires:
; return value in r0
; zeroing length in r2
; zeroing start address in r3
mov_s r3,r10
add_s r2,r2,r0
b.d __strncpy_bzero
sub_s r2,r2,r3
.balign 4
; n < 8 or unaligned input: plain byte loop.
.Lbytewise:
sub.f r2,r2,1
jcs [blink] ; n == 0: nothing to copy
.Lstart_charloop:
mov_s r3,r10
.Lcharloop:
ldb.ab r12,[r1,1]
beq.d .Last_byte
sub.f r2,r2,1
brne.d r12,0,.Lcharloop
stb.ab r12,[r3,1]
b.d __strncpy_bzero ; src ended early: zero-fill the rest
stb.ab r12,[r3,1]
.Last_byte:
j_s.d [blink]
stb_s r12,[r3]
ENDFUNC (strncpy)
#endif /* __ARC601__ || !__ARC_BARREL_SHIFTER__ */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 5,236
|
plugin-libc/newlib/libc/machine/arc/strchr-bs.S
|
/*
Copyright (c) 2015, Synopsys, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3) Neither the name of the Synopsys, Inc., nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* This implementation is optimized for performance. For code size a generic
implementation of this function from newlib/libc/string/strchr.c will be
used. */
#if !defined (__OPTIMIZE_SIZE__) && !defined (PREFER_SIZE_OVER_SPEED)
#include "asm.h"
/* In order to search for a zero in a W, we calculate
X := (W - 0x01010101) & ~W & 0x80808080;
In the little endian case:
If no byte in W is zero, X will be zero; otherwise, the least significant
byte of X which is nonzero indicates the least significant byte of W that
is zero.
In the big endian case:
X will be zero iff no byte in W is zero.
If X is nonzero, to find out which is the most significant zero byte
in W, we calculate:
Y := ~(((W | 0x80808080) - 0x01010101) | W) & 0x80808080;
Each byte in Y is 0x80 if the corresponding byte in
W is zero, otherwise that byte of Y is 0. */
#if defined (__ARC_BARREL_SHIFTER__) && \
(defined (__ARC600__) || (!defined (__ARC_NORM__) && !defined (__ARC601__)))
; char *strchr (const char *s, int c)
; In:  r0 = s, r1 = c.
; Out: r0 = pointer to the first occurrence of c in s (including the
;      terminating NUL if c == 0), or 0 if c does not occur.
; Variant for cores with a barrel shifter (see guard above).
ENTRY (strchr)
bmsk.f r2,r0,1 ; r2 = s & 3 (misalignment); Z set if word-aligned
mov_s r3,0x01010101 ; zero-detection constant
extb_s r1,r1 ; c &= 0xff
asl r5,r1,8
or r5,r5,r1 ; r5 = c in the two low bytes
beq.d .Laligned
asl r4,r5,16 ; r4 = c in the two high bytes
; Unaligned head: back up to the enclosing word and build r7, a copy
; of 0x01010101 with the bytes before the string start cleared.
sub_s r0,r0,r2
asl_s r2,r2,3 ; misalignment in bits
#ifdef __LITTLE_ENDIAN__
asl r7,r3,r2
#else
lsr r7,r3,r2
#endif
ld_s r2,[r0] ; first (partial) word
or r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r7 ; NUL test, ignoring pre-string bytes
bic_s r12,r12,r2
and r12,r12,r4
brne.d r12,0,.Lfound0_ua
xor r6,r2,r5 ; r6 has a zero byte wherever r2 matches c
ld.a r2,[r0,4] ; read ahead the next word
sub r12,r6,r7 ; match test on the head word
bic r12,r12,r6
#ifdef __LITTLE_ENDIAN__
and.f r7,r12,r4 ; r7 = per-byte match indicator for the head word
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no match in the head word: enter the main loop
and r12,r12,r4
b.d .Lfound_char_ua
btst r7,7
#else
and.f r8,r12,r4
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop ; no match in the head word: enter the main loop
and r12,r12,r4
bic r12,r7,r6
asl_s r12,r12,7
and.f r2,r8,r12
b.d .Lfound_char_ua
sub_s r0,r0,4
#endif
.balign 4
.Laligned:
ld_s r2,[r0]
or r5,r5,r4 ; r5 = c replicated into all four bytes
ror r4,r3 ; r4 = 0x80808080
sub r12,r2,r3
bic_s r12,r12,r2
and r12,r12,r4 ; r12 != 0 iff the word contains a NUL byte
; Main loop, one word per iteration: r2 = current word, r12 = its NUL
; indicator, r6 = r2 ^ r5 (zero byte where the word matches c).
.Loop:
brne.d r12,0,.Lfound0
xor r6,r2,r5
ld.a r2,[r0,4]
sub r12,r6,r3
bic r12,r12,r6
and.f r7,r12,r4 ; r7 != 0 iff the previous word contained c
sub r12,r2,r3
bic_s r12,r12,r2
beq.d .Loop
and r12,r12,r4
; Found searched-for character. r0 has already advanced to next word.
#ifdef __LITTLE_ENDIAN__
/* We only need the information about the first matching byte
(i.e. the least significant matching byte) to be exact,
hence there is no problem with carry effects. */
.Lfound_char:
btst r7,7
.Lfound_char_ua:
; Locate the matching byte: step past one byte for each clear
; per-byte flag below the match (conditions chained via btst.eq).
sub_s r0,r0,4
add.eq r0,r0,1
btst.eq r7,15
add.eq r0,r0,1
btst.eq r7,23
j_s.d [blink]
add.eq r0,r0,1
.balign 4
.Lfound0_ua:
mov_l r3,r7
.Lfound0:
; A NUL was seen; decide whether c occurs at or before it.
sub r2,r6,r3
bic r2,r2,r6
and r2,r2,r4 ; r2 = match indicator for the word holding the NUL
or r3,r12,r2
sub_s r12,r3,1
xor_s r3,r3,r12 ; r3 = mask up to the first NUL-or-match byte
tst_s r2,r3 ; Z: the NUL comes first -> no match
lsr r2,r3,31
lsr r12,r3,16
jeq.d [blink]
mov.eq r0,0 ; not found: return NULL
lsr r3,r3,8
sub_s r2,r2,r12 ; derive the byte offset of the match from the mask
sub_s r2,r2,r3
bmsk_s r2,r2,1
j_s.d [blink]
add_s r0,r0,r2
#else /* BIG ENDIAN */
.Lfound_char:
asl r6,r6,7
sub_s r0,r0,4
bic.f r2,r7,r6
.Lfound_char_ua:
; Locate the matching byte from the per-byte flags in r2.
add.pl r0,r0,1
jmi.d [blink]
btst_s r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1
; N.B. if we searched for a char zero and found it in the MSB,
; and ignored matches are identical, we will take the early exit
; like for an ordinary found zero - except for the extra stalls at jhi -
; but still compute the right result.
.Lfound0_ua:
mov_s r3,r7
.Lfound0:
; A NUL was seen; decide whether c occurs at or before it.
asl_s r2,r2,7
or r7,r6,r4
bic_s r12,r12,r2
sub r2,r7,r3
or r2,r2,r6
bic r2,r4,r2
cmp r12,r2
mov.hi r0,0 ; the NUL comes first -> return NULL
btst.ls r2,31
jhi.d [blink]
add.eq r0,r0,1 ; otherwise step to the matching byte
btst.eq r2,23
add.eq r0,r0,1
btst.eq r2,15
j_s.d [blink]
add.eq r0,r0,1
#endif /* ENDIAN */
ENDFUNC (strchr)
#endif /* __ARC_BARREL_SHIFTER__ &&
(__ARC600__ || (!__ARC_NORM__ && !__ARC601__)) */
#endif /* !__OPTIMIZE_SIZE__ && !PREFER_SIZE_OVER_SPEED */
|
4ms/metamodule-plugin-sdk
| 2,589
|
plugin-libc/newlib/libc/machine/riscv/setjmp.S
|
/* Copyright (c) 2017 SiFive Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the FreeBSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses.
*/
#include <sys/asm.h>
/* int setjmp (jmp_buf); */
# int setjmp (jmp_buf env)  --  a0 = env.
# Saves ra, sp and the callee-saved integer registers (plus the
# callee-saved FP registers unless the soft-float ABI is in use) into
# the buffer at a0, then returns 0.  SZREG/SZFREG come from <sys/asm.h>.
.globl setjmp
.type setjmp, @function
setjmp:
REG_S ra, 0*SZREG(a0) # return address: longjmp resumes here
REG_S s0, 1*SZREG(a0)
REG_S s1, 2*SZREG(a0)
#ifndef __riscv_32e
REG_S s2, 3*SZREG(a0)
REG_S s3, 4*SZREG(a0)
REG_S s4, 5*SZREG(a0)
REG_S s5, 6*SZREG(a0)
REG_S s6, 7*SZREG(a0)
REG_S s7, 8*SZREG(a0)
REG_S s8, 9*SZREG(a0)
REG_S s9, 10*SZREG(a0)
REG_S s10,11*SZREG(a0)
REG_S s11,12*SZREG(a0)
REG_S sp, 13*SZREG(a0)
#else
REG_S sp, 3*SZREG(a0) # RV32E: only s0/s1 are callee-saved
#endif
#ifndef __riscv_float_abi_soft
FREG_S fs0, 14*SZREG+ 0*SZFREG(a0)
FREG_S fs1, 14*SZREG+ 1*SZFREG(a0)
FREG_S fs2, 14*SZREG+ 2*SZFREG(a0)
FREG_S fs3, 14*SZREG+ 3*SZFREG(a0)
FREG_S fs4, 14*SZREG+ 4*SZFREG(a0)
FREG_S fs5, 14*SZREG+ 5*SZFREG(a0)
FREG_S fs6, 14*SZREG+ 6*SZFREG(a0)
FREG_S fs7, 14*SZREG+ 7*SZFREG(a0)
FREG_S fs8, 14*SZREG+ 8*SZFREG(a0)
FREG_S fs9, 14*SZREG+ 9*SZFREG(a0)
FREG_S fs10,14*SZREG+10*SZFREG(a0)
FREG_S fs11,14*SZREG+11*SZFREG(a0)
#endif
li a0, 0 # the direct call to setjmp returns 0
ret
.size setjmp, .-setjmp
/* volatile void longjmp (jmp_buf, int); */
# longjmp: a0 = env, a1 = val.
# Restores the register state saved by setjmp from the buffer at a0
# and returns to the saved call site; setjmp then appears to return
# a1, or 1 if a1 == 0 (as the C standard requires).
.globl longjmp
.type longjmp, @function
longjmp:
REG_L ra, 0*SZREG(a0)
REG_L s0, 1*SZREG(a0)
REG_L s1, 2*SZREG(a0)
#ifndef __riscv_32e
REG_L s2, 3*SZREG(a0)
REG_L s3, 4*SZREG(a0)
REG_L s4, 5*SZREG(a0)
REG_L s5, 6*SZREG(a0)
REG_L s6, 7*SZREG(a0)
REG_L s7, 8*SZREG(a0)
REG_L s8, 9*SZREG(a0)
REG_L s9, 10*SZREG(a0)
REG_L s10,11*SZREG(a0)
REG_L s11,12*SZREG(a0)
REG_L sp, 13*SZREG(a0)
#else
REG_L sp, 3*SZREG(a0) # RV32E: only s0/s1 are callee-saved
#endif
#ifndef __riscv_float_abi_soft
FREG_L fs0, 14*SZREG+ 0*SZFREG(a0)
FREG_L fs1, 14*SZREG+ 1*SZFREG(a0)
FREG_L fs2, 14*SZREG+ 2*SZFREG(a0)
FREG_L fs3, 14*SZREG+ 3*SZFREG(a0)
FREG_L fs4, 14*SZREG+ 4*SZFREG(a0)
FREG_L fs5, 14*SZREG+ 5*SZFREG(a0)
FREG_L fs6, 14*SZREG+ 6*SZFREG(a0)
FREG_L fs7, 14*SZREG+ 7*SZFREG(a0)
FREG_L fs8, 14*SZREG+ 8*SZFREG(a0)
FREG_L fs9, 14*SZREG+ 9*SZFREG(a0)
FREG_L fs10,14*SZREG+10*SZFREG(a0)
FREG_L fs11,14*SZREG+11*SZFREG(a0)
#endif
seqz a0, a1
add a0, a0, a1 # a0 = (a1 == 0) ? 1 : a1
ret
.size longjmp, .-longjmp
|
4ms/metamodule-plugin-sdk
| 1,981
|
plugin-libc/newlib/libc/machine/riscv/memset.S
|
/* Copyright (c) 2017 SiFive Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the FreeBSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses.
*/
.text
# void *memset (void *s, int c, size_t n)  --  a0 = s, a1 = c, a2 = n.
# Returns a0.
.global memset
.type memset, @function
memset:
#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
# Size-optimized variant: plain byte loop.
mv t1, a0
beqz a2, 2f
1:
sb a1, 0(t1)
add a2, a2, -1
add t1, t1, 1
bnez a2, 1b
2:
ret
#else
# Speed-optimized variant: 16 bytes of word stores per loop iteration,
# plus a computed jump into a table of byte stores for the 0..15-byte
# head and tail.
li t1, 15
move a4, a0 # a4 = running destination pointer; a0 is preserved
bleu a2, t1, .Ltiny
and a5, a4, 15
bnez a5, .Lmisaligned
.Laligned:
bnez a1, .Lwordify # replicate a nonzero fill byte across a word
.Lwordified:
and a3, a2, ~15 # a3 = byte count handled by the 16-byte loop
and a2, a2, 15 # a2 = tail length
add a3, a3, a4 # a3 = loop end address
#if __riscv_xlen == 64
1:sd a1, 0(a4)
sd a1, 8(a4)
#else
1:sw a1, 0(a4)
sw a1, 4(a4)
sw a1, 8(a4)
sw a1, 12(a4)
#endif
add a4, a4, 16
bltu a4, a3, 1b
bnez a2, .Ltiny
ret
.Ltiny:
# Jump into .Ltable so that exactly a2 of its 15 byte stores execute.
# .option norvc keeps every sb at 4 bytes, so the entry offset is
# (15 - a2) * 4 from .Ltable.
sub a3, t1, a2
sll a3, a3, 2
1:auipc t0, %pcrel_hi(.Ltable)
add a3, a3, t0
.option push
.option norvc
.Ltable_misaligned:
jr a3, %pcrel_lo(1b)
.Ltable:
sb a1,14(a4)
sb a1,13(a4)
sb a1,12(a4)
sb a1,11(a4)
sb a1,10(a4)
sb a1, 9(a4)
sb a1, 8(a4)
sb a1, 7(a4)
sb a1, 6(a4)
sb a1, 5(a4)
sb a1, 4(a4)
sb a1, 3(a4)
sb a1, 2(a4)
sb a1, 1(a4)
sb a1, 0(a4)
.option pop
ret
.Lwordify:
# Broadcast the low byte of a1 into every byte of a register word.
and a1, a1, 0xFF
sll a3, a1, 8
or a1, a1, a3
sll a3, a1, 16
or a1, a1, a3
#if __riscv_xlen == 64
sll a3, a1, 32
or a1, a1, a3
#endif
j .Lwordified
.Lmisaligned:
# Call into the store table (entered at .Ltable_misaligned + (s&15)*4)
# to write the 16 - (s & 15) head bytes, then adjust a4/a2 and rejoin
# the aligned path.  ra is preserved around the jalr in t0.
sll a3, a5, 2
1:auipc t0, %pcrel_hi(.Ltable_misaligned)
add a3, a3, t0
mv t0, ra
jalr a3, %pcrel_lo(1b)
mv ra, t0
add a5, a5, -16 # a5 = -(bytes just stored)
sub a4, a4, a5
add a2, a2, a5
bleu a2, t1, .Ltiny
j .Laligned
#endif
.size memset, .-memset
|
4ms/metamodule-plugin-sdk
| 3,647
|
plugin-libc/newlib/libc/machine/riscv/strcmp.S
|
/* Copyright (c) 2017 SiFive Inc. All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the FreeBSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses.
*/
#include <sys/asm.h>
.text
# int strcmp (const char *s1, const char *s2)  --  a0 = s1, a1 = s2.
# Returns <0, 0 or >0 as s1 sorts before, equal to, or after s2.
.globl strcmp
.type strcmp, @function
strcmp:
#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__)
# Size-optimized variant: byte-by-byte compare.
1:
lbu a2, 0(a0)
lbu a3, 0(a1)
add a0, a0, 1
add a1, a1, 1
bne a2, a3, 2f
bnez a2, 1b
2:
sub a0, a2, a3
ret
.size strcmp, .-strcmp
#else
# Speed-optimized variant: compare one register-width word at a time
# when both pointers are SZREG-aligned; otherwise byte-wise.
or a4, a0, a1
li t2, -1
and a4, a4, SZREG-1
bnez a4, .Lmisaligned
#if SZREG == 4
li a5, 0x7f7f7f7f
#else
ld a5, mask # a5 = 0x7f7f7f7f7f7f7f7f (see .srodata at the bottom)
#endif
# check_one_word i n: load word i of both strings; branch to .Lnull\i
# if s1's word contains a NUL byte, to .Lmismatch if the words differ.
# NUL test: ((w & m) + m) | w | m (m = 0x7f..7f) has all bits set iff
# no byte of w is zero.  The last word of the group (\i == \n-1)
# advances both pointers and loops instead.
.macro check_one_word i n
REG_L a2, \i*SZREG(a0)
REG_L a3, \i*SZREG(a1)
and t0, a2, a5
or t1, a2, a5
add t0, t0, a5
or t0, t0, t1
bne t0, t2, .Lnull\i
.if \i+1-\n
bne a2, a3, .Lmismatch
.else
add a0, a0, \n*SZREG
add a1, a1, \n*SZREG
beq a2, a3, .Lloop
# fall through to .Lmismatch
.endif
.endm
# foundnull i n: word i of s1 contained a NUL.  If the words differ,
# fall back to the byte loop to find the ordering; otherwise the
# strings are equal.  (The i == 0 case shares the i == 1 expansion
# via the .Lnull0 label emitted inside it.)
.macro foundnull i n
.ifne \i
.Lnull\i:
add a0, a0, \i*SZREG
add a1, a1, \i*SZREG
.ifeq \i-1
.Lnull0:
.endif
bne a2, a3, .Lmisaligned
li a0, 0
ret
.endif
.endm
.Lloop:
# examine full words at a time, favoring strings of a couple dozen chars
#if __riscv_xlen == 32
check_one_word 0 5
check_one_word 1 5
check_one_word 2 5
check_one_word 3 5
check_one_word 4 5
#else
check_one_word 0 3
check_one_word 1 3
check_one_word 2 3
#endif
# backwards branch to .Lloop contained above
.Lmismatch:
# words don't match, but a2 has no null byte.
# Find the first differing 16-bit half, then compare its two bytes.
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#if __riscv_xlen == 64
sll a4, a2, 48
sll a5, a3, 48
bne a4, a5, .Lmismatch_upper
sll a4, a2, 32
sll a5, a3, 32
bne a4, a5, .Lmismatch_upper
#endif
sll a4, a2, 16
sll a5, a3, 16
bne a4, a5, .Lmismatch_upper
srl a4, a2, 8*SZREG-16
srl a5, a3, 8*SZREG-16
sub a0, a4, a5
and a1, a0, 0xff
bnez a1, 1f
ret
.Lmismatch_upper:
srl a4, a4, 8*SZREG-16
srl a5, a5, 8*SZREG-16
sub a0, a4, a5
and a1, a0, 0xff
bnez a1, 1f
ret
1:and a4, a4, 0xff # low bytes differ: compare those instead
and a5, a5, 0xff
sub a0, a4, a5
ret
#else
#if __riscv_xlen == 64
srl a4, a2, 48
srl a5, a3, 48
bne a4, a5, .Lmismatch_lower
srl a4, a2, 32
srl a5, a3, 32
bne a4, a5, .Lmismatch_lower
#endif
srl a4, a2, 16
srl a5, a3, 16
bne a4, a5, .Lmismatch_lower
srl a4, a2, 8
srl a5, a3, 8
bne a4, a5, 1f
and a4, a2, 0xff
and a5, a3, 0xff
1:sub a0, a4, a5
ret
.Lmismatch_lower:
srl a2, a4, 8
srl a3, a5, 8
bne a2, a3, 1f
and a2, a4, 0xff
and a3, a5, 0xff
1:sub a0, a2, a3
ret
#endif
.Lmisaligned:
# misaligned (also used to finish a word that contained a NUL)
lbu a2, 0(a0)
lbu a3, 0(a1)
add a0, a0, 1
add a1, a1, 1
bne a2, a3, 1f
bnez a2, .Lmisaligned
1:
sub a0, a2, a3
ret
# cases in which a null byte was detected
#if __riscv_xlen == 32
foundnull 0 5
foundnull 1 5
foundnull 2 5
foundnull 3 5
foundnull 4 5
#else
foundnull 0 3
foundnull 1 3
foundnull 2 3
#endif
.size strcmp, .-strcmp
#if SZREG == 8
.section .srodata.cst8,"aM",@progbits,8
.align 3
mask:
.dword 0x7f7f7f7f7f7f7f7f
#endif
#endif
|
4ms/metamodule-plugin-sdk
| 7,726
|
plugin-libc/newlib/libc/machine/sh/memcpy.S
|
!
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
! SH5 code Copyright 2002 SuperH Ltd.
!
! Entry: ARG0: destination pointer
! ARG1: source pointer
! ARG3: byte count
!
! Exit: RESULT: destination pointer
! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatenate bytes
! into a longword on the SH, so this does a longword read and small
! writes.
!
! This implementation makes two assumptions about how it is called:
!
! 1.: If the byte count is nonzero, the address of the last byte to be
! copied is unsigned greater than the address of the first byte to
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to be copied, the rest of the word can be read
! without side effects.
! This could be easily changed by increasing the minimum size of
! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
! however, this would cost a few extra cycles on average.
! For SHmedia, the assumption is that any quadword can be read in its
! entirety if at least one byte is included in the copy.
!
#include "asm.h"
ENTRY(memcpy)
#if __SHMEDIA__
#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
ld.b r3,0,r63
pta/l Large,tr0
movi 25,r0
bgeu/u r4,r0,tr0
nsb r4,r0
shlli r0,5,r0
movi (L1-L0+63*32 + 1) & 0xffff,r1
sub r1, r0, r0
L0: ptrel r0,tr0
add r2,r4,r5
ptabs r18,tr1
add r3,r4,r6
blink tr0,r63
.balign 8
L1:
/* 0 byte memcpy */
blink tr1,r63
L4_7: /* 4..7 byte memcpy cntd. */
stlo.l r2, 0, r0
or r6, r7, r6
sthi.l r5, -1, r6
stlo.l r5, -4, r6
blink tr1,r63
L2_3: /* 2 or 3 byte memcpy cntd. */
st.b r5,-1,r6
blink tr1,r63
/* 1 byte memcpy */
ld.b r3,0,r0
st.b r2,0,r0
blink tr1,r63
L8_15: /* 8..15 byte memcpy cntd. */
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
/* 2 or 3 byte memcpy */
ld.b r3,0,r0
ld.b r2,0,r63
ld.b r3,1,r1
st.b r2,0,r0
pta/l L2_3,tr0
ld.b r6,-1,r6
st.b r2,1,r1
blink tr0, r63
/* 4 .. 7 byte memcpy */
LDUAL (r3, 0, r0, r1)
pta L4_7, tr0
ldlo.l r6, -4, r7
or r0, r1, r0
sthi.l r2, 3, r0
ldhi.l r6, -1, r6
blink tr0, r63
/* 8 .. 15 byte memcpy */
LDUAQ (r3, 0, r0, r1)
pta L8_15, tr0
ldlo.q r6, -8, r7
or r0, r1, r0
sthi.q r2, 7, r0
ldhi.q r6, -1, r6
blink tr0, r63
/* 16 .. 24 byte memcpy */
LDUAQ (r3, 0, r0, r1)
LDUAQ (r3, 8, r8, r9)
or r0, r1, r0
sthi.q r2, 7, r0
or r8, r9, r8
sthi.q r2, 15, r8
ldlo.q r6, -8, r7
ldhi.q r6, -1, r6
stlo.q r2, 8, r8
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
Large:
ld.b r2, 0, r63
pta/l Loop_ua, tr1
ori r3, -8, r7
sub r2, r7, r22
sub r3, r2, r6
add r2, r4, r5
ldlo.q r3, 0, r0
addi r5, -16, r5
movi 64+8, r27 // could subtract r7 from that.
stlo.q r2, 0, r0
sthi.q r2, 7, r0
ldx.q r22, r6, r0
bgtu/l r27, r4, tr1
addi r5, -48, r27
pta/l Loop_line, tr0
addi r6, 64, r36
addi r6, -24, r19
addi r6, -16, r20
addi r6, -8, r21
Loop_line:
ldx.q r22, r36, r63
alloco r22, 32
addi r22, 32, r22
ldx.q r22, r19, r23
sthi.q r22, -25, r0
ldx.q r22, r20, r24
ldx.q r22, r21, r25
stlo.q r22, -32, r0
ldx.q r22, r6, r0
sthi.q r22, -17, r23
sthi.q r22, -9, r24
sthi.q r22, -1, r25
stlo.q r22, -24, r23
stlo.q r22, -16, r24
stlo.q r22, -8, r25
bgeu r27, r22, tr0
Loop_ua:
addi r22, 8, r22
sthi.q r22, -1, r0
stlo.q r22, -8, r0
ldx.q r22, r6, r0
bgtu/l r5, r22, tr1
add r3, r4, r7
ldlo.q r7, -8, r1
sthi.q r22, 7, r0
ldhi.q r7, -1, r7
ptabs r18,tr1
stlo.q r22, 0, r0
or r1, r7, r1
sthi.q r5, 15, r1
stlo.q r5, 8, r1
blink tr1, r63
#else /* ! SHMEDIA, i.e. SH1 .. SH4 / SHcompact */
#ifdef __SH5__
#define DST r2
#define SRC r3
#define COUNT r4
#define TMP0 r5
#define TMP1 r6
#define RESULT r2
#else
#define DST r4
#define SRC r5
#define COUNT r6
#define TMP0 r2
#define TMP1 r3
#define RESULT r0
#endif
#ifdef __LITTLE_ENDIAN__
! Little endian version copies with increasing addresses.
mov DST,TMP1 ! Save return value
mov #11,r0 ! Check if small number of bytes
cmp/hs r0,COUNT
! COUNT becomes src end address
SL(bf, L_small, add SRC,COUNT)
mov #1,r1
tst r1,SRC ! check if source even
SL(bt, L_even, mov COUNT,r7)
mov.b @SRC+,r0 ! no, make it even.
mov.b r0,@DST
add #1,DST
L_even: tst r1,DST ! check if destination is even
add #-3,r7
SL(bf, L_odddst, mov #2,r1)
tst r1,DST ! check if destination is 4-byte aligned
mov DST,r0
SL(bt, L_al4dst, sub SRC,r0)
mov.w @SRC+,TMP0
mov.w TMP0,@DST
! add #2,DST DST is dead here.
L_al4dst:
tst r1,SRC
bt L_al4both
mov.w @SRC+,r1
swap.w r1,r1
add #-6,r0
add #-6,r7 ! r7 := src end address minus 9.
.align 2
L_2l_loop:
mov.l @SRC+,TMP0 ! Read & write two longwords per iteration
xtrct TMP0,r1
mov.l r1,@(r0,SRC)
cmp/hs r7,SRC
mov.l @SRC+,r1
xtrct r1,TMP0
mov.l TMP0,@(r0,SRC)
bf L_2l_loop
add #-2,SRC
bra L_cleanup
add #5,r0
L_al4both:
add #-4,r0
.align 2
L_al4both_loop:
mov.l @SRC+,DST ! Read longword, write longword per iteration
cmp/hs r7,SRC
SL(bf, L_al4both_loop, mov.l DST,@(r0,SRC))
bra L_cleanup
add #3,r0
L_odddst:
tst r1,SRC
SL(bt, L_al4src, add #-1,DST)
mov.w @SRC+,r0
mov.b r0,@(1,DST)
shlr8 r0
mov.b r0,@(2,DST)
add #2,DST
L_al4src:
.align 2
L_odd_loop:
mov.l @SRC+,r0 ! Read longword, write byte, word, byte per iteration
cmp/hs r7,SRC
mov.b r0,@(1,DST)
shlr8 r0
mov.w r0,@(2,DST)
shlr16 r0
mov.b r0,@(4,DST)
SL(bf, L_odd_loop, add #4,DST)
.align 2 ! avoid nop in more frequently executed code.
L_cleanup2:
mov DST,r0
sub SRC,r0
L_cleanup:
cmp/eq COUNT,SRC
bt L_ready
.align 2
L_cleanup_loop:
mov.b @SRC+,r1
cmp/eq COUNT,SRC
mov.b r1,@(r0,SRC)
bf L_cleanup_loop
L_ready:
rts
mov TMP1,RESULT
L_small:
bra L_cleanup2
add #-1,DST
#else /* ! __LITTLE_ENDIAN__ */
! Big endian version copies with decreasing addresses.
mov DST,r0
add COUNT,r0
sub DST,SRC
mov #11,r1
cmp/hs r1,COUNT
SL(bf, L_small, add #-1,SRC)
mov SRC,TMP1
add r0,TMP1
shlr TMP1
SL(bt, L_even,
mov DST,r7)
mov.b @(r0,SRC),TMP0
add #-1,TMP1
mov.b TMP0,@-r0
L_even:
tst #1,r0
add #-1,SRC
SL(bf, L_odddst, add #8,r7)
tst #2,r0
bt L_al4dst
add #-1,TMP1
mov.w @(r0,SRC),r1
mov.w r1,@-r0
L_al4dst:
shlr TMP1
bt L_al4both
mov.w @(r0,SRC),r1
swap.w r1,r1
add #4,r7
add #-4,SRC
.align 2
L_2l_loop:
mov.l @(r0,SRC),TMP0
xtrct TMP0,r1
mov.l r1,@-r0
cmp/hs r7,r0
mov.l @(r0,SRC),r1
xtrct r1,TMP0
mov.l TMP0,@-r0
bt L_2l_loop
bra L_cleanup
add #5,SRC
nop ! avoid nop in executed code.
L_al4both:
add #-2,SRC
.align 2
L_al4both_loop:
mov.l @(r0,SRC),r1
cmp/hs r7,r0
SL(bt, L_al4both_loop,
mov.l r1,@-r0)
bra L_cleanup
add #3,SRC
nop ! avoid nop in executed code.
L_odddst:
shlr TMP1
bt L_al4src
mov.w @(r0,SRC),r1
mov.b r1,@-r0
shlr8 r1
mov.b r1,@-r0
L_al4src:
add #-2,SRC
.align 2
L_odd_loop:
mov.l @(r0,SRC),TMP0
cmp/hs r7,r0
mov.b TMP0,@-r0
shlr8 TMP0
mov.w TMP0,@-r0
shlr16 TMP0
mov.b TMP0,@-r0
bt L_odd_loop
add #3,SRC
L_cleanup:
L_small:
cmp/eq DST,r0
bt L_ready
add #1,DST
.align 2
L_cleanup_loop:
mov.b @(r0,SRC),TMP0
cmp/eq DST,r0
mov.b TMP0,@-r0
bf L_cleanup_loop
L_ready:
rts
mov r0,RESULT
#endif /* ! __LITTLE_ENDIAN__ */
#endif /* ! SHMEDIA */
|
4ms/metamodule-plugin-sdk
| 3,695
|
plugin-libc/newlib/libc/machine/sh/setjmp.S
|
/* We want to pretend we're in SHmedia mode, even when assembling for
SHcompact. */
#if __SH5__ == 32 && ! __SHMEDIA__
# undef __SHMEDIA__
# define __SHMEDIA__ 1
#endif
#if __SHMEDIA__
.mode SHmedia
#endif
#include "asm.h"
ENTRY(setjmp)
#if __SH5__
ptabs r18, tr0
gettr tr5, r5
gettr tr6, r6
gettr tr7, r7
st.q r2, 0*8, r18
st.q r2, 1*8, r10
st.q r2, 2*8, r11
st.q r2, 3*8, r12
st.q r2, 4*8, r13
st.q r2, 5*8, r14
st.q r2, 6*8, r15
st.q r2, 7*8, r28
st.q r2, 8*8, r29
st.q r2, 9*8, r30
st.q r2, 10*8, r31
st.q r2, 11*8, r32
st.q r2, 12*8, r33
st.q r2, 13*8, r34
st.q r2, 14*8, r35
st.q r2, 15*8, r44
st.q r2, 16*8, r45
st.q r2, 17*8, r46
st.q r2, 18*8, r47
st.q r2, 19*8, r48
st.q r2, 20*8, r49
st.q r2, 21*8, r50
st.q r2, 22*8, r51
st.q r2, 23*8, r52
st.q r2, 24*8, r53
st.q r2, 25*8, r54
st.q r2, 26*8, r55
st.q r2, 27*8, r56
st.q r2, 28*8, r57
st.q r2, 29*8, r58
st.q r2, 30*8, r59
st.q r2, 31*8, r5
st.q r2, 32*8, r6
st.q r2, 33*8, r7
#if ! __SH4_NOFPU__
fst.d r2, 34*8, dr12
fst.d r2, 35*8, dr14
fst.d r2, 36*8, dr36
fst.d r2, 37*8, dr38
fst.d r2, 38*8, dr40
fst.d r2, 39*8, dr42
fst.d r2, 40*8, dr44
fst.d r2, 41*8, dr46
fst.d r2, 42*8, dr48
fst.d r2, 43*8, dr50
fst.d r2, 44*8, dr52
fst.d r2, 45*8, dr54
fst.d r2, 46*8, dr56
fst.d r2, 47*8, dr58
fst.d r2, 48*8, dr60
fst.d r2, 49*8, dr62
#endif
movi 0, r2
blink tr0, r63
#else
#if defined (__SH2E__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__)
add #(13*4),r4
#else
add #(9*4),r4
#endif
sts.l pr,@-r4
#if defined (__SH2E__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__)
fmov.s fr15,@-r4 ! call saved floating point registers
fmov.s fr14,@-r4
fmov.s fr13,@-r4
fmov.s fr12,@-r4
#endif
mov.l r15,@-r4 ! call saved integer registers
mov.l r14,@-r4
mov.l r13,@-r4
mov.l r12,@-r4
mov.l r11,@-r4
mov.l r10,@-r4
mov.l r9,@-r4
mov.l r8,@-r4
rts
mov #0,r0
#endif /* __SH5__ */
ENTRY(longjmp)
#if __SH5__
ld.q r2, 0*8, r18
ptabs r18, tr0
ld.q r2, 1*8, r10
ld.q r2, 2*8, r11
ld.q r2, 3*8, r12
ld.q r2, 4*8, r13
ld.q r2, 5*8, r14
ld.q r2, 6*8, r15
ld.q r2, 7*8, r28
ld.q r2, 8*8, r29
ld.q r2, 9*8, r30
ld.q r2, 10*8, r31
ld.q r2, 11*8, r32
ld.q r2, 12*8, r33
ld.q r2, 13*8, r34
ld.q r2, 14*8, r35
ld.q r2, 15*8, r44
ld.q r2, 16*8, r45
ld.q r2, 17*8, r46
ld.q r2, 18*8, r47
ld.q r2, 19*8, r48
ld.q r2, 20*8, r49
ld.q r2, 21*8, r50
ld.q r2, 22*8, r51
ld.q r2, 23*8, r52
ld.q r2, 24*8, r53
ld.q r2, 25*8, r54
ld.q r2, 26*8, r55
ld.q r2, 27*8, r56
ld.q r2, 28*8, r57
ld.q r2, 29*8, r58
ld.q r2, 30*8, r59
ld.q r2, 31*8, r5
ld.q r2, 32*8, r6
ld.q r2, 33*8, r7
ptabs r5, tr5
ptabs r6, tr6
ptabs r7, tr7
#if ! __SH4_NOFPU__
fld.d r2, 34*8, dr12
fld.d r2, 35*8, dr14
fld.d r2, 36*8, dr36
fld.d r2, 37*8, dr38
fld.d r2, 38*8, dr40
fld.d r2, 39*8, dr42
fld.d r2, 40*8, dr44
fld.d r2, 41*8, dr46
fld.d r2, 42*8, dr48
fld.d r2, 43*8, dr50
fld.d r2, 44*8, dr52
fld.d r2, 45*8, dr54
fld.d r2, 46*8, dr56
fld.d r2, 47*8, dr58
fld.d r2, 48*8, dr60
fld.d r2, 49*8, dr62
#endif
movi 1, r2
cmvne r3, r3, r2
blink tr0, r63
#else
mov.l @r4+,r8
mov.l @r4+,r9
mov.l @r4+,r10
mov.l @r4+,r11
mov.l @r4+,r12
mov.l @r4+,r13
mov.l @r4+,r14
mov.l @r4+,r15
#if defined (__SH2E__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__)
fmov.s @r4+,fr12 ! call saved floating point registers
fmov.s @r4+,fr13
fmov.s @r4+,fr14
fmov.s @r4+,fr15
#endif
lds.l @r4+,pr
mov r5,r0
tst r0,r0
bf retr4
movt r0
retr4: rts
nop
#endif /* __SH5__ */
|
4ms/metamodule-plugin-sdk
| 2,524
|
plugin-libc/newlib/libc/machine/sh/strcpy.S
|
! Entry: arg0: destination
! arg1: source
! Exit: result: destination
!
! SH5 code Copyright 2002 SuperH Ltd.
#include "asm.h"
ENTRY(strcpy)
#if __SHMEDIA__
pta/l shortstring,tr1
ldlo.q r3,0,r4
ptabs r18,tr4
shlli r3,3,r7
addi r2, 8, r0
mcmpeq.b r4,r63,r6
SHHI r6,r7,r6
bnei/u r6,0,tr1 // shortstring
pta/l no_lddst, tr2
ori r3,-8,r23
sub r2, r23, r0
sub r3, r2, r21
addi r21, 8, r20
ldx.q r0, r21, r5
pta/l loop, tr0
ori r2,-8,r22
mcmpeq.b r5, r63, r6
bgt/u r22, r23, tr2 // no_lddst
// r22 < r23 : Need to do a load from the destination.
// r22 == r23 : Doesn't actually need to load from destination,
// but still can be handled here.
ldlo.q r2, 0, r9
movi -1, r8
SHLO r8, r7, r8
mcmv r4, r8, r9
stlo.q r2, 0, r9
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
blink tr1, r63 // shortstring
no_lddst:
// r22 > r23: note that for r22 == r23 the sthi.q would clobber
// bytes before the destination region.
stlo.q r2, 0, r4
SHHI r4, r7, r4
sthi.q r0, -1, r4
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
shortstring:
#ifndef __LITTLE_ENDIAN__
pta/l shortstring2,tr1
byterev r4,r4
#endif
shortstring2:
st.b r0,-8,r4
andi r4,0xff,r5
shlri r4,8,r4
addi r0,1,r0
bnei/l r5,0,tr1
blink tr4,r63 // return
.balign 8
loop:
stlo.q r0, 0, r5
ldx.q r0, r20, r4
addi r0, 16, r0
sthi.q r0, -9, r5
mcmpeq.b r4, r63, r6
bnei/u r6, 0, tr1 // shortstring
ldx.q r0, r21, r5
stlo.q r0, -8, r4
sthi.q r0, -1, r4
mcmpeq.b r5, r63, r6
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
blink tr1, r63 // shortstring
#else /* ! __SHMEDIA__, i.e. SH 1..4 / SHcompact */
#ifdef __SH5__
#define DST r2
#define SRC r3
#define TMP r4
#define RESULT R2
! r0,r1,r3,r4: clobbered
#else
#define DST r4
#define SRC r5
#define TMP r2
#define RESULT r0
! r1-r2,r5: clobbered
#endif
mov DST,r0
or SRC,r0
tst #3,r0
SL(bf, L_setup_char_loop, mov DST,r0)
mov.l @SRC+,r1
mov #0,TMP
cmp/str TMP,r1
SL(bt, Longword_loop_end, sub SRC,r0)
.align 2
Longword_loop:
mov.l r1,@(r0,SRC)
mov.l @SRC+,r1
cmp/str TMP,r1
bt Longword_loop_end
mov.l r1,@(r0,SRC)
mov.l @SRC+,r1
cmp/str TMP,r1
bf Longword_loop
Longword_loop_end:
add #-4,SRC
add #3,r0
.align 2
L_char_loop:
mov.b @SRC+,r1
L_char_loop_start:
tst r1,r1
SL(bf, L_char_loop, mov.b r1,@(r0,SRC))
rts
mov DST,RESULT
L_setup_char_loop:
mov.b @SRC+,r1
bra L_char_loop_start
sub SRC,r0
#endif /* ! __SHMEDIA__ */
|
4ms/metamodule-plugin-sdk
| 3,191
|
plugin-libc/newlib/libc/machine/sh/memset.S
|
!
! Fast SH memset
!
! by Toshiyasu Morita (tm@netcom.com)
!
! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
! Copyright 2002 SuperH Ltd.
!
#include "asm.h"
ENTRY(memset)
#if __SHMEDIA__
pta/l multiquad, tr0
ptabs r18, tr2
andi r2, -8, r25
add r2, r4, r5
addi r5, -1, r20 // calculate end address.
andi r20, -8, r20
cmveq r4, r25, r20
bne/u r25, r20, tr0 // multiquad
! This sequence could clobber volatile objects that are in the same
! quadword as a very short char array.
! ldlo.q r2, 0, r7
! shlli r4, 2, r4
! movi -1, r8
! SHHI r8, r4, r8
! SHHI r8, r4, r8
! mcmv r7, r8, r3
! stlo.q r2, 0, r3
pta/l setlongs, tr0
movi 4, r8
bgeu/u r4, r8, tr0
pta/l endset, tr0
beqi/u r4, 0, tr0
st.b r2, 0, r3
beqi/u r4, 1, tr0
nop
st.b r2, 1, r3
beqi/l r4, 2, tr0
st.b r2,2,r3
endset: blink tr2, r63
setlongs:
mshflo.b r3, r3, r3
mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
stlo.l r2, 0, r3
nop
nop
sthi.l r5, -1, r3
blink tr2, r63
multiquad:
mshflo.b r3, r3, r3
mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
pta/l lastquad, tr0
stlo.q r2, 0, r3
sub r20, r25, r24
movi 64, r9
beqi/u r24, 8, tr0 // lastquad
pta/l loop, tr1
addi r20, -7*8, r8 // loop end address; This might overflow, so we need
// to use a different test before we start the loop
bgeu/u r24, r9, tr1// loop
st.q r25, 8, r3
shlri r24, 4, r24
st.q r20, -8, r3
beqi/u r24, 1, tr0 // lastquad
st.q r25, 16, r3
st.q r20, -16, r3
beqi/u r24, 2, tr0 // lastquad
st.q r25, 24, r3
st.q r20, -24, r3
lastquad:
sthi.q r5, -1, r3
blink tr2,r63
loop:
alloco r25, 32
st.q r25, 8, r3
st.q r25, 16, r3
st.q r25, 24, r3
st.q r25, 32, r3
addi r25, 32, r25
bgeu/l r8, r25, tr1 // loop
st.q r20, -40, r3
st.q r20, -32, r3
st.q r20, -24, r3
st.q r20, -16, r3
st.q r20, -8, r3
sthi.q r5, -1, r3
blink tr2,r63
#else /* ! SHMEDIA, i.e. SH1 .. SH4 / SHcompact */
! Entry: r4: destination pointer
! r5: fill value
! r6: byte count
!
! Exit: r0-r3: trashed
!
! This assumes that the first four bytes of the address space (0..3) are
! reserved - usually by the linker script. Otherwise, we would had to check
! for the case of objects of the size 12..15 at address 0..3 .
#ifdef __SH5__
#define DST r2
#define VAL r3
#define CNT r4
#define TMP r5
#else
#define DST r4
#define VAL r5
#define CNT r6
#define TMP r2
#endif
mov #12,r0 ! Check for small number of bytes
cmp/gt CNT,r0
mov DST,r0
SL(bt, L_store_byte_loop_check0, add DST,CNT)
tst #3,r0 ! Align destination
SL(bt, L_dup_bytes, extu.b r5,r5)
.balignw 4,0x0009
L_align_loop:
mov.b VAL,@r0
add #1,r0
tst #3,r0
bf L_align_loop
L_dup_bytes:
swap.b VAL,TMP ! Duplicate bytes across longword
or TMP,VAL
swap.w VAL,TMP
or TMP,VAL
add #-16,CNT
.balignw 4,0x0009
L_store_long_loop:
mov.l VAL,@r0 ! Store double longs to memory
cmp/hs CNT,r0
mov.l VAL,@(4,r0)
SL(bf, L_store_long_loop, add #8,r0)
add #16,CNT
L_store_byte_loop_check0:
cmp/eq CNT,r0
bt L_exit
.balignw 4,0x0009
L_store_byte_loop:
mov.b VAL,@r0 ! Store bytes to memory
add #1,r0
cmp/eq CNT,r0
bf L_store_byte_loop
L_exit:
rts
mov r4,r0
#endif /* ! SHMEDIA */
|
4ms/metamodule-plugin-sdk
| 1,370
|
plugin-libc/newlib/libc/machine/sh/strlen.S
|
! Entry: arg0: string start address
! Exit: result: length
!
! Copyright 2002 SuperH Ltd.
#include "asm.h"
ENTRY(strlen)
#if __SHMEDIA__
ldlo.q r2,0,r3
ptabs/l r18,tr0
pta/l loop,tr1
andi r2,-8,r0
shlli r2,3,r1
mcmpeq.b r3,r63,r3
SHHI r3,r1,r4
beqi/u r4,0,tr1 // loop
#ifdef __LITTLE_ENDIAN__
movi -1,r2
addi r3,-1,r4
msad.ubq r3,r4,r2
#else
shlri r3,1,r3
nsb r3,r3
shlri r3,3,r2
#endif
blink tr0,r63
loop:
ldlo.q r0,8,r3
addi r0,8,r0
ldlo.q r0,8,r63
mcmpeq.b r3,r63,r3
beqi/l r3,0,tr1 // loop
sub r0,r2,r2
#ifdef __LITTLE_ENDIAN__
addi r3,-1,r4
addi r2,-1,r2
msad.ubq r3,r4,r2
#else
shlri r3,1,r3
nsb r3,r3
shlri r3,3,r3
add r2,r3,r2
#endif
blink tr0,r63
#else /* ! __SHMEDIA__, i.e. SH 1..4 / SHcompact */
#ifdef __SH5__
#define STR_INIT r2
#define STR_ORIG r0
#define STR_COPY STR_ORIG
#define MASK r1
#define TMP r3
#define RESULT r2
! r0,r1,r3: clobbered
#else
#define STR_INIT r4
#define STR_ORIG STR_INIT
#define STR_COPY STR
#define MASK r1
#define TMP r3
#define RESULT r0
! r1,r3: clobbered
#endif
#define STR RESULT
mov #3,MASK
and STR_INIT,MASK
tst MASK,MASK
SL(bf, L_char_loop, mov STR_INIT, STR_COPY)
L_word_loop:
mov.l @STR+,TMP
cmp/str MASK,TMP
bf L_word_loop
add #-4,STR
L_char_loop:
mov.b @STR+,TMP
tst TMP,TMP
bf L_char_loop
add #-1,STR
rts
sub STR_ORIG,STR
#endif /* ! __SHMEDIA__ */
|
4ms/metamodule-plugin-sdk
| 4,092
|
plugin-libc/newlib/libc/machine/sh/strcmp.S
|
! SH5 code Copyright 2002 SuperH Ltd.
#include "asm.h"
ENTRY(strcmp)
#if __SHMEDIA__
ld.ub r2,0,r4
pt/l quickret0,tr0
ld.ub r3,0,r5
ptabs r18,tr2
beqi/u r4,0,tr0
ld.ub r2,1,r6
bne/u r4,r5,tr0
pt/l quickret1,tr1
ld.ub r3,1,r7
beqi/u r6,0,tr1
ld.ub r2,2,r4
bne/u r6,r7,tr1
ld.ub r3,2,r5
beqi/u r4,0,tr0
ld.ub r2,3,r6
bne/u r4,r5,tr0
ld.ub r3,3,r7
beqi/u r6,0,tr1
ld.ub r2,4,r4
bne/u r6,r7,tr1
ld.ub r3,4,r5
beqi/u r4,0,tr0
ld.ub r2,5,r6
bne/u r4,r5,tr0
ld.ub r3,5,r7
beqi/u r6,0,tr1
ld.ub r2,6,r4
bne/u r6,r7,tr1
ld.ub r3,6,r5
beqi/u r4,0,tr0
ld.ub r2,7,r6
bne/u r4,r5,tr0
ld.ub r3,7,r7
beqi/u r6,0,tr1
sub r3,r2,r3
bne/u r6,r7,tr1
andi r2,-8,r2
add r3,r2,r3
ldlo.q r3,8,r23
pt r23_zero,tr0
shlli r3,3,r22
sub r63,r22,r20
movi 0x101,r6
mperm.w r6,r63,r6
SHLO r6,r22,r7
msubs.ub r7,r23,r8
pt loop,tr1
bnei/u r8,0,tr0 // r23_zero
pt found_zero,tr0
addi r3,15,r3
andi r3,-8,r3
sub r3,r2,r3
bne/l r7,r6,tr1 // loop
/* The strings are aligned to each other. */
/* It is possible to have a loop with six cycles / iteration
by re-ordering the exit conditions, but then it needs extra
time and/or code to sort out the r4 != r5 case. */
pt al_loop,tr1
pt al_found_zero,tr0
al_loop:
ld.q r2,8,r4
ldx.q r2,r3,r5
addi r2,8,r2
mcmpeq.b r63,r4,r8
pt cmp_quad,tr3
bnei/u r8,0,tr0 // al_found_zero
beq/l r4,r5,tr1 // al_loop
blink tr3,r63 // cmp_quad
.balign 8
quickret0:
sub r4,r5,r2
blink tr2,r63
quickret1:
sub r6,r7,r2
blink tr2,r63
loop:
ld.q r2,8,r4
ldx.q r2,r3,r19
addi r2,8,r2
msubs.ub r6,r4,r8
mcmpeq.b r63,r19,r9
SHHI r19,r20,r21
or r21,r23,r5
SHLO r19,r22,r23
bne/u r8,r9,tr0 // found_zero
beq/l r4,r5,tr1 // loop
cmp_quad:
#ifdef __LITTLE_ENDIAN__
byterev r4,r4
byterev r5,r5
#endif
cmpgtu r4,r5,r6
cmpgtu r5,r4,r7
sub r6,r7,r2
blink tr2,r63
found_zero:
pt zero_now,tr0
pt cmp_quad,tr1
SHHI r9,r20,r7
bne/u r8,r7,tr0 // zero_now
bne/u r4,r5,tr1 // cmp_quad
SHLO r9,r22,r8
r23_zero:
ld.q r2,8,r4
add r23,r63,r5
zero_now:
al_found_zero:
/* We konw that one of the values has at lest one zero, and r8 holds
an 0x01 or 0xff mask for every zero found in one of the operands.
If both operands have the first zero in the same place, this mask
allows us to truncate the comparison to the valid bytes in the
strings. If the first zero is in different places, it doesn't
matter if some invalid bytes are included, since the comparison
of the zero with the non-zero will determine the outcome. */
#ifdef __LITTLE_ENDIAN__
shlli r8,8,r8
addi r8,-1,r9
andc r9,r8,r8
and r8,r4,r4
and r8,r5,r5
#else
shlri r8,1,r8
nsb r8,r8
addi r8,8,r8
andi r8,56,r8
sub r63,r8,r8
shlrd r4,r8,r4
shlrd r5,r8,r5
#endif
#ifdef __LITTLE_ENDIAN__
byterev r4,r4
byterev r5,r5
#endif
cmpgtu r4,r5,r6
cmpgtu r5,r4,r7
sub r6,r7,r2
blink tr2,r63
#else /* ! __SHMEDIA__, i.e. SH 1..4 / SHcompact */
#ifdef __SH5__
#define STR1 r2
#define STR2 r3
#define RESULT r2
#define TMP r4
#else
! Entry: r4: string1
! r5: string2
! Exit: r0: result
! r1-r2,r4-r5: clobbered
#define STR1 r4
#define STR2 r5
#define RESULT r0
#define TMP r2
#endif /* __SH5__ */
mov STR1,r0
or STR2,r0
tst #3,r0
bf L_setup_char_loop
mov #0,r0
#ifdef DELAYED_BRANCHES
mov.l @STR1+,r1
.align 2
Longword_loop:
mov.l @STR2+,TMP
cmp/str r0,r1
bt Longword_loop_end
cmp/eq r1,TMP
bt.s Longword_loop
mov.l @STR1+,r1
add #-4, STR1
Longword_loop_end:
add #-4, STR1
add #-4, STR2
L_setup_char_loop:
mov.b @STR1+,r0
.align 2
L_char_loop:
mov.b @STR2+,r1
tst r0,r0
bt L_return
cmp/eq r0,r1
bt.s L_char_loop
mov.b @STR1+,r0
add #-2,STR1
mov.b @STR1,r0
#else /* ! DELAYED_BRANCHES */
.align 2
Longword_loop:
mov.l @r4+,r1
mov.l @r5+,r2
cmp/str r0,r1
bt Longword_loop_end
cmp/eq r1,r2
bt Longword_loop
Longword_loop_end:
add #-4, r4
add #-4, r5
.align 2
L_setup_char_loop:
L_char_loop:
mov.b @r4+,r0
mov.b @r5+,r1
tst r0,r0
bt L_return
cmp/eq r0,r1
bt L_char_loop
#endif
L_return:
extu.b r0,RESULT
extu.b r1,r1
rts
sub r1,RESULT
#endif /* ! __SHMEDIA__ */
|
4ms/metamodule-plugin-sdk
| 4,945
|
plugin-libc/newlib/libc/machine/sh/strncpy.S
|
/* Copyright 2003 SuperH Ltd. */
#include "asm.h"
#ifdef __SH5__
#if __SHMEDIA__
#ifdef __LITTLE_ENDIAN__
#define ZPAD_MASK(src, dst) addi src, -1, dst
#else
#define ZPAD_MASK(src, dst) \
byterev src, dst; addi dst, -1, dst; byterev dst, dst
#endif
/* We assume that the destination is not in the first 16 bytes of memory.
A typical linker script will put the text section first, and as
this code is longer that 16 bytes, you have to get out of your way
to put data there. */
ENTRY(strncpy)
pt L_small, tr2
ldlo.q r3, 0, r0
shlli r3, 3, r19
mcmpeq.b r0, r63, r1
SHHI r1, r19, r7
add r2, r4, r20
addi r20, -8, r5
/* If the size is greater than 8, we know we can read beyond the first
(possibly partial) quadword, and write out a full first and last
(possibly unaligned and/or overlapping) quadword. */
bge/u r2, r5, tr2 // L_small
pt L_found0, tr0
addi r2, 8, r22
bnei/u r7, 0, tr0 // L_found0
ori r3, -8, r38
pt L_end_early, tr1
sub r2, r38, r22
stlo.q r2, 0, r0
sthi.q r2, 7, r0
sub r3, r2, r6
ldx.q r22, r6, r0
/* Before each iteration, check that we can store in full the next quad we
are about to fetch. */
addi r5, -8, r36
bgtu/u r22, r36, tr1 // L_end_early
pt L_scan0, tr1
L_scan0:
addi r22, 8, r22
mcmpeq.b r0, r63, r1
stlo.q r22, -8, r0
bnei/u r1, 0, tr0 // L_found0
sthi.q r22, -1, r0
ldx.q r22, r6, r0
bgeu/l r36, r22, tr1 // L_scan0
L_end:
// At end; we might re-read a few bytes when we fetch the last quad.
// branch mispredict, so load is ready now.
mcmpeq.b r0, r63, r1
addi r22, 8, r22
bnei/u r1, 0, tr0 // L_found0
add r3, r4, r7
ldlo.q r7, -8, r1
ldhi.q r7, -1, r7
ptabs r18, tr0
stlo.q r22, -8, r0
or r1, r7, r1
mcmpeq.b r1, r63, r7
sthi.q r22, -1, r0
ZPAD_MASK (r7, r7)
and r1, r7, r1 // mask out non-zero bytes after first zero byte
stlo.q r20, -8, r1
sthi.q r20, -1, r1
blink tr0, r63
L_end_early:
/* Check if we can store the current quad in full. */
pt L_end, tr1
add r3, r4, r7
bgtu/u r5, r22, tr1 // L_end // Not really unlikely, but gap is short.
/* If not, that means we can just proceed to process the last quad.
Two pipeline stalls are unavoidable, as we don't have enough ILP. */
ldlo.q r7, -8, r1
ldhi.q r7, -1, r7
ptabs r18, tr0
or r1, r7, r1
mcmpeq.b r1, r63, r7
ZPAD_MASK (r7, r7)
and r1, r7, r1 // mask out non-zero bytes after first zero byte
stlo.q r20, -8, r1
sthi.q r20, -1, r1
blink tr0, r63
L_found0:
// r0: string to store, not yet zero-padding normalized.
// r1: result of mcmpeq.b r0, r63, r1.
// r22: store address plus 8. I.e. address where zero padding beyond the
// string in r0 goes.
// r20: store end address.
// r5: store end address minus 8.
pt L_write0_multiquad, tr0
ZPAD_MASK (r1, r1)
and r0, r1, r0 // mask out non-zero bytes after first zero byte
stlo.q r22, -8, r0
sthi.q r22, -1, r0
andi r22, -8, r1 // Check if zeros to write fit in one quad word.
bgtu/l r5, r1, tr0 // L_write0_multiquad
ptabs r18, tr1
sub r20, r22, r1
shlli r1, 2, r1 // Do shift in two steps so that 64 bit case is
SHLO r0, r1, r0 // handled correctly.
SHLO r0, r1, r0
sthi.q r20, -1, r0
blink tr1, r63
L_write0_multiquad:
pt L_write0_loop, tr0
ptabs r18, tr1
stlo.q r22, 0, r63
sthi.q r20, -1, r63
addi r1, 8, r1
bgeu/l r5, r1, tr0 // L_write0_loop
blink tr1, r63
L_write0_loop:
st.q r1, 0 ,r63
addi r1, 8, r1
bgeu/l r5, r1, tr0 // L_write0_loop
blink tr1, r63
L_small:
// r0: string to store, not yet zero-padding normalized.
// r1: result of mcmpeq.b r0, r63, r1.
// r7: nonzero indicates relevant zero found r0.
// r2: store address.
// r3: read address.
// r4: size, max 8
// r20: store end address.
// r5: store end address minus 8.
pt L_nohi, tr0
pt L_small_storelong, tr1
ptabs r18, tr2
sub r63, r4, r23
bnei/u r7, 0, tr0 // L_nohi
ori r3, -8, r7
bge/l r23, r7, tr0 // L_nohi
ldhi.q r3, 7, r1
or r0, r1, r0
mcmpeq.b r0, r63, r1
L_nohi:
ZPAD_MASK (r1, r1)
and r0, r1, r0
movi 4, r19
bge/u r4, r19, tr1 // L_small_storelong
pt L_small_end, tr0
#ifndef __LITTLE_ENDIAN__
byterev r0, r0
#endif
beqi/u r4, 0, tr0 // L_small_end
st.b r2, 0, r0
beqi/u r4, 1, tr0 // L_small_end
shlri r0, 8, r0
st.b r2, 1, r0
beqi/u r4, 2, tr0 // L_small_end
shlri r0, 8, r0
st.b r2, 2, r0
L_small_end:
blink tr2, r63
L_small_storelong:
shlli r23, 3, r7
SHHI r0, r7, r1
#ifdef __LITTLE_ENDIAN__
shlri r1, 32, r1
#else
shlri r0, 32, r0
#endif
stlo.l r2, 0, r0
sthi.l r2, 3, r0
stlo.l r20, -4, r1
sthi.l r20, -1, r1
blink tr2, r63
#else /* SHcompact */
/* This code is optimized for size. Instruction selection is SH5 specific.
SH4 should use a different version. */
ENTRY(strncpy)
mov #0, r6
cmp/eq r4, r6
bt return
mov r2, r5
add #-1, r5
add r5, r4
loop:
bt/s found0
add #1, r5
mov.b @r3+, r1
found0:
cmp/eq r5,r4
mov.b r1, @r5
bf/s loop
cmp/eq r1, r6
return:
rts
nop
#endif /* SHcompact */
#endif /* __SH5__ */
|
4ms/metamodule-plugin-sdk
| 3,123
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv6m.S
|
/*
* Copyright (c) 2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Implementation of strcmp for ARMv6m. This version is only used in
ARMv6-M when we want an efficient implementation. Otherwize if the
code size is preferred, strcmp-armv4t.S will be used. */
.thumb_func
.syntax unified
.arch armv6-m
.macro DoSub n, label
subs r0, r0, r1
#ifdef __ARM_BIG_ENDIAN
lsrs r1, r4, \n
#else
lsls r1, r4, \n
#endif
orrs r1, r0
bne \label
.endm
.macro Byte_Test n, label
lsrs r0, r2, \n
lsrs r1, r3, \n
DoSub \n, \label
.endm
.text
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
mov r2, r0
push {r4, r5, r6, lr}
orrs r2, r1
lsls r2, r2, #30
bne 6f
ldr r5, =0x01010101
lsls r6, r5, #7
1:
ldmia r0!, {r2}
ldmia r1!, {r3}
subs r4, r2, r5
bics r4, r2
ands r4, r6
beq 3f
#ifdef __ARM_BIG_ENDIAN
Byte_Test #24, 4f
Byte_Test #16, 4f
Byte_Test #8, 4f
b 7f
3:
cmp r2, r3
beq 1b
cmp r2, r3
#else
uxtb r0, r2
uxtb r1, r3
DoSub #24, 2f
uxth r0, r2
uxth r1, r3
DoSub #16, 2f
lsls r0, r2, #8
lsls r1, r3, #8
lsrs r0, r0, #8
lsrs r1, r1, #8
DoSub #8, 2f
lsrs r0, r2, #24
lsrs r1, r3, #24
subs r0, r0, r1
2:
pop {r4, r5, r6, pc}
3:
cmp r2, r3
beq 1b
rev r0, r2
rev r1, r3
cmp r0, r1
#endif
bls 5f
movs r0, #1
4:
pop {r4, r5, r6, pc}
5:
movs r0, #0
mvns r0, r0
pop {r4, r5, r6, pc}
6:
ldrb r2, [r0, #0]
ldrb r3, [r1, #0]
adds r0, #1
adds r1, #1
cmp r2, #0
beq 7f
cmp r2, r3
bne 7f
ldrb r2, [r0, #0]
ldrb r3, [r1, #0]
adds r0, #1
adds r1, #1
cmp r2, #0
beq 7f
cmp r2, r3
beq 6b
7:
subs r0, r2, r3
pop {r4, r5, r6, pc}
.cfi_endproc
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 6,243
|
plugin-libc/newlib/libc/machine/arm/strlen-armv7.S
|
/* Copyright (c) 2010-2011,2013 Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Assumes:
ARMv6T2 or ARMv7E-M, AArch32
*/
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include "arm-acle-compat.h"
#include "arm_asm.h"
/* def_fn: declare a global function symbol \f in .text, aligned to a
   2^p2align byte boundary, with ELF symbol type %function.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* S2LO/S2HI: shift "towards" the low/high addressed end of a word.
   The direction flips with endianness so the misalignment fix-up
   below can be written once for both byte orders.  */
#ifdef __ARMEB__
#define S2LO lsl
#define S2HI lsr
#else
#define S2LO lsr
#define S2HI lsl
#endif
/* This code requires Thumb. */
#if __ARM_ARCH_PROFILE == 'M'
#if __ARM_ARCH >= 8
/* keep config inherited from -march=. */
#else
.arch armv7e-m
#endif /* if __ARM_ARCH >= 8 */
#else
.arch armv6t2
#endif
.eabi_attribute Tag_ARM_ISA_use, 0
.thumb
.syntax unified
/* Parameters and result. */
#define srcin r0
#define result r0
/* Internal variables. */
#define src r1
#define data1a r2
#define data1b r3
#define const_m1 r12
#define const_0 r4
#define tmp1 r4 /* Overlaps const_0 */
#define tmp2 r5
/* size_t strlen (const char *s)
   AAPCS: srcin (r0) = s on entry; result (r0) = length on return.
   Strategy: round SRC down to an 8-byte boundary, then scan 8 bytes
   per step (unrolled 4x = 32 bytes per loop) using UADD8 with
   0xffffffff: each byte lane carries out (sets its GE bit) unless the
   lane was zero, and SEL then yields 0xff in exactly the NUL lanes
   and 0x00 elsewhere.  prologue/epilogue and HAVE_PAC_LEAF come from
   arm_asm.h (not visible here) -- presumably they save/restore r4-r5
   and handle PAC signing; confirm against that header.  */
def_fn strlen p2align=6
.fnstart
.cfi_startproc
prologue 4 5 push_ip=HAVE_PAC_LEAF
pld [srcin, #0]
bic src, srcin, #7 /* src = s rounded down to 8 bytes.  */
mvn const_m1, #0
ands tmp1, srcin, #7 /* (8 - bytes) to alignment. */
pld [src, #32]
bne.w .Lmisaligned8
mov const_0, #0
mov result, #-8 /* Pre-bias: first iteration adds 8 back.  */
.Lloop_aligned:
/* Bytes 0-7. */
ldrd data1a, data1b, [src]
pld [src, #64]
add result, result, #8
.Lstart_realigned:
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 8-15. */
ldrd data1a, data1b, [src, #8]
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 16-23. */
ldrd data1a, data1b, [src, #16]
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cbnz data1b, .Lnull_found
/* Bytes 24-31. */
ldrd data1a, data1b, [src, #24]
add src, src, #32
uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
add result, result, #8
sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
uadd8 data1b, data1b, const_m1
sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
cmp data1b, #0
beq .Lloop_aligned
.Lnull_found:
/* A NUL lies in the 8 bytes just examined: data1a/data1b hold 0xff
   in the lanes that were NUL and 0x00 elsewhere.  */
.cfi_remember_state
cmp data1a, #0
itt eq
addeq result, result, #4 /* First word clean: NUL is in data1b.  */
moveq data1a, data1b
#ifndef __ARMEB__
rev data1a, data1a /* Little-endian: put the first NUL lane at the top.  */
#endif
clz data1a, data1a
add result, result, data1a, lsr #3 /* Bits -> Bytes. */
epilogue 4 5 push_ip=HAVE_PAC_LEAF
.Lmisaligned8:
/* s was not 8-byte aligned: load the enclosing aligned 8 bytes and
   force every byte before s to 0xff (non-NUL) so it cannot match.  */
.cfi_restore_state
ldrd data1a, data1b, [src]
and tmp2, tmp1, #3
rsb result, tmp1, #0 /* Bias result by the bytes skipped.  */
lsl tmp2, tmp2, #3 /* Bytes -> bits. */
tst tmp1, #4
pld [src, #64]
S2HI tmp2, const_m1, tmp2
orn data1a, data1a, tmp2
itt ne
ornne data1b, data1b, tmp2 /* Offset >= 4: mask applies to word 2...  */
movne data1a, const_m1 /* ...and word 1 is entirely padding.  */
mov const_0, #0
b .Lstart_realigned
.cfi_endproc
.cantunwind
.fnend
.size strlen, . - strlen
|
4ms/metamodule-plugin-sdk
| 1,999
|
plugin-libc/newlib/libc/machine/arm/memcpy.S
|
/*
* Copyright (c) 2013-2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* The structure of the following #if #else #endif conditional chain
   must match the chain in memcpy-stub.c. */
#include "../../../../include/arm-acle-compat.h"
#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED)
/* Defined in memcpy-stub.c. */
/* Size-optimised builds fall back to the small C implementation.  */
#elif (__ARM_ARCH >= 7 && __ARM_ARCH_PROFILE == 'A' \
&& defined (__ARM_FEATURE_UNALIGNED))
/* ARMv7-A with unaligned-access support: Cortex-A tuned copy.  */
#include "memcpy-armv7a.S"
#elif __ARM_ARCH_ISA_THUMB == 2 && !__ARM_ARCH_ISA_ARM
/* Thumb-2-only cores (no A32): M-profile tuned copy.  */
#include "memcpy-armv7m.S"
#else
/* Defined in memcpy-stub.c. */
#endif
|
4ms/metamodule-plugin-sdk
| 1,934
|
plugin-libc/newlib/libc/machine/arm/strlen-thumb1-Os.S
|
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* def_fn: declare a global function symbol \f in .text, aligned to a
   2^p2align byte boundary, with ELF symbol type %function.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* Thumb-1 encoding for ARMv4T; the build-attribute below also marks
   the object compatible with ARMv6-M.  */
.arch armv4t
.eabi_attribute Tag_also_compatible_with, "\006\013" /* ARMv6-M. */
.eabi_attribute Tag_ARM_ISA_use, 0
.thumb
.syntax unified
def_fn strlen p2align=1
	/* size_t strlen (const char *s) -- minimal-size Thumb-1 loop.
	   In:  r0 = s.  Out: r0 = length (not counting the NUL).
	   Clobbers r2, r3 and the flags; no stack use.
	   Count bytes until the terminator has been consumed, then back
	   the count off by one for the terminator itself.  */
	movs	r2, #0			/* r2 = running byte index.  */
.Lnext_byte:
	ldrb	r3, [r0, r2]		/* r3 = s[r2].  */
	adds	r2, r2, #1
	cmp	r3, #0
	bne	.Lnext_byte		/* Loop until the NUL was read.  */
	subs	r0, r2, #1		/* Index overshot by one: len = r2 - 1.  */
	bx	lr
	.size	strlen, . - strlen
|
4ms/metamodule-plugin-sdk
| 7,156
|
plugin-libc/newlib/libc/machine/arm/setjmp.S
|
/* This is a simple version of setjmp and longjmp.
   Nick Clifton, Cygnus Solutions, 13 June 1997. */
#include "../../../../include/arm-acle-compat.h"
/* ANSI concatenation macros. */
#define CONCAT(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a##b
#ifndef __USER_LABEL_PREFIX__
#error __USER_LABEL_PREFIX__ not defined
#endif
#define SYM(x) CONCAT (__USER_LABEL_PREFIX__, x)
#ifdef __ELF__
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#else
#define TYPE(x)
#define SIZE(x)
#endif
/* Jump buffer allocation sizes.  Buffer layout as used below:
   10 core words (r4-r10, fp, sp, lr), then 8 doublewords (d8-d15)
   when hardware FP/MVE is present, then one word for the PAC
   signature when pointer authentication is enabled.  */
#define JUMPBUF_CORE_REGS_SIZE (10 * 4)
#define JUMPBUF_FP_REGS_SIZE (8 * 8)
#define JUMPBUF_PAC (JUMPBUF_CORE_REGS_SIZE + JUMPBUF_FP_REGS_SIZE + 0)
/* Arm/Thumb interworking support:
The interworking scheme expects functions to use a BX instruction
to return control to their parent. Since we need this code to work
in both interworked and non-interworked environments as well as with
older processors which do not have the BX instruction we do the
following:
Test the return address.
If the bottom bit is clear perform an "old style" function exit.
(We know that we are in ARM mode and returning to an ARM mode caller).
Otherwise use the BX instruction to perform the function exit.
We know that we will never attempt to perform the BX instruction on
an older processor, because that kind of processor will never be
interworked, and a return address with the bottom bit set will never
be generated.
In addition, we do not actually assemble the BX instruction as this would
require us to tell the assembler that the processor is an ARM7TDMI and
it would store this information in the binary. We want this binary to be
able to be linked with binaries compiled for older processors however, so
we do not want such information stored there.
If we are running using the APCS-26 convention however, then we never
test the bottom bit, because this is part of the processor status.
Instead we just do a normal return, since we know that we cannot be
returning to a Thumb caller - the Thumb does not support APCS-26.
Function entry is much simpler. If we are compiling for the Thumb we
just switch into ARM mode and then drop through into the rest of the
function. The function exit code will take care of the restore to
Thumb mode.
For Thumb-2 do everything in Thumb mode. */
.syntax unified
/* GCC 12.1 and later will tell the assembler exactly which floating
point (or MVE) unit is required and we don't want to override
that. Conversely, older versions of the compiler don't pass this
information so we need to enable the VFP version that is most
appropriate. The choice here should support all suitable VFP
versions that the older toolchains can handle. */
#if __GNUC__ && __GNUC__ < 12
/* Ensure that FPU instructions are correctly compiled and, likewise,
the appropriate build attributes are added to the resulting object
file. Check whether the MVE extension is present and whether
we have support for hardware floating point-operations. VFPxd
covers all the cases we need in this file for hardware
floating-point and should be compatible with all required FPUs
that we need to support. */
# if __ARM_FP
.fpu vfpxd
# endif
# if __ARM_FEATURE_MVE
.arch_extension mve
# endif
#endif
#if __ARM_ARCH_ISA_THUMB == 1 && !__ARM_ARCH_ISA_ARM
/* ARMv6-M-like has to be implemented in Thumb mode. */
.thumb
.thumb_func
.globl SYM (setjmp)
TYPE (setjmp)
SYM (setjmp):
/* int setjmp (jmp_buf env): r0 = env; returns 0 on the direct path.
   Thumb-1 STMIA can only store r0-r7, so the high registers are
   staged through r1-r6; r4-r7 are reloaded from the buffer before
   returning so only caller-saved r1-r3 are clobbered.  */
/* Save registers in jump buffer. */
stmia r0!, {r4, r5, r6, r7}
mov r1, r8
mov r2, r9
mov r3, r10
mov r4, fp
mov r5, sp
mov r6, lr
stmia r0!, {r1, r2, r3, r4, r5, r6}
subs r0, r0, #40 /* Rewind over the 10 words just stored.  */
/* Restore callee-saved low regs. */
ldmia r0!, {r4, r5, r6, r7}
/* Return zero. */
movs r0, #0
bx lr
.thumb_func
.globl SYM (longjmp)
TYPE (longjmp)
SYM (longjmp):
/* void longjmp (jmp_buf env, int val): r0 = env, r1 = val.
   Restores the context saved by setjmp and resumes there, making
   setjmp appear to return val (or 1 if val was 0).  */
/* Restore High regs. */
adds r0, r0, #16 /* Skip the four saved low registers.  */
ldmia r0!, {r2, r3, r4, r5, r6}
mov r8, r2
mov r9, r3
mov r10, r4
mov fp, r5
mov sp, r6
ldmia r0!, {r3} /* lr */
/* Restore low regs. */
subs r0, r0, #40 /* Back to the start of the buffer.  */
ldmia r0!, {r4, r5, r6, r7}
/* Return the result argument, or 1 if it is zero. */
movs r0, r1 /* MOVS sets Z from val.  */
bne 1f
movs r0, #1
1:
bx r3 /* Jump to the saved return address.  */
#else
#ifdef __APCS_26__
#define RET movs pc, lr
#elif defined(__thumb2__)
#define RET bx lr
#else
#define RET tst lr, #1; \
moveq pc, lr ; \
.inst 0xe12fff1e /* bx lr */
#endif
/* COND: emit an IT block prefix in Thumb-2, nothing in ARM state.  */
#ifdef __thumb2__
.macro COND where when
i\where \when
.endm
#else
.macro COND where when
.endm
#endif
#if defined(__thumb2__)
.macro MODE
.thumb
.thumb_func
.endm
.macro PROLOGUE name
.endm
#elif defined(__thumb__)
/* Thumb-1 with ARM available: enter via BX pc to switch to ARM state
   and fall through into the ARM body.  */
#define MODE .thumb_func
.macro PROLOGUE name
.code 16
bx pc
nop
.code 32
SYM (.arm_start_of.\name):
.endm
#else /* Arm */
#define MODE .code 32
.macro PROLOGUE name
.endm
#endif
/* FUNC_START/FUNC_END: shared symbol, unwind and CFI boilerplate.  */
.macro FUNC_START name
.text
.align 2
MODE
.globl SYM (\name)
.fnstart
.cfi_startproc
TYPE (\name)
SYM (\name):
PROLOGUE \name
.endm
.macro FUNC_END name
RET
.cfi_endproc
.fnend
SIZE (\name)
.endm
/* --------------------------------------------------------------------
int setjmp (jmp_buf);
-------------------------------------------------------------------- */
FUNC_START setjmp
#if __ARM_FEATURE_PAC_DEFAULT
# if __ARM_FEATURE_BTI_DEFAULT
pacbti ip, lr, sp
# else
pac ip, lr, sp
# endif /* __ARM_FEATURE_BTI_DEFAULT */
mov r3, ip /* Keep the PAC code in r3 across the stores.  */
str r3, [r0, #JUMPBUF_PAC]
.cfi_register 143, 12 /* NOTE(review): 143 looks like the RA_AUTH_CODE
			 DWARF pseudo-register -- confirm.  */
#else
# if __ARM_FEATURE_BTI_DEFAULT
bti
# endif /* __ARM_FEATURE_BTI_DEFAULT */
#endif /* __ARM_FEATURE_PAC_DEFAULT */
/* Save all the callee-preserved registers into the jump buffer. */
#ifdef __thumb2__
mov ip, sp /* Thumb-2 STM cannot store SP: stage it via ip.  */
stmia r0!, { r4-r10, fp, ip, lr }
#else
stmia r0!, { r4-r10, fp, sp, lr }
#endif
#if defined __ARM_FP || defined __ARM_FEATURE_MVE
vstm r0, { d8-d15 }
#endif
/* When setting up the jump buffer return 0. */
mov r0, #0
#if __ARM_FEATURE_PAC_DEFAULT
mov ip, r3
aut ip, lr, sp /* Authenticate before returning.  */
#endif /* __ARM_FEATURE_PAC_DEFAULT */
FUNC_END setjmp
/* --------------------------------------------------------------------
volatile void longjmp (jmp_buf, int);
-------------------------------------------------------------------- */
FUNC_START longjmp
#if __ARM_FEATURE_BTI_DEFAULT
bti
#endif /* __ARM_FEATURE_BTI_DEFAULT */
#if __ARM_FEATURE_PAC_DEFAULT
/* Keep original jmpbuf address for retrieving pac-code
for authentication. */
mov r2, r0
#endif /* __ARM_FEATURE_PAC_DEFAULT */
/* If we have stack extension code it ought to be handled here. */
/* Restore the registers, retrieving the state when setjmp() was called. */
#ifdef __thumb2__
ldmia r0!, { r4-r10, fp, ip, lr }
mov sp, ip
#else
ldmia r0!, { r4-r10, fp, sp, lr }
#endif
#if defined __ARM_FP || defined __ARM_FEATURE_MVE
vldm r0, { d8-d15 }
#endif
/* Put the return value into the integer result register.
But if it is zero then return 1 instead. */
movs r0, r1
it eq
moveq r0, #1
#if __ARM_FEATURE_PAC_DEFAULT
ldr ip, [r2, #JUMPBUF_PAC]
aut ip, lr, sp /* Authenticate against the restored lr/sp.  */
#endif /* __ARM_FEATURE_PAC_DEFAULT */
FUNC_END longjmp
#endif
|
4ms/metamodule-plugin-sdk
| 15,559
|
plugin-libc/newlib/libc/machine/arm/memcpy-armv7a.S
|
/* Copyright (c) 2013, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This memcpy routine is optimised for Cortex-A15 cores and takes advantage
of VFP or NEON when built with the appropriate flags.
Assumptions:
ARMv6 (ARMv7-a if using Neon)
ARM state
Unaligned accesses
LDRD/STRD support unaligned word accesses
If compiled with GCC, this file should be enclosed within following
pre-processing check:
if defined (__ARM_ARCH_7A__) && defined (__ARM_FEATURE_UNALIGNED)
*/
.syntax unified
/* This implementation requires ARM state. */
.arm
/* Select the data-path variant: NEON (when built for it), VFP when a
   hardware FPU is available, otherwise core registers only.
   FRAME_SIZE is the stack block used to spill callee-saved registers
   in the core-register paths (only 4 bytes needed for NEON/VFP).  */
#ifdef __ARM_NEON__
.fpu neon
.arch armv7-a
# define FRAME_SIZE 4
# define USE_VFP
# define USE_NEON
#elif !defined (__SOFTFP__)
.arch armv6
.fpu vfpv2
# define FRAME_SIZE 32
# define USE_VFP
#else
.arch armv6
# define FRAME_SIZE 32
#endif
/* Old versions of GAS incorrectly implement the NEON align semantics. */
#ifdef BROKEN_ASM_NEON_ALIGN
#define ALIGN(addr, align) addr,:align
#else
#define ALIGN(addr, align) addr:align
#endif
#define PC_OFFSET 8 /* PC pipeline compensation. */
#define INSN_SIZE 4
/* Call parameters. */
#define dstin r0
#define src r1
#define count r2
/* Locals. */
#define tmp1 r3
#define dst ip
#define tmp2 r10
#ifndef USE_NEON
/* For bulk copies using GP registers. */
#define A_l r2 /* Call-clobbered. */
#define A_h r3 /* Call-clobbered. */
#define B_l r4
#define B_h r5
#define C_l r6
#define C_h r7
#define D_l r8
#define D_h r9
#endif
/* Number of lines ahead to pre-fetch data. If you change this the code
below will need adjustment to compensate. */
#define prefetch_lines 5
#ifdef USE_VFP
/* cpy_line_vfp: copy one 64-byte line dst[base..base+63] from
   src[..], staging through \vreg and d0-d2.  The final load of \vreg
   reads prefetch_lines*64 bytes ahead, acting as a software
   prefetch whose data is actually reused.  */
.macro cpy_line_vfp vreg, base
vstr \vreg, [dst, #\base]
vldr \vreg, [src, #\base]
vstr d0, [dst, #\base + 8]
vldr d0, [src, #\base + 8]
vstr d1, [dst, #\base + 16]
vldr d1, [src, #\base + 16]
vstr d2, [dst, #\base + 24]
vldr d2, [src, #\base + 24]
vstr \vreg, [dst, #\base + 32]
vldr \vreg, [src, #\base + prefetch_lines * 64 - 32]
vstr d0, [dst, #\base + 40]
vldr d0, [src, #\base + 40]
vstr d1, [dst, #\base + 48]
vldr d1, [src, #\base + 48]
vstr d2, [dst, #\base + 56]
vldr d2, [src, #\base + 56]
.endm
/* cpy_tail_vfp: as cpy_line_vfp but without the read-ahead load,
   for the final lines where reading ahead would overrun.  */
.macro cpy_tail_vfp vreg, base
vstr \vreg, [dst, #\base]
vldr \vreg, [src, #\base]
vstr d0, [dst, #\base + 8]
vldr d0, [src, #\base + 8]
vstr d1, [dst, #\base + 16]
vldr d1, [src, #\base + 16]
vstr d2, [dst, #\base + 24]
vldr d2, [src, #\base + 24]
vstr \vreg, [dst, #\base + 32]
vstr d0, [dst, #\base + 40]
vldr d0, [src, #\base + 40]
vstr d1, [dst, #\base + 48]
vldr d1, [src, #\base + 48]
vstr d2, [dst, #\base + 56]
vldr d2, [src, #\base + 56]
.endm
#endif
/* def_fn: declare a global function symbol \f in .text, aligned to a
   2^p2align byte boundary, with ELF symbol type %function.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
/* void *memcpy (void *dstin, const void *src, size_t count)
   AAPCS: r0 = dst, r1 = src, r2 = count; returns dst in r0.
   Tuned for Cortex-A15; relies on unaligned LDR/LDRD support.  r0 is
   preserved by working through dst (ip); the core-register paths
   spill B/C/D pairs into the FRAME_SIZE stack block.  Several tails
   dispatch with "add pc, pc, tmp1" computed jumps into unrolled copy
   ladders -- PC_OFFSET compensates for the ARM pipeline's PC+8.  */
def_fn memcpy p2align=6
mov dst, dstin /* Preserve dstin, we need to return it. */
cmp count, #64
bge .Lcpy_not_short
/* Deal with small copies quickly by dropping straight into the
exit block. */
.Ltail63unaligned:
/* Copy the final 0-63 bytes; alignment unknown.  */
#ifdef USE_NEON
and tmp1, count, #0x38
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1 /* Computed jump into the ladder below.  */
vld1.8 {d0}, [src]! /* 14 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 12 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 10 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 8 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 6 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 4 words to go. */
vst1.8 {d0}, [dst]!
vld1.8 {d0}, [src]! /* 2 words to go. */
vst1.8 {d0}, [dst]!
tst count, #4
ldrne tmp1, [src], #4
strne tmp1, [dst], #4
#else
/* Copy up to 15 full words of data. May not be aligned. */
/* Cannot use VFP for unaligned data. */
and tmp1, count, #0x3c
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(60 - PC_OFFSET/2 + INSN_SIZE/2)
/* Jump directly into the sequence below at the correct offset. */
add pc, pc, tmp1, lsl #1
ldr tmp1, [src, #-60] /* 15 words to go. */
str tmp1, [dst, #-60]
ldr tmp1, [src, #-56] /* 14 words to go. */
str tmp1, [dst, #-56]
ldr tmp1, [src, #-52]
str tmp1, [dst, #-52]
ldr tmp1, [src, #-48] /* 12 words to go. */
str tmp1, [dst, #-48]
ldr tmp1, [src, #-44]
str tmp1, [dst, #-44]
ldr tmp1, [src, #-40] /* 10 words to go. */
str tmp1, [dst, #-40]
ldr tmp1, [src, #-36]
str tmp1, [dst, #-36]
ldr tmp1, [src, #-32] /* 8 words to go. */
str tmp1, [dst, #-32]
ldr tmp1, [src, #-28]
str tmp1, [dst, #-28]
ldr tmp1, [src, #-24] /* 6 words to go. */
str tmp1, [dst, #-24]
ldr tmp1, [src, #-20]
str tmp1, [dst, #-20]
ldr tmp1, [src, #-16] /* 4 words to go. */
str tmp1, [dst, #-16]
ldr tmp1, [src, #-12]
str tmp1, [dst, #-12]
ldr tmp1, [src, #-8] /* 2 words to go. */
str tmp1, [dst, #-8]
ldr tmp1, [src, #-4]
str tmp1, [dst, #-4]
#endif
/* Trailing 0-3 bytes: shift count bits 1:0 into C (halfword) and
   N (byte) and copy conditionally.  */
lsls count, count, #31
ldrhcs tmp1, [src], #2
ldrbne src, [src] /* Src is dead, use as a scratch. */
strhcs tmp1, [dst], #2
strbne src, [dst]
bx lr
.Lcpy_not_short:
/* At least 64 bytes to copy, but don't know the alignment yet. */
str tmp2, [sp, #-FRAME_SIZE]! /* Spill tmp2 (r10), claim the frame.  */
and tmp2, src, #7
and tmp1, dst, #7
cmp tmp1, tmp2
bne .Lcpy_notaligned
#ifdef USE_VFP
/* Magic dust alert! Force VFP on Cortex-A9. Experiments show
that the FP pipeline is much better at streaming loads and
stores. This is outside the critical loop. */
vmov.f32 s0, s0
#endif
/* SRC and DST have the same mutual 32-bit alignment, but we may
still need to pre-copy some bytes to get to natural alignment.
We bring DST into full 64-bit alignment. */
lsls tmp2, dst, #29 /* Bits 2:0 of dst -> N/C and Z.  */
beq 1f
rsbs tmp2, tmp2, #0
sub count, count, tmp2, lsr #29
ldrmi tmp1, [src], #4
strmi tmp1, [dst], #4
lsls tmp2, tmp2, #2
ldrhcs tmp1, [src], #2
ldrbne tmp2, [src], #1
strhcs tmp1, [dst], #2
strbne tmp2, [dst], #1
1:
subs tmp2, count, #64 /* Use tmp2 for count. */
blt .Ltail63aligned
cmp tmp2, #512
bge .Lcpy_body_long
.Lcpy_body_medium: /* Count in tmp2. */
/* Mutually aligned, 64..511 bytes: plain 64-byte-per-iteration loop.  */
#ifdef USE_VFP
1:
vldr d0, [src, #0]
subs tmp2, tmp2, #64
vldr d1, [src, #8]
vstr d0, [dst, #0]
vldr d0, [src, #16]
vstr d1, [dst, #8]
vldr d1, [src, #24]
vstr d0, [dst, #16]
vldr d0, [src, #32]
vstr d1, [dst, #24]
vldr d1, [src, #40]
vstr d0, [dst, #32]
vldr d0, [src, #48]
vstr d1, [dst, #40]
vldr d1, [src, #56]
vstr d0, [dst, #48]
add src, src, #64
vstr d1, [dst, #56]
add dst, dst, #64
bge 1b
tst tmp2, #0x3f
beq .Ldone
.Ltail63aligned: /* Count in tmp2. */
and tmp1, tmp2, #0x38
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1 /* Computed jump into the ladder below.  */
vldr d0, [src, #-56] /* 14 words to go. */
vstr d0, [dst, #-56]
vldr d0, [src, #-48] /* 12 words to go. */
vstr d0, [dst, #-48]
vldr d0, [src, #-40] /* 10 words to go. */
vstr d0, [dst, #-40]
vldr d0, [src, #-32] /* 8 words to go. */
vstr d0, [dst, #-32]
vldr d0, [src, #-24] /* 6 words to go. */
vstr d0, [dst, #-24]
vldr d0, [src, #-16] /* 4 words to go. */
vstr d0, [dst, #-16]
vldr d0, [src, #-8] /* 2 words to go. */
vstr d0, [dst, #-8]
#else
sub src, src, #8
sub dst, dst, #8
1:
ldrd A_l, A_h, [src, #8]
strd A_l, A_h, [dst, #8]
ldrd A_l, A_h, [src, #16]
strd A_l, A_h, [dst, #16]
ldrd A_l, A_h, [src, #24]
strd A_l, A_h, [dst, #24]
ldrd A_l, A_h, [src, #32]
strd A_l, A_h, [dst, #32]
ldrd A_l, A_h, [src, #40]
strd A_l, A_h, [dst, #40]
ldrd A_l, A_h, [src, #48]
strd A_l, A_h, [dst, #48]
ldrd A_l, A_h, [src, #56]
strd A_l, A_h, [dst, #56]
ldrd A_l, A_h, [src, #64]!
strd A_l, A_h, [dst, #64]!
subs tmp2, tmp2, #64
bge 1b
tst tmp2, #0x3f
bne 1f
ldr tmp2,[sp], #FRAME_SIZE /* Nothing left: restore r10, return.  */
bx lr
1:
add src, src, #8
add dst, dst, #8
.Ltail63aligned: /* Count in tmp2. */
/* Copy up to 7 d-words of data. Similar to Ltail63unaligned, but
we know that the src and dest are 32-bit aligned so we can use
LDRD/STRD to improve efficiency. */
/* TMP2 is now negative, but we don't care about that. The bottom
six bits still tell us how many bytes are left to copy. */
and tmp1, tmp2, #0x38
add dst, dst, tmp1
add src, src, tmp1
rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
add pc, pc, tmp1 /* Computed jump into the ladder below.  */
ldrd A_l, A_h, [src, #-56] /* 14 words to go. */
strd A_l, A_h, [dst, #-56]
ldrd A_l, A_h, [src, #-48] /* 12 words to go. */
strd A_l, A_h, [dst, #-48]
ldrd A_l, A_h, [src, #-40] /* 10 words to go. */
strd A_l, A_h, [dst, #-40]
ldrd A_l, A_h, [src, #-32] /* 8 words to go. */
strd A_l, A_h, [dst, #-32]
ldrd A_l, A_h, [src, #-24] /* 6 words to go. */
strd A_l, A_h, [dst, #-24]
ldrd A_l, A_h, [src, #-16] /* 4 words to go. */
strd A_l, A_h, [dst, #-16]
ldrd A_l, A_h, [src, #-8] /* 2 words to go. */
strd A_l, A_h, [dst, #-8]
#endif
tst tmp2, #4
ldrne tmp1, [src], #4
strne tmp1, [dst], #4
lsls tmp2, tmp2, #31 /* Count (tmp2) now dead. */
ldrhcs tmp1, [src], #2
ldrbne tmp2, [src]
strhcs tmp1, [dst], #2
strbne tmp2, [dst]
.Ldone:
ldr tmp2, [sp], #FRAME_SIZE /* Restore r10, release the frame.  */
bx lr
.Lcpy_body_long: /* Count in tmp2. */
/* Long copy. We know that there's at least (prefetch_lines * 64)
bytes to go. */
#ifdef USE_VFP
/* Don't use PLD. Instead, read some data in advance of the current
copy position into a register. This should act like a PLD
operation but we won't have to repeat the transfer. */
vldr d3, [src, #0]
vldr d4, [src, #64]
vldr d5, [src, #128]
vldr d6, [src, #192]
vldr d7, [src, #256]
vldr d0, [src, #8]
vldr d1, [src, #16]
vldr d2, [src, #24]
add src, src, #32
subs tmp2, tmp2, #prefetch_lines * 64 * 2
blt 2f
1:
cpy_line_vfp d3, 0
cpy_line_vfp d4, 64
cpy_line_vfp d5, 128
add dst, dst, #3 * 64
add src, src, #3 * 64
cpy_line_vfp d6, 0
cpy_line_vfp d7, 64
add dst, dst, #2 * 64
add src, src, #2 * 64
subs tmp2, tmp2, #prefetch_lines * 64
bge 1b
2:
/* Drain the software pipeline: last prefetch_lines lines, no more
   read-ahead.  */
cpy_tail_vfp d3, 0
cpy_tail_vfp d4, 64
cpy_tail_vfp d5, 128
add src, src, #3 * 64
add dst, dst, #3 * 64
cpy_tail_vfp d6, 0
vstr d7, [dst, #64]
vldr d7, [src, #64]
vstr d0, [dst, #64 + 8]
vldr d0, [src, #64 + 8]
vstr d1, [dst, #64 + 16]
vldr d1, [src, #64 + 16]
vstr d2, [dst, #64 + 24]
vldr d2, [src, #64 + 24]
vstr d7, [dst, #64 + 32]
add src, src, #96
vstr d0, [dst, #64 + 40]
vstr d1, [dst, #64 + 48]
vstr d2, [dst, #64 + 56]
add dst, dst, #128
add tmp2, tmp2, #prefetch_lines * 64
b .Lcpy_body_medium
#else
/* Long copy. Use an SMS style loop to maximize the I/O
bandwidth of the core. We don't have enough spare registers
to synthesise prefetching, so use PLD operations. */
/* Pre-bias src and dst. */
sub src, src, #8
sub dst, dst, #8
pld [src, #8]
pld [src, #72]
subs tmp2, tmp2, #64
pld [src, #136]
ldrd A_l, A_h, [src, #8]
strd B_l, B_h, [sp, #8] /* Spill callee-saved pairs into the frame.  */
ldrd B_l, B_h, [src, #16]
strd C_l, C_h, [sp, #16]
ldrd C_l, C_h, [src, #24]
strd D_l, D_h, [sp, #24]
pld [src, #200]
ldrd D_l, D_h, [src, #32]!
b 1f
.p2align 6
2:
pld [src, #232]
strd A_l, A_h, [dst, #40]
ldrd A_l, A_h, [src, #40]
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [src, #48]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [src, #56]
strd D_l, D_h, [dst, #64]!
ldrd D_l, D_h, [src, #64]!
subs tmp2, tmp2, #64
1:
strd A_l, A_h, [dst, #8]
ldrd A_l, A_h, [src, #8]
strd B_l, B_h, [dst, #16]
ldrd B_l, B_h, [src, #16]
strd C_l, C_h, [dst, #24]
ldrd C_l, C_h, [src, #24]
strd D_l, D_h, [dst, #32]
ldrd D_l, D_h, [src, #32]
bcs 2b
/* Save the remaining bytes and restore the callee-saved regs. */
strd A_l, A_h, [dst, #40]
add src, src, #40
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [sp, #8]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [sp, #16]
strd D_l, D_h, [dst, #64]
ldrd D_l, D_h, [sp, #24]
add dst, dst, #72
tst tmp2, #0x3f
bne .Ltail63aligned
ldr tmp2, [sp], #FRAME_SIZE
bx lr
#endif
.Lcpy_notaligned:
pld [src]
pld [src, #64]
/* There's at least 64 bytes to copy, but there is no mutual
alignment. */
/* Bring DST to 64-bit alignment. */
lsls tmp2, dst, #29
pld [src, #(2 * 64)]
beq 1f
rsbs tmp2, tmp2, #0
sub count, count, tmp2, lsr #29
ldrmi tmp1, [src], #4
strmi tmp1, [dst], #4
lsls tmp2, tmp2, #2
ldrbne tmp1, [src], #1
ldrhcs tmp2, [src], #2
strbne tmp1, [dst], #1
strhcs tmp2, [dst], #2
1:
pld [src, #(3 * 64)]
subs count, count, #64
ldrmi tmp2, [sp], #FRAME_SIZE /* < 64 left: unspill r10 first.  */
bmi .Ltail63unaligned
pld [src, #(4 * 64)]
#ifdef USE_NEON
/* NEON handles the unaligned loads; stores are to the (aligned)
   destination with an alignment hint.  */
vld1.8 {d0-d3}, [src]!
vld1.8 {d4-d7}, [src]!
subs count, count, #64
bmi 2f
1:
pld [src, #(4 * 64)]
vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
vld1.8 {d0-d3}, [src]!
vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
vld1.8 {d4-d7}, [src]!
subs count, count, #64
bpl 1b
2:
vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
ands count, count, #0x3f
#else
/* Use an SMS style loop to maximize the I/O bandwidth. */
sub src, src, #4
sub dst, dst, #8
subs tmp2, count, #64 /* Use tmp2 for count. */
ldr A_l, [src, #4]
ldr A_h, [src, #8]
strd B_l, B_h, [sp, #8] /* Spill callee-saved pairs into the frame.  */
ldr B_l, [src, #12]
ldr B_h, [src, #16]
strd C_l, C_h, [sp, #16]
ldr C_l, [src, #20]
ldr C_h, [src, #24]
strd D_l, D_h, [sp, #24]
ldr D_l, [src, #28]
ldr D_h, [src, #32]!
b 1f
.p2align 6
2:
pld [src, #(5 * 64) - (32 - 4)]
strd A_l, A_h, [dst, #40]
ldr A_l, [src, #36]
ldr A_h, [src, #40]
strd B_l, B_h, [dst, #48]
ldr B_l, [src, #44]
ldr B_h, [src, #48]
strd C_l, C_h, [dst, #56]
ldr C_l, [src, #52]
ldr C_h, [src, #56]
strd D_l, D_h, [dst, #64]!
ldr D_l, [src, #60]
ldr D_h, [src, #64]!
subs tmp2, tmp2, #64
1:
strd A_l, A_h, [dst, #8]
ldr A_l, [src, #4]
ldr A_h, [src, #8]
strd B_l, B_h, [dst, #16]
ldr B_l, [src, #12]
ldr B_h, [src, #16]
strd C_l, C_h, [dst, #24]
ldr C_l, [src, #20]
ldr C_h, [src, #24]
strd D_l, D_h, [dst, #32]
ldr D_l, [src, #28]
ldr D_h, [src, #32]
bcs 2b
/* Save the remaining bytes and restore the callee-saved regs. */
strd A_l, A_h, [dst, #40]
add src, src, #36
strd B_l, B_h, [dst, #48]
ldrd B_l, B_h, [sp, #8]
strd C_l, C_h, [dst, #56]
ldrd C_l, C_h, [sp, #16]
strd D_l, D_h, [dst, #64]
ldrd D_l, D_h, [sp, #24]
add dst, dst, #72
ands count, tmp2, #0x3f
#endif
ldr tmp2, [sp], #FRAME_SIZE
bne .Ltail63unaligned
bx lr
.size memcpy, . - memcpy
|
4ms/metamodule-plugin-sdk
| 9,453
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv7m.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Very similar to the generic code, but uses Thumb2 as implemented
in ARMv7-M. */
#include "arm_asm.h"
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define data1 r2
#define data2 r3
#define tmp2 r5
#define tmp1 r12
#define syndrome r12 /* Overlaps tmp1 */
.thumb
.syntax unified
def_fn strcmp
.fnstart
.cfi_sections .debug_frame
.cfi_startproc
prologue push_ip=HAVE_PAC_LEAF
eor tmp1, src1, src2
tst tmp1, #3
/* Strings not at same byte offset from a word boundary. */
bne .Lstrcmp_unaligned
ands tmp1, src1, #3
bic src1, src1, #3
bic src2, src2, #3
ldr data1, [src1], #4
it eq
ldreq data2, [src2], #4
beq 4f
/* Although s1 and s2 have identical initial alignment, they are
not currently word aligned. Rather than comparing bytes,
make sure that any bytes fetched from before the addressed
bytes are forced to 0xff. Then they will always compare
equal. */
eor tmp1, tmp1, #3
mvn data2, #MSB
lsl tmp1, tmp1, #3
S2LO tmp1, data2, tmp1
ldr data2, [src2], #4
orr data1, data1, tmp1
orr data2, data2, tmp1
.p2align 2
/* Critical loop. */
4:
sub syndrome, data1, #0x01010101
cmp data1, data2
/* check for any zero bytes in first word */
itttt eq
biceq syndrome, syndrome, data1
tsteq syndrome, #0x80808080
ldreq data1, [src1], #4
ldreq data2, [src2], #4
beq 4b
2:
.cfi_remember_state
/* There's a zero or a different byte in the word */
S2HI result, data1, #24
S2LO data1, data1, #8
cmp result, #1
it cs
cmpcs result, data2, S2HI #24
it eq
S2LOEQ data2, data2, #8
beq 2b
/* On a big-endian machine, RESULT contains the desired byte in bits
0-7; on a little-endian machine they are in bits 24-31. In
both cases the other bits in RESULT are all zero. For DATA2 the
interesting byte is at the other end of the word, but the
other bits are not necessarily zero. We need a signed result
representing the difference in the unsigned bytes, so for the
little-endian case we can't just shift the interesting bits
up. */
#ifdef __ARM_BIG_ENDIAN
sub result, result, data2, lsr #24
#else
and data2, data2, #255
lsrs result, result, #24
subs result, result, data2
#endif
epilogue push_ip=HAVE_PAC_LEAF
#if 0
/* The assembly code below is based on the following algorithm. */
#ifdef __ARM_BIG_ENDIAN
#define RSHIFT <<
#define LSHIFT >>
#else
#define RSHIFT >>
#define LSHIFT <<
#endif
#define body(shift) \
mask = 0xffffffffU RSHIFT shift; \
data1 = *src1++; \
data2 = *src2++; \
do \
{ \
tmp2 = data1 & mask; \
if (__builtin_expect(tmp2 != data2 RSHIFT shift, 0)) \
{ \
data2 RSHIFT= shift; \
break; \
} \
if (__builtin_expect(((data1 - b1) & ~data1) & (b1 << 7), 0)) \
{ \
/* See comment in assembler below re syndrome on big-endian */\
if ((((data1 - b1) & ~data1) & (b1 << 7)) & mask) \
data2 RSHIFT= shift; \
else \
{ \
data2 = *src2; \
tmp2 = data1 RSHIFT (32 - shift); \
data2 = (data2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
} \
break; \
} \
data2 = *src2++; \
tmp2 ^= data1; \
if (__builtin_expect(tmp2 != data2 LSHIFT (32 - shift), 0)) \
{ \
tmp2 = data1 >> (32 - shift); \
data2 = (data2 << (32 - shift)) RSHIFT (32 - shift); \
break; \
} \
data1 = *src1++; \
} while (1)
const unsigned* src1;
const unsigned* src2;
unsigned data1, data2;
unsigned mask;
unsigned shift;
unsigned b1 = 0x01010101;
char c1, c2;
unsigned tmp2;
while (((unsigned) s1) & 3)
{
c1 = *s1++;
c2 = *s2++;
if (c1 == 0 || c1 != c2)
return c1 - (int)c2;
}
src1 = (unsigned*) (((unsigned)s1) & ~3);
src2 = (unsigned*) (((unsigned)s2) & ~3);
tmp2 = ((unsigned) s2) & 3;
if (tmp2 == 1)
{
body(8);
}
else if (tmp2 == 2)
{
body(16);
}
else
{
body (24);
}
do
{
#ifdef __ARM_BIG_ENDIAN
c1 = (char) tmp2 >> 24;
c2 = (char) data2 >> 24;
#else /* not __ARM_BIG_ENDIAN */
c1 = (char) tmp2;
c2 = (char) data2;
#endif /* not __ARM_BIG_ENDIAN */
tmp2 RSHIFT= 8;
data2 RSHIFT= 8;
} while (c1 != 0 && c1 == c2);
return c1 - c2;
#endif /* 0 */
/* First of all, compare bytes until src1(sp1) is word-aligned. */
.Lstrcmp_unaligned:
.cfi_restore_state
tst src1, #3
beq 2f
.cfi_remember_state
ldrb data1, [src1], #1
ldrb data2, [src2], #1
cmp data1, #1
it cs
cmpcs data1, data2
beq .Lstrcmp_unaligned
sub result, data1, data2
epilogue push_ip=HAVE_PAC_LEAF
2:
.cfi_restore_state
stmfd sp!, {r5}
.cfi_adjust_cfa_offset 4
.cfi_rel_offset 5, 0
ldr data1, [src1], #4
and tmp2, src2, #3
bic src2, src2, #3
ldr data2, [src2], #4
cmp tmp2, #2
beq .Loverlap2
bhi .Loverlap1
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
.Loverlap3:
bic tmp2, data1, #MSB
cmp tmp2, data2, S2LO #8
sub syndrome, data1, #0x01010101
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #0xff000000
itt ne
tstne data1, #0x00ff0000
tstne data1, #0x0000ff00
beq .Lstrcmp_done_equal
#else
bics syndrome, syndrome, #0xff000000
bne .Lstrcmp_done_equal
#endif
ldrb data2, [src2]
S2LO tmp2, data1, #24
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #24
#endif
b .Lstrcmp_tail
6:
S2LO tmp2, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
/* Critical inner Loop: Block with 2 bytes initial overlap. */
.p2align 2
.Loverlap2:
S2HI tmp2, data1, #16
sub syndrome, data1, #0x01010101
S2LO tmp2, tmp2, #16
bic syndrome, syndrome, data1
cmp tmp2, data2, S2LO #16
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst data1, #0xff000000
it ne
tstne data1, #0x00ff0000
beq .Lstrcmp_done_equal
#else
lsls syndrome, syndrome, #16
bne .Lstrcmp_done_equal
#endif
ldrh data2, [src2]
S2LO tmp2, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2HI data2, data2, #16
S2LO tmp2, data1, #16
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
/* Critical inner Loop: Block with 1 byte initial overlap. */
.p2align 2
.Loverlap1:
and tmp2, data1, #LSB
cmp tmp2, data2, S2LO #24
sub syndrome, data1, #0x01010101
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, #0x80808080
it eq
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #LSB
beq .Lstrcmp_done_equal
ldr data2, [src2], #4
6:
S2LO tmp2, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldmfd sp!, {r5}
.cfi_restore 5
.cfi_adjust_cfa_offset -4
epilogue push_ip=HAVE_PAC_LEAF
.Lstrcmp_tail:
.cfi_restore_state
and r2, tmp2, #LSB
and result, data2, #LSB
cmp result, #1
it cs
cmpcs result, r2
itt eq
S2LOEQ tmp2, tmp2, #8
S2LOEQ data2, data2, #8
beq .Lstrcmp_tail
sub result, r2, result
ldmfd sp!, {r5}
.cfi_restore 5
.cfi_adjust_cfa_offset -4
epilogue push_ip=HAVE_PAC_LEAF
.cfi_endproc
.cantunwind
.fnend
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 9,931
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv4.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Basic ARM implementation. This should run on anything except
for ARMv6-M, but there are better implementations for later
revisions of the architecture. This version can support ARMv4T
ARM/Thumb interworking. */
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define data1 r2
#define data2 r3
#define magic1 r4
#define tmp2 r5
#define tmp1 r12
#define syndrome r12 /* Overlaps tmp1 */
/* For armv4t and newer, toolchains will transparently convert
'bx lr' to 'mov pc, lr' if needed. GCC has deprecated support
for anything older than armv4t, but this should handle that
corner case in case anyone needs it anyway */
.macro RETURN
#if __ARM_ARCH <= 4 && __ARM_ARCH_ISA_THUMB == 0
mov pc, lr
#else
bx lr
#endif
.endm
.arm
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
eor tmp1, src1, src2
tst tmp1, #3
/* Strings not at same byte offset from a word boundary. */
bne .Lstrcmp_unaligned
ands tmp1, src1, #3
bic src1, src1, #3
bic src2, src2, #3
ldr data1, [src1], #4
ldreq data2, [src2], #4
beq 1f
/* Although s1 and s2 have identical initial alignment, they are
not currently word aligned. Rather than comparing bytes,
make sure that any bytes fetched from before the addressed
bytes are forced to 0xff. Then they will always compare
equal. */
eor tmp1, tmp1, #3
mvn data2, #MSB
lsl tmp1, tmp1, #3
S2LO tmp1, data2, tmp1
ldr data2, [src2], #4
orr data1, data1, tmp1
orr data2, data2, tmp1
1:
/* Load the 'magic' constant 0x01010101. */
str r4, [sp, #-4]!
.cfi_def_cfa_offset 4
.cfi_offset 4, -4
mov magic1, #1
orr magic1, magic1, magic1, lsl #8
orr magic1, magic1, magic1, lsl #16
.p2align 2
4:
sub syndrome, data1, magic1
cmp data1, data2
/* check for any zero bytes in first word */
biceq syndrome, syndrome, data1
tsteq syndrome, magic1, lsl #7
ldreq data1, [src1], #4
ldreq data2, [src2], #4
beq 4b
2:
/* There's a zero or a different byte in the word */
S2HI result, data1, #24
S2LO data1, data1, #8
cmp result, #1
cmpcs result, data2, S2HI #24
S2LOEQ data2, data2, #8
beq 2b
/* On a big-endian machine, RESULT contains the desired byte in bits
0-7; on a little-endian machine they are in bits 24-31. In
both cases the other bits in RESULT are all zero. For DATA2 the
interesting byte is at the other end of the word, but the
other bits are not necessarily zero. We need a signed result
representing the differnece in the unsigned bytes, so for the
little-endian case we can't just shift the interesting bits
up. */
#ifdef __ARM_BIG_ENDIAN
sub result, result, data2, lsr #24
#else
and data2, data2, #255
rsb result, data2, result, lsr #24
#endif
ldr r4, [sp], #4
.cfi_restore 4
.cfi_def_cfa_offset 0
RETURN
#if 0
/* The assembly code below is based on the following alogrithm. */
#ifdef __ARM_BIG_ENDIAN
#define RSHIFT <<
#define LSHIFT >>
#else
#define RSHIFT >>
#define LSHIFT <<
#endif
#define body(shift) \
mask = 0xffffffffU RSHIFT shift; \
data1 = *src1++; \
data2 = *src2++; \
do \
{ \
tmp2 = data1 & mask; \
if (__builtin_expect(tmp2 != data2 RSHIFT shift, 0)) \
{ \
data2 RSHIFT= shift; \
break; \
} \
if (__builtin_expect(((data1 - b1) & ~data1) & (b1 << 7), 0)) \
{ \
/* See comment in assembler below re syndrome on big-endian */\
if ((((data1 - b1) & ~data1) & (b1 << 7)) & mask) \
data2 RSHIFT= shift; \
else \
{ \
data2 = *src2; \
tmp2 = data1 RSHIFT (32 - shift); \
data2 = (data2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
} \
break; \
} \
data2 = *src2++; \
tmp2 ^= data1; \
if (__builtin_expect(tmp2 != data2 LSHIFT (32 - shift), 0)) \
{ \
tmp2 = data1 >> (32 - shift); \
data2 = (data2 << (32 - shift)) RSHIFT (32 - shift); \
break; \
} \
data1 = *src1++; \
} while (1)
const unsigned* src1;
const unsigned* src2;
unsigned data1, data2;
unsigned mask;
unsigned shift;
unsigned b1 = 0x01010101;
char c1, c2;
unsigned tmp2;
while (((unsigned) s1) & 3)
{
c1 = *s1++;
c2 = *s2++;
if (c1 == 0 || c1 != c2)
return c1 - (int)c2;
}
src1 = (unsigned*) (((unsigned)s1) & ~3);
src2 = (unsigned*) (((unsigned)s2) & ~3);
tmp2 = ((unsigned) s2) & 3;
if (tmp2 == 1)
{
body(8);
}
else if (tmp2 == 2)
{
body(16);
}
else
{
body (24);
}
do
{
#ifdef __ARM_BIG_ENDIAN
c1 = (char) tmp2 >> 24;
c2 = (char) data2 >> 24;
#else /* not __ARM_BIG_ENDIAN */
c1 = (char) tmp2;
c2 = (char) data2;
#endif /* not __ARM_BIG_ENDIAN */
tmp2 RSHIFT= 8;
data2 RSHIFT= 8;
} while (c1 != 0 && c1 == c2);
return c1 - c2;
#endif /* 0 */
/* First of all, compare bytes until src1(sp1) is word-aligned. */
.Lstrcmp_unaligned:
tst src1, #3
beq 2f
ldrb data1, [src1], #1
ldrb data2, [src2], #1
cmp data1, #1
cmpcs data1, data2
beq .Lstrcmp_unaligned
sub result, data1, data2
RETURN
2:
stmfd sp!, {r4, r5}
.cfi_def_cfa_offset 8
.cfi_offset 4, -8
.cfi_offset 5, -4
mov magic1, #1
orr magic1, magic1, magic1, lsl #8
orr magic1, magic1, magic1, lsl #16
ldr data1, [src1], #4
and tmp2, src2, #3
bic src2, src2, #3
ldr data2, [src2], #4
cmp tmp2, #2
beq .Loverlap2
bhi .Loverlap1
/* Critical inner Loop: Block with 3 bytes initial overlap */
.p2align 2
.Loverlap3:
bic tmp2, data1, #MSB
cmp tmp2, data2, S2LO #8
sub syndrome, data1, magic1
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #0xff000000
tstne data1, #0x00ff0000
tstne data1, #0x0000ff00
beq .Lstrcmp_done_equal
#else
bics syndrome, syndrome, #0xff000000
bne .Lstrcmp_done_equal
#endif
ldrb data2, [src2]
S2LO tmp2, data1, #24
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #24
#endif
b .Lstrcmp_tail
6:
S2LO tmp2, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
/* Critical inner Loop: Block with 2 bytes initial overlap. */
.p2align 2
.Loverlap2:
S2HI tmp2, data1, #16
sub syndrome, data1, magic1
S2LO tmp2, tmp2, #16
bic syndrome, syndrome, data1
cmp tmp2, data2, S2LO #16
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
5:
#ifdef __ARM_BIG_ENDIAN
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00 */
tst data1, #0xff000000
tstne data1, #0x00ff0000
beq .Lstrcmp_done_equal
#else
lsls syndrome, syndrome, #16
bne .Lstrcmp_done_equal
#endif
ldrh data2, [src2]
S2LO tmp2, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2HI data2, data2, #16
S2LO tmp2, data1, #16
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
/* Critical inner Loop: Block with 1 byte initial overlap. */
.p2align 2
.Loverlap1:
and tmp2, data1, #LSB
cmp tmp2, data2, S2LO #24
sub syndrome, data1, magic1
bic syndrome, syndrome, data1
bne 4f
ands syndrome, syndrome, magic1, lsl #7
ldreq data2, [src2], #4
bne 5f
eor tmp2, tmp2, data1
cmp tmp2, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
/* The syndrome value may contain false ones if the string ends
with the bytes 0x01 0x00. */
tst data1, #LSB
beq .Lstrcmp_done_equal
ldr data2, [src2], #4
6:
S2LO tmp2, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldmfd sp!, {r4, r5}
.cfi_restore 4
.cfi_restore 5
.cfi_def_cfa_offset 0
RETURN
.Lstrcmp_tail:
.cfi_restore_state
and r2, tmp2, #LSB
and result, data2, #LSB
cmp result, #1
cmpcs result, r2
S2LOEQ tmp2, tmp2, #8
S2LOEQ data2, data2, #8
beq .Lstrcmp_tail
sub result, r2, result
ldmfd sp!, {r4, r5}
.cfi_restore 4
.cfi_restore 5
.cfi_def_cfa_offset 0
RETURN
.cfi_endproc
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 2,147
|
plugin-libc/newlib/libc/machine/arm/aeabi_memmove-thumb2.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm_asm.h"
.thumb
.syntax unified
.global __aeabi_memmove
.type __aeabi_memmove, %function
ASM_ALIAS __aeabi_memmove4 __aeabi_memmove
ASM_ALIAS __aeabi_memmove8 __aeabi_memmove
__aeabi_memmove:
.fnstart
.cfi_startproc
prologue 4
cmp r0, r1
bls 3f
adds r3, r1, r2
cmp r0, r3
bcs 3f
adds r1, r0, r2
cbz r2, 2f
subs r2, r3, r2
1:
ldrb r4, [r3, #-1]!
cmp r2, r3
strb r4, [r1, #-1]!
bne 1b
2:
.cfi_remember_state
epilogue 4
3:
.cfi_restore_state
cmp r2, #0
beq 2b
add r2, r2, r1
subs r3, r0, #1
4:
ldrb r4, [r1], #1
cmp r2, r1
strb r4, [r3, #1]!
bne 4b
epilogue 4
.cfi_endproc
.cantunwind
.fnend
.size __aeabi_memmove, . - __aeabi_memmove
|
4ms/metamodule-plugin-sdk
| 10,403
|
plugin-libc/newlib/libc/machine/arm/aeabi_memcpy-armv7a.S
|
/*
* Copyright (c) 2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "../../../../include/arm-acle-compat.h"
/* NOTE: This ifdef MUST match the one in aeabi_memcpy.c. */
#if defined (__ARM_ARCH_7A__) && defined (__ARM_FEATURE_UNALIGNED) && \
(defined (__ARM_NEON__) || !defined (__SOFTFP__))
.syntax unified
.global __aeabi_memcpy
.type __aeabi_memcpy, %function
__aeabi_memcpy:
/* Assumes that n >= 0, and dst, src are valid pointers.
If there is at least 8 bytes to copy, use LDRD/STRD.
If src and dst are misaligned with different offsets,
first copy byte by byte until dst is aligned,
and then copy using LDRD/STRD and shift if needed.
When less than 8 left, copy a word and then byte by byte. */
/* Save registers (r0 holds the return value):
optimized push {r0, r4, r5, lr}.
To try and improve performance, stack layout changed,
i.e., not keeping the stack looking like users expect
(highest numbered register at highest address). */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Get copying of tiny blocks out of the way first. */
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
/* Check word alignment. */
ands ip, r0, #3 /* ip = last 2 bits of dst. */
bne dst_not_word_aligned /* If dst is not word-aligned. */
/* Get here if dst is word-aligned. */
ands ip, r1, #3 /* ip = last 2 bits of src. */
bne src_not_word_aligned /* If src is not word-aligned. */
word_aligned:
/* Get here if source and dst both are word-aligned.
The number of bytes remaining to copy is r2+4. */
/* Is there is at least 64 bytes to copy? */
subs r2, r2, #60
blt copy_less_than_64 /* If r2 + 4 < 64. */
/* First, align the destination buffer to 8-bytes,
to make sure double loads and stores don't cross cache line boundary,
as they are then more expensive even if the data is in the cache
(require two load/store issue cycles instead of one).
If only one of the buffers is not 8-bytes aligned,
then it's more important to align dst than src,
because there is more penalty for stores
than loads that cross cacheline boundary.
This check and realignment are only worth doing
if there is a lot to copy. */
/* Get here if dst is word aligned,
i.e., the 2 least significant bits are 0.
If dst is not 2w aligned (i.e., the 3rd bit is not set in dst),
then copy 1 word (4 bytes). */
ands r3, r0, #4
beq two_word_aligned /* If dst already two-word aligned. */
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #4
blt copy_less_than_64
two_word_aligned:
/* TODO: Align to cacheline (useful for PLD optimization). */
/* Every loop iteration copies 64 bytes. */
1:
.irp offset, #0, #8, #16, #24, #32, #40, #48, #56
ldrd r4, r5, [r1, \offset]
strd r4, r5, [r0, \offset]
.endr
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 1b /* If there is more to copy. */
copy_less_than_64:
/* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
Restore the count if there is more than 7 bytes to copy. */
adds r2, r2, #56
blt copy_less_than_8
/* Copy 8 bytes at a time. */
2:
ldrd r4, r5, [r1], #8
strd r4, r5, [r0], #8
subs r2, r2, #8
bge 2b /* If there is more to copy. */
copy_less_than_8:
/* Get here if less than 8 bytes to copy, -8 <= r2 < 0.
Check if there is more to copy. */
cmn r2, #8
beq return /* If r2 + 8 == 0. */
/* Restore the count if there is more than 3 bytes to copy. */
adds r2, r2, #4
blt copy_less_than_4
/* Copy 4 bytes. */
ldr r3, [r1], #4
str r3, [r0], #4
copy_less_than_4:
/* Get here if less than 4 bytes to copy, -4 <= r2 < 0. */
/* Restore the count, check if there is more to copy. */
adds r2, r2, #4
beq return /* If r2 == 0. */
/* Get here with r2 is in {1,2,3}={01,10,11}. */
/* Logical shift left r2, insert 0s, update flags. */
lsls r2, r2, #31
/* Copy byte by byte.
Condition ne means the last bit of r2 is 0.
Condition cs means the second to last bit of r2 is set,
i.e., r2 is 1 or 3. */
itt ne
ldrbne r3, [r1], #1
strbne r3, [r0], #1
itttt cs
ldrbcs r4, [r1], #1
ldrbcs r5, [r1]
strbcs r4, [r0], #1
strbcs r5, [r0]
return:
/* Restore registers: optimized pop {r0, r4, r5, pc} */
ldrd r4, r5, [sp], #8
pop {r0, pc} /* This is the only return point of memcpy. */
dst_not_word_aligned:
/* Get here when dst is not aligned and ip has the last 2 bits of dst,
i.e., ip is the offset of dst from word.
The number of bytes that remains to copy is r2 + 4,
i.e., there are at least 4 bytes to copy.
Write a partial word (0 to 3 bytes), such that dst becomes
word-aligned. */
/* If dst is at ip bytes offset from a word (with 0 < ip < 4),
then there are (4 - ip) bytes to fill up to align dst to the next
word. */
rsb ip, ip, #4 /* ip = #4 - ip. */
cmp ip, #2
/* Copy byte by byte with conditionals. */
itt gt
ldrbgt r3, [r1], #1
strbgt r3, [r0], #1
itt ge
ldrbge r4, [r1], #1
strbge r4, [r0], #1
ldrb lr, [r1], #1
strb lr, [r0], #1
/* Update the count.
ip holds the number of bytes we have just copied. */
subs r2, r2, ip /* r2 = r2 - ip. */
blt copy_less_than_4 /* If r2 < ip. */
/* Get here if there are more than 4 bytes to copy.
Check if src is aligned. If beforehand src and dst were not word
aligned but congruent (same offset), then now they are both
word-aligned, and we can copy the rest efficiently (without
shifting). */
ands ip, r1, #3 /* ip = last 2 bits of src. */
beq word_aligned /* If r1 is word-aligned. */
src_not_word_aligned:
/* Get here when src is not word-aligned, but dst is word-aligned.
The number of bytes that remains to copy is r2+4. */
/* Copy word by word using LDR when alignment can be done in hardware,
i.e., SCTLR.A is set, supporting unaligned access in LDR and STR. */
subs r2, r2, #60
blt 8f
7:
/* Copy 64 bytes in every loop iteration. */
.irp offset, #0, #4, #8, #12, #16, #20, #24, #28, #32, #36, #40, #44, #48, #52, #56, #60
ldr r3, [r1, \offset]
str r3, [r0, \offset]
.endr
add r0, r0, #64
add r1, r1, #64
subs r2, r2, #64
bge 7b
8:
/* Get here if less than 64 bytes to copy, -64 <= r2 < 0.
Check if there is more than 3 bytes to copy. */
adds r2, r2, #60
blt copy_less_than_4
9:
/* Get here if there is less than 64 but at least 4 bytes to copy,
where the number of bytes to copy is r2+4. */
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, r2, #4
bge 9b
b copy_less_than_4
.syntax unified
.global __aeabi_memcpy4
.type __aeabi_memcpy4, %function
__aeabi_memcpy4:
/* Assumes that both of its arguments are 4-byte aligned. */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
bl word_aligned
.syntax unified
.global __aeabi_memcpy8
.type __aeabi_memcpy8, %function
__aeabi_memcpy8:
/* Assumes that both of its arguments are 8-byte aligned. */
push {r0, lr}
strd r4, r5, [sp, #-8]!
/* Is there at least 4 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_4 /* If n < 4. */
/* Is there at least 8 bytes to copy? */
subs r2, r2, #4
blt copy_less_than_8 /* If n < 8. */
/* Is there at least 64 bytes to copy? */
subs r2, r2, #56
blt copy_less_than_64 /* if n + 8 < 64. */
bl two_word_aligned
#endif
|
4ms/metamodule-plugin-sdk
| 2,695
|
plugin-libc/newlib/libc/machine/arm/aeabi_memset-thumb2.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm_asm.h"
.thumb
.syntax unified
.global __aeabi_memset
.type __aeabi_memset, %function
.fnstart
.cfi_startproc
ASM_ALIAS __aeabi_memset4 __aeabi_memset
ASM_ALIAS __aeabi_memset8 __aeabi_memset
__aeabi_memset:
prologue 4 6
lsls r4, r0, #30
beq 10f
subs r4, r1, #1
cmp r1, #0
beq 9f
uxtb r5, r2
mov r3, r0
b 2f
1:
subs r1, r4, #1
cbz r4, 9f
mov r4, r1
2:
strb r5, [r3], #1
lsls r1, r3, #30
bne 1b
3:
cmp r4, #3
bls 7f
uxtb r5, r2
orr r5, r5, r5, lsl #8
cmp r4, #15
orr r5, r5, r5, lsl #16
bls 5f
mov r6, r4
add r1, r3, #16
4:
subs r6, r6, #16
cmp r6, #15
str r5, [r1, #-16]
str r5, [r1, #-12]
str r5, [r1, #-8]
str r5, [r1, #-4]
add r1, r1, #16
bhi 4b
sub r1, r4, #16
bic r1, r1, #15
and r4, r4, #15
adds r1, r1, #16
cmp r4, #3
add r3, r3, r1
bls 7f
5:
mov r6, r3
mov r1, r4
6:
subs r1, r1, #4
cmp r1, #3
str r5, [r6], #4
bhi 6b
subs r1, r4, #4
bic r1, r1, #3
adds r1, r1, #4
add r3, r3, r1
and r4, r4, #3
7:
cbz r4, 9f
uxtb r2, r2
add r4, r4, r3
8:
strb r2, [r3], #1
cmp r3, r4
bne 8b
9:
.cfi_remember_state
epilogue 4 6
10:
.cfi_restore_state
mov r4, r1
mov r3, r0
b 3b
.cfi_endproc
.cantunwind
.fnend
.size __aeabi_memset, . - __aeabi_memset
|
4ms/metamodule-plugin-sdk
| 2,828
|
plugin-libc/newlib/libc/machine/arm/aeabi_memset-thumb.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm-acle-compat.h"
.thumb
.syntax unified
.global __aeabi_memset
.type __aeabi_memset, %function
ASM_ALIAS __aeabi_memset4 __aeabi_memset
ASM_ALIAS __aeabi_memset8 __aeabi_memset
/* void __aeabi_memset(void *dest, size_t n, int c)
   RTABI memset for Thumb1.  NOTE: the AEABI argument order
   (dest, n, c) differs from ISO C memset(dest, c, n).
   In:  r0 = dest, r1 = n, r2 = c.  No defined return value.
   Strategy: byte-fill to a word boundary, then 16-byte and 4-byte
   word loops with c replicated across the word, then a byte tail.  */
__aeabi_memset:
push {r4, r5, r6, lr}
lsls r3, r0, #30 /* Z set iff dest is word aligned.  */
beq 10f /* Aligned: skip the byte head.  */
subs r4, r1, #1 /* r4 = remaining bytes - 1.  */
cmp r1, #0
beq 9f /* n == 0: nothing to do.  */
lsls r5, r2, #24
lsrs r5, r5, #24 /* r5 = c & 0xff.  */
movs r3, r0 /* r3 = store pointer.  */
movs r6, #3 /* Alignment mask.  */
b 2f
1:
subs r1, r4, #1 /* Consume one byte of the count.  */
cmp r4, #0
beq 9f /* Count exhausted while aligning.  */
movs r4, r1
2:
adds r3, r3, #1
subs r1, r3, #1
strb r5, [r1] /* Store one byte at the old pointer.  */
tst r3, r6
bne 1b /* Loop until word aligned.  */
3:
cmp r4, #3
bls 7f /* < 4 bytes left: byte tail only.  */
movs r5, #255
ands r5, r2
lsls r1, r5, #8
orrs r5, r1
lsls r1, r5, #16
orrs r5, r1 /* r5 = c replicated in all 4 bytes.  */
cmp r4, #15
bls 5f /* < 16 bytes: skip the big loop.  */
movs r6, r4
subs r6, r6, #16
lsrs r6, r6, #4
adds r6, r6, #1
lsls r6, r6, #4 /* r6 = bytes done 16 at a time.  */
movs r1, r3
adds r3, r3, r6 /* r3 = end of the 16-byte region.  */
4:
str r5, [r1] /* Unrolled x4 word stores.  */
str r5, [r1, #4]
str r5, [r1, #8]
str r5, [r1, #12]
adds r1, r1, #16
cmp r3, r1
bne 4b
movs r1, #15
ands r4, r1 /* r4 = leftover after 16-byte loop.  */
cmp r4, #3
bls 7f
5:
subs r6, r4, #4
lsrs r6, r6, #2
adds r6, r6, #1
lsls r6, r6, #2 /* r6 = bytes done one word at a time.  */
movs r1, r3
adds r3, r3, r6
6:
stmia r1!, {r5} /* One word store, post-increment.  */
cmp r3, r1
bne 6b
movs r1, #3
ands r4, r1 /* r4 = 0..3 trailing bytes.  */
7:
cmp r4, #0
beq 9f
lsls r2, r2, #24
lsrs r2, r2, #24 /* r2 = c & 0xff for the tail.  */
adds r4, r3, r4 /* r4 = end pointer.  */
8:
strb r2, [r3]
adds r3, r3, #1
cmp r4, r3
bne 8b
9:
#if __ARM_ARCH >= 5
pop {r4, r5, r6, pc}
#else
/* ARMv4T: pop {pc} cannot interwork, so return via bx.  */
pop {r4, r5, r6}
pop {r1}
bx r1
#endif
10:
movs r3, r0 /* Dest already aligned: go straight to word fill.  */
movs r4, r1
b 3b
.size __aeabi_memset, . - __aeabi_memset
|
4ms/metamodule-plugin-sdk
| 2,028
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv4t.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* This version is only used when we want a very basic Thumb1
implementation or for size, otherwise we use the base ARMv4
version. This is also suitable for ARMv6-M. */
.thumb
.syntax unified
.arch armv4t
.eabi_attribute Tag_also_compatible_with, "\006\013" /* ARMv6-M. */
.eabi_attribute Tag_ARM_ISA_use, 0
/* int strcmp(const char *s1, const char *s2)
   Minimal byte-at-a-time Thumb1 loop.
   In:  r0 = s1, r1 = s2.
   Out: r0 = difference of the first differing bytes (0 if equal).
   Clobbers r2, r3.  */
def_fn strcmp
.cfi_sections .debug_frame
.cfi_startproc
1:
ldrb r2, [r0] /* Load one byte from each string.  */
ldrb r3, [r1]
cmp r2, #0
beq 2f /* End of s1: compute the result.  */
adds r0, r0, #1
adds r1, r1, #1
cmp r2, r3
beq 1b /* Bytes equal and non-NUL: keep scanning.  */
2:
subs r0, r2, r3 /* result = *s1 - *s2.  */
bx lr
.cfi_endproc
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 2,117
|
plugin-libc/newlib/libc/machine/arm/aeabi_memmove-thumb.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "arm-acle-compat.h"
.thumb
.syntax unified
.global __aeabi_memmove
.type __aeabi_memmove, %function
ASM_ALIAS __aeabi_memmove4 __aeabi_memmove
ASM_ALIAS __aeabi_memmove8 __aeabi_memmove
/* void __aeabi_memmove(void *dest, const void *src, size_t n)
   RTABI memmove for Thumb1: byte-wise copy that handles overlap by
   choosing the copy direction.
   In: r0 = dest, r1 = src, r2 = n.  No defined return value.  */
__aeabi_memmove:
push {r4, lr}
cmp r0, r1
bls 3f /* dest <= src: forward copy is safe.  */
adds r4, r1, r2 /* r4 = src + n.  */
cmp r0, r4
bcs 3f /* Regions disjoint: forward copy is safe.  */
/* Overlapping with dest > src: copy backwards.  */
subs r3, r2, #1 /* r3 = highest byte index.  */
cmp r2, #0
beq 2f
subs r2, r4, r2 /* r2 = src (recovered from src + n).  */
1:
ldrb r1, [r2, r3] /* Copy from the top byte down.  */
strb r1, [r0, r3]
subs r3, r3, #1
bcs 1b /* Loop until the index wraps below 0.  */
2:
#if __ARM_ARCH >= 5
pop {r4, pc}
#else
/* ARMv4T: return via bx for interworking.  */
pop {r4}
pop {r1}
bx r1
#endif
3:
movs r3, #0 /* Forward copy, index counts up from 0.  */
cmp r2, #0
beq 2b
4:
ldrb r4, [r1, r3]
strb r4, [r0, r3]
adds r3, r3, #1
cmp r2, r3
bne 4b
b 2b
.size __aeabi_memmove, . - __aeabi_memmove
|
4ms/metamodule-plugin-sdk
| 1,869
|
plugin-libc/newlib/libc/machine/arm/strcmp-arm-tiny.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Tiny version of strcmp in ARM state. Used only when optimizing
for size. Also supports Thumb-2. */
#include "arm_asm.h"
.syntax unified
/* int strcmp(const char *s1, const char *s2)
   Size-optimized byte loop; ARM state, also assembles as Thumb-2.
   In:  r0 = s1, r1 = s2.
   Out: r0 = *s1 - *s2 at the first difference (0 if equal).
   Clobbers r2, r3.  */
def_fn strcmp
.fnstart
.cfi_sections .debug_frame
.cfi_startproc
prologue
1:
ldrb r2, [r0], #1 /* Load and advance both strings.  */
ldrb r3, [r1], #1
cmp r2, #1 /* C set iff r2 != 0 (unsigned r2 >= 1).  */
it cs
cmpcs r2, r3 /* ...then compare the two bytes.  */
beq 1b /* Loop while r2 != 0 and r2 == r3.  */
2:
subs r0, r2, r3
epilogue
.cfi_endproc
.cantunwind
.fnend
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 1,881
|
plugin-libc/newlib/libc/machine/arm/aeabi_memmove-soft.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Define \new as an alias of \old.  Uses .thumb_set when assembling
   Thumb so the alias keeps the Thumb function bit (interworking
   addresses stay correct).  */
.macro ASM_ALIAS new old
.global \new
.type \new, %function
#if defined (__thumb__)
.thumb_set \new, \old
#else
.set \new, \old
#endif
.endm
/* NOTE: This ifdef MUST match the one in aeabi_memmove.c. */
#if !defined (__SOFTFP__)
# if defined (__thumb2__)
# include "aeabi_memmove-thumb2.S"
# elif defined (__thumb__)
# include "aeabi_memmove-thumb.S"
# else
# include "aeabi_memmove-arm.S"
# endif
#endif
|
4ms/metamodule-plugin-sdk
| 12,444
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv7.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Implementation of strcmp for ARMv7 when DSP instructions are
available. Use ldrd to support wider loads, provided the data
is sufficiently aligned. Use saturating arithmetic to optimize
the compares. */
/* Build Options:
STRCMP_NO_PRECHECK: Don't run a quick pre-check of the first
byte in the string. If comparing completely random strings
the pre-check will save time, since there is a very high
probability of a mismatch in the first character: we save
significant overhead if this is the common case. However,
if strings are likely to be identical (eg because we're
verifying a hit in a hash table), then this check is largely
redundant. */
/* This version uses Thumb-2 code. */
.thumb
.syntax unified
#include "arm_asm.h"
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define tmp1 r4
#define tmp2 r5
#define const_m1 r12
/* Additional internal variables for 64-bit aligned data. */
#define data1a r2
#define data1b r3
#define data2a r6
#define data2b r7
#define syndrome_a tmp1
#define syndrome_b tmp2
/* Additional internal variables for 32-bit aligned data. */
#define data1 r2
#define data2 r3
#define syndrome tmp2
/* Macro to compute and return the result value for word-aligned
cases. */
/* Inputs: \synd = per-byte difference/NUL syndrome, \d1/\d2 = the two
   data words that produced it; \restore_r6 = 1 if r6/r7 were saved.
   Isolates the first differing (or NUL) byte, computes the strcmp
   result in r0, restores the saved registers and returns.  */
.macro strcmp_epilogue_aligned synd d1 d2 restore_r6
#ifdef __ARM_BIG_ENDIAN
/* If data1 contains a zero byte, then syndrome will contain a 1 in
bit 7 of that byte. Otherwise, the highest set bit in the
syndrome will highlight the first different bit. It is therefore
sufficient to extract the eight bits starting with the syndrome
bit. */
clz tmp1, \synd
lsl r1, \d2, tmp1
.if \restore_r6
ldrd r6, r7, [sp, #8]
.endif
.cfi_restore 6
.cfi_restore 7
lsl \d1, \d1, tmp1
.cfi_remember_state
lsr result, \d1, #24
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
.cfi_adjust_cfa_offset -16
sub result, result, r1, lsr #24
epilogue push_ip=HAVE_PAC_LEAF
#else
/* To use the big-endian trick we'd have to reverse all three words.
that's slower than this approach. */
rev \synd, \synd
clz tmp1, \synd
bic tmp1, tmp1, #7 /* Round down to the start of the byte.  */
lsr r1, \d2, tmp1
.cfi_remember_state
.if \restore_r6
ldrd r6, r7, [sp, #8]
.endif
.cfi_restore 6
.cfi_restore 7
lsr \d1, \d1, tmp1
and result, \d1, #255
and r1, r1, #255
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
.cfi_adjust_cfa_offset -16
sub result, result, r1
epilogue push_ip=HAVE_PAC_LEAF
#endif
.endm
.text
.p2align 5
/* int strcmp(const char *s1, const char *s2)
   ARMv7 (DSP) Thumb-2 implementation: compares 8 bytes per iteration
   when both pointers share 8-byte alignment, using uadd8/sel to build
   a per-byte NUL/difference syndrome; falls back to 4-byte and
   byte-overlap paths for lesser alignment.
   In:  r0 = s1 (src1), r1 = s2 (src2).
   Out: r0 = byte difference at first mismatch (0 if equal).
   Saves/restores r4-r7; clobbers r2, r3, r12, flags.  */
def_fn strcmp
.fnstart
.cfi_sections .debug_frame
.cfi_startproc
prologue push_ip=HAVE_PAC_LEAF
#ifndef STRCMP_NO_PRECHECK
ldrb r2, [src1]
ldrb r3, [src2]
cmp r2, #1 /* C set iff first byte of src1 is non-NUL.  */
it cs
cmpcs r2, r3
bne .Lfastpath_exit /* Mismatch or NUL in byte 0: exit early.  */
#endif
strd r4, r5, [sp, #-16]!
.cfi_adjust_cfa_offset 16
.cfi_rel_offset 4, 0
.cfi_rel_offset 5, 4
orr tmp1, src1, src2
strd r6, r7, [sp, #8]
.cfi_rel_offset 6, 8
.cfi_rel_offset 7, 12
mvn const_m1, #0
lsl r2, tmp1, #29 /* Z set iff both pointers 8-byte aligned.  */
cbz r2, .Lloop_aligned8
.Lnot_aligned:
eor tmp1, src1, src2
tst tmp1, #7
bne .Lmisaligned8
/* Deal with mutual misalignment by aligning downwards and then
masking off the unwanted loaded data to prevent a difference. */
and tmp1, src1, #7
bic src1, src1, #7
and tmp2, tmp1, #3
bic src2, src2, #7
lsl tmp2, tmp2, #3 /* Bytes -> bits. */
ldrd data1a, data1b, [src1], #16 /* Post-index 16: loop is unrolled x2.  */
tst tmp1, #4
ldrd data2a, data2b, [src2], #16
/* In thumb code we can't use MVN with a register shift, but
we do have ORN. */
S2HI tmp1, const_m1, tmp2
orn data1a, data1a, tmp1 /* Force the skipped head bytes equal.  */
orn data2a, data2a, tmp1
beq .Lstart_realigned8
orn data1b, data1b, tmp1
mov data1a, const_m1
orn data2b, data2b, tmp1
mov data2a, const_m1
b .Lstart_realigned8
/* Unwind the inner loop by a factor of 2, giving 16 bytes per
pass. */
.p2align 5,,12 /* Don't start in the tail bytes of a cache line. */
.p2align 2 /* Always word aligned. */
.Lloop_aligned8:
ldrd data1a, data1b, [src1], #16
ldrd data2a, data2b, [src2], #16
.Lstart_realigned8:
uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */
eor syndrome_a, data1a, data2a
sel syndrome_a, syndrome_a, const_m1
cbnz syndrome_a, .Ldiff_in_a
uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */
eor syndrome_b, data1b, data2b
sel syndrome_b, syndrome_b, const_m1
cbnz syndrome_b, .Ldiff_in_b
/* Second half of the unrolled pair: re-read at offset -8.  */
ldrd data1a, data1b, [src1, #-8]
ldrd data2a, data2b, [src2, #-8]
uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */
eor syndrome_a, data1a, data2a
sel syndrome_a, syndrome_a, const_m1
uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */
eor syndrome_b, data1b, data2b
sel syndrome_b, syndrome_b, const_m1
/* Can't use CBZ for backwards branch. */
orrs syndrome_b, syndrome_b, syndrome_a /* Only need if s_a == 0 */
beq .Lloop_aligned8
.Ldiff_found:
cbnz syndrome_a, .Ldiff_in_a
.Ldiff_in_b:
strcmp_epilogue_aligned syndrome_b, data1b, data2b 1
.Ldiff_in_a:
.cfi_restore_state
strcmp_epilogue_aligned syndrome_a, data1a, data2a 1
.cfi_restore_state
.Lmisaligned8:
tst tmp1, #3
bne .Lmisaligned4
ands tmp1, src1, #3
bne .Lmutual_align4
/* Unrolled by a factor of 2, to reduce the number of post-increment
operations. */
.Lloop_aligned4:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned4:
uadd8 syndrome, data1, const_m1 /* Only need GE bits. */
eor syndrome, data1, data2
sel syndrome, syndrome, const_m1
cbnz syndrome, .Laligned4_done
ldr data1, [src1, #-4]
ldr data2, [src2, #-4]
uadd8 syndrome, data1, const_m1
eor syndrome, data1, data2
sel syndrome, syndrome, const_m1
cmp syndrome, #0
beq .Lloop_aligned4
.Laligned4_done:
strcmp_epilogue_aligned syndrome, data1, data2, 0
.Lmutual_align4:
.cfi_restore_state
/* Deal with mutual misalignment by aligning downwards and then
masking off the unwanted loaded data to prevent a difference. */
lsl tmp1, tmp1, #3 /* Bytes -> bits. */
bic src1, src1, #3
ldr data1, [src1], #8
bic src2, src2, #3
ldr data2, [src2], #8
/* In thumb code we can't use MVN with a register shift, but
we do have ORN. */
S2HI tmp1, const_m1, tmp1
orn data1, data1, tmp1
orn data2, data2, tmp1
b .Lstart_realigned4
.Lmisaligned4:
ands tmp1, src1, #3
beq .Lsrc1_aligned
/* Align src1; byte-compare the head against src2.  */
sub src2, src2, tmp1
bic src1, src1, #3
lsls tmp1, tmp1, #31
ldr data1, [src1], #4
beq .Laligned_m2
bcs .Laligned_m1
#ifdef STRCMP_NO_PRECHECK
ldrb data2, [src2, #1]
uxtb tmp1, data1, ror #BYTE1_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
cbz data2, .Lmisaligned_exit
.Laligned_m2:
ldrb data2, [src2, #2]
uxtb tmp1, data1, ror #BYTE2_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
cbz data2, .Lmisaligned_exit
.Laligned_m1:
ldrb data2, [src2, #3]
uxtb tmp1, data1, ror #BYTE3_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
add src2, src2, #4
cbnz data2, .Lsrc1_aligned
#else /* STRCMP_NO_PRECHECK */
/* If we've done the pre-check, then we don't need to check the
first byte again here. */
ldrb data2, [src2, #2]
uxtb tmp1, data1, ror #BYTE2_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
cbz data2, .Lmisaligned_exit
.Laligned_m2:
ldrb data2, [src2, #3]
uxtb tmp1, data1, ror #BYTE3_OFFSET
subs tmp1, tmp1, data2
bne .Lmisaligned_exit
cbnz data2, .Laligned_m1
#endif
.Lmisaligned_exit:
.cfi_remember_state
mov result, tmp1
ldr r4, [sp], #16
.cfi_restore 4
.cfi_adjust_cfa_offset -16
epilogue push_ip=HAVE_PAC_LEAF
#ifndef STRCMP_NO_PRECHECK
.Lfastpath_exit:
.cfi_restore_state
.cfi_remember_state
sub r0, r2, r3
epilogue push_ip=HAVE_PAC_LEAF
.Laligned_m1:
.cfi_restore_state
.cfi_remember_state
add src2, src2, #4
#endif
.Lsrc1_aligned:
.cfi_restore_state
/* src1 is word aligned, but src2 has no common alignment
with it. */
ldr data1, [src1], #4
lsls tmp1, src2, #31 /* C=src2[1], Z=src2[0]. */
bic src2, src2, #3
ldr data2, [src2], #4
bhi .Loverlap1 /* C=1, Z=0 => src2[1:0] = 0b11. */
bcs .Loverlap2 /* C=1, Z=1 => src2[1:0] = 0b10. */
/* (overlap3) C=0, Z=0 => src2[1:0] = 0b01. */
.Loverlap3:
bic tmp1, data1, #MSB
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #8
sel syndrome, syndrome, const_m1
bne 4f
cbnz syndrome, 5f
ldr data2, [src2], #4
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
bics syndrome, syndrome, #MSB
bne .Lstrcmp_done_equal
/* We can only get here if the MSB of data1 contains 0, so
fast-path the exit. */
ldrb result, [src2]
.cfi_remember_state
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 Not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
.cfi_adjust_cfa_offset -16
neg result, result
epilogue push_ip=HAVE_PAC_LEAF
6:
.cfi_restore_state
S2LO data1, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
.p2align 5,,12 /* Ensure at least 3 instructions in cache line. */
.Loverlap2:
and tmp1, data1, const_m1, S2LO #16
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #16
sel syndrome, syndrome, const_m1
bne 4f
cbnz syndrome, 5f
ldr data2, [src2], #4
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
5:
ands syndrome, syndrome, const_m1, S2LO #16
bne .Lstrcmp_done_equal
ldrh data2, [src2]
S2LO data1, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2LO data1, data1, #16
and data2, data2, const_m1, S2LO #16
b .Lstrcmp_tail
.p2align 5,,12 /* Ensure at least 3 instructions in cache line. */
.Loverlap1:
and tmp1, data1, #LSB
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #24
sel syndrome, syndrome, const_m1
bne 4f
cbnz syndrome, 5f
ldr data2, [src2], #4
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
tst syndrome, #LSB
bne .Lstrcmp_done_equal
ldr data2, [src2]
6:
S2LO data1, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
.cfi_adjust_cfa_offset -16
epilogue push_ip=HAVE_PAC_LEAF
.Lstrcmp_tail:
.cfi_restore_state
#ifndef __ARM_BIG_ENDIAN
rev data1, data1
rev data2, data2
/* Now everything looks big-endian... */
#endif
uadd8 tmp1, data1, const_m1
eor tmp1, data1, data2
sel syndrome, tmp1, const_m1
clz tmp1, syndrome
lsl data1, data1, tmp1 /* Shift the differing byte to the top.  */
lsl data2, data2, tmp1
lsr result, data1, #24
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
.cfi_adjust_cfa_offset -16
sub result, result, data2, lsr #24
epilogue push_ip=HAVE_PAC_LEAF
.cfi_endproc
.cantunwind
.fnend
.size strcmp, . - strcmp
|
4ms/metamodule-plugin-sdk
| 8,375
|
plugin-libc/newlib/libc/machine/arm/memcpy-armv7m.S
|
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* This memcpy routine is optimised for Cortex-M3/M4 cores with/without
unaligned access.
If compiled with GCC, this file should be enclosed within following
pre-processing check:
if defined (__ARM_ARCH_7M__) || defined (__ARM_ARCH_7EM__)
Prototype: void *memcpy (void *dst, const void *src, size_t count);
The job will be done in 5 steps.
Step 1: Align src/dest pointers, copy mis-aligned if fail to align both
Step 2: Repeatedly copy big block size of __OPT_BIG_BLOCK_SIZE
Step 3: Repeatedly copy big block size of __OPT_MID_BLOCK_SIZE
Step 4: Copy word by word
Step 5: Copy byte-to-byte
Tunable options:
__OPT_BIG_BLOCK_SIZE: Size of big block in words. Default to 64.
__OPT_MID_BLOCK_SIZE: Size of big block in words. Default to 16.
*/
#include "arm_asm.h"
#ifndef __OPT_BIG_BLOCK_SIZE
#define __OPT_BIG_BLOCK_SIZE (4 * 16)
#endif
#ifndef __OPT_MID_BLOCK_SIZE
#define __OPT_MID_BLOCK_SIZE (4 * 4)
#endif
#if __OPT_BIG_BLOCK_SIZE == 16
#define BEGIN_UNROLL_BIG_BLOCK \
.irp offset, 0,4,8,12
#elif __OPT_BIG_BLOCK_SIZE == 32
#define BEGIN_UNROLL_BIG_BLOCK \
.irp offset, 0,4,8,12,16,20,24,28
#elif __OPT_BIG_BLOCK_SIZE == 64
#define BEGIN_UNROLL_BIG_BLOCK \
.irp offset, 0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60
#else
#error "Illegal __OPT_BIG_BLOCK_SIZE"
#endif
#if __OPT_MID_BLOCK_SIZE == 8
#define BEGIN_UNROLL_MID_BLOCK \
.irp offset, 0,4
#elif __OPT_MID_BLOCK_SIZE == 16
#define BEGIN_UNROLL_MID_BLOCK \
.irp offset, 0,4,8,12
#else
#error "Illegal __OPT_MID_BLOCK_SIZE"
#endif
#define END_UNROLL .endr
.syntax unified
.text
.align 2
.global memcpy
.thumb
.thumb_func
.fnstart
.cfi_startproc
.type memcpy, %function
/* void *memcpy(void *dst, const void *src, size_t count)
   Cortex-M3/M4 copy: align, then big-block / mid-block / word /
   byte loops (see the file-header comment for the 5 steps).
   In: r0 = dst, r1 = src, r2 = count.  Out: r0 = original dst.  */
memcpy:
@ r0: dst
@ r1: src
@ r2: len
#ifdef __ARM_FEATURE_UNALIGNED
/* In case of UNALIGNED access supported, ip is not used in
function body. */
prologue push_ip=HAVE_PAC_LEAF
mov ip, r0 /* Preserve dst for the return value.  */
#else
prologue 0 push_ip=HAVE_PAC_LEAF
#endif /* __ARM_FEATURE_UNALIGNED */
orr r3, r1, r0
ands r3, r3, #3 /* Z iff both src and dst word aligned.  */
bne .Lmisaligned_copy
.Lbig_block:
subs r2, __OPT_BIG_BLOCK_SIZE
blo .Lmid_block
/* Kernel loop for big block copy */
.align 2
.Lbig_block_loop:
BEGIN_UNROLL_BIG_BLOCK
#ifdef __ARM_ARCH_7EM__
ldr r3, [r1], #4
str r3, [r0], #4
END_UNROLL
#else /* __ARM_ARCH_7M__ */
ldr r3, [r1, \offset]
str r3, [r0, \offset]
END_UNROLL
adds r0, __OPT_BIG_BLOCK_SIZE
adds r1, __OPT_BIG_BLOCK_SIZE
#endif
subs r2, __OPT_BIG_BLOCK_SIZE
bhs .Lbig_block_loop
.Lmid_block:
adds r2, __OPT_BIG_BLOCK_SIZE - __OPT_MID_BLOCK_SIZE /* Rebias the counter.  */
blo .Lcopy_word_by_word
/* Kernel loop for mid-block copy */
.align 2
.Lmid_block_loop:
BEGIN_UNROLL_MID_BLOCK
#ifdef __ARM_ARCH_7EM__
ldr r3, [r1], #4
str r3, [r0], #4
END_UNROLL
#else /* __ARM_ARCH_7M__ */
ldr r3, [r1, \offset]
str r3, [r0, \offset]
END_UNROLL
adds r0, __OPT_MID_BLOCK_SIZE
adds r1, __OPT_MID_BLOCK_SIZE
#endif
subs r2, __OPT_MID_BLOCK_SIZE
bhs .Lmid_block_loop
.Lcopy_word_by_word:
adds r2, __OPT_MID_BLOCK_SIZE - 4
blo .Lcopy_less_than_4
/* Kernel loop for small block copy */
.align 2
.Lcopy_word_by_word_loop:
ldr r3, [r1], #4
str r3, [r0], #4
subs r2, #4
bhs .Lcopy_word_by_word_loop
.Lcopy_less_than_4:
adds r2, #4 /* Undo the bias: r2 = 0..3 tail bytes.  */
beq .Ldone
lsls r2, r2, #31 /* N = bit0 (odd byte), C = bit1 (halfword).  */
itt ne
ldrbne r3, [r1], #1
strbne r3, [r0], #1
bcc .Ldone
#ifdef __ARM_FEATURE_UNALIGNED
ldrh r3, [r1]
strh r3, [r0]
#else
ldrb r3, [r1]
strb r3, [r0]
ldrb r3, [r1, #1]
strb r3, [r0, #1]
#endif /* __ARM_FEATURE_UNALIGNED */
.Ldone:
.cfi_remember_state
#ifdef __ARM_FEATURE_UNALIGNED
mov r0, ip /* Return the original dst.  */
epilogue push_ip=HAVE_PAC_LEAF
#else
epilogue 0 push_ip=HAVE_PAC_LEAF
#endif /* __ARM_FEATURE_UNALIGNED */
.align 2
.Lmisaligned_copy:
.cfi_restore_state
#ifdef __ARM_FEATURE_UNALIGNED
/* Define label DST_ALIGNED to BIG_BLOCK. It will go to aligned copy
once destination is adjusted to aligned. */
#define Ldst_aligned Lbig_block
/* Copy word by word using LDR when alignment can be done in hardware,
i.e., SCTLR.A is set, supporting unaligned access in LDR and STR. */
cmp r2, #8
blo .Lbyte_copy
/* if src is aligned, just go to the big block loop. */
lsls r3, r1, #30
beq .Ldst_aligned
#else
/* if len < 12, misalignment adjustment has more overhead than
just byte-to-byte copy. Also, len must >=8 to guarantee code
afterward work correctly. */
cmp r2, #12
blo .Lbyte_copy
#endif /* __ARM_FEATURE_UNALIGNED */
/* Align dst only, not trying to align src. That is the because
handling of aligned src and misaligned dst need more overhead than
otherwise. By doing this the worst case is when initial src is aligned,
additional up to 4 byte additional copy will executed, which is
acceptable. */
ands r3, r0, #3
beq .Ldst_aligned
rsb r3, #4 /* r3 = bytes needed to align dst.  */
subs r2, r3
lsls r3, r3, #31 /* N = bit0, C = bit1 of the adjustment.  */
itt ne
ldrbne r3, [r1], #1
strbne r3, [r0], #1
bcc .Ldst_aligned
#ifdef __ARM_FEATURE_UNALIGNED
ldrh r3, [r1], #2
strh r3, [r0], #2
b .Ldst_aligned
#else
ldrb r3, [r1], #1
strb r3, [r0], #1
ldrb r3, [r1], #1
strb r3, [r0], #1
/* Now that dst is aligned */
.Ldst_aligned:
/* if r1 is aligned now, it means r0/r1 has the same misalignment,
and they are both aligned now. Go aligned copy. */
ands r3, r1, #3
beq .Lbig_block
/* dst is aligned, but src isn't. Misaligned copy. */
push {r4, r5}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset 4, 0
.cfi_rel_offset 5, 4
subs r2, #4
/* Backward r1 by misaligned bytes, to make r1 aligned.
Since we need to restore r1 to unaligned address after the loop,
we need keep the offset bytes to ip and sub it from r1 afterward. */
subs r1, r3
rsb ip, r3, #4
/* Pre-load on word */
ldr r4, [r1], #4
cmp r3, #2
beq .Lmisaligned_copy_2_2
cmp r3, #3
beq .Lmisaligned_copy_3_1
/* Shift-and-merge copy: combine the tail of the previous word with
the head of the next one, \shift = src misalignment in bits.  */
.macro mis_src_copy shift
1:
#ifdef __ARM_BIG_ENDIAN
lsls r4, r4, \shift
#else
lsrs r4, r4, \shift
#endif
ldr r3, [r1], #4
#ifdef __ARM_BIG_ENDIAN
lsrs r5, r3, 32-\shift
#else
lsls r5, r3, 32-\shift
#endif
orr r4, r4, r5
str r4, [r0], #4
mov r4, r3 /* Carry the fresh word into the next round.  */
subs r2, #4
bhs 1b
.endm
.Lmisaligned_copy_1_3:
mis_src_copy shift=8
b .Lsrc_misaligned_tail
.Lmisaligned_copy_3_1:
mis_src_copy shift=24
b .Lsrc_misaligned_tail
.Lmisaligned_copy_2_2:
/* For 2_2 misalignment, ldr is still faster than 2 x ldrh. */
mis_src_copy shift=16
.Lsrc_misaligned_tail:
adds r2, #4
subs r1, ip /* Restore src to its true (unaligned) position.  */
pop {r4, r5}
.cfi_restore 4
.cfi_restore 5
.cfi_adjust_cfa_offset -8
#endif /* __ARM_FEATURE_UNALIGNED */
.Lbyte_copy:
subs r2, #4
blo .Lcopy_less_than_4
.Lbyte_copy_loop:
subs r2, #1
ldrb r3, [r1], #1
strb r3, [r0], #1
bhs .Lbyte_copy_loop
ldrb r3, [r1]
strb r3, [r0]
ldrb r3, [r1, #1]
strb r3, [r0, #1]
ldrb r3, [r1, #2]
strb r3, [r0, #2]
#ifdef __ARM_FEATURE_UNALIGNED
mov r0, ip
epilogue push_ip=HAVE_PAC_LEAF
#else
epilogue 0 push_ip=HAVE_PAC_LEAF
#endif /* __ARM_FEATURE_UNALIGNED */
.cfi_endproc
.cantunwind
.fnend
.size memcpy, .-memcpy
|
4ms/metamodule-plugin-sdk
| 2,172
|
plugin-libc/newlib/libc/machine/arm/strlen-thumb2-Os.S
|
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include "arm-acle-compat.h"
#include "arm_asm.h"
/* Emit the boilerplate for a global function \f in .text,
   aligned to 2^\p2align bytes and typed as %function.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#if __ARM_ARCH_PROFILE == 'M' && __ARM_ARCH >= 8
/* keep config inherited from -march=. */
#elif __ARM_ARCH_ISA_THUMB >= 2 && __ARM_ARCH >= 7
.arch armv7
#else
.arch armv6t2
#endif
.eabi_attribute Tag_ARM_ISA_use, 0
.thumb
.syntax unified
/* size_t strlen(const char *s)
   Size-optimized (-Os) Thumb-2 byte loop.
   In: r0 = s.  Out: r0 = length.  Clobbers r2, r3.  */
def_fn strlen p2align=1
.fnstart
.cfi_startproc
prologue
mov r3, r0 /* r3 = scan pointer; r0 keeps the start.  */
1: ldrb.w r2, [r3], #1 /* Load byte, post-increment.  */
cmp r2, #0
bne 1b /* Stop after consuming the NUL.  */
subs r0, r3, r0 /* Bytes consumed, including the NUL...  */
subs r0, #1 /* ...so subtract one for the NUL itself.  */
epilogue
.cfi_endproc
.cantunwind
.fnend
.size strlen, . - strlen
|
4ms/metamodule-plugin-sdk
| 2,015
|
plugin-libc/newlib/libc/machine/arm/aeabi_memmove-arm.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.arm
.syntax unified
.global __aeabi_memmove
.type __aeabi_memmove, %function
ASM_ALIAS __aeabi_memmove4 __aeabi_memmove
ASM_ALIAS __aeabi_memmove8 __aeabi_memmove
/* void __aeabi_memmove(void *dest, const void *src, size_t n)
   ARM-state byte-wise memmove that handles overlap by choosing the
   copy direction.  In: r0 = dest, r1 = src, r2 = n.
   Clobbers r1-r3, ip; no defined return value (RTABI).  */
__aeabi_memmove:
cmp r0, r1
bls 2f /* dest <= src: forward copy.  */
add r3, r1, r2 /* r3 = src + n.  */
cmp r0, r3
bcs 2f /* Regions disjoint: forward copy.  */
/* Overlapping with dest > src: copy backwards.  */
cmp r2, #0
add r1, r0, r2 /* r1 = dest + n (store cursor).  */
bxeq lr /* n == 0: nothing to do.  */
rsb r2, r2, r3 /* r2 = src (lower copy bound).  */
1:
ldrb ip, [r3, #-1]! /* Pre-decrement load from the top.  */
cmp r2, r3
strb ip, [r1, #-1]!
bne 1b
bx lr
2:
cmp r2, #0
addne r2, r1, r2 /* r2 = src + n (end of source).  */
subne r3, r0, #1 /* r3 = dest - 1 (pre-increment cursor).  */
beq 4f
3:
ldrb ip, [r1], #1
cmp r2, r1
strb ip, [r3, #1]!
bne 3b
bx lr
4:
bx lr
.size __aeabi_memmove, . - __aeabi_memmove
|
4ms/metamodule-plugin-sdk
| 12,693
|
plugin-libc/newlib/libc/machine/arm/memchr.S
|
/* Copyright (c) 2010-2011, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Linaro Limited nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Written by Dave Gilbert <david.gilbert@linaro.org>
This memchr routine is optimised on a Cortex-A9 and should work on
all ARMv7 processors. It has a fast path for short sizes, and has
an optimised path for large data sets; the worst case is finding the
match early in a large data set. */
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
@ 2011-02-07 david.gilbert@linaro.org
@ Extracted from local git a5b438d861
@ 2011-07-14 david.gilbert@linaro.org
@ Import endianness fix from local git ea786f1b
@ 2011-10-11 david.gilbert@linaro.org
@ Import from cortex-strings bzr rev 63
@ Flip to ldrd (as suggested by Greta Yorsh)
@ Make conditional on CPU type
@ tidy
@ This code requires armv6t2 or later. Uses Thumb2.
.syntax unified
#include "../../../../include/arm-acle-compat.h"
#include "arm_asm.h"
@ NOTE: This ifdef MUST match the one in memchr-stub.c
#if defined (__ARM_NEON__) || defined (__ARM_NEON)
#if __ARM_ARCH >= 8 && __ARM_ARCH_PROFILE == 'R'
.arch armv8-r
#else
.arch armv7-a
#endif
.fpu neon
/* Arguments */
#define srcin r0
#define chrin r1
#define cntin r2
/* Retval */
#define result r0 /* Live range does not overlap with srcin */
/* Working registers */
#define src r1 /* Live range does not overlap with chrin */
#define tmp r3
#define synd r0 /* No overlap with srcin or result */
#define soff r12
/* Working NEON registers */
#define vrepchr q0
#define vdata0 q1
#define vdata0_0 d2 /* Lower half of vdata0 */
#define vdata0_1 d3 /* Upper half of vdata0 */
#define vdata1 q2
#define vdata1_0 d4 /* Lower half of vhas_chr0 */
#define vdata1_1 d5 /* Upper half of vhas_chr0 */
#define vrepmask q3
#define vrepmask0 d6
#define vrepmask1 d7
#define vend q4
#define vend0 d8
#define vend1 d9
/*
* Core algorithm:
*
* For each 32-byte chunk we calculate a 32-bit syndrome value, with one bit per
* byte. Each bit is set if the relevant byte matched the requested character
* and cleared otherwise. Since the bits in the syndrome reflect exactly the
* order in which things occur in the original string, counting trailing zeros
* allows to identify exactly which byte has matched.
*/
.text
.thumb_func
.align 4
.p2align 4,,15
.global memchr
.type memchr,%function
@ void *memchr (const void *s /* r0 */, int c /* r1 */, size_t n /* r2 */)
@ NEON variant.  Each aligned 32-byte chunk is reduced to a 32-bit
@ syndrome word holding one bit per byte (bit set = byte matched c);
@ a bit-reverse followed by clz then locates the first match.
@ Returns a pointer to the first occurrence of c, or NULL.
memchr:
.cfi_sections .debug_frame
.cfi_startproc
/* Use a simple loop if there are less than 8 bytes to search. */
cmp cntin, #7
bhi .Llargestr
and chrin, chrin, #0xff
.Lsmallstr:
subs cntin, cntin, #1
blo .Lnotfound /* Return not found if reached end. */
ldrb tmp, [srcin], #1
cmp tmp, chrin
bne .Lsmallstr /* Loop again if not found. */
/* Otherwise fixup address and return. */
sub result, result, #1 /* result aliases srcin, already stepped past the match. */
bx lr
.Llargestr:
vdup.8 vrepchr, chrin /* Duplicate char across all lanes. */
/*
 * Magic constant 0x8040201008040201 allows us to identify which lane
 * matches the requested byte.
 */
movw tmp, #0x0201
movt tmp, #0x0804
lsl soff, tmp, #4
vmov vrepmask0, tmp, soff
vmov vrepmask1, tmp, soff
/* Work with aligned 32-byte chunks */
bic src, srcin, #31
ands soff, srcin, #31 /* soff = misalignment within the chunk. */
beq .Lloopintro /* Go straight to main loop if it's aligned. */
/*
 * Input string is not 32-byte aligned. We calculate the syndrome
 * value for the aligned 32 bytes block containing the first bytes
 * and mask the irrelevant part.
 */
vld1.8 {vdata0, vdata1}, [src:256]!
sub tmp, soff, #32
adds cntin, cntin, tmp /* cntin -= bytes consumed; flags used by bls below. */
vceq.i8 vdata0, vdata0, vrepchr
vceq.i8 vdata1, vdata1, vrepchr
vand vdata0, vdata0, vrepmask
vand vdata1, vdata1, vrepmask
vpadd.i8 vdata0_0, vdata0_0, vdata0_1
vpadd.i8 vdata1_0, vdata1_0, vdata1_1
vpadd.i8 vdata0_0, vdata0_0, vdata1_0
vpadd.i8 vdata0_0, vdata0_0, vdata0_0
vmov synd, vdata0_0[0]
/* Clear the soff lower bits */
lsr synd, synd, soff
lsl synd, synd, soff
/* The first block can also be the last */
bls .Lmasklast
/* Have we found something already? */
cbnz synd, .Ltail
.Lloopintro:
vpush {vend}
/* 264/265 correspond to d8/d9 for q4 */
.cfi_adjust_cfa_offset 16
.cfi_rel_offset 264, 0
.cfi_rel_offset 265, 8
.p2align 3,,7
.Lloop:
vld1.8 {vdata0, vdata1}, [src:256]!
subs cntin, cntin, #32
vceq.i8 vdata0, vdata0, vrepchr
vceq.i8 vdata1, vdata1, vrepchr
/* If we're out of data we finish regardless of the result. */
bls .Lend
/* Use a fast check for the termination condition. */
vorr vend, vdata0, vdata1
vorr vend0, vend0, vend1
vmov synd, tmp, vend0
orrs synd, synd, tmp
/* We're not out of data, loop if we haven't found the character. */
beq .Lloop
.Lend:
vpop {vend}
.cfi_adjust_cfa_offset -16
.cfi_restore 264
.cfi_restore 265
/* Termination condition found, let's calculate the syndrome value. */
vand vdata0, vdata0, vrepmask
vand vdata1, vdata1, vrepmask
vpadd.i8 vdata0_0, vdata0_0, vdata0_1
vpadd.i8 vdata1_0, vdata1_0, vdata1_1
vpadd.i8 vdata0_0, vdata0_0, vdata1_0
vpadd.i8 vdata0_0, vdata0_0, vdata0_0
vmov synd, vdata0_0[0]
cbz synd, .Lnotfound
bhi .Ltail /* Count not exhausted (flags from the subs in .Lloop). */
.Lmasklast:
/* Clear the (-cntin) upper bits to avoid out-of-bounds matches. */
neg cntin, cntin
lsl synd, synd, cntin
lsrs synd, synd, cntin
it eq
moveq src, #0 /* If no match, set src to 0 so the retval is 0. */
.Ltail:
/* Count the trailing zeros using bit reversing */
rbit synd, synd
/* Compensate the last post-increment */
sub src, src, #32
/* Count the leading zeros */
clz synd, synd
/* Compute the potential result and return */
add result, src, synd
bx lr
.Lnotfound:
/* Set result to NULL if not found and return */
mov result, #0
bx lr
.cfi_endproc
.size memchr, . - memchr
#elif __ARM_ARCH_ISA_THUMB >= 2 && defined (__ARM_FEATURE_DSP)
#if __ARM_ARCH_PROFILE == 'M'
#if __ARM_ARCH >= 8
/* keep config inherited from -march=. */
#else
.arch armv7e-m
#endif /* __ARM_ARCH >= 8 */
#else
.arch armv6t2
#endif /* __ARM_ARCH_PROFILE == 'M' */
@ this lets us check a flag in a 00/ff byte easily in either endianness
#ifdef __ARMEB__
#define CHARTSTMASK(c) 1<<(31-(c*8))
#else
#define CHARTSTMASK(c) 1<<(c*8)
#endif
.text
.thumb
@ ---------------------------------------------------------------------------
.thumb_func
.align 2
.p2align 4,,15
.global memchr
.type memchr,%function
.fnstart
.cfi_startproc
memchr:
@ r0 = start of memory to scan
@ r1 = character to look for
@ r2 = length
@ returns r0 = pointer to character or NULL if not found
@ Scalar/DSP variant: after aligning, scans 8 bytes per iteration with
@ ldrd, using uadd8/sel to build a 00/ff per-byte match pattern.
@ prologue/epilogue are macros supplied by arm_asm.h.
prologue
and r1,r1,#0xff @ Don't trust the caller to pass a char
cmp r2,#16 @ If short don't bother with anything clever
blt 20f
tst r0, #7 @ If it's already aligned skip the next bit
beq 10f
@ Work up to an aligned point
5:
ldrb r3, [r0],#1
subs r2, r2, #1
cmp r3, r1
beq 50f @ If it matches exit found
tst r0, #7
cbz r2, 40f @ If we run off the end, exit not found
bne 5b @ If not aligned yet then do next byte
10:
@ We are aligned, we know we have at least 8 bytes to work with
push {r4,r5,r6,r7}
.cfi_adjust_cfa_offset 16
.cfi_rel_offset 4, 0
.cfi_rel_offset 5, 4
.cfi_rel_offset 6, 8
.cfi_rel_offset 7, 12
orr r1, r1, r1, lsl #8 @ expand the match word across all bytes
orr r1, r1, r1, lsl #16
bic r4, r2, #7 @ Number of double words to work with * 8
mvns r7, #0 @ all F's
movs r3, #0
15:
ldrd r5,r6,[r0],#8
subs r4, r4, #8
eor r5,r5, r1 @ r5,r6 have 00's where bytes match the target
eor r6,r6, r1
uadd8 r5, r5, r7 @ Par add 0xff - sets GE bits for bytes!=0
sel r5, r3, r7 @ bytes are 00 for none-00 bytes,
@ or ff for 00 bytes - NOTE INVERSION
uadd8 r6, r6, r7 @ Par add 0xff - sets GE bits for bytes!=0
sel r6, r5, r7 @ chained....bytes are 00 for none-00 bytes
@ or ff for 00 bytes - NOTE INVERSION
cbnz r6, 60f @ r6 nonzero => a match somewhere in this pair
bne 15b @ (Flags from the subs above)
pop {r4,r5,r6,r7}
.cfi_restore 7
.cfi_restore 6
.cfi_restore 5
.cfi_restore 4
.cfi_adjust_cfa_offset -16
and r1,r1,#0xff @ r1 back to a single character
and r2,r2,#7 @ Leave the count remaining as the number
@ after the double words have been done
20:
cbz r2, 40f @ 0 length or hit the end already then not found
21: @ Post aligned section, or just a short call
ldrb r3,[r0],#1
subs r2,r2,#1
eor r3,r3,r1 @ r3 = 0 if match - doesn't break flags from sub
cbz r3, 50f
bne 21b @ on r2 flags
40:
.cfi_remember_state
movs r0,#0 @ not found
epilogue
50:
.cfi_restore_state
.cfi_remember_state
subs r0,r0,#1 @ found: r0 was post-incremented past the match
epilogue
60: @ We're here because the fast path found a hit
@ now we have to track down exactly which word it was
@ r0 points to the start of the double word after the one tested
@ r5 has the 00/ff pattern for the first word, r6 has the chained value
@ This point is reached from cbnz midway through label 15 prior to
@ popping r4-r7 off the stack. .cfi_restore_state alone disregards
@ this, so we manually correct this.
.cfi_restore_state @ Standard post-prologue state
.cfi_adjust_cfa_offset 16
.cfi_rel_offset 4, 0
.cfi_rel_offset 5, 4
.cfi_rel_offset 6, 8
.cfi_rel_offset 7, 12
cmp r5, #0
itte eq
moveq r5, r6 @ the end is in the 2nd word
subeq r0,r0,#3 @ Points to 2nd byte of 2nd word
subne r0,r0,#7 @ or 2nd byte of 1st word
@ r0 currently points to the 2nd byte of the word containing the hit
tst r5, # CHARTSTMASK(0) @ 1st character
bne 61f
adds r0,r0,#1
tst r5, # CHARTSTMASK(1) @ 2nd character
ittt eq
addeq r0,r0,#1
tsteq r5, # (3<<15) @ 2nd & 3rd character
@ If not the 3rd must be the last one
addeq r0,r0,#1
61:
pop {r4,r5,r6,r7}
.cfi_restore 7
.cfi_restore 6
.cfi_restore 5
.cfi_restore 4
.cfi_adjust_cfa_offset -16
subs r0,r0,#1 @ back up onto the matching byte
epilogue
.cfi_endproc
.cantunwind
.fnend
#else
/* Defined in memchr-stub.c. */
#endif
|
4ms/metamodule-plugin-sdk
| 1,877
|
plugin-libc/newlib/libc/machine/arm/aeabi_memset-soft.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Define \new as a function alias for \old.  For Thumb we must use
   .thumb_set so the symbol keeps its Thumb (bit-0) marking; a plain
   .set would produce an alias that interworking calls would enter in
   the wrong state.  */
.macro ASM_ALIAS new old
.global \new
.type \new, %function
#if defined (__thumb__)
.thumb_set \new, \old
#else
.set \new, \old
#endif
.endm
/* NOTE: This ifdef MUST match the one in aeabi_memset.c. */
#if !defined (__SOFTFP__)
# if defined (__thumb2__)
# include "aeabi_memset-thumb2.S"
# elif defined (__thumb__)
# include "aeabi_memset-thumb.S"
# else
# include "aeabi_memset-arm.S"
# endif
#endif
|
4ms/metamodule-plugin-sdk
| 2,146
|
plugin-libc/newlib/libc/machine/arm/strlen.S
|
/* Copyright (c) 2015 ARM Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include "../../../../include/arm-acle-compat.h"
#if defined __OPTIMIZE_SIZE__ || defined PREFER_SIZE_OVER_SPEED
#if __ARM_ARCH_ISA_THUMB == 2
#include "strlen-thumb2-Os.S"
#elif defined (__ARM_ARCH_ISA_THUMB)
#include "strlen-thumb1-Os.S"
#else
/* Implemented in strlen-stub.c. */
#endif
#else /* defined __OPTIMIZE_SIZE__ || defined PREFER_SIZE_OVER_SPEED */
#if defined __thumb__ && ! defined __thumb2__
/* Implemented in strlen-stub.c. */
#elif __ARM_ARCH_ISA_THUMB >= 2 && defined __ARM_FEATURE_DSP
#include "strlen-armv7.S"
#else
/* Implemented in strlen-stub.c. */
#endif
#endif
|
4ms/metamodule-plugin-sdk
| 11,787
|
plugin-libc/newlib/libc/machine/arm/strcmp-armv6.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Implementation of strcmp for ARMv6. Use ldrd to support wider
loads, provided the data is sufficiently aligned. Use
saturating arithmetic to optimize the compares. */
/* Build Options:
STRCMP_NO_PRECHECK: Don't run a quick pre-check of the first
byte in the string. If comparing completely random strings
the pre-check will save time, since there is a very high
probability of a mismatch in the first character: we save
significant overhead if this is the common case. However,
if strings are likely to be identical (eg because we're
verifying a hit in a hash table), then this check is largely
redundant. */
.arm
/* Parameters and result. */
#define src1 r0
#define src2 r1
#define result r0 /* Overlaps src1. */
/* Internal variables. */
#define tmp1 r4
#define tmp2 r5
#define const_m1 r12
/* Additional internal variables for 64-bit aligned data. */
#define data1a r2
#define data1b r3
#define data2a r6
#define data2b r7
#define syndrome_a tmp1
#define syndrome_b tmp2
/* Additional internal variables for 32-bit aligned data. */
#define data1 r2
#define data2 r3
#define syndrome tmp2
/* Macro to compute and return the result value for word-aligned
   cases.  \synd is the syndrome word (a non-zero byte marks the first
   difference or NUL), \d1/\d2 the data words it was derived from, and
   \restore_r6 selects whether r6/r7 must be reloaded before the stack
   frame is released.  Ends with bx lr (does not fall through).  */
.macro strcmp_epilogue_aligned synd d1 d2 restore_r6
#ifdef __ARM_BIG_ENDIAN
/* If data1 contains a zero byte, then syndrome will contain a 1 in
   bit 7 of that byte. Otherwise, the highest set bit in the
   syndrome will highlight the first different bit. It is therefore
   sufficient to extract the eight bits starting with the syndrome
   bit. */
clz tmp1, \synd
lsl r1, \d2, tmp1
.if \restore_r6
ldrd r6, r7, [sp, #8]
.endif
.cfi_restore 6
.cfi_restore 7
lsl \d1, \d1, tmp1
.cfi_remember_state
lsr result, \d1, #24
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
sub result, result, r1, lsr #24
bx lr
#else
/* To use the big-endian trick we'd have to reverse all three words.
   that's slower than this approach. */
rev \synd, \synd
clz tmp1, \synd
bic tmp1, tmp1, #7 @ round down to the start of the differing byte
lsr r1, \d2, tmp1
.cfi_remember_state
.if \restore_r6
ldrd r6, r7, [sp, #8]
.endif
.cfi_restore 6
.cfi_restore 7
lsr \d1, \d1, tmp1
and result, \d1, #255
and r1, r1, #255
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
sub result, result, r1
bx lr
#endif
.endm
.text
.p2align 5
.Lstrcmp_start_addr:
#ifndef STRCMP_NO_PRECHECK
.Lfastpath_exit:
sub r0, r2, r3 @ return c1 - c2 from the one-byte precheck
bx lr
#endif
@ int strcmp(const char *s1 /* r0 */, const char *s2 /* r1 */)
@ ARMv6 implementation: uses ldrd for 8-byte-at-a-time compares when
@ alignment allows, with uadd8/sel saturating tricks to detect both a
@ NUL byte and the first difference in one syndrome word.
def_fn strcmp
#ifndef STRCMP_NO_PRECHECK
@ Quick first-byte check: exits without building a frame if the very
@ first characters differ or s1 starts with NUL.
ldrb r2, [src1]
ldrb r3, [src2]
cmp r2, #1 @ C clear iff r2 == 0 (NUL)
cmpcs r2, r3
bne .Lfastpath_exit
#endif
.cfi_sections .debug_frame
.cfi_startproc
strd r4, r5, [sp, #-16]!
.cfi_def_cfa_offset 16
.cfi_offset 4, -16
.cfi_offset 5, -12
orr tmp1, src1, src2
strd r6, r7, [sp, #8]
.cfi_offset 6, -8
.cfi_offset 7, -4
mvn const_m1, #0
tst tmp1, #7 @ both pointers 8-byte aligned?
beq .Lloop_aligned8
.Lnot_aligned:
eor tmp1, src1, src2
tst tmp1, #7 @ same misalignment on both?
bne .Lmisaligned8
/* Deal with mutual misalignment by aligning downwards and then
   masking off the unwanted loaded data to prevent a difference. */
and tmp1, src1, #7
bic src1, src1, #7
and tmp2, tmp1, #3
bic src2, src2, #7
lsl tmp2, tmp2, #3 /* Bytes -> bits. */
ldrd data1a, data1b, [src1], #16
tst tmp1, #4
ldrd data2a, data2b, [src2], #16
/* In ARM code we can't use ORN, but we do have MVN with a
   register shift. */
mvn tmp1, const_m1, S2HI tmp2 @ mask of pre-start bytes
orr data1a, data1a, tmp1 @ force pre-start bytes equal (non-zero)
orr data2a, data2a, tmp1
beq .Lstart_realigned8
orr data1b, data1b, tmp1 @ misaligned by >= 4: whole first word is pre-start
mov data1a, const_m1
orr data2b, data2b, tmp1
mov data2a, const_m1
b .Lstart_realigned8
/* Unwind the inner loop by a factor of 2, giving 16 bytes per
   pass. */
.p2align 5,,12 /* Don't start in the tail bytes of a cache line. */
.p2align 2 /* Always word aligned. */
.Lloop_aligned8:
ldrd data1a, data1b, [src1], #16
ldrd data2a, data2b, [src2], #16
.Lstart_realigned8:
uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */
eor syndrome_a, data1a, data2a
sel syndrome_a, syndrome_a, const_m1 @ ff where data1a has a NUL byte
uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */
eor syndrome_b, data1b, data2b
sel syndrome_b, syndrome_b, const_m1
orrs syndrome_b, syndrome_b, syndrome_a /* Only need if s_a == 0 */
bne .Ldiff_found
ldrd data1a, data1b, [src1, #-8]
ldrd data2a, data2b, [src2, #-8]
uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */
eor syndrome_a, data1a, data2a
sel syndrome_a, syndrome_a, const_m1
uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */
eor syndrome_b, data1b, data2b
sel syndrome_b, syndrome_b, const_m1
orrs syndrome_b, syndrome_b, syndrome_a /* Only need if s_a == 0 */
beq .Lloop_aligned8
.Ldiff_found:
cmp syndrome_a, #0
bne .Ldiff_in_a
.Ldiff_in_b:
strcmp_epilogue_aligned syndrome_b, data1b, data2b 1
.Ldiff_in_a:
.cfi_restore_state
strcmp_epilogue_aligned syndrome_a, data1a, data2a 1
.cfi_restore_state
.Lmisaligned8:
tst tmp1, #3
bne .Lmisaligned4
ands tmp1, src1, #3
bne .Lmutual_align4
/* Unrolled by a factor of 2, to reduce the number of post-increment
   operations. */
.Lloop_aligned4:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned4:
uadd8 syndrome, data1, const_m1 /* Only need GE bits. */
eor syndrome, data1, data2
sel syndrome, syndrome, const_m1
cmp syndrome, #0
bne .Laligned4_done
ldr data1, [src1, #-4]
ldr data2, [src2, #-4]
uadd8 syndrome, data1, const_m1
eor syndrome, data1, data2
sel syndrome, syndrome, const_m1
cmp syndrome, #0
beq .Lloop_aligned4
.Laligned4_done:
strcmp_epilogue_aligned syndrome, data1, data2, 0
.Lmutual_align4:
.cfi_restore_state
/* Deal with mutual misalignment by aligning downwards and then
   masking off the unwanted loaded data to prevent a difference. */
lsl tmp1, tmp1, #3 /* Bytes -> bits. */
bic src1, src1, #3
ldr data1, [src1], #8
bic src2, src2, #3
ldr data2, [src2], #8
/* In ARM code we can't use ORN, but we do have MVN with a
   register shift. */
mvn tmp1, const_m1, S2HI tmp1
orr data1, data1, tmp1
orr data2, data2, tmp1
b .Lstart_realigned4
.Lmisaligned4:
ands tmp1, src1, #3
beq .Lsrc1_aligned
sub src2, src2, tmp1 @ keep src2 in step while src1 is rounded down
bic src1, src1, #3
lsls tmp1, tmp1, #31 @ C=src1[1], Z=src1[0]
ldr data1, [src1], #4
beq .Laligned_m2
bcs .Laligned_m1
#ifdef STRCMP_NO_PRECHECK
ldrb data2, [src2, #1]
uxtb tmp1, data1, ror #BYTE1_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
bne .Lmisaligned_exit
.Laligned_m2:
ldrb data2, [src2, #2]
uxtb tmp1, data1, ror #BYTE2_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
bne .Lmisaligned_exit
.Laligned_m1:
ldrb data2, [src2, #3]
uxtb tmp1, data1, ror #BYTE3_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
beq .Lsrc1_aligned
#else /* STRCMP_NO_PRECHECK */
/* If we've done the pre-check, then we don't need to check the
   first byte again here. */
ldrb data2, [src2, #2]
uxtb tmp1, data1, ror #BYTE2_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
bne .Lmisaligned_exit
.Laligned_m2:
ldrb data2, [src2, #3]
uxtb tmp1, data1, ror #BYTE3_OFFSET
cmp tmp1, #1
cmpcs tmp1, data2
beq .Laligned_m1
#endif
.Lmisaligned_exit:
.cfi_remember_state
sub result, tmp1, data2
ldr r4, [sp], #16 @ only r4 was clobbered on this path
.cfi_restore 4
bx lr
#ifndef STRCMP_NO_PRECHECK
.Laligned_m1:
add src2, src2, #4
#endif
.Lsrc1_aligned:
.cfi_restore_state
/* src1 is word aligned, but src2 has no common alignment
   with it. */
ldr data1, [src1], #4
lsls tmp1, src2, #31 /* C=src2[1], Z=src2[0]. */
bic src2, src2, #3
ldr data2, [src2], #4
bhi .Loverlap1 /* C=1, Z=0 => src2[1:0] = 0b11. */
bcs .Loverlap2 /* C=1, Z=1 => src2[1:0] = 0b10. */
/* (overlap3) C=0, Z=0 => src2[1:0] = 0b01. */
.Loverlap3:
bic tmp1, data1, #MSB
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #8
sel syndrome, syndrome, const_m1
bne 4f
cmp syndrome, #0
ldreq data2, [src2], #4
bne 5f
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #24
bne 6f
ldr data1, [src1], #4
b .Loverlap3
4:
S2LO data2, data2, #8
b .Lstrcmp_tail
5:
bics syndrome, syndrome, #MSB
bne .Lstrcmp_done_equal
/* We can only get here if the MSB of data1 contains 0, so
   fast-path the exit. */
ldrb result, [src2]
.cfi_remember_state
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 Not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
neg result, result
bx lr
6:
.cfi_restore_state
S2LO data1, data1, #24
and data2, data2, #LSB
b .Lstrcmp_tail
.p2align 5,,12 /* Ensure at least 3 instructions in cache line. */
.Loverlap2:
and tmp1, data1, const_m1, S2LO #16
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #16
sel syndrome, syndrome, const_m1
bne 4f
cmp syndrome, #0
ldreq data2, [src2], #4
bne 5f
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #16
bne 6f
ldr data1, [src1], #4
b .Loverlap2
4:
S2LO data2, data2, #16
b .Lstrcmp_tail
5:
ands syndrome, syndrome, const_m1, S2LO #16
bne .Lstrcmp_done_equal
ldrh data2, [src2]
S2LO data1, data1, #16
#ifdef __ARM_BIG_ENDIAN
lsl data2, data2, #16
#endif
b .Lstrcmp_tail
6:
S2LO data1, data1, #16
and data2, data2, const_m1, S2LO #16
b .Lstrcmp_tail
.p2align 5,,12 /* Ensure at least 3 instructions in cache line. */
.Loverlap1:
and tmp1, data1, #LSB
uadd8 syndrome, data1, const_m1
eors syndrome, tmp1, data2, S2LO #24
sel syndrome, syndrome, const_m1
bne 4f
cmp syndrome, #0
ldreq data2, [src2], #4
bne 5f
eor tmp1, tmp1, data1
cmp tmp1, data2, S2HI #8
bne 6f
ldr data1, [src1], #4
b .Loverlap1
4:
S2LO data2, data2, #24
b .Lstrcmp_tail
5:
tst syndrome, #LSB
bne .Lstrcmp_done_equal
ldr data2, [src2]
6:
S2LO data1, data1, #8
bic data2, data2, #MSB
b .Lstrcmp_tail
.Lstrcmp_done_equal:
mov result, #0
.cfi_remember_state
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
bx lr
.Lstrcmp_tail:
.cfi_restore_state
#ifndef __ARM_BIG_ENDIAN
rev data1, data1
rev data2, data2
/* Now everything looks big-endian... */
#endif
uadd8 tmp1, data1, const_m1
eor tmp1, data1, data2
sel syndrome, tmp1, const_m1
clz tmp1, syndrome
lsl data1, data1, tmp1
lsl data2, data2, tmp1
lsr result, data1, #24
ldrd r4, r5, [sp], #16
.cfi_restore 4
.cfi_restore 5
/* R6/7 not used in this sequence. */
.cfi_restore 6
.cfi_restore 7
sub result, result, data2, lsr #24
bx lr
.cfi_endproc
.size strcmp, . - .Lstrcmp_start_addr
|
4ms/metamodule-plugin-sdk
| 2,582
|
plugin-libc/newlib/libc/machine/arm/aeabi_memset-arm.S
|
/*
* Copyright (c) 2015 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.arm
.syntax unified
.global __aeabi_memset
.type __aeabi_memset, %function
ASM_ALIAS __aeabi_memset4 __aeabi_memset
ASM_ALIAS __aeabi_memset8 __aeabi_memset
@ void __aeabi_memset(void *dest /* r0 */, size_t n /* r1 */,
@                     int c /* r2 */)
@ Note the AEABI argument order: the byte value is the THIRD argument
@ (the code masks r2 to a byte and stores it), unlike ISO memset.
@ Strategy: store bytes until dest is word aligned, then 16-byte
@ blocks of a replicated word, then single words, then a byte tail.
__aeabi_memset:
tst r0, #3 @ dest already word aligned?
stmfd sp!, {r4, lr}
beq 10f
cmp r1, #0 @ n == 0?
sub r1, r1, #1 @ (sub does not set flags; beq uses the cmp)
beq 9f
and ip, r2, #255 @ ip = fill byte
mov r3, r0 @ r3 = running destination; r0 stays untouched
b 2f
@ Head loop: byte stores until r3 reaches a word boundary.
1:
cmp r1, #0
sub r1, r1, #1
beq 9f
2:
strb ip, [r3], #1
tst r3, #3
bne 1b
@ Here r3 is word aligned and r1 = bytes still to store.
3:
cmp r1, #3
bls 7f @ fewer than 4 bytes left: byte tail only
and lr, r2, #255 @ replicate the fill byte across a word
orr lr, lr, lr, asl #8
cmp r1, #15
orr lr, lr, lr, asl #16
bls 5f @ fewer than 16 left: skip the block loop
mov r4, r1
add ip, r3, #16
@ Block loop: four word stores (16 bytes) per iteration.
4:
sub r4, r4, #16
cmp r4, #15
str lr, [ip, #-16]
str lr, [ip, #-12]
str lr, [ip, #-8]
str lr, [ip, #-4]
add ip, ip, #16
bhi 4b
sub ip, r1, #16 @ ip = number of bytes the block loop consumed
bic ip, ip, #15
and r1, r1, #15 @ r1 = remaining tail (< 16)
add ip, ip, #16
cmp r1, #3
add r3, r3, ip @ advance dest past the blocks
bls 7f
@ Word loop: 4..15 bytes remaining.
5:
mov r4, r3
mov ip, r1
6:
sub ip, ip, #4
cmp ip, #3
str lr, [r4], #4
bhi 6b
sub ip, r1, #4 @ bytes consumed by the word loop, rounded to words
bic ip, ip, #3
add ip, ip, #4
add r3, r3, ip
and r1, r1, #3 @ r1 = remaining tail (< 4)
@ Byte tail: 0..3 bytes remaining.
7:
cmp r1, #0
andne r2, r2, #255
addne r1, r3, r1 @ r1 = one past the last byte to store
beq 9f
8:
strb r2, [r3], #1
cmp r3, r1
bne 8b
9:
ldmfd sp!, {r4, lr}
bx lr @ return value is the untouched dest in r0
@ Aligned entry: skip the head loop entirely.
10:
mov r3, r0
b 3b
.size __aeabi_memset, . - __aeabi_memset
|
4ms/metamodule-plugin-sdk
| 2,915
|
plugin-libc/newlib/libc/machine/arm/strcmp.S
|
/*
* Copyright (c) 2012-2014 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Wrapper for the various implementations of strcmp. */
#include "../../../../include/arm-acle-compat.h"
#ifdef __ARM_BIG_ENDIAN
#define S2LO lsl
#define S2LOEQ lsleq
#define S2HI lsr
#define MSB 0x000000ff
#define LSB 0xff000000
#define BYTE0_OFFSET 24
#define BYTE1_OFFSET 16
#define BYTE2_OFFSET 8
#define BYTE3_OFFSET 0
#else /* not __ARM_BIG_ENDIAN */
#define S2LO lsr
#define S2LOEQ lsreq
#define S2HI lsl
#define BYTE0_OFFSET 0
#define BYTE1_OFFSET 8
#define BYTE2_OFFSET 16
#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
#endif /* not __ARM_BIG_ENDIAN */
/* Emit the boilerplate for a global function \f in .text: section,
   alignment (2^\p2align, default none), visibility, ELF type, and the
   entry label itself.  */
.macro def_fn f p2align=0
.text
.p2align \p2align
.global \f
.type \f, %function
\f:
.endm
#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) \
|| (__ARM_ARCH_ISA_THUMB == 1 && !__ARM_ARCH_ISA_ARM)
# if defined (__thumb__) && !defined (__thumb2__)
/* Thumb1 only variant. If size is preferred, use strcmp-armv4t.S.
If speed is preferred, the strcmp function in strcmp-armv6m.S
will be used. */
# if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED)
# include "strcmp-armv4t.S"
# else
# include "strcmp-armv6m.S"
# endif
# else
# include "strcmp-arm-tiny.S"
# endif
#elif __ARM_ARCH_ISA_THUMB == 2
# ifdef __ARM_FEATURE_SIMD32
# include "strcmp-armv7.S"
# else
# include "strcmp-armv7m.S"
# endif
#elif __ARM_ARCH >= 6
# include "strcmp-armv6.S"
#else
# include "strcmp-armv4.S"
#endif
|
4ms/metamodule-plugin-sdk
| 1,761
|
plugin-libc/newlib/libc/machine/pru/setjmp.s
|
/* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2018-2019 Dimitar Dimitrov <dimitar@dinux.eu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.section .text
.align 3
.globl setjmp
.type setjmp,@function
.globl longjmp
.type longjmp,@function
/* int setjmp(jmp_buf env)
   r14 = env.  Stores SP, RA, FP and r5-r13 as 12 consecutive 32-bit
   words into the buffer, then returns 0 for the direct call.  */
setjmp:
sbbo r2, r14, 0, 4*12 /* SP, RA, FP, r5-r13 */
/* A direct call to setjmp returns 0 (in r14, the PRU ABI return reg). */
ldi r14, 0
ret
/* void longjmp(jmp_buf env, int val)
   r14 = env, r15 = val.  Reloads the 12 words saved by setjmp, then
   makes the corresponding setjmp return val, substituting 1 when
   val == 0 as ISO C requires.  */
longjmp:
lbbo r2, r14, 0, 4*12 /* SP, RA, FP, r5-r13 */
mov r14, r15 /* copy second arg to return location */
qbne 1f, r14, 0 /* per stdC, we cannot return 0 */
ldi r14, 1
1:
ret
|
4ms/metamodule-plugin-sdk
| 2,142
|
plugin-libc/newlib/libc/machine/m68hc11/setjmp.S
|
/* setjmp/longjmp routines for M68HC11 & M68HC12.
* Copyright (C) 1999, 2000, 2001, 2002 Stephane Carrez (stcarrez@nerim.fr)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#if __INT__ == 32
# define val 4
# define INT32(X) X
#else
# define val 2
# define INT32(X)
#endif
#ifdef mc6811
# define REG(X) *X
#else
# define REG(X) X
#endif
.sect .text
.global setjmp
.global longjmp
/* int setjmp(jmp_buf env)
   The jmp_buf pointer arrives in D and is swapped into X (xgdx).
   Saves the caller's return address (top of stack), SP, the soft
   frame pointer _.frame and the soft registers _.d1 - _.d8, then
   returns 0 (D cleared; X also zeroed for 32-bit int builds via
   the INT32 macro).  */
#ifdef mc6811
/* 68HC11 variant: no post-increment addressing, so each slot is
   stored at an explicit offset from X.  */
setjmp:
xgdx
tsy
/* Copy the return address from the top of the caller's stack,
   then the stack pointer itself.  */
ldd 0,y
std 0,x
sty 2,x
ldd REG(_.frame)
std 4,x
ldd REG(_.d1)
std 6,x
ldd REG(_.d2)
std 8,x
ldd REG(_.d3)
std 10,x
ldd REG(_.d4)
std 12,x
ldd REG(_.d5)
std 14,x
ldd REG(_.d6)
std 16,x
ldd REG(_.d7)
std 18,x
ldd REG(_.d8)
std 20,x
INT32( ldx #0)
clra
clrb
rts
#else
/* 68HC12 variant: movw with post-increment walks the buffer.  */
setjmp:
xgdx
movw 0,sp,2,x+
sts 2,x+
movw _.frame,2,x+
movw _.d1,2,x+
movw _.d2,2,x+
movw _.d3,2,x+
movw _.d4,2,x+
movw _.d5,2,x+
movw _.d6,2,x+
movw _.d7,2,x+
movw _.d8,2,x+
INT32( ldx #0)
clra
clrb
rts
#endif
/* void longjmp(jmp_buf env, int val)
   env arrives in D (swapped into X); val is picked up from the
   stack at offset `val` (2 or 4 depending on the int size).  If
   val == 0 it is replaced by 1, per ISO C.  Restores the soft
   registers, SP and the saved return address, then returns val.  */
#ifdef mc6811
longjmp:
xgdx
tsy
/* D = val from the caller's stack; force to 1 if it is 0.  */
ldd val,y
bne do_jump
ldd #1
do_jump:
/* Park the return value in Y while D is used for the reloads.  */
xgdy
ldd 4,x
std REG(_.frame)
ldd 6,x
std REG(_.d1)
ldd 8,x
std REG(_.d2)
ldd 10,x
std REG(_.d3)
ldd 12,x
std REG(_.d4)
ldd 14,x
std REG(_.d5)
ldd 16,x
std REG(_.d6)
ldd 18,x
std REG(_.d7)
ldd 20,x
std REG(_.d8)
/* Reinstate the saved stack pointer and push the saved return
   address back onto that stack before returning.  */
ldd 0,x
ldx 2,x
txs
std 0,x
INT32( ldx #0)
xgdy
rts
#else
longjmp:
xgdx
ldy val,sp
bne do_jump
ldy #1
do_jump:
ldd 4,x+
movw 2,x+,_.frame
movw 0,x,_.d1
movw 2,x,_.d2
movw 4,x,_.d3
movw 6,x,_.d4
movw 8,x,_.d5
movw 10,x,_.d6
movw 12,x,_.d7
movw 14,x,_.d8
/* X walked past the first two slots above; -4,x re-reads the saved
   SP slot -- NOTE(review): offset arithmetic assumed correct from
   upstream; verify against the setjmp layout before changing.  */
ldx -4,x
txs
std 0,x
INT32( ldx #0)
xgdy
rts
#endif
|
4ms/metamodule-plugin-sdk
| 3,119
|
plugin-libc/newlib/libc/machine/cr16/setjmp.S
|
##############################################################################
# setjmp.S -- CR16 setjmp routine #
# #
# Copyright (c) 2004 National Semiconductor Corporation #
# #
# The authors hereby grant permission to use, copy, modify, distribute, #
# and license this software and its documentation for any purpose, provided #
# that existing copyright notices are retained in all copies and that this #
# notice is included verbatim in any distributions. No written agreement, #
# license, or royalty fee is required for any of the authorized uses. #
# Modifications to this software may be copyrighted by their authors #
# and need not follow the licensing terms described here, provided that #
# the new terms are clearly indicated on the first page of each file where #
# they apply. #
# #
# C library -- setjmp, longjmp #
# longjmp(a,v) #
# will generate a "return(v)" #
# from the last call to #
# setjmp(a) #
# by restoring r7-ra, sp, #
# and pc from 'a' #
# and doing a return. (Makes sure that longjmp never returns 0). #
##############################################################################
.text
.file "setjmp.s"
.align 4
.globl _setjmp
.align 4
# int setjmp(jmp_buf env)
# (r3,r2) = env.  Saves the callee-saved state in ascending order:
# r7-r11 as 16-bit words, then the pairs (r12), (r13), (ra) and (sp)
# as 32-bit double-words, advancing (r3,r2) past each slot.
# Returns 0 in (r1,r0) through the still-live (ra).
_setjmp:
#r3, r2: .blkw
storw r7, 0(r3,r2)
addd $2, (r3,r2)
storw r8, 0(r3,r2)
addd $2, (r3,r2)
storw r9, 0(r3,r2)
addd $2, (r3,r2)
storw r10, 0(r3,r2)
addd $2, (r3,r2)
storw r11, 0(r3,r2)
addd $2, (r3,r2)
stord (r12), 0(r3,r2)
addd $4, (r3,r2)
stord (r13), 0(r3,r2)
addd $4, (r3,r2)
stord (ra), 0(r3,r2)
addd $4, (r3,r2)
stord (sp), 0(r3,r2)
# Direct call returns 0.
movd $0,(r1,r0)
jump (ra)
.globl _longjmp
# void longjmp(jmp_buf env, int val)
# (r3,r2) = env, (r5,r4) = val.  Reloads the registers in the same
# order _setjmp stored them, then returns val -- forced to 1 when it
# is 0, as ISO C requires -- through the restored (ra).
_longjmp:
#r3, r2: .blkw # pointer save area
#r5, r4: .blkw # ret value
loadw 0(r3,r2), r7
addd $2, (r3,r2)
loadw 0(r3,r2), r8
addd $2, (r3,r2)
loadw 0(r3,r2), r9
addd $2, (r3,r2)
loadw 0(r3,r2), r10
addd $2, (r3,r2)
loadw 0(r3,r2), r11
addd $2, (r3,r2)
loadd 0(r3,r2), (r12)
addd $4, (r3,r2)
loadd 0(r3,r2), (r13)
addd $4, (r3,r2)
loadd 0(r3,r2), (ra)
addd $4, (r3,r2)
loadd 0(r3,r2), (sp)
# Return value width depends on the int model.
#ifdef __INT32__
movd (r5,r4), (r1,r0)
cmpd $0, (r5,r4)
bne end1
movd $1, (r1,r0)
#else
movw r4, r0
cmpw $0, r4
bne end1
movw $1, r0
#endif
end1:
jump (ra)
.align 4
|
4ms/metamodule-plugin-sdk
| 1,786
|
plugin-libc/newlib/libc/machine/csky/setjmp.S
|
/* Copyright (c) 2020 C-SKY Microsystems All rights reserved.
This copyrighted material is made available to anyone wishing to use,
modify, copy, or redistribute it subject to the terms and conditions
of the FreeBSD License. This program is distributed in the hope that
it will be useful, but WITHOUT ANY WARRANTY expressed or implied,
including the implied warranties of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. A copy of this license is available at
http://www.opensource.org/licenses.
*/
.section .text
.align 3
.globl setjmp
.type setjmp,@function
.globl longjmp
.type longjmp,@function
/* int setjmp(jmp_buf env)
   r0 = env.  Saves the callee-saved registers, the link register r15
   and sp; the register set (and hence buffer layout) depends on the
   CPU variant.  Returns 0 in r0.  */
setjmp:
#if defined(__CK801__)
/* CK801: only r4-r8 are callee-saved; no stm available.  */
stw r4, (r0, 0)
stw r5, (r0, 4)
stw r6, (r0, 8)
stw r7, (r0, 12)
stw r8, (r0, 16)
stw r15, (r0, 20)
stw sp, (r0, 24)
#elif defined(__CK802__)
stm r4-r11, (r0)
stw r15, (r0, 32)
stw sp, (r0, 36)
#else
/* Full register file: also save r16-r17 and r26-r31.  */
stm r4-r11, (r0)
stw r15, (r0, 32)
stw r16, (r0, 36)
stw r17, (r0, 40)
stw r26, (r0, 44)
stw r27, (r0, 48)
stw r28, (r0, 52)
stw r29, (r0, 56)
stw r30, (r0, 60)
stw r31, (r0, 64)
stw sp, (r0, 68)
#endif
movi r0, 0
rts
/* void longjmp(jmp_buf env, int val)
   r0 = env, r1 = val.  Restores the registers saved by setjmp for
   the matching CPU variant, then returns val in r0 -- substituting
   1 when val == 0, as ISO C requires.  Returning through the
   restored r15 resumes execution after the original setjmp call.  */
longjmp:
#if defined(__CK801__)
ldw r4, (r0, 0)
ldw r5, (r0, 4)
ldw r6, (r0, 8)
ldw r7, (r0, 12)
ldw r8, (r0, 16)
ldw r15, (r0, 20)
ldw sp, (r0, 24)
#elif defined(__CK802__)
ldm r4-r11, (r0)
ldw r15, (r0, 32)
ldw sp, (r0, 36)
#else
ldm r4-r11, (r0)
ldw r15, (r0, 32)
ldw r16, (r0, 36)
ldw r17, (r0, 40)
ldw r26, (r0, 44)
ldw r27, (r0, 48)
ldw r28, (r0, 52)
ldw r29, (r0, 56)
ldw r30, (r0, 60)
ldw r31, (r0, 64)
ldw sp, (r0, 68)
#endif
/* r0 = val, mapped to 1 if it was 0.  */
mov r0, r1
cmpnei r1, 0
bt 1f
movi r0, 1
1: rts
|
4ms/metamodule-plugin-sdk
| 2,255
|
plugin-libc/newlib/libc/machine/x86_64/memcpy.S
|
/*
* ====================================================
* Copyright (C) 2007 by Ellips BV. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "x86_64mach.h"
.global SYM (memcpy)
SOTYPE_FUNCTION(memcpy)
/* void *memcpy(void *dst, const void *src, size_t n)
   SysV AMD64, AT&T syntax (bare register names come from the
   x86_64mach.h macros).  In: rdi = dst, rsi = src, rdx = n.
   Out: rax = dst.
   Strategy: copies under 16 bytes go byte-by-byte; otherwise the
   destination is aligned to 8 bytes, and copies of 256+ bytes use a
   128-byte unrolled loop of non-temporal stores (movnti) to avoid
   polluting the cache, prefetching the source ahead of the loads.
   Callee-saved r12-r14 are preserved around the big loop.  */
SYM (memcpy):
movq rdi, rax /* Store destination in return value */
cmpq $16, rdx
jb byte_copy
movq rdi, r8 /* Align destination on quad word boundary */
andq $7, r8
jz quadword_aligned
/* Copy (8 - misalignment) head bytes; rep movsb advances rdi/rsi.  */
movq $8, rcx
subq r8, rcx
subq rcx, rdx
rep movsb
quadword_aligned:
cmpq $256, rdx
jb quadword_copy
pushq rax
pushq r12
pushq r13
pushq r14
movq rdx, rcx /* Copy 128 bytes at a time with minimum cache polution */
shrq $7, rcx
.p2align 4
loop:
prefetchnta 768 (rsi)
prefetchnta 832 (rsi)
/* First 64-byte half: 8 loads, then 8 non-temporal stores.  */
movq (rsi), rax
movq 8 (rsi), r8
movq 16 (rsi), r9
movq 24 (rsi), r10
movq 32 (rsi), r11
movq 40 (rsi), r12
movq 48 (rsi), r13
movq 56 (rsi), r14
movntiq rax, (rdi)
movntiq r8 , 8 (rdi)
movntiq r9 , 16 (rdi)
movntiq r10, 24 (rdi)
movntiq r11, 32 (rdi)
movntiq r12, 40 (rdi)
movntiq r13, 48 (rdi)
movntiq r14, 56 (rdi)
/* Second 64-byte half.  */
movq 64 (rsi), rax
movq 72 (rsi), r8
movq 80 (rsi), r9
movq 88 (rsi), r10
movq 96 (rsi), r11
movq 104 (rsi), r12
movq 112 (rsi), r13
movq 120 (rsi), r14
movntiq rax, 64 (rdi)
movntiq r8 , 72 (rdi)
movntiq r9 , 80 (rdi)
movntiq r10, 88 (rdi)
movntiq r11, 96 (rdi)
movntiq r12, 104 (rdi)
movntiq r13, 112 (rdi)
movntiq r14, 120 (rdi)
leaq 128 (rsi), rsi
leaq 128 (rdi), rdi
dec rcx
jnz loop
/* Order the non-temporal stores before the tail copy / return.  */
sfence
movq rdx, rcx
andq $127, rcx
rep movsb
popq r14
popq r13
popq r12
popq rax
ret
byte_copy:
movq rdx, rcx
rep movsb
ret
quadword_copy:
movq rdx, rcx
shrq $3, rcx
.p2align 4
rep movsq
movq rdx, rcx
andq $7, rcx
rep movsb /* Copy the remaining bytes */
ret
|
4ms/metamodule-plugin-sdk
| 1,083
|
plugin-libc/newlib/libc/machine/x86_64/setjmp.S
|
/*
* ====================================================
* Copyright (C) 2007 by Ellips BV. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
/*
** jmp_buf:
** rbx rbp r12 r13 r14 r15 rsp rip
** 0 8 16 24 32 40 48 56
*/
#include "x86_64mach.h"
.global SYM (setjmp)
.global SYM (longjmp)
SOTYPE_FUNCTION(setjmp)
SOTYPE_FUNCTION(longjmp)
/* int setjmp(jmp_buf env) -- SysV AMD64, AT&T syntax via the
   x86_64mach.h register macros.
   In:  rdi = env (layout per the header above: rbx rbp r12 r13 r14
        r15 rsp rip at offsets 0,8,...,56).
   Out: rax = 0 for the direct call.
   The saved rsp is the caller's stack pointer *after* the return
   address is popped (hence the +8); the saved rip is the return
   address currently at the top of the stack.  */
SYM (setjmp):
movq rbx, 0 (rdi)
movq rbp, 8 (rdi)
movq r12, 16 (rdi)
movq r13, 24 (rdi)
movq r14, 32 (rdi)
movq r15, 40 (rdi)
leaq 8 (rsp), rax
movq rax, 48 (rdi)
movq (rsp), rax
movq rax, 56 (rdi)
movq $0, rax
ret
/* void longjmp(jmp_buf env, int val) -- SysV AMD64, AT&T syntax via
   the x86_64mach.h register macros.
   In: rdi = env, rsi = val.  Restores the context saved by setjmp
   and resumes at the saved rip with rax = val.  ISO C (7.13.2.1)
   requires that longjmp(env, 0) make setjmp return 1, so 0 is mapped
   to 1 before the context switch (previously val was delivered
   unchanged).  __CLI/__STI bracket the stack switch so nothing can
   run on a half-restored stack (macros from x86_64mach.h --
   presumably no-ops in user-mode builds; confirm).  */
SYM (longjmp):
movq rsi, rax /* Return value */
cmpq $0, rax /* ISO C: longjmp(env, 0) must deliver 1 */
jne 0f
movq $1, rax
0:
movq 8 (rdi), rbp
__CLI
movq 48 (rdi), rsp
pushq 56 (rdi) /* saved rip becomes the new return address */
movq 0 (rdi), rbx
movq 16 (rdi), r12
movq 24 (rdi), r13
movq 32 (rdi), r14
movq 40 (rdi), r15
__STI
ret
|
4ms/metamodule-plugin-sdk
| 1,774
|
plugin-libc/newlib/libc/machine/x86_64/memset.S
|
/*
* ====================================================
* Copyright (C) 2007 by Ellips BV. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "x86_64mach.h"
.global SYM (memset)
SOTYPE_FUNCTION(memset)
/* void *memset(void *m, int c, size_t n) -- SysV AMD64, AT&T syntax
   via the x86_64mach.h register macros.
   In:  rdi = m, rsi = c, rdx = n.  Out: rax = m.
   Fills under 16 bytes go byte-by-byte; otherwise the destination is
   aligned to 8 bytes, the fill byte is replicated across a quadword,
   and fills of 256+ bytes use a 128-byte unrolled loop of
   non-temporal stores (movnti) to avoid cache pollution.  */
SYM (memset):
movq rdi, r9 /* Save return value */
movq rsi, rax
movq rdx, rcx
cmpq $16, rdx
jb byte_set
movq rdi, r8 /* Align on quad word boundary */
andq $7, r8
jz quadword_aligned
/* Fill (8 - misalignment) head bytes; rep stosb advances rdi.  */
movq $8, rcx
subq r8, rcx
subq rcx, rdx
rep stosb
movq rdx, rcx
quadword_aligned:
/* Replicate the low byte of c into all 8 bytes of rax.  */
movabs $0x0101010101010101, r8
movzbl sil, eax
imul r8, rax
cmpq $256, rdx
jb quadword_set
shrq $7, rcx /* Store 128 bytes at a time with minimum cache polution */
.p2align 4
loop:
movntiq rax, (rdi)
movntiq rax, 8 (rdi)
movntiq rax, 16 (rdi)
movntiq rax, 24 (rdi)
movntiq rax, 32 (rdi)
movntiq rax, 40 (rdi)
movntiq rax, 48 (rdi)
movntiq rax, 56 (rdi)
movntiq rax, 64 (rdi)
movntiq rax, 72 (rdi)
movntiq rax, 80 (rdi)
movntiq rax, 88 (rdi)
movntiq rax, 96 (rdi)
movntiq rax, 104 (rdi)
movntiq rax, 112 (rdi)
movntiq rax, 120 (rdi)
leaq 128 (rdi), rdi
dec rcx
jnz loop
/* Order the non-temporal stores before the tail fill / return.  */
sfence
movq rdx, rcx
andq $127, rcx
rep stosb
movq r9, rax
ret
byte_set:
rep stosb
movq r9, rax
ret
quadword_set:
shrq $3, rcx
.p2align 4
rep stosq
movq rdx, rcx
andq $7, rcx
rep stosb /* Store the remaining bytes */
movq r9, rax
ret
|
4ms/metamodule-plugin-sdk
| 2,346
|
plugin-libc/newlib/libc/machine/visium/setjmp.S
|
/* setjmp/longjmp for the Visium processor.
Copyright (c) 2015 Rolls-Royce Controls and Data Services Limited.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Rolls-Royce Controls and Data Services Limited nor
the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE. */
.text
.globl setjmp
.type setjmp, @function
/* int setjmp(jmp_buf env) -- Visium.
   r1 = env.  Saves r11-r19 and r21-r23 into 12 consecutive words
   (write.l offsets are in words).  r21 is saved and used as the
   return target, so it presumably holds the return address.
   NOTE(review): the moviq after the bra appears to execute in a
   branch delay slot, setting the return value r1 = 0 -- confirm
   against the Visium ISA manual.  */
setjmp:
write.l 0(r1),r11
write.l 1(r1),r12
write.l 2(r1),r13
write.l 3(r1),r14
write.l 4(r1),r15
write.l 5(r1),r16
write.l 6(r1),r17
write.l 7(r1),r18
write.l 8(r1),r19
write.l 9(r1),r21
write.l 10(r1),r22
write.l 11(r1),r23
bra tr,r21,r0
moviq r1,0
.size setjmp, .-setjmp
.globl longjmp
.type longjmp, @function
/* void longjmp(jmp_buf env, int val) -- Visium.
   r1 = env, r2 = val.  Reloads the 12 words saved by setjmp and
   returns through the restored r21; the move in the (presumed)
   delay slot places val in r1 as the setjmp result.
   NOTE(review): val == 0 is NOT mapped to 1 here, although ISO C
   requires setjmp to observe a non-zero result -- confirm callers
   never pass 0 before relying on this port.  */
longjmp:
read.l r11,0(r1)
read.l r12,1(r1)
read.l r13,2(r1)
read.l r14,3(r1)
read.l r15,4(r1)
read.l r16,5(r1)
read.l r17,6(r1)
read.l r18,7(r1)
read.l r19,8(r1)
read.l r21,9(r1)
read.l r22,10(r1)
read.l r23,11(r1)
bra tr,r21,r0
move.l r1,r2
.size longjmp, .-longjmp
|
4ms/metamodule-plugin-sdk
| 2,078
|
plugin-libc/newlib/libc/machine/mep/setjmp.S
|
#
# Setjmp/longjmp for MeP
#
# DJ Delorie, Red Hat Inc.
#
# 19 32-bit words in the jmpbuf:
# $0
# $1
# ...
# $15
# $pc
# $hi
# $lo
#
# Note that $0 is saved but not restored. It can't be restored
# as it's the return value of setjmp, but we save it in case
# some application wants to see it in the jmp_buf. Ideally,
# we should not need to save anything that is call-clobbered,
# but you never know what the user is going to tell gcc with -f
# options.
.noregerr
.text
.globl setjmp
.type setjmp,@function
setjmp:
	# $1 is the address of the buffer. We return 0 in $0.
	# Layout (see file header): $0-$15 at 0..60, $pc/$lp at 64,
	# $hi at 68, $lo at 72.
sw $0, ($1)
sw $1, 4($1)
sw $2, 8($1)
sw $3, 12($1)
sw $4, 16($1)
sw $5, 20($1)
sw $6, 24($1)
sw $7, 28($1)
sw $8, 32($1)
sw $9, 36($1)
sw $10, 40($1)
sw $11, 44($1)
sw $12, 48($1)
sw $13, 52($1)
sw $14, 56($1)
sw $15, 60($1)
	# Save the link pointer as the resume pc.
ldc $0, $lp
sw $0, 64($1)
	# Bits 25:24 of $opt gate the save of $hi/$lo -- presumably they
	# flag a core that has those registers; TODO confirm against the
	# MeP architecture manual.
ldc $0, $opt
sra $0, 24
and3 $0, $0, 3
beqz $0, sj_skip_hilo
ldc $0, $hi
sw $0, 68($1)
ldc $0, $lo
sw $0, 72($1)
sj_skip_hilo:
mov $0, 0
ret
.globl longjmp
.type longjmp,@function
longjmp:
	# $1 is the address of the buffer. $2 is the value setjmp
	# returns. We do not faithfully restore $0 or $lp, because
	# the act of calling setjmp clobbered those anyway.
	# ISO C: a zero value must be delivered as 1.
bnez $2, rv_not_zero
mov $2, 1
rv_not_zero:
	# We restore $sp first so we can save the return value there,
	# otherwise we'd need to have another unrestored register.
lw $15, 60($1)
add3 $sp, $sp, -4
sw $2, ($sp)
	# Now restore the general registers.
lw $2, 8($1)
lw $3, 12($1)
lw $4, 16($1)
lw $5, 20($1)
lw $6, 24($1)
lw $7, 28($1)
lw $8, 32($1)
lw $9, 36($1)
lw $10, 40($1)
lw $11, 44($1)
lw $12, 48($1)
lw $13, 52($1)
lw $14, 56($1)
	# We restore $pc's value to $lp so that we can just ret later.
lw $0, 64($1)
stc $0, $lp
	# Same $opt gating as in setjmp: only restore $hi/$lo when present.
ldc $0, $opt
sra $0, 24
and3 $0, $0, 3
beqz $0, lj_skip_hilo
lw $0, 68($1)
stc $0, $hi
lw $0, 72($1)
stc $0, $lo
lj_skip_hilo:
	# Restore $1
	# NOTE(review): this loads offset 8, which is $2's save slot
	# ($1 was saved at 4($1)).  Harmless in practice since $1 is
	# call-clobbered across setjmp anyway, but confirm the intent.
lw $1, 8($1)
	# Get the return value off the stack, and restore the stack.
lw $0, ($sp)
add3 $sp, $sp, 4
ret
|
4ms/metamodule-plugin-sdk
| 2,140
|
plugin-libc/newlib/libc/machine/bfin/setjmp.S
|
/*
* setjmp for the Blackfin processor
*
* Copyright (C) 2006 Analog Devices, Inc.
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#define _ASM
#define _SETJMP_H
.text;
.align 4;
.globl _setjmp;
.type _setjmp, STT_FUNC;
/* int setjmp(jmp_buf env) -- Blackfin.
   R0 = env (moved into P0).  Saves the full machine context: pointer
   registers P0-P5, FP, SP, data registers, ASTAT, loop counters,
   accumulators, DAG index/modifier/length/base registers and RETS
   (the return address).  Returns 0 in R0.  */
_setjmp:
[--SP] = P0; /* Save P0 */
P0 = R0;
R0 = [SP++];
[P0 + 0x00] = R0; /* Save saved P0 */
[P0 + 0x04] = P1;
[P0 + 0x08] = P2;
[P0 + 0x0C] = P3;
[P0 + 0x10] = P4;
[P0 + 0x14] = P5;
[P0 + 0x18] = FP; /* Frame Pointer */
[P0 + 0x1C] = SP; /* Stack Pointer */
/* NOTE(review): P0 currently equals the original R0 argument (the
   jmp_buf pointer), so the R0 slot at 0x20 records that pointer.  */
[P0 + 0x20] = P0; /* Data Registers */
[P0 + 0x24] = R1;
[P0 + 0x28] = R2;
[P0 + 0x2C] = R3;
[P0 + 0x30] = R4;
[P0 + 0x34] = R5;
[P0 + 0x38] = R6;
[P0 + 0x3C] = R7;
R0 = ASTAT;
[P0 + 0x40] = R0;
R0 = LC0; /* Loop Counters */
[P0 + 0x44] = R0;
R0 = LC1;
[P0 + 0x48] = R0;
R0 = A0.W; /* Accumulators */
[P0 + 0x4C] = R0;
R0 = A0.X;
[P0 + 0x50] = R0;
R0 = A1.W;
[P0 + 0x54] = R0;
R0 = A1.X;
[P0 + 0x58] = R0;
R0 = I0; /* Index Registers */
[P0 + 0x5C] = R0;
R0 = I1;
[P0 + 0x60] = R0;
R0 = I2;
[P0 + 0x64] = R0;
R0 = I3;
[P0 + 0x68] = R0;
R0 = M0; /* Modifier Registers */
[P0 + 0x6C] = R0;
R0 = M1;
[P0 + 0x70] = R0;
R0 = M2;
[P0 + 0x74] = R0;
R0 = M3;
[P0 + 0x78] = R0;
R0 = L0; /* Length Registers */
[P0 + 0x7c] = R0;
R0 = L1;
[P0 + 0x80] = R0;
R0 = L2;
[P0 + 0x84] = R0;
R0 = L3;
[P0 + 0x88] = R0;
R0 = B0; /* Base Registers */
[P0 + 0x8C] = R0;
R0 = B1;
[P0 + 0x90] = R0;
R0 = B2;
[P0 + 0x94] = R0;
R0 = B3;
[P0 + 0x98] = R0;
R0 = RETS;
[P0 + 0x9C] = R0;
R0 = 0;
RTS;
.size _setjmp, .-_setjmp;
|
4ms/metamodule-plugin-sdk
| 2,327
|
plugin-libc/newlib/libc/machine/bfin/longjmp.S
|
/*
* longjmp for the Blackfin processor
*
* Copyright (C) 2006 Analog Devices, Inc.
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#define _ASM
#define _SETJMP_H
.text;
.align 4;
.globl _longjmp;
.type _longjmp, STT_FUNC;
/* void longjmp(jmp_buf env, int val) -- Blackfin.
   R0 = env, R1 = val.  Restores the context saved by _setjmp.  The
   saved-P0 value and val are parked on the restored stack while the
   data registers are reloaded, then popped back at the end; a zero
   val is mapped to 1 per ISO C before the RTS through the restored
   RETS.  */
_longjmp:
P0 = R0;
R0 = [P0 + 0x00];
[--SP] = R0; /* Put P0 on the stack */
P1 = [P0 + 0x04];
P2 = [P0 + 0x08];
P3 = [P0 + 0x0C];
P4 = [P0 + 0x10];
P5 = [P0 + 0x14];
FP = [P0 + 0x18];
R0 = [SP++]; /* Grab P0 from old stack */
SP = [P0 + 0x1C]; /* Update Stack Pointer */
[--SP] = R0; /* Put P0 on new stack */
[--SP] = R1; /* Put VAL arg on new stack */
R0 = [P0 + 0x20]; /* Data Registers */
R1 = [P0 + 0x24];
R2 = [P0 + 0x28];
R3 = [P0 + 0x2C];
R4 = [P0 + 0x30];
R5 = [P0 + 0x34];
R6 = [P0 + 0x38];
R7 = [P0 + 0x3C];
R0 = [P0 + 0x40];
ASTAT = R0;
R0 = [P0 + 0x44]; /* Loop Counters */
LC0 = R0;
R0 = [P0 + 0x48];
LC1 = R0;
R0 = [P0 + 0x4C]; /* Accumulators */
A0.W = R0;
R0 = [P0 + 0x50];
A0.X = R0;
R0 = [P0 + 0x54];
A1.W = R0;
R0 = [P0 + 0x58];
A1.X = R0;
R0 = [P0 + 0x5C]; /* Index Registers */
I0 = R0;
R0 = [P0 + 0x60];
I1 = R0;
R0 = [P0 + 0x64];
I2 = R0;
R0 = [P0 + 0x68];
I3 = R0;
R0 = [P0 + 0x6C]; /* Modifier Registers */
M0 = R0;
R0 = [P0 + 0x70];
M1 = R0;
R0 = [P0 + 0x74];
M2 = R0;
R0 = [P0 + 0x78];
M3 = R0;
R0 = [P0 + 0x7C]; /* Length Registers */
L0 = R0;
R0 = [P0 + 0x80];
L1 = R0;
R0 = [P0 + 0x84];
L2 = R0;
R0 = [P0 + 0x88];
L3 = R0;
R0 = [P0 + 0x8C]; /* Base Registers */
B0 = R0;
R0 = [P0 + 0x90];
B1 = R0;
R0 = [P0 + 0x94];
B2 = R0;
R0 = [P0 + 0x98];
B3 = R0;
R0 = [P0 + 0x9C]; /* Return Address (PC) */
RETS = R0;
/* Pop VAL and saved P0; deliver 1 instead of 0 (ISO C).  */
R0 = [SP++];
P0 = [SP++];
CC = R0 == 0;
IF !CC JUMP 1f;
R0 = 1;
1:
RTS;
.size _longjmp, .-_longjmp;
|
4ms/metamodule-plugin-sdk
| 3,956
|
plugin-libc/newlib/libc/machine/tic6x/setjmp.S
|
;******************************************************************************
;* SETJMP v7.2.0I10181 *
;* *
;* Copyright (c) 1996-2010 Texas Instruments Incorporated *
;* http://www.ti.com/ *
;* *
;* Redistribution and use in source and binary forms, with or without *
;* modification, are permitted provided that the following conditions *
;* are met: *
;* *
;* Redistributions of source code must retain the above copyright *
;* notice, this list of conditions and the following disclaimer. *
;* *
;* Redistributions in binary form must reproduce the above copyright *
;* notice, this list of conditions and the following disclaimer in *
;* the documentation and/or other materials provided with the *
;* distribution. *
;* *
;* Neither the name of Texas Instruments Incorporated nor the names *
;* of its contributors may be used to endorse or promote products *
;* derived from this software without specific prior written *
;* permission. *
;* *
;* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *
;* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *
;* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *
;* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *
;* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
;* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *
;* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *
;* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *
;* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *
;* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *
;* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
;* *
;******************************************************************************
.text
.globl setjmp
.type setjmp,%function
;------------------------------------------------------------------
; int setjmp(jmp_buf env) -- TMS320C6000.
; A4 = env.  Saves B3 (return address) at +48 and the callee-saved
; pairs A10-A15 / B10-B15 at +0..+44, storing on both datapaths in
; parallel (B4 is a second copy of the buffer pointer for the .D2
; unit).  The RET issues early and completes during the remaining
; store packets (exposed-pipeline delay slots); A4 is zeroed in the
; final packet as the direct-call return value.
;------------------------------------------------------------------
setjmp:
MV .L2X A4, B4
|| STW .D1T2 B3, *+A4(48)
STW .D1T1 A10, *+A4(0)
|| STW .D2T2 B10, *+B4(4)
|| RET .S2 B3
STW .D1T1 A11, *+A4(8)
|| STW .D2T2 B11, *+B4(12)
STW .D1T1 A12, *+A4(16)
|| STW .D2T2 B12, *+B4(20)
STW .D1T1 A13, *+A4(24)
|| STW .D2T2 B13, *+B4(28)
STW .D1T1 A14, *+A4(32)
|| STW .D2T2 B14, *+B4(36)
STW .D1T1 A15, *+A4(40)
|| STW .D2T2 B15, *+B4(44)
|| ZERO .S1 A4
.size setjmp, . - setjmp
.globl longjmp
.type longjmp,%function
;------------------------------------------------------------------
; void longjmp(jmp_buf env, int val) -- TMS320C6000.
; A4 = env, B4 = val.  Reloads the saved return address into A3 and
; the callee-saved pairs from both datapaths in parallel.  The
; predicated packet implements the ISO C rule: [B2] (val != 0)
; returns val, [!B2] returns 1.  RET A3 issues early; the NOP 5
; fills its delay slots while the remaining loads complete.
;------------------------------------------------------------------
longjmp:
LDW .D1T1 *+A4(48), A3
MV .L2X A4, B6
|| MV .S1 A4, A6
|| MV .D2 B4, B2
LDW .D1T1 *+A6(0), A10
|| LDW .D2T2 *+B6(4), B10
|| [B2] MV .L1X B4, A4
|| [!B2] MVK .S1 1, A4
LDW .D1T1 *+A6(8), A11
|| LDW .D2T2 *+B6(12), B11
LDW .D1T1 *+A6(16), A12
|| LDW .D2T2 *+B6(20), B12
LDW .D1T1 *+A6(24), A13
|| LDW .D2T2 *+B6(28), B13
LDW .D1T1 *+A6(32), A14
|| LDW .D2T2 *+B6(36), B14
LDW .D1T1 *+A6(40), A15
|| LDW .D2T2 *+B6(44), B15
|| RET .S2X A3
NOP 5
.size longjmp, . - longjmp
|
4ms/metamodule-plugin-sdk
| 1,074
|
plugin-libc/newlib/libc/machine/ft32/memcpy.S
|
/* A memcpy.c for FT32
Copyright (C) 2014 FTDI (support@ftdichip.com)
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
.text
.global memcpy
.type memcpy,@function
# void *memcpy(void *dst, const void *src, size_t n) -- FT32.
# $r0 = dst, $r1 = src, $r2 = n.  The hardware memcpy.b instruction
# handles at most 32764 bytes, so larger requests are copied in
# 32764-byte chunks, advancing both pointers between chunks.
# NOTE(review): for n > 32764 the function returns the advanced $r0,
# not the original dst -- confirm whether any caller uses memcpy's
# return value before relying on it.
memcpy:
ldk $r3,32764
1:
# While more than one chunk remains, copy a full chunk and advance.
cmp $r2,$r3
jmpc lte,2f
memcpy.b $r0,$r1,$r3
add $r0,$r0,$r3
add $r1,$r1,$r3
sub $r2,$r2,$r3
jmp 1b
2:
# Final (possibly short) chunk.
memcpy.b $r0,$r1,$r2
return
.Lend2:
.size memcpy,.Lend2-memcpy
|
4ms/metamodule-plugin-sdk
| 2,761
|
plugin-libc/newlib/libc/machine/ft32/setjmp.S
|
/* A setjmp.c for FT32
Copyright (C) 2014 FTDI (support@ftdichip.com)
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
# setjmp/longjmp for FT32.
# Total jumpbuf size is 108 bytes, or 27 words.
#
.text
.global setjmp
.type setjmp,@function
# int setjmp(jmp_buf env) -- FT32.
# $r0 = env.  Pops the return address into $r5 and stores it at
# offset 0, followed by $r6-$r31 at offsets 4..104 (27 words total,
# matching the 108-byte jmp_buf noted above).  Returns 0 via an
# indirect jump through the popped return address.
setjmp:
pop.l $r5 # return address in $r5
sti.l $r0,0,$r5
sti.l $r0,4,$r6
sti.l $r0,8,$r7
sti.l $r0,12,$r8
sti.l $r0,16,$r9
sti.l $r0,20,$r10
sti.l $r0,24,$r11
sti.l $r0,28,$r12
sti.l $r0,32,$r13
sti.l $r0,36,$r14
sti.l $r0,40,$r15
sti.l $r0,44,$r16
sti.l $r0,48,$r17
sti.l $r0,52,$r18
sti.l $r0,56,$r19
sti.l $r0,60,$r20
sti.l $r0,64,$r21
sti.l $r0,68,$r22
sti.l $r0,72,$r23
sti.l $r0,76,$r24
sti.l $r0,80,$r25
sti.l $r0,84,$r26
sti.l $r0,88,$r27
sti.l $r0,92,$r28
sti.l $r0,96,$r29
sti.l $r0,100,$r30
sti.l $r0,104,$r31
# Direct call returns 0.
ldk.l $r0,0
jmpi $r5
.Lend1:
.size setjmp,.Lend1-setjmp
.global longjmp
.type longjmp,@function
# void longjmp(jmp_buf env, int val) -- FT32.
# $r0 = env, $r1 = val.  Forces val to 1 when it is 0 (ISO C), then
# reloads $r5 (the saved return address) and $r6-$r31 from the
# buffer, moves val into $r0 and jumps back through $r5, making the
# original setjmp call return val.
longjmp:
cmp.l $r1,0
jmpc nz,.nonz
ldk.l $r1,1
.nonz:
ldi.l $r5,$r0,0
ldi.l $r6,$r0,4
ldi.l $r7,$r0,8
ldi.l $r8,$r0,12
ldi.l $r9,$r0,16
ldi.l $r10,$r0,20
ldi.l $r11,$r0,24
ldi.l $r12,$r0,28
ldi.l $r13,$r0,32
ldi.l $r14,$r0,36
ldi.l $r15,$r0,40
ldi.l $r16,$r0,44
ldi.l $r17,$r0,48
ldi.l $r18,$r0,52
ldi.l $r19,$r0,56
ldi.l $r20,$r0,60
ldi.l $r21,$r0,64
ldi.l $r22,$r0,68
ldi.l $r23,$r0,72
ldi.l $r24,$r0,76
ldi.l $r25,$r0,80
ldi.l $r26,$r0,84
ldi.l $r27,$r0,88
ldi.l $r28,$r0,92
ldi.l $r29,$r0,96
ldi.l $r30,$r0,100
ldi.l $r31,$r0,104
move.l $r0,$r1
jmpi $r5
.Lend2:
.size longjmp,.Lend2-longjmp
|
4ms/metamodule-plugin-sdk
| 1,046
|
plugin-libc/newlib/libc/machine/ft32/memset.S
|
/* A memset.c for FT32
Copyright (C) 2014 FTDI (support@ftdichip.com)
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
.text
.global memset
.type memset,@function
# void *memset(void *m, int c, size_t n) -- FT32.
# $r0 = m, $r1 = c, $r2 = n.  The hardware memset.b instruction
# handles at most 32764 bytes, so larger fills loop in 32764-byte
# chunks, advancing the destination between chunks.
# NOTE(review): for n > 32764 the function returns the advanced $r0,
# not the original m -- confirm whether any caller uses memset's
# return value before relying on it.
memset:
ldk $r3,32764
1:
# While more than one chunk remains, fill a full chunk and advance.
cmp $r2,$r3
jmpc lte,2f
memset.b $r0,$r1,$r3
add $r0,$r0,$r3
sub $r2,$r2,$r3
jmp 1b
2:
# Final (possibly short) chunk.
memset.b $r0,$r1,$r2
return
.Lend2:
.size memset,.Lend2-memset
|
4ms/metamodule-plugin-sdk
| 25,998
|
plugin-libc/newlib/libc/machine/mips/memcpy.S
|
/*
* Copyright (c) 2012-2015
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef ANDROID_CHANGES
# include "machine/asm.h"
# include "machine/regdef.h"
# define USE_MEMMOVE_FOR_OVERLAP
# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _LIBC
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#else
# include <regdef.h>
# include <sys/asm.h>
#endif
/* Check to see if the MIPS architecture we are compiling for supports
* prefetching.
*/
#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
# ifndef DISABLE_PREFETCH
# define USE_PREFETCH
# endif
#endif
#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
# ifndef DISABLE_DOUBLE
# define USE_DOUBLE
# endif
#endif
#if __mips_isa_rev > 5
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
# undef PREFETCH_STORE_HINT
# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
# endif
# define R6_CODE
#endif
/* Some asm.h files do not have the L macro definition. */
#ifndef L
# if _MIPS_SIM == _ABIO32
# define L(label) $L ## label
# else
# define L(label) .L ## label
# endif
#endif
/* Some asm.h files do not have the PTR_ADDIU macro definition. */
#ifndef PTR_ADDIU
# ifdef USE_DOUBLE
# define PTR_ADDIU daddiu
# else
# define PTR_ADDIU addiu
# endif
#endif
/* Some asm.h files do not have the PTR_SRA macro definition. */
#ifndef PTR_SRA
# ifdef USE_DOUBLE
# define PTR_SRA dsra
# else
# define PTR_SRA sra
# endif
#endif
/* New R6 instructions that may not be in asm.h. */
#ifndef PTR_LSA
# if _MIPS_SIM == _ABI64
# define PTR_LSA dlsa
# else
# define PTR_LSA lsa
# endif
#endif
/*
* Using PREFETCH_HINT_LOAD_STREAMED instead of PREFETCH_LOAD on load
 * prefetches appears to offer a slight performance advantage.
*
* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
* or PREFETCH_STORE_STREAMED offers a large performance advantage
* but PREPAREFORSTORE has some special restrictions to consider.
*
* Prefetch with the 'prepare for store' hint does not copy a memory
* location into the cache, it just allocates a cache line and zeros
* it out. This means that if you do not write to the entire cache
* line before writing it out to memory some data will get zero'ed out
* when the cache line is written back to memory and data will be lost.
*
* Also if you are using this memcpy to copy overlapping buffers it may
* not behave correctly when using the 'prepare for store' hint. If you
* use the 'prepare for store' prefetch on a memory area that is in the
* memcpy source (as well as the memcpy destination), then you will get
* some data zero'ed out before you have a chance to read it and data will
* be lost.
*
* If you are going to use this memcpy routine with the 'prepare for store'
* prefetch you may want to set USE_MEMMOVE_FOR_OVERLAP in order to avoid
* the problem of running memcpy on overlapping buffers.
*
* There are ifdef'ed sections of this memcpy to make sure that it does not
* do prefetches on cache lines that are not going to be completely written.
* This code is only needed and only used when PREFETCH_STORE_HINT is set to
* PREFETCH_HINT_PREPAREFORSTORE. This code assumes that cache lines are
* 32 bytes and if the cache line is larger it will not work correctly.
*/
#ifdef USE_PREFETCH
# define PREFETCH_HINT_LOAD 0
# define PREFETCH_HINT_STORE 1
# define PREFETCH_HINT_LOAD_STREAMED 4
# define PREFETCH_HINT_STORE_STREAMED 5
# define PREFETCH_HINT_LOAD_RETAINED 6
# define PREFETCH_HINT_STORE_RETAINED 7
# define PREFETCH_HINT_WRITEBACK_INVAL 25
# define PREFETCH_HINT_PREPAREFORSTORE 30
/*
* If we have not picked out what hints to use at this point use the
* standard load and store prefetch hints.
*/
# ifndef PREFETCH_STORE_HINT
# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
# endif
# ifndef PREFETCH_LOAD_HINT
# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD
# endif
/*
* We double everything when USE_DOUBLE is true so we do 2 prefetches to
* get 64 bytes in that case. The assumption is that each individual
* prefetch brings in 32 bytes.
*/
# ifdef USE_DOUBLE
# define PREFETCH_CHUNK 64
# define PREFETCH_FOR_LOAD(chunk, reg) \
pref PREFETCH_LOAD_HINT, (chunk)*64(reg); \
pref PREFETCH_LOAD_HINT, ((chunk)*64)+32(reg)
# define PREFETCH_FOR_STORE(chunk, reg) \
pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
# else
# define PREFETCH_CHUNK 32
# define PREFETCH_FOR_LOAD(chunk, reg) \
pref PREFETCH_LOAD_HINT, (chunk)*32(reg)
# define PREFETCH_FOR_STORE(chunk, reg) \
pref PREFETCH_STORE_HINT, (chunk)*32(reg)
# endif
/* MAX_PREFETCH_SIZE is the maximum size of a prefetch, it must not be less
* than PREFETCH_CHUNK, the assumed size of each prefetch. If the real size
* of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
* hint is used, the code will not work correctly. If PREPAREFORSTORE is not
* used then MAX_PREFETCH_SIZE does not matter. */
# define MAX_PREFETCH_SIZE 128
/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
* than 5 on a STORE prefetch and that a single prefetch can never be larger
* than MAX_PREFETCH_SIZE. We add the extra 32 when USE_DOUBLE is set because
* we actually do two prefetches in that case, one 32 bytes after the other. */
# ifdef USE_DOUBLE
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
# else
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
# endif
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
&& ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
/* We cannot handle this because the initial prefetches may fetch bytes that
* are before the buffer being copied. We start copies with an offset
* of 4 so avoid this situation when using PREPAREFORSTORE. */
#error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
# endif
#else /* USE_PREFETCH not defined */
# define PREFETCH_FOR_LOAD(offset, reg)
# define PREFETCH_FOR_STORE(offset, reg)
#endif
/* Allow the routine to be named something else if desired. */
#ifndef MEMCPY_NAME
# define MEMCPY_NAME memcpy
#endif
/* We use these 32/64 bit registers as temporaries to do the copying. */
#define REG0 t0
#define REG1 t1
#define REG2 t2
#define REG3 t3
#if defined(_MIPS_SIM) && (_MIPS_SIM == _ABIO32 || _MIPS_SIM == _ABIO64)
# define REG4 t4
# define REG5 t5
# define REG6 t6
# define REG7 t7
#else
# define REG4 ta0
# define REG5 ta1
# define REG6 ta2
# define REG7 ta3
#endif
/* We load/store 64 bits at a time when USE_DOUBLE is true.
* The C_ prefix stands for CHUNK and is used to avoid macro name
* conflicts with system header files. */
#ifdef USE_DOUBLE
# define C_ST sd
# define C_LD ld
# if __MIPSEB
# define C_LDHI ldl /* high part is left in big-endian */
# define C_STHI sdl /* high part is left in big-endian */
# define C_LDLO ldr /* low part is right in big-endian */
# define C_STLO sdr /* low part is right in big-endian */
# else
# define C_LDHI ldr /* high part is right in little-endian */
# define C_STHI sdr /* high part is right in little-endian */
# define C_LDLO ldl /* low part is left in little-endian */
# define C_STLO sdl /* low part is left in little-endian */
# endif
# define C_ALIGN dalign /* r6 align instruction */
#else
# define C_ST sw
# define C_LD lw
# if __MIPSEB
# define C_LDHI lwl /* high part is left in big-endian */
# define C_STHI swl /* high part is left in big-endian */
# define C_LDLO lwr /* low part is right in big-endian */
# define C_STLO swr /* low part is right in big-endian */
# else
# define C_LDHI lwr /* high part is right in little-endian */
# define C_STHI swr /* high part is right in little-endian */
# define C_LDLO lwl /* low part is left in little-endian */
# define C_STLO swl /* low part is left in little-endian */
# endif
# define C_ALIGN align /* r6 align instruction */
#endif
/* Bookkeeping values for 32 vs. 64 bit mode. */
#ifdef USE_DOUBLE
# define NSIZE 8
# define NSIZEMASK 0x3f
# define NSIZEDMASK 0x7f
#else
# define NSIZE 4
# define NSIZEMASK 0x1f
# define NSIZEDMASK 0x3f
#endif
#define UNIT(unit) ((unit)*NSIZE)
#define UNITM1(unit) (((unit)*NSIZE)-1)
#ifdef ANDROID_CHANGES
LEAF(MEMCPY_NAME, 0)
#else
LEAF(MEMCPY_NAME)
#endif
.set nomips16
.set noreorder
/*
* Below we handle the case where memcpy is called with overlapping src and dst.
* Although memcpy is not required to handle this case, some parts of Android
* like Skia rely on such usage. We call memmove to handle such cases.
*/
#ifdef USE_MEMMOVE_FOR_OVERLAP
PTR_SUBU t0,a0,a1
PTR_SRA t2,t0,31
xor t1,t0,t2
PTR_SUBU t0,t1,t2
sltu t2,t0,a2
beq t2,zero,L(memcpy)
la t9,memmove
jr t9
nop
L(memcpy):
#endif
/*
* If the size is less than 2*NSIZE (8 or 16), go to L(lastb). Regardless of
* size, copy dst pointer to v0 for the return value.
*/
slti t2,a2,(2 * NSIZE)
bne t2,zero,L(lasts)
#if defined(RETURN_FIRST_PREFETCH) || defined(RETURN_LAST_PREFETCH)
move v0,zero
#else
move v0,a0
#endif
#ifndef R6_CODE
/*
 * If src and dst have different alignments, go to L(unaligned), if they
 * have the same alignment (but are not actually aligned) do a partial
 * load/store to make them aligned.  If they are both already aligned
 * we can start copying at L(aligned).
 */
	xor	t8,a1,a0
	andi	t8,t8,(NSIZE-1)		/* t8 is a0/a1 word-displacement */
	bne	t8,zero,L(unaligned)
	PTR_SUBU a3, zero, a0		/* (delay slot) */
	andi	a3,a3,(NSIZE-1)		/* copy a3 bytes to align a0/a1 */
	beq	a3,zero,L(aligned)	/* if a3=0, it is already aligned */
	PTR_SUBU a2,a2,a3		/* a2 is the remaining bytes count */
	C_LDHI	t8,0(a1)		/* partial load/store to reach alignment */
	PTR_ADDU a1,a1,a3
	C_STHI	t8,0(a0)
	PTR_ADDU a0,a0,a3
#else /* R6_CODE */
/*
 * Align the destination and hope that the source gets aligned too.  If it
 * doesn't we jump to L(r6_unaligned*) to do unaligned copies using the r6
 * align instruction.
 */
	andi	t8,a0,7			/* t8 = dst misalignment within 8 bytes */
	lapc	t9,L(atable)		/* computed branch into byte-copy table */
	PTR_LSA	t9,t8,t9,2		/* t9 = atable + t8*4 (one bc per slot) */
	jrc	t9
L(atable):
	bc	L(lb0)
	bc	L(lb7)
	bc	L(lb6)
	bc	L(lb5)
	bc	L(lb4)
	bc	L(lb3)
	bc	L(lb2)
	bc	L(lb1)
/* Fall-through chain: entry at L(lbN) copies N bytes to align the dst. */
L(lb7):
	lb	a3, 6(a1)
	sb	a3, 6(a0)
L(lb6):
	lb	a3, 5(a1)
	sb	a3, 5(a0)
L(lb5):
	lb	a3, 4(a1)
	sb	a3, 4(a0)
L(lb4):
	lb	a3, 3(a1)
	sb	a3, 3(a0)
L(lb3):
	lb	a3, 2(a1)
	sb	a3, 2(a0)
L(lb2):
	lb	a3, 1(a1)
	sb	a3, 1(a0)
L(lb1):
	lb	a3, 0(a1)
	sb	a3, 0(a0)
	li	t9,8
	subu	t8,t9,t8		/* t8 = number of bytes just copied */
	PTR_SUBU a2,a2,t8
	PTR_ADDU a0,a0,t8
	PTR_ADDU a1,a1,t8
L(lb0):
	andi	t8,a1,(NSIZE-1)		/* t8 = residual src misalignment */
	lapc	t9,L(jtable)
	PTR_LSA	t9,t8,t9,2
	jrc	t9
L(jtable):
	bc	L(aligned)
	bc	L(r6_unaligned1)
	bc	L(r6_unaligned2)
	bc	L(r6_unaligned3)
# ifdef USE_DOUBLE
	bc	L(r6_unaligned4)
	bc	L(r6_unaligned5)
	bc	L(r6_unaligned6)
	bc	L(r6_unaligned7)
# endif
#endif /* R6_CODE */
L(aligned):
/*
 * Now dst/src are both aligned to (word or double word) aligned addresses
 * Set a2 to count how many bytes we have to copy after all the 64/128 byte
 * chunks are copied and a3 to the dst pointer after all the 64/128 byte
 * chunks have been copied.  We will loop, incrementing a0 and a1 until a0
 * equals a3.
 */
	andi	t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
	beq	a2,t8,L(chkw)	 /* if a2==t8, no 64-byte/128-byte chunks */
	PTR_SUBU a3,a2,t8	 /* subtract from a2 the remainder */
	PTR_ADDU a3,a0,a3	 /* Now a3 is the final dst after loop */
/* When in the loop we may prefetch with the 'prepare to store' hint,
 * in this case the a0+x should not be past the "t0-32" address.  This
 * means: for x=128 the last "safe" a0 address is "t0-160".  Alternatively,
 * for x=64 the last "safe" a0 address is "t0-96" In the current version we
 * will use "prefetch hint,128(a0)", so "t0-160" is the limit.
 */
#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDU t0,a0,a2		/* t0 is the "past the end" address */
	PTR_SUBU t9,t0,PREFETCH_LIMIT	/* t9 is the "last safe pref" address */
#endif
	PREFETCH_FOR_LOAD (0, a1)
	PREFETCH_FOR_LOAD (1, a1)
	PREFETCH_FOR_LOAD (2, a1)
	PREFETCH_FOR_LOAD (3, a1)
#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PREFETCH_FOR_STORE (1, a0)
	PREFETCH_FOR_STORE (2, a0)
	PREFETCH_FOR_STORE (3, a0)
#endif
#if defined(RETURN_FIRST_PREFETCH) && defined(USE_PREFETCH)
# if PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE
	sltu    v1,t9,a0
	bgtz    v1,L(skip_set)
	nop
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*4)
L(skip_set):
# else
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*1)
# endif
#endif
#if defined(RETURN_LAST_PREFETCH) && defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*3)
# ifdef USE_DOUBLE
	PTR_ADDIU v0,v0,32
# endif
#endif
/* Main loop: copies 16 (d)words = 64/128 bytes per iteration. */
L(loop16w):
	C_LD	t0,UNIT(0)(a1)
#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	sltu	v1,t9,a0		/* If a0 > t9 don't use next prefetch */
	bgtz	v1,L(skip_pref)
#endif
	C_LD	t1,UNIT(1)(a1)
#ifndef R6_CODE
	PREFETCH_FOR_STORE (4, a0)
	PREFETCH_FOR_STORE (5, a0)
#else
	PREFETCH_FOR_STORE (2, a0)
#endif
#if defined(RETURN_LAST_PREFETCH) && defined(USE_PREFETCH)
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*5)
# ifdef USE_DOUBLE
	PTR_ADDIU v0,v0,32
# endif
#endif
L(skip_pref):
	C_LD	REG2,UNIT(2)(a1)
	C_LD	REG3,UNIT(3)(a1)
	C_LD	REG4,UNIT(4)(a1)
	C_LD	REG5,UNIT(5)(a1)
	C_LD	REG6,UNIT(6)(a1)
	C_LD	REG7,UNIT(7)(a1)
#ifndef R6_CODE
	PREFETCH_FOR_LOAD (4, a1)
#else
	PREFETCH_FOR_LOAD (3, a1)
#endif
	C_ST	t0,UNIT(0)(a0)
	C_ST	t1,UNIT(1)(a0)
	C_ST	REG2,UNIT(2)(a0)
	C_ST	REG3,UNIT(3)(a0)
	C_ST	REG4,UNIT(4)(a0)
	C_ST	REG5,UNIT(5)(a0)
	C_ST	REG6,UNIT(6)(a0)
	C_ST	REG7,UNIT(7)(a0)
	C_LD	t0,UNIT(8)(a1)
	C_LD	t1,UNIT(9)(a1)
	C_LD	REG2,UNIT(10)(a1)
	C_LD	REG3,UNIT(11)(a1)
	C_LD	REG4,UNIT(12)(a1)
	C_LD	REG5,UNIT(13)(a1)
	C_LD	REG6,UNIT(14)(a1)
	C_LD	REG7,UNIT(15)(a1)
#ifndef R6_CODE
	PREFETCH_FOR_LOAD (5, a1)
#endif
	C_ST	t0,UNIT(8)(a0)
	C_ST	t1,UNIT(9)(a0)
	C_ST	REG2,UNIT(10)(a0)
	C_ST	REG3,UNIT(11)(a0)
	C_ST	REG4,UNIT(12)(a0)
	C_ST	REG5,UNIT(13)(a0)
	C_ST	REG6,UNIT(14)(a0)
	C_ST	REG7,UNIT(15)(a0)
	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest */
	bne	a0,a3,L(loop16w)
	PTR_ADDIU a1,a1,UNIT(16)	/* (delay slot) adding 64/128 to src */
	move	a2,t8			/* a2 = bytes left after the big chunks */
/* Here we have src and dest word-aligned but less than 64-bytes or
 * 128 bytes to go.  Check for a 32(64) byte chunk and copy it if there
 * is one.  Otherwise jump down to L(chk1w) to handle the tail end of
 * the copy.
 */
L(chkw):
	PREFETCH_FOR_LOAD (0, a1)
	andi	t8,a2,NSIZEMASK	/* Is there a 32-byte/64-byte chunk.  */
				/* The t8 is the remainder count past 32-bytes */
	beq	a2,t8,L(chk1w)	/* When a2=t8, no 32-byte chunk  */
	nop
	C_LD	t0,UNIT(0)(a1)
	C_LD	t1,UNIT(1)(a1)
	C_LD	REG2,UNIT(2)(a1)
	C_LD	REG3,UNIT(3)(a1)
	C_LD	REG4,UNIT(4)(a1)
	C_LD	REG5,UNIT(5)(a1)
	C_LD	REG6,UNIT(6)(a1)
	C_LD	REG7,UNIT(7)(a1)
	PTR_ADDIU a1,a1,UNIT(8)
	C_ST	t0,UNIT(0)(a0)
	C_ST	t1,UNIT(1)(a0)
	C_ST	REG2,UNIT(2)(a0)
	C_ST	REG3,UNIT(3)(a0)
	C_ST	REG4,UNIT(4)(a0)
	C_ST	REG5,UNIT(5)(a0)
	C_ST	REG6,UNIT(6)(a0)
	C_ST	REG7,UNIT(7)(a0)
	PTR_ADDIU a0,a0,UNIT(8)
/*
 * Here we have less than 32(64) bytes to copy.  Set up for a loop to
 * copy one word (or double word) at a time.  Set a2 to count how many
 * bytes we have to copy after all the word (or double word) chunks are
 * copied and a3 to the dst pointer after all the (d)word chunks have
 * been copied.  We will loop, incrementing a0 and a1 until a0 equals a3.
 */
L(chk1w):
	andi	a2,t8,(NSIZE-1)	/* a2 is the remainder past one (d)word chunks */
	beq	a2,t8,L(lastw)
	PTR_SUBU a3,t8,a2	/* a3 is count of bytes in one (d)word chunks */
	PTR_ADDU a3,a0,a3	/* a3 is the dst address after loop */
/* copying in words (4-byte or 8-byte chunks) */
L(wordCopy_loop):
	C_LD	REG3,UNIT(0)(a1)
	PTR_ADDIU a0,a0,UNIT(1)
	PTR_ADDIU a1,a1,UNIT(1)
	bne	a0,a3,L(wordCopy_loop)
	C_ST	REG3,UNIT(-1)(a0)	/* (delay slot) store to a0 before increment */
/* If we have been copying double words, see if we can copy a single word
   before doing byte copies.  We can have, at most, one word to copy.  */
L(lastw):
#ifdef USE_DOUBLE
	andi    t8,a2,3		/* a2 is the remainder past 4 byte chunks.  */
	beq	t8,a2,L(lastb)
	move	a2,t8		/* (delay slot) */
	lw	REG3,0(a1)
	sw	REG3,0(a0)
	PTR_ADDIU a0,a0,4
	PTR_ADDIU a1,a1,4
#endif
/* Copy the last 8 (or 16) bytes */
L(lastb):
	blez	a2,L(leave)
	PTR_ADDU a3,a0,a2	/* a3 is the last dst address */
L(lastbloop):
	lb	v1,0(a1)
	PTR_ADDIU a0,a0,1
	PTR_ADDIU a1,a1,1
	bne	a0,a3,L(lastbloop)
	sb	v1,-1(a0)	/* (delay slot) */
L(leave):
	j	ra
	nop
/* We jump here with a memcpy of less than 8 or 16 bytes, depending on
   whether or not USE_DOUBLE is defined.  Instead of just doing byte
   copies, check the alignment and size and use lw/sw if possible.
   Otherwise, do byte copies.  */
L(lasts):
	andi	t8,a2,3
	beq	t8,a2,L(lastb)		/* fewer than 4 bytes: byte copy */
	andi	t9,a0,3			/* (delay slot) */
	bne	t9,zero,L(lastb)	/* dst not word-aligned: byte copy */
	andi	t9,a1,3			/* (delay slot) */
	bne	t9,zero,L(lastb)	/* src not word-aligned: byte copy */
	PTR_SUBU a3,a2,t8		/* (delay slot) a3 = whole-word byte count */
	PTR_ADDU a3,a0,a3
L(wcopy_loop):
	lw	REG3,0(a1)
	PTR_ADDIU a0,a0,4
	PTR_ADDIU a1,a1,4
	bne	a0,a3,L(wcopy_loop)
	sw	REG3,-4(a0)	/* (delay slot) */
	b	L(lastb)
	move	a2,t8		/* (delay slot) a2 = leftover bytes */
#ifndef R6_CODE
/*
 * UNALIGNED case, got here with a3 = "negu a0"
 * This code is nearly identical to the aligned code above
 * but only the destination (not the source) gets aligned
 * so we need to do partial loads of the source followed
 * by normal stores to the destination (once we have aligned
 * the destination).  Partial loads use the lwl/lwr (ldl/ldr)
 * pair selected by the C_LDHI/C_LDLO macros above.
 */
L(unaligned):
	andi	a3,a3,(NSIZE-1)	/* copy a3 bytes to align a0/a1 */
	beqz	a3,L(ua_chk16w)	/* if a3=0, it is already aligned */
	PTR_SUBU a2,a2,a3	/* a2 is the remaining bytes count */
	C_LDHI	v1,UNIT(0)(a1)
	C_LDLO	v1,UNITM1(1)(a1)
	PTR_ADDU a1,a1,a3
	C_STHI	v1,UNIT(0)(a0)
	PTR_ADDU a0,a0,a3
/*
 *  Now the destination (but not the source) is aligned
 * Set a2 to count how many bytes we have to copy after all the 64/128 byte
 * chunks are copied and a3 to the dst pointer after all the 64/128 byte
 * chunks have been copied.  We will loop, incrementing a0 and a1 until a0
 * equals a3.
 */
L(ua_chk16w):
	andi	t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
	beq	a2,t8,L(ua_chkw) /* if a2==t8, no 64-byte/128-byte chunks */
	PTR_SUBU a3,a2,t8	 /* subtract from a2 the remainder */
	PTR_ADDU a3,a0,a3	 /* Now a3 is the final dst after loop */
# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDU t0,a0,a2	  /* t0 is the "past the end" address */
	PTR_SUBU t9,t0,PREFETCH_LIMIT /* t9 is the "last safe pref" address */
# endif
	PREFETCH_FOR_LOAD (0, a1)
	PREFETCH_FOR_LOAD (1, a1)
	PREFETCH_FOR_LOAD (2, a1)
# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PREFETCH_FOR_STORE (1, a0)
	PREFETCH_FOR_STORE (2, a0)
	PREFETCH_FOR_STORE (3, a0)
# endif
# if defined(RETURN_FIRST_PREFETCH) && defined(USE_PREFETCH)
#  if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	sltu    v1,t9,a0
	bgtz    v1,L(ua_skip_set)
	nop
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*4)
L(ua_skip_set):
#  else
	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*1)
#  endif
# endif
/* Main unaligned loop: 16 (d)words per iteration via partial loads. */
L(ua_loop16w):
	PREFETCH_FOR_LOAD (3, a1)
	C_LDHI	t0,UNIT(0)(a1)
	C_LDHI	t1,UNIT(1)(a1)
	C_LDHI	REG2,UNIT(2)(a1)
# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	sltu	v1,t9,a0
	bgtz	v1,L(ua_skip_pref)
# endif
	C_LDHI	REG3,UNIT(3)(a1)
	PREFETCH_FOR_STORE (4, a0)
	PREFETCH_FOR_STORE (5, a0)
L(ua_skip_pref):
	C_LDHI	REG4,UNIT(4)(a1)
	C_LDHI	REG5,UNIT(5)(a1)
	C_LDHI	REG6,UNIT(6)(a1)
	C_LDHI	REG7,UNIT(7)(a1)
	C_LDLO	t0,UNITM1(1)(a1)
	C_LDLO	t1,UNITM1(2)(a1)
	C_LDLO	REG2,UNITM1(3)(a1)
	C_LDLO	REG3,UNITM1(4)(a1)
	C_LDLO	REG4,UNITM1(5)(a1)
	C_LDLO	REG5,UNITM1(6)(a1)
	C_LDLO	REG6,UNITM1(7)(a1)
	C_LDLO	REG7,UNITM1(8)(a1)
	PREFETCH_FOR_LOAD (4, a1)
	C_ST	t0,UNIT(0)(a0)
	C_ST	t1,UNIT(1)(a0)
	C_ST	REG2,UNIT(2)(a0)
	C_ST	REG3,UNIT(3)(a0)
	C_ST	REG4,UNIT(4)(a0)
	C_ST	REG5,UNIT(5)(a0)
	C_ST	REG6,UNIT(6)(a0)
	C_ST	REG7,UNIT(7)(a0)
	C_LDHI	t0,UNIT(8)(a1)
	C_LDHI	t1,UNIT(9)(a1)
	C_LDHI	REG2,UNIT(10)(a1)
	C_LDHI	REG3,UNIT(11)(a1)
	C_LDHI	REG4,UNIT(12)(a1)
	C_LDHI	REG5,UNIT(13)(a1)
	C_LDHI	REG6,UNIT(14)(a1)
	C_LDHI	REG7,UNIT(15)(a1)
	C_LDLO	t0,UNITM1(9)(a1)
	C_LDLO	t1,UNITM1(10)(a1)
	C_LDLO	REG2,UNITM1(11)(a1)
	C_LDLO	REG3,UNITM1(12)(a1)
	C_LDLO	REG4,UNITM1(13)(a1)
	C_LDLO	REG5,UNITM1(14)(a1)
	C_LDLO	REG6,UNITM1(15)(a1)
	C_LDLO	REG7,UNITM1(16)(a1)
	PREFETCH_FOR_LOAD (5, a1)
	C_ST	t0,UNIT(8)(a0)
	C_ST	t1,UNIT(9)(a0)
	C_ST	REG2,UNIT(10)(a0)
	C_ST	REG3,UNIT(11)(a0)
	C_ST	REG4,UNIT(12)(a0)
	C_ST	REG5,UNIT(13)(a0)
	C_ST	REG6,UNIT(14)(a0)
	C_ST	REG7,UNIT(15)(a0)
	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest */
	bne	a0,a3,L(ua_loop16w)
	PTR_ADDIU a1,a1,UNIT(16)	/* (delay slot) adding 64/128 to src */
	move	a2,t8			/* a2 = bytes left after the big chunks */
/* Here we have src and dest word-aligned but less than 64-bytes or
 * 128 bytes to go.  Check for a 32(64) byte chunk and copy it if there
 * is one.  Otherwise jump down to L(ua_chk1w) to handle the tail end of
 * the copy.  */
L(ua_chkw):
	PREFETCH_FOR_LOAD (0, a1)
	andi	t8,a2,NSIZEMASK	  /* Is there a 32-byte/64-byte chunk.  */
				  /* t8 is the remainder count past 32-bytes */
	beq	a2,t8,L(ua_chk1w) /* When a2=t8, no 32-byte chunk */
	nop
	C_LDHI	t0,UNIT(0)(a1)
	C_LDHI	t1,UNIT(1)(a1)
	C_LDHI	REG2,UNIT(2)(a1)
	C_LDHI	REG3,UNIT(3)(a1)
	C_LDHI	REG4,UNIT(4)(a1)
	C_LDHI	REG5,UNIT(5)(a1)
	C_LDHI	REG6,UNIT(6)(a1)
	C_LDHI	REG7,UNIT(7)(a1)
	C_LDLO	t0,UNITM1(1)(a1)
	C_LDLO	t1,UNITM1(2)(a1)
	C_LDLO	REG2,UNITM1(3)(a1)
	C_LDLO	REG3,UNITM1(4)(a1)
	C_LDLO	REG4,UNITM1(5)(a1)
	C_LDLO	REG5,UNITM1(6)(a1)
	C_LDLO	REG6,UNITM1(7)(a1)
	C_LDLO	REG7,UNITM1(8)(a1)
	PTR_ADDIU a1,a1,UNIT(8)
	C_ST	t0,UNIT(0)(a0)
	C_ST	t1,UNIT(1)(a0)
	C_ST	REG2,UNIT(2)(a0)
	C_ST	REG3,UNIT(3)(a0)
	C_ST	REG4,UNIT(4)(a0)
	C_ST	REG5,UNIT(5)(a0)
	C_ST	REG6,UNIT(6)(a0)
	C_ST	REG7,UNIT(7)(a0)
	PTR_ADDIU a0,a0,UNIT(8)
/*
 * Here we have less than 32(64) bytes to copy.  Set up for a loop to
 * copy one word (or double word) at a time.
 */
L(ua_chk1w):
	andi	a2,t8,(NSIZE-1)	/* a2 is the remainder past one (d)word chunks */
	beq	a2,t8,L(ua_smallCopy)
	PTR_SUBU a3,t8,a2	/* a3 is count of bytes in one (d)word chunks */
	PTR_ADDU a3,a0,a3	/* a3 is the dst address after loop */
/* copying in words (4-byte or 8-byte chunks) */
L(ua_wordCopy_loop):
	C_LDHI	v1,UNIT(0)(a1)
	C_LDLO	v1,UNITM1(1)(a1)
	PTR_ADDIU a0,a0,UNIT(1)
	PTR_ADDIU a1,a1,UNIT(1)
	bne	a0,a3,L(ua_wordCopy_loop)
	C_ST	v1,UNIT(-1)(a0)	/* (delay slot) */
/* Copy the last 8 (or 16) bytes */
L(ua_smallCopy):
	beqz	a2,L(leave)
	PTR_ADDU a3,a0,a2	/* a3 is the last dst address */
L(ua_smallCopy_loop):
	lb	v1,0(a1)
	PTR_ADDIU a0,a0,1
	PTR_ADDIU a1,a1,1
	bne	a0,a3,L(ua_smallCopy_loop)
	sb	v1,-1(a0)	/* (delay slot) */
	j	ra
	nop
#else /* R6_CODE */
# if __MIPSEB
#  define SWAP_REGS(X,Y) X, Y
#  define ALIGN_OFFSET(N) (N)
# else
#  define SWAP_REGS(X,Y) Y, X
#  define ALIGN_OFFSET(N) (NSIZE-N)
# endif
/* R6 removed lwl/lwr; unaligned copies are built from two aligned loads
   combined with the align/dalign instruction instead. */
# define R6_UNALIGNED_WORD_COPY(BYTEOFFSET) \
	andi	REG7, a2, (NSIZE-1);/* REG7 is # of bytes to copy byte by byte. */ \
	beq	REG7, a2, L(lastb); /* Check for bytes to copy by word	   */ \
	PTR_SUBU a3, a2, REG7;	/* a3 is number of bytes to be copied in   */ \
				/* (d)word chunks.			   */ \
	move	a2, REG7;	/* a2 is # of bytes to copy byte by byte   */ \
				/* after word loop is finished.		   */ \
	PTR_ADDU REG6, a0, a3;	/* REG6 is the dst address after loop.	   */ \
	PTR_SUBU REG2, a1, t8;	/* REG2 is the aligned src address.	   */ \
	PTR_ADDU a1, a1, a3;	/* a1 is addr of source after word loop.   */ \
	C_LD	t0, UNIT(0)(REG2);  /* Load first part of source.	   */ \
L(r6_ua_wordcopy##BYTEOFFSET):						   \
	C_LD	t1, UNIT(1)(REG2);  /* Load second part of source.	   */ \
	C_ALIGN	REG3, SWAP_REGS(t1,t0), ALIGN_OFFSET(BYTEOFFSET);	   \
	PTR_ADDIU a0, a0, UNIT(1);  /* Increment destination pointer.	   */ \
	PTR_ADDIU REG2, REG2, UNIT(1); /* Increment aligned source pointer.*/ \
	move	t0, t1;		/* Move second part of source to first.	   */ \
	bne	a0, REG6,L(r6_ua_wordcopy##BYTEOFFSET);			   \
	C_ST	REG3, UNIT(-1)(a0);					   \
	j	L(lastb);						   \
	nop
/* We are generating R6 code, the destination is 4 byte aligned and
   the source is not 4 byte aligned. t8 is 1, 2, or 3 depending on the
   alignment of the source. */
L(r6_unaligned1):
	R6_UNALIGNED_WORD_COPY(1)
L(r6_unaligned2):
	R6_UNALIGNED_WORD_COPY(2)
L(r6_unaligned3):
	R6_UNALIGNED_WORD_COPY(3)
# ifdef USE_DOUBLE
L(r6_unaligned4):
	R6_UNALIGNED_WORD_COPY(4)
L(r6_unaligned5):
	R6_UNALIGNED_WORD_COPY(5)
L(r6_unaligned6):
	R6_UNALIGNED_WORD_COPY(6)
L(r6_unaligned7):
	R6_UNALIGNED_WORD_COPY(7)
# endif
#endif /* R6_CODE */
	.set	at
	.set	reorder
END(MEMCPY_NAME)
|
4ms/metamodule-plugin-sdk
| 3,986
|
plugin-libc/newlib/libc/machine/mips/setjmp.S
|
/* This is a simple version of setjmp and longjmp for MIPS 32 and 64.
Ian Lance Taylor, Cygnus Support, 13 May 1993. */
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#define GPR_LAYOUT \
GPR_OFFSET ($16, 0); \
GPR_OFFSET ($17, 1); \
GPR_OFFSET ($18, 2); \
GPR_OFFSET ($19, 3); \
GPR_OFFSET ($20, 4); \
GPR_OFFSET ($21, 5); \
GPR_OFFSET ($22, 6); \
GPR_OFFSET ($23, 7); \
GPR_OFFSET ($29, 8); \
GPR_OFFSET ($30, 9); \
GPR_OFFSET ($31, 10)
#define NUM_GPRS_SAVED 11
#ifdef __mips_hard_float
#if _MIPS_SIM == _ABIN32
#define FPR_LAYOUT \
FPR_OFFSET ($f20, 0); \
FPR_OFFSET ($f22, 1); \
FPR_OFFSET ($f24, 2); \
FPR_OFFSET ($f26, 3); \
FPR_OFFSET ($f28, 4); \
FPR_OFFSET ($f30, 5);
#elif _MIPS_SIM == _ABI64
#define FPR_LAYOUT \
FPR_OFFSET ($f24, 0); \
FPR_OFFSET ($f25, 1); \
FPR_OFFSET ($f26, 2); \
FPR_OFFSET ($f27, 3); \
FPR_OFFSET ($f28, 4); \
FPR_OFFSET ($f29, 5); \
FPR_OFFSET ($f30, 6); \
FPR_OFFSET ($f31, 7);
#elif __mips_fpr == 0 || __mips_fpr == 64
/* This deals with the o32 FPXX and FP64 cases. Here we must use
SDC1 and LDC1 to access the FPRs. These instructions require
8-byte aligned addresses.
Unfortunately, the MIPS jmp_buf only guarantees 4-byte alignment
and this cannot be increased without breaking compatibility with
pre-existing objects built against newlib. There are 11 GPRS
saved in the jmp_buf so a buffer that happens to be 8-byte aligned
ends up leaving the FPR slots 4-byte aligned and an (only) 4-byte
aligned buffer leads to the FPR slots being 8-byte aligned!
To resolve this, we move the location of $31 to the last slot
in the jmp_buf when the overall buffer is 8-byte aligned. $31
is simply loaded/stored twice to avoid adding complexity to the
GPR_LAYOUT macro above as well as FPR_LAYOUT.
The location of the last slot is index 22 which is calculated
from there being 11 GPRs saved and then 12 FPRs saved so the
index of the last FPR is 11+11.
The base of the jmp_buf is modified in $4 to allow the
FPR_OFFSET macros to just use the usual constant slot numbers
regardless of whether the realignment happened or not. */
#define FPR_LAYOUT \
and $8, $4, 4; \
beq $8, $0, 1f; \
GPR_OFFSET ($31, 22); \
addiu $4, $4, -4; \
1: \
FPR_OFFSET ($f20, 0); \
FPR_OFFSET ($f22, 2); \
FPR_OFFSET ($f24, 4); \
FPR_OFFSET ($f26, 6); \
FPR_OFFSET ($f28, 8); \
FPR_OFFSET ($f30, 10);
#else /* Assuming _MIPS_SIM == _ABIO32 */
#define FPR_LAYOUT \
FPR_OFFSET ($f20, 0); \
FPR_OFFSET ($f21, 1); \
FPR_OFFSET ($f22, 2); \
FPR_OFFSET ($f23, 3); \
FPR_OFFSET ($f24, 4); \
FPR_OFFSET ($f25, 5); \
FPR_OFFSET ($f26, 6); \
FPR_OFFSET ($f27, 7); \
FPR_OFFSET ($f28, 8); \
FPR_OFFSET ($f29, 9); \
FPR_OFFSET ($f30, 10); \
FPR_OFFSET ($f31, 11);
#endif
#else
#define FPR_LAYOUT
#endif
#ifdef __mips64
#define BYTES_PER_WORD 8
#define LOAD_GPR ld
#define LOAD_FPR ldc1
#define STORE_GPR sd
#define STORE_FPR sdc1
#else
#define LOAD_GPR lw
#define STORE_GPR sw
#define BYTES_PER_WORD 4
#if __mips_fpr == 0 || __mips_fpr == 64
#define LOAD_FPR ldc1
#define STORE_FPR sdc1
#else
#define LOAD_FPR lwc1
#define STORE_FPR swc1
#endif
#endif
#define GPOFF(INDEX) (INDEX * BYTES_PER_WORD)
#define FPOFF(INDEX) ((INDEX + NUM_GPRS_SAVED) * BYTES_PER_WORD)
/* int setjmp (jmp_buf); */
/* Stores the callee-saved GPRs ($16-$23, $29 sp, $30 fp, $31 ra) and,
   when hard-float is configured, the ABI's callee-saved FPRs into the
   jmp_buf whose address arrives in $4 (a0), using the GPR_LAYOUT and
   FPR_LAYOUT macros defined above.  Returns 0 in $2 (v0) on this
   direct call; a later longjmp resumes here with a non-zero value.  */
.globl setjmp
.ent setjmp
setjmp:
	.frame $sp,0,$31
#define GPR_OFFSET(REG, INDEX) STORE_GPR REG,GPOFF(INDEX)($4)
#define FPR_OFFSET(REG, INDEX) STORE_FPR REG,FPOFF(INDEX)($4)
	GPR_LAYOUT		/* save the callee-saved GPRs */
	FPR_LAYOUT		/* save FPRs (expands empty for soft-float) */
#undef GPR_OFFSET
#undef FPR_OFFSET
	move $2,$0		/* return value 0 */
	j $31
	.end setjmp
/* volatile void longjmp (jmp_buf, int); */
/* Reloads the registers that setjmp stored from the jmp_buf in $4 (a0)
   — including $31 (ra) and $29 (sp) — and jumps to the restored return
   address, so execution resumes as if the matching setjmp had just
   returned.  The value in $5 (a1) becomes setjmp's apparent return
   value; per the C standard a value of 0 is replaced by 1.  */
.globl longjmp
.ent longjmp
longjmp:
	.frame $sp,0,$31
#define GPR_OFFSET(REG, INDEX) LOAD_GPR REG,GPOFF(INDEX)($4)
#define FPR_OFFSET(REG, INDEX) LOAD_FPR REG,FPOFF(INDEX)($4)
	GPR_LAYOUT		/* restore callee-saved GPRs, sp, fp, ra */
	FPR_LAYOUT		/* restore FPRs (empty for soft-float) */
#undef GPR_OFFSET
#undef FPR_OFFSET
	bne $5,$0,1f		/* keep caller's value when non-zero */
	li $5,1			/* longjmp must not make setjmp return 0 */
1:
	move $2,$5		/* v0 = value seen at the setjmp site */
	j $31			/* jump to restored return address */
	.end longjmp
|
4ms/metamodule-plugin-sdk
| 13,411
|
plugin-libc/newlib/libc/machine/mips/memset.S
|
/*
* Copyright (c) 2013
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef ANDROID_CHANGES
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _LIBC
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#else
# include <regdef.h>
# include <sys/asm.h>
#endif
/* Check to see if the MIPS architecture we are compiling for supports
prefetching. */
#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
# ifndef DISABLE_PREFETCH
# define USE_PREFETCH
# endif
#endif
#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
# ifndef DISABLE_DOUBLE
# define USE_DOUBLE
# endif
#endif
#ifndef USE_DOUBLE
# ifndef DISABLE_DOUBLE_ALIGN
# define DOUBLE_ALIGN
# endif
#endif
/* Some asm.h files do not have the L macro definition. */
#ifndef L
# if _MIPS_SIM == _ABIO32
# define L(label) $L ## label
# else
# define L(label) .L ## label
# endif
#endif
/* Some asm.h files do not have the PTR_ADDIU macro definition. */
#ifndef PTR_ADDIU
# ifdef USE_DOUBLE
# define PTR_ADDIU daddiu
# else
# define PTR_ADDIU addiu
# endif
#endif
/* New R6 instructions that may not be in asm.h. */
#ifndef PTR_LSA
# if _MIPS_SIM == _ABI64
# define PTR_LSA dlsa
# else
# define PTR_LSA lsa
# endif
#endif
/* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
or PREFETCH_STORE_STREAMED offers a large performance advantage
but PREPAREFORSTORE has some special restrictions to consider.
Prefetch with the 'prepare for store' hint does not copy a memory
location into the cache, it just allocates a cache line and zeros
it out. This means that if you do not write to the entire cache
line before writing it out to memory some data will get zero'ed out
when the cache line is written back to memory and data will be lost.
There are ifdef'ed sections of this memcpy to make sure that it does not
do prefetches on cache lines that are not going to be completely written.
This code is only needed and only used when PREFETCH_STORE_HINT is set to
PREFETCH_HINT_PREPAREFORSTORE. This code assumes that cache lines are
less than MAX_PREFETCH_SIZE bytes and if the cache line is larger it will
not work correctly. */
#ifdef USE_PREFETCH
# define PREFETCH_HINT_STORE 1
# define PREFETCH_HINT_STORE_STREAMED 5
# define PREFETCH_HINT_STORE_RETAINED 7
# define PREFETCH_HINT_PREPAREFORSTORE 30
/* If we have not picked out what hints to use at this point use the
standard load and store prefetch hints. */
# ifndef PREFETCH_STORE_HINT
# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
# endif
/* We double everything when USE_DOUBLE is true so we do 2 prefetches to
get 64 bytes in that case. The assumption is that each individual
prefetch brings in 32 bytes. */
# ifdef USE_DOUBLE
# define PREFETCH_CHUNK 64
# define PREFETCH_FOR_STORE(chunk, reg) \
pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
# else
# define PREFETCH_CHUNK 32
# define PREFETCH_FOR_STORE(chunk, reg) \
pref PREFETCH_STORE_HINT, (chunk)*32(reg)
# endif
/* MAX_PREFETCH_SIZE is the maximum size of a prefetch, it must not be less
than PREFETCH_CHUNK, the assumed size of each prefetch. If the real size
of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
hint is used, the code will not work correctly. If PREPAREFORSTORE is not
   used then MAX_PREFETCH_SIZE does not matter.  */
# define MAX_PREFETCH_SIZE 128
/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
than 5 on a STORE prefetch and that a single prefetch can never be larger
than MAX_PREFETCH_SIZE. We add the extra 32 when USE_DOUBLE is set because
we actually do two prefetches in that case, one 32 bytes after the other. */
# ifdef USE_DOUBLE
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
# else
# define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
# endif
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
&& ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
/* We cannot handle this because the initial prefetches may fetch bytes that
are before the buffer being copied. We start copies with an offset
of 4 so avoid this situation when using PREPAREFORSTORE. */
# error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
# endif
#else /* USE_PREFETCH not defined */
# define PREFETCH_FOR_STORE(offset, reg)
#endif
/* MIPS R6 removed the PREPAREFORSTORE prefetch hint; fall back to the
   streamed-store hint and enable the R6-specific code paths below.  */
#if __mips_isa_rev > 5
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
# undef PREFETCH_STORE_HINT
# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
# endif
# define R6_CODE
#endif
/* Allow the routine to be named something else if desired. */
#ifndef MEMSET_NAME
# define MEMSET_NAME memset
#endif
/* We load/store 64 bits at a time when USE_DOUBLE is true.
The C_ prefix stands for CHUNK and is used to avoid macro name
conflicts with system header files. */
#ifdef USE_DOUBLE
# define C_ST sd
# if __MIPSEB
# define C_STHI sdl /* high part is left in big-endian */
# else
# define C_STHI sdr /* high part is right in little-endian */
# endif
#else
# define C_ST sw
# if __MIPSEB
# define C_STHI swl /* high part is left in big-endian */
# else
# define C_STHI swr /* high part is right in little-endian */
# endif
#endif
/* Bookkeeping values for 32 vs. 64 bit mode. */
#ifdef USE_DOUBLE
# define NSIZE 8
# define NSIZEMASK 0x3f
# define NSIZEDMASK 0x7f
#else
# define NSIZE 4
# define NSIZEMASK 0x1f
# define NSIZEDMASK 0x3f
#endif
/* UNIT(n) is the byte offset of the n'th store unit (word or double word);
   UNITM1(n) is the offset of the last byte of that unit.  */
#define UNIT(unit) ((unit)*NSIZE)
#define UNITM1(unit) (((unit)*NSIZE)-1)
/* void *memset (void *dst, int c, size_t n)
   In:   a0 = dst, a1 = fill value, a2 = byte count.
   Out:  v0 = dst, returned unchanged.
   Plan: smear the fill byte across a full word, align the destination,
   store 64/128-byte chunks (with optional store prefetching), then a
   32/64-byte chunk, then word stores, then single bytes for the tail.
   Written for .set noreorder: branch delay slots are filled by hand.  */
#ifdef ANDROID_CHANGES
LEAF(MEMSET_NAME,0)
#else
LEAF(MEMSET_NAME)
#endif
.set nomips16
.set noreorder
/* If the size is less than 2*NSIZE (8 or 16), go to L(lastb). Regardless of
size, copy dst pointer to v0 for the return value. */
slti t2,a2,(2 * NSIZE)
bne t2,zero,L(lastb)
move v0,a0
/* If memset value is not zero, we copy it to all the bytes in a 32 or 64
bit word. */
beq a1,zero,L(set0) /* If memset value is zero no smear */
/* a3 = -dst; its low bits are the byte count needed to reach alignment
   (consumed at L(set0) and in the DOUBLE_ALIGN block below).  */
PTR_SUBU a3,zero,a0
nop
/* smear byte into 32 or 64 bit word */
#if ((__mips == 64) || (__mips == 32)) && (__mips_isa_rev >= 2)
# ifdef USE_DOUBLE
dins a1, a1, 8, 8 /* Replicate fill byte into half-word. */
dins a1, a1, 16, 16 /* Replicate fill byte into word. */
dins a1, a1, 32, 32 /* Replicate fill byte into dbl word. */
# else
ins a1, a1, 8, 8 /* Replicate fill byte into half-word. */
ins a1, a1, 16, 16 /* Replicate fill byte into word. */
# endif
#else
/* Pre-R2 ISA: replicate the byte with shift/or pairs instead of ins.  */
# ifdef USE_DOUBLE
and a1,0xff
dsll t2,a1,8
or a1,t2
dsll t2,a1,16
or a1,t2
dsll t2,a1,32
or a1,t2
# else
and a1,0xff
sll t2,a1,8
or a1,t2
sll t2,a1,16
or a1,t2
# endif
#endif
/* If the destination address is not aligned do a partial store to get it
aligned. If it is already aligned just jump to L(aligned). */
L(set0):
#ifndef R6_CODE
andi t2,a3,(NSIZE-1) /* word-unaligned address? */
beq t2,zero,L(aligned) /* t2 is the unalignment count */
PTR_SUBU a2,a2,t2
/* Unaligned partial store via swl/swr (sdl/sdr); not available on R6.  */
C_STHI a1,0(a0)
PTR_ADDU a0,a0,t2
#else /* R6_CODE */
/* R6 has no swl/swr: jump through a branch table to store the 1..NSIZE-1
   leading bytes individually, falling through lb7..lb1 as needed.  */
andi t2,a0,(NSIZE-1)
lapc t9,L(atable)
PTR_LSA t9,t2,t9,2
jrc t9
L(atable):
bc L(aligned)
# ifdef USE_DOUBLE
bc L(lb7)
bc L(lb6)
bc L(lb5)
bc L(lb4)
# endif
bc L(lb3)
bc L(lb2)
bc L(lb1)
L(lb7):
sb a1,6(a0)
L(lb6):
sb a1,5(a0)
L(lb5):
sb a1,4(a0)
L(lb4):
sb a1,3(a0)
L(lb3):
sb a1,2(a0)
L(lb2):
sb a1,1(a0)
L(lb1):
sb a1,0(a0)
li t9,NSIZE
subu t2,t9,t2
PTR_SUBU a2,a2,t2
PTR_ADDU a0,a0,t2
#endif /* R6_CODE */
L(aligned):
/* If USE_DOUBLE is not set we may still want to align the data on a 16
byte boundry instead of an 8 byte boundry to maximize the opportunity
of proAptiv chips to do memory bonding (combining two sequential 4
byte stores into one 8 byte store). We know there are at least 4 bytes
left to store or we would have jumped to L(lastb) earlier in the code. */
#ifdef DOUBLE_ALIGN
andi t2,a3,4
beq t2,zero,L(double_aligned)
PTR_SUBU a2,a2,t2
sw a1,0(a0)
PTR_ADDU a0,a0,t2
L(double_aligned):
#endif
/* Now the destination is aligned to (word or double word) aligned address
Set a2 to count how many bytes we have to copy after all the 64/128 byte
chunks are copied and a3 to the dest pointer after all the 64/128 byte
chunks have been copied. We will loop, incrementing a0 until it equals
a3. */
andi t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
beq a2,t8,L(chkw) /* if a2==t8, no 64-byte/128-byte chunks */
PTR_SUBU a3,a2,t8 /* subtract from a2 the reminder */
PTR_ADDU a3,a0,a3 /* Now a3 is the final dst after loop */
/* When in the loop we may prefetch with the 'prepare to store' hint,
in this case the a0+x should not be past the "t0-32" address. This
means: for x=128 the last "safe" a0 address is "t0-160". Alternatively,
for x=64 the last "safe" a0 address is "t0-96" In the current version we
will use "prefetch hint,128(a0)", so "t0-160" is the limit. */
#if defined(USE_PREFETCH) \
&& (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
PTR_ADDU t0,a0,a2 /* t0 is the "past the end" address */
PTR_SUBU t9,t0,PREFETCH_LIMIT /* t9 is the "last safe pref" address */
#endif
#if defined(USE_PREFETCH) \
&& (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
PREFETCH_FOR_STORE (1, a0)
PREFETCH_FOR_STORE (2, a0)
PREFETCH_FOR_STORE (3, a0)
#endif
/* Main loop: 16 word/double-word stores (64 or 128 bytes) per iteration.  */
L(loop16w):
#if defined(USE_PREFETCH) \
&& (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
sltu v1,t9,a0 /* If a0 > t9 don't use next prefetch */
bgtz v1,L(skip_pref)
nop
#endif
#ifndef R6_CODE
PREFETCH_FOR_STORE (4, a0)
PREFETCH_FOR_STORE (5, a0)
#else
PREFETCH_FOR_STORE (2, a0)
#endif
L(skip_pref):
C_ST a1,UNIT(0)(a0)
C_ST a1,UNIT(1)(a0)
C_ST a1,UNIT(2)(a0)
C_ST a1,UNIT(3)(a0)
C_ST a1,UNIT(4)(a0)
C_ST a1,UNIT(5)(a0)
C_ST a1,UNIT(6)(a0)
C_ST a1,UNIT(7)(a0)
C_ST a1,UNIT(8)(a0)
C_ST a1,UNIT(9)(a0)
C_ST a1,UNIT(10)(a0)
C_ST a1,UNIT(11)(a0)
C_ST a1,UNIT(12)(a0)
C_ST a1,UNIT(13)(a0)
C_ST a1,UNIT(14)(a0)
C_ST a1,UNIT(15)(a0)
PTR_ADDIU a0,a0,UNIT(16) /* adding 64/128 to dest */
bne a0,a3,L(loop16w)
nop
move a2,t8
/* Here we have dest word-aligned but less than 64-bytes or 128 bytes to go.
Check for a 32(64) byte chunk and copy if if there is one. Otherwise
jump down to L(chk1w) to handle the tail end of the copy. */
L(chkw):
andi t8,a2,NSIZEMASK /* is there a 32-byte/64-byte chunk. */
/* the t8 is the reminder count past 32-bytes */
beq a2,t8,L(chk1w)/* when a2==t8, no 32-byte chunk */
nop
C_ST a1,UNIT(0)(a0)
C_ST a1,UNIT(1)(a0)
C_ST a1,UNIT(2)(a0)
C_ST a1,UNIT(3)(a0)
C_ST a1,UNIT(4)(a0)
C_ST a1,UNIT(5)(a0)
C_ST a1,UNIT(6)(a0)
C_ST a1,UNIT(7)(a0)
PTR_ADDIU a0,a0,UNIT(8)
/* Here we have less than 32(64) bytes to set. Set up for a loop to
copy one word (or double word) at a time. Set a2 to count how many
bytes we have to copy after all the word (or double word) chunks are
copied and a3 to the dest pointer after all the (d)word chunks have
been copied. We will loop, incrementing a0 until a0 equals a3. */
L(chk1w):
andi a2,t8,(NSIZE-1) /* a2 is the reminder past one (d)word chunks */
beq a2,t8,L(lastb)
PTR_SUBU a3,t8,a2 /* a3 is count of bytes in one (d)word chunks */
PTR_ADDU a3,a0,a3 /* a3 is the dst address after loop */
/* copying in words (4-byte or 8 byte chunks) */
L(wordCopy_loop):
PTR_ADDIU a0,a0,UNIT(1)
bne a0,a3,L(wordCopy_loop)
C_ST a1,UNIT(-1)(a0) /* store in the branch delay slot */
/* Copy the last 8 (or 16) bytes */
L(lastb):
blez a2,L(leave)
PTR_ADDU a3,a0,a2 /* a3 is the last dst address */
L(lastbloop):
PTR_ADDIU a0,a0,1
bne a0,a3,L(lastbloop)
sb a1,-1(a0) /* store in the branch delay slot */
L(leave):
j ra
nop
.set at
.set reorder
END(MEMSET_NAME)
|
4ms/metamodule-plugin-sdk
| 5,889
|
plugin-libc/newlib/libc/machine/mips/strcmp.S
|
/*
* Copyright (c) 2014
* Imagination Technologies Limited.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY IMAGINATION TECHNOLOGIES LIMITED ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL IMAGINATION TECHNOLOGIES LIMITED BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef ANDROID_CHANGES
# include "machine/asm.h"
# include "machine/regdef.h"
#elif _LIBC
# include "machine/asm.h"
# include "machine/regdef.h"
#else
# include <regdef.h>
# include <sys/asm.h>
#endif
/* Technically strcmp should not read past the end of the strings being
compared. We will read a full word that may contain excess bits beyond
the NULL string terminator but unless ENABLE_READAHEAD is set, we will not
read the next word after the end of string. Setting ENABLE_READAHEAD will
improve performance but is technically illegal based on the definition of
strcmp. */
/* DELAY_READ expands to nothing when readahead is enabled (the next load
   fills the delay slot), otherwise to a nop so no out-of-bounds read
   happens in the delay slot.  */
#ifdef ENABLE_READAHEAD
# define DELAY_READ
#else
# define DELAY_READ nop
#endif
/* Testing on a little endian machine showed using CLZ was a
performance loss, so we are not turning it on by default. */
#if defined(ENABLE_CLZ) && (__mips_isa_rev > 1)
# define USE_CLZ
#endif
/* Some asm.h files do not have the L macro definition. */
#ifndef L
# if _MIPS_SIM == _ABIO32
# define L(label) $L ## label
# else
# define L(label) .L ## label
# endif
#endif
/* Some asm.h files do not have the PTR_ADDIU macro definition. */
#ifndef PTR_ADDIU
# ifdef USE_DOUBLE
# define PTR_ADDIU daddiu
# else
# define PTR_ADDIU addiu
# endif
#endif
/* Allow the routine to be named something else if desired. */
#ifndef STRCMP_NAME
# define STRCMP_NAME strcmp
#endif
/* int strcmp (const char *s1, const char *s2)
   In:   a0 = s1, a1 = s2.
   Out:  v0 = difference of first mismatching bytes (0 if equal).
   If both pointers are word aligned, compare a word at a time using the
   classic null-byte test (v - 0x01010101) & ~v & 0x80808080; otherwise
   fall back to the byte-at-a-time loop.  noreorder: delay slots are
   filled by hand throughout.  */
#ifdef ANDROID_CHANGES
LEAF(STRCMP_NAME, 0)
#else
LEAF(STRCMP_NAME)
#endif
.set nomips16
.set noreorder
or t0, a0, a1
andi t0,0x3
bne t0, zero, L(byteloop)
/* Both strings are 4 byte aligned at this point. */
/* t8 = 0x01010101 and t9 = 0x7f7f7f7f, constants for the null-byte
   detection below (the lui fills the branch delay slot above).  */
lui t8, 0x0101
ori t8, t8, 0x0101
lui t9, 0x7f7f
ori t9, 0x7f7f
/* Compare one aligned word from each string; branch to L(worddiff) on a
   mismatch and to L(returnzero) when the word contains a NUL byte.  */
#define STRCMP32(OFFSET) \
lw v0, OFFSET(a0); \
lw v1, OFFSET(a1); \
subu t0, v0, t8; \
bne v0, v1, L(worddiff); \
nor t1, v0, t9; \
and t0, t0, t1; \
bne t0, zero, L(returnzero)
L(wordloop):
STRCMP32(0)
DELAY_READ
STRCMP32(4)
DELAY_READ
STRCMP32(8)
DELAY_READ
STRCMP32(12)
DELAY_READ
STRCMP32(16)
DELAY_READ
STRCMP32(20)
DELAY_READ
STRCMP32(24)
DELAY_READ
STRCMP32(28)
PTR_ADDIU a0, a0, 32
b L(wordloop)
PTR_ADDIU a1, a1, 32
L(returnzero):
j ra
move v0, zero
/* The words differ (or one holds the terminating NUL): locate the first
   differing/NUL byte and return the byte difference.  */
L(worddiff):
#ifdef USE_CLZ
subu t0, v0, t8
nor t1, v0, t9
and t1, t0, t1
xor t0, v0, v1
or t0, t0, t1
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
wsbh t0, t0
rotr t0, t0, 16
# endif
clz t1, t0
and t1, 0xf8 /* round bit index down to a byte boundary */
# if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
neg t1
addu t1, 24
# endif
rotrv v0, v0, t1
rotrv v1, v1, t1
and v0, v0, 0xff
and v1, v1, 0xff
j ra
subu v0, v0, v1
#else /* USE_CLZ */
/* No CLZ: test the four bytes in string order (low byte first on LE,
   high byte first on BE), exiting at the first NUL or mismatch.  */
# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
andi t0, v0, 0xff
beq t0, zero, L(wexit01)
andi t1, v1, 0xff
bne t0, t1, L(wexit01)
srl t8, v0, 8
srl t9, v1, 8
andi t8, t8, 0xff
beq t8, zero, L(wexit89)
andi t9, t9, 0xff
bne t8, t9, L(wexit89)
srl t0, v0, 16
srl t1, v1, 16
andi t0, t0, 0xff
beq t0, zero, L(wexit01)
andi t1, t1, 0xff
bne t0, t1, L(wexit01)
srl t8, v0, 24
srl t9, v1, 24
# else /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
srl t0, v0, 24
beq t0, zero, L(wexit01)
srl t1, v1, 24
bne t0, t1, L(wexit01)
srl t8, v0, 16
srl t9, v1, 16
andi t8, t8, 0xff
beq t8, zero, L(wexit89)
andi t9, t9, 0xff
bne t8, t9, L(wexit89)
srl t0, v0, 8
srl t1, v1, 8
andi t0, t0, 0xff
beq t0, zero, L(wexit01)
andi t1, t1, 0xff
bne t0, t1, L(wexit01)
andi t8, v0, 0xff
andi t9, v1, 0xff
# endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
L(wexit89):
j ra
subu v0, t8, t9
L(wexit01):
j ra
subu v0, t0, t1
#endif /* USE_CLZ */
/* It might seem better to do the 'beq' instruction between the two 'lbu'
instructions so that the nop is not needed but testing showed that this
code is actually faster (based on glibc strcmp test). */
#define BYTECMP01(OFFSET) \
lbu v0, OFFSET(a0); \
lbu v1, OFFSET(a1); \
beq v0, zero, L(bexit01); \
nop; \
bne v0, v1, L(bexit01)
#define BYTECMP89(OFFSET) \
lbu t8, OFFSET(a0); \
lbu t9, OFFSET(a1); \
beq t8, zero, L(bexit89); \
nop; \
bne t8, t9, L(bexit89)
/* Unaligned path: compare 8 bytes per iteration, alternating register
   pairs (v0/v1 and t8/t9) so loads can overlap.  */
L(byteloop):
BYTECMP01(0)
BYTECMP89(1)
BYTECMP01(2)
BYTECMP89(3)
BYTECMP01(4)
BYTECMP89(5)
BYTECMP01(6)
BYTECMP89(7)
PTR_ADDIU a0, a0, 8
b L(byteloop)
PTR_ADDIU a1, a1, 8
L(bexit01):
j ra
subu v0, v0, v1
L(bexit89):
j ra
subu v0, t8, t9
.set at
.set reorder
END(STRCMP_NAME)
|
4ms/metamodule-plugin-sdk
| 2,619
|
plugin-libc/newlib/libc/machine/h8500/psi.S
|
/* convert psi to si inplace
Note that `fp' below isn't a segment register.
It's r6, the frame pointer. */
/* RET: far return in 32-bit code model, near return otherwise.  */
#if __CODE__==32
#define RET prts
#else
#define RET rts
#endif
/* __extpsisi<rm>: widen a psi (segment:register pointer) into a 32-bit
   value in a register pair — r_lsw gets the offset, r_msw gets the
   segment register contents.  */
#define EXTPSISI_SN(r_msw,r_lsw,sp) ; \
.global __extpsisi##r_msw ; \
__extpsisi##r_msw: ; \
mov r_msw,r_lsw ; \
stc sp,r_msw ; \
RET
EXTPSISI_SN(r2,r3,dp)
EXTPSISI_SN(r4,r5,ep)
/* __addpsi<an><rm>: add the psi in sr:an to the 32-bit value in
   r_msw:r_lsw (segment byte spilled through the stack for the addx).  */
#define ADDPSI_AR_RN(sr,an,r_msw,r_lsw) \
.global __addpsi##an##r_msw ; \
__addpsi##an##r_msw: ; \
stc sr,@-sp ; \
add an,r_lsw ; \
addx @sp+,r_msw ; \
RET
ADDPSI_AR_RN(dp,r2,r0,r1)
ADDPSI_AR_RN(dp,r2,r3,r4)
ADDPSI_AR_RN(ep,r4,r0,r1)
ADDPSI_AR_RN(ep,r4,r1,r2)
ADDPSI_AR_RN(ep,r4,r3,r4)
ADDPSI_AR_RN(ep,r4,r5,fp)
ADDPSI_AR_RN(tp,fp,r0,r1)
/* __addpsi<rm><an>: add the 32-bit value in r_msw:r_lsw into the psi in
   sr:an, using t_msw/t_lsw as saved-and-restored scratch registers.  */
#define ADDPSI_RN_AR(r_msw,r_lsw,sr,an,t_msw,t_lsw) \
.global __addpsi##r_msw##an ; \
__addpsi##r_msw##an: ; \
mov.w t_msw,@-sp ; \
mov.w t_lsw,@-sp ; \
stc sr,t_msw ; \
mov an,t_lsw ; \
add r_lsw,t_lsw ; \
addx r_msw,t_msw ; \
ldc t_msw,sr ; \
mov.w t_lsw,an ; \
mov.w @sp+,t_lsw ; \
mov.w @sp+,t_msw ; \
RET
ADDPSI_RN_AR(r0,r1,dp,r2,r4,r5)
ADDPSI_RN_AR(r0,r1,ep,r4,r2,r3)
/* __extpsihi<rm><r_msw>: widen a 16-bit value into a 32-bit register
   pair with a zero high word.  */
#define EXTPSIHI_RN_RN(rm,r_msw,r_lsw) ; \
.global __extpsihi##rm##r_msw ; \
__extpsihi##rm##r_msw: ; \
mov rm,r_lsw ; \
clr.w r_msw ; \
RET
EXTPSIHI_RN_RN(r3,r0,r1)
EXTPSIHI_RN_RN(r4,r0,r1)
EXTPSIHI_RN_RN(r5,r0,r1)
EXTPSIHI_RN_RN(r2,r0,r1)
/* ifdefed out, because gcc doesn't like the # character in the above
macro. The macro expands into an assembly languange comment anyways,
so it serves no useful purpose. */
#if 0
#define EXTPSIHI_RN_SN(rm,r_msw,r_lsw) ; \
.global __extpsihi##rm##r_lsw ; \
__extpsihi##rm##r_lsw: ; \
mov rm,r_lsw ; \
ldc \#0,r_msw ; \
RET
EXTPSIHI_RN_SN(r0,dp,r2)
EXTPSIHI_RN_SN(r0,ep,r4)
EXTPSIHI_RN_SN(r1,dp,r2)
EXTPSIHI_RN_SN(r1,ep,r4)
EXTPSIHI_RN_SN(r3,dp,r2)
EXTPSIHI_RN_SN(r3,ep,r4)
EXTPSIHI_RN_SN(r5,dp,r2)
EXTPSIHI_RN_SN(r5,ep,r4)
EXTPSIHI_RN_SN(r2,ep,r4)
#endif
/* __extpsisir0: value is already in r0:r1 in the right form — no-op.  */
#define EXTPSISI_RN(r_msw,r_lsw) ; \
.global __extpsisi##r_msw ; \
__extpsisi##r_msw: ; \
RET
EXTPSISI_RN(r0,r1)
/* __addpsi<ra><rb>: add psi sa:ra into psi sb:rb; r0/r1 are used as
   scratch for the segment bytes and restored from the stack.  */
#define ADDPSI_SA_SB(sa,ra,sb,rb) ; \
.global __addpsi##ra##rb ; \
__addpsi##ra##rb: ; \
mov.w r0,@-sp ; \
mov.w r1,@-sp ; \
stc sa,r0 ; \
stc sb,r1 ; \
add.w ra,rb ; \
addx r0,r1 ; \
ldc r1,sb ; \
mov.w @sp+,r1 ; \
mov.w @sp+,r0 ; \
RET
ADDPSI_SA_SB(dp,r2,ep,r4)
ADDPSI_SA_SB(ep,r4,dp,r2)
ADDPSI_SA_SB(tp,fp,dp,r2)
ADDPSI_SA_SB(tp,fp,ep,r4)
ADDPSI_SA_SB(dp,r2,dp,r2)
/* __addpsir0r0: double the 32-bit value in r0:r1 in place.  */
.global __addpsir0r0
__addpsir0r0:
add.w r1,r1
addx r0,r0
RET
|
4ms/metamodule-plugin-sdk
| 2,981
|
plugin-libc/newlib/libc/machine/or1k/setjmp.S
|
/*
Copyright (c) 2014, Hesham ALMatary
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* int setjmp (jmp_buf env)
   In:   r3 = env.
   Out:  r11 = 0 (direct return).
   jmp_buf layout (word offsets from r3): 0=r1(sp), 4=r2(fp), 8=r9(lr),
   12..44 = callee-saved even registers r14..r30, 48 = status register
   (SPR 17).  */
.align 4
.global setjmp
.type setjmp,@function
setjmp:
l.sw 0(r3), r1
l.sw 4(r3), r2
/* Skip r3-r8 as they are not preserved across function calls */
l.sw 8(r3), r9
/* Skip r10 as it's preserved to be used by TLS */
/* Skip r11, setjmp always set it to 0 */
/* The following set if registers are preserved across function calls */
l.sw 12(r3), r14
l.sw 16(r3), r16
l.sw 20(r3), r18
l.sw 24(r3), r20
l.sw 28(r3), r22
l.sw 32(r3), r24
l.sw 36(r3), r26
l.sw 40(r3), r28
l.sw 44(r3), r30
/* Save Status Register */
l.mfspr r13, r0, 17
l.sw 48(r3), r13
/* Set result register to 0 and jump */
// Different cases for optional delay slot
#if defined(__OR1K_NODELAY__)
l.addi r11, r0, 0
l.jr r9
#elif defined(__OR1K_DELAY__)
l.jr r9
l.addi r11, r0, 0
#else
l.addi r11, r0, 0
l.jr r9
l.nop
#endif
/* void longjmp (jmp_buf env, int val)
   In:   r3 = env, r4 = val.
   Restores the registers saved by setjmp above (same jmp_buf layout)
   and returns to the saved r9 with r11 = val, or r11 = 1 if val == 0,
   so the matching setjmp call appears to return nonzero.  */
.align 4
.global longjmp
.type longjmp,@function
longjmp:
/* If the second argument to longjmp is zero, set return address to 1,
otherwise set it to the value of the second argument */
l.addi r11, r0, 1
l.sfeq r4, r0
l.bf 1f
l.nop
l.addi r11, r4, 0
/* Load status register */
1:
l.lwz r15, 48(r3)
l.mtspr r0, r15, 17
l.lwz r1, 0(r3)
l.lwz r2, 4(r3)
/* Skip r3-r8 as they are not preserved across function calls */
l.lwz r9, 8(r3)
/* Skip r11 as it's always set by longjmp */
l.lwz r14, 12(r3)
l.lwz r16, 16(r3)
l.lwz r18, 20(r3)
l.lwz r20, 24(r3)
l.lwz r22, 28(r3)
l.lwz r24, 32(r3)
l.lwz r26, 36(r3)
l.lwz r28, 40(r3)
// Different cases for optional delay slot
#if defined(__OR1K_NODELAY__)
l.lwz r30, 44(r3)
l.jr r9
#elif defined(__OR1K_DELAY__)
l.jr r9
l.lwz r30, 44(r3)
#else
l.lwz r30, 44(r3)
l.jr r9
l.nop
#endif
|
4ms/metamodule-plugin-sdk
| 10,465
|
plugin-libc/newlib/libc/machine/powerpc/setjmp.S
|
/* This is a simple version of setjmp and longjmp for the PowerPC.
Ian Lance Taylor, Cygnus Support, 9 Feb 1994.
Modified by Jeff Johnston, Red Hat Inc. 2 Oct 2001.
Modified by Sebastian Huber, embedded brains GmbH. 22 Sep 2022. */
#include "ppc-asm.h"
# int setjmp (jmp_buf env)
# In:  r3 = env (rounded up here to an 8- or 16-byte boundary).
# Out: r3 = 0.
# Saves r1, r2, r13-r31, LR and CR, plus (per configuration) SPE 64-bit
# GPRs, FPRs f14-f31, and Altivec vrsave/v20-v31.  Stores walk forward
# with update forms (stwu/stdu/stfdu), so each "# offset" comment gives
# the cumulative offset within the buffer.
FUNC_START(setjmp)
#ifdef __ALTIVEC__
addi 3,3,15 # align Altivec to 16 byte boundary
#if __powerpc64__
clrrdi 3,3,4
#else
rlwinm 3,3,0,0,27
#endif
#else
addi 3,3,7 # align to 8 byte boundary
#if __powerpc64__
clrrdi 3,3,3
#else
rlwinm 3,3,0,0,28
#endif
#endif
#if __SPE__
/* If we are E500, then save 64-bit registers. */
evstdd 1,0(3) # offset 0
evstdd 2,8(3) # offset 8
evstdd 13,16(3) # offset 16
evstdd 14,24(3) # offset 24
evstdd 15,32(3) # offset 32
evstdd 16,40(3) # offset 40
evstdd 17,48(3) # offset 48
evstdd 18,56(3) # offset 56
evstdd 19,64(3) # offset 64
evstdd 20,72(3) # offset 72
evstdd 21,80(3) # offset 80
evstdd 22,88(3) # offset 88
evstdd 23,96(3) # offset 96
evstdd 24,104(3) # offset 104
evstdd 25,112(3) # offset 112
evstdd 26,120(3) # offset 120
evstdd 27,128(3) # offset 128
evstdd 28,136(3) # offset 136
evstdd 29,144(3) # offset 144
evstdd 30,152(3) # offset 152
evstdd 31,160(3) # offset 160
/* Add 164 to r3 to account for the amount of data we just
stored. Note that we are not adding 168 because the next
store instruction uses an offset of 4. */
addi 3,3,164
#elif __powerpc64__
/* In the first store, add 8 to r3 so that the subsequent floating
point stores are aligned on an 8 byte boundary and the Altivec
stores are aligned on a 16 byte boundary. */
stdu 1,8(3) # offset 8
stdu 2,8(3) # offset 16
stdu 13,8(3) # offset 24
stdu 14,8(3) # offset 32
stdu 15,8(3) # offset 40
stdu 16,8(3) # offset 48
stdu 17,8(3) # offset 56
stdu 18,8(3) # offset 64
stdu 19,8(3) # offset 72
stdu 20,8(3) # offset 80
stdu 21,8(3) # offset 88
stdu 22,8(3) # offset 96
stdu 23,8(3) # offset 104
stdu 24,8(3) # offset 112
stdu 25,8(3) # offset 120
stdu 26,8(3) # offset 128
stdu 27,8(3) # offset 136
stdu 28,8(3) # offset 144
stdu 29,8(3) # offset 152
stdu 30,8(3) # offset 160
stdu 31,8(3) # offset 168
mflr 4
stdu 4,8(3) # offset 176
mfcr 4
stwu 4,8(3) # offset 184
#else
stw 1,0(3) # offset 0
stwu 2,4(3) # offset 4
stwu 13,4(3) # offset 8
stwu 14,4(3) # offset 12
stwu 15,4(3) # offset 16
stwu 16,4(3) # offset 20
stwu 17,4(3) # offset 24
stwu 18,4(3) # offset 28
stwu 19,4(3) # offset 32
stwu 20,4(3) # offset 36
stwu 21,4(3) # offset 40
stwu 22,4(3) # offset 44
stwu 23,4(3) # offset 48
stwu 24,4(3) # offset 52
stwu 25,4(3) # offset 56
stwu 26,4(3) # offset 60
stwu 27,4(3) # offset 64
stwu 28,4(3) # offset 68
stwu 29,4(3) # offset 72
stwu 30,4(3) # offset 76
stwu 31,4(3) # offset 80
#endif
#if !__powerpc64__
/* If __SPE__, then add 84 to the offset shown from this point on until
the end of this function. This difference comes from the fact that
we save 21 64-bit registers instead of 21 32-bit registers above. */
mflr 4
stwu 4,4(3) # offset 84
mfcr 4
stwu 4,4(3) # offset 88
# one word pad to get floating point aligned on 8 byte boundary
#endif
/* Check whether we need to save FPRs. Checking __NO_FPRS__
on its own would be enough for GCC 4.1 and above, but older
compilers only define _SOFT_FLOAT, so check both. */
#if !defined (__NO_FPRS__) && !defined (_SOFT_FLOAT)
#if defined (__rtems__) && !defined (__PPC_CPU_E6500__)
/* For some RTEMS multilibs, the FPU and Altivec units are disabled
during interrupt handling. Do not save and restore the
corresponding registers in this case. */
mfmsr 5
andi. 5,5,0x2000 # MSR[FP] set?
beq 1f
#endif
/* If __powerpc64__, then add 96 to the offset shown from this point on until
the end of this function. This difference comes from the fact that
we save 23 64-bit registers instead of 23 32-bit registers above and
we take alignement requirements of floating point and Altivec stores
into account. */
stfdu 14,8(3) # offset 96
stfdu 15,8(3) # offset 104
stfdu 16,8(3) # offset 112
stfdu 17,8(3) # offset 120
stfdu 18,8(3) # offset 128
stfdu 19,8(3) # offset 136
stfdu 20,8(3) # offset 144
stfdu 21,8(3) # offset 152
stfdu 22,8(3) # offset 160
stfdu 23,8(3) # offset 168
stfdu 24,8(3) # offset 176
stfdu 25,8(3) # offset 184
stfdu 26,8(3) # offset 192
stfdu 27,8(3) # offset 200
stfdu 28,8(3) # offset 208
stfdu 29,8(3) # offset 216
stfdu 30,8(3) # offset 224
stfdu 31,8(3) # offset 232
1:
#endif
/* This requires a total of 21 * 4 + 18 * 8 + 4 + 4 + 4
bytes == 60 * 4 bytes == 240 bytes. */
#ifdef __ALTIVEC__
#if defined (__rtems__) && !defined (__PPC_CPU_E6500__)
mfmsr 5
andis. 5,5,0x200 # MSR[VEC] set?
beq 1f
#endif
/* save Altivec vrsave and vr20-vr31 registers */
mfspr 4,256 # vrsave register
stwu 4,16(3) # offset 248
addi 3,3,8
stvx 20,0,3 # offset 256
addi 3,3,16
stvx 21,0,3 # offset 272
addi 3,3,16
stvx 22,0,3 # offset 288
addi 3,3,16
stvx 23,0,3 # offset 304
addi 3,3,16
stvx 24,0,3 # offset 320
addi 3,3,16
stvx 25,0,3 # offset 336
addi 3,3,16
stvx 26,0,3 # offset 352
addi 3,3,16
stvx 27,0,3 # offset 368
addi 3,3,16
stvx 28,0,3 # offset 384
addi 3,3,16
stvx 29,0,3 # offset 400
addi 3,3,16
stvx 30,0,3 # offset 416
addi 3,3,16
stvx 31,0,3 # offset 432
1:
/* This requires a total of 240 + 8 + 8 + 12 * 16 == 448 bytes. */
#endif
li 3,0 # direct setjmp call returns 0
blr
FUNC_END(setjmp)
# void longjmp (jmp_buf env, int val)
# In:  r3 = env, r4 = val.
# Mirrors setjmp above: realigns r3 the same way, restores every saved
# register (and LR/CR, plus FPRs/Altivec as configured), then returns
# to the saved LR with r3 = val, or 1 if val was 0.
FUNC_START(longjmp)
#ifdef __ALTIVEC__
addi 3,3,15 # align Altivec to 16 byte boundary
#if __powerpc64__
clrrdi 3,3,4
#else
rlwinm 3,3,0,0,27
#endif
#else
addi 3,3,7 # align to 8 byte boundary
#if __powerpc64__
clrrdi 3,3,3
#else
rlwinm 3,3,0,0,28
#endif
#endif
#if __SPE__
/* If we are E500, then restore 64-bit registers. */
evldd 1,0(3) # offset 0
evldd 2,8(3) # offset 8
evldd 13,16(3) # offset 16
evldd 14,24(3) # offset 24
evldd 15,32(3) # offset 32
evldd 16,40(3) # offset 40
evldd 17,48(3) # offset 48
evldd 18,56(3) # offset 56
evldd 19,64(3) # offset 64
evldd 20,72(3) # offset 72
evldd 21,80(3) # offset 80
evldd 22,88(3) # offset 88
evldd 23,96(3) # offset 96
evldd 24,104(3) # offset 104
evldd 25,112(3) # offset 112
evldd 26,120(3) # offset 120
evldd 27,128(3) # offset 128
evldd 28,136(3) # offset 136
evldd 29,144(3) # offset 144
evldd 30,152(3) # offset 152
evldd 31,160(3) # offset 160
/* Add 164 to r3 to account for the amount of data we just
loaded. Note that we are not adding 168 because the next
load instruction uses an offset of 4. */
addi 3,3,164
#elif __powerpc64__
/* In the first load, add 8 to r3 so that the subsequent floating
point loades are aligned on an 8 byte boundary and the Altivec
loads are aligned on a 16 byte boundary. */
ldu 1,8(3) # offset 8
ldu 2,8(3) # offset 16
ldu 13,8(3) # offset 24
ldu 14,8(3) # offset 32
ldu 15,8(3) # offset 40
ldu 16,8(3) # offset 48
ldu 17,8(3) # offset 56
ldu 18,8(3) # offset 64
ldu 19,8(3) # offset 72
ldu 20,8(3) # offset 80
ldu 21,8(3) # offset 88
ldu 22,8(3) # offset 96
ldu 23,8(3) # offset 104
ldu 24,8(3) # offset 112
ldu 25,8(3) # offset 120
ldu 26,8(3) # offset 128
ldu 27,8(3) # offset 136
ldu 28,8(3) # offset 144
ldu 29,8(3) # offset 152
ldu 30,8(3) # offset 160
ldu 31,8(3) # offset 168
ldu 5,8(3) # offset 176
mtlr 5
lwzu 5,8(3) # offset 184
mtcrf 255,5
#else
lwz 1,0(3) # offset 0
lwzu 2,4(3) # offset 4
lwzu 13,4(3) # offset 8
lwzu 14,4(3) # offset 12
lwzu 15,4(3) # offset 16
lwzu 16,4(3) # offset 20
lwzu 17,4(3) # offset 24
lwzu 18,4(3) # offset 28
lwzu 19,4(3) # offset 32
lwzu 20,4(3) # offset 36
lwzu 21,4(3) # offset 40
lwzu 22,4(3) # offset 44
lwzu 23,4(3) # offset 48
lwzu 24,4(3) # offset 52
lwzu 25,4(3) # offset 56
lwzu 26,4(3) # offset 60
lwzu 27,4(3) # offset 64
lwzu 28,4(3) # offset 68
lwzu 29,4(3) # offset 72
lwzu 30,4(3) # offset 76
lwzu 31,4(3) # offset 80
#endif
/* If __SPE__, then add 84 to the offset shown from this point on until
the end of this function. This difference comes from the fact that
we restore 22 64-bit registers instead of 22 32-bit registers above. */
#if !__powerpc64__
lwzu 5,4(3) # offset 84
mtlr 5
lwzu 5,4(3) # offset 88
mtcrf 255,5
# one word pad to get floating point aligned on 8 byte boundary
#endif
/* Check whether we need to restore FPRs. Checking
__NO_FPRS__ on its own would be enough for GCC 4.1 and
above, but older compilers only define _SOFT_FLOAT, so
check both. */
#if !defined (__NO_FPRS__) && !defined (_SOFT_FLOAT)
#if defined (__rtems__) && !defined (__PPC_CPU_E6500__)
mfmsr 5
andi. 5,5,0x2000 # MSR[FP] set?
beq 1f
#endif
/* If __powerpc64__, then add 96 to the offset shown from this point on until
the end of this function. This difference comes from the fact that
we restore 23 64-bit registers instead of 23 32-bit registers above and
we take alignement requirements of floating point and Altivec loads
into account. */
lfdu 14,8(3) # offset 96
lfdu 15,8(3) # offset 104
lfdu 16,8(3) # offset 112
lfdu 17,8(3) # offset 120
lfdu 18,8(3) # offset 128
lfdu 19,8(3) # offset 136
lfdu 20,8(3) # offset 144
lfdu 21,8(3) # offset 152
lfdu 22,8(3) # offset 160
lfdu 23,8(3) # offset 168
lfdu 24,8(3) # offset 176
lfdu 25,8(3) # offset 184
lfdu 26,8(3) # offset 192
lfdu 27,8(3) # offset 200
lfdu 28,8(3) # offset 208
lfdu 29,8(3) # offset 216
lfdu 30,8(3) # offset 224
lfdu 31,8(3) # offset 232
1:
#endif
#ifdef __ALTIVEC__
#if defined (__rtems__) && !defined (__PPC_CPU_E6500__)
mfmsr 5
andis. 5,5,0x200 # MSR[VEC] set?
beq 1f
#endif
/* restore Altivec vrsave and v20-v31 registers */
lwzu 5,16(3) # offset 248
mtspr 256,5 # vrsave
addi 3,3,8
lvx 20,0,3 # offset 256
addi 3,3,16
lvx 21,0,3 # offset 272
addi 3,3,16
lvx 22,0,3 # offset 288
addi 3,3,16
lvx 23,0,3 # offset 304
addi 3,3,16
lvx 24,0,3 # offset 320
addi 3,3,16
lvx 25,0,3 # offset 336
addi 3,3,16
lvx 26,0,3 # offset 352
addi 3,3,16
lvx 27,0,3 # offset 368
addi 3,3,16
lvx 28,0,3 # offset 384
addi 3,3,16
lvx 29,0,3 # offset 400
addi 3,3,16
lvx 30,0,3 # offset 416
addi 3,3,16
lvx 31,0,3 # offset 432
1:
#endif
# r3 = val; the dot form sets CR0, and bclr+ returns early when val != 0.
mr. 3,4
bclr+ 4,2
li 3,1 # val was 0: make setjmp appear to return 1
blr
FUNC_END(longjmp)
|
4ms/metamodule-plugin-sdk
| 6,544
|
plugin-libc/newlib/libc/sys/d10v/trap.S
|
#include "sys/syscall.h"
/* SYSCALL(name): emit a stub that loads the syscall number SYS<name>
   into r4 and branches to the common __trap0 trampoline.  (Currently
   expanded by hand below; see the comment there.)  */
#define SYSCALL(name) \
.global name ; \
name: ; \
ldi r4, SYS ## name ; \
bra __trap0
.text
.stabs "trap.S",100,0,0,__trap0
.stabs "int:t(0,1)=r(0,1);-65536;65535;",128,0,0,0
.stabs "long int:t(0,2)=r(0,1);0020000000000;0017777777777;",128,0,0,0
.stabs "_trap0:F(0,1)",36,0,1,__trap0
.stabs "arg1:P(0,1)",64,0,1,0
.stabs "arg2:P(0,1)",64,0,1,1
.stabs "arg3:P(0,1)",64,0,1,2
.stabs "arg4:P(0,1)",64,0,1,3
.stabs "number:P(0,1)",64,0,1,4
/* __trap0: common syscall trampoline.  Expects the syscall number in r4
   (set by the per-syscall stubs).  Issues trap 15, stores any nonzero
   error code into errno (sign-extended to 32 bits when __INT__==32),
   and returns the trap's result to the original caller via r13.  */
.global __trap0
.type __trap0,@function
__trap0:
trap 15 /* trap 15 returns result in r0, error code in r4 */
cmpeqi r4,0 /* is error code zero? */
brf0t ret /* yes, skip setting errno */
#if __INT__==32
st r4,@(errno+2,r14) /* no, set errno */
srai r4,15 /* sign extend high word */
st r4,@(errno,r14)
#else
st r4,@(errno,r14) /* no, set errno */
#endif
ret:
jmp r13 /* return to caller */
.Ltrap0:
.size __trap0,.Ltrap0-__trap0
.stabs "",36,0,0,.Ltrap0-__trap0
/* Token-pasting / stringizing helpers used to build the stab strings
   below from the (possibly renamed) entry-point names. */
#define CONCAT(a,b) a ## b
#define STRING(a) #a
#define XSTRING(a) STRING(a)
#define XSTRING2(a,b) XSTRING(CONCAT(a,b))
/* When compiled with 32-bit ints, rename every entry point with a
   "16" suffix -- presumably so the two int-size multilibs can
   coexist; TODO confirm against the d10v build machinery. */
#if __INT__==32
#define _read _read16
#define _lseek _lseek16
#define _write _write16
#define _close _close16
#define _open _open16
#define _creat _creat16
#define _exit _exit16
#define _stat _stat16
#define _chmod _chmod16
#define _chown _chown16
#define _fork _fork16
#define _wait _wait16
#define _execve _execve16
#define _execv _execv16
#define _pipe _pipe16
#define _kill _kill16
#define _getpid _getpid16
#endif
/* Until the assembler allows semicolon as a statement separator, */
/* we cannot use the SYSCALL macro. So expand it manually for now. */
/* #SYSCALL(_read) */
/* #SYSCALL(_lseek) */
/* #SYSCALL(_write) */
/* #SYSCALL(_close) */
/* #SYSCALL(_open) */
/* #SYSCALL(_creat) */
/* #SYSCALL(_exit) */
/* #SYSCALL(_stat) */
/* #SYSCALL(_chmod) */
/* #SYSCALL(_chown) */
/* #SYSCALL(_fork) */
/* #SYSCALL(_wait) */
/* #SYSCALL(_execve) */
/* #SYSCALL(_execv) */
/* #SYSCALL(_pipe) */
/* #SYSCALL(_getpid) */
/* #SYSCALL(_kill) */
/* Hand-expanded SYSCALL stubs.  Each one loads its syscall number
   into r4 and branches to __trap0, which performs the trap, sets
   errno on failure, and returns to the original caller. */
.global _read
.type _read,@function
.stabs XSTRING2(_read,:F(0,1)),36,0,2,_read
.stabs "fd:P(0,1)",64,0,1,0
.stabs "ptr:P(0,1)",64,0,1,1
.stabs "len:P(0,1)",64,0,1,2
_read:
ldi r4, SYS_read
bra __trap0
.Lread:
.size _read,.-_read
.stabs "",36,0,0,.Lread-_read
.global _lseek
.type _lseek,@function
.stabs XSTRING2(_lseek,:F(0,1)),36,0,3,_lseek
.stabs "fd:P(0,1)",64,0,1,0
.stabs "offset:P(0,1)",64,0,1,2
.stabs "whence:p(0,1)",160,0,1,0
_lseek:
ldi r4, SYS_lseek
bra __trap0
.Llseek:
.size _lseek,.Llseek-_lseek
.stabs "",36,0,0,.Llseek-_lseek
.global _write
.type _write,@function
.stabs XSTRING2(_write,:F(0,1)),36,0,4,_write
.stabs "fd:P(0,1)",64,0,1,0
.stabs "ptr:P(0,1)",64,0,1,1
.stabs "len:P(0,1)",64,0,1,2
_write:
ldi r4, SYS_write
bra __trap0
.Lwrite:
.size _write,.Lwrite-_write
.stabs "",36,0,0,.Lwrite-_write
.global _close
.type _close,@function
.stabs XSTRING2(_close,:F(0,1)),36,0,5,_close
.stabs "fd:P(0,1)",64,0,1,0
_close:
ldi r4, SYS_close
bra __trap0
.Lclose:
.size _close,.Lclose-_close
.stabs "",36,0,0,.Lclose-_close
.global _open
.type _open,@function
.stabs XSTRING2(_open,:F(0,1)),36,0,6,_open
.stabs "name:P(0,1)",64,0,1,0
.stabs "flags:P(0,1)",64,0,1,1
.stabs "mode:P(0,1)",64,0,1,2
_open:
ldi r4, SYS_open
bra __trap0
.Lopen:
.size _open,.Lopen-_open
.stabs "",36,0,0,.Lopen-_open
.global _creat
.type _creat,@function
.stabs XSTRING2(_creat,:F(0,1)),36,0,7,_creat
.stabs "name:P(0,1)",64,0,1,0
.stabs "mode:P(0,1)",64,0,1,1
_creat:
ldi r4, SYS_creat
bra __trap0
.Lcreat:
.size _creat,.Lcreat-_creat
.stabs "",36,0,0,.Lcreat-_creat
.global _exit
.type _exit,@function
.stabs XSTRING2(_exit,:F(0,1)),36,0,8,_exit
.stabs "status:P(0,1)",64,0,1,0
_exit:
ldi r4, SYS_exit
bra __trap0
.Lexit:
.size _exit,.Lexit-_exit
.stabs "",36,0,0,.Lexit-_exit
.global _stat
.type _stat,@function
.stabs XSTRING2(_stat,:F(0,1)),36,0,9,_stat
.stabs "name:P(0,1)",64,0,1,0
.stabs "packet:P(0,1)",64,0,1,1
_stat:
ldi r4, SYS_stat
bra __trap0
.Lstat:
.size _stat,.Lstat-_stat
.stabs "",36,0,0,.Lstat-_stat
.global _chmod
.type _chmod,@function
.stabs XSTRING2(_chmod,:F(0,1)),36,0,10,_chmod
.stabs "name:P(0,1)",64,0,1,0
.stabs "mode:P(0,1)",64,0,1,1
_chmod:
ldi r4, SYS_chmod
bra __trap0
.Lchmod:
.size _chmod,.Lchmod-_chmod
.stabs "",36,0,0,.Lchmod-_chmod
.global _chown
.type _chown,@function
.stabs XSTRING2(_chown,:F(0,1)),36,0,11,_chown
.stabs "name:P(0,1)",64,0,1,0
.stabs "uid:P(0,1)",64,0,1,1
.stabs "gid:P(0,1)",64,0,1,2
_chown:
ldi r4, SYS_chown
bra __trap0
.Lchown:
.size _chown,.Lchown-_chown
.stabs "",36,0,0,.Lchown-_chown
.global _fork
.type _fork,@function
.stabs XSTRING2(_fork,:F(0,1)),36,0,12,_fork
_fork:
ldi r4, SYS_fork
bra __trap0
.Lfork:
.size _fork,.Lfork-_fork
.stabs "",36,0,0,.Lfork-_fork
.global _wait
.type _wait,@function
.stabs "status:P(0,1)",64,0,1,0
.stabs XSTRING2(_wait,:F(0,1)),36,0,13,_wait
_wait:
ldi r4, SYS_wait
bra __trap0
.Lwait:
.size _wait,.Lwait-_wait
.stabs "",36,0,0,.Lwait-_wait
.global _execve
.type _execve,@function
.stabs "name:P(0,1)",64,0,1,0
.stabs "argv:P(0,1)",64,0,1,1
.stabs "envp:P(0,1)",64,0,1,2
.stabs XSTRING2(_execve,:F(0,1)),36,0,14,_execve
_execve:
ldi r4, SYS_execve
bra __trap0
.Lexecve:
.size _execve,.Lexecve-_execve
.stabs "",36,0,0,.Lexecve-_execve
.global _execv
.type _execv,@function
.stabs XSTRING2(_execv,:F(0,1)),36,0,15,_execv
.stabs "name:P(0,1)",64,0,1,0
.stabs "argv:P(0,1)",64,0,1,1
_execv:
ldi r4, SYS_execv
bra __trap0
.Lexecv:
.size _execv,.Lexecv-_execv
.stabs "",36,0,0,.Lexecv-_execv
.global _pipe
.type _pipe,@function
.stabs XSTRING2(_pipe,:F(0,1)),36,0,16,_pipe
.stabs "fds:P(0,1)",64,0,1,0
_pipe:
ldi r4, SYS_pipe
bra __trap0
.Lpipe:
.size _pipe,.Lpipe-_pipe
.stabs "",36,0,0,.Lpipe-_pipe
/* Note: time() is deliberately NOT renamed/underscored like the rest. */
.global time
.type time,@function
.stabs XSTRING2(time,:F(0,1)),36,0,17,time
.stabs "ptr:P(0,1)",64,0,1,0
time:
ldi r4, SYS_time
bra __trap0
.Ltime:
.size time,.Ltime-time
.stabs "",36,0,0,.Ltime-time
.global _kill
.type _kill,@function
.stabs XSTRING2(_kill,:F(0,1)),36,0,18,_kill
.stabs "pid:P(0,1)",64,0,1,0
.stabs "sig:P(0,1)",64,0,1,1
_kill:
ldi r4, SYS_kill
bra __trap0
.Lkill:
.size _kill,.Lkill-_kill
.stabs "",36,0,0,.Lkill-_kill
.global _getpid
.type _getpid,@function
.stabs XSTRING2(_getpid,:F(0,1)),36,0,19,_getpid
_getpid:
ldi r4, SYS_getpid
bra __trap0
.Lgetpid:
.size _getpid,.Lgetpid-_getpid
.stabs "",36,0,0,.Lgetpid-_getpid
|
4ms/metamodule-plugin-sdk
| 1,486
|
plugin-libc/newlib/libc/sys/d10v/crt0.S
|
.text
.global _start
.type _start,@function
.stabs "crt0.S",100,0,0,_start
.stabs "int:t(0,1)=r(0,1);-32768;32767;",128,0,0,0
.stabs "_start:F(0,1)",36,0,1,_start
; D10V program entry point: set up the system and user stacks, clear
; the BSS, then call main() followed by exit(); "stop" halts the
; simulator if exit() ever returns.
_start:
; R14 always contains memory base address (0)
ldi r14,0
; Set the USER and SYSTEM stack pointers.
ldi r0, 0 ; zero arguments
ldi r1, 0
mvtc r0, psw ; select SPI and set it
ldi sp, _stack
ldi r10, 0x8000 ; select SPU/FP and set it
mvtc r10, psw || ldi r11, 0; clear stack frame
ldi sp, _stack - 0x200
ldi r13, 0
; Clear the BSS. Do it in two parts for efficiency: longwords first
; for most of it, then the remaining 0 to 3 bytes.
ldi r2, __bss_start ; R2 = start of BSS
ldi r3, _end ; R3 = end of BSS + 1
sub r3, r2 ; R3 = BSS size in bytes
mv r4, r3
srli r4, 2 ; R4 = BSS size in longwords (rounded down)
loop1:
cmpeqi r4, 0 ; more longwords to zero out?
brf0t.s endloop1 ; nope
st2w r0, @r2+ ; yep, zero out another longword (r0:r1 pair)
subi r4, 1 ; decrement count
bra.l loop1 ; go do some more
endloop1:
and3 r4, r3, 3 ; get no. of remaining BSS bytes to clear
loop2:
cmpeqi r4, 0 ; more bytes to zero out?
brf0t.s endloop2 ; nope
stb r0, @r2 ; yep, zero out another byte
addi r2, 1 ; bump address
subi r4, 1 ; decrement count
bra.s loop2 ; go do some more
endloop2:
; Call main, then stop simulator
st r11, @-sp
st r13, @-sp
mv r11, sp
bl main
bl exit
stop
.Lstart:
.size _start,.Lstart-_start
.stabs "",36,0,0,.Lstart-_start
; Placeholder word so the .stack section (and the _stack symbol the
; code above relies on) exists; the linker script sizes/places it.
.section .stack
_stack: .long 1
|
4ms/metamodule-plugin-sdk
| 133,709
|
plugin-libc/newlib/libc/sys/rdos/rdos.S
|
/*#######################################################################
# RDOS operating system
# Copyright (C) 1988-2006, Leif Ekblad
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# The author of this program may be contacted at leif@rdos.net
#
# rdos.S
# GCC based interface to RDOS kernel
#
##########################################################################*/
#include "user.def"
/* UserGate nr: invoke RDOS kernel service "nr".  Emits a far call
   (opcode 0x9A) with a 48-bit ptr16:32 operand: 32-bit offset \nr
   (the gate/service number) and 16-bit segment selector 2 --
   presumably a fixed call-gate selector; confirm against user.def. */
.macro UserGate nr
.byte 0x9A
.long \nr
.word 2
.endm
/*##########################################################################
#
# Name : RdosSwapShort
#
# Purpose....: Byte reverse a short int
#
# Returns....: Result
#
##########################################################################*/
.global RdosSwapShort
RdosSwapShort:
pushl %ebp
movl %esp,%ebp
movw 8(%ebp),%ax /* low 16 bits of the argument */
xchgb %ah,%al /* swap the two bytes; result returned in %ax */
leave
ret
/*##########################################################################
#
# Name : RdosSwapLong
#
# Purpose....: Byte reverse a long int
#
# Returns....: Result
#
##########################################################################*/
.global RdosSwapLong
RdosSwapLong:
pushl %ebp
movl %esp,%ebp
movl 8(%ebp),%eax
/* 32-bit byte reversal without bswap (works on pre-486 CPUs):
   swap bytes of the low word, rotate halves, swap again. */
xchgb %ah,%al
roll $16,%eax
xchgb %ah,%al
leave
ret
/*##########################################################################
#
# Name : LocalToNetworkLong
#
# Purpose....: Convert a local long to network format
#
# Returns....: Network format
#
##########################################################################*/
.global LocalToNetworkLong
LocalToNetworkLong:
pushl %ebp
movl %esp,%ebp
movl 8(%ebp),%eax
/* Same byte-reversal sequence as RdosSwapLong (x86 is little-endian,
   network order is big-endian, so htonl == byte swap). */
xchgb %ah,%al
roll $16,%eax
xchgb %ah,%al
leave
ret
/*##########################################################################
#
# Name : NetworkToLocalLong
#
# Purpose....: Convert a network long to local format
#
# Returns....: Local format
#
##########################################################################*/
.global NetworkToLocalLong
NetworkToLocalLong:
pushl %ebp
movl %esp,%ebp
movl 8(%ebp),%eax
/* Byte swap is its own inverse, so ntohl is identical to htonl. */
xchgb %ah,%al
roll $16,%eax
xchgb %ah,%al
leave
ret
/*##########################################################################
#
# Name : RdosGetThreadHandle
#
# Purpose....: Get current thread handle
#
# Returns....: Thread ID
#
##########################################################################*/
.global RdosGetThreadHandle
RdosGetThreadHandle:
pushl %ebp
movl %esp,%ebp
UserGate get_thread_nr
movzx %ax,%eax /* gate returns a 16-bit handle in %ax; zero-extend */
leave
ret
/*##########################################################################
#
# Name : RdosGetThreadState
#
# Purpose....: Get thread state
#
# Parameters.: Thread #
# State buffer
#
##########################################################################*/
.global RdosGetThreadState
RdosGetThreadState:
pushl %ebp
movl %esp,%ebp
push %edi
movl 8(%ebp),%eax /* thread # */
movl 12(%ebp),%edx /* state buffer */
UserGate get_thread_state_nr
jc rgtsFail /* gate signals failure via carry flag */
movl $1,%eax /* TRUE */
jmp rgtsDone
rgtsFail:
xorl %eax,%eax /* FALSE */
rgtsDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosSuspendThread
#
# Purpose....: Suspend thread
#
# Parameters.: Thread #
#
##########################################################################*/
.global RdosSuspendThread
RdosSuspendThread:
pushl %ebp
movl %esp,%ebp
movl 8(%ebp),%eax /* thread # */
UserGate suspend_thread_nr
jc rsfFail /* carry set => failure */
movl $1,%eax
jmp rsfDone
rsfFail:
xorl %eax,%eax
rsfDone:
leave
ret
/*##########################################################################
#
# Name : RdosExec
#
# Purpose....: Execute a program
#
# Parameters.: Program
# Commandline
#
##########################################################################*/
.global RdosExec
RdosExec:
pushl %ebp
movl %esp,%ebp
pushl %esi
pushl %edi
movl 8(%ebp),%esi /* program name */
movl 12(%ebp),%edi /* command line */
UserGate load_exe_nr
UserGate get_exit_code_nr /* wait for and fetch the program's exit code */
popl %edi
popl %esi
leave
ret
/*##########################################################################
#
# Name : RdosSpawn
#
# Purpose....: Create new process and run a program
#
# Parameters.: Program
# Commandline
# StartDir
#
##########################################################################*/
/* RdosSpawn(program, cmdline, startdir)
   Create a new process running "program".  Inputs go to the gate in
   fs:esi (program), edi (cmdline) and ebx (startdir), with edx
   cleared first.  Returns the 16-bit process handle (zero-extended)
   in %eax, or 0 on failure (gate reports failure via carry).
   Fix: the epilogue used "popw %fs", which pops only 2 bytes while
   the prologue's "pushl %fs" pushed 4; use popl so the stack
   adjustment matches (the imbalance was previously masked by leave). */
.global RdosSpawn
RdosSpawn:
pushl %ebp
movl %esp,%ebp
pushl %fs
pushl %ebx
pushl %edx
pushl %esi
pushl %edi
movw %ds,%dx
movw %dx,%fs /* fs:esi = program name (flat data segment) */
xorl %edx,%edx
movl 8(%ebp),%esi
movl 12(%ebp),%edi
movl 16(%ebp),%ebx
UserGate spawn_exe_nr
jc rsFail
movzx %dx,%eax /* handle of spawned process */
jmp rsDone
rsFail:
xorl %eax,%eax
rsDone:
popl %edi
popl %esi
popl %edx
popl %ebx
popl %fs /* match the 4-byte pushl %fs above (was popw) */
leave
ret
/*##########################################################################
#
# Name : RdosCpuReset
#
# Purpose....: Cpu reset
#
##########################################################################*/
.global RdosCpuReset
RdosCpuReset:
pushl %ebp
movl %esp,%ebp
UserGate cpu_reset_nr /* does not normally return */
leave
ret
/*##########################################################################
#
# Name : RdosGetVersion
#
# Purpose....: Get RDOS version
#
# Parameters.: &major
# &minor
# &release
#
##########################################################################*/
.global RdosGetVersion
RdosGetVersion:
pushl %ebp
movl %esp,%ebp
pushl %eax
pushl %ecx
pushl %edx
pushl %edi
UserGate get_version_nr
/* Gate returns major in dx, minor in ax, release in cx; zero-extend
   each 16-bit value and store through the caller's int pointers. */
movzx %dx,%edx
movl 8(%ebp),%edi
movl %edx,(%edi) /* *major */
movzx %ax,%eax
movl 12(%ebp),%edi
movl %eax,(%edi) /* *minor */
movzx %cx,%eax
movl 16(%ebp),%edi
movl %eax,(%edi) /* *release */
popl %edi
popl %edx
popl %ecx
popl %eax
leave
ret
/*##########################################################################
#
# Name : RdosAllocateMem
#
# Purpose....: Allocate memory
#
# Parameters.: Bytes to allocate
#
# Returns....: Memory pointer
#
##########################################################################*/
.global RdosAllocateMem
RdosAllocateMem:
pushl %ebp
movl %esp,%ebp
pushl %edx
movl 8(%ebp),%eax /* byte count */
UserGate allocate_app_mem_nr
movl %edx,%eax /* gate returns the pointer in %edx */
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosFreeMem
#
# Purpose....: Free memory
#
# Parameters.: Memory pointer
#
##########################################################################*/
.global RdosFreeMem
RdosFreeMem:
pushl %ebp
movl %esp,%ebp
pushl %edx
movl 8(%ebp),%edx /* pointer previously returned by RdosAllocateMem */
UserGate free_app_mem_nr
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosAppDebug
#
# Purpose....: App debug
#
##########################################################################*/
.global RdosAppDebug
RdosAppDebug:
pushl %ebp
movl %esp,%ebp
UserGate app_debug_nr
leave
ret
/*##########################################################################
#
# Name : RdosWaitMilli
#
# Purpose....: Wait a number of milliseconds
#
# Parameters.: Milliseconds to wait
#
##########################################################################*/
.global RdosWaitMilli
RdosWaitMilli:
pushl %ebp
movl %esp,%ebp
pushl %eax
movl 8(%ebp),%eax /* delay in milliseconds */
UserGate wait_milli_nr
popl %eax
leave
ret
/*##########################################################################
#
# Name : RdosCreateSection
#
# Purpose....: Create section
#
# Returns....: Section handle
#
##########################################################################*/
.global RdosCreateSection
RdosCreateSection:
pushl %ebp
movl %esp,%ebp
pushl %ebx
UserGate create_user_section_nr
movzx %bx,%eax /* 16-bit section handle returned in %bx */
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosDeleteSection
#
# Purpose....: Delete section
#
# Parameters.: Section handle
#
##########################################################################*/
.global RdosDeleteSection
RdosDeleteSection:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx /* section handle (gates take handles in %bx) */
UserGate delete_user_section_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosEnterSection
#
# Purpose....: Enter section
#
# Parameters.: Section handle
#
##########################################################################*/
.global RdosEnterSection
RdosEnterSection:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate enter_user_section_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosLeaveSection
#
# Purpose....: Leave section
#
# Parameters.: Section handle
#
##########################################################################*/
.global RdosLeaveSection
RdosLeaveSection:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate leave_user_section_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCreateWait
#
# Purpose....: Create wait object
#
# Returns....: Wait handle
#
##########################################################################*/
.global RdosCreateWait
RdosCreateWait:
pushl %ebp
movl %esp,%ebp
pushl %ebx
UserGate create_wait_nr
movzx %bx,%eax /* 16-bit wait handle returned in %bx */
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseWait
#
# Purpose....: Close wait
#
# Parameters.: Wait handle
#
##########################################################################*/
.global RdosCloseWait
RdosCloseWait:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate close_wait_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCheckWait
#
# Purpose....: Check wait state
#
# Parameters.: Wait handle
#
# Returns....: Signalled ID or 0
#
##########################################################################*/
.global RdosCheckWait
RdosCheckWait:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
movw 8(%ebp),%bx
UserGate is_wait_idle_nr
movl %ecx,%eax /* signalled object's ID (in %ecx) or 0 */
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosWaitForever
#
# Purpose....: Wait forever
#
# Parameters.: Wait handle
#
# Returns....: Signalled ID or 0
#
##########################################################################*/
.global RdosWaitForever
RdosWaitForever:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
movw 8(%ebp),%bx /* wait handle */
UserGate wait_no_timeout_nr
jc rwfFail /* carry => wait was aborted / failed */
movl %ecx,%eax /* ID of the signalled object */
jmp rwfDone
rwfFail:
xorl %eax,%eax
rwfDone:
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosWaitTimeout
#
# Purpose....: Wait with timeout
#
# Parameters.: Wait handle
# Timeout in ms
#
# Returns....: Signalled ID or 0
#
##########################################################################*/
.global RdosWaitTimeout
RdosWaitTimeout:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
/* Convert the millisecond timeout to kernel time units (factor 1193
   -- presumably 1.193 MHz PIT ticks per ms; confirm against kernel)
   giving a 64-bit value in edx:eax, then add the current 64-bit
   system time to form an absolute deadline for the gate. */
movl 12(%ebp),%eax
movl $1193,%edx
mull %edx
pushl %edx
pushl %eax
UserGate get_system_time_nr
popl %ebx
addl %ebx,%eax /* deadline low dword */
popl %ebx
adcl %ebx,%edx /* deadline high dword (with carry) */
movw 8(%ebp),%bx /* wait handle */
UserGate wait_timeout_nr
jc rwtFail
movl %ecx,%eax /* ID of the signalled object */
jmp rwtDone
rwtFail:
xorl %eax,%eax /* timed out / failed */
rwtDone:
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosStopWait
#
# Purpose....: Stop wait
#
# Parameters.: Wait handle
#
##########################################################################*/
.global RdosStopWait
RdosStopWait:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx /* wait handle */
UserGate stop_wait_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosRemoveWait
#
# Purpose....: Remove wait object from wait handle
#
# Parameters.: Wait handle
# ID
#
##########################################################################*/
.global RdosRemoveWait
RdosRemoveWait:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
movw 8(%ebp),%bx /* wait handle */
movl 12(%ebp),%ecx /* ID that was registered with AddWaitFor* */
UserGate remove_wait_nr
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCreateSignal
#
# Purpose....: Create signal object
#
# Returns....: Signal handle
#
##########################################################################*/
.global RdosCreateSignal
RdosCreateSignal:
pushl %ebp
movl %esp,%ebp
pushl %ebx
UserGate create_signal_nr
movzx %bx,%eax /* 16-bit signal handle returned in %bx */
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosResetSignal
#
# Purpose....: Reset signal
#
# Parameters.: Signal handle
#
##########################################################################*/
.global RdosResetSignal
RdosResetSignal:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate reset_signal_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosIsSignalled
#
# Purpose....: Check if signalled
#
# Parameters.: Signal handle
#
# Returns....: TRUE if signalled
#
##########################################################################*/
.global RdosIsSignalled
RdosIsSignalled:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate is_signalled_nr
jc risdFree /* carry clear => signalled */
movl $1,%eax /* TRUE */
jmp risdDone
risdFree:
xorl %eax,%eax /* FALSE */
risdDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetSignal
#
# Purpose....: Set signal
#
# Parameters.: Signal handle
#
##########################################################################*/
.global RdosSetSignal
RdosSetSignal:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate set_signal_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosFreeSignal
#
# Purpose....: Free signal handle
#
# Parameters.: Signal handle
#
##########################################################################*/
.global RdosFreeSignal
RdosFreeSignal:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate free_signal_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAddWaitForSignal
#
# Purpose....: Add signal object to wait handle
#
# Parameters.: Wait handle
# Signal handle
# ID
#
##########################################################################*/
/* The AddWaitFor* family registers an event source with a wait
   handle.  Common gate convention visible below: wait handle in %bx,
   source handle (if any) in %ax, caller-chosen ID in %ecx. */
.global RdosAddWaitForSignal
RdosAddWaitForSignal:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
movw 8(%ebp),%bx /* wait handle */
movw 12(%ebp),%ax /* signal handle */
movl 16(%ebp),%ecx /* ID */
UserGate add_wait_for_signal_nr
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAddWaitForKeyboard
#
# Purpose....: Add keyboard to wait handle
#
# Parameters.: Wait handle
# ID
#
##########################################################################*/
.global RdosAddWaitForKeyboard
RdosAddWaitForKeyboard:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
movw 8(%ebp),%bx
movl 12(%ebp),%ecx
UserGate add_wait_for_keyboard_nr
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAddWaitForMouse
#
# Purpose....: Add mouse to wait handle
#
# Parameters.: Wait handle
# ID
#
##########################################################################*/
.global RdosAddWaitForMouse
RdosAddWaitForMouse:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
movw 8(%ebp),%bx
movl 12(%ebp),%ecx
UserGate add_wait_for_mouse_nr
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAddWaitForCom
#
# Purpose....: Add com object to wait handle
#
# Parameters.: Wait handle
# Com handle
# ID
#
##########################################################################*/
.global RdosAddWaitForCom
RdosAddWaitForCom:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
movw 8(%ebp),%bx
movw 12(%ebp),%ax /* com handle */
movl 16(%ebp),%ecx
UserGate add_wait_for_com_nr
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAddWaitForAdc
#
# Purpose....: Add ADC object to wait handle
#
# Parameters.: Wait handle
# Adc handle
# ID
#
##########################################################################*/
.global RdosAddWaitForAdc
RdosAddWaitForAdc:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
movw 8(%ebp),%bx
movw 12(%ebp),%ax /* ADC handle */
movl 16(%ebp),%ecx
UserGate add_wait_for_adc_nr
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetTextMode
#
# Purpose....: Set text mode
#
##########################################################################*/
.global RdosSetTextMode
RdosSetTextMode:
pushl %ebp
movl %esp,%ebp
movw $3,%ax /* mode 3 = standard 80x25 colour text mode */
UserGate set_video_mode_nr
leave
ret
/*##########################################################################
#
# Name : RdosSetVideoMode
#
# Purpose....: Set video mode
#
# Parameters.: &xres
# &yres
# &linesize
# &LFB
#
# Returns....: Bitmap handle
#
##########################################################################*/
.global RdosSetVideoMode
RdosSetVideoMode:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
/* In: requested xres in %ax, yres in %cx, depth/linesize in %dx,
   read through the caller's pointers; the gates update the same
   registers with the mode actually set. */
movl 8(%ebp),%edi
movw (%edi),%ax
movl 12(%ebp),%edi
movw (%edi),%cx
movl 16(%ebp),%edi
movw (%edi),%dx
UserGate get_video_mode_nr
jc set_video_fail
UserGate set_video_mode_nr
jc set_video_fail
pushl %edi /* save %edi: it holds the LFB value from the gate */
movl 8(%ebp),%edi
movzx %ax,%eax
movl %eax,(%edi) /* *xres */
movl 12(%ebp),%edi
movzx %cx,%ecx
movl %ecx,(%edi) /* *yres */
movl 16(%ebp),%edi
movzx %dx,%edx
movl %edx,(%edi) /* *linesize */
movl 20(%ebp),%edi
movzx %si,%esi
movl %esi,(%edi)
popl %edi /* restore the LFB value */
movl 24(%ebp),%eax
movl %edi,(%eax) /* *LFB */
movzx %bx,%eax /* bitmap handle */
jmp set_video_done
set_video_fail:
/* On failure zero out every output and return handle 0. */
xorl %eax,%eax
movl 8(%ebp),%edi
movl %eax,(%edi)
movl 12(%ebp),%edi
movl %eax,(%edi)
movl 16(%ebp),%edi
movl %eax,(%edi)
movl 20(%ebp),%edi
movl %eax,(%edi)
movl 24(%ebp),%edi
movl %eax,(%edi)
set_video_done:
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetClipRect
#
# Purpose....: Set clip rectangle
#
# Parameters.: Bitmap handle
# xmin, xmax, ymin, ymax
#
##########################################################################*/
.global RdosSetClipRect
RdosSetClipRect:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
movw 8(%ebp),%bx /* bitmap handle */
movw 12(%ebp),%cx /* xmin */
movw 16(%ebp),%dx /* xmax */
movw 20(%ebp),%si /* ymin */
movw 24(%ebp),%di /* ymax */
UserGate set_clip_rect_nr
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosClearClipRect
#
# Purpose....: Clear clip rectangle
#
# Parameters.: Bitmap handle
#
##########################################################################*/
.global RdosClearClipRect
RdosClearClipRect:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate clear_clip_rect_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetDrawColor
#
# Purpose....: Set draw color
#
# Parameters.: Bitmap handle
# Color
#
##########################################################################*/
.global RdosSetDrawColor
RdosSetDrawColor:
pushl %ebp
movl %esp,%ebp
pushl %eax
pushl %ebx
movw 8(%ebp),%bx
movl 12(%ebp),%eax /* color value */
UserGate set_drawcolor_nr
popl %ebx
popl %eax
leave
ret
/*##########################################################################
#
# Name : RdosSetLGOP
#
# Purpose....: Set logical drawing operation (LGOP)
#
# Parameters.: Bitmap handle
# LGOP
#
##########################################################################*/
.global RdosSetLGOP
RdosSetLGOP:
pushl %ebp
movl %esp,%ebp
pushl %eax
pushl %ebx
movw 8(%ebp),%bx
movw 12(%ebp),%ax /* LGOP code */
UserGate set_lgop_nr
popl %ebx
popl %eax
leave
ret
/*##########################################################################
#
# Name : RdosSetHollowStyle
#
# Purpose....: Set hollow fill style
#
# Parameters.: Bitmap handle
#
##########################################################################*/
.global RdosSetHollowStyle
RdosSetHollowStyle:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate set_hollow_style_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetFilledStyle
#
# Purpose....: Set filled fill style
#
# Parameters.: Bitmap handle
#
##########################################################################*/
.global RdosSetFilledStyle
RdosSetFilledStyle:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate set_filled_style_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosOpenFont
#
# Purpose....: Open a font
#
# Parameters.: height
#
# Returns....: Font handle
#
##########################################################################*/
.global RdosOpenFont
RdosOpenFont:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%ax /* requested height */
UserGate open_font_nr
movzx %bx,%eax /* 16-bit font handle returned in %bx */
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseFont
#
# Purpose....: Close font handle
#
# Parameters.: Font handle
#
##########################################################################*/
.global RdosCloseFont
RdosCloseFont:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movw 8(%ebp),%bx
UserGate close_font_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetStringMetrics
#
# Purpose....: Get string metrics for text using font
#
# Parameters.: Font handle
# String
# &width
# &height
#
##########################################################################*/
.global RdosGetStringMetrics
RdosGetStringMetrics:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
pushl %edi
movw 8(%ebp),%bx /* font handle */
movl 12(%ebp),%edi /* string */
UserGate get_string_metrics_nr
/* Gate returns width in %cx, height in %dx; zero-extend and store
   through the caller's int pointers. */
movl 16(%ebp),%edi
movzx %cx,%ecx
movl %ecx,(%edi) /* *width */
movl 20(%ebp),%edi
movzx %dx,%edx
movl %edx,(%edi) /* *height */
popl %edi
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetFont
#
# Purpose....: Set font
#
# Parameters.: Bitmap handle
# Font handle
#
##########################################################################*/
.global RdosSetFont
RdosSetFont:
pushl %ebp
movl %esp,%ebp
pushl %eax
pushl %ebx
movw 8(%ebp),%bx /* bitmap handle */
movw 12(%ebp),%ax /* font handle */
UserGate set_font_nr
popl %ebx
popl %eax
leave
ret
/*##########################################################################
#
# Name : RdosGetPixel
#
# Purpose....: Get pixel
#
# Parameters.: Bitmap handle
# x, y
#
##########################################################################*/
.global RdosGetPixel
RdosGetPixel:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
movw 8(%ebp),%bx /* bitmap handle */
movw 12(%ebp),%cx /* x */
movw 16(%ebp),%dx /* y */
UserGate get_pixel_nr
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetPixel
#
# Purpose....: Set pixel
#
# Parameters.: Bitmap handle
# x, y
#
##########################################################################*/
.global RdosSetPixel
RdosSetPixel:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
movw 8(%ebp),%bx /* bitmap handle */
movw 12(%ebp),%cx /* x */
movw 16(%ebp),%dx /* y */
UserGate set_pixel_nr
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosBlit
#
# Purpose....: Blit
#
# Parameters.: SrcHandle
# DestHandle
# width, height
# SrcX, SrcY
# DestX, DestY
#
##########################################################################*/
.global RdosBlit
RdosBlit:
pushl %ebp
movl %esp,%ebp
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
;
movw 8(%ebp),%ax /* source bitmap handle */
movw 12(%ebp),%bx /* destination bitmap handle */
movw 16(%ebp),%cx /* width */
movw 20(%ebp),%dx /* height */
/* Pack the two 16-bit coordinate pairs: %esi = SrcY:SrcX,
   %edi = DestY:DestX (high word = y, low word = x). */
movw 28(%ebp),%si
shll $16,%esi
movw 24(%ebp),%si
movw 36(%ebp),%di
shll $16,%edi
movw 32(%ebp),%di
UserGate blit_nr
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
popl %eax
leave
ret
/*##########################################################################
#
# Name : RdosDrawMask
#
# Purpose....: Draw mask
#
# Parameters.: Bitmap handle
# mask
# RowSize
# width, height
# SrcX, SrcY
# DestX, DestY
#
##########################################################################*/
.global RdosDrawMask
RdosDrawMask:
pushl %ebp
movl %esp,%ebp
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
;
movw 8(%ebp),%bx /* bitmap handle */
movl 12(%ebp),%edi /* mask data pointer */
movw 16(%ebp),%ax /* row size */
/* Pack 16-bit pairs: %esi = height:width, %ecx = SrcY:SrcX,
   %edx = DestY:DestX (high word first, then low word). */
movw 24(%ebp),%si
shll $16,%esi
movw 20(%ebp),%si
movw 32(%ebp),%cx
shll $16,%ecx
movw 28(%ebp),%cx
movw 40(%ebp),%dx
shll $16,%edx
movw 36(%ebp),%dx
UserGate draw_mask_nr
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
popl %eax
leave
ret
/*##########################################################################
#
# Name : RdosDrawLine
#
# Purpose....: Draw a line
#
# Parameters.: Bitmap handle
# x1, y1
# x2, y2
#
##########################################################################*/
.global RdosDrawLine
RdosDrawLine:
    # cdecl wrapper: void RdosDrawLine(handle, x1, y1, x2, y2)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    pushl %esi
    pushl %edi
    ;
    movw 8(%ebp),%bx                    # bx = bitmap handle
    movw 12(%ebp),%cx                   # cx = x1
    movw 16(%ebp),%dx                   # dx = y1
    movw 20(%ebp),%si                   # si = x2
    movw 24(%ebp),%di                   # di = y2
    UserGate draw_line_nr
    popl %edi
    popl %esi
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosDrawString
#
# Purpose....: Draw a string
#
# Parameters.: Bitmap handle
# x, y
# string
#
##########################################################################*/
.global RdosDrawString
RdosDrawString:
    # cdecl wrapper: void RdosDrawString(handle, x, y, string)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    pushl %edi
    ;
    movw 8(%ebp),%bx                    # bx = bitmap handle
    movw 12(%ebp),%cx                   # cx = x
    movw 16(%ebp),%dx                   # dx = y
    movl 20(%ebp),%edi                  # edi -> string
    UserGate draw_string_nr
    popl %edi
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosDrawRect
#
# Purpose....: Draw a rect
#
# Parameters.: Bitmap handle
# x, y
# width, height
#
##########################################################################*/
.global RdosDrawRect
RdosDrawRect:
    # cdecl wrapper: void RdosDrawRect(handle, x, y, width, height)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    pushl %esi
    pushl %edi
    movw 8(%ebp),%bx                    # bx = bitmap handle
    movw 12(%ebp),%cx                   # cx = x
    movw 16(%ebp),%dx                   # dx = y
    movw 20(%ebp),%si                   # si = width
    movw 24(%ebp),%di                   # di = height
    UserGate draw_rect_nr
    popl %edi
    popl %esi
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosDrawEllipse
#
# Purpose....: Draw an ellipse
#
# Parameters.: Bitmap handle
# x, y
# width, height
#
##########################################################################*/
.global RdosDrawEllipse
RdosDrawEllipse:
    # cdecl wrapper: void RdosDrawEllipse(handle, x, y, width, height)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    pushl %esi
    pushl %edi
    movw 8(%ebp),%bx                    # bx = bitmap handle
    movw 12(%ebp),%cx                   # cx = x
    movw 16(%ebp),%dx                   # dx = y
    movw 20(%ebp),%si                   # si = width
    movw 24(%ebp),%di                   # di = height
    UserGate draw_ellipse_nr
    popl %edi
    popl %esi
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCreateBitmap
#
# Purpose....: Create a bitmap
#
# Parameters.: BitsPerPixel
# width, height
#
# Returns....: Bitmap handle
#
##########################################################################*/
.global RdosCreateBitmap
RdosCreateBitmap:
    # cdecl wrapper: int RdosCreateBitmap(bpp, width, height)
    # Returns the new bitmap handle zero-extended in eax.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    movw 8(%ebp),%ax                    # ax = bits per pixel
    movw 12(%ebp),%cx                   # cx = width
    movw 16(%ebp),%dx                   # dx = height
    UserGate create_bitmap_nr
    movzx %bx,%eax                      # gate returns handle in bx
    ;
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosDuplicateBitmapHandle
#
# Purpose....: Duplicate bitmap handle for use in another thread / object
#
# Parameters.: Bitmap handle
#
# Returns....: Bitmap handle
#
##########################################################################*/
.global RdosDuplicateBitmapHandle
RdosDuplicateBitmapHandle:
    # cdecl wrapper: int RdosDuplicateBitmapHandle(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = bitmap handle to duplicate
    UserGate dup_bitmap_handle_nr
    movzx %bx,%eax                      # new handle returned in bx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCloseBitmap
#
# Purpose....: Close bitmap handle
#
# Parameters.: Bitmap handle
#
##########################################################################*/
.global RdosCloseBitmap
RdosCloseBitmap:
    # cdecl wrapper: void RdosCloseBitmap(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = bitmap handle
    UserGate close_bitmap_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCreateStringBitmap
#
# Purpose....: Create bitmap from string & font
#
# Parameters.: Font
# string
#
# Returns....: Bitmap handle
#
##########################################################################*/
.global RdosCreateStringBitmap
RdosCreateStringBitmap:
    # cdecl wrapper: int RdosCreateStringBitmap(font, string)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %edi
    movw 8(%ebp),%bx                    # bx = font handle
    movl 12(%ebp),%edi                  # edi -> string
    UserGate create_string_bitmap_nr
    movzx %bx,%eax                      # new bitmap handle returned in bx
    ;
    popl %edi
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosGetBitmapInfo
#
# Purpose....: Get info about bitmap
#
# Parameters.: Bitmap handle
# &BitsPerPixel
# &width, &height
# &linesize
# &LFB
#
##########################################################################*/
.global RdosGetBitmapInfo
RdosGetBitmapInfo:
    # cdecl wrapper: void RdosGetBitmapInfo(handle, &bpp, &width, &height,
    #                                       &linesize, &lfb)
    # On gate failure (CF set) all five outputs are zeroed.
    pushl %ebp
    movl %esp,%ebp
    pushl %eax
    pushl %ebx
    pushl %ecx
    pushl %edx
    pushl %esi
    pushl %edi
    movw 8(%ebp),%bx                    # bx = bitmap handle
    UserGate get_bitmap_info_nr         # al=bpp cx=width dx=height si=linesize edi=LFB
    jc gbiFail
    pushl %edi                          # park LFB while edi is reused as out-pointer
    movl 12(%ebp),%edi
    movzx %al,%eax
    movl %eax,(%edi)                    # *bpp
    movl 16(%ebp),%edi
    movzx %cx,%ecx
    movl %ecx,(%edi)                    # *width
    movl 20(%ebp),%edi
    movzx %dx,%edx
    movl %edx,(%edi)                    # *height
    movl 24(%ebp),%edi
    movzx %si,%esi
    movl %esi,(%edi)                    # *linesize
    popl %edi                           # recover LFB address
    movl 28(%ebp),%eax
    movl %edi,(%eax)                    # *lfb
    jmp gbiDone
gbiFail:
    xorl %eax,%eax                      # failure: zero every output
    movl 12(%ebp),%edi
    movl %eax,(%edi)
    movl 16(%ebp),%edi
    movl %eax,(%edi)
    movl 20(%ebp),%edi
    movl %eax,(%edi)
    movl 24(%ebp),%edi
    movl %eax,(%edi)
    movl 28(%ebp),%edi
    movl %eax,(%edi)
gbiDone:
    popl %edi
    popl %esi
    popl %edx
    popl %ecx
    popl %ebx
    popl %eax
    leave
    ret
/*##########################################################################
#
# Name : RdosCreateSprite
#
# Purpose....: Create a sprite
#
# Parameters.: dest
# bitmap
# mask
# LGOP
#
# Returns....: Sprite handle
#
##########################################################################*/
.global RdosCreateSprite
RdosCreateSprite:
    # cdecl wrapper: int RdosCreateSprite(dest, bitmap, mask, lgop)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    movw 8(%ebp),%bx                    # bx = destination handle
    movw 12(%ebp),%cx                   # cx = bitmap handle
    movw 16(%ebp),%dx                   # dx = mask handle
    movw 20(%ebp),%ax                   # ax = LGOP (raster op)
    UserGate create_sprite_nr
    movzx %bx,%eax                      # sprite handle returned in bx
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCloseSprite
#
# Purpose....: Close sprite handle
#
# Parameters.: Sprite handle
#
##########################################################################*/
.global RdosCloseSprite
RdosCloseSprite:
    # cdecl wrapper: void RdosCloseSprite(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = sprite handle
    UserGate close_sprite_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosShowSprite
#
# Purpose....: Show sprite
#
# Parameters.: Sprite handle
#
##########################################################################*/
.global RdosShowSprite
RdosShowSprite:
    # cdecl wrapper: void RdosShowSprite(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = sprite handle
    UserGate show_sprite_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosHideSprite
#
# Purpose....: Hide sprite
#
# Parameters.: Sprite handle
#
##########################################################################*/
.global RdosHideSprite
RdosHideSprite:
    # cdecl wrapper: void RdosHideSprite(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = sprite handle
    UserGate hide_sprite_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosMoveSprite
#
# Purpose....: Move sprite
#
# Parameters.: Sprite handle
# x, y
#
##########################################################################*/
.global RdosMoveSprite
RdosMoveSprite:
    # cdecl wrapper: void RdosMoveSprite(handle, x, y)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    movw 8(%ebp),%bx                    # bx = sprite handle
    movw 12(%ebp),%cx                   # cx = x
    movw 16(%ebp),%dx                   # dx = y
    UserGate move_sprite_nr
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosSetForeColor
#
# Purpose....: Set text-mode fore color
#
# Parameters.: palette index
#
##########################################################################*/
.global RdosSetForeColor
RdosSetForeColor:
    # cdecl wrapper: void RdosSetForeColor(palette_index)
    pushl %ebp
    movl %esp,%ebp
    pushl %eax
    movb 8(%ebp),%al                    # al = palette index
    UserGate set_forecolor_nr
    popl %eax
    leave
    ret
/*##########################################################################
#
# Name : RdosSetBackColor
#
# Purpose....: Set text-mode back color
#
# Parameters.: palette index
#
##########################################################################*/
.global RdosSetBackColor
RdosSetBackColor:
    # cdecl wrapper: void RdosSetBackColor(palette_index)
    pushl %ebp
    movl %esp,%ebp
    pushl %eax
    movb 8(%ebp),%al                    # al = palette index
    UserGate set_backcolor_nr
    popl %eax
    leave
    ret
/*##########################################################################
#
# Name : RdosGetSysTime
#
# Purpose....: Get system time
#
# Parameters.: &year, &month, &day
# &hour, &min, &sec, &ms
#
##########################################################################*/
.global RdosGetSysTime
RdosGetSysTime:
    # cdecl wrapper: split the system tic counter into calendar fields.
    # Strategy: convert tics -> date/time, store the fields, convert the
    # truncated time back to tics, and derive ms from the tic remainder.
    pushl %ebp
    movl %esp,%ebp
    pushal
    UserGate get_system_time_nr         # edx:eax = raw tic count
    pushl %eax                          # save LSB of raw tics for ms calc
    UserGate binary_to_time_nr          # dx=year ch=month cl=day bh=hour bl=min ah=sec
    pushl %edx                          # save year for the reconversion below
    movl 8(%ebp),%esi
    movzx %dx,%edx
    movl %edx,(%esi)                    # *year
    movl 12(%ebp),%esi
    movzx %ch,%edx
    movl %edx,(%esi)                    # *month
    movl 16(%ebp),%esi
    movzx %cl,%edx
    movl %edx,(%esi)                    # *day
    movl 20(%ebp),%esi
    movzx %bh,%edx
    movl %edx,(%esi)                    # *hour
    movl 24(%ebp),%esi
    movzx %bl,%edx
    movl %edx,(%esi)                    # *min
    movl 28(%ebp),%esi
    movzx %ah,%edx
    movl %edx,(%esi)                    # *sec
    popl %edx                           # dx = year again for time_to_binary
    UserGate time_to_binary_nr          # edx:eax = tics at the whole-second boundary
    movl %eax,%ebx
    popl %eax                           # eax = raw tic LSB saved above
    subl %ebx,%eax                      # sub-second tic remainder
    xorl %edx,%edx
    movl $1192,%ebx                     # ~tics per millisecond -- TODO confirm rate
    divl %ebx
    movl 32(%ebp),%esi
    movzx %ax,%eax
    movl %eax,(%esi)                    # *ms
    popal
    leave
    ret
/*##########################################################################
#
# Name : RdosGetTime
#
# Purpose....: Get time
#
# Parameters.: &year, &month, &day
# &hour, &min, &sec, &ms
#
##########################################################################*/
.global RdosGetTime
RdosGetTime:
    # cdecl wrapper: like RdosGetSysTime but based on the get_time gate.
    pushl %ebp
    movl %esp,%ebp
    pushal
    UserGate get_time_nr                # edx:eax = tic count
    pushl %eax                          # save LSB of tics for ms calc
    UserGate binary_to_time_nr          # dx=year ch=month cl=day bh=hour bl=min ah=sec
    pushl %edx                          # save year for the reconversion below
    movl 8(%ebp),%esi
    movzx %dx,%edx
    movl %edx,(%esi)                    # *year
    movl 12(%ebp),%esi
    movzx %ch,%edx
    movl %edx,(%esi)                    # *month
    movl 16(%ebp),%esi
    movzx %cl,%edx
    movl %edx,(%esi)                    # *day
    movl 20(%ebp),%esi
    movzx %bh,%edx
    movl %edx,(%esi)                    # *hour
    movl 24(%ebp),%esi
    movzx %bl,%edx
    movl %edx,(%esi)                    # *min
    movl 28(%ebp),%esi
    movzx %ah,%edx
    movl %edx,(%esi)                    # *sec
    popl %edx                           # dx = year again for time_to_binary
    UserGate time_to_binary_nr          # edx:eax = tics at the whole-second boundary
    movl %eax,%ebx
    popl %eax                           # eax = tic LSB saved above
    subl %ebx,%eax                      # sub-second tic remainder
    xorl %edx,%edx
    movl $1192,%ebx                     # ~tics per millisecond -- TODO confirm rate
    divl %ebx
    movl 32(%ebp),%esi
    movzx %ax,%eax
    movl %eax,(%esi)                    # *ms
    popal
    leave
    ret
/*##########################################################################
#
# Name : RdosSetTime
#
# Purpose....: Set time
#
# Parameters.: year, month, day
# hour, min, sec, ms
#
##########################################################################*/
.global RdosSetTime
RdosSetTime:
    # cdecl wrapper: set the clock by computing the tic delta between the
    # requested time and the current system time, then updating by the delta.
    pushl %ebp
    movl %esp,%ebp
    pushal
    movw 8(%ebp),%dx                    # dx = year
    movb 12(%ebp),%ch                   # ch = month
    movb 16(%ebp),%cl                   # cl = day
    movb 20(%ebp),%bh                   # bh = hour
    movb 24(%ebp),%bl                   # bl = min
    movb 28(%ebp),%ah                   # ah = sec
    UserGate time_to_binary_nr          # edx:eax = target time in tics (sec granularity)
    movl %edx,%edi                      # edi:esi = target tics
    movl %eax,%esi
    movl 32(%ebp),%eax                  # eax = ms
    movl $1192,%edx                     # ~tics per millisecond -- TODO confirm rate
    mull %edx
    addl %eax,%esi                      # add ms contribution to target
    adcl $0,%edi
    UserGate get_system_time_nr         # edx:eax = current tics
    subl %eax,%esi                      # delta = target - current
    sbbl %edx,%edi
    movl %esi,%eax                      # edx:eax = delta for update gate
    movl %edi,%edx
    UserGate update_time_nr
    popal
    leave
    ret
/*##########################################################################
#
# Name : RdosTicsToRecord
#
# Purpose....: Convert tics to record format
#
# Parameters.: MSB, LSB
# &year, &month, &day
# &hour, &min, &sec, &ms
#
##########################################################################*/
.global RdosTicsToRecord
RdosTicsToRecord:
    # cdecl wrapper: void RdosTicsToRecord(msb, lsb, &year, &month, &day,
    #                                      &hour, &min, &sec, &ms)
    # Converts a 64-bit tic count to calendar fields; ms is derived from the
    # tic remainder after reconverting the truncated time back to tics.
    # Fixes vs. original:
    #  - remainder must subtract the saved boundary LSB in %ebx, not %edx
    #    (the MSB of the reconverted time), matching RdosGetSysTime
    #  - the reloaded LSB gets the same +596 rounding bias as the converted
    #    value, so the remainder is non-negative and the 1000-clamp applies
    #  - 'cmpw %ax,1000' compared against memory at absolute address 1000;
    #    the immediate form 'cmpw $1000,%ax' was intended
    pushl %ebp
    movl %esp,%ebp
    pushal
    movl 8(%ebp),%edx                   # edx:eax = input tic count (MSB:LSB)
    movl 12(%ebp),%eax
    addl $596,%eax                      # round to nearest ms (596 ~ 1192/2 tics)
    adcl $0,%edx
    UserGate binary_to_time_nr          # dx=year ch=month cl=day bh=hour bl=min ah=sec
    pushl %edx                          # save year for the reconversion below
    movl 16(%ebp),%esi
    movzx %dx,%edx
    movl %edx,(%esi)                    # *year
    movl 20(%ebp),%esi
    movzx %ch,%edx
    movl %edx,(%esi)                    # *month
    movl 24(%ebp),%esi
    movzx %cl,%edx
    movl %edx,(%esi)                    # *day
    movl 28(%ebp),%esi
    movzx %bh,%edx
    movl %edx,(%esi)                    # *hour
    movl 32(%ebp),%esi
    movzx %bl,%edx
    movl %edx,(%esi)                    # *min
    movl 36(%ebp),%esi
    movzx %ah,%edx
    movl %edx,(%esi)                    # *sec
    popl %edx                           # dx = year again for time_to_binary
    UserGate time_to_binary_nr          # edx:eax = tics at the whole-second boundary
    movl %eax,%ebx                      # ebx = boundary LSB
    movl 12(%ebp),%eax                  # eax = rounded input LSB again
    addl $596,%eax
    subl %ebx,%eax                      # sub-second tic remainder (was subl %edx,%eax)
    xorl %edx,%edx
    movl $1192,%ebx                     # ~tics per millisecond
    divl %ebx                           # ax = milliseconds, may round up to 1000
    movl 40(%ebp),%esi
    cmpw $1000,%ax                      # immediate compare (was 'cmpw %ax,1000')
    jne rttrSaveMs
    decw %ax                            # clamp rounded result to 999
rttrSaveMs:
    movzx %ax,%eax
    movl %eax,(%esi)                    # *ms
    popal
    leave
    ret
/*##########################################################################
#
# Name : RdosRecordToTics
#
# Purpose....: Convert from record format to tics
#
# Parameters.: &MSB, &LSB
# year, month, day
# hour, min, sec, ms
#
##########################################################################*/
.global RdosRecordToTics
RdosRecordToTics:
    # cdecl wrapper: void RdosRecordToTics(&msb, &lsb, year, month, day,
    #                                      hour, min, sec, ms)
    # Converts calendar fields to a 64-bit tic count.
    pushl %ebp
    movl %esp,%ebp
    pushal
    movl 40(%ebp),%eax                  # eax = ms
    movl $1192,%edx                     # ~tics per millisecond -- TODO confirm rate
    mull %edx
    pushl %eax                          # save ms contribution in tics
    movw 16(%ebp),%dx                   # dx = year
    movb 20(%ebp),%ch                   # ch = month
    movb 24(%ebp),%cl                   # cl = day
    movb 28(%ebp),%bh                   # bh = hour
    movb 32(%ebp),%bl                   # bl = min
    movb 36(%ebp),%ah                   # ah = sec
    UserGate time_to_binary_nr          # edx:eax = tics at second granularity
    popl %ebx
    addl %ebx,%eax                      # add ms contribution
    adcl $0,%edx
    movl 8(%ebp),%esi
    movl %edx,(%esi)                    # *msb
    movl 12(%ebp),%esi
    movl %eax,(%esi)                    # *lsb
    popal
    leave
    ret
/*##########################################################################
#
# Name : RdosDecodeMsbTics
#
# Purpose....: Decode MSB tics
#
# Parameters.: MSB
# &day, &hour
#
##########################################################################*/
.global RdosDecodeMsbTics
RdosDecodeMsbTics:
    # cdecl wrapper: split the tic MSB (a count of hours) into days + hours.
    pushl %ebp
    movl %esp,%ebp
    pushal
    movl 8(%ebp),%eax                   # eax = MSB
    xorl %edx,%edx                      # zero edx before unsigned divide
    movl $24,%ecx
    divl %ecx                           # eax = days, edx = remaining hours
    movl 12(%ebp),%ebx
    movl %eax,(%ebx)                    # *day
    movl 16(%ebp),%ebx
    movl %edx,(%ebx)                    # *hour
    popal
    leave
    ret
/*##########################################################################
#
# Name : RdosDecodeLsbTics
#
# Purpose....: Decode LSB tics
#
# Parameters.: LSB
# &min, &sec, &ms, &us
#
##########################################################################*/
.global RdosDecodeLsbTics
RdosDecodeLsbTics:
    # cdecl wrapper: decode the tic LSB (a 32-bit fraction of an hour) by
    # repeated fixed-point multiply: each mull leaves the integer part in edx
    # and the remaining fraction in eax.
    pushl %ebp
    movl %esp,%ebp
    pushal
    movl 8(%ebp),%eax                   # eax = LSB (fraction of an hour)
    movl $60,%edx
    mull %edx                           # edx = minutes, eax = fraction of a minute
    movl 12(%ebp),%ebx
    movl %edx,(%ebx)                    # *min
    movl $60,%edx
    mull %edx                           # edx = seconds, eax = fraction of a second
    movl 16(%ebp),%ebx
    movl %edx,(%ebx)                    # *sec
    movl $1000,%edx
    mull %edx                           # edx = milliseconds
    movl 20(%ebp),%ebx
    movl %edx,(%ebx)                    # *ms
    movl $1000,%edx
    mull %edx                           # edx = microseconds
    movl 24(%ebp),%ebx
    movl %edx,(%ebx)                    # *us
    popal
    leave
    ret
/*##########################################################################
#
# Name : RdosDayOfWeek
#
# Purpose....: Get day of week
#
# Parameters.: year, month, day
#
# Returns....: day of week
#
##########################################################################*/
.global RdosDayOfWeek
RdosDayOfWeek:
    # cdecl wrapper: int RdosDayOfWeek(year, month, day)
    # Computes (year*365 + leap-day count + days-passed-in-year + 5) mod 7.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    movl 8(%ebp),%edx                   # dx = year
    movb 12(%ebp),%ch                   # ch = month
    movb 16(%ebp),%cl                   # cl = day
    xorw %bx,%bx
    xorb %ah,%ah
    UserGate adjust_time_nr             # normalize date fields -- assumed; verify gate
    pushw %dx                           # save year
    movl $365,%eax
    imulw %dx                           # dx:ax = 365 * year
    pushw %dx                           # reassemble 32-bit product in ebx:
    pushw %ax                           #   push hi word then lo word,
    popl %ebx                           #   pop both as one dword
    popw %dx                            # dx = year again
    UserGate passed_days_nr             # ax = days passed in current year -- assumed
    decw %dx
    shrw $2,%dx                         # (year-1)/4 ...
    incw %dx                            # ... + 1 = leap days before this year
    addw %dx,%ax
    addl %ebx,%eax                      # total day count
    xorl %edx,%edx
    addl $5,%eax                        # epoch weekday offset
    movl $7,%ebx
    divl %ebx                           # edx = day of week
    movzx %dl,%eax
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosGetTics
#
# Purpose....: Get system tics
#
# Parameters.: &MSB, &LSB
#
##########################################################################*/
.global RdosGetTics
RdosGetTics:
    # cdecl wrapper: void RdosGetTics(&msb, &lsb)
    # NOTE(review): uses get_time_nr while RdosGetSysTime uses
    # get_system_time_nr -- presumably intentional; confirm against gate docs.
    pushl %ebp
    movl %esp,%ebp
    pushl %edx
    pushl %esi
    UserGate get_time_nr                # edx:eax = tic count
    movl 8(%ebp),%esi
    movl %edx,(%esi)                    # *msb
    movl 12(%ebp),%esi
    movl %eax,(%esi)                    # *lsb
    popl %esi
    popl %edx
    leave
    ret
/*##########################################################################
#
# Name : RdosAddTics
#
# Purpose....: Add tics to binary time
#
# Parameters.: &MSB, &LSB
# tics
#
##########################################################################*/
.global RdosAddTics
RdosAddTics:
    # cdecl wrapper: add a 32-bit tic count to a 64-bit time in memory,
    # propagating the carry from LSB into MSB.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movl 16(%ebp),%eax                  # eax = tics to add
    movl 12(%ebp),%ebx
    addl %eax,(%ebx)                    # *lsb += tics
    movl 8(%ebp),%ebx
    adcl $0,(%ebx)                      # *msb += carry
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosAddMilli
#
# Purpose....: Add milliseconds to binary time
#
# Parameters.: &MSB, &LSB
# milli
#
##########################################################################*/
.global RdosAddMilli
RdosAddMilli:
    # cdecl wrapper: add milliseconds to a 64-bit time in memory.
    # NOTE(review): uses 1193 tics/ms here vs 1192 in RdosGetSysTime --
    # confirm the intended tic rate.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movl 16(%ebp),%eax                  # eax = milliseconds
    movl $1193,%edx
    mull %edx                           # edx:eax = ms in tics
    movl 12(%ebp),%ebx
    addl %eax,(%ebx)                    # *lsb += low tics
    movl 8(%ebp),%ebx
    adcl %edx,(%ebx)                    # *msb += high tics + carry
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosAddSec
#
# Purpose....: Add milliseconds to binary time
#
# Parameters.: &MSB, &LSB
# sec
#
##########################################################################*/
.global RdosAddSec
RdosAddSec:
    # cdecl wrapper: add seconds to a 64-bit time in memory
    # (1193000 ~ tics per second).
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movl 16(%ebp),%eax                  # eax = seconds
    movl $1193000,%edx
    mull %edx                           # edx:eax = seconds in tics
    movl 12(%ebp),%ebx
    addl %eax,(%ebx)                    # *lsb += low tics
    movl 8(%ebp),%ebx
    adcl %edx,(%ebx)                    # *msb += high tics + carry
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosAddMin
#
# Purpose....: Add minutes to binary time
#
# Parameters.: &MSB, &LSB
# min
#
##########################################################################*/
.global RdosAddMin
RdosAddMin:
    # cdecl wrapper: add minutes to a 64-bit time in memory
    # (71582760 = 60 * 1193046 ~ tics per minute).
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movl 16(%ebp),%eax                  # eax = minutes
    movl $71582760,%edx
    mull %edx                           # edx:eax = minutes in tics
    movl 12(%ebp),%ebx
    addl %eax,(%ebx)                    # *lsb += low tics
    movl 8(%ebp),%ebx
    adcl %edx,(%ebx)                    # *msb += high tics + carry
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosAddHour
#
# Purpose....: Add hour to binary time
#
# Parameters.: &MSB, &LSB
# hour
#
##########################################################################*/
.global RdosAddHour
RdosAddHour:
    # cdecl wrapper: add hours to a 64-bit time in memory. The MSB counts
    # whole hours (see RdosDecodeMsbTics), so hours add directly to *msb.
    # Fix: the original used 'adc %eax,(%ebx)' with an indeterminate carry
    # flag (nothing between entry and the adc sets CF); a plain add is meant.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movl 16(%ebp),%eax                  # eax = hours
    movl 8(%ebp),%ebx
    addl %eax,(%ebx)                    # *msb += hours (was adc with stale CF)
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosAddDay
#
# Purpose....: Add days to binary time
#
# Parameters.: &MSB, &LSB
# days
#
##########################################################################*/
.global RdosAddDay
RdosAddDay:
    # cdecl wrapper: add days to a 64-bit time in memory. The MSB counts
    # hours, so days are scaled by 24 and added to *msb.
    # Fix: the original used 'adc %eax,(%ebx)'; the only prior CF producer is
    # mull (set only on 32-bit overflow), so no carry-in is meaningful here.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movl 16(%ebp),%eax                  # eax = days
    movl $24,%edx
    mull %edx                           # eax = days * 24 = hours
    movl 8(%ebp),%ebx
    addl %eax,(%ebx)                    # *msb += hours (was adc with mull's CF)
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosSyncTime
#
# Purpose....: Synchronize time with NTP
#
# Parameters.: IP
#
##########################################################################*/
.global RdosSyncTime
RdosSyncTime:
    # cdecl wrapper: int RdosSyncTime(ip) -- synchronize time with NTP.
    # Returns 1 on success, 0 on failure (gate signals failure via CF).
    # Fix: the original wrapped the body in pushal/popal, so the popal
    # restored the saved %eax and destroyed the TRUE/FALSE return value.
    # Save the individual registers instead, leaving %eax live for return.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    pushl %esi
    pushl %edi
    movl 8(%ebp),%edx                   # edx = NTP server IP
    UserGate sync_time_nr
    jc RdosSyncTimeFail
    movl $1,%eax                        # success -> TRUE
    jmp RdosSyncTimeDone
RdosSyncTimeFail:
    xorl %eax,%eax                      # failure -> FALSE
RdosSyncTimeDone:
    popl %edi
    popl %esi
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosOpenCom
#
# Purpose....: Open com-port
#
# Parameters.: ID
# baudrate
# parity
# data bits
# stop bits
# SendBufferSize
# RecBufferSize
#
# Returns...: Com handle
#
##########################################################################*/
.global RdosOpenCom
RdosOpenCom:
    # cdecl wrapper: int RdosOpenCom(id, baudrate, parity, databits,
    #                                stopbits, sendsize, recsize)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edx
    pushl %esi
    pushl %edi
    movb 8(%ebp),%al                    # al = port ID
    movb 20(%ebp),%ah                   # ah = data bits
    movb 24(%ebp),%bl                   # bl = stop bits
    movb 16(%ebp),%bh                   # bh = parity
    movl 12(%ebp),%ecx                  # ecx = baudrate
    movw 28(%ebp),%si                   # si = send buffer size
    movw 32(%ebp),%di                   # di = receive buffer size
    UserGate open_com_nr
    movzx %bx,%eax                      # com handle returned in bx
    popl %edi
    popl %esi
    popl %edx
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCloseCom
#
# Purpose....: Close com-port
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosCloseCom
RdosCloseCom:
    # cdecl wrapper: void RdosCloseCom(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate close_com_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosFlushCom
#
# Purpose....: Flush com-port
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosFlushCom
RdosFlushCom:
    # cdecl wrapper: void RdosFlushCom(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate flush_com_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosReadCom
#
# Purpose....: Read com-port
#
# Parameters.: Com handle
#
# Returns....: Character
#
##########################################################################*/
.global RdosReadCom
RdosReadCom:
    # cdecl wrapper: int RdosReadCom(handle)
    # The gate's result is returned as-is in eax (character per the header).
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate read_com_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosWriteCom
#
# Purpose....: Write com-port
#
# Parameters.: Com handle
# char
#
# Returns....: 0 for success
#
##########################################################################*/
.global RdosWriteCom
RdosWriteCom:
    # cdecl wrapper: int RdosWriteCom(handle, ch) -- 0 for success.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    movb 12(%ebp),%al                   # al = character to write
    UserGate write_com_nr
    movzx %al,%eax                      # zero-extend gate's status byte
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosWaitForSendCompletedCom
#
# Purpose....: Wait until send buffer is empty
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosWaitForSendCompletedCom
RdosWaitForSendCompletedCom:
    # cdecl wrapper: block until the com send buffer is empty.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate wait_for_send_completed_com_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosEnableCts
#
# Purpose....: Enable CTS signal
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosEnableCts
RdosEnableCts:
    # cdecl wrapper: void RdosEnableCts(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate enable_cts_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosDisableCts
#
# Purpose....: Disable CTS signal
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosDisableCts
RdosDisableCts:
    # cdecl wrapper: void RdosDisableCts(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate disable_cts_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosEnableAutoRts
#
# Purpose....: Enable auto RTS signal generation for RS485
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosEnableAutoRts
RdosEnableAutoRts:
    # cdecl wrapper: enable automatic RTS generation (RS485 mode).
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate enable_auto_rts_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosDisableAutoRts
#
# Purpose....: Disable auto RTS signal generation for RS485
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosDisableAutoRts
RdosDisableAutoRts:
    # cdecl wrapper: disable automatic RTS generation (RS485 mode).
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate disable_auto_rts_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosSetDtr
#
# Purpose....: Set DTR active
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosSetDtr
RdosSetDtr:
    # cdecl wrapper: assert the DTR modem-control line.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate set_dtr_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosResetDtr
#
# Purpose....: Set DTR inactive
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosResetDtr
RdosResetDtr:
    # cdecl wrapper: de-assert the DTR modem-control line.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate reset_dtr_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosSetRts
#
# Purpose....: Set RTS active
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosSetRts
RdosSetRts:
    # cdecl wrapper: assert the RTS modem-control line.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate set_rts_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosResetRts
#
# Purpose....: Set RTS inactive
#
# Parameters.: Com handle
#
##########################################################################*/
.global RdosResetRts
RdosResetRts:
    # cdecl wrapper: de-assert the RTS modem-control line.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate reset_rts_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosGetReceiveBufferSpace
#
# Purpose....: Get receive buffer free space
#
# Parameters.: Com handle
#
# Returns....: Free bytes
#
##########################################################################*/
.global RdosGetReceiveBufferSpace
RdosGetReceiveBufferSpace:
    # cdecl wrapper: int RdosGetReceiveBufferSpace(handle)
    # The gate's result (free bytes per the header) is returned in eax as-is.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate get_com_receive_space_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosGetSendBufferSpace
#
# Purpose....: Get send buffer free space
#
# Parameters.: Com handle
#
# Returns....: Free bytes
#
##########################################################################*/
.global RdosGetSendBufferSpace
RdosGetSendBufferSpace:
    # cdecl wrapper: int RdosGetSendBufferSpace(handle)
    # The gate's result (free bytes per the header) is returned in eax as-is.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = com handle
    UserGate get_com_send_space_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosOpenFile
#
# Purpose....: Open file
#
# Parameters.: Filename
# Access
#
# Returns...: File handle
#
##########################################################################*/
.global RdosOpenFile
RdosOpenFile:
    # cdecl wrapper: int RdosOpenFile(name, access)
    # Returns the file handle, or 0 on failure (gate signals failure via CF).
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edi
    movl 8(%ebp),%edi                   # edi -> filename
    movb 12(%ebp),%cl                   # cl = access mode
    UserGate open_file_nr
    jc OpenFileFailed
    movzx %bx,%eax                      # handle returned in bx
    jmp OpenFileDone
OpenFileFailed:
    xorl %eax,%eax                      # failure -> 0
OpenFileDone:
    popl %edi
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCreateFile
#
# Purpose....: Create file
#
# Parameters.: Filename
# Attribute
#
# Returns...: File handle
#
##########################################################################*/
.global RdosCreateFile
RdosCreateFile:
    # cdecl wrapper: int RdosCreateFile(name, attribute)
    # Returns the file handle, or 0 on failure (gate signals failure via CF).
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edi
    movl 8(%ebp),%edi                   # edi -> filename
    movw 12(%ebp),%cx                   # cx = attribute
    UserGate create_file_nr
    jc CreateFileFailed
    movzx %bx,%eax                      # handle returned in bx
    jmp CreateFileDone
CreateFileFailed:
    xorl %eax,%eax                      # failure -> 0
CreateFileDone:
    popl %edi
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCloseFile
#
# Purpose....: Close file
#
# Parameters.: File handle
#
##########################################################################*/
.global RdosCloseFile
RdosCloseFile:
    # cdecl wrapper: void RdosCloseFile(handle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = file handle
    UserGate close_file_nr
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosIsDevice
#
# Purpose....: Check if file is device
#
# Parameters.: TRUE if device
#
##########################################################################*/
.global RdosIsDevice
RdosIsDevice:
    # cdecl wrapper: int RdosIsDevice(handle)
    # Returns 1 if bit 15 of the ioctl data word is set (device), else 0.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = file handle
    UserGate get_ioctl_data_nr
    testw $0x8000,%dx                   # device flag in dx bit 15
    jz ridFail
    movl $1,%eax                        # device -> TRUE
    jmp ridDone
ridFail:
    xorl %eax,%eax                      # regular file -> FALSE
ridDone:
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosDuplFile
#
# Purpose....: Duplicate file handle
#
# Parameters.: File handle
#
# Returns....: File handle
#
##########################################################################*/
.global RdosDuplFile
RdosDuplFile:
    # cdecl wrapper: int RdosDuplFile(handle)
    # Returns the duplicated handle, or 0 on failure (CF set by gate).
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = file handle
    UserGate dupl_file_nr
    jc DuplFileFailed
    movzx %bx,%eax                      # new handle returned in bx
    jmp DuplFileDone
DuplFileFailed:
    xorl %eax,%eax                      # failure -> 0
DuplFileDone:
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosGetFileSize
#
# Purpose....: Get file size
#
# Parameters.: File handle
#
# Returns....: Size
#
##########################################################################*/
.global RdosGetFileSize
RdosGetFileSize:
    # cdecl wrapper: int RdosGetFileSize(handle)
    # On gate failure (CF set) falls through to return 0; otherwise the
    # gate's size result in eax is returned as-is.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = file handle
    UserGate get_file_size_nr
    jnc GetFileSizeDone
GetFileSizeFail:
    xorl %eax,%eax                      # failure -> 0
GetFileSizeDone:
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosSetFileSize
#
# Purpose....: Set file size
#
# Parameters.: File handle
# Size
#
##########################################################################*/
.global RdosSetFileSize
RdosSetFileSize:
    # cdecl wrapper: void RdosSetFileSize(handle, size)
    pushl %ebp
    movl %esp,%ebp
    pushl %eax
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = file handle
    movl 12(%ebp),%eax                  # eax = new size
    UserGate set_file_size_nr
    popl %ebx
    popl %eax
    leave
    ret
/*##########################################################################
#
# Name : RdosGetFilePos
#
# Purpose....: Get file position
#
# Parameters.: File handle
#
# Returns....: Position
#
##########################################################################*/
.global RdosGetFilePos
RdosGetFilePos:
    # cdecl wrapper: int RdosGetFilePos(handle)
    # On gate failure (CF set) falls through to return 0; otherwise the
    # gate's position result in eax is returned as-is.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = file handle
    UserGate get_file_pos_nr
    jnc GetFilePosDone
GetFilePosFail:
    xorl %eax,%eax                      # failure -> 0
GetFilePosDone:
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosSetFilePos
#
# Purpose....: Set file position
#
# Parameters.: File handle
# Position
#
##########################################################################*/
.global RdosSetFilePos
RdosSetFilePos:
    # cdecl wrapper: void RdosSetFilePos(handle, pos)
    pushl %ebp
    movl %esp,%ebp
    pushl %eax
    pushl %ebx
    movw 8(%ebp),%bx                    # bx = file handle
    movl 12(%ebp),%eax                  # eax = new position
    UserGate set_file_pos_nr
    popl %ebx
    popl %eax
    leave
    ret
/*##########################################################################
#
# Name : RdosGetFileTime
#
# Purpose....: Get file time & date
#
# Parameters.: File handle
# &MSB, &LSB
#
##########################################################################*/
.global RdosGetFileTime
RdosGetFileTime:
    # cdecl wrapper: void RdosGetFileTime(handle, &msb, &lsb)
    # On gate failure (CF set) the outputs are left untouched.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %edi
    movw 8(%ebp),%bx                    # bx = file handle
    UserGate get_file_time_nr           # edx:eax = file time in tics
    jc GetFileTimeDone
    movl 12(%ebp),%edi
    movl %edx,(%edi)                    # *msb
    movl 16(%ebp),%edi
    movl %eax,(%edi)                    # *lsb
GetFileTimeDone:
    popl %edi
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosSetFileTime
#
# Purpose....: Set file time & date
#
# Parameters.: File handle
# MSB, LSB
#
##########################################################################*/
.global RdosSetFileTime
RdosSetFileTime:
    # cdecl wrapper: void RdosSetFileTime(handle, msb, lsb)
    pushl %ebp
    movl %esp,%ebp
    pushl %eax
    pushl %ebx
    pushl %edx
    movw 8(%ebp),%bx                    # bx = file handle
    movl 12(%ebp),%edx                  # edx = time MSB
    movl 16(%ebp),%eax                  # eax = time LSB
    UserGate set_file_time_nr
    popl %edx
    popl %ebx
    popl %eax
    leave
    ret
/*##########################################################################
#
# Name : RdosReadFile
#
# Purpose....: Read from file
#
# Parameters.: File handle
# buffer
# count
#
# Returns....: Read count
#
##########################################################################*/
.global RdosReadFile
RdosReadFile:
    # cdecl wrapper: int RdosReadFile(handle, buf, count)
    # The gate's read count is returned in eax as-is.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edi
    movw 8(%ebp),%bx                    # bx = file handle
    movl 12(%ebp),%edi                  # edi -> buffer
    movl 16(%ebp),%ecx                  # ecx = byte count
    UserGate read_file_nr
    popl %edi
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosWriteFile
#
# Purpose....: Write to file
#
# Parameters.: File handle
# buffer
# count
#
# Returns....: Written count
#
##########################################################################*/
.global RdosWriteFile
RdosWriteFile:
    # cdecl wrapper: int RdosWriteFile(handle, buf, count)
    # The gate's written count is returned in eax as-is.
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %ecx
    pushl %edi
    movw 8(%ebp),%bx                    # bx = file handle
    movl 12(%ebp),%edi                  # edi -> buffer
    movl 16(%ebp),%ecx                  # ecx = byte count
    UserGate write_file_nr
    popl %edi
    popl %ecx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCreateMapping
#
# Purpose....: Create file mapping
#
# Parameters.: Size
#
# Returns...: Filemap handle
#
##########################################################################*/
.global RdosCreateMapping
RdosCreateMapping:
    # cdecl wrapper: int RdosCreateMapping(size)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    movl 8(%ebp),%eax                   # eax = mapping size
    UserGate create_mapping_nr
    movzx %bx,%eax                      # filemap handle returned in bx
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCreateNamedMapping
#
# Purpose....: Create named file mapping
#
# Parameters.: Name
# Size
#
# Returns...: Filemap handle
#
##########################################################################*/
.global RdosCreateNamedMapping
RdosCreateNamedMapping:
    # cdecl wrapper: int RdosCreateNamedMapping(name, size)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %edi
    movl 8(%ebp),%edi                   # edi -> name
    movl 12(%ebp),%eax                  # eax = mapping size
    UserGate create_named_mapping_nr
    movzx %bx,%eax                      # filemap handle returned in bx
    popl %edi
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosCreateNamedFileMapping
#
# Purpose....: Create file named file mapping
#
# Parameters.: Name
# Size
# File handle
#
# Returns...: Filemap handle
#
##########################################################################*/
.global RdosCreateNamedFileMapping
RdosCreateNamedFileMapping:
    # cdecl wrapper: int RdosCreateNamedFileMapping(name, size, filehandle)
    pushl %ebp
    movl %esp,%ebp
    pushl %ebx
    pushl %edi
    movl 8(%ebp),%edi                   # edi -> name
    movl 12(%ebp),%eax                  # eax = mapping size
    movw 16(%ebp),%bx                   # bx = backing file handle
    UserGate create_named_file_mapping_nr
    movzx %bx,%eax                      # filemap handle returned in bx
    popl %edi
    popl %ebx
    leave
    ret
/*##########################################################################
#
# Name : RdosOpenNamedMapping
#
# Purpose....: Open named file mapping
#
# Parameters.: Name
#
# Returns...: Filemap handle
#
##########################################################################*/
.global RdosOpenNamedMapping
RdosOpenNamedMapping:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/edi preserved for the caller */
pushl %edi
movl 8(%ebp),%edi               /* edi = name pointer (arg 1) */
UserGate open_named_mapping_nr
movzx %bx,%eax                  /* zero-extend 16-bit filemap handle into eax */
popl %edi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSyncMapping
#
# Purpose....: Sync file mapping
#
# Parameters.: Filemap handle
#
##########################################################################*/
.global RdosSyncMapping
RdosSyncMapping:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = filemap handle (arg 1, low 16 bits) */
UserGate sync_mapping_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseMapping
#
# Purpose....: Close file mapping
#
# Parameters.: Filemap handle
#
##########################################################################*/
.global RdosCloseMapping
RdosCloseMapping:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = filemap handle (arg 1, low 16 bits) */
UserGate close_mapping_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosMapView
#
# Purpose....: Map view of file into memory
#
# Parameters.: Filemap handle
# Offset
# Address
# Size
#
##########################################################################*/
.global RdosMapView
RdosMapView:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/ecx/edi preserved for the caller */
pushl %ecx
pushl %edi
movw 8(%ebp),%bx                /* bx  = filemap handle (arg 1, low 16 bits) */
movl 12(%ebp),%eax              /* eax = offset within the mapping (arg 2) */
movl 16(%ebp),%edi              /* edi = target address (arg 3) */
movl 20(%ebp),%ecx              /* ecx = view size (arg 4) */
UserGate map_view_nr
popl %edi
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosUnmapView
#
# Purpose....: Unmap view of file
#
# Parameters.: Filemap handle
#
##########################################################################*/
.global RdosUnmapView
RdosUnmapView:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = filemap handle (arg 1, low 16 bits) */
UserGate unmap_view_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetCurDrive
#
# Purpose....: Set current drive
#
# Parameters.: Drive
#
##########################################################################*/
.global RdosSetCurDrive
RdosSetCurDrive:
pushl %ebp
movl %esp,%ebp
movb 8(%ebp),%al                /* al = drive number (arg 1, low 8 bits) */
UserGate set_cur_drive_nr       /* gate reports failure via the carry flag */
jc rscdrFail
movl $1,%eax                    /* success: return TRUE */
jmp rscdrDone
rscdrFail:
xorl %eax,%eax                  /* failure: return FALSE */
rscdrDone:
leave
ret
/*##########################################################################
#
# Name : RdosGetCurDrive
#
# Purpose....: Get current drive
#
# Returns....: Drive
#
##########################################################################*/
.global RdosGetCurDrive
RdosGetCurDrive:
pushl %ebp
movl %esp,%ebp
xorl %eax,%eax                  /* clear eax so only al carries the result */
UserGate get_cur_drive_nr
movzx %al,%eax                  /* zero-extend 8-bit drive number into eax */
leave
ret
/*##########################################################################
#
# Name : RdosSetCurDir
#
# Purpose....: Set current directory
#
# Parameters.: Pathname
#
##########################################################################*/
.global RdosSetCurDir
RdosSetCurDir:
pushl %ebp
movl %esp,%ebp
pushl %edi                      /* edi preserved for the caller */
movl 8(%ebp),%edi               /* edi = pathname pointer (arg 1) */
UserGate set_cur_dir_nr         /* gate reports failure via the carry flag */
jc rscdFail
movl $1,%eax                    /* success: return TRUE */
jmp rscdDone
rscdFail:
xorl %eax,%eax                  /* failure: return FALSE */
rscdDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosGetCurDir
#
# Purpose....: Get current directory
#
# Parameters.: Drive
# Pathname
#
##########################################################################*/
.global RdosGetCurDir
RdosGetCurDir:
pushl %ebp
movl %esp,%ebp
pushl %edi                      /* edi preserved for the caller */
movb 8(%ebp),%al                /* al  = drive number (arg 1, low 8 bits) */
movl 12(%ebp),%edi              /* edi = pathname output buffer (arg 2) */
UserGate get_cur_dir_nr         /* gate reports failure via the carry flag */
jc rgcdFail
movl $1,%eax                    /* success: return TRUE */
jmp rgcdDone
rgcdFail:
xorl %eax,%eax                  /* failure: return FALSE */
rgcdDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosMakeDir
#
# Purpose....: Create directory
#
# Parameters.: Pathname
#
##########################################################################*/
.global RdosMakeDir
RdosMakeDir:
pushl %ebp
movl %esp,%ebp
pushl %edi                      /* edi preserved for the caller */
movl 8(%ebp),%edi               /* edi = pathname pointer (arg 1) */
UserGate make_dir_nr            /* gate reports failure via the carry flag */
jc mdFail
movl $1,%eax                    /* success: return TRUE */
jmp mdDone
mdFail:
xorl %eax,%eax                  /* failure: return FALSE */
mdDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosRemoveDir
#
# Purpose....: Remove directory
#
# Parameters.: Pathname
#
##########################################################################*/
.global RdosRemoveDir
RdosRemoveDir:
pushl %ebp
movl %esp,%ebp
pushl %edi                      /* edi preserved for the caller */
movl 8(%ebp),%edi               /* edi = pathname pointer (arg 1) */
UserGate remove_dir_nr          /* gate reports failure via the carry flag */
jc rdFail
movl $1,%eax                    /* success: return TRUE */
jmp rdDone
rdFail:
xorl %eax,%eax                  /* failure: return FALSE */
rdDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosRenameFile
#
# Purpose....: Rename file
#
# Parameters.: ToName
# FromName
#
##########################################################################*/
.global RdosRenameFile
RdosRenameFile:
pushl %ebp
movl %esp,%ebp
pushl %esi                      /* esi/edi preserved for the caller */
pushl %edi
movl 8(%ebp),%edi               /* edi = destination name (arg 1) */
movl 12(%ebp),%esi              /* esi = source name (arg 2) */
UserGate rename_file_nr         /* gate reports failure via the carry flag */
jc rfFail
mov $1,%eax                     /* success: return TRUE */
jmp rfDone
rfFail:
xorl %eax,%eax                  /* failure: return FALSE */
rfDone:
popl %edi
popl %esi
leave
ret
/*##########################################################################
#
# Name : RdosDeleteFile
#
# Purpose....: Delete file
#
# Parameters.: Pathname
#
##########################################################################*/
.global RdosDeleteFile
RdosDeleteFile:
pushl %ebp
movl %esp,%ebp
pushl %edi                      /* edi preserved for the caller */
movl 8(%ebp),%edi               /* edi = pathname pointer (arg 1) */
UserGate delete_file_nr         /* gate reports failure via the carry flag */
jc dfFail
mov $1,%eax                     /* success: return TRUE */
jmp dfDone
dfFail:
xorl %eax,%eax                  /* failure: return FALSE */
dfDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosGetFileAttribute
#
# Purpose....: Get file attribute
#
# Parameters.: Pathname
# &Attrib
#
##########################################################################*/
.global RdosGetFileAttribute
RdosGetFileAttribute:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edi preserved for the caller */
pushl %edi
movl 8(%ebp),%edi               /* edi = pathname pointer (arg 1) */
UserGate get_file_attribute_nr  /* gate reports failure via carry; cx = attribute */
jc gfaFail
movl 12(%ebp),%edi              /* edi = &attrib output parameter (arg 2) */
movzx %cx,%ecx                  /* zero-extend 16-bit attribute */
movl %ecx,(%edi)                /* *attrib = attribute */
movl $1,%eax                    /* success: return TRUE */
jmp gfaDone
gfaFail:
xorl %eax,%eax                  /* failure: return FALSE */
gfaDone:
popl %edi
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosSetFileAttribute
#
# Purpose....: Set file attribute
#
# Parameters.: Pathname
# Attrib
#
##########################################################################*/
.global RdosSetFileAttribute
RdosSetFileAttribute:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edi preserved for the caller */
pushl %edi
movl 8(%ebp),%edi               /* edi = pathname pointer (arg 1) */
movw 12(%ebp),%cx               /* cx  = attribute (arg 2, low 16 bits) */
UserGate set_file_attribute_nr  /* gate reports failure via the carry flag */
jc sfaFail
movl $1,%eax                    /* success: return TRUE */
jmp sfaDone
sfaFail:
xorl %eax,%eax                  /* failure: return FALSE */
sfaDone:
popl %edi
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosOpenDir
#
# Purpose....: Open directory
#
# Parameters.: Pathname
#
# Returns....: Dir handle
#
##########################################################################*/
.global RdosOpenDir
RdosOpenDir:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/edi preserved for the caller */
pushl %edi
movl 8(%ebp),%edi               /* edi = pathname pointer (arg 1) */
UserGate open_dir_nr            /* gate reports failure via carry; bx = handle */
jc odFail
movzx %bx,%eax                  /* zero-extend 16-bit dir handle into eax */
jmp odDone
odFail:
xorl %eax,%eax                  /* failure: return 0 */
odDone:
popl %edi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseDir
#
# Purpose....: Close directory
#
# Parameters.: Dir handle
#
##########################################################################*/
.global RdosCloseDir
RdosCloseDir:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = dir handle (arg 1, low 16 bits) */
UserGate close_dir_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosReadDir
#
# Purpose....: Read directory entry
#
# Parameters.: Dir handle
# Entry #
# MaxNameSize
# Name buffer
# &FileSize
# &Attribute
# &Msb time
# &Lsb time
#
##########################################################################*/
.global RdosReadDir
RdosReadDir:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/ecx/edi preserved for the caller */
pushl %ecx
pushl %edi
movw 8(%ebp),%bx                /* bx  = dir handle (arg 1) */
movw 12(%ebp),%dx               /* dx  = entry number (arg 2) */
                                /* NOTE(review): edx is clobbered but not saved here,
                                   unlike ecx/edi — confirm callers treat edx as scratch */
movw 16(%ebp),%cx               /* cx  = max name size (arg 3) */
movl 20(%ebp),%edi              /* edi = name buffer (arg 4) */
UserGate read_dir_nr            /* carry set on failure; on success the gate
                                   leaves size in ecx, attrib in bx, time in edx:eax
                                   as consumed by the stores below */
jc rdiFail
movl 24(%ebp),%edi
movl %ecx,(%edi)                /* *FileSize = ecx */
movl 28(%ebp),%edi
movzx %bx,%ebx                  /* zero-extend 16-bit attribute */
movl %ebx,(%edi)                /* *Attribute = bx */
movl 32(%ebp),%edi
movl %edx,(%edi)                /* *MsbTime = edx */
movl 36(%ebp),%edi
movl %eax,(%edi)                /* *LsbTime = eax */
movl $1,%eax                    /* success: return TRUE */
jmp rdiDone
rdiFail:
xorl %eax,%eax                  /* failure: return FALSE */
rdiDone:
popl %edi
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetFocus
#
# Purpose....: Set input focus
#
# Parameters.: Focus handle
#
##########################################################################*/
.global RdosSetFocus
RdosSetFocus:
pushl %ebp
movl %esp,%ebp
movl 8(%ebp),%eax               /* eax = focus handle (arg 1) */
UserGate set_focus_nr
leave
ret
/*##########################################################################
#
# Name : RdosGetFocus
#
# Purpose....: Get input focus
#
# Returns....: Focus handle
#
##########################################################################*/
.global RdosGetFocus
RdosGetFocus:
pushl %ebp
movl %esp,%ebp
UserGate get_focus_nr           /* gate result (focus handle) returned in eax as-is */
leave
ret
/*##########################################################################
#
# Name : RdosClearKeyboard
#
# Purpose....: Clear keyboard buffer
#
##########################################################################*/
.global RdosClearKeyboard
RdosClearKeyboard:
pushl %ebp
movl %esp,%ebp
UserGate flush_keyboard_nr      /* no arguments, no result */
leave
ret
/*##########################################################################
#
# Name : RdosPollKeyboard
#
# Purpose....: Poll keyboard buffer
#
# Returns....: TRUE if non-empty
#
##########################################################################*/
.global RdosPollKeyboard
RdosPollKeyboard:
pushl %ebp
movl %esp,%ebp
UserGate poll_keyboard_nr       /* carry set when the buffer is empty */
jc rpkEmpty
mov $1,%eax                     /* non-empty: return TRUE */
jmp rpkDone
rpkEmpty:
xorl %eax,%eax                  /* empty: return FALSE */
rpkDone:
leave
ret
/*##########################################################################
#
# Name : RdosReadKeyboard
#
# Purpose....: Read keyboard buffer
#
# Returns....: Scan code
#
##########################################################################*/
.global RdosReadKeyboard
RdosReadKeyboard:
pushl %ebp
movl %esp,%ebp
UserGate read_keyboard_nr
movzx %ax,%eax                  /* zero-extend 16-bit scan code into eax */
leave
ret
/*##########################################################################
#
# Name : RdosGetKeyboardState
#
# Purpose....: Get keyboard buffer
#
# Returns....: Keyboard state
#
##########################################################################*/
.global RdosGetKeyboardState
RdosGetKeyboardState:
pushl %ebp
movl %esp,%ebp
UserGate get_keyboard_state_nr
movzx %ax,%eax                  /* zero-extend 16-bit state word into eax */
leave
ret
/*##########################################################################
#
# Name : RdosPutKeyboard
#
# Purpose....: Put scancode in keyboard buffer
#
##########################################################################*/
.global RdosPutKeyboard
RdosPutKeyboard:
pushl %ebp
movl %esp,%ebp
pushl %edx                      /* edx carries gate inputs; preserve caller's edx */
movw 8(%ebp),%ax                /* ax = scan code (arg 1, low 16 bits) */
movb 12(%ebp),%dl               /* dl = arg 2 (low 8 bits) */
movb 16(%ebp),%dh               /* dh = arg 3 (low 8 bits) */
UserGate put_keyboard_code_nr
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosPeekKeyEvent
#
# Purpose....: Peek keyboard event
#
##########################################################################*/
.global RdosPeekKeyEvent
RdosPeekKeyEvent:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx/edi preserved for the caller */
pushl %edx
pushl %edi
UserGate peek_key_event_nr      /* carry set when no event is available */
jc rpeFail
movl 8(%ebp),%edi
movzx %ax,%eax                  /* event field 1 delivered in ax */
movl %eax,(%edi)
movl 12(%ebp),%edi
movzx %cx,%eax                  /* event field 2 delivered in cx */
movl %eax,(%edi)
movl 16(%ebp),%edi
movzx %dl,%eax                  /* event field 3 delivered in dl */
movl %eax,(%edi)
movl 20(%ebp),%edi
movzx %dh,%eax                  /* event field 4 delivered in dh */
movl %eax,(%edi)
movl $1,%eax                    /* success: return TRUE */
jmp rpeDone
rpeFail:
xorl %eax,%eax                  /* no event: return FALSE */
rpeDone:
popl %edi
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosReadKeyEvent
#
# Purpose....: Read keyboard event
#
##########################################################################*/
.global RdosReadKeyEvent
RdosReadKeyEvent:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx/edi preserved for the caller */
pushl %edx
pushl %edi
UserGate read_key_event_nr      /* carry set when no event is available */
jc rkeFail
movl 8(%ebp),%edi
movzx %ax,%eax                  /* event field 1 delivered in ax */
movl %eax,(%edi)
movl 12(%ebp),%edi
movzx %cx,%eax                  /* event field 2 delivered in cx */
movl %eax,(%edi)
movl 16(%ebp),%edi
movzx %dl,%eax                  /* event field 3 delivered in dl */
movl %eax,(%edi)
movl 20(%ebp),%edi
movzx %dh,%eax                  /* event field 4 delivered in dh */
movl %eax,(%edi)
movl $1,%eax                    /* success: return TRUE */
jmp rkeDone
rkeFail:
xorl %eax,%eax                  /* no event: return FALSE */
rkeDone:
popl %edi
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosHideMouse
#
# Purpose....: Hide mouse cursor
#
##########################################################################*/
.global RdosHideMouse
RdosHideMouse:
pushl %ebp
movl %esp,%ebp
UserGate hide_mouse_nr          /* no arguments, no result */
leave
ret
/*##########################################################################
#
# Name : RdosShowMouse
#
# Purpose....: Show mouse cursor
#
##########################################################################*/
.global RdosShowMouse
RdosShowMouse:
pushl %ebp
movl %esp,%ebp
UserGate show_mouse_nr          /* no arguments, no result */
leave
ret
/*##########################################################################
#
# Name : RdosGetMousePosition
#
# Purpose....: Get mouse position
#
# Parameters.: &x, &y
#
##########################################################################*/
.global RdosGetMousePosition
RdosGetMousePosition:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
UserGate get_mouse_position_nr  /* gate delivers x in cx, y in dx */
movl 8(%ebp),%eax               /* eax = &x (arg 1) */
movzx %cx,%ecx
movl %ecx,(%eax)                /* *x = cx */
movl 12(%ebp),%eax              /* eax = &y (arg 2) */
movzx %dx,%edx
movl %edx,(%eax)                /* *y = dx */
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosSetMousePosition
#
# Purpose....: Set mouse position
#
# Parameters.: x, y
#
##########################################################################*/
.global RdosSetMousePosition
RdosSetMousePosition:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
movw 8(%ebp),%cx                /* cx = x (arg 1, low 16 bits) */
movw 12(%ebp),%dx               /* dx = y (arg 2, low 16 bits) */
UserGate set_mouse_position_nr
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosSetMouseWindow
#
# Purpose....: Set mouse window
#
# Parameters.: start x, start y
# end x, end y
#
##########################################################################*/
.global RdosSetMouseWindow
RdosSetMouseWindow:
pushl %ebp
movl %esp,%ebp
pushl %eax                      /* all four gate-input registers preserved */
pushl %ebx
pushl %ecx
pushl %edx
movw 8(%ebp),%ax                /* ax = start x (arg 1) */
movw 12(%ebp),%bx               /* bx = start y (arg 2) */
movw 16(%ebp),%cx               /* cx = end x (arg 3) */
movw 20(%ebp),%dx               /* dx = end y (arg 4) */
UserGate set_mouse_window_nr
popl %edx
popl %ecx
popl %ebx
popl %eax
leave
ret
/*##########################################################################
#
# Name : RdosSetMouseMickey
#
# Purpose....: Set mouse mickey
#
# Parameters.: x, y
#
##########################################################################*/
.global RdosSetMouseMickey
RdosSetMouseMickey:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
movw 8(%ebp),%cx                /* cx = x mickey (arg 1, low 16 bits) */
movw 12(%ebp),%dx               /* dx = y mickey (arg 2, low 16 bits) */
UserGate set_mouse_mickey_nr
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosGetCursorPosition
#
# Purpose....: Get cursor position
#
# Parameters.: &x, &y
#
##########################################################################*/
.global RdosGetCursorPosition
RdosGetCursorPosition:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
UserGate get_cursor_position_nr /* gate delivers x in cx, y in dx */
movl 8(%ebp),%eax               /* eax = &x (arg 1) */
movzx %cx,%ecx
movl %ecx,(%eax)                /* *x = cx */
movl 12(%ebp),%eax              /* eax = &y (arg 2) */
movzx %dx,%edx
movl %edx,(%eax)                /* *y = dx */
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosSetCursorPosition
#
# Purpose....: Set cursor position
#
# Parameters.: x, y
#
##########################################################################*/
.global RdosSetCursorPosition
RdosSetCursorPosition:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
movw 8(%ebp),%cx                /* cx = x (arg 1, low 16 bits) */
movw 12(%ebp),%dx               /* dx = y (arg 2, low 16 bits) */
UserGate set_cursor_position_nr
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosGetLeftButton
#
# Purpose....: Check if left button is pressed
#
# Returns....: TRUE if pressed
#
##########################################################################*/
.global RdosGetLeftButton
RdosGetLeftButton:
pushl %ebp
movl %esp,%ebp
UserGate get_left_button_nr     /* carry set when the button is released */
jc get_left_rel
mov $1,%eax                     /* pressed: return TRUE */
jmp get_left_done
get_left_rel:
xorl %eax,%eax                  /* released: return FALSE */
get_left_done:
leave
ret
/*##########################################################################
#
# Name : RdosGetRightButton
#
# Purpose....: Check if right button is pressed
#
# Returns....: TRUE if pressed
#
##########################################################################*/
.global RdosGetRightButton
RdosGetRightButton:
pushl %ebp
movl %esp,%ebp
UserGate get_right_button_nr    /* carry set when the button is released */
jc get_right_rel
mov $1,%eax                     /* pressed: return TRUE */
jmp get_right_done
get_right_rel:
xorl %eax,%eax                  /* released: return FALSE */
get_right_done:
leave
ret
/*##########################################################################
#
# Name : RdosGetLeftButtonPressPosition
#
# Purpose....: Get left button press position
#
# Parameters.: &x, &y
#
##########################################################################*/
.global RdosGetLeftButtonPressPosition
RdosGetLeftButtonPressPosition:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
UserGate get_left_button_press_position_nr /* x in cx, y in dx */
movl 8(%ebp),%eax               /* eax = &x (arg 1) */
movzx %cx,%ecx
movl %ecx,(%eax)                /* *x = cx */
movl 12(%ebp),%eax              /* eax = &y (arg 2) */
movzx %dx,%edx
movl %edx,(%eax)                /* *y = dx */
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosGetRightButtonPressPosition
#
# Purpose....: Get right button press position
#
# Parameters.: &x, &y
#
##########################################################################*/
.global RdosGetRightButtonPressPosition
RdosGetRightButtonPressPosition:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
UserGate get_right_button_press_position_nr /* x in cx, y in dx */
movl 8(%ebp),%eax               /* eax = &x (arg 1) */
movzx %cx,%ecx
movl %ecx,(%eax)                /* *x = cx */
movl 12(%ebp),%eax              /* eax = &y (arg 2) */
movzx %dx,%edx
movl %edx,(%eax)                /* *y = dx */
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosGetLeftButtonReleasePosition
#
# Purpose....: Get left button release position
#
# Parameters.: &x, &y
#
##########################################################################*/
.global RdosGetLeftButtonReleasePosition
RdosGetLeftButtonReleasePosition:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
UserGate get_left_button_release_position_nr /* x in cx, y in dx */
movl 8(%ebp),%eax               /* eax = &x (arg 1) */
movzx %cx,%ecx
movl %ecx,(%eax)                /* *x = cx */
movl 12(%ebp),%eax              /* eax = &y (arg 2) */
movzx %dx,%edx
movl %edx,(%eax)                /* *y = dx */
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosGetRightButtonReleasePosition
#
# Purpose....: Get right button release position
#
# Parameters.: &x, &y
#
##########################################################################*/
.global RdosGetRightButtonReleasePosition
RdosGetRightButtonReleasePosition:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx preserved for the caller */
pushl %edx
UserGate get_right_button_release_position_nr /* x in cx, y in dx */
movl 8(%ebp),%eax               /* eax = &x (arg 1) */
movzx %cx,%ecx
movl %ecx,(%eax)                /* *x = cx */
movl 12(%ebp),%eax              /* eax = &y (arg 2) */
movzx %dx,%edx
movl %edx,(%eax)                /* *y = dx */
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosReadLine
#
# Purpose....: Read a line from keyboard
#
# Parameters.: Buffer
# Size
#
# Returns....: Read count
#
##########################################################################*/
.global RdosReadLine
RdosReadLine:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edi preserved for the caller */
pushl %edi
movl 8(%ebp),%edi               /* edi = line buffer (arg 1) */
movl 12(%ebp),%ecx              /* ecx = buffer size (arg 2) */
UserGate read_con_nr            /* gate result (read count) returned in eax as-is */
popl %edi
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosWriteChar
#
# Purpose....: Write a character to screen
#
# Parameters.: Char
#
##########################################################################*/
.global RdosWriteChar
RdosWriteChar:
pushl %ebp
movl %esp,%ebp
movb 8(%ebp),%al                /* al = character to write (arg 1, low 8 bits) */
UserGate write_char_nr
leave
ret
/*##########################################################################
#
# Name : RdosWriteSizeString
#
# Purpose....: Write a fixed number of characters to screen
#
# Parameters.: String
# Count
#
##########################################################################*/
.global RdosWriteSizeString
RdosWriteSizeString:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edi preserved for the caller */
pushl %edi
movl 8(%ebp),%edi               /* edi = string pointer (arg 1) */
movl 12(%ebp),%ecx              /* ecx = character count (arg 2) */
UserGate write_size_string_nr
popl %edi
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosWriteString
#
# Purpose....: Write a string to screen
#
# Parameters.: String
#
##########################################################################*/
.global RdosWriteString
RdosWriteString:
pushl %ebp
movl %esp,%ebp
pushl %edi                      /* edi preserved for the caller */
movl 8(%ebp),%edi               /* edi = zero-terminated string (arg 1) */
UserGate write_asciiz_nr
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosNameToIp
#
# Purpose....: Convert host name to IP address
#
# Parameters.: Name
#
# Returns....: IP
#
##########################################################################*/
.global RdosNameToIp
RdosNameToIp:
pushl %ebp
movl %esp,%ebp
pushl %edi                      /* edi preserved for the caller */
movl 8(%ebp),%edi               /* edi = host name pointer (arg 1) */
UserGate name_to_ip_nr          /* carry set on failure; IP delivered in edx */
jc rntiFail
movl %edx,%eax                  /* success: return IP */
jmp rntiDone
rntiFail:
xorl %eax,%eax                  /* failure: return 0 */
rntiDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosGetIp
#
# Purpose....: Get my IP
#
# Returns....: IP
#
##########################################################################*/
.global RdosGetIp
RdosGetIp:
pushl %ebp
movl %esp,%ebp
UserGate get_ip_address_nr      /* gate delivers IP in edx */
movl %edx,%eax                  /* return IP; caller-saved edx needs no restore */
leave
ret
/*##########################################################################
#
# Name : RdosIpToName
#
# Purpose....: Convert IP address to host name
#
# Parameters.: IP
# Name
# Size
#
##########################################################################*/
.global RdosIpToName
RdosIpToName:
pushl %ebp
movl %esp,%ebp
pushl %ecx                      /* ecx/edx/edi preserved for the caller */
pushl %edx
pushl %edi
movl 8(%ebp),%edx               /* edx = IP address (arg 1) */
movl 12(%ebp),%edi              /* edi = name output buffer (arg 2) */
movl 16(%ebp),%ecx              /* ecx = buffer size (arg 3) */
UserGate ip_to_name_nr          /* carry set on failure */
jnc ritnDone                    /* success: gate result in eax returned as-is */
ritnFail:
xorl %eax,%eax                  /* failure: return 0 */
ritnDone:
popl %edi
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosPing
#
# Purpose....: Ping node
#
# Parameters.: IP
# Timeout
#
##########################################################################*/
.global RdosPing
RdosPing:
pushl %ebp
movl %esp,%ebp
pushl %edx                      /* edx carries a gate input; preserve caller's edx */
movl 8(%ebp),%edx               /* edx = IP address (arg 1) */
movl 12(%ebp),%eax              /* eax = timeout (arg 2) */
UserGate ping_nr                /* carry set on failure */
jc ping_failed
movl $1,%eax                    /* success: return TRUE */
jmp ping_done
ping_failed:
xorl %eax,%eax                  /* failure: return FALSE */
ping_done:
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosOpenTcpConnection
#
# Purpose....: Open an active connection over TCP
#
# Parameters.: RemoteIp
# LocalPort
# RemotePort
# Timeout in ms
# BufferSize
#
# Returns....: Conn handle
#
##########################################################################*/
.global RdosOpenTcpConnection
RdosOpenTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/esi/edi preserved; ecx/edx are scratch */
pushl %esi
pushl %edi
movl 8(%ebp),%edx               /* edx = remote IP (arg 1) */
movw 12(%ebp),%si               /* si  = local port (arg 2) */
movw 16(%ebp),%di               /* di  = remote port (arg 3) */
movl 20(%ebp),%eax              /* eax = timeout in ms (arg 4) */
movl 24(%ebp),%ecx              /* ecx = buffer size (arg 5) */
UserGate open_tcp_connection_nr /* carry set on failure; handle in ebx */
mov $0,%eax                     /* preset failure result; mov leaves CF intact */
jc rotcDone
movl %ebx,%eax                  /* success: return connection handle */
rotcDone:
popl %edi
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCreateTcpListen
#
# Purpose....: Create listen handle
#
# Parameters.: Port
# MaxConnections
# BufferSize
#
# Returns....: Listen handle
#
##########################################################################*/
.global RdosCreateTcpListen
RdosCreateTcpListen:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/esi preserved for the caller */
pushl %esi
movw 8(%ebp),%si                /* si  = port (arg 1) */
movw 12(%ebp),%ax               /* ax  = max connections (arg 2) */
movl 16(%ebp),%ecx              /* ecx = buffer size (arg 3); ecx is scratch */
UserGate create_tcp_listen_nr   /* carry set on failure; handle in bx */
movzx %bx,%eax                  /* preset handle; movzx does not touch CF */
jnc ctlDone
xorl %eax,%eax                  /* failure: return 0 */
ctlDone:
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetTcpListen
#
# Purpose....: Get connection from listen
#
# Parameters.: Listen handle
#
# Returns....: Conn handle
#
##########################################################################*/
.global RdosGetTcpListen
RdosGetTcpListen:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = listen handle (arg 1, low 16 bits) */
UserGate get_tcp_listen_nr      /* carry set on failure; conn handle in ax
                                   (NOTE(review): other handle-returning gates use
                                   bx — confirm ax is correct for this gate) */
movzx %ax,%eax                  /* preset handle; movzx does not touch CF */
jnc gtlDone
xorl %eax,%eax                  /* failure: return 0 */
gtlDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseTcpListen
#
# Purpose....: Close TCP listen
#
# Parameters.: Listen handle
#
##########################################################################*/
.global RdosCloseTcpListen
RdosCloseTcpListen:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = listen handle (arg 1, low 16 bits) */
UserGate close_tcp_listen_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAddWaitForTcpListen
#
# Purpose....: Add wait object to tcp listen
#
# Parameters.: Wait handle
# Listen handle
# ID
#
##########################################################################*/
.global RdosAddWaitForTcpListen
RdosAddWaitForTcpListen:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/ecx preserved for the caller */
pushl %ecx
movw 8(%ebp),%bx                /* bx  = wait handle (arg 1) */
movw 12(%ebp),%ax               /* ax  = listen handle (arg 2) */
movl 16(%ebp),%ecx              /* ecx = caller-chosen ID (arg 3) */
UserGate add_wait_for_tcp_listen_nr /* carry set on failure */
movl $1,%eax                    /* preset TRUE; mov leaves CF intact */
jnc awftlDone
xorl %eax,%eax                  /* failure: return FALSE */
awftlDone:
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosWaitForTcpConnection
#
# Purpose....: Wait for Tcp connection to be established
#
# Parameters.: Conn handle
# Timeout ms
#
##########################################################################*/
.global RdosWaitForTcpConnection
RdosWaitForTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx  = connection handle (arg 1) */
movl 12(%ebp),%eax              /* eax = timeout in ms (arg 2) */
UserGate wait_for_tcp_connection_nr /* carry set on failure/timeout */
movl $1,%eax                    /* preset TRUE; mov leaves CF intact */
jnc wftcDone
xorl %eax,%eax                  /* failure: return FALSE */
wftcDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAddWaitForTcpConnection
#
# Purpose....: Add wait object to tcp connection
#
# Parameters.: Wait handle
# Conn handle
# ID
#
##########################################################################*/
.global RdosAddWaitForTcpConnection
RdosAddWaitForTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/ecx preserved for the caller */
pushl %ecx
movw 8(%ebp),%bx                /* bx  = wait handle (arg 1) */
movw 12(%ebp),%ax               /* ax  = connection handle (arg 2) */
movl 16(%ebp),%ecx              /* ecx = caller-chosen ID (arg 3) */
UserGate add_wait_for_tcp_connection_nr /* carry set on failure */
movl $1,%eax                    /* preset TRUE; mov leaves CF intact */
jnc awftcDone
xorl %eax,%eax                  /* failure: return FALSE */
awftcDone:
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseTcpConnection
#
# Purpose....: Close Tcp connection
#
# Parameters.: Conn handle
#
##########################################################################*/
.global RdosCloseTcpConnection
RdosCloseTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate close_tcp_connection_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosDeleteTcpConnection
#
# Purpose....: Delete Tcp connection
#
# Parameters.: Conn handle
#
##########################################################################*/
.global RdosDeleteTcpConnection
RdosDeleteTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate delete_tcp_connection_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAbortTcpConnection
#
# Purpose....: Abort Tcp connection
#
# Parameters.: Conn handle
#
##########################################################################*/
.global RdosAbortTcpConnection
RdosAbortTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate abort_tcp_connection_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosPushTcpConnection
#
# Purpose....: Push Tcp connection
#
# Parameters.: Conn handle
#
##########################################################################*/
.global RdosPushTcpConnection
RdosPushTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate push_tcp_connection_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosPollTcpConnection
#
# Purpose....: Poll Tcp connection
#
# Parameters.: Conn handle
#
# Returns....: Available bytes in receive buffer
#
##########################################################################*/
.global RdosPollTcpConnection
RdosPollTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate poll_tcp_connection_nr /* gate result (available bytes) in eax as-is */
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosIsTcpConnectionClosed
#
# Purpose....: Check if connection is closed
#
# Parameters.: Conn handle
#
# Returns....: TRUE if closed
#
##########################################################################*/
.global RdosIsTcpConnectionClosed
RdosIsTcpConnectionClosed:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate is_tcp_connection_closed_nr /* carry set when connection is closed */
jc rptcClosed
xorl %eax,%eax                  /* still open: return FALSE */
jmp rptcDone
rptcClosed:
movl $1,%eax                    /* closed: return TRUE */
rptcDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetRemoteTcpConnectionIp
#
# Purpose....: Get remote IP
#
# Parameters.: Conn handle
#
# Returns....: IP
#
##########################################################################*/
.global RdosGetRemoteTcpConnectionIp
RdosGetRemoteTcpConnectionIp:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate get_remote_tcp_connection_ip_nr /* carry set on failure */
jnc grtciDone                   /* success: IP assumed to be returned in eax
                                   (NOTE(review): name_to_ip/get_ip deliver IPs in
                                   edx — confirm this gate really uses eax) */
movl $0xFFFFFFFF,%eax           /* failure: return all-ones */
grtciDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetRemoteTcpConnectionPort
#
# Purpose....: Get remote port
#
# Parameters.: Conn handle
#
# Returns....: Port
#
##########################################################################*/
.global RdosGetRemoteTcpConnectionPort
RdosGetRemoteTcpConnectionPort:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate get_remote_tcp_connection_port_nr /* carry set on failure; port in ax */
jnc grtcpDone
movl $0,%eax                    /* failure: return 0 */
grtcpDone:
movzx %ax,%eax                  /* zero-extend 16-bit port into eax */
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetLocalTcpConnectionPort
#
# Purpose....: Get local port
#
# Parameters.: Conn handle
#
# Returns....: Port
#
##########################################################################*/
.global RdosGetLocalTcpConnectionPort
RdosGetLocalTcpConnectionPort:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* bx is a gate input; preserve caller's ebx */
movw 8(%ebp),%bx                /* bx = connection handle (arg 1, low 16 bits) */
UserGate get_local_tcp_connection_port_nr /* carry set on failure; port in ax */
jnc gltcpDone
movl $0,%eax                    /* failure: return 0 */
gltcpDone:
movzx %ax,%eax                  /* zero-extend 16-bit port into eax */
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosReadTcpConnection
#
# Purpose....: Read data from connection
#
# Parameters.: Conn handle
# Buffer
# Size
#
# Returns....: Read bytes
#
##########################################################################*/
.global RdosReadTcpConnection
RdosReadTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/ecx/edi preserved for the caller */
pushl %ecx
pushl %edi
movw 8(%ebp),%bx                /* bx  = connection handle (arg 1) */
movl 12(%ebp),%edi              /* edi = destination buffer (arg 2) */
movl 16(%ebp),%ecx              /* ecx = max bytes to read (arg 3) */
UserGate read_tcp_connection_nr /* gate result (read count) in eax as-is */
popl %edi
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosWriteTcpConnection
#
# Purpose....: Write data fto connection
#
# Parameters.: Conn handle
# Buffer
# Size
#
# Returns....: Written bytes
#
##########################################################################*/
.global RdosWriteTcpConnection
RdosWriteTcpConnection:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/ecx/edi preserved for the caller */
pushl %ecx
pushl %edi
movw 8(%ebp),%bx                /* bx  = connection handle (arg 1) */
movl 12(%ebp),%edi              /* edi = source buffer (arg 2) */
movl 16(%ebp),%ecx              /* ecx = bytes to write (arg 3) */
UserGate write_tcp_connection_nr /* gate result (written count) in eax as-is */
popl %edi
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetLocalMailslot
#
# Purpose....: Get local mailslot from name
#
# Parameters.: Name
#
# Returns....: Mailslot handle
#
##########################################################################*/
.global RdosGetLocalMailslot
RdosGetLocalMailslot:
pushl %ebp
movl %esp,%ebp
pushl %ebx                      /* ebx/edi preserved for the caller */
pushl %edi
movl 8(%ebp),%edi               /* edi = mailslot name (arg 1) */
UserGate get_local_mailslot_nr  /* carry set on failure; handle in bx */
jc rglmFail
movzx %bx,%eax                  /* zero-extend 16-bit mailslot handle */
jmp rglmDone
rglmFail:
xorl %eax,%eax                  /* failure: return 0 */
rglmDone:
popl %edi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetRemoteMailslot
#
# Purpose....: Get remote mailslot from name
#
# Parameters.: IP
# Name
#
# Returns....: Mailslot handle
#
##########################################################################*/
.global RdosGetRemoteMailslot
RdosGetRemoteMailslot:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %edx
pushl %edi
movl 8(%ebp),%edx
movl 12(%ebp),%edi
UserGate get_remote_mailslot_nr
jc rgrmFail
movzx %bx,%eax
jmp rgrmDone
rgrmFail:
xorl %eax,%eax
rgrmDone:
popl %edi
popl %edx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosFreeMailslot
#
# Purpose....: Free mailslot
#
# Parameters.: Mailslot handle
#
##########################################################################*/
.global RdosFreeMailslot
RdosFreeMailslot:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movl 8(%ebp),%ebx
UserGate free_mailslot_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSendMailslot
#
# Purpose....: Send mailslot
#
# Parameters.: Mailslot handle
# Msg
# Size
# ReplyBuf
# MaxReplySize
#
# Returns....: Size of reply
#
##########################################################################*/
.global RdosSendMailslot
RdosSendMailslot:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %esi
pushl %edi
movw 8(%ebp),%bx		# %bx  = mailslot handle
movl 12(%ebp),%esi		# %esi = message buffer
movl 16(%ebp),%ecx		# %ecx = message size
movl 20(%ebp),%edi		# %edi = reply buffer
movl 24(%ebp),%eax		# %eax = max reply size
UserGate send_mailslot_nr
jc smFail			# carry set => send failed
movl %ecx,%eax			# success: gate leaves reply size in %ecx
jmp smDone
smFail:
movl $0xFFFFFFFF,%eax		# failure: return -1
smDone:
popl %edi
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosDefineMailslot
#
# Purpose....: Define mailslot
#
# Parameters.: Name
# Max msg size
#
##########################################################################*/
.global RdosDefineMailslot
RdosDefineMailslot:
pushl %ebp
movl %esp,%ebp
pushl %ecx
pushl %edi
movl 8(%ebp),%edi
movl 12(%ebp),%ecx
UserGate define_mailslot_nr
popl %edi
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosReceiveMailslot
#
# Purpose....: Receive from mailslot
#
# Parameters.: Msg buffer
#
# Returns....: Msg size
#
##########################################################################*/
.global RdosReceiveMailslot
RdosReceiveMailslot:
pushl %ebp
movl %esp,%ebp
pushl %ecx
pushl %edi
movl 8(%ebp),%edi
UserGate receive_mailslot_nr
movl %ecx,%eax
popl %edi
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosReplyMailslot
#
# Purpose....: Reply to mailslot
#
# Parameters.: Msg buffer
# Msg size
#
##########################################################################*/
.global RdosReplyMailslot
RdosReplyMailslot:
pushl %ebp
movl %esp,%ebp
pushl %ecx
pushl %edi
movl 8(%ebp),%edi
movl 12(%ebp),%ecx
UserGate reply_mailslot_nr
popl %edi
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosGetIdeDisc
#
# Purpose....: Get IDE disc
#
# Parameters.: Unit #
#
# Returns....: Disc #
#
##########################################################################*/
.global RdosGetIdeDisc
RdosGetIdeDisc:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movb 8(%ebp),%bl
UserGate get_ide_disc_nr
jc get_ide_disc_fail
movzx %al,%eax
jmp get_ide_disc_done
get_ide_disc_fail:
movl $0xFFFFFFFF,%eax
get_ide_disc_done:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetFloppyDisc
#
# Purpose....: Get floppy disc
#
# Parameters.: Unit #
#
# Returns....: Disc #
#
##########################################################################*/
.global RdosGetFloppyDisc
RdosGetFloppyDisc:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movb 8(%ebp),%bl
UserGate get_floppy_disc_nr
jc get_floppy_disc_fail
movzx %al,%eax
jmp get_floppy_disc_done
get_floppy_disc_fail:
movl $0xFFFFFFFF,%eax
get_floppy_disc_done:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetDiscInfo
#
# Purpose....: Get disc info
#
# Parameters.: Disc #
# Bytes / sector
# Total sectors
# BIOS sectors / cyl
# BIOS heads
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosGetDiscInfo
RdosGetDiscInfo:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
movb 8(%ebp),%al
UserGate get_disc_info_nr
jc get_disc_info_fail
movl 12(%ebp),%ebx
movzx %cx,%ecx
movl %ecx,(%ebx)
movl 16(%ebp),%ebx
movl %edx,(%ebx)
movl 20(%ebp),%ebx
movzx %si,%esi
movl %esi,(%ebx)
movl 24(%ebp),%ebx
movzx %di,%edi
movl %edi,(%ebx)
movl $1,%eax
jmp get_disc_info_done
get_disc_info_fail:
xorl %eax,%eax
get_disc_info_done:
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetDiscInfo
#
# Purpose....: Set disc info
#
# Parameters.: Disc #
# Bytes / sector
# Total sectors
# BIOS sectors / cyl
# BIOS heads
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosSetDiscInfo
RdosSetDiscInfo:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
pushl %esi
pushl %edi
movb 8(%ebp),%al
movl 12(%ebp),%ecx
movl 16(%ebp),%edx
movl 20(%ebp),%esi
movl 24(%ebp),%edi
UserGate set_disc_info_nr
jc set_disc_info_fail
movl $1,%eax
jmp set_disc_info_done
set_disc_info_fail:
xorl %eax,%eax
set_disc_info_done:
popl %edi
popl %esi
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosReadDisc
#
# Purpose....: Read from disc
#
# Parameters.: Disc #
# Sector #
# Buffer
# Size
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosReadDisc
RdosReadDisc:
pushl %ebp
movl %esp,%ebp
pushl %ecx
pushl %edx
pushl %edi
movb 8(%ebp),%al
movl 12(%ebp),%edx
movl 16(%ebp),%edi
movl 20(%ebp),%ecx
UserGate read_disc_nr
jc read_disc_fail
movl $1,%eax
jmp read_disc_done
read_disc_fail:
xorl %eax,%eax
read_disc_done:
popl %edi
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosWriteDisc
#
# Purpose....: Write to disc
#
# Parameters.: Disc #
# Sector #
# Buffer
# Size
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosWriteDisc
RdosWriteDisc:
pushl %ebp
movl %esp,%ebp
pushl %ecx
pushl %edx
pushl %edi
movb 8(%ebp),%al
movl 12(%ebp),%edx
movl 16(%ebp),%edi
movl 20(%ebp),%ecx
UserGate write_disc_nr
jc write_disc_fail
movl $1,%eax
jmp write_disc_done
write_disc_fail:
xorl %eax,%eax
write_disc_done:
popl %edi
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosAllocateFixedDrive
#
# Purpose....: Allocate fixed drive
#
# Parameters.: Drive #
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosAllocateFixedDrive
RdosAllocateFixedDrive:
pushl %ebp
movl %esp,%ebp
movb 8(%ebp),%al
UserGate allocate_fixed_drive_nr
jc allocate_fixed_drive_fail
movl $1,%eax
jmp allocate_fixed_drive_done
allocate_fixed_drive_fail:
xorl %eax,%eax
allocate_fixed_drive_done:
leave
ret
/*##########################################################################
#
# Name : RdosAllocateStaticDrive
#
# Purpose....: Allocate static drive
#
# Returns....: Drive #
#
##########################################################################*/
.global RdosAllocateStaticDrive
RdosAllocateStaticDrive:
pushl %ebp
movl %esp,%ebp
UserGate allocate_static_drive_nr
jc allocate_static_drive_fail
movzx %al,%eax
jmp allocate_static_drive_done
allocate_static_drive_fail:
xorl %eax,%eax
allocate_static_drive_done:
leave
ret
/*##########################################################################
#
# Name : RdosAllocateDynamicDrive
#
# Purpose....: Allocate dynamic drive
#
# Returns....: Drive #
#
##########################################################################*/
.global RdosAllocateDynamicDrive
RdosAllocateDynamicDrive:
pushl %ebp
movl %esp,%ebp
UserGate allocate_dynamic_drive_nr
jc allocate_dynamic_drive_fail
movzx %al,%eax
jmp allocate_dynamic_drive_done
allocate_dynamic_drive_fail:
xorl %eax,%eax
allocate_dynamic_drive_done:
leave
ret
/*##########################################################################
#
# Name : RdosGetRdfsInfo
#
# Purpose....: Get rdfs info
#
# Parameters.: Crypt tab
# Key tab
# Extent size tab
#
##########################################################################*/
.global RdosGetRdfsInfo
RdosGetRdfsInfo:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %esi
pushl %edi
movl 8(%ebp),%esi
movl 12(%ebp),%edi
movl 16(%ebp),%ebx
UserGate get_rdfs_info_nr
popl %edi
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetDriveInfo
#
# Purpose....: Get drive info
#
# Parameters.: Drive #
# Free units
# Bytes per unit
# Total units
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosGetDriveInfo
RdosGetDriveInfo:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
movb 8(%ebp),%al
UserGate get_drive_info_nr
jc get_drive_info_fail
movl 12(%ebp),%ebx
movl %eax,(%ebx)
movl 16(%ebp),%ebx
movzx %cx,%ecx
movl %ecx,(%ebx)
movl 20(%ebp),%ebx
movl %edx,(%ebx)
movl $1,%eax
jmp get_drive_info_done
get_drive_info_fail:
xorl %eax,%eax
get_drive_info_done:
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosDemandLoadDrive
#
# Purpose....: Demand load drive
#
# Parameters.: Drive #
#
##########################################################################*/
.global RdosDemandLoadDrive
RdosDemandLoadDrive:
pushl %ebp
movl %esp,%ebp
movb 8(%ebp),%al
UserGate demand_load_drive_nr
leave
ret
/*##########################################################################
#
# Name : RdosGetDriveDiscParam
#
# Purpose....: Get drive disc parameters
#
# Parameters.: Drive #
# Disc #
# Start sector
# Total sectors
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosGetDriveDiscParam
RdosGetDriveDiscParam:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %edx
movb 8(%ebp),%al
UserGate get_drive_disc_param_nr
jc get_drive_disc_param_fail
movl 12(%ebp),%ebx
movzx %al,%eax
movl %eax,(%ebx)
movl 16(%ebp),%ebx
movl %edx,(%ebx)
movl 20(%ebp),%ebx
movl %ecx,(%ebx)
movl $1,%eax
jmp get_drive_disc_param_done
get_drive_disc_param_fail:
xorl %eax,%eax
get_drive_disc_param_done:
popl %edx
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosFormatDrive
#
# Purpose....: Format drive
#
# Parameters.: Disc #
# Start sector
# Sectors
# FS name
#
# Returns....: Drive #
#
##########################################################################*/
.global RdosFormatDrive
RdosFormatDrive:
pushl %ebp
movl %esp,%ebp
pushl %ecx
pushl %edx
pushl %edi
movb 8(%ebp),%al
movl 12(%ebp),%edx
movl 16(%ebp),%ecx
movl 20(%ebp),%edi
UserGate format_drive_nr
jc rfdFail
movzx %al,%eax
jmp rfdDone
rfdFail:
xorl %eax,%eax
rfdDone:
popl %edi
popl %edx
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosGetExeName
#
# Purpose....: Get name of executable file
#
# Returns....: Exe pathname
#
##########################################################################*/
.global RdosGetExeName
RdosGetExeName:
pushl %ebp
movl %esp,%ebp
pushl %edi
UserGate get_exe_name_nr
jc rgenFail
movl %edi,%eax
jmp rgenDone
rgenFail:
xorl %eax,%eax
rgenDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosOpenAdc
#
# Purpose....: Open handle to ADC channel
#
# Parameters.: Channel #
#
# Returns....: Adc handle
#
##########################################################################*/
/*
 int RdosOpenAdc(int channel)
 In : 8(%ebp) = ADC channel number (passed to the gate in %eax)
 Out: %eax    = 16-bit ADC handle, zero-extended.
 Fix: the original 'movw %bx,%ax' left the upper 16 bits of the
 return register undefined; zero-extend the handle like every other
 handle-returning wrapper in this file (e.g. RdosGetLocalMailslot).
*/
.global RdosOpenAdc
RdosOpenAdc:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movl 8(%ebp),%eax		# channel #
UserGate open_adc_nr
movzx %bx,%eax			# handle returned in %bx; zero-extend into %eax
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseAdc
#
# Purpose....: Close ADC handle
#
# Parameters.: Adc handle
#
##########################################################################*/
.global RdosCloseAdc
RdosCloseAdc:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movl 8(%ebp),%ebx
UserGate close_adc_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosDefineAdcTime
#
# Purpose....: Define time of next conversion
#
# Parameters.: Adc handle
# MSB time
# LSB time
#
##########################################################################*/
.global RdosDefineAdcTime
RdosDefineAdcTime:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %edx
movl 8(%ebp),%ebx
movl 12(%ebp),%edx
movl 16(%ebp),%eax
UserGate define_adc_time_nr
popl %edx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosReadAdc
#
# Purpose....: Read ADC
#
# Parameters.: Adc handle
#
# Returns....: Value
#
##########################################################################*/
.global RdosReadAdc
RdosReadAdc:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movl 8(%ebp),%ebx		# %ebx = ADC handle
UserGate read_adc_nr		# NOTE(review): assumes the gate leaves the converted value in %eax -- confirm
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosReadSerialLines
#
# Purpose....: Read serial lines
#
# Parameters.: Device
# &Value
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosReadSerialLines
RdosReadSerialLines:
pushl %ebp
movl %esp,%ebp
pushl %edx
pushl %esi
movb 8(%ebp),%dh
UserGate read_serial_lines_nr
jc rdsFail
movzx %al,%eax
movl 12(%ebp),%esi
movl %eax,(%esi)
movl $1,%eax
jmp rdsDone
rdsFail:
xorl %eax,%eax
rdsDone:
popl %esi
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosToggleSerialLine
#
# Purpose....: Toggle serial line
#
# Parameters.: Device
# Line
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosToggleSerialLine
RdosToggleSerialLine:
pushl %ebp
movl %esp,%ebp
pushl %edx
movb 8(%ebp),%dh
movb 12(%ebp),%dl
UserGate toggle_serial_line_nr
jc rtsFail
movl $1,%eax
jmp rtsDone
rtsFail:
xorl %eax,%eax
rtsDone:
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosReadSerialVal
#
# Purpose....: Read serial value
#
# Parameters.: Device
# Line
# &Val
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosReadSerialVal
RdosReadSerialVal:
pushl %ebp
movl %esp,%ebp
pushl %edx
pushl %esi
movb 8(%ebp),%dh		# %dh = device
movb 12(%ebp),%dl		# %dl = line
UserGate read_serial_val_nr
pushfw				# save the gate's carry flag; shll below clobbers flags
shll $8,%eax			# scale raw reading to fixed point (<<8)
movl 16(%ebp),%esi
movl %eax,(%esi)		# *val = scaled value (written even on failure)
popfw				# restore carry from the gate
jc rdvFail
movl $1,%eax			# TRUE
jmp rdvDone
rdvFail:
xorl %eax,%eax			# FALSE
rdvDone:
popl %esi
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosWriteSerialVal
#
# Purpose....: Write serial value
#
# Parameters.: Device
# Line
# Val
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosWriteSerialVal
RdosWriteSerialVal:
pushl %ebp
movl %esp,%ebp
pushl %edx
movb 8(%ebp),%dh
movb 12(%ebp),%dl
movl 16(%ebp),%eax
sarl $8,%eax
UserGate write_serial_val_nr
jc rwvFail
movl $1,%eax
jmp rwvDone
rwvFail:
xorl %eax,%eax
rwvDone:
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosReadSerialRaw
#
# Purpose....: Read serial raw value
#
# Parameters.: Device
# Line
# &Val
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosReadSerialRaw
RdosReadSerialRaw:
pushl %ebp
movl %esp,%ebp
pushl %edx
pushl %esi
movb 8(%ebp),%dh		# %dh = device
movb 12(%ebp),%dl		# %dl = line
UserGate read_serial_val_nr	# same gate as RdosReadSerialVal, but the value is stored unscaled here
pushfw				# save carry across the stores below
movl 16(%ebp),%esi
movl %eax,(%esi)		# *val = raw value (no <<8 fixed-point scaling)
popfw
jc rdrFail
movl $1,%eax			# TRUE
jmp rdrDone
rdrFail:
xorl %eax,%eax			# FALSE
rdrDone:
popl %esi
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosWriteSerialRaw
#
# Purpose....: Write serial raw value
#
# Parameters.: Device
# Line
# Val
#
# Returns....: TRUE if ok
#
##########################################################################*/
.global RdosWriteSerialRaw
RdosWriteSerialRaw:
pushl %ebp
movl %esp,%ebp
pushl %edx
movb 8(%ebp),%dh
movb 12(%ebp),%dl
movl 16(%ebp),%eax
UserGate write_serial_val_nr
jc rwrFail
movl $1,%eax
jmp rwrDone
rwrFail:
xorl %eax,%eax
rwrDone:
popl %edx
leave
ret
/*##########################################################################
#
# Name : RdosOpenSysEnv
#
# Purpose....: Open system environment
#
# Returns....: Env handle
#
##########################################################################*/
.global RdosOpenSysEnv
RdosOpenSysEnv:
pushl %ebp
movl %esp,%ebp
pushl %ebx
UserGate open_sys_env_nr
jc oseFail
movzx %bx,%eax
jmp oseDone
oseFail:
xorl %eax,%eax
oseDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosOpenProcessEnv
#
# Purpose....: Open process environment
#
# Returns....: Env handle
#
##########################################################################*/
.global RdosOpenProcessEnv
RdosOpenProcessEnv:
pushl %ebp
movl %esp,%ebp
pushl %ebx
UserGate open_proc_env_nr
jc opeFail
movzx %bx,%eax
jmp opeDone
opeFail:
xorl %eax,%eax
opeDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseEnv
#
# Purpose....: Close environment
#
# Parameters.: Env handle
#
##########################################################################*/
.global RdosCloseEnv
RdosCloseEnv:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movl 8(%ebp),%ebx
UserGate close_env_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosAddEnvVar
#
# Purpose....: Add environment variable
#
# Parameters.: Env handle
# var
# data
#
##########################################################################*/
.global RdosAddEnvVar
RdosAddEnvVar:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %esi
pushl %edi
movl 8(%ebp),%ebx
movl 12(%ebp),%esi
movl 16(%ebp),%edi
UserGate add_env_var_nr
popl %edi
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosDeleteEnvVar
#
# Purpose....: Delete environment variable
#
# Parameters.: Env handle
# var
#
##########################################################################*/
.global RdosDeleteEnvVar
RdosDeleteEnvVar:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %esi
movl 8(%ebp),%ebx
movl 12(%ebp),%esi
UserGate delete_env_var_nr
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosFindEnvVar
#
# Purpose....: Find environment variable
#
# Parameters.: Env handle
# var
# data
#
##########################################################################*/
.global RdosFindEnvVar
RdosFindEnvVar:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %esi
pushl %edi
movl 8(%ebp),%ebx
movl 12(%ebp),%esi
movl 16(%ebp),%edi
UserGate find_env_var_nr
jc fevFail
movl $1,%eax
jmp fevDone
fevFail:
xorl %eax,%eax
fevDone:
popl %edi
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGetEnvData
#
# Purpose....: Get raw environment data
#
# Parameters.: Env handle
# data
#
##########################################################################*/
.global RdosGetEnvData
RdosGetEnvData:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %edi
movl 8(%ebp),%ebx
movl 12(%ebp),%edi
UserGate get_env_data_nr
jnc gedDone
xorw %ax,%ax
stosw
gedDone:
popl %edi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosSetEnvData
#
# Purpose....: Set raw environment data
#
# Parameters.: Env handle
# data
#
##########################################################################*/
.global RdosSetEnvData
RdosSetEnvData:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %edi
movl 8(%ebp),%ebx
movl 12(%ebp),%edi
UserGate set_env_data_nr
popl %edi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosOpenSysIni
#
# Purpose....: Open system ini-file
#
# Returns....: Ini handle
#
##########################################################################*/
.global RdosOpenSysIni
RdosOpenSysIni:
pushl %ebp
movl %esp,%ebp
pushl %ebx
UserGate open_sys_ini_nr
jc osiFail
movzx %bx,%eax
jmp osiDone
osiFail:
xorl %eax,%eax
osiDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCloseIni
#
# Purpose....: Close ini-file
#
# Parameters.: Ini handle
#
##########################################################################*/
.global RdosCloseIni
RdosCloseIni:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movl 8(%ebp),%ebx
UserGate close_ini_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosGotoIniSection
#
# Purpose....: Goto ini section
#
# Parameters.: Ini handle
# SectionName
#
##########################################################################*/
.global RdosGotoIniSection
RdosGotoIniSection:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %edi
movl 8(%ebp),%ebx
movl 12(%ebp),%edi
UserGate goto_ini_section_nr
jc gisFail
movl $1,%eax
jmp gisDone
gisFail:
xorl %eax,%eax
gisDone:
popl %edi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosRemoveIniSection
#
# Purpose....: Remove current ini section
#
# Parameters.: Ini handle
#
##########################################################################*/
.global RdosRemoveIniSection
RdosRemoveIniSection:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movl 8(%ebp),%ebx
UserGate remove_ini_section_nr
jc risFail
movl $1,%eax
jmp risDone
risFail:
xorl %eax,%eax
risDone:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosReadIni
#
# Purpose....: Read ini var in current section
#
# Parameters.: Ini handle
# VarName
# Data
# MaxSize
#
##########################################################################*/
.global RdosReadIni
RdosReadIni:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
pushl %esi
pushl %edi
movl 8(%ebp),%ebx
movl 12(%ebp),%esi
movl 16(%ebp),%edi
movl 20(%ebp),%ecx
UserGate read_ini_nr
jc riFail
movl $1,%eax
jmp riDone
riFail:
xorl %eax,%eax
riDone:
popl %edi
popl %esi
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosWriteIni
#
# Purpose....: Write ini var in current section
#
# Parameters.: Ini handle
# VarName
# Data
#
##########################################################################*/
.global RdosWriteIni
RdosWriteIni:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %esi
pushl %edi
movl 8(%ebp),%ebx
movl 12(%ebp),%esi
movl 16(%ebp),%edi
UserGate write_ini_nr
jc wiFail
movl $1,%eax
jmp wiDone
wiFail:
xorl %eax,%eax
wiDone:
popl %edi
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosDeleteIni
#
# Purpose....: Delete ini var in current section
#
# Parameters.: Ini handle
# VarName
#
##########################################################################*/
.global RdosDeleteIni
RdosDeleteIni:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %esi
movl 8(%ebp),%ebx
movl 12(%ebp),%esi
UserGate delete_ini_nr
jc diFail
movl $1,%eax
jmp diDone
diFail:
xorl %eax,%eax
diDone:
popl %esi
popl %ebx
leave
ret
/*##########################################################################
#
# Name : RdosCreateFileDrive
#
# Purpose....: Create a new file-drive
#
# Parameters.: Drive
# Size
# FS name
# Filename
#
##########################################################################*/
.global RdosCreateFileDrive
RdosCreateFileDrive:
pushl %ebp
movl %esp,%ebp
pushl %ecx
pushl %esi
pushl %edi
movb 8(%ebp),%al
movl 12(%ebp),%ecx
movl 16(%ebp),%esi
movl 20(%ebp),%edi
UserGate create_file_drive_nr
jnc cfdOk
xorl %eax,%eax
jmp cfdDone
cfdOk:
movl $1,%eax
cfdDone:
popl %edi
popl %esi
popl %ecx
leave
ret
/*##########################################################################
#
# Name : RdosOpenFileDrive
#
# Purpose....: Open a new file-drive
#
# Parameters.: Drive
# Filename
#
##########################################################################*/
.global RdosOpenFileDrive
RdosOpenFileDrive:
pushl %ebp
movl %esp,%ebp
pushl %edi
movb 8(%ebp),%al
movl 12(%ebp),%edi
UserGate open_file_drive_nr
jnc ofdOk
xorl %eax,%eax
jmp ofdDone
ofdOk:
movl $1,%eax
ofdDone:
popl %edi
leave
ret
/*##########################################################################
#
# Name : RdosEnableStatusLed
#
# Purpose....: Enable status LED
#
##########################################################################*/
.global RdosEnableStatusLed
RdosEnableStatusLed:
pushl %ebp
movl %esp,%ebp
UserGate enable_status_led_nr
leave
ret
/*##########################################################################
#
# Name : RdosDisableStatusLed
#
# Purpose....: Disable status LED
#
##########################################################################*/
.global RdosDisableStatusLed
RdosDisableStatusLed:
pushl %ebp
movl %esp,%ebp
UserGate disable_status_led_nr
leave
ret
/*##########################################################################
#
# Name : RdosStartWatchdog
#
# Purpose....: Start watchdog
#
# Parameters.: Timeout, ms
#
##########################################################################*/
/*
 void RdosStartWatchdog(int timeout_ms)
 Arms the system watchdog with the given timeout in milliseconds.
 Fix: removed the stray bare ';' lines -- in GNU as for i386, ';'
 is a statement separator (the comment character is '#'), so they
 were empty statements left over from another assembler's syntax.
*/
.global RdosStartWatchdog
RdosStartWatchdog:
pushl %ebp
movl %esp,%ebp
movl 8(%ebp),%eax		# %eax = timeout, ms
UserGate start_watchdog_nr
leave
ret
/*##########################################################################
#
# Name : RdosKickWatchdog
#
# Purpose....: Kick watchdog
#
##########################################################################*/
.global RdosKickWatchdog
RdosKickWatchdog:
pushl %ebp
movl %esp,%ebp
UserGate kick_watchdog_nr
leave
ret
|
4ms/metamodule-plugin-sdk
| 7,258
|
plugin-libc/newlib/libc/sys/rdos/crt0.S
|
/*#######################################################################
# RDOS operating system
# Copyright (C) 1988-2006, Leif Ekblad
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# The author of this program may be contacted at leif@rdos.net
#
# crt0.S
# GCC startupcode for RDOS
#
##########################################################################*/
#include "user.def"
KEY_ENTRIES = 256
.macro UserGate nr		# invoke RDOS system call number \nr
.byte 0x9A			# far CALL ptr16:32 opcode
.long \nr			# 32-bit offset field carries the gate number
.word 2				# selector 2 -- presumably the RDOS call-gate selector; confirm against user.def
.endm
.data
.align 8
_key_section:
.word 0
_key_ref_arr:
.long 0
_key_dtor_arr:
.long 0
.text
.align 4
/*##########################################################################
#
# Name : _start
#
# Purpose....: GCC startup-code
#
##########################################################################*/
.global _start
_start:
# --- allocate and zero the per-thread reent (impure) data, publish it at fs:[0] ---
call get_impure_data_size
movl %eax,%ecx			# %ecx = size for allocate_app_mem
UserGate allocate_app_mem_nr
xorl %eax,%eax
.byte 0x64			# fs: prefix for the next mov
movl %edx,(%eax)		# fs:[0] = impure data pointer (read back by __getreent)
movl %edx,%edi
rep
stosb				# zero-fill the block (%al = 0, %ecx = size)
pushl %edx			# argument for __init_rdos below
# --- allocate and zero the per-thread key-value array, publish it at fs:[4] ---
movl $(4 * KEY_ENTRIES),%eax
movl %eax,%ecx
UserGate allocate_app_mem_nr
movl $4,%eax
.byte 0x64			# fs: prefix
movl %edx,(%eax)		# fs:[4] = thread-specific value array
movl %edx,%edi
xorl %eax,%eax
rep
stosb
# --- allocate and zero the global key reference array ---
movl $(4 * KEY_ENTRIES),%eax
movl %eax,%ecx
UserGate allocate_app_mem_nr
movl %edx,_key_ref_arr
movl %edx,%edi
xorl %eax,%eax
rep
stosb
# --- allocate and zero the global key destructor array ---
movl $(4 * KEY_ENTRIES),%eax
movl %eax,%ecx
UserGate allocate_app_mem_nr
movl %edx,_key_dtor_arr
movl %edx,%edi
xorl %eax,%eax
rep
stosb
# --- create the section guarding the key tables, then run C runtime init ---
UserGate create_user_section_nr
movw %bx,_key_section
call __init_rdos
add $4, %esp			# pop the impure-data pointer pushed above
# --- build argv[]/argc from the command line ---
# NOTE(review): other allocate_app_mem_nr call sites load the size into
# both %eax and %ecx; here only %eax is set -- confirm which register
# the gate actually reads for the size.
movl $0x1000,%eax
UserGate allocate_app_mem_nr
pushl %edx			# %edx = argv array; also saved as main's 2nd arg
UserGate get_cmd_line_nr	# %edi = raw command line
xorl %ecx,%ecx			# %ecx = argc
xorb %ah,%ah			# %ah = inside-quote flag
arg_loop:
movl %edi,(%edx)		# argv[n] = start of current argument
addl $4,%edx
movb (%edi),%al
orb %al,%al
je arg_done			# end of command line
arg_scan:
movb (%edi),%al
orb %al,%al
je next_arg
cmpb $0x22,%al			# '"' toggles quote mode
jne arg_no_quote
xorb $1,%ah
jmp arg_scan_next
arg_no_quote:
orb %ah,%ah
jnz arg_scan_next		# inside quotes: whitespace does not split
cmpb $0x20,%al			# space ends the argument
je next_arg
cmpb $0x8,%al			# backspace treated as separator too
je next_arg
arg_scan_next:
incl %edi
jmp arg_scan
next_arg:
incl %ecx			# one more argument
to_next_arg:
orb %al,%al
je arg_done
xorb %al,%al
movb %al,(%edi)			# NUL-terminate the argument in place
incl %edi
movb (%edi),%al
cmpb $0x20,%al			# skip runs of separators
je to_next_arg
cmpb $0x8,%al
je to_next_arg
jmp arg_loop
arg_done:
# NOTE(review): looks like a leftover debugger breakpoint -- confirm
# it is intentional before shipping; it fires on every process start.
int $3
pushl %ecx			# main(argc, argv): argv already pushed above
call main
add $8, %esp
pushl %eax			# propagate main's return value
call exit
/*##########################################################################
#
# Name : _exit
#
# Purpose....: GCC exit-code
#
##########################################################################*/
.global _exit
_exit:
pushl %ebp
movl %esp,%ebp
movl 8(%ebp),%eax		# %eax = exit code
UserGate unload_exe_nr		# presumably never returns (no epilogue/ret follows) -- confirm
/*##########################################################################
#
# Name : __getreent
#
# Purpose....: ?
#
##########################################################################*/
.global __getreent
__getreent:
xorl %eax,%eax
.byte 0x64			# fs: segment-override prefix for the next mov
movl (%eax),%eax		# return per-thread reent pointer stored at fs:[0] by _start
ret
/*##########################################################################
#
# Name : __rdos_thread_key_create
#
# Purpose....: Emulate GCC pthread_key_create
#
# Parameters.: dtor
#
# Returns....: Key index
#
##########################################################################*/
/*
 Emulates pthread_key_create.
 In : 8(%ebp) = destructor function pointer (may be 0)
 Out: %eax    = byte offset of the allocated key slot, or -1 if full.
 The key tables are guarded by the _key_section user section.
 Fixes: 'movl KEY_ENTRIES,%ecx' loaded a dword from absolute address
 256 instead of the immediate count; leftover 'int $3' debug
 breakpoint removed.
*/
.global __rdos_thread_key_create
__rdos_thread_key_create:
pushl %ebp
movl %esp,%ebp
pushl %ebx
pushl %ecx
mov _key_section,%bx
UserGate enter_user_section_nr
movl _key_ref_arr,%ebx
movl $KEY_ENTRIES,%ecx		# immediate entry count, not a load from address 256
rtkc_scan_loop:
movl (%ebx), %eax
orl %eax, %eax			# free slot == 0
jz rtkc_entry_found
add $4, %ebx
loop rtkc_scan_loop
movl $-1, %eax			# table full
jmp rtkc_leave
rtkc_entry_found:
movb $255,3(%ebx)		# mark slot as in use
subl _key_ref_arr,%ebx		# entry address -> byte offset (the key value)
addl _key_dtor_arr,%ebx		# address of the matching destructor slot
movl 8(%ebp),%eax
movl %eax,(%ebx)		# record the destructor
subl _key_dtor_arr,%ebx		# back to the byte offset
movl %ebx,%eax			# return the key
rtkc_leave:
mov _key_section, %bx
UserGate leave_user_section_nr
popl %ecx
popl %ebx
leave
ret
/*##########################################################################
#
# Name : __rdos_thread_key_delete
#
# Purpose....: Emulate GCC pthread_key_delete
#
# Parameters.: index
#
# Returns....: result
#
##########################################################################*/
/*
 Emulates pthread_key_delete.
 In : 8(%ebp) = key (byte offset into the key arrays)
 Out: %eax    = 0 on success, 1 on invalid key.
 Fixes: leftover 'int $3' breakpoint removed; the original cleared
 '(%ebx)' AFTER 'subl _key_ref_arr', i.e. it wrote 0 to absolute
 address == key offset.  Mirroring __rdos_thread_key_create, the
 intent is presumably to clear the matching destructor slot, so the
 _key_dtor_arr base is added first -- confirm against the RDOS
 thread-key design.
*/
.global __rdos_thread_key_delete
__rdos_thread_key_delete:
pushl %ebp
movl %esp,%ebp
pushl %ebx
mov _key_section,%bx
UserGate enter_user_section_nr
movl 8(%ebp),%ebx
testb $3,%bl			# key must be 4-byte aligned
jnz rtkd_fail
cmpl $(4 * KEY_ENTRIES),%ebx	# and within the table
jae rtkd_fail
addl _key_ref_arr,%ebx
movb $0,3(%ebx)			# clear the in-use flag byte
mov (%ebx),%eax
orl %eax,%eax			# remaining references?
jz rtkd_ok
subl _key_ref_arr,%ebx		# entry address -> byte offset
addl _key_dtor_arr,%ebx		# address of the matching destructor slot
movl $0,(%ebx)			# drop the destructor
jmp rtkd_ok
rtkd_fail:
movl $1,%eax
jmp rtkd_leave
rtkd_ok:
xorl %eax,%eax
rtkd_leave:
mov _key_section, %bx
UserGate leave_user_section_nr
popl %ebx
leave
ret
/*##########################################################################
#
# Name : __rdos_thread_getspecific
#
# Purpose....: Emulate GCC pthread_getspecific
#
# Parameters.: index
#
# Returns....: value
#
##########################################################################*/
/*
 Emulates pthread_getspecific.
 In : 8(%ebp) = key (byte offset into the per-thread value array)
 Out: %eax    = stored value, or 0 for an invalid key.
 Fix: leftover 'int $3' debug breakpoint removed.
*/
.global __rdos_thread_getspecific
__rdos_thread_getspecific:
pushl %ebp
movl %esp,%ebp
pushl %ebx
movl 8(%ebp),%ebx
testb $3,%bl			# key must be 4-byte aligned
jnz rtg_fail
cmpl $(4 * KEY_ENTRIES),%ebx	# and within the table
jae rtg_fail
movl $4,%eax
.byte 0x64			# fs: prefix
movl (%eax),%eax		# %eax = this thread's value array (fs:[4], set in _start)
addl %eax,%ebx
movl (%ebx),%eax		# fetch the stored value
jmp rtg_done
rtg_fail:
xorl %eax,%eax
rtg_done:
popl %ebx
leave
ret
/*##########################################################################
#
# Name : __rdos_thread_setspecific
#
# Purpose....: Emulate GCC pthread_setspecific
#
# Parameters.: index - key (byte offset) from __rdos_thread_key_create
#              value - per-thread value to store for the key
#
# Returns....: 0 on success, 1 on an invalid key
#
##########################################################################*/
	.global __rdos_thread_setspecific
__rdos_thread_setspecific:
	int $3				# NOTE(review): debug trap left in -- confirm
	pushl %ebp
	movl %esp,%ebp
	pushl %ebx
	pushl %ecx
	movl 8(%ebp),%ebx		# ebx = key (byte offset)
	testb $3,%bl			# key must be dword aligned ...
	jnz rts_fail
	cmpl $(4 * KEY_ENTRIES),%ebx	# ... and inside the table
	jae rts_fail
	movl $4,%eax
	.byte 0x64			# FS segment-override prefix for the next
	movl (%eax),%eax		# insn: eax = fs:[4] -- presumably the
					# current thread's key-value block; TODO
					# confirm against the RDOS TIB layout
	addl %eax,%ebx			# ebx = &values[key / 4]
	movl 12(%ebp),%eax		# store the caller's value
	movl %eax,(%ebx)
	xorl %eax,%eax			# success
	jmp rts_done
rts_fail:
	movl $1,%eax			# invalid key
rts_done:
	popl %ecx			# BUGFIX: this pop was missing, so the
					# "popl %ebx" below restored the saved %ecx
					# into %ebx, clobbering the callee-saved
					# %ebx (compare __rdos_thread_key_create,
					# which pops both)
	popl %ebx
	leave
	ret
|
4ms/metamodule-plugin-sdk
| 1,590
|
plugin-libc/newlib/libc/sys/sysnecv850/crt0.S
|
# NEC V850 startup code: set up the stack and pointer registers, clear
# .bss, run static constructors via __main, then call main and exit.
	.section .text
	.global _start
_start:
#if defined(__v850e__) || defined(__v850e2__) || defined(__v850e2v3__) || defined(__v850e3v5__)
	movea 255, r0, r20		# r20 = 255, r21 = 0xffff -- TODO confirm
	mov 65535, r21			# what the runtime expects these for
	mov hilo(_stack), sp		# initial stack pointer
	mov hilo(__ep), ep		# element pointer (short-form data access)
	mov hilo(__gp), gp		# global pointer
	mov hilo(__ctbp), r6
	ldsr r6, ctbp			# CALLT base pointer
#if defined(__v850e2v3__) || defined(__v850e3v5__)
	// FPU enable
	stsr psw, r6
	movhi 1, r0, r7
	or r7, r6
	ldsr r6, psw			// set PSW bit 16 to enable the FPU
	// Initialize the FPSR
	movhi 2, r0, r6
	ldsr r6, fpsr
#endif
	mov hilo(_edata), r6		# clear .bss, the range [_edata, _end)
	mov hilo(_end), r7
.L0:
	st.w r0, 0[r6]			# r0 is the architectural zero register
	addi 4, r6, r6
	cmp r7, r6
	bl .L0				# loop while r6 < r7
.L1:
	jarl ___main, r31		# gcc __main: run static constructors
	addi -16, sp, sp		# reserve an argument area
	mov 0, r6			# argc = 0
	mov 0, r7			# argv = 0
	mov 0, r8			# envp = 0
	jarl _main, r31
	mov r10, r6			# pass the return value of main ...
	jarl _exit, r31			# ... to exit; does not return
# else
	movea 255, r0, r20		# same setup for the original V850 core,
	mov r0, r21			# built from movhi/movea/ori pairs since
	ori 65535, r0, r21		# there is no 32-bit mov immediate here
	movhi hi(_stack), r0, sp
	movea lo(_stack), sp, sp	# sp = _stack
	movhi hi(__ep), r0, ep
	movea lo(__ep), ep, ep		# ep = __ep
	movhi hi(__gp), r0, gp
	movea lo(__gp), gp, gp		# gp = __gp
	movhi hi(_edata), r0, r6	# clear .bss, the range [_edata, _end)
	movea lo(_edata), r6, r6
	movhi hi(_end), r0, r7
	movea lo(_end), r7, r7
.L0:
	st.b r0, 0[r6]			# one byte at a time on this core
	addi 1, r6, r6
	cmp r7, r6
	bl .L0				# loop while r6 < r7
.L1:
	jarl ___main, r31		# run static constructors
	addi -16, sp, sp		# reserve an argument area
	mov 0, r6			# argc = 0
	mov 0, r7			# argv = 0
	mov 0, r8			# envp = 0
	jarl _main, r31
	mov r10, r6			# exit(return value of main)
	jarl _exit, r31
# endif
	.section .stack
_stack:	.long 1
	.section .data
	.global ___dso_handle
	.weak ___dso_handle
___dso_handle:
	.long 0
|
4ms/metamodule-plugin-sdk
| 1,119
|
plugin-libc/newlib/libc/sys/sysmec/crt0.S
|
#ifdef __mn10300__
# MN10300 startup: set sp, clear .bss, run ctors/dtors, call main then exit.
	.section .text
	.global _start
_start:
	mov _stack-8,a0			# Load up the stack pointer
	mov a0,sp
	mov _edata,a0			# Get the start/end of bss
	mov _end,a1
	cmp a0,a1			# If no bss, then do nothing
	beq .L0
	clr d0				# clear d0
.L1:
	movbu d0,(a0)			# Clear a byte and bump pointer
	inc a0
	cmp a0,a1
	bne .L1
.L0:
	call ___main,[],0		# Call __main to run ctors/dtors
	clr d0				# zero the argument registers and the
	clr d1				# stacked arg slot -- argc/argv are 0;
	mov d0,(4,sp)			# TODO confirm the exact arg convention
	call _main,[],0			# Call main program
	call _exit,[],0			# All done, no need to return or
					# deallocate our stack.
	.section ._stack
_stack:	.long 1
#else
# Non-mn10300 variant (presumably the older MN10200-style core: it uses
# a3 as the stack pointer and the beqx/bnex branch forms -- TODO confirm).
	.section .text
	.global _start
_start:
	mov _stack-4,a3			# Load up the stack pointer and allocate
					# our current frame.
	mov _edata,a0			# Get the start/end of bss
	mov _end,a1
	cmp a0,a1			# If no bss, then do nothing
	beqx .L0
	sub d0,d0			# clear d0
.L1:
	movb d0,(a0)			# Clear a byte and bump pointer
	add 1,a0
	cmp a0,a1
	bnex .L1
.L0:
	jsr ___main			# run ctors/dtors
	sub d0,d0			# zero d0/d1 and the stacked arg slot
	mov d0,d1
	mov d0,(a3)
	jsr _main			# Call main program
	jmp _exit			# All done, no need to return or
					# deallocate our stack.
	.section ._stack
_stack:	.long 1
#endif
|
4ms/metamodule-plugin-sdk
| 2,202
|
plugin-libc/newlib/libc/sys/sh/crt0.S
|
! SuperH C runtime startup.  The SH5/SHmedia variant is selected by
! __SH5__; everything else uses the classic SH1-SH4 code after the #else.
#ifdef __SH5__
	.section .data,"aw"
	.global ___data
___data:

	.section .rodata,"a"
	.global ___rodata
___rodata:

#if __SH5__ == 64
	.section .text,"ax"
/* Build a full 64-bit symbol address in reg, 16 bits at a time.  */
#define LOAD_ADDR(sym, reg) \
	movi (sym >> 48) & 65535, reg; \
	shori (sym >> 32) & 65535, reg; \
	shori (sym >> 16) & 65535, reg; \
	shori sym & 65535, reg
#else
	.mode SHmedia
	.section .text..SHmedia32,"ax"
/* 32-bit address forms are enough in SHmedia32.  */
#define LOAD_ADDR(sym, reg) \
	movi (sym >> 16) & 65535, reg; \
	shori sym & 65535, reg
#endif
	.global start
start:
	LOAD_ADDR (_stack, r15)

	pt/l zero_bss_loop, tr0		! preload branch targets
	pt/l _atexit, tr1
	pt/l _init, tr5
	pt/l _main, tr6
	pt/l _exit, tr7

! zero out bss
	LOAD_ADDR (_edata, r0)
	LOAD_ADDR (_end, r1)
zero_bss_loop:
	stx.q r0, r63, r63		! r63 reads as zero
	addi r0, 8, r0
	bgt/l r1, r0, tr0

	LOAD_ADDR (___data, r26)
	LOAD_ADDR (___rodata, r27)

#if ! __SH4_NOFPU__
	getcon cr0, r0
	movi 1, r1
	shlli r1, 15, r1
	or r1, r0, r0
	putcon r0, cr0			! set SR bit 15 (FPU-related -- see the
					! SH-5 SR layout; TODO confirm meaning)
#endif

! arrange for exit to call fini
	LOAD_ADDR (_fini, r2)
	blink tr1, r18			! atexit(_fini)

! call init
	blink tr5, r18

! call the mainline
	blink tr6, r18

! call exit
	blink tr7, r18
#else
	.section .text
	.global start
start:
	mov.l stack_k,r15

	! zero out bss
	mov.l edata_k,r0
	mov.l end_k,r1
	mov #0,r2
start_l:
	mov.l r2,@r0
	add #4,r0
	cmp/ge r0,r1			! T while r1 >= r0; note this also clears
	bt start_l			! the word at _end itself

#ifndef __SH2A_NOFPU__
#if defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH2A__)
	! BUGFIX: this guard used to test __SH4_SINGLE_ONLY (missing the
	! trailing underscores), so on SH4 single-only targets the FPSCR was
	! never initialized even though the literal pool below is guarded by
	! the correctly spelled macro.
	mov.l set_fpscr_k, r1
	jsr @r1
	mov #0,r4			! delay slot: argument 0 to ___set_fpscr
	lds r3,fpscr			! ___set_fpscr leaves the value to load
					! in r3 -- per its contract; TODO confirm
#endif /* defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH2A__) */
#endif /* !__SH2A_NOFPU__ */

	! call the mainline
	mov.l main_k,r0
	jsr @r0
	or r0,r0			! delay slot: nop

	! call exit
	mov r0,r4			! exit(return value of main)
	mov.l exit_k,r0
	jsr @r0
	or r0,r0			! delay slot: nop

	.align 2
#ifndef __SH2A_NOFPU__
#if defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH2A__)
set_fpscr_k:
	.long ___set_fpscr
#endif /* defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH2A__) */
#endif /* !__SH2A_NOFPU__ */

stack_k:
	.long _stack
edata_k:
	.long _edata
end_k:
	.long _end
main_k:
	.long _main
exit_k:
	.long _exit
#endif
|
4ms/metamodule-plugin-sdk
| 5,011
|
plugin-libc/newlib/libc/sys/arm/trap.S
|
/* Run-time exception support: software stack-overflow entry points called
   by compiler-generated PCS prologues (-mapcs-stack-check).  Not used for
   the EABI.  */
#ifndef __ARM_EABI__
#include "swi.h"

/* .text is used instead of .section .text so it works with arm-aout too. */
	.text
	.align 0

	.global __rt_stkovf_split_big
	.global __rt_stkovf_split_small

/* The following functions are provided for software stack checking.
   If hardware stack-checking is being used then the code can be
   compiled without the PCS entry checks, and simply rely on VM
   management to extend the stack for a thread.

   The stack extension event occurs when the PCS function entry code
   would result in a stack-pointer beneath the stack-limit register
   value.  The system relies on the following map:

   +-----------------------------------+ <-- end of stack block
   | ...                               |
   | ...                               |
   | active stack                      |
   | ...                               | <-- sp (stack-pointer) somewhere in here
   | ...                               |
   +-----------------------------------+ <-- sl (stack-limit)
   | stack-extension handler workspace |
   +-----------------------------------+ <-- base of stack block

   The "stack-extension handler workspace" is an amount of memory in
   which the stack overflow support code must execute.  It must be
   large enough to deal with the worst case path through the extension
   code.  At the moment the compiler expects this to be AT LEAST
   256bytes.  It uses this fact to code functions with small local
   data usage within the overflow space.

   In a true target environment We may need to increase the space
   between sl and the true limit to allow for the stack extension
   code, SWI handlers and for undefined instruction handlers of the
   target environment.  */

__rt_stkovf_split_small:
	mov ip,sp	@ Ensure we can calculate the stack required
			@ and fall through to...
__rt_stkovf_split_big:
	@ in:	sp = current stack-pointer (beneath stack-limit)
	@	sl = current stack-limit
	@	ip = low stack point we require for the current function
	@	lr = return address into the current function
	@	fp = frame-pointer
	@	original sp --> +----------------------------------+
	@			| pc (12 ahead of PCS entry store) |
	@	current fp ---> +----------------------------------+
	@			| lr (on entry) pc (on exit)	   |
	@			+----------------------------------+
	@			| sp ("original sp" on entry)	   |
	@			+----------------------------------+
	@			| fp (on entry to function)	   |
	@			+----------------------------------+
	@			|				   |
	@			| ..argument and work registers..  |
	@			|				   |
	@	current sp ---> +----------------------------------+
	@
	@ The "current sl" is somewhere between "original sp" and "current sp"
	@ but above "true sl".  The "current sl" should be at least 256bytes
	@ above the "true sl".  The 256byte stack guard should be large enough
	@ to deal with the worst case function entry stacking (160bytes) plus
	@ the stack overflow handler stacking requirements, plus the stack
	@ required for the memory allocation routines.
	@
	@ Normal PCS entry (before stack overflow check) can stack 16
	@ standard registers (64bytes) and 8 floating point registers
	@ (96bytes).  This gives a minimum stack guard of 160bytes (excluding
	@ the stack required for the code).  (Actually only a maximum of
	@ 14standard registers are ever stacked on entry to a function).
	@
	@ NOTE: Structure returns are performed by the caller allocating a
	@	dummy space on the stack and passing in a "phantom" arg1 into
	@	the function.  This means that we do not need to worry about
	@	preserving the stack under "sp" even on function return.
	@
	@	Code should never poke values beneath sp.  The sp register
	@	should always be "dropped" first to cover the data.  This
	@	protects the data against any events that may try and use
	@	the stack.
	SUB ip, sp, ip	@ extra stack required for function
	@ Add stack extension code here.  If desired a new stack chunk
	@ can be allocated, and the register state updated suitably.
	@ We now know how much extra stack the function requires.
	@ Terminate the program for the moment:
	swi SWI_Exit	@ no extension support is implemented here
#endif
|
4ms/metamodule-plugin-sdk
| 16,800
|
plugin-libc/newlib/libc/sys/arm/crt0.S
|
#include "newlib.h"
#include "arm.h"
#include "swi.h"

/* ANSI concatenation macros.  */
#define CONCAT(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

#ifdef __USER_LABEL_PREFIX__
#define FUNCTION( name ) CONCAT (__USER_LABEL_PREFIX__, name)
#else
/* NOTE(review): the message below omits the trailing "__" of the macro
   actually tested above -- confirm and align if desired.  */
#error __USER_LABEL_PREFIX is not defined
#endif

#ifdef _HAVE_INITFINI_ARRAY
#define _init __libc_init_array
#define _fini __libc_fini_array
#endif

#if defined(__ARM_EABI__) && defined(__thumb__) && !defined(__thumb2__)
/* For Thumb1 we need to force the architecture to be sure that we get the
   correct attributes on the object file; otherwise the assembler will get
   confused and mark the object as being v6T2.  */
#if defined(__ARM_ARCH_4T__)
	.arch armv4t
#elif defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__)
/* Nothing in this object requires higher than v5.  */
	.arch armv5t
#elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
	|| defined(__ARM_ARCH_6ZK__)
/* Nothing in this object requires higher than v6.  */
	.arch armv6
#elif defined(__ARM_ARCH_6M__)
#ifdef ARM_RDP_MONITOR
/* Object file uses SVC, so mark as v6s-m.  */
	.arch armv6s-m
#else
	.arch armv6-m
#endif
#endif
#endif

/* .text is used instead of .section .text so it works with arm-aout too. */
	.text
	.syntax unified

/* FUNC_START: emit a global function label, marked as a Thumb entry point
   when Thumb code is preferred.  */
#ifdef PREFER_THUMB
	.thumb
.macro FUNC_START name
	.global \name
	.thumb_func
\name:
.endm
#else
	.code 32
.macro FUNC_START name
	.global \name
\name:
.endm
#endif

/* Annotation for EABI unwinding tables.  */
.macro FN_EH_START
#if defined(__ELF__) && !defined(__USING_SJLJ_EXCEPTIONS__)
	.fnstart
#endif
.endm

.macro FN_EH_END
#if defined(__ELF__) && !defined(__USING_SJLJ_EXCEPTIONS__)
	/* Protect against unhandled exceptions.  */
	.cantunwind
	.fnend
#endif
.endm

/* Call through a register, using blx only when the toolchain supports it.  */
.macro indirect_call reg
#ifdef HAVE_CALL_INDIRECT
	blx \reg
#else
	mov lr, pc
	mov pc, \reg
#endif
.endm

/* For armv4t and newer, toolchains will transparently convert
   'bx lr' to 'mov pc, lr' if needed.  GCC has deprecated support
   for anything older than armv4t, but this should handle that
   corner case in case anyone needs it anyway.  */
.macro FN_RETURN
#if __ARM_ARCH <= 4 && __ARM_ARCH_ISA_THUMB == 0
	mov pc, lr
#else
	bx lr
#endif
.endm
/******************************************************************************
* User mode only:               This routine makes default target specific Stack
* +-----+ <- SL_sys,            Pointer initialization for different processor modes:
* |     |    SL_usr             FIQ, Abort, IRQ, Undefined, Supervisor, System (User)
* | SYS |                       and setups a default Stack Limit in-case the code has
* | USR | -=0x10000             been compiled with "-mapcs-stack-check" for FIQ and
* |     |                       System (User) modes.
* |     |
* +-----+ <- initial SP,
*            becomes SP_sys     Hard-wiring SL value is not ideal, since there is
*            and SL_usr         currently no support for checking that the heap and
*                               stack have not collided, or that this default 64k is
* All modes:                    is enough for the program being executed. However,
* +-----+ <- SL_sys,            it ensures that this simple crt0 world will not
* |     |    SL_usr             immediately cause an overflow event.
* | SYS |
* | USR | -=0x10000             We go through all execution modes and set up SP
* |     |                       for each of them.
* +-----+ <- SP_sys,
* |     |    SP_usr             Notes:
* | SVC | -= 0x8000             - This code will not work as intended if the system
* |     |                         starts in secure mode. In particular the methods
* +-----+ <- SP_svc               of getting in and out of secure state are not as
* |     |                         simple as writing to the CPSR mode bits.
* | IRQ | -= 0x2000             - Mode switch via CPSR is not allowed once in
* |     |                         non-privileged mode or in hypervisor mode, so we
* ^ +-----+ <- SP_und             take care not to enter "User" or "Hypervisor" mode
* s |     |                       to set up its SP, and also skip most operations if
* t | UND | -= 0x1000             already in these modes.
* a |     |                     Input parameters:
* c +-----+ <- SP_und           - sp - Initialized SP
* k |     |                     - r2 - May contain SL value from semihosting
* | | ABT | -= 0x1000                  SYS_HEAPINFO call
* g |     |                     Scratch registers:
* r +-----+ <- SP_abt,          - r1 - new value of CPSR
* o |     |    SL_fiq           - r2 - intermediate value (in standalone mode)
* w | FIQ | -= 0x1000           - r3 - new SP value
* t |     |                     - r4 - save/restore CPSR on entry/exit
* h +-----+ <- initial SP,
*            becomes SP_fiq     Declared as "weak" so that user can write and use
*                               his own implementation if current doesn't fit.
*
******************************************************************************/
	.align 0
FUNC_START _stack_init
	.weak FUNCTION (_stack_init)
	FN_EH_START
/* M profile doesn't have CPSR register.  */
#if (__ARM_ARCH_PROFILE != 'M')
	/* Following code is compatible for both ARM and Thumb ISA.  */
	mrs r4, CPSR			/* r4 = entry CPSR, restored at the end */
	mov r3, sp			/* Save input SP value.  */
	ands r1, r4, #(CPSR_M_MASK)
	beq .Lskip_cpu_modes		/* masked mode bits == 0: presumably the
					   User-mode case per the note above --
					   depends on CPSR_M_MASK in arm.h */
	cmp r1, #(CPSR_M_HYP)
	beq .Lskip_cpu_modes		/* no mode switching from Hyp either */
	/* FIQ mode, interrupts disabled.  */
	mov r1, #(CPSR_M_FIQ|CPSR_M_32BIT|CPSR_I_MASK|CPSR_F_MASK)
	msr CPSR_c, r1
	mov sp, r3
	sub sl, sp, #0x1000		/* FIQ mode has its own SL.  */
	/* Abort mode, interrupts disabled.  */
	mov r3, sl			/* next mode's SP starts below FIQ's SL */
	mov r1, #(CPSR_M_ABT|CPSR_M_32BIT|CPSR_I_MASK|CPSR_F_MASK)
	msr CPSR_c, r1
	mov sp, r3
	sub r3, r3, #0x1000
	/* Undefined mode, interrupts disabled.  */
	mov r1, #(CPSR_M_UND|CPSR_M_32BIT|CPSR_I_MASK|CPSR_F_MASK)
	msr CPSR_c, r1
	mov sp, r3
	sub r3, r3, #0x1000
	/* IRQ mode, interrupts disabled.  */
	mov r1, #(CPSR_M_IRQ|CPSR_M_32BIT|CPSR_I_MASK|CPSR_F_MASK)
	msr CPSR_c, r1
	mov sp, r3
	sub r3, r3, #0x2000
	/* Supervisory mode, interrupts disabled.  */
	mov r1, #(CPSR_M_SVR|CPSR_M_32BIT|CPSR_I_MASK|CPSR_F_MASK)
	msr CPSR_c, r1
	mov sp, r3
	sub r3, r3, #0x8000		/* Min size 32k.  */
	bic r3, r3, #0x00FF		/* Align with current 64k block.  */
	bic r3, r3, #0xFF00
# if __ARM_ARCH >= 4
	/* System (shares regs with User) mode, interrupts disabled.  */
	mov r1, #(CPSR_M_SYS|CPSR_M_32BIT|CPSR_I_MASK|CPSR_F_MASK)
	msr CPSR_c, r1
	mov sp, r3
# else
	/* Keep this for ARMv3, but GCC actually dropped it.  */
	/* Move value into user mode SP without changing modes,  */
	/* via '^' form of ldm.  */
	str r3, [r3, #-4]
	ldmdb r3, {sp}^
# endif
	/* Back to original mode, presumably SVC, with disabled FIQ/IRQ.  */
	orr r4, r4, #(CPSR_I_MASK|CPSR_F_MASK)
	msr CPSR_c, r4
.Lskip_cpu_modes:
#endif
	/* Set SL register.  */
#if defined (ARM_RDI_MONITOR) /* semihosting */
	cmp r2, #0			/* r2 = SL from SYS_HEAPINFO; 0 = unknown */
	beq .Lsl_forced_zero
	/* Allow slop for stack overflow handling and small frames.  */
# ifdef THUMB1_ONLY
	adds r2, #128			/* two adds: Thumb1 8-bit immediates */
	adds r2, #128
	mov sl, r2
# else
	add sl, r2, #256
# endif
.Lsl_forced_zero:
#else /* standalone */
	/* r3 contains SP for System/User mode.  Set SL = SP - 0x10000.  */
#ifdef THUMB1_ONLY
	movs r2, #64			/* build 64 << 10 = 0x10000 */
	lsls r2, r2, #10
	subs r2, r3, r2
	mov sl, r2
#else
	/* Still assumes 256bytes below SL.  */
	sub sl, r3, #64 << 10
#endif
#endif
	FN_RETURN
	FN_EH_END
/*******************************************************************************
* Main library startup code: establish a stack (from the Demon/Angel monitor
* or a fixed/linker-provided value), zero .bss, optionally collect the
* command line and run init/fini hooks, then call main and exit.
*******************************************************************************/
	.align 0
FUNC_START _mainCRTStartup
FUNC_START _start
	FN_EH_START
	/* Start by setting up a stack.  */
#ifdef ARM_RDP_MONITOR
	/* Issue Demon SWI to read stack info.  */
	swi SWI_GetEnv			/* Returns command line in r0.  */
	mov sp,r1			/* and the highest memory address in r1.  */
	/* Stack limit is at end of data.  */
	/* Allow slop for stack overflow handling and small frames.  */
#ifdef THUMB1_ONLY
	ldr r0, .LC2
	adds r0, #128			/* two adds: Thumb1 8-bit immediates */
	adds r0, #128
	mov sl, r0
#else
	ldr sl, .LC2
	add sl, sl, #256
#endif
#else
#ifdef ARM_RDI_MONITOR
	/* Issue Angel SWI to read stack info.  */
	movs r0, #AngelSWI_Reason_HeapInfo
	adr r1, .LC0			/* Point at ptr to 4 words to receive data.  */
#ifdef THUMB_VXM
	bkpt AngelSWI
#elif defined(__thumb2__)
	/* We are in thumb mode for startup on armv7 architectures.  */
	AngelSWIAsm AngelSWI
#else
	/* We are always in ARM mode for startup on pre armv7 archs.  */
	AngelSWIAsm AngelSWI_ARM
#endif
	ldr r0, .LC0			/* Point at values read.  */
	/* Set __heap_limit.  */
	ldr r1, [r0, #4]		/* word 1 = heap limit */
	cmp r1, #0
	beq .LC33
	ldr r2, =__heap_limit
	str r1, [r2]
.LC33:
	ldr r1, [r0, #0]		/* word 0 = heap base */
	cmp r1, #0
	bne .LC32
	/* If the heap base value [r0, #0] is 0 then the heap base is actually
	   at the end of program data (i.e. __end__).  See:
	   http://infocenter.arm.com/help/topic/com.arm.doc.dui0471-/Bacbefaa.html
	   for more information.  */
	ldr r1, .LC31
	str r1, [r0, #0]
.LC32:
	ldr r1, [r0, #8]		/* word 2 = stack base (initial SP) */
	ldr r2, [r0, #12]		/* word 3 = stack limit */
	/* We skip setting SP/SL if 0 returned from semihosting.
	   - According to semihosting docs, if 0 returned from semihosting,
	     the system was unable to calculate the real value, so it's ok
	     to skip setting SP/SL to 0 here.
	   - Considering M-profile processors, We might want to initialize
	     SP by the first entry of vector table and return 0 to SYS_HEAPINFO
	     semihosting call, which will be skipped here.
	   - Considering R-profile processors there is no automatic SP init by hardware
	     so we need to initialize it by default value.  */
	ldr r3, .Lstack			/* default SP if semihosting gave none */
	cmp r1, #0
	beq .LC26
	mov r3, r1
.LC26:
	mov sp, r3
	/* r2 (SL value) will be used in _stack_init.  */
	bl FUNCTION (_stack_init)
#else /* standalone */
	/* Set up the stack pointer to a fixed value.  */
	/* Changes by toralf:
	   - Allow linker script to provide stack via __stack symbol - see
	     definition of .Lstack
	   - Provide "hooks" that may be used by the application to add
	     custom init code - see .Lhwinit and .Lswinit.  */
	ldr r3, .Lstack
	cmp r3, #0			/* weak __stack unresolved? use .LC0 */
#ifdef __thumb2__
	it eq
#endif
#ifdef THUMB1_ONLY
	bne .LC28
	ldr r3, .LC0
.LC28:
#else
	ldreq r3, .LC0
#endif
	/* Note: This 'mov' is essential when starting in User, and ensures we
	   always get *some* SP value for the initial mode, even if we
	   have somehow missed it below (in which case it gets the same
	   value as FIQ - not ideal, but better than nothing).  */
	mov sp, r3
	/* We don't care of r2 value in standalone.  */
	bl FUNCTION (_stack_init)
#endif
#endif
	/* Zero the memory in the .bss section.  */
	movs a2, #0			/* Second arg: fill value.  */
	mov fp, a2			/* Null frame pointer.  */
	mov r7, a2			/* Null frame pointer for Thumb.  */
	ldr a1, .LC1			/* First arg: start of memory block.  */
	ldr a3, .LC2
	subs a3, a3, a1			/* Third arg: length of block.  */
#if __thumb__ && !defined(PREFER_THUMB)
	/* Enter Thumb mode... */
	add a4, pc, #1			/* Get the address of the Thumb block.  */
	bx a4				/* Go there and start Thumb decoding.  */
	.code 16
	.global __change_mode
	.thumb_func
__change_mode:
#endif
	bl FUNCTION (memset)		/* memset(__bss_start__, 0, len) */
#if !defined (ARM_RDP_MONITOR) && !defined (ARM_RDI_MONITOR)
	/* Changes by toralf: Taken from libgloss/m68k/crt0.S
	   initialize target specific stuff. Only execute these
	   functions it they exist.  */
	ldr r3, .Lhwinit		/* weak: 0 when not provided */
	cmp r3, #0
	beq .LC24
	indirect_call r3
.LC24:
	ldr r3, .Lswinit		/* weak: 0 when not provided */
	cmp r3, #0
	beq .LC25
	indirect_call r3
.LC25:
	movs r0, #0			/* No arguments.  */
	movs r1, #0			/* No argv either.  */
#else
	/* Need to set up standard file handles.  */
	bl FUNCTION (initialise_monitor_handles)
#ifdef ARM_RDP_MONITOR
	swi SWI_GetEnv			/* Sets r0 to point to the command line.  */
	movs r1, r0
#else
	movs r0, #AngelSWI_Reason_GetCmdLine
	ldr r1, .LC30			/* Space for command line.  */
#ifdef THUMB_VXM
	bkpt AngelSWI
#else
	AngelSWIAsm AngelSWI
#endif
	ldr r1, .LC30
	ldr r1, [r1]
#endif
	/* Parse string at r1, building argc (r0) and argv on the stack.  */
	movs r0, #0			/* Count of arguments so far.  */
	/* Push a NULL argument onto the end of the list.  */
#ifdef __thumb__
	push {r0}
#else
	stmfd sp!, {r0}
#endif
.LC10:
	/* Skip leading blanks.  */
#ifdef __thumb__
	ldrb r3, [r1]
	adds r1, #1
#else
	ldrb r3, [r1], #1
#endif
	cmp r3, #0
	beq .LC12
	cmp r3, #' '
	beq .LC10
	/* See whether we are scanning a string.  */
	cmp r3, #'\"'
#ifdef __thumb__
	beq .LC20
	cmp r3, #'\''
	bne .LC21
.LC20:
	movs r2, r3			/* quote char is the terminator */
	b .LC22
.LC21:
	movs r2, #' '			/* Terminator type.  */
	subs r1, r1, #1			/* Adjust back to point at start char.  */
.LC22:
#else
	cmpne r3, #'\''
	moveq r2, r3
	movne r2, #' '			/* Terminator type.  */
	subne r1, r1, #1		/* Adjust back to point at start char.  */
#endif
	/* Stack a pointer to the current argument.  */
#ifdef __thumb__
	push {r1}
#else
	stmfd sp!, {r1}
#endif
	adds r0, r0, #1
.LC11:
#ifdef __thumb__
	ldrb r3, [r1]
	adds r1, #1
#else
	ldrb r3, [r1], #1
#endif
	cmp r3, #0
	beq .LC12
	cmp r2, r3			/* Reached terminator ?  */
	bne .LC11
	movs r2, #0
	subs r3, r1, #1
	strb r2, [r3]			/* Terminate the arg string.  */
	b .LC10
.LC12:
	mov r1, sp			/* Point at stacked arg pointers.  */
	/* We've now got the stacked args in order, reverse them.  */
#ifdef __thumb__
	movs r2, r0
	lsls r2, #2
	add r2, sp			/* r2 = one past last arg slot */
	mov r3, sp			/* r3 = first arg slot */
.LC15:	cmp r2, r3
	bls .LC14
	subs r2, #4
	ldr r4, [r2]			/* swap *r2 and *r3, walk inward */
	ldr r5, [r3]
	str r5, [r2]
	str r4, [r3]
	adds r3, #4
	b .LC15
.LC14:
	/* Ensure doubleword stack alignment.  */
	mov r4, sp
	movs r5, #7
	bics r4, r5
	mov sp, r4
#else
	add r2, sp, r0, LSL #2		/* End of args.  */
	mov r3, sp			/* Start of args.  */
.LC13:	cmp r2, r3
	ldrhi r4,[r2, #-4]		/* Reverse ends of list.  */
	ldrhi r5, [r3]
	strhi r5, [r2, #-4]!
	strhi r4, [r3], #4
	bhi .LC13
	/* Ensure doubleword stack alignment.  */
	bic sp, sp, #7
#endif
#endif
#ifdef __USES_INITFINI__
	/* Some arm/elf targets use the .init and .fini sections
	   to create constructors and destructors, and for these
	   targets we need to call the _init function and arrange
	   for _fini to be called at program exit.  */
	movs r4, r0			/* preserve argc/argv across the calls */
	movs r5, r1
#ifdef _LITE_EXIT
	/* Make reference to atexit weak to avoid unconditionally pulling in
	   support code.  Refer to comments in __atexit.c for more details.  */
	.weak FUNCTION(atexit)
	ldr r0, .Latexit
	cmp r0, #0
	beq .Lweak_atexit
#endif
	ldr r0, .Lfini
	bl FUNCTION (atexit)		/* atexit(_fini) */
.Lweak_atexit:
	bl FUNCTION (_init)
	movs r0, r4
	movs r1, r5
#endif
	bl FUNCTION (main)		/* main(argc, argv) */
	bl FUNCTION (exit)		/* Should not return.  */
#if __thumb__ && !defined(PREFER_THUMB)
	/* Come out of Thumb mode.  This code should be redundant.  */
	mov a4, pc
	bx a4
	.code 32
	.global change_back
change_back:
	/* Halt the execution.  This code should never be executed.  */
	/* With no debug monitor, this probably aborts (eventually).
	   With a Demon debug monitor, this halts cleanly.
	   With an Angel debug monitor, this will report 'Unknown SWI'.  */
	swi SWI_Exit
#endif
	FN_EH_END
/* For Thumb, constants must be after the code since only
   positive offsets are supported for PC relative addresses.  */
	.align 0
/* .LC0: semihosting parameter block pointer, or the default stack top.  */
.LC0:
#ifdef ARM_RDI_MONITOR
	.word HeapBase
#else
#ifndef ARM_RDP_MONITOR
/* Changes by toralf: Provide alternative "stack" variable whose value
   may be defined externally; .Lstack will be used instead of .LC0 if
   it points to a non-0 value.  Also set up references to "hooks" that
   may be used by the application to provide additional init code.  */
#ifdef __pe__
	.word 0x800000
#else
	.word 0x80000			/* Top of RAM on the PIE board.  */
#endif
.Lhwinit:
	.word FUNCTION (hardware_init_hook)
.Lswinit:
	.word FUNCTION (software_init_hook)
/* Set up defaults for the above variables in the form of weak symbols
   - so that application will link correctly, and get value 0 in
   runtime (meaning "ignore setting") for the variables, when the user
   does not provide the symbols.  (The linker uses a weak symbol if,
   and only if, a normal version of the same symbol isn't provided
   e.g. by a linker script or another object file.)  */
	.weak FUNCTION (hardware_init_hook)
	.weak FUNCTION (software_init_hook)
#endif
#endif
/* Linker-provided initial SP (0 when the weak symbol is unresolved).  */
.Lstack:
	.word __stack
	.weak __stack
/* .bss bounds used by the memset call in _start.  */
.LC1:
	.word __bss_start__
.LC2:
	.word __bss_end__
#ifdef __USES_INITFINI__
#ifdef _LITE_EXIT
.Latexit:
	.word FUNCTION(atexit)
	/* Weak reference _fini in case of lite exit.  */
	.weak FUNCTION(_fini)
#endif
.Lfini:
	.word FUNCTION(_fini)
#endif
#ifdef ARM_RDI_MONITOR
.LC30:
	.word AngelSWIArgs
.LC31:
	.word __end__
/* Workspace for Angel calls.  */
	.data
/* Data returned by monitor SWI (SYS_HEAPINFO fills the four words).  */
	.global __stack_base__
HeapBase:	.word 0
HeapLimit:	.word 0
__stack_base__:	.word 0
StackLimit:	.word 0
CommandLine:	.space 256,0		/* Maximum length of 255 chars handled.  */
AngelSWIArgs:
	.word CommandLine
	.word 255
#endif
#ifdef __pe__
	.section .idata$3
	.long 0,0,0,0,0,0,0,0
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.