repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
32bitmicro/newlib-nano-1.0
| 2,914
|
libgloss/cris/irqtable.S
|
/* Default interrupt table for CRIS/CRISv32.
Copyright (C) 2007 Axis Communications.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Neither the name of Axis Communications nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS
COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. */
#ifdef __ELF__
.section .startup,"ax"
#else
.text
#endif
#if defined (__ELF__) || defined (IN_CRT0)
; This is included from crt0.S for a.out, as we can't have it as
; a separate object file in a library due to the lack of named
; section support and the required placement at address 3*4.
; We define an interrupt table with references to the symbols
; _.irq_XX where XX are hex numbers 3..ff (lower-case). They
; are satisfied by weak aliases to the _.irq stub function in
; this file. When overridden, the overriding function must be
; in code explicitly linked in, i.e. *not* in a library.
.global __irqtable_at_irq3
__irqtable_at_irq3:
.irpc irqno,3456789abcdef
.weak _.irq_0\irqno
.set _.irq_0\irqno,_.irq
.dword _.irq_0\irqno
.endr
.irpc irqhd,123456789abcdef
.irpc irqld,0123456789abcdef
.weak _.irq_\irqhd\irqld
.set _.irq_\irqhd\irqld,_.irq
.dword _.irq_\irqhd\irqld
.endr
.endr
; No use having a separate file with default _.irq_[0-f][0-f]
; definitions; just provide a single stub with a weak definition
; and make it up to the user to provide a strong definition that
; they force to be linked in (i.e. not in a library or at least
; together with another symbol they know is linked in).
.text
_.irq:
#ifdef __arch_common_v10_v32
; This is just to allow the multilib to compile without
; hackery: the "common" subset doesn't recognize
; interrupt-return insns.
#elif __CRIS_arch_version >= 32
rete
rfe
#else
reti
nop
#endif
#endif /* __ELF__ || IN_CRT0 */
|
32bitmicro/newlib-nano-1.0
| 1,859
|
libgloss/cris/setup.S
|
/* Support for standalone CRIS/CRISv32 code.
Copyright (C) 2005, 2007 Axis Communications.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Neither the name of Axis Communications nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS
COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. */
.text
.global __setup
__setup:
/* Make sure to save SRP. */
#ifdef __arch_common_v10_v32
/* Can't do anything if we don't know for which arch. This file is
then only a placeholder. Oh, and we can't use the "ret" and "nop"
insns in "common" code. */
move $srp,$r9
jump $r9
setf
#else
/* Code missing:
- Initialize RAM circuits.
- Initialize serial output and input.
- Set stack-pointer. */
ret
nop
#endif
|
32bitmicro/newlib-nano-1.0
| 2,590
|
libgloss/arm/redboot-crt0.S
|
#include "arm.h"
.file "crt0.S"
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
#ifdef __USER_LABEL_PREFIX__
#define SYM_NAME( name ) GLUE (__USER_LABEL_PREFIX__, name)
#else
#error __USER_LABEL_PREFIX is not defined
#endif
.text
.syntax unified
/* Setup the assembly entry point. */
#ifdef THUMB_V7_V6M
.macro FUNC_START name
.global \name
.thumb_func
\name:
.endm
.thumb
#else
.macro FUNC_START name
.global \name
\name:
.endm
.code 32
#endif
FUNC_START SYM_NAME(start)
FUNC_START SYM_NAME(_start)
/* Unnecessary to set fp for v6-m/v7-m, which don't support
ARM state. */
#ifndef THUMB_V7M_V6M
mov fp, #0 /* Null frame pointer. */
#endif
movs r7, #0 /* Null frame pointer for Thumb. */
/* Enable interrupts for gdb debugging. */
#ifdef THUMB_V7_V6M
cpsie if
#else
mrs r0, cpsr
bic r0, r0, #0xC0
msr cpsr, r0
#endif
movs a2, #0 /* Second arg: fill value. */
ldr a1, .LC1 /* First arg: start of memory block. */
ldr a3, .LC2
subs a3, a3, a1 /* Third arg: length of block. */
#ifdef GCRT0
/* Zero out the bss without using memset.
Using memset is bad because it may be instrumented for
profiling, but at this point, the profiling data structures
have not been set up.
FIXME: This loop could be a lot more efficient. */
subs a3, a3, #0
beq 2f
1: strb a2, [a1]
subs a3, a3, #1
add a1, a1, #1
bne 1b
2:
/* Nothing to left to clear. */
#endif
#if defined(__thumb__) && !defined(THUMB_V7_V6M)
/* Enter Thumb mode. */
add a4, pc, #1 /* Get the address of the Thumb block. */
bx a4 /* Go there and start Thumb decoding. */
.code 16
.global __change_mode
.thumb_func
__change_mode:
#endif
#ifndef GCRT0
bl SYM_NAME(memset)
#endif
bl SYM_NAME(__get_memtop)
subs r0, r0, #32
mov sp, r0
#ifdef __USES_INITFINI__
/* Some arm/elf targets use the .init and .fini sections
to create constructors and destructors, and for these
targets we need to call the _init function and arrange
for _fini to be called at program exit. */
ldr r0, .Lfini
bl SYM_NAME (atexit)
bl SYM_NAME (_init)
#endif
movs a1, #0
ldr a2, .LC3
mov a3, a2
bl SYM_NAME(main)
1: bl SYM_NAME(exit)
b 1b
.align 2
.LC1:
.word __bss_start__
.LC2:
.word __bss_end__
.LC3:
.word 0
#ifdef __USES_INITFINI__
.Lfini:
.word SYM_NAME(_fini)
#endif
#if 0
#ifdef __thumb__
.code 16
#endif
.global SYM_NAME(__syscall)
#ifdef __thumb__
.thumb_func
#else
.align 4
#endif
SYM_NAME(__syscall):
mov r12, lr
#ifdef __thumb__
swi 0x18
#else
swi 0x180001
#endif
mov pc, r12
#endif
|
32bitmicro/newlib-nano-1.0
| 3,896
|
libgloss/arm/linux-syscalls0.S
|
/** Linux system call interface for the ARM processor.
* Written by Shaun Jackman <sjackman@gmail.com>.
* Copyright 2006 Pathway Connectivity
*
* Permission to use, copy, modify, and distribute this software
* is freely granted, provided that this notice is preserved.
*/
#include "linux-syscall.h"
#if __thumb__
# define FUNC(name) .type name, %function; .thumb_func; name:
# define SET .thumb_set
#else
# define FUNC(name) .type name, %function; name:
# define SET .set
#endif
#define GLOBAL(name) .global name; FUNC(name)
#define SIZE(name) .size name, .-name
# define SYSCALL4(name) \
GLOBAL(_ ## name); \
swi #SYS_ ## name; \
b _set_errno; \
SIZE(_ ## name)
# define SYSCALL6(name) \
GLOBAL(_ ## name); \
push { r4 - r5 }; \
ldr r4, [sp, #8]; \
ldr r5, [sp, #12]; \
swi #SYS_ ## name; \
pop { r4 - r5 }; \
b _set_errno; \
SIZE(_ ## name)
#define SYSCALL0(name) SYSCALL3(name)
#define SYSCALL3(name) SYSCALL4(name)
#define SYSCALL1(name) SYSCALL3(name)
#define SYSCALL2(name) SYSCALL3(name)
#define SYSCALL5(name) SYSCALL6(name)
SYSCALL1(alarm)
SYSCALL1(brk)
SYSCALL1(chdir)
SYSCALL2(chmod)
SYSCALL3(chown)
SYSCALL1(close)
SYSCALL1(dup)
SYSCALL2(dup2)
SYSCALL3(execve)
SYSCALL1(exit)
SYSCALL3(fcntl)
SYSCALL2(fstat)
SYSCALL2(ftruncate)
SYSCALL3(getdents)
SYSCALL0(getegid)
SYSCALL0(geteuid)
SYSCALL0(getgid)
SYSCALL2(getgroups)
SYSCALL1(getpgid)
SYSCALL0(getpgrp)
SYSCALL0(getpid)
SYSCALL0(getuid)
SYSCALL2(gettimeofday)
SYSCALL3(ioctl)
SYSCALL2(kill)
SYSCALL3(lchown)
SYSCALL2(link)
SYSCALL3(lseek)
SYSCALL2(lstat)
SYSCALL2(mkdir)
SYSCALL3(mknod)
SYSCALL2(nanosleep)
SYSCALL3(open)
SYSCALL0(pause)
SYSCALL1(pipe)
SYSCALL3(read)
SYSCALL3(readlink)
SYSCALL4(reboot)
SYSCALL1(rmdir)
SYSCALL5(select)
SYSCALL2(setpgid)
SYSCALL1(setgid)
SYSCALL0(setsid)
SYSCALL1(setuid)
SYSCALL3(sigprocmask)
SYSCALL2(socketcall)
SYSCALL2(stat)
SYSCALL1(stime)
SYSCALL2(symlink)
SYSCALL1(sync)
SYSCALL1(sysinfo)
SYSCALL1(times)
SYSCALL2(truncate)
SYSCALL1(umask)
SYSCALL1(uname)
SYSCALL1(unlink)
SYSCALL2(utime)
SYSCALL0(vfork)
SYSCALL4(wait4)
SYSCALL3(write)
#define ALIAS(name) .GLOBAL name; SET name, _ ## name
ALIAS(alarm)
ALIAS(chdir)
ALIAS(chmod)
ALIAS(chown)
ALIAS(dup)
ALIAS(dup2)
ALIAS(ftruncate)
ALIAS(getdents)
ALIAS(getegid)
ALIAS(geteuid)
ALIAS(getgid)
ALIAS(getgroups)
ALIAS(getpgid)
ALIAS(getpgrp)
ALIAS(getuid)
ALIAS(ioctl)
ALIAS(lchown)
ALIAS(lstat)
ALIAS(mkdir)
ALIAS(mknod)
ALIAS(nanosleep)
ALIAS(pause)
ALIAS(pipe)
ALIAS(readlink)
ALIAS(rmdir)
ALIAS(select)
ALIAS(setgid)
ALIAS(setpgid)
ALIAS(setsid)
ALIAS(setuid)
ALIAS(sigprocmask)
ALIAS(stime)
ALIAS(symlink)
ALIAS(sync)
ALIAS(sysinfo)
ALIAS(truncate)
ALIAS(umask)
ALIAS(uname)
ALIAS(utime)
ALIAS(vfork)
ALIAS(wait4)
# define SOCKETCALL(name, NAME) \
GLOBAL(name); \
push { r0 - r3 }; \
mov r0, #SYS_ ## NAME; \
b _socketcall_tail; \
SIZE(name)
FUNC(_socketcall_tail)
mov r1, sp
push { lr }
bl _socketcall
pop { r3 }
add sp, #16
#if defined(__ARM_ARCH_2__) || defined(__ARM_ARCH_3__) \
|| defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__)
mov pc, r3
#else
bx r3
#endif
SIZE(_socketcall_tail)
#define SOCKETCALL2(name, NAME) SOCKETCALL(name, NAME)
#define SOCKETCALL3(name, NAME) SOCKETCALL(name, NAME)
#define SOCKETCALL4(name, NAME) SOCKETCALL(name, NAME)
#define SOCKETCALL5(name, NAME) SOCKETCALL(name, NAME)
#define SOCKETCALL6(name, NAME) SOCKETCALL(name, NAME)
SOCKETCALL3(accept, ACCEPT)
SOCKETCALL3(bind, BIND)
SOCKETCALL3(connect, CONNECT)
SOCKETCALL3(getpeername, GETPEERNAME)
SOCKETCALL3(getsockname, GETSOCKNAME)
SOCKETCALL5(getsockopt, GETSOCKOPT)
SOCKETCALL2(listen, LISTEN)
SOCKETCALL4(recv, RECV)
SOCKETCALL6(recvfrom, RECVFROM)
SOCKETCALL3(recvmsg, RECVMSG)
SOCKETCALL4(send, SEND)
SOCKETCALL3(sendmsg, SENDMSG)
SOCKETCALL6(sendto, SENDTO)
SOCKETCALL5(setsockopt, SETSOCKOPT)
SOCKETCALL2(shutdown, SHUTDOWN)
SOCKETCALL3(socket, SOCKET)
SOCKETCALL4(socketpair, SOCKETPAIR)
|
32bitmicro/newlib-nano-1.0
| 5,028
|
libgloss/arm/trap.S
|
#include "arm.h"
/* Run-time exception support */
#ifndef THUMB_V7_V6M
#include "swi.h"
/* .text is used instead of .section .text so it works with arm-aout too. */
.text
.align 0
.global __rt_stkovf_split_big
.global __rt_stkovf_split_small
/* The following functions are provided for software stack checking.
If hardware stack-checking is being used then the code can be
compiled without the PCS entry checks, and simply rely on VM
management to extend the stack for a thread.
The stack extension event occurs when the PCS function entry code
would result in a stack-pointer beneath the stack-limit register
value. The system relies on the following map:
+-----------------------------------+ <-- end of stack block
| ... |
| ... |
| active stack |
| ... | <-- sp (stack-pointer) somewhere in here
| ... |
+-----------------------------------+ <-- sl (stack-limit)
| stack-extension handler workspace |
+-----------------------------------+ <-- base of stack block
The "stack-extension handler workspace" is an amount of memory in
which the stack overflow support code must execute. It must be
large enough to deal with the worst case path through the extension
code. At the moment the compiler expects this to be AT LEAST
256bytes. It uses this fact to code functions with small local
data usage within the overflow space.
In a true target environment We may need to increase the space
between sl and the true limit to allow for the stack extension
code, SWI handlers and for undefined instruction handlers of the
target environment. */
__rt_stkovf_split_small:
mov ip,sp @ Ensure we can calculate the stack required
@ and fall through to...
__rt_stkovf_split_big:
@ in: sp = current stack-pointer (beneath stack-limit)
@ sl = current stack-limit
@ ip = low stack point we require for the current function
@ lr = return address into the current function
@ fp = frame-pointer
@ original sp --> +----------------------------------+
@ | pc (12 ahead of PCS entry store) |
@ current fp ---> +----------------------------------+
@ | lr (on entry) pc (on exit) |
@ +----------------------------------+
@ | sp ("original sp" on entry) |
@ +----------------------------------+
@ | fp (on entry to function) |
@ +----------------------------------+
@ | |
@ | ..argument and work registers.. |
@ | |
@ current sp ---> +----------------------------------+
@
@ The "current sl" is somewhere between "original sp" and "current sp"
@ but above "true sl". The "current sl" should be at least 256bytes
@ above the "true sl". The 256byte stack guard should be large enough
@ to deal with the worst case function entry stacking (160bytes) plus
@ the stack overflow handler stacking requirements, plus the stack
@ required for the memory allocation routines.
@
@ Normal PCS entry (before stack overflow check) can stack 16
@ standard registers (64bytes) and 8 floating point registers
@ (96bytes). This gives a minimum stack guard of 160bytes (excluding
@ the stack required for the code). (Actually only a maximum of
@ 14standard registers are ever stacked on entry to a function).
@
@ NOTE: Structure returns are performed by the caller allocating a
@ dummy space on the stack and passing in a "phantom" arg1 into
@ the function. This means that we do not need to worry about
@ preserving the stack under "sp" even on function return.
@
@ Code should never poke values beneath sp. The sp register
@ should always be "dropped" first to cover the data. This
@ protects the data against any events that may try and use
@ the stack.
SUB ip, sp, ip @ extra stack required for function
@ Add stack extension code here. If desired a new stack chunk
@ can be allocated, and the register state updated suitably.
@ We now know how much extra stack the function requires.
@ Terminate the program for the moment:
swi SWI_Exit
#endif
|
32bitmicro/newlib-nano-1.0
| 11,262
|
libgloss/arm/crt0.S
|
#include "newlib.h"
#include "arm.h"
#include "swi.h"
/* ANSI concatenation macros. */
#define CONCAT(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
#ifdef __USER_LABEL_PREFIX__
#define FUNCTION( name ) CONCAT (__USER_LABEL_PREFIX__, name)
#else
#error __USER_LABEL_PREFIX is not defined
#endif
#ifdef HAVE_INITFINI_ARRAY
#define _init __libc_init_array
#define _fini __libc_fini_array
#endif
/* .text is used instead of .section .text so it works with arm-aout too. */
.text
.syntax unified
#ifdef THUMB_V7_V6M
.thumb
.macro FUNC_START name
.global \name
.thumb_func
\name:
.endm
#else
.code 32
.macro FUNC_START name
.global \name
\name:
.endm
#endif
.macro indirect_call reg
#ifdef HAVE_CALL_INDIRECT
blx \reg
#else
mov lr, pc
mov pc, \reg
#endif
.endm
.align 0
FUNC_START _mainCRTStartup
FUNC_START _start
FUNC_START start
#if defined(__ELF__) && !defined(__USING_SJLJ_EXCEPTIONS__)
/* Annotation for EABI unwinding tables. */
.fnstart
#endif
/* Start by setting up a stack */
#ifdef ARM_RDP_MONITOR
/* Issue Demon SWI to read stack info */
swi SWI_GetEnv /* Returns command line in r0 */
mov sp,r1 /* and the highest memory address in r1 */
/* stack limit is at end of data */
/* allow slop for stack overflow handling and small frames */
#ifdef __ARM_ARCH_6M__
ldr r0, .LC2
adds r0, #128
adds r0, #128
mov sl, r0
#else
ldr sl, .LC2
add sl, sl, #256
#endif
#else
#ifdef ARM_RDI_MONITOR
/* Issue Angel SWI to read stack info */
movs r0, #AngelSWI_Reason_HeapInfo
adr r1, .LC0 /* point at ptr to 4 words to receive data */
#ifdef THUMB_V7M_V6M
bkpt AngelSWI
#elif defined(__thumb2__)
/* We are in thumb mode for startup on armv7 architectures. */
AngelSWIAsm AngelSWI
#else
/* We are always in ARM mode for startup on pre armv7 archs. */
AngelSWIAsm AngelSWI_ARM
#endif
ldr r0, .LC0 /* point at values read */
ldr r1, [r0, #8]
ldr r2, [r0, #12]
/* We skip setting sp/sl if 0 returned from semihosting.
- According to semihosting docs, if 0 returned from semihosting,
the system was unable to calculate the real value, so it's ok
to skip setting sp/sl to 0 here.
- Considering M-profile processors, We might want to initialize
sp by the first entry of vector table and return 0 to SYS_HEAPINFO
semihosting call, which will be skipped here. */
cmp r1, #0
beq .LC26
mov sp, r1
.LC26:
cmp r2, #0
beq .LC27
/* allow slop for stack overflow handling and small frames */
#ifdef __ARM_ARCH_6M__
adds r2, #128
adds r2, #128
mov sl, r2
#else
add sl, r2, #256
#endif
.LC27:
#else
/* Set up the stack pointer to a fixed value */
/* Changes by toralf:
- Allow linker script to provide stack via __stack symbol - see
defintion of .Lstack
- Provide "hooks" that may be used by the application to add
custom init code - see .Lhwinit and .Lswinit
- Go through all execution modes and set up stack for each of them.
Loosely based on init.s from ARM/Motorola example code.
Note: Mode switch via CPSR is not allowed once in non-privileged
mode, so we take care not to enter "User" to set up its sp,
and also skip most operations if already in that mode. */
ldr r3, .Lstack
cmp r3, #0
#ifdef __thumb2__
it eq
#endif
#ifdef __ARM_ARCH_6M__
bne .LC28
ldr r3, .LC0
.LC28:
#else
ldreq r3, .LC0
#endif
/* Note: This 'mov' is essential when starting in User, and ensures we
always get *some* sp value for the initial mode, even if we
have somehow missed it below (in which case it gets the same
value as FIQ - not ideal, but better than nothing.) */
mov sp, r3
#ifdef THUMB_V7_V6M
/* XXX Fill in stack assignments for interrupt modes. */
#else
mrs r2, CPSR
tst r2, #0x0F /* Test mode bits - in User of all are 0 */
beq .LC23 /* "eq" means r2 AND #0x0F is 0 */
msr CPSR_c, #0xD1 /* FIRQ mode, interrupts disabled */
mov sp, r3
sub sl, sp, #0x1000 /* This mode also has its own sl (see below) */
mov r3, sl
msr CPSR_c, #0xD7 /* Abort mode, interrupts disabled */
mov sp, r3
sub r3, r3, #0x1000
msr CPSR_c, #0xDB /* Undefined mode, interrupts disabled */
mov sp, r3
sub r3, r3, #0x1000
msr CPSR_c, #0xD2 /* IRQ mode, interrupts disabled */
mov sp, r3
sub r3, r3, #0x2000
msr CPSR_c, #0xD3 /* Supervisory mode, interrupts disabled */
mov sp, r3
sub r3, r3, #0x8000 /* Min size 32k */
bic r3, r3, #0x00FF /* Align with current 64k block */
bic r3, r3, #0xFF00
str r3, [r3, #-4] /* Move value into user mode sp without */
ldmdb r3, {sp}^ /* changing modes, via '^' form of ldm */
orr r2, r2, #0xC0 /* Back to original mode, presumably SVC, */
msr CPSR_c, r2 /* with FIQ/IRQ disable bits forced to 1 */
#endif
.LC23:
/* Setup a default stack-limit in-case the code has been
compiled with "-mapcs-stack-check". Hard-wiring this value
is not ideal, since there is currently no support for
checking that the heap and stack have not collided, or that
this default 64k is enough for the program being executed.
However, it ensures that this simple crt0 world will not
immediately cause an overflow event: */
#ifdef __ARM_ARCH_6M__
movs r2, #64
lsls r2, r2, #10
subs r2, r3, r2
mov sl, r2
#else
sub sl, r3, #64 << 10 /* Still assumes 256bytes below sl */
#endif
#endif
#endif
/* Zero the memory in the .bss section. */
movs a2, #0 /* Second arg: fill value */
mov fp, a2 /* Null frame pointer */
mov r7, a2 /* Null frame pointer for Thumb */
ldr a1, .LC1 /* First arg: start of memory block */
ldr a3, .LC2
subs a3, a3, a1 /* Third arg: length of block */
#if defined(__thumb__) && !defined(THUMB_V7_V6M)
/* Enter Thumb mode.... */
add a4, pc, #1 /* Get the address of the Thumb block */
bx a4 /* Go there and start Thumb decoding */
.code 16
.global __change_mode
.thumb_func
__change_mode:
#endif
bl FUNCTION (memset)
#if !defined (ARM_RDP_MONITOR) && !defined (ARM_RDI_MONITOR)
/* Changes by toralf: Taken from libgloss/m68k/crt0.S
* initialize target specific stuff. Only execute these
* functions it they exist.
*/
ldr r3, .Lhwinit
cmp r3, #0
beq .LC24
indirect_call r3
.LC24:
ldr r3, .Lswinit
cmp r3, #0
beq .LC25
indirect_call r3
.LC25:
movs r0, #0 /* no arguments */
movs r1, #0 /* no argv either */
#else
/* Need to set up standard file handles */
bl FUNCTION (initialise_monitor_handles)
#ifdef ARM_RDP_MONITOR
swi SWI_GetEnv /* sets r0 to point to the command line */
mov r1, r0
#else
movs r0, #AngelSWI_Reason_GetCmdLine
adr r1, .LC30 /* Space for command line */
AngelSWIAsm AngelSWI
ldr r1, .LC30
#endif
/* Parse string at r1 */
movs r0, #0 /* count of arguments so far */
/* Push a NULL argument onto the end of the list. */
#ifdef __thumb__
push {r0}
#else
stmfd sp!, {r0}
#endif
.LC10:
/* Skip leading blanks */
#ifdef __thumb__
ldrb r3, [r1]
adds r1, #1
#else
ldrb r3, [r1], #1
#endif
cmp r3, #0
beq .LC12
cmp r3, #' '
beq .LC10
/* See whether we are scanning a string */
cmp r3, #'"'
#ifdef __thumb__
beq .LC20
cmp r3, #'\''
bne .LC21
.LC20:
mov r2, r3
b .LC22
.LC21:
movs r2, #' ' /* terminator type */
subs r1, r1, #1 /* adjust back to point at start char */
.LC22:
#else
cmpne r3, #'\''
moveq r2, r3
movne r2, #' ' /* terminator type */
subne r1, r1, #1 /* adjust back to point at start char */
#endif
/* Stack a pointer to the current argument */
#ifdef __thumb__
push {r1}
#else
stmfd sp!, {r1}
#endif
adds r0, r0, #1
.LC11:
#ifdef __thumb__
ldrb r3, [r1]
adds r1, #1
#else
ldrb r3, [r1], #1
#endif
cmp r3, #0
beq .LC12
cmp r2, r3 /* reached terminator? */
bne .LC11
movs r2, #0
subs r3, r1, #1
strb r2, [r3] /* terminate the arg string */
b .LC10
.LC12:
mov r1, sp /* point at stacked arg pointers */
/* We've now got the stacked args in order reverse the */
#ifdef __thumb__
mov r2, r0
lsls r2, #2
add r2, sp
mov r3, sp
.LC15: cmp r2, r3
bls .LC14
subs r2, #4
ldr r4, [r2]
ldr r5, [r3]
str r5, [r2]
str r4, [r3]
adds r3, #4
b .LC15
.LC14:
/* Ensure doubleword stack alignment. */
mov r4, sp
movs r5, #7
bics r4, r5
mov sp, r4
#else
add r2, sp, r0, LSL #2 /* End of args */
mov r3, sp /* Start of args */
.LC13: cmp r2, r3
ldrhi r4,[r2, #-4] /* Reverse ends of list */
ldrhi r5, [r3]
strhi r5, [r2, #-4]!
strhi r4, [r3], #4
bhi .LC13
/* Ensure doubleword stack alignment. */
bic sp, sp, #7
#endif
#endif
#ifdef __USES_INITFINI__
/* Some arm/elf targets use the .init and .fini sections
to create constructors and destructors, and for these
targets we need to call the _init function and arrange
for _fini to be called at program exit. */
mov r4, r0
mov r5, r1
movs r0, #0
ldr r1, .Lfini
movs r2, #0
movs r3, #0
bl FUNCTION (__register_exitproc)
bl FUNCTION (_init)
mov r0, r4
mov r1, r5
#endif
bl FUNCTION (main)
bl FUNCTION (exit) /* Should not return. */
#if defined(__thumb__) && !defined(THUMB_V7_V6M)
/* Come out of Thumb mode. This code should be redundant. */
mov a4, pc
bx a4
.code 32
.global change_back
change_back:
/* Halt the execution. This code should never be executed. */
/* With no debug monitor, this probably aborts (eventually).
With a Demon debug monitor, this halts cleanly.
With an Angel debug monitor, this will report 'Unknown SWI'. */
swi SWI_Exit
#endif
/* For Thumb, constants must be after the code since only
positive offsets are supported for PC relative addresses. */
.align 0
.LC0:
#ifdef ARM_RDI_MONITOR
.word HeapBase
#else
#ifndef ARM_RDP_MONITOR
/* Changes by toralf: Provide alternative "stack" variable whose value
may be defined externally; .Lstack will be used instead of .LC0 if
it points to a non-0 value. Also set up references to "hooks" that
may be used by the application to provide additional init code. */
#ifdef __pe__
.word 0x800000
#else
.word 0x80000 /* Top of RAM on the PIE board. */
#endif
.Lstack:
.word __stack
.Lhwinit:
.word FUNCTION (hardware_init_hook)
.Lswinit:
.word FUNCTION (software_init_hook)
/* Set up defaults for the above variables in the form of weak symbols
- so that application will link correctly, and get value 0 in
runtime (meaning "ignore setting") for the variables, when the user
does not provide the symbols. (The linker uses a weak symbol if,
and only if, a normal version of the same symbol isn't provided
e.g. by a linker script or another object file.) */
.weak __stack
.weak FUNCTION (hardware_init_hook)
.weak FUNCTION (software_init_hook)
#endif
#endif
#if defined(__ELF__) && !defined(__USING_SJLJ_EXCEPTIONS__)
/* Protect against unhandled exceptions. */
.cantunwind
.fnend
#endif
.LC1:
.word __bss_start__
.LC2:
.word __bss_end__
#ifdef __USES_INITFINI__
.Lfini:
.word FUNCTION(_fini)
#endif
#ifdef ARM_RDI_MONITOR
.LC30:
.word CommandLine
.word 255
/* Workspace for Angel calls. */
.data
/* Data returned by monitor SWI. */
.global __stack_base__
HeapBase: .word 0
HeapLimit: .word 0
__stack_base__: .word 0
StackLimit: .word 0
CommandLine: .space 256,0 /* Maximum length of 255 chars handled. */
#endif
#ifdef __pe__
.section .idata$3
.long 0,0,0,0,0,0,0,0
#endif
|
32bitmicro/newlib-nano-1.0
| 2,323
|
libgloss/m68hc11/sci-inout.S
|
/* M68HC11/M68HC12 serial line operations
* Copyright (C) 1999, 2001, 2003, 2004 Stephane Carrez (stcarrez@nerim.fr)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#ifdef __HAVE_SHORT_INT__
.mode mshort
#else
.mode mlong
#endif
#if defined(__USE_RTC__)
.macro ret
#if defined(mc68hc12)
rtc
#else
jmp __return_32
#endif
.endm
#else
.macro ret
rts
.endm
#endif
#ifdef mc68hc12
SC0CR1 = 0xC2
SC0CR2 = 0xC3
SC0SR1 = 0xC4
SC0DRL = 0xC7
SC0BD = 0xC0
.sect .data
.globl _m68hc12_ports
_m68hc12_ports: .word 0
.sect .text
.globl outbyte
;;;
;;; int outbyte(char c);
;;;
;;; B : Character to send
;;;
outbyte:
bsr _sci_init
L1:
ldaa SC0SR1,x
bge L1
stab SC0DRL,x
ldab SC0CR2,x
orab #0x8
stab SC0CR2,x
ret
.sect .text
.globl inbyte
;;;
;;; char inbyte(void);
;;;
inbyte:
bsr _sci_init
ldaa SC0SR1,x
bita #0x20
beq inbyte
ldab SC0CR2,x
ret
.globl _sci_init
.sect .text
_sci_init:
ldx _m68hc12_ports
beq do_init
dex
rts
do_init:
ldx #0x1
stx _m68hc12_ports
dex
ldd #26
std SC0BD,x
ldaa #0
staa SC0CR1,x
ldaa #0xC
staa SC0CR2,x
rts
#else
BAUD = 0x2b
SCCR1= 0x2c
SCCR2= 0x2d
SCSR = 0x2e
SCDR = 0x2f
.sect .data
.globl _m68hc11_ports
_m68hc11_ports: .word 0
.sect .text
.globl outbyte
;;;
;;; int outbyte(char c);
;;;
;;; B : Character to send
;;;
outbyte:
bsr _sci_init
L1:
ldaa SCSR,x
bge L1
stab SCDR,x
ldab SCCR2,x
orab #0x8
stab SCCR2,x
ret
.sect .text
.globl inbyte
;;;
;;; char inbyte(void);
;;;
inbyte:
bsr _sci_init
ldaa SCSR,x
bita #0x20
beq inbyte
ldab SCDR,x
ret
.globl _sci_init
.sect .text
_sci_init:
ldx _m68hc11_ports
beq do_init
rts
do_init:
ldx #0x1000
stx _m68hc11_ports
ldaa #0x30
staa BAUD,x
clra
staa SCCR1,x
ldaa #0xC
staa SCCR2,x
rts
#endif
|
32bitmicro/newlib-nano-1.0
| 2,090
|
libgloss/m68hc11/crt0.S
|
/* Startup code for M68HC11/M68HC12.
* Copyright (C) 1999, 2000, 2001, 2002 Stephane Carrez (stcarrez@nerim.fr)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
;-----------------------------------------
; startup code
;-----------------------------------------
.file "crt0.s"
;;
;;
;; The linker concatenate the .install* sections in the following order:
;;
;; .install0 Setup the stack pointer
;; .install1 Place holder for applications
;; .install2 Optional installation of data section in memory
;; .install3 Place holder for applications
;; .install4 Invokes the main
;;
.sect .install0,"ax",@progbits
.globl _start
_start:
;;
;; At this step, the stack is not initialized and interrupts are masked.
;; Applications only have 64 cycles to initialize some registers.
;;
;; To have a generic/configurable startup, initialize the stack to
;; the end of some memory region. The _stack symbol is defined by
;; the linker.
;;
lds #_stack
.sect .install2,"ax",@progbits
;;
;; Call a specific initialization operation. The default is empty.
;; It can be overriden by applications. It is intended to initialize
;; the 68hc11 registers. Function prototype is:
;;
;; int __premain(void);
;;
jsr __premain
;;
;;
;;
.sect .install4,"ax",@progbits
jsr main
fatal:
jsr exit
bra fatal
;-----------------------------------------
; end startup code
;-----------------------------------------
;; Force loading of data section mapping and bss clear
.globl __map_data_section
.globl __init_bss_section
|
32bitmicro/newlib-nano-1.0
| 1,553
|
libgloss/cr16/crtn.S
|
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 1999 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
/*
* This file supplies function epilogues for the .init and .fini sections.
* It is linked in after all other files.
*/
.file "crtn.o"
.ident "GNU C crtn.o"
.section .init
popret ra
.section .fini
popret ra
|
32bitmicro/newlib-nano-1.0
| 1,646
|
libgloss/cr16/crti.S
|
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 1999 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
/*
* This file just supplies function prologues for the .init and .fini
* sections. It is linked in before crtbegin.o.
*/
.file "crti.o"
.ident "GNU C crti.o"
.section .init
.globl _init
.type _init,@function
_init:
push ra
.section .fini
.globl _fini
.type _fini,@function
_fini:
push ra
|
32bitmicro/newlib-nano-1.0
| 4,105
|
libgloss/cr16/crt1.S
|
##############################################################################
# crt0.s -- CR16 default start-up routine #
# #
# Copyright (c) 2004 National Semiconductor Corporation #
# #
# The authors hereby grant permission to use, copy, modify, distribute, #
# and license this software and its documentation for any purpose, provided #
# that existing copyright notices are retained in all copies and that this #
# notice is included verbatim in any distributions. No written agreement, #
# license, or royalty fee is required for any of the authorized uses. #
# Modifications to this software may be copyrighted by their authors #
# and need not follow the licensing terms described here, provided that #
# the new terms are clearly indicated on the first page of each file where #
# they apply. #
# #
# This is the start routine of your CR16 program. #
# It is linked with your application automatically. You can use #
# this routine as a template and modify it to your needs, yet this #
# file must be supplied for the compiler. #
# It is assumed that the following symbols are defined in your linker #
# script: __STACK_START, __ISTACK_START #
##############################################################################
.text
#ifdef __CR16CP__
.align 4
#else
.align 2
#endif
.global _main
.global _atexit
.global _exit
.global _start
.global __fini
.global __init
.global __STACK_START
.global __ISTACK_START
_start:
#----------------------------------------------------------------------------#
# Initialize the stack pointers. The constants __STACK_START and #
# __ISTACK_START should be defined in the linker script. #
movd $__STACK_START, (sp)
movd $__ISTACK_START, (r1,r0)
lprd (r1,r0), isp # load (r1,r0) into isp (interrupt stack pointer, per __ISTACK_START)
#----------------------------------------------------------------------------#
# Initialize the default sections according to the linker script. #
# bal (ra), __init_bss_data
#----------------------------------------------------------------------#
# Set the Extended Dispatch bit in the CFG register. This is the #
# default configuration for CR16C. #
spr cfg, r0 # Set dispatch table width
orw $0x100, r0 # bit 8 = Extended Dispatch
lpr r0, cfg
#----------------------------------------------------------------------------#
#----------------------------------------------------------------------------#
# Handle global and static constructors execution and setup #
# destructors to be called from exit. #
bal (ra),__init
movd $__fini@c, (r3,r2) # pass __fini's code address to _atexit
bal (ra), _atexit
#----------------------------------------------------------------------------#
# Jump to the main function in your application. #
#ifdef __INT32__
movd $0, (r3,r2) # Number of arguments
movd $0, (r5,r4) # contains pointer to argument string.
#else
movw $0, r2 # Number of arguments
movd $0, (r4,r3) # contains pointer to argument string.
#endif
bal (ra), _main
#----------------------------------------------------------------------------#
# Upon returning from the main function (if it isn't an infinite loop), #
# jump to the exit function. The exit function is located in the #
# library 'libc.a'. #
#ifdef __INT32__
movd (r1,r0), (r3,r2) # _main return value gets forwarded.
#else
movw r0, r2 # _main return value gets forwarded.
#endif
br _exit # returns control to the functional simulator.
|
32bitmicro/newlib-nano-1.0
| 2,822
|
libgloss/pa/setjmp.S
|
/*
* Copyright (c) 1990,1994 The University of Utah and
* the Computer Systems Laboratory (CSL). All rights reserved.
*
* Permission to use, copy, modify and distribute this software is hereby
* granted provided that (1) source code retains these copyright, permission,
* and disclaimer notices, and (2) redistributions including binaries
* reproduce the notices in supporting documentation, and (3) all advertising
* materials mentioning features or use of this software display the following
* acknowledgement: ``This product includes software developed by the
* Computer Systems Laboratory at the University of Utah.''
*
* THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
* IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
* ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* CSL requests users of this software to return to csl-dist@cs.utah.edu any
* improvements that they make and grant CSL redistribution rights.
*
* Utah $Hdr: _setjmp.s 1.9 94/12/16$
*/
.space $TEXT$
.subspa $CODE$
/*
* The PA jmp_buf is 48 words arranged as follows:
*
* 0- 9: sigcontext
* 10-26: callee save GRs (r3-r18) and DP (r27)
* 27: callee save SRs (sr3)
* 28-47: callee save FRs (fr12-fr21)
*/
/*
* int
* setjmp(env)
* jmp_buf env;
*
* This routine does not restore signal state.
*/
/*
 * setjmp/_setjmp share one body (neither saves signal state, per the
 * header comment above).  arg0 (%r26) points at the jmp_buf laid out
 * as described above: 10-word sigcontext, then GRs/DP, sr3, and FRs.
 * Note: the FR slots (28-47) are NOT filled in by this implementation.
 */
.export setjmp,entry
.export _setjmp,entry
.proc
.callinfo
setjmp
_setjmp
.entry
/*
 * save sp and rp in sigcontext, skip the rest
 */
stw %r30,8(%r26) /* env word 2 = sp (%r30) */
stw %r2,24(%r26) /* env word 6 = rp (%r2), the return address */
ldo 40(%r26),%r26 /* skip past the 10-word sigcontext area */
/*
 * save dp and the callee saves registers
 * (stwm stores a word and post-increments %r26 by 4)
 */
stwm %r3,4(%r26)
stwm %r4,4(%r26)
stwm %r5,4(%r26)
stwm %r6,4(%r26)
stwm %r7,4(%r26)
stwm %r8,4(%r26)
stwm %r9,4(%r26)
stwm %r10,4(%r26)
stwm %r11,4(%r26)
stwm %r12,4(%r26)
stwm %r13,4(%r26)
stwm %r14,4(%r26)
stwm %r15,4(%r26)
stwm %r16,4(%r26)
stwm %r17,4(%r26)
stwm %r18,4(%r26)
stwm %r27,4(%r26) /* dp */
mfsp %sr3,%r9 /* callee-saved space register sr3 */
stwm %r9,4(%r26)
bv 0(%r2) /* return to caller ... */
copy %r0,%r28 /* (delay slot) ... with return value 0 */
.exit
.procend
/*
* void
* longjmp(env, val)
* jmp_buf env;
* int val;
*
* This routine does not restore signal state.
* This routine does not override a zero val.
*/
/*
 * longjmp/_longjmp: reload the state saved by setjmp above and return
 * through the saved rp.  arg0 (%r26) = jmp_buf, arg1 (%r25) = val.
 * Mirrors setjmp exactly: sp/rp from the sigcontext area, then the
 * callee-saved GRs, dp and sr3; FRs are not restored (not saved either).
 */
.export longjmp,entry
.export _longjmp,entry
.proc
.callinfo
longjmp
_longjmp
.entry
/*
 * restore sp and rp
 */
ldw 8(%r26),%r30
ldw 24(%r26),%r2
ldo 40(%r26),%r26 /* skip past the 10-word sigcontext area */
/*
 * restore callee saves registers
 * (ldwm loads a word and post-increments %r26 by 4)
 */
ldwm 4(%r26),%r3
ldwm 4(%r26),%r4
ldwm 4(%r26),%r5
ldwm 4(%r26),%r6
ldwm 4(%r26),%r7
ldwm 4(%r26),%r8
ldwm 4(%r26),%r9
ldwm 4(%r26),%r10
ldwm 4(%r26),%r11
ldwm 4(%r26),%r12
ldwm 4(%r26),%r13
ldwm 4(%r26),%r14
ldwm 4(%r26),%r15
ldwm 4(%r26),%r16
ldwm 4(%r26),%r17
ldwm 4(%r26),%r18
ldwm 4(%r26),%r27 /* dp */
ldwm 4(%r26),%r9
mtsp %r9,%sr3
bv 0(%r2) /* "return" from the original setjmp call ... */
copy %r25,%r28 /* (delay slot) ... with val as the return value;
                  per the header comment a zero val is NOT overridden */
.exit
.procend
|
32bitmicro/newlib-nano-1.0
| 56,787
|
libgloss/pa/hp-milli.s
|
;
; (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;
; To anyone who acknowledges that this file is provided "AS IS"
; without any express or implied warranty:
; permission to use, copy, modify, and distribute this file
; for any purpose is hereby granted without fee, provided that
; the above copyright notice and this notice appears in all
; copies, and that the name of Hewlett-Packard Company not be
; used in advertising or publicity pertaining to distribution
; of the software without specific, written prior permission.
; Hewlett-Packard Company makes no representations about the
; suitability of this software for any purpose.
;
; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
; - fr16-31 added at Utah
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
; Symbolic names for the 32 hardware general registers (r0-r31),
; referenced by the procedure-call-convention aliases further below.
r0: .equ 0
r1: .equ 1
r2: .equ 2
r3: .equ 3
r4: .equ 4
r5: .equ 5
r6: .equ 6
r7: .equ 7
r8: .equ 8
r9: .equ 9
r10: .equ 10
r11: .equ 11
r12: .equ 12
r13: .equ 13
r14: .equ 14
r15: .equ 15
r16: .equ 16
r17: .equ 17
r18: .equ 18
r19: .equ 19
r20: .equ 20
r21: .equ 21
r22: .equ 22
r23: .equ 23
r24: .equ 24
r25: .equ 25
r26: .equ 26
r27: .equ 27
r28: .equ 28
r29: .equ 29
r30: .equ 30
r31: .equ 31
; Hardware Space Registers
sr0: .equ 0
sr1: .equ 1
sr2: .equ 2
sr3: .equ 3
sr4: .equ 4
sr5: .equ 5
sr6: .equ 6
sr7: .equ 7
; Hardware Floating Point Registers
fr0: .equ 0
fr1: .equ 1
fr2: .equ 2
fr3: .equ 3
fr4: .equ 4
fr5: .equ 5
fr6: .equ 6
fr7: .equ 7
fr8: .equ 8
fr9: .equ 9
fr10: .equ 10
fr11: .equ 11
fr12: .equ 12
fr13: .equ 13
fr14: .equ 14
fr15: .equ 15
fr16: .equ 16
fr17: .equ 17
fr18: .equ 18
fr19: .equ 19
fr20: .equ 20
fr21: .equ 21
fr22: .equ 22
fr23: .equ 23
fr24: .equ 24
fr25: .equ 25
fr26: .equ 26
fr27: .equ 27
fr28: .equ 28
fr29: .equ 29
fr30: .equ 30
fr31: .equ 31
; Hardware Control Registers (cr0-cr31), each with its architectural
; alias where one exists.
cr0: .equ 0
rctr: .equ 0 ; Recovery Counter Register
cr8: .equ 8 ; Protection ID 1
pidr1: .equ 8
cr9: .equ 9 ; Protection ID 2
pidr2: .equ 9
cr10: .equ 10
ccr: .equ 10 ; Coprocessor Configuration Register
cr11: .equ 11
sar: .equ 11 ; Shift Amount Register
cr12: .equ 12
pidr3: .equ 12 ; Protection ID 3
cr13: .equ 13
pidr4: .equ 13 ; Protection ID 4
cr14: .equ 14
iva: .equ 14 ; Interrupt Vector Address
cr15: .equ 15
eiem: .equ 15 ; External Interrupt Enable Mask
cr16: .equ 16
itmr: .equ 16 ; Interval Timer
cr17: .equ 17
pcsq: .equ 17 ; Program Counter Space queue
cr18: .equ 18
pcoq: .equ 18 ; Program Counter Offset queue
cr19: .equ 19
iir: .equ 19 ; Interruption Instruction Register
cr20: .equ 20
isr: .equ 20 ; Interruption Space Register
cr21: .equ 21
ior: .equ 21 ; Interruption Offset Register
cr22: .equ 22
ipsw: .equ 22 ; Interruption Processor Status Word
cr23: .equ 23
eirr: .equ 23 ; External Interrupt Request
cr24: .equ 24
ppda: .equ 24 ; Physical Page Directory Address
tr0: .equ 24 ; Temporary register 0
cr25: .equ 25
hta: .equ 25 ; Hash Table Address
tr1: .equ 25 ; Temporary register 1
cr26: .equ 26
tr2: .equ 26 ; Temporary register 2
cr27: .equ 27
tr3: .equ 27 ; Temporary register 3
cr28: .equ 28
tr4: .equ 28 ; Temporary register 4
cr29: .equ 29
tr5: .equ 29 ; Temporary register 5
cr30: .equ 30
tr6: .equ 30 ; Temporary register 6
cr31: .equ 31
tr7: .equ 31 ; Temporary register 7
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention ~
; Register Definitions for Use with Assembler ~
; version A.08.06
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
; (procedure-call-convention aliases used by all the millicode below)
rp: .equ r2 ; return pointer
mrp: .equ r31 ; millicode return pointer
ret0: .equ r28 ; return value
ret1: .equ r29 ; return value (high part of double)
sl: .equ r29 ; static link
sp: .equ r30 ; stack pointer
dp: .equ r27 ; data pointer
arg0: .equ r26 ; argument
arg1: .equ r25 ; argument or high part of double argument
arg2: .equ r24 ; argument
arg3: .equ r23 ; argument or high part of double argument
;_____________________________________________________________________________
; Software Architecture Space Registers
; sr0 ; return link from BLE
sret: .equ sr1 ; return value
sarg: .equ sr1 ; argument
; sr4 ; PC SPACE tracker
; sr5 ; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ 64 ; old stack pointer (locates previous frame)
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions. version A.08.06
; These are generally suitable for programs on HP_UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
.SPACE $TEXT$, SPNUM=0,SORT=8
.subspa $MILLICODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
.subspa $LIT$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
.subspa $CODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;
.SPACE $PRIVATE$, SPNUM=1,PRIVATE,SORT=16
.subspa $GLOBAL$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
.import $global$
.subspa $DATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
.subspa $BSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO
.SPACE $TEXT$
.SUBSPA $MILLICODE$
.align 8
;_____________________________________________________________________________
; $$remI -- integer remainder millicode.
; In:   arg0 (r26) = dividend, arg1 (r25) = divisor
; Out:  ret1 (r29) = remainder; per the sign fixup at remI300 the
;       remainder takes the sign of the dividend (signed remI semantics
;       -- confirm against the HP millicode convention).
; A zero divisor traps via the conditional-trap addit,= below.
; Clobbers r1; returns through mrp (r31) as all millicode does.
.EXPORT $$remI,millicode
; .IMPORT cerror
$$remI:
.PROC
.CALLINFO millicode
.ENTRY
addit,= 0,arg1,r0 ; conditional trap: divisor == 0
add,>= r0,arg0,ret1 ; ret1 = arg0; nullify the negate if arg0 >= 0
sub r0,ret1,ret1 ; ret1 = |arg0|
sub r0,arg1,r1 ; r1 = -arg1; also clears carry for ds
ds r0,r1,r0 ; set the V-bit for the divide steps
or r0,r0,r1 ; r1 = 0
add ret1,ret1,ret1 ; shift the dividend's msb into carry
; 32 conditional divide steps (ds) interleaved with quotient shifts;
; r1 accumulates the partial remainder, ret1 the shifted dividend.
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
ds r1,arg1,r1
addc ret1,ret1,ret1
; Remainder correction: if the partial remainder went negative, add or
; subtract the divisor back depending on its sign.
movb,>=,n r1,ret1,remI300
add,< arg1,r0,r0
add,tr r1,arg1,ret1
sub r1,arg1,ret1
; Give the remainder the sign of the original dividend, then return.
remI300: add,>= arg0,r0,r0
sub r0,ret1,ret1
bv r0(r31) ; millicode return via mrp (r31)
nop
.EXIT
.PROCEND
; Bit positions / field lengths used by $$dyncall's depi/bb below.
bit1: .equ 1
bit30: .equ 30
bit31: .equ 31
len2: .equ 2
len4: .equ 4
; $$dyncall -- indirect-call stub.
; r22 holds either a plain procedure address or (when bit 30 is set,
; the shared-library case -- see the "noshlibs" fast path) a pointer
; whose target supplies the real entry address and a second word
; (presumably the linkage-table pointer; confirm against the PA
; shared-library convention) loaded into r19.
$$dyncall:
.proc
.callinfo NO_CALLS
.entry
.export $$dyncall,MILLICODE
bb,>=,n 22,bit30,noshlibs ; plain address: skip the descriptor load
depi 0,bit31,len2,22 ; clear the two low flag bits of r22
ldw 4(22),19 ; r19 = second descriptor word
ldw 0(22),22 ; r22 = actual entry address
noshlibs:
ldsid (22),r1 ; space id of the target address
mtsp r1,sr0
be 0(sr0,r22) ; inter-space branch to the target
stw rp,-24(sp) ; (delay slot) save caller's rp in its frame slot
.exit
.procend
; Scratch aliases for the divide millicode below.
temp: .EQU r1
retreg: .EQU ret1 ; r29
; $$divU -- 32-bit unsigned divide millicode: retreg = arg0 / arg1.
; Divisors 1..15 are dispatched (via blr) to shift or constant-divide
; fast paths; divisors with the high bit set are handled at
; big_divisor; everything else runs 32 ds divide steps at "normal".
; A zero divisor traps via addit,= in the dispatch table.
; Clobbers r1 (temp); returns through mrp (r31).
.export $$divU,millicode
.import $$divU_3,millicode
.import $$divU_5,millicode
.import $$divU_6,millicode
.import $$divU_7,millicode
.import $$divU_9,millicode
.import $$divU_10,millicode
.import $$divU_12,millicode
.import $$divU_14,millicode
.import $$divU_15,millicode
$$divU:
.proc
.callinfo millicode
.entry
; The subtract is not nullified since it does no harm and can be used
; by the two cases that branch back to "normal".
comib,>= 15,arg1,special_divisor
sub r0,arg1,temp ; clear carry, negate the divisor
ds r0,temp,r0 ; set V-bit to 1
normal:
add arg0,arg0,retreg ; shift msb bit into carry
ds r0,arg1,temp ; 1st divide step, if no carry
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 2nd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 3rd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 4th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 5th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 6th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 7th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 8th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 9th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 10th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 11th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 12th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 13th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 14th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 15th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 16th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 17th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 18th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 19th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 20th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 21st divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 22nd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 23rd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 24th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 25th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 26th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 27th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 28th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 29th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 30th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 31st divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 32nd divide step,
bv 0(r31)
addc retreg,retreg,retreg ; shift last retreg bit into retreg
;_____________________________________________________________________________
; handle the cases where divisor is a small constant or has high bit on
special_divisor:
comib,> 0,arg1,big_divisor
nop
; blr branches into the two-instruction-per-entry table below, indexed
; by the divisor value (0..15).
blr arg1,r0
nop
zero_divisor: ; this label is here to provide external visibility
addit,= 0,arg1,0 ; trap for zero dvr
nop
bv 0(r31) ; divisor == 1
copy arg0,retreg
bv 0(r31) ; divisor == 2
extru arg0,30,31,retreg
b,n $$divU_3 ; divisor == 3
nop
bv 0(r31) ; divisor == 4
extru arg0,29,30,retreg
b,n $$divU_5 ; divisor == 5
nop
b,n $$divU_6 ; divisor == 6
nop
b,n $$divU_7 ; divisor == 7
nop
bv 0(r31) ; divisor == 8
extru arg0,28,29,retreg
b,n $$divU_9 ; divisor == 9
nop
b,n $$divU_10 ; divisor == 10
nop
b normal ; divisor == 11
ds r0,temp,r0 ; set V-bit to 1
b,n $$divU_12 ; divisor == 12
nop
b normal ; divisor == 13
ds r0,temp,r0 ; set V-bit to 1
b,n $$divU_14 ; divisor == 14
nop
b,n $$divU_15 ; divisor == 15
nop
;_____________________________________________________________________________
; Handle the case where the high bit is on in the divisor.
; Compute: if( dividend>=divisor) quotient=1; else quotient=0;
; Note: dividend>=divisor iff dividend-divisor does not borrow
; and not borrow iff carry
big_divisor:
sub arg0,arg1,r0
bv 0(r31)
addc r0,r0,retreg
.exit
.procend
.end
; Scratch aliases for the divide-by-constant routines below
; (t1 overlays arg1, which these single-argument routines do not need).
t2: .EQU r1
; x2 .EQU arg0 ; r26
t1: .EQU arg1 ; r25
; x1 .EQU ret1 ; r29
;_____________________________________________________________________________
; $$divide_by_constant -- family of millicode entry points computing
; ret1 = arg0 / N for fixed N.  Powers of two use a single arithmetic
; shift (EXTRS) with a pre-bias for negative dividends; the other
; divisors use shift-add reciprocal folds that accumulate at the shared
; tails $pos/$neg (and the 17-bit variants $pos_for_17/$neg_for_17,
; $pos7/$neg7_shift for the 7/9/14 family).  Signed entries ($$divI_N)
; branch to $negN fixups for negative dividends; $$divU_N entries are
; unsigned.  Clobbers t1 (r25) and t2 (r1); returns via mrp (r31).
$$divide_by_constant:
.PROC
.CALLINFO millicode
.entry
.export $$divide_by_constant,millicode
; Provides a "nice" label for the code covered by the unwind descriptor
; for things like gprof.
; Power-of-two signed divides: bias negative dividends by N-1, then
; arithmetic-shift right.
$$divI_2:
.EXPORT $$divI_2,MILLICODE
COMCLR,>= arg0,0,0
ADDI 1,arg0,arg0
bv 0(r31)
EXTRS arg0,30,31,ret1
$$divI_4:
.EXPORT $$divI_4,MILLICODE
COMCLR,>= arg0,0,0
ADDI 3,arg0,arg0
bv 0(r31)
EXTRS arg0,29,30,ret1
$$divI_8:
.EXPORT $$divI_8,MILLICODE
COMCLR,>= arg0,0,0
ADDI 7,arg0,arg0
bv 0(r31)
EXTRS arg0,28,29,ret1
$$divI_16:
.EXPORT $$divI_16,MILLICODE
COMCLR,>= arg0,0,0
ADDI 15,arg0,arg0
bv 0(r31)
EXTRS arg0,27,28,ret1
$$divI_3:
.EXPORT $$divI_3,MILLICODE
COMB,<,N arg0,0,$neg3
ADDI 1,arg0,arg0
EXTRU arg0,1,2,ret1
SH2ADD arg0,arg0,arg0
B $pos
ADDC ret1,0,ret1
$neg3:
SUBI 1,arg0,arg0
EXTRU arg0,1,2,ret1
SH2ADD arg0,arg0,arg0
B $neg
ADDC ret1,0,ret1
$$divU_3:
.EXPORT $$divU_3,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,30,t1
SH2ADD arg0,arg0,arg0
B $pos
ADDC ret1,t1,ret1
$$divI_5:
.EXPORT $$divI_5,MILLICODE
COMB,<,N arg0,0,$neg5
ADDI 3,arg0,t1
SH1ADD arg0,t1,arg0
B $pos
ADDC 0,0,ret1
$neg5:
SUB 0,arg0,arg0
ADDI 1,arg0,arg0
SHD 0,arg0,31,ret1
SH1ADD arg0,arg0,arg0
B $neg
ADDC ret1,0,ret1
$$divU_5:
.EXPORT $$divU_5,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,31,t1
SH1ADD arg0,arg0,arg0
B $pos
ADDC t1,ret1,ret1
$$divI_6:
.EXPORT $$divI_6,MILLICODE
COMB,<,N arg0,0,$neg6
EXTRU arg0,30,31,arg0
ADDI 5,arg0,t1
SH2ADD arg0,t1,arg0
B $pos
ADDC 0,0,ret1
$neg6:
SUBI 2,arg0,arg0
EXTRU arg0,30,31,arg0
SHD 0,arg0,30,ret1
SH2ADD arg0,arg0,arg0
B $neg
ADDC ret1,0,ret1
$$divU_6:
.EXPORT $$divU_6,MILLICODE
EXTRU arg0,30,31,arg0
ADDI 1,arg0,arg0
SHD 0,arg0,30,ret1
SH2ADD arg0,arg0,arg0
B $pos
ADDC ret1,0,ret1
$$divU_10:
.EXPORT $$divU_10,MILLICODE
EXTRU arg0,30,31,arg0
ADDI 3,arg0,t1
SH1ADD arg0,t1,arg0
ADDC 0,0,ret1
; Shared positive-result tail: fold the ret1:arg0 partial sum with
; 4-, 8- and 16-bit shift-adds, result in ret1.
$pos:
SHD ret1,arg0,28,t1
SHD arg0,0,28,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
$pos_for_17:
SHD ret1,arg0,24,t1
SHD arg0,0,24,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
SHD ret1,arg0,16,t1
SHD arg0,0,16,t2
ADD arg0,t2,arg0
bv 0(r31)
ADDC ret1,t1,ret1
$$divI_10:
.EXPORT $$divI_10,MILLICODE
COMB,< arg0,0,$neg10
COPY 0,ret1
EXTRU arg0,30,31,arg0
ADDIB,TR 1,arg0,$pos
SH1ADD arg0,arg0,arg0
$neg10:
SUBI 2,arg0,arg0
EXTRU arg0,30,31,arg0
SH1ADD arg0,arg0,arg0
; Shared negative-result tail: same folds as $pos, then negate.
$neg:
SHD ret1,arg0,28,t1
SHD arg0,0,28,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
$neg_for_17:
SHD ret1,arg0,24,t1
SHD arg0,0,24,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
SHD ret1,arg0,16,t1
SHD arg0,0,16,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
bv 0(r31)
SUB 0,ret1,ret1
$$divI_12:
.EXPORT $$divI_12,MILLICODE
COMB,< arg0,0,$neg12
COPY 0,ret1
EXTRU arg0,29,30,arg0
ADDIB,TR 1,arg0,$pos
SH2ADD arg0,arg0,arg0
$neg12:
SUBI 4,arg0,arg0
EXTRU arg0,29,30,arg0
B $neg
SH2ADD arg0,arg0,arg0
$$divU_12:
.EXPORT $$divU_12,MILLICODE
EXTRU arg0,29,30,arg0
ADDI 5,arg0,t1
SH2ADD arg0,t1,arg0
B $pos
ADDC 0,0,ret1
$$divI_15:
.EXPORT $$divI_15,MILLICODE
COMB,< arg0,0,$neg15
COPY 0,ret1
ADDIB,TR 1,arg0,$pos+4
SHD ret1,arg0,28,t1
$neg15:
B $neg
SUBI 1,arg0,arg0
$$divU_15:
.EXPORT $$divU_15,MILLICODE
ADDI 1,arg0,arg0
B $pos
ADDC 0,0,ret1
$$divI_17:
.EXPORT $$divI_17,MILLICODE
COMB,<,N arg0,0,$neg17
ADDI 1,arg0,arg0
SHD 0,arg0,28,t1
SHD arg0,0,28,t2
SUB t2,arg0,arg0
B $pos_for_17
SUBB t1,0,ret1
$neg17:
SUBI 1,arg0,arg0
SHD 0,arg0,28,t1
SHD arg0,0,28,t2
SUB t2,arg0,arg0
B $neg_for_17
SUBB t1,0,ret1
$$divU_17:
.EXPORT $$divU_17,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,28,t1
$u17:
SHD arg0,0,28,t2
SUB t2,arg0,arg0
B $pos_for_17
SUBB t1,ret1,ret1
$$divI_7:
.EXPORT $$divI_7,MILLICODE
COMB,<,N arg0,0,$neg7
$7:
ADDI 1,arg0,arg0
SHD 0,arg0,29,ret1
SH3ADD arg0,arg0,arg0
ADDC ret1,0,ret1
$pos7:
SHD ret1,arg0,26,t1
SHD arg0,0,26,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
SHD ret1,arg0,20,t1
SHD arg0,0,20,t2
ADD arg0,t2,arg0
ADDC ret1,t1,t1
COPY 0,ret1
SHD,= t1,arg0,24,t1
; Iterative 24-bit fold loop for the divide-by-7 family.
$1:
ADDB,TR t1,ret1,$2
EXTRU arg0,31,24,arg0
bv,n 0(r31)
$2:
ADDB,TR t1,arg0,$1
EXTRU,= arg0,7,8,t1
$neg7:
SUBI 1,arg0,arg0
$8:
SHD 0,arg0,29,ret1
SH3ADD arg0,arg0,arg0
ADDC ret1,0,ret1
$neg7_shift:
SHD ret1,arg0,26,t1
SHD arg0,0,26,t2
ADD arg0,t2,arg0
ADDC ret1,t1,ret1
SHD ret1,arg0,20,t1
SHD arg0,0,20,t2
ADD arg0,t2,arg0
ADDC ret1,t1,t1
COPY 0,ret1
SHD,= t1,arg0,24,t1
$3:
ADDB,TR t1,ret1,$4
EXTRU arg0,31,24,arg0
bv 0(r31)
SUB 0,ret1,ret1
$4:
ADDB,TR t1,arg0,$3
EXTRU,= arg0,7,8,t1
$$divU_7:
.EXPORT $$divU_7,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,29,t1
SH3ADD arg0,arg0,arg0
B $pos7
ADDC t1,ret1,ret1
$$divI_9:
.EXPORT $$divI_9,MILLICODE
COMB,<,N arg0,0,$neg9
ADDI 1,arg0,arg0
SHD 0,arg0,29,t1
SHD arg0,0,29,t2
SUB t2,arg0,arg0
B $pos7
SUBB t1,0,ret1
$neg9:
SUBI 1,arg0,arg0
SHD 0,arg0,29,t1
SHD arg0,0,29,t2
SUB t2,arg0,arg0
B $neg7_shift
SUBB t1,0,ret1
$$divU_9:
.EXPORT $$divU_9,MILLICODE
ADDI 1,arg0,arg0
ADDC 0,0,ret1
SHD ret1,arg0,29,t1
SHD arg0,0,29,t2
SUB t2,arg0,arg0
B $pos7
SUBB t1,ret1,ret1
; Divide by 14: halve, then reuse the divide-by-7 body at $7/$8.
$$divI_14:
.EXPORT $$divI_14,MILLICODE
COMB,<,N arg0,0,$neg14
$$divU_14:
.EXPORT $$divU_14,MILLICODE
B $7
EXTRU arg0,30,31,arg0
$neg14:
SUBI 2,arg0,arg0
B $8
EXTRU arg0,30,31,arg0
.exit
.PROCEND
.END
; $$remU -- 32-bit unsigned remainder millicode: rmndr = arg0 rem arg1.
; Same 32-step ds divide structure as $$divU above, but keeping the
; remainder (rmndr = ret1) instead of the quotient.  Divisors that are
; zero or have the high bit set go to special_case: zero traps via
; addit,=; otherwise the remainder is arg0 or arg0-arg1 by a single
; compare.  Clobbers temp (r1); returns through mrp (r31).
rmndr: .EQU ret1 ; r29
.export $$remU,millicode
$$remU:
.proc
.callinfo millicode
.entry
comib,>=,n 0,arg1,special_case
sub r0,arg1,rmndr ; clear carry, negate the divisor
ds r0,rmndr,r0 ; set V-bit to 1
add arg0,arg0,temp ; shift msb bit into carry
ds r0,arg1,rmndr ; 1st divide step, if no carry
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 2nd divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 3rd divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 4th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 5th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 6th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 7th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 8th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 9th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 10th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 11th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 12th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 13th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 14th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 15th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 16th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 17th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 18th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 19th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 20th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 21st divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 22nd divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 23rd divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 24th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 25th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 26th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 27th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 28th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 29th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 30th divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 31st divide step
addc temp,temp,temp ; shift temp with/into carry
ds rmndr,arg1,rmndr ; 32nd divide step,
comiclr,<= 0,rmndr,r0
add rmndr,arg1,rmndr ; correction
; .exit
bv,n 0(r31)
nop
; Putting >= on the last DS and deleting COMICLR does not work!
;_____________________________________________________________________________
; Divisor is zero or has its high bit set.
special_case:
addit,= 0,arg1,r0 ; trap on div by zero
sub,>>= arg0,arg1,rmndr ; rmndr = arg0 - arg1, nullified if it borrows
copy arg0,rmndr ; dividend < divisor: remainder is the dividend
bv,n 0(r31)
nop
.exit
.procend
.end
; Use bv 0(r31) and bv,n 0(r31) instead.
; #define return bv 0(%mrp)
; #define return_n bv,n 0(%mrp)
.subspa $MILLICODE$
.align 16
$$mulI:
.proc
.callinfo millicode
.entry
.export $$mulI, millicode
combt,<<= %r25,%r26,l4 ; swap args if unsigned %r25>%r26
copy 0,%r29 ; zero out the result
xor %r26,%r25,%r26 ; swap %r26 & %r25 using the
xor %r26,%r25,%r25 ; old xor trick
xor %r26,%r25,%r26
l4: combt,<= 0,%r26,l3 ; if %r26>=0 then proceed like unsigned
zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********
sub,> 0,%r25,%r1 ; otherwise negate both and
combt,<=,n %r26,%r1,l2 ; swap back if |%r26|<|%r25|
sub 0,%r26,%r25
movb,tr,n %r1,%r26,l2 ; 10th inst.
l0: add %r29,%r1,%r29 ; add in this partial product
l1: zdep %r26,23,24,%r26 ; %r26 <<= 8 ******************
l2: zdep %r25,30,8,%r1 ; %r1 = (%r25&0xff)<<1 *********
l3: blr %r1,0 ; case on these 8 bits ******
extru %r25,23,24,%r25 ; %r25 >>= 8 ******************
;16 insts before this.
; %r26 <<= 8 **************************
x0: comb,<> %r25,0,l2 ! zdep %r26,23,24,%r26 ! bv,n 0(r31) ! nop
x1: comb,<> %r25,0,l1 ! add %r29,%r26,%r29 ! bv,n 0(r31) ! nop
x2: comb,<> %r25,0,l1 ! sh1add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
x3: comb,<> %r25,0,l0 ! sh1add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
x4: comb,<> %r25,0,l1 ! sh2add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
x5: comb,<> %r25,0,l0 ! sh2add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
x6: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
x7: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r26,%r29,%r29 ! b,n ret_t0
x8: comb,<> %r25,0,l1 ! sh3add %r26,%r29,%r29 ! bv,n 0(r31) ! nop
x9: comb,<> %r25,0,l0 ! sh3add %r26,%r26,%r1 ! bv 0(r31) ! add %r29,%r1,%r29
x10: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
x11: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
x12: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
x13: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r29,%r29 ! b,n ret_t0
x14: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x15: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r1,%r1 ! b,n ret_t0
x16: zdep %r26,27,28,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x17: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r26,%r1,%r1 ! b,n ret_t0
x18: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh1add %r1,%r29,%r29 ! bv,n 0(r31)
x19: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh1add %r1,%r26,%r1 ! b,n ret_t0
x20: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
x21: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
x22: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x23: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x24: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
x25: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
x26: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x27: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r1,%r1 ! b,n ret_t0
x28: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x29: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x30: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x31: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
x32: zdep %r26,26,27,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x33: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
x34: zdep %r26,27,28,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x35: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r26,%r1,%r1
x36: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh2add %r1,%r29,%r29 ! bv,n 0(r31)
x37: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r26,%r1 ! b,n ret_t0
x38: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x39: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x40: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
x41: sh2add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
x42: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x43: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x44: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x45: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! sh2add %r1,%r1,%r1 ! b,n ret_t0
x46: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! add %r1,%r26,%r1
x47: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
x48: sh1add %r26,%r26,%r1 ! comb,<> %r25,0,l0 ! zdep %r1,27,28,%r1 ! b,n ret_t0
x49: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r26,%r1,%r1
x50: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x51: sh3add %r26,%r26,%r1 ! sh3add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x52: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x53: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x54: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x55: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x56: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x57: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x58: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x59: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
x60: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x61: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x62: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x63: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
x64: zdep %r26,25,26,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x65: sh3add %r26,0,%r1 ! comb,<> %r25,0,l0 ! sh3add %r1,%r26,%r1 ! b,n ret_t0
x66: zdep %r26,26,27,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x67: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x68: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x69: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x70: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
x71: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x72: sh3add %r26,%r26,%r1 ! comb,<> %r25,0,l1 ! sh3add %r1,%r29,%r29 ! bv,n 0(r31)
x73: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! add %r29,%r1,%r29
x74: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x75: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x76: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x77: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x78: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x79: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x80: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
x81: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! add %r29,%r1,%r29
x82: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x83: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x84: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x85: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x86: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x87: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r26,%r1,%r1
x88: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x89: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x90: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x91: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x92: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x93: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x94: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r26,%r1,%r1
x95: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x96: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x97: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x98: zdep %r26,26,27,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh1add %r26,%r1,%r1
x99: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x100: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x101: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x102: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x103: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r26,%r1
x104: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x105: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x106: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x107: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh3add %r1,%r26,%r1
x108: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x109: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x110: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x111: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x112: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! zdep %r1,27,28,%r1
x113: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
x114: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x115: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x116: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
x117: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
x118: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0a0 ! sh3add %r1,%r1,%r1
x119: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
x120: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x121: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x122: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x123: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x124: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x125: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x126: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x127: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
x128: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l1 ! add %r29,%r1,%r29 ! bv,n 0(r31)
x129: zdep %r26,24,25,%r1 ! comb,<> %r25,0,l0 ! add %r1,%r26,%r1 ! b,n ret_t0
x130: zdep %r26,25,26,%r1 ! add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x131: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x132: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x133: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x134: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x135: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x136: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x137: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x138: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x139: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
x140: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r1,%r1
x141: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
x142: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
x143: zdep %r26,27,28,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x144: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x145: sh3add %r26,%r26,%r1 ! sh3add %r1,0,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x146: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x147: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x148: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x149: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x150: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x151: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
x152: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x153: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x154: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x155: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x156: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x157: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
x158: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sub %r1,%r26,%r1
x159: zdep %r26,26,27,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x160: sh2add %r26,%r26,%r1 ! sh2add %r1,0,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x161: sh3add %r26,0,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x162: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x163: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r26,%r1
x164: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x165: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x166: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x167: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
x168: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x169: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x170: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x171: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
x172: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x173: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! sh3add %r1,%r1,%r1
x174: zdep %r26,26,27,%r1 ! sh1add %r26,%r1,%r1 ! b e_t04a0 ! sh2add %r1,%r1,%r1
x175: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
x176: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0 ! add %r1,%r26,%r1
x177: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
x178: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
x179: sh2add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r26,%r1
x180: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x181: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x182: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh1add %r1,%r26,%r1
x183: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh1add %r1,%r26,%r1
x184: sh2add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! add %r1,%r26,%r1
x185: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x186: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x187: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
x188: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r26,%r1,%r1
x189: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r1,%r1
x190: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
x191: zdep %r26,25,26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sub %r1,%r26,%r1
x192: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x193: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x194: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x195: sh3add %r26,0,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x196: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x197: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
x198: zdep %r26,25,26,%r1 ! sh1add %r26,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x199: sh3add %r26,0,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x200: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x201: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x202: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x203: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r26,%r1
x204: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
x205: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x206: zdep %r26,25,26,%r1 ! sh2add %r26,%r1,%r1 ! b e_t02a0 ! sh1add %r1,%r1,%r1
x207: sh3add %r26,0,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
x208: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
x209: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_8t0a0 ! add %r1,%r26,%r1
x210: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
x211: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
x212: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
x213: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_4t0a0 ! sh2add %r1,%r26,%r1
x214: sh3add %r26,%r26,%r1 ! sh2add %r26,%r1,%r1 ! b e2t04a0 ! sh3add %r1,%r26,%r1
x215: sh2add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
x216: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x217: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x218: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r26,%r1
x219: sh3add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x220: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0 ! sh1add %r1,%r26,%r1
x221: sh1add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_4t0a0 ! sh1add %r1,%r26,%r1
x222: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x223: sh3add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x224: sh3add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_8t0 ! add %r1,%r26,%r1
x225: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0 ! sh2add %r1,%r1,%r1
x226: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_t02a0 ! zdep %r1,26,27,%r1
x227: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_t02a0 ! sh2add %r1,%r1,%r1
x228: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0 ! sh1add %r1,%r1,%r1
x229: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_4t0a0 ! sh1add %r1,%r1,%r1
x230: sh3add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_5t0 ! add %r1,%r26,%r1
x231: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_3t0 ! sh2add %r1,%r26,%r1
x232: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0 ! sh2add %r1,%r26,%r1
x233: sh1add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e_8t0a0 ! sh2add %r1,%r26,%r1
x234: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0 ! sh3add %r1,%r1,%r1
x235: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e_2t0a0 ! sh3add %r1,%r1,%r1
x236: sh3add %r26,%r26,%r1 ! sh1add %r1,%r26,%r1 ! b e4t08a0 ! sh1add %r1,%r1,%r1
x237: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_3t0 ! sub %r1,%r26,%r1
x238: sh1add %r26,%r26,%r1 ! sh2add %r1,%r26,%r1 ! b e2t04a0 ! sh3add %r1,%r1,%r1
x239: zdep %r26,27,28,%r1 ! sh2add %r1,%r1,%r1 ! b e_t0ma0 ! sh1add %r1,%r1,%r1
x240: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0 ! sh1add %r1,%r1,%r1
x241: sh3add %r26,%r26,%r1 ! add %r1,%r26,%r1 ! b e_8t0a0 ! sh1add %r1,%r1,%r1
x242: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_2t0 ! sh3add %r1,%r26,%r1
x243: sh3add %r26,%r26,%r1 ! sh3add %r1,%r1,%r1 ! b e_t0 ! sh1add %r1,%r1,%r1
x244: sh2add %r26,%r26,%r1 ! sh1add %r1,%r1,%r1 ! b e_4t0 ! sh2add %r1,%r26,%r1
x245: sh3add %r26,0,%r1 ! sh1add %r1,%r1,%r1 ! b e_5t0 ! sh1add %r1,%r26,%r1
x246: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0 ! sh1add %r1,%r1,%r1
x247: sh2add %r26,%r26,%r1 ! sh3add %r1,%r26,%r1 ! b e_2t0a0 ! sh1add %r1,%r1,%r1
x248: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh3add %r1,%r29,%r29
x249: zdep %r26,26,27,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh3add %r1,%r26,%r1
x250: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0 ! sh2add %r1,%r1,%r1
x251: sh2add %r26,%r26,%r1 ! sh2add %r1,%r1,%r1 ! b e_2t0a0 ! sh2add %r1,%r1,%r1
x252: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh2add %r1,%r29,%r29
x253: zdep %r26,25,26,%r1 ! sub %r1,%r26,%r1 ! b e_t0 ! sh2add %r1,%r26,%r1
x254: zdep %r26,24,25,%r1 ! sub %r1,%r26,%r1 ! b e_shift ! sh1add %r1,%r29,%r29
x255: zdep %r26,23,24,%r1 ! comb,<> %r25,0,l0 ! sub %r1,%r26,%r1 ! b,n ret_t0
;1040 insts before this.
ret_t0: bv 0(r31)
e_t0: add %r29,%r1,%r29
e_shift: comb,<> %r25,0,l2
zdep %r26,23,24,%r26 ; %r26 <<= 8 ***********
bv,n 0(r31)
e_t0ma0: comb,<> %r25,0,l0
sub %r1,%r26,%r1
bv 0(r31)
add %r29,%r1,%r29
e_t0a0: comb,<> %r25,0,l0
add %r1,%r26,%r1
bv 0(r31)
add %r29,%r1,%r29
e_t02a0: comb,<> %r25,0,l0
sh1add %r26,%r1,%r1
bv 0(r31)
add %r29,%r1,%r29
e_t04a0: comb,<> %r25,0,l0
sh2add %r26,%r1,%r1
bv 0(r31)
add %r29,%r1,%r29
e_2t0: comb,<> %r25,0,l1
sh1add %r1,%r29,%r29
bv,n 0(r31)
e_2t0a0: comb,<> %r25,0,l0
sh1add %r1,%r26,%r1
bv 0(r31)
add %r29,%r1,%r29
e2t04a0: sh1add %r26,%r1,%r1
comb,<> %r25,0,l1
sh1add %r1,%r29,%r29
bv,n 0(r31)
e_3t0: comb,<> %r25,0,l0
sh1add %r1,%r1,%r1
bv 0(r31)
add %r29,%r1,%r29
e_4t0: comb,<> %r25,0,l1
sh2add %r1,%r29,%r29
bv,n 0(r31)
e_4t0a0: comb,<> %r25,0,l0
sh2add %r1,%r26,%r1
bv 0(r31)
add %r29,%r1,%r29
e4t08a0: sh1add %r26,%r1,%r1
comb,<> %r25,0,l1
sh2add %r1,%r29,%r29
bv,n 0(r31)
e_5t0: comb,<> %r25,0,l0
sh2add %r1,%r1,%r1
bv 0(r31)
add %r29,%r1,%r29
e_8t0: comb,<> %r25,0,l1
sh3add %r1,%r29,%r29
bv,n 0(r31)
e_8t0a0: comb,<> %r25,0,l0
sh3add %r1,%r26,%r1
bv 0(r31)
add %r29,%r1,%r29
.exit
.procend
.end
.import $$divI_2,millicode
.import $$divI_3,millicode
.import $$divI_4,millicode
.import $$divI_5,millicode
.import $$divI_6,millicode
.import $$divI_7,millicode
.import $$divI_8,millicode
.import $$divI_9,millicode
.import $$divI_10,millicode
.import $$divI_12,millicode
.import $$divI_14,millicode
.import $$divI_15,millicode
.export $$divI,millicode
.export $$divoI,millicode
$$divoI:
; $$divoI / $$divI -- 32-bit signed integer divide millicode (PA-RISC).
;
; In:    arg0 = dividend, arg1 = divisor
; Out:   retreg = quotient; return address in r31 (millicode convention)
; $$divoI is the overflow-trapping entry: it special-cases divisor == -1
; so that dividend == 0x80000000 raises an overflow trap (see negative1).
;
; Strategy: divisors 0..15 are handled through the blr dispatch table at
; small_divisor (0 traps; 1 is a copy; 2..15 mostly tail-call dedicated
; $$divI_N routines).  Everything else runs the generic 32-iteration
; conditional-divide (ds) sequence below on |dividend|, then the sign of
; the quotient is corrected from the operand signs.
.proc
.callinfo millicode
.entry
comib,=,n -1,arg1,negative1 ; when divisor == -1
$$divI:
comib,>>=,n 15,arg1,small_divisor
add,>= 0,arg0,retreg ; move dividend, if retreg < 0,
normal1:
sub 0,retreg,retreg ; make it positive
sub 0,arg1,temp ; clear carry,
; negate the divisor
ds 0,temp,0 ; set V-bit to the comple-
; ment of the divisor sign
add retreg,retreg,retreg ; shift msb bit into carry
; 32 unrolled divide steps: each ds conditionally subtracts the divisor
; and each addc shifts the next quotient bit into retreg via carry.
ds r0,arg1,temp ; 1st divide step, if no carry
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 2nd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 3rd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 4th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 5th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 6th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 7th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 8th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 9th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 10th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 11th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 12th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 13th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 14th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 15th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 16th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 17th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 18th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 19th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 20th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 21st divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 22nd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 23rd divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 24th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 25th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 26th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 27th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 28th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 29th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 30th divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 31st divide step
addc retreg,retreg,retreg ; shift retreg with/into carry
ds temp,arg1,temp ; 32nd divide step,
addc retreg,retreg,retreg ; shift last retreg bit into retreg
; If the operand signs differ, negate the (positive) quotient.
xor,>= arg0,arg1,0 ; get correct sign of quotient
sub 0,retreg,retreg ; based on operand signs
bv,n 0(r31)
nop
;______________________________________________________________________
small_divisor:
; Dispatch on divisor 0..15; blr indexes into the two-instruction
; entries below, so each case must stay exactly two instructions long.
blr,n arg1,r0
nop
; table for divisor == 0,1, ... ,15
addit,= 0,arg1,r0 ; trap if divisor == 0
nop
bv 0(r31) ; divisor == 1
copy arg0,retreg
b,n $$divI_2 ; divisor == 2
nop
b,n $$divI_3 ; divisor == 3
nop
b,n $$divI_4 ; divisor == 4
nop
b,n $$divI_5 ; divisor == 5
nop
b,n $$divI_6 ; divisor == 6
nop
b,n $$divI_7 ; divisor == 7
nop
b,n $$divI_8 ; divisor == 8
nop
b,n $$divI_9 ; divisor == 9
nop
b,n $$divI_10 ; divisor == 10
nop
b normal1 ; divisor == 11
add,>= 0,arg0,retreg
b,n $$divI_12 ; divisor == 12
nop
b normal1 ; divisor == 13
add,>= 0,arg0,retreg
b,n $$divI_14 ; divisor == 14
nop
b,n $$divI_15 ; divisor == 15
nop
;______________________________________________________________________
negative1:
; Divisor == -1 (reached only via $$divoI): quotient = -dividend, and
; addo raises the overflow trap for the single unrepresentable case.
sub 0,arg0,retreg ; result is negation of dividend
bv 0(r31)
addo arg0,arg1,r0 ; trap iff dividend==0x80000000 && divisor==-1
.exit
.procend
.subspa $LIT$
___hp_free_copyright:
.export ___hp_free_copyright,data
.align 4
.string "(c) Copyright 1986 HEWLETT-PACKARD COMPANY\x0aTo anyone who acknowledges that this file is provided \"AS IS\"\x0awithout any express or implied warranty:\x0a permission to use, copy, modify, and distribute this file\x0afor any purpose is hereby granted without fee, provided that\x0athe above copyright notice and this notice appears in all\x0acopies, and that the name of Hewlett-Packard Company not be\x0aused in advertising or publicity pertaining to distribution\x0aof the software without specific, written prior permission.\x0aHewlett-Packard Company makes no representations about the\x0asuitability of this software for any purpose.\x0a\x00"
.align 4
.end
|
32bitmicro/newlib-nano-1.0
| 2,774
|
libgloss/pa/crt0.S
|
/* crt0.S -- startup file for hppa.
*
* Copyright (c) 1995 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
.VERSION "1.0"
.COPYRIGHT "crt0.o for the PA"
.DATA
/*
* Set up the standard spaces (sections) These definitions come
* from /lib/pcc_prefix.s.
*/
.TEXT
/*
* stuff we need that's defined elsewhere.
*/
.IMPORT main, CODE
.IMPORT exit, CODE
.IMPORT _bss_start, DATA
.IMPORT _end, DATA
.IMPORT environ, DATA
/*
* start -- set things up so the application will run.
*
*/
.PROC
.CALLINFO SAVE_SP, FRAME=48
.EXPORT $START$,ENTRY
$START$
; Program entry point: establish sp and dp, clear bss, call main, then
; pass main's return value to exit.  Never returns.
/* FIXME: this writes to page zero */
;; setup the %30 (stack pointer) with some memory
ldil L%_stack,%r30
ldo R%_stack(%r30),%r30
;; we need to set %r27 (global data pointer) here too
ldil L%$global$,%r27
ldo R%$global$(%r27),%r27 ; same problem as above
/*
* zerobss -- zero out the bss section
*/
; load the start of bss
ldil L%_bss_start,%r4
ldo R%_bss_start(%r4),%r4
; load the end of bss
ldil L%_end,%r5
ldo R%_end(%r5),%r5
; Zeroes bytes [_bss_start, _end-1], walking downward from _end.
; NOTE(review): if _bss_start == _end (empty bss) the first decrement
; makes the loop run wild -- assumes a non-empty bss; confirm with the
; linker script.
L$bssloop
addi -1,%r5,%r5 ; decrement _bss_end
stb %r0,0(0,%r5) ; we do this by bytes for now even
; though it's slower, it's safer
combf,= %r4,%r5, L$bssloop
nop
ldi 1,%ret0 ; NOTE(review): purpose unclear -- %ret0 is
; overwritten by main's return value; confirm
/*
* Call the main routine from the application to get it going.
* main (argc, argv, environ)
* We pass argv as a pointer to NULL.
*/
; NOTE(review): no arg registers are actually loaded before the call;
; main receives whatever is left in %r26-%r24 -- confirm intent.
ldil L%main,%r22
ble R%main(%sr4,%r22)
copy %r31,%r2 ; set return pointer for the callee
/*
* Call exit() from the C library with the return value from main()
*/
copy %r28,%r26 ; exit status = value returned by main
ldil L%exit,%r22
ble R%exit(%sr4,%r22)
copy %r31,%r2
.PROCEND
/*
* _exit -- Exit from the application. Normally we cause a user trap
* to return to the ROM monitor for another run.
*/
.EXPORT _exit, ENTRY
_exit
.PROC
.CALLINFO
.ENTRY
; Terminate the application by trapping back to the ROM monitor.
;; This just causes a breakpoint exception
break 0x0,0x0
bv,n (%rp) ; in case the monitor resumes us, return to caller
nop
.EXIT
.PROCEND
/*
* _sr4export -- support for called functions. (mostly for GDB)
*/
.EXPORT _sr4export, ENTRY
_sr4export:
.PROC
.CALLINFO
.ENTRY
; Call trampoline (used by GDB): branch to the target address held in
; %r22 within space %sr4, with %rp set so the target returns here.
ble 0(%sr4,%r22)
copy %r31,%rp
; Back from the target: recover the original caller's return pointer
; from its stack slot and do an inter-space return to it.
ldw -24(%sr0,%sp),%rp
ldsid (%sr0,%rp),%r1 ; space id of the return address
mtsp %r1,%sr0
be,n 0(%sr0,%rp)
nop
.EXIT
.PROCEND
|
32bitmicro/newlib-nano-1.0
| 3,149
|
libgloss/pa/op50n-io.S
|
/* op50n-io.S -- low-level I/O routines for the Oki OP50N eval board.
*
* Copyright (c) 1995 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
; Entry points exposed by the OP50N ROM monitor: a table of word-sized
; slots starting at mon_start; each where_* constant is the address of
; the slot holding the corresponding routine pointer (or monitor dp).
mon_start .EQU 0xd0000000
where_dp .EQU mon_start+4*4 ; slot 4: monitor's global data pointer
where_ci .EQU mon_start+14*4 ; slot 14: console char-in routine
where_co .EQU mon_start+15*4 ; slot 15: console char-out routine
where_read .EQU mon_start+22*4 ; slot 22: read routine
where_write .EQU mon_start+23*4 ; slot 23: write routine
/*
*int c = inbyte(wait);
*/
.space $TEXT$
.align 4
.EXPORT inbyte,CODE,ARGW0=GR,RTNVAL=GR
inbyte
.PROC
.CALLINFO CALLER,FRAME=64,SAVE_RP
.ENTRY
; int inbyte(wait) -- read a character via the ROM monitor's char-in
; routine.  Swaps in the monitor's dp for the duration of the call.
stw %r2,-20(0,%r30) ; save rp in frame marker before growing frame
ldo 64(%r30),%r30
stw %r27,-56(0,%r30) ; save my dp
ldil l%where_dp,%r27 ; load monitors dp
ldw r%where_dp(0,%r27),%r27
ldil l%where_ci,%r1
ldw r%where_ci(0,%r1),%r1 ; fetch char-in routine pointer
ble 0(0,%r1)
copy %r31,%r2
ldw -56(0,%r30),%r27 ; load my dp
ldw -84(0,%r30),%r2 ; reload rp (-20 relative to the old sp)
ldo -64(%r30),%r30
bv %r0(%r2)
nop
.EXIT
.PROCEND
/* int c = outbyte(c);
*/
.EXPORT outbyte,CODE,ARGW0=GR,RTNVAL=GR
outbyte
.PROC
.CALLINFO CALLER,FRAME=64,SAVE_RP
.ENTRY
; int outbyte(c) -- write a character via the ROM monitor's char-out
; routine.  Mirrors inbyte: swap dp, call through the vector, restore.
stw %r2,-20(0,%r30) ; save rp in frame marker before growing frame
ldo 64(%r30),%r30
stw %r27,-56(0,%r30) ; save my dp
ldil l%where_dp,%r27 ; load monitors dp
ldw r%where_dp(0,%r27),%r27
ldil l%where_co,%r1
ldw r%where_co(0,%r1),%r1 ; fetch char-out routine pointer
ble 0(0,%r1)
copy %r31,%r2
ldw -56(0,%r30),%r27 ; load my dp
ldw -84(0,%r30),%r2 ; reload rp (-20 relative to the old sp)
ldo -64(%r30),%r30
bv %r0(%r2)
nop
.EXIT
.PROCEND
#if 0
/* Disabled monitor-based read/write wrappers, kept for reference.
* Same dp-swapping pattern as inbyte/outbyte above.
*/
/* cnt = read(fd, bufp, cnt);
*/
.EXPORT read,CODE,ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RTNVAL=NO
read
.PROC
.CALLINFO FRAME=64,CALLS,SAVE_RP
.ENTRY
stw %r2,-20(0,%r30)
ldo 64(%r30),%r30
stw %dp,-56(0,%r30) ; save my dp
ldil l%where_dp,%dp ; load monitors dp
ldw r%where_dp(0,%dp), %dp
ldil l%where_read,%r1
ldw r%where_read(0,%r1), %r1
ble 0(0,%r1)
copy %r31,%r2
ldw -56(0,%r30),%dp ; load my dp
ldw -84(0,%r30),%r2
bv %r0(%r2)
ldo -64(%r30),%r30
.EXIT
.PROCEND
/* cnt = write(fd, bufp, cnt);
*/
.EXPORT write,CODE,ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RTNVAL=NO
write
.PROC
.CALLINFO FRAME=64,CALLS,SAVE_RP
.ENTRY
stw %r2,-20(0,%r30)
ldo 64(%r30),%r30
stw %dp,-56(0,%r30) ; save my dp
ldil l%where_dp,%dp ; load monitors dp
ldw r%where_dp(0,%dp), %dp
ldil l%where_write,%r1
ldw r%where_write(0,%r1), %r1
ble 0(0,%r1)
copy %r31,%r2
ldw -56(0,%r30),%dp ; load my dp
ldw -84(0,%r30),%r2
bv %r0(%r2)
ldo -64(%r30),%r30
.EXIT
.PROCEND
#endif
|
32bitmicro/newlib-nano-1.0
| 2,141
|
libgloss/xstormy16/crt0.s
|
# XSTORMY16 startup code
# Interrupt vectors at 0x8000.
.section .int_vec,"ax"
.global _start
.align 1
_start:
# Hardware interrupt vector table: one jmpf (far jump) per vector,
# in the order fixed by the hardware -- do not reorder entries.
;; Reset, watchdog timer interrupt
jmpf _int_reset
;; base timer interrupt
jmpf _int_basetimer
;; timer 0
jmpf _int_timer0
;; timer 1
jmpf _int_timer1
;; SIO0 interrupt
jmpf _int_sio0
;; SIO1 interrupt
jmpf _int_sio1
;; port0 interrupt
jmpf _int_port0
;; port1 interrupt
jmpf _int_port1
# Reset code, set up memory and call main.
.section .rodata
; Far (32-bit) pointer to the ROM image of the initialized data.
2: .word __rdata
.text
_int_reset:
; Reset handler: set up sp, clear bss, copy .data from ROM to RAM,
; run init hooks, then call main and exit.
;; Set up the stack pointer.
mov r0,#__stack
bz r0,#0,0f ; keep existing sp if __stack is 0
mov sp,r0
0:
;; Zero the data space
; Clears [_edata, _end), i.e. the bss region.
mov r0,#_edata
mov r1,#_end
mov r2,#0
0: mov.w (r0++),r2
blt r0,r1,0b
;; Copy data from ROM into RAM. ROM area may be above 64k,
;; but RAM may not.
; r0 = low word of __rdata, r8 = high word (segment) for movf.w;
; r1/r3 = RAM destination range [__data, _edata).
mov r1,#__data
mov r3,#_edata
mov r4,#2b
mov.w r0,(r4++)
mov.w r2,(r4)
mov r8,r2
;; If _data == _rdata there's no need to copy anything.
bnz r0,r1,0f
bz r2,#0,1f
; Copy loop: bump the high word when the low word wraps past 64k.
0: movf.w r2,(r0++)
bnz r0,#0,2f
add r8,#1
2: mov.w (r1++),r2
blt r1,r3,0b
1:
;; Call hardware init routine
callf _hwinit
;; Call initialization routines
callf _init
;; Set up fini routines to be called from exit
mov r2,#@fptr(_fini)
callf atexit
;; Call main() with empty argc/argv/envp
mov r2,#0
mov r3,#0
mov r4,#0
callf main
;; Exit.
callf exit
;; Should never reach this code.
halt
1: .size _int_reset,1b-_int_reset
# Stub interrupt routines.
.globl _int_timer0
.weak _int_timer0
.globl _int_timer1
.weak _int_timer1
.globl _int_sio0
.weak _int_sio0
.globl _int_sio1
.weak _int_sio1
.globl _int_port0
.weak _int_port0
.globl _int_port1
.weak _int_port1
.globl _int_basetimer
.weak _int_basetimer
# Default interrupt handlers: all weak symbols alias the single iret
# below, so an unhandled interrupt simply returns.  Applications
# override any of them by defining a strong symbol of the same name.
_int_timer0:
_int_timer1:
_int_sio0:
_int_sio1:
_int_port0:
_int_port1:
_int_basetimer:
iret
1: .size _int_timer0,1b-_int_timer0
# Stub hardware init
.globl _hwinit
.weak _hwinit
# Default (weak) hardware-init hook, called from _int_reset before
# _init; applications override _hwinit for board-specific setup.
_hwinit:
ret
# Fixed: the .size directive previously referenced the nonexistent
# symbol _int_hwinit (the label here is _hwinit), so _hwinit had no
# size and the size expression involved an undefined symbol.
1: .size _hwinit,1b-_hwinit
# The first word in .data has address 0, so it's not a good
# idea to use it as its address conflicts with NULL.
# Place a HALT instruction there to try to catch NULL pointer
# dereferences.
# (The halt opcode deliberately occupies the word at address 0.)
.data
halt
|
32bitmicro/newlib-nano-1.0
| 3,872
|
libgloss/xstormy16/crt0_stub.s
|
# XSTORMY16 startup code for GDB stub.
# CPU Data for Sanyo EVA debugger at 0x7F00
.section .cpudata,"ax"
# 256-byte CPU-data block consumed by the debugger; mostly zero
# padding with identification strings and configuration words at
# fixed offsets near the end.  Layout must not change.
.byte 0x00,0x02,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
.byte 0x44,0x35,0x39,0x52,0x30,0x30,0x30,0x30,0x2E,0x4F,0x50,0x54,0x00,0x00,0x00,0x00 ; ASCII "D59R0000.OPT"
.byte 0x4c,0x43,0x35,0x39,0x52,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00 ; ASCII "LC59R0000"
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x80,0x00,0x20,0x48,0x00,0x00,0x00
.byte 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x20,0x01,0x12,0x31,0x23,0x59
# Interrupt vectors at 0x8000.
.section .int_vec,"ax"
.global _start
.align 1
_start:
# Hardware interrupt vectors (order fixed by hardware), followed at
# offset 0x80 by the GDB-stub service vectors.
;; Reset, watchdog timer interrupt
jmpf _int_reset
;; base timer interrupt
jmpf _int_basetimer
;; timer 0
jmpf _int_timer0
;; timer 1
jmpf _int_timer1
;; SIO0 interrupt
jmpf _int_sio0
;; SIO1 interrupt
jmpf _int_sio1
;; port0 interrupt
jmpf _int_port0
;; port1 interrupt
jmpf _int_port1
.org 0x80
;; sys interrupt (0x8080)
jmpf _int_sys
;; Application void write(char *buf, int nbytes)
;; This jmps to a stub function to packetize the buf for GDB
jmpf gdb_write
;; Application int read(char *buf, int nbytes)
jmpf gdb_read
.text
# Reset code, set up memory and call main.
_int_reset:
; Stub reset: establish sp, clear bss, bring up the UART, enable the
; illegal-instruction trap, then spin on a breakpoint so the GDB stub
; takes control.
;; Set up the application stack pointer.
mov sp,#0x002 ; NOTE(review): sp = 0x002 looks unusually low --
; confirm against the board's memory map
;; Zero the data space
; Clears [_edata, _end), i.e. the bss region.
mov r0,#_edata
mov r1,#_end
mov r2,#0
0: mov.w (r0++),r2
blt r0,r1,0b
;; Init the UART
callf uart_init
;; Turn on illegal insn trap
mov r0,r14
set1 r0,#11
mov r14,r0
; NOTE(review): writes to control registers 0x7f08/0x7f09 -- semantics
; taken on trust from the original; confirm against the CPU manual.
mov.b 0x7f08,#0x11
mov.b 0x7f09,#0x10
;; "breakpoint" sends us into stub.
0:
.hword 0x0006
br 0b
_int_sys:
; GDB-stub trap handler.  Saves the full register set (r0-r13, PSW,
; SP, PC) into the `registers` buffer in the order the stub expects,
; switches to a private stub stack, calls handle_exception, then
; restores everything from the buffer and irets.  Statement order is
; load-bearing throughout -- r13 is saved first so it can serve as the
; buffer pointer, and PSW/PC are recovered from the interrupt frame.
push r13
mov r13,#registers
mov.w (r13++),r0
mov.w (r13++),r1
mov.w (r13++),r2
mov.w (r13++),r3
mov.w (r13++),r4
mov.w (r13++),r5
mov.w (r13++),r6
mov.w (r13++),r7
; r8-r12 cannot be stored directly; bounce them through r0.
mov r0,r8
mov.w (r13++),r0
mov r0,r9
mov.w (r13++),r0
mov r0,r10
mov.w (r13++),r0
mov r0,r11
mov.w (r13++),r0
mov r0,r12
mov.w (r13++),r0
pop r0
mov.w (r13++),r0 ; R13
pop r0
mov.w (r13++),r0 ; PSW
mov r0,r15
sub r0,#4 ; undo the interrupt frame still on the stack
mov.w (r13++),r0 ; SP
pop r0
pop r1
mov.w (r13++),r1 ; PCL
mov.w (r13++),r0 ; PCH
;; switch to stub stack and invoke stub
mov sp,#0x700
callf handle_exception
; Restore in reverse order, walking the buffer back down from the end.
mov r0,#registers+34
mov.w r1,(r0) ; PCH
mov.w r2,(--r0) ; PCL
mov.w r3,(--r0) ; SP
mov r15,r3
push r2
push r1
mov.w r1,(--r0) ; PSW
push r1
mov.w r1,(--r0)
mov r13,r1
mov.w r1,(--r0)
mov r12,r1
mov.w r1,(--r0)
mov r11,r1
mov.w r1,(--r0)
mov r10,r1
mov.w r1,(--r0)
mov r9,r1
mov.w r1,(--r0)
mov r8,r1
mov.w r7,(--r0)
mov.w r6,(--r0)
mov.w r5,(--r0)
mov.w r4,(--r0)
mov.w r3,(--r0)
mov.w r2,(--r0)
mov.w r1,(--r0)
mov.w r0,(--r0)
iret
1: .size _int_sys,1b-_int_sys
|
32bitmicro/newlib-nano-1.0
| 2,388
|
libgloss/xstormy16/syscalls.S
|
# xstormy16 system calls for the simulator
#include <syscall.h>
.text
# Each stub below follows the same pattern: load the syscall number
# into r1, trap into the simulator with the 2-byte opcode 0x0001, then
# branch to syscall_error if the simulator reported failure (r1 != 0).
# The syscall's result value is returned in r2 (see syscall_error).
# NOTE(review): every .size directive below names the symbol without
# its leading underscore (e.g. ".size exit" for "_exit") -- confirm
# whether this is intentional or a dropped "_".
.globl _exit
_exit:
mov r1,#SYS_exit
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size exit,0b-_exit
.globl _open
_open:
mov r1,#SYS_open
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size open,0b-_open
.globl _close
_close:
mov r1,#SYS_close
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size close,0b-_close
.globl _read
_read:
mov r1,#SYS_read
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size read,0b-_read
.globl _write
_write:
mov r1,#SYS_write
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size write,0b-_write
.globl _lseek
_lseek:
mov r1,#SYS_lseek
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size lseek,0b-_lseek
.globl _unlink
_unlink:
mov r1,#SYS_unlink
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size unlink,0b-_unlink
.globl _getpid
_getpid:
mov r1,#SYS_getpid
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size getpid,0b-_getpid
.globl _kill
_kill:
mov r1,#SYS_kill
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size kill,0b-_kill
.globl _fstat
_fstat:
mov r1,#SYS_fstat
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size fstat,0b-_fstat
.globl _chdir
_chdir:
mov r1,#SYS_chdir
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size chdir,0b-_chdir
.globl _stat
_stat:
mov r1,#SYS_stat
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size stat,0b-_stat
.globl _chmod
_chmod:
mov r1,#SYS_chmod
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size chmod,0b-_chmod
.globl _utime
_utime:
mov r1,#SYS_utime
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size utime,0b-_utime
.globl _time
_time:
mov r1,#SYS_time
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size time,0b-_time
.globl _gettimeofday
_gettimeofday:
mov r1,#SYS_gettimeofday
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size gettimeofday,0b-_gettimeofday
.globl _times
_times:
mov r1,#SYS_times
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size times,0b-_times
.globl _link
_link:
mov r1,#SYS_link
.hword 0x0001
bnz r1,#0,syscall_error
ret
0: .size link,0b-_link
# Common error tail: publish the simulator's error code through errno
# and return the (already-set) failure value in r2 to the caller.
syscall_error:
# Return value for the syscall is in r2. Save it here, as
# _errno will overwrite it with the address of the errno
# variable. r0 is the errno.
push r2
push r0
callf __errno
# __errno left &errno in r2; store the saved error code into it.
pop r0
mov.w (r2),r0
pop r2
ret
0: .size syscall_error,0b-syscall_error
|
32bitmicro/newlib-nano-1.0
| 10,578
|
libgloss/mep/sim-crt0.S
|
# Copyright (c) 2003 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the BSD
# License. This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY expressed or implied, including the implied
# warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. A copy of
# this license is available at http://www.opensource.org/licenses. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the BSD License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Toshiba Media Processor startup file (crt0.S)
#
# Designed for user programs running in the 0-2Mb startup section.
# Designed for the simulator by default.
#
# Exception/Interrupt Handler Locations
# CFG.EVM CFG.EVA CFG.IVA Exception INTn
## 0 - - 0x0000_0000 0x0000_0030 rom
## 1 0 0 0x0020_0000 0x0020_0030 local RAM / local RAM
## 1 1 0 0x0080_0000 0x0020_0000 ext RAM / local RAM
## 1 0 1 0x0020_0000 0x0080_0000 local RAM / ext RAM
## 1 1 1 0x0080_0000 0x0080_0030 ext RAM / ext RAM
#
# Exceptions
# Reset 0x0000_0000
# NMI 0x0000_0000+4
# RI (Base Address) +0x08
# ZDIV (Base Address) +0x0C
# BRK (Base Address) +0x10
# SWI (Base Address) +0x14
# DSP (Base Address) +0x1C
# COP (Base Address) +0x20
.set _local_ram_base, 0x00200000
.set _ext_ram_base, 0x00800000
.set _int_base_offset, 0x30
#include "syscall.h"
# Bitfield test helpers.  Each macro extracts the \length-bit field
# starting at bit \start of control register \reg into $0, then
# branches to \label on the stated condition.  All of them CLOBBER $0.
# Branch to \label if the field is zero.
.macro if_bitfield_zero reg, start, length, label
ldc $0, \reg
srl $0, \start
and3 $0, $0, (1 << \length) - 1
beqz $0,\label
.endm
# Branch to \label if the field is not equal to \N.
.macro if_bitfield_notN reg, start, length, N, label
ldc $0, \reg
srl $0, \start
and3 $0, $0, (1 << \length) - 1
bnei $0,\N, \label
.endm
# Branch to \label if the field equals \N.
.macro if_bitfield_eqN reg, start, length, N, label
ldc $0, \reg
srl $0, \start
and3 $0, $0, (1 << \length) - 1
beqi $0,\N, \label
.endm
# Branch to \label if the field is less than \N.
.macro if_bitfield_ltN reg, start, length, N, label
ldc $0, \reg
srl $0, \start
and3 $0, $0, (1 << \length) - 1
blti $0,\N, \label
.endm
.section .hwinit, "ax"
# Cache-enable helpers, called from _start via jsr (see .Lenable_icache
# and .Lenable_dcache below).  Each sets the corresponding enable bit
# in $cfg; the nop padding lets the stc to $cfg take effect before ret.
# CCFG.ICSZ
# Skip enabling if the icache size field (CCFG[22:16]) is zero.
# NOTE(review): this size check sits *before* the __enable_icache
# label, so a jsr to __enable_icache bypasses it -- confirm the
# intended entry point for this routine.
if_bitfield_zero reg=$ccfg, start=16, length=7, label=.Lend_enable_icache
__enable_icache:
# set ICE(cfg[1])
ldc $1,$cfg
or3 $1,$1,2
stc $1,$cfg
nop
nop
nop
nop
nop
.Lend_enable_icache:
ret
__enable_dcache:
# CCFG.DCSZ
# Skip enabling if the dcache size field (CCFG[6:0]) is zero.
if_bitfield_zero reg=$ccfg, start=0, length=7, label=.Lend_enable_dcache
# set DCE(cfg[0])
ldc $1,$cfg
or3 $1,$1,1
stc $1,$cfg
nop
nop
nop
nop
nop
ret
.Lend_enable_dcache:
# NOTE(review): unlike the icache variant, .Lend_enable_dcache sits
# *after* the ret, so the DCSZ==0 path has no ret of its own and falls
# past the end of this routine -- confirm against the upstream source.
.text
#ifdef NOVEC
.global _reset
_reset:
#endif
.global _start
_start:
mov $fp, 0 # for unwinding
# $sp set
movh $sp, %uhi(__stack_table)
or3 $sp, $sp, %lo(__stack_table)
# initialize sp, gp, tp
# get CPU ID
ldc $0, $id
srl $0, 16
# load ID-specific stack pointer
sl2ad3 $0, $0, $sp # $0 = ($0 << 2) + $sp
lw $sp,($0) # $sp = *($0)
mov $0,0xfffffff8
and $sp, $0
#ifndef NOVEC
# copy exception vector table
# RCFG.IRSZ
if_bitfield_zero reg=$rcfg, start=16, length=7, label=.Lend_ev_imem
# handle imem
movh $11,%uhi(_local_ram_base)
or3 $11,$11,%lo(_local_ram_base)
# clear CFG.EVA ([23])
ldc $0,$cfg
movh $1, %uhi(0xff7fffff)
or3 $1, $1, %lo(0xff7fffff)
and $0,$1
stc $0,$cfg
bra .Ldo_repeat_ev
.Lend_ev_imem:
#ifdef UseSDRAM
movh $11,%uhi(_ext_ram_base)
or3 $11,$11,%lo(_ext_ram_base)
# set CFG.EVA ([23])
ldc $0,$cfg
movh $1,%uhi(1<<23)
or3 $1,$1,%lo(1<<23)
or $0,$1
stc $0,$cfg
#else
# handle ROM
bra .Lend_ev
#endif
.Ldo_repeat_ev:
# set CFG.EVM ([4])
ldc $0,$cfg
or3 $0,$0,(1<<4)
stc $0,$cfg
# copy _exception_table to $11
movh $12,%uhi(_exception_table)
or3 $12,$12,%lo(_exception_table)
mov $13,8
repeat $13,.Lrepeat_ev
lw $1,0($12)
add $12,4
.Lrepeat_ev:
sw $1,0($11)
add $11,4
.Lend_ev:
# copy interrupt vector table
# RCFG.IRSZ
if_bitfield_zero reg=$rcfg, start=16, length=7, label=.Lend_iv_imem
# handle imem
movh $11,%uhi(_local_ram_base)
or3 $11,$11,%lo(_int_base_offset)
# clear CFG.IVA ([22])
ldc $0,$cfg
movh $1,%uhi(0xffbfffff) # ~(1<<22)
or3 $1,$1,%lo(0xffbfffff)
and $0,$1
stc $0,$cfg
bra .Ldo_repeat_iv
.Lend_iv_imem:
#ifdef UseSDRAM
movh $11,%uhi(_ext_ram_base)
or3 $11,$11,%lo(_int_base_offset)
# set CFG. IVA ([22])
ldc $0,$cfg
movh $1,%uhi(1<<22)
or3 $1,$1,%lo(1<<22)
or $0,$1
stc $0,$cfg
#else
# handle ROM
bra .Lend_iv
#endif
.Ldo_repeat_iv:
# set CFG.IVM ([3])
ldc $0,$cfg
or3 $0,$0,(1<<3)
stc $0,$cfg
# copy _interrupt_table to $11
movh $12,%uhi(_interrupt_table)
or3 $12,$12,%lo(_interrupt_table)
mov $13,32
add $13,-1
and3 $13,$13,127
repeat $13,.Lrepeat_iv
lw $1,0($12)
add $12,4
.Lrepeat_iv:
sw $1,0($11)
add $11,4
.Lend_iv:
# initialize instruction cache
# Icache Size CCFG.ICSZ ([22..16]) KByte
if_bitfield_zero reg=$ccfg, start=16, length=7, label=.Lend_ic
mov $3,$0 # cache size in KB
# ID.ID
if_bitfield_ltN reg=$ID, start=8, length=8, N=3, label=.Lend_mepc3_ic
# Line Size CCFG.ICSZ ([26..24]) Byte
if_bitfield_ltN reg=$ccfg, start=24, length=3, N=2, label=.Lend_ic
bgei $0,5,.Lend_ic
add3 $1,$0,3 # bit width of line size
mov $0,$3
# clear tag
mov $2,10
sub $2,$1
sll $0,$2 # *KByte/(line size)
add $0,-1
mov $2,1
sll $2,$1 # line size
bra .Ldo_repeat_icache
.Lend_mepc3_ic:
# ICache: $0 KByte
mov $0,$3
# clear tag
sll $0,(10-5) # *KByte/(32byte=linesize)
add $0,-1
mov $2,32
.Ldo_repeat_icache:
mov $1,0
bra 0f
# Align this code on an 8 byte boundary in order to keep the repeat
# loop entirely within the instruction fetch buffer.
.p2align 3
0:
movh $3,%hi(0x00310000) # for tag
repeat $0,.Lrepeat_icache
add $0,-1
.Lrepeat_icache:
sw $1,0($3)
add3 $3,$3,$2
.Lenable_icache:
movh $1,%hi(__enable_icache)
add3 $1,$1,%lo(__enable_icache)
jsr $1
.Lend_ic:
# initialize data cache
# Dcache Size CCFG.DCSZ ([6..0]) KByte
if_bitfield_zero reg=$ccfg, start=0, length=7, label=.Lend_dc
mov $3,$0 # cache size in KB
# ID.ID
if_bitfield_ltN reg=$ID, start=8, length=8, N=3, label=.Lend_mepc3_dc
# Line Size CCFG.DCSZ ([10..8]) Byte
if_bitfield_ltN reg=$ccfg, start=8, length=3, N=2, label=.Lend_dc
bgei $0,5,.Lend_dc
add3 $1,$0,3 # bit width of line size
mov $0,$3
# clear tag
mov $2,10
sub $2,$1
sll $0,$2 # *KByte/(line size)
add $0,-1
mov $2,1
sll $2,$1 # line size
bra .Ldo_repeat_dcache
.Lend_mepc3_dc:
# DCache: $0 KByte
mov $0,$3
# clear tag
sll $0,(10-5) # *KByte/(32byte=linesize)
add $0,-1
mov $2,32
.Ldo_repeat_dcache:
mov $1,0
movh $3,%hi(0x00330000) # for tag
repeat $0,.Lrepeat_dcache
add $0,-1
.Lrepeat_dcache:
sw $1,0($3)
add3 $3,$3,$2
.Lenable_dcache:
movh $1,%hi(__enable_dcache)
add3 $1,$1,%lo(__enable_dcache)
jsr $1
.Lend_dc:
# NOVEC
#endif
mov $0, 0
movh $gp, %uhi(__sdabase)
or3 $gp, $gp, %lo(__sdabase)
movh $tp, %uhi(__tpbase)
or3 $tp, $tp, %lo(__tpbase)
# zero out BSS
movh $1, %uhi(__bss_start)
or3 $1, $1, %lo(__bss_start)
mov $2, 0
movh $3, %uhi(_end)
or3 $3, $3, %lo(_end)
sub $3, $1
bsr memset
movh $1, %uhi(__sbss_start)
or3 $1, $1, %lo(__sbss_start)
mov $2, 0
movh $3, %uhi(__sbss_end)
or3 $3, $3, %lo(__sbss_end)
sub $3, $1
bsr memset
movh $1, %uhi(__farbss_start)
or3 $1, $1, %lo(__farbss_start)
mov $2, 0
movh $3, %uhi(__farbss_end)
or3 $3, $3, %lo(__farbss_end)
sub $3, $1
bsr memset
# enable interrupts
ei
# construct global class variables
bsr __invoke_init_section
# invoke main
mov $1, 0 # argc, argv, envp
mov $2, 0
mov $3, 0
bsr main
mov $1, $0
bsr exit
# _exit(status in $1): run the .fini destructors at most once, then
# either trap to the simulator's SYS_exit or (NOSIM builds) spin.
.global _exit
_exit:
# Prevent _exit recursion
# _exit_in_progress is a one-shot flag: a destructor run by
# __invoke_fini_section may itself end up back in _exit.
movh $3, %uhi(_exit_in_progress)
or3 $3, $3, %lo(_exit_in_progress)
lw $5, ($3)
bnez $5, _skip_fini
mov $5, 1
sw $5, ($3)
# We don't need to preserve $5 because we're going to exit anyway.
# Stash the exit status across the fini call.
mov $5,$1
# destruct global class variables
bsr __invoke_fini_section
mov $1,$5
_skip_fini:
#ifdef NOSIM
# No simulator to exit to: park the CPU in a tight loop.
_exit_loop:
bra _exit_loop
#else
# Encode the simulator trap opcode for SYS_exit (status is in $1).
# The ret is only reached if the simulator resumes us.
.2byte 0x7800 | ((SYS_exit & 0xe) << 7) | ((SYS_exit & 1) << 4)
ret
#endif
.data
# One-shot guard flag tested/set above.
_exit_in_progress: .word 0
# For these two, the epilogue is in crtn.S
.section .init
__invoke_init_section:
add $sp, -8
ldc $0, $lp
sw $0, ($sp)
.section .fini
__invoke_fini_section:
add $sp, -8
ldc $0, $lp
sw $0, ($sp)
#ifndef NOVEC
.section .vec, "ax"
.core
.org 0x0, 0
.global _exception_table
.type _exception_table,@function
_exception_table:
.p2align 2
.org 0x0000, 0
.global _reset
_reset:
jmp _handler_RESET
.org 0x0004, 0
jmp _handler_NMI
.org 0x0008, 0
jmp _handler_RI
.org 0x000c, 0
jmp _handler_ZDIV
.org 0x0010, 0
jmp _handler_BRK
.org 0x0014, 0
jmp _handler_SWI
.org 0x0018, 0
jmp _handler_DEBUG
.org 0x001c, 0
jmp _handler_DSP
.org 0x0020, 0
jmp _handler_COP
.org 0x30, 0
.global _interrupt_table
.type _interrupt_table,@function
_interrupt_table:
.org 0x0030
jmp _handler_INT0
.org 0x0034
jmp _handler_INT1
.org 0x0038
jmp _handler_INT2
.org 0x003c
jmp _handler_INT3
.org 0x0040
jmp _handler_INT4
.org 0x0044
jmp _handler_INT5
.org 0x0048
jmp _handler_INT6
.org 0x004c
jmp _handler_INT7
.org 0x0050
jmp _handler_INT8
.org 0x0054
jmp _handler_INT9
.org 0x0058
jmp _handler_INT10
.org 0x005c
jmp _handler_INT11
.org 0x0060
jmp _handler_INT12
.org 0x0064
jmp _handler_INT13
.org 0x0068
jmp _handler_INT14
.org 0x006c
jmp _handler_INT15
.org 0x0070
jmp _handler_INT16
.org 0x0074
jmp _handler_INT17
.org 0x0078
jmp _handler_INT18
.org 0x007c
jmp _handler_INT19
.org 0x0080
jmp _handler_INT20
.org 0x0084
jmp _handler_INT21
.org 0x0088
jmp _handler_INT22
.org 0x008c
jmp _handler_INT23
.org 0x0090
jmp _handler_INT24
.org 0x0094
jmp _handler_INT25
.org 0x0098
jmp _handler_INT26
.org 0x009c
jmp _handler_INT27
.org 0x00a0
jmp _handler_INT28
.org 0x00a4
jmp _handler_INT29
.org 0x00a8
jmp _handler_INT30
.org 0x00ac
jmp _handler_INT31
# NOVEC
#endif
|
32bitmicro/newlib-nano-1.0
| 1,518
|
libgloss/mep/syscalls.S
|
/*
* Copyright (c) 2003 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use, modify,
* copy, or redistribute it subject to the terms and conditions of the BSD
* License. This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY expressed or implied, including the implied
* warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. A copy
* of this license is available at http://www.opensource.org/licenses. Any
* Red Hat trademarks that are incorporated in the source code or documentation
* are not subject to the BSD License and may only be used or replicated with
* the express permission of Red Hat, Inc.
*/
#include "syscall.h"
/* Return ABI: $1 is errno, $0 is return value. */
/* Emit one weak syscall stub: the 0x7800-family opcode traps into the
   simulator with the syscall number SYS_<n> encoded in its immediate
   bits, then the stub falls through to the common sysret epilogue.
   Per the file header, the trap returns with $0 = result, $1 = errno. */
#define S(n) \
.global n ; .weak n ; n: ; \
.2byte 0x7800 | ((SYS_##n & 0xe) << 7) | ((SYS_##n & 1) << 4) ; bra sysret
.text
/* Aliases so S() can emit differently-named entry points. */
#define SYS___mep_write SYS_write
#define SYS___mep_read SYS_read
#define SYS__Sid_config SYS_reconfig
S(open)
S(close)
S(__mep_read)
S(__mep_write)
S(lseek)
S(unlink)
S(getpid)
S(kill)
S(fstat)
/* ARGV support. */
S(argvlen)
S(argv)
/* These are extras added for one reason or another. */
S(chdir)
S(stat)
S(chmod)
S(utime)
S(time)
S(gettimeofday)
S(times)
S(link)
S(_Sid_config)
/* Common epilogue: publish $1 into the C-level errno, preserve the
   result value in $0, and return to the original caller. */
sysret:
add3 $sp, $sp, -16
sw $0, 0($sp)
sw $1, 4($sp)
/* $lp must survive the bsr below. */
ldc $2, $lp
sw $2, 8($sp)
bsr __errno
/* __errno returned &errno in $0; store the saved error code there. */
lw $1, 4($sp)
sw $1, ($0)
lw $0, 0($sp)
lw $2, 8($sp)
stc $2, $lp
add3 $sp, $sp, 16
ret
|
32bitmicro/newlib-nano-1.0
| 1,030
|
libgloss/rs6000/mvme-outbyte.S
|
/*
* mvme-outbyte.S -- outbyte function for targets using the ppcbug monitor
*
* Copyright (c) 1995 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "ppc-asm.h"
.file "mvme-outbyte.S"
.text
/* outbyte(ch) -- emit one character through the PPCbug monitor.
   Each routine here loads a monitor request code into r10 and traps
   with sc; the monitor performs the I/O and returns to the caller.
   NOTE(review): 0x20/0x21/0x22/0x26 are presumably the PPCbug
   .OUTCHR/.OUTSTR/.OUTLN/.PCRLF service codes -- confirm against the
   PPCbug manual. */
FUNC_START(outbyte)
li r10,0x20
sc
blr
FUNC_END(outbyte)
/* __outstr -- emit a string (monitor request 0x21). */
FUNC_START(__outstr)
li r10,0x21
sc
blr
FUNC_END(__outstr)
/* __outln -- emit a string plus line terminator (monitor request 0x22). */
FUNC_START(__outln)
li r10,0x22
sc
blr
FUNC_END(__outln)
/* __pcrlf -- emit a CR/LF pair (monitor request 0x26). */
FUNC_START(__pcrlf)
li r10,0x26
sc
blr
FUNC_END(__pcrlf)
|
32bitmicro/newlib-nano-1.0
| 7,072
|
libgloss/rs6000/sol-syscall.S
|
/*
* solaris-syscall.S -- System call stubs for Solaris.
*
* Copyright (c) 1996 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "ppc-asm.h"
.file "solaris-syscall.S"
#define SYS_syscall 0
#define SYS_exit 1
#define SYS_fork 2
#define SYS_read 3
#define SYS_write 4
#define SYS_open 5
#define SYS_close 6
#define SYS_wait 7
#define SYS_creat 8
#define SYS_link 9
#define SYS_unlink 10
#define SYS_exec 11
#define SYS_chdir 12
#define SYS_time 13
#define SYS_mknod 14
#define SYS_chmod 15
#define SYS_chown 16
#define SYS_brk 17
#define SYS_stat 18
#define SYS_lseek 19
#define SYS_getpid 20
#define SYS_mount 21
#define SYS_umount 22
#define SYS_setuid 23
#define SYS_getuid 24
#define SYS_stime 25
#define SYS_ptrace 26
#define SYS_alarm 27
#define SYS_fstat 28
#define SYS_pause 29
#define SYS_utime 30
#define SYS_stty 31
#define SYS_gtty 32
#define SYS_access 33
#define SYS_nice 34
#define SYS_statfs 35
#define SYS_sync 36
#define SYS_kill 37
#define SYS_fstatfs 38
#define SYS_pgrpsys 39
#define SYS_xenix 40
#define SYS_dup 41
#define SYS_pipe 42
#define SYS_times 43
#define SYS_profil 44
#define SYS_plock 45
#define SYS_setgid 46
#define SYS_getgid 47
#define SYS_signal 48
#define SYS_msgsys 49
#define SYS_syssun 50
#define SYS_sysi86 50
#define SYS_sysppc 50
#define SYS_acct 51
#define SYS_shmsys 52
#define SYS_semsys 53
#define SYS_ioctl 54
#define SYS_uadmin 55
#define SYS_utssys 57
#define SYS_fdsync 58
#define SYS_execve 59
#define SYS_umask 60
#define SYS_chroot 61
#define SYS_fcntl 62
#define SYS_ulimit 63
#define SYS_rmdir 79
#define SYS_mkdir 80
#define SYS_getdents 81
#define SYS_sysfs 84
#define SYS_getmsg 85
#define SYS_putmsg 86
#define SYS_poll 87
#define SYS_lstat 88
#define SYS_symlink 89
#define SYS_readlink 90
#define SYS_setgroups 91
#define SYS_getgroups 92
#define SYS_fchmod 93
#define SYS_fchown 94
#define SYS_sigprocmask 95
#define SYS_sigsuspend 96
#define SYS_sigaltstack 97
#define SYS_sigaction 98
#define SYS_sigpending 99
#define SYS_context 100
#define SYS_evsys 101
#define SYS_evtrapret 102
#define SYS_statvfs 103
#define SYS_fstatvfs 104
#define SYS_nfssys 106
#define SYS_waitsys 107
#define SYS_sigsendsys 108
#define SYS_hrtsys 109
#define SYS_acancel 110
#define SYS_async 111
#define SYS_priocntlsys 112
#define SYS_pathconf 113
#define SYS_mincore 114
#define SYS_mmap 115
#define SYS_mprotect 116
#define SYS_munmap 117
#define SYS_fpathconf 118
#define SYS_vfork 119
#define SYS_fchdir 120
#define SYS_readv 121
#define SYS_writev 122
#define SYS_xstat 123
#define SYS_lxstat 124
#define SYS_fxstat 125
#define SYS_xmknod 126
#define SYS_clocal 127
#define SYS_setrlimit 128
#define SYS_getrlimit 129
#define SYS_lchown 130
#define SYS_memcntl 131
#define SYS_getpmsg 132
#define SYS_putpmsg 133
#define SYS_rename 134
#define SYS_uname 135
#define SYS_setegid 136
#define SYS_sysconfig 137
#define SYS_adjtime 138
#define SYS_systeminfo 139
#define SYS_seteuid 141
#define SYS_vtrace 142
#define SYS_fork1 143
#define SYS_sigtimedwait 144
#define SYS_lwp_info 145
#define SYS_yield 146
#define SYS_lwp_sema_wait 147
#define SYS_lwp_sema_post 148
#define SYS_modctl 152
#define SYS_fchroot 153
#define SYS_utimes 154
#define SYS_vhangup 155
#define SYS_gettimeofday 156
#define SYS_getitimer 157
#define SYS_setitimer 158
#define SYS_lwp_create 159
#define SYS_lwp_exit 160
#define SYS_lwp_suspend 161
#define SYS_lwp_continue 162
#define SYS_lwp_kill 163
#define SYS_lwp_self 164
#define SYS_lwp_setprivate 165
#define SYS_lwp_getprivate 166
#define SYS_lwp_wait 167
#define SYS_lwp_mutex_unlock 168
#define SYS_lwp_mutex_lock 169
#define SYS_lwp_cond_wait 170
#define SYS_lwp_cond_signal 171
#define SYS_lwp_cond_broadcast 172
#define SYS_pread 173
#define SYS_pwrite 174
#define SYS_llseek 175
#define SYS_inst_sync 176
#define SYS_kaio 178
#define SYS_tsolsys 184
#define SYS_acl 185
#define SYS_auditsys 186
#define SYS_processor_bind 187
#define SYS_processor_info 188
#define SYS_p_online 189
#define SYS_sigqueue 190
#define SYS_clock_gettime 191
#define SYS_clock_settime 192
#define SYS_clock_getres 193
#define SYS_timer_create 194
#define SYS_timer_delete 195
#define SYS_timer_settime 196
#define SYS_timer_gettime 197
#define SYS_timer_getoverrun 198
#define SYS_nanosleep 199
#define SYS_facl 200
#define SYS_door 201
#define SYS_setreuid 202
#define SYS_setregid 203
#define SYS_install_utrap 204
#define SYS_signotifywait 210
#define SYS_lwp_sigredirect 211
#define SYS_lwp_alarm 212
.text
/* _exit(status) -- terminate the process via Solaris SYS_exit.
   Does not return; the trap loop below only exists for the debugger. */
FUNC_START(_exit)
li r0,SYS_exit
sc
/*
 * Insure that the debugger tells the client that the PC is in _exit,
 * not whatever function happens to follow this function.
 */
0: trap
b 0b /* we never should return, but... */
FUNC_END(_exit)
/*
 * SYSCALL(num, name) -- emit one Solaris system-call stub named `name`:
 * load the syscall number into r0, trap with sc, and on kernel-reported
 * failure (CR0.SO set on return from sc) branch to _cerror, which sets
 * errno and returns -1; otherwise return directly to the caller.
 * Arguments and the return value follow the normal SysV PPC ABI.
 */
#define SYSCALL(syscall,name) \
FUNC_START(name); \
li r0,syscall; \
sc; \
bns+ 0f; \
b FUNC_NAME(_cerror); \
0: blr; \
FUNC_END(name)
SYSCALL(SYS_access,access)
SYSCALL(SYS_alarm,alarm)
SYSCALL(SYS_brk,brk)
SYSCALL(SYS_chdir,chdir)
/* BUGFIX: was SYSCALL(SYS_chmod,chomd) -- the misspelling exported a
   symbol `chomd` and left chmod() undefined at link time. */
SYSCALL(SYS_chmod,chmod)
SYSCALL(SYS_chown,chown)
SYSCALL(SYS_close,close)
SYSCALL(SYS_creat,creat)
SYSCALL(SYS_dup,dup)
SYSCALL(SYS_exec,exec)
SYSCALL(SYS_fork,fork)
SYSCALL(SYS_fstat,_fstat)
SYSCALL(SYS_getpid,getpid)
SYSCALL(SYS_ioctl,ioctl)
SYSCALL(SYS_kill,kill)
SYSCALL(SYS_link,link)
SYSCALL(SYS_lseek,lseek)
SYSCALL(SYS_nice,nice)
SYSCALL(SYS_open,open)
SYSCALL(SYS_pause,pause)
SYSCALL(SYS_pipe,pipe)
SYSCALL(SYS_ptrace,ptrace)
SYSCALL(SYS_read,read)
SYSCALL(SYS_signal,signal)
SYSCALL(SYS_stat,_stat)
SYSCALL(SYS_sync,sync)
SYSCALL(SYS_sysppc,sysppc)
SYSCALL(SYS_time,time)
SYSCALL(SYS_times,times)
SYSCALL(SYS_unlink,unlink)
SYSCALL(SYS_wait,wait)
SYSCALL(SYS_write,write)
SYSCALL(SYS_umask,umask)
SYSCALL(SYS_execve,execve)
SYSCALL(SYS_fcntl,fcntl)
SYSCALL(SYS_ulimit,ulimit)
SYSCALL(SYS_mkdir,mkdir)
SYSCALL(SYS_rmdir,rmdir)
SYSCALL(SYS_getdents,getdents)
SYSCALL(SYS_lstat,_lstat)
SYSCALL(SYS_symlink,symlink)
SYSCALL(SYS_readlink,readlink)
SYSCALL(SYS_sigprocmask,sigprocmask)
SYSCALL(SYS_sigsuspend,sigsuspend)
SYSCALL(SYS_sigaction,sigaction)
SYSCALL(SYS_mmap,mmap)
SYSCALL(SYS_mprotect,mprotect)
SYSCALL(SYS_munmap,munmap)
SYSCALL(SYS_fpathconf,fpathconf)
SYSCALL(SYS_vfork,vfork)
SYSCALL(SYS_setrlimit,setrlimit)
SYSCALL(SYS_getrlimit,getrlimit)
SYSCALL(SYS_rename,rename)
SYSCALL(SYS_utimes,utimes)
SYSCALL(SYS_gettimeofday,gettimeofday)
|
32bitmicro/newlib-nano-1.0
| 1,355
|
libgloss/rs6000/sim-crt0.S
|
/*
* crt0.S -- startup file for PowerPC systems.
*
* Copyright (c) 1995 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "ppc-asm.h"
.file "sim-crt0.S"
.text
/* Simulator entry point: the simulator is assumed to have already set
   up a valid stack pointer; we only terminate the back-chain, push one
   frame, and run main.  main's return value in r3 is already exit's
   first argument under the PPC ABI, so no move is needed. */
.globl _start
.type _start,@function
_start:
lis r0,0
stw r0,0(sp) /* clear back chain */
stwu sp,-64(sp) /* push another stack frame */
/* Let her rip */
bl FUNC_NAME(main)
/* return value from main is argument to exit */
bl FUNC_NAME(exit)
.Lstart:
.size _start,.Lstart-_start
.extern FUNC_NAME(atexit)
.globl FUNC_NAME(__atexit)
/* EABI hook: a non-zero __atexit word tells the ctor machinery to
   register __do_global_dtors through atexit (with a load-time fixup). */
.section ".sdata","aw"
.align 2
FUNC_NAME(__atexit): /* tell C's eabi-ctor's we have an atexit function */
.long FUNC_NAME(atexit)@fixup /* and that it is to register __do_global_dtors */
.section ".fixup","aw"
.align 2
.long FUNC_NAME(__atexit)
|
32bitmicro/newlib-nano-1.0
| 1,701
|
libgloss/rs6000/simulator.S
|
/*
* simulator.S -- PowerPC simulator system calls.
*
* Copyright (c) 1995, 2000, 2001 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "ppc-asm.h"
/* System-call stubs for the PowerPC simulator.  Each loads a syscall
   number into r0 and traps with sc; on failure (CR0.SO set on return)
   control goes to _cerror, which sets errno.  bnslr+ returns directly
   to the caller on the success path. */
FUNC_START(_exit)
li r0, 1 /* SYS_exit */
sc
/*
 * Insure that the debugger tells the client that the PC is in _exit,
 * not whatever function happens to follow this function.
 */
0: trap
b 0b /* we never should return, but... */
FUNC_END(_exit)
FUNC_START(read)
li r0,3 /* SYS_read */
sc
bnslr+
b FUNC_NAME(_cerror)
FUNC_END(read)
FUNC_START(write)
li r0,4 /* SYS_write */
sc
bnslr+
b FUNC_NAME(_cerror)
FUNC_END(write)
FUNC_START(open)
li r0,5 /* SYS_open */
sc
bnslr+
b FUNC_NAME(_cerror)
FUNC_END(open)
FUNC_START(close)
li r0,6 /* SYS_close */
sc
bnslr+
b FUNC_NAME(_cerror)
FUNC_END(close)
FUNC_START(brk)
li r0,17 /* SYS_brk */
sc
bnslr+
b FUNC_NAME(_cerror)
FUNC_END(brk)
FUNC_START(access)
li r0,33 /* SYS_access */
sc
bnslr+
b FUNC_NAME(_cerror)
FUNC_END(access)
FUNC_START(dup)
li r0,41 /* SYS_dup */
sc
bnslr+
b FUNC_NAME(_cerror)
FUNC_END(dup)
FUNC_START(gettimeofday)
li r0,116 /* simulator gettimeofday number */
sc
bns+ 0f
b FUNC_NAME(_cerror)
0: blr
FUNC_END(gettimeofday)
FUNC_START(lseek)
li r0,199 /* simulator lseek number */
sc
bnslr+
b FUNC_NAME(_cerror)
FUNC_END(lseek)
|
32bitmicro/newlib-nano-1.0
| 5,580
|
libgloss/rs6000/xil-crt0.S
|
/*-----------------------------------------------------------------------------
//
// Copyright (c) 2004, 2009 Xilinx, Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of Xilinx nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//---------------------------------------------------------------------------*/
.file "xil-crt0.S"
.section ".got2","aw"
.align 2
.LCTOC1 = . + 32768
.Lsbss_start = .-.LCTOC1
.long __sbss_start
.Lsbss_end = .-.LCTOC1
.long __sbss_end
.Lbss_start = .-.LCTOC1
.long __bss_start
.Lbss_end = .-.LCTOC1
.long __bss_end
.Lstack = .-.LCTOC1
.long __stack
.Lsda = .-.LCTOC1
.long _SDA_BASE_ /* address of the first small data area */
.Lsda2 = .-.LCTOC1
.long _SDA2_BASE_ /* address of the second small data area */
.text
.globl _start
_start:
bl __cpu_init /* Initialize the CPU first (BSP provides this) */
lis 5,.LCTOC1@h
ori 5,5,.LCTOC1@l
lwz 13,.Lsda(5) /* load r13 with _SDA_BASE_ address */
lwz 2,.Lsda2(5) /* load r2 with _SDA2_BASE_ address */
#ifndef SIMULATOR
/* clear sbss */
lwz 6,.Lsbss_start(5) /* calculate beginning of the SBSS */
lwz 7,.Lsbss_end(5) /* calculate end of the SBSS */
cmplw 1,6,7
bc 4,4,.Lenclsbss /* If no SBSS, no clearing required */
li 0,0 /* zero to clear memory */
subf 8,6,7 /* number of bytes to zero */
srwi. 9,8,2 /* number of words to zero */
beq .Lstbyteloopsbss /* Check if the number of bytes was less than 4 */
mtctr 9
addi 6,6,-4 /* adjust so we can use stwu */
.Lloopsbss:
stwu 0,4(6) /* zero sbss */
bdnz .Lloopsbss
.Lstbyteloopsbss:
andi. 9,8,3 /* Calculate how many trailing bytes we have */
beq 0,.Lenclsbss
mtctr 9
addi 6,6,-1 /* adjust, so we can use stbu */
.Lbyteloopsbss:
stbu 0,1(6)
bdnz .Lbyteloopsbss
.Lenclsbss:
.Lstclbss:
/* clear bss */
lwz 6,.Lbss_start(5) /* calculate beginning of the BSS */
lwz 7,.Lbss_end(5) /* calculate end of the BSS */
cmplw 1,6,7
bc 4,4,.Lenclbss /* If no BSS, no clearing required */
li 0,0 /* zero to clear memory */
subf 8,6,7 /* number of bytes to zero */
srwi. 9,8,2 /* number of words to zero */
beq .Lstbyteloopbss /* Check if the number of bytes was less than 4 */
mtctr 9
addi 6,6,-4 /* adjust so we can use stwu */
.Lloopbss:
stwu 0,4(6) /* zero bss */
bdnz .Lloopbss
.Lstbyteloopbss:
andi. 9,8,3 /* Calculate how many trailing bytes we have */
beq 0,.Lenclbss /* If zero, we are done */
mtctr 9
addi 6,6,-1 /* adjust, so we can use stbu */
.Lbyteloopbss:
stbu 0,1(6)
bdnz .Lbyteloopbss
.Lenclbss:
#endif /* SIMULATOR */
/* set stack pointer */
lwz 1,.Lstack(5) /* stack address */
/* set up initial stack frame */
addi 1,1,-8 /* location of back chain */
lis 0,0
stw 0,0(1) /* set end of back chain */
/* initialize base timer to zero */
mtspr 0x11c,0
mtspr 0x11d,0
#ifdef HAVE_XFPU
/* On the Xilinx PPC405 and PPC440, the MSR
must be explicitly set to mark the prescence
of an FPU */
mfpvr 0
rlwinm 0,0,0,12,15
cmpwi 7,0,8192
mfmsr 0
ori 0,0,8192
beq- 7,fpu_init_done
do_405:
oris 0,0,512
fpu_init_done:
mtmsr 0
#endif
#ifdef PROFILING
/* Setup profiling stuff */
bl _profile_init
#endif /* PROFILING */
/* Call __init */
bl __init
/* Let her rip */
bl main
/* Invoke the language cleanup functions */
bl __fini
#ifdef PROFILING
/* Cleanup profiling stuff */
bl _profile_clean
#endif /* PROFILING */
/* Call __init */
/* All done */
bl exit
/* Trap has been removed for both simulation and hardware */
.globl _exit
_exit:
b _exit
.Lstart:
.size _start,.Lstart-_start
|
32bitmicro/newlib-nano-1.0
| 3,331
|
libgloss/rs6000/crt0.S
|
/*
* crt0.S -- startup file for PowerPC systems.
*
* Copyright (c) 1995 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "ppc-asm.h"
.file "crt0.S"
.section ".got2","aw"
.align 2
.LCTOC1 = .+32768
.extern FUNC_NAME(atexit)
.globl FUNC_NAME(__atexit)
.section ".sdata","aw"
.align 2
FUNC_NAME(__atexit): /* tell C's eabi-ctor's we have an atexit function */
.long FUNC_NAME(atexit)@fixup /* and that it is to register __do_global_dtors */
.section ".fixup","aw"
.align 2
.long FUNC_NAME(__atexit)
.section ".got2","aw"
.Ltable = .-.LCTOC1
.long .LCTOC1 /* address we think .LCTOC1 is loaded at */
.Lsbss_start = .-.LCTOC1
.long __sbss_start
.Lsbss_end = .-.LCTOC1
.long __sbss_end
.Lbss_start = .-.LCTOC1
.long __bss_start
.Lend = .-.LCTOC1
.long _end
.Lstack = .-.LCTOC1 /* stack address if set by user */
.long __stack
.text
/* Position-independent startup: discovers its own load address with a
   bl/mflr pair, computes the delta between link-time and run-time
   addresses of .LCTOC1, relocates the table-derived pointers by that
   delta, clears BSS and SBSS, optionally installs a user stack, then
   calls main and exit. */
.Lptr:
.long .LCTOC1-.Laddr
.globl _start
.type _start,@function
_start:
bl .Laddr /* get current address */
.Laddr:
mflr r4 /* real address of .Laddr */
lwz r5,(.Lptr-.Laddr)(r4) /* linker generated address of .LCTOC1 */
add r5,r5,r4 /* correct to real pointer */
lwz r4,.Ltable(r5) /* get linker's idea of where .Laddr is */
subf r4,r4,r5 /* calculate difference between where linked and current */
/* clear bss and sbss */
/* NOTE(review): both clearing loops below zero whole words only; any
   trailing (size % 4) bytes are left untouched -- cf. xil-crt0.S,
   which clears the remainder byte-by-byte.  Confirm section sizes are
   word-aligned by the linker script. */
lwz r6,.Lbss_start(r5) /* calculate beginning of the BSS */
lwz r7,.Lend(r5) /* calculate end of the BSS */
add r6,r6,r4 /* adjust pointers */
add r7,r7,r4
cmplw 1,r6,r7
bc 4,4,.Ldone1
subf r8,r6,r7 /* number of bytes to zero */
srwi r9,r8,2 /* number of words to zero */
mtctr r9
li r0,0 /* zero to clear memory */
addi r6,r6,-4 /* adjust so we can use stwu */
.Lloop:
stwu r0,4(r6) /* zero bss */
bdnz .Lloop
.Ldone1:
lwz r6,.Lsbss_start(r5) /* calculate beginning of the SBSS */
lwz r7,.Lsbss_end(r5) /* calculate end of the SBSS */
add r6,r6,r4 /* adjust pointers */
add r7,r7,r4
cmplw 1,r6,r7
bc 4,4,.Ldone
subf r8,r6,r7 /* number of bytes to zero */
srwi r9,r8,2 /* number of words to zero */
mtctr r9
li r0,0 /* zero to clear memory */
addi r6,r6,-4 /* adjust so we can use stwu */
.Lloop2:
stwu r0,4(r6) /* zero bss */
bdnz .Lloop2
.Ldone:
lwz r0,.Lstack(r5) /* stack address or 0 */
cmplwi 1,r0,0 /* equal to 0? */
bc 12,6,.Lnostack /* use default stack if == 0 */
mr sp,r0 /* use user defined stack */
.Lnostack:
/* set up initial stack frame */
addi sp,sp,-4 /* make sure we don't overwrite debug mem */
lis r0,0
stw r0,0(sp) /* clear back chain */
stwu sp,-64(sp) /* push another stack frame */
/* Let her rip */
bl FUNC_NAME(main)
/* return value from main is argument to exit */
bl FUNC_NAME(exit)
trap
.Lstart:
.size _start,.Lstart-_start
|
32bitmicro/newlib-nano-1.0
| 1,604
|
libgloss/bfin/crt0.S
|
/*
* crt0.S for the Blackfin processor
*
* Copyright (C) 2006 Analog Devices, Inc.
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
.text
.align 2
/* Blackfin program entry: zero BSS, (FDPIC) load the GOT pointer,
   run __init, register __fini with atexit, then tail-call through
   ___setup_argv_and_call_main and finally _exit.  All cross-function
   calls go through p0 with P3 reloaded from P4 (FDPIC convention). */
.global __start
__start:
/* Start by setting up a stack */
/* NOTE(review): link 0xc allocates a 12-byte frame; the initial SP
   itself is assumed to be set by the loader -- confirm. */
link 0xc;
/* Zero the memory in the .bss section. */
/* Hardware loop: one byte store per iteration, p1 = byte count. */
p0.l = __edata;
p0.h = __edata;
p1.l = __end;
p1.h = __end;
p1 -= p0;
r0 = 0;
lsetup (L$L$clear_bss, L$L$clear_bss) lc0 = p1;
L$L$clear_bss:
B [p0++] = r0;
#ifdef __BFIN_FDPIC__
/* Set up GOT pointer. */
P0.L = __ROFIXUP_END__;
P0.H = __ROFIXUP_END__;
P4 = [P0 - 4];
#endif
/* Need to set up standard file handles */
/* Parse string at r1 */
/* NOTE(review): the two comments above look stale -- nothing here
   touches file handles or r1; the code below just calls __init. */
p0.l = __init;
p0.h = __init;
P3 = P4;
call (p0)
/* Register __fini to run at exit: atexit(__fini). */
p0.l = _atexit;
p0.h = _atexit;
#ifdef __BFIN_FDPIC__
r0 = [P4 + __fini@FUNCDESC_GOT17M4] ;
P3 = P4;
#else
r0.l = __fini;
r0.h = __fini;
#endif
call (p0)
p0.l = ___setup_argv_and_call_main;
p0.h = ___setup_argv_and_call_main;
P3 = P4;
call (p0)
/* main's return value in r0 becomes _exit's status argument. */
p0.l = _exit;
p0.h = _exit;
P3 = P4;
jump (p0) /* Should not return. */
nop;
|
32bitmicro/newlib-nano-1.0
| 14,305
|
libgloss/bfin/basiccrt.S
|
/*
* Basic startup code for Blackfin processor
*
* Copyright (C) 2008 Analog Devices, Inc.
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
// basic startup code which
// - turns the cycle counter on
// - loads up FP & SP (both supervisor and user)
// - initialises the device drivers (FIOCRT)
// - calls monstartup to set up the profiling routines (PROFCRT)
// - calls the C++ startup (CPLUSCRT)
// - initialises argc/argv (FIOCRT/normal)
// - calls _main
// - calls _exit (which calls monexit to dump accumulated prof data (PROFCRT))
// - defines dummy IO routines (!FIOCRT)
#include <sys/platform.h>
#include <cplb.h>
#include <sys/anomaly_macros_rtl.h>
#define IVBh (EVT0 >> 16)
#define IVBl (EVT0 & 0xFFFF)
#define UNASSIGNED_VAL 0
#define UNASSIGNED_FILL 0
// just IVG15
#define INTERRUPT_BITS 0x400
#if defined(_ADI_THREADS) || \
!defined(__ADSPLPBLACKFIN__) || defined(__ADSPBF561__) || defined(__ADSPBF566__)
#define SET_CLOCK_SPEED 0
#else
#define SET_CLOCK_SPEED 1
#endif
#if SET_CLOCK_SPEED == 1
#include <sys/pll.h>
#define SET_CLK_MSEL 0x16
#define SET_CLK_DF 0
#define SET_CLK_LOCK_COUNT 0x300
#define SET_CLK_CSEL 0
#define SET_CLK_SSEL 5
/*
** CLKIN == 27MHz on the EZ-Kits.
** D==0 means CLKIN is passed to PLL without dividing.
** MSEL==0x16 means VCO==27*0x16 == 594MHz
** CSEL==0 means CCLK==VCO == 594MHz
** SSEL==5 means SCLK==VCO/5 == 118MHz
*/
#endif
// Entry point for the basic Blackfin startup (basiccrt.S).  Runs in
// supervisor mode out of reset: scrubs dangerous core state, installs
// an event vector table, builds the stacks, clears .bss, optionally
// raises the clock speed, then drops to IVG15 "supervisor" level to
// run __init/atexit(__fini)/_main/_exit.
#ifdef __ADSPBF561_COREB__
.section .b.text,"ax",@progbits
.align 2;
.global __coreb_start;
.type __coreb_start, STT_FUNC;
__coreb_start:
#else
.text;
.align 2;
.global __start;
.type __start, STT_FUNC;
__start:
#endif
#if WA_05000109
// Avoid Anomaly ID 05000109.
# define SYSCFG_VALUE 0x30
R1 = SYSCFG_VALUE;
SYSCFG = R1;
#endif
#if WA_05000229
// Avoid Anomaly 05-00-0229: DMA5_CONFIG and SPI_CTL not cleared on reset.
R1 = 0x400;
#if defined(__ADSPBF538__) || defined(__ADSPBF539__)
P0.L = SPI0_CTL & 0xFFFF;
P0.H = SPI0_CTL >> 16;
W[P0] = R1.L;
#else
P0.L = SPI_CTL & 0xFFFF;
P0.H = SPI_CTL >> 16;
W[P0] = R1.L;
#endif
P0.L = DMA5_CONFIG & 0xFFFF;
P0.H = DMA5_CONFIG >> 16;
R1 = 0;
W[P0] = R1.L;
#endif
// Zap loop counters to zero, to make sure that
// hw loops are disabled - it could be really baffling
// if the counters and bottom regs are set, and we happen
// to run into them.
R7 = 0;
LC0 = R7;
LC1 = R7;
// Clear the DAG Length regs too, so that it's safe to
// use I-regs without them wrapping around.
L0 = R7;
L1 = R7;
L2 = R7;
L3 = R7;
// Zero ITEST_COMMAND and DTEST_COMMAND
// (in case they have crud in them and
// does a write somewhere when we enable cache)
I0.L = (ITEST_COMMAND & 0xFFFF);
I0.H = (ITEST_COMMAND >> 16);
I1.L = (DTEST_COMMAND & 0xFFFF);
I1.H = (DTEST_COMMAND >> 16);
R7 = 0;
[I0] = R7;
[I1] = R7;
// It seems writing ITEST_COMMAND from SDRAM with icache enabled
// needs SSYNC.
#ifdef __BFIN_SDRAM
SSYNC;
#else
CSYNC;
#endif
// Initialise the Event Vector table.
P0.H = IVBh;
P0.L = IVBl;
// Install __unknown_exception_occurred in EVT so that
// there is defined behaviour.
// 13 vectors are filled (EVT2..EVT14); EVT15 is set separately below.
P0 += 2*4; // Skip Emulation and Reset
P1 = 13;
R1.L = __unknown_exception_occurred;
R1.H = __unknown_exception_occurred;
LSETUP (L$ivt,L$ivt) LC0 = P1;
L$ivt: [P0++] = R1;
// Set IVG15's handler to be the start of the mode-change
// code. Then, before we return from the Reset back to user
// mode, we'll raise IVG15. This will mean we stay in supervisor
// mode, and continue from the mode-change point., but at a
// much lower priority.
P1.H = L$supervisor_mode;
P1.L = L$supervisor_mode;
[P0] = P1;
// Initialise the stack.
// Note: this points just past the end of the section.
// First write should be with [--SP].
#ifdef __BFIN_SDRAM
SP.L = __end + 0x400000 - 12;
SP.H = __end + 0x400000 - 12;
#else
#ifdef __ADSPBF561_COREB__
SP.L=__coreb_stack_end - 12;
SP.H=__coreb_stack_end - 12;
#else
SP.L=__stack_end - 12;
SP.H=__stack_end - 12;
#endif
#endif
usp = sp;
// We're still in supervisor mode at the moment, so the FP
// needs to point to the supervisor stack.
FP = SP;
// And make space for incoming "parameters" for functions
// we call from here:
SP += -12;
// Zero out bss section
// memset(bss_start, 0, bss_end - bss_start)
#ifdef __BFIN_SDRAM
R0.L = ___bss_start;
R0.H = ___bss_start;
R1.L = __end;
R1.H = __end;
#else
#ifdef __ADSPBF561_COREB__
R0.L = __coreb_bss_start;
R0.H = __coreb_bss_start;
R1.L = __coreb_bss_end;
R1.H = __coreb_bss_end;
#else
R0.L = __bss_start;
R0.H = __bss_start;
R1.L = __bss_end;
R1.H = __bss_end;
#endif
#endif
R2 = R1 - R0;
R1 = 0;
#ifdef __ADSPBF561_COREB__
CALL.X __coreb_memset;
#else
CALL.X _memset;
#endif
R0 = INTERRUPT_BITS;
R0 <<= 5; // Bits 0-4 not settable.
// CALL.X __install_default_handlers;
R4 = R0; // Save modified list
R0 = SYSCFG; // Enable the Cycle counter
BITSET(R0,1);
SYSCFG = R0;
#if WA_05000137
// Avoid anomaly #05000137
// Set the port preferences of DAG0 and DAG1 to be
// different; this gives better performance when
// performing dual-dag operations on SDRAM.
P0.L = DMEM_CONTROL & 0xFFFF;
P0.H = DMEM_CONTROL >> 16;
R0 = [P0];
BITSET(R0, 12);
BITCLR(R0, 13);
[P0] = R0;
CSYNC;
#endif
// Reinitialise data areas in RAM from ROM, if MemInit's
// been used.
// CALL.X _mi_initialize;
#if defined(__ADSPLPBLACKFIN__)
#if SET_CLOCK_SPEED == 1
#if 0
// Check if this feature is enabled, i.e. ___clk_ctrl is defined to non-zero
P0.L = ___clk_ctrl;
P0.H = ___clk_ctrl;
R0 = MAX_IN_STARTUP;
R1 = [P0];
R0 = R0 - R1;
CC = R0;
IF CC JUMP L$clock_is_set;
#endif
// Investigate whether we are a suitable revision
// for boosting the system clocks.
// speed.
// Silicon revision is in the low half of DSPID; skip the clock
// boost on revisions below 2.
P0.L = DSPID & 0xFFFF;
P0.H = DSPID >> 16;
R0 = [P0];
R0 = R0.L (Z);
CC = R0 < 2;
IF CC JUMP L$clock_is_set;
// Set the internal Voltage-Controlled Oscillator (VCO)
R0 = SET_CLK_MSEL (Z);
R1 = SET_CLK_DF (Z);
R2 = SET_CLK_LOCK_COUNT (Z);
CALL.X __pll_set_system_vco;
// Set the Core and System clocks
R0 = SET_CLK_CSEL (Z);
R1 = SET_CLK_SSEL (Z);
CALL.X __pll_set_system_clocks;
L$clock_is_set:
#endif
#endif /* ADSPLPBLACKFIN */
#if defined(__ADSPBF561__) || defined(__ADSPBF566__)
// Initialise the multi-core data tables.
// A dummy function will be called if we are not linking with
// -multicore
// CALL.X __mc_data_initialise;
#endif
#if 0
// Write the cplb exception handler to the EVT if appropriate and
// initialise the CPLBs if they're needed. couldn't do
// this before we set up the stacks.
P2.H = ___cplb_ctrl;
P2.L = ___cplb_ctrl;
R0 = CPLB_ENABLE_ANY_CPLBS;
R6 = [P2];
R0 = R0 & R6;
CC = R0;
IF !CC JUMP L$no_cplbs;
#if !defined(_ADI_THREADS)
P1.H = __cplb_hdr;
P1.L = __cplb_hdr;
P0.H = IVBh;
P0.L = IVBl;
[P0+12] = P1; // write exception handler
#endif /* _ADI_THREADS */
R0 = R6;
CALL.X __cplb_init;
#endif
L$no_cplbs:
// Enable interrupts
STI R4; // Using the mask from default handlers
RAISE 15;
// Move the processor into user mode.
P0.L=L$still_interrupt_in_ipend;
P0.H=L$still_interrupt_in_ipend;
RETI=P0;
L$still_interrupt_in_ipend:
rti; // keep doing 'rti' until we've 'finished' servicing all
// interrupts of priority higher than IVG15. Normally one
// would expect to only have the reset interrupt in IPEND
// being serviced, but occasionally when debugging this may
// not be the case - if restart is hit when servicing an
// interrupt.
//
// When we clear all bits from IPEND, we'll enter user mode,
// then we'll automatically jump to supervisor_mode to start
// servicing IVG15 (which we will 'service' for the whole
// program, so that the program is in supervisor mode.
//
// Need to do this to 'finish' servicing the reset interrupt.
L$supervisor_mode:
[--SP] = RETI; // re-enables the interrupt system
R0.L = UNASSIGNED_VAL;
R0.H = UNASSIGNED_VAL;
#if UNASSIGNED_FILL
R2=R0;
R3=R0;
R4=R0;
R5=R0;
R6=R0;
R7=R0;
P0=R0;
P1=R0;
P2=R0;
P3=R0;
P4=R0;
P5=R0;
#endif
// Push a RETS and Old FP onto the stack, for sanity.
[--SP]=R0;
[--SP]=R0;
// Make sure the FP is sensible.
FP = SP;
// And leave space for incoming "parameters"
SP += -12;
#ifdef PROFCRT
CALL.X monstartup; // initialise profiling routines
#endif /* PROFCRT */
#ifndef __ADSPBF561_COREB__
CALL.X __init;
R0.L = __fini;
R0.H = __fini;
CALL.X _atexit;
#endif
#if !defined(_ADI_THREADS)
#ifdef FIOCRT
// FILE IO provides access to real command-line arguments.
CALL.X __getargv;
r1.l=__Argv;
r1.h=__Argv;
#else
// Default to having no arguments and a null list.
R0=0;
#ifdef __ADSPBF561_COREB__
R1.L=L$argv_coreb;
R1.H=L$argv_coreb;
#else
R1.L=L$argv;
R1.H=L$argv;
#endif
#endif /* FIOCRT */
#endif /* _ADI_THREADS */
// At long last, call the application program.
#ifdef __ADSPBF561_COREB__
CALL.X _coreb_main;
#else
CALL.X _main;
#endif
#if !defined(_ADI_THREADS)
#ifndef __ADSPBF561_COREB__
CALL.X _exit; // passing in main's return value
#endif
#endif
#ifdef __ADSPBF561_COREB__
.size __coreb_start, .-__coreb_start
#else
.size __start, .-__start
#endif
.align 2
.type __unknown_exception_occurred, STT_FUNC;
__unknown_exception_occurred:
// This function is invoked by the default exception
// handler, if it does not recognise the kind of
// exception that has occurred. In other words, the
// default handler only handles some of the system's
// exception types, and it does not expect any others
// to occur. If your application is going to be using
// other kinds of exceptions, you must replace the
// default handler with your own, that handles all the
// exceptions you will use.
//
// Since there's nothing we can do, we just loop here
// at what we hope is a suitably informative label.
IDLE;
CSYNC;
JUMP __unknown_exception_occurred;
RTS; // unreachable: the JUMP above loops forever; RTS is a safety net only
.size __unknown_exception_occurred, .-__unknown_exception_occurred
#if defined(__ADSPLPBLACKFIN__)
#if SET_CLOCK_SPEED == 1
/*
** CLKIN == 27MHz on the EZ-Kits.
** D==0 means CLKIN is passed to PLL without dividing.
** MSEL==0x16 means VCO==27*0x16 == 594MHz
** CSEL==0 means CCLK==VCO == 594MHz
** SSEL==5 means SCLK==VCO/5 == 118MHz
*/
// int pll_set_system_clocks(int csel, int ssel)
// returns 0 for success, -1 for error.
.align 2
.type __pll_set_system_clocks, STT_FUNC;
// int pll_set_system_clocks(int csel, int ssel)
// In:  R0 = CSEL (core clock divider), R1 = SSEL (system clock divider)
// Deposits both fields into PLL_DIV: CSEL is 2 bits at position 4,
// SSEL is 4 bits at position 0.  Clobbers R0-R2, P0.
__pll_set_system_clocks:
P0.H = PLL_DIV >> 16;
P0.L = PLL_DIV & 0xFFFF;
R2 = W[P0] (Z);
// Plant CSEL and SSEL
// DEPOSIT pattern format: high half = value, low half = (posn << 8) | length.
R0 <<= 16;
R0.L = (4 << 8) | 2; // 2 bits, at posn 4
R1 <<= 16;
R1.L = 4; // 4 bits, at posn 0
R2 = DEPOSIT(R2, R0);
#if defined(__WORKAROUND_DREG_COMP_LATENCY)
// Work around anomaly 05-00-0209 which affects the DEPOSIT
// instruction (and the EXTRACT, SIGNBITS, and EXPADJ instructions)
// if the previous instruction created any of its operands
NOP;
#endif
R2 = DEPOSIT(R2, R1);
W[P0] = R2;
SSYNC;
RTS;
.size __pll_set_system_clocks, .-__pll_set_system_clocks
// int pll_set_system_vco(int msel, int df, lockcnt)
// In:  R0 = MSEL multiplier, R1 = DF divide flag, R2 = lock count
//      (0 selects the hardware default lock count).
// Programs PLL_CTL and idles until the PLL relocks; a no-op if the
// requested value matches the current PLL_CTL.  Clobbers R0-R3, P0, P2.
.align 2
.type __pll_set_system_vco, STT_FUNC;
__pll_set_system_vco:
P0.H = PLL_CTL >> 16;
P0.L = PLL_CTL & 0xFFFF;
R3 = W[P0] (Z);
P2 = R3; // Save copy
R3 >>= 1; // Drop old DF
R1 = ROT R1 BY -1; // Move DF into CC
R3 = ROT R3 BY 1; // and into ctl space.
R0 <<= 16; // Set up pattern reg
R0.L = (9<<8) | 6; // (6 bits at posn 9)
R1 = P2; // Get the old version
R3 = DEPOSIT(R3, R0);
CC = R1 == R3; // and if we haven't changed
IF CC JUMP L$done; // Anything, return
CC = R2 == 0; // Use default lockcount if
IF CC JUMP L$wakeup; // user one is zero.
P2.H = PLL_LOCKCNT >> 16;
P2.L = PLL_LOCKCNT & 0xFFFF;
W[P2] = R2; // Set the lock counter
L$wakeup:
P2.H = SIC_IWR >> 16;
P2.L = SIC_IWR & 0xFFFF;
R2 = [P2];
BITSET(R2, 0); // enable PLL Wakeup
[P2] = R2;
W[P0] = R3; // Update PLL_CTL
SSYNC;
CLI R2; // Avoid unnecessary interrupts
IDLE; // Wait until PLL has locked
STI R2; // Restore interrupts.
L$done:
RTS;
.size __pll_set_system_vco, .-__pll_set_system_vco
#endif
#endif /* ADSPLPBLACKFIN */
#ifdef __ADSPBF561_COREB__
// void *__coreb_memset(void *dst, int c, size_t n)
// Core-B local copy of memset so the BF561 second core does not need
// to reach Core A's library.  R0 = dst, R1 = fill byte, R2 = count.
// Small fills (n <= 7) go byte-by-byte; larger fills align the
// pointer, store 32-bit words, then finish the byte tail.
.section .b.text,"ax",@progbits
.type __coreb_memset, STT_FUNC
__coreb_memset:
P0 = R0 ; /* P0 = address */
P2 = R2 ; /* P2 = count */
R3 = R0 + R2; /* end */
CC = R2 <= 7(IU);
IF CC JUMP .Ltoo_small;
R1 = R1.B (Z); /* R1 = fill char */
R2 = 3;
R2 = R0 & R2; /* addr bottom two bits */
CC = R2 == 0; /* AZ set if zero. */
IF !CC JUMP .Lforce_align ; /* Jump if addr not aligned. */
.Laligned:
P1 = P2 >> 2; /* count = n/4 */
R2 = R1 << 8; /* create quad filler: replicate byte into all 4 lanes */
R2.L = R2.L + R1.L(NS);
R2.H = R2.L + R1.H(NS);
P2 = R3;
LSETUP (.Lquad_loop , .Lquad_loop) LC0=P1;
.Lquad_loop:
[P0++] = R2;
CC = P0 == P2;
IF !CC JUMP .Lbytes_left;
RTS;
.Lbytes_left:
R2 = R3; /* end point */
R3 = P0; /* current position */
R2 = R2 - R3; /* bytes left */
P2 = R2;
.Ltoo_small:
CC = P2 == 0; /* Check zero count */
IF CC JUMP .Lfinished; /* Unusual */
.Lbytes:
LSETUP (.Lbyte_loop , .Lbyte_loop) LC0=P2;
.Lbyte_loop:
B[P0++] = R1;
.Lfinished:
RTS;
.Lforce_align:
/* Write up to 3 bytes until P0 is 4-byte aligned, then rejoin the
   word loop.  R2 still holds (addr & 3) here.  */
CC = BITTST (R0, 0); /* odd byte */
R0 = 4;
R0 = R0 - R2;
P1 = R0; /* P1 = bytes needed to reach alignment */
R0 = P0; /* Recover return address */
IF !CC JUMP .Lskip1;
B[P0++] = R1;
.Lskip1:
CC = R2 <= 2; /* 2 bytes */
P2 -= P1; /* reduce count */
IF !CC JUMP .Laligned;
B[P0++] = R1;
B[P0++] = R1;
JUMP .Laligned;
.size __coreb_memset,.-__coreb_memset
#endif
// Zero-filled placeholder argv array (a single NULL pointer) passed
// to main when FILE-IO argument parsing is not in use.
#ifdef __ADSPBF561_COREB__
.section .b.bss,"aw",@progbits
.align 4
.type L$argv_coreb, @object
.size L$argv_coreb, 4
L$argv_coreb:
.zero 4
#else
.local L$argv
.comm L$argv,4,4
#endif
|
32bitmicro/newlib-nano-1.0
| 2,084
|
libgloss/tic6x/crt0.S
|
/* crt0.S for the TI C6X series of processors
Copyright (c) 2010 CodeSourcery, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of CodeSourcery nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY CODESOURCERY, INC. ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CODESOURCERY BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE. */
.text
.align 2
.global _start
/* TI C6X entry point: establishes a double-word-aligned stack (B15),
   loads the DSBT base into B14 (the C6X data-segment pointer), then
   calls _init, main, and finally branches to exit with main's return
   value still in A4.  Return addresses are planted manually in B3
   before each call, as the C6X ABI requires.  */
_start:
/* Start by setting up a stack */
mvkl .s2 _STACK_START - 4, B15
mvkh .s2 _STACK_START - 4, B15
and .s2 -8, B15, B15
mvkl .s2 __c6xabi_DSBT_BASE, B14
mvkh .s2 __c6xabi_DSBT_BASE, B14
#ifdef __DSBT__
stw .d2t2 B14, *B14
#endif
/* Zero the memory in the .bss section. */
/* NOTE(review): no bss-clearing code follows this comment here;
   presumably .bss is zeroed by the loader/BIOS -- confirm.  */
/* Set up GOT pointer. */
mvkl .s2 1f, B3
mvkh .s2 1f, B3
call .s2 _init
nop 5
1:
mvkl .s2 1f, B3
mvkh .s2 1f, B3
call .s2 main
nop 5
1:
b .s2 exit
nop 5
|
32bitmicro/newlib-nano-1.0
| 4,426
|
libgloss/mips/crt0_cygmon.S
|
/*
* crt0_cygmon.S -- Minimal startup file for MIPS targets running Cygmon.
*
* Copyright (c) 1995, 1996, 1997, 2000 Red Hat, Inc.
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
/*
* This file contains the minimal startup code necessary.
* This will not do any hardware initialization. It is assumed that we are talking to Cygmon
* and therefore the hardware will be initialized properly.
*/
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#include "regs.S"
/*
* Set up some room for a stack. We just grab a chunk of memory.
*/
#define STACK_SIZE 0x4000
#define GLOBAL_SIZE 0x2000
#define STARTUP_STACK_SIZE 0x0100
.comm __memsize, 12
.comm __lstack, STARTUP_STACK_SIZE
.comm __stackbase,4
.text
.align 4
/*
 * Without the following nop, GDB thinks _start is a data variable.
 * This is probably a bug in GDB in handling a symbol that is at the
 * start of the .text section.
 */
nop
.globl _start
.ent _start
_start:
.set noreorder
# Note: noreorder stays in effect for the fall-through code below
# (zerobss/init), which fills its own delay slots.
la gp, _gp # set the global data pointer, defined in the linker script
.end _start
/*
* zero out the bss section.
*/
.globl __memsize
.globl get_mem_info .text
.globl zerobss
.ent zerobss
# Fallen into from _start: clears .bss, sets up a small temporary
# stack, asks the monitor for the usable memory size, then plants the
# real stack pointer at (top of memory - 32) in __stackbase.
zerobss:
la v0, _fbss # These variables are defined in the linker script
la v1, _end
3:
sw zero, 0(v0)
bltu v0, v1, 3b
addiu v0, v0, 4 # executed in delay slot
/*
 * Setup a small stack so we can run some C code,
 * and get the usable memory size.
 */
la t0, __lstack
addiu sp, t0, STARTUP_STACK_SIZE
la a0, __memsize
jal get_mem_info
nop
/*
 * Setup the stack pointer --
 * get_mem_info returns the top of memory, so just use that In
 * addition, we must subtract 24 bytes for the 3 8 byte
 * arguments to main, in case main wants to write them back to
 * the stack. The caller is supposed to allocate stack space
 * for parameters in registers in the old MIPS ABIs. We must
 * do this even though we aren't passing arguments, because
 * main might be declared to have them.
 * Some ports need a larger alignment for the stack, so we
 * subtract 32, which satisfies the stack for the arguments and
 * keeps the stack pointer better aligned.
 */
subu v0, v0, 32
move sp, v0
sw sp, __stackbase # keep this for future ref
.end zerobss
/*
* initialize target specific stuff. Only execute these
* functions it they exist.
*/
# The hook symbols below are declared global but may be left
# undefined; the zero-checks before each jal suggest unresolved hooks
# resolve to address 0 -- NOTE(review): relies on linker behaviour,
# confirm against the linker script.
.globl hardware_init_hook .text
.globl software_init_hook .text
.globl __do_global_dtors .text
.globl atexit .text
.globl exit .text
.globl init
.ent init
init:
la t9, hardware_init_hook # init the hardware if needed
beq t9, zero, 6f
nop
jal t9
nop
6:
la t9, software_init_hook # init the software if needed
beq t9, zero, 7f
nop
jal t9
nop
7:
la a0, __do_global_dtors
jal atexit
nop
#ifdef GCRT0
.globl _ftext
.globl _extext
la a0, _ftext
la a1, _etext
jal monstartup
nop
#endif
move a0,zero # set argc to 0
jal main # call the program start function
nop
# fall through to the "exit" routine
jal exit # call libc exit to run the G++
# destructors
move a0, v0 # pass through the exit code (delay slot: runs before exit)
.end init
/*
* _exit -- Exit from the application. Normally we cause a user trap
* to return to the ROM monitor for another run. NOTE: This is
* the only other routine we provide in the crt0.o object, since
* it may be tied to the "_start" routine. It also allows
* executables that contain a complete world to be linked with
* just the crt0.o object.
*/
.globl _exit
.ent _exit
# Terminate the application by trapping back into Cygmon.
# Loops on the break in case the monitor returns control.
_exit:
7:
#ifdef GCRT0
jal _mcleanup
nop
#endif
# Cygmon expects a break 5
break 5
nop
b 7b # loop back just in-case
nop
.end _exit
/* EOF crt0.S */
|
32bitmicro/newlib-nano-1.0
| 1,357
|
libgloss/mips/idtmon.S
|
/*
* idtmon.S -- lo-level entry points into IDT monitor.
*
* Copyright (c) 1996 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#include "regs.S"
.text
.align 2
/* Provide named functions for entry into the IDT monitor: */
/* INDIRECT(name, index): emit a global function `name` that jumps to
   IDT monitor entry point `index`.  Each monitor vector is an 8-byte
   slot starting at 0xbfc00000 (kseg1 boot ROM).  */
#define INDIRECT(name,index) \
.globl name; \
.ent name; \
name: la $2,+(0xbfc00000+((index)*8)); \
j $2; \
.end name
/* The following magic numbers are for the slots into the IDT monitor: */
INDIRECT(open,6)
INDIRECT(read,7)
INDIRECT(write,8)
INDIRECT(close,10)
INDIRECT(inbyte,11)
INDIRECT(outbyte,12)
INDIRECT(mon_printf,16)
INDIRECT(_flush_cache,28)
INDIRECT(get_mem_info,55) /* expects pointer to three word vector */
/* EOF idtmon.S */
|
32bitmicro/newlib-nano-1.0
| 12,695
|
libgloss/mips/vr5xxx.S
|
/*
* vr5xxx.S -- CPU specific support routines
*
* Copyright (c) 1999 Cygnus Solutions
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
/* This file cloned from vr4300.S by dlindsay@cygnus.com
* and recoded to suit Vr5432 and Vr5000.
* Should be no worse for Vr43{00,05,10}.
* Specifically, __cpu_flush() has been changed (a) to allow for the hardware
* difference (in set associativity) between the Vr5432 and Vr5000,
* and (b) to flush the optional secondary cache of the Vr5000.
*/
/* Processor Revision Identifier (PRID) Register: Implementation Numbers */
#define IMPL_VR5432 0x54
/* Cache Constants not determinable dynamically */
#define VR5000_2NDLINE 32 /* secondary cache line size */
#define VR5432_LINE 32 /* I,Dcache line sizes */
#define VR5432_SIZE (16*1024) /* I,Dcache half-size */
#ifndef __mips64
.set mips3
#endif
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#include "regs.S"
.text
.align 2
# Taken from "R4300 Preliminary RISC Processor Specification
# Revision 2.0 January 1995" page 39: "The Count
# register... increments at a constant rate... at one-half the
# PClock speed."
# We can use this fact to provide small polled delays.
.globl __cpu_timer_poll
.ent __cpu_timer_poll
# Busy-wait for a given number of PClock ticks using the CP0 Count
# register (which increments at PClock/2).
__cpu_timer_poll:
.set noreorder
# in: a0 = (unsigned int) number of PClock ticks to wait for
# out: void
# The Vr4300 counter updates at half PClock, so divide by 2 to
# get counter delta:
bnezl a0, 1f # continue if delta non-zero
srl a0, a0, 1 # divide ticks by 2 {DELAY SLOT}
# perform a quick return to the caller:
j ra
nop # {DELAY SLOT}
1:
mfc0 v0, C0_COUNT # get current counter value
nop
nop
# We cannot just do the simple test, of adding our delta onto
# the current value (ignoring overflow) and then checking for
# equality. The counter is incrementing every two PClocks,
# which means the counter value can change between
# instructions, making it hard to sample at the exact value
# desired.
# However, we do know that our entry delta value is less than
# half the number space (since we divide by 2 on entry). This
# means we can use a difference in signs to indicate timer
# overflow.
addu a0, v0, a0 # unsigned add (ignore overflow)
# We now have our end value (which will have been
# sign-extended to fill the 64bit register value).
2:
# get current counter value:
mfc0 v0, C0_COUNT
nop
nop
# This is an unsigned 32bit subtraction:
subu v0, a0, v0 # delta = (end - now) {DELAY SLOT}
bgtzl v0, 2b # looping back is most likely
nop
# We have now been delayed (in the foreground) for AT LEAST
# the required number of counter ticks.
j ra # return to caller
nop # {DELAY SLOT}
.set reorder
.end __cpu_timer_poll
# Flush the processor caches to memory:
.globl __cpu_flush
.ent __cpu_flush
# Write back and invalidate the processor caches.
# In:  void.  Out: void.  Clobbers a0-a3, t0-t3.
__cpu_flush:
.set noreorder
# NOTE: The Vr4300 and Vr5432 *CANNOT* have any secondary cache.
# On those, SC (bit 17 of CONFIG register) is hard-wired to 1,
# except that email from Dennis_Han@el.nec.com says that old
# versions of the Vr5432 incorrectly hard-wired this bit to 0.
# The Vr5000 has an optional direct-mapped secondary cache,
# and the SC bit correctly indicates this.
# So, for the 4300 and 5432 we want to just
# flush the primary Data and Instruction caches.
# For the 5000 it is desired to flush the secondary cache too.
# There is an operation difference worth noting.
# The 4300 and 5000 primary caches use VA bit 14 to choose cache set,
# whereas 5432 primary caches use VA bit 0.
# This code interprets the relevant Config register bits as
# much as possible, except for the 5432.
# The code therefore has some portability.
# However, the associativity issues mean you should not just assume
# that this code works anywhere. Also, the secondary cache set
# size is hardwired, since the 5000 series does not define codes
# for variant sizes.
# Note: this version of the code flushes D$ before I$.
# It is difficult to construct a case where that matters,
# but it can't hurt.
mfc0 a0, C0_PRID # a0 = Processor Revision register
nop # dlindsay: unclear why the nops, but
nop # vr4300.S had such so I do too.
srl a2, a0, PR_IMP # want bits 8..15
# BUGFIX: the Imp field is 8 bits wide, so the mask must be 0xff.
# The previous mask (0x255) left stray bits in a2 and could make the
# comparison against IMPL_VR5432 misclassify the processor.
andi a2, a2, 0xff # mask: now a2 = Implementation # field
li a1, IMPL_VR5432
beq a1, a2, 8f # use Vr5432-specific flush algorithm
nop
# Non-Vr5432 version of the code.
# (The distinctions being: CONFIG is truthful about secondary cache,
# and we act as if the primary Icache and Dcache are direct mapped.)
mfc0 t0, C0_CONFIG # t0 = CONFIG register
nop
nop
li a1, 1 # a1=1, a useful constant
srl a2, t0, CR_IC # want IC field of CONFIG
andi a2, a2, 0x7 # mask: now a2= code for Icache size
add a2, a2, 12 # +12
sllv a2, a1, a2 # a2=primary instruction cache size in bytes
srl a3, t0, CR_DC # DC field of CONFIG
andi a3, a3, 0x7 # mask: now a3= code for Dcache size
add a3, a3, 12 # +12
sllv a3, a1, a3 # a3=primary data cache size in bytes
li t2, (1 << CR_IB) # t2=mask over IB boolean
and t2, t2, t0 # test IB field of CONFIG register value
beqz t2, 1f #
li a1, 16 # 16 bytes (branch shadow: always loaded.)
li a1, 32 # non-zero, then 32bytes
1:
li t2, (1 << CR_DB) # t2=mask over DB boolean
and t2, t2, t0 # test BD field of CONFIG register value
beqz t2, 2f #
li a0, 16 # 16bytes (branch shadow: always loaded.)
li a0, 32 # non-zero, then 32bytes
2:
lui t1, ((K0BASE >> 16) & 0xFFFF)
ori t1, t1, (K0BASE & 0xFFFF)
# At this point,
# a0 = primary Dcache line size in bytes
# a1 = primary Icache line size in bytes
# a2 = primary Icache size in bytes
# a3 = primary Dcache size in bytes
# t0 = CONFIG value
# t1 = a round unmapped cached base address (we are in kernel mode)
# t2,t3 scratch
addi t3, t1, 0 # t3=t1=start address for any cache
add t2, t3, a3 # t2=end address+1 of Dcache
sub t2, t2, a0 # t2=address of last line in Dcache
3:
cache INDEX_WRITEBACK_INVALIDATE_D,0(t3)
bne t3, t2, 3b #
addu t3, a0 # (delay slot) increment by Dcache line size
# Now check CONFIG to see if there is a secondary cache
lui t2, (1 << (CR_SC-16)) # t2=mask over SC boolean
and t2, t2, t0 # test SC in CONFIG
bnez t2, 6f
# There is a secondary cache. Find out its sizes.
srl t3, t0, CR_SS # want SS field of CONFIG
andi t3, t3, 0x3 # mask: now t3= code for cache size.
beqz t3, 4f
lui a3, ((512*1024)>>16) # a3= 512K, code was 0
addu t3, -1 # decrement code
beqz t3, 4f
lui a3, ((1024*1024)>>16) # a3= 1 M, code 1
addu t3, -1 # decrement code
beqz t3, 4f
lui a3, ((2*1024*1024)>>16) # a3= 2 M, code 2
j 6f # no secondary cache, code 3
4: # a3 = secondary cache size in bytes
li a0, VR5000_2NDLINE # no codes assigned for other than 32
# At this point,
# a0 = secondary cache line size in bytes
# a1 = primary Icache line size in bytes
# a2 = primary Icache size in bytes
# a3 = secondary cache size in bytes
# t1 = a round unmapped cached base address (we are in kernel mode)
# t2,t3 scratch
addi t3, t1, 0 # t3=t1=start address for any cache
add t2, t3, a3 # t2=end address+1 of secondary cache
sub t2, t2, a0 # t2=address of last line in secondary cache
5:
cache INDEX_WRITEBACK_INVALIDATE_SD,0(t3)
bne t3, t2, 5b
addu t3, a0 # (delay slot) increment by line size
6: # Any optional secondary cache done. Now do I-cache and return.
# At this point,
# a1 = primary Icache line size in bytes
# a2 = primary Icache size in bytes
# t1 = a round unmapped cached base address (we are in kernel mode)
# t2,t3 scratch
add t2, t1, a2 # t2=end address+1 of Icache
sub t2, t2, a1 # t2=address of last line in Icache
7:
cache INDEX_INVALIDATE_I,0(t1)
bne t1, t2, 7b
addu t1, a1 # (delay slot) increment by Icache line size
j ra # return to the caller
nop
8:
# Vr5432 version of the cpu_flush code.
# (The distinctions being: CONFIG can not be trusted about secondary
# cache (which does not exist). The primary caches use Virtual Address Bit 0
# to control set selection.
# Code does not consult CONFIG about cache sizes: knows the hardwired sizes.
# Since both I and D have the same size and line size, uses a merged loop.
li a0, VR5432_LINE
li a1, VR5432_SIZE
lui t1, ((K0BASE >> 16) & 0xFFFF)
ori t1, t1, (K0BASE & 0xFFFF)
# a0 = cache line size in bytes
# a1 = 1/2 cache size in bytes
# t1 = a round unmapped cached base address (we are in kernel mode)
add t2, t1, a1 # t2=end address+1
sub t2, t2, a0 # t2=address of last line in Icache
9:
cache INDEX_WRITEBACK_INVALIDATE_D,0(t1) # set 0
cache INDEX_WRITEBACK_INVALIDATE_D,1(t1) # set 1
cache INDEX_INVALIDATE_I,0(t1) # set 0
cache INDEX_INVALIDATE_I,1(t1) # set 1
bne t1, t2, 9b
addu t1, a0
j ra # return to the caller
nop
.set reorder
.end __cpu_flush
# NOTE: This variable should *NOT* be addressed relative to
# the $gp register since this code is executed before $gp is
# initialised... hence we leave it in the text area. This will
# cause problems if this routine is ever ROMmed:
.globl __buserr_cnt
# Count of bus errors skipped by __buserr (kept in .text; see note above).
__buserr_cnt:
.word 0
.align 3
# Doubleword save slot for k1 while __buserr runs.
__k1_save:
.word 0
.word 0
.align 2
.ent __buserr
.globl __buserr
# Exception handler: if the CP0 Cause ExcCode is 7 (data bus error),
# bump __buserr_cnt and skip the faulting instruction via EPC+4;
# otherwise chain to the saved previous handler in __previous.
__buserr:
.set noat
.set noreorder
# k0 and k1 available for use:
mfc0 k0,C0_CAUSE
nop
nop
andi k0,k0,0x7c
sub k0,k0,7 << 2
beq k0,$0,__buserr_do
nop
# call the previous handler
la k0,__previous
jr k0
nop
#
__buserr_do:
# TODO: check that the cause is indeed a bus error
# - if not then just jump to the previous handler
la k0,__k1_save
sd k1,0(k0)
#
la k1,__buserr_cnt
lw k0,0(k1) # increment counter
addu k0,1
sw k0,0(k1)
#
la k0,__k1_save
ld k1,0(k0)
#
mfc0 k0,C0_EPC
nop
nop
addu k0,k0,4 # skip offending instruction
mtc0 k0,C0_EPC # update EPC
nop
nop
eret
# j k0
# rfe
.set reorder
.set at
.end __buserr
# Trampoline copied into the CPU's exception vector: loads the address
# of __buserr and jumps to it.  __previous (in .data) holds the bytes
# that were in the vector before installation, so they can be restored.
__exception_code:
.set noreorder
lui k0,%hi(__buserr)
daddiu k0,k0,%lo(__buserr)
jr k0
nop
.set reorder
__exception_code_end:
.data
__previous:
.space (__exception_code_end - __exception_code)
# This subtracting two addresses is working
# but is not guaranteed to continue working.
# The assembler reserves the right to put these
# two labels into different frags, and then
# can't take their difference.
.text
.ent __default_buserr_handler
.globl __default_buserr_handler
# Install the simple bus-error handler: saves the current vector
# contents into __previous, copies __exception_code over the general
# exception vector (base chosen from SR.BEV), and zeroes __buserr_cnt.
__default_buserr_handler:
.set noreorder
# attach our simple bus error handler:
# in: void
# out: void
mfc0 a0,C0_SR
nop
li a1,SR_BEV
and a1,a1,a0
beq a1,$0,baseaddr
lui a0,0x8000 # delay slot
lui a0,0xbfc0
daddiu a0,a0,0x0200
baseaddr:
daddiu a0,a0,0x0180
# a0 = base vector table address
la a1,__exception_code_end
la a2,__exception_code
subu a1,a1,a2
la a3,__previous
# there must be a better way of doing this????
copyloop:
lw v0,0(a0)
sw v0,0(a3)
lw v0,0(a2)
sw v0,0(a0)
daddiu a0,a0,4
daddiu a2,a2,4
daddiu a3,a3,4
subu a1,a1,4
bne a1,$0,copyloop
nop
la a0,__buserr_cnt
sw $0,0(a0)
j ra
nop
.set reorder
.end __default_buserr_handler
.ent __restore_buserr_handler
.globl __restore_buserr_handler
# Undo __default_buserr_handler: copy the saved vector bytes from
# __previous back over the exception vector (base chosen from SR.BEV).
__restore_buserr_handler:
.set noreorder
# restore original (monitor) bus error handler
# in: void
# out: void
mfc0 a0,C0_SR
nop
li a1,SR_BEV
and a1,a1,a0
beq a1,$0,res_baseaddr
lui a0,0x8000 # delay slot
lui a0,0xbfc0
daddiu a0,a0,0x0200
res_baseaddr:
daddiu a0,a0,0x0180
# a0 = base vector table address
la a1,__exception_code_end
la a3,__exception_code
subu a1,a1,a3
la a3,__previous
# there must be a better way of doing this????
res_copyloop:
lw v0,0(a3)
sw v0,0(a0)
daddiu a0,a0,4
daddiu a3,a3,4
subu a1,a1,4
bne a1,$0,res_copyloop
nop
j ra
nop
.set reorder
.end __restore_buserr_handler
.ent __buserr_count
.globl __buserr_count
__buserr_count:
.set noreorder
# return the number of bus errors recorded by __buserr
# in: void
# out: unsigned int __buserr_cnt
la v0,__buserr_cnt
lw v0,0(v0)
j ra
nop
.set reorder
.end __buserr_count
/* EOF vr5xxx.S */
|
32bitmicro/newlib-nano-1.0
| 7,118
|
libgloss/mips/entry.S
|
/* entry.S - exception handler for emulating MIPS16 'entry' and 'exit'
pseudo-instructions. These instructions are generated by the compiler
when the -mentry switch is used. The instructions are not implemented
in the MIPS16 CPU; hence the exception handler that emulates them.
This module contains the following public functions:
* void __install_entry_handler(void);
This function installs the entry/exit exception handler. It should
be called before executing any MIPS16 functions that were compiled with
-mentry, typically before main() is called.
* void __remove_entry_handler(void);
This function removes the entry/exit exception handler. It should
be called when the program is exiting, or when it is known that no
more MIPS16 functions compiled with -mentry will be called.
*/
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#include "regs.S"
#define CAUSE_EXCMASK 0x3c /* mask for ExcCode in Cause Register */
#define EXC_RI 0x28 /* 101000 == 10 << 2 */
/* Set DEBUG to 1 to enable recording of the last 16 interrupt causes. */
#define DEBUG 0
#if DEBUG
.sdata
int_count:
.space 4 /* interrupt count modulo 16 */
int_cause:
.space 4*16 /* last 16 interrupt causes */
#endif
.text
.set noreorder /* Do NOT reorder instructions */
/* __entry_exit_handler - the reserved instruction exception handler
that emulates the entry and exit instruction. */
/* Reserved-instruction exception handler. If the faulting instruction is
a MIPS16 'entry' or 'exit' pseudo-instruction (EPC.0 = 1 selects MIPS16
mode), emulate it: save/restore a0-a3, ra, s0/s1 on the stack and adjust
sp as the encoding requests, then resume. Anything else is forwarded to
the previous handler saved in __previous. Runs with k0/k1 only until the
decision to emulate is made. */
__entry_exit_handler:
.set noat /* Do NOT use at register */
#if DEBUG
/* Must avoid using 'la' pseudo-op because it uses gp register, which
may not have a good value in an exception handler. */
# la k0, int_count /* intcount = (intcount + 1) & 0xf */
lui k0 ,%hi(int_count)
addiu k0, k0 ,%lo(int_count)
lw k1, (k0)
addiu k1, k1, 1
andi k1, k1, 0x0f
sw k1, (k0)
# la k0, int_cause /* k1 = &int_cause[intcount] */
lui k0, %hi(int_cause)
addiu k0, k0, %lo(int_cause)
sll k1, k1, 2
add k1, k1, k0
#endif
mfc0 k0, C0_CAUSE /* Fetch cause */
#if DEBUG
sw k0, -4(k1) /* Save exception cause in buffer */
#endif
mfc0 k1, C0_EPC /* Check for Reserved Inst. without */
and k0, CAUSE_EXCMASK /* destroying any register */
subu k0, EXC_RI
bne k0, zero, check_others /* Not an RI exception: chain on */
and k0, k1, 1 /* Check for TR mode (pc.0 = 1) */
beq k0, zero, ri_in_32 /* Sorry, RI in 32-bit mode */
xor k1, 1 /* k1 = real (even) instruction address */
/* Since we now are going to emulate or die, we can use all the T-registers */
/* that MIPS16 does not use (at, t0-t8), and we don't have to save them. */
.set at /* Now it's ok to use at again */
#if 0
j leave
rfe
#endif
lhu t0, 0(k1) /* Fetch the offending instruction */
xor t8, k1, 1 /* Prepare t8 for exit (MIPS16 EPC) */
and t1, t0, 0xf81f /* Check for entry/exit opcode */
bne t1, 0xe809, other_ri
deareg: and t1, t0, 0x0700 /* Isolate the three a-bits */
srl t1, 6 /* Adjust them so x4 is applied */
slt t2, t1, 17 /* See if this is the exit instruction */
beqz t2, doexit
la t2, savea
subu t2, t1 /* Back up one 4-byte sw per arg reg */
jr t2 /* Jump into the instruction table */
rfe /* We run the rest in user-mode */
/* This is the entry instruction! */
sw a3, 12(sp) /* 4: a0-a3 saved */
sw a2, 8(sp) /* 3: a0-a2 saved */
sw a1, 4(sp) /* 2: a0-a1 saved */
sw a0, 0(sp) /* 1: a0 saved */
savea: /* 0: No arg regs saved */
dera: and t1, t0, 0x0020 /* Isolate the save-ra bit */
move t7, sp /* Temporary SP */
beq t1, zero, desreg
subu sp, 32 /* Default SP adjustment */
sw ra, -4(t7)
subu t7, 4
desreg: and t1, t0, 0x00c0 /* Isolate the two s-bits */
beq t1, zero, leave
subu t1, 0x0040
beq t1, zero, leave /* Only one to save... */
sw s0, -4(t7) /* Do the first one */
sw s1, -8(t7) /* Do the last one */
leave: jr t8 /* Exit to unmodified EPC */
nop /* Urgh - the only nop!! */
doexf0: mtc1 v0,$f0 /* Copy float value */
b doex2
doexf1: mtc1 v1,$f0 /* Copy double value */
mtc1 v0,$f1
b doex2
doexit: slt t2, t1, 21
beq t2, zero, doexf0
slt t2, t1, 25
beq t2, zero, doexf1
doex2: and t1, t0, 0x0020 /* Isolate ra bit */
beq t1, zero, dxsreg /* t1 holds ra-bit */
addu t7, sp, 32 /* Temporary SP */
lw ra, -4(t7)
subu t7, 4
dxsreg: and t1, t0, 0x00c0 /* Isolate the two s-bits */
beq t1, zero, leavex
subu t1, 0x0040
beq t1, zero, leavex /* Only one to save... */
lw s0, -4(t7) /* Do the first one */
lw s1, -8(t7) /* Do the last one */
leavex: jr ra /* Exit to ra */
addu sp, 32 /* Clean up stack pointer */
/* Come here for exceptions we can't handle. */
ri_in_32:
other_ri:
check_others: /* call the previous handler */
la k0,__previous
jr k0
nop
/* Trampoline that is copied over the general exception vector by
__install_entry_handler: it simply jumps to __entry_exit_handler.
__previous (in .data) holds the vector words it displaced so they can
be restored and/or chained to. */
__exception_code:
.set noreorder
la k0, __entry_exit_handler
# lui k0, %hi(exception)
# addiu k0, k0, %lo(exception)
jr k0
nop
.set reorder
__exception_code_end:
.data
__previous:
.space (__exception_code_end - __exception_code)
.text
/* void __install_entry_handler(void)
Install our entry/exit reserved instruction exception handler.
*/
.ent __install_entry_handler
.globl __install_entry_handler
__install_entry_handler:
.set noreorder
/* Install the entry/exit RI handler: save the current general exception
vector into __previous, then copy __exception_code over it.
in: void; out: void; clobbers a0-a3, v0. */
mfc0 a0,C0_SR
nop
li a1,SR_BEV
and a1,a1,a0 /* a1 = SR & SR_BEV */
beq a1,$0,baseaddr
lui a0,0x8000 /* delay slot: BEV=0 -> vectors in RAM */
lui a0,0xbfc0 /* BEV=1 -> bootstrap vectors */
addiu a0,a0,0x0100
baseaddr:
addiu a0,a0,0x080 /* a0 = base vector table address */
li a1,(__exception_code_end - __exception_code)
la a2,__exception_code
la a3,__previous
/* there must be a better way of doing this???? */
copyloop:
lw v0,0(a0) /* save original vector word ... */
sw v0,0(a3)
lw v0,0(a2) /* ... and replace it with our stub */
sw v0,0(a0)
addiu a0,a0,4
addiu a2,a2,4
addiu a3,a3,4
subu a1,a1,4
bne a1,$0,copyloop
nop
j ra
nop
.set reorder
.end __install_entry_handler
/* void __remove_entry_handler(void);
Remove our entry/exit reserved instruction exception handler.
*/
.ent __remove_entry_handler
.globl __remove_entry_handler
__remove_entry_handler:
.set noreorder
/* Remove the entry/exit handler by copying the saved vector words
(__previous) back over the general exception vector.
in: void; out: void; clobbers a0, a1, a3, v0. */
mfc0 a0,C0_SR
nop
li a1,SR_BEV
and a1,a1,a0 /* a1 = SR & SR_BEV */
beq a1,$0,res_baseaddr
lui a0,0x8000 /* delay slot: BEV=0 -> vectors in RAM */
lui a0,0xbfc0 /* BEV=1 -> bootstrap vectors */
addiu a0,a0,0x0200
res_baseaddr:
addiu a0,a0,0x0180 /* a0 = base vector table address */
li a1,(__exception_code_end - __exception_code)
la a3,__previous
/* there must be a better way of doing this???? */
res_copyloop:
lw v0,0(a3) /* write saved word back into the vector */
sw v0,0(a0)
addiu a0,a0,4
addiu a3,a3,4
subu a1,a1,4
bne a1,$0,res_copyloop
nop
j ra
nop
.set reorder
.end __remove_entry_handler
/* software_init_hook - install entry/exit handler and arrange to have it
removed at exit. This function is called by crt0.S. */
.text
.globl software_init_hook
.ent software_init_hook
software_init_hook:
.set noreorder
/* Called by crt0: install the entry/exit handler now, and register
__remove_entry_handler with atexit() so it is removed on exit. */
subu sp, sp, 8 /* allocate stack space */
sw ra, 4(sp) /* save return address */
jal __install_entry_handler /* install entry/exit handler */
nop
lui a0, %hi(__remove_entry_handler) /* arrange for exit to */
jal atexit /* de-install handler */
addiu a0, a0, %lo(__remove_entry_handler) /* delay slot */
lw ra, 4(sp) /* get return address */
j ra /* return */
addu sp, sp, 8 /* deallocate stack */
.set reorder
.end software_init_hook
|
32bitmicro/newlib-nano-1.0
| 7,161
|
libgloss/mips/crt0_cfe.S
|
/*
* crt0_cfe.S -- Runtime startup for MIPS targets running CFE.
*
* Copyright 2003
* Broadcom Corporation. All rights reserved.
*
* This software is furnished under license and may be used and copied only
* in accordance with the following terms and conditions. Subject to these
* conditions, you may download, copy, install, use, modify and distribute
* modified or unmodified copies of this software in source and/or binary
* form. No title or ownership is transferred hereby.
*
* 1) Any source code used, modified or distributed must reproduce and
* retain this copyright notice and list of conditions as they appear in
* the source file.
*
* 2) No right is granted to use any trade name, trademark, or logo of
* Broadcom Corporation. The "Broadcom Corporation" name may not be
* used to endorse or promote products derived from this software
* without the prior written permission of Broadcom Corporation.
*
* 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
* NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
* FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
* LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Derived from crt0_cygmon.S:
*
* Copyright (c) 1995, 1996, 1997, 2000 Red Hat, Inc.
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
/*
* This file does minimal runtime startup for code running under
* CFE firmware.
*
* It does minimal hardware initialization. In particular
* it sets Status:FR to match the requested floating point
* mode.
*
* It is meant to be linked with the other files provided by libcfe.a,
* and calls routines in those files.
*/
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#ifdef __mips_embedded_pic
# error -membedded-pic is not supported.
#endif
#include "regs.S"
/*
* Set up some room for a stack. We just grab a chunk of memory.
*/
#define STARTUP_STACK_SIZE (1 * 1024)
.comm _lstack, STARTUP_STACK_SIZE
.text
.align 4
/*
* Without the following nop, GDB thinks _start is a data variable.
* This is probably a bug in GDB in handling a symbol that is at the
* start of the .text section.
*/
nop
/*
* On entry, the following values have been passed in registers
* by the firmware:
*
* a0: firmware handle
* a1: zero (unused)
* a2: firmware callback entrypoint
* a3: CFE entrypoint seal (unused)
*
* They must be preserved until the CFE entrypoint and handle
* are passed to __libcfe_init().
*/
.globl _start
.ent _start
_start:
.set noreorder
/* Program entry under CFE. a0-a3 hold the firmware handle/entrypoint
(see comment above) and must be preserved until __libcfe_init. */
/* Set the global data pointer, defined in the linker script. */
la gp, _gp
#ifndef __mips_soft_float
/* If compiled for hard float, set the FPU mode based on the
compilation flags. Note that this assumes that enough code
will run after the mtc0 to clear any hazards. */
mfc0 t0, C0_SR
or t0, t0, (SR_CU1 | SR_FR) /* enable CP1, 64-bit FP regs */
#if (__mips_fpr == 32)
xor t0, t0, SR_FR /* If 32-bit FP mode, clear FR. */
#endif
mtc0 t0, C0_SR
#endif
.end _start
/*
* zero out the bss section.
*/
.globl _zerobss
.ent _zerobss
_zerobss:
/* Zero the .bss section, one word at a time (falls through from _start).
NOTE(review): the store executes before the bounds test, so the word
at _end itself is also zeroed - presumably harmless since the heap
follows, but worth confirming against the linker script. */
/* These variables are defined in the linker script. */
la v0, _fbss
la v1, _end
3:
sw zero, 0(v0)
bltu v0, v1, 3b
addiu v0, v0, 4 /* Delay slot. */
.end _zerobss
/*
* Setup a small stack so we can run some C code, and do
* the library initialization. (32 bytes are saved for
* the argument registers' stack slots.)
*/
.globl _stackinit
.ent _stackinit
_stackinit:
/* Point sp at the small static _lstack (leaving 32 bytes of argument
save area) so __libcfe_init can run C code, then switch to the real
stack whose top it returns. */
la t0, _lstack
addiu sp, t0, (STARTUP_STACK_SIZE - 32)
jal __libcfe_init
nop
/*
* Setup the stack pointer --
* __libcfe_init() returns the value to be used as the top of
* the program's stack.
*
* We subtract 32 bytes for the 4 argument registers, in case
* main() wants to write them back to the stack. The caller
* allocates stack space for parameters in the old MIPS ABIs.
* We must do this even though we aren't passing arguments,
* because main might be declared to have them.)
*
* We subtract 32 more bytes for the argv/envp setup for the
* call to main().
*/
subu v0, v0, 64
move sp, v0
.end _stackinit
/*
* initialize target specific stuff. Only execute these
* functions if they exist.
*/
.globl hardware_init_hook .text
.globl software_init_hook .text
.type _fini,@function
.type _init,@function
.globl atexit .text
.globl exit .text
.globl _crt0init
.ent _crt0init
_crt0init:
/* Run optional hardware/software init hooks (weak symbols: only called
if non-zero), register _fini with atexit, run global constructors,
build an empty argv/envp in the reserved stack slots, then call
main() and pass its return value to exit(). Never returns. */
la t9, hardware_init_hook # init the hardware if needed
beq t9, zero, 6f
nop
jal t9
nop
6:
la t9, software_init_hook # init the software if needed
beq t9, zero, 7f
nop
jal t9
nop
7:
la a0, _fini
jal atexit
nop
#ifdef GCRT0
.globl _ftext
.globl _extext
la a0, _ftext
la a1, _etext
jal monstartup # start profiling over .text
nop
#endif
jal _init # run global constructors
nop
addiu a1,sp,32 # argv = sp + 32
addiu a2,sp,40 # envp = sp + 40
#if __mips64
sd zero,(a1) # argv[argc] = 0
sd zero,(a2) # envp[0] = 0
#else
sw zero,(a1)
sw zero,(a2)
#endif
jal main # call the program start function
move a0,zero # set argc to 0; delay slot.
# fall through to the "exit" routine
jal exit # call libc exit to run the G++
# destructors
move a0, v0 # pass through the exit code (delay slot)
.end _crt0init
/*
* _exit -- Exit from the application. This is provided in this file because
* program exit should shut down profiling (if GCRT0 is defined),
* and only this file is compiled with GCRT0 defined.
*/
.globl _exit
.ent _exit
_exit:
7:
/* Terminate the application: stop profiling (GCRT0), run the optional
hardware_exit_hook, then hand the exit code to the CFE library.
a0 = exit status. Loops if __libcfe_exit ever returns. */
move s0, a0 /* Save in case we loop. */
#ifdef GCRT0
jal _mcleanup /* flush profiling data */
nop
#endif
la t0, hardware_exit_hook
beq t0,zero,1f /* skip if hook not provided */
nop
jal t0
nop
1:
/* Call into the library to do the heavy lifting. */
jal __libcfe_exit
move a0, s0 /* Delay slot. */
b 7b /* Loop back just in case. */
nop
.end _exit
/* EOF crt0_cfe.S */
|
32bitmicro/newlib-nano-1.0
| 4,707
|
libgloss/mips/pmon.S
|
/*
* pmon.S -- low-level entry points into PMON monitor.
*
* Copyright (c) 1996, 1997, 2002 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#if !defined(__mips64)
/* This machine does not support 64-bit operations. */
#define ADDU addu
#define SUBU subu
#else
/* This machine supports 64-bit operations. */
#define ADDU daddu
#define SUBU dsubu
#endif
#include "regs.S"
.text
.align 2
#ifdef LSI
#define PMON_VECTOR 0xffffffffbfc00200
#else
#define PMON_VECTOR 0xffffffffbfc00500
#endif
#ifndef __mips_eabi
/* Provide named functions for entry into the monitor: */
#define INDIRECT(name,index) \
.globl name; \
.ent name; \
.set noreorder; \
name: la $2,+(PMON_VECTOR+((index)*4)); \
lw $2,0($2); \
j $2; \
nop; \
.set reorder; \
.end name
#else
#define INDIRECT(name,index) \
.globl name; \
.ent name; \
.set noreorder; \
name: la $2,+(PMON_VECTOR+((index)*4)); \
lw $2,0($2); \
SUBU sp,sp,0x40; \
sd ra,0x38(sp); \
sd fp,0x30(sp); \
jal $2; \
move fp,sp; \
ld ra,0x38(sp); \
ld fp,0x30(sp); \
j ra; \
ADDU sp,sp,0x40; \
.set reorder; \
.end name
#endif
/* The following magic numbers are for the slots into the PMON monitor */
/* The first are used as the lo-level library run-time: */
INDIRECT(read,0)
INDIRECT(write,1)
INDIRECT(open,2)
INDIRECT(close,3)
/* The following are useful monitor routines: */
INDIRECT(mon_ioctl,4)
INDIRECT(mon_printf,5)
INDIRECT(mon_vsprintf,6)
INDIRECT(mon_ttctl,7)
INDIRECT(mon_cliexit,8)
INDIRECT(mon_getenv,9)
INDIRECT(mon_onintr,10)
INDIRECT(mon_flush_cache,11)
INDIRECT(_flush_cache,11)
INDIRECT(mon_exception,12)
/* The following routine is required by the "print()" function: */
.globl outbyte
.ent outbyte
.set noreorder
outbyte:
/* Print one character via the monitor's mon_printf.
in: a0 = character; out: void. Builds a 2-byte NUL-terminated
string on the stack and passes it to mon_printf. */
subu sp,sp,0x20 /* allocate stack space for string */
sd ra,0x18(sp) /* stack return address */
sd fp,0x10(sp) /* stack frame-pointer */
move fp,sp /* take a copy of the stack pointer */
/* We leave so much space on the stack for the string (16
characters), since the call to mon_printf seems to corrupt
the 8bytes at offset 8 into the string/stack. */
sb a0,0x00(sp) /* character to print */
sb z0,0x01(sp) /* NUL terminator */
jal mon_printf /* and output the string */
move a0,sp /* take a copy of the string pointer {DELAY SLOT} */
move sp,fp /* recover stack pointer */
ld ra,0x18(sp) /* recover return address */
ld fp,0x10(sp) /* recover frame-pointer */
j ra /* return to the caller */
addu sp,sp,0x20 /* dump the stack space {DELAY SLOT} */
.set reorder
.end outbyte
/* The following routine is required by the "sbrk()" function: */
.globl get_mem_info
.ent get_mem_info
.set noreorder
get_mem_info:
# Fill a 3-word structure: {memory size, icache size, dcache size}.
# in: a0 = pointer to 3 word structure
# out: void
subu sp,sp,0x18 /* create some stack space */
sd ra,0x00(sp) /* stack return address */
sd fp,0x08(sp) /* stack frame-pointer */
sd a0,0x10(sp) /* stack structure pointer */
move fp,sp /* take a copy of the stack pointer */
# The monitor has already sized memory, but unfortunately we
# do not have access to the data location containing the
# memory size.
jal __sizemem
nop
ld a0,0x10(sp) # recover structure pointer
sw v0,0(a0) # amount of memory available
# Deal with getting the cache size information:
mfc0 a1, C0_CONFIG
nop
nop
andi a2,a1,0x7 << 9 # bits 11..9 for instruction cache size
sll a2,a2,12 - 8 # NOTE(review): yields IC<<13, which matches
# 2^(12+IC) only for IC=1,2 - confirm intent
sw a2,4(a0)
andi a2,a1,0x7 << 6 # bits 8..6 for data cache size
sll a2,a2,12 - 5 # NOTE(review): same approximation as above
sw a2,8(a0) # data cache size
#
move sp,fp /* recover stack pointer */
ld ra,0x00(sp) /* recover return address */
ld fp,0x08(sp) /* recover frame-pointer */
j ra /* return to the caller */
addu sp,sp,0x18 /* restore stack pointer {DELAY SLOT} */
.set reorder
.end get_mem_info
#ifdef LSI
# For the LSI MiniRISC board, we can safely assume that we have
# at least one megabyte of RAM.
.globl __sizemem
.ent __sizemem
__sizemem:
# Return an assumed memory size of 1MB in v0 (LSI MiniRISC).
# Reorder mode is in effect here, so the assembler fills the delay slot.
li v0,0x100000
j ra
.end __sizemem
#else
#endif
/* EOF pmon.S */
|
32bitmicro/newlib-nano-1.0
| 6,537
|
libgloss/mips/crt0.S
|
/*
* crt0.S -- startup file for MIPS.
*
* Copyright (c) 1995, 1996, 1997, 2001 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#include "regs.S"
/*
* Set up some room for a stack. We just grab a chunk of memory.
*/
#define STACK_SIZE 0x4000
#define GLOBAL_SIZE 0x2000
#define STARTUP_STACK_SIZE 0x0100
/* This is for referencing addresses that are not in the .sdata or
.sbss section under embedded-pic, or before we've set up gp. */
#ifdef __mips_embedded_pic
# ifdef __mips64
# define LA(t,x) la t,x-PICBASE ; daddu t,s0,t
# else
# define LA(t,x) la t,x-PICBASE ; addu t,s0,t
# endif
#else /* __mips_embedded_pic */
# define LA(t,x) la t,x
#endif /* __mips_embedded_pic */
.comm __memsize, 12
.comm __lstack, STARTUP_STACK_SIZE
.text
.align 2
/* Without the following nop, GDB thinks _start is a data variable.
* This is probably a bug in GDB in handling a symbol that is at the
* start of the .text section.
*/
nop
.globl hardware_hazard_hook .text
.globl _start
.ent _start
_start:
.set noreorder
/* Program entry: set up the status register (FP enable, 64-bit
addressing where configured), probe for a real FPU by writing and
reading back CP1 registers, run the optional hardware_hazard_hook
around SR changes, resynchronise the PC, and set gp. Falls through
to zerobss. */
#ifdef __mips_embedded_pic
#define PICBASE start_PICBASE
PICBASE = .+8
bal PICBASE
nop
move s0,$31 /* s0 = runtime PICBASE address */
#endif
#if __mips<3
# define STATUS_MASK (SR_CU1|SR_PE)
#else
/* Post-mips2 has no SR_PE bit. */
# ifdef __mips64
/* Turn on 64-bit addressing and additional float regs. */
# define STATUS_MASK (SR_CU1|SR_FR|SR_KX|SR_SX|SR_UX)
# else
# if __mips_fpr==32
# define STATUS_MASK (SR_CU1)
# else
/* Turn on additional float regs. */
# define STATUS_MASK (SR_CU1|SR_FR)
# endif
# endif
#endif
li v0, STATUS_MASK
mtc0 v0, C0_SR
mtc0 zero, C0_CAUSE
nop
/* Avoid hazard from FPU enable and other SR changes. */
LA (t0, hardware_hazard_hook)
beq t0,zero,1f /* hook is optional */
nop
jal t0
nop
1:
/* Check for FPU presence. Don't check if we know that soft_float is
being used. (This also avoids illegal instruction exceptions.) */
#ifndef __mips_soft_float
li t2,0xAAAA5555
mtc1 t2,fp0 /* write to FPR 0 */
mtc1 zero,fp1 /* write to FPR 1 */
mfc1 t0,fp0
mfc1 t1,fp1
nop
bne t0,t2,1f /* check for match */
nop
bne t1,zero,1f /* double check */
nop
j 2f /* FPU is present. */
nop
#endif
1:
/* FPU is not present. Set status register to say that. */
li v0, (STATUS_MASK-(STATUS_MASK & SR_CU1))
mtc0 v0, C0_SR
nop
/* Avoid hazard from FPU disable. */
LA (t0, hardware_hazard_hook)
beq t0,zero,2f
nop
jal t0
nop
2:
/* Fix high bits, if any, of the PC so that exception handling
doesn't get confused. */
LA (v0, 3f)
jr v0
nop
3:
LA (gp, _gp) # set the global data pointer
.end _start
/*
* zero out the bss section.
*/
.globl __memsize
.globl get_mem_info .text
.globl __stack
.globl __global
.ent zerobss
zerobss:
/* Zero .bss (falls through from _start), set up a temporary stack,
size memory via get_mem_info, then establish the real stack pointer
either from __stack (if set) or at the top of detected memory.
NOTE(review): the store runs before the bounds test, so the word at
_end itself is also zeroed - presumably harmless (heap follows). */
LA (v0, _fbss)
LA (v1, _end)
3:
sw zero,0(v0)
bltu v0,v1,3b
addiu v0,v0,4 # executed in delay slot
la t0, __lstack # make a small stack so we
addiu sp, t0, STARTUP_STACK_SIZE # can run some C code
la a0, __memsize # get the usable memory size
jal get_mem_info
nop
/* setup the stack pointer */
LA (t0, __stack) # is __stack set ?
bne t0,zero,4f
nop
/* NOTE: a0[0] contains the amount of memory available, and
not the last memory address. */
la a0, __memsize
lw t0,0(a0) # last address of memory available
la t1,K0BASE # cached kernel memory
addu t0,t0,t1 # get the end of memory address
/* Allocate 32 bytes for the register parameters. Allocate 16
bytes for a null argv and envp. Round the result up to 64
bytes to preserve alignment. */
subu t0,t0,64
4:
move sp,t0 # set stack pointer
.end zerobss
/*
* initialize target specific stuff. Only execute these
* functions if they exist.
*/
.globl hardware_init_hook .text
.globl software_init_hook .text
.type _fini,@function
.type _init,@function
.globl atexit .text
.globl exit .text
.ent init
init:
/* Run optional hardware/software init hooks, register _fini with
atexit, run global constructors, build an empty argv/envp, call
main(), then pass its return value to exit(). Never returns. */
LA (t9, hardware_init_hook) # init the hardware if needed
beq t9,zero,6f
nop
jal t9
nop
6:
LA (t9, software_init_hook) # init the software if needed
beq t9,zero,7f
nop
jal t9
nop
7:
LA (a0, _fini)
jal atexit
nop
#ifdef GCRT0
.globl _ftext
.globl _extext
LA (a0, _ftext)
LA (a1, _etext)
jal monstartup # start profiling over .text
nop
#endif
jal _init # run global constructors
nop
addiu a1,sp,32 # argv = sp + 32
addiu a2,sp,40 # envp = sp + 40
#if __mips64
sd zero,(a1) # argv[argc] = 0
sd zero,(a2) # envp[0] = 0
#else
sw zero,(a1)
sw zero,(a2)
#endif
jal main # call the program start function
move a0,zero # set argc to 0
# fall through to the "exit" routine
jal exit # call libc exit to run the G++
# destructors
move a0,v0 # pass through the exit code
.end init
/* Assume the PICBASE set up above is no longer valid below here. */
#ifdef __mips_embedded_pic
#undef PICBASE
#endif
/*
* _exit -- Exit from the application. Normally we cause a user trap
* to return to the ROM monitor for another run. NOTE: This is
* the only other routine we provide in the crt0.o object, since
* it may be tied to the "_start" routine. It also allows
* executables that contain a complete world to be linked with
* just the crt0.o object.
*/
.globl hardware_exit_hook .text
.globl _exit
.ent _exit
_exit:
7:
/* Terminate: stop profiling (GCRT0), run the optional
hardware_exit_hook, then trap back to the ROM monitor. Loops
forever if the break returns. */
#ifdef __mips_embedded_pic
/* Need to reinit PICBASE, since we might be called via exit()
rather than via a return path which would restore old s0. */
#define PICBASE exit_PICBASE
PICBASE = .+8
bal PICBASE
nop
move s0,$31
#endif
#ifdef GCRT0
LA (t0, _mcleanup)
jal t0
nop
#endif
LA (t0, hardware_exit_hook)
beq t0,zero,1f /* hook is optional */
nop
jal t0
nop
1:
# break instruction can cope with 0xfffff, but GAS limits the range:
break 1023
b 7b # but loop back just in-case
nop
.end _exit
/* Assume the PICBASE set up above is no longer valid below here. */
#ifdef __mips_embedded_pic
#undef PICBASE
#endif
/* EOF crt0.S */
|
32bitmicro/newlib-nano-1.0
| 8,225
|
libgloss/mips/vr4300.S
|
/*
* vr4300.S -- CPU specific support routines
*
* Copyright (c) 1995,1996 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#ifndef __mips64
.set mips3
#endif
#ifdef __mips16
/* This file contains 32 bit assembly code. */
.set nomips16
#endif
#include "regs.S"
.text
.align 2
# Taken from "R4300 Preliminary RISC Processor Specification
# Revision 2.0 January 1995" page 39: "The Count
# register... increments at a constant rate... at one-half the
# PClock speed."
# We can use this fact to provide small polled delays.
.globl __cpu_timer_poll
.ent __cpu_timer_poll
__cpu_timer_poll:
.set noreorder
# Busy-wait for (at least) a0 PClock ticks using the CP0 Count register.
# in: a0 = (unsigned int) number of PClock ticks to wait for
# out: void
# clobbers: v0
# The Vr4300 counter updates at half PClock, so divide by 2 to
# get counter delta:
bnezl a0, 1f # continue if delta non-zero
srl a0, a0, 1 # divide ticks by 2 {DELAY SLOT}
# perform a quick return to the caller:
j ra
nop # {DELAY SLOT}
1:
mfc0 v0, C0_COUNT # get current counter value
nop
nop
# We cannot just do the simple test, of adding our delta onto
# the current value (ignoring overflow) and then checking for
# equality. The counter is incrementing every two PClocks,
# which means the counter value can change between
# instructions, making it hard to sample at the exact value
# desired.
# However, we do know that our entry delta value is less than
# half the number space (since we divide by 2 on entry). This
# means we can use a difference in signs to indicate timer
# overflow.
addu a0, v0, a0 # unsigned add (ignore overflow)
# We now have our end value (which will have been
# sign-extended to fill the 64bit register value).
2:
# get current counter value:
mfc0 v0, C0_COUNT
nop
nop
# This is an unsigned 32bit subtraction:
subu v0, a0, v0 # delta = (end - now) {DELAY SLOT}
bgtzl v0, 2b # looping back is most likely
nop
# We have now been delayed (in the foreground) for AT LEAST
# the required number of counter ticks.
j ra # return to caller
nop # {DELAY SLOT}
.set reorder
.end __cpu_timer_poll
# Flush the processor caches to memory:
.globl __cpu_flush
.ent __cpu_flush
__cpu_flush:
.set noreorder
# Flush (index-invalidate I, writeback-invalidate D) the primary caches.
# in: void; out: void; clobbers a0-a3, t0-t3.
# NOTE: The Vr4300 *CANNOT* have any secondary cache (bit 17
# of the CONFIG registered is hard-wired to 1). We just
# provide code to flush the Data and Instruction caches.
# Even though the Vr4300 has hard-wired cache and cache line
# sizes, we still interpret the relevant Config register
# bits. This allows this code to be used for other conforming
# MIPS architectures if desired.
# Get the config register
mfc0 a0, C0_CONFIG
nop
nop
li a1, 1 # a useful constant
#
srl a2, a0, 9 # bits 11..9 for instruction cache size
andi a2, a2, 0x7 # 3bits of information
add a2, a2, 12 # get full power-of-2 value
sllv a2, a1, a2 # instruction cache size = 2^(12+IC)
#
srl a3, a0, 6 # bits 8..6 for data cache size
andi a3, a3, 0x7 # 3bits of information
add a3, a3, 12 # get full power-of-2 value
sllv a3, a1, a3 # data cache size = 2^(12+DC)
#
li a1, (1 << 5) # check IB (instruction cache line size)
and a1, a0, a1 # mask against the CONFIG register value
beqz a1, 1f # branch on result of delay slot operation
nop
li a1, 32 # non-zero, then 32bytes
j 2f # continue
nop
1:
li a1, 16 # 16bytes
2:
#
li t0, (1 << 4) # check DB (data cache line size)
and a0, a0, t0 # mask against the CONFIG register value
beqz a0, 3f # branch on result of delay slot operation
nop
li a0, 32 # non-zero, then 32bytes
j 4f # continue
nop
3:
li a0, 16 # 16bytes
4:
#
# a0 = data cache line size
# a1 = instruction cache line size
# a2 = instruction cache size
# a3 = data cache size
#
lui t0, ((K0BASE >> 16) & 0xFFFF) # t0 = K0 base (cached) address
ori t0, t0, (K0BASE & 0xFFFF)
addu t1, t0, a2 # end cache address
subu t2, a1, 1 # line size mask
not t2 # invert the mask
and t3, t0, t2 # get start address
addu t1, -1
and t1, t2 # get end address
5:
# index-invalidate each instruction cache line:
cache INDEX_INVALIDATE_I,0(t3)
bne t3, t1, 5b
addu t3, a1 # advance one I-line {DELAY SLOT}
#
addu t1, t0, a3 # end cache address
subu t2, a0, 1 # line size mask
not t2 # invert the mask
and t3, t0, t2 # get start address
addu t1, -1
and t1, t2 # get end address
6:
# writeback and invalidate each data cache line:
cache INDEX_WRITEBACK_INVALIDATE_D,0(t3)
bne t3, t1, 6b
addu t3, a0 # advance one D-line {DELAY SLOT}
#
j ra # return to the caller
nop
.set reorder
.end __cpu_flush
# NOTE: This variable should *NOT* be addressed relative to
# the $gp register since this code is executed before $gp is
# initialised... hence we leave it in the text area. This will
# cause problems if this routine is ever ROMmed:
.globl __buserr_cnt
__buserr_cnt:
.word 0 # count of bus errors caught by __buserr
.align 3
__k1_save:
.word 0 # 8-byte slot: __buserr preserves k1 here (sd/ld)
.word 0
.align 2
.ent __buserr
.globl __buserr
__buserr:
.set noat
.set noreorder
# Bus error exception handler: if Cause.ExcCode == 7 (data bus error),
# bump __buserr_cnt and resume at EPC+4; otherwise chain to the
# previous handler saved in __previous.
# k0 and k1 available for use:
mfc0 k0,C0_CAUSE
nop
nop
andi k0,k0,0x7c # extract ExcCode (already shifted left 2)
sub k0,k0,7 << 2 # ExcCode 7 = data bus error
beq k0,$0,__buserr_do
nop
# call the previous handler
la k0,__previous
jr k0
nop
#
__buserr_do:
# Cause is a bus error: count it and step past the faulting instruction.
la k0,__k1_save
sd k1,0(k0) # preserve k1 while we use it below
#
la k1,__buserr_cnt
lw k0,0(k1) # increment counter
addu k0,1
sw k0,0(k1)
#
la k0,__k1_save
ld k1,0(k0) # restore k1
#
mfc0 k0,C0_EPC
nop
nop
addu k0,k0,4 # skip offending instruction
# NOTE(review): wrong if the fault was in a branch
# delay slot (Cause.BD set) - confirm acceptable
mtc0 k0,C0_EPC # update EPC
nop
nop
eret
# j k0
# rfe
.set reorder
.set at
.end __buserr
# Trampoline copied over the general exception vector by
# __default_buserr_handler: it jumps to __buserr. The displaced vector
# words are kept in __previous so they can be restored or chained to.
__exception_code:
.set noreorder
lui k0,%hi(__buserr)
daddiu k0,k0,%lo(__buserr)
jr k0
nop
.set reorder
__exception_code_end:
.data
__previous:
.space (__exception_code_end - __exception_code)
# Subtracting these two label addresses currently works,
# but is not guaranteed to continue working:
# the assembler reserves the right to place the
# two labels in different frags, in which case it
# cannot take their difference.
.text
.ent __default_buserr_handler
.globl __default_buserr_handler
__default_buserr_handler:
.set noreorder
# attach our simple bus error handler: save the current general
# exception vector into __previous, copy __exception_code over it,
# and reset the bus error counter.
# in: void
# out: void
# clobbers: a0-a3, v0
mfc0 a0,C0_SR
nop
li a1,SR_BEV
and a1,a1,a0 # a1 = SR & SR_BEV
beq a1,$0,baseaddr
lui a0,0x8000 # delay slot: BEV=0 -> vectors in RAM
lui a0,0xbfc0 # BEV=1 -> bootstrap vectors
daddiu a0,a0,0x0200
baseaddr:
daddiu a0,a0,0x0180 # offset of general exception vector
# a0 = base vector table address
la a1,__exception_code_end
la a2,__exception_code
subu a1,a1,a2 # a1 = stub size in bytes
la a3,__previous
# there must be a better way of doing this????
copyloop:
lw v0,0(a0) # save original vector word ...
sw v0,0(a3)
lw v0,0(a2) # ... and replace it with our stub
sw v0,0(a0)
daddiu a0,a0,4
daddiu a2,a2,4
daddiu a3,a3,4
subu a1,a1,4
bne a1,$0,copyloop
nop
la a0,__buserr_cnt # reset the error counter
sw $0,0(a0)
j ra
nop
.set reorder
.end __default_buserr_handler
.ent __restore_buserr_handler
.globl __restore_buserr_handler
__restore_buserr_handler:
.set noreorder
# restore original (monitor) bus error handler by copying the saved
# vector words (__previous) back over the general exception vector.
# in: void
# out: void
# clobbers: a0, a1, a3, v0
mfc0 a0,C0_SR
nop
li a1,SR_BEV
and a1,a1,a0 # a1 = SR & SR_BEV
beq a1,$0,res_baseaddr
lui a0,0x8000 # delay slot: BEV=0 -> vectors in RAM
lui a0,0xbfc0 # BEV=1 -> bootstrap vectors
daddiu a0,a0,0x0200
res_baseaddr:
daddiu a0,a0,0x0180 # offset of general exception vector
# a0 = base vector table address
la a1,__exception_code_end
la a3,__exception_code
subu a1,a1,a3 # a1 = stub size in bytes
la a3,__previous
# there must be a better way of doing this????
res_copyloop:
lw v0,0(a3) # write saved word back into the vector
sw v0,0(a0)
daddiu a0,a0,4
daddiu a3,a3,4
subu a1,a1,4
bne a1,$0,res_copyloop
nop
j ra
nop
.set reorder
.end __restore_buserr_handler
.ent __buserr_count
.globl __buserr_count
__buserr_count:
.set noreorder
# Return the number of bus errors caught since the handler was installed.
# in: void
# out: v0 = (unsigned int) __buserr_cnt
la v0,__buserr_cnt
lw v0,0(v0)
j ra
nop
.set reorder
.end __buserr_count
/* EOF vr4300.S */
|
32bitmicro/newlib-nano-1.0
| 6,009
|
libgloss/mips/regs.S
|
/*
* regs.S -- standard MIPS register names.
*
* Copyright (c) 1995 Cygnus Support
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
/* Standard MIPS register names: */
#define zero $0
#define z0 $0
#define v0 $2
#define v1 $3
#define a0 $4
#define a1 $5
#define a2 $6
#define a3 $7
#define t0 $8
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15
#define s0 $16
#define s1 $17
#define s2 $18
#define s3 $19
#define s4 $20
#define s5 $21
#define s6 $22
#define s7 $23
#define t8 $24
#define t9 $25
#define k0 $26 /* kernel private register 0 */
#define k1 $27 /* kernel private register 1 */
#define gp $28 /* global data pointer */
#define sp $29 /* stack-pointer */
#define fp $30 /* frame-pointer */
#define ra $31 /* return address */
#define pc $pc /* pc, used on mips16 */
#define fp0 $f0
#define fp1 $f1
/* Useful memory constants: */
#ifndef __mips64
#define K0BASE 0x80000000
#define K1BASE 0xA0000000
#define K0BASE_ADDR ((char *)K0BASE)
#define K1BASE_ADDR ((char *)K1BASE)
#else
#define K0BASE 0xFFFFFFFF80000000
#define K1BASE 0xFFFFFFFFA0000000
#define K0BASE_ADDR ((char *)0xFFFFFFFF80000000LL)
#define K1BASE_ADDR ((char *)0xFFFFFFFFA0000000LL)
#endif
#define PHYS_TO_K1(a) ((unsigned)(a) | K1BASE)
/* Standard Co-Processor 0 registers */
#define C0_COUNT $9 /* Count Register */
#define C0_SR $12 /* Status Register */
#define C0_CAUSE $13 /* last exception description */
#define C0_EPC $14 /* Exception error address */
#define C0_PRID $15 /* Processor Revision ID */
#define C0_CONFIG $16 /* CPU configuration */
/* Standard Processor Revision ID Register field offsets */
#define PR_IMP 8
/* Standard Config Register field offsets */
#define CR_DB 4
#define CR_IB 5
#define CR_DC 6 /* NOTE v4121 semantics != 43,5xxx semantics */
#define CR_IC 9 /* NOTE v4121 semantics != 43,5xxx semantics */
#define CR_SC 17
#define CR_SS 20
#define CR_SB 22
/* Standard Status Register bitmasks: */
#define SR_CU1 0x20000000 /* Mark CP1 as usable */
#define SR_FR 0x04000000 /* Enable MIPS III FP registers */
#define SR_BEV 0x00400000 /* Controls location of exception vectors */
#define SR_PE 0x00100000 /* Mark soft reset (clear parity error) */
#define SR_KX 0x00000080 /* Kernel extended addressing enabled */
#define SR_SX 0x00000040 /* Supervisor extended addressing enabled */
#define SR_UX 0x00000020 /* User extended addressing enabled */
/* Standard (R4000) cache operations. Taken from "MIPS R4000
Microprocessor User's Manual" 2nd edition: */
#define CACHE_I (0) /* primary instruction */
#define CACHE_D (1) /* primary data */
#define CACHE_SI (2) /* secondary instruction */
#define CACHE_SD (3) /* secondary data (or combined instruction/data) */
#define INDEX_INVALIDATE (0) /* also encodes WRITEBACK if CACHE_D or CACHE_SD */
#define INDEX_LOAD_TAG (1)
#define INDEX_STORE_TAG (2)
#define CREATE_DIRTY_EXCLUSIVE (3) /* CACHE_D and CACHE_SD only */
#define HIT_INVALIDATE (4)
#define CACHE_FILL (5) /* CACHE_I only */
#define HIT_WRITEBACK_INVALIDATE (5) /* CACHE_D and CACHE_SD only */
#define HIT_WRITEBACK (6) /* CACHE_I, CACHE_D and CACHE_SD only */
#define HIT_SET_VIRTUAL (7) /* CACHE_SI and CACHE_SD only */
#define BUILD_CACHE_OP(o,c) (((o) << 2) | (c))
/* Individual cache operations: */
#define INDEX_INVALIDATE_I BUILD_CACHE_OP(INDEX_INVALIDATE,CACHE_I)
#define INDEX_WRITEBACK_INVALIDATE_D BUILD_CACHE_OP(INDEX_INVALIDATE,CACHE_D)
#define INDEX_INVALIDATE_SI BUILD_CACHE_OP(INDEX_INVALIDATE,CACHE_SI)
#define INDEX_WRITEBACK_INVALIDATE_SD BUILD_CACHE_OP(INDEX_INVALIDATE,CACHE_SD)
#define INDEX_LOAD_TAG_I BUILD_CACHE_OP(INDEX_LOAD_TAG,CACHE_I)
#define INDEX_LOAD_TAG_D BUILD_CACHE_OP(INDEX_LOAD_TAG,CACHE_D)
#define INDEX_LOAD_TAG_SI BUILD_CACHE_OP(INDEX_LOAD_TAG,CACHE_SI)
#define INDEX_LOAD_TAG_SD BUILD_CACHE_OP(INDEX_LOAD_TAG,CACHE_SD)
#define INDEX_STORE_TAG_I BUILD_CACHE_OP(INDEX_STORE_TAG,CACHE_I)
#define INDEX_STORE_TAG_D BUILD_CACHE_OP(INDEX_STORE_TAG,CACHE_D)
#define INDEX_STORE_TAG_SI BUILD_CACHE_OP(INDEX_STORE_TAG,CACHE_SI)
#define INDEX_STORE_TAG_SD BUILD_CACHE_OP(INDEX_STORE_TAG,CACHE_SD)
#define CREATE_DIRTY_EXCLUSIVE_D BUILD_CACHE_OP(CREATE_DIRTY_EXCLUSIVE,CACHE_D)
#define CREATE_DIRTY_EXCLUSIVE_SD BUILD_CACHE_OP(CREATE_DIRTY_EXCLUSIVE,CACHE_SD)
#define HIT_INVALIDATE_I BUILD_CACHE_OP(HIT_INVALIDATE,CACHE_I)
#define HIT_INVALIDATE_D BUILD_CACHE_OP(HIT_INVALIDATE,CACHE_D)
#define HIT_INVALIDATE_SI BUILD_CACHE_OP(HIT_INVALIDATE,CACHE_SI)
#define HIT_INVALIDATE_SD BUILD_CACHE_OP(HIT_INVALIDATE,CACHE_SD)
#define CACHE_FILL_I BUILD_CACHE_OP(CACHE_FILL,CACHE_I)
#define HIT_WRITEBACK_INVALIDATE_D BUILD_CACHE_OP(HIT_WRITEBACK_INVALIDATE,CACHE_D)
#define HIT_WRITEBACK_INVALIDATE_SD BUILD_CACHE_OP(HIT_WRITEBACK_INVALIDATE,CACHE_SD)
#define HIT_WRITEBACK_I BUILD_CACHE_OP(HIT_WRITEBACK,CACHE_I)
#define HIT_WRITEBACK_D BUILD_CACHE_OP(HIT_WRITEBACK,CACHE_D)
#define HIT_WRITEBACK_SD BUILD_CACHE_OP(HIT_WRITEBACK,CACHE_SD)
#define HIT_SET_VIRTUAL_SI BUILD_CACHE_OP(HIT_SET_VIRTUAL,CACHE_SI)
#define HIT_SET_VIRTUAL_SD BUILD_CACHE_OP(HIT_SET_VIRTUAL,CACHE_SD)
/*> EOF regs.S <*/
|
32bitmicro/newlib-nano-1.0
| 3,131
|
libgloss/sparc/libsys/libsys-crt0.S
|
! C run time start off
! This file supports:
!
! - both 32bit pointer and 64bit pointer environments (at compile time)
! - an imposed stack bias (of 2047) (at run time)
! - medium/low and medium/anywhere code models (at run time)
! Initial stack setup:
!
! bottom of stack (higher memory address)
! ...
! text of environment strings
! text of argument strings
! envp[envc] = 0 (4/8 bytes)
! ...
! env[0] (4/8 bytes)
! argv[argc] = 0 (4/8 bytes)
! ...
! argv[0] (4/8 bytes)
! argc (4/8 bytes)
! register save area (64 bits by 16 registers = 128 bytes)
! top of stack (%sp)
! Stack Bias:
!
! It is the responsibility of the o/s to set this up.
! We handle both a 0 and 2047 value for the stack bias.
! Medium/Anywhere code model support:
!
! In this model %g4 points to the start of the data segment.
! The text segment can go anywhere, but %g4 points to the *data* segment.
! It is up to the compiler/linker to get this right.
!
! Since this model is statically linked the start of the data segment
! is known at link time. Eg:
!
! sethi %hh(data_start), %g1
! sethi %lm(data_start), %g4
! or %g1, %hm(data_start), %g1
! or %g4, %lo(data_start), %g4
! sllx %g1, 32, %g1
! or %g4, %g1, %g4
!
! FIXME: For now we just assume 0.
! FIXME: if %g1 contains a non-zero value, atexit() should be invoked
! with this value.
#include "syscallasm.h"
#ifndef TARGET_PTR_SIZE
#define TARGET_PTR_SIZE 32
#endif
TEXT_SECTION
ALIGN (4)
GLOBAL (ASM_PRIVATE_SYMBOL (start))
ASM_PRIVATE_SYMBOL (start):
! Process entry point.  Reads argc/argv/envp off the initial stack
! (layout described above), publishes envp in `environ`, then calls
! main(argc, argv, envp) followed by exit()/_exit()/raw SYS_exit.
clr %fp ! mark the outermost frame for backtracers
! We use %g4 even if the code model is Medium/Low (simplifies the code).
clr %g4 ! Medium/Anywhere base reg
! If there is a stack bias in effect, account for it in %g5. Then always
! add %g5 to stack references below. This way the code can be used with
! or without an imposed bias.
! A biased %sp is odd (bias = 2047), so bit 0 tells us which case we have.
andcc %sp, 1, %g5
bnz,a .LHaveBias ! annulled: mov only executes when branch taken
mov 2047, %g5
.LHaveBias:
add %sp, %g5, %sp ! un-bias %sp for the loads below
#if TARGET_PTR_SIZE == 32
! FIXME: We apparently assume here that there is no reserved word.
! This is probably correct, but try to verify it.
ld [%sp + 0x80], %o0 ! argc (above the 16-reg save area)
add %sp, 0x84, %o1 ! argv
add %o0, 1, %o2 ! argc + 1 slots (incl. NULL terminator)...
sll %o2, 2, %o2 ! ...times 4 bytes each
#else /* TARGET_PTR_SIZE == 64 */
ld [%sp + 0x8c], %o0 ! argc.lo
add %sp, 0x90, %o1 ! argv
add %o0, 1, %o2
sll %o2, 3, %o2 ! 8-byte pointers
#endif
add %o1, %o2, %o2 ! envp = argv + (argc+1) slots
! Publish envp in the C-visible `environ` variable.
sethi %hi (ASM_SYMBOL (environ)), %o3
or %o3, %lo (ASM_SYMBOL (environ)), %o3
#if TARGET_PTR_SIZE == 32
st %o2, [%o3 + %g4]
#else /* TARGET_PTR_SIZE == 64 */
stx %o2, [%o3 + %g4]
#endif
! Restore any stack bias before we call main() ...
sub %sp, %g5, %sp
GLOBAL (ASM_SYMBOL (main))
call ASM_SYMBOL (main) ! main(argc in %o0, argv in %o1, envp in %o2)
! FIXME: Not sure if this is needed anymore.
! (The sub below sits in the call's delay slot.)
#if TARGET_PTR_SIZE == 32
sub %sp, 0x20, %sp ! room to push args
#else /* TARGET_PTR_SIZE == 64 */
sub %sp, 0x30, %sp ! room to push args
#endif
! main()'s return value is still in %o0 and becomes exit()'s status.
GLOBAL (ASM_SYMBOL (exit))
call ASM_SYMBOL (exit)
nop ! delay slot
! If exit() somehow returns, fall back to _exit(), then the raw syscall.
GLOBAL (ASM_SYMBOL (_exit))
call ASM_SYMBOL (_exit)
nop ! delay slot
set SYS_exit, %g1
ta SYSCALL_TRAP ! in case user redefines __exit
! If all the above methods fail to terminate the program, try an illegal insn.
! If that does not work, the o/s is hosed more than we are.
WORD (0)
|
32bitmicro/newlib-nano-1.0
| 1,033
|
libgloss/sparc/libsys/cerror.S
|
! Set errno.
! This function is called by all the syscall stubs.
!
! FIXME: We assume errno is the first member of struct _reent.
! Not sure what to do about this.
#include "syscallasm.h"
DATA_SECTION
ALIGN (4)
! Global errno cell written by cerror below.
GLOBAL (ASM_SYMBOL (errno)) ! FIXME: ASM_PRIVATE_SYMBOL ?
ASM_SYMBOL (errno):
WORD (0)
TEXT_SECTION
ALIGN (4)
! cerror: common error tail for syscall stubs.
! in:  %o0 = error code from the kernel
! out: %o0 = -1, `errno` = error code; returns to the stub's CALLER
!      (jmpl %o7+8 skips one extra instruction past the normal return).
GLOBAL (ASM_PRIVATE_SYMBOL (cerror))
ASM_PRIVATE_SYMBOL (cerror):
sethi %hi (ASM_SYMBOL (errno)),%g1
st %o0,[%g1+%lo (ASM_SYMBOL (errno))]
jmpl %o7+8,%g0
mov -1,%o0 ! delay slot: return value
! cerror_r: reentrant variant.
! in:  %o0 = error code, %o1 = &errno slot (reent structure; see FIXME above)
! out: %o0 = -1, *%o1 = error code
GLOBAL (ASM_PRIVATE_SYMBOL (cerror_r))
ASM_PRIVATE_SYMBOL (cerror_r):
st %o0,[%o1]
jmpl %o7+8,%g0
mov -1,%o0 ! delay slot: return value
! Since all system calls need this file, we put various state globals
! here as well.
DATA_SECTION
! CURBRK contains the current top of allocated space.
! END is a private symbol in svr4, but a public one in sunos4.
! FIXME: CURBRK is 4 bytes for now.
ALIGN (4)
GLOBAL (ASM_PRIVATE_SYMBOL (curbrk))
ASM_PRIVATE_SYMBOL (curbrk):
#ifdef SVR4
WORD (ASM_PRIVATE_SYMBOL (end))
#else
WORD (ASM_SYMBOL (end))
#endif
|
32bitmicro/newlib-nano-1.0
| 1,215
|
libgloss/sparc/libsys/sbrk.S
|
! sbrk() system call
#include "syscallasm.h"
TEXT_SECTION
ALIGN (4)
! sbrk(incr): grow the break by `incr` bytes (rounded up to 8) and
! return the PREVIOUS break.  The reentrant variant _sbrk_r(reent, incr)
! stashes the reent pointer in %o5 for cerror_r and shifts incr into %o0.
#ifdef REENT
GLOBAL (ASM_SYMBOL (_sbrk_r))
ASM_SYMBOL (_sbrk_r):
mov %o0,%o5 ! %o5 = reent ptr (survives to the error path)
mov %o1,%o0 ! %o0 = incr
#else
GLOBAL (ASM_SYMBOL (sbrk))
ASM_SYMBOL (sbrk):
#endif
add %o0,7,%o0 ! round incr up to an 8-byte multiple
andn %o0,7,%o0
sethi %hi (ASM_PRIVATE_SYMBOL (curbrk)),%o2
#ifdef __sparc_v9__
lduw [%o2+%lo (ASM_PRIVATE_SYMBOL (curbrk))],%o3
#else
ld [%o2+%lo (ASM_PRIVATE_SYMBOL (curbrk))],%o3
#endif
add %o3,7,%o3 ! round the cached break up too
andn %o3,7,%o3
add %o3,%o0,%o0 ! %o0 = new break = old break + incr
mov %o0,%o4 ! keep a copy; trap may clobber %o0
mov SYS_brk,%g1
ta SYSCALL_TRAP
bcs err ! carry set => syscall failed
nop ! delay slot
st %o4,[%o2+%lo (ASM_PRIVATE_SYMBOL (curbrk))] ! cache the new break
jmpl %o7+8,%g0
mov %o3,%o0 ! delay slot: return the OLD break
! brk(addr): set the break to `addr` (rounded up to 8); return 0.
#ifdef REENT
GLOBAL (ASM_SYMBOL (_brk_r))
ASM_SYMBOL (_brk_r):
mov %o0,%o5 ! %o5 = reent ptr
mov %o1,%o0 ! %o0 = addr
#else
GLOBAL (ASM_SYMBOL (brk))
ASM_SYMBOL (brk):
#endif
add %o0,7,%o0 ! round addr up to an 8-byte multiple
andn %o0,7,%o0
mov %o0,%o2 ! keep a copy across the trap
mov SYS_brk,%g1
ta SYSCALL_TRAP
bcs err ! carry set => syscall failed
nop ! delay slot
sethi %hi (ASM_PRIVATE_SYMBOL (curbrk)),%o3
st %o2,[%o3+%lo (ASM_PRIVATE_SYMBOL (curbrk))] ! cache the new break
retl
mov %g0,%o0 ! delay slot: return 0
! Shared failure path: tail-jump into cerror(_r), which sets errno
! (or *reent) and returns -1 to our caller.
err:
#ifdef REENT
sethi %hi (ASM_PRIVATE_SYMBOL (cerror_r)),%g1
or %g1,%lo (ASM_PRIVATE_SYMBOL (cerror_r)),%g1
#else
sethi %hi (ASM_PRIVATE_SYMBOL (cerror)),%g1
or %g1,%lo (ASM_PRIVATE_SYMBOL (cerror)),%g1
#endif
jmpl %g1,%g0
mov %o5,%o1 ! delay slot: reent ptr for cerror_r (ignored by cerror)
|
33cn/plugin
| 31,125
|
plugin/dapp/evm/executor/vm/common/crypto/bls12381/arithmetic_x86.s
|
// +build amd64,blsasm amd64,blsadx
#include "textflag.h"
// addition w/ modular reduction
// a = (a + b) % p
// a and b are pointers to 6x64-bit little-endian limbs (384-bit field
// elements, assumed < p).  The immediates below are the 6 limbs of the
// field modulus p (BLS12-381 base field, least-significant limb first).
TEXT ·addAssign(SB), NOSPLIT, $0-16
	// | load operand pointers
	MOVQ a+0(FP), DI
	MOVQ b+8(FP), SI
	// | a -> R8..R13
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	// | a += b with carry chain across the limbs
	ADDQ (SI), R8
	ADCQ 8(SI), R9
	ADCQ 16(SI), R10
	ADCQ 24(SI), R11
	ADCQ 32(SI), R12
	ADCQ 40(SI), R13
	// | compute (a+b) - p into scratch registers
	MOVQ R8, R14
	MOVQ R9, R15
	MOVQ R10, CX
	MOVQ R11, DX
	MOVQ R12, SI
	MOVQ R13, BX
	MOVQ $0xb9feffffffffaaab, AX
	SUBQ AX, R14
	MOVQ $0x1eabfffeb153ffff, AX
	SBBQ AX, R15
	MOVQ $0x6730d2a0f6b0f624, AX
	SBBQ AX, CX
	MOVQ $0x64774b84f38512bf, AX
	SBBQ AX, DX
	MOVQ $0x4b1ba7b6434bacd7, AX
	SBBQ AX, SI
	MOVQ $0x1a0111ea397fe69a, AX
	SBBQ AX, BX
	// | CF clear => no borrow => sum >= p: keep the reduced value
	CMOVQCC R14, R8
	CMOVQCC R15, R9
	CMOVQCC CX, R10
	CMOVQCC DX, R11
	CMOVQCC SI, R12
	CMOVQCC BX, R13
	// | store result back into a
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// addition w/ modular reduction
// c = (a + b) % p
// Same algorithm as addAssign, but writes to a separate destination c.
// p's limbs are held in the repeated immediates (BLS12-381 base field).
TEXT ·add(SB), NOSPLIT, $0-24
	// | load operand pointers
	MOVQ a+8(FP), DI
	MOVQ b+16(FP), SI
	// | a -> R8..R13
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	// | a += b with carry chain
	ADDQ (SI), R8
	ADCQ 8(SI), R9
	ADCQ 16(SI), R10
	ADCQ 24(SI), R11
	ADCQ 32(SI), R12
	ADCQ 40(SI), R13
	// | compute (a+b) - p into scratch (DI is reusable now)
	MOVQ R8, R14
	MOVQ R9, R15
	MOVQ R10, CX
	MOVQ R11, DX
	MOVQ R12, SI
	MOVQ R13, BX
	MOVQ $0xb9feffffffffaaab, DI
	SUBQ DI, R14
	MOVQ $0x1eabfffeb153ffff, DI
	SBBQ DI, R15
	MOVQ $0x6730d2a0f6b0f624, DI
	SBBQ DI, CX
	MOVQ $0x64774b84f38512bf, DI
	SBBQ DI, DX
	MOVQ $0x4b1ba7b6434bacd7, DI
	SBBQ DI, SI
	MOVQ $0x1a0111ea397fe69a, DI
	SBBQ DI, BX
	// | CF clear => sum >= p: select the reduced value
	CMOVQCC R14, R8
	CMOVQCC R15, R9
	CMOVQCC CX, R10
	CMOVQCC DX, R11
	CMOVQCC SI, R12
	CMOVQCC BX, R13
	// | store to c
	MOVQ c+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// addition w/o reduction check
// c = (a + b)
// "Lazy" add: the 384-bit sum is stored as-is; any carry out of the top
// limb is dropped (callers are responsible for keeping values in range).
TEXT ·ladd(SB), NOSPLIT, $0-24
	// | load operand pointers
	MOVQ a+8(FP), DI
	MOVQ b+16(FP), SI
	// | a -> R8..R13
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	// | a += b with carry chain
	ADDQ (SI), R8
	ADCQ 8(SI), R9
	ADCQ 16(SI), R10
	ADCQ 24(SI), R11
	ADCQ 32(SI), R12
	ADCQ 40(SI), R13
	// | store to c
	MOVQ c+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// addition w/o reduction check
// a = a + b
// In-place variant of ladd: no modular reduction, top carry dropped.
TEXT ·laddAssign(SB), NOSPLIT, $0-16
	// | load operand pointers
	MOVQ a+0(FP), DI
	MOVQ b+8(FP), SI
	// | a -> R8..R13
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	// | a += b with carry chain
	ADDQ (SI), R8
	ADCQ 8(SI), R9
	ADCQ 16(SI), R10
	ADCQ 24(SI), R11
	ADCQ 32(SI), R12
	ADCQ 40(SI), R13
	// | store back into a
	MOVQ a+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// subtraction w/ modular reduction
// c = (a - b) % p
// Computes a - b; if the subtraction borrows (a < b), adds p back.
TEXT ·sub(SB), NOSPLIT, $0-24
	// | load operand pointers; AX = 0 for the conditional-zero trick below
	MOVQ a+8(FP), DI
	MOVQ b+16(FP), SI
	XORQ AX, AX
	// | a -> R8..R13, then a -= b with borrow chain
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	SUBQ (SI), R8
	SBBQ 8(SI), R9
	SBBQ 16(SI), R10
	SBBQ 24(SI), R11
	SBBQ 32(SI), R12
	SBBQ 40(SI), R13
	// | load p's limbs; if no borrow (CF clear, a >= b) zero them out,
	// | so the addition below adds either p or 0
	MOVQ $0xb9feffffffffaaab, R14
	MOVQ $0x1eabfffeb153ffff, R15
	MOVQ $0x6730d2a0f6b0f624, CX
	MOVQ $0x64774b84f38512bf, DX
	MOVQ $0x4b1ba7b6434bacd7, SI
	MOVQ $0x1a0111ea397fe69a, BX
	CMOVQCC AX, R14
	CMOVQCC AX, R15
	CMOVQCC AX, CX
	CMOVQCC AX, DX
	CMOVQCC AX, SI
	CMOVQCC AX, BX
	ADDQ R14, R8
	ADCQ R15, R9
	ADCQ CX, R10
	ADCQ DX, R11
	ADCQ SI, R12
	ADCQ BX, R13
	// | store to c
	MOVQ c+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// subtraction w/ modular reduction
// a = (a - b) % p
// In-place variant of sub: on borrow (a < b) the modulus p is added back.
TEXT ·subAssign(SB), NOSPLIT, $0-16
	// | load operand pointers; AX = 0 for conditional zeroing
	MOVQ a+0(FP), DI
	MOVQ b+8(FP), SI
	XORQ AX, AX
	// | a -> R8..R13, then a -= b with borrow chain
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	SUBQ (SI), R8
	SBBQ 8(SI), R9
	SBBQ 16(SI), R10
	SBBQ 24(SI), R11
	SBBQ 32(SI), R12
	SBBQ 40(SI), R13
	// | add back p (or 0 when CF clear => no borrow occurred)
	MOVQ $0xb9feffffffffaaab, R14
	MOVQ $0x1eabfffeb153ffff, R15
	MOVQ $0x6730d2a0f6b0f624, CX
	MOVQ $0x64774b84f38512bf, DX
	MOVQ $0x4b1ba7b6434bacd7, SI
	MOVQ $0x1a0111ea397fe69a, BX
	CMOVQCC AX, R14
	CMOVQCC AX, R15
	CMOVQCC AX, CX
	CMOVQCC AX, DX
	CMOVQCC AX, SI
	CMOVQCC AX, BX
	ADDQ R14, R8
	ADCQ R15, R9
	ADCQ CX, R10
	ADCQ DX, R11
	ADCQ SI, R12
	ADCQ BX, R13
	// | store back into a
	MOVQ a+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// subtraction w/o reduction check
// a = (a - b)
// "Lazy" subtract: any final borrow is dropped (callers must guarantee
// a >= b, or handle the wrap themselves).
TEXT ·lsubAssign(SB), NOSPLIT, $0-16
	// | load operand pointers
	MOVQ a+0(FP), DI
	MOVQ b+8(FP), SI
	// | a -> R8..R13, then a -= b with borrow chain
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	SUBQ (SI), R8
	SBBQ 8(SI), R9
	SBBQ 16(SI), R10
	SBBQ 24(SI), R11
	SBBQ 32(SI), R12
	SBBQ 40(SI), R13
	// | store back into a
	MOVQ a+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// doubling w/ reduction
// c = (2 * a) % p
// Doubles via self-add with carry chain, then conditionally subtracts p.
TEXT ·double(SB), NOSPLIT, $0-16
	// | a -> R8..R13, then shift left by one across the limbs
	MOVQ a+8(FP), DI
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	ADDQ R8, R8
	ADCQ R9, R9
	ADCQ R10, R10
	ADCQ R11, R11
	ADCQ R12, R12
	ADCQ R13, R13
	// | compute 2a - p into scratch registers
	MOVQ R8, R14
	MOVQ R9, R15
	MOVQ R10, CX
	MOVQ R11, DX
	MOVQ R12, SI
	MOVQ R13, BX
	MOVQ $0xb9feffffffffaaab, DI
	SUBQ DI, R14
	MOVQ $0x1eabfffeb153ffff, DI
	SBBQ DI, R15
	MOVQ $0x6730d2a0f6b0f624, DI
	SBBQ DI, CX
	MOVQ $0x64774b84f38512bf, DI
	SBBQ DI, DX
	MOVQ $0x4b1ba7b6434bacd7, DI
	SBBQ DI, SI
	MOVQ $0x1a0111ea397fe69a, DI
	SBBQ DI, BX
	// | CF clear => 2a >= p: keep the reduced value
	CMOVQCC R14, R8
	CMOVQCC R15, R9
	CMOVQCC CX, R10
	CMOVQCC DX, R11
	CMOVQCC SI, R12
	CMOVQCC BX, R13
	// | store to c
	MOVQ c+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// doubling w/ reduction
// a = (2 * a) % p
// In-place variant of double.
TEXT ·doubleAssign(SB), NOSPLIT, $0-8
	// | a -> R8..R13, then shift left by one across the limbs
	MOVQ a+0(FP), DI
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	ADDQ R8, R8
	ADCQ R9, R9
	ADCQ R10, R10
	ADCQ R11, R11
	ADCQ R12, R12
	ADCQ R13, R13
	// | compute 2a - p into scratch registers
	MOVQ R8, R14
	MOVQ R9, R15
	MOVQ R10, CX
	MOVQ R11, DX
	MOVQ R12, SI
	MOVQ R13, BX
	MOVQ $0xb9feffffffffaaab, AX
	SUBQ AX, R14
	MOVQ $0x1eabfffeb153ffff, AX
	SBBQ AX, R15
	MOVQ $0x6730d2a0f6b0f624, AX
	SBBQ AX, CX
	MOVQ $0x64774b84f38512bf, AX
	SBBQ AX, DX
	MOVQ $0x4b1ba7b6434bacd7, AX
	SBBQ AX, SI
	MOVQ $0x1a0111ea397fe69a, AX
	SBBQ AX, BX
	// | CF clear => 2a >= p: keep the reduced value
	CMOVQCC R14, R8
	CMOVQCC R15, R9
	CMOVQCC CX, R10
	CMOVQCC DX, R11
	CMOVQCC SI, R12
	CMOVQCC BX, R13
	// | store back into a (DI still holds &a)
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// doubling w/o reduction
// c = 2 * a
// "Lazy" double: top carry dropped, no modular reduction.
TEXT ·ldouble(SB), NOSPLIT, $0-16
	// | a -> R8..R13
	MOVQ a+8(FP), DI
	MOVQ (DI), R8
	MOVQ 8(DI), R9
	MOVQ 16(DI), R10
	MOVQ 24(DI), R11
	MOVQ 32(DI), R12
	MOVQ 40(DI), R13
	// | shift left by one across the limbs
	ADDQ R8, R8
	ADCQ R9, R9
	ADCQ R10, R10
	ADCQ R11, R11
	ADCQ R12, R12
	ADCQ R13, R13
	// | store to c
	MOVQ c+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// negation
// c = p - a
// NOTE(review): no special-casing of a == 0 is visible here, so the
// result for zero input appears to be p itself — confirm callers
// normalize, or never pass zero.
TEXT ·_neg(SB), NOSPLIT, $0-16
	// | load a's pointer
	MOVQ a+8(FP), DI
	// | p -> R8..R13, then subtract a with borrow chain
	MOVQ $0xb9feffffffffaaab, R8
	MOVQ $0x1eabfffeb153ffff, R9
	MOVQ $0x6730d2a0f6b0f624, R10
	MOVQ $0x64774b84f38512bf, R11
	MOVQ $0x4b1ba7b6434bacd7, R12
	MOVQ $0x1a0111ea397fe69a, R13
	SUBQ (DI), R8
	SBBQ 8(DI), R9
	SBBQ 16(DI), R10
	SBBQ 24(DI), R11
	SBBQ 32(DI), R12
	SBBQ 40(DI), R13
	// | store to c
	MOVQ c+0(FP), DI
	MOVQ R8, (DI)
	MOVQ R9, 8(DI)
	MOVQ R10, 16(DI)
	MOVQ R11, 24(DI)
	MOVQ R12, 32(DI)
	MOVQ R13, 40(DI)
	RET
/* | end */
// multiplication without using MULX/ADX
// c = a * b % p
TEXT ·mulNoADX(SB), NOSPLIT, $24-24
// |
/* inputs */
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
MOVQ $0x00, R9
MOVQ $0x00, R10
MOVQ $0x00, R11
MOVQ $0x00, R12
MOVQ $0x00, R13
MOVQ $0x00, R14
MOVQ $0x00, R15
// |
/* i0 */
// | a0 @ CX
MOVQ (DI), CX
// | a0 * b0
MOVQ (SI), AX
MULQ CX
MOVQ AX, (SP)
MOVQ DX, R8
// | a0 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R8
ADCQ DX, R9
// | a0 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R9
ADCQ DX, R10
// | a0 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R10
ADCQ DX, R11
// | a0 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
// | a0 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
// |
/* i1 */
// | a1 @ CX
MOVQ 8(DI), CX
MOVQ $0x00, BX
// | a1 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R8
ADCQ DX, R9
ADCQ $0x00, R10
ADCQ $0x00, BX
MOVQ R8, 8(SP)
MOVQ $0x00, R8
// | a1 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R9
ADCQ DX, R10
ADCQ BX, R11
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a1 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R10
ADCQ DX, R11
ADCQ BX, R12
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a1 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
ADCQ BX, R13
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a1 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ BX, R14
// | a1 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
// |
/* i2 */
// | a2 @ CX
MOVQ 16(DI), CX
MOVQ $0x00, BX
// | a2 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R9
ADCQ DX, R10
ADCQ $0x00, R11
ADCQ $0x00, BX
MOVQ R9, 16(SP)
MOVQ $0x00, R9
// | a2 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R10
ADCQ DX, R11
ADCQ BX, R12
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a2 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
ADCQ BX, R13
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a2 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ BX, R14
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a2 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
ADCQ BX, R15
// | a2 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R14
ADCQ DX, R15
// |
/* i3 */
// | a3 @ CX
MOVQ 24(DI), CX
MOVQ $0x00, BX
// | a3 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R10
ADCQ DX, R11
ADCQ $0x00, R12
ADCQ $0x00, BX
// | a3 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
ADCQ BX, R13
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a3 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ BX, R14
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a3 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
ADCQ BX, R15
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a3 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R14
ADCQ DX, R15
ADCQ BX, R8
// | a3 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R15
ADCQ DX, R8
// |
/* i4 */
// | a4 @ CX
MOVQ 32(DI), CX
MOVQ $0x00, BX
// | a4 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R11
ADCQ DX, R12
ADCQ $0x00, R13
ADCQ $0x00, BX
// | a4 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ BX, R14
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a4 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
ADCQ BX, R15
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a4 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R14
ADCQ DX, R15
ADCQ BX, R8
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a4 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R15
ADCQ DX, R8
ADCQ BX, R9
// | a4 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R8
ADCQ DX, R9
// |
/* i5 */
// | a5 @ CX
MOVQ 40(DI), CX
MOVQ $0x00, BX
// | a5 * b0
MOVQ (SI), AX
MULQ CX
ADDQ AX, R12
ADCQ DX, R13
ADCQ $0x00, R14
ADCQ $0x00, BX
// | a5 * b1
MOVQ 8(SI), AX
MULQ CX
ADDQ AX, R13
ADCQ DX, R14
ADCQ BX, R15
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a5 * b2
MOVQ 16(SI), AX
MULQ CX
ADDQ AX, R14
ADCQ DX, R15
ADCQ BX, R8
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a5 * b3
MOVQ 24(SI), AX
MULQ CX
ADDQ AX, R15
ADCQ DX, R8
ADCQ BX, R9
MOVQ $0x00, BX
ADCQ $0x00, BX
// | a5 * b4
MOVQ 32(SI), AX
MULQ CX
ADDQ AX, R8
ADCQ DX, R9
ADCQ $0x00, BX
// | a5 * b5
MOVQ 40(SI), AX
MULQ CX
ADDQ AX, R9
ADCQ DX, BX
// |
/* */
// |
// | W
// | 0 (SP) | 1 8(SP) | 2 16(SP) | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 R9 | 11 BX
MOVQ (SP), CX
MOVQ 8(SP), DI
MOVQ 16(SP), SI
MOVQ BX, (SP)
MOVQ R9, 8(SP)
// |
/* montgomery reduction */
// |
/* i0 */
// |
// | W
// | 0 CX | 1 DI | 2 SI | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u0 = w0 * inp
MOVQ CX, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w0 @ CX
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, CX
ADCQ DX, BX
// | j1
// | w1 @ DI
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, DI
ADCQ $0x00, DX
ADDQ BX, DI
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w2 @ SI
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, SI
ADCQ $0x00, DX
ADDQ BX, SI
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w3 @ R10
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R10
ADCQ $0x00, DX
ADDQ BX, R10
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w4 @ R11
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ $0x00, DX
ADDQ BX, R11
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w5 @ R12
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
// | w6 @ R13
ADCQ DX, R13
ADCQ $0x00, CX
// |
/* i1 */
// |
// | W
// | 0 - | 1 DI | 2 SI | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u1 = w1 * inp
MOVQ DI, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w1 @ DI
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, DI
ADCQ DX, BX
// | j1
// | w2 @ SI
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, SI
ADCQ $0x00, DX
ADDQ BX, SI
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w3 @ R10
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R10
ADCQ $0x00, DX
ADDQ BX, R10
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w4 @ R11
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ $0x00, DX
ADDQ BX, R11
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w5 @ R12
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w6 @ R13
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ DX, CX
ADDQ BX, R13
// | w7 @ R14
ADCQ CX, R14
MOVQ $0x00, CX
ADCQ $0x00, CX
// |
/* i2 */
// |
// | W
// | 0 - | 1 - | 2 SI | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u2 = w2 * inp
MOVQ SI, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w2 @ SI
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, SI
ADCQ DX, BX
// | j1
// | w3 @ R10
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, R10
ADCQ $0x00, DX
ADDQ BX, R10
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w4 @ R11
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ $0x00, DX
ADDQ BX, R11
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w5 @ R12
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w6 @ R13
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ $0x00, DX
ADDQ BX, R13
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w7 @ R14
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R14
ADCQ DX, CX
ADDQ BX, R14
// | w8 @ R15
ADCQ CX, R15
MOVQ $0x00, CX
ADCQ $0x00, CX
// |
/* i3 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 R10 | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u3 = w3 * inp
MOVQ R10, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w3 @ R10
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, R10
ADCQ DX, BX
// | j1
// | w4 @ R11
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ $0x00, DX
ADDQ BX, R11
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w5 @ R12
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w6 @ R13
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ $0x00, DX
ADDQ BX, R13
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w7 @ R14
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R14
ADCQ $0x00, DX
ADDQ BX, R14
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w8 @ R15
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R15
ADCQ DX, CX
ADDQ BX, R15
// | w9 @ R8
ADCQ CX, R8
MOVQ $0x00, CX
ADCQ $0x00, CX
// |
/* i4 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 - | 4 R11 | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 8(SP) | 11 (SP)
// | | u4 = w4 * inp
MOVQ R11, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w4 @ R11
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, R11
ADCQ DX, BX
// | j1
// | w5 @ R12
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ $0x00, DX
ADDQ BX, R12
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w6 @ R13
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ $0x00, DX
ADDQ BX, R13
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w7 @ R14
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R14
ADCQ $0x00, DX
ADDQ BX, R14
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w8 @ R15
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R15
ADCQ $0x00, DX
ADDQ BX, R15
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w9 @ R8
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, R8
ADCQ DX, CX
ADDQ BX, R8
// | move to idle register
MOVQ 8(SP), DI
// | w10 @ DI
ADCQ CX, DI
MOVQ $0x00, CX
ADCQ $0x00, CX
// |
/* i5 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 - | 4 - | 5 R12
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 DI | 11 (SP)
// | | u5 = w5 * inp
MOVQ R12, AX
MULQ ·inp+0(SB)
MOVQ AX, R9
MOVQ $0x00, BX
// |
/* */
// | j0
// | w5 @ R12
MOVQ ·modulus+0(SB), AX
MULQ R9
ADDQ AX, R12
ADCQ DX, BX
// | j1
// | w6 @ R13
MOVQ ·modulus+8(SB), AX
MULQ R9
ADDQ AX, R13
ADCQ $0x00, DX
ADDQ BX, R13
MOVQ $0x00, BX
ADCQ DX, BX
// | j2
// | w7 @ R14
MOVQ ·modulus+16(SB), AX
MULQ R9
ADDQ AX, R14
ADCQ $0x00, DX
ADDQ BX, R14
MOVQ $0x00, BX
ADCQ DX, BX
// | j3
// | w8 @ R15
MOVQ ·modulus+24(SB), AX
MULQ R9
ADDQ AX, R15
ADCQ $0x00, DX
ADDQ BX, R15
MOVQ $0x00, BX
ADCQ DX, BX
// | j4
// | w9 @ R8
MOVQ ·modulus+32(SB), AX
MULQ R9
ADDQ AX, R8
ADCQ $0x00, DX
ADDQ BX, R8
MOVQ $0x00, BX
ADCQ DX, BX
// | j5
// | w10 @ DI
MOVQ ·modulus+40(SB), AX
MULQ R9
ADDQ AX, DI
ADCQ DX, CX
ADDQ BX, DI
// | w11 @ CX
ADCQ (SP), CX
// |
// | W montgomerry reduction ends
// | 0 - | 1 - | 2 - | 3 - | 4 - | 5 -
// | 6 R13 | 7 R14 | 8 R15 | 9 R8 | 10 DI | 11 CX
// |
/* modular reduction */
MOVQ R13, R10
SUBQ ·modulus+0(SB), R10
MOVQ R14, R11
SBBQ ·modulus+8(SB), R11
MOVQ R15, R12
SBBQ ·modulus+16(SB), R12
MOVQ R8, AX
SBBQ ·modulus+24(SB), AX
MOVQ DI, BX
SBBQ ·modulus+32(SB), BX
MOVQ CX, R9
SBBQ ·modulus+40(SB), R9
// |
/* out */
MOVQ c+0(FP), SI
CMOVQCC R10, R13
MOVQ R13, (SI)
CMOVQCC R11, R14
MOVQ R14, 8(SI)
CMOVQCC R12, R15
MOVQ R15, 16(SI)
CMOVQCC AX, R8
MOVQ R8, 24(SI)
CMOVQCC BX, DI
MOVQ DI, 32(SI)
CMOVQCC R9, CX
MOVQ CX, 40(SI)
RET
// |
/* end */
// multiplication
// c = a * b % p
TEXT ·mulADX(SB), NOSPLIT, $16-24
// |
/* inputs */
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
XORQ AX, AX
// |
/* i0 */
// | a0 @ DX
MOVQ (DI), DX
// | a0 * b0
MULXQ (SI), AX, CX
MOVQ AX, (SP)
// | a0 * b1
MULXQ 8(SI), AX, R8
ADCXQ AX, CX
// | a0 * b2
MULXQ 16(SI), AX, R9
ADCXQ AX, R8
// | a0 * b3
MULXQ 24(SI), AX, R10
ADCXQ AX, R9
// | a0 * b4
MULXQ 32(SI), AX, R11
ADCXQ AX, R10
// | a0 * b5
MULXQ 40(SI), AX, R12
ADCXQ AX, R11
ADCQ $0x00, R12
// |
/* i1 */
// | a1 @ DX
MOVQ 8(DI), DX
XORQ R13, R13
// | a1 * b0
MULXQ (SI), AX, BX
ADOXQ AX, CX
ADCXQ BX, R8
MOVQ CX, 8(SP)
// | a1 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R8
ADCXQ BX, R9
// | a1 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R9
ADCXQ BX, R10
// | a1 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R10
ADCXQ BX, R11
// | a1 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a1 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, R12
ADOXQ R13, R13
ADCXQ BX, R13
// |
/* i2 */
// | a2 @ DX
MOVQ 16(DI), DX
XORQ R14, R14
// | a2 * b0
MULXQ (SI), AX, BX
ADOXQ AX, R8
ADCXQ BX, R9
// | a2 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R9
ADCXQ BX, R10
// | a2 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R10
ADCXQ BX, R11
// | a2 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a2 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R12
ADCXQ BX, R13
// | a2 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, R13
ADOXQ R14, R14
ADCXQ BX, R14
// |
/* i3 */
// | a3 @ DX
MOVQ 24(DI), DX
XORQ R15, R15
// | a3 * b0
MULXQ (SI), AX, BX
ADOXQ AX, R9
ADCXQ BX, R10
// | a3 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R10
ADCXQ BX, R11
// | a3 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a3 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R12
ADCXQ BX, R13
// | a3 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R13
ADCXQ BX, R14
// | a3 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, R14
ADOXQ R15, R15
ADCXQ BX, R15
// |
/* i4 */
// | a4 @ DX
MOVQ 32(DI), DX
XORQ CX, CX
// | a4 * b0
MULXQ (SI), AX, BX
ADOXQ AX, R10
ADCXQ BX, R11
// | a4 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a4 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R12
ADCXQ BX, R13
// | a4 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R13
ADCXQ BX, R14
// | a4 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R14
ADCXQ BX, R15
// | a4 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, R15
ADOXQ CX, CX
ADCXQ BX, CX
// |
/* i5 */
// | a5 @ DX
MOVQ 40(DI), DX
XORQ DI, DI
// | a5 * b0
MULXQ (SI), AX, BX
ADOXQ AX, R11
ADCXQ BX, R12
// | a5 * b1
MULXQ 8(SI), AX, BX
ADOXQ AX, R12
ADCXQ BX, R13
// | a5 * b2
MULXQ 16(SI), AX, BX
ADOXQ AX, R13
ADCXQ BX, R14
// | a5 * b3
MULXQ 24(SI), AX, BX
ADOXQ AX, R14
ADCXQ BX, R15
// | a5 * b4
MULXQ 32(SI), AX, BX
ADOXQ AX, R15
ADCXQ BX, CX
// | a5 * b5
MULXQ 40(SI), AX, BX
ADOXQ AX, CX
ADOXQ BX, DI
ADCQ $0x00, DI
// |
/* */
// |
// | W
// | 0 (SP) | 1 8(SP) | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 DI
MOVQ (SP), BX
MOVQ 8(SP), SI
MOVQ DI, (SP)
// |
// | W ready to mont
// | 0 BX | 1 SI | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// |
/* montgomery reduction */
// | clear flags
XORQ AX, AX
// |
/* i0 */
// |
// | W
// | 0 BX | 1 SI | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u0 = w0 * inp
MOVQ BX, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w0 @ BX
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, BX
ADCXQ DI, SI
// | j1
// | w1 @ SI
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, SI
ADCXQ DI, R8
// | j2
// | w2 @ R8
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R8
ADCXQ DI, R9
// | j3
// | w3 @ R9
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R9
ADCXQ DI, R10
// | j4
// | w4 @ R10
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j5
// | w5 @ R11
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
ADOXQ BX, R12
ADCXQ BX, BX
MOVQ $0x00, AX
ADOXQ AX, BX
// | clear flags
XORQ AX, AX
// |
/* i1 */
// |
// | W
// | 0 - | 1 SI | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u1 = w1 * inp
MOVQ SI, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w1 @ SI
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, SI
ADCXQ DI, R8
// | j1
// | w2 @ R8
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R8
ADCXQ DI, R9
// | j2
// | w3 @ R9
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R9
ADCXQ DI, R10
// | j3
// | w4 @ R10
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j4
// | w5 @ R11
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j5
// | w6 @ R12
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
ADOXQ BX, R13
ADCXQ SI, SI
MOVQ $0x00, AX
ADOXQ AX, SI
// | clear flags
XORQ AX, AX
// |
/* i2 */
// |
// | W
// | 0 - | 1 - | 2 R8 | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u2 = w2 * inp
MOVQ R8, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w2 @ R8
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, R8
ADCXQ DI, R9
// | j1
// | w3 @ R9
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R9
ADCXQ DI, R10
// | j2
// | w4 @ R10
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j3
// | w5 @ R11
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j4
// | w6 @ R12
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
// | j5
// | w7 @ R13
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R13
ADCXQ DI, R14
ADOXQ SI, R14
ADCXQ R8, R8
MOVQ $0x00, AX
ADOXQ AX, R8
// | clear flags
XORQ AX, AX
// |
/* i3 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 R9 | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u3 = w3 * inp
MOVQ R9, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w3 @ R9
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, R9
ADCXQ DI, R10
// | j1
// | w4 @ R10
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j2
// | w5 @ R11
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j3
// | w6 @ R12
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
// | j4
// | w7 @ R13
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R13
ADCXQ DI, R14
// | j5
// | w8 @ R14
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R14
ADCXQ DI, R15
ADOXQ R8, R15
ADCXQ R9, R9
MOVQ $0x00, AX
ADOXQ AX, R9
// | clear flags
XORQ AX, AX
// |
/* i4 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 - | 4 R10 | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u4 = w4 * inp
MOVQ R10, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w4 @ R10
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, R10
ADCXQ DI, R11
// | j1
// | w5 @ R11
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j2
// | w6 @ R12
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
// | j3
// | w7 @ R13
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R13
ADCXQ DI, R14
// | j4
// | w8 @ R14
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R14
ADCXQ DI, R15
// | j5
// | w9 @ R15
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, R15
ADCXQ DI, CX
ADOXQ R9, CX
ADCXQ R10, R10
MOVQ $0x00, AX
ADOXQ AX, R10
// | clear flags
XORQ AX, AX
// |
/* i5 */
// |
// | W
// | 0 - | 1 - | 2 - | 3 - | 4 - | 5 R11
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 (SP)
// | | u5 = w5 * inp
MOVQ R11, DX
MULXQ ·inp+0(SB), DX, DI
// |
/* */
// | j0
// | w5 @ R11
MULXQ ·modulus+0(SB), AX, DI
ADOXQ AX, R11
ADCXQ DI, R12
// | j1
// | w6 @ R12
MULXQ ·modulus+8(SB), AX, DI
ADOXQ AX, R12
ADCXQ DI, R13
// | j2
// | w7 @ R13
MULXQ ·modulus+16(SB), AX, DI
ADOXQ AX, R13
ADCXQ DI, R14
// | j3
// | w8 @ R14
MULXQ ·modulus+24(SB), AX, DI
ADOXQ AX, R14
ADCXQ DI, R15
// | j4
// | w9 @ R15
MULXQ ·modulus+32(SB), AX, DI
ADOXQ AX, R15
ADCXQ DI, CX
// | j5
// | w10 @ CX
MULXQ ·modulus+40(SB), AX, DI
ADOXQ AX, CX
// | w11 @ (SP)
// | move to an idle register
MOVQ (SP), BX
ADCXQ DI, BX
ADOXQ R10, BX
// |
// | W montgomery reduction ends
// | 0 - | 1 - | 2 - | 3 - | 4 - | 5 -
// | 6 R12 | 7 R13 | 8 R14 | 9 R15 | 10 CX | 11 BX
// |
/* modular reduction */
MOVQ R12, AX
SUBQ ·modulus+0(SB), AX
MOVQ R13, DI
SBBQ ·modulus+8(SB), DI
MOVQ R14, SI
SBBQ ·modulus+16(SB), SI
MOVQ R15, R8
SBBQ ·modulus+24(SB), R8
MOVQ CX, R9
SBBQ ·modulus+32(SB), R9
MOVQ BX, R10
SBBQ ·modulus+40(SB), R10
// |
/* out */
MOVQ c+0(FP), R11
CMOVQCC AX, R12
MOVQ R12, (R11)
CMOVQCC DI, R13
MOVQ R13, 8(R11)
CMOVQCC SI, R14
MOVQ R14, 16(R11)
CMOVQCC R8, R15
MOVQ R15, 24(R11)
CMOVQCC R9, CX
MOVQ CX, 32(R11)
CMOVQCC R10, BX
MOVQ BX, 40(R11)
RET
// |
/* end */
|
33cn/plugin
| 7,684
|
plugin/dapp/evm/executor/vm/common/crypto/blake2b/blake2b_amd64.s
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
#include "textflag.h"
// BLAKE2b initialization vector (the SHA-512 IV words h0..h7), split into
// four 16-byte chunks so each pair loads into one XMM register.
DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
// PSHUFB mask: each byte index is (i+3) mod 8 within its 64-bit lane,
// i.e. a rotate-right by 24 bits of each qword.
DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
// PSHUFB mask: byte index (i+2) mod 8 per lane — rotate-right by 16 bits.
DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
// SHUFFLE permutes the state registers between the two half rounds:
// v4 and v5 are swapped, and the 64-bit halves of the v2/v3 and v6/v7
// pairs are recombined with PUNPCKL/PUNPCKH so that each lane of the
// 4x4 BLAKE2b state moves from column position to diagonal position.
// t1 and t2 are scratch; all eight registers are clobbered.
#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v6, t1; \
PUNPCKLQDQ v6, t2; \
PUNPCKHQDQ v7, v6; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ v7, t2; \
MOVO t1, v7; \
MOVO v2, t1; \
PUNPCKHQDQ t2, v7; \
PUNPCKLQDQ v3, t2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v3
// SHUFFLE_INV is the inverse permutation of SHUFFLE: it moves the lanes
// back from diagonal position to column position after the second half
// round. t1 and t2 are scratch; all eight registers are clobbered.
#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v2, t1; \
PUNPCKLQDQ v2, t2; \
PUNPCKHQDQ v3, v2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ v3, t2; \
MOVO t1, v3; \
MOVO v6, t1; \
PUNPCKHQDQ t2, v3; \
PUNPCKLQDQ v7, t2; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v7
// HALF_ROUND applies the BLAKE2b G function to four columns of the state
// in parallel (v0..v7 hold the 4x4 state, two qwords per XMM register;
// m0..m3 are the message words for this half round; t0 is scratch).
// The four rotations of G are implemented as:
//   ror 32: PSHUFD $0xB1 (swap dword halves of each qword)
//   ror 24: PSHUFB with the c40 byte-shuffle mask
//   ror 16: PSHUFB with the c48 byte-shuffle mask
//   ror 63: (x + x) XOR (x >> 63), via PADDQ/PSRLQ/PXOR
#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
PADDQ m0, v0; \
PADDQ m1, v1; \
PADDQ v2, v0; \
PADDQ v3, v1; \
PXOR v0, v6; \
PXOR v1, v7; \
PSHUFD $0xB1, v6, v6; \
PSHUFD $0xB1, v7, v7; \
PADDQ v6, v4; \
PADDQ v7, v5; \
PXOR v4, v2; \
PXOR v5, v3; \
PSHUFB c40, v2; \
PSHUFB c40, v3; \
PADDQ m2, v0; \
PADDQ m3, v1; \
PADDQ v2, v0; \
PADDQ v3, v1; \
PXOR v0, v6; \
PXOR v1, v7; \
PSHUFB c48, v6; \
PSHUFB c48, v7; \
PADDQ v6, v4; \
PADDQ v7, v5; \
PXOR v4, v2; \
PXOR v5, v3; \
MOVOU v2, t0; \
PADDQ v2, t0; \
PSRLQ $63, v2; \
PXOR t0, v2; \
MOVOU v3, t0; \
PADDQ v3, t0; \
PSRLQ $63, v3; \
PXOR t0, v3
// LOAD_MSG gathers eight 64-bit message words m[i0..i7] from the block at
// SI into m0..m3, two words per XMM register: m0 = (m[i0], m[i1]),
// m1 = (m[i2], m[i3]), m2 = (m[i4], m[i5]), m3 = (m[i6], m[i7]).
#define LOAD_MSG(m0, m1, m2, m3, i0, i1, i2, i3, i4, i5, i6, i7) \
MOVQ i0*8(SI), m0; \
PINSRQ $1, i1*8(SI), m0; \
MOVQ i2*8(SI), m1; \
PINSRQ $1, i3*8(SI), m1; \
MOVQ i4*8(SI), m2; \
PINSRQ $1, i5*8(SI), m2; \
MOVQ i6*8(SI), m3; \
PINSRQ $1, i7*8(SI), m3
// func fSSE4(h *[8]uint64, m *[16]uint64, c0, c1 uint64, flag uint64, rounds uint64)
//
// BLAKE2b compression function F (SSE4.1 path) with a configurable round
// count, as used by the EIP-152 "blake2f" precompile. Updates h in place
// from the 16-word message block m, the counter (c0, c1) and the final-block
// flag. Register roles: AX=h, SI=m, BX=remaining rounds, CX=flag.
// The stack pointer is manually aligned down to 16 bytes (saved in BP) so
// aligned MOVO stores to 0(SP) are legal.
TEXT ·fSSE4(SB), 4, $24-48 // frame size = 8 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ m+8(FP), SI
MOVQ c0+16(FP), R8
MOVQ c1+24(FP), R9
MOVQ flag+32(FP), CX
MOVQ rounds+40(FP), BX
// Align SP to 16; original SP kept in BP for the epilogue.
MOVQ SP, BP
MOVQ SP, R10
ADDQ $15, R10
ANDQ $~15, R10
MOVQ R10, SP
MOVOU ·iv3<>(SB), X0
MOVO X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0)
MOVOU ·c40<>(SB), X13
MOVOU ·c48<>(SB), X14
// X12/X15 keep copies of h[0..3] for the final feed-forward XOR.
MOVOU 0(AX), X12
MOVOU 16(AX), X15
// X8 = (c0, c1), the 128-bit message counter.
MOVQ R8, X8
PINSRQ $1, R9, X8
// Working state v0..v15 lives in X0..X7: rows h[0..7], IV, IV^counter/flag.
MOVO X12, X0
MOVO X15, X1
MOVOU 32(AX), X2
MOVOU 48(AX), X3
MOVOU ·iv0<>(SB), X4
MOVOU ·iv1<>(SB), X5
MOVOU ·iv2<>(SB), X6
PXOR X8, X6
MOVO 0(SP), X7
// Each loop iteration below is one BLAKE2b round (two half rounds with the
// column/diagonal SHUFFLE between them); the ten message schedules repeat
// cyclically, and BX counts rounds down with an exit check per round.
loop:
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 0, 2, 4, 6, 1, 3, 5, 7)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 8, 10, 12, 14, 9, 11, 13, 15)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 14, 4, 9, 13, 10, 8, 15, 6)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 1, 0, 11, 5, 12, 2, 7, 3)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 11, 12, 5, 15, 8, 0, 2, 13)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 2, 5, 4, 15, 6, 10, 0, 8)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 9, 5, 2, 10, 0, 7, 4, 15)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 2, 6, 0, 8, 12, 10, 11, 3)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 0, 6, 9, 8, 7, 3, 2, 11)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 5, 15, 8, 2, 0, 4, 6, 10)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 6, 14, 11, 0, 15, 9, 3, 8)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 12, 13, 1, 10, 2, 7, 4, 5)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
SUBQ $1, BX; JCS done
LOAD_MSG(X8, X9, X10, X11, 10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, 15, 9, 3, 13, 11, 14, 12, 0)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
JMP loop
// Feed-forward: h[i] ^= v[i] ^ v[i+8], then restore the caller's SP.
done:
MOVOU 32(AX), X10
MOVOU 48(AX), X11
PXOR X0, X12
PXOR X1, X15
PXOR X2, X10
PXOR X3, X11
PXOR X4, X12
PXOR X5, X15
PXOR X6, X10
PXOR X7, X11
MOVOU X10, 32(AX)
MOVOU X11, 48(AX)
MOVOU X12, 0(AX)
MOVOU X15, 16(AX)
MOVQ BP, SP
RET
|
33cn/plugin
| 23,301
|
plugin/dapp/evm/executor/vm/common/crypto/blake2b/blake2bAVX2_amd64.s
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,amd64,!gccgo,!appengine
#include "textflag.h"
// 256-bit constants for the AVX2 path: the BLAKE2b IV split into two
// YMM-sized halves (iv0 = h0..h3, iv1 = h4..h7).
DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
// VPSHUFB masks (repeated over both 128-bit lanes): rotate each 64-bit
// lane right by 24 bits (c40) and by 16 bits (c48).
DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
// 128-bit copies of the same IV words and shuffle masks for the AVX
// (non-AVX2) path.
DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
// Hand-encoded VPERMQ instructions (byte-emitted because older Go
// assemblers lacked the mnemonic). The last byte is the imm8 lane
// permutation: 0x39 rotates qwords right by one, 0x93 rotates left by
// one, 0x4E swaps the two 128-bit halves.
#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
// ROUND_AVX2 performs one full BLAKE2b round on the state held in
// Y0..Y3 (one state row per YMM register), with m0..m3 the four message
// vectors for the round and t a scratch register. Rotations: VPSHUFD
// $-79 (0xB1) = ror32, VPSHUFB c40 = ror24, VPSHUFB c48 = ror16, and
// the VPADDQ/VPSRLQ $63/VPXOR triple = ror63. The VPERMQ groups
// diagonalize the rows between the half rounds and undo it afterwards.
#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
VPADDQ m0, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m1, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y1_Y1; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y3_Y3; \
VPADDQ m2, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m3, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y3_Y3; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y1_Y1
// Hand-encoded VMOVQ/VPINSRQ loads from (SI). The *_0 variants encode a
// zero displacement; VMOVQ_SI_Xnn(n)/VPINSRQ_1_SI_Xnn(n) take an 8-bit
// displacement n as the trailing byte (so they cannot encode offset 0 —
// callers use the *_0 form for word 0). VPINSRQ_1_* inserts into lane 1.
#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
// Generic 4-word gathers into Y12..Y15: the low XMM half is built with
// VMOVQ+VPINSRQ, the high half assembled in X11 and merged via
// VINSERTI128. The displacement encoding cannot represent 0, hence the
// "must not be 0" restriction; word 0 needs the specialized macros below.
// load msg: Y12 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y12, Y12
// load msg: Y13 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
VMOVQ_SI_X13(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X13(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y13, Y13
// load msg: Y14 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
VMOVQ_SI_X14(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X14(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y14, Y14
// load msg: Y15 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
VMOVQ_SI_X15(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X15(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y15, Y15
// Per-round message-schedule loaders. Each macro name lists the sixteen
// word indices it gathers into Y12..Y15 for one ROUND_AVX2 invocation.
// Wherever index 0 appears, the *_0 load forms are used (the (n) forms
// cannot encode a zero displacement), and adjacent/reversed index pairs
// are loaded with VMOVDQU or VPSHUFD $0x4E (qword swap) as shortcuts.
#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X11(6*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
VMOVQ_SI_X11(11*8); \
VPSHUFD $0x4E, 0*8(SI), X14; \
VPINSRQ_1_SI_X11(5*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
VMOVQ_SI_X11(5*8); \
VMOVDQU 11*8(SI), X12; \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y12, Y12; \
VMOVQ_SI_X13(8*8); \
VMOVQ_SI_X11(2*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X11(13*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
VMOVQ_SI_X15(6*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X15(10*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X13(7*8); \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
VMOVQ_SI_X14_0; \
VPSHUFD $0x4E, 8*8(SI), X11; \
VPINSRQ_1_SI_X14(6*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
VMOVQ_SI_X15_0; \
VMOVQ_SI_X11(6*8); \
VPINSRQ_1_SI_X15(4*8); \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
VMOVQ_SI_X12(6*8); \
VMOVQ_SI_X11(11*8); \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
VMOVQ_SI_X11(1*8); \
VMOVDQU 12*8(SI), X14; \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y14, Y14; \
VMOVQ_SI_X15(2*8); \
VMOVDQU 4*8(SI), X11; \
VPINSRQ_1_SI_X15(7*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
VMOVQ_SI_X13(2*8); \
VPSHUFD $0x4E, 5*8(SI), X11; \
VPINSRQ_1_SI_X13(4*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
VMOVQ_SI_X15(11*8); \
VMOVQ_SI_X11(12*8); \
VPINSRQ_1_SI_X15(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y15, Y15
// func fAVX2(h *[8]uint64, m *[16]uint64, c0, c1 uint64, flag uint64, rounds uint64)
//
// BLAKE2b compression function F, AVX2 path, with a configurable round
// count (EIP-152 "blake2f" variant). Updates h in place. Register roles:
// AX=h, SI=m, BX=remaining rounds, CX=flag; the original SP is saved in
// DX while SP is aligned down to 32 bytes for aligned YMM stack access.
TEXT ·fAVX2(SB), 4, $64-48 // frame size = 32 + 32 byte alignment
MOVQ h+0(FP), AX
MOVQ m+8(FP), SI
MOVQ c0+16(FP), R8
MOVQ c1+24(FP), R9
MOVQ flag+32(FP), CX
MOVQ rounds+40(FP), BX
// Align SP to 32; caller SP kept in DX for the epilogue.
MOVQ SP, DX
MOVQ SP, R10
ADDQ $31, R10
ANDQ $~31, R10
MOVQ R10, SP
// 0(SP)..24(SP) = (c0, c1, flag, 0): the counter/flag vector XORed into
// the IV to form state row 3.
MOVQ CX, 16(SP)
XORQ CX, CX
MOVQ CX, 24(SP)
VMOVDQU ·AVX2_c40<>(SB), Y4
VMOVDQU ·AVX2_c48<>(SB), Y5
// Y8/Y9 keep copies of h for the final feed-forward XOR.
VMOVDQU 0(AX), Y8
VMOVDQU 32(AX), Y9
VMOVDQU ·AVX2_iv0<>(SB), Y6
VMOVDQU ·AVX2_iv1<>(SB), Y7
MOVQ R8, 0(SP)
MOVQ R9, 8(SP)
// Working state: Y0 = h[0..3], Y1 = h[4..7], Y2 = IV[0..3],
// Y3 = IV[4..7] ^ (c0, c1, flag, 0).
VMOVDQA Y8, Y0
VMOVDQA Y9, Y1
VMOVDQA Y6, Y2
VPXOR 0(SP), Y7, Y3
// One full round per LOAD/ROUND pair; the ten BLAKE2b message schedules
// repeat cyclically, and BX counts rounds down with an exit check each.
loop:
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
SUBQ $1, BX; JCS done
LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
JMP loop
// Feed-forward h ^= v_low ^ v_high; VZEROUPPER before returning to
// SSE/Go code per the ABI expectation on AVX-capable CPUs.
done:
VPXOR Y0, Y8, Y8
VPXOR Y1, Y9, Y9
VPXOR Y2, Y8, Y8
VPXOR Y3, Y9, Y9
VMOVDQU Y8, 0(AX)
VMOVDQU Y9, 32(AX)
VZEROUPPER
MOVQ DX, SP
RET
// Hand-encoded VPUNPCKLQDQ/VPUNPCKHQDQ forms used by SHUFFLE_AVX and
// SHUFFLE_AVX_INV. Naming is VPUNPCK{L,H}QDQ_<src2>_<src1>_<dst>:
// the L forms duplicate/interleave low qwords into X15, the H forms
// combine X15's high qword with another register's.
#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
// SHUFFLE_AVX diagonalizes the state between the two AVX half rounds:
// X4/X5 are swapped and the qword halves of X2/X3 and X6/X7 are
// recombined via the hand-encoded VPUNPCK macros (X13..X15 are scratch).
//
// Fix: the final line previously ended with "; \" — a stray
// line-continuation that spliced the NEXT "#define" into this macro's
// body, corrupting both this macro and the one following it. The macro
// must terminate on its last instruction (as in upstream x/crypto).
#define SHUFFLE_AVX() \
VMOVDQA X6, X13; \
VMOVDQA X2, X14; \
VMOVDQA X4, X6; \
VPUNPCKLQDQ_X13_X13_X15; \
VMOVDQA X5, X4; \
VMOVDQA X6, X5; \
VPUNPCKHQDQ_X15_X7_X6; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X13_X7; \
VPUNPCKLQDQ_X3_X3_X15; \
VPUNPCKHQDQ_X15_X2_X2; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X3_X3
// SHUFFLE_AVX_INV undoes SHUFFLE_AVX, moving the lanes back from
// diagonal to column position after the second half round (X13..X15
// are scratch).
//
// Fix: the final line previously ended with "; \" — a stray
// line-continuation that spliced the following "#define HALF_ROUND_AVX"
// line into this macro's body. The macro must terminate on its last
// instruction (as in upstream x/crypto).
#define SHUFFLE_AVX_INV() \
VMOVDQA X2, X13; \
VMOVDQA X4, X14; \
VPUNPCKLQDQ_X2_X2_X15; \
VMOVDQA X5, X4; \
VPUNPCKHQDQ_X15_X3_X2; \
VMOVDQA X14, X5; \
VPUNPCKLQDQ_X3_X3_X15; \
VMOVDQA X6, X14; \
VPUNPCKHQDQ_X15_X13_X3; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X6_X6; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X7_X7
// HALF_ROUND_AVX: the BLAKE2b G function applied to four columns in
// parallel using 3-operand AVX forms (v0..v7 = state, m0..m3 = message
// words, t0 = scratch). Rotations: VPSHUFD $-79 (0xB1) = ror32,
// VPSHUFB c40 = ror24, VPSHUFB c48 = ror16, VPADDQ/VPSRLQ $63/VPXOR
// = ror63.
#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
VPADDQ m0, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m1, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFD $-79, v6, v6; \
VPSHUFD $-79, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPSHUFB c40, v2, v2; \
VPSHUFB c40, v3, v3; \
VPADDQ m2, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m3, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFB c48, v6, v6; \
VPSHUFB c48, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPADDQ v2, v2, t0; \
VPSRLQ $63, v2, v2; \
VPXOR t0, v2, v2; \
VPADDQ v3, v3, t0; \
VPSRLQ $63, v3, v3; \
VPXOR t0, v3, v3
// Generic 8-word gather for the AVX path: two message words per XMM
// register via hand-encoded VMOVQ + VPINSRQ. The displacement encoding
// cannot represent 0, hence the restriction; schedules containing word 0
// use the specialized macros below instead.
// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X13(i2*8); \
VMOVQ_SI_X14(i4*8); \
VMOVQ_SI_X15(i6*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X13(i3*8); \
VPINSRQ_1_SI_X14(i5*8); \
VPINSRQ_1_SI_X15(i7*8)
// Specialized AVX message-schedule loaders for rounds whose index set
// includes word 0 (which the generic (n) encodings cannot address) or
// where adjacent/reversed pairs allow VMOVDQU / VPSHUFD $0x4E (qword
// swap) shortcuts. Each macro comment lists the exact lane layout.
// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(1*8); \
VMOVQ_SI_X15(5*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X13(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(7*8)
// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
VPSHUFD $0x4E, 0*8(SI), X12; \
VMOVQ_SI_X13(11*8); \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(7*8); \
VPINSRQ_1_SI_X13(5*8); \
VPINSRQ_1_SI_X14(2*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
VMOVDQU 11*8(SI), X12; \
VMOVQ_SI_X13(5*8); \
VMOVQ_SI_X14(8*8); \
VMOVQ_SI_X15(2*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14_0; \
VPINSRQ_1_SI_X15(13*8)
// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(6*8); \
VMOVQ_SI_X15_0; \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
VMOVQ_SI_X12(9*8); \
VMOVQ_SI_X13(2*8); \
VMOVQ_SI_X14_0; \
VMOVQ_SI_X15(4*8); \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VPINSRQ_1_SI_X15(15*8)
// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(11*8); \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X13(8*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
MOVQ 0*8(SI), X12; \
VPSHUFD $0x4E, 8*8(SI), X13; \
MOVQ 7*8(SI), X14; \
MOVQ 2*8(SI), X15; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(11*8)
// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
MOVQ 6*8(SI), X12; \
MOVQ 11*8(SI), X13; \
MOVQ 15*8(SI), X14; \
MOVQ 3*8(SI), X15; \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X14(9*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
MOVQ 5*8(SI), X12; \
MOVQ 8*8(SI), X13; \
MOVQ 0*8(SI), X14; \
MOVQ 6*8(SI), X15; \
VPINSRQ_1_SI_X12(15*8); \
VPINSRQ_1_SI_X13(2*8); \
VPINSRQ_1_SI_X14(4*8); \
VPINSRQ_1_SI_X15(10*8)
// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
VMOVDQU 12*8(SI), X12; \
MOVQ 1*8(SI), X13; \
MOVQ 2*8(SI), X14; \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VMOVDQU 4*8(SI), X15
// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
MOVQ 15*8(SI), X12; \
MOVQ 3*8(SI), X13; \
MOVQ 11*8(SI), X14; \
MOVQ 12*8(SI), X15; \
VPINSRQ_1_SI_X12(9*8); \
VPINSRQ_1_SI_X13(13*8); \
VPINSRQ_1_SI_X14(14*8); \
VPINSRQ_1_SI_X15_0
// func fAVX(h *[8]uint64, m *[16]uint64, c0, c1 uint64, flag uint64, rounds uint64)
//
// BLAKE2b compression function F, AVX (128-bit) path, with a
// configurable round count (EIP-152 "blake2f" variant). Updates h in
// place. Register roles: AX=h, SI=m, BX=remaining rounds, CX=flag;
// the original SP is saved in BP while SP is aligned down to 16 bytes
// for aligned VMOVDQA stack access.
TEXT ·fAVX(SB), 4, $24-48 // frame size = 8 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ m+8(FP), SI
MOVQ c0+16(FP), R8
MOVQ c1+24(FP), R9
MOVQ flag+32(FP), CX
MOVQ rounds+40(FP), BX
// Align SP to 16; caller SP kept in BP for the epilogue.
MOVQ SP, BP
MOVQ SP, R10
ADDQ $15, R10
ANDQ $~15, R10
MOVQ R10, SP
// X8/X9 = rotate-by-24/16 byte-shuffle masks, kept live for all rounds.
VMOVDQU ·AVX_c40<>(SB), X0
VMOVDQU ·AVX_c48<>(SB), X1
VMOVDQA X0, X8
VMOVDQA X1, X9
VMOVDQU ·AVX_iv3<>(SB), X0
VMOVDQA X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)
// X10/X11 keep copies of h[0..3] for the final feed-forward XOR.
VMOVDQU 0(AX), X10
VMOVDQU 16(AX), X11
VMOVDQU 32(AX), X2
VMOVDQU 48(AX), X3
// X15 = (c0, c1), the 128-bit message counter, XORed into IV row 2.
VMOVQ_R8_X15
VPINSRQ_1_R9_X15
VMOVDQA X10, X0
VMOVDQA X11, X1
VMOVDQU ·AVX_iv0<>(SB), X4
VMOVDQU ·AVX_iv1<>(SB), X5
VMOVDQU ·AVX_iv2<>(SB), X6
VPXOR X15, X6, X6
VMOVDQA 0(SP), X7
// One BLAKE2b round per half-round pair; the ten message schedules
// repeat cyclically, BX counting rounds down with an exit check each.
loop:
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
SUBQ $1, BX; JCS done
LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
JMP loop
// Feed-forward h[i] ^= v[i] ^ v[i+8]; VZEROUPPER before returning to
// non-VEX code, then restore the caller's SP.
done:
VMOVDQU 32(AX), X14
VMOVDQU 48(AX), X15
VPXOR X0, X10, X10
VPXOR X1, X11, X11
VPXOR X2, X14, X14
VPXOR X3, X15, X15
VPXOR X4, X10, X10
VPXOR X5, X11, X11
VPXOR X6, X14, X2
VPXOR X7, X15, X3
VMOVDQU X2, 32(AX)
VMOVDQU X3, 48(AX)
VMOVDQU X10, 0(AX)
VMOVDQU X11, 16(AX)
VZEROUPPER
MOVQ BP, SP
RET
|
33cn/plugin
| 1,870
|
plugin/dapp/evm/executor/vm/common/crypto/bn256/cloudflare/gfp_arm64.s
|
// +build arm64,!generic

// ARM64 arithmetic for the BN256 base field.  Field elements are 256-bit
// values stored as four 64-bit little-endian limbs, reduced modulo the
// prime ·p2 (defined elsewhere in the package).

// storeBlock: write four 64-bit limbs a0..a3 to the 32-byte block at r.
#define storeBlock(a0,a1,a2,a3, r) \
MOVD a0, 0+r \
MOVD a1, 8+r \
MOVD a2, 16+r \
MOVD a3, 24+r

// loadBlock: read four 64-bit limbs from the 32-byte block at r into a0..a3.
#define loadBlock(r, a0,a1,a2,a3) \
MOVD 0+r, a0 \
MOVD 8+r, a1 \
MOVD 16+r, a2 \
MOVD 24+r, a3

// loadModulus: load the four limbs of the modulus ·p2 into p0..p3.
#define loadModulus(p0,p1,p2,p3) \
MOVD ·p2+0(SB), p0 \
MOVD ·p2+8(SB), p1 \
MOVD ·p2+16(SB), p2 \
MOVD ·p2+24(SB), p3

#include "mul_arm64.h"

// func gfpNeg(c, a *gfP)
// c = -a mod p2.  Computes t = p - a, then u = t - p, and keeps u when
// that second subtraction did not borrow (carry set), so the result is
// reduced even when a == 0 (where p - a == p).
TEXT ·gfpNeg(SB),0,$0-16
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
loadModulus(R5,R6,R7,R8)
// R1..R4 = p - a
SUBS R1, R5, R1
SBCS R2, R6, R2
SBCS R3, R7, R3
SBCS R4, R8, R4
// R5..R8 = (p - a) - p; carry remains set iff no borrow occurred
SUBS R5, R1, R5
SBCS R6, R2, R6
SBCS R7, R3, R7
SBCS R8, R4, R8
CSEL CS, R5, R1, R1
CSEL CS, R6, R2, R2
CSEL CS, R7, R3, R3
CSEL CS, R8, R4, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET

// func gfpAdd(c, a, b *gfP)
// c = a + b mod p2.  R0 holds the 257th (overflow) bit of the sum; a
// trial subtraction of p is kept when it does not borrow.
TEXT ·gfpAdd(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
loadModulus(R9,R10,R11,R12)
MOVD ZR, R0
// R1..R4 = a + b, overflow bit accumulates in R0
ADDS R5, R1
ADCS R6, R2
ADCS R7, R3
ADCS R8, R4
ADCS ZR, R0
// trial subtraction of p (borrow propagates through the R0 limb)
SUBS R9, R1, R5
SBCS R10, R2, R6
SBCS R11, R3, R7
SBCS R12, R4, R8
SBCS ZR, R0, R0
CSEL CS, R5, R1, R1
CSEL CS, R6, R2, R2
CSEL CS, R7, R3, R3
CSEL CS, R8, R4, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET

// func gfpSub(c, a, b *gfP)
// c = a - b mod p2.  If a - b borrows, p is added back; otherwise the
// modulus registers are cleared (CSEL with ZR) and zero is added.
TEXT ·gfpSub(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
loadModulus(R9,R10,R11,R12)
SUBS R5, R1
SBCS R6, R2
SBCS R7, R3
SBCS R8, R4
// carry set => no borrow => add 0 instead of p
CSEL CS, ZR, R9, R9
CSEL CS, ZR, R10, R10
CSEL CS, ZR, R11, R11
CSEL CS, ZR, R12, R12
ADDS R9, R1
ADCS R10, R2
ADCS R11, R3
ADCS R12, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET

// func gfpMul(c, a, b *gfP)
// c = a * b mod p2.  The multiply and reduction are the mul()/gfpReduce()
// macros from mul_arm64.h; their register contract (result left in
// R1..R4, scratch in R9..R16) is defined by that header.
TEXT ·gfpMul(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
mul(R9,R10,R11,R12,R13,R14,R15,R16)
gfpReduce()
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET
|
33cn/plugin
| 2,200
|
plugin/dapp/evm/executor/vm/common/crypto/bn256/cloudflare/gfp_amd64.s
|
// +build amd64,!generic

// AMD64 arithmetic for the BN256 base field.  Field elements are 256-bit
// values stored as four 64-bit little-endian limbs, reduced modulo the
// prime ·p2 (defined elsewhere in the package).

// storeBlock: write four 64-bit limbs a0..a3 to the 32-byte block at r.
#define storeBlock(a0,a1,a2,a3, r) \
MOVQ a0, 0+r \
MOVQ a1, 8+r \
MOVQ a2, 16+r \
MOVQ a3, 24+r

// loadBlock: read four 64-bit limbs from the 32-byte block at r into a0..a3.
#define loadBlock(r, a0,a1,a2,a3) \
MOVQ 0+r, a0 \
MOVQ 8+r, a1 \
MOVQ 16+r, a2 \
MOVQ 24+r, a3

// gfpCarry: conditional reduction.  a0..a4 is a 5-limb value (a4 = the
// overflow limb); b0..b4 receives the trial result a - p, and a0..a3 are
// replaced by it (CMOVQCC) only when the subtraction did not borrow.
#define gfpCarry(a0,a1,a2,a3,a4, b0,b1,b2,b3,b4) \
\ // b = a-p
MOVQ a0, b0 \
MOVQ a1, b1 \
MOVQ a2, b2 \
MOVQ a3, b3 \
MOVQ a4, b4 \
\
SUBQ ·p2+0(SB), b0 \
SBBQ ·p2+8(SB), b1 \
SBBQ ·p2+16(SB), b2 \
SBBQ ·p2+24(SB), b3 \
SBBQ $0, b4 \
\
\ // if b is negative then return a
\ // else return b
CMOVQCC b0, a0 \
CMOVQCC b1, a1 \
CMOVQCC b2, a2 \
CMOVQCC b3, a3

#include "mul_amd64.h"
#include "mul_bmi2_amd64.h"

// func gfpNeg(c, a *gfP)
// c = -a mod p2: computes p - a, then one conditional subtraction of p
// via gfpCarry so the result is reduced.
TEXT ·gfpNeg(SB),0,$0-16
MOVQ ·p2+0(SB), R8
MOVQ ·p2+8(SB), R9
MOVQ ·p2+16(SB), R10
MOVQ ·p2+24(SB), R11
MOVQ a+8(FP), DI
SUBQ 0(DI), R8
SBBQ 8(DI), R9
SBBQ 16(DI), R10
SBBQ 24(DI), R11
MOVQ $0, AX
gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,R15,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET

// func gfpAdd(c, a, b *gfP)
// c = a + b mod p2.  R12 accumulates the carry out of the 256-bit add;
// gfpCarry performs the conditional reduction.
TEXT ·gfpAdd(SB),0,$0-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
loadBlock(0(DI), R8,R9,R10,R11)
MOVQ $0, R12
ADDQ 0(SI), R8
ADCQ 8(SI), R9
ADCQ 16(SI), R10
ADCQ 24(SI), R11
ADCQ $0, R12
gfpCarry(R8,R9,R10,R11,R12, R13,R14,R15,AX,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET

// func gfpSub(c, a, b *gfP)
// c = a - b mod p2.  If a - b borrows (CF set) p is added back;
// otherwise the preloaded modulus registers are zeroed (CMOVQCC with AX)
// and zero is added.
TEXT ·gfpSub(SB),0,$0-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
loadBlock(0(DI), R8,R9,R10,R11)
MOVQ ·p2+0(SB), R12
MOVQ ·p2+8(SB), R13
MOVQ ·p2+16(SB), R14
MOVQ ·p2+24(SB), R15
MOVQ $0, AX
SUBQ 0(SI), R8
SBBQ 8(SI), R9
SBBQ 16(SI), R10
SBBQ 24(SI), R11
CMOVQCC AX, R12
CMOVQCC AX, R13
CMOVQCC AX, R14
CMOVQCC AX, R15
ADDQ R12, R8
ADCQ R13, R9
ADCQ R14, R10
ADCQ R15, R11
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET

// func gfpMul(c, a, b *gfP)
// c = a * b mod p2.  Uses the MULX-based path when the CPU supports BMI2
// (·hasBMI2 flag), otherwise the plain-MUL path.  Both mul*/gfpReduce*
// macros come from the included headers and leave the reduced result in
// R12..R15; the 512-bit product is staged in the 160-byte local frame.
TEXT ·gfpMul(SB),0,$160-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
// Jump to a slightly different implementation if MULX isn't supported.
CMPB ·hasBMI2(SB), $0
JE nobmi2Mul
mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))
storeBlock( R8, R9,R10,R11, 0(SP))
storeBlock(R12,R13,R14,R15, 32(SP))
gfpReduceBMI2()
JMP end
nobmi2Mul:
mul(0(DI),8(DI),16(DI),24(DI), 0(SI), 0(SP))
gfpReduce(0(SP))
end:
MOVQ c+0(FP), DI
storeBlock(R12,R13,R14,R15, 0(DI))
RET
|
32bitmicro/newlib-nano-1.0
| 1,321
|
newlib/libc/machine/rx/strncat.S
|
.file "strncat.S"
.section .text
.global _strncat
.type _strncat,@function
;; char *_strncat (char *dest, const char *src, size_t n)
;; Appends at most n bytes of src to the NUL-terminated string at dest,
;; guarantees the result is NUL-terminated, and returns the original
;; dest pointer.  Uses the RX string instructions suntil.b (scan [r1..]
;; for the byte in r2, at most r3 bytes) and smovu (copy string r2 -> r1,
;; at most r3 bytes, stopping after a NUL).
_strncat:
;; On entry: r1 => Destination
;; r2 => Source
;; r3 => Max number of bytes to copy
mov r1, r4 ; Save a copy of the dest pointer.
mov r2, r5 ; Save a copy of the source pointer.
mov r3, r14 ; Save a copy of the byte count.
mov #0, r2 ; Search for the NUL byte.
mov #-1, r3 ; Search until we run out of memory.
suntil.b ; Find the end of the destination string.
sub #1, r1 ; suntil.b leaves r1 pointing to the byte beyond the NUL.
mov r14, r3 ; Restore the limit on the number of bytes copied.
mov r5, r2 ; Restore the source pointer.
mov r1, r5 ; Save a copy of the dest pointer (the append point).
smovu ; Copy source to destination.
add #0, r14, r3 ; Restore the number of bytes to copy (again), but this time set the Z flag as well.
beq 1f ; If we copied 0 bytes then we already know that the dest string is NUL terminated, so we do not have to do anything.
mov #0, r2 ; Otherwise we must check to see if a NUL byte
mov r5, r1 ; was included in the bytes that were copied.
suntil.b
beq 1f ; Z flag is set if a match was found.
add r14, r5 ; Point at byte after end of copied bytes.
mov.b #0, [r5] ; Store a NUL there.
1:
mov r4, r1 ; Return the original dest pointer.
rts
|
32bitmicro/newlib-nano-1.0
| 1,937
|
newlib/libc/machine/rx/setjmp.S
|
# setjmp/longjmp for Renesas RX.
#
# The jmpbuf looks like this:
#
# Register jmpbuf offset
# R0 0x0
# R1 0x4
# R2 0x8
# R3 0xc
# R4 0x10
# R5 0x14
# R6 0x18
# R7 0x1c
# R8 0x20
# R9 0x24
# R10 0x28
# R11 0x2c
# R12 0x30
# R13 0x34
# R14 0x38
# R15 0x3c
# PC 0x40
#
# R1 contains the pointer to jmpbuf:
#
# int R1 = setjmp (jmp_buf R1)
# void longjmp (jmp_buf R1, int R2)
#
# The ABI allows for R1-R5 to be clobbered by functions. We must be
# careful to always leave the stack in a usable state in case an
# interrupt happens.
.text
.global _setjmp
.type _setjmp, @function
;; int _setjmp (jmp_buf r1)
;; Saves r0-r15 and the caller's return address into the buffer at r1
;; (layout described in the file header: r0 at offset 0 ... PC at 0x40)
;; and returns 0.  On RX, r0 is the stack pointer, so the return address
;; is fetched from [r0].
_setjmp:
mov.l r0, [r1] ; save all the general registers
mov.l r1, 0x4[r1] ; longjmp won't use this, but someone else might.
mov.l r2, 0x8[r1]
mov.l r3, 0xc[r1]
mov.l r4, 0x10[r1]
mov.l r5, 0x14[r1]
mov.l r6, 0x18[r1]
mov.l r7, 0x1c[r1]
mov.l r8, 0x20[r1]
mov.l r9, 0x24[r1]
mov.l r10, 0x28[r1]
mov.l r11, 0x2c[r1]
mov.l r12, 0x30[r1]
mov.l r13, 0x34[r1]
mov.l r14, 0x38[r1]
mov.l r15, 0x3c[r1]
mov.l [r0], r2 ; get return address off the stack
mov.l r2, 0x40[r1] ; PC
mov #0, r1 ; Return 0.
rts
.Lend1:
.size _setjmp, .Lend1 - _setjmp
.global _longjmp
.type _longjmp, @function
;; void _longjmp (jmp_buf r1, int r2)
;; Restores the context saved by _setjmp: the saved r2 slot (offset 4)
;; is first overwritten with the desired return value (coerced to 1 if
;; 0), the saved PC is patched over the return-address slot on the
;; restored stack, then all registers are reloaded and rts "returns"
;; from the original _setjmp call.
_longjmp:
tst r2, r2 ; Set the Z flag if r2 is 0.
stz #1, r2 ; If the Z flag was set put 1 into the return register.
mov r2, 4[r1] ; Put r2 (our return value) into the setjmp buffer as r1.
mov.l [r1], r0 ; Restore the stack - there's a slot for PC
mov.l 0x40[r1], r2 ; Get the saved PC
mov.l r2, [r0] ; Overwrite the old return address
mov.l 0x3c[r1], r15
mov.l 0x38[r1], r14
mov.l 0x34[r1], r13
mov.l 0x30[r1], r12
mov.l 0x2c[r1], r11
mov.l 0x28[r1], r10
mov.l 0x24[r1], r9
mov.l 0x20[r1], r8
mov.l 0x1c[r1], r7
mov.l 0x18[r1], r6
mov.l 0x14[r1], r5
mov.l 0x10[r1], r4
mov.l 0xc[r1], r3
mov.l 0x8[r1], r2
mov.l 0x4[r1], r1 ; This sets up the new return value
rts
.Lend2:
.size _longjmp, .Lend2 - _longjmp
|
32bitmicro/newlib-nano-1.0
| 2,827
|
newlib/libc/machine/m68k/memcpy.S
|
/* a-memcpy.s -- memcpy, optimised for m68k asm
*
* Copyright (c) 2007 mocom software GmbH & Co KG)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "m68kasm.h"
#if defined (__mcoldfire__) || defined (__mc68010__) || defined (__mc68020__) || defined (__mc68030__) || defined (__mc68040__) || defined (__mc68060__)
# define MISALIGNED_OK 1
#else
# define MISALIGNED_OK 0
#endif
.text
.align 4
.globl SYM(memcpy)
.type SYM(memcpy), @function
/* memcpy, optimised
*
* void *memcpy (void *dest, const void *src, size_t len)
*
* strategy:
* - no argument testing (the original memcpy from the GNU lib does
* no checking either)
* - make sure the destination pointer (the write pointer) is long word
* aligned. This is the best you can do, because writing to unaligned
* addresses can be the most costly thing you could do.
* - Once you have figured that out, we do a little loop unrolling
* to further improve speed.
*/
SYM(memcpy):
move.l 4(sp),a0 | dest ptr
move.l 8(sp),a1 | src ptr
move.l 12(sp),d1 | len
cmp.l #8,d1 | if fewer than 8 bytes to transfer,
blo .Lresidue | do not optimise
#if !MISALIGNED_OK
/* Goto .Lresidue if either dest or src is not 4-byte aligned */
move.l a0,d0
and.l #3,d0
bne .Lresidue
move.l a1,d0
and.l #3,d0
bne .Lresidue
#else /* MISALIGNED_OK */
/* align dest */
move.l a0,d0 | copy of dest
neg.l d0
and.l #3,d0 | look for the lower two only
beq 2f | is aligned?
sub.l d0,d1
lsr.l #1,d0 | word align needed?
bcc 1f
move.b (a1)+,(a0)+
1:
lsr.l #1,d0 | long align needed?
bcc 2f
move.w (a1)+,(a0)+
2:
#endif /* !MISALIGNED_OK */
/* long word transfers.  The shifts move the residue bits of the count
into the carry flag one at a time; each residue is copied before the
main 16-bytes-per-iteration loop is entered. */
move.l d1,d0
and.l #3,d1 | byte residue
lsr.l #3,d0
bcc 1f | carry set for 4-byte residue
move.l (a1)+,(a0)+
1:
lsr.l #1,d0 | number of 16-byte transfers
bcc .Lcopy | carry set for 8-byte residue
bra .Lcopy8
1:
move.l (a1)+,(a0)+
move.l (a1)+,(a0)+
.Lcopy8:
move.l (a1)+,(a0)+
move.l (a1)+,(a0)+
.Lcopy:
#if !defined (__mcoldfire__)
dbra d0,1b
sub.l #0x10000,d0
#else
subq.l #1,d0
#endif
bpl 1b
bra .Lresidue
1:
move.b (a1)+,(a0)+ | move residue bytes
.Lresidue:
#if !defined (__mcoldfire__)
dbra d1,1b | loop until done
#else
subq.l #1,d1
bpl 1b
#endif
move.l 4(sp),d0 | return value
rts
|
32bitmicro/newlib-nano-1.0
| 2,494
|
newlib/libc/machine/m68k/memset.S
|
/* a-memset.s -- memset, optimised for fido asm
*
* Copyright (c) 2007 mocom software GmbH & Co KG)
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
#include "m68kasm.h"
.text
.align 4
.globl SYM(memset)
.type SYM(memset), @function
| memset, optimised
|
| void *memset (void *dest, int value, size_t len)
|
| strategy:
| - no argument testing (the original memcpy from the GNU lib does
| no checking either)
| - make sure the destination pointer (the write pointer) is long word
| aligned. This is the best you can do, because writing to unaligned
| addresses can be the most costly thing one could do.
| - we fill long word wise if possible
|
| VG, 2006
|
| bugfixes:
| - distribution of byte value improved - in cases someone gives
| non-byte value
| - residue byte transfer was not working
|
| VG, April 2007
|
SYM(memset):
move.l 4(sp),a0 | dest ptr
move.l 8(sp),d0 | value
move.l 12(sp),d1 | len
cmp.l #16,d1
blo .Lbset | below, byte fills
|
move.l d2,-(sp) | need a register
move.b d0,d2 | distribute low byte to all byte in word
lsl.l #8,d0
move.b d2,d0
move.w d0,d2
swap d0 | rotate 16
move.w d2,d0
|
move.l a0,d2 | copy of src
neg.l d2 | 1 2 3 ==> 3 2 1
and.l #3,d2
beq 2f | is aligned
|
sub.l d2,d1 | fix length
lsr.l #1,d2 | word align needed?
bcc 1f
move.b d0,(a0)+ | fill byte
1:
lsr.l #1,d2 | long align needed?
bcc 2f
move.w d0,(a0)+ | fill word
2:
move.l d1,d2 | number of long transfers (at least 3)
lsr.l #2,d2
subq.l #1,d2
1:
move.l d0,(a0)+ | fill long words
.Llset:
#if !defined (__mcoldfire__)
dbra d2,1b | loop until done
sub.l #0x10000,d2
#else
subq.l #1,d2
#endif
bpl 1b
and.l #3,d1 | residue byte transfers, fixed
move.l (sp)+,d2 | restore d2
bra .Lbset
1:
move.b d0,(a0)+ | fill residue bytes
.Lbset:
#if !defined (__mcoldfire__)
dbra d1,1b | loop until done
#else
subq.l #1,d1
bpl 1b
#endif
move.l 4(sp),d0 | return value
rts
|
32bitmicro/newlib-nano-1.0
| 1,479
|
newlib/libc/machine/mt/setjmp.S
|
# setjmp/longjmp for mt.
#
# The jmpbuf looks like this:
#
# Register jmpbuf offset
# R0 --- --
# R1 0x4 4
# R2 0x8 8
# R3 0xc 12
# R4 0x10 16
# R5 0x14 20
# R6 0x18 24
# R7 0x1c 28
# R8 0x20 32
# R9 ---- --
# R10 ---- --
# R11 0x2c 44
# R12 0x30 48
# R13 0x34 52
# R14 0x38 56
# R15 0x3c 60
#
# R1 contains the pointer to jmpbuf
.text
.global setjmp
.type setjmp ,@function
# int setjmp (jmp_buf r1)
# Saves r1-r8 and r11-r15 into the buffer at r1 (layout in the file
# header; r1's own slot at offset 4 holds the jmpbuf pointer itself,
# which longjmp relies on) and returns 0 in r11.
# NOTE(review): the "or r0, r0, r0" between stores looks like a nop
# spacer (presumably a pipeline/delay-slot requirement) -- confirm
# against the mt architecture manual before removing.
setjmp:
stw r1, r1, #4
or r0, r0, r0
stw r2, r1, #8
or r0, r0, r0
stw r3, r1, #12
or r0, r0, r0
stw r4, r1, #16
or r0, r0, r0
stw r5, r1, #20
or r0, r0, r0
stw r6, r1, #24
or r0, r0, r0
stw r7, r1, #28
or r0, r0, r0
stw r8, r1, #32
or r0, r0, r0
stw r11, r1, #44
or r0, r0, r0
stw r12, r1, #48
or r0, r0, r0
stw r13, r1, #52
or r0, r0, r0
stw r14, r1, #56
or r0, r0, r0
stw r15, r1, #60
jal r0, r14
addi r11, r0, #0	# return value 0 (placed after the jump)
.Lend1:
.size setjmp,.Lend1-setjmp
.global longjmp
.type longjmp,@function
# void longjmp (jmp_buf r1, int r2)
# Restores the registers saved by setjmp and jumps to the saved return
# address (r14), making the original setjmp call appear to return r2 --
# or 1 when r2 is 0, since setjmp must never appear to return 0.
# The first reload (ldw r1, r1, #4) replaces r1 with the value setjmp
# stored there, which was the jmpbuf pointer itself, so the remaining
# loads still index the jmpbuf correctly.
# NOTE(review): the "or r0, r0, r0" nop spacers match setjmp above.
longjmp:
or r9, r1, r1
or r11, r2, r2	# stash return value before r2 is reloaded below
ldw r1, r1, #4
or r0, r0, r0
ldw r2, r1, #8
or r0, r0, r0
ldw r3, r1, #12
or r0, r0, r0
ldw r4, r1, #16
or r0, r0, r0
ldw r5, r1, #20
or r0, r0, r0
ldw r6, r1, #24
or r0, r0, r0
ldw r7, r1, #28
or r0, r0, r0
ldw r8, r1, #32
or r0, r0, r0
ldw r12, r1, #48
or r0, r0, r0
ldw r13, r1, #52
or r0, r0, r0
ldw r14, r1, #56
or r0, r0, r0
ldw r15, r1, #60
brne r0, r11, .L01	# keep the caller's value when it is non-zero
or r0, r0, r0
addi r11, r0, #1	# caller passed 0: return 1 instead
.L01:
jal r0, r14
or r0, r0, r0
.Lend2:
# Bug fix: the size expression previously read ".Lend2-longjmp2",
# referencing a non-existent symbol "longjmp2".
.size longjmp,.Lend2-longjmp
|
32bitmicro/newlib-nano-1.0
| 2,426
|
newlib/libc/machine/crx/setjmp.S
|
##############################################################################
# setjmp.S -- CRX setjmp routine #
# #
# Copyright (c) 2004 National Semiconductor Corporation #
# #
# The authors hereby grant permission to use, copy, modify, distribute, #
# and license this software and its documentation for any purpose, provided #
# that existing copyright notices are retained in all copies and that this #
# notice is included verbatim in any distributions. No written agreement, #
# license, or royalty fee is required for any of the authorized uses. #
# Modifications to this software may be copyrighted by their authors #
# and need not follow the licensing terms described here, provided that #
# the new terms are clearly indicated on the first page of each file where #
# they apply. #
# #
# C library -- setjmp, longjmp #
# longjmp(a,v) #
# will generate a "return(v)" #
# from the last call to #
# setjmp(a) #
# by restoring r7-ra, sp, #
# and pc from 'a' #
# and doing a return. (Makes sure that longjmp never returns 0). #
##############################################################################
.text
.file "setjmp.s"
.align 4
.globl _setjmp
.align 4
# int _setjmp (jmp_buf r2)
# Saves r7-r14 (storm register-mask store) and sp into the buffer at r2,
# then returns 0 in r0.
# NOTE(review): storm/stord layout (where sp lands relative to the
# register block) should be confirmed against the CRX ISA manual.
_setjmp:
#r2: .blkw
storm r2,{r7,r8,r9,r10,r11,r12,r13,r14}
stord sp,0(r2)
movd $0,r0
jump ra
.globl _longjmp
# void _longjmp (jmp_buf r2, int r3)
# Restores the registers saved by _setjmp and "returns" r3 from the
# original _setjmp call; a zero r3 is replaced by 1 so _setjmp never
# appears to return 0.
# NOTE(review): _setjmp stores ...,r13,r14 while this loads ...,r13,ra --
# confirm that r14 and ra alias on CRX, otherwise the saved return
# address is never restored.
_longjmp:
#r2: .blkw # pointer save area
#r3: .blkw # ret value
loadm r2, {r7,r8,r9,r10,r11,r12,r13,ra}
loadd 0(r2), sp
movd r3, r0
cmpd $0, r3
bne end1
movd $1, r0
end1:
jump ra
.align 4
|
32bitmicro/newlib-nano-1.0
| 1,089
|
newlib/libc/machine/d10v/setjmp.S
|
; setjmp/longjmp for D10V. The jmpbuf looks like this:
;
; Register jmpbuf offset
; R6 0x00
; R7 0x02
; R8 0x04
; R9 0x06
; R10 0x08
; R11 0x0a
; R13 (return address) 0x0c
; R15 (SP) 0x0E
.text
.globl setjmp
.type setjmp,@function
.stabs "setjmp.S",100,0,0,setjmp
.stabs "int:t(0,1)=r(0,1);-65536;65535;",128,0,0,0
.stabs "setjmp:F(0,1)",36,0,1,setjmp
; int setjmp (jmp_buf r0)
; Saves r6-r11 (as register pairs), r13 (return address) and r15 (SP)
; into the jmpbuf -- layout in the file header -- then returns 0.
setjmp:
; Address of jmpbuf is passed in R0. Save the appropriate registers.
st2w r6, @r0+
st2w r8, @r0+
st2w r10, @r0+
st r13, @r0+
st r15, @r0+
; Return 0 to caller
ldi r0, 0
jmp r13
.Lsetjmp:
.size setjmp,.Lsetjmp-setjmp
.stabs "",36,0,0,.Lsetjmp-setjmp
.globl longjmp
.type longjmp,@function
.stabs "longjmp:F(0,1)",36,0,1,longjmp
; void longjmp (jmp_buf r0, int r1)
; Restores the registers saved by setjmp and jumps to the saved return
; address, making the original setjmp call appear to return r1 (or 1
; when r1 is 0).
longjmp:
; Address of jmpbuf is in R0. Restore the registers.
ld2w r6, @r0+
ld2w r8, @r0+
ld2w r10, @r0+
ld r13, @r0+
ld r15, @r0+
; Value to return to caller is in R1. If caller attempted to return 0,
; return 1 instead.
mv r0, r1
cmpeqi r0, 0
exef0t || ldi r0,1
jmp r13
.Llongjmp:
.size longjmp,.Llongjmp-longjmp
.stabs "",36,0,0,.Llongjmp-longjmp
|
32bitmicro/newlib-nano-1.0
| 1,087
|
newlib/libc/machine/i386/memcpy.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memcpy)
SOTYPE_FUNCTION(memcpy)
/* void *memcpy (void *dst, const void *src, size_t n)
   Copies n bytes from src to dst and returns dst.  Unless compiled for
   size, transfers of more than 8 bytes first align dst to a 4-byte
   boundary, then move whole longwords with rep movsl. */
SYM (memcpy):
pushl ebp
movl esp,ebp
pushl esi
pushl edi
pushl ebx
movl 8(ebp),edi		/* dst */
movl 16(ebp),ecx	/* n */
movl 12(ebp),esi	/* src */
cld
#ifndef __OPTIMIZE_SIZE__
cmpl $8,ecx
jbe .L3
/* move any preceding bytes until destination address is long word aligned */
movl edi,edx
movl ecx,ebx		/* ebx = total remaining count */
andl $3,edx
jz .L11
movl $4,ecx
subl edx,ecx
andl $3,ecx		/* ecx = bytes needed to reach alignment */
subl ecx,ebx
rep
movsb
mov ebx,ecx
/* move bytes a long word at a time */
.L11:
shrl $2,ecx
.p2align 2
rep
movsl
movl ebx,ecx
andl $3,ecx		/* ecx = trailing byte residue */
#endif /* !__OPTIMIZE_SIZE__ */
/* handle any remaining bytes */
.L3:
rep
movsb
.L5:
movl 8(ebp),eax		/* return dst */
leal -12(ebp),esp
popl ebx
popl edi
popl esi
leave
ret
|
32bitmicro/newlib-nano-1.0
| 1,681
|
newlib/libc/machine/i386/setjmp.S
|
/* This is file is a merger of SETJMP.S and LONGJMP.S */
/*
* This file was modified to use the __USER_LABEL_PREFIX__ and
* __REGISTER_PREFIX__ macros defined by later versions of GNU cpp by
* Joel Sherrill (joel@OARcorp.com)
* Slight change: now includes i386mach.h for this (Werner Almesberger)
*
* Copyright (C) 1991 DJ Delorie
* All rights reserved.
*
* Redistribution and use in source and binary forms is permitted
* provided that the above copyright notice and following paragraph are
* duplicated in all such forms.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
** jmp_buf:
** eax ebx ecx edx esi edi ebp esp eip
** 0 4 8 12 16 20 24 28 32
*/
#include "i386mach.h"
.global SYM (setjmp)
.global SYM (longjmp)
SOTYPE_FUNCTION(setjmp)
SOTYPE_FUNCTION(longjmp)
/* int setjmp (jmp_buf j)
   Saves the register context into j (layout in the header comment
   above) and returns 0.  edi and ebp were pushed on entry, so the
   caller's copies are recovered from the frame; the saved esp is the
   entry esp plus 12 (skipping pushed edi, ebp, and the return
   address), i.e. the caller's esp after this call returns. */
SYM (setjmp):
pushl ebp
movl esp,ebp
pushl edi
movl 8 (ebp),edi	/* edi = &j */
movl eax,0 (edi)
movl ebx,4 (edi)
movl ecx,8 (edi)
movl edx,12 (edi)
movl esi,16 (edi)
movl -4 (ebp),eax	/* caller's edi (pushed above) */
movl eax,20 (edi)
movl 0 (ebp),eax	/* caller's ebp */
movl eax,24 (edi)
movl esp,eax
addl $12,eax		/* caller's post-return esp */
movl eax,28 (edi)
movl 4 (ebp),eax	/* return address -> saved eip */
movl eax,32 (edi)
popl edi
movl $0,eax
leave
ret
/* void longjmp (jmp_buf j, int retval)
   Restores the context saved by setjmp; the original setjmp call
   appears to return retval, coerced to 1 when it is 0.  Interrupts are
   masked (__CLI/__STI) while esp is being switched and the saved eip is
   pushed as the new return address. */
SYM (longjmp):
pushl ebp
movl esp,ebp
movl 8(ebp),edi /* get jmp_buf */
movl 12(ebp),eax /* store retval in j->eax */
testl eax,eax
jne 0f
incl eax	/* retval 0 becomes 1 */
0:
movl eax,0(edi)
movl 24(edi),ebp
__CLI
movl 28(edi),esp
pushl 32(edi)	/* saved eip becomes the return address */
movl 0(edi),eax
movl 4(edi),ebx
movl 8(edi),ecx
movl 12(edi),edx
movl 16(edi),esi
movl 20(edi),edi
__STI
ret
|
32bitmicro/newlib-nano-1.0
| 2,022
|
newlib/libc/machine/i386/memmove.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memmove)
SOTYPE_FUNCTION(memmove)
/* void *memmove (void *dst, const void *src, size_t n)
   Like memcpy but safe for overlapping regions: when src < dst and the
   regions overlap, the copy is performed backwards (direction flag set)
   so source bytes are read before being overwritten.  Returns dst. */
SYM (memmove):
pushl ebp
movl esp,ebp
pushl esi
pushl edi
pushl ebx
movl 8(ebp),edi		/* dst */
movl 16(ebp),ecx	/* n */
movl 12(ebp),esi	/* src */
/* check for destructive overlap (src < dst && dst < src + length) */
cld
cmpl edi,esi
jae .L2
leal -1(ecx,esi),ebx	/* ebx = last byte of src */
cmpl ebx,edi
ja .L2
/* IF: destructive overlap, must copy backwards */
addl ecx,esi
addl ecx,edi
std			/* reverse direction for the string ops */
#ifndef __OPTIMIZE_SIZE__
cmpl $8,ecx
jbe .L13
.L18:
/* move trailing bytes in reverse until destination address is long word aligned */
movl edi,edx
movl ecx,ebx		/* ebx = total remaining count */
andl $3,edx
jz .L21
movl edx,ecx
decl esi
decl edi
subl ecx,ebx
rep
movsb
mov ebx,ecx
incl esi
incl edi
.L21:
/* move bytes in reverse, a long word at a time */
shrl $2,ecx
subl $4,esi
subl $4,edi
rep
movsl
addl $4,esi
addl $4,edi
movl ebx,ecx
andl $3,ecx		/* remaining byte residue */
#endif /* !__OPTIMIZE_SIZE__ */
/* handle any remaining bytes not on a long word boundary */
.L13:
decl esi
decl edi
.L15:
rep
movsb
jmp .L5
.p2align 4,,7
/* ELSE: no destructive overlap so we copy forwards */
.L2:
#ifndef __OPTIMIZE_SIZE__
cmpl $8,ecx
jbe .L3
/* move any preceding bytes until destination address is long word aligned */
movl edi,edx
movl ecx,ebx
andl $3,edx
jz .L11
movl $4,ecx
subl edx,ecx
andl $3,ecx
subl ecx,ebx
rep
movsb
mov ebx,ecx
/* move bytes a long word at a time */
.L11:
shrl $2,ecx
.p2align 2
rep
movsl
movl ebx,ecx
andl $3,ecx
#endif /* !__OPTIMIZE_SIZE__ */
/* handle any remaining bytes */
.L3:
rep
movsb
.L5:
movl 8(ebp),eax		/* return dst */
cld			/* restore direction flag for the caller */
leal -12(ebp),esp
popl ebx
popl edi
popl esi
leave
ret
|
32bitmicro/newlib-nano-1.0
| 1,431
|
newlib/libc/machine/i386/memset.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memset)
SOTYPE_FUNCTION(memset)
/* void *memset (void *dst, int c, size_t n)
   Fills n bytes at dst with (unsigned char) c and returns dst.  Unless
   compiled for size, fills of more than 16 bytes first align dst to an
   8-byte boundary byte-by-byte, replicate c into all four bytes of eax,
   and fill with rep stosl. */
SYM (memset):
pushl ebp
movl esp,ebp
pushl edi
movl 8(ebp),edi
movzbl 12(ebp),eax	/* low byte of the fill value */
movl 16(ebp),ecx
cld
#ifndef __OPTIMIZE_SIZE__
/* Less than 16 bytes won't benefit from the 'rep stosl' loop. */
cmpl $16,ecx
jbe .L19
testl $7,edi
je .L10
/* It turns out that 8-byte aligned 'rep stosl' outperforms
4-byte aligned on some x86 platforms.  Up to 7 single-byte
stores below reach that alignment. */
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
testl $7,edi
je .L10
movb al,(edi)
incl edi
decl ecx
/* At this point, ecx>8 and edi%8==0. */
.L10:
movb al,ah		/* replicate byte into all 4 bytes of eax */
movl eax,edx
sall $16,edx
orl edx,eax
movl ecx,edx
shrl $2,ecx		/* longword count */
andl $3,edx		/* byte residue */
rep
stosl
movl edx,ecx
#endif /* not __OPTIMIZE_SIZE__ */
.L19:
rep
stosb
movl 8(ebp),eax		/* return dst */
leal -4(ebp),esp
popl edi
leave
ret
|
32bitmicro/newlib-nano-1.0
| 1,503
|
newlib/libc/machine/i386/memchr.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memchr)
SOTYPE_FUNCTION(memchr)
/* void *memchr (const void *s, int c, size_t n)
   Returns a pointer to the first occurrence of (unsigned char) c in the
   first n bytes of s, or NULL.  The fast path scans a longword at a
   time using the classic zero-in-word test: after xor-ing with the
   replicated byte, (v - 0x01010101) & ~v & 0x80808080 is non-zero iff
   some byte of v is zero (16843009 == 0x01010101, -2139062144 ==
   0x80808080).
   Result trick at L15/L20: edx is 0 on the not-found paths and is
   decremented to all-ones on the found path, so "decl edx; andl edi,edx"
   yields either the pointer or 0. */
SYM (memchr):
pushl ebp
movl esp,ebp
pushl edi
movzbl 12(ebp),eax
movl 16(ebp),ecx
movl 8(ebp),edi
xorl edx,edx
testl ecx,ecx
jz L20			/* n == 0: return NULL */
#ifdef __OPTIMIZE_SIZE__
cld
repnz
scasb
setnz dl		/* dl = 1 when not found */
decl edi		/* scasb leaves edi one past the match */
#else /* !__OPTIMIZE_SIZE__ */
/* Do byte-wise checks until string is aligned. */
testl $3,edi
je L5
cmpb (edi),al
je L15
incl edi
decl ecx
je L20
testl $3,edi
je L5
cmpb (edi),al
je L15
incl edi
decl ecx
je L20
testl $3,edi
je L5
cmpb (edi),al
je L15
incl edi
decl ecx
je L20
/* Create a mask, then check a word at a time. */
L5:
movb al,ah
movl eax,edx
sall $16,edx
orl edx,eax		/* eax = c replicated into 4 bytes */
pushl ebx
.p2align 4,,7
L8:
subl $4,ecx
jc L9			/* fewer than 4 bytes left */
movl (edi),edx
addl $4,edi
xorl eax,edx		/* bytes equal to c become 0 */
leal -16843009(edx),ebx
notl edx
andl edx,ebx
testl $-2139062144,ebx
je L8
subl $4,edi		/* match in this word: back up and rescan */
L9:
popl ebx
xorl edx,edx
addl $4,ecx
je L20
/* Final byte-wise checks. */
.p2align 4,,7
L10:
cmpb (edi),al
je L15
incl edi
decl ecx
jne L10
xorl edi,edi		/* not found */
#endif /* !__OPTIMIZE_SIZE__ */
L15:
decl edx
andl edi,edx
L20:
movl edx,eax
leal -4(ebp),esp
popl edi
leave
ret
|
32bitmicro/newlib-nano-1.0
| 1,347
|
newlib/libc/machine/i386/memcmp.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (memcmp)
SOTYPE_FUNCTION(memcmp)
/* int memcmp (const void *s1, const void *s2, size_t n)
   Compares n bytes; returns <0, 0 or >0 as the first differing byte of
   s1 is less than, equal to or greater than that of s2.  When both
   pointers are 4-byte aligned, whole longwords are compared first; on a
   longword mismatch the pointers are backed up and the 4 bytes are
   recompared bytewise to find the differing byte. */
SYM (memcmp):
pushl ebp
movl esp,ebp
subl $16,esp
pushl ebx
pushl edi
pushl esi
movl 8(ebp),edi
movl 12(ebp),esi
movl 16(ebp),ecx
cld
/* check if length is zero in which case just return 0 */
xorl eax,eax
testl ecx,ecx
jz L4
#ifndef __OPTIMIZE_SIZE__
/* if aligned on long boundary, compare doublewords at a time first */
movl edi,eax
orl esi,eax
testb $3,al
jne BYTECMP
movl ecx,ebx
shrl $2,ecx /* calculate number of long words to compare */
repz
cmpsl
jz L5
subl $4,esi		/* mismatch: rescan the longword bytewise */
subl $4,edi
movl $4,ecx
jmp BYTECMP
L5:
andl $3,ebx /* calculate number of remaining bytes */
movl ebx,ecx
#endif /* not __OPTIMIZE_SIZE__ */
BYTECMP: /* compare any unaligned bytes or remainder bytes */
repz
cmpsb
/* set output to be < 0 if less than, 0 if equal, or > 0 if greater than */
L3:
xorl edx,edx
movb -1(esi),dl		/* last byte compared from s2 */
xorl eax,eax
movb -1(edi),al		/* last byte compared from s1 */
subl edx,eax
L4:
leal -28(ebp),esp
popl esi
popl edi
popl ebx
leave
ret
|
32bitmicro/newlib-nano-1.0
| 1,421
|
newlib/libc/machine/i386/strlen.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (strlen)
SOTYPE_FUNCTION(strlen)
/* size_t strlen (const char *s)
   Returns the length of the NUL-terminated string s.  The fast path
   aligns to a 4-byte boundary bytewise, then tests one longword per
   iteration with the zero-in-word trick
   (v - 0x01010101) & ~v & 0x80808080, and finally locates which of the
   four bytes is the NUL (16843009 == 0x01010101, -2139062144 ==
   0x80808080). */
SYM (strlen):
pushl ebp
movl esp,ebp
pushl edi
movl 8(ebp),edx
#ifdef __OPTIMIZE_SIZE__
cld
movl edx,edi
movl $4294967295,ecx
xor eax,eax
repnz
scasb			/* edi ends one past the NUL */
#else
/* Modern x86 hardware is much faster at double-word
manipulation than with bytewise repnz scasb. */
/* Do byte-wise checks until string is aligned. */
movl edx,edi
test $3,edi
je L5
movb (edi),cl
incl edi
testb cl,cl
je L15
test $3,edi
je L5
movb (edi),cl
incl edi
testb cl,cl
je L15
test $3,edi
je L5
movb (edi),cl
incl edi
testb cl,cl
je L15
L5:
subl $4,edi
/* loop performing 4 byte mask checking for desired 0 byte */
.p2align 4,,7
L10:
addl $4,edi
movl (edi),ecx
leal -16843009(ecx),eax
notl ecx
andl ecx,eax
testl $-2139062144,eax
je L10
/* Find which of four bytes is 0. */
notl ecx		/* restore the loaded word */
incl edi
testb cl,cl
je L15
incl edi
shrl $8,ecx
testb cl,cl
je L15
incl edi
shrl $8,ecx
testb cl,cl
je L15
incl edi
#endif
L15:
subl edx,edi		/* edi is one past the NUL on every path */
leal -1(edi),eax
leal -4(ebp),esp
popl edi
leave
ret
|
32bitmicro/newlib-nano-1.0
| 2,523
|
newlib/libc/machine/i386/strchr.S
|
/*
* ====================================================
* Copyright (C) 1998, 2002, 2008 by Red Hat Inc. All rights reserved.
*
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
#include "i386mach.h"
.global SYM (strchr)
SOTYPE_FUNCTION(strchr)
/* char *strchr (const char *s, int c)
   Returns a pointer to the first occurrence of (char) c in s, or to the
   terminating NUL if c is 0, or NULL if c does not occur.  The fast
   path scans a longword at a time, applying the zero-in-word test
   (v - 0x01010101) & ~v & 0x80808080 first to the word itself (finds
   the NUL) and then to the word xor-ed with c replicated into four
   bytes (finds c); a hit falls back to a bytewise rescan at L9. */
SYM (strchr):
pushl ebp
movl esp,ebp
pushl edi
pushl ebx
xorl ebx,ebx
movl 8(ebp),edi
addb 12(ebp),bl		/* bl = c; sets ZF when c == 0 */
#ifndef __OPTIMIZE_SIZE__
/* Special case strchr(p,0). */
je L25
/* Do byte-wise checks until string is aligned. */
test $3,edi
je L5
movl edi,eax
movb (eax),cl
testb cl,cl
je L14
cmpb bl,cl
je L19
incl edi
test $3,edi
je L5
movl edi,eax
movb (eax),cl
testb cl,cl
je L14
cmpb bl,cl
je L19
incl edi
test $3,edi
je L5
movl edi,eax
movb (eax),cl
testb cl,cl
je L14
cmpb bl,cl
je L19
incl edi
/* create 4 byte mask which is just the desired byte repeated 4 times */
L5:
movl ebx,ecx
sall $8,ebx
subl $4,edi
orl ecx,ebx
movl ebx,edx
sall $16,ebx
orl edx,ebx
/* loop performing 4 byte mask checking for 0 byte or desired byte */
.p2align 4,,7
L10:
addl $4,edi
movl (edi),ecx
leal -16843009(ecx),edx
movl ecx,eax
notl eax
andl eax,edx
testl $-2139062144,edx
jne L9			/* word contains a NUL */
xorl ebx,ecx		/* bytes equal to c become 0 */
leal -16843009(ecx),edx
notl ecx
andl ecx,edx
testl $-2139062144,edx
je L10
#endif /* not __OPTIMIZE_SIZE__ */
/* loop while (*s && *s++ != c) */
L9:
leal -1(edi),eax
.p2align 4,,7
L15:
incl eax
movb (eax),dl
testb dl,dl
je L14
cmpb bl,dl
jne L15
L14:
/* if (*s == c) return address otherwise return NULL */
cmpb bl,(eax)
je L19
xorl eax,eax
L19:
leal -8(ebp),esp
popl ebx
popl edi
leave
ret
#ifndef __OPTIMIZE_SIZE__
/* Special case strchr(p,0): find the terminating NUL. */
#if 0
/* Hideous performance on modern machines. */
L25:
cld
movl $-1,ecx
xor eax,eax
repnz
scasb
leal -1(edi),eax
jmp L19
#endif
L25:
/* Do byte-wise checks until string is aligned. */
test $3,edi
je L26
movl edi,eax
movb (eax),cl
testb cl,cl
je L19
incl edi
test $3,edi
je L26
movl edi,eax
movb (eax),cl
testb cl,cl
je L19
incl edi
test $3,edi
je L26
movl edi,eax
movb (eax),cl
testb cl,cl
je L19
incl edi
L26:
subl $4,edi
/* loop performing 4 byte mask checking for desired 0 byte */
.p2align 4,,7
L27:
addl $4,edi
movl (edi),ecx
leal -16843009(ecx),edx
movl ecx,eax
notl eax
andl eax,edx
testl $-2139062144,edx
je L27
jmp L9			/* NUL is in this word; locate it bytewise */
#endif /* !__OPTIMIZE_SIZE__ */
|
32bitmicro/newlib-nano-1.0
| 2,194
|
newlib/libc/machine/d30v/setjmp.S
|
; setjmp/longjmp for D30V.
.text
.globl setjmp
.type setjmp,@function
.stabs "setjmp.S",100,0,0,setjmp
.stabs "int:t(0,1)=r(0,1);-2147483648;2147483647;",128,0,0,0
.stabs "setjmp:F(0,1)",36,0,1,setjmp
; int setjmp (jmp_buf r2)
; Saves the callee-saved register pairs r26-r63, accumulator a1, and the
; system registers psw, rpt_c, rpt_s, rpt_e, mod_s, mod_e into the
; jmpbuf (post-incrementing store pairs), then returns 0.
setjmp:
; Address of jmpbuf is passed in R2. Save the appropriate registers.
st2w r26, @(r2+,r0)
st2w r28, @(r2+,r0)
st2w r30, @(r2+,r0)
st2w r32, @(r2+,r0)
st2w r34, @(r2+,r0)
st2w r36, @(r2+,r0)
st2w r38, @(r2+,r0)
st2w r40, @(r2+,r0)
st2w r42, @(r2+,r0)
st2w r44, @(r2+,r0)
st2w r46, @(r2+,r0)
st2w r48, @(r2+,r0)
st2w r50, @(r2+,r0)
st2w r52, @(r2+,r0)
st2w r54, @(r2+,r0)
st2w r56, @(r2+,r0)
st2w r58, @(r2+,r0)
st2w r60, @(r2+,r0)
st2w r62, @(r2+,r0)
; accumulator a1 (high half then low half)
mvfacc r4, a1, 16
mvfacc r5, a1, 0
st2w r4, @(r2+,r0)
mvfsys r4, psw
mvfsys r5, rpt_c
st2w r4, @(r2+,r0)
mvfsys r4, rpt_s
mvfsys r5, rpt_e
st2w r4, @(r2+,r0)
mvfsys r4, mod_s
mvfsys r5, mod_e
st2w r4, @(r2+,r0)
; Return 0 to caller
add r2, r0, r0
jmp link
.Lsetjmp:
.size setjmp,.Lsetjmp-setjmp
.stabs "",36,0,0,.Lsetjmp-setjmp
.globl longjmp
.type longjmp,@function
.stabs "longjmp:F(0,1)",36,0,1,longjmp
; void longjmp (jmp_buf r2, int r3)
; Restores the context saved by setjmp.  Only the rp/md bits of the
; saved psw are restored (merged with the current psw); the original
; setjmp call appears to return r3, or 1 if r3 was 0.
longjmp:
; Address of jmpbuf is in R2. Restore the registers.
ld2w r26, @(r2+,r0)
ld2w r28, @(r2+,r0)
ld2w r30, @(r2+,r0)
ld2w r32, @(r2+,r0)
ld2w r34, @(r2+,r0)
ld2w r36, @(r2+,r0)
ld2w r38, @(r2+,r0)
ld2w r40, @(r2+,r0)
ld2w r42, @(r2+,r0)
ld2w r44, @(r2+,r0)
ld2w r46, @(r2+,r0)
ld2w r48, @(r2+,r0)
ld2w r50, @(r2+,r0)
ld2w r52, @(r2+,r0)
ld2w r54, @(r2+,r0)
ld2w r56, @(r2+,r0)
ld2w r58, @(r2+,r0)
ld2w r60, @(r2+,r0)
ld2w r62, @(r2+,r0)
ld2w r4, @(r2+,r0)
mvtacc a1, r4, r5
mvfsys r6, psw
ld2w r4, @(r2+,r0) /* psw, rpt_c */
and r6, r6, 0xfcff /* set rp, md bits from setjmp, leave */
and r4, r4, 0x0300 /* all other psw bits the same */
or r4, r4, r6
mvtsys psw, r4
mvtsys rpt_c, r5
ld2w r4, @(r2+,r0)
mvtsys rpt_s, r4
mvtsys rpt_e, r5
ld2w r4, @(r2+,r0)
mvtsys mod_s, r4
mvtsys mod_e, r5
; Value to return to caller is in R3. If caller attempted to return 0,
; return 1 instead.
cmpeq f0, r3, 0 || add r2, r3, r0
jmp link || add/tx r2, r2, 1
.Llongjmp:
.size longjmp,.Llongjmp-longjmp
.stabs "",36,0,0,.Llongjmp-longjmp
|
32bitmicro/newlib-nano-1.0
| 2,547
|
newlib/libc/machine/frv/setjmp.S
|
# setjmp/longjmp for Frv. The jmpbuf looks like this:
#
# Register jmpbuf offset
# R16-R31 0x0-0x03c
# R48-R63 0x40-0x7c
# FR16-FR31 0x80-0xbc
# FR48-FR63 0xc0-0xfc
# LR 0x100
# SP 0x104
# FP 0x108
#
# R8 contains the pointer to jmpbuf
#include <frv-asm.h>
.text
.global EXT(setjmp)
.type EXT(setjmp),@function
# int setjmp (jmp_buf gr8)
# Saves the callee-context registers into the jmpbuf (layout in the
# file header): gr16-gr31, optionally gr48-gr63 and the FP registers
# depending on the configured register-file size, plus LR, SP and FP.
# Returns 0.
EXT(setjmp):
stdi gr16, @(gr8,0)
stdi gr18, @(gr8,8)
stdi gr20, @(gr8,16)
stdi gr22, @(gr8,24)
stdi gr24, @(gr8,32)
stdi gr26, @(gr8,40)
stdi gr28, @(gr8,48)
stdi gr30, @(gr8,56)
#if __FRV_GPR__ != 32
stdi gr48, @(gr8,64)
stdi gr50, @(gr8,72)
stdi gr52, @(gr8,80)
stdi gr54, @(gr8,88)
stdi gr56, @(gr8,96)
stdi gr58, @(gr8,104)
stdi gr60, @(gr8,112)
stdi gr62, @(gr8,120)
#endif
#if __FRV_FPR__ != 0
stdfi fr16, @(gr8,128)
stdfi fr18, @(gr8,136)
stdfi fr20, @(gr8,144)
stdfi fr22, @(gr8,152)
stdfi fr24, @(gr8,160)
stdfi fr26, @(gr8,168)
stdfi fr28, @(gr8,176)
stdfi fr30, @(gr8,184)
#if __FRV_FPR__ != 32
stdfi fr48, @(gr8,192)
stdfi fr50, @(gr8,200)
stdfi fr52, @(gr8,208)
stdfi fr54, @(gr8,216)
stdfi fr56, @(gr8,224)
stdfi fr58, @(gr8,232)
stdfi fr60, @(gr8,240)
stdfi fr62, @(gr8,248)
#endif
#endif
movsg lr, gr4
sti gr4, @(gr8,256)
sti sp, @(gr8,260)
sti fp, @(gr8,264)
mov gr0,gr8		# return 0
ret
.Lend1:
.size EXT(setjmp),.Lend1-EXT(setjmp)
.global EXT(longjmp)
.type EXT(longjmp),@function
# void longjmp (jmp_buf buf, int val)
# In:  gr8 = pointer to the jmpbuf written by setjmp.
#      gr9 = value the resumed setjmp call should return.
# Restores the saved register context (mirror of setjmp's stores)
# and returns to setjmp's caller; a zero val is replaced by 1.
EXT(longjmp):
lddi @(gr8,0), gr16
lddi @(gr8,8), gr18
lddi @(gr8,16), gr20
lddi @(gr8,24), gr22
lddi @(gr8,32), gr24
lddi @(gr8,40), gr26
lddi @(gr8,48), gr28
lddi @(gr8,56), gr30
#if __FRV_GPR__ != 32
lddi @(gr8,64), gr48
lddi @(gr8,72), gr50
lddi @(gr8,80), gr52
lddi @(gr8,88), gr54
lddi @(gr8,96), gr56
lddi @(gr8,104), gr58
lddi @(gr8,112), gr60
lddi @(gr8,120), gr62
#endif
#if __FRV_FPR__ != 0
lddfi @(gr8,128), fr16
lddfi @(gr8,136), fr18
lddfi @(gr8,144), fr20
lddfi @(gr8,152), fr22
lddfi @(gr8,160), fr24
lddfi @(gr8,168), fr26
lddfi @(gr8,176), fr28
lddfi @(gr8,184), fr30
#if __FRV_FPR__ != 32
lddfi @(gr8,192), fr48
lddfi @(gr8,200), fr50
lddfi @(gr8,208), fr52
lddfi @(gr8,216), fr54
lddfi @(gr8,224), fr56
lddfi @(gr8,232), fr58
lddfi @(gr8,240), fr60
lddfi @(gr8,248), fr62
#endif
#endif
# Restore return address (LR), stack pointer and frame pointer.
ldi @(gr8,256), gr4
movgs gr4,lr
ldi @(gr8,260), sp
ldi @(gr8,264), fp
# Value to return is in r9. If zero, return 1
cmp gr9, gr0, icc0
setlos #1, gr8
ckne icc0, cc4
cmov gr9, gr8, cc4, 1
ret
.Lend2:
# Fixed: the size expression previously referenced the undefined
# symbol EXT(longjmp2) instead of EXT(longjmp).
.size EXT(longjmp),.Lend2-EXT(longjmp)
|
32bitmicro/newlib-nano-1.0
| 2,935
|
newlib/libc/machine/lm32/setjmp.S
|
/*
* setjmp/longjmp for LatticeMico32.
* Contributed by Jon Beniston <jon@beniston.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.section .text
.align 4
.globl setjmp
.type setjmp,@function
.globl longjmp
.type longjmp,@function
/* setjmp: save all callee saves into jmp_buf
r1 - Address of jmp_buf
*/
setjmp:
/* int setjmp (jmp_buf buf)
   r1 = address of jmp_buf.
   Saves the callee-saved registers r11-r25 together with gp, fp,
   sp and ra (the return address of this call), then returns 0.  */
sw (r1+0), r11
sw (r1+4), r12
sw (r1+8), r13
sw (r1+12), r14
sw (r1+16), r15
sw (r1+20), r16
sw (r1+24), r17
sw (r1+28), r18
sw (r1+32), r19
sw (r1+36), r20
sw (r1+40), r21
sw (r1+44), r22
sw (r1+48), r23
sw (r1+52), r24
sw (r1+56), r25
sw (r1+60), gp
sw (r1+64), fp
sw (r1+68), sp
sw (r1+72), ra
/* Direct return: value 0.  */
mvi r1, 0
ret
/* longjmp: restore all callee saves from jmp_buf
r1 - Address of jmp_buf
r2 - Value to return with
*/
.global longjmp
.type longjmp,@function
.align 4
longjmp:
/* void longjmp (jmp_buf buf, int val)
   r1 = address of jmp_buf, r2 = value setjmp should return.
   Restores the registers saved by setjmp (r11-r25, gp, fp, sp, ra)
   and returns to setjmp's caller via the restored ra.
   Per C99 7.13.2.1 a zero val must make setjmp return 1; the
   original code passed 0 through unchanged, which is fixed below.
   val survives in r2 because the restore touches only r11-r25 and
   gp/fp/sp/ra.  */
lw r11, (r1+0)
lw r12, (r1+4)
lw r13, (r1+8)
lw r14, (r1+12)
lw r15, (r1+16)
lw r16, (r1+20)
lw r17, (r1+24)
lw r18, (r1+28)
lw r19, (r1+32)
lw r20, (r1+36)
lw r21, (r1+40)
lw r22, (r1+44)
lw r23, (r1+48)
lw r24, (r1+52)
lw r25, (r1+56)
lw gp, (r1+60)
lw fp, (r1+64)
lw sp, (r1+68)
lw ra, (r1+72)
/* Return val; substitute 1 when val is 0 (C standard).  */
mv r1, r2
bne r1, r0, 1f
mvi r1, 1
1:
ret
|
32bitmicro/newlib-nano-1.0
| 2,339
|
newlib/libc/machine/moxie/setjmp.S
|
/* A setjmp.c for Moxie
Copyright (C) 2009 Anthony Green
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply. */
# setjmp/longjmp for moxie. The jmpbuf looks like this:
#
# Register jmpbuf offset
# $r0 0x00
# $r1 0x04
# $r2 0x08
# $r3 0x0c
# $r4 0x10
# $r5 0x14
# $r6 0x18
# $r7 0x1c
# $r8 0x20
# $r9 0x24
# $r10 0x28
# $r11 0x2c
# $r12 0x30
# $r13 0x34
# $sp 0x38
# $fp 0x3c
.text
.global setjmp
.type setjmp,@function
# int setjmp (jmp_buf buf)
# In:  $r0 = pointer to the jmpbuf.
# Out: $r0 = 0 on the direct return.
# Saves $r0-$r13, then $sp at offset 0x38 and $fp at offset 0x3c.
setjmp:
st.l ($r0), $r0
sto.l 0x04($r0), $r1
sto.l 0x08($r0), $r2
sto.l 0x0c($r0), $r3
sto.l 0x10($r0), $r4
sto.l 0x14($r0), $r5
sto.l 0x18($r0), $r6
sto.l 0x1c($r0), $r7
sto.l 0x20($r0), $r8
sto.l 0x24($r0), $r9
sto.l 0x28($r0), $r10
sto.l 0x2c($r0), $r11
sto.l 0x30($r0), $r12
sto.l 0x34($r0), $r13
sto.l 0x38($r0), $sp
sto.l 0x3c($r0), $fp
# Direct call returns 0.
ldi.l $r0, 0x00
ret
.Lend1:
.size setjmp,.Lend1-setjmp
.global longjmp
.type longjmp,@function
# void longjmp (jmp_buf buf, int val)
# In:  $r0 = jmpbuf pointer, $r1 = value setjmp should return.
# Restores the registers saved by setjmp and resumes execution at
# the original setjmp call site with return value val; a zero val
# returns 1 instead (.Lreturn1 path).
longjmp:
ldi.l $r2, 0x00
cmp $r1, $r2
beq .Lreturn1
# Non-zero val: do NOT reload $r1 from the buffer -- it still holds
# val and must survive until the final move into $r0.  (The original
# code restored $r1 here, clobbering val before the return-value
# move.  $r1 is the second-argument register, so callers cannot
# expect it to be preserved across setjmp/longjmp.)
ldo.l $r2, 0x08($r0)
ldo.l $r3, 0x0c($r0)
ldo.l $r4, 0x10($r0)
ldo.l $r5, 0x14($r0)
ldo.l $r6, 0x18($r0)
ldo.l $r7, 0x1c($r0)
ldo.l $r8, 0x20($r0)
ldo.l $r9, 0x24($r0)
ldo.l $r10, 0x28($r0)
ldo.l $r11, 0x2c($r0)
ldo.l $r12, 0x30($r0)
ldo.l $r13, 0x34($r0)
ldo.l $sp, 0x38($r0)
ldo.l $fp, 0x3c($r0)
# Return val from the resumed setjmp call.
mov $r0, $r1
ret
.Lreturn1:
# val == 0: restore context and make setjmp return 1.
ldo.l $r1, 0x04($r0)
ldo.l $r2, 0x08($r0)
ldo.l $r3, 0x0c($r0)
ldo.l $r4, 0x10($r0)
ldo.l $r5, 0x14($r0)
ldo.l $r6, 0x18($r0)
ldo.l $r7, 0x1c($r0)
ldo.l $r8, 0x20($r0)
ldo.l $r9, 0x24($r0)
ldo.l $r10, 0x28($r0)
ldo.l $r11, 0x2c($r0)
ldo.l $r12, 0x30($r0)
ldo.l $r13, 0x34($r0)
ldo.l $sp, 0x38($r0)
ldo.l $fp, 0x3c($r0)
ldi.l $r0, 0x01
ret
.Lend2:
.size longjmp,.Lend2-longjmp
|
32bitmicro/newlib-nano-1.0
| 2,776
|
newlib/libc/machine/microblaze/setjmp.S
|
/* Copyright (c) 2001, 2009 Xilinx, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Xilinx nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* setjmp - save stack context for non-local goto
* args - r5 - jmp_buf
*
* jmpbuf frame structure
* ---------------------
*
* +-------------+ + 0
* | r1 |
* +-------------+ + 4
* | r13 |
* | . |
* | . |
* | . |
* | r31 |
* +-------------+ + 80
* | . |
* | . |
*/
.globl setjmp
.section .text
.align 2
.ent setjmp
/* int setjmp (jmp_buf buf)
   r5 = pointer to jmp_buf.
   Stores r1 (stack pointer) and registers r13-r31 -- including r15,
   the return-address register used by the rtsd below -- into the
   buffer, then returns 0 (r3 is set in the rtsd delay slot).  */
setjmp:
swi r1, r5, 0
swi r13, r5, 4
swi r14, r5, 8
swi r15, r5, 12
swi r16, r5, 16
swi r17, r5, 20
swi r18, r5, 24
swi r19, r5, 28
swi r20, r5, 32
swi r21, r5, 36
swi r22, r5, 40
swi r23, r5, 44
swi r24, r5, 48
swi r25, r5, 52
swi r26, r5, 56
swi r27, r5, 60
swi r28, r5, 64
swi r29, r5, 68
swi r30, r5, 72
swi r31, r5, 76
rtsd r15, 8
/* Delay slot: return value 0.  */
or r3, r0, r0
.end setjmp
|
32bitmicro/newlib-nano-1.0
| 2,800
|
newlib/libc/machine/microblaze/longjmp.S
|
/* Copyright (c) 2001, 2009 Xilinx, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Xilinx nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* longjmp - non-local jump to a saved stack context
* args - r5 - jmp_buf
* r6 - val
*
* jmpbuf frame structure
* ---------------------
*
* +-------------+ + 0
* | r1 |
* +-------------+ + 4
* | r13 |
* | . |
* | . |
* | . |
* | r31 |
* +-------------+ + 80
* | . |
* | . |
*/
.globl longjmp
.section .text
.align 2
.ent longjmp
/* void longjmp (jmp_buf buf, int val)
   r5 = pointer to jmp_buf, r6 = value setjmp should return.
   Restores r1 and r13-r31 (including the return address in r15),
   then returns to setjmp's caller with r3 = val.
   Per C99 7.13.2.1 a zero val must make setjmp return 1; the
   original code passed 0 through unchanged, which is fixed below.
   val survives in r6 because the restore touches only r1 and
   r13-r31.  */
longjmp:
lwi r1, r5, 0
lwi r13, r5, 4
lwi r14, r5, 8
lwi r15, r5, 12
lwi r16, r5, 16
lwi r17, r5, 20
lwi r18, r5, 24
lwi r19, r5, 28
lwi r20, r5, 32
lwi r21, r5, 36
lwi r22, r5, 40
lwi r23, r5, 44
lwi r24, r5, 48
lwi r25, r5, 52
lwi r26, r5, 56
lwi r27, r5, 60
lwi r28, r5, 64
lwi r29, r5, 68
lwi r30, r5, 72
lwi r31, r5, 76
/* If val == 0, substitute 1 (C standard requirement).  */
bnei r6, 1f
addik r6, r0, 1
1:
rtsd r15, 8
/* Delay slot: r3 = return value for the resumed setjmp.  */
or r3, r0, r6
.end longjmp
|
32bitmicro/newlib-nano-1.0
| 6,581
|
newlib/libc/machine/hppa/memcpy.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision: 1.1 $ */
/*
* memcpy(s1, s2, n)
*
* Copy n characters from s2 to s1; returns s1.
*/
#define d_addr arg0
#define s_addr arg1
#define count arg2
#define tmp5 arg3
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp6 r31
#include "DEFS.h"
/* void *memcpy (void *s1, const void *s2, size_t n)
   HP-PA assembly.  arg0 = s1 (destination), arg1 = s2 (source),
   arg2 = n; returns s1 in ret0.  Three strategies: 16-byte word
   chunks when both pointers share word alignment, shift-and-merge
   (vshd via the SAR, cr11) when alignments differ, and a simple
   byte loop for short copies.  */
ENTRY(memcpy)
comib,>= 5,count,byteloop /* If count is <= 6 don't get fancy.*/
movb,=,n d_addr,ret0,done /* The return value is defined to be the value of d_addr. DELAY SLOT */
/* if d_addr is null then exit */
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address. */
extru d_addr,31,2,tmp2 /* Extract the low two bits of the destination address. */
add count,tmp2,count /* pre increment the count to adjust for alignment of s1 */
comb,<> tmp2,tmp1,not_aligned /* see if s1 is aligned w.r.t. s2. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT. */
/* aligned */
/* We will now begin the 16 byte at a time word move if count >= 16 ! */
/* Else we will branch to the 4 byte-at-a time word move ! */
addibt,<,n -16,count,chekchunk /* If count < 16 then we can't move 16 byte chunks ! */
/* actually we can legally move 13 or more bytes on the first loop. */
/* These loads and stores are done so as to prevent processor interlock. */
chunks:
ldwm 16(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 16 */
ldw -12(0,s_addr),tmp2 /* tmp2 = 2nd word */
ldw -8(0,s_addr),tmp3 /* tmp3 = 3rd word */
ldw -4(0,s_addr),tmp4 /* tmp4 = 4th word */
/* Now store the results ! */
stbys,b,m tmp1,4(0,d_addr) /* tmp1 = 1st word stored d_addr += 16 also take care of front porch. */
stwm tmp2,4(0,d_addr) /* tmp2 = 2nd word stored. */
stwm tmp3,4(0,d_addr) /* tmp3 = 3rd word stored. */
addibf,< -16,count,chunks /* If count is still >= 16 do another loop. */
stwm tmp4,4(0,d_addr) /* tmp4 = 4th word stored. DELAY SLOT */
chekchunk:
addibt,<,n 12,count,back_porch /* since the count is already decremented by -16 we're testing */
/* to see if there are at least 4 bytes left ? */
subchunk:
ldws,ma 4(s_addr),tmp1 /* tmp1 = *s_addr++ */
addibf,< -4,count,subchunk /* count -= 4 */
stbys,b,m tmp1,4(d_addr) /* *d_addr++ = tmp1 */
back_porch:
addibt,=,n 4,count,done /* if count = 0 we're, of course, done ! */
ldws 0(s_addr),tmp1 /* load up the back_porch */
add d_addr,count,d_addr/* final store address is +1 too high ! */
bv 0(r2) /* return--were done. */
stbys,e tmp1,0(d_addr) /* kerplunk! whew ! */
/* Begin non_aligned code. (no reference to politics) */
not_aligned:
sub,>= tmp2,tmp1,tmp3 /* compute the shift quantity again and skip the load if tmp2 > tmp1. */
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++ */
zdep tmp3,28,29,tmp4 /* compute the number of bits to shift based on the number of bytes above. */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register. */
addibt,<,n -16,count,chkchnk2 /* first step in pre adjustment of count for looping. */
chunk2:
ldwm 16(0,s_addr),tmp2 /* get either first or second word . tmp2 = *s_addr++ */
ldw -12(s_addr),tmp3
ldw -8(s_addr),tmp4
ldw -4(s_addr),tmp5
vshd tmp1,tmp2,tmp6 /* position data ! */
stbys,b,m tmp6,4(0,d_addr) /* store ! */
vshd tmp2,tmp3,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store ! */
vshd tmp3,tmp4,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store ! */
vshd tmp4,tmp5,tmp6 /* position data ! */
stwm tmp6,4(0,d_addr) /* store the data ! */
addibf,< -16,count,chunk2 /* If count is still >= 16 do another loop. */
copy tmp5,tmp1
chkchnk2:
addibt,<,n 12,count,bp_0 /* if we don't have 4 bytes left then do the back porch (bp_0) */
subchnk2:
ldwm 4(0,s_addr),tmp2 /* get next word ! */
vshd tmp1,tmp2,tmp3 /* position data ! */
addibt,< -4,count,bp_1 /* decrement count and when count < 4 goto back_porch (bp_1) */
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get 4th word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
addib,>= -4,count,subchnk2 /* decrement count and when count <= 4 go to back porch (bp_2) */
stbys,b,m tmp3,4(0,d_addr) /* store the data ! */
bp_0: copy tmp1,tmp2 /* switch registers used in the shift process. */
bp_1: addibt,<=,n 4,count,done /* if count = -4 this implies that count = 0 -> done */
add d_addr,count,d_addr /* bump destination address to be +1 too high ! */
mfctl sar,tmp3 /* suppress final ldwm unless result used */
extru tmp3,28,2,tmp3 /* convert bitshift to byteshift */
sub,<= count,tmp3,r0 /* bytes unused if (count-byteshift <= 0*/
ldwm 4(0,s_addr),tmp1 /* get final word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
bv 0(r2) /* return */
stbys,e tmp3,0(0,d_addr) /* store the data ! */
/* here we do ye old byte-at-a-time moves. */
byteloop:
comb,>=,n 0,count,done
encore:
ldbs,ma 1(s_addr),tmp1
addibf,= -1,count,encore
stbs,ma tmp1,1(d_addr)
done:
EXIT(memcpy)
|
32bitmicro/newlib-nano-1.0
| 8,430
|
newlib/libc/machine/hppa/strncat.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*HPUX_ID: @(#) $Revision: 1.1 $ */
/* strncat(s1,s2,n) : concatenate at most n characters from s2 onto s1 */
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define count r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define tmp6 r31
#define save r1
#define tmp7 ret1 /* source offset-- reset to orig source addr if not aligned */
/* char *strncat (char *s1, const char *s2, size_t n)
   HP-PA assembly.  r26 = s1, r25 = s2, r24 = n; returns s1 in ret0.
   Scans word-at-a-time for the terminating NUL of s1, then copies
   up to n bytes of s2 (word-wise when alignments allow, vshd
   shift-merged otherwise, byte-wise at the tails) and always
   NUL-terminates the result at "done".  */
ENTRY(strncat)
comb,= r0,s_addr,quit /* quit if s2=NULL */
copy d_addr,ret0 /* The return value is the value of d_addr. DELAY SLOT*/
/* First look for end of s1 (d_addr) */
extru d_addr,31,2,tmp1 /* Extract the low two bits of the dest address. */
combt,= tmp1,r0,dont_mask
dep 0,31,2,d_addr /*set word alignment */
ldwm 4(d_addr),tmp2
sh3add tmp1,r0,save /* build mask based on tmp1 */
mtctl save,11
zvdepi -2,32,save
or save,tmp2,tmp2
uxor,nbz tmp2,r0,save
search:
b,n found_end /* nullified under uxor conditions above and below */
dont_mask:
ldwm 4(d_addr),tmp2
comib,tr r0,r0,search
uxor,nbz tmp2,r0,save
found_end: /* at this point d_addr points to word */
extru,<> save,7,8,r0 /* following word with null */
addib,tr,n -4,d_addr,begin_copy /*set d_addr to end of s1 */
extru,<> save,15,8,r0
addib,tr,n -3,d_addr,begin_copy
extru,<> save,23,8,r0
addi -1,d_addr,d_addr
addi -1,d_addr,d_addr
begin_copy:
addibt,<,n -4,count,byteloop /* If count is <= 4 don't get fancy.*/
extru s_addr,31,2,tmp4 /* Extract the low two bits of the source address.*/
extru d_addr,31,2,tmp5 /* Extract the low two bits of the destination address.*/
add count,tmp5,count /* pre increment the count by the byte address so that the count is*/
copy s_addr,tmp6 /* save original s_addr in case we find null in first word */
copy s_addr, tmp7 /* save s_addr in case we find null before first store */
comb,<> tmp5,tmp4,not_aligned /* branch if tmp5<>tmp4. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT.*/
/* aligned*/
combt,= tmp5,r0,skip_mask
ldwm 4(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 4 (DELAY SLOT)*/
sh3add tmp5,r0,save /* compute mask in save*/
mtctl save,11
zvdepi -2,32,save
or save,tmp1,tmp1 /* or mask with data*/
uxor,nbz tmp1,r0,save /* check for null*/
b,n null1
addibt,< -4,count,back_porch
stbys,b,m tmp1,4(0,d_addr) /* store word (delay slot)*/
chunks:
ldwm 4(0,s_addr),tmp1 /* get a word*/
skip_mask:
uxor,nbz tmp1,r0,save /* check for null*/
b,n align_null1
addibf,< -4,count,chunks
stbys,b,m tmp1,4(0,d_addr) /* store word (delay slot)*/
back_porch: /* last word to store*/
addibt,=,n 4,count,done /* if count = 0 we're, of course, done !*/
ldws 0(s_addr),tmp1 /* load up the back_porch*/
sh3add count,r0, save /* setup right mask based on count*/
mtctl save,r11
zvdepi -2,32,save /*save now has left-hand mask*/
uaddcm r0,save,save /*form right hand mask */
or tmp1,save,tmp1 /*and insert data*/
uxor,nbz tmp1,r0,save /* check for null*/
b,n null2
add d_addr,count,d_addr/* final store address is +1 too high !*/
b done
stbys,e tmp1,0(d_addr) /* done */
/* Begin non_aligned code. */
not_aligned:
sub,>= tmp5,tmp4,tmp6 /* compute the shift amt.and skip load if tmp5 > tmp4.*/
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++*/
zdep tmp6,28,29,tmp4 /* compute the number of bits to shift */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register.*/
addibt,<,n -4,count,chkchnk2 /* first step in pre adjustment of count for looping.*/
ldwm 4(0,s_addr),tmp2 /* get either first or second word from source. */
combt,= tmp5,r0,skip_mask4 /* don't mask if whole word is valid*/
vshd tmp1,tmp2,tmp3 /* position data ! (delay slot)*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* re-load the shift count into cr11 */
skip_mask4:
uxor,nbz tmp3, r0, save
b,n null4 /* special case for first word */
copy r0, tmp5 /* zero out tmp5 so we don't try to mask again*/
copy r0, tmp7 /* zero out tmp7 so we don't try to use original s_addr anymore */
b continue
stbys,b,m tmp3,4(0,d_addr) /* store ! */
chunk2:
ldwm 4(0,s_addr),tmp2
vshd tmp1,tmp2,tmp3
skip_mask2:
uxor,nbz tmp3, r0, save
b,n null3
stbys,b,m tmp3,4(0,d_addr) /* store ! */
continue:
ldwm 4(0,s_addr),tmp1 /* get 2nd word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
uxor,nbz tmp3, r0, save
b,n null3
addibf,< -8,count,chunk2 /* If count is still >= 8 do another loop.*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
chkchnk2:
addibt,<,n 4,count,bp_0 /* if we don't have 4 bytes left then do the back porch (bp_0)*/
subchnk2: /* we have less than 8 chars to copy*/
ldwm 4(0,s_addr),tmp2 /* get next word !*/
combt,= tmp5,r0,skip_mask3
vshd tmp1,tmp2,tmp3 /* position data !*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* restore shift value again */
skip_mask3:
uxor,nbz tmp3,r0,save
b,n null3
copy r0,tmp5 /* zero out tmp5 so null3 does correct alignment */
copy r0,tmp7 /* zero out tmp7 so we don't use original s_addr since no longer valid */
b bp_1 /* we now have less than 4 bytes to move*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
bp_0:
copy tmp1,tmp2 /* switch registers for shift process */
addibt,<=,n 4,count,done /* if count = -4 this implies that count = 0 -> done */
bp_1:
ldwm 4(0,s_addr),tmp1 /* get final word ! */
vshd tmp2,tmp1,tmp3 /* position data !*/
uxor,nbz tmp3,r0,save /* if no-byte-zero */
b,n bp_null /* don't goto no_null-find which null instead */
no_null:
add d_addr,count,d_addr /* set up d_addr for stbys,e */
b done /* were done*/
stbys,e tmp3,0(0,d_addr) /* store the data !*/
/* here we do ye old byte-at-a-time moves.*/
align_null1:
b byteloop
addi -4,s_addr,s_addr
null1:
copy tmp6,s_addr /* restore orig s_addr (aligned only) */
byteloop:
addibt,= 4,count,done
null2:
ldbs,ma 1(s_addr),tmp1
encore:
combt,=,n tmp1,r0, done
stbs,ma tmp1,1(d_addr)
addibf,=,n -1,count,encore
ldbs,ma 1(s_addr),tmp1
b,n done
bp_null:
addi -4,count,count /* fudge count 'cause byteloop will re-increment */
null3: /* not_aligned case reset s_addr and finish byte-wise */
combt,=,n r0,tmp7,null3a /* if tmp7 is not valid address then branch below */
b byteloop /* otherwise reset s_addr to tmp7 and finish */
copy tmp7, s_addr
null3a: /* right shift target */
addibt,<,n 0,tmp6,null3b /* if left shifting */
sub r0,tmp6,tmp6 /* do null3b code */
addi -4,tmp6,tmp6
b byteloop
add tmp6,s_addr,s_addr /* reset s_addr by 4 + shift_amt */
null3b:
subi -8,tmp6,tmp6
add tmp5,tmp6,tmp6 /* adjust by the dest offset if this is our first store */
b byteloop
add tmp6,s_addr,s_addr /* adjust s_addr by (8-shift_amt-dest_off) */
null4:
add,> tmp6,r0,tmp6 /* if left shift */
b,n null3 /* then do null3 */
b byteloop
addi -4,s_addr,s_addr /* adj source only by 4 */
done:
/* Always NUL-terminate the destination; return via r2. */
bv 0(r2)
stbs r0,0(d_addr)
quit:
EXIT(strncat)
|
32bitmicro/newlib-nano-1.0
| 4,191
|
newlib/libc/machine/hppa/setjmp.S
|
/* Copyright (c) 1995, 2002 Red Hat Incorporated.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* The name of Red Hat Incorporated may not be used to endorse
* or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Note I don't know an easy way to get the FP version into the
traditional C library and the non-FP version into the soft-float
library. Maybe we'll have to have -msoft-float trigger something
like -DSOFT_FLOAT if this issue ever arises. */
#include "DEFS.h"
#if 0
.SPACE $PRIVATE$
.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
.SPACE $TEXT$
.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
.IMPORT $global$,DATA
.IMPORT $$dyncall,MILLICODE
; gcc_compiled.:
#endif
TEXT_SEGMENT
.align 4
.EXPORT setjmp,ENTRY,PRIV_LEV=3,ARGW0=GR,RTNVAL=GR
; int setjmp (jmp_buf buf)
; In:  %r26 = pointer to jmp_buf (may only be 4-byte aligned).
; Saves the stack pointer (%r30), return pointer (%r2), the
; callee-saved registers %r3-%r18 and %r27, and -- when FP is
; defined -- the callee-saved float registers %fr12-%fr15.
; Returns 0 in %r28.
setjmp
.PROC
.CALLINFO FRAME=64,NO_CALLS,SAVE_SP,ENTRY_GR=3
.ENTRY
stwm %r30,4(%r26)
stwm %r2,4(%r26)
stwm %r3,4(%r26)
stwm %r4,4(%r26)
stwm %r5,4(%r26)
stwm %r6,4(%r26)
stwm %r7,4(%r26)
stwm %r8,4(%r26)
stwm %r9,4(%r26)
stwm %r10,4(%r26)
stwm %r11,4(%r26)
stwm %r12,4(%r26)
stwm %r13,4(%r26)
stwm %r14,4(%r26)
stwm %r15,4(%r26)
stwm %r16,4(%r26)
stwm %r17,4(%r26)
stwm %r18,4(%r26)
stwm %r27,4(%r26)
#ifdef FP
; jmp_buf may only have a 4 byte alignment, so handle FP stores
; very carefully.
; Each double is spilled to the stack with fstds, then moved into
; the jmp_buf one word at a time through %r28.
fstds %fr12,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr13,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr14,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
fstds %fr15,-16(%r30)
ldw -16(%r30),%r28
stwm %r28,4(%r26)
ldw -12(%r30),%r28
stwm %r28,4(%r26)
#endif
; Return 0 in the delay slot of the branch back to the caller.
bv 0(%r2)
copy %r0,%r28
.EXIT
.PROCEND
.align 4
.EXPORT longjmp,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,RTNVAL=GR
; void longjmp (jmp_buf buf, int val)
; In:  %r26 = pointer to jmp_buf, %r25 = value setjmp should return.
; Restores the registers saved by setjmp (mirror of its stores),
; then returns through the restored %r2 with %r28 = val,
; substituting 1 when val is 0.
longjmp
.PROC
.CALLINFO FRAME=64,NO_CALLS,SAVE_SP,ENTRY_GR=3
.ENTRY
ldwm 4(%r26),%r30
ldwm 4(%r26),%r2
ldwm 4(%r26),%r3
ldwm 4(%r26),%r4
ldwm 4(%r26),%r5
ldwm 4(%r26),%r6
ldwm 4(%r26),%r7
ldwm 4(%r26),%r8
ldwm 4(%r26),%r9
ldwm 4(%r26),%r10
ldwm 4(%r26),%r11
ldwm 4(%r26),%r12
ldwm 4(%r26),%r13
ldwm 4(%r26),%r14
ldwm 4(%r26),%r15
ldwm 4(%r26),%r16
ldwm 4(%r26),%r17
ldwm 4(%r26),%r18
ldwm 4(%r26),%r27
#ifdef FP
; Reassemble each saved double on the stack word-by-word through
; %r28, then reload the callee-saved float registers %fr12-%fr15.
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr12
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr13
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr14
ldwm 4(%r26),%r28
stw %r28,-16(%r30)
ldwm 4(%r26),%r28
stw %r28,-12(%r30)
fldds -16(%r30),%fr15
#endif
; If val (%r25) is zero, the comclr nullifies nothing and the ldi
; replaces it with 1 (C standard requirement); return val in %r28.
comclr,<> %r0,%r25,%r0
ldi 1,%r25
bv 0(%r2)
copy %r25,%r28
.EXIT
.PROCEND
|
32bitmicro/newlib-nano-1.0
| 10,201
|
newlib/libc/machine/hppa/strcpy.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*
A faster strcpy.
by
Jerry Huck (aligned case)
Daryl Odnert (equal-alignment case)
Edgar Circenis (non-aligned case)
*/
/*
* strcpy(s1, s2)
*
* Copy string s2 to s1. s1 must be large enough.
* return s1
*/
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define tmp6 r24
#define tmp1 r19
#define evenside r19
#define tmp2 r20
#define oddside r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
/*
 * char *strcpy(char *s1, const char *s2) -- PA-RISC implementation.
 *
 * In:  d_addr (r26) = destination, s_addr (r25) = source.
 * Out: ret0 = original destination pointer.
 *
 * Strategy: dispatch on the low two bits of both pointers (BLR jump
 * table at case_analysis).  Word-aligned source and destination fall
 * through to a two-words-per-iteration loop that uses uxor,nbz/sbz to
 * detect a null byte anywhere in a word.  Misaligned combinations go
 * through the vshd-based shifting copies (pos_/neg_aligned_copy),
 * where the CR11 shift register positions source data for the
 * destination alignment.  NOTE(review): the jump table relies on each
 * case occupying exactly two instruction slots -- do not reflow it.
 */
ENTRY(strcpy)
/* Do some quick alignment checking on and fast path both word aligned */
extru,<> s_addr,31,2,tmp6 /*Is source word aligned? */
ldwm 4(0,s_addr),oddside /*Assume yes and guess that it
is double-word aligned. */
dep,= d_addr,29,2,tmp6 /*Is target word aligned? */
b case_analysis
copy d_addr,ret0
/* Both are aligned. First source word already loaded assuming that
source was oddword aligned. Fall through (therefore fastest) code
shuffles the registers to join the main loop */
bothaligned:
bb,>= s_addr,29,twoatatime /*Branch if source was odd aligned*/
uxor,nbz oddside,r0,save
/* Even aligned source. save holds that operand.
Do one iteration of the main copy loop juggling the registers to avoid
one copy. */
b,n nullfound
ldwm 4(s_addr),oddside
stwm save,4(d_addr)
uxor,nbz oddside,r0,save
b,n nullfound
ldwm 4(s_addr),evenside
stwm oddside,4(d_addr)
uxor,nbz evenside,r0,save
b,n nullfound
ldwm 4(s_addr),oddside
/* Main loop body. Entry expects evenside still to be stored, oddside
just loaded. uxor,nbz nullifies the following branch when the word
contains no null byte, so the loop keeps streaming two words/iter. */
loop:
stwm evenside,4(d_addr)
uxor,nbz oddside,r0,save
/* mid loop entry */
twoatatime:
b,n nullfound
ldwm 4(s_addr),evenside
stwm oddside,4(d_addr)
uxor,sbz evenside,r0,save
b loop
ldwm 4(s_addr),oddside
/* fall through when null found in evenside. oddside actually loaded */
nullfound: /* adjust d_addr and store final word */
extru,<> save,7,8,r0 /* pick up leftmost byte */
addib,tr,n 1,d_addr,store_final
extru,<> save,15,8,r0
addib,tr,n 2,d_addr,store_final
extru,<> save,23,8,r0
addib,tr 3,d_addr,store_final2
bv 0(rp)
stw save,0(d_addr)
store_final:
bv 0(rp)
store_final2:
stbys,e save,0(d_addr) /* delay slot */
/* Jump table: BLR branches to slot 2*tmp6 where tmp6 encodes the
   (target,source) low-bit combination. Exactly 2 insns per case. */
case_analysis:
blr tmp6,r0
nop
/* NOTE: the delay slots for the non-aligned cases load a */
/* shift quantity which is TGT-SRC into tmp3. */
/* Note also, the case for both strings being word aligned */
/* is already checked before the BLR is executed, so that */
/* case can never occur. */
/* TGT SRC */
nop /* 00 00 can't happen */
nop
b neg_aligned_copy /* 00 01 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 00 10 */
ldi -2,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 00 11 */
ldi -3,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 01 00 */
ldi 1,tmp3 /* load shift quantity. delay slot */
b equal_alignment_1 /* 01 01 */
ldbs,ma 1(s_addr),tmp1
b neg_aligned_copy /* 01 10 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b neg_aligned_copy /* 01 11 */
ldi -2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 10 00 */
ldi 2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 10 01 */
ldi 1,tmp3 /* load shift quantity. delay slot */
b equal_alignment_2 /* 10 10 */
ldhs,ma 2(s_addr),tmp1
b neg_aligned_copy /* 10 11 */
ldi -1,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy0 /* 11 00 */
ldi 3,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 11 01 */
ldi 2,tmp3 /* load shift quantity. delay slot */
b pos_aligned_copy /* 11 10 */
ldi 1,tmp3 /* load shift quantity. delay slot */
ldbs,ma 1(s_addr),tmp1 /* 11 11 */
comiclr,<> r0,tmp1,r0
bv 0(rp) /* return if 1st byte was null */
stbs,ma tmp1,1(d_addr) /* store a byte to dst string */
b bothaligned /* can now goto word_aligned */
ldwm 4(s_addr),oddside /* load next word of source */
/* Equal misalignment: copy bytes/halfwords until word aligned, then
   rejoin the fast bothaligned loop. */
equal_alignment_1:
comiclr,<> r0,tmp1,r0 /* nullify next if tmp1 <> 0 */
bv 0(rp) /* return if null byte found */
stbs,ma tmp1,1(d_addr) /* store a byte to dst string */
ldhs,ma 2(s_addr),tmp1 /* load next halfword */
equal_alignment_2:
extru,<> tmp1,23,8,tmp6 /* look at left byte of halfword */
bv 0(rp) /* return if 1st byte was null */
stbs,ma tmp6,1(d_addr)
extru,<> tmp1,31,8,r0
bv 0(rp) /* return if 2nd byte was null */
stbs,ma tmp1,1(d_addr)
b bothaligned
ldwm 4(s_addr),oddside /* load next word */
/* source and destination are not aligned, so we do it the hard way. */
/* target alignment is greater than source alignment */
pos_aligned_copy0:
addi -4,s_addr,s_addr
pos_aligned_copy:
extru d_addr,31,2,tmp6 /* Extract low 2 bits of the dest addr */
extru s_addr,31,2,tmp1 /* Extract low 2 bits of the src addr */
dep r0,31,2,s_addr /* Compute word address of the source. */
sh3add tmp3,r0,tmp4 /* compute shift amt */
ldwm 4(0,s_addr),tmp2 /* get 1st source word */
sh3add tmp1,r0,save /* setup mask shift amount */
mtctl save,r11 /* set-up cr11 for mask */
zvdepi -2,32,save /* create mask */
or save,tmp2,tmp2 /* mask unused bytes in src */
ldi -1,tmp1 /* load tmp1 with 0xffffffff */
mtctl tmp4,r11 /* shift count -> shift count reg */
vshd tmp1,tmp2,tmp3 /* position data ! */
uxor,nbz tmp3,r0,save
b,n first_null
uxor,nbz tmp2,r0,save
b nullfound1
mtctl tmp4,r11 /* re-load shift cnt (delay slot) */
b loop_entry
ldwm 4(0,s_addr),tmp1 /* get next word. delay slot */
/* target alignment is less than source alignment */
neg_aligned_copy:
extru d_addr,31,2,tmp6 /* Extract low 2 bits of the dest addr */
extru s_addr,31,2,tmp2 /* Extract low 2 bits of the src addr */
dep r0,31,2,s_addr /* Compute word address of the source. */
sh3add tmp3,r0,tmp4 /* compute shift amt */
ldwm 4(0,s_addr),tmp1 /* load first word from source. */
/* check to see if next word can be read safely */
sh3add tmp2,r0,save
mtctl save,r11 /* shift count -> shift count reg */
zvdepi -2,32,save
or save, tmp1, tmp1
uxor,nbz tmp1,r0,save /* any nulls in first word? */
b first_null0
mtctl tmp4,r11
ldwm 4(0,s_addr),tmp2 /* load second word from source */
combt,= tmp6,r0,chunk1 /* don't mask if whole word valid */
vshd tmp1,tmp2,tmp3 /* position data ! */
sh3add tmp6,r0,save /* setup r1 */
mtctl save,r11 /* set-up cr11 for mask */
zvdepi -2,32,save
or save, tmp3, tmp3
uxor,nbz tmp3,r0,save
b,n first_null
uxor,nbz tmp2,r0,save
b nullfound1
mtctl tmp4,r11 /* re-load shift cnt (delay slot) */
b loop_entry
ldwm 4(0,s_addr),tmp1 /* get next word. delay slot */
chunk1:
uxor,nbz tmp2,r0,save
b nullfound0
vshd tmp1,tmp2,tmp3
/* Main shifting copy loop: tmp2/tmp1 hold consecutive source words,
   vshd merges them at the destination alignment into tmp3. */
did_mask:
ldwm 4(0,s_addr),tmp1 /* get next word ! */
loop_entry:
stbys,b,m tmp3,4(0,d_addr) /* store ! */
uxor,nbz tmp1, r0, save
b nullfound2
vshd tmp2,tmp1,tmp3 /* position data ! */
ldwm 4(s_addr),tmp2
stwm tmp3,4(d_addr)
uxor,sbz tmp2,r0,save
b did_mask
nullfound0:
vshd tmp1,tmp2,tmp3 /* delay slot */
uxor,nbz tmp3,r0,save
b,n nullfound
nullfound1:
stbys,b,m tmp3,4(0,d_addr)
b nullfound
vshd tmp2,r0,save /* delay slot */
nullfound2:
uxor,nbz tmp3,r0,save
b,n nullfound
stwm tmp3,4(d_addr)
b nullfound
/* notice that delay slot is in next routine */
first_null0: /* null found in first word of non-aligned (wrt d_addr) */
vshd tmp1,r0,save /* delay slot */
combt,= tmp6,r0,check4
extru save,7,8,tmp4
first_null:
addibt,= -1,tmp6,check3 /* check last 3 bytes of word */
extru save,15,8,tmp4
addibt,=,n -1,tmp6,check2 /* check last 2 bytes */
bv 0(rp) /* null in last byte--store and exit */
stbys,b save, 0(d_addr)
check4:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
extru,<> save,15,8,tmp4
check3:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
check2:
extru,<> save,23,8,tmp4
bv 0(rp)
stbs,ma tmp4,1(d_addr)
bv 0(rp)
stbs r0,0(d_addr)
done:
EXIT(strcpy)
|
32bitmicro/newlib-nano-1.0
| 7,404
|
newlib/libc/machine/hppa/strncmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* strncmp(s1, s2, n) */
/* returns integer: < 0 iff s1 lexicographically less than s2 */
/* > 0 iff s1 lexicographically greater than s2 */
/* = 0 iff s1 lexicographically equal to s2 */
/* comparison stops after at most n characters */
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 29
#define save 1
#define tmp6 23
#define tmp5 28
#define count 24
/*
 * int strncmp(const char *s1, const char *s2, size_t n) -- PA-RISC.
 *
 * In:  r26 = s1, r25 = s2, r24 = n (count).
 * Out: ret0 < 0, == 0, > 0 as s1 compares below, equal to, or above s2
 *      over at most n characters.
 *
 * Fast paths: n <= 0 and s1 == s2 return 0 immediately; a NULL s1 or
 * s2 is handled by comparing against the other string's first byte.
 * The main path compares a word at a time; when the pointers have
 * different low bits, one stream is realigned with vshd through CR11.
 * Leading garbage bytes of a partially-aligned first word are forced
 * non-zero with a zvdepi mask (and count is bumped to compensate).
 * uxor,nbz nullifies the following insn when a word has no null byte.
 */
ENTRY(strncmp)
combt,<,n r0,count,search /* N <= 0 yields equality */
bv r0(rp) /* */
copy 0,ret0 /* return 0 (DELAY SLOT) */
search: combf,=,n s1,s2,findout /* s1 != s2? */
bv r0(rp) /* */
copy 0,ret0 /* return 0 (delay slot) */
findout:
comibf,=,n 0,s1,checks1 /* s1 == NULL? */
ldbs 0(0,s2),ret0 /* */
bv r0(rp) /* */
subi 0,ret0,ret0 /* ret0 <- -*s2 */
checks1:
comibf,=,n 0,s2,checkitout /* s2 == NULL? */
bv r0(rp) /* */
ldbs 0(0,s1),28 /* return *s1 */
checkitout:
extru s2,31,2,tmp1 /* Extract the low two bits of the s2. */
extru s1,31,2,tmp5 /* Extract the low two bits of the s1 */
sub,= tmp5,tmp1,tmp3 /* Are s1 & s2 aligned with each other? */
b not_aligned /* It's more complicated (not_aligned) */
dep 0,31,2,s1 /* Compute word address of s1 (DELAY SLOT) */
dep 0,31,2,s2 /* Compute word address of s2 */
ldwm 4(0,s1),s1word /* get next s1 word s1+=4 */
combt,= tmp5,r0,skipmask /* skip masking, if we can */
ldwm 4(0,s2),s2word /* get next s2 word s2+=4 (DELAY SLOT) */
add tmp5,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /* mask s1word (s1) */
or save,s2word,s2word /* mask s2word (s2) */
skipmask:
combt,=,n s1word,s2word,chknulls /* are these words equal? */
/* Words differ: find the first differing (or null) byte and return. */
checkbyte:
extru s1word,7,8,tmp3 /* get first byte (character) */
ckbyte2: extru s2word,7,8,tmp7 /* get first byte (character) */
combf,= tmp3,tmp7,done /* quit if first byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
comibt,=,n 0,tmp3,done /* have we reached the end of string */
/* if so done ret0 already has zero */
addibt,<=,n -1,count,done /* have we checked N chars? ret0 == 0 */
extru s1word,15,8,tmp3 /* get second byte (character) */
extru s2word,15,8,tmp7 /* get second byte (character) */
combf,= tmp3,tmp7,done /* quit if second byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
comibt,=,n 0,tmp3,done /* have we reached the end of string */
/* if so done ret0 already has zero */
addibt,<=,n -1,count,done /* have we checked N chars? */
extru s1word,23,8,tmp3 /* get third byte (character) */
extru s2word,23,8,tmp7 /* get third byte (character) */
combf,= tmp3,tmp7,done /* done if third byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
comibt,=,n 0,tmp3,done /* have we reached the end of string */
/* if so done ret0 already has zero */
addibt,<=,n -1,count,done /* have we checked N chars? */
extru s1word,31,8,tmp3 /* get last byte (character) */
extru s2word,31,8,tmp7 /* get last byte (character) */
bv r0(rp) /* */
sub tmp3,tmp7,ret0 /* the last characters in the word is */
/* where the difference is, so return */
/* the difference and we're outta here */
/* Words were equal: quit on exhausted count or an embedded null,
   otherwise load the next word pair and keep going. */
chknulls:
addibt,<=,n -4,count,zero /* have we checked N chars? */
uxor,nbz s1word,0,0 /* no null in s1word; don't have to check s2 */
bv r0(rp) /* */
copy 0,28 /* return 0 */
ldwm 4(0,s2),s2word /* get next s2 word s2+=4 */
b skipmask /* keep checking */
ldwm 4(0,s1),s1word /* get next s1 word s1+=4 */
/* s1 and s2 have different low bits: shift one stream into the
   other's alignment with vshd (shift amount kept in CR11). */
not_aligned:
dep r0,31,2,s2 /* Compute word address of s2 */
combt,<,n r0,tmp3,shifts1 /* Do we shift s1 or s2 */
sh3add tmp3,r0,tmp3 /* eight bits per byte so mul by 8 */
ldwm 4(0,s1),s1word /* get first word of s1 */
ldwm 4(0,s2),s2word /* get first word or s2 */
combt,=,n r0,tmp5,masks2 /* Do we need to mask beginning of s1 */
add tmp5,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /* */
masks2: sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word /* */
mtctl tmp3,11 /* Move shift amount to CR11 */
more: uxor,nbz s2word,r0,r0 /* Is there a null in first word */
b,n chunk1 /* */
ldwm 4(0,s2),tmp7 /* load second word to enable us to shift */
vshd s2word,tmp7,s2word /* */
combf,=,n s1word,s2word,ckbyte2 /* */
extru s1word,7,8,tmp3 /* get first byte (DELAY SLOT) */
addibt,<=,n -4,count,zero /* have we checked N chars? */
uxor,nbz s1word,0,0 /* even though they're equal we could be done */
b,n zero
copy tmp7,s2word /* */
b more /* keep checking */
ldwm 4(0,s1),s1word /* get next s1 (DELAY SLOT) */
chunk1:
vshd s2word,r0,s2word /* */
b ckbyte2 /* */
extru s1word,7,8,tmp3 /* */
shifts1:
sh3add tmp3,r0,tmp3 /* eight bits per byte so mul by 8 */
sub r0,tmp3,tmp3 /* Get negative value for left shift */
ldwm 4(0,s2),s2word /* get first word of s2 */
ldwm 4(0,s1),s1word /* get first word or s1 */
combt,=,n r0,tmp1,masks1 /* Do we need to mask beginning of s2 */
add tmp1,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word /* */
masks1: sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /* */
mtctl tmp3,11 /* Move shift amount to CR11 */
more1: uxor,nbz s1word,r0,r0 /* Is there a null in first byte */
b,n chunk2 /* */
ldwm 4(0,s1),tmp7 /* load second word to enable us to shift */
vshd s1word,tmp7,s1word /* */
combf,=,n s2word,s1word,ckbyte2 /* */
extru s1word,7,8,tmp3 /* get first byte (DELAY SLOT) */
addibt,<=,n -4,count,zero /* have we checked N chars? */
uxor,nbz s2word,0,0 /* even though they're equal we could be done */
b,n zero /* zero ret0 and quit */
copy tmp7,s1word /* */
b more1 /* keep checking */
ldwm 4(0,s2),s2word /* get next s2 (DELAY SLOT) */
chunk2:
vshd s1word,r0,s1word /* */
b ckbyte2 /* */
extru s1word,7,8,tmp3 /* */
zero: copy r0,ret0
done:
EXIT(strncmp)
|
32bitmicro/newlib-nano-1.0
| 2,480
|
newlib/libc/machine/hppa/memset.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* SPECTRUM_ID: @(#)memset.s 37.4 86/08/25 */
/*
* memset(s, c, n)
*
* Sets first n chars in memory area s to value of character c.
* Returns s.
*/
#ifndef _NAMESPACE_CLEAN
#define NOSECDEF /* prevent _memset from being defined as entry */
#endif
#include "DEFS.h"
#define TO arg0
#define FILLCHAR arg1
#define COUNT arg2
#define TMP r31
/*
 * void *memset(void *s, int c, size_t n) -- PA-RISC implementation.
 *
 * In:  TO (arg0) = s, FILLCHAR (arg1) = c, COUNT (arg2) = n.
 * Out: ret0 = s.
 *
 * Counts of 5 or fewer bytes are filled with a simple byte loop.
 * Otherwise the fill byte is replicated into all four byte lanes,
 * the ragged first and last words are finished with stbys (store
 * bytes, which writes only the bytes inside the fill region), and
 * the aligned middle is filled a word at a time -- unrolled four
 * stores per iteration with a remainder word loop.
 */
ENTRY(memset)
comb,<= COUNT,r0,msexit /* return if count not positive */
copy TO,ret0 /* return value is start of copy */
comibf,<,n 5,COUNT,msbyteloop /* be straightforward */
dep FILLCHAR,23,8,FILLCHAR /* dup low byte */
dep FILLCHAR,15,16,FILLCHAR /* into high bytes */
add TO,COUNT,TMP /* TMP points just past fill area */
stbys,m FILLCHAR,0(TO) /* fill out first word */
/*
 * If we're pointing to high-order byte, no fill will happen,
 * but permissions will be checked. We don't want this (we
 * might be pointing at the beginning of a protected region),
 * so we branch around stbys if neither low bits are set.
 */
bb,<,n TMP,31,filend /* if low bit is set, stbys */
bb,>=,n TMP,30,endfil /* if next lowest bit isn't set */
/* (and lowest isn't, either) */
/* do not stbys */
filend:
stbys,m,e FILLCHAR,0(TMP) /* fill out the last */
endfil:
addi 4, TO, TO
sub TMP,TO,COUNT /* will now divide by 4 */
comb,=,n COUNT,r0,msexit /* If count is zero ret. */
extru,<> COUNT,31,4,r1 /* r1 = leftover bytes mod 16 */
b msquadloop
depi 0,31,4,COUNT /* will now divide by 16 */
/* one word per iteration for the count mod 16 remainder */
mswordloop:
addib,<> -4,r1,mswordloop
stws,ma FILLCHAR,4(TO)
comb,=,n COUNT,r0,msexit /* If count is zero ret. */
/* four words (16 bytes) per iteration for the bulk */
msquadloop:
stws,ma FILLCHAR,4(TO)
stws,ma FILLCHAR,4(TO)
stws,ma FILLCHAR,4(TO)
addib,<> -16,COUNT,msquadloop
stws,ma FILLCHAR,4(TO)
b,n msexit
/* short counts: one byte at a time */
msbyteloop:
addib,<> -1,COUNT,msbyteloop
stbs,ma FILLCHAR,1(TO)
msexit:
EXIT(memset)
|
32bitmicro/newlib-nano-1.0
| 1,332
|
newlib/libc/machine/hppa/memchr.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* SPECTRUM_ID: @(#)memchr.s 37.4 86/04/23 */
/*
* memchr(s, c, n)
*
* returns pointer to first occurrence of char c
* in first n characters of memory area s,
* or null if c does not occur.
*/
#include "DEFS.h"
#define FROM arg0
#define CHAR arg1
#define COUNT arg2
#define TEMP1 r19
/*
 * void *memchr(const void *s, int c, size_t n) -- PA-RISC implementation.
 *
 * In:  FROM (arg0) = s, CHAR (arg1) = c, COUNT (arg2) = n.
 * Out: ret0 = pointer to the first occurrence of (unsigned char)c in
 *      the first n bytes of s, or NULL if not found (or n <= 0).
 *
 * Simple byte-at-a-time scan: ldbs,ma loads a byte and advances FROM,
 * so on a match FROM points one past the hit and the result is FROM-1.
 */
ENTRY(memchr)
comb,<= COUNT,r0,memchrexit /* return if count is zero */
copy r0,ret0 /* null if c not found in n chars */
depi 0,23,24,CHAR /* make char unsigned */
ldbs,ma 1(FROM),TEMP1 /* prime the loop with the first byte */
memchrloop:
comb,=,n TEMP1,CHAR,memchrequal
addib,<> -1,COUNT,memchrloop
ldbs,ma 1(FROM),TEMP1 /* delay slot: fetch next byte */
b,n memchrexit
memchrequal:
ldo -1(FROM),ret0 /* FROM already advanced past the match */
memchrexit:
EXIT(memchr)
|
32bitmicro/newlib-nano-1.0
| 7,108
|
newlib/libc/machine/hppa/memcmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* memcmp(s1, s2, n) */
/* returns integer: < 0 iff s1 lexicographically less than s2 */
/* > 0 iff s1 lexicographically greater than s2 */
/* = 0 iff s1 lexicographically equal to s2 */
/* comparison stops after at most n characters */
#ifndef _NAMESPACE_CLEAN
#define NOSECDEF /* prevents _memcmp from becoming primary entry */
#endif
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 29
#define save 1
#define tmp6 23
#define tmp5 28
#define count 24
/*
 * int memcmp(const void *s1, const void *s2, size_t n) -- PA-RISC.
 *
 * In:  r26 = s1, r25 = s2, r24 = n (count).
 * Out: ret0 < 0, == 0, > 0 as s1 compares below, equal to, or above s2
 *      over the first n bytes.
 *
 * Mirrors strncmp's structure but never stops at a null byte -- only
 * the count terminates the scan.  Equal low bits: compare a word at a
 * time, masking the leading partial word with zvdepi (count is bumped
 * to compensate).  Different low bits: realign one stream with vshd
 * via CR11; tmp1/tmp5 track how many bytes of the lookahead word are
 * valid so we never read past what count permits.
 */
ENTRY(memcmp)
combt,<,n r0,count,search /*N <= 0 yields equality */
b done /**/
copy 0,ret0 /*return 0 (DELAY SLOT) */
search: combf,=,n s1,s2,findout /*s1 != s2? */
b done
copy 0,ret0 /*return 0 (delay slot) */
findout:
comibf,=,n 0,s1,checks1 /*s1 == NULL? */
ldbs 0(0,s2),ret0 /**/
b done /*quit */
sub 0,ret0,ret0 /*ret0 <- -*s2 */
checks1:
comibf,=,n 0,s2,checkitout /*s2 == NULL? */
b done /* quit */
ldbs 0(0,s1),28 /* return *s1 */
checkitout:
extru s2,31,2,tmp1 /* Extract the low two bits of the s2. */
extru s1,31,2,tmp5 /* Extract the low two bits of the s1 */
sub,= tmp5,tmp1,tmp3 /* Are s1 & s2 aligned with each other? */
b not_aligned /* It's more complicated (not_aligned) */
dep 0,31,2,s1 /* Compute word address of s1 (DELAY SLOT) */
dep 0,31,2,s2 /* Compute word address of s2 */
ldwm 4(0,s1),s1word /* get next s1 word s1+=4 */
combt,= tmp5,r0,skipmask /* skip masking, if we can */
ldwm 4(0,s2),s2word /* get next s2 word s2+=4 (DELAY SLOT) */
add tmp5,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /* mask s1word (s1) */
or save,s2word,s2word /* mask s2word (s2) */
skipmask:
combt,=,n s1word,s2word,checkN /* We may be done */
/* Words differ: locate the first differing byte and return its diff. */
checkbyte:
extru s1word,7,8,tmp3 /* get first byte (character) */
ckbyte2: extru s2word,7,8,tmp7 /* get first byte (character) */
combf,= tmp3,tmp7,done /* quit if first byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
addibt,<=,n -1,count,done /* have we checked N chars? ret0 == 0 */
extru s1word,15,8,tmp3 /* get second byte (character) */
extru s2word,15,8,tmp7 /* get second byte (character) */
combf,= tmp3,tmp7,done /* quit if second byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
addibt,<=,n -1,count,done /* have we checked N chars? */
extru s1word,23,8,tmp3 /* get third byte (character) */
extru s2word,23,8,tmp7 /* get third byte (character) */
combf,= tmp3,tmp7,done /* done if third byte is not equal */
sub tmp3,tmp7,ret0 /* return difference (delay slot) */
addibt,<=,n -1,count,done /* have we checked N chars? */
extru s1word,31,8,tmp3 /* get last byte (character) */
extru s2word,31,8,tmp7 /* get last byte (character) */
b done /* if we reach this point we know that */
sub tmp3,tmp7,ret0 /* the last character in the word is */
/* where the difference is, so return */
/* the difference and we're outta here */
/* Words were equal: stop if count exhausted, else fetch next pair. */
checkN:
addibt,<=,n -4,count,zero /* have we checked N chars? */
ldwm 4(0,s2),s2word /* get next s2 word s2+=4 */
b skipmask /* keep checking */
ldwm 4(0,s1),s1word /* get next s1 word s1+=4 */
/* Pointers have different low bits: shift one stream into the
   other's alignment (shift amount kept in CR11). */
not_aligned:
dep r0,31,2,s2 /* Compute word address of s2 */
combt,<,n r0,tmp3,shifts1 /* Do we shift s1 or s2 */
sh3add tmp3,r0,tmp3 /* eight bits per byte so mul by 8 */
ldwm 4(0,s1),s1word /* get first word of s1 */
ldwm 4(0,s2),s2word /* get first word or s2 */
combt,=,n r0,tmp5,masks2 /* Do we need to mask beginning of s1 */
add tmp5,count,count /* bump count by the number of bytes */
/* we are going to mask */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word /**/
masks2: sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word /**/
subi 4,tmp1,tmp1 /* tmp1 now has the number of byte that */
/* are valid in s2word before the vshd */
mtctl tmp3,11 /* Move shift amount to CR11 */
more: combt,<=,n count,tmp1,chunk1 /* Can we do the vshd? */
ldwm 4(0,s2),tmp7 /* load second word to enable us to shift */
vshd s2word,tmp7,s2word /**/
combf,=,n s1word,s2word,ckbyte2 /**/
extru s1word,7,8,tmp3 /* get first byte (DELAY SLOT) */
addibt,<=,n -4,count,zero /* have we checked N chars? */
copy tmp7,s2word /**/
b more /* keep checking */
ldwm 4(0,s1),s1word /* get next s1 (DELAY SLOT) */
chunk1:
vshd s2word,r0,s2word /* do an arithmetic shift left to position data */
b ckbyte2 /**/
extru s1word,7,8,tmp3 /**/
shifts1:
sh3add tmp3,r0,tmp3 /* eight bits per byte so mul by 8 */
sub r0,tmp3,tmp3 /* Get negative value for left shift */
dep r0,31,2,s2 /* Compute word address of s2 */
ldwm 4(0,s2),s2word /* get first word of s2 */
ldwm 4(0,s1),s1word /* get first word or s1 */
combt,=,n r0,tmp1,masks1 /*Do we need to mask beginning of s2 */
add tmp1,count,count /*bump count by the number of bytes */
/* we are going to mask */
sh3add tmp1,r0,save /*save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /*load save with proper mask */
or save,s2word,s2word /**/
masks1: sh3add tmp5,r0,save /*save now has number of bits to mask */
mtctl save,11
zvdepi -2,32,save /*load save with proper mask */
or save,s1word,s1word /**/
subi 4,tmp5,tmp5 /*tmp5 now has the number of byte that */
/*are valid in s1word before the vshd */
mtctl tmp3,11 /*Move shift amount to CR11 */
more1: combt,<=,n count,tmp5,chunk2 /*Can we do the vshd? */
ldwm 4(0,s1),tmp7 /*load second word to enable us to shift */
vshd s1word,tmp7,s1word /**/
combf,=,n s2word,s1word,ckbyte2 /**/
extru s1word,7,8,tmp3 /*get first byte (DELAY SLOT) */
addibt,<=,n -4,count,zero /*have we checked N chars? */
copy tmp7,s1word /**/
b more1 /*keep checking */
ldwm 4(0,s2),s2word /*get next s2 (DELAY SLOT) */
chunk2:
vshd s1word,r0,s1word /**/
b ckbyte2 /**/
extru s1word,7,8,tmp3 /**/
zero: copy r0,ret0
done:
EXIT(memcmp)
|
32bitmicro/newlib-nano-1.0
| 3,376
|
newlib/libc/machine/hppa/strlen.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID = "@(#) $Revision: 1.1 $" */
/* strlen(s): Return length of string s */
#define start arg0
#define end ret0
#define tmp1 arg1
#define tmp2 arg2
#include "DEFS.h"
/*
 * size_t strlen(const char *s) -- PA-RISC implementation.
 *
 * In:  start (arg0) = s.
 * Out: ret0 = number of characters before the terminating null.
 *      NOTE(review): a NULL argument takes the $null_ptr path and
 *      returns with ret0 still holding the (zeroed) rounded pointer.
 *
 * Scans a word at a time with uxor,nbz as the null-byte detector.
 * Garbage bytes before the string start (when s is not word aligned)
 * are forced non-zero with a vdepi mask so they cannot stop the scan.
 */
ENTRY(strlen)
movb,=,n start,end,$null_ptr
depi 0,31,2,end /* round end down to a word boundary */
comb,<> start,end,$not_aligned
ldws,ma 4(end),tmp1 /* load first word, end += 4 */
comib,tr 0,0,$loop /* avoid INDIGO two register interlock */
uxor,nbz 0,tmp1,0
$not_aligned:
/*
; Tricky code. The problem is that the value of the word
; including the start of the string has some garbage bytes that
; may be 0. We don't want them to stop the string scan. So
; we make those bytes non-zero (and any old non-zero value
; will do). Notice that the end pointer has been rounded
; down to a word boundary, and then incremented to the next
; word by the time we get here. Therefore, (start-end) has
; one of the values (-3, -2, or -1). Use uaddcm to do the
; subtraction (instead of sub), and the result will be
; (-4, -3, or -2). Multiply this by 8, and put into the
; shift register (which truncates to the last 5 bits) and
; the value will be (0, 8, or 16). Use this as a bit position,
; and drop a mask down into tmp1. All the garbage bytes will
; have at least 1 bit affected by the vdepi, so all the garbage
; in this first word will be non-zero garbage.
*/
uaddcm start,end,tmp2 /* tmp2 <- { -4, -3, -2 } */
sh3add tmp2,0,tmp2 /* tmp2 <- { -32, -24, -16 } */
mtsar tmp2 /* sar <- { 0, 8, 16 } */
vdepi -1,32,tmp1
uxor,nbz 0,tmp1,0 /* null in masked first word? */
$loop:
b,n $end_loop
ldws,ma 4(end),tmp1
comib,tr 0,0,$loop /* avoid INDIGO two register interlock */
uxor,nbz 0,tmp1,0
$end_loop:
/* adjust the end pointer to one past the end of the string */
extru,<> tmp1,7,8,0
addib,tr,n -3,end,$out
extru,<> tmp1,15,8,0
addib,tr,n -2,end,$out
extru,<> tmp1,23,8,0
addi -1,end,end
$out:
bv 0(rp)
/*
; tricky code. the end pointer is just beyond the terminating
; null byte, so the length is (end-start-1). use uaddcm
; to do this in 1 instruction
*/
uaddcm end,start,ret0
$null_ptr:
EXIT(strlen)
|
32bitmicro/newlib-nano-1.0
| 5,263
|
newlib/libc/machine/hppa/strcat.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision: 1.1 $ */
/*
* strcat(s1, s2)
*
* Concatenate s2 on the end of s1. S1's space must be large enough.
* Return s1.
*/
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define tmp6 r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
ENTRY(strcat)
/* char *strcat(char *d_addr, const char *s_addr)
   Append s_addr (s2) onto the end of d_addr (s1); return d_addr.
   Strategy: scan s1 a word at a time to find its terminating null,
   then copy s2 word-at-a-time, shifting when the two pointers have
   different word alignment.  uxor,nbz = "unit XOR, nullify next
   instruction if no byte of the result is zero" — the standard
   PA-RISC word-at-a-time null-byte probe. */
comb,= r0,s_addr,done /* quit if s2=NULL */
copy d_addr,ret0 /* The return value is the value of d_addr. DELAY SLOT*/
/* First look for end of s1 (d_addr) */
extru d_addr,31,2,tmp1 /* Extract the low two bits of the dest address. */
combt,= tmp1,r0,dont_mask
dep 0,31,2,d_addr /* set word alignment (round d_addr down; DELAY SLOT) */
/* d_addr was unaligned: read the first word and force the leading
   (pre-string) bytes to 0xff so they can never look like a null. */
ldwm 4(d_addr),tmp2
sh3add tmp1,r0,save /* build mask based on tmp1: save = 8*tmp1 bit count */
mtctl save,11 /* cr11 = sar = shift amount for zvdepi */
zvdepi -2,32,save /* save = 0xff..ff00..00 mask covering leading bytes */
or save,tmp2,tmp2
uxor,nbz tmp2,r0,save
search:
b,n found_end /* nullified under uxor conditions above and below */
dont_mask:
ldwm 4(d_addr),tmp2
comib,tr r0,r0,search /* unconditional loop back (always-taken compare) */
uxor,nbz tmp2,r0,save /* delay slot: probe word for a null byte */
found_end: /* at this point d_addr points to word */
extru,<> save,7,8,r0 /* following word with null; which byte was it? */
addib,tr,n -4,d_addr,begin_copy /*set d_addr to end of s1 (null in byte 0) */
extru,<> save,15,8,r0
addib,tr,n -3,d_addr,begin_copy /* null in byte 1 */
extru,<> save,23,8,r0
addi -1,d_addr,d_addr /* null in byte 2: back up twice ... */
addi -1,d_addr,d_addr /* ... null in byte 3: back up once */
begin_copy:
/* d_addr now addresses s1's terminating null; start appending s2. */
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address. */
extru d_addr,31,2,tmp6 /* Extract the low two bits of the destination address. */
sub,= tmp6,tmp1,tmp3 /* Compute the shift quantity and don't branch if tmp6=tmp1. */
b not_aligned /* Not_aligned says that shifts will be needed. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT. */
/* aligned: src and dest are mutually aligned, copy whole words */
combt,= tmp6,r0,skip_mask
ldwm 4(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 4 (DELAY SLOT) */
sh3add tmp6,r0,save /* mask the bytes before the append point with 0xff */
mtctl save,r11
zvdepi -2,32,save
or save,tmp1,tmp1
uxor,nbz tmp1,r0,save
b,n first_null /* special case: null in first word */
b,n skip_mask2
chunks:
b,n null_found /* delay slot for uxor below */
skip_mask2:
stbys,b,m tmp1,4(d_addr) /* store-bytes "begin": skips leading bytes 1st time */
ldwm 4(s_addr),tmp1
skip_mask:
comib,tr 0,0,chunks /* unconditional loop back */
uxor,nbz tmp1,r0,save /* delay slot: probe for null */
/* Begin non_aligned code. */
not_aligned:
sh3add,>= tmp3,r0,tmp4 /* compute the shift amt.and skip load if tmp6 > tmp1. */
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++ */
ldwm 4(0,s_addr),tmp2 /* get either first or second word from source. */
combt,= tmp6,r0,chunk2 /* don't mask if whole word is valid */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register. */
vshd tmp1,tmp2,tmp3 /* position data ! (delay slot) */
sh3add tmp6,r0,save /* setup r1 */
mtctl save,r11 /* set-up cr11 for mask */
zvdepi -2,32,save
or save, tmp3, tmp3
uxor,nbz tmp3,r0,save
b,n first_null2
b did_mask
mtctl tmp4,11 /* re-load the shift count into cr11 */
chunk2:
vshd tmp1,tmp2,tmp3
uxor,nbz tmp3, r0, save
b,n null_found
did_mask:
/* main unrolled non-aligned loop: shift tmp1:tmp2 pairs into place */
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get next word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
uxor,nbz tmp3, r0, save
b,n null_found
stwm tmp3,4(d_addr)
comib,tr 0,0,chunk2 /* unconditional loop back */
ldwm 4(s_addr),tmp2
null_found: /* adjust d_addr and store final word */
extru,<> save,7,8,r0
addib,tr,n 1,d_addr,store_final
extru,<> save,15,8,r0
addib,tr,n 2,d_addr,store_final
extru,<> save,23,8,r0
addib,tr 3,d_addr,store_final2
bv 0(r2)
stw save,0(d_addr) /* null in byte 3: whole word store (delay slot) */
store_final:
bv 0(r2)
store_final2:
stbys,e save,0(d_addr) /* store-bytes "end" up to null; delay slot */
first_null: /* null found in first word of aligned (wrt d_addr) */
/* At most 3 bytes remain; finish byte-at-a-time. */
addi -4,s_addr,s_addr /* undo the auto-increment of ldwm */
ldbx tmp6(s_addr),tmp4
add tmp6,s_addr,s_addr
comib,= 0,tmp4,done
stbs,ma tmp4,1(d_addr)
ldbs 1(s_addr),tmp4
comib,= 0,tmp4,done
stbs,ma tmp4,1(d_addr)
bv 0(r2) /* done */
stbs 0,0(d_addr) /* write the terminating null (delay slot) */
first_null2: /* null found in first word of non-aligned (wrt d_addr) */
addibt,= -1,tmp6,check3 /* check last 3 bytes of word */
extru save,15,8,tmp4
addibt,=,n -1,tmp6,check2 /* check last 2 bytes */
bv 0(r2)
stbys,b save, 0(d_addr)
check3:
combt,= tmp4,r0,done
stbs,ma tmp4,1(d_addr)
check2:
extru,<> save,23,8,tmp4
bv 0(r2)
stbs,ma tmp4,1(d_addr)
bv 0(r2)
stbs r0,0(d_addr) /* terminating null (delay slot) */
done:
EXIT(strcat)
|
32bitmicro/newlib-nano-1.0
| 7,312
|
newlib/libc/machine/hppa/pcc_prefix.s
|
;
; (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;
; To anyone who acknowledges that this file is provided "AS IS"
; without any express or implied warranty:
; permission to use, copy, modify, and distribute this file
; for any purpose is hereby granted without fee, provided that
; the above copyright notice and this notice appears in all
; copies, and that the name of Hewlett-Packard Company not be
; used in advertising or publicity pertaining to distribution
; of the software without specific, written prior permission.
; Hewlett-Packard Company makes no representations about the
; suitability of this software for any purpose.
;
; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
; - fr16-31 added at Utah
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
r0: .equ 0
r1: .equ 1
r2: .equ 2
r3: .equ 3
r4: .equ 4
r5: .equ 5
r6: .equ 6
r7: .equ 7
r8: .equ 8
r9: .equ 9
r10: .equ 10
r11: .equ 11
r12: .equ 12
r13: .equ 13
r14: .equ 14
r15: .equ 15
r16: .equ 16
r17: .equ 17
r18: .equ 18
r19: .equ 19
r20: .equ 20
r21: .equ 21
r22: .equ 22
r23: .equ 23
r24: .equ 24
r25: .equ 25
r26: .equ 26
r27: .equ 27
r28: .equ 28
r29: .equ 29
r30: .equ 30
r31: .equ 31
; Hardware Space Registers
sr0: .equ 0
sr1: .equ 1
sr2: .equ 2
sr3: .equ 3
sr4: .equ 4
sr5: .equ 5
sr6: .equ 6
sr7: .equ 7
; Hardware Floating Point Registers
fr0: .equ 0
fr1: .equ 1
fr2: .equ 2
fr3: .equ 3
fr4: .equ 4
fr5: .equ 5
fr6: .equ 6
fr7: .equ 7
fr8: .equ 8
fr9: .equ 9
fr10: .equ 10
fr11: .equ 11
fr12: .equ 12
fr13: .equ 13
fr14: .equ 14
fr15: .equ 15
fr16: .equ 16
fr17: .equ 17
fr18: .equ 18
fr19: .equ 19
fr20: .equ 20
fr21: .equ 21
fr22: .equ 22
fr23: .equ 23
fr24: .equ 24
fr25: .equ 25
fr26: .equ 26
fr27: .equ 27
fr28: .equ 28
fr29: .equ 29
fr30: .equ 30
fr31: .equ 31
; Hardware Control Registers (each cr has an architectural alias below it)
cr0: .equ 0
rctr: .equ 0 ; Recovery Counter Register
cr8: .equ 8 ; Protection ID 1
pidr1: .equ 8
cr9: .equ 9 ; Protection ID 2
pidr2: .equ 9
cr10: .equ 10
ccr: .equ 10 ; Coprocessor Configuration Register
cr11: .equ 11
sar: .equ 11 ; Shift Amount Register
cr12: .equ 12
pidr3: .equ 12 ; Protection ID 3
cr13: .equ 13
pidr4: .equ 13 ; Protection ID 4
cr14: .equ 14
iva: .equ 14 ; Interrupt Vector Address
cr15: .equ 15
eiem: .equ 15 ; External Interrupt Enable Mask
cr16: .equ 16
itmr: .equ 16 ; Interval Timer
cr17: .equ 17
pcsq: .equ 17 ; Program Counter Space queue
cr18: .equ 18
pcoq: .equ 18 ; Program Counter Offset queue
cr19: .equ 19
iir: .equ 19 ; Interruption Instruction Register
cr20: .equ 20
isr: .equ 20 ; Interruption Space Register
cr21: .equ 21
ior: .equ 21 ; Interruption Offset Register
cr22: .equ 22
ipsw: .equ 22 ; Interruption Processor Status Word
cr23: .equ 23
eirr: .equ 23 ; External Interrupt Request
cr24: .equ 24
ppda: .equ 24 ; Physical Page Directory Address
tr0: .equ 24 ; Temporary register 0
cr25: .equ 25
hta: .equ 25 ; Hash Table Address
tr1: .equ 25 ; Temporary register 1
cr26: .equ 26
tr2: .equ 26 ; Temporary register 2
cr27: .equ 27
tr3: .equ 27 ; Temporary register 3
cr28: .equ 28
tr4: .equ 28 ; Temporary register 4
cr29: .equ 29
tr5: .equ 29 ; Temporary register 5
cr30: .equ 30
tr6: .equ 30 ; Temporary register 6
cr31: .equ 31
tr7: .equ 31 ; Temporary register 7
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention ~
; Register Definitions for Use with Assembler ~
; version A.08.06
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
rp: .equ r2 ; return pointer
mrp: .equ r31 ; millicode return pointer
ret0: .equ r28 ; return value
ret1: .equ r29 ; return value (high part of double)
sl: .equ r29 ; static link
sp: .equ r30 ; stack pointer
dp: .equ r27 ; data pointer
arg0: .equ r26 ; argument
arg1: .equ r25 ; argument or high part of double argument
arg2: .equ r24 ; argument
arg3: .equ r23 ; argument or high part of double argument
;_____________________________________________________________________________
; Software Architecture Space Registers
; sr0 ; return link from BLE
sret: .equ sr1 ; return value
sarg: .equ sr1 ; argument
; sr4 ; PC SPACE tracker
; sr5 ; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ 64 ; old stack pointer (locates previous frame)
#if 0
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions. version A.08.06
; These are generally suitable for programs on HP_UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
.SPACE $TEXT$, SPNUM=0,SORT=8
; .subspa $FIRST$, QUAD=0,ALIGN=2048,ACCESS=0x2c,SORT=4,FIRST
; .subspa $REAL$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=4,FIRST,LOCK
.subspa $MILLICODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
.subspa $LIT$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
.subspa $CODE$, QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
; .subspa $UNWIND$, QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=64
; .subspa $RECOVER$, QUAD=0,ALIGN=4,ACCESS=0x2c,SORT=80
; .subspa $RESERVED$, QUAD=0,ALIGN=8,ACCESS=0x73,SORT=82
; .subspa $GATE$, QUAD=0,ALIGN=8,ACCESS=0x4c,SORT=84,CODE_ONLY
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;
.SPACE $PRIVATE$, SPNUM=1,PRIVATE,SORT=16
.subspa $GLOBAL$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
.import $global$
.subspa $SHORTDATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=24
.subspa $DATA$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
.subspa $PFA_COUNTER$, QUAD=1,ALIGN=4,ACCESS=0x1f,SORT=8
.subspa $SHORTBSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=80,ZERO
.subspa $BSS$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO
; .subspa $PCB$, QUAD=1,ALIGN=8,ACCESS=0x10,SORT=82
; .subspa $STACK$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82
; .subspa $HEAP$, QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82
;;;;;;;;;;;;;;;;
; .SPACE $PFA$, SPNUM=0,PRIVATE,UNLOADABLE,SORT=64
; .subspa $PFA_ADDRESS$, ALIGN=4,ACCESS=0x2c,UNLOADABLE
;;;;;;;;;;;;;;;;
; .SPACE $DEBUG$, SPNUM=2,PRIVATE,UNLOADABLE,SORT=80
; .subspa $HEADER$, ALIGN=4,ACCESS=0,UNLOADABLE,FIRST
; .subspa $GNTT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $LNTT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $SLT$, ALIGN=4,ACCESS=0,UNLOADABLE
; .subspa $VT$, ALIGN=4,ACCESS=0,UNLOADABLE
; To satisfy the copyright terms each .o will have a reference
; to the actual copyright. This will force the actual copyright
; message to be brought in from libgloss/hp-milli.s
.space $PRIVATE$
.subspa $DATA$
#else
.data
#endif
.import ___hp_free_copyright,data
L$copyright .word ___hp_free_copyright
|
32bitmicro/newlib-nano-1.0
| 9,050
|
newlib/libc/machine/hppa/strcmp.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/*
strcmp
Jerry Huck
Edgar Circenis
*/
/*
* strcmp(s1, s2)
*
* returns integer: < 0 iff s1 lexicographically less than s2
* > 0 iff s1 lexicographically greater than s2
* = 0 iff s1 lexicographically equal to s2
*/
#include "DEFS.h"
#define s1 26
#define s2 25
#define tmp1 19
#define s2word 20
#define tmp3 21
#define tmp7 22
#define s1word 23
#define save 1
#define tmp6 24
#define tmp5 28
ENTRY(strcmp)
/* int strcmp(const char *s1, const char *s2)
   Returns <0 / 0 / >0 for s1 <,==,> s2 (unsigned byte compare).
   Word-at-a-time compare; the 16-entry blr table at case_analysis
   dispatches on the 4 combinations of (s2 low bits, s1 low bits),
   aligning the slower-aligned string by byte/halfword loads and
   shifting the other with vshd through sar (cr11). */
comb,=,n s1,s2,samestring
comib,=,n 0,s1,s1isnull
comib,=,n 0,s2,s2isnull
/* Hope for word alignment. Pick up low two bits of each address */
extru,<> s1,31,2,tmp1
ldwm 4(s1),s1word
dep,= s2,29,2,tmp1 /* tmp1 = (s2 low bits << 2) | (s1 low bits) */
b,n case_analysis
/* Start looping until null is found in s1 or they mis-compare */
loop:
ldwm 4(s2),s2word
loop_plus:
uxor,nbz s1word,r0,r0 /* Null in this? */
b,n nullins1
comb,=,n s1word,s2word,loop
ldwm 4(s1),s1word
/* The words do not compare equal and s1 does not have a null.
Need to treat words as unsigned and generate either a positive
or negative return value */
wordcomparereturn:
comclr,>> s1word,s2word,ret0 /*Set ret0 to 0 and skip if greater*/
ldi -2,ret0 /*Set ret0 to -2 when less */
bv r0(rp)
addi 1,ret0,ret0 /*Fix return value to be -1 or +1 */
/* s1 has a null. s2 has not been checked. */
nullins1:
/*If s2 has no nulls this is simple, but assume that it might
and fix up s1 to allow the word comparison to work by
scanning s1 and duplicating all the bytes in s2 below that byte into
the remainder of s1. A remainder only exists if the zero byte
is found in the upper three bytes */
extru,<> s1word,7,8,r0 /*in the first byte? */
dep,tr s2word,31,24,s1word /*copy low 3 bytes of *s2 into *s1 */
extru,<> s1word,15,8,r0 /*in the second byte? */
dep,tr s2word,31,16,s1word /*copy low 2 bytes of *s2 into *s1 */
extru,<> s1word,23,8,r0 /*in the third byte? */
dep s2word,31,8,s1word /*copy low 1 byte of *s2 into *s1 */
/* Do the normal unsigned compare and return */
comclr,<> s1word,s2word,ret0 /*Set ret0 to 0 and skip if not equal */
bv,n r0(rp)
comclr,>> s1word,s2word,ret0 /*Set ret0 to 0 and skip if greater*/
ldi -2,ret0 /*Set ret0 to -2 when less */
bv r0(rp)
addi 1,ret0,ret0 /*Fix return value to be -1 or +1 */
/* s1 and s2 are the same string and therefore equal */
samestring:
bv r0(rp)
copy r0,ret0
/* s1 is null. Treat as string of nulls. Therefore return
the negative of s2's first byte. s2 cannot be zero. */
s1isnull:
ldbs 0(0,s2),ret0
bv r0(rp)
sub 0,ret0,ret0
/* s2 is null. Treat as string of nulls. Therefore return
s1's first byte. s1 cannot be zero. */
s2isnull:
bv r0(rp)
ldbs 0(0,s1),ret0
case_analysis:
blr tmp1,r0 /* computed branch: 2 instructions per table slot */
nop
/*
Case statement for non-aligned cases (we've already
checked the aligned case.
NOTE: for non-aligned cases, the absolute shift value
gets loaded into tmp3.
*/
/* S2 S1 */
nop /* 00 00 can't happen */
nop
b shifts2 /* 00 01 */
ldi 8,tmp3 /* load shift count (delay slot) */
b shifts2 /* 00 10 */
ldi 16,tmp3 /* load shift count (delay slot) */
b shifts2 /* 00 11 */
ldi 24,tmp3 /* load shift count (delay slot) */
b shifts1_0 /* 01 00 */
ldi 8,tmp3 /* load shift count (delay slot) */
b eq_align1 /* 01 01 */
ldbs,ma 1(s1),s1word
b shifts2 /* 01 10 */
ldi 8,tmp3 /* load shift count (delay slot) */
b shifts2 /* 01 11 */
ldi 16,tmp3 /* load shift count (delay slot) */
b shifts1_0 /* 10 00 */
ldi 16,tmp3 /* load shift count (delay slot) */
b shifts1 /* 10 01 */
ldi 8,tmp3 /* load shift count (delay slot) */
b eq_align2 /* 10 10 */
ldhs,ma 2(s1),s1word
b shifts2 /* 10 11 */
ldi 8,tmp3 /* load shift count (delay slot) */
b shifts1_0 /* 11 00 */
ldi 24,tmp3 /* load shift count (delay slot) */
b shifts1 /* 11 01 */
ldi 16,tmp3 /* load shift count (delay slot) */
b shifts1 /* 11 10 */
ldi 8,tmp3 /* load shift count (delay slot) */
ldbs,ma 1(s1),s1word /* 11 11: one byte each, then word-aligned */
ldbs,ma 1(s2),s2word
sub,= s1word,s2word,ret0 /* if not equal, we can return now */
bv,n r0(rp)
comclr,<> s1word,r0,ret0 /* first byte null on both: equal */
bv,n r0(rp)
b loop /* fall into main loop */
ldwm 4(s1),s1word
eq_align1:
ldbs,ma 1(s2),s2word
sub,= s1word,s2word,ret0 /* if not equal, we can return now */
bv,n r0(rp)
comclr,<> s1word,r0,ret0
bv,n r0(rp)
/* fall through to half-word aligned case */
ldhs,ma 2(s1),s1word /* load next halfword */
eq_align2:
ldhs,ma 2(s2),s2word /* load next halfword */
/* form the mask: 0xffff0000 and mask leading nulls in s1word and s2word
so that we can fall into the main loop with word aligned data */
ldi 16,save
mtctl save,r11
zvdepi -2,32,save
or save,s1word,s1word
b loop_plus /* fall into main loop */
or save,s2word,s2word
/* s2's alignment is greater than s1's alignment, so we will shift s1 */
shifts1_0:
addi -4,s1,s1 /* fix up s1 due to earlier read */
shifts1:
extru s1,31,2,tmp1
extru s2,31,2,tmp5
dep r0,31,2,s1 /* Compute word address of s1 */
dep r0,31,2,s2 /* Compute word address of s2 */
ldwm 4(s1),s1word /* get first word of s1 */
ldwm 4(s2),s2word /* get first word of s2 */
combt,=,n r0,tmp1,masks2 /* Do we need to mask beginning of s1 */
sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,r11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word
masks2:
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,r11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word
ldi -1,tmp7 /* load tmp7 with 0xffffffff */
mtctl tmp3,r11 /* Move shift amount to CR11 */
more: uxor,nbz s1word,r0,r0 /* Is there a null in s1? */
b ends1
vshd tmp7,s1word,save /* delay slot: align s1 data to s2's phase */
combf,=,n save,s2word,cmps1
ldwm 4(s1),tmp7
ldwm 4(s2),s2word
uxor,nbz tmp7,r0,r0 /* is there a null in s1? */
b ends1_0
vshd s1word,tmp7,save
combf,=,n save,s2word,cmps1
ldwm 4(s1),s1word
b more
ldwm 4(s2),s2word
cmps1: movb,tr save,s1word,wordcomparereturn
nop
ends1_0:
copy tmp7,s1word /* move tmp7 to s1word */
ends1:
combf,=,n save,s2word,nullins1 /* branch if no match */
copy save,s1word /* delay slot */
/* At this point, we know that we've read a null */
/* from s1, so we can't read more from s1 */
uxor,nbz save,r0,r0 /* are the strings equal? */
b,n samestring
vshd s1word,r0,s1word
b nullins1
ldwm 4(s2),s2word
/* s1's alignment is greater than s2's alignment, so we will shift s2 */
shifts2:
extru s1,31,2,tmp1
extru s2,31,2,tmp5
dep r0,31,2,s1 /* Compute word address of s1 */
dep r0,31,2,s2 /* Compute word address of s2 */
ldwm 4(s2),s2word /* get first word of s2 */
ldwm 4(s1),s1word /* get first word of s1 */
combt,=,n r0,tmp5,masks1 /* Do we need to mask beginning of s2 */
sh3add tmp5,r0,save /* save now has number of bits to mask */
mtctl save,r11
zvdepi -2,32,save /* load save with proper mask */
or save,s2word,s2word
masks1:
sh3add tmp1,r0,save /* save now has number of bits to mask */
mtctl save,r11
zvdepi -2,32,save /* load save with proper mask */
or save,s1word,s1word
ldi -1,tmp7 /* load tmp7 with 0xffffffff */
mtctl tmp3,r11 /* Move shift amount to CR11 */
more1: uxor,nbz s2word,r0,r0 /* is there a null in s2? */
b ends2
vshd tmp7,s2word,save /* delay slot: align s2 data to s1's phase */
combf,=,n s1word,save,cmps2
ldwm 4(s2),tmp7
ldwm 4(s1),s1word
uxor,nbz tmp7,r0,r0 /* is there a null in s2? */
b ends2_0
vshd s2word,tmp7,save
combf,=,n s1word,save,cmps2
ldwm 4(s2),s2word
b more1
ldwm 4(s1),s1word
cmps2: movb,tr save,s2word,wordcomparereturn
nop
ends2_0:
copy tmp7,s2word /* move tmp7 to s2word */
ends2:
combf,=,n s1word,save,nullins1 /* branch if no match */
copy save,s2word /* delay slot */
/* At this point, we know that we've read a null */
/* from s2, so we can't read more from s2 */
uxor,nbz save,r0,r0 /* are the strings equal? */
b,n samestring
vshd s2word,r0,s2word
b nullins1
ldwm 4(s1),s1word
EXIT(strcmp)
|
32bitmicro/newlib-nano-1.0
| 8,853
|
newlib/libc/machine/hppa/strncpy.S
|
/*
* (c) Copyright 1986 HEWLETT-PACKARD COMPANY
*
* To anyone who acknowledges that this file is provided "AS IS"
* without any express or implied warranty:
* permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies, and that the name of Hewlett-Packard Company not be
* used in advertising or publicity pertaining to distribution
* of the software without specific, written prior permission.
* Hewlett-Packard Company makes no representations about the
* suitability of this software for any purpose.
*/
/* HPUX_ID: @(#) $Revision: 1.1 $ */
/*
* strncpy(s1, s2, n)
*
* Copy s2 to s1, truncating or null-padding to always copy n bytes
* return s1
*/
#include "DEFS.h"
#define d_addr r26
#define s_addr r25
#define count r24
#define tmp1 r19
#define tmp2 r20
#define tmp3 r21
#define tmp4 r22
#define tmp5 arg3
#define save r1
ENTRY(strncpy)
/* char *strncpy(char *d_addr, const char *s_addr, size_t count)
   Copy s2 to s1, truncating or null-padding so that exactly count
   bytes are written; returns d_addr.
   Word-at-a-time copy with alignment masking (zvdepi via sar=cr11)
   and vshd shifting for mutually unaligned pointers; uxor,nbz /
   uxor,sbz probe each word for a null byte.  The null*/found_null*
   tails locate the null byte, zero the rest of the word, and jump
   to pad_nulls to write the remaining count bytes of zeros. */
combt,= s_addr,r0,pad_null_bytes1 /* if s2==NULL then pad nulls and exit */
copy d_addr,ret0 /* The return value is defined to be the value of d_addr. DELAY SLOT*/
addibt,<,n -4,count,byteloop /* If count is <= 4 don't get fancy.*/
extru s_addr,31,2,tmp1 /* Extract the low two bits of the source address.*/
extru d_addr,31,2,tmp5 /* Extract the low two bits of the destination address.*/
add count,tmp5,count /* pre increment the count by the byte address so that the count is*/
comb,<> tmp5,tmp1,not_aligned /* branch if tmp5<>tmp1. */
dep 0,31,2,s_addr /* Compute the word address of the source. DELAY SLOT.*/
/* aligned*/
combt,= tmp5,r0,skip_mask
ldwm 4(0,s_addr),tmp1 /* tmp1 = *s_addr s_addr += 4 (DELAY SLOT)*/
sh3add tmp5,r0,save /* compute mask in save*/
mtctl save,11
zvdepi -2,32,save
b skip_mask /* don't reload tmp1*/
or save,tmp1,tmp1 /* or mask with data*/
chunks:
ldwm 4(0,s_addr),tmp1 /* get a word*/
skip_mask:
uxor,nbz tmp1,r0,save /* check for null*/
b,n null1
addibf,< -4,count,chunks
stbys,b,m tmp1,4(0,d_addr) /* store word (delay slot)*/
/* back_porch last word to store*/
addibt,=,n 4,count,done /* if count = 0 we're, of course, done !*/
ldws 0(s_addr),tmp1 /* load up the back_porch*/
add d_addr,count,d_addr/* final store address is +1 too high !*/
sh3add count,r0, save /* setup right mask based on count*/
mtctl save,r11
zvdepi -2,32,save /*save now has left-hand mask*/
uaddcm r0,save,save /*form right hand mask */
or tmp1,save,tmp1 /*and insert data*/
uxor,nbz tmp1,r0,save /* check for null*/
b,n null2
bv 0(r2)
stbys,e tmp1,0(d_addr) /* done */
/* Begin non_aligned code. */
not_aligned:
sub,>= tmp5,tmp1,tmp3 /* compute the shift amt.and skip load if tmp5 > tmp1.*/
ldwm 4(0,s_addr),tmp1 /* load up the first word from the source. tmp1 = *s_addr++*/
zdep tmp3,28,29,tmp4 /* compute the number of bits to shift */
mtctl tmp4,11 /* load the shift count into cr11 = shift count register.*/
addibt,<,n -4,count,chkchnk2 /* first step in pre adjustment of count for looping.*/
ldwm 4(0,s_addr),tmp2 /* get either first or second word from source. */
combt,= tmp5,r0,skip_mask2 /* don't mask if whole word is valid*/
vshd tmp1,tmp2,tmp3 /* position data ! (delay slot)*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* re-load the shift count into cr11 */
b skip_mask2
copy r0, tmp5 /* zero out tmp5 so we don't try to mask again*/
chunk2:
ldwm 4(0,s_addr),tmp2
vshd tmp1,tmp2,tmp3
skip_mask2:
uxor,nbz tmp3, r0, save
b,n null3
stbys,b,m tmp3,4(0,d_addr) /* store ! */
ldwm 4(0,s_addr),tmp1 /* get 2nd word ! */
vshd tmp2,tmp1,tmp3 /* position data ! */
uxor,nbz tmp3, r0, save
b,n null4
addibf,< -8,count,chunk2 /* If count is still >= 8 do another loop.*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
chkchnk2:
addibt,<,n 4,count,bp_0 /* if we don't have 4 bytes left then do the back porch (bp_0)*/
subchnk2: /* we have less than 8 chars to copy*/
ldwm 4(0,s_addr),tmp2 /* get next word !*/
combt,= tmp5,r0,skip_mask3
vshd tmp1,tmp2,tmp3 /* position data !*/
sh3add tmp5,r0,save /* setup r1*/
mtctl save,r11 /* setup mask in save*/
zvdepi -2,32,save
or save, tmp3, tmp3
mtctl tmp4,11 /* restore shift value again */
copy r0, tmp5 /* zero out tmp5 so we don't try to mask again*/
skip_mask3:
uxor,nbz tmp3,r0,save
b,n null4
b bp_1 /* we now have less than 4 bytes to move*/
stbys,b,m tmp3,4(0,d_addr) /* store !*/
bp_0:
copy tmp1,tmp2 /* switch registers used in the shift process.*/
addibt,<=,n 4,count,done /* if count = -4 this implies that count = 0 -> done */
bp_1:
ldwm 4(0,s_addr),tmp1 /* get final word ! */
vshd tmp2,tmp1,tmp3 /* position data !*/
uxor,sbz tmp3,r0,save /* if some-byte-zero */
b no_null /* don't goto no_null-find which null instead */
add d_addr,count,d_addr /* get d_addr ready for stbys,e */
extru,<> save,7,8,r0
b found_null5
copy r0, tmp5
extru,<> save,15,8,r0
b found_null5
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null5
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null5:
and tmp3,tmp5,tmp3 /* zero out tmp3 based on mask in tmp5*/
no_null:
bv 0(r2) /* we're done*/
stbys,e tmp3,0(0,d_addr) /* store the data !*/
/* here we do ye old byte-at-a-time moves.*/
byteloop:
addibt,=,n 4,count,done
comb,= 0,s_addr,done
stbs r0,0(d_addr) /* store null in case s_addr == NULL */
ldbs,ma 1(s_addr),tmp1
encore:
combt,=,n tmp1,r0, pad_null_bytes1
stbs,ma tmp1,1(d_addr)
addibf,=,n -1,count,encore
ldbs,ma 1(s_addr),tmp1
b,n done
pnb_1:
addibt,=,n 4,count,done /* if count was already 0 then we're done*/
pad_null_bytes1:
combt,=,n count,r0,done /* if count==0 then exit */
pad_null_bytes2:
addibf,= -1,count,pad_null_bytes2
stbs,ma r0,1(d_addr)
b,n done
pad_nulls:
/* null-pad a word at a time, then finish any byte remainder */
addibf,<=,n -4,count,pad_nulls
stwm r0,4(d_addr)
b,n pnb_1
null1:
extru,<> save,7,8,r0
b found_null1
copy r0, tmp5
extru,<> save,15,8,r0
b found_null1
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null1
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null1:
and tmp1,tmp5,tmp1 /*zero out tmp1 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp1,4(0,d_addr) /* first word (account for alignment)*/
null2: /* back porch case. We have less than 4 bytes to go.*/
extru,<> save,7,8,r0 /* is null in 1st byte? */
b found_null2
copy r0, tmp5
extru,<> save,15,8,r0 /* is null in 2nd byte? */
b found_null2
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
b found_null2 /* null must be in 3rd byte */
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
found_null2:
and tmp1,tmp5,tmp1 /*zero out tmp1 according to mask*/
bv 0(r2) /* we're done*/
stbys,e tmp1,0(0,d_addr) /* last word (back porch)*/
null3: /* not_aligned case where null is found in first of two words--adjust count*/
extru,<> save,7,8,r0
b found_null3
copy r0, tmp5
extru,<> save,15,8,r0
b found_null3
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null3
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null3:
addi 4,count,count /* fix count since null is in first of two words*/
and tmp3,tmp5,tmp3 /*zero out tmp3 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp3,4(d_addr)
null4: /* not_aligned case where null is found in second of two words*/
extru,<> save,7,8,r0
b found_null4
copy r0, tmp5
extru,<> save,15,8,r0
b found_null4
ldil 0x1FE000,tmp5 /* setup mask (FF000000)*/
extru,<> save,23,8,r0
b found_null4
ldil 0x1FFFE0,tmp5 /* setup mask (FFFF0000)*/
ldo -1(r0),tmp5 /* setup mask (FFFFFFFF)*/
found_null4:
and tmp3,tmp5,tmp3 /*zero out tmp3 according to mask*/
b pad_nulls /* nullify remaining count bytes*/
stbys,b,m tmp3,4(d_addr)
done:
EXIT(strncpy)
|
32bitmicro/newlib-nano-1.0
| 3,913
|
newlib/libc/machine/m32c/setjmp.S
|
/*
Copyright (c) 2005 Red Hat Incorporated.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The name of Red Hat Incorporated may not be used to endorse
or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL RED HAT INCORPORATED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(__r8c_cpu__) || defined(__m16c_cpu__)
#define A16 1
#endif
/* We implement setjmp/longjmp much like the way gcc implements
exceptions - we create new stack frames, then switch to them and
return. Thus, the two setjmp's below each push all the relevent
registers, then copy the whole frame into the buffer (first $sp is
moved, then smovf copies the frame itself), and the two longjmps
restore $sp, copy the frame back into place, and issue the same
return as the setjmp would have used.
Since the sizes of registers differs between the 16 and 24 bit
models, we provide separate implementations for each rather than
trying to parameterize them.
Jump buffer sizes: 21 bytes for 16 bit, 34 bytes for 24 bit.
*/
.text
#ifdef A16 /* 16 bit versions (R8C / M16C) */
.global _setjmp
_setjmp:
enter #0
pushm r1,r2,r3,a0,a1,sb,fb
; At this point, the stack looks like this:
; ... [pc:3] [oldfb:2] <fb> [r1:2] [r2:2] [r3:2] [a0:2] [a1:2] [sb:2] [fb:2] <sp>
; 16-bit ABI: first argument (jmp_buf pointer) arrives in r1.
; Save current sp into jmpbuf[0..1], then smovf the 19-byte frame
; (pc + oldfb + pushed regs) into the rest of the buffer.
mov.w r1,a1 ; a1 is the destination of smovf
mov.b #0,r1h
stc sp,a0 ; r1h:a0 is the source of smovf
mov.w a0,[a1]
add.w #2,a1
mov.w #19,r3 ; plus two for sp later
smovf.b
; Return 0 to caller.
mov.w #0,r0
popm r1,r2,r3,a0,a1,sb,fb
exitd
.global _longjmp
_longjmp:
enter #0
; Reverse of _setjmp: restore sp from jmpbuf[0..1], copy the saved
; frame back over the stack, then pop/exitd through it so we return
; from the original setjmp call site.
mov.w r1,a0 ; pointer to jump buf
mov.w r2,r0 ; setjmp's "new" return value
mov.b #0,r1h ; r1h: a0 is the source, now jmpbuf
mov.w [a0],a1 ; dest is new stack
ldc a1,sp
add.w #2,a0
mov.w #19,r3
smovf.b
;; now return to our caller with this newly restored frame
popm r1,r2,r3,a0,a1,sb,fb
exitd
#else /* 24 bit versions (M32C) */
.global _setjmp
_setjmp:
enter #0
pushm r1,r2,r3,a0,a1,sb,fb
; At this point, the stack looks like this:
; ... [jbuf:4] [pc:4] [oldfb:4] <fb> [r1:2] [r2:2] [r3:2] [a0:4] [a1:4] [sb:4] [fb:4] <sp>
; 24-bit ABI: jmp_buf pointer is on the stack at 8[fb].
mov.l 8[fb],a1 ; a1 is the destination of smovf
stc sp,a0 ; r1h:a0 is the source of smovf
mov.l a0,[a1]
add.l #4,a1
mov.w #30,r3 ; plus two for sp later
smovf.b
; Return 0 to caller.
mov.w #0,r0
popm r1,r2,r3,a0,a1,sb,fb
exitd
.global _longjmp
_longjmp:
enter #0
; ... [rv:2] [jbuf:4] [pc:4] [oldfb:4] <fb>
mov.l 8[fb],a0 ; pointer to jump buf
mov.w 12[fb],r0 ; setjmp's "new" return value
mov.l [a0],a1 ; dest is new stack
ldc a1,sp
add.l #4,a0
mov.w #30,r3
smovf.b
;; now return to our caller with this newly restored frame
popm r1,r2,r3,a0,a1,sb,fb
exitd
#endif
|
32bitmicro/newlib-nano-1.0
| 2,426
|
newlib/libc/machine/z8k/memcpy.S
|
/*
* memcpy routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memcpy(void *dest, const void *src, size_t length);
*/
name "memcpy.S"
.text
even
global _memcpy
global memmove_entry
/* _memcpy: copy length bytes from src to dest, ascending addresses.
 * Uses word block-moves (ldir) when src and dest have equal address
 * parity (copying one alignment byte first if both start odd), and a
 * plain byte block-move (ldirb) when the parities differ.
 * memmove_entry is a secondary entry point used by memmove once it has
 * determined that a forward copy is non-destructive.
 * Note: ldib/ldir-style instructions copy @src to @dest, advance both
 * pointers and decrement the counter; OV is set when the counter hits 0.
 */
_memcpy:
#ifdef __Z8001__
	segm
#ifdef __STD_CALL__
	ldl	rr6,rr14(#4)
	ldl	rr4,rr14(#8)
	ldl	rr2,rr14(#12)
#else
	pushl	@rr14,rr6	/* save dest for the return value */
#endif
/* rr2 - length (high word ignored)
 * rr4 - src
 * rr6 - dest
 */
	testl	rr2
	jr	z,finish	/* zero length: nothing to do */
memmove_entry:	/* external entry point from memmove */
	bitb	rl7,#0		/* odd destination address? */
	jr	nz,testsrc
	bitb	rl5,#0		/* odd source address? */
	jr	nz,odd_copy	/* dest even, src odd: bytewise copy */
	jr	t,even_copy	/* dest even, src even: word copy */
testsrc:
	bitb	rl5,#0
	jr	z,odd_copy	/* src even, dest odd: bytewise copy */
	ldib	@rr6,@rr4,r3	/* both odd: copy one byte to align */
	jr	ov,finish	/* jump if r3 is zero now */
/* copy words */
even_copy:
	ld	r2,r3		/* remember length */
	srl	r3,#1		/* convert byte count to word count */
	jr	z,no_words
	ldir	@rr6,@rr4,r3	/* bulk copy r3 words */
no_words:
	bitb	rl2,#0		/* odd length? */
	jr	z,finish
	ldib	@rr6,@rr4,r2	/* yes, copy last byte */
	jr	finish
/* copy bytes */
odd_copy:
	ldirb	@rr6,@rr4,r3	/* misaligned parity: copy r3 bytes */
finish:
#ifdef __STD_CALL__
	ldl	rr6,rr14(#4)	/* reload original dest (return value) */
#else
	popl	rr2,@rr14	/* return value: original dest */
#endif
#else /* above Z8001, below Z8002 */
	unsegm
#ifdef __STD_CALL__
	ld	r7,r15(#2)
	ld	r6,r15(#4)
	ld	r5,r15(#6)
#else
	ld	r2,r7		/* buffer pointer return value */
#endif
/* r5 - length
 * r6 - src
 * r7 - dest
 */
	test	r5
	jr	z,finish	/* zero length: nothing to do */
memmove_entry:	/* external entry point from memmove */
	bitb	rl7,#0		/* odd destination address? */
	jr	nz,testsrc
	bitb	rl6,#0		/* odd source address? */
	jr	nz,odd_copy	/* dest even, src odd: bytewise copy */
	jr	t,even_copy	/* dest even, src even: word copy */
testsrc:
	bitb	rl6,#0
	jr	z,odd_copy	/* src even, dest odd: bytewise copy */
	ldib	@r7,@r6,r5	/* both odd: copy one byte to align */
	jr	ov,finish	/* jump if r5 is zero now */
/* copy words */
even_copy:
	ld	r4,r5		/* remember length */
	srl	r5,#1		/* convert byte count to word count */
	jr	z,no_words
	ldir	@r7,@r6,r5	/* bulk copy r5 words */
no_words:
	bitb	rl4,#0		/* odd length? */
	jr	z,finish
	ldib	@r7,@r6,r4	/* yes, copy last byte */
	jr	finish
/* copy bytes */
odd_copy:
	ldirb	@r7,@r6,r5	/* misaligned parity: copy r5 bytes */
finish:
#ifdef __STD_CALL__
	ld	r7,r15(#2)	/* reload original dest (return value) */
#endif
#endif /* Z8002 */
	ret
.end
|
32bitmicro/newlib-nano-1.0
| 1,905
|
newlib/libc/machine/z8k/setjmp.S
|
/* Z8000 setjmp/longjmp.
 * jmp_buf layout: slot 0 holds the return pc fetched from the stack;
 * the remaining slots hold the register pairs that must survive the
 * call (rr8, rr10, rr12 = frame pointer, rr14 = stack pointer).
 * Four variants: Z8001 (segmented, 32-bit pointers) vs Z8002
 * (non-segmented, 16-bit pointers), each with stack-based
 * (__STD_CALL__) or register-based argument passing.
 */
	.global _setjmp
	.global _longjmp
#ifdef __Z8001__
	segm
#ifdef __STD_CALL__
_setjmp:
	ldl	rr6,rr14(#4)	! get argument (jmp_buf pointer)
	ldl	rr2,@rr14	! fetch pc
	ldl	@rr6,rr2	! save it
	ldl	rr6(#16),rr8
	ldl	rr6(#4),rr10
	ldl	rr6(#8),rr12	! remember frame pointer
	ldl	rr6(#12),rr14	! remember stack pointer
	ldk	r7,#0		! a direct call to setjmp returns 0
	ret	t
_longjmp:
	ldl	rr4,rr14(#4)	! get first argument
	ld	r7,rr14(#8)	! get return value
	ldl	rr8,rr4(#16)
	ldl	rr10,rr4(#4)
	ldl	rr12,rr4(#8)	! restore old frame pointer
	ldl	rr14,rr4(#12)	! restore old stack pointer
	ldl	rr4,@rr4	! return address
	inc	r15,#4		! pop the return pc setjmp's caller pushed
	jp	@rr4		! resume just after the original setjmp call
#else /* above __STD_CALL_, below not */
_setjmp:
	ldl	rr2,@rr14	! fetch pc
	ldl	@rr6,rr2	! save it
	ldl	rr6(16),rr8
	ldl	rr6(4),rr10
	ldl	rr6(8),rr12	! and the other special regs
	ldl	rr6(12),rr14
	ldk	r2,#0		! a direct call to setjmp returns 0
	ret	t
_longjmp:
	ld	r2,r5		! get return value
	ldl	rr4,rr6(0)	! saved return address
	ldl	rr8,rr6(16)
	ldl	rr10,rr6(4)
	ldl	rr12,rr6(8)
	ldl	rr14,rr6(12)
	inc	r15,#4		! pop the return pc setjmp's caller pushed
	jp	@rr4
#endif /* not __STD_CALL__ */
#else /* above Z8001, below Z8002 */
	unseg
#ifdef __STD_CALL__
_setjmp:
	ld	r7,r15(#2)	! get argument
	ld	r2,@r15		! fetch pc
	ld	@r7,r2		! save it
	ldl	r7(#14),rr8
	ldl	r7(#2),rr10
	ldl	r7(#6),rr12	! remember frame pointer
	ldl	r7(#10),rr14	! remember stack pointer
	ldk	r7,#0		! a direct call to setjmp returns 0
	ret	t
_longjmp:
	ld	r4,r15(#2)	! get first argument (jmp_buf)
	ld	r7,r15(#4)	! get return value
	ldl	rr8,r4(#14)
	ldl	rr10,r4(#2)
	ldl	rr12,r4(#6)	! restore old frame pointer
	ldl	rr14,r4(#10)	! restore old stack pointer
	ld	r4,@r4		! return address
	inc	r15,#2		! pop the return pc setjmp's caller pushed
	jp	@r4
#else /* above __STD_CALL_, below not */
_setjmp:
	! NOTE(review): unlike the other three variants, rr8 is neither
	! saved here nor restored in the matching longjmp below, and the
	! jmp_buf word at offset 2 is left unused -- confirm rr8 is not
	! callee-saved under this calling convention.
	ld	r2,@r15		! fetch pc
	ld	@r7,r2		! save it
	ldl	r7(4),rr10
	ldl	r7(8),rr12	! and the other special regs
	ldl	r7(12),rr14
	ldk	r2,#0		! a direct call to setjmp returns 0
	ret	t
_longjmp:
	ld	r2,r6		! get return value
	ld	r4,@r7		! saved return address
	ldl	rr10,r7(4)
	ldl	rr12,r7(8)
	ldl	rr14,r7(12)
	inc	r15,#2		! pop the return pc setjmp's caller pushed
	jp	@r4
#endif /* not __STD_CALL__ */
#endif /* Z8002 version */
|
32bitmicro/newlib-nano-1.0
| 3,306
|
newlib/libc/machine/z8k/memmove.S
|
/*
* memmove routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memmove(void *dest, const void *src, size_t length);
*/
name "memmove.S"
.text
even
global _memmove
/* _memmove: overlap-safe copy of length bytes from src to dest.
 * If a forward copy is non-destructive (dest <= src, or dest >= src +
 * length) it jumps to memcpy's memmove_entry; otherwise it copies
 * backwards from the last byte, using word block-moves (lddr) when
 * both pointers share address parity and byte moves (lddrb) otherwise.
 */
_memmove:
#ifdef __Z8001__
	segm
#ifdef __STD_CALL__
	ldl	rr6,rr14(#4)
	ldl	rr4,rr14(#8)
	ldl	rr2,rr14(#12)
#else
	pushl	@rr14,rr6	/* save dest for the return value */
#endif
/* rr2 - length (high word ignored)
 * rr4 - src
 * rr6 - dest
 */
	testl	rr2
	jr	z,finish	/* zero length: nothing to do */
/* check for destructive overlap (src < dest && dest < src + length) */
	cpl	rr6,rr4
	jp	ule,memmove_entry	/* non-destructive, let memcpy do the work */
	ldl	rr0,rr2
	addl	rr0,rr4		/* rr0 = src + length */
	cpl	rr0,rr6
	jp	ult,memmove_entry	/* non-destructive, let memcpy do the work */
/* set-up pointers to copy backwards, add (length - 1) */
	addl	rr4,rr2		/* src + length */
	addl	rr6,rr2		/* dest + length */
	subl	rr4,#1
	subl	rr6,#1		/* both now point at the last byte */
/* check alignment (pointers currently address the LAST byte) */
	bitb	rl7,#0		/* odd destination address? */
	jr	z,testsrc
	bitb	rl5,#0		/* odd source address? */
	jr	z,odd_copy	/* src even, dest odd: bytewise copy */
	jr	even_copy	/* both odd: word copy after adjust */
testsrc:
	bitb	rl5,#0
	jr	nz,odd_copy	/* src odd, dest even: bytewise copy */
	lddb	@rr6,@rr4,r3	/* both even: copy the last byte to align */
	jr	ov,finish	/* jump if r3 is zero now */
/* copy words */
even_copy:
	ld	r2,r3		/* remember length */
	srl	r3,#1		/* convert byte count to word count */
/*	jr	z,no_words	it cannot be zero here */
	dec	r5,#1
	dec	r7,#1		/* step back to the start of the last word */
	lddr	@rr6,@rr4,r3
no_words:
	bitb	rl2,#0		/* odd length? */
	jr	z,finish
	inc	r5,#1
	inc	r7,#1
	lddb	@rr6,@rr4,r2	/* yes, copy the remaining byte */
	jr	finish
/* copy bytes */
odd_copy:
	lddrb	@rr6,@rr4,r3
finish:
#ifdef __STD_CALL__
	ldl	rr6,rr14(#4)	/* reload original dest (return value) */
#else
	popl	rr2,@rr14	/* return value: original dest */
#endif
#else /* above Z8001, below Z8002 */
	unsegm
#ifdef __STD_CALL__
	ld	r7,r15(#2)
	ld	r6,r15(#4)
	ld	r5,r15(#6)
#else
	ld	r2,r7		/* buffer pointer return value */
#endif
/* r5 - length
 * r6 - src
 * r7 - dest
 */
	test	r5
	jr	z,finish	/* zero length: nothing to do */
/* check for destructive overlap (src < dest && dest < src + length) */
	cp	r7,r6
	jp	ule,memmove_entry	/* non-destructive, let memcpy do the work */
	ld	r0,r5
	add	r0,r6		/* r0 = src + length */
	cp	r0,r7
	jp	ult,memmove_entry	/* non-destructive, let memcpy do the work */
/* set-up pointers to copy backwards, add (length - 1) */
	add	r6,r5		/* src + length */
	add	r7,r5		/* dest + length */
	dec	r6,#1
	dec	r7,#1		/* both now point at the last byte */
/* check alignment (pointers currently address the LAST byte) */
	bitb	rl7,#0		/* odd destination address? */
	jr	z,testsrc
	bitb	rl6,#0		/* odd source address? */
	jr	z,odd_copy	/* src even, dest odd: bytewise copy */
	jr	even_copy	/* both odd: word copy after adjust */
testsrc:
	bitb	rl6,#0
	jr	nz,odd_copy	/* src odd, dest even: bytewise copy */
	lddb	@r7,@r6,r5	/* both even: copy the last byte to align */
	jr	ov,finish	/* jump if r5 is zero now */
/* copy words */
even_copy:
	ld	r4,r5		/* remember length */
	srl	r5,#1		/* convert byte count to word count */
/*	jr	z,no_words	it cannot be zero here */
	dec	r6,#1
	dec	r7,#1		/* step back to the start of the last word */
	lddr	@r7,@r6,r5
no_words:
	bitb	rl4,#0		/* odd length? */
	jr	z,finish
	inc	r6,#1
	inc	r7,#1
	lddb	@r7,@r6,r4	/* yes, copy the remaining byte */
	jr	finish
/* copy bytes */
odd_copy:
	lddrb	@r7,@r6,r5
finish:
#ifdef __STD_CALL__
	ld	r7,r15(#2)	/* reload original dest (return value) */
#endif
#endif /* Z8002 */
	ret
.end
|
32bitmicro/newlib-nano-1.0
| 1,908
|
newlib/libc/machine/z8k/memset.S
|
/*
* memset routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* void *memset(void *buffer, int value, size_t length);
*/
name "memset.S"
.text
even
global _memset
/* _memset: fill length bytes of buffer with the low byte of value.
 * The fill byte is replicated into both halves of a word; after an
 * optional single byte to reach word alignment, the first word is
 * stored and ldir then propagates it (source trailing the destination
 * by one word), filling the rest with word stores.
 */
_memset:
#ifdef __Z8001__
	segm
#ifdef __STD_CALL__
	ldl	rr6,rr14(#4)
	ld	r5,rr14(#8)
	ldl	rr2,rr14(#10)
#else
	pushl	@rr14,rr6	/* save buffer for the return value */
#endif
/* rr2 - length
 * rl5 - value
 * rr6 - buffer
 */
	testl	rr2
	jr	z,finish	/* zero length: nothing to do */
	ldb	rh5,rl5		/* replicate value into both bytes of r5 */
	ld	r1,r5		/* r1 contains value */
	bit	r7,#0		/* odd buffer address? */
	jr	z,not_odd
	ldb	@rr6,rl1	/* store one byte to reach word alignment */
	inc	r7,#1
	subl	rr2,#1
	jr	z,finish
not_odd:ld	r0,r3		/* remember length (low word of rr2) */
	srl	r3,#1		/* convert byte count to word count */
	jr	z,no_words
	ldl	rr4,rr6		/* rr4 = fill start (ldir source) */
	ld	@rr6,r1		/* seed the first word */
	inc	r7,#2
	dec	r3,#1
	jr	z,no_words
	ldir	@rr6,@rr4,r3	/* propagate: copy from one word behind */
no_words:
	bit	r0,#0		/* one byte remaining? */
	jr	z,finish
	ldb	@rr6,rl1
finish:
#ifdef __STD_CALL__
	ldl	rr6,rr14(#4)	/* reload original buffer (return value) */
#else
	popl	rr2,@rr14	/* return value: original buffer */
#endif
#else /* above Z8001, below Z8002 */
	unsegm
#ifdef __STD_CALL__
	ld	r7,r15(#2)
	ld	r6,r15(#4)
	ld	r5,r15(#6)
#else
	ld	r2,r7		/* buffer pointer return value */
#endif
/* r5 - length
 * r6 - value
 * r7 - buffer
 */
	test	r5
	jr	z,finish	/* zero length: nothing to do */
	ldb	rh6,rl6		/* replicate value into both bytes of r6 */
	ld	r1,r6		/* r1 contains value */
	bit	r7,#0		/* odd buffer address? */
	jr	z,not_odd
	ldb	@r7,rl1		/* store one byte to reach word alignment */
	inc	r7,#1
	dec	r5,#1
	jr	z,finish
not_odd:ld	r0,r5		/* remember length */
	srl	r5,#1		/* convert byte count to word count */
	jr	z,no_words
	ld	r4,r7		/* r4 = fill start (ldir source) */
	ld	@r7,r1		/* seed the first word */
	inc	r7,#2
	dec	r5,#1
	jr	z,no_words
	ldir	@r7,@r4,r5	/* propagate: copy from one word behind */
no_words:
	bit	r0,#0		/* one byte remaining? */
	jr	z,finish
	ldb	@r7,rl1
finish:
#ifdef __STD_CALL__
	ld	r7,r15(#2)	/* reload original buffer (return value) */
#endif
#endif /* Z8002 */
	ret
.end
|
32bitmicro/newlib-nano-1.0
| 2,911
|
newlib/libc/machine/z8k/memcmp.S
|
/*
* memcmp routine for Z8000
* Copyright (C) 2004 Christian Groessler <chris@groessler.org>
*
* Permission to use, copy, modify, and distribute this file
* for any purpose is hereby granted without fee, provided that
* the above copyright notice and this notice appears in all
* copies.
*
* This file is distributed WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
/* int memcmp(const void *b1, const void *b2, size_t length);
*/
name "memcmp.S"
.text
even
global _memcmp
/* _memcmp: compare two buffers of length bytes.
 * Returns <0 / 0 / >0 according to *b1 - *b2 (as unsigned bytes) at
 * the first differing position.  Word compares (cpsir) are used when
 * both pointers share address parity; when a differing word is found,
 * its two bytes are re-scanned bytewise to locate the exact byte.
 * cps* block instructions set Z when the compare condition is met and
 * OV when the counter is exhausted.
 */
_memcmp:
#ifdef __Z8001__
	segm
#ifdef __STD_CALL__
	ldl	rr6,rr14(#4)
	ldl	rr4,rr14(#8)
	ldl	rr2,rr14(#12)
#endif
/* rr2 - length (high word ignored)
 * rr4 - b2
 * rr6 - b1
 */
	clr	r1		/* initialize return value */
	testl	rr2
	jr	z,finish	/* zero length: buffers compare equal */
	bitb	rl7,#0		/* odd b1? */
	jr	nz,testb2
	bitb	rl5,#0		/* odd b2? */
	jr	nz,odd_cmp	/* b1 even, b2 odd */
	jr	t,even_cmp	/* b1 even, b2 even */
testb2:
	bitb	rl5,#0
	jr	z,odd_cmp	/* b2 even, b1 odd */
	cpsib	@rr6,@rr4,r3,eq	/* both odd: compare one byte to align */
	jr	z,beq		/* bytes are the same */
	jr	t,byte_diff
beq:	jr	ov,finish	/* jump if r3 is zero now */
/* compare words */
even_cmp:
	ld	r2,r3		/* remember length */
	srl	r3,#1		/* convert byte count to word count */
	jr	z,no_words
	cpsir	@rr6,@rr4,r3,ne	/* scan words until a mismatch */
	jr	nz,no_words	/* no mismatch found */
	dec	r7,#2
	dec	r5,#2		/* point to different bytes */
	ldk	r3,#2
	jr	t,odd_cmp	/* re-scan the mismatching word bytewise */
no_words:
	bitb	rl2,#0		/* odd length? */
	jr	z,finish
	cpsib	@rr6,@rr4,r3,eq
	jr	z,finish	/* last bytes are the same */
	jr	t,byte_diff
/* compare bytes */
odd_cmp:
	cpsirb	@rr6,@rr4,r3,ne	/* scan bytes until a mismatch */
	jr	nz,finish	/* equal throughout */
byte_diff:
	dec	r7,#1
	dec	r5,#1		/* point to different bytes */
	ldb	rl1,@rr6
	clr	r0
	ldb	rl0,@rr4
	sub	r1,r0		/* result = *b1 - *b2 */
finish:	/* set return value */
#ifdef __STD_CALL__
	ld	r7,r1
#else
	ld	r2,r1
#endif
#else /* above Z8001, below Z8002 */
	unsegm
#ifdef __STD_CALL__
	ld	r7,r15(#2)
	ld	r6,r15(#4)
	ld	r5,r15(#6)
#endif
/* r5 - length
 * r6 - b2
 * r7 - b1
 */
	clr	r1		/* initialize return value */
	test	r5
	jr	z,finish	/* zero length: buffers compare equal */
	bitb	rl7,#0		/* odd b1? */
	jr	nz,testb2
	bitb	rl6,#0		/* odd b2? */
	jr	nz,odd_cmp	/* b1 even, b2 odd */
	jr	t,even_cmp	/* b1 even, b2 even */
testb2:
	bitb	rl6,#0
	jr	z,odd_cmp	/* b2 even, b1 odd */
	cpsib	@r7,@r6,r5,eq	/* both odd: compare one byte to align */
	jr	z,beq		/* bytes are the same */
	jr	t,byte_diff
beq:	jr	ov,finish	/* jump if r5 is zero now */
/* compare words */
even_cmp:
	ld	r4,r5		/* remember length */
	srl	r5,#1		/* convert byte count to word count */
	jr	z,no_words
	cpsir	@r7,@r6,r5,ne	/* scan words until a mismatch */
	jr	nz,no_words	/* no mismatch found */
	dec	r7,#2
	dec	r6,#2		/* point to different bytes */
	ldk	r5,#2
	jr	t,odd_cmp	/* re-scan the mismatching word bytewise */
no_words:
	bitb	rl4,#0		/* odd length? */
	jr	z,finish
	cpsib	@r7,@r6,r4,eq
	jr	z,finish	/* last bytes are the same */
	jr	t,byte_diff
/* compare bytes */
odd_cmp:
	cpsirb	@r7,@r6,r5,ne	/* scan bytes until a mismatch */
	jr	nz,finish	/* equal throughout */
byte_diff:
	dec	r7,#1
	dec	r6,#1		/* point to different bytes */
	ldb	rl1,@r7
	clr	r0
	ldb	rl0,@r6
	sub	r1,r0		/* result = *b1 - *b2 */
finish:	/* copy result into the ABI return register */
#ifdef __STD_CALL__
	ld	r7,r1
#else
	ld	r2,r1
#endif
#endif /* Z8002 */
	ret
.end
|
32bitmicro/newlib-nano-1.0
| 6,260
|
newlib/libc/machine/spu/spu_timer_flih.S
|
/*
(C) Copyright IBM Corp. 2008
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of IBM nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* First-level interrupt handler. */
/* The following two convenience macros assist in the coding of the
saving and restoring the volatile register starting from register
2 up to register 79.
saveregs first, last Saves registers from first to the last.
restoreregs first, last Restores registers from last down to first.
Note: first must be less than or equal to last. */
/* saveregs first, last
 * Recursively emits one stqd per register \first..\last, storing each
 * into the interrupt save area below the pre-interrupt $SP at
 * offset -(STACK_SKIP+reg)*16. */
.macro saveregs first, last
	stqd $\first, -(STACK_SKIP+\first)*16($SP)
	.if \last-\first
	saveregs "(\first+1)",\last
	.endif
.endm
/* restoreregs first, last
 * Recursively emits one lqd per register \last..\first, reloading each
 * from the flih frame at offset (82-reg)*16 relative to the new $SP
 * (the frame is (STACK_SKIP+82)*16 bytes, so this mirrors saveregs). */
.macro restoreregs first, last
	lqd $\last, (82-\last)*16($SP)
	.if \last-\first
	restoreregs \first,"(\last-1)"
	.endif
.endm
	.section .interrupt,"ax"
	.align 3
	.type spu_flih, @function
/* spu_flih: first-level interrupt handler.
 * Saves the volatile registers $2..$79, acks/masks the pending events
 * (leaving the decrementer event, bit 0x20, enabled for the timer
 * library), then dispatches each pending event -- indexed by the
 * leading-zero count of the event-status word -- to its second-level
 * handler in __spu_slih_handlers, and finally restores all state and
 * returns via irete. */
spu_flih:
/* Adjust the stack pointer to skip the maximum register save area
   (STACK_SKIP quadword registers) in case an interrupt occurred while
   executing a leaf function that used the stack area without actually
   allocating its own stack frame. */
	.set STACK_SKIP, 125
/* Save the current link register on a new stack frame for the
   normal spu_flih() version of this file. */
	stqd $0, -(STACK_SKIP+80)*16($SP)
	stqd $SP, -(STACK_SKIP+82)*16($SP)	/* Save back chain pointer. */
	saveregs 2, 39
	il $2, -(STACK_SKIP+82)*16		/* Stack frame size. */
	rdch $3, $SPU_RdEventStat		/* Read event status. */
	rdch $6, $SPU_RdEventMask		/* Read event mask. */
	hbrp					/* Open a slot for instruction prefetch. */
	saveregs 40,59
	clz $4, $3				/* Get first slih index. */
	stqd $6, -(STACK_SKIP+1)*16($SP)	/* Save event mask on stack. */
	saveregs 60, 67
	/* Do not disable/ack the decrementer event here.
	   The timer library manages this and expects it
	   to be enabled upon entry to the SLIH. */
	il $7, 0x20				/* 0x20 = decrementer event bit. */
	andc $5, $3, $7
	andc $7, $6, $5				/* Clear event bits. */
	saveregs 68, 69
	wrch $SPU_WrEventAck, $3		/* Ack events(s) - include decrementer event. */
	wrch $SPU_WrEventMask, $7		/* Disable event(s) - exclude decrementer event. */
	saveregs 70, 79
	a $SP, $SP, $2				/* Instantiate flih stack frame. */
next_event:
	/* Fetch and dispatch the event handler for the first non-zero event. The
	   dispatch handler is indexed into the __spu_slih_handlers array using the
	   count of zero off the event status as an index. */
	ila $5, __spu_slih_handlers		/* Slih array offset. */
	shli $4, $4, 2				/* Slih entry offset. */
	lqx $5, $4, $5				/* Load slih address. */
	rotqby $5, $5, $4			/* Rotate to word 0. */
	bisl $0, $5				/* Branch to slih (leaves remaining event status in $3). */
	clz $4, $3				/* Get next slih index. */
	brnz $3, next_event			/* Loop while events remain pending. */
	lqd $2, 81*16($SP)			/* Read event mask from stack ((82-1)*16 in the new frame). */
	restoreregs 40, 79
	wrch $SPU_WrEventMask, $2		/* Restore event mask. */
	hbrp					/* Open a slot for instruction pre-fetch. */
	restoreregs 2, 39
	/* Restore the link register from the new stack frame for the
	   normal spu_flih() version of this file. */
	lqd $0, 2*16($SP)
	lqd $SP, 0*16($SP)			/* restore stack pointer from back chain ptr. */
	irete					/* Return from interrupt and re-enable interrupts. */
	.size spu_flih, .-spu_flih
/* spu_slih_handlers[]
Here we initialize 33 default event handlers. The first entry in this array
corresponds to the event handler for the event associated with bit 0 of
Channel 0 (External Event Status). The 32nd entry in this array corresponds
to bit 31 of Channel 0 (DMA Tag Status Update Event). The 33rd entry in
this array is a special case entry to handle "phantom events" which occur
when the channel count for Channel 0 is 1, causing an asynchronous SPU
interrupt, but the value returned for a read of Channel 0 is 0. The index
calculated into this array by spu_flih() for this case is 32, hence the
33rd entry. */
	.data
	.align 4
	.extern __spu_default_slih
	.global __spu_slih_handlers
	.type __spu_slih_handlers, @object
/* 33 second-level handler slots: one per event-status bit 0..31 plus
   one "phantom event" slot (index 32); every entry starts out as the
   default handler.  See the comment block above for details. */
__spu_slih_handlers:
	.rept 33
	.long __spu_default_slih
	.endr
	.size __spu_slih_handlers, .-__spu_slih_handlers
|
32bitmicro/newlib-nano-1.0
| 1,585
|
newlib/libc/machine/spu/sniprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build sniprintf (newlib's integer-only snprintf variant, by naming
   convention) from the snprintf stub by redefining the symbol. */
#define snprintf sniprintf
#include "snprintf.S"
|
32bitmicro/newlib-nano-1.0
| 2,250
|
newlib/libc/machine/spu/fprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
	.text
	.align 4
GLOBL	fprintf
	.type fprintf, @function
/* int fprintf(FILE *fp, const char *fmt, ...)
 * SPU-side stub: spills the argument registers and builds a va_list
 * via __stack_reg_va (2 fixed args), replaces the SPU FILE pointer in
 * the $3 save slot with the PPE-side stream handle (fp->_fp), then
 * sends a SPE_C99_VFPRINTF request to the PPE via __send_to_ppe.
 * NOTE(review): the return value is presumably written back through
 * the $3 save slot by the PPE -- confirm against __send_to_ppe. */
fprintf:
	stqd $0, 16($sp)	/* save caller address */
	il $2, 2		/* number of fixed arguments */
	brsl $0, __stack_reg_va	/* save register to the stack frame */
	brsl $0, __check_init
	lqd $3, 16*2($sp)	/* $3 <- saved FP on the stack frame */
	lqd $2, 0($3)		/* FP = fp->_fp */
	rotqby $2, $2, $3	/* rotate the loaded word to the preferred slot */
	stqd $2, 16*2($sp)	/* replace FP on the stack frame */
	il $3, SPE_C99_SIGNALCODE
	il $4, SPE_C99_VFPRINTF
	ai $5, $sp, 16*2	/* data ($3 save address) */
	brsl $0, __send_to_ppe
	il $2, 16*(SPE_STACK_REGS+2+2)
	a $sp, $sp, $2		/* pop the frame built by __stack_reg_va */
	lqd $0, 16($sp)		/* load caller address */
	bi $0			/* return to caller */
	.size fprintf, .-fprintf
|
32bitmicro/newlib-nano-1.0
| 5,817
|
newlib/libc/machine/spu/stack_reg_va.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file contains code use to construct a PIC, spu side, syscall
* function with variable parameters in accordance with the CBE ABI.
*
* This function is equivalent to constructing a va_list structure and
* calling the va_list form of the function. Therefore, for example,
* a printf function stack frame will look like this:
*
* | Stack | high memory
* | Parms |
* | |
* |------------|
* | Link Reg |
* |------------|
* | Back Chain |<-----. <---- input SP
* |------------| |
* | Reg 74 | |
* |------------| |
* | Reg 73 | |
* |------------| |
* // ... // |
* |------------| |
* | Reg 5 | |
* |------------| |
* | Reg 4 |<--. |
* |------------| | |
* va_list.| call_stack |------'
* |------------| |
* va_list.| next_arg |---'
* |------------|
* | format (r3)| <---- start of parameters
* |------------| |------------|
* | stack | | |
* | code | |(Back Chain)| <---- output SP
* | 1-3 qwords | <---- code_ptr `------------'
* `------------'
* low memory
*
* This was written in assembly so that it is smaller than what would
* be produced by using va_start.
*/
#include "c99ppe.h"
#define parms $2 /* Number of fixed arguments */
#define offset $67
#define flag $68
#define regdec $69
#define link $70
#define code_ptr $71
#define ptr $72
#define inst $73
#define tmp $74
	.text
	.global __stack_reg_va
	.type __stack_reg_va, @function
/* __stack_reg_va: spill the argument registers and build a va_list for
 * a PPE-assisted variadic call (see the frame diagram above).
 * In: parms ($2) = number of fixed arguments (1..3).
 * Registers $69..$74 are saved explicitly first so they can be used as
 * scratch; the remaining volatile registers ($68 down to $3) are then
 * stored by a small self-modifying code sequence built on the stack. */
__stack_reg_va:
	/* Save registers 69-74 explicitly so that we have some
	 * working registers.
	 */
	stqd $74, 16*(-1)($sp)
	stqd $73, 16*(-2)($sp)
	stqd $72, 16*(-3)($sp)
	stqd $71, 16*(-4)($sp)
	stqd $70, 16*(-5)($sp)
	stqd $69, 16*(-6)($sp)
	/* Construct self-modifying stack code that saves the remaining
	 * volatile registers onto the stack.
	 */
	il regdec, -1			/* for decrement register value in save instruction */
	shlqbyi regdec, regdec, 12	/* regdec = {-1,0,0,0}: adding it decrements the stqd's target-register field */
	il tmp, -(SPE_STACK_REGS+2+3)*16
	a code_ptr, $sp, tmp		/* code_ptr = stack slot receiving the generated code */
	lqr tmp, save_regs_1		/* store stack code */
	stqd tmp, 0(code_ptr)
	lqr inst, save_regs_2		/* template "stqd $68,-16(ptr)" instruction */
	ai ptr, $sp, 16*(-6)		/* ptr starts at the $69 save slot; code stores $68.. below it */
	sync				/* required before executing freshly written code */
	bisl link, code_ptr		/* branch to the constructed stack code */
	/* Adjust pointer so that it points to the first variable
	 * argument on the stack.
	 */
	ai offset, parms, -1		/* offset = parms - 1 */
	mpyi offset, offset, 16		/* offset = offset * 16 */
	a ptr, ptr, offset		/* ptr = ptr + offset */
	/* Store the va_list to the parameter list.
	 */
	stqd $sp, 16*(-1)(ptr)		/* va_list.call_stack */
	stqd ptr, 16*(-2)(ptr)		/* va_list.next_arg */
	/* Make $3 store address.
	 */
	ai offset, parms, 2		/* offset = parms + 2 */
	mpyi offset, offset, -16	/* offset = offset * -16 */
	a ptr, ptr, offset		/* ptr = ptr + offset */
	/* Save all the fixed (non-variable arguments on the stack)
	 */
	ceqi flag, parms, 0x01		/* if(parms==1) flag=0xFFFFFFFF */
	brnz flag, reg_3		/* if(flag!=0) jump */
	ceqi flag, parms, 0x02		/* if(parms==2) flag=0xFFFFFFFF */
	brnz flag, reg_4		/* if(flag!=0) jump */
	stqd $5, 16*2(ptr)
reg_4:
	stqd $4, 16*1(ptr)
reg_3:
	stqd $3, 0(ptr)
	il $3, -16*(SPE_STACK_REGS+2+2)
	stqx $sp, $3, $sp		/* save back chain */
	a $sp, $sp, $3			/* allocate the new frame */
	bi $0				/* return to caller */
/***************************** stack code *********************************************/
/* The following code is copied into the stack for re-entrant,
 * self-modified, code execution. This code copies the volatile
 * registers into a va_list parameter array.
 */
	.balignl 16, 0
save_regs_1:
	stqd inst, 16(code_ptr)		/* store instruction */
	sync				/* flush before executing the patched instruction */
	a inst, inst, regdec		/* decrement register number in the instruction */
	ceqbi tmp, inst, 3		/* if (reg-num == 3) tmp = 0x000000FF 000..0 */
save_regs_2:
	stqd $68, -16(ptr)		/* patched each iteration: stores $68, $67, ... $3 */
	ai ptr, ptr, -16
	brz tmp, save_regs_1		/* if (tmp == 0) jump */
	bi link				/* finish to make va_list */
	.size __stack_reg_va, .-__stack_reg_va
|
32bitmicro/newlib-nano-1.0
| 4,238
|
newlib/libc/machine/spu/setjmp.S
|
/*
(C) Copyright IBM Corp. 2005, 2006
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of IBM nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Author: Andreas Neukoetter (ti95neuk@de.ibm.com)
*/
/*
int setjmp( jmp_buf env );
*/
	.text
	.align 2
	.global setjmp
	.type setjmp, @function
/* int setjmp(jmp_buf env)
 * In: $3 = env.  Saves the return address ($0), the stack pointer
 * ($1) and the non-volatile registers $80..$127 into env (50
 * quadwords), then returns 0.  longjmp() restores this state so the
 * saved call site returns again with a non-zero value. */
setjmp:
	stqd $80, 2*16($3)
	stqd $81, 3*16($3)
	stqd $82, 4*16($3)
	stqd $83, 5*16($3)
	stqd $84, 6*16($3)
	stqd $85, 7*16($3)
	stqd $86, 8*16($3)
	stqd $87, 9*16($3)
	stqd $88, 10*16($3)
	stqd $89, 11*16($3)
	stqd $90, 12*16($3)
	stqd $91, 13*16($3)
	stqd $92, 14*16($3)
	stqd $93, 15*16($3)
	stqd $94, 16*16($3)
	stqd $95, 17*16($3)
	stqd $96, 18*16($3)
	stqd $97, 19*16($3)
	stqd $98, 20*16($3)
	stqd $99, 21*16($3)
	stqd $100, 22*16($3)
	stqd $101, 23*16($3)
	stqd $102, 24*16($3)
	stqd $103, 25*16($3)
	stqd $104, 26*16($3)
	stqd $105, 27*16($3)
	stqd $106, 28*16($3)
	stqd $107, 29*16($3)
	stqd $108, 30*16($3)
	stqd $109, 31*16($3)
	stqd $110, 32*16($3)
	stqd $111, 33*16($3)
	stqd $112, 34*16($3)
	stqd $113, 35*16($3)
	stqd $114, 36*16($3)
	stqd $115, 37*16($3)
	stqd $116, 38*16($3)
	stqd $117, 39*16($3)
	stqd $118, 40*16($3)
	stqd $119, 41*16($3)
	hbr setjmp_ret, $0	# hint the indirect return branch below
	lnop			# pipe1 bubble added for instruction fetch
	stqd $120, 42*16($3)
	stqd $121, 43*16($3)
	stqd $122, 44*16($3)
	stqd $123, 45*16($3)
	stqd $124, 46*16($3)
	stqd $125, 47*16($3)
	stqd $126, 48*16($3)
	stqd $127, 49*16($3)
	stqd $0, 0*16($3)	# save return address
	stqd $1, 1*16($3)	# save stack pointer
	il $3, 0		# a direct call to setjmp returns 0
setjmp_ret:
	bi $0
	.size setjmp, .-setjmp
/*
int longjmp( jmp_buf env, int val );
*/
	.text
	.align 2
	.global longjmp
	.type longjmp, @function
/* int longjmp(jmp_buf env, int val)
 * In: $3 = env, $4 = val.  Restores the state saved by setjmp(env) and
 * resumes at the saved return address; the saved call site appears to
 * return val, or 1 if val == 0 (longjmp must never return 0). */
longjmp:
	lr $127, $1		# $127 = current stack pointer quadword
	lqd $0, 0*16($3)	# saved return address
	lqd $1, 1*16($3)	# saved stack pointer quadword
	# Fold the SP difference into word 1 of $1 (the SPU ABI keeps the
	# available-stack-space value there) so that slot stays consistent
	# after the stack switch:
	sf $126, $127, $1	# per-word: $1 - $127 (sf computes rb - ra)
	rotqbyi $126, $126, 12	# rotate the word-0 difference into word 1
	fsmbi $127, 0x0F00	# byte mask selecting word 1 only
	and $126, $126, $127
	a $1, $1, $126
	# restore all the non-volatile registers
	lqd $80, 2*16($3)
	lqd $81, 3*16($3)
	lqd $82, 4*16($3)
	lqd $83, 5*16($3)
	lqd $84, 6*16($3)
	lqd $85, 7*16($3)
	lqd $86, 8*16($3)
	lqd $87, 9*16($3)
	lqd $88, 10*16($3)
	lqd $89, 11*16($3)
	lqd $90, 12*16($3)
	lqd $91, 13*16($3)
	lqd $92, 14*16($3)
	lqd $93, 15*16($3)
	lqd $94, 16*16($3)
	lqd $95, 17*16($3)
	lqd $96, 18*16($3)
	lqd $97, 19*16($3)
	lqd $98, 20*16($3)
	lqd $99, 21*16($3)
	lqd $100, 22*16($3)
	lqd $101, 23*16($3)
	lqd $102, 24*16($3)
	lqd $103, 25*16($3)
	lqd $104, 26*16($3)
	lqd $105, 27*16($3)
	lqd $106, 28*16($3)
	lqd $107, 29*16($3)
	lqd $108, 30*16($3)
	lqd $109, 31*16($3)
	hbr longjmp_ret, $0	# hint the indirect return branch below
	lqd $110, 32*16($3)
	lqd $111, 33*16($3)
	lqd $112, 34*16($3)
	lqd $113, 35*16($3)
	lqd $114, 36*16($3)
	lqd $115, 37*16($3)
	lqd $116, 38*16($3)
	lqd $117, 39*16($3)
	lqd $118, 40*16($3)
	lqd $119, 41*16($3)
	lqd $120, 42*16($3)
	lqd $121, 43*16($3)
	lqd $122, 44*16($3)
	lqd $123, 45*16($3)
	lqd $124, 46*16($3)
	lqd $125, 47*16($3)
	ceqi $5, $4, 0		# $5 = all-ones if val == 0, else 0
	lqd $126, 48*16($3)
	lqd $127, 49*16($3)
	sf $3, $5, $4		# $3 = val - $5: val, or 1 when val == 0
longjmp_ret:
	bi $0
	.size longjmp, .-longjmp
|
32bitmicro/newlib-nano-1.0
| 1,576
|
newlib/libc/machine/spu/iscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build iscanf (newlib's integer-only scanf variant, by naming
   convention) from the scanf stub by redefining the symbol. */
#define scanf iscanf
#include "scanf.S"
|
32bitmicro/newlib-nano-1.0
| 2,244
|
newlib/libc/machine/spu/fscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
/* fscanf: SPU-side stub that forwards the call to the PPE, which runs
   the real vfscanf.  Two fixed arguments (FILE *fp, const char *fmt);
   the variadic registers are spilled to a stack frame by __stack_reg_va,
   the SPU-side FILE handle is replaced by the PPE-side pointer it wraps,
   and an SPE_C99_VFSCANF request is signalled to the PPE with a pointer
   to the spilled argument block.  */
.text
.align 4
GLOBL fscanf
.type fscanf, @function
fscanf:
stqd $0, 16($sp) /* save caller address */
il $2, 2 /* number of fixed arguments */
brsl $0, __stack_reg_va /* save register to the stack frame */
brsl $0, __check_init /* make sure the C99 PPE-assist layer is set up */
lqd $3, 16*2($sp) /* $3 <- saved FP on the stack frame */
lqd $2, 0($3) /* FP = fp->_fp */
rotqby $2, $2, $3 /* lqd loads the aligned qword; rotate by the pointer's
                     byte offset so fp->_fp lands in the preferred slot */
stqd $2, 16*2($sp) /* replace FP on the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VFSCANF /* request code: run vfscanf on the PPE */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe /* signal the PPE and wait for the result */
il $2, 16*(SPE_STACK_REGS+2+2) /* tear down the frame __stack_reg_va built */
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size fscanf, .-fscanf
|
32bitmicro/newlib-nano-1.0
| 1,582
|
newlib/libc/machine/spu/siprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build siprintf by renaming sprintf and reusing the sprintf
   implementation.  The 'i' variant is presumably the integer-only
   (no floating point) flavor, per newlib naming convention.  */
#define sprintf siprintf
#include "sprintf.S"
|
32bitmicro/newlib-nano-1.0
| 2,055
|
newlib/libc/machine/spu/sprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
/* sprintf: SPU-side stub that forwards the call to the PPE, which runs
   the real vsprintf.  Two fixed arguments (char *buf, const char *fmt);
   the variadic registers are spilled to a stack frame by __stack_reg_va
   and an SPE_C99_VSPRINTF request is signalled to the PPE with a pointer
   to the spilled argument block.  */
.text
.align 4
GLOBL sprintf
.type sprintf, @function
sprintf:
stqd $0, 16($sp) /* save caller address */
il $2, 2 /* number of fixed arguments */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VSPRINTF /* request code: run vsprintf on the PPE */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe /* signal the PPE and wait for the result */
il $2, 16*(SPE_STACK_REGS+2+2) /* tear down the frame __stack_reg_va built */
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size sprintf, .-sprintf
|
32bitmicro/newlib-nano-1.0
| 1,579
|
newlib/libc/machine/spu/iprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build iprintf by renaming printf and reusing the printf
   implementation.  The 'i' variant is presumably the integer-only
   (no floating point) flavor, per newlib naming convention.  */
#define printf iprintf
#include "printf.S"
|
32bitmicro/newlib-nano-1.0
| 1,579
|
newlib/libc/machine/spu/fiscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build fiscanf by renaming fscanf and reusing the fscanf
   implementation.  The 'i' variant is presumably the integer-only
   (no floating point) flavor, per newlib naming convention.  */
#define fscanf fiscanf
#include "fscanf.S"
|
32bitmicro/newlib-nano-1.0
| 1,579
|
newlib/libc/machine/spu/siscanf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build siscanf by renaming sscanf and reusing the sscanf
   implementation.  The 'i' variant is presumably the integer-only
   (no floating point) flavor, per newlib naming convention.  */
#define sscanf siscanf
#include "sscanf.S"
|
32bitmicro/newlib-nano-1.0
| 2,049
|
newlib/libc/machine/spu/printf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "c99ppe.h"
/* printf: SPU-side stub that forwards the call to the PPE, which runs
   the real vprintf.  One fixed argument (const char *fmt); the variadic
   registers are spilled to a stack frame by __stack_reg_va and an
   SPE_C99_VPRINTF request is signalled to the PPE with a pointer to the
   spilled argument block.  */
.text
.align 4
GLOBL printf
.type printf, @function
printf:
stqd $0, 16($sp) /* save caller address */
il $2, 1 /* number of fixed arguments */
brsl $0, __stack_reg_va /* save register to the stack frame */
il $3, SPE_C99_SIGNALCODE
il $4, SPE_C99_VPRINTF /* request code: run vprintf on the PPE */
ai $5, $sp, 16*2 /* data ($3 save address) */
brsl $0, __send_to_ppe /* signal the PPE and wait for the result */
il $2, 16*(SPE_STACK_REGS+2+2) /* tear down the frame __stack_reg_va built */
a $sp, $sp, $2
lqd $0, 16($sp) /* load caller address */
bi $0 /* return to caller */
.size printf, .-printf
|
32bitmicro/newlib-nano-1.0
| 1,582
|
newlib/libc/machine/spu/fiprintf.S
|
/*
Copyright (c) 2007, Toshiba Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the names of Toshiba nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Build fiprintf by renaming fprintf and reusing the fprintf
   implementation.  The 'i' variant is presumably the integer-only
   (no floating point) flavor, per newlib naming convention.  */
#define fprintf fiprintf
#include "fprintf.S"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.