repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
airbus-seclab/ramooflax
| 1,256
|
setup/src/core/entry.s
|
/*
** Copyright (C) 2016 Airbus Group, stephane duverger <stephane.duverger@airbus.com>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License along
** with this program; if not, write to the Free Software Foundation, Inc.,
** 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/*
** 8KB kernel stack
*/
/* 8KB of zero-fill (@nobits) kernel stack; 16-byte aligned for the ABI */
.section .stack, "aw", @nobits
.align 16
.space 0x2000
/* .byte 0 */
/* .p2align 13 */
.text
.globl entry
.type entry,"function"
/*
** - make us uninterruptible
** - set initial stack for loader
** - clear eflags
** - init setup with grub multiboot info
** - start the vmm
*/
entry:
cli /* mask maskable interrupts for the whole setup phase */
movq $__kernel_start__, %rsp /* stack grows down from the kernel image start (8KB .stack) */
pushq $0
popf /* clear RFLAGS (IF=0, DF=0, ...) via push-0/popf */
call init /* C setup; NOTE(review): multiboot info presumably still in arg regs -- confirm */
jmp vmm_start /* tail-jump into the vmm; never returns */
|
airbus-seclab/ramooflax
| 2,223
|
vmm/src/svm/svm_insn.s
|
/*
** Copyright (C) 2016 Airbus Group, stephane duverger <stephane.duverger@airbus.com>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License along
** with this program; if not, write to the Free Software Foundation, Inc.,
** 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
.text
.globl entry
.type entry,"function"
.globl svm_vmrun
.type svm_vmrun,"function"
.globl svm_vmexit
.type svm_vmexit,"function"
/*
** VMM entry point
** directly resumes
** a VM
*/
entry:
/*
** VM-entry
**
** %rsp points to the saved-guest-GPR frame (r15 lowest, rax highest),
** the mirror image of the push sequence in svm_vmexit below.
*/
svm_vmrun:
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
add $8, %rsp /* XXX: pop %rsp */
pop %rbx
pop %rdx
pop %rcx
pop %rax
vmload %rax /* %rax = VMCB address: reload guest hidden state */
sti /* open the interrupt window before entering the guest */
vmrun %rax /* enter guest; falls through to svm_vmexit on #VMEXIT */
/*
** VM-exit
**
** XXX: we should sub "cli", "push all"
** cycle number (sampled via setup)
** from eax,edx returned by "rdtsc"
** before calling svm_vmexit_handler()
*/
svm_vmexit:
cli /* back in host context: mask interrupts again */
push %rax /* save guest GPRs, mirror of the pop order in svm_vmrun */
push %rcx
push %rdx
push %rbx
sub $8, %rsp /* XXX: push %rsp */
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
lfence /* serialize before the timestamp read */
rdtsc /* tsc -> edx:eax (upper halves of rax/rdx zeroed) */
mov %edx, %edi
shl $32, %rdi
or %rax, %rdi /* arg0 = full 64-bit tsc = (hi<<32)|lo */
xor %rbp, %rbp /* NOTE(review): likely terminates stack backtraces -- confirm */
call svm_vmexit_handler
jmp svm_vmrun /* resume the guest with the (possibly modified) frame */
|
airbus-seclab/ramooflax
| 3,646
|
vmm/src/vmx/vmx_insn.s
|
/*
** Copyright (C) 2016 Airbus Group, stephane duverger <stephane.duverger@airbus.com>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License along
** with this program; if not, write to the Free Software Foundation, Inc.,
** 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
.text
.globl __vmx_vmwrite
.type __vmx_vmwrite,"function"
.globl __vmx_vmread
.type __vmx_vmread,"function"
.globl vmx_vmresume
.type vmx_vmresume,"function"
.globl vmx_vmexit
.type vmx_vmexit,"function"
.globl entry
.type entry,"function"
/*
** VMM entry point
** is vm-exit handler
** (set to host rip)
*/
entry:
/*
** VM-exit
**
** Installed as the VMCS host rip: every vm-exit lands here with
** interrupts already masked by hardware.
**
** XXX: we should sub "push all" cycle number
**      (sampled via setup) from rax,rdx
**      returned by "rdtsc" before calling
**      vmx_vmexit_handler()
*/
vmx_vmexit:
push %rax /* save guest GPRs, mirror of the pop order in vmx_vmresume */
push %rcx
push %rdx
push %rbx
sub $8, %rsp /* XXX: push %rsp */
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
lfence /* serialize before the timestamp read */
rdtsc /* tsc -> edx:eax (upper halves of rax/rdx zeroed) */
mov %edx, %edi
shl $32, %rdi
or %rax, %rdi /* arg0 = full 64-bit tsc = (hi<<32)|lo */
xor %rbp, %rbp /* NOTE(review): likely terminates stack backtraces -- confirm */
call vmx_vmexit_handler
/*
** VM-entry
**
** %rsp points to the saved-guest-GPR frame (r15 lowest, rax highest).
** On VMfail, "vmresume" falls through to the failure wrapper below
** with the VMX status flags (ZF/CF) set.
*/
vmx_vmresume:
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
add $8, %rsp /* XXX: pop %rsp */
pop %rbx
pop %rdx
pop %rcx
pop %rax
vmresume
/*
** VM-entry failure
**
** Reached by fall-through when "vmresume" fails: the VMX status
** flags are still live (ZF=1 -> VMfailValid, CF=1 -> VMfailInvalid)
** and are consumed by vmx_check_error().
**
** Params:
** RDI = mem64 VMX error code ptr = @vmx_err
*/
__vmx_vmresume_failure_wrapper:
lea -8(%rsp), %rsp /* reserve error-code slot; lea (unlike sub) preserves the ZF/CF set by vmresume */
mov %rsp, %rdi /* rdi = scratch slot vmx_check_error stores the error code into */
call vmx_check_error
movl (%rdi), %edi /* arg0 = stored VMX instruction error code */
jmp vmx_vmresume_failure /* tail-call the C failure handler; does not return here */
/*
** VM write
**
** params:
** RDI = mem64 VMX error code ptr
** RSI = value to write
** RDX = VMCS field encoding
**
** returns:
** 0 on failure
** 1 on success
*/
__vmx_vmwrite:
vmwrite %rsi, %rdx /* AT&T order: write %rsi into VMCS field encoded by %rdx */
jmp vmx_check_error /* ZF/CF set by vmwrite are still live for the check */
/*
** VM read
**
** params:
** RDI = mem64 VMX error code ptr
** RSI = mem64 read value ptr
** RDX = VMCS field encoding
**
** returns:
** 0 on failure
** 1 on success
*/
__vmx_vmread:
vmread %rdx, (%rsi) /* store VMCS field %rdx to *rsi */
jmp vmx_check_error /* ZF/CF set by vmread are still live for the check */
/*
** Failure handling
**
** Dispatch on the VMX status flags of the preceding VMX instruction:
** ZF=1 -> VMfailValid, CF=1 -> VMfailInvalid, neither -> success.
** Reached by jmp (vmread/vmwrite tails) or by call (resume wrapper).
*/
vmx_check_error:
jz vmx_fail_valid
jc vmx_fail_invalid
vmx_success:
mov $1, %rax /* success: return 1 */
ret
/*
** VM Fail Valid : ZF=1
**
** read VMCS instruction error (0x4400)
** store it to (%rdi)
*/
vmx_fail_valid:
push %rdx /* preserve caller's field encoding */
mov $0x4400, %rdx /* 0x4400 = VM-instruction error field encoding */
vmread %rdx, (%rdi)
pop %rdx
jmp vmx_fail
/*
** VM Fail Invalid : CF=1
**
** VMCS instruction error code is 0
** (no current VMCS, so nothing can be read back)
*/
vmx_fail_invalid:
movl $0, (%rdi)
vmx_fail:
xor %rax, %rax /* failure: return 0 */
ret
|
airbus-seclab/ramooflax
| 5,711
|
vmm/src/core/idt.s
|
/*
** Copyright (C) 2016 Airbus Group, stephane duverger <stephane.duverger@airbus.com>
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License along
** with this program; if not, write to the Free Software Foundation, Inc.,
** 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
.text
.globl resume_from_intr
.type resume_from_intr,"function"
/*
** Entry for vectors that carry a CPU-pushed error code.
**
** If irq_msg bit16 (preempt) and bit17 (rmode, cleared here) are both
** set, the frame is rewritten: %rax is saved in a freshly opened slot
** (idt_common_rcx skips "push %rax"), the vector moves down one slot
** and the old vector slot is forced to -1.
** NOTE(review): this grows the frame by 8 bytes relative to the layout
** documented below; intr_hdlr() is assumed to handle that -- confirm.
*/
idt_checkmode:
bt $16, irq_msg(%rip) /* preempt */
jnc idt_common
btr $17, irq_msg(%rip) /* rmode (test and clear) */
jnc idt_common
sub $16, %rsp
mov %rax, (%rsp) /* save %rax into the new bottom slot */
mov 16(%rsp), %rax /* %rax = vector number */
movq $-1, 16(%rsp)
mov %rax, 8(%rsp)
jmp idt_common_rcx
/*
** ring0 int64 stack layout
**
** +176 (22*8) SS
** +168 RSP
** +160 RFLAGS
** +152 CS
** +144 RIP
** +136 ERR CODE
** +128 INT NUMBER
** +120 (15*8) RAX
** ... GPR(s)
** +0 R15
*/
idt_common:
push %rax
idt_common_rcx: /* entered from idt_checkmode with %rax already saved */
push %rcx
push %rdx
push %rbx
push %rsp /* pushes the pre-decrement %rsp (address of the RBX slot) */
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
mov %rsp, %rdi /* arg0 = pointer to the saved-GPR frame above */
call intr_hdlr
resume_from_intr:
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rsp /* reloads the value saved by "push %rsp" (same address) */
pop %rbx
pop %rdx
pop %rcx
pop %rax
add $16, %rsp /* discard INT NUMBER and ERR CODE */
rex.w iret /* 64-bit iretq */
resume_from_intr:
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
pop %rsp
pop %rbx
pop %rdx
pop %rcx
pop %rax
add $16, %rsp
rex.w iret
/*
** IDT handlers
**
** 16-byte stubs, one per vector, indexed by the IDT setup code.
** Vectors without a CPU error code push -1 as a placeholder, then the
** vector number; vectors with an error code (8, 10-14, 17) push only
** the vector and go through idt_checkmode.
*/
.section .idt_jmp, "ax", @progbits
idt_trampoline:
/* divide error (no) */
.align 16
pushq $-1
pushq $0
jmp idt_common
/* debug (no) */
.align 16
pushq $-1
pushq $1
jmp idt_common
/* nmi (no) */
.align 16
pushq $-1
pushq $2
jmp idt_common
/* breakpoint (no) */
.align 16
pushq $-1
pushq $3
jmp idt_common
/* overflow (no) */
.align 16
pushq $-1
pushq $4
jmp idt_common
/* bound (no) */
.align 16
pushq $-1
pushq $5
jmp idt_common
/* invalid opcode (no) */
.align 16
pushq $-1
pushq $6
jmp idt_common
/* device not available (no) */
.align 16
pushq $-1
pushq $7
jmp idt_common
/* double fault (yes) */
.align 16
pushq $8
jmp idt_checkmode
/* copro segment (no) */
.align 16
pushq $-1
pushq $9
jmp idt_common
/* TSS invalid (yes) */
.align 16
pushq $10
jmp idt_checkmode
/* Segment not present (yes) */
.align 16
pushq $11
jmp idt_checkmode
/* stack segment fault (yes) */
.align 16
pushq $12
jmp idt_checkmode
/* general protection (yes) */
.align 16
pushq $13
jmp idt_checkmode
/* page fault (yes) */
.align 16
pushq $14
jmp idt_checkmode
/* intel reserved */
.align 16
pushq $-1
pushq $15
jmp idt_common
/* fpu (no) */
.align 16
pushq $-1
pushq $16
jmp idt_common
/* alignment (yes) */
.align 16
pushq $17
jmp idt_checkmode
/* machine check (no) */
.align 16
pushq $-1
pushq $18
jmp idt_common
/* simd (no) */
.align 16
pushq $-1
pushq $19
jmp idt_common
/* intel reserved 20-31 and user available 32-255 */
.irp nr, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255
.align 16
pushq $-1
pushq $\nr
jmp idt_common
.endr
|
aircrack-ng/aircrack-ng-archive
| 20,498
|
src/sha1-sse2.S
|
// SHA-1 SSE2 implementation, (C) 2008 Alvaro Salmador (naplam33@msn.com), ported from Simon Marechal's SHA-1 MMX - License: GPLv2
// SHA-1 MMX implementation, (C) 2005 Simon Marechal (simon@banquise.net) - License: Public Domain
// This code computes two (with sse now four) SHA-1 digests at the same time. It
// doesn't take care of padding (0x80 and size << 3), so make
// sure the last input block is properly padded. Both 64-byte
// input blocks must be (four bytes) interleaved.
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations
// including the two.
// You must obey the GNU General Public License in all respects
// for all of the code used other than OpenSSL. * If you modify
// file(s) with this exception, you may extend this exception to your
// version of the file(s), but you are not obligated to do so. * If you
// do not wish to do so, delete this exception statement from your
// version. * If you delete this exception statement from all source
// files in the program, then also delete it here.
// PIC/ABI glue:
//  PRELOAD(x)  - load x's GOT entry into %rbx where needed (ELF x86-64 PIC)
//  MANGLE(x)   - rewrite a symbol reference for the addressing mode in use
//  INIT_PIC()/END_PIC() - set up / tear down the PIC base register
#if defined(__x86_64__) && (defined(__APPLE__) || defined(__CYGWIN__))
// RIP-relative addressing, no PIC register required
#define PRELOAD(x)
#define MANGLE(x) x(%rip)
#define INIT_PIC()
#define END_PIC()
#else
#ifdef __PIC__
#ifdef __x86_64__
// ELF x86-64 PIC: indirect through the GOT via %rbx (callee-saved)
#define PRELOAD(x) movq x@GOTPCREL(%rip), %rbx;
#define MANGLE(x) (%rbx)
#define INIT_PIC() pushq %rbx
#define END_PIC() popq %rbx
#else
#undef __i686 /* gcc builtin define gets in our way */
// i386 PIC: %ebx = GOT base (via get_pc_thunk), GOTOFF references
#define PRELOAD(x)
#define MANGLE(x) x ## @GOTOFF(%ebx)
#define INIT_PIC() \
call __i686.get_pc_thunk.bx ; \
addl $_GLOBAL_OFFSET_TABLE_, %ebx
#define END_PIC()
#endif
#else
// non-PIC build: plain absolute addressing
#define PRELOAD(x)
#define MANGLE(x) x
#define INIT_PIC()
#define END_PIC()
#endif
#endif
#if defined(__i386__) || defined(__x86_64__)
.globl shasse2_init;
.globl shasse2_ends;
.globl shasse2_data;
.globl shasse2_cpuid;
.globl _shasse2_init;
.globl _shasse2_ends;
.globl _shasse2_data;
.globl _shasse2_cpuid;
.data
#ifdef __APPLE__
.align(12)
#else
.align(16)
#endif
// SHA-1 round constants and initial state, each value broadcast across
// the four 32-bit lanes (four digests are computed in parallel per XMM).
const_init_a:
.long 0x67452301
.long 0x67452301
.long 0x67452301
.long 0x67452301
const_init_b:
.long 0xEFCDAB89
.long 0xEFCDAB89
.long 0xEFCDAB89
.long 0xEFCDAB89
const_init_c:
.long 0x98BADCFE
.long 0x98BADCFE
.long 0x98BADCFE
.long 0x98BADCFE
const_init_d:
.long 0x10325476
.long 0x10325476
.long 0x10325476
.long 0x10325476
const_init_e:
.long 0xC3D2E1F0
.long 0xC3D2E1F0
.long 0xC3D2E1F0
.long 0xC3D2E1F0
const_stage0:
.long 0x5A827999
.long 0x5A827999
.long 0x5A827999
.long 0x5A827999
const_stage1:
.long 0x6ED9EBA1
.long 0x6ED9EBA1
.long 0x6ED9EBA1
.long 0x6ED9EBA1
const_stage2:
.long 0x8F1BBCDC
.long 0x8F1BBCDC
.long 0x8F1BBCDC
.long 0x8F1BBCDC
const_stage3:
.long 0xCA62C1D6
.long 0xCA62C1D6
.long 0xCA62C1D6
.long 0xCA62C1D6
// masks used for 32-bit byte-swapping (endianness conversion)
const_ff00:
.long 0xFF00FF00
.long 0xFF00FF00
.long 0xFF00FF00
.long 0xFF00FF00
const_00ff:
.long 0x00FF00FF
.long 0x00FF00FF
.long 0x00FF00FF
.long 0x00FF00FF
// XMM register roles: ctxa..ctxe hold the SHA-1 state words A..E,
// one digest per 32-bit lane.
#define ctxa %xmm0
#define ctxb %xmm1
#define ctxc %xmm2
#define ctxd %xmm3
#define ctxe %xmm4
#define tmp1 %xmm5
#define tmp2 %xmm6
#define tmp3 %xmm7
// tmp4/tmp5 alias ctxa/ctxb: only usable once A/B are already stored
#define tmp4 ctxa
#define tmp5 ctxb
#if defined(__x86_64__) && defined(__CYGWIN__)
// Win64 convention: args arrive in %rcx,%rdx,%r8
#define edx_rsi %rdx
#define ecx_rdx %r8
#define eax_rdi %rcx
#elif defined(__x86_64__)
// SysV AMD64: args in %rdi,%rsi,%rdx
#define edx_rsi %rsi
#define ecx_rdx %rdx
#define eax_rdi %rdi
#else
// i386 regparm-style: args in %eax,%edx,%ecx
#define edx_rsi %edx
#define ecx_rdx %ecx
#define eax_rdi %eax
#endif
// movdqa movapd
// F0(x,y,z) = (x & y) | (~x & z)  -- SHA-1 "choose"; result in tmp1
#define F0(x,y,z) \
movdqa x, tmp2; \
movdqa x, tmp1; \
pand y, tmp2; \
pandn z, tmp1; \
por tmp2, tmp1;
// F1(x,y,z) = x ^ y ^ z           -- parity; result in tmp1
#define F1(x,y,z) \
movdqa z, tmp1; \
pxor y, tmp1; \
pxor x, tmp1
// F2(x,y,z) = (x & y) | ((x | y) & z) -- majority; result in tmp1
#define F2(x,y,z) \
movdqa x, tmp1; \
movdqa x, tmp2; \
pand y, tmp1; \
por y, tmp2; \
pand z, tmp2; \
por tmp2, tmp1;
// subRoundX: one SHA-1 round for rounds 0..15.
// Applies the round update e += rol5(a) + f(b,c,d) + k + W[data] with
// b rotated left 30; loads message word `data` from the input (edx_rsi),
// byte-swaps it, stores it to the workspace (ecx_rdx) for later rounds.
// Expects f() to leave its result in tmp1.
#if defined(__x86_64__) && defined(__CYGWIN__)
#define subRoundX(a, b, c, d, e, f, k, data) \
f(b,c,d); \
movdqa a, tmp2; \
movdqa a, tmp3; \
paddd tmp1, e; \
pslld $5, tmp2; \
psrld $27, tmp3; \
por tmp3, tmp2; \
paddd tmp2, e; \
movdqa b, tmp2; \
pslld $30, b; \
paddd MANGLE(k), e; \
psrld $2, tmp2; \
por tmp2, b; \
movdqa (data*16)(edx_rsi), tmp1; \
movdqa tmp1, tmp2; \
pand MANGLE(const_ff00), tmp1; \
pand MANGLE(const_00ff), tmp2; \
psrld $8, tmp1; \
pslld $8, tmp2; \
por tmp2, tmp1; \
movdqa tmp1, tmp2; \
psrld $16, tmp1; \
pslld $16, tmp2; \
por tmp2, tmp1; \
movdqa tmp1, (data*16)(ecx_rdx); \
paddd tmp1, e;
#else
// generic variant: PRELOAD() sets up the PIC register before each
// constant reference (no-op outside i386/x86-64 ELF PIC builds)
#define subRoundX(a, b, c, d, e, f, k, data) \
f(b,c,d); \
movdqa a, tmp2; \
movdqa a, tmp3; \
paddd tmp1, e; \
pslld $5, tmp2; \
psrld $27, tmp3; \
por tmp3, tmp2; \
paddd tmp2, e; \
movdqa b, tmp2; \
pslld $30, b; \
PRELOAD(k) \
paddd MANGLE(k), e; \
psrld $2, tmp2; \
por tmp2, b; \
movdqa (data*16)(edx_rsi), tmp1; \
movdqa tmp1, tmp2; \
PRELOAD(const_ff00) \
pand MANGLE(const_ff00), tmp1; \
PRELOAD(const_00ff) \
pand MANGLE(const_00ff), tmp2; \
psrld $8, tmp1; \
pslld $8, tmp2; \
por tmp2, tmp1; \
movdqa tmp1, tmp2; \
psrld $16, tmp1; \
pslld $16, tmp2; \
por tmp2, tmp1; \
movdqa tmp1, (data*16)(ecx_rdx); \
paddd tmp1, e;
#endif
// subRoundY: one SHA-1 round for rounds 16..79.
// Message expansion: W[data] = rol1(W[data-3] ^ W[data-8] ^ W[data-14]
// ^ W[data-16]), stored back to the workspace (ecx_rdx), then the same
// round update as subRoundX. Expects f() to leave its result in tmp1.
#if defined(__x86_64__) && defined(__CYGWIN__)
#define subRoundY(a, b, c, d, e, f, k, data) \
movdqa ((data- 3)*16)(ecx_rdx), tmp1; \
pxor ((data- 8)*16)(ecx_rdx), tmp1; \
pxor ((data-14)*16)(ecx_rdx), tmp1; \
pxor ((data-16)*16)(ecx_rdx), tmp1; \
movdqa tmp1, tmp2; \
pslld $1, tmp1; \
psrld $31, tmp2; \
por tmp2, tmp1; \
movdqa tmp1, (data*16)(ecx_rdx); \
paddd tmp1, e; \
f(b,c,d); \
movdqa a, tmp2; \
movdqa a, tmp3; \
paddd tmp1, e; \
pslld $5, tmp2; \
psrld $27, tmp3; \
por tmp3, tmp2; \
paddd tmp2, e; \
movdqa b, tmp2; \
pslld $30, b; \
paddd MANGLE(k), e; \
psrld $2, tmp2; \
por tmp2, b;
#else
// generic variant with PIC PRELOAD() before constant references
#define subRoundY(a, b, c, d, e, f, k, data) \
movdqa ((data- 3)*16)(ecx_rdx), tmp1; \
pxor ((data- 8)*16)(ecx_rdx), tmp1; \
pxor ((data-14)*16)(ecx_rdx), tmp1; \
pxor ((data-16)*16)(ecx_rdx), tmp1; \
movdqa tmp1, tmp2; \
pslld $1, tmp1; \
psrld $31, tmp2; \
por tmp2, tmp1; \
movdqa tmp1, (data*16)(ecx_rdx); \
paddd tmp1, e; \
f(b,c,d); \
movdqa a, tmp2; \
movdqa a, tmp3; \
paddd tmp1, e; \
pslld $5, tmp2; \
psrld $27, tmp3; \
por tmp3, tmp2; \
paddd tmp2, e; \
movdqa b, tmp2; \
pslld $30, b; \
PRELOAD(k) \
paddd MANGLE(k), e; \
psrld $2, tmp2; \
por tmp2, b;
#endif
.text
// arg 1 (eax) (64bit: rdi): context (4*20 bytes)
// Initializes the four interleaved SHA-1 states to the standard
// H0..H4 initial values (broadcast per lane).
shasse2_init:
_shasse2_init:
INIT_PIC()
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_init_a)
#endif
movdqa MANGLE(const_init_a), ctxa
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_init_b)
#endif
movdqa MANGLE(const_init_b), ctxb
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_init_c)
#endif
movdqa MANGLE(const_init_c), ctxc
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_init_d)
#endif
movdqa MANGLE(const_init_d), ctxd
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_init_e)
#endif
movdqa MANGLE(const_init_e), ctxe
movdqa ctxa, 0(eax_rdi)  // context must be 16-byte aligned (movdqa)
movdqa ctxb, 16(eax_rdi)
movdqa ctxc, 32(eax_rdi)
movdqa ctxd, 48(eax_rdi)
movdqa ctxe, 64(eax_rdi)
END_PIC()
ret
// arg 1 (eax) (64bit: rdi): context (4*20 bytes)
// arg 2 (edx) (64bit: rsi) : digests (4*20 bytes)
// Finalize: byte-swap each 32-bit state word (via the ff00/00ff masks
// plus 8/16-bit shifts) and write the four digests out.
shasse2_ends:
_shasse2_ends:
INIT_PIC()
movdqa 0(eax_rdi), ctxa
movdqa 16(eax_rdi), ctxb
movdqa 32(eax_rdi), ctxc
movdqa 48(eax_rdi), ctxd
movdqa 64(eax_rdi), ctxe
// swap bytes of A and B first (tmp1..tmp3 available)
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_ff00)
#endif
movdqa MANGLE(const_ff00), tmp3
movdqa ctxa, tmp1
movdqa ctxb, tmp2
pand tmp3, ctxa
pand tmp3, ctxb
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_00ff)
#endif
movdqa MANGLE(const_00ff), tmp3
pand tmp3, tmp1
pand tmp3, tmp2
psrld $8, ctxa
psrld $8, ctxb
pslld $8, tmp1
pslld $8, tmp2
por tmp1, ctxa
por tmp2, ctxb
movdqa ctxa, tmp1
movdqa ctxb, tmp2
psrld $16, ctxa
psrld $16, ctxb
pslld $16, tmp1
pslld $16, tmp2
por tmp1, ctxa
por tmp2, ctxb
movdqa ctxa, 0(edx_rsi)
movdqa ctxb, 16(edx_rsi)
// A/B are stored: their registers are reusable as tmp5 (alias) below
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_ff00)
#endif
movdqa MANGLE(const_ff00), tmp5
movdqa ctxc, tmp1
movdqa ctxd, tmp2
movdqa ctxe, tmp3
pand tmp5, ctxc
pand tmp5, ctxd
pand tmp5, ctxe
#if !defined(__x86_64__) && !defined(__CYGWIN__)
PRELOAD(const_00ff)
#endif
movdqa MANGLE(const_00ff), tmp5
pand tmp5, tmp1
pand tmp5, tmp2
pand tmp5, tmp3
psrld $8, ctxc
psrld $8, ctxd
psrld $8, ctxe
pslld $8, tmp1
pslld $8, tmp2
pslld $8, tmp3
por tmp1, ctxc
por tmp2, ctxd
por tmp3, ctxe
movdqa ctxc, tmp1
movdqa ctxd, tmp2
movdqa ctxe, tmp3
psrld $16, ctxc
psrld $16, ctxd
psrld $16, ctxe
pslld $16, tmp1
pslld $16, tmp2
pslld $16, tmp3
por tmp1, ctxc
por tmp2, ctxd
por tmp3, ctxe
movdqa ctxc, 32(edx_rsi)
movdqa ctxd, 48(edx_rsi)
movdqa ctxe, 64(edx_rsi)
END_PIC()
ret
// arg 1 (eax) (64bit: rdi): context (4*20 bytes)
// arg 2 (edx) (64bit: rsi): input data (4*64 bytes)
// arg 3 (ecx) (64bit: rdx): workspace (1280 bytes)
// One SHA-1 compression over four interleaved 64-byte blocks:
// 80 rounds in 4 stages (choose / parity / majority / parity),
// then the Davies-Meyer feed-forward add into the context.
shasse2_data:
_shasse2_data:
INIT_PIC()
movdqa 0(eax_rdi), ctxa
movdqa 16(eax_rdi), ctxb
movdqa 32(eax_rdi), ctxc
movdqa 48(eax_rdi), ctxd
movdqa 64(eax_rdi), ctxe
round0:
prefetchnta (edx_rsi)  // hint: input is streamed, bypass caches
subRoundX( ctxa, ctxb, ctxc, ctxd, ctxe, F0, const_stage0, 0 );
subRoundX( ctxe, ctxa, ctxb, ctxc, ctxd, F0, const_stage0, 1 );
subRoundX( ctxd, ctxe, ctxa, ctxb, ctxc, F0, const_stage0, 2 );
subRoundX( ctxc, ctxd, ctxe, ctxa, ctxb, F0, const_stage0, 3 );
subRoundX( ctxb, ctxc, ctxd, ctxe, ctxa, F0, const_stage0, 4 );
subRoundX( ctxa, ctxb, ctxc, ctxd, ctxe, F0, const_stage0, 5 );
subRoundX( ctxe, ctxa, ctxb, ctxc, ctxd, F0, const_stage0, 6 );
subRoundX( ctxd, ctxe, ctxa, ctxb, ctxc, F0, const_stage0, 7 );
subRoundX( ctxc, ctxd, ctxe, ctxa, ctxb, F0, const_stage0, 8 );
subRoundX( ctxb, ctxc, ctxd, ctxe, ctxa, F0, const_stage0, 9 );
subRoundX( ctxa, ctxb, ctxc, ctxd, ctxe, F0, const_stage0, 10 );
subRoundX( ctxe, ctxa, ctxb, ctxc, ctxd, F0, const_stage0, 11 );
subRoundX( ctxd, ctxe, ctxa, ctxb, ctxc, F0, const_stage0, 12 );
subRoundX( ctxc, ctxd, ctxe, ctxa, ctxb, F0, const_stage0, 13 );
subRoundX( ctxb, ctxc, ctxd, ctxe, ctxa, F0, const_stage0, 14 );
subRoundX( ctxa, ctxb, ctxc, ctxd, ctxe, F0, const_stage0, 15 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F0, const_stage0, 16 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F0, const_stage0, 17 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F0, const_stage0, 18 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F0, const_stage0, 19 );
round1:
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F1, const_stage1, 20 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F1, const_stage1, 21 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F1, const_stage1, 22 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F1, const_stage1, 23 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F1, const_stage1, 24 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F1, const_stage1, 25 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F1, const_stage1, 26 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F1, const_stage1, 27 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F1, const_stage1, 28 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F1, const_stage1, 29 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F1, const_stage1, 30 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F1, const_stage1, 31 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F1, const_stage1, 32 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F1, const_stage1, 33 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F1, const_stage1, 34 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F1, const_stage1, 35 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F1, const_stage1, 36 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F1, const_stage1, 37 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F1, const_stage1, 38 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F1, const_stage1, 39 );
round2:
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F2, const_stage2, 40 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F2, const_stage2, 41 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F2, const_stage2, 42 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F2, const_stage2, 43 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F2, const_stage2, 44 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F2, const_stage2, 45 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F2, const_stage2, 46 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F2, const_stage2, 47 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F2, const_stage2, 48 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F2, const_stage2, 49 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F2, const_stage2, 50 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F2, const_stage2, 51 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F2, const_stage2, 52 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F2, const_stage2, 53 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F2, const_stage2, 54 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F2, const_stage2, 55 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F2, const_stage2, 56 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F2, const_stage2, 57 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F2, const_stage2, 58 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F2, const_stage2, 59 );
round3:
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F1, const_stage3, 60 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F1, const_stage3, 61 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F1, const_stage3, 62 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F1, const_stage3, 63 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F1, const_stage3, 64 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F1, const_stage3, 65 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F1, const_stage3, 66 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F1, const_stage3, 67 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F1, const_stage3, 68 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F1, const_stage3, 69 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F1, const_stage3, 70 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F1, const_stage3, 71 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F1, const_stage3, 72 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F1, const_stage3, 73 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F1, const_stage3, 74 );
subRoundY( ctxa, ctxb, ctxc, ctxd, ctxe, F1, const_stage3, 75 );
subRoundY( ctxe, ctxa, ctxb, ctxc, ctxd, F1, const_stage3, 76 );
subRoundY( ctxd, ctxe, ctxa, ctxb, ctxc, F1, const_stage3, 77 );
subRoundY( ctxc, ctxd, ctxe, ctxa, ctxb, F1, const_stage3, 78 );
subRoundY( ctxb, ctxc, ctxd, ctxe, ctxa, F1, const_stage3, 79 );
// feed-forward: H[i] += state[i]
paddd 0(eax_rdi), ctxa
paddd 16(eax_rdi), ctxb
paddd 32(eax_rdi), ctxc
paddd 48(eax_rdi), ctxd
paddd 64(eax_rdi), ctxe
movdqa ctxa, 0(eax_rdi)
movdqa ctxb, 16(eax_rdi)
movdqa ctxc, 32(eax_rdi)
movdqa ctxd, 48(eax_rdi)
movdqa ctxe, 64(eax_rdi)
END_PIC()
ret
// returns 0 if neither MMX nor SSE2 are supported; 1 if MMX is supported; 2 if SSE2 is also supported
shasse2_cpuid:
_shasse2_cpuid:
#ifndef __x86_64__
// i386: probe CPUID support by toggling EFLAGS.ID (bit 21);
// if it cannot be toggled, eax ends up 0 ("no MMX") and we return
pushfl
pushfl
popl %eax
movl %eax, %ecx
xorl $0x200000, %eax
push %eax
popfl
pushfl
popl %eax
popfl
xorl %ecx, %eax
jnz do_cpuid
ret
do_cpuid:
#endif
#ifdef __x86_64__
push %rbx
push %rcx
push %rdx
#else
push %ebx
push %ecx
push %edx
#endif
movl $1, %eax
cpuid
testl $0x00800000, %edx // bit 23 (MMX)
jz no_mmx
testl $0x04000000, %edx // bit 26 (SSE2)
jz mmx_only
// sse2 supported:
movl $2, %eax
jmp cpuid_exit
mmx_only:
movl $1, %eax
jmp cpuid_exit
no_mmx:
movl $0, %eax
cpuid_exit:
#ifdef __x86_64__
pop %rdx
pop %rcx
pop %rbx
#else
pop %edx
pop %ecx
pop %ebx
#endif
ret
#ifdef __i386__
#ifdef __PIC__
// standard i386 PIC thunk: returns the caller's return address in %ebx
// (used by INIT_PIC() to compute the GOT base)
#ifndef __APPLE__
.section .gnu.linkonce.t.__i686.get_pc_thunk.bx,"ax",@progbits
#endif
.globl __i686.get_pc_thunk.bx
#ifdef __APPLE__
.private_extern __i686.get_pc_thunk.bx
#else
.hidden __i686.get_pc_thunk.bx
.type __i686.get_pc_thunk.bx,@function
#endif
__i686.get_pc_thunk.bx:
movl (%esp), %ebx
ret
#endif
#endif
#endif
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif
|
Air-duino/Arduino-AirMCU
| 88,395
|
variants/AIR401/AIR401_DEV/qfplib-m0-full.S
|
@ Copyright 2019-2020 Mark Owen
@ http://www.quinapalus.com
@ E-mail: qfp@quinapalus.com
@
@ This file is free software: you can redistribute it and/or modify
@ it under the terms of version 2 of the GNU General Public License
@ as published by the Free Software Foundation.
@
@ This file is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@ GNU General Public License for more details.
@
@ You should have received a copy of the GNU General Public License
@ along with this file. If not, see <http://www.gnu.org/licenses/> or
@ write to the Free Software Foundation, Inc., 51 Franklin Street,
@ Fifth Floor, Boston, MA 02110-1301, USA.
.syntax unified
.cpu cortex-m0plus
.thumb
@ exported symbols
.global __aeabi_fadd
.global __aeabi_fsub
.global __aeabi_fmul
.global __aeabi_fdiv
.global qfp_fcmp
.global _fsqrt
.global _ffix
.global qfp_float2fix
.global _ffixu
.global qfp_float2ufix
.global _fflt
.global qfp_fix2float
.global _ffltu
.global qfp_ufix2float
.global _ll_sto_f
.global qfp_fix642float
.global _ll_uto_f
.global qfp_ufix642float
@ .global qfp_fcos
@ .global qfp_fsin
@ .global qfp_ftan
@ .global qfp_fatan2
@ .global qfp_fexp
@ .global qfp_fln
.global __aeabi_dadd
.global __aeabi_dsub
.global __aeabi_dmul
.global __aeabi_ddiv
.global _dsqrt
@ .global qfp_dcos
@ .global qfp_dsin
@ .global qfp_dtan
@ .global qfp_datan2
@ .global qfp_dexp
@ .global qfp_dln
.global qfp_dcmp
.global _ll_sfrom_f
.global qfp_float2fix64
.global _ll_ufrom_f
.global qfp_float2ufix64
.global _dfix
.global qfp_double2fix
.global _dfixu
.global qfp_double2ufix
.global _ll_sfrom_d
.global qfp_double2fix64
.global _ll_ufrom_d
.global qfp_double2ufix64
.global _dflt
.global qfp_fix2double
.global _dfltu
.global qfp_ufix2double
.global _ll_sto_d
.global qfp_fix642double
.global _ll_uto_d
.global qfp_ufix642double
.global __aeabi_d2f
.global __aeabi_f2d
qfp_lib_start:
@ exchange r0<->r1, r2<->r3
xchxy:
push {r0,r2,r14}      @ stash old r0, r2 and the return address
mov r0,r1
mov r2,r3
pop {r1,r3,r15}       @ old r0 -> r1, old r2 -> r3, lr -> pc (return)
@ IEEE single in r0-> signed (two's complement) mantissa in r0 9Q23 (24 significant bits), signed exponent (bias removed) in r2
@ trashes r4; zero, denormal -> mantissa=+/-1, exponent=-380; Inf, NaN -> mantissa=+/-1, exponent=+640
unpackx:
lsrs r2,r0,#23 @ save exponent and sign
lsls r0,#9 @ extract mantissa
lsrs r0,#9
movs r4,#1
lsls r4,#23
orrs r0,r4 @ reinstate implied leading 1
cmp r2,#255 @ test sign bit
uxtb r2,r2 @ clear it
bls 1f @ branch on positive
rsbs r0,#0 @ negate mantissa
1:
subs r2,#1
cmp r2,#254 @ zero/denormal/Inf/NaN?
bhs 2f
subs r2,#126 @ remove exponent bias: can now be -126..+127
bx r14
2: @ here with special-case values
cmp r0,#0
mov r0,r4 @ set mantissa to +1
bpl 3f
rsbs r0,#0 @ zero/denormal/Inf/NaN: mantissa=+/-1
3:
subs r2,#126 @ zero/denormal: exponent -> -127; Inf, NaN: exponent -> 128
lsls r2,#2 @ zero/denormal: exponent -> -508; Inf, NaN: exponent -> 512
adds r2,#128 @ zero/denormal: exponent -> -380; Inf, NaN: exponent -> 640
bx r14
@ normalise and pack signed mantissa in r0 nominally 3Q29, signed exponent in r2-> IEEE single in r0
@ trashes r4, preserves r1,r3
@ r5: "sticky bits", must be zero iff all result bits below r0 are zero for correct rounding
@ (round-to-nearest-even: ties are broken via the sticky bits at label 7)
packx:
lsrs r4,r0,#31 @ save sign bit
lsls r4,r4,#31 @ sign now in b31
bpl 2f @ skip if positive
cmp r5,#0
beq 11f
adds r0,#1 @ fiddle carry in to following rsb if sticky bits are non-zero
11:
rsbs r0,#0 @ can now treat r0 as unsigned
packx0:
bmi 3f @ catch r0=0x80000000 case
2:
subs r2,#1 @ normalisation loop
adds r0,r0
beq 1f @ zero? special case
bpl 2b @ normalise so leading "1" in bit 31
3:
adds r2,#129 @ (mis-)offset exponent
bne 12f @ special case: highest denormal can round to lowest normal
adds r0,#0x80 @ in special case, need to add 256 to r0 for rounding
bcs 4f @ tripped carry? then have leading 1 in C as required
12:
adds r0,#0x80 @ rounding
bcs 4f @ tripped carry? then have leading 1 in C as required (and result is even so can ignore sticky bits)
cmp r5,#0
beq 7f @ sticky bits zero?
8:
lsls r0,#1 @ remove leading 1
9:
subs r2,#1 @ compensate exponent on this path
4:
cmp r2,#254
bge 5f @ overflow?
adds r2,#1 @ correct exponent offset
ble 10f @ denormal/underflow?
lsrs r0,#9 @ align mantissa
lsls r2,#23 @ align exponent
orrs r0,r2 @ assemble exponent and mantissa
6:
orrs r0,r4 @ apply sign
1:
bx r14
5:
movs r0,#0xff @ create infinity
lsls r0,#23
b 6b
10:
movs r0,#0 @ create zero
bx r14
7: @ sticky bit rounding case
lsls r5,r0,#24 @ check bottom 8 bits of r0
bne 8b @ in rounding-tie case?
lsrs r0,#9 @ ensure even result
lsls r0,#10
b 9b
.align 2
.ltorg
@ signed multiply r0 1Q23 by r1 4Q23, result in r0 7Q25, sticky bits in r5
@ trashes r3,r4
@ (Cortex-M0 has no 32x32->64 multiply, so the product is assembled
@ from four 16x16 partial products; discarded low bits go into r5)
mul0:
uxth r3,r0 @ Q23
asrs r4,r1,#16 @ Q7
muls r3,r4 @ L*H, Q30 signed
asrs r4,r0,#16 @ Q7
uxth r5,r1 @ Q23
muls r4,r5 @ H*L, Q30 signed
adds r3,r4 @ sum of middle partial products
uxth r4,r0
muls r4,r5 @ L*L, Q46 unsigned
lsls r5,r4,#16 @ initialise sticky bits from low half of low partial product
lsrs r4,#16 @ Q25
adds r3,r4 @ add high half of low partial product to sum of middle partial products
@ (cannot generate carry by limits on input arguments)
asrs r0,#16 @ Q7
asrs r1,#16 @ Q7
muls r0,r1 @ H*H, Q14 signed
lsls r0,#11 @ high partial product Q25
lsls r1,r3,#27 @ sticky
orrs r5,r1 @ collect further sticky bits
asrs r1,r3,#5 @ middle partial products Q25
adds r0,r1 @ final result
bx r14
@ compare floats in r0, r1: returns r0 = -1/0/+1 for x<y / x==y / x>y
@ NaN and denormal mantissas are flushed before the integer compare
.thumb_func
qfp_fcmp:
lsls r2,r0,#1
lsrs r2,#24
beq 1f
cmp r2,#0xff
bne 2f
1:
lsrs r0,#23 @ clear mantissa if NaN or denormal
lsls r0,#23
2:
lsls r2,r1,#1
lsrs r2,#24
beq 1f
cmp r2,#0xff
bne 2f
1:
lsrs r1,#23 @ clear mantissa if NaN or denormal
lsls r1,#23
2:
movs r2,#1 @ initialise result
eors r1,r0
bmi 4f @ opposite signs? then can proceed on basis of sign of x
eors r1,r0 @ restore y
bpl 1f
rsbs r2,#0 @ both negative? flip comparison
1:
cmp r0,r1
bgt 2f
blt 3f
5:
movs r2,#0
3:
rsbs r2,#0
2:
subs r0,r2,#0
bx r14
4: @ opposite signs; also catches +0 vs -0 (equal)
orrs r1,r0
adds r1,r1
beq 5b
cmp r0,#0
bge 2b
b 3b
@ convert float to signed int, rounding towards -Inf, clamping
.thumb_func
_ffix:
movs r1,#0 @ fall through
@ convert float in r0 to signed fixed point in r0, clamping
@ r1 = number of fraction bits in the result; out-of-range values
@ clamp to 0x7fffffff / 0x80000000
.thumb_func
qfp_float2fix:
push {r4,r14}
bl unpackx
movs r3,r2
adds r3,#130
bmi 6f @ -0?
add r2,r1 @ incorporate binary point position into exponent
subs r2,#23 @ r2 is now amount of left shift required
blt 1f @ requires right shift?
cmp r2,#7 @ overflow?
ble 4f
3: @ overflow
asrs r1,r0,#31 @ +ve:0 -ve:0xffffffff
mvns r1,r1 @ +ve:0xffffffff -ve:0
movs r0,#1
lsls r0,#31
5:
eors r0,r1 @ +ve:0x7fffffff -ve:0x80000000 (unsigned path: 0xffffffff)
pop {r4,r15}
1:
rsbs r2,#0 @ right shift for r0, >0
cmp r2,#32
blt 2f @ more than 32 bits of right shift?
movs r2,#32 @ saturate shift: result becomes 0 or -1
2:
asrs r0,r0,r2
pop {r4,r15}
6:
movs r0,#0 @ return integer 0
pop {r4,r15}
@ unsigned version
.thumb_func
_ffixu:
movs r1,#0 @ fall through
@ convert float in r0 to unsigned fixed point in r0, clamping
@ r1 = number of fraction bits; negative inputs return 0, overflow returns 0xffffffff
.thumb_func
qfp_float2ufix:
push {r4,r14}
bl unpackx
add r2,r1 @ incorporate binary point position into exponent
movs r1,r0
bmi 5b @ negative? return zero
subs r2,#23 @ r2 is now amount of left shift required
blt 1b @ requires right shift?
mvns r1,r0 @ ready to return 0xffffffff
cmp r2,#8 @ overflow?
bgt 5b
4:
lsls r0,r0,r2 @ result fits, left shifted
pop {r4,r15}
@ convert uint64 to float, rounding
.thumb_func
_ll_uto_f:
movs r2,#0 @ fall through
@ convert unsigned 64-bit fix to float, rounding; number of r0:r1 bits after point in r2
.thumb_func
qfp_ufix642float:
push {r4,r5,r14}
cmp r1,#0
bpl 3f @ positive? we can use signed code
lsls r5,r1,#31 @ contribution to sticky bits
orrs r5,r0 @ all discarded low bits become sticky
lsrs r0,r1,#1 @ use high word >>1 as mantissa (top bit now clear)
subs r2,#1 @ adjust exponent for the extra shift
b 4f
@ convert int64 to float, rounding
.thumb_func
_ll_sto_f:
movs r2,#0 @ fall through
@ convert signed 64-bit fix to float, rounding; number of r0:r1 bits after point in r2
.thumb_func
qfp_fix642float:
push {r4,r5,r14}
3:
movs r5,r0
orrs r5,r1
beq ret_pop45 @ zero? return +0
asrs r5,r1,#31 @ sign bits
2: @ normalise r0:r1 upwards until the next 7-bit shift would overflow
asrs r4,r1,#24 @ try shifting 7 bits at a time
cmp r4,r5
bne 1f @ next shift will overflow?
lsls r1,#7 @ shift r0:r1 left 7 places
lsrs r4,r0,#25
orrs r1,r4
lsls r0,#7
adds r2,#7 @ account for the shift in the exponent
b 2b
1:
movs r5,r0 @ low word supplies the sticky bits
movs r0,r1 @ high word is the mantissa
4:
rsbs r2,#0 @ negate accumulated shift...
adds r2,#32+29 @ ...and convert to the exponent convention packx expects
b packret @ pack r0 with sticky bits in r5
@ convert signed int to float, rounding
.thumb_func
_fflt:
movs r1,#0 @ fall through
@ convert signed fix to float, rounding; number of r0 bits after point in r1
.thumb_func
qfp_fix2float:
push {r4,r5,r14}
1:
movs r2,#29
subs r2,r1 @ fix exponent
packretns: @ pack and return, sticky bits=0
movs r5,#0
packret: @ common return point: "pack and return"
bl packx
ret_pop45:
pop {r4,r5,r15}
@ unsigned version
.thumb_func
_ffltu:
movs r1,#0 @ fall through
@ convert unsigned fix to float, rounding; number of r0 bits after point in r1
.thumb_func
qfp_ufix2float:
push {r4,r5,r14}
cmp r0,#0
bge 1b @ treat <2^31 as signed
movs r2,#30
subs r2,r1 @ fix exponent
lsls r5,r0,#31 @ one sticky bit
lsrs r0,#1 @ halve mantissa so it is non-negative
b packret
@ All the scientific functions are implemented using the CORDIC algorithm. For notation,
@ details not explained in the comments below, and a good overall survey see
@ "50 Years of CORDIC: Algorithms, Architectures, and Applications" by Meher et al.,
@ IEEE Transactions on Circuits and Systems Part I, Volume 56 Issue 9.
@ Register use:
@ r0: x
@ r1: y
@ r2: z/omega
@ r3: coefficient pointer
@ r4,r12: m
@ r5: i (shift)
@ Table entry encoding: angle*4 with two flag bits in the bottom two bits:
@ b0 set = do not increment the shift before this step (repeated angle),
@ b1 set = last entry in the table (returned in carry by cordic_?step).
cordic_start: @ initialisation
movs r5,#0 @ initial shift=0
mov r12,r4 @ save m for use in each step
b 5f
cordic_vstep: @ one step of algorithm in vector mode
cmp r1,#0 @ check sign of y
bgt 4f
b 1f @ y<=0: positive rotation
cordic_rstep: @ one step of algorithm in rotation mode
cmp r2,#0 @ check sign of angle
bge 1f
4:
subs r1,r6 @ negative rotation: y=y-(x>>i)
rsbs r7,#0 @ negate y>>i so x is corrected in the opposite sense below
adds r2,r4 @ accumulate angle
b 2f
1:
adds r1,r6 @ positive rotation: y=y+(x>>i)
subs r2,r4 @ accumulate angle
2:
mov r4,r12
muls r7,r4 @ apply sign from m
subs r0,r7 @ finish rotation: x=x{+/-}(y>>i)
5:
ldmia r3!,{r4} @ fetch next angle from table and bump pointer
lsrs r4,#1 @ repeated angle?
bcs 3f
adds r5,#1 @ adjust shift if not
3:
mov r6,r0
asrs r6,r5 @ x>>i
mov r7,r1
asrs r7,r5 @ y>>i
lsrs r4,#1 @ shift end flag into carry
bx r14
@ CORDIC rotation mode: rotate vector (r0,r1) driving the angle in r2 towards
@ zero; result vector (scaled by the CORDIC gain) returned in r0,r1.
@ Uses the table at r3 for the main iterations, then linearises the tail.
cordic_rot:
push {r6,r7,r14}
bl cordic_start @ initialise
1:
bl cordic_rstep
bcc 1b @ step until table finished
asrs r6,r0,#14 @ remaining small rotations can be linearised: see IV.B of paper referenced above
asrs r7,r1,#14 @ y>>14
asrs r2,#3 @ residual angle
muls r6,r2 @ all remaining CORDIC steps in a multiplication
muls r7,r2
mov r4,r12
muls r7,r4 @ apply sign from m
asrs r6,#12
asrs r7,#12
subs r0,r7 @ x=x{+/-}(yz>>k)
adds r1,r6 @ y=y+(xz>>k)
cordic_exit:
pop {r6,r7,r15}
@ CORDIC vector mode: drive y (r1) towards zero, accumulating the rotation
@ angle in r2; x (r0) ends up holding the magnitude times the CORDIC gain.
@ After the table is exhausted, continues with synthesised half-angle steps.
cordic_vec:
push {r6,r7,r14}
bl cordic_start @ initialise
1:
bl cordic_vstep
bcc 1b @ step until table finished
4:
cmp r1,#0 @ continue as in cordic_vstep but without using table; x is not affected as y is small
bgt 2f @ check sign of y
adds r1,r6 @ positive rotation: y=y+(x>>i)
subs r2,r4 @ accumulate angle
b 3f
2:
subs r1,r6 @ negative rotation: y=y-(x>>i)
adds r2,r4 @ accumulate angle
3:
asrs r6,#1 @ halve x>>i for the next synthesised step
asrs r4,#1 @ next "table entry"
bne 4b @ loop until the angle contribution reaches zero
b cordic_exit
@ .thumb_func
@ qfp_fsin: @ calculate sin and cos using CORDIC rotation method
@ push {r4,r5,r14}
@ movs r1,#24
@ bl qfp_float2fix @ range reduction by repeated subtraction/addition in fixed point
@ ldr r4,pi_q29
@ lsrs r4,#4 @ 2pi Q24
@ 1:
@ subs r0,r4
@ bge 1b
@ 1:
@ adds r0,r4
@ bmi 1b @ now in range 0..2pi
@ lsls r2,r0,#2 @ z Q26
@ lsls r5,r4,#1 @ pi Q26 (r4=pi/2 Q26)
@ ldr r0,=#0x136e9db4 @ initialise CORDIC x,y with scaling
@ movs r1,#0
@ 1:
@ cmp r2,r4 @ >pi/2?
@ blt 2f
@ subs r2,r5 @ reduce range to -pi/2..pi/2
@ rsbs r0,#0 @ rotate vector by pi
@ b 1b
@ 2:
@ lsls r2,#3 @ Q29
@ adr r3,tab_cc @ circular coefficients
@ movs r4,#1 @ m=1
@ bl cordic_rot
@ adds r1,#9 @ fiddle factor to make sin(0)==0
@ movs r2,#0 @ exponents to zero
@ movs r3,#0
@ movs r5,#0 @ no sticky bits
@ bl clampx
@ bl packx @ pack cosine
@ bl xchxy
@ bl clampx
@ b packretns @ pack sine
@ .thumb_func
@ qfp_fcos:
@ push {r14}
@ bl qfp_fsin
@ mov r0,r1 @ extract cosine result
@ pop {r15}
@ @ force r0 to lie in range [-1,1] Q29
@ clampx:
@ movs r4,#1
@ lsls r4,#29
@ cmp r0,r4
@ bgt 1f
@ rsbs r4,#0
@ cmp r0,r4
@ ble 1f
@ bx r14
@ 1:
@ movs r0,r4
@ bx r14
@ .thumb_func
@ qfp_ftan:
@ push {r4,r5,r6,r14}
@ bl qfp_fsin @ sine in r0/r2, cosine in r1/r3
@ b fdiv_n @ sin/cos
@ .thumb_func
@ qfp_fexp:
@ push {r4,r5,r14}
@ movs r1,#24
@ bl qfp_float2fix @ Q24: covers entire valid input range
@ asrs r1,r0,#16 @ Q8
@ ldr r2,=#5909 @ log_2(e) Q12
@ muls r2,r1 @ estimate exponent of result Q20 (always an underestimate)
@ asrs r2,#20 @ Q0
@ lsls r1,r0,#6 @ Q30
@ ldr r0,=#0x2c5c85fe @ ln(2) Q30
@ muls r0,r2 @ accurate contribution of estimated exponent
@ subs r1,r0 @ residual to be exponentiated, guaranteed ≥0, < about 0.75 Q30
@ @ here
@ @ r1: mantissa to exponentiate, 0...~0.75 Q30
@ @ r2: first exponent estimate
@ movs r5,#1 @ shift
@ adr r3,ftab_exp @ could use alternate words from dtab_exp to save space if required
@ movs r0,#1
@ lsls r0,#29 @ x=1 Q29
@ 3:
@ ldmia r3!,{r4}
@ subs r4,r1,r4
@ bmi 1f
@ movs r1,r4 @ keep result of subtraction
@ movs r4,r0
@ lsrs r4,r5
@ adcs r0,r4 @ x+=x>>i with rounding
@ 1:
@ adds r5,#1
@ cmp r5,#15
@ bne 3b
@ @ here
@ @ r0: exp a Q29 1..2+
@ @ r1: ε (residual x where x=a+ε), < 2^-14 Q30
@ @ r2: first exponent estimate
@ @ and we wish to calculate exp x=exp a exp ε~(exp a)(1+ε)
@ lsrs r3,r0,#15 @ exp a Q14
@ muls r3,r1 @ ε exp a Q44
@ lsrs r3,#15 @ ε exp a Q29
@ adcs r0,r3 @ (1+ε) exp a Q29 with rounding
@ b packretns @ pack result
@ .thumb_func
@ qfp_fln:
@ push {r4,r5,r14}
@ asrs r1,r0,#23
@ bmi 3f @ -ve argument?
@ beq 3f @ 0 argument?
@ cmp r1,#0xff
@ beq 4f @ +Inf/NaN
@ bl unpackx
@ adds r2,#1
@ ldr r3,=#0x2c5c85fe @ ln(2) Q30
@ lsrs r1,r3,#14 @ ln(2) Q16
@ muls r1,r2 @ result estimate Q16
@ asrs r1,#16 @ integer contribution to result
@ muls r3,r2
@ lsls r4,r1,#30
@ subs r3,r4 @ fractional contribution to result Q30, signed
@ lsls r0,#8 @ Q31
@ @ here
@ @ r0: mantissa Q31
@ @ r1: integer contribution to result
@ @ r3: fractional contribution to result Q30, signed
@ movs r5,#1 @ shift
@ adr r4,ftab_exp @ could use alternate words from dtab_exp to save space if required
@ 2:
@ movs r2,r0
@ lsrs r2,r5
@ adcs r2,r0 @ x+(x>>i) with rounding
@ bcs 1f @ >=2?
@ movs r0,r2 @ keep result
@ ldr r2,[r4]
@ subs r3,r2
@ 1:
@ adds r4,#4
@ adds r5,#1
@ cmp r5,#15
@ bne 2b
@ @ here
@ @ r0: residual x, nearly 2 Q31
@ @ r1: integer contribution to result
@ @ r3: fractional part of result Q30
@ asrs r0,#2
@ adds r0,r3,r0
@ cmp r1,#0
@ bne 2f
@ asrs r0,#1
@ lsls r1,#29
@ adds r0,r1
@ movs r2,#0
@ b packretns
@ 2:
@ lsls r1,#24
@ asrs r0,#6 @ Q24
@ adcs r0,r1 @ with rounding
@ movs r2,#5
@ b packretns
@ 3:
@ ldr r0,=#0xff800000 @ -Inf
@ pop {r4,r5,r15}
@ 4:
@ ldr r0,=#0x7f800000 @ +Inf
@ pop {r4,r5,r15}
@ .align 2
@ ftab_exp:
@ .word 0x19f323ed @ log 1+2^-1 Q30
@ .word 0x0e47fbe4 @ log 1+2^-2 Q30
@ .word 0x0789c1dc @ log 1+2^-3 Q30
@ .word 0x03e14618 @ log 1+2^-4 Q30
@ .word 0x01f829b1 @ log 1+2^-5 Q30
@ .word 0x00fe0546 @ log 1+2^-6 Q30
@ .word 0x007f80aa @ log 1+2^-7 Q30
@ .word 0x003fe015 @ log 1+2^-8 Q30
@ .word 0x001ff803 @ log 1+2^-9 Q30
@ .word 0x000ffe00 @ log 1+2^-10 Q30
@ .word 0x0007ff80 @ log 1+2^-11 Q30
@ .word 0x0003ffe0 @ log 1+2^-12 Q30
@ .word 0x0001fff8 @ log 1+2^-13 Q30
@ .word 0x0000fffe @ log 1+2^-14 Q30
@ .thumb_func
@ qfp_fatan2:
@ push {r4,r5,r14}
@ @ unpack arguments and shift one down to have common exponent
@ bl unpackx
@ bl xchxy
@ bl unpackx
@ lsls r0,r0,#5 @ Q28
@ lsls r1,r1,#5 @ Q28
@ adds r4,r2,r3 @ this is -760 if both arguments are 0 and at least -380-126=-506 otherwise
@ asrs r4,#9
@ adds r4,#1
@ bmi 2f @ force y to 0 proper, so result will be zero
@ subs r4,r2,r3 @ calculate shift
@ bge 1f @ ex>=ey?
@ rsbs r4,#0 @ make shift positive
@ asrs r0,r4
@ cmp r4,#28
@ blo 3f
@ asrs r0,#31
@ b 3f
@ 1:
@ asrs r1,r4
@ cmp r4,#28
@ blo 3f
@ 2:
@ @ here |x|>>|y| or both x and y are ±0
@ cmp r0,#0
@ bge 4f @ x positive, return signed 0
@ ldr r0,pi_q29 @ x negative, return +/- pi
@ asrs r1,#31
@ eors r0,r1
@ b 7f
@ 4:
@ asrs r0,r1,#31
@ b 7f
@ 3:
@ movs r2,#0 @ initial angle
@ cmp r0,#0 @ x negative
@ bge 5f
@ rsbs r0,#0 @ rotate to 1st/4th quadrants
@ rsbs r1,#0
@ ldr r2,pi_q29 @ pi Q29
@ 5:
@ adr r3,tab_cc @ circular coefficients
@ movs r4,#1 @ m=1
@ bl cordic_vec @ also produces magnitude (with scaling factor 1.646760119), which is discarded
@ mov r0,r2 @ result here is -pi/2..3pi/2 Q29
@ @ asrs r2,#29
@ @ subs r0,r2
@ ldr r2,pi_q29 @ pi Q29
@ adds r4,r0,r2 @ attempt to fix -3pi/2..-pi case
@ bcs 6f @ -pi/2..0? leave result as is
@ subs r4,r0,r2 @ <pi? leave as is
@ bmi 6f
@ subs r0,r4,r2 @ >pi: take off 2pi
@ 6:
@ subs r0,#1 @ fiddle factor so atan2(0,1)==0
@ 7:
@ movs r2,#0 @ exponent for pack
@ b packretns
@ .align 2
@ .ltorg
@ @ first entry in following table is pi Q29
@ pi_q29:
@ @ circular CORDIC coefficients: atan(2^-i), b0=flag for preventing shift, b1=flag for end of table
@ tab_cc:
@ .word 0x1921fb54*4+1 @ no shift before first iteration
@ .word 0x0ed63383*4+0
@ .word 0x07d6dd7e*4+0
@ .word 0x03fab753*4+0
@ .word 0x01ff55bb*4+0
@ .word 0x00ffeaae*4+0
@ .word 0x007ffd55*4+0
@ .word 0x003fffab*4+0
@ .word 0x001ffff5*4+0
@ .word 0x000fffff*4+0
@ .word 0x0007ffff*4+0
@ .word 0x00040000*4+0
@ .word 0x00020000*4+0+2 @ +2 marks end
.align 2
@ float subtraction: r0 - r1 -> r0, IEEE single precision
.thumb_func
__aeabi_fsub:
ldr r2,=#0x80000000
eors r1,r2 @ flip sign on second argument
@ drop into fadd, on .align2:ed boundary
@ float addition: r0 + r1 -> r0, single precision, round-to-nearest-even.
@ Mantissas are processed in 2's complement form; zero/denormal and Inf/NaN
@ exponents are pushed far out of range by the fa_?e0/fa_?e255 fixups so the
@ ordinary magnitude-comparison paths handle them.
.thumb_func
__aeabi_fadd:
push {r4,r5,r6,r14}
asrs r4,r0,#31 @ x sign extended
lsls r2,r0,#1
lsrs r2,#24 @ x exponent
beq fa_xe0
cmp r2,#255
beq fa_xe255
fa_xe:
asrs r5,r1,#31 @ y sign extended
lsls r3,r1,#1
lsrs r3,#24 @ y exponent
beq fa_ye0
cmp r3,#255
beq fa_ye255
fa_ye:
ldr r6,=#0x007fffff
ands r0,r0,r6 @ extract mantissa bits
ands r1,r1,r6
adds r6,#1 @ r6=0x00800000
orrs r0,r0,r6 @ set implied 1
orrs r1,r1,r6
eors r0,r0,r4 @ complement...
eors r1,r1,r5
subs r0,r0,r4 @ ... and add 1 if sign bit is set: 2's complement
subs r1,r1,r5
subs r5,r3,r2 @ ye-xe
subs r4,r2,r3 @ xe-ye
bmi fa_ygtx
@ here xe>=ye
cmp r4,#30
bge fa_xmgty @ xe much greater than ye?
adds r5,#32
movs r3,r2 @ save exponent
@ here y in r1 must be shifted down r4 places to align with x in r0
movs r2,r1
lsls r2,r2,r5 @ keep the bits we will shift off the bottom of r1
asrs r1,r1,r4
b fa_0
.ltorg
fa_ymgtx:
movs r2,#0 @ result is just y
movs r0,r1
b fa_1
fa_xmgty:
movs r3,r2 @ result is just x
movs r2,#0
b fa_1
fa_ygtx:
@ here ye>xe
cmp r5,#30
bge fa_ymgtx @ ye much greater than xe?
adds r4,#32
@ here x in r0 must be shifted down r5 places to align with y in r1
movs r2,r0
lsls r2,r2,r4 @ keep the bits we will shift off the bottom of r0
asrs r0,r0,r5
fa_0:
adds r0,r1 @ result is now in r0:r2, possibly highly denormalised or zero; exponent in r3
beq fa_9 @ if zero, inputs must have been of identical magnitude and opposite sign, so return +0
fa_1:
lsrs r1,r0,#31 @ sign bit
beq fa_8
mvns r0,r0 @ negative: take absolute value of r0:r2...
rsbs r2,r2,#0
bne fa_8
adds r0,#1 @ ...propagating the carry when the low word is zero
fa_8:
adds r6,r6
@ r6=0x01000000
cmp r0,r6
bhs fa_2
fa_3:
adds r2,r2 @ normalisation loop
adcs r0,r0
subs r3,#1 @ adjust exponent
cmp r0,r6
blo fa_3
fa_2:
@ here r0:r2 is the result mantissa 0x01000000<=r0<0x02000000, r3 the exponent, and r1 the sign bit
lsrs r0,#1 @ shift to final position; rounding bit goes into carry
bcc fa_4
@ rounding bits here are 1:r2
adds r0,#1 @ round up
cmp r2,#0
beq fa_5 @ sticky bits all zero?
fa_4:
cmp r3,#254
bhs fa_6 @ exponent too large or negative?
lsls r1,#31 @ pack everything
add r0,r1
lsls r3,#23
add r0,r3
fa_end:
pop {r4,r5,r6,r15}
fa_9:
cmp r2,#0 @ result zero?
beq fa_end @ return +0
b fa_1
fa_5:
lsrs r0,#1
lsls r0,#1 @ round to even
b fa_4
fa_6:
bge fa_7
@ underflow
@ can handle denormals here
lsls r0,r1,#31 @ result is signed zero
pop {r4,r5,r6,r15}
fa_7:
@ overflow
lsls r0,r1,#8
adds r0,#255
lsls r0,#23 @ result is signed infinity
pop {r4,r5,r6,r15}
fa_xe0:
@ can handle denormals here
subs r2,#32
adds r2,r4 @ exponent -32 if +ve, -33 if -ve
b fa_xe
fa_xe255:
@ can handle NaNs here
lsls r2,#8
add r2,r2,r4 @ exponent ~64k for +Inf, ~64k-1 for -Inf
b fa_xe
fa_ye0:
@ can handle denormals here
subs r3,#32
adds r3,r5 @ exponent -32 if +ve, -33 if -ve
b fa_ye
fa_ye255:
@ can handle NaNs here
lsls r3,#8
add r3,r3,r5 @ exponent ~64k for +Inf, ~64k-1 for -Inf
b fa_ye
.align 2
@ float multiplication: r0 * r1 -> r0, single precision, round-to-nearest-even.
@ The result sign is computed up front and held in r14 bit 31; the 24x24-bit
@ mantissa product is formed from two MULS results spliced together.
.thumb_func
__aeabi_fmul:
push {r7,r14}
mov r2,r0
eors r2,r1 @ sign of result
lsrs r2,#31
lsls r2,#31
mov r14,r2 @ stash result sign in r14 bit 31
lsls r0,#1 @ discard sign bits
lsls r1,#1
lsrs r2,r0,#24 @ xe
beq fm_xe0
cmp r2,#255
beq fm_xe255
fm_xe:
lsrs r3,r1,#24 @ ye
beq fm_ye0
cmp r3,#255
beq fm_ye255
fm_ye:
adds r7,r2,r3 @ exponent of result (will possibly be incremented)
subs r7,#128 @ adjust bias for packing
lsls r0,#8 @ x mantissa
lsls r1,#8 @ y mantissa
lsrs r0,#9
lsrs r1,#9
adds r2,r0,r1 @ for later
mov r12,r2
lsrs r2,r0,#7 @ x[22..7] Q16
lsrs r3,r1,#7 @ y[22..7] Q16
muls r2,r2,r3 @ result [45..14] Q32: never an overestimate and worst case error is 2*(2^7-1)*(2^23-2^7)+(2^7-1)^2 = 2130690049 < 2^31
muls r0,r0,r1 @ result [31..0] Q46
lsrs r2,#18 @ result [45..32] Q14
bcc 1f
cmp r0,#0
bmi 1f
adds r2,#1 @ fix error in r2
1:
lsls r3,r0,#9 @ bits off bottom of result
lsrs r0,#23 @ Q23
lsls r2,#9
adds r0,r2 @ cut'n'shut
add r0,r12 @ implied 1*(x+y) to compensate for no insertion of implied 1s
@ result-1 in r3:r0 Q23+32, i.e., in range [0,3)
lsrs r1,r0,#23
bne fm_0 @ branch if we need to shift down one place
@ here 1<=result<2
cmp r7,#254
bhs fm_3a @ catches both underflow and overflow
lsls r3,#1 @ sticky bits at top of R3, rounding bit in carry
bcc fm_1 @ no rounding
beq fm_2 @ rounding tie?
adds r0,#1 @ round up
fm_1:
adds r7,#1 @ for implied 1
lsls r7,#23 @ pack result
add r0,r7
add r0,r14 @ apply sign
pop {r7,r15}
fm_2: @ rounding tie
adds r0,#1
fm_3:
lsrs r0,#1
lsls r0,#1 @ clear bottom bit
b fm_1
@ here 1<=result-1<3
fm_0:
adds r7,#1 @ increment exponent
cmp r7,#254
bhs fm_3b @ catches both underflow and overflow
lsrs r0,#1 @ shift mantissa down
bcc fm_1a @ no rounding
adds r0,#1 @ assume we will round up
cmp r3,#0 @ sticky bits
beq fm_3c @ rounding tie?
fm_1a:
adds r7,r7
adds r7,#1 @ for implied 1
lsls r7,#22 @ pack result
add r0,r7
add r0,r14 @ apply sign
pop {r7,r15}
fm_3c:
lsrs r0,#1
lsls r0,#1 @ clear bottom bit
b fm_1a
fm_xe0: @ x zero/denormal: fall through so the <<8 makes the exponent strongly negative
subs r2,#16
fm_xe255: @ x Inf/NaN: <<8 makes the exponent strongly positive
lsls r2,#8
b fm_xe
fm_ye0: @ y zero/denormal (fall-through as for x above)
subs r3,#16
fm_ye255: @ y Inf/NaN
lsls r3,#8
b fm_ye
@ here the result is under- or overflowing
fm_3b:
bge fm_4 @ branch on overflow
@ trap case where result is denormal 0x007fffff + 0.5ulp or more
adds r7,#1 @ exponent=-1?
bne fm_5
@ corrected mantissa will be >= 3.FFFFFC (0x1fffffe Q23)
@ so r0 >= 2.FFFFFC (0x17ffffe Q23)
adds r0,#2
lsrs r0,#23
cmp r0,#3
bne fm_5
b fm_6
fm_3a:
bge fm_4 @ branch on overflow
@ trap case where result is denormal 0x007fffff + 0.5ulp or more
adds r7,#1 @ exponent=-1?
bne fm_5
adds r0,#1 @ mantissa=0xffffff (i.e., r0=0x7fffff)?
lsrs r0,#23
beq fm_5
fm_6:
movs r0,#1 @ return smallest normal
lsls r0,#23
add r0,r14
pop {r7,r15}
fm_5:
mov r0,r14 @ underflow: return signed zero
pop {r7,r15}
fm_4:
movs r0,#0xff @ overflow: return signed infinity
lsls r0,#23
add r0,r14
pop {r7,r15}
@ This version of the division algorithm uses external divider hardware to estimate the
@ reciprocal of the divisor to about 14 bits; then a multiplication step to get a first
@ quotient estimate; then the remainder based on this estimate is used to calculate a
@ correction to the quotient. The result is good to about 27 bits and so we only need
@ to calculate the exact remainder when close to a rounding boundary.
@ NOTE(review): the code below actually seeds the reciprocal from the rcpapp
@ lookup table (lut5) rather than divider hardware — the header above appears
@ to describe a different variant; confirm against the project history.
@ float division: r0 / r1 -> r0, single precision
.align 2
.thumb_func
__aeabi_fdiv:
push {r4,r5,r6,r14}
fdiv_n:
movs r4,#1
lsls r4,#23 @ implied 1 position
lsls r2,r1,#9 @ clear out sign and exponent
lsrs r2,r2,#9
orrs r2,r2,r4 @ divisor mantissa Q23 with implied 1
@ here
@ r0=packed dividend
@ r1=packed divisor
@ r2=divisor mantissa Q23
@ r4=1<<23
// see divtest.c
lsrs r3,r2,#18 @ x2=x>>18; // Q5 32..63
adr r5,rcpapp-32
ldrb r3,[r5,r3] @ u=lut5[x2-32]; // Q8
lsls r5,r2,#5
muls r5,r5,r3
asrs r5,#14 @ e=(i32)(u*(x<<5))>>14; // Q22
asrs r6,r5,#11
muls r6,r6,r6 @ e2=(e>>11)*(e>>11); // Q22
subs r5,r6
muls r5,r5,r3 @ c=(e-e2)*u; // Q30
lsls r6,r3,#8
asrs r5,#13
adds r5,#1
asrs r5,#1
subs r5,r6,r5 @ u0=(u<<8)-((c+0x2000)>>14); // Q16
@ here
@ r0=packed dividend
@ r1=packed divisor
@ r2=divisor mantissa Q23
@ r4=1<<23
@ r5=reciprocal estimate Q16
lsrs r6,r0,#23
uxtb r3,r6 @ dividend exponent
lsls r0,#9
lsrs r0,#9
orrs r0,r0,r4 @ dividend mantissa Q23
lsrs r1,#23
eors r6,r1 @ sign of result in bit 8
lsrs r6,#8
lsls r6,#31 @ sign of result in bit 31, other bits clear
@ here
@ r0=dividend mantissa Q23
@ r1=divisor sign+exponent
@ r2=divisor mantissa Q23
@ r3=dividend exponent
@ r5=reciprocal estimate Q16
@ r6b31=sign of result
uxtb r1,r1 @ divisor exponent
cmp r1,#0
beq retinf
cmp r1,#255
beq 20f @ divisor is infinite
cmp r3,#0
beq retzero
cmp r3,#255
beq retinf
subs r3,r1 @ initial result exponent (no bias)
adds r3,#125 @ add bias
lsrs r1,r0,#8 @ dividend mantissa Q15
@ here
@ r0=dividend mantissa Q23
@ r1=dividend mantissa Q15
@ r2=divisor mantissa Q23
@ r3=initial result exponent
@ r5=reciprocal estimate Q16
@ r6b31=sign of result
muls r1,r5
lsrs r1,#16 @ Q15 qu0=(q15)(u*y0);
lsls r0,r0,#15 @ dividend Q38
movs r4,r2
muls r4,r1 @ Q38 qu0*x
subs r4,r0,r4 @ Q38 re0=(y<<15)-qu0*x; note this remainder is signed
asrs r4,#10
muls r4,r5 @ Q44 qu1=(re0>>10)*u; this quotient correction is also signed
asrs r4,#16 @ Q28
lsls r1,#13
adds r1,r1,r4 @ Q28 qu=(qu0<<13)+(qu1>>16);
@ here
@ r0=dividend mantissa Q38
@ r1=quotient Q28
@ r2=divisor mantissa Q23
@ r3=initial result exponent
@ r6b31=sign of result
lsrs r4,r1,#28
bne 1f
@ here the quotient is less than 1<<28 (i.e., result mantissa <1.0)
adds r1,#5
lsrs r4,r1,#4 @ rounding + small reduction in systematic bias
bcc 2f @ skip if we are not near a rounding boundary
lsrs r1,#3 @ quotient Q25
lsls r0,#10 @ dividend mantissa Q48
muls r1,r1,r2 @ quotient*divisor Q48
subs r0,r0,r1 @ remainder Q48
bmi 2f
b 3f
1:
@ here the quotient is at least 1<<28 (i.e., result mantissa >=1.0)
adds r3,#1 @ bump exponent (and shift mantissa down one more place)
adds r1,#9
lsrs r4,r1,#5 @ rounding + small reduction in systematic bias
bcc 2f @ skip if we are not near a rounding boundary
lsrs r1,#4 @ quotient Q24
lsls r0,#9 @ dividend mantissa Q47
muls r1,r1,r2 @ quotient*divisor Q47
subs r0,r0,r1 @ remainder Q47
bmi 2f
3:
adds r4,#1 @ increment quotient as we are above the rounding boundary
@ here
@ r3=result exponent
@ r4=correctly rounded quotient Q23 in range [1,2] *note closed interval*
@ r6b31=sign of result
2:
cmp r3,#254
bhs 10f @ this catches both underflow and overflow
lsls r1,r3,#23
adds r0,r4,r1 @ pack exponent and mantissa
adds r0,r6 @ apply sign
pop {r4,r5,r6,r15}
@ here divisor is infinite; dividend exponent in r3
20:
cmp r3,#255
bne retzero
retinf:
movs r0,#255
21:
lsls r0,#23
orrs r0,r6 @ signed infinity
pop {r4,r5,r6,r15}
10:
bge retinf @ overflow?
adds r1,r3,#1
bne retzero @ exponent <-1? return 0
@ here exponent is exactly -1
lsrs r1,r4,#25
bcc retzero @ mantissa is not 01000000?
@ return minimum normal
movs r0,#1
lsls r0,#23
orrs r0,r6
pop {r4,r5,r6,r15}
retzero:
movs r0,r6 @ signed zero
pop {r4,r5,r6,r15}
@ x2=[32:1:63]/32;
@ round(256 ./(x2+1/64))
.align 2
rcpapp:
.byte 252,245,237,231,224,218,213,207,202,197,193,188,184,180,176,172
.byte 169,165,162,159,156,153,150,148,145,142,140,138,135,133,131,129
@ The square root routine uses an initial approximation to the reciprocal of the square root of the argument based
@ on the top four bits of the mantissa (possibly shifted one place to make the exponent even). It then performs two
@ Newton-Raphson iterations, resulting in about 14 bits of accuracy. This reciprocal is then multiplied by
@ the original argument to produce an approximation to the result, again with about 14 bits of accuracy.
@ Then a remainder is calculated, and multiplied by the reciprocal estimate to generate a correction term
@ giving a final answer to about 28 bits of accuracy. A final remainder calculation rounds to the correct
@ result if necessary.
@ Again, the fixed-point calculation is carefully implemented to preserve accuracy, and similar comments to those
@ made above on the fast division routine apply.
@ The reciprocal square root calculation has been tested for all possible (possibly shifted) input mantissa values.
@ Entry: r0 = packed float. Exit: r0 = packed sqrt(r0).
@ Special cases: -0 -> -0; negative non-zero -> -Inf; +Inf -> +Inf; +/-0 exponent handled via sq_2.
.align 2
.thumb_func
_fsqrt:
push {r4}
lsls r1,r0,#1
bcs sq_0 @ negative?
lsls r1,#8
lsrs r1,#9 @ mantissa
movs r2,#1
lsls r2,#23
adds r1,r2 @ insert implied 1
lsrs r2,r0,#23 @ extract exponent
beq sq_2 @ zero?
cmp r2,#255 @ infinite?
beq sq_1
adds r2,#125 @ correction for packing
asrs r2,#1 @ exponent/2, LSB into carry
bcc 1f
lsls r1,#1 @ was even: double mantissa; mantissa y now 1..4 Q23
1:
adr r4,rsqrtapp-4@ first four table entries are never accessed because of the mantissa's leading 1
lsrs r3,r1,#21 @ y Q2
ldrb r4,[r4,r3] @ initial approximation to reciprocal square root a0 Q8
lsrs r0,r1,#7 @ y Q16: first Newton-Raphson iteration
muls r0,r4 @ a0*y Q24
muls r0,r4 @ r0=p0=a0*y*y Q32
asrs r0,#12 @ r0 Q20
muls r0,r4 @ dy0=a0*r0 Q28
asrs r0,#13 @ dy0 Q15
lsls r4,#8 @ a0 Q16
subs r4,r0 @ a1=a0-dy0/2 Q16-Q15/2 -> Q16
adds r4,#170 @ mostly remove systematic error in this approximation: gains approximately 1 bit
movs r0,r4 @ second Newton-Raphson iteration
muls r0,r0 @ a1*a1 Q32
lsrs r0,#15 @ a1*a1 Q17
lsrs r3,r1,#8 @ y Q15
muls r0,r3 @ r1=p1=a1*a1*y Q32
asrs r0,#12 @ r1 Q20
muls r0,r4 @ dy1=a1*r1 Q36
asrs r0,#21 @ dy1 Q15
subs r4,r0 @ a2=a1-dy1/2 Q16-Q15/2 -> Q16
muls r3,r4 @ a3=y*a2 Q31
lsrs r3,#15 @ a3 Q16
@ here a2 is an approximation to the reciprocal square root
@ and a3 is an approximation to the square root
movs r0,r3
muls r0,r0 @ a3*a3 Q32
lsls r1,#9 @ y Q32
subs r0,r1,r0 @ r2=y-a3*a3 Q32 remainder
asrs r0,#5 @ r2 Q27
muls r4,r0 @ r2*a2 Q43
lsls r3,#7 @ a3 Q23
asrs r0,r4,#15 @ r2*a2 Q28
adds r0,#16 @ rounding to Q24
asrs r0,r0,#6 @ r2*a2 Q22
add r3,r0 @ a4 Q23: candidate final result
bcc sq_3 @ near rounding boundary? skip if no rounding needed
mov r4,r3
adcs r4,r4 @ a4+0.5ulp Q24
muls r4,r4 @ Q48
lsls r1,#16 @ y Q48
subs r1,r4 @ remainder Q48
bmi sq_3
adds r3,#1 @ round up
sq_3:
lsls r2,#23 @ pack exponent
adds r0,r2,r3
sq_6:
pop {r4}
bx r14
sq_0:
lsrs r1,#24
beq sq_2 @ -0: return it
@ here negative and not -0: return -Inf
asrs r0,#31
sq_5:
lsls r0,#23
b sq_6
sq_1: @ +Inf
lsrs r0,#23
b sq_5
sq_2:
lsrs r0,#31
lsls r0,#31 @ signed zero
b sq_6
@ round(sqrt(2^22./[72:16:248]))
rsqrtapp:
.byte 0xf1,0xda,0xc9,0xbb, 0xb0,0xa6,0x9e,0x97, 0x91,0x8b,0x86,0x82
@ Notation:
@ rx:ry means the concatenation of rx and ry with rx having the less significant bits
@ IEEE double in ra:rb ->
@ mantissa in ra:rb 12Q52 (53 significant bits) with implied 1 set
@ exponent in re
@ sign in rs
@ trashes rt
@ Zero/denormal and Inf/NaN inputs are given a mantissa of exactly 1.0 and an
@ exponent pushed far out of the normal range (<<12) so downstream range
@ checks treat them correctly.  l\@_n labels use the GAS macro invocation
@ counter \@ to stay unique per expansion.
.macro mdunpack ra,rb,re,rs,rt
lsrs \re,\rb,#20 @ extract sign and exponent
subs \rs,\re,#1
lsls \rs,#20
subs \rb,\rs @ clear sign and exponent in mantissa; insert implied 1
lsrs \rs,\re,#11 @ sign
lsls \re,#21
lsrs \re,#21 @ exponent
beq l\@_1 @ zero exponent?
adds \rt,\re,#1
lsrs \rt,#11
beq l\@_2 @ exponent != 0x7ff? then done
l\@_1: @ special value: force mantissa to 1.0 and exponent out of range
movs \ra,#0
movs \rb,#1
lsls \rb,#20
subs \re,#128
lsls \re,#12
l\@_2:
.endm
@ IEEE double in ra:rb ->
@ signed mantissa in ra:rb 12Q52 (53 significant bits) with implied 1
@ exponent in re
@ trashes rt0 and rt1
@ +zero, +denormal -> exponent=-0x80000
@ -zero, -denormal -> exponent=-0x80000
@ +Inf, +NaN -> exponent=+0x77f000
@ -Inf, -NaN -> exponent=+0x77e000
@ Like mdunpack but the sign is folded into the mantissa (2's complement)
@ rather than returned separately.
.macro mdunpacks ra,rb,re,rt0,rt1
lsrs \re,\rb,#20 @ extract sign and exponent
lsrs \rt1,\rb,#31 @ sign only
subs \rt0,\re,#1
lsls \rt0,#20
subs \rb,\rt0 @ clear sign and exponent in mantissa; insert implied 1
lsls \re,#21
bcc l\@_1 @ skip on positive
mvns \rb,\rb @ negate mantissa
rsbs \ra,#0
bcc l\@_1
adds \rb,#1 @ propagate carry when low word is zero
l\@_1:
lsrs \re,#21
beq l\@_2 @ zero exponent?
adds \rt0,\re,#1
lsrs \rt0,#11
beq l\@_3 @ exponent != 0x7ff? then done
subs \re,\rt1
l\@_2: @ special value: force mantissa to +/-1.0, exponent out of range
movs \ra,#0
lsls \rt1,#1 @ +ve: 0 -ve: 2
adds \rb,\rt1,#1 @ +ve: 1 -ve: 3
lsls \rb,#30 @ create +/-1 mantissa
asrs \rb,#10
subs \re,#128
lsls \re,#12
l\@_3:
.endm
.align 2
@ double subtraction: (r0:r1) - (r2:r3) -> r0:r1, IEEE double precision
@ (low word in r0/r2, high word with sign+exponent in r1/r3)
.thumb_func
__aeabi_dsub:
push {r4-r7,r14}
movs r4,#1
lsls r4,#31
eors r3,r4 @ flip sign on second argument
b da_entry @ continue in dadd
.align 2
@ double addition: (r0:r1) + (r2:r3) -> r0:r1, round-to-nearest-even.
@ Operands are unpacked to signed 2's-complement mantissas (mdunpacks), the
@ smaller is aligned to the larger, and the sum is renormalised and packed.
@ Rounding + sticky bits are carried in r4 throughout.
.thumb_func
__aeabi_dadd:
push {r4-r7,r14}
da_entry:
mdunpacks r0,r1,r4,r6,r7
mdunpacks r2,r3,r5,r6,r7
subs r7,r5,r4 @ ye-xe
subs r6,r4,r5 @ xe-ye
bmi da_ygtx
@ here xe>=ye: need to shift y down r6 places
mov r12,r4 @ save exponent
cmp r6,#32
bge da_xrgty @ xe rather greater than ye?
adds r7,#32
movs r4,r2
lsls r4,r4,r7 @ rounding bit + sticky bits
da_xgty0:
movs r5,r3
lsls r5,r5,r7
lsrs r2,r6
asrs r3,r6
orrs r2,r5
da_add:
adds r0,r2
adcs r1,r3
da_pack:
@ here unnormalised signed result (possibly 0) is in r0:r1 with exponent r12, rounding + sticky bits in r4
@ Note that if a large normalisation shift is required then the arguments were close in magnitude and so we
@ cannot have not gone via the xrgty/yrgtx paths. There will therefore always be enough high bits in r4
@ to provide a correct continuation of the exact result.
@ now pack result back up
lsrs r3,r1,#31 @ get sign bit
beq 1f @ skip on positive
mvns r1,r1 @ negate mantissa
mvns r0,r0
movs r2,#0
rsbs r4,#0
adcs r0,r2
adcs r1,r2
1:
mov r2,r12 @ get exponent
lsrs r5,r1,#21
bne da_0 @ shift down required?
lsrs r5,r1,#20
bne da_1 @ normalised?
cmp r0,#0
beq da_5 @ could mantissa be zero?
da_2:
adds r4,r4 @ normalisation loop: shift r4:r0:r1 up one place
adcs r0,r0
adcs r1,r1
subs r2,#1 @ adjust exponent
lsrs r5,r1,#20
beq da_2
da_1:
lsls r4,#1 @ check rounding bit
bcc da_3
da_4:
adds r0,#1 @ round up
bcc 2f
adds r1,#1 @ propagate carry into high word
2:
cmp r4,#0 @ sticky bits zero?
bne da_3
lsrs r0,#1 @ round to even
lsls r0,#1
da_3:
subs r2,#1
bmi da_6
adds r4,r2,#2 @ check if exponent is overflowing
lsrs r4,#11
bne da_7
lsls r2,#20 @ pack exponent and sign
add r1,r2
lsls r3,#31
add r1,r3
pop {r4-r7,r15}
da_7:
@ here exponent overflow: return signed infinity
lsls r1,r3,#31
ldr r3,=#0x7ff00000
orrs r1,r3
b 1f
da_6:
@ here exponent underflow: return signed zero
lsls r1,r3,#31
1:
movs r0,#0
pop {r4-r7,r15}
da_5:
@ here mantissa could be zero
cmp r1,#0
bne da_2
cmp r4,#0
bne da_2
@ inputs must have been of identical magnitude and opposite sign, so return +0
pop {r4-r7,r15}
da_0:
@ here a shift down by one place is required for normalisation
adds r2,#1 @ adjust exponent
lsls r6,r0,#31 @ save rounding bit
lsrs r0,#1
lsls r5,r1,#31
orrs r0,r5
lsrs r1,#1
cmp r6,#0
beq da_3
b da_4
da_xrgty: @ xe>ye and shift>=32 places
cmp r6,#60
bge da_xmgty @ xe much greater than ye?
subs r6,#32
adds r7,#64
movs r4,r2
lsls r4,r4,r7 @ these would be shifted off the bottom of the sticky bits
beq 1f
movs r4,#1 @ collapse them into a single sticky bit
1:
lsrs r2,r2,r6
orrs r4,r2
movs r2,r3
lsls r3,r3,r7
orrs r4,r3
asrs r3,r2,#31 @ propagate sign bit
b da_xgty0
da_ygtx:
@ here ye>xe: need to shift x down r7 places
mov r12,r5 @ save exponent
cmp r7,#32
bge da_yrgtx @ ye rather greater than xe?
adds r6,#32
movs r4,r0
lsls r4,r4,r6 @ rounding bit + sticky bits
da_ygtx0:
movs r5,r1
lsls r5,r5,r6
lsrs r0,r7
asrs r1,r7
orrs r0,r5
b da_add
da_yrgtx:
cmp r7,#60
bge da_ymgtx @ ye much greater than xe?
subs r7,#32
adds r6,#64
movs r4,r0
lsls r4,r4,r6 @ these would be shifted off the bottom of the sticky bits
beq 1f
movs r4,#1 @ collapse them into a single sticky bit
1:
lsrs r0,r0,r7
orrs r4,r0
movs r0,r1
lsls r1,r1,r6
orrs r4,r1
asrs r1,r0,#31 @ propagate sign bit
b da_ygtx0
da_ymgtx: @ result is just y
movs r0,r2
movs r1,r3
da_xmgty: @ result is just x
movs r4,#0 @ clear sticky bits
b da_pack
.ltorg
@ equivalent of UMULL
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
@ Schoolbook 32x32->64 unsigned multiply from four 16x16 MULS partial
@ products (ARMv6-M MULS returns only the low 32 bits).  The l\@_1 fixup
@ adds the carry lost when the two cross terms overflow 32 bits.
.macro mul32_32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ (x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
lsrs \rt2,\rx,#16 @ xh
muls \rt1,\rt2 @ xhyl=M0
lsrs \rt4,\ry,#16 @ yh
muls \rt2,\rt4 @ xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ xlyh=M1
adds \rt1,\rt3 @ M0+M1=M
bcc l\@_1 @ addition of the two cross terms can overflow, so add carry into H
movs \rt3,#1 @ 1
lsls \rt3,#16 @ 0x10000
adds \rt2,\rt3 @ H'
l\@_1:
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ ML
lsrs \rzh,\rt1,#16 @ MH
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ZH
.endm
@ SUMULL: x signed, y unsigned
@ in table below ¯ means signed variable
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
@ As mul32_32_64 but x is treated as signed: the high cross term and high
@ partial product use ASRS, and the sign extension of the middle sum is
@ accumulated into the high word via ADCS.
.macro muls32_32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ ¯(x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
asrs \rt2,\rx,#16 @ ¯xh
muls \rt1,\rt2 @ ¯xhyl=M0
lsrs \rt4,\ry,#16 @ yh
muls \rt2,\rt4 @ ¯xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ xlyh=M1
asrs \rt4,\rt1,#31 @ M0sx (M1 sign extension is zero)
adds \rt1,\rt3 @ M0+M1=M
movs \rt3,#0 @ 0
adcs \rt4,\rt3 @ ¯Msx
lsls \rt4,#16 @ ¯Msx<<16
adds \rt2,\rt4 @ H'
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ M~
lsrs \rzh,\rt1,#16 @ M~
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ¯ZH
.endm
@ SSMULL: x signed, y signed
@ in table below ¯ means signed variable
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
@ Fully signed variant: both cross terms are signed, so the sign extension
@ of their sum must be flipped when the addition overflows (BVC/MVNS fixup).
.macro muls32_s32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ ¯(x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
asrs \rt2,\rx,#16 @ ¯xh
muls \rt1,\rt2 @ ¯xhyl=M0
asrs \rt4,\ry,#16 @ ¯yh
muls \rt2,\rt4 @ ¯xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ ¯xlyh=M1
adds \rt1,\rt3 @ ¯M0+M1=M
asrs \rt3,\rt1,#31 @ Msx
bvc l\@_1 @
mvns \rt3,\rt3 @ ¯Msx flip sign extension bits if overflow
l\@_1:
lsls \rt3,#16 @ ¯Msx<<16
adds \rt2,\rt3 @ H'
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ M~
lsrs \rzh,\rt1,#16 @ M~
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ¯ZH
.endm
@ can have rt2==rx, in which case rx trashed
@ can have rzl==rx
@ can have rzh==rt1
@ unsigned 32x32->64 square: only three partial products are needed since
@ the two cross terms are equal; the shared term M is doubled by shifting
@ one place further (#17/#15 instead of #16/#16).
.macro square32_64 rx,rzl,rzh,rt0,rt1,rt2
@ t0 t1 t2 zl zh
uxth \rt0,\rx @ xl
muls \rt0,\rt0 @ xlxl=L
uxth \rt1,\rx @ xl
lsrs \rt2,\rx,#16 @ xh
muls \rt1,\rt2 @ xlxh=M
muls \rt2,\rt2 @ xhxh=H
lsls \rzl,\rt1,#17 @ ML
lsrs \rzh,\rt1,#15 @ MH
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ZH
.endm
.align 2
@ double multiplication: (r0:r1) * (r2:r3) -> r0:r1, round-to-nearest-even.
@ The full 64x64->128-bit mantissa product is accumulated from four
@ mul32_32_64 calls; the lowest 32 bits (r12) only ever contribute sticky bits.
.thumb_func
__aeabi_dmul:
push {r4-r7,r14}
mdunpack r0,r1,r4,r6,r5
mov r12,r4
mdunpack r2,r3,r4,r7,r5
eors r7,r6 @ sign of result
add r4,r12 @ exponent of result
push {r0-r2,r4,r7}
@ accumulate full product in r12:r5:r6:r7
mul32_32_64 r0,r2, r0,r5, r4,r6,r7,r0,r5 @ XL*YL
mov r12,r0 @ save LL bits
mul32_32_64 r1,r3, r6,r7, r0,r2,r4,r6,r7 @ XH*YH
pop {r0} @ XL
mul32_32_64 r0,r3, r0,r3, r1,r2,r4,r0,r3 @ XL*YH
adds r5,r0
adcs r6,r3
movs r0,#0
adcs r7,r0 @ propagate carry to top word
pop {r1,r2} @ XH,YL
mul32_32_64 r1,r2, r1,r2, r0,r3,r4, r1,r2 @ XH*YL
adds r5,r1
adcs r6,r2
movs r0,#0
adcs r7,r0 @ propagate carry to top word
@ here r5:r6:r7 holds the product [1..4) in Q(104-32)=Q72, with extra LSBs in r12
pop {r3,r4} @ exponent in r3, sign in r4
lsls r1,r7,#11
lsrs r2,r6,#21
orrs r1,r2
lsls r0,r6,#11
lsrs r2,r5,#21
orrs r0,r2
lsls r5,#11 @ now r5:r0:r1 Q83=Q(51+32), extra LSBs in r12
lsrs r2,r1,#20
bne 1f @ skip if in range [2..4)
adds r5,r5 @ shift up so always [2..4) Q83, i.e. [1..2) Q84=Q(52+32)
adcs r0,r0
adcs r1,r1
subs r3,#1 @ correct exponent
1:
ldr r6,=#0x3ff
subs r3,r6 @ correct for exponent bias
lsls r6,#1 @ 0x7fe
cmp r3,r6
bhs dm_0 @ exponent over- or underflow
lsls r5,#1 @ rounding bit to carry
bcc 1f @ result is correctly rounded
adds r0,#1
movs r6,#0
adcs r1,r6 @ round up
mov r6,r12 @ remaining sticky bits
orrs r5,r6
bne 1f @ some sticky bits set?
lsrs r0,#1
lsls r0,#1 @ round to even
1:
lsls r3,#20 @ pack exponent
adds r1,r3
dm_2:
lsls r4,#31 @ pack sign
add r1,r4
pop {r4-r7,r15}
@ here for exponent over- or underflow
dm_0:
bge dm_1 @ overflow?
adds r3,#1 @ would-be zero exponent?
bne 1f
adds r0,#1 @ round; does the mantissa ripple all the way up...
bne 1f @ all-ones mantissa?
adds r1,#1
lsrs r7,r1,#21
beq 1f
lsrs r1,#1 @ ...to the smallest normal? then return it
b dm_2
1:
lsls r1,r4,#31 @ return signed zero
movs r0,#0
pop {r4-r7,r15}
@ here for exponent overflow
dm_1:
adds r6,#1 @ 0x7ff
lsls r1,r6,#20 @ return signed infinity
movs r0,#0
b dm_2
.ltorg
@ Approach to division y/x is as follows.
@
@ First generate u1, an approximation to 1/x to about 29 bits. Multiply this by the top
@ 32 bits of y to generate a0, a first approximation to the result (good to 28 bits or so).
@ Calculate the exact remainder r0=y-a0*x, which will be about 0. Calculate a correction
@ d0=r0*u1, and then write a1=a0+d0. If near a rounding boundary, compute the exact
@ remainder r1=y-a1*x (which can be done using r0 as a basis) to determine whether to
@ round up or down.
@
@ The calculation of 1/x is as given in dreciptest.c. That code verifies exhaustively
@ that | u1*x-1 | < 10*2^-32.
@
@ More precisely:
@
@ x0=(q16)x;
@ x1=(q30)x;
@ y0=(q31)y;
@ u0=(q15~)"(0xffffffffU/(unsigned int)roundq(x/x_ulp))/powq(2,16)"(x0); // q15 approximation to 1/x; "~" denotes rounding rather than truncation
@ v=(q30)(u0*x1-1);
@ u1=(q30)u0-(q30~)(u0*v);
@
@ a0=(q30)(u1*y0);
@ r0=(q82)y-a0*x;
@ r0x=(q57)r0;
@ d0=r0x*u1;
@ a1=d0+a0;
@
@ Error analysis
@
@ Use Greek letters to represent the errors introduced by rounding and truncation.
@
@ r₀ = y - a₀x
@ = y - [ u₁ ( y - α ) - β ] x where 0 ≤ α < 2^-31, 0 ≤ β < 2^-30
@ = y ( 1 - u₁x ) + ( u₁α + β ) x
@
@ Hence
@
@ | r₀ / x | < 2 * 10*2^-32 + 2^-31 + 2^-30
@ = 26*2^-32
@
@ r₁ = y - a₁x
@ = y - a₀x - d₀x
@ = r₀ - d₀x
@ = r₀ - u₁ ( r₀ - γ ) x where 0 ≤ γ < 2^-57
@ = r₀ ( 1 - u₁x ) + u₁γx
@
@ Hence
@
@ | r₁ / x | < 26*2^-32 * 10*2^-32 + 2^-57
@ = (260+128)*2^-64
@ < 2^-55
@
@ Empirically it seems to be nearly twice as good as this.
@
@ To determine correctly whether the exact remainder calculation can be skipped we need a result
@ accurate to < 0.25ulp. In the case where x>y the quotient will be shifted up one place for normalisation
@ and so 1ulp is 2^-53 and so the calculation above suffices.
@ AEABI double-precision divide: r0:r1 / r2:r3 -> r0:r1 (IEEE-754 packed)
@ Algorithm (see the long comment above): build a Q30 reciprocal u1 of the
@ divisor via a LUT + two Newton-Raphson refinements, multiply by the dividend
@ for a first quotient a0, apply one residual correction d0=r0x*u1, and only
@ compute the exact remainder when the result lands on a rounding boundary.
@ ddiv0 is an alternative entry used by (the currently disabled) dtan with
@ operands already on the way in.
.align 2
.thumb_func
__aeabi_ddiv:
push {r4-r7,r14}
ddiv0: @ entry point from dtan
mdunpack r2,r3,r4,r7,r6 @ unpack divisor
@ unpack dividend by hand to save on register use
lsrs r6,r1,#31
adds r6,r7
mov r12,r6 @ result sign in r12b0; r12b1 trashed
lsls r1,#1
lsrs r7,r1,#21 @ exponent
beq 1f @ zero exponent?
adds r6,r7,#1
lsrs r6,#11
beq 2f @ exponent != 0x7ff? then done
1:
movs r0,#0
movs r1,#0
subs r7,#64 @ less drastic fiddling of exponents to get 0/0, Inf/Inf correct
lsls r7,#12
2:
subs r6,r7,r4
lsls r6,#2
add r12,r12,r6 @ (signed) exponent in r12[31..8]
subs r7,#1 @ implied 1
lsls r7,#21
subs r1,r7
lsrs r1,#1
// see dreciptest-boxc.c
lsrs r4,r3,#15 @ x2=x>>15; // Q5 32..63
ldr r5,=#(rcpapp-32)
ldrb r4,[r5,r4] @ u=lut5[x2-32]; // Q8
lsls r5,r3,#8
muls r5,r5,r4
asrs r5,#14 @ e=(i32)(u*(x<<8))>>14; // Q22
asrs r6,r5,#11
muls r6,r6,r6 @ e2=(e>>11)*(e>>11); // Q22
subs r5,r6
muls r5,r5,r4 @ c=(e-e2)*u; // Q30
lsls r6,r4,#7
asrs r5,#14
adds r5,#1
asrs r5,#1
subs r6,r5 @ u0=(u<<7)-((c+0x4000)>>15); // Q15
@ here
@ r0:r1 y mantissa
@ r2:r3 x mantissa
@ r6 u0, first approximation to 1/x Q15
@ r12: result sign, exponent
lsls r4,r3,#10
lsrs r5,r2,#22
orrs r5,r4 @ x1=(q30)x
muls r5,r6 @ u0*x1 Q45
asrs r5,#15 @ v=u0*x1-1 Q30
muls r5,r6 @ u0*v Q45
asrs r5,#14
adds r5,#1
asrs r5,#1 @ round u0*v to Q30
lsls r6,#15
subs r6,r5 @ u1 Q30 (second Newton-Raphson step)
@ here
@ r0:r1 y mantissa
@ r2:r3 x mantissa
@ r6 u1, second approximation to 1/x Q30
@ r12: result sign, exponent
push {r2,r3}
lsls r4,r1,#11
lsrs r5,r0,#21
orrs r4,r5 @ y0=(q31)y
mul32_32_64 r4,r6, r4,r5, r2,r3,r7,r4,r5 @ y0*u1 Q61
adds r4,r4
adcs r5,r5 @ a0=(q30)(y0*u1)
@ here
@ r0:r1 y mantissa
@ r5 a0, first approximation to y/x Q30
@ r6 u1, second approximation to 1/x Q30
@ r12 result sign, exponent
ldr r2,[r13,#0] @ xL
mul32_32_64 r2,r5, r2,r3, r1,r4,r7,r2,r3 @ xL*a0
ldr r4,[r13,#4] @ xH
muls r4,r5 @ xH*a0
adds r3,r4 @ r2:r3 now x*a0 Q82
lsrs r2,#25
lsls r1,r3,#7
orrs r2,r1 @ r2 now x*a0 Q57; r7:r2 is x*a0 Q89
lsls r4,r0,#5 @ y Q57
subs r0,r4,r2 @ r0x=y-x*a0 Q57 (signed)
@ here
@ r0 r0x Q57
@ r5 a0, first approximation to y/x Q30
@ r4 yL Q57
@ r6 u1 Q30
@ r12 result sign, exponent
muls32_32_64 r0,r6, r7,r6, r1,r2,r3, r7,r6 @ r7:r6 r0x*u1 Q87
asrs r3,r6,#25
adds r5,r3
lsls r3,r6,#7 @ r3:r5 a1 Q62 (but bottom 7 bits are zero so 55 bits of precision after binary point)
@ here we could recover another 7 bits of precision (but not accuracy) from the top of r7
@ but these bits are thrown away in the rounding and conversion to Q52 below
@ here
@ r3:r5 a1 Q62 candidate quotient [0.5,2) or so
@ r4 yL Q57
@ r12 result sign, exponent
movs r6,#0
adds r3,#128 @ for initial rounding to Q53
adcs r5,r5,r6
lsrs r1,r5,#30
bne dd_0
@ here candidate quotient a1 is in range [0.5,1)
@ so 30 significant bits in r5
lsls r4,#1 @ y now Q58
lsrs r1,r5,#9 @ to Q52
lsls r0,r5,#23
lsrs r3,#9 @ 0.5ulp-significance bit in carry: if this is 1 we may need to correct result
orrs r0,r3
bcs dd_1
b dd_2
dd_0:
@ here candidate quotient a1 is in range [1,2)
@ so 31 significant bits in r5
movs r2,#4
add r12,r12,r2 @ fix exponent; r3:r5 now effectively Q61
adds r3,#128 @ complete rounding to Q53
adcs r5,r5,r6
lsrs r1,r5,#10
lsls r0,r5,#22
lsrs r3,#10 @ 0.5ulp-significance bit in carry: if this is 1 we may need to correct result
orrs r0,r3
bcc dd_2
dd_1:
@ here: near a rounding boundary, so compute the exact remainder y-xz
@ to decide whether to round up or down
@ r0:r1 rounded result Q53 [0.5,1) or Q52 [1,2), but may not be correctly rounded-to-nearest
@ r4 yL Q58 or Q57
@ r12 result sign, exponent
@ carry set
adcs r0,r0,r0
adcs r1,r1,r1 @ z Q53 with 1 in LSB
lsls r4,#16 @ Q105-32=Q73
ldr r2,[r13,#0] @ xL Q52
ldr r3,[r13,#4] @ xH Q20
movs r5,r1 @ zH Q21
muls r5,r2 @ zH*xL Q73
subs r4,r5
muls r3,r0 @ zL*xH Q73
subs r4,r3
mul32_32_64 r2,r0, r2,r3, r5,r6,r7,r2,r3 @ xL*zL
rsbs r2,#0 @ borrow from low half?
sbcs r4,r3 @ y-xz Q73 (remainder bits 52..73)
cmp r4,#0
bmi 1f
movs r2,#0 @ round up
adds r0,#1
adcs r1,r2
1:
lsrs r0,#1 @ shift back down to Q52
lsls r2,r1,#31
orrs r0,r2
lsrs r1,#1
dd_2:
add r13,#8 @ discard saved divisor mantissa
mov r2,r12
lsls r7,r2,#31 @ result sign
asrs r2,#2 @ result exponent
ldr r3,=#0x3fd
adds r2,r3
ldr r3,=#0x7fe
cmp r2,r3
bhs dd_3 @ over- or underflow?
lsls r2,#20
adds r1,r2 @ pack exponent
dd_5:
adds r1,r7 @ pack sign
pop {r4-r7,r15}
dd_3:
movs r0,#0
cmp r2,#0
bgt dd_4 @ overflow?
movs r1,r7 @ underflow: signed zero
pop {r4-r7,r15}
dd_4:
adds r3,#1 @ 0x7ff
lsls r1,r3,#20 @ overflow: signed infinity
b dd_5
/*
Approach to square root x=sqrt(y) is as follows.
First generate a3, an approximation to 1/sqrt(y) to about 30 bits. Multiply this by y
to give a4~sqrt(y) to about 28 bits and a remainder r4=y-a4^2. Then, because
d sqrt(y) / dy = 1 / (2 sqrt(y)) let d4=r4*a3/2 and then the value a5=a4+d4 is
a better approximation to sqrt(y). If this is near a rounding boundary we
compute an exact remainder y-a5*a5 to decide whether to round up or down.
The calculation of a3 and a4 is as given in dsqrttest.c. That code verifies exhaustively
that | 1 - a3a4 | < 10*2^-32, | r4 | < 40*2^-32 and | r4/y | < 20*2^-32.
More precisely, with "y" representing y truncated to 30 binary places:
u=(q3)y; // 24-entry table
a0=(q8~)"1/sqrtq(x+x_ulp/2)"(u); // first approximation from table
p0=(q16)(a0*a0) * (q16)y;
r0=(q20)(p0-1);
dy0=(q15)(r0*a0); // Newton-Raphson correction term
a1=(q16)a0-dy0/2; // good to ~9 bits
p1=(q19)(a1*a1)*(q19)y;
r1=(q23)(p1-1);
dy1=(q15~)(r1*a1); // second Newton-Raphson correction
a2x=(q16)a1-dy1/2; // good to ~16 bits
a2=a2x-a2x/1t16; // prevent overflow of a2*a2 in 32 bits
p2=(a2*a2)*(q30)y; // Q62
r2=(q36)(p2-1+1t-31);
dy2=(q30)(r2*a2); // Q52->Q30
a3=(q31)a2-dy2/2; // good to about 30 bits
a4=(q30)(a3*(q30)y+1t-31); // good to about 28 bits
Error analysis
r₄ = y - a₄²
d₄ = 1/2 a₃r₄
a₅ = a₄ + d₄
r₅ = y - a₅²
= y - ( a₄ + d₄ )²
= y - a₄² - a₃a₄r₄ - 1/4 a₃²r₄²
= r₄ - a₃a₄r₄ - 1/4 a₃²r₄²
| r₅ | < | r₄ | | 1 - a₃a₄ | + 1/4 r₄²
a₅ = √y √( 1 - r₅/y )
= √y ( 1 - 1/2 r₅/y + ... )
So to first order (second order being very tiny)
√y - a₅ = 1/2 r₅/y
and
| √y - a₅ | < 1/2 ( | r₄/y | | 1 - a₃a₄ | + 1/4 r₄²/y )
From dsqrttest.c (conservatively):
< 1/2 ( 20*2^-32 * 10*2^-32 + 1/4 * 40*2^-32*20*2^-32 )
= 1/2 ( 200 + 200 ) * 2^-64
< 2^-56
Empirically we see about 1ulp worst-case error including rounding at Q57.
To determine correctly whether the exact remainder calculation can be skipped we need a result
accurate to < 0.25ulp at Q52, or 2^-54.
*/
@ _dsqrt special-case exits (entered by branches from _dsqrt below):
@ dq_2: exponent out of range — distinguish zero (return signed zero via dq_4
@ path) from +Inf (dq_3)
@ dq_0: negative argument — return -0 for -0, else -Inf (no NaN support here)
dq_2:
bge dq_3 @ +Inf?
movs r1,#0
b dq_4
dq_0:
lsrs r1,#31
lsls r1,#31 @ preserve sign bit
lsrs r2,#21 @ extract exponent
beq dq_4 @ -0? return it
asrs r1,#11 @ make -Inf
b dq_4
dq_3:
ldr r1,=#0x7ff
lsls r1,#20 @ return +Inf
dq_4:
movs r0,#0
dq_1:
bx r14
@ double-precision square root: sqrt(r0:r1) -> r0:r1 (IEEE-754 packed)
@ Algorithm (see the comment block above): LUT + three Newton-Raphson steps
@ give a3 ~ 1/sqrt(y) Q31; a4 = a3*y ~ sqrt(y) Q30; one residual correction
@ d4 = a3*(y-a4^2)/2 refines to ~56 bits; only when on a rounding boundary
@ (dq_5) is the exact remainder computed. Negative/zero/Inf inputs branch to
@ the dq_* handlers above.
.align 2
.thumb_func
_dsqrt:
lsls r2,r1,#1
bcs dq_0 @ negative?
lsrs r2,#21 @ extract exponent
subs r2,#1
ldr r3,=#0x7fe
cmp r2,r3
bhs dq_2 @ catches 0 and +Inf
push {r4-r7,r14}
lsls r4,r2,#20
subs r1,r4 @ insert implied 1
lsrs r2,#1
bcc 1f @ even exponent? skip
adds r0,r0,r0 @ odd exponent: shift up mantissa
adcs r1,r1,r1
1:
lsrs r3,#2
adds r2,r3
lsls r2,#20
mov r12,r2 @ save result exponent
@ here
@ r0:r1 y mantissa Q52 [1,4)
@ r12 result exponent
adr r4,drsqrtapp-8 @ first eight table entries are never accessed because of the mantissa's leading 1
lsrs r2,r1,#17 @ y Q3
ldrb r2,[r4,r2] @ initial approximation to reciprocal square root a0 Q8
lsrs r3,r1,#4 @ first Newton-Raphson iteration
muls r3,r2
muls r3,r2 @ i32 p0=a0*a0*(y>>14); // Q32
asrs r3,r3,#12 @ i32 r0=p0>>12; // Q20
muls r3,r2
asrs r3,#13 @ i32 dy0=(r0*a0)>>13; // Q15
lsls r2,#8
subs r2,r3 @ i32 a1=(a0<<8)-dy0; // Q16
movs r3,r2
muls r3,r3
lsrs r3,#13
lsrs r4,r1,#1
muls r3,r4 @ i32 p1=((a1*a1)>>11)*(y>>11); // Q19*Q19=Q38
asrs r3,#15 @ i32 r1=p1>>15; // Q23
muls r3,r2
asrs r3,#23
adds r3,#1
asrs r3,#1 @ i32 dy1=(r1*a1+(1<<23))>>24; // Q23*Q16=Q39; Q15
subs r2,r3 @ i32 a2=a1-dy1; // Q16
lsrs r3,r2,#16
subs r2,r3 @ if(a2>=0x10000) a2=0xffff; to prevent overflow of a2*a2
@ here
@ r0:r1 y mantissa
@ r2 a2 ~ 1/sqrt(y) Q16
@ r12 result exponent
movs r3,r2
muls r3,r3
lsls r1,#10
lsrs r4,r0,#22
orrs r1,r4 @ y Q30
mul32_32_64 r1,r3, r4,r3, r5,r6,r7,r4,r3 @ i64 p2=(ui64)(a2*a2)*(ui64)y; // Q62 r4:r3
lsls r5,r3,#6
lsrs r4,#26
orrs r4,r5
adds r4,#0x20 @ i32 r2=(p2>>26)+0x20; // Q36 r4
uxth r5,r4
muls r5,r2
asrs r4,#16
muls r4,r2
lsrs r5,#16
adds r4,r5
asrs r4,#6 @ i32 dy2=((i64)r2*(i64)a2)>>22; // Q36*Q16=Q52; Q30
lsls r2,#15
subs r2,r4
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 a3 ~ 1/sqrt(y) Q31
@ r12 result exponent
mul32_32_64 r2,r1, r3,r4, r5,r6,r7,r3,r4
adds r3,r3,r3
adcs r4,r4,r4
adds r3,r3,r3
movs r3,#0
adcs r3,r4 @ ui32 a4=((ui64)a3*(ui64)y+(1U<<31))>>31; // Q30
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 a3 Q31 ~ 1/sqrt(y)
@ r3 a4 Q30 ~ sqrt(y)
@ r12 result exponent
square32_64 r3, r4,r5, r6,r5,r7
lsls r6,r0,#8
lsrs r7,r1,#2
subs r6,r4
sbcs r7,r5 @ r4=(q60)y-a4*a4
@ by exhaustive testing, r4 = fffffffc0e134fdc .. 00000003c2bf539c Q60
lsls r5,r7,#29
lsrs r6,#3
adcs r6,r5 @ r4 Q57 with rounding
muls32_32_64 r6,r2, r6,r2, r4,r5,r7,r6,r2 @ d4=a3*r4/2 Q89
@ r4+d4 is correct to 1ULP at Q57, tested on ~9bn cases including all extreme values of r4 for each possible y Q30
adds r2,#8
asrs r2,#5 @ d4 Q52, rounded to Q53 with spare bit in carry
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 d4 Q52, rounded to Q53
@ C flag contains d4_b53
@ r3 a4 Q30
bcs dq_5 @ on rounding boundary? take the exact-remainder path
lsrs r5,r3,#10 @ a4 Q52
lsls r4,r3,#22
asrs r1,r2,#31
adds r0,r2,r4
adcs r1,r5 @ a4+d4
add r1,r12 @ pack exponent
pop {r4-r7,r15}
.ltorg
@ 24-entry Q8 lookup table of 1/sqrt(y) seeds, indexed by y Q3 (values 1..3);
@ generated as round(sqrt(2^22./[68:8:252]))
drsqrtapp:
.byte 0xf8,0xeb,0xdf,0xd6,0xcd,0xc5,0xbe,0xb8
.byte 0xb2,0xad,0xa8,0xa4,0xa0,0x9c,0x99,0x95
.byte 0x92,0x8f,0x8d,0x8a,0x88,0x85,0x83,0x81
@ _dsqrt slow path: the candidate result sits on a rounding boundary, so
@ compute the exact remainder y-a5^2 to decide whether to round up
dq_5:
@ here we are near a rounding boundary, C is set
adcs r2,r2,r2 @ d4 Q53+1ulp
lsrs r5,r3,#9
lsls r4,r3,#23 @ r4:r5 a4 Q53
asrs r1,r2,#31
adds r4,r2,r4
adcs r5,r1 @ r4:r5 a5=a4+d4 Q53+1ulp
movs r3,r5
muls r3,r4
square32_64 r4,r1,r2,r6,r2,r7
adds r2,r3
adds r2,r3 @ r1:r2 a5^2 Q106
lsls r0,#22 @ y Q84
rsbs r1,#0
sbcs r0,r2 @ remainder y-a5^2
bmi 1f @ y<a5^2: no need to increment a5
movs r3,#0
adds r4,#1
adcs r5,r3 @ bump a5 if over rounding boundary
1:
lsrs r0,r4,#1 @ shift back to Q52 and pack
lsrs r1,r5,#1
lsls r5,#31
orrs r0,r5
add r1,r12
pop {r4-r7,r15}
@ compare r0:r1 against r2:r3, returning -1/0/1 for <, =, >
@ also set flags accordingly
@ NaNs and denormals are first flushed (mantissa zeroed, sign/exponent kept),
@ and -0 compares equal to +0; mixed-sign operands decide on the sign of x
.thumb_func
qfp_dcmp:
push {r4,r6,r7,r14}
ldr r7,=#0x7ff @ flush NaNs and denormals
lsls r4,r1,#1
lsrs r4,#21
beq 1f
cmp r4,r7
bne 2f
1:
movs r0,#0
lsrs r1,#20
lsls r1,#20
2:
lsls r4,r3,#1
lsrs r4,#21
beq 1f
cmp r4,r7
bne 2f
1:
movs r2,#0
lsrs r3,#20
lsls r3,#20
2:
dcmp_fast_entry:
movs r6,#1
eors r3,r1
bmi 4f @ opposite signs? then can proceed on basis of sign of x
eors r3,r1 @ restore r3
bpl 1f
rsbs r6,#0 @ negative? flip comparison
1:
cmp r1,r3
bne 1f
cmp r0,r2
bhi 2f
blo 3f
5:
movs r6,#0 @ equal? result is 0
1:
bgt 2f
3:
rsbs r6,#0
2:
subs r0,r6,#0 @ copy and set flags
pop {r4,r6,r7,r15}
4:
orrs r3,r1 @ make -0==+0
adds r3,r3
orrs r3,r0
orrs r3,r2
beq 5b
cmp r1,#0
bge 2b
b 3b
@ "scientific" functions start here
@ helpers to save/restore the high registers r8-r11 via r4-r7, since Thumb-1
@ push/pop cannot address high registers directly; callers must keep r4-r7
@ free around these calls (they are clobbered here)
.thumb_func
push_r8_r11:
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
push {r4-r7}
bx r14
.thumb_func
pop_r8_r11:
pop {r4-r7}
mov r8,r4
mov r9,r5
mov r10,r6
mov r11,r7
bx r14
@ double-length CORDIC rotation step
@ r0:r1 ω
@ r6 32-i (complementary shift)
@ r7 i (shift)
@ r8:r9 x
@ r10:r11 y
@ r12 coefficient pointer (advanced by 8 bytes per step)
@ dcordic_vec_step: vectoring mode — choose rotation direction from sign of y
@ dcordic_rot_step: rotation mode — choose rotation direction from sign of ω
@ both then fall into the shared rotate-by-±2^-i body below
@ an option in rotation mode would be to compute the sequence of σ values
@ in one pass, rotate the initial vector by the residual ω and then run a
@ second pass to compute the final x and y. This would relieve pressure
@ on registers and hence possibly be faster. The same trick does not work
@ in vectoring mode (but perhaps one could work to single precision in
@ a first pass and then double precision in a second pass?).
.thumb_func
dcordic_vec_step:
mov r2,r12
ldmia r2!,{r3,r4}
mov r12,r2
mov r2,r11
cmp r2,#0
blt 1f
b 2f
.thumb_func
dcordic_rot_step:
mov r2,r12
ldmia r2!,{r3,r4}
mov r12,r2
cmp r1,#0
bge 1f
2:
@ ω<0 / y>=0
@ ω+=dω
@ x+=y>>i, y-=x>>i
adds r0,r3
adcs r1,r4
mov r3,r11
asrs r3,r7
mov r4,r11
lsls r4,r6
mov r2,r10
lsrs r2,r7
orrs r2,r4 @ r2:r3 y>>i, rounding in carry
mov r4,r8
mov r5,r9 @ r4:r5 x
adcs r2,r4
adcs r3,r5 @ r2:r3 x+(y>>i)
mov r8,r2
mov r9,r3
mov r3,r5
lsls r3,r6
asrs r5,r7
lsrs r4,r7
orrs r4,r3 @ r4:r5 x>>i, rounding in carry
mov r2,r10
mov r3,r11
sbcs r2,r4
sbcs r3,r5 @ r2:r3 y-(x>>i)
mov r10,r2
mov r11,r3
bx r14
@ ω>0 / y<0
@ ω-=dω
@ x-=y>>i, y+=x>>i
1:
subs r0,r3
sbcs r1,r4
mov r3,r9
asrs r3,r7
mov r4,r9
lsls r4,r6
mov r2,r8
lsrs r2,r7
orrs r2,r4 @ r2:r3 x>>i, rounding in carry
mov r4,r10
mov r5,r11 @ r4:r5 y
adcs r2,r4
adcs r3,r5 @ r2:r3 y+(x>>i)
mov r10,r2
mov r11,r3
mov r3,r5
lsls r3,r6
asrs r5,r7
lsrs r4,r7
orrs r4,r3 @ r4:r5 y>>i, rounding in carry
mov r2,r8
mov r3,r9
sbcs r2,r4
sbcs r3,r5 @ r2:r3 x-(y>>i)
mov r8,r2
mov r9,r3
bx r14
@ shared exit: return double +0.0 in r0:r1
ret_dzero:
movs r0,#0
movs r1,#0
bx r14
@ convert packed double in r0:r1 to signed/unsigned 32/64-bit integer/fixed-point value in r0:r1 [with r2 places after point], with rounding towards -Inf
@ fixed-point versions only work with reasonable values in r2 because of the way dunpacks works
@ The 32-bit entries below simply request 32 extra fraction places from the
@ 64-bit converters and keep the high word.
.thumb_func
_dfix:
movs r2,#0 @ and fall through
.thumb_func
qfp_double2fix:
push {r14}
adds r2,#32
bl qfp_double2fix64
movs r0,r1
pop {r15}
.thumb_func
_dfixu:
movs r2,#0 @ and fall through
.thumb_func
qfp_double2ufix:
push {r14}
adds r2,#32
bl qfp_double2ufix64
movs r0,r1
pop {r15}
.thumb_func
_ll_sfrom_f:
movs r1,#0 @ and fall through
.thumb_func
qfp_float2fix64:
push {r14}
bl f2fix
b d2f64_a
.thumb_func
_ll_ufrom_f:
movs r1,#0 @ and fall through
.thumb_func
qfp_float2ufix64:
asrs r3,r0,#23 @ negative? return 0
bmi ret_dzero
@ and fall through
@ convert float in r0 to signed fixed point in r0:r1:r3, r1 places after point, rounding towards -Inf
@ result clamped so that r3 can only be 0 or -1
@ trashes r12
.thumb_func
f2fix:
push {r4,r14}
mov r12,r1
asrs r3,r0,#31
lsls r0,#1
lsrs r2,r0,#24
beq 1f @ zero?
cmp r2,#0xff @ Inf?
beq 2f
subs r1,r2,#1
subs r2,#0x7f @ remove exponent bias
lsls r1,#24
subs r0,r1 @ insert implied 1
eors r0,r3
subs r0,r3 @ top two's complement
asrs r1,r0,#4 @ convert to double format
lsls r0,#28
b d2fix_a
1:
movs r0,#0
movs r1,r0
movs r3,r0
pop {r4,r15}
2:
mvns r0,r3 @ return max/min value
mvns r1,r3
pop {r4,r15}
@ signed double -> 64-bit fixed point; d2f64_a clamps the shared d2fix result
@ to the extreme representable values when the sign-extension word disagrees
@ with the result's sign (i.e. on overflow)
.thumb_func
_ll_sfrom_d:
movs r2,#0 @ and fall through
.thumb_func
qfp_double2fix64:
push {r14}
bl d2fix
d2f64_a:
asrs r2,r1,#31
cmp r2,r3
bne 1f @ sign extension bits fail to match sign of result?
pop {r15}
1:
mvns r0,r3
movs r1,#1
lsls r1,#31
eors r1,r1,r0 @ generate extreme fixed-point values
pop {r15}
@ unsigned double -> 64-bit fixed point (negative inputs return 0), then the
@ shared double-to-fixed core d2fix
.thumb_func
_ll_ufrom_d:
movs r2,#0 @ and fall through
.thumb_func
qfp_double2ufix64:
asrs r3,r1,#20 @ negative? return 0
bmi ret_dzero
@ and fall through
@ convert double in r0:r1 to signed fixed point in r0:r1:r3, r2 places after point, rounding towards -Inf
@ result clamped so that r3 can only be 0 or -1
@ trashes r12
.thumb_func
d2fix:
push {r4,r14}
mov r12,r2
bl dunpacks
asrs r4,r2,#16
adds r4,#1
bge 1f
movs r1,#0 @ -0 -> +0
1:
asrs r3,r1,#31
d2fix_a:
@ here
@ r0:r1 two's complement mantissa
@ r2 unbiased exponent
@ r3 mantissa sign extension bits
add r2,r12 @ exponent plus offset for required binary point position
subs r2,#52 @ required shift
bmi 1f @ shift down?
@ here a shift up by r2 places
cmp r2,#12 @ will clamp?
bge 2f
movs r4,r0
lsls r1,r2
lsls r0,r2
rsbs r2,#0
adds r2,#32 @ complementary shift
lsrs r4,r2
orrs r1,r4
pop {r4,r15}
2:
mvns r0,r3
mvns r1,r3 @ overflow: clamp to extreme fixed-point values
pop {r4,r15}
1:
@ here a shift down by -r2 places
adds r2,#32
bmi 1f @ long shift?
mov r4,r1
lsls r4,r2
rsbs r2,#0
adds r2,#32 @ complementary shift
asrs r1,r2
lsrs r0,r2
orrs r0,r4
pop {r4,r15}
1:
@ here a long shift down
movs r0,r1
asrs r1,#31 @ shift down 32 places
adds r2,#32
bmi 1f @ very long shift?
rsbs r2,#0
adds r2,#32
asrs r0,r2
pop {r4,r15}
1:
movs r0,r3 @ result very near zero: use sign extension bits
movs r1,r3
pop {r4,r15}
@ float <-> double conversions
@ AEABI float -> double: widen packed float in r0 to packed double in r0:r1
@ (exact; zero and infinity handled explicitly, exponent rebiased 0x7f->0x3ff)
.thumb_func
__aeabi_f2d:
lsrs r3,r0,#31 @ sign bit
lsls r3,#31
lsls r1,r0,#1
lsrs r2,r1,#24 @ exponent
beq 1f @ zero?
cmp r2,#0xff @ Inf?
beq 2f
lsrs r1,#4 @ exponent and top 20 bits of mantissa
ldr r2,=#(0x3ff-0x7f)<<20 @ difference in exponent offsets
adds r1,r2
orrs r1,r3
lsls r0,#29 @ bottom 3 bits of mantissa
bx r14
1:
movs r1,r3 @ return signed zero
3:
movs r0,#0
bx r14
2:
ldr r1,=#0x7ff00000 @ return signed infinity
adds r1,r3
b 3b
@ AEABI double -> float: narrow packed double in r0:r1 to packed float in r0
@ with round-to-nearest-even; overflow returns signed infinity, underflow
@ returns signed zero, except a value that rounds up to the smallest normal
@ is detected and returned as such
.thumb_func
__aeabi_d2f:
lsls r2,r1,#1
lsrs r2,#21 @ exponent
ldr r3,=#0x3ff-0x7f
subs r2,r3 @ fix exponent bias
ble 1f @ underflow or zero
cmp r2,#0xff
bge 2f @ overflow or infinity
lsls r2,#23 @ position exponent of result
lsrs r3,r1,#31
lsls r3,#31
orrs r2,r3 @ insert sign
lsls r3,r0,#3 @ rounding bits
lsrs r0,#29
lsls r1,#12
lsrs r1,#9
orrs r0,r1 @ assemble mantissa
orrs r0,r2 @ insert exponent and sign
lsls r3,#1
bcc 3f @ no rounding
beq 4f @ all sticky bits 0?
5:
adds r0,#1
3:
bx r14
4:
lsrs r3,r0,#1 @ odd? then round up
bcs 5b
bx r14
1:
beq 6f @ check case where value is just less than smallest normal
7:
lsrs r0,r1,#31
lsls r0,#31
bx r14
6:
lsls r2,r1,#12 @ 20 1:s at top of mantissa?
asrs r2,#12
adds r2,#1
bne 7b
lsrs r2,r0,#29 @ and 3 more 1:s?
cmp r2,#7
bne 7b
movs r2,#1 @ return smallest normal with correct sign
b 8f
2:
movs r2,#0xff
8:
lsrs r0,r1,#31 @ return signed infinity
lsls r0,#8
adds r0,r2
lsls r0,#23
bx r14
@ convert signed/unsigned 32/64-bit integer/fixed-point value in r0:r1 [with r2 places after point] to packed double in r0:r1, with rounding
@ 32-bit entries widen to 64 bits; signed entries take the absolute value and
@ carry the sign in r3; all converge on the shared normalise/round/pack core
@ uf2d below
.thumb_func
_dfltu:
movs r1,#0 @ and fall through
.thumb_func
qfp_ufix2double:
movs r2,r1
movs r1,#0
b qfp_ufix642double
.thumb_func
_dflt:
movs r1,#0 @ and fall through
.thumb_func
qfp_fix2double:
movs r2,r1
asrs r1,r0,#31 @ sign extend
b qfp_fix642double
.thumb_func
_ll_uto_d:
movs r2,#0 @ and fall through
.thumb_func
qfp_ufix642double:
movs r3,#0
b uf2d
.thumb_func
_ll_sto_d:
movs r2,#0 @ and fall through
.thumb_func
qfp_fix642double:
asrs r3,r1,#31 @ sign bit across all bits
eors r0,r3
eors r1,r3
subs r0,r3
sbcs r1,r3
uf2d:
push {r4,r5,r14}
ldr r4,=#0x432
subs r2,r4,r2 @ form biased exponent
@ here
@ r0:r1 unnormalised mantissa
@ r2 -Q (will become exponent)
@ r3 sign across all bits
cmp r1,#0
bne 1f @ short normalising shift?
movs r1,r0
beq 2f @ zero? return it
movs r0,#0
subs r2,#32 @ fix exponent
1:
asrs r4,r1,#21
bne 3f @ will need shift down (and rounding?)
bcs 4f @ normalised already?
5:
subs r2,#1
adds r0,r0 @ shift up
adcs r1,r1
lsrs r4,r1,#21
bcc 5b
4:
ldr r4,=#0x7fe
cmp r2,r4
bhs 6f @ over/underflow? return signed zero/infinity
7:
lsls r2,#20 @ pack and return
adds r1,r2
lsls r3,#31
adds r1,r3
2:
pop {r4,r5,r15}
6: @ return signed zero/infinity according to unclamped exponent in r2
mvns r2,r2
lsrs r2,#21
movs r0,#0
movs r1,#0
b 7b
3:
@ here we need to shift down to normalise and possibly round
bmi 1f @ already normalised to Q63?
2:
subs r2,#1
adds r0,r0 @ shift up
adcs r1,r1
bpl 2b
1:
@ here we have a 1 in b63 of r0:r1
adds r2,#11 @ correct exponent for subsequent shift down
lsls r4,r0,#21 @ save bits for rounding
lsrs r0,#11
lsls r5,r1,#21
orrs r0,r5
lsrs r1,#11
lsls r4,#1
beq 1f @ sticky bits are zero?
8:
movs r4,#0
adcs r0,r4
adcs r1,r4
b 4b
1:
bcc 4b @ sticky bits are zero but not on rounding boundary
lsrs r4,r0,#1 @ increment if odd (force round to even)
b 8b
.ltorg
@ unpack a double via the mdunpacks macro (defined earlier in this file —
@ presumably leaves signed mantissa in r0:r1 and biased exponent in r2;
@ confirm against its definition), then remove the 0x3ff exponent bias
.thumb_func
dunpacks:
mdunpacks r0,r1,r2,r3,r4
ldr r3,=#0x3ff
subs r2,r3 @ exponent without offset
bx r14
@ r0:r1 signed mantissa Q52
@ r2 unbiased exponent < 10 (i.e., |x|<2^10)
@ r4 pointer to:
@ - divisor reciprocal approximation r=1/d Q15
@ - divisor d Q62 0..20
@ - divisor d Q62 21..41
@ - divisor d Q62 42..62
@ returns:
@ r0:r1 reduced result y Q62, -0.6 d < y < 0.6 d (better in practice)
@ r2 quotient q (number of reductions)
@ if exponent >=10, returns r0:r1=0, r2=1024*mantissa sign
@ designed to work for 0.5<d<2, in particular d=ln2 (~0.7) and d=π/2 (~1.6)
@ .thumb_func
@ dreduce:
@ adds r2,#2 @ e+2
@ bmi 1f @ |x|<0.25, too small to need adjustment
@ cmp r2,#12
@ bge 4f
@ 2:
@ movs r5,#17
@ subs r5,r2 @ 15-e
@ movs r3,r1 @ Q20
@ asrs r3,r5 @ x Q5
@ adds r2,#8 @ e+10
@ adds r5,#7 @ 22-e = 32-(e+10)
@ movs r6,r0
@ lsrs r6,r5
@ lsls r0,r2
@ lsls r1,r2
@ orrs r1,r6 @ r0:r1 x Q62
@ ldmia r4,{r4-r7}
@ muls r3,r4 @ rx Q20
@ asrs r2,r3,#20
@ movs r3,#0
@ adcs r2,r3 @ rx Q0 rounded = q; for e.g. r=1.5 |q|<1.5*2^10
@ muls r5,r2 @ qd in pieces: L Q62
@ muls r6,r2 @ M Q41
@ muls r7,r2 @ H Q20
@ lsls r7,#10
@ asrs r4,r6,#11
@ lsls r6,#21
@ adds r6,r5
@ adcs r7,r4
@ asrs r5,#31
@ adds r7,r5 @ r6:r7 qd Q62
@ subs r0,r6
@ sbcs r1,r7 @ remainder Q62
@ bx r14
@ 4:
@ movs r2,#12 @ overflow: clamp to +/-1024
@ movs r0,#0
@ asrs r1,#31
@ lsls r1,#1
@ adds r1,#1
@ lsls r1,#20
@ b 2b
@ 1:
@ lsls r1,#8
@ lsrs r3,r0,#24
@ orrs r1,r3
@ lsls r0,#8 @ r0:r1 Q60, to be shifted down -r2 places
@ rsbs r3,r2,#0
@ adds r2,#32 @ shift down in r3, complementary shift in r2
@ bmi 1f @ long shift?
@ 2:
@ movs r4,r1
@ asrs r1,r3
@ lsls r4,r2
@ lsrs r0,r3
@ orrs r0,r4
@ movs r2,#0 @ rounding
@ adcs r0,r2
@ adcs r1,r2
@ bx r14
@ 1:
@ movs r0,r1 @ down 32 places
@ asrs r1,#31
@ subs r3,#32
@ adds r2,#32
@ bpl 2b
@ movs r0,#0 @ very long shift? return 0
@ movs r1,#0
@ movs r2,#0
@ bx r14
@ .thumb_func
@ qfp_dtan:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ mov r12,r0 @ save ε
@ bl dcos_finish
@ push {r0,r1}
@ mov r0,r12
@ bl dsin_finish
@ pop {r2,r3}
@ bl pop_r8_r11
@ b ddiv0 @ compute sin θ/cos θ
@ .thumb_func
@ qfp_dcos:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ bl dcos_finish
@ b 1f
@ .thumb_func
@ qfp_dsin:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ bl dsin_finish
@ 1:
@ bl pop_r8_r11
@ pop {r4-r7,r15}
@ @ unpack double θ in r0:r1, range reduce and calculate ε, cos α and sin α such that
@ @ θ=α+ε and |ε|≤2^-32
@ @ on return:
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ .thumb_func
@ dsincos:
@ push {r14}
@ bl dunpacks
@ adr r4,dreddata0
@ bl dreduce
@ movs r4,#0
@ ldr r5,=#0x9df04dbb @ this value compensates for the non-unity scaling of the CORDIC rotations
@ ldr r6,=#0x36f656c5
@ lsls r2,#31
@ bcc 1f
@ @ quadrant 2 or 3
@ mvns r6,r6
@ rsbs r5,r5,#0
@ adcs r6,r4
@ 1:
@ lsls r2,#1
@ bcs 1f
@ @ even quadrant
@ mov r10,r4
@ mov r11,r4
@ mov r8,r5
@ mov r9,r6
@ b 2f
@ 1:
@ @ odd quadrant
@ mov r8,r4
@ mov r9,r4
@ mov r10,r5
@ mov r11,r6
@ 2:
@ adr r4,dtab_cc
@ mov r12,r4
@ movs r7,#1
@ movs r6,#31
@ 1:
@ bl dcordic_rot_step
@ adds r7,#1
@ subs r6,#1
@ cmp r7,#33
@ bne 1b
@ pop {r15}
@ dcos_finish:
@ @ here
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ @ and we wish to calculate cos θ=cos(α+ε)~cos α - ε sin α
@ mov r1,r11
@ @ mov r2,r10
@ @ lsrs r2,#31
@ @ adds r1,r2 @ rounding improves accuracy very slightly
@ muls32_s32_64 r0,r1, r2,r3, r4,r5,r6,r2,r3
@ @ r2:r3 ε sin α Q(62+62-32)=Q92
@ mov r0,r8
@ mov r1,r9
@ lsls r5,r3,#2
@ asrs r3,r3,#30
@ lsrs r2,r2,#30
@ orrs r2,r5
@ sbcs r0,r2 @ include rounding
@ sbcs r1,r3
@ movs r2,#62
@ b qfp_fix642double
@ dsin_finish:
@ @ here
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ @ and we wish to calculate sin θ=sin(α+ε)~sin α + ε cos α
@ mov r1,r9
@ muls32_s32_64 r0,r1, r2,r3, r4,r5,r6,r2,r3
@ @ r2:r3 ε cos α Q(62+62-32)=Q92
@ mov r0,r10
@ mov r1,r11
@ lsls r5,r3,#2
@ asrs r3,r3,#30
@ lsrs r2,r2,#30
@ orrs r2,r5
@ adcs r0,r2 @ include rounding
@ adcs r1,r3
@ movs r2,#62
@ b qfp_fix642double
@ .ltorg
@ .align 2
@ dreddata0:
@ .word 0x0000517d @ 2/π Q15
@ .word 0x0014611A @ π/2 Q62=6487ED5110B4611A split into 21-bit pieces
@ .word 0x000A8885
@ .word 0x001921FB
@ .thumb_func
@ qfp_datan2:
@ @ r0:r1 y
@ @ r2:r3 x
@ push {r4-r7,r14}
@ bl push_r8_r11
@ ldr r5,=#0x7ff00000
@ movs r4,r1
@ ands r4,r5 @ y==0?
@ beq 1f
@ cmp r4,r5 @ or Inf/NaN?
@ bne 2f
@ 1:
@ lsrs r1,#20 @ flush
@ lsls r1,#20
@ movs r0,#0
@ 2:
@ movs r4,r3
@ ands r4,r5 @ x==0?
@ beq 1f
@ cmp r4,r5 @ or Inf/NaN?
@ bne 2f
@ 1:
@ lsrs r3,#20 @ flush
@ lsls r3,#20
@ movs r2,#0
@ 2:
@ movs r6,#0 @ quadrant offset
@ lsls r5,#11 @ constant 0x80000000
@ cmp r3,#0
@ bpl 1f @ skip if x positive
@ movs r6,#2
@ eors r3,r5
@ eors r1,r5
@ bmi 1f @ quadrant offset=+2 if y was positive
@ rsbs r6,#0 @ quadrant offset=-2 if y was negative
@ 1:
@ @ now in quadrant 0 or 3
@ adds r7,r1,r5 @ r7=-r1
@ bpl 1f
@ @ y>=0: in quadrant 0
@ cmp r1,r3
@ ble 2f @ y<~x so 0≤θ<~π/4: skip
@ adds r6,#1
@ eors r1,r5 @ negate x
@ b 3f @ and exchange x and y = rotate by -π/2
@ 1:
@ cmp r3,r7
@ bge 2f @ -y<~x so -π/4<~θ≤0: skip
@ subs r6,#1
@ eors r3,r5 @ negate y and ...
@ 3:
@ movs r7,r0 @ exchange x and y
@ movs r0,r2
@ movs r2,r7
@ movs r7,r1
@ movs r1,r3
@ movs r3,r7
@ 2:
@ @ here -π/4<~θ<~π/4
@ @ r6 has quadrant offset
@ push {r6}
@ cmp r2,#0
@ bne 1f
@ cmp r3,#0
@ beq 10f @ x==0 going into division?
@ lsls r4,r3,#1
@ asrs r4,#21
@ adds r4,#1
@ bne 1f @ x==Inf going into division?
@ lsls r4,r1,#1
@ asrs r4,#21
@ adds r4,#1 @ y also ±Inf?
@ bne 10f
@ subs r1,#1 @ make them both just finite
@ subs r3,#1
@ b 1f
@ 10:
@ movs r0,#0
@ movs r1,#0
@ b 12f
@ 1:
@ bl __aeabi_ddiv
@ movs r2,#62
@ bl qfp_double2fix64
@ @ r0:r1 y/x
@ mov r10,r0
@ mov r11,r1
@ movs r0,#0 @ ω=0
@ movs r1,#0
@ mov r8,r0
@ movs r2,#1
@ lsls r2,#30
@ mov r9,r2 @ x=1
@ adr r4,dtab_cc
@ mov r12,r4
@ movs r7,#1
@ movs r6,#31
@ 1:
@ bl dcordic_vec_step
@ adds r7,#1
@ subs r6,#1
@ cmp r7,#33
@ bne 1b
@ @ r0:r1 atan(y/x) Q62
@ @ r8:r9 x residual Q62
@ @ r10:r11 y residual Q62
@ mov r2,r9
@ mov r3,r10
@ subs r2,#12 @ this makes atan(0)==0
@ @ the following is basically a division residual y/x ~ atan(residual y/x)
@ movs r4,#1
@ lsls r4,#29
@ movs r7,#0
@ 2:
@ lsrs r2,#1
@ movs r3,r3 @ preserve carry
@ bmi 1f
@ sbcs r3,r2
@ adds r0,r4
@ adcs r1,r7
@ lsrs r4,#1
@ bne 2b
@ b 3f
@ 1:
@ adcs r3,r2
@ subs r0,r4
@ sbcs r1,r7
@ lsrs r4,#1
@ bne 2b
@ 3:
@ lsls r6,r1,#31
@ asrs r1,#1
@ lsrs r0,#1
@ orrs r0,r6 @ Q61
@ 12:
@ pop {r6}
@ cmp r6,#0
@ beq 1f
@ ldr r4,=#0x885A308D @ π/2 Q61
@ ldr r5,=#0x3243F6A8
@ bpl 2f
@ mvns r4,r4 @ negative quadrant offset
@ mvns r5,r5
@ 2:
@ lsls r6,#31
@ bne 2f @ skip if quadrant offset is ±1
@ adds r0,r4
@ adcs r1,r5
@ 2:
@ adds r0,r4
@ adcs r1,r5
@ 1:
@ movs r2,#61
@ bl qfp_fix642double
@ bl pop_r8_r11
@ pop {r4-r7,r15}
@ .ltorg
@ dtab_cc:
@ .word 0x61bb4f69, 0x1dac6705 @ atan 2^-1 Q62
@ .word 0x96406eb1, 0x0fadbafc @ atan 2^-2 Q62
@ .word 0xab0bdb72, 0x07f56ea6 @ atan 2^-3 Q62
@ .word 0xe59fbd39, 0x03feab76 @ atan 2^-4 Q62
@ .word 0xba97624b, 0x01ffd55b @ atan 2^-5 Q62
@ .word 0xdddb94d6, 0x00fffaaa @ atan 2^-6 Q62
@ .word 0x56eeea5d, 0x007fff55 @ atan 2^-7 Q62
@ .word 0xaab7776e, 0x003fffea @ atan 2^-8 Q62
@ .word 0x5555bbbc, 0x001ffffd @ atan 2^-9 Q62
@ .word 0xaaaaadde, 0x000fffff @ atan 2^-10 Q62
@ .word 0xf555556f, 0x0007ffff @ atan 2^-11 Q62
@ .word 0xfeaaaaab, 0x0003ffff @ atan 2^-12 Q62
@ .word 0xffd55555, 0x0001ffff @ atan 2^-13 Q62
@ .word 0xfffaaaab, 0x0000ffff @ atan 2^-14 Q62
@ .word 0xffff5555, 0x00007fff @ atan 2^-15 Q62
@ .word 0xffffeaab, 0x00003fff @ atan 2^-16 Q62
@ .word 0xfffffd55, 0x00001fff @ atan 2^-17 Q62
@ .word 0xffffffab, 0x00000fff @ atan 2^-18 Q62
@ .word 0xfffffff5, 0x000007ff @ atan 2^-19 Q62
@ .word 0xffffffff, 0x000003ff @ atan 2^-20 Q62
@ .word 0x00000000, 0x00000200 @ atan 2^-21 Q62 @ consider optimising these
@ .word 0x00000000, 0x00000100 @ atan 2^-22 Q62
@ .word 0x00000000, 0x00000080 @ atan 2^-23 Q62
@ .word 0x00000000, 0x00000040 @ atan 2^-24 Q62
@ .word 0x00000000, 0x00000020 @ atan 2^-25 Q62
@ .word 0x00000000, 0x00000010 @ atan 2^-26 Q62
@ .word 0x00000000, 0x00000008 @ atan 2^-27 Q62
@ .word 0x00000000, 0x00000004 @ atan 2^-28 Q62
@ .word 0x00000000, 0x00000002 @ atan 2^-29 Q62
@ .word 0x00000000, 0x00000001 @ atan 2^-30 Q62
@ .word 0x80000000, 0x00000000 @ atan 2^-31 Q62
@ .word 0x40000000, 0x00000000 @ atan 2^-32 Q62
@ .thumb_func
@ qfp_dexp:
@ push {r4-r7,r14}
@ bl dunpacks
@ adr r4,dreddata1
@ bl dreduce
@ cmp r1,#0
@ bge 1f
@ ldr r4,=#0xF473DE6B
@ ldr r5,=#0x2C5C85FD @ ln2 Q62
@ adds r0,r4
@ adcs r1,r5
@ subs r2,#1
@ 1:
@ push {r2}
@ movs r7,#1 @ shift
@ adr r6,dtab_exp
@ movs r2,#0
@ movs r3,#1
@ lsls r3,#30 @ x=1 Q62
@ 3:
@ ldmia r6!,{r4,r5}
@ mov r12,r6
@ subs r0,r4
@ sbcs r1,r5
@ bmi 1f
@ rsbs r6,r7,#0
@ adds r6,#32 @ complementary shift
@ movs r5,r3
@ asrs r5,r7
@ movs r4,r3
@ lsls r4,r6
@ movs r6,r2
@ lsrs r6,r7 @ rounding bit in carry
@ orrs r4,r6
@ adcs r2,r4
@ adcs r3,r5 @ x+=x>>i
@ b 2f
@ 1:
@ adds r0,r4 @ restore argument
@ adcs r1,r5
@ 2:
@ mov r6,r12
@ adds r7,#1
@ cmp r7,#33
@ bne 3b
@ @ here
@ @ r0:r1 ε (residual x, where x=a+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r2:r3 exp a Q62
@ @ and we wish to calculate exp x=exp a exp ε~(exp a)(1+ε)
@ muls32_32_64 r0,r3, r4,r1, r5,r6,r7,r4,r1
@ @ r4:r1 ε exp a Q(62+62-32)=Q92
@ lsrs r4,#30
@ lsls r0,r1,#2
@ orrs r0,r4
@ asrs r1,#30
@ adds r0,r2
@ adcs r1,r3
@ pop {r2}
@ rsbs r2,#0
@ adds r2,#62
@ bl qfp_fix642double @ in principle we can pack faster than this because we know the exponent
@ pop {r4-r7,r15}
@ .ltorg
@ .thumb_func
@ qfp_dln:
@ push {r4-r7,r14}
@ lsls r7,r1,#1
@ bcs 5f @ <0 ...
@ asrs r7,#21
@ beq 5f @ ... or =0? return -Inf
@ adds r7,#1
@ beq 6f @ Inf/NaN? return +Inf
@ bl dunpacks
@ push {r2}
@ lsls r1,#9
@ lsrs r2,r0,#23
@ orrs r1,r2
@ lsls r0,#9
@ @ r0:r1 m Q61 = m/2 Q62 0.5≤m/2<1
@ movs r7,#1 @ shift
@ adr r6,dtab_exp
@ mov r12,r6
@ movs r2,#0
@ movs r3,#0 @ y=0 Q62
@ 3:
@ rsbs r6,r7,#0
@ adds r6,#32 @ complementary shift
@ movs r5,r1
@ asrs r5,r7
@ movs r4,r1
@ lsls r4,r6
@ movs r6,r0
@ lsrs r6,r7
@ orrs r4,r6 @ x>>i, rounding bit in carry
@ adcs r4,r0
@ adcs r5,r1 @ x+(x>>i)
@ lsrs r6,r5,#30
@ bne 1f @ x+(x>>i)>1?
@ movs r0,r4
@ movs r1,r5 @ x+=x>>i
@ mov r6,r12
@ ldmia r6!,{r4,r5}
@ subs r2,r4
@ sbcs r3,r5
@ 1:
@ movs r4,#8
@ add r12,r4
@ adds r7,#1
@ cmp r7,#33
@ bne 3b
@ @ here:
@ @ r0:r1 residual x, nearly 1 Q62
@ @ r2:r3 y ~ ln m/2 = ln m - ln2 Q62
@ @ result is y + ln2 + ln x ~ y + ln2 + (x-1)
@ lsls r1,#2
@ asrs r1,#2 @ x-1
@ adds r2,r0
@ adcs r3,r1
@ pop {r7}
@ @ here:
@ @ r2:r3 ln m/2 = ln m - ln2 Q62
@ @ r7 unbiased exponent
@ adr r4,dreddata1+4
@ ldmia r4,{r0,r1,r4}
@ adds r7,#1
@ muls r0,r7 @ Q62
@ muls r1,r7 @ Q41
@ muls r4,r7 @ Q20
@ lsls r7,r1,#21
@ asrs r1,#11
@ asrs r5,r1,#31
@ adds r0,r7
@ adcs r1,r5
@ lsls r7,r4,#10
@ asrs r4,#22
@ asrs r5,r1,#31
@ adds r1,r7
@ adcs r4,r5
@ @ r0:r1:r4 exponent*ln2 Q62
@ asrs r5,r3,#31
@ adds r0,r2
@ adcs r1,r3
@ adcs r4,r5
@ @ r0:r1:r4 result Q62
@ movs r2,#62
@ 1:
@ asrs r5,r1,#31
@ cmp r4,r5
@ beq 2f @ r4 a sign extension of r1?
@ lsrs r0,#4 @ no: shift down 4 places and try again
@ lsls r6,r1,#28
@ orrs r0,r6
@ lsrs r1,#4
@ lsls r6,r4,#28
@ orrs r1,r6
@ asrs r4,#4
@ subs r2,#4
@ b 1b
@ 2:
@ bl qfp_fix642double
@ pop {r4-r7,r15}
@ 5:
@ ldr r1,=#0xfff00000
@ movs r0,#0
@ pop {r4-r7,r15}
@ 6:
@ ldr r1,=#0x7ff00000
@ movs r0,#0
@ pop {r4-r7,r15}
@ .ltorg
@ .align 2
@ dreddata1:
@ .word 0x0000B8AA @ 1/ln2 Q15
@ .word 0x0013DE6B @ ln2 Q62 Q62=2C5C85FDF473DE6B split into 21-bit pieces
@ .word 0x000FEFA3
@ .word 0x000B1721
@ dtab_exp:
@ .word 0xbf984bf3, 0x19f323ec @ log 1+2^-1 Q62
@ .word 0xcd4d10d6, 0x0e47fbe3 @ log 1+2^-2 Q62
@ .word 0x8abcb97a, 0x0789c1db @ log 1+2^-3 Q62
@ .word 0x022c54cc, 0x03e14618 @ log 1+2^-4 Q62
@ .word 0xe7833005, 0x01f829b0 @ log 1+2^-5 Q62
@ .word 0x87e01f1e, 0x00fe0545 @ log 1+2^-6 Q62
@ .word 0xac419e24, 0x007f80a9 @ log 1+2^-7 Q62
@ .word 0x45621781, 0x003fe015 @ log 1+2^-8 Q62
@ .word 0xa9ab10e6, 0x001ff802 @ log 1+2^-9 Q62
@ .word 0x55455888, 0x000ffe00 @ log 1+2^-10 Q62
@ .word 0x0aa9aac4, 0x0007ff80 @ log 1+2^-11 Q62
@ .word 0x01554556, 0x0003ffe0 @ log 1+2^-12 Q62
@ .word 0x002aa9ab, 0x0001fff8 @ log 1+2^-13 Q62
@ .word 0x00055545, 0x0000fffe @ log 1+2^-14 Q62
@ .word 0x8000aaaa, 0x00007fff @ log 1+2^-15 Q62
@ .word 0xe0001555, 0x00003fff @ log 1+2^-16 Q62
@ .word 0xf80002ab, 0x00001fff @ log 1+2^-17 Q62
@ .word 0xfe000055, 0x00000fff @ log 1+2^-18 Q62
@ .word 0xff80000b, 0x000007ff @ log 1+2^-19 Q62
@ .word 0xffe00001, 0x000003ff @ log 1+2^-20 Q62
@ .word 0xfff80000, 0x000001ff @ log 1+2^-21 Q62
@ .word 0xfffe0000, 0x000000ff @ log 1+2^-22 Q62
@ .word 0xffff8000, 0x0000007f @ log 1+2^-23 Q62
@ .word 0xffffe000, 0x0000003f @ log 1+2^-24 Q62
@ .word 0xfffff800, 0x0000001f @ log 1+2^-25 Q62
@ .word 0xfffffe00, 0x0000000f @ log 1+2^-26 Q62
@ .word 0xffffff80, 0x00000007 @ log 1+2^-27 Q62
@ .word 0xffffffe0, 0x00000003 @ log 1+2^-28 Q62
@ .word 0xfffffff8, 0x00000001 @ log 1+2^-29 Q62
@ .word 0xfffffffe, 0x00000000 @ log 1+2^-30 Q62
@ .word 0x80000000, 0x00000000 @ log 1+2^-31 Q62
@ .word 0x40000000, 0x00000000 @ log 1+2^-32 Q62
qfp_lib_end:
@ ---- extraction artifact: dataset file-join metadata, commented out so it
@ ---- cannot be mistaken for assembly source ----
@ Air-duino/Arduino-AirMCU | 88,395
@ variants/AIR001/AirM2M_Air001_Board/qfplib-m0-full.S
@ Copyright 2019-2020 Mark Owen
@ http://www.quinapalus.com
@ E-mail: qfp@quinapalus.com
@
@ This file is free software: you can redistribute it and/or modify
@ it under the terms of version 2 of the GNU General Public License
@ as published by the Free Software Foundation.
@
@ This file is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@ GNU General Public License for more details.
@
@ You should have received a copy of the GNU General Public License
@ along with this file. If not, see <http://www.gnu.org/licenses/> or
@ write to the Free Software Foundation, Inc., 51 Franklin Street,
@ Fifth Floor, Boston, MA 02110-1301, USA.
.syntax unified
.cpu cortex-m0plus
.thumb
@ exported symbols
.global __aeabi_fadd
.global __aeabi_fsub
.global __aeabi_fmul
.global __aeabi_fdiv
.global qfp_fcmp
.global _fsqrt
.global _ffix
.global qfp_float2fix
.global _ffixu
.global qfp_float2ufix
.global _fflt
.global qfp_fix2float
.global _ffltu
.global qfp_ufix2float
.global _ll_sto_f
.global qfp_fix642float
.global _ll_uto_f
.global qfp_ufix642float
@ .global qfp_fcos
@ .global qfp_fsin
@ .global qfp_ftan
@ .global qfp_fatan2
@ .global qfp_fexp
@ .global qfp_fln
.global __aeabi_dadd
.global __aeabi_dsub
.global __aeabi_dmul
.global __aeabi_ddiv
.global _dsqrt
@ .global qfp_dcos
@ .global qfp_dsin
@ .global qfp_dtan
@ .global qfp_datan2
@ .global qfp_dexp
@ .global qfp_dln
.global qfp_dcmp
.global _ll_sfrom_f
.global qfp_float2fix64
.global _ll_ufrom_f
.global qfp_float2ufix64
.global _dfix
.global qfp_double2fix
.global _dfixu
.global qfp_double2ufix
.global _ll_sfrom_d
.global qfp_double2fix64
.global _ll_ufrom_d
.global qfp_double2ufix64
.global _dflt
.global qfp_fix2double
.global _dfltu
.global qfp_ufix2double
.global _ll_sto_d
.global qfp_fix642double
.global _ll_uto_d
.global qfp_ufix642double
.global __aeabi_d2f
.global __aeabi_f2d
qfp_lib_start:
@ xchxy: exchange r0<->r1 and r2<->r3 (the mantissa/exponent pairs of the
@ two unpacked operands).  Old r0,r2 are pushed together with the return
@ address, then popped straight into r1,r3 and pc in a single instruction.
@ Flags are preserved (MOV register form, not MOVS); no other registers
@ are disturbed.
xchxy:
push {r0,r2,r14}
mov r0,r1 @ new x mantissa = old y mantissa
mov r2,r3 @ new x exponent = old y exponent
pop {r1,r3,r15} @ old r0 -> r1, old r2 -> r3, return via pc
@ unpackx: unpack IEEE single in r0 into
@   r0 = signed (two's complement) mantissa, 9Q23 (24 significant bits)
@   r2 = signed exponent, bias removed
@ trashes r4
@ Special cases are encoded with out-of-band exponents so callers can
@ detect them with simple magnitude tests:
@   zero, denormal -> mantissa=+/-1, exponent=-380
@   Inf, NaN       -> mantissa=+/-1, exponent=+640
unpackx:
lsrs r2,r0,#23 @ save exponent and sign (9 bits: sign in bit 8)
lsls r0,#9 @ extract mantissa
lsrs r0,#9
movs r4,#1
lsls r4,#23 @ r4=0x00800000
orrs r0,r4 @ reinstate implied leading 1
cmp r2,#255 @ test sign bit: sign+exponent > 255 iff sign set
uxtb r2,r2 @ clear it, leaving just the biased exponent
bls 1f @ branch on positive
rsbs r0,#0 @ negate mantissa
1:
subs r2,#1
cmp r2,#254 @ zero/denormal/Inf/NaN? (biased exponent 0 or 255)
bhs 2f
subs r2,#126 @ remove exponent bias: can now be -126..+127
bx r14
2: @ here with special-case values
cmp r0,#0
mov r0,r4 @ set mantissa to +1 (MOV register: flags from CMP survive)
bpl 3f
rsbs r0,#0 @ zero/denormal/Inf/NaN: mantissa=+/-1
3:
subs r2,#126 @ zero/denormal: exponent -> -127; Inf, NaN: exponent -> 128
lsls r2,#2 @ zero/denormal: exponent -> -508; Inf, NaN: exponent -> 512
adds r2,#128 @ zero/denormal: exponent -> -380; Inf, NaN: exponent -> 640
bx r14
@ packx: normalise and pack signed mantissa in r0 (nominally 3Q29) with
@ signed exponent in r2 -> IEEE single in r0, round-to-nearest-even.
@ trashes r4; preserves r1,r3
@ r5: "sticky bits" — must be zero iff all result bits below r0 are zero,
@     so that ties are detected correctly for round-to-even
@ Overflow produces signed infinity; underflow produces signed zero
@ (denormals are not generated).
@ packx0 is an alternative entry for a mantissa already known non-negative.
packx:
lsrs r4,r0,#31 @ save sign bit
lsls r4,r4,#31 @ sign now in b31
bpl 2f @ skip if positive
cmp r5,#0
beq 11f
adds r0,#1 @ fiddle carry in to following rsb if sticky bits are non-zero
11:
rsbs r0,#0 @ can now treat r0 as unsigned
packx0:
bmi 3f @ catch r0=0x80000000 case
2:
subs r2,#1 @ normalisation loop
adds r0,r0
beq 1f @ zero? special case
bpl 2b @ normalise so leading "1" in bit 31
3:
adds r2,#129 @ (mis-)offset exponent
bne 12f @ special case: highest denormal can round to lowest normal
adds r0,#0x80 @ in special case, need to add 256 to r0 for rounding
bcs 4f @ tripped carry? then have leading 1 in C as required
12:
adds r0,#0x80 @ rounding
bcs 4f @ tripped carry? then have leading 1 in C as required (and result is even so can ignore sticky bits)
cmp r5,#0
beq 7f @ sticky bits zero?
8:
lsls r0,#1 @ remove leading 1
9:
subs r2,#1 @ compensate exponent on this path
4:
cmp r2,#254
bge 5f @ overflow?
adds r2,#1 @ correct exponent offset
ble 10f @ denormal/underflow?
lsrs r0,#9 @ align mantissa
lsls r2,#23 @ align exponent
orrs r0,r2 @ assemble exponent and mantissa
6:
orrs r0,r4 @ apply sign
1:
bx r14
5:
movs r0,#0xff @ create infinity
lsls r0,#23
b 6b
10:
movs r0,#0 @ create zero (sign applied at 6b not taken: result is +0 here)
bx r14
7: @ sticky bit rounding case: all sticky bits zero, on a rounding tie
lsls r5,r0,#24 @ check bottom 8 bits of r0
bne 8b @ in rounding-tie case?
lsrs r0,#9 @ ensure even result
lsls r0,#10
b 9b
.align 2
.ltorg
@ mul0: signed multiply r0 (1Q23) by r1 (4Q23), result in r0 (7Q25),
@ sticky bits (everything shifted off below the result) collected in r5.
@ trashes r3,r4
@ The Cortex-M0+ MULS returns only the low 32 bits, so the 64-bit product
@ is synthesised from four 16x16 partial products (L*H, H*L, L*L, H*H).
@ NOTE(review): no caller is visible in the active code of this chunk —
@ presumably used by the commented-out trig routines; confirm before removal.
mul0:
uxth r3,r0 @ Q23
asrs r4,r1,#16 @ Q7
muls r3,r4 @ L*H, Q30 signed
asrs r4,r0,#16 @ Q7
uxth r5,r1 @ Q23
muls r4,r5 @ H*L, Q30 signed
adds r3,r4 @ sum of middle partial products
uxth r4,r0
muls r4,r5 @ L*L, Q46 unsigned
lsls r5,r4,#16 @ initialise sticky bits from low half of low partial product
lsrs r4,#16 @ Q25
adds r3,r4 @ add high half of low partial product to sum of middle partial products
@ (cannot generate carry by limits on input arguments)
asrs r0,#16 @ Q7
asrs r1,#16 @ Q7
muls r0,r1 @ H*H, Q14 signed
lsls r0,#11 @ high partial product Q25
lsls r1,r3,#27 @ sticky
orrs r5,r1 @ collect further sticky bits
asrs r1,r3,#5 @ middle partial products Q25
adds r0,r1 @ final result
bx r14
@ qfp_fcmp: compare IEEE singles x=r0, y=r1.
@ Returns r0 = +1 if x>y, -1 if x<y, 0 if x==y (flags also set by the
@ final SUBS, so callers may branch directly on the comparison).
@ trashes r2
@ Denormals have their mantissas cleared (compared as signed zeros) and
@ NaNs likewise collapse to +/-Inf of the same sign — there is no IEEE
@ unordered result.  +0 and -0 compare equal (handled on the
@ opposite-signs path).  Ordering exploits the fact that packed IEEE
@ bit patterns of like-signed floats order as integers.
.thumb_func
qfp_fcmp:
lsls r2,r0,#1
lsrs r2,#24 @ exponent of x
beq 1f
cmp r2,#0xff
bne 2f
1:
lsrs r0,#23 @ clear mantissa if NaN or denormal
lsls r0,#23
2:
lsls r2,r1,#1
lsrs r2,#24 @ exponent of y
beq 1f
cmp r2,#0xff
bne 2f
1:
lsrs r1,#23 @ clear mantissa if NaN or denormal
lsls r1,#23
2:
movs r2,#1 @ initialise result
eors r1,r0
bmi 4f @ opposite signs? then can proceed on basis of sign of x
eors r1,r0 @ restore y
bpl 1f
rsbs r2,#0 @ both negative? flip comparison
1:
cmp r0,r1 @ like-signed packed floats order as signed integers
bgt 2f
blt 3f
5:
movs r2,#0
3:
rsbs r2,#0
2:
subs r0,r2,#0 @ return result, setting flags
bx r14
4: @ opposite signs; r1 currently holds x^y
orrs r1,r0
adds r1,r1 @ drop sign bit: zero iff x and y are +0/-0 pair
beq 5b
cmp r0,#0
bge 2b @ x>=0 (so y<0): return +1
b 3b @ x<0 (so y>=0): return -1
@ _ffix / qfp_float2fix: convert float in r0 to signed fixed point in r0,
@ with r1 = number of fraction bits (r1=0 for _ffix, i.e. plain int).
@ Rounds towards -Inf (ASRS right shift == floor), clamps to
@ 0x7fffffff / 0x80000000 on overflow; zero/denormal input returns 0.
@ _ffixu / qfp_float2ufix: unsigned variants below; negative input
@ returns 0, overflow returns 0xffffffff.
@ The two routines interleave and share the numeric labels 1:/4:/5: —
@ they must be kept together and byte-identical in layout.
@ trashes r2,r3,r4
.thumb_func
_ffix:
movs r1,#0 @ fall through
@ convert float in r0 to signed fixed point in r0, clamping
.thumb_func
qfp_float2fix:
push {r4,r14}
bl unpackx
movs r3,r2
adds r3,#130
bmi 6f @ zero/denormal (unpackx exponent -380)? return 0
add r2,r1 @ incorporate binary point position into exponent
subs r2,#23 @ r2 is now amount of left shift required
blt 1f @ requires right shift?
cmp r2,#7 @ overflow?
ble 4f
3: @ overflow (also reached for Inf/NaN via their +640 exponent)
asrs r1,r0,#31 @ +ve:0 -ve:0xffffffff
mvns r1,r1 @ +ve:0xffffffff -ve:0
movs r0,#1
lsls r0,#31
5:
eors r0,r1 @ +ve:0x7fffffff -ve:0x80000000 (unsigned path: 0xffffffff)
pop {r4,r15}
1:
rsbs r2,#0 @ right shift for r0, >0
cmp r2,#32
blt 2f @ more than 32 bits of right shift?
movs r2,#32 @ saturate shift: ASRS by 32 yields 0 or -1 (floor)
2:
asrs r0,r0,r2
pop {r4,r15}
6:
movs r0,#0
pop {r4,r15}
@ unsigned version
.thumb_func
_ffixu:
movs r1,#0 @ fall through
.thumb_func
qfp_float2ufix:
push {r4,r14}
bl unpackx
add r2,r1 @ incorporate binary point position into exponent
movs r1,r0
bmi 5b @ negative? return zero (r0^r1 = 0 at 5b)
subs r2,#23 @ r2 is now amount of left shift required
blt 1b @ requires right shift? share signed path
mvns r1,r0 @ ready to return 0xffffffff via 5b
cmp r2,#8 @ overflow?
bgt 5b
4:
lsls r0,r0,r2 @ result fits, left shifted
pop {r4,r15}
@ 64-bit integer/fixed-point -> float conversions, rounding to nearest.
@ _ll_uto_f / qfp_ufix642float: unsigned r1:r0, fraction bits in r2.
@ _ll_sto_f / qfp_fix642float:  signed   r1:r0, fraction bits in r2.
@ Result in r0.  The unsigned entry pre-shifts a value with bit 63 set
@ down one place (saving the lost bit as sticky) so the signed code can
@ be reused; both entries share labels 3:/4: and tail-jump into packret /
@ ret_pop45 defined in the fix2float block below.
@ trashes r2,r3 (r4,r5 saved)
.thumb_func
_ll_uto_f:
movs r2,#0 @ fall through
@ convert unsigned 64-bit fix to float, rounding; number of r0:r1 bits after point in r2
.thumb_func
qfp_ufix642float:
push {r4,r5,r14}
cmp r1,#0
bpl 3f @ positive? we can use signed code
lsls r5,r1,#31 @ contribution to sticky bits
orrs r5,r0
lsrs r0,r1,#1
subs r2,#1 @ compensate exponent for the halving
b 4f
@ convert int64 to float, rounding
.thumb_func
_ll_sto_f:
movs r2,#0 @ fall through
@ convert signed 64-bit fix to float, rounding; number of r0:r1 bits after point in r2
.thumb_func
qfp_fix642float:
push {r4,r5,r14}
3:
movs r5,r0
orrs r5,r1
beq ret_pop45 @ zero? return +0
asrs r5,r1,#31 @ sign bits
2:
asrs r4,r1,#24 @ try shifting 7 bits at a time
cmp r4,r5
bne 1f @ next shift will overflow?
lsls r1,#7
lsrs r4,r0,#25
orrs r1,r4
lsls r0,#7
adds r2,#7
b 2b
1:
movs r5,r0 @ low word becomes the sticky bits for packx
movs r0,r1 @ high word is the mantissa
4:
rsbs r2,#0
adds r2,#32+29 @ exponent adjustment: 32 for the dropped low word, 29 for packx's Q29
b packret
@ 32-bit integer/fixed-point -> float conversions, rounding to nearest.
@ _fflt / qfp_fix2float: signed r0, fraction bits in r1.
@ _ffltu / qfp_ufix2float: unsigned r0, fraction bits in r1; values
@ < 2^31 are routed through the signed path, larger ones are halved
@ with the lost bit kept as a sticky bit.
@ Also defines the shared return points packretns / packret / ret_pop45
@ (pack via packx and pop r4,r5,pc), used by the 64-bit converters above.
@ trashes r2 (r4,r5 saved)
.thumb_func
_fflt:
movs r1,#0 @ fall through
@ convert signed fix to float, rounding; number of r0 bits after point in r1
.thumb_func
qfp_fix2float:
push {r4,r5,r14}
1:
movs r2,#29
subs r2,r1 @ fix exponent: packx expects Q29
packretns: @ pack and return, sticky bits=0
movs r5,#0
packret: @ common return point: "pack and return"
bl packx
ret_pop45:
pop {r4,r5,r15}
@ unsigned version
.thumb_func
_ffltu:
movs r1,#0 @ fall through
.thumb_func
qfp_ufix2float:
push {r4,r5,r14}
cmp r0,#0
bge 1b @ treat <2^31 as signed
movs r2,#30
subs r2,r1 @ fix exponent (one higher: mantissa was halved)
lsls r5,r0,#31 @ one sticky bit
lsrs r0,#1
b packret
@ All the scientific functions are implemented using the CORDIC algorithm. For notation,
@ details not explained in the comments below, and a good overall survey see
@ "50 Years of CORDIC: Algorithms, Architectures, and Applications" by Meher et al.,
@ IEEE Transactions on Circuits and Systems Part I, Volume 56 Issue 9.
@ NOTE(review): the single-precision callers (qfp_fsin etc.) are commented
@ out in this build, so this core currently appears unreferenced — confirm
@ before removing.
@ Register use:
@ r0: x
@ r1: y
@ r2: z/omega
@ r3: coefficient pointer (table entries carry b0 = "no shift" flag,
@     b1 = "end of table" flag; the step routines return the end flag
@     in the carry for the caller's loop test)
@ r4,r12: m
@ r5: i (shift)
@ r6,r7: x>>i, y>>i scratch (callers save/restore them)
cordic_start: @ initialisation
movs r5,#0 @ initial shift=0
mov r12,r4 @ stash m
b 5f
cordic_vstep: @ one step of algorithm in vector mode (drive y towards 0)
cmp r1,#0 @ check sign of y
bgt 4f
b 1f
cordic_rstep: @ one step of algorithm in rotation mode (drive z towards 0)
cmp r2,#0 @ check sign of angle
bge 1f
4:
subs r1,r6 @ negative rotation: y=y-(x>>i)
rsbs r7,#0
adds r2,r4 @ accumulate angle
b 2f
1:
adds r1,r6 @ positive rotation: y=y+(x>>i)
subs r2,r4 @ accumulate angle
2:
mov r4,r12
muls r7,r4 @ apply sign from m
subs r0,r7 @ finish rotation: x=x{+/-}(y>>i)
5:
ldmia r3!,{r4} @ fetch next angle from table and bump pointer
lsrs r4,#1 @ repeated angle? (b0 set = do not advance shift)
bcs 3f
adds r5,#1 @ adjust shift if not
3:
mov r6,r0
asrs r6,r5 @ x>>i
mov r7,r1
asrs r7,r5 @ y>>i
lsrs r4,#1 @ shift end flag into carry
bx r14
@ CORDIC rotation mode: rotate (x,y) by angle z; z->0
cordic_rot:
push {r6,r7,r14}
bl cordic_start @ initialise
1:
bl cordic_rstep
bcc 1b @ step until table finished
asrs r6,r0,#14 @ remaining small rotations can be linearised: see IV.B of paper referenced above
asrs r7,r1,#14
asrs r2,#3
muls r6,r2 @ all remaining CORDIC steps in a multiplication
muls r7,r2
mov r4,r12
muls r7,r4
asrs r6,#12
asrs r7,#12
subs r0,r7 @ x=x{+/-}(yz>>k)
adds r1,r6 @ y=y+(xz>>k)
cordic_exit:
pop {r6,r7,r15}
@ CORDIC vector mode: rotate (x,y) until y~0, accumulating angle in z
cordic_vec:
push {r6,r7,r14}
bl cordic_start @ initialise
1:
bl cordic_vstep
bcc 1b @ step until table finished
4:
cmp r1,#0 @ continue as in cordic_vstep but without using table; x is not affected as y is small
bgt 2f @ check sign of y
adds r1,r6 @ positive rotation: y=y+(x>>i)
subs r2,r4 @ accumulate angle
b 3f
2:
subs r1,r6 @ negative rotation: y=y-(x>>i)
adds r2,r4 @ accumulate angle
3:
asrs r6,#1
asrs r4,#1 @ next "table entry": halve the angle each extra step
bne 4b
b cordic_exit
@ .thumb_func
@ qfp_fsin: @ calculate sin and cos using CORDIC rotation method
@ push {r4,r5,r14}
@ movs r1,#24
@ bl qfp_float2fix @ range reduction by repeated subtraction/addition in fixed point
@ ldr r4,pi_q29
@ lsrs r4,#4 @ 2pi Q24
@ 1:
@ subs r0,r4
@ bge 1b
@ 1:
@ adds r0,r4
@ bmi 1b @ now in range 0..2pi
@ lsls r2,r0,#2 @ z Q26
@ lsls r5,r4,#1 @ pi Q26 (r4=pi/2 Q26)
@ ldr r0,=#0x136e9db4 @ initialise CORDIC x,y with scaling
@ movs r1,#0
@ 1:
@ cmp r2,r4 @ >pi/2?
@ blt 2f
@ subs r2,r5 @ reduce range to -pi/2..pi/2
@ rsbs r0,#0 @ rotate vector by pi
@ b 1b
@ 2:
@ lsls r2,#3 @ Q29
@ adr r3,tab_cc @ circular coefficients
@ movs r4,#1 @ m=1
@ bl cordic_rot
@ adds r1,#9 @ fiddle factor to make sin(0)==0
@ movs r2,#0 @ exponents to zero
@ movs r3,#0
@ movs r5,#0 @ no sticky bits
@ bl clampx
@ bl packx @ pack cosine
@ bl xchxy
@ bl clampx
@ b packretns @ pack sine
@ .thumb_func
@ qfp_fcos:
@ push {r14}
@ bl qfp_fsin
@ mov r0,r1 @ extract cosine result
@ pop {r15}
@ @ force r0 to lie in range [-1,1] Q29
@ clampx:
@ movs r4,#1
@ lsls r4,#29
@ cmp r0,r4
@ bgt 1f
@ rsbs r4,#0
@ cmp r0,r4
@ ble 1f
@ bx r14
@ 1:
@ movs r0,r4
@ bx r14
@ .thumb_func
@ qfp_ftan:
@ push {r4,r5,r6,r14}
@ bl qfp_fsin @ sine in r0/r2, cosine in r1/r3
@ b fdiv_n @ sin/cos
@ .thumb_func
@ qfp_fexp:
@ push {r4,r5,r14}
@ movs r1,#24
@ bl qfp_float2fix @ Q24: covers entire valid input range
@ asrs r1,r0,#16 @ Q8
@ ldr r2,=#5909 @ log_2(e) Q12
@ muls r2,r1 @ estimate exponent of result Q20 (always an underestimate)
@ asrs r2,#20 @ Q0
@ lsls r1,r0,#6 @ Q30
@ ldr r0,=#0x2c5c85fe @ ln(2) Q30
@ muls r0,r2 @ accurate contribution of estimated exponent
@ subs r1,r0 @ residual to be exponentiated, guaranteed ≥0, < about 0.75 Q30
@ @ here
@ @ r1: mantissa to exponentiate, 0...~0.75 Q30
@ @ r2: first exponent estimate
@ movs r5,#1 @ shift
@ adr r3,ftab_exp @ could use alternate words from dtab_exp to save space if required
@ movs r0,#1
@ lsls r0,#29 @ x=1 Q29
@ 3:
@ ldmia r3!,{r4}
@ subs r4,r1,r4
@ bmi 1f
@ movs r1,r4 @ keep result of subtraction
@ movs r4,r0
@ lsrs r4,r5
@ adcs r0,r4 @ x+=x>>i with rounding
@ 1:
@ adds r5,#1
@ cmp r5,#15
@ bne 3b
@ @ here
@ @ r0: exp a Q29 1..2+
@ @ r1: ε (residual x where x=a+ε), < 2^-14 Q30
@ @ r2: first exponent estimate
@ @ and we wish to calculate exp x=exp a exp ε~(exp a)(1+ε)
@ lsrs r3,r0,#15 @ exp a Q14
@ muls r3,r1 @ ε exp a Q44
@ lsrs r3,#15 @ ε exp a Q29
@ adcs r0,r3 @ (1+ε) exp a Q29 with rounding
@ b packretns @ pack result
@ .thumb_func
@ qfp_fln:
@ push {r4,r5,r14}
@ asrs r1,r0,#23
@ bmi 3f @ -ve argument?
@ beq 3f @ 0 argument?
@ cmp r1,#0xff
@ beq 4f @ +Inf/NaN
@ bl unpackx
@ adds r2,#1
@ ldr r3,=#0x2c5c85fe @ ln(2) Q30
@ lsrs r1,r3,#14 @ ln(2) Q16
@ muls r1,r2 @ result estimate Q16
@ asrs r1,#16 @ integer contribution to result
@ muls r3,r2
@ lsls r4,r1,#30
@ subs r3,r4 @ fractional contribution to result Q30, signed
@ lsls r0,#8 @ Q31
@ @ here
@ @ r0: mantissa Q31
@ @ r1: integer contribution to result
@ @ r3: fractional contribution to result Q30, signed
@ movs r5,#1 @ shift
@ adr r4,ftab_exp @ could use alternate words from dtab_exp to save space if required
@ 2:
@ movs r2,r0
@ lsrs r2,r5
@ adcs r2,r0 @ x+(x>>i) with rounding
@ bcs 1f @ >=2?
@ movs r0,r2 @ keep result
@ ldr r2,[r4]
@ subs r3,r2
@ 1:
@ adds r4,#4
@ adds r5,#1
@ cmp r5,#15
@ bne 2b
@ @ here
@ @ r0: residual x, nearly 2 Q31
@ @ r1: integer contribution to result
@ @ r3: fractional part of result Q30
@ asrs r0,#2
@ adds r0,r3,r0
@ cmp r1,#0
@ bne 2f
@ asrs r0,#1
@ lsls r1,#29
@ adds r0,r1
@ movs r2,#0
@ b packretns
@ 2:
@ lsls r1,#24
@ asrs r0,#6 @ Q24
@ adcs r0,r1 @ with rounding
@ movs r2,#5
@ b packretns
@ 3:
@ ldr r0,=#0xff800000 @ -Inf
@ pop {r4,r5,r15}
@ 4:
@ ldr r0,=#0x7f800000 @ +Inf
@ pop {r4,r5,r15}
@ .align 2
@ ftab_exp:
@ .word 0x19f323ed @ log 1+2^-1 Q30
@ .word 0x0e47fbe4 @ log 1+2^-2 Q30
@ .word 0x0789c1dc @ log 1+2^-3 Q30
@ .word 0x03e14618 @ log 1+2^-4 Q30
@ .word 0x01f829b1 @ log 1+2^-5 Q30
@ .word 0x00fe0546 @ log 1+2^-6 Q30
@ .word 0x007f80aa @ log 1+2^-7 Q30
@ .word 0x003fe015 @ log 1+2^-8 Q30
@ .word 0x001ff803 @ log 1+2^-9 Q30
@ .word 0x000ffe00 @ log 1+2^-10 Q30
@ .word 0x0007ff80 @ log 1+2^-11 Q30
@ .word 0x0003ffe0 @ log 1+2^-12 Q30
@ .word 0x0001fff8 @ log 1+2^-13 Q30
@ .word 0x0000fffe @ log 1+2^-14 Q30
@ .thumb_func
@ qfp_fatan2:
@ push {r4,r5,r14}
@ @ unpack arguments and shift one down to have common exponent
@ bl unpackx
@ bl xchxy
@ bl unpackx
@ lsls r0,r0,#5 @ Q28
@ lsls r1,r1,#5 @ Q28
@ adds r4,r2,r3 @ this is -760 if both arguments are 0 and at least -380-126=-506 otherwise
@ asrs r4,#9
@ adds r4,#1
@ bmi 2f @ force y to 0 proper, so result will be zero
@ subs r4,r2,r3 @ calculate shift
@ bge 1f @ ex>=ey?
@ rsbs r4,#0 @ make shift positive
@ asrs r0,r4
@ cmp r4,#28
@ blo 3f
@ asrs r0,#31
@ b 3f
@ 1:
@ asrs r1,r4
@ cmp r4,#28
@ blo 3f
@ 2:
@ @ here |x|>>|y| or both x and y are ±0
@ cmp r0,#0
@ bge 4f @ x positive, return signed 0
@ ldr r0,pi_q29 @ x negative, return +/- pi
@ asrs r1,#31
@ eors r0,r1
@ b 7f
@ 4:
@ asrs r0,r1,#31
@ b 7f
@ 3:
@ movs r2,#0 @ initial angle
@ cmp r0,#0 @ x negative
@ bge 5f
@ rsbs r0,#0 @ rotate to 1st/4th quadrants
@ rsbs r1,#0
@ ldr r2,pi_q29 @ pi Q29
@ 5:
@ adr r3,tab_cc @ circular coefficients
@ movs r4,#1 @ m=1
@ bl cordic_vec @ also produces magnitude (with scaling factor 1.646760119), which is discarded
@ mov r0,r2 @ result here is -pi/2..3pi/2 Q29
@ @ asrs r2,#29
@ @ subs r0,r2
@ ldr r2,pi_q29 @ pi Q29
@ adds r4,r0,r2 @ attempt to fix -3pi/2..-pi case
@ bcs 6f @ -pi/2..0? leave result as is
@ subs r4,r0,r2 @ <pi? leave as is
@ bmi 6f
@ subs r0,r4,r2 @ >pi: take off 2pi
@ 6:
@ subs r0,#1 @ fiddle factor so atan2(0,1)==0
@ 7:
@ movs r2,#0 @ exponent for pack
@ b packretns
@ .align 2
@ .ltorg
@ @ first entry in following table is pi Q29
@ pi_q29:
@ @ circular CORDIC coefficients: atan(2^-i), b0=flag for preventing shift, b1=flag for end of table
@ tab_cc:
@ .word 0x1921fb54*4+1 @ no shift before first iteration
@ .word 0x0ed63383*4+0
@ .word 0x07d6dd7e*4+0
@ .word 0x03fab753*4+0
@ .word 0x01ff55bb*4+0
@ .word 0x00ffeaae*4+0
@ .word 0x007ffd55*4+0
@ .word 0x003fffab*4+0
@ .word 0x001ffff5*4+0
@ .word 0x000fffff*4+0
@ .word 0x0007ffff*4+0
@ .word 0x00040000*4+0
@ .word 0x00020000*4+0+2 @ +2 marks end
@ __aeabi_fsub / __aeabi_fadd: AEABI single-precision subtract/add.
@ In:  r0 = x, r1 = y (packed IEEE singles).  Out: r0 = x-y / x+y.
@ AAPCS-conformant: r4-r6 saved, r2,r3,r12 free.  Round-to-nearest-even.
@ Mantissas are converted to two's complement, aligned, added in a
@ 64-bit pair r0:r2 (r2 = bits shifted off the bottom, used as sticky
@ bits), renormalised, then rounded and packed.
@ Specials are folded into extreme exponent values (fa_*e0 / fa_*e255)
@ so the normal path saturates to signed zero / infinity; denormal
@ inputs are effectively flushed and NaNs are not propagated as NaNs —
@ NOTE(review): matches the "can handle denormals/NaNs here" stubs below;
@ confirm intended semantics against upstream qfplib documentation.
.align 2
.thumb_func
__aeabi_fsub:
ldr r2,=#0x80000000
eors r1,r2 @ flip sign on second argument
@ drop into fadd, on .align2:ed boundary
.thumb_func
__aeabi_fadd:
push {r4,r5,r6,r14}
asrs r4,r0,#31 @ r4 = 0 or -1: sign mask of x
lsls r2,r0,#1
lsrs r2,#24 @ x exponent
beq fa_xe0
cmp r2,#255
beq fa_xe255
fa_xe:
asrs r5,r1,#31 @ r5 = 0 or -1: sign mask of y
lsls r3,r1,#1
lsrs r3,#24 @ y exponent
beq fa_ye0
cmp r3,#255
beq fa_ye255
fa_ye:
ldr r6,=#0x007fffff
ands r0,r0,r6 @ extract mantissa bits
ands r1,r1,r6
adds r6,#1 @ r6=0x00800000
orrs r0,r0,r6 @ set implied 1
orrs r1,r1,r6
eors r0,r0,r4 @ complement...
eors r1,r1,r5
subs r0,r0,r4 @ ... and add 1 if sign bit is set: 2's complement
subs r1,r1,r5
subs r5,r3,r2 @ ye-xe
subs r4,r2,r3 @ xe-ye
bmi fa_ygtx
@ here xe>=ye
cmp r4,#30
bge fa_xmgty @ xe much greater than ye?
adds r5,#32 @ complementary shift count
movs r3,r2 @ save exponent
@ here y in r1 must be shifted down r4 places to align with x in r0
movs r2,r1
lsls r2,r2,r5 @ keep the bits we will shift off the bottom of r1
asrs r1,r1,r4
b fa_0
.ltorg
fa_ymgtx:
movs r2,#0 @ result is just y
movs r0,r1
b fa_1
fa_xmgty:
movs r3,r2 @ result is just x
movs r2,#0
b fa_1
fa_ygtx:
@ here ye>xe
cmp r5,#30
bge fa_ymgtx @ ye much greater than xe?
adds r4,#32 @ complementary shift count
@ here x in r0 must be shifted down r5 places to align with y in r1
movs r2,r0
lsls r2,r2,r4 @ keep the bits we will shift off the bottom of r0
asrs r0,r0,r5
fa_0:
adds r0,r1 @ result is now in r0:r2, possibly highly denormalised or zero; exponent in r3
beq fa_9 @ if zero, inputs must have been of identical magnitude and opposite sign, so return +0
fa_1:
lsrs r1,r0,#31 @ sign bit
beq fa_8
mvns r0,r0 @ negate 64-bit r0:r2 to get magnitude
rsbs r2,r2,#0
bne fa_8
adds r0,#1 @ propagate carry when low word is zero
fa_8:
adds r6,r6
@ r6=0x01000000
cmp r0,r6
bhs fa_2
fa_3:
adds r2,r2 @ normalisation loop
adcs r0,r0
subs r3,#1 @ adjust exponent
cmp r0,r6
blo fa_3
fa_2:
@ here r0:r2 is the result mantissa 0x01000000<=r0<0x02000000, r3 the exponent, and r1 the sign bit
lsrs r0,#1 @ rounding bit drops into carry
bcc fa_4
@ rounding bits here are 1:r2
adds r0,#1 @ round up
cmp r2,#0
beq fa_5 @ sticky bits all zero?
fa_4:
cmp r3,#254
bhs fa_6 @ exponent too large or negative?
lsls r1,#31 @ pack everything
add r0,r1
lsls r3,#23
add r0,r3
fa_end:
pop {r4,r5,r6,r15}
fa_9:
cmp r2,#0 @ result zero?
beq fa_end @ return +0
b fa_1
fa_5:
lsrs r0,#1
lsls r0,#1 @ round to even
b fa_4
fa_6:
bge fa_7
@ underflow
@ can handle denormals here
lsls r0,r1,#31 @ result is signed zero
pop {r4,r5,r6,r15}
fa_7:
@ overflow
lsls r0,r1,#8
adds r0,#255
lsls r0,#23 @ result is signed infinity
pop {r4,r5,r6,r15}
fa_xe0:
@ can handle denormals here
subs r2,#32
adds r2,r4 @ exponent -32 for +ve, -33 for -ve: forces x to behave as tiny
b fa_xe
fa_xe255:
@ can handle NaNs here
lsls r2,#8
add r2,r2,r4 @ exponent ~64k: forces x to behave as huge
b fa_xe
fa_ye0:
@ can handle denormals here
subs r3,#32
adds r3,r5 @ exponent -32 for +ve, -33 for -ve: forces y to behave as tiny
b fa_ye
fa_ye255:
@ can handle NaNs here
lsls r3,#8
add r3,r3,r5 @ exponent ~64k: forces y to behave as huge
b fa_ye
@ __aeabi_fmul: AEABI single-precision multiply.
@ In:  r0 = x, r1 = y (packed IEEE singles).  Out: r0 = x*y.
@ AAPCS-conformant: only r7 saved/restored; r14 is reused inside the
@ routine to hold the result sign bit (b31) once the return address is
@ on the stack.  Round-to-nearest-even.
@ The 24x24-bit mantissa product is built from two MULS results
@ (a Q16xQ16 high estimate plus the exact low 32 bits) and glued
@ together ("cut'n'shut"); the implied leading 1s are compensated by
@ adding x+y (stashed in r12) instead of being inserted.
@ Specials are folded into extreme exponents (fm_*e0 / fm_*e255) so the
@ under/overflow paths produce signed zero / smallest normal / infinity;
@ denormals flush and NaNs are not propagated — NOTE(review): confirm
@ against upstream qfplib documentation.
.align 2
.thumb_func
__aeabi_fmul:
push {r7,r14}
mov r2,r0
eors r2,r1 @ sign of result
lsrs r2,#31
lsls r2,#31
mov r14,r2 @ stash sign in lr (return address already stacked)
lsls r0,#1
lsls r1,#1
lsrs r2,r0,#24 @ xe
beq fm_xe0
cmp r2,#255
beq fm_xe255
fm_xe:
lsrs r3,r1,#24 @ ye
beq fm_ye0
cmp r3,#255
beq fm_ye255
fm_ye:
adds r7,r2,r3 @ exponent of result (will possibly be incremented)
subs r7,#128 @ adjust bias for packing
lsls r0,#8 @ x mantissa
lsls r1,#8 @ y mantissa
lsrs r0,#9
lsrs r1,#9
adds r2,r0,r1 @ for later
mov r12,r2 @ x+y compensates for the omitted implied 1s
lsrs r2,r0,#7 @ x[22..7] Q16
lsrs r3,r1,#7 @ y[22..7] Q16
muls r2,r2,r3 @ result [45..14] Q32: never an overestimate and worst case error is 2*(2^7-1)*(2^23-2^7)+(2^7-1)^2 = 2130690049 < 2^31
muls r0,r0,r1 @ result [31..0] Q46
lsrs r2,#18 @ result [45..32] Q14
bcc 1f
cmp r0,#0
bmi 1f
adds r2,#1 @ fix error in r2
1:
lsls r3,r0,#9 @ bits off bottom of result
lsrs r0,#23 @ Q23
lsls r2,#9
adds r0,r2 @ cut'n'shut
add r0,r12 @ implied 1*(x+y) to compensate for no insertion of implied 1s
@ result-1 in r3:r0 Q23+32, i.e., in range [0,3)
lsrs r1,r0,#23
bne fm_0 @ branch if we need to shift down one place
@ here 1<=result<2
cmp r7,#254
bhs fm_3a @ catches both underflow and overflow
lsls r3,#1 @ sticky bits at top of R3, rounding bit in carry
bcc fm_1 @ no rounding
beq fm_2 @ rounding tie?
adds r0,#1 @ round up
fm_1:
adds r7,#1 @ for implied 1
lsls r7,#23 @ pack result
add r0,r7
add r0,r14 @ apply stashed sign
pop {r7,r15}
fm_2: @ rounding tie
adds r0,#1
fm_3:
lsrs r0,#1
lsls r0,#1 @ clear bottom bit: round to even
b fm_1
@ here 1<=result-1<3
fm_0:
adds r7,#1 @ increment exponent
cmp r7,#254
bhs fm_3b @ catches both underflow and overflow
lsrs r0,#1 @ shift mantissa down
bcc fm_1a @ no rounding
adds r0,#1 @ assume we will round up
cmp r3,#0 @ sticky bits
beq fm_3c @ rounding tie?
fm_1a:
adds r7,r7
adds r7,#1 @ for implied 1
lsls r7,#22 @ pack result
add r0,r7
add r0,r14 @ apply stashed sign
pop {r7,r15}
fm_3c:
lsrs r0,#1
lsls r0,#1 @ clear bottom bit: round to even
b fm_1a
fm_xe0:
subs r2,#16 @ zero/denormal x: drive exponent strongly negative
fm_xe255:
lsls r2,#8 @ Inf/NaN x: drive exponent strongly positive
b fm_xe
fm_ye0:
subs r3,#16 @ zero/denormal y: drive exponent strongly negative
fm_ye255:
lsls r3,#8 @ Inf/NaN y: drive exponent strongly positive
b fm_ye
@ here the result is under- or overflowing
fm_3b:
bge fm_4 @ branch on overflow
@ trap case where result is denormal 0x007fffff + 0.5ulp or more
adds r7,#1 @ exponent=-1?
bne fm_5
@ corrected mantissa will be >= 3.FFFFFC (0x1fffffe Q23)
@ so r0 >= 2.FFFFFC (0x17ffffe Q23)
adds r0,#2
lsrs r0,#23
cmp r0,#3
bne fm_5
b fm_6
fm_3a:
bge fm_4 @ branch on overflow
@ trap case where result is denormal 0x007fffff + 0.5ulp or more
adds r7,#1 @ exponent=-1?
bne fm_5
adds r0,#1 @ mantissa=0xffffff (i.e., r0=0x7fffff)?
lsrs r0,#23
beq fm_5
fm_6:
movs r0,#1 @ return smallest normal
lsls r0,#23
add r0,r14
pop {r7,r15}
fm_5:
mov r0,r14 @ underflow: signed zero
pop {r7,r15}
fm_4:
movs r0,#0xff @ overflow: signed infinity
lsls r0,#23
add r0,r14
pop {r7,r15}
@ __aeabi_fdiv: AEABI single-precision divide.  In: r0 = dividend y,
@ r1 = divisor x.  Out: r0 = y/x.  AAPCS-conformant: r4-r6 saved.
@ This version of the division algorithm uses a small lookup table
@ (rcpapp, below — NOTE(review): the original comment said "external
@ divider hardware", which does not match this code) to estimate the
@ reciprocal of the divisor, refined by a Newton-style step to about 14
@ bits; then a multiplication step to get a first
@ quotient estimate; then the remainder based on this estimate is used to calculate a
@ correction to the quotient. The result is good to about 27 bits and so we only need
@ to calculate the exact remainder when close to a rounding boundary.
@ fdiv_n is a secondary entry taking already-packed operands (used by the
@ commented-out qfp_ftan).  Zero/denormal divisor -> signed infinity;
@ zero/denormal dividend or infinite divisor -> signed zero; NaNs are
@ not propagated as NaNs.
.align 2
.thumb_func
__aeabi_fdiv:
push {r4,r5,r6,r14}
fdiv_n:
movs r4,#1
lsls r4,#23 @ implied 1 position
lsls r2,r1,#9 @ clear out sign and exponent
lsrs r2,r2,#9
orrs r2,r2,r4 @ divisor mantissa Q23 with implied 1
@ here
@ r0=packed dividend
@ r1=packed divisor
@ r2=divisor mantissa Q23
@ r4=1<<23
// see divtest.c
lsrs r3,r2,#18 @ x2=x>>18; // Q5 32..63
adr r5,rcpapp-32
ldrb r3,[r5,r3] @ u=lut5[x2-32]; // Q8
lsls r5,r2,#5
muls r5,r5,r3
asrs r5,#14 @ e=(i32)(u*(x<<5))>>14; // Q22
asrs r6,r5,#11
muls r6,r6,r6 @ e2=(e>>11)*(e>>11); // Q22
subs r5,r6
muls r5,r5,r3 @ c=(e-e2)*u; // Q30
lsls r6,r3,#8
asrs r5,#13
adds r5,#1
asrs r5,#1
subs r5,r6,r5 @ u0=(u<<8)-((c+0x2000)>>14); // Q16
@ here
@ r0=packed dividend
@ r1=packed divisor
@ r2=divisor mantissa Q23
@ r4=1<<23
@ r5=reciprocal estimate Q16
lsrs r6,r0,#23
uxtb r3,r6 @ dividend exponent
lsls r0,#9
lsrs r0,#9
orrs r0,r0,r4 @ dividend mantissa Q23
lsrs r1,#23
eors r6,r1 @ sign of result in bit 8
lsrs r6,#8
lsls r6,#31 @ sign of result in bit 31, other bits clear
@ here
@ r0=dividend mantissa Q23
@ r1=divisor sign+exponent
@ r2=divisor mantissa Q23
@ r3=dividend exponent
@ r5=reciprocal estimate Q16
@ r6b31=sign of result
uxtb r1,r1 @ divisor exponent
cmp r1,#0
beq retinf @ divisor zero/denormal -> signed infinity
cmp r1,#255
beq 20f @ divisor is infinite
cmp r3,#0
beq retzero @ dividend zero/denormal -> signed zero
cmp r3,#255
beq retinf @ dividend Inf/NaN -> signed infinity
subs r3,r1 @ initial result exponent (no bias)
adds r3,#125 @ add bias
lsrs r1,r0,#8 @ dividend mantissa Q15
@ here
@ r0=dividend mantissa Q23
@ r1=dividend mantissa Q15
@ r2=divisor mantissa Q23
@ r3=initial result exponent
@ r5=reciprocal estimate Q16
@ r6b31=sign of result
muls r1,r5
lsrs r1,#16 @ Q15 qu0=(q15)(u*y0);
lsls r0,r0,#15 @ dividend Q38
movs r4,r2
muls r4,r1 @ Q38 qu0*x
subs r4,r0,r4 @ Q38 re0=(y<<15)-qu0*x; note this remainder is signed
asrs r4,#10
muls r4,r5 @ Q44 qu1=(re0>>10)*u; this quotient correction is also signed
asrs r4,#16 @ Q28
lsls r1,#13
adds r1,r1,r4 @ Q28 qu=(qu0<<13)+(qu1>>16);
@ here
@ r0=dividend mantissa Q38
@ r1=quotient Q28
@ r2=divisor mantissa Q23
@ r3=initial result exponent
@ r6b31=sign of result
lsrs r4,r1,#28
bne 1f
@ here the quotient is less than 1<<28 (i.e., result mantissa <1.0)
adds r1,#5
lsrs r4,r1,#4 @ rounding + small reduction in systematic bias
bcc 2f @ skip if we are not near a rounding boundary
lsrs r1,#3 @ quotient Q25
lsls r0,#10 @ dividend mantissa Q48
muls r1,r1,r2 @ quotient*divisor Q48
subs r0,r0,r1 @ remainder Q48
bmi 2f
b 3f
1:
@ here the quotient is at least 1<<28 (i.e., result mantissa >=1.0)
adds r3,#1 @ bump exponent (and shift mantissa down one more place)
adds r1,#9
lsrs r4,r1,#5 @ rounding + small reduction in systematic bias
bcc 2f @ skip if we are not near a rounding boundary
lsrs r1,#4 @ quotient Q24
lsls r0,#9 @ dividend mantissa Q47
muls r1,r1,r2 @ quotient*divisor Q47
subs r0,r0,r1 @ remainder Q47
bmi 2f
3:
adds r4,#1 @ increment quotient as we are above the rounding boundary
@ here
@ r3=result exponent
@ r4=correctly rounded quotient Q23 in range [1,2] *note closed interval*
@ r6b31=sign of result
2:
cmp r3,#254
bhs 10f @ this catches both underflow and overflow
lsls r1,r3,#23
adds r0,r4,r1 @ pack exponent+mantissa (implied 1 folds into exponent)
adds r0,r6 @ apply sign
pop {r4,r5,r6,r15}
@ here divisor is infinite; dividend exponent in r3
20:
cmp r3,#255
bne retzero @ finite/infinite -> signed zero; Inf/Inf falls through to Inf
retinf:
movs r0,#255
21:
lsls r0,#23
orrs r0,r6
pop {r4,r5,r6,r15}
10:
bge retinf @ overflow?
adds r1,r3,#1
bne retzero @ exponent <-1? return 0
@ here exponent is exactly -1
lsrs r1,r4,#25
bcc retzero @ mantissa is not 01000000?
@ return minimum normal
movs r0,#1
lsls r0,#23
orrs r0,r6
pop {r4,r5,r6,r15}
retzero:
movs r0,r6 @ signed zero
pop {r4,r5,r6,r15}
@ reciprocal lookup table, indexed by divisor mantissa bits [22..18]:
@ x2=[32:1:63]/32;
@ round(256 ./(x2+1/64))
.align 2
rcpapp:
.byte 252,245,237,231,224,218,213,207,202,197,193,188,184,180,176,172
.byte 169,165,162,159,156,153,150,148,145,142,140,138,135,133,131,129
@ The square root routine uses an initial approximation to the reciprocal of the square root of the argument based
@ on the top four bits of the mantissa (possibly shifted one place to make the exponent even). It then performs two
@ Newton-Raphson iterations, resulting in about 14 bits of accuracy. This reciprocal is then multiplied by
@ the original argument to produce an approximation to the result, again with about 14 bits of accuracy.
@ Then a remainder is calculated, and multiplied by the reciprocal estiamte to generate a correction term
@ giving a final answer to about 28 bits of accuracy. A final remainder calculation rounds to the correct
@ result if necessary.
@ Again, the fixed-point calculation is carefully implemented to preserve accuracy, and similar comments to those
@ made above on the fast division routine apply.
@ The reciprocal square root calculation has been tested for all possible (possibly shifted) input mantissa values.
.align 2
.thumb_func
@ single-precision square root
@ In:  r0 = packed IEEE single y
@ Out: r0 = packed IEEE single sqrt(y)
@ Special cases: +0/-0 returned unchanged; +Inf -> +Inf; negative (non -0)
@ inputs -> -Inf. Algorithm per the comment block above: 4-bit table seed,
@ two Newton-Raphson steps, remainder-based correction and final rounding.
_fsqrt:
push {r4}
lsls r1,r0,#1
bcs sq_0 @ negative?
lsls r1,#8
lsrs r1,#9 @ mantissa
movs r2,#1
lsls r2,#23
adds r1,r2 @ insert implied 1
lsrs r2,r0,#23 @ extract exponent
beq sq_2 @ zero?
cmp r2,#255 @ infinite?
beq sq_1
adds r2,#125 @ correction for packing
asrs r2,#1 @ exponent/2, LSB into carry
bcc 1f
lsls r1,#1 @ was even: double mantissa; mantissa y now 1..4 Q23
1:
adr r4,rsqrtapp-4@ first four table entries are never accessed because of the mantissa's leading 1
lsrs r3,r1,#21 @ y Q2
ldrb r4,[r4,r3] @ initial approximation to reciprocal square root a0 Q8
lsrs r0,r1,#7 @ y Q16: first Newton-Raphson iteration
muls r0,r4 @ a0*y Q24
muls r0,r4 @ r0=p0=a0*y*y Q32
asrs r0,#12 @ r0 Q20
muls r0,r4 @ dy0=a0*r0 Q28
asrs r0,#13 @ dy0 Q15
lsls r4,#8 @ a0 Q16
subs r4,r0 @ a1=a0-dy0/2 Q16-Q15/2 -> Q16
adds r4,#170 @ mostly remove systematic error in this approximation: gains approximately 1 bit
movs r0,r4 @ second Newton-Raphson iteration
muls r0,r0 @ a1*a1 Q32
lsrs r0,#15 @ a1*a1 Q17
lsrs r3,r1,#8 @ y Q15
muls r0,r3 @ r1=p1=a1*a1*y Q32
asrs r0,#12 @ r1 Q20
muls r0,r4 @ dy1=a1*r1 Q36
asrs r0,#21 @ dy1 Q15
subs r4,r0 @ a2=a1-dy1/2 Q16-Q15/2 -> Q16
muls r3,r4 @ a3=y*a2 Q31
lsrs r3,#15 @ a3 Q16
@ here a2 is an approximation to the reciprocal square root
@ and a3 is an approximation to the square root
movs r0,r3
muls r0,r0 @ a3*a3 Q32
lsls r1,#9 @ y Q32
subs r0,r1,r0 @ r2=y-a3*a3 Q32 remainder
asrs r0,#5 @ r2 Q27
muls r4,r0 @ r2*a2 Q43
lsls r3,#7 @ a3 Q23
asrs r0,r4,#15 @ r2*a2 Q28
adds r0,#16 @ rounding to Q24
asrs r0,r0,#6 @ r2*a2 Q22
add r3,r0 @ a4 Q23: candidate final result
bcc sq_3 @ near rounding boundary? skip if no rounding needed
mov r4,r3
adcs r4,r4 @ a4+0.5ulp Q24
muls r4,r4 @ Q48
lsls r1,#16 @ y Q48
subs r1,r4 @ remainder Q48
bmi sq_3
adds r3,#1 @ round up
sq_3:
lsls r2,#23 @ pack exponent
adds r0,r2,r3
sq_6:
pop {r4}
bx r14
sq_0:
lsrs r1,#24
beq sq_2 @ -0: return it
@ here negative and not -0: return -Inf
asrs r0,#31
sq_5:
lsls r0,#23
b sq_6
sq_1: @ +Inf
lsrs r0,#23
b sq_5
sq_2:
lsrs r0,#31
lsls r0,#31
b sq_6
@ round(sqrt(2^22./[72:16:248]))
@ 12-entry Q8 reciprocal-square-root seed table (indexed by top mantissa bits)
rsqrtapp:
.byte 0xf1,0xda,0xc9,0xbb, 0xb0,0xa6,0x9e,0x97, 0x91,0x8b,0x86,0x82
@ Notation:
@ rx:ry means the concatenation of rx and ry with rx having the less significant bits
@ IEEE double in ra:rb ->
@ mantissa in ra:rb 12Q52 (53 significant bits) with implied 1 set
@ exponent in re
@ sign in rs
@ trashes rt
@ Zero/denormal and Inf/NaN inputs are flushed to a canonical 1.0 mantissa
@ with the exponent forced far out of range (large negative for zero/denormal,
@ large positive for Inf/NaN) so later range checks handle them uniformly.
.macro mdunpack ra,rb,re,rs,rt
lsrs \re,\rb,#20 @ extract sign and exponent
subs \rs,\re,#1
lsls \rs,#20
subs \rb,\rs @ clear sign and exponent in mantissa; insert implied 1
lsrs \rs,\re,#11 @ sign
lsls \re,#21
lsrs \re,#21 @ exponent
beq l\@_1 @ zero exponent?
adds \rt,\re,#1
lsrs \rt,#11
beq l\@_2 @ exponent != 0x7ff? then done
l\@_1:
movs \ra,#0
movs \rb,#1
lsls \rb,#20
subs \re,#128
lsls \re,#12
l\@_2:
.endm
@ IEEE double in ra:rb ->
@ signed mantissa in ra:rb 12Q52 (53 significant bits) with implied 1
@ exponent in re
@ trashes rt0 and rt1
@ +zero, +denormal -> exponent=-0x80000
@ -zero, -denormal -> exponent=-0x80000
@ +Inf, +NaN -> exponent=+0x77f000
@ -Inf, -NaN -> exponent=+0x77e000
@ Unlike mdunpack, the sign is folded into the mantissa: negative inputs
@ have their 64-bit mantissa two's-complement negated.
.macro mdunpacks ra,rb,re,rt0,rt1
lsrs \re,\rb,#20 @ extract sign and exponent
lsrs \rt1,\rb,#31 @ sign only
subs \rt0,\re,#1
lsls \rt0,#20
subs \rb,\rt0 @ clear sign and exponent in mantissa; insert implied 1
lsls \re,#21
bcc l\@_1 @ skip on positive
mvns \rb,\rb @ negate mantissa
rsbs \ra,#0
bcc l\@_1
adds \rb,#1
l\@_1:
lsrs \re,#21
beq l\@_2 @ zero exponent?
adds \rt0,\re,#1
lsrs \rt0,#11
beq l\@_3 @ exponent != 0x7ff? then done
subs \re,\rt1
l\@_2:
movs \ra,#0
lsls \rt1,#1 @ +ve: 0 -ve: 2
adds \rb,\rt1,#1 @ +ve: 1 -ve: 3
lsls \rb,#30 @ create +/-1 mantissa
asrs \rb,#10
subs \re,#128
lsls \re,#12
l\@_3:
.endm
.align 2
.thumb_func
@ double subtraction: x - y
@ In:  r0:r1 = x (packed IEEE double), r2:r3 = y
@ Out: r0:r1 = x - y
@ Implemented by flipping the sign bit of y and falling into __aeabi_dadd.
__aeabi_dsub:
push {r4-r7,r14}
movs r4,#1
lsls r4,#31 @ r4 = 0x80000000 (sign-bit mask)
eors r3,r4 @ flip sign on second argument
b da_entry @ continue in dadd
.align 2
.thumb_func
@ double addition: x + y
@ In:  r0:r1 = x (packed IEEE double), r2:r3 = y
@ Out: r0:r1 = x + y, round-to-nearest-even
@ Works on signed (two's complement) mantissas from mdunpacks; the operand
@ with the smaller exponent is shifted down, keeping rounding+sticky bits
@ in r4, then the sum is renormalised, rounded and repacked.
__aeabi_dadd:
push {r4-r7,r14}
da_entry:
mdunpacks r0,r1,r4,r6,r7
mdunpacks r2,r3,r5,r6,r7
subs r7,r5,r4 @ ye-xe
subs r6,r4,r5 @ xe-ye
bmi da_ygtx
@ here xe>=ye: need to shift y down r6 places
mov r12,r4 @ save exponent
cmp r6,#32
bge da_xrgty @ xe rather greater than ye?
adds r7,#32
movs r4,r2
lsls r4,r4,r7 @ rounding bit + sticky bits
da_xgty0:
movs r5,r3
lsls r5,r5,r7
lsrs r2,r6
asrs r3,r6
orrs r2,r5
da_add:
adds r0,r2
adcs r1,r3
da_pack:
@ here unnormalised signed result (possibly 0) is in r0:r1 with exponent r12, rounding + sticky bits in r4
@ Note that if a large normalisation shift is required then the arguments were close in magnitude and so we
@ cannot have not gone via the xrgty/yrgtx paths. There will therefore always be enough high bits in r4
@ to provide a correct continuation of the exact result.
@ now pack result back up
lsrs r3,r1,#31 @ get sign bit
beq 1f @ skip on positive
mvns r1,r1 @ negate mantissa
mvns r0,r0
movs r2,#0
rsbs r4,#0
adcs r0,r2
adcs r1,r2
1:
mov r2,r12 @ get exponent
lsrs r5,r1,#21
bne da_0 @ shift down required?
lsrs r5,r1,#20
bne da_1 @ normalised?
cmp r0,#0
beq da_5 @ could mantissa be zero?
da_2:
adds r4,r4
adcs r0,r0
adcs r1,r1
subs r2,#1 @ adjust exponent
lsrs r5,r1,#20
beq da_2
da_1:
lsls r4,#1 @ check rounding bit
bcc da_3
da_4:
adds r0,#1 @ round up
bcc 2f
adds r1,#1
2:
cmp r4,#0 @ sticky bits zero?
bne da_3
lsrs r0,#1 @ round to even
lsls r0,#1
da_3:
subs r2,#1
bmi da_6
adds r4,r2,#2 @ check if exponent is overflowing
lsrs r4,#11
bne da_7
lsls r2,#20 @ pack exponent and sign
add r1,r2
lsls r3,#31
add r1,r3
pop {r4-r7,r15}
da_7:
@ here exponent overflow: return signed infinity
lsls r1,r3,#31
ldr r3,=#0x7ff00000
orrs r1,r3
b 1f
da_6:
@ here exponent underflow: return signed zero
lsls r1,r3,#31
1:
movs r0,#0
pop {r4-r7,r15}
da_5:
@ here mantissa could be zero
cmp r1,#0
bne da_2
cmp r4,#0
bne da_2
@ inputs must have been of identical magnitude and opposite sign, so return +0
pop {r4-r7,r15}
da_0:
@ here a shift down by one place is required for normalisation
adds r2,#1 @ adjust exponent
lsls r6,r0,#31 @ save rounding bit
lsrs r0,#1
lsls r5,r1,#31
orrs r0,r5
lsrs r1,#1
cmp r6,#0
beq da_3
b da_4
da_xrgty: @ xe>ye and shift>=32 places
cmp r6,#60
bge da_xmgty @ xe much greater than ye?
subs r6,#32
adds r7,#64
movs r4,r2
lsls r4,r4,r7 @ these would be shifted off the bottom of the sticky bits
beq 1f
movs r4,#1
1:
lsrs r2,r2,r6
orrs r4,r2
movs r2,r3
lsls r3,r3,r7
orrs r4,r3
asrs r3,r2,#31 @ propagate sign bit
b da_xgty0
da_ygtx:
@ here ye>xe: need to shift x down r7 places
mov r12,r5 @ save exponent
cmp r7,#32
bge da_yrgtx @ ye rather greater than xe?
adds r6,#32
movs r4,r0
lsls r4,r4,r6 @ rounding bit + sticky bits
da_ygtx0:
movs r5,r1
lsls r5,r5,r6
lsrs r0,r7
asrs r1,r7
orrs r0,r5
b da_add
da_yrgtx:
cmp r7,#60
bge da_ymgtx @ ye much greater than xe?
subs r7,#32
adds r6,#64
movs r4,r0
lsls r4,r4,r6 @ these would be shifted off the bottom of the sticky bits
beq 1f
movs r4,#1
1:
lsrs r0,r0,r7
orrs r4,r0
movs r0,r1
lsls r1,r1,r6
orrs r4,r1
asrs r1,r0,#31 @ propagate sign bit
b da_ygtx0
da_ymgtx: @ result is just y
movs r0,r2
movs r1,r3
da_xmgty: @ result is just x
movs r4,#0 @ clear sticky bits
b da_pack
.ltorg
@ equivalent of UMULL
@ unsigned 32x32 -> 64-bit multiply: rzl:rzh = rx * ry, built from four
@ 16x16 muls (Thumb-1 has no long multiply)
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
.macro mul32_32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ (x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
lsrs \rt2,\rx,#16 @ xh
muls \rt1,\rt2 @ xhyl=M0
lsrs \rt4,\ry,#16 @ yh
muls \rt2,\rt4 @ xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ xlyh=M1
adds \rt1,\rt3 @ M0+M1=M
bcc l\@_1 @ addition of the two cross terms can overflow, so add carry into H
movs \rt3,#1 @ 1
lsls \rt3,#16 @ 0x10000
adds \rt2,\rt3 @ H'
l\@_1:
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ ML
lsrs \rzh,\rt1,#16 @ MH
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ZH
.endm
@ SUMULL: x signed, y unsigned
@ signed-by-unsigned 32x32 -> 64-bit multiply: rzl:rzh = rx * ry (rzh signed)
@ in table below ¯ means signed variable
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
.macro muls32_32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ ¯(x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
asrs \rt2,\rx,#16 @ ¯xh
muls \rt1,\rt2 @ ¯xhyl=M0
lsrs \rt4,\ry,#16 @ yh
muls \rt2,\rt4 @ ¯xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ xlyh=M1
asrs \rt4,\rt1,#31 @ M0sx (M1 sign extension is zero)
adds \rt1,\rt3 @ M0+M1=M
movs \rt3,#0 @ 0
adcs \rt4,\rt3 @ ¯Msx
lsls \rt4,#16 @ ¯Msx<<16
adds \rt2,\rt4 @ H'
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ M~
lsrs \rzh,\rt1,#16 @ M~
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ¯ZH
.endm
@ SSMULL: x signed, y signed
@ signed 32x32 -> 64-bit multiply: rzl:rzh = rx * ry (both operands signed)
@ in table below ¯ means signed variable
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
.macro muls32_s32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ ¯(x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
asrs \rt2,\rx,#16 @ ¯xh
muls \rt1,\rt2 @ ¯xhyl=M0
asrs \rt4,\ry,#16 @ ¯yh
muls \rt2,\rt4 @ ¯xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ ¯xlyh=M1
adds \rt1,\rt3 @ ¯M0+M1=M
asrs \rt3,\rt1,#31 @ Msx
bvc l\@_1 @
mvns \rt3,\rt3 @ ¯Msx flip sign extension bits if overflow
l\@_1:
lsls \rt3,#16 @ ¯Msx<<16
adds \rt2,\rt3 @ H'
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ M~
lsrs \rzh,\rt1,#16 @ M~
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ¯ZH
.endm
@ unsigned 32-bit square: rzl:rzh = rx * rx
@ (cheaper than mul32_32_64 since the cross terms are equal)
@ can have rt2==rx, in which case rx trashed
@ can have rzl==rx
@ can have rzh==rt1
.macro square32_64 rx,rzl,rzh,rt0,rt1,rt2
@ t0 t1 t2 zl zh
uxth \rt0,\rx @ xl
muls \rt0,\rt0 @ xlxl=L
uxth \rt1,\rx @ xl
lsrs \rt2,\rx,#16 @ xh
muls \rt1,\rt2 @ xlxh=M
muls \rt2,\rt2 @ xhxh=H
lsls \rzl,\rt1,#17 @ ML
lsrs \rzh,\rt1,#15 @ MH
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ZH
.endm
.align 2
.thumb_func
@ double multiplication
@ In:  r0:r1 = x, r2:r3 = y (packed IEEE doubles)
@ Out: r0:r1 = x*y, round-to-nearest-even
@ Forms the full 106-bit mantissa product from four 32x32->64 partial
@ products, then normalises, rounds and repacks; dm_0/dm_1 handle
@ exponent under/overflow.
__aeabi_dmul:
push {r4-r7,r14}
mdunpack r0,r1,r4,r6,r5
mov r12,r4
mdunpack r2,r3,r4,r7,r5
eors r7,r6 @ sign of result
add r4,r12 @ exponent of result
push {r0-r2,r4,r7}
@ accumulate full product in r12:r5:r6:r7
mul32_32_64 r0,r2, r0,r5, r4,r6,r7,r0,r5 @ XL*YL
mov r12,r0 @ save LL bits
mul32_32_64 r1,r3, r6,r7, r0,r2,r4,r6,r7 @ XH*YH
pop {r0} @ XL
mul32_32_64 r0,r3, r0,r3, r1,r2,r4,r0,r3 @ XL*YH
adds r5,r0
adcs r6,r3
movs r0,#0
adcs r7,r0
pop {r1,r2} @ XH,YL
mul32_32_64 r1,r2, r1,r2, r0,r3,r4, r1,r2 @ XH*YL
adds r5,r1
adcs r6,r2
movs r0,#0
adcs r7,r0
@ here r5:r6:r7 holds the product [1..4) in Q(104-32)=Q72, with extra LSBs in r12
pop {r3,r4} @ exponent in r3, sign in r4
lsls r1,r7,#11
lsrs r2,r6,#21
orrs r1,r2
lsls r0,r6,#11
lsrs r2,r5,#21
orrs r0,r2
lsls r5,#11 @ now r5:r0:r1 Q83=Q(51+32), extra LSBs in r12
lsrs r2,r1,#20
bne 1f @ skip if in range [2..4)
adds r5,r5 @ shift up so always [2..4) Q83, i.e. [1..2) Q84=Q(52+32)
adcs r0,r0
adcs r1,r1
subs r3,#1 @ correct exponent
1:
ldr r6,=#0x3ff
subs r3,r6 @ correct for exponent bias
lsls r6,#1 @ 0x7fe
cmp r3,r6
bhs dm_0 @ exponent over- or underflow
lsls r5,#1 @ rounding bit to carry
bcc 1f @ result is correctly rounded
adds r0,#1
movs r6,#0
adcs r1,r6 @ round up
mov r6,r12 @ remaining sticky bits
orrs r5,r6
bne 1f @ some sticky bits set?
lsrs r0,#1
lsls r0,#1 @ round to even
1:
lsls r3,#20
adds r1,r3
dm_2:
lsls r4,#31
add r1,r4
pop {r4-r7,r15}
@ here for exponent over- or underflow
dm_0:
bge dm_1 @ overflow?
adds r3,#1 @ would-be zero exponent?
bne 1f
adds r0,#1
bne 1f @ all-ones mantissa?
adds r1,#1
lsrs r7,r1,#21
beq 1f
lsrs r1,#1
b dm_2
1:
lsls r1,r4,#31
movs r0,#0
pop {r4-r7,r15}
@ here for exponent overflow
dm_1:
adds r6,#1 @ 0x7ff
lsls r1,r6,#20
movs r0,#0
b dm_2
.ltorg
@ Approach to division y/x is as follows.
@
@ First generate u1, an approximation to 1/x to about 29 bits. Multiply this by the top
@ 32 bits of y to generate a0, a first approximation to the result (good to 28 bits or so).
@ Calculate the exact remainder r0=y-a0*x, which will be about 0. Calculate a correction
@ d0=r0*u1, and then write a1=a0+d0. If near a rounding boundary, compute the exact
@ remainder r1=y-a1*x (which can be done using r0 as a basis) to determine whether to
@ round up or down.
@
@ The calculation of 1/x is as given in dreciptest.c. That code verifies exhaustively
@ that | u1*x-1 | < 10*2^-32.
@
@ More precisely:
@
@ x0=(q16)x;
@ x1=(q30)x;
@ y0=(q31)y;
@ u0=(q15~)"(0xffffffffU/(unsigned int)roundq(x/x_ulp))/powq(2,16)"(x0); // q15 approximation to 1/x; "~" denotes rounding rather than truncation
@ v=(q30)(u0*x1-1);
@ u1=(q30)u0-(q30~)(u0*v);
@
@ a0=(q30)(u1*y0);
@ r0=(q82)y-a0*x;
@ r0x=(q57)r0;
@ d0=r0x*u1;
@ a1=d0+a0;
@
@ Error analysis
@
@ Use Greek letters to represent the errors introduced by rounding and truncation.
@
@ r₀ = y - a₀x
@ = y - [ u₁ ( y - α ) - β ] x where 0 ≤ α < 2^-31, 0 ≤ β < 2^-30
@ = y ( 1 - u₁x ) + ( u₁α + β ) x
@
@ Hence
@
@ | r₀ / x | < 2 * 10*2^-32 + 2^-31 + 2^-30
@ = 26*2^-32
@
@ r₁ = y - a₁x
@ = y - a₀x - d₀x
@ = r₀ - d₀x
@ = r₀ - u₁ ( r₀ - γ ) x where 0 ≤ γ < 2^-57
@ = r₀ ( 1 - u₁x ) + u₁γx
@
@ Hence
@
@ | r₁ / x | < 26*2^-32 * 10*2^-32 + 2^-57
@ = (260+128)*2^-64
@ < 2^-55
@
@ Empirically it seems to be nearly twice as good as this.
@
@ To determine correctly whether the exact remainder calculation can be skipped we need a result
@ accurate to < 0.25ulp. In the case where x>y the quotient will be shifted up one place for normalisation
@ and so 1ulp is 2^-53 and so the calculation above suffices.
.align 2
.thumb_func
@ double division y/x
@ In:  r0:r1 = y (dividend), r2:r3 = x (divisor), packed IEEE doubles
@ Out: r0:r1 = y/x
@ Per the comment block above: a Q30 reciprocal of x is refined from the
@ rcpapp table, multiplied by y for a candidate quotient, and an exact
@ remainder decides rounding when near a rounding boundary.
__aeabi_ddiv:
push {r4-r7,r14}
ddiv0: @ entry point from dtan
mdunpack r2,r3,r4,r7,r6 @ unpack divisor
@ unpack dividend by hand to save on register use
lsrs r6,r1,#31
adds r6,r7
mov r12,r6 @ result sign in r12b0; r12b1 trashed
lsls r1,#1
lsrs r7,r1,#21 @ exponent
beq 1f @ zero exponent?
adds r6,r7,#1
lsrs r6,#11
beq 2f @ exponent != 0x7ff? then done
1:
movs r0,#0
movs r1,#0
subs r7,#64 @ less drastic fiddling of exponents to get 0/0, Inf/Inf correct
lsls r7,#12
2:
subs r6,r7,r4
lsls r6,#2
add r12,r12,r6 @ (signed) exponent in r12[31..8]
subs r7,#1 @ implied 1
lsls r7,#21
subs r1,r7
lsrs r1,#1
// see dreciptest-boxc.c
lsrs r4,r3,#15 @ x2=x>>15; // Q5 32..63
ldr r5,=#(rcpapp-32)
ldrb r4,[r5,r4] @ u=lut5[x2-32]; // Q8
lsls r5,r3,#8
muls r5,r5,r4
asrs r5,#14 @ e=(i32)(u*(x<<8))>>14; // Q22
asrs r6,r5,#11
muls r6,r6,r6 @ e2=(e>>11)*(e>>11); // Q22
subs r5,r6
muls r5,r5,r4 @ c=(e-e2)*u; // Q30
lsls r6,r4,#7
asrs r5,#14
adds r5,#1
asrs r5,#1
subs r6,r5 @ u0=(u<<7)-((c+0x4000)>>15); // Q15
@ here
@ r0:r1 y mantissa
@ r2:r3 x mantissa
@ r6 u0, first approximation to 1/x Q15
@ r12: result sign, exponent
lsls r4,r3,#10
lsrs r5,r2,#22
orrs r5,r4 @ x1=(q30)x
muls r5,r6 @ u0*x1 Q45
asrs r5,#15 @ v=u0*x1-1 Q30
muls r5,r6 @ u0*v Q45
asrs r5,#14
adds r5,#1
asrs r5,#1 @ round u0*v to Q30
lsls r6,#15
subs r6,r5 @ u1 Q30
@ here
@ r0:r1 y mantissa
@ r2:r3 x mantissa
@ r6 u1, second approximation to 1/x Q30
@ r12: result sign, exponent
push {r2,r3}
lsls r4,r1,#11
lsrs r5,r0,#21
orrs r4,r5 @ y0=(q31)y
mul32_32_64 r4,r6, r4,r5, r2,r3,r7,r4,r5 @ y0*u1 Q61
adds r4,r4
adcs r5,r5 @ a0=(q30)(y0*u1)
@ here
@ r0:r1 y mantissa
@ r5 a0, first approximation to y/x Q30
@ r6 u1, second approximation to 1/x Q30
@ r12 result sign, exponent
ldr r2,[r13,#0] @ xL
mul32_32_64 r2,r5, r2,r3, r1,r4,r7,r2,r3 @ xL*a0
ldr r4,[r13,#4] @ xH
muls r4,r5 @ xH*a0
adds r3,r4 @ r2:r3 now x*a0 Q82
lsrs r2,#25
lsls r1,r3,#7
orrs r2,r1 @ r2 now x*a0 Q57; r7:r2 is x*a0 Q89
lsls r4,r0,#5 @ y Q57
subs r0,r4,r2 @ r0x=y-x*a0 Q57 (signed)
@ here
@ r0 r0x Q57
@ r5 a0, first approximation to y/x Q30
@ r4 yL Q57
@ r6 u1 Q30
@ r12 result sign, exponent
muls32_32_64 r0,r6, r7,r6, r1,r2,r3, r7,r6 @ r7:r6 r0x*u1 Q87
asrs r3,r6,#25
adds r5,r3
lsls r3,r6,#7 @ r3:r5 a1 Q62 (but bottom 7 bits are zero so 55 bits of precision after binary point)
@ here we could recover another 7 bits of precision (but not accuracy) from the top of r7
@ but these bits are thrown away in the rounding and conversion to Q52 below
@ here
@ r3:r5 a1 Q62 candidate quotient [0.5,2) or so
@ r4 yL Q57
@ r12 result sign, exponent
movs r6,#0
adds r3,#128 @ for initial rounding to Q53
adcs r5,r5,r6
lsrs r1,r5,#30
bne dd_0
@ here candidate quotient a1 is in range [0.5,1)
@ so 30 significant bits in r5
lsls r4,#1 @ y now Q58
lsrs r1,r5,#9 @ to Q52
lsls r0,r5,#23
lsrs r3,#9 @ 0.5ulp-significance bit in carry: if this is 1 we may need to correct result
orrs r0,r3
bcs dd_1
b dd_2
dd_0:
@ here candidate quotient a1 is in range [1,2)
@ so 31 significant bits in r5
movs r2,#4
add r12,r12,r2 @ fix exponent; r3:r5 now effectively Q61
adds r3,#128 @ complete rounding to Q53
adcs r5,r5,r6
lsrs r1,r5,#10
lsls r0,r5,#22
lsrs r3,#10 @ 0.5ulp-significance bit in carry: if this is 1 we may need to correct result
orrs r0,r3
bcc dd_2
dd_1:
@ here
@ r0:r1 rounded result Q53 [0.5,1) or Q52 [1,2), but may not be correctly rounded-to-nearest
@ r4 yL Q58 or Q57
@ r12 result sign, exponent
@ carry set
adcs r0,r0,r0
adcs r1,r1,r1 @ z Q53 with 1 in LSB
lsls r4,#16 @ Q105-32=Q73
ldr r2,[r13,#0] @ xL Q52
ldr r3,[r13,#4] @ xH Q20
movs r5,r1 @ zH Q21
muls r5,r2 @ zH*xL Q73
subs r4,r5
muls r3,r0 @ zL*xH Q73
subs r4,r3
mul32_32_64 r2,r0, r2,r3, r5,r6,r7,r2,r3 @ xL*zL
rsbs r2,#0 @ borrow from low half?
sbcs r4,r3 @ y-xz Q73 (remainder bits 52..73)
cmp r4,#0
bmi 1f
movs r2,#0 @ round up
adds r0,#1
adcs r1,r2
1:
lsrs r0,#1 @ shift back down to Q52
lsls r2,r1,#31
orrs r0,r2
lsrs r1,#1
dd_2:
add r13,#8
mov r2,r12
lsls r7,r2,#31 @ result sign
asrs r2,#2 @ result exponent
ldr r3,=#0x3fd
adds r2,r3
ldr r3,=#0x7fe
cmp r2,r3
bhs dd_3 @ over- or underflow?
lsls r2,#20
adds r1,r2 @ pack exponent
dd_5:
adds r1,r7 @ pack sign
pop {r4-r7,r15}
dd_3:
movs r0,#0
cmp r2,#0
bgt dd_4 @ overflow?
movs r1,r7
pop {r4-r7,r15}
dd_4:
adds r3,#1 @ 0x7ff
lsls r1,r3,#20
b dd_5
/*
Approach to square root x=sqrt(y) is as follows.
First generate a3, an approximation to 1/sqrt(y) to about 30 bits. Multiply this by y
to give a4~sqrt(y) to about 28 bits and a remainder r4=y-a4^2. Then, because
d sqrt(y) / dy = 1 / (2 sqrt(y)) let d4=r4*a3/2 and then the value a5=a4+d4 is
a better approximation to sqrt(y). If this is near a rounding boundary we
compute an exact remainder y-a5*a5 to decide whether to round up or down.
The calculation of a3 and a4 is as given in dsqrttest.c. That code verifies exhaustively
that | 1 - a3a4 | < 10*2^-32, | r4 | < 40*2^-32 and | r4/y | < 20*2^-32.
More precisely, with "y" representing y truncated to 30 binary places:
u=(q3)y; // 24-entry table
a0=(q8~)"1/sqrtq(x+x_ulp/2)"(u); // first approximation from table
p0=(q16)(a0*a0) * (q16)y;
r0=(q20)(p0-1);
dy0=(q15)(r0*a0); // Newton-Raphson correction term
a1=(q16)a0-dy0/2; // good to ~9 bits
p1=(q19)(a1*a1)*(q19)y;
r1=(q23)(p1-1);
dy1=(q15~)(r1*a1); // second Newton-Raphson correction
a2x=(q16)a1-dy1/2; // good to ~16 bits
a2=a2x-a2x/1t16; // prevent overflow of a2*a2 in 32 bits
p2=(a2*a2)*(q30)y; // Q62
r2=(q36)(p2-1+1t-31);
dy2=(q30)(r2*a2); // Q52->Q30
a3=(q31)a2-dy2/2; // good to about 30 bits
a4=(q30)(a3*(q30)y+1t-31); // good to about 28 bits
Error analysis
r₄ = y - a₄²
d₄ = 1/2 a₃r₄
a₅ = a₄ + d₄
r₅ = y - a₅²
= y - ( a₄ + d₄ )²
= y - a₄² - a₃a₄r₄ - 1/4 a₃²r₄²
= r₄ - a₃a₄r₄ - 1/4 a₃²r₄²
| r₅ | < | r₄ | | 1 - a₃a₄ | + 1/4 r₄²
a₅ = √y √( 1 - r₅/y )
= √y ( 1 - 1/2 r₅/y + ... )
So to first order (second order being very tiny)
√y - a₅ = 1/2 r₅/y
and
| √y - a₅ | < 1/2 ( | r₄/y | | 1 - a₃a₄ | + 1/4 r₄²/y )
From dsqrttest.c (conservatively):
< 1/2 ( 20*2^-32 * 10*2^-32 + 1/4 * 40*2^-32*20*2^-32 )
= 1/2 ( 200 + 200 ) * 2^-64
< 2^-56
Empirically we see about 1ulp worst-case error including rounding at Q57.
To determine correctly whether the exact remainder calculation can be skipped we need a result
accurate to < 0.25ulp at Q52, or 2^-54.
*/
@ special-case exits for _dsqrt below, reached with the shifted input in r2
@ and the packed input still in r0:r1
dq_2:
bge dq_3 @ +Inf?
movs r1,#0
b dq_4
dq_0:
lsrs r1,#31
lsls r1,#31 @ preserve sign bit
lsrs r2,#21 @ extract exponent
beq dq_4 @ -0? return it
asrs r1,#11 @ make -Inf
b dq_4
dq_3:
ldr r1,=#0x7ff
lsls r1,#20 @ return +Inf
dq_4:
movs r0,#0 @ clear low word of result
dq_1:
bx r14
.align 2
.thumb_func
@ double square root
@ In:  r0:r1 = y (packed IEEE double)
@ Out: r0:r1 = sqrt(y); -0 returned unchanged, other negatives -> -Inf,
@       +0 -> +0, +Inf -> +Inf
@ Algorithm and error analysis in the comment block above: table seed for
@ 1/sqrt(y), Newton-Raphson refinement, correction term, exact-remainder
@ rounding (dq_5) when near a rounding boundary.
_dsqrt:
lsls r2,r1,#1
bcs dq_0 @ negative?
lsrs r2,#21 @ extract exponent
subs r2,#1
ldr r3,=#0x7fe
cmp r2,r3
bhs dq_2 @ catches 0 and +Inf
push {r4-r7,r14}
lsls r4,r2,#20
subs r1,r4 @ insert implied 1
lsrs r2,#1
bcc 1f @ even exponent? skip
adds r0,r0,r0 @ odd exponent: shift up mantissa
adcs r1,r1,r1
1:
lsrs r3,#2
adds r2,r3
lsls r2,#20
mov r12,r2 @ save result exponent
@ here
@ r0:r1 y mantissa Q52 [1,4)
@ r12 result exponent
adr r4,drsqrtapp-8 @ first eight table entries are never accessed because of the mantissa's leading 1
lsrs r2,r1,#17 @ y Q3
ldrb r2,[r4,r2] @ initial approximation to reciprocal square root a0 Q8
lsrs r3,r1,#4 @ first Newton-Raphson iteration
muls r3,r2
muls r3,r2 @ i32 p0=a0*a0*(y>>14); // Q32
asrs r3,r3,#12 @ i32 r0=p0>>12; // Q20
muls r3,r2
asrs r3,#13 @ i32 dy0=(r0*a0)>>13; // Q15
lsls r2,#8
subs r2,r3 @ i32 a1=(a0<<8)-dy0; // Q16
movs r3,r2
muls r3,r3
lsrs r3,#13
lsrs r4,r1,#1
muls r3,r4 @ i32 p1=((a1*a1)>>11)*(y>>11); // Q19*Q19=Q38
asrs r3,#15 @ i32 r1=p1>>15; // Q23
muls r3,r2
asrs r3,#23
adds r3,#1
asrs r3,#1 @ i32 dy1=(r1*a1+(1<<23))>>24; // Q23*Q16=Q39; Q15
subs r2,r3 @ i32 a2=a1-dy1; // Q16
lsrs r3,r2,#16
subs r2,r3 @ if(a2>=0x10000) a2=0xffff; to prevent overflow of a2*a2
@ here
@ r0:r1 y mantissa
@ r2 a2 ~ 1/sqrt(y) Q16
@ r12 result exponent
movs r3,r2
muls r3,r3
lsls r1,#10
lsrs r4,r0,#22
orrs r1,r4 @ y Q30
mul32_32_64 r1,r3, r4,r3, r5,r6,r7,r4,r3 @ i64 p2=(ui64)(a2*a2)*(ui64)y; // Q62 r4:r3
lsls r5,r3,#6
lsrs r4,#26
orrs r4,r5
adds r4,#0x20 @ i32 r2=(p2>>26)+0x20; // Q36 r4
uxth r5,r4
muls r5,r2
asrs r4,#16
muls r4,r2
lsrs r5,#16
adds r4,r5
asrs r4,#6 @ i32 dy2=((i64)r2*(i64)a2)>>22; // Q36*Q16=Q52; Q30
lsls r2,#15
subs r2,r4
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 a3 ~ 1/sqrt(y) Q31
@ r12 result exponent
mul32_32_64 r2,r1, r3,r4, r5,r6,r7,r3,r4
adds r3,r3,r3
adcs r4,r4,r4
adds r3,r3,r3
movs r3,#0
adcs r3,r4 @ ui32 a4=((ui64)a3*(ui64)y+(1U<<31))>>31; // Q30
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 a3 Q31 ~ 1/sqrt(y)
@ r3 a4 Q30 ~ sqrt(y)
@ r12 result exponent
square32_64 r3, r4,r5, r6,r5,r7
lsls r6,r0,#8
lsrs r7,r1,#2
subs r6,r4
sbcs r7,r5 @ r4=(q60)y-a4*a4
@ by exhaustive testing, r4 = fffffffc0e134fdc .. 00000003c2bf539c Q60
lsls r5,r7,#29
lsrs r6,#3
adcs r6,r5 @ r4 Q57 with rounding
muls32_32_64 r6,r2, r6,r2, r4,r5,r7,r6,r2 @ d4=a3*r4/2 Q89
@ r4+d4 is correct to 1ULP at Q57, tested on ~9bn cases including all extreme values of r4 for each possible y Q30
adds r2,#8
asrs r2,#5 @ d4 Q52, rounded to Q53 with spare bit in carry
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 d4 Q52, rounded to Q53
@ C flag contains d4_b53
@ r3 a4 Q30
bcs dq_5
lsrs r5,r3,#10 @ a4 Q52
lsls r4,r3,#22
asrs r1,r2,#31
adds r0,r2,r4
adcs r1,r5 @ a4+d4
add r1,r12 @ pack exponent
pop {r4-r7,r15}
.ltorg
@ round(sqrt(2^22./[68:8:252]))
@ 24-entry Q8 reciprocal-square-root seed table indexed by y Q3
drsqrtapp:
.byte 0xf8,0xeb,0xdf,0xd6,0xcd,0xc5,0xbe,0xb8
.byte 0xb2,0xad,0xa8,0xa4,0xa0,0x9c,0x99,0x95
.byte 0x92,0x8f,0x8d,0x8a,0x88,0x85,0x83,0x81
dq_5:
@ here we are near a rounding boundary, C is set
adcs r2,r2,r2 @ d4 Q53+1ulp
lsrs r5,r3,#9
lsls r4,r3,#23 @ r4:r5 a4 Q53
asrs r1,r2,#31
adds r4,r2,r4
adcs r5,r1 @ r4:r5 a5=a4+d4 Q53+1ulp
movs r3,r5
muls r3,r4
square32_64 r4,r1,r2,r6,r2,r7
adds r2,r3
adds r2,r3 @ r1:r2 a5^2 Q106
lsls r0,#22 @ y Q84
rsbs r1,#0
sbcs r0,r2 @ remainder y-a5^2
bmi 1f @ y<a5^2: no need to increment a5
movs r3,#0
adds r4,#1
adcs r5,r3 @ bump a5 if over rounding boundary
1:
lsrs r0,r4,#1
lsrs r1,r5,#1
lsls r5,#31
orrs r0,r5
add r1,r12
pop {r4-r7,r15}
@ compare r0:r1 against r2:r3, returning -1/0/1 for <, =, >
@ also set flags accordingly
@ NaN and denormal inputs have their mantissas flushed first, so NaNs
@ compare like infinities of the same sign and -0 compares equal to +0.
.thumb_func
qfp_dcmp:
push {r4,r6,r7,r14}
ldr r7,=#0x7ff @ flush NaNs and denormals
lsls r4,r1,#1
lsrs r4,#21
beq 1f
cmp r4,r7
bne 2f
1:
movs r0,#0
lsrs r1,#20
lsls r1,#20
2:
lsls r4,r3,#1
lsrs r4,#21
beq 1f
cmp r4,r7
bne 2f
1:
movs r2,#0
lsrs r3,#20
lsls r3,#20
2:
dcmp_fast_entry:
movs r6,#1
eors r3,r1
bmi 4f @ opposite signs? then can proceed on basis of sign of x
eors r3,r1 @ restore r3
bpl 1f
rsbs r6,#0 @ negative? flip comparison
1:
cmp r1,r3
bne 1f
cmp r0,r2
bhi 2f
blo 3f
5:
movs r6,#0 @ equal? result is 0
1:
bgt 2f
3:
rsbs r6,#0
2:
subs r0,r6,#0 @ copy and set flags
pop {r4,r6,r7,r15}
4:
orrs r3,r1 @ make -0==+0
adds r3,r3
orrs r3,r0
orrs r3,r2
beq 5b
cmp r1,#0
bge 2b
b 3b
@ "scientific" functions start here
@ save/restore r8-r11 by staging them through r4-r7
@ NOTE(review): both routines clobber the caller's r4-r7 (push_r8_r11 copies
@ r8-r11 into them; pop_r8_r11 reloads them from the stack) - callers must
@ preserve r4-r7 themselves around these calls. Confirm against call sites.
.thumb_func
push_r8_r11:
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
push {r4-r7}
bx r14
.thumb_func
pop_r8_r11:
pop {r4-r7}
mov r8,r4
mov r9,r5
mov r10,r6
mov r11,r7
bx r14
@ double-length CORDIC rotation step
@ r0:r1 ω
@ r6 32-i (complementary shift)
@ r7 i (shift)
@ r8:r9 x
@ r10:r11 y
@ r12 coefficient pointer (advanced by 8 bytes per step)
@ an option in rotation mode would be to compute the sequence of σ values
@ in one pass, rotate the initial vector by the residual ω and then run a
@ second pass to compute the final x and y. This would relieve pressure
@ on registers and hence possibly be faster. The same trick does not work
@ in vectoring mode (but perhaps one could work to single precision in
@ a first pass and then double precision in a second pass?).
.thumb_func
@ vectoring mode: rotation direction chosen from the sign of y (r11)
dcordic_vec_step:
mov r2,r12
ldmia r2!,{r3,r4} @ fetch dω for this step, advance pointer
mov r12,r2
mov r2,r11
cmp r2,#0
blt 1f
b 2f
.thumb_func
@ rotation mode: rotation direction chosen from the sign of ω (r1)
dcordic_rot_step:
mov r2,r12
ldmia r2!,{r3,r4} @ fetch dω for this step, advance pointer
mov r12,r2
cmp r1,#0
bge 1f
2:
@ ω<0 / y>=0
@ ω+=dω
@ x+=y>>i, y-=x>>i
adds r0,r3
adcs r1,r4
mov r3,r11
asrs r3,r7
mov r4,r11
lsls r4,r6
mov r2,r10
lsrs r2,r7
orrs r2,r4 @ r2:r3 y>>i, rounding in carry
mov r4,r8
mov r5,r9 @ r4:r5 x
adcs r2,r4
adcs r3,r5 @ r2:r3 x+(y>>i)
mov r8,r2
mov r9,r3
mov r3,r5
lsls r3,r6
asrs r5,r7
lsrs r4,r7
orrs r4,r3 @ r4:r5 x>>i, rounding in carry
mov r2,r10
mov r3,r11
sbcs r2,r4
sbcs r3,r5 @ r2:r3 y-(x>>i)
mov r10,r2
mov r11,r3
bx r14
@ ω>0 / y<0
@ ω-=dω
@ x-=y>>i, y+=x>>i
1:
subs r0,r3
sbcs r1,r4
mov r3,r9
asrs r3,r7
mov r4,r9
lsls r4,r6
mov r2,r8
lsrs r2,r7
orrs r2,r4 @ r2:r3 x>>i, rounding in carry
mov r4,r10
mov r5,r11 @ r4:r5 y
adcs r2,r4
adcs r3,r5 @ r2:r3 y+(x>>i)
mov r10,r2
mov r11,r3
mov r3,r5
lsls r3,r6
asrs r5,r7
lsrs r4,r7
orrs r4,r3 @ r4:r5 y>>i, rounding in carry
mov r2,r8
mov r3,r9
sbcs r2,r4
sbcs r3,r5 @ r2:r3 x-(y>>i)
mov r8,r2
mov r9,r3
bx r14
@ shared exit: return +0.0 (packed double / 64-bit zero) in r0:r1
ret_dzero:
movs r0,#0
movs r1,#0
bx r14
@ convert packed double in r0:r1 to signed/unsigned 32/64-bit integer/fixed-point value in r0:r1 [with r2 places after point], with rounding towards -Inf
@ fixed-point versions only work with reasonable values in r2 because of the way dunpacks works
.thumb_func
@ double -> signed 32-bit integer (round towards -Inf)
_dfix:
movs r2,#0 @ and fall through
.thumb_func
@ double -> signed 32-bit fixed point, r2 fractional bits; result is the
@ high word of the 64-bit conversion with 32 extra fractional bits
qfp_double2fix:
push {r14}
adds r2,#32
bl qfp_double2fix64
movs r0,r1
pop {r15}
.thumb_func
@ double -> unsigned 32-bit integer (round towards -Inf)
_dfixu:
movs r2,#0 @ and fall through
.thumb_func
@ double -> unsigned 32-bit fixed point, r2 fractional bits
qfp_double2ufix:
push {r14}
adds r2,#32
bl qfp_double2ufix64
movs r0,r1
pop {r15}
.thumb_func
@ float -> signed 64-bit integer (round towards -Inf)
_ll_sfrom_f:
movs r1,#0 @ and fall through
.thumb_func
@ float -> signed 64-bit fixed point, r1 fractional bits
qfp_float2fix64:
push {r14}
bl f2fix
b d2f64_a
.thumb_func
@ float -> unsigned 64-bit integer (round towards -Inf)
_ll_ufrom_f:
movs r1,#0 @ and fall through
.thumb_func
@ float -> unsigned 64-bit fixed point, r1 fractional bits
qfp_float2ufix64:
asrs r3,r0,#23 @ negative? return 0
bmi ret_dzero
@ and fall through
@ convert float in r0 to signed fixed point in r0:r1:r3, r1 places after point, rounding towards -Inf
@ result clamped so that r3 can only be 0 or -1
@ trashes r12
.thumb_func
f2fix:
push {r4,r14}
mov r12,r1
asrs r3,r0,#31
lsls r0,#1
lsrs r2,r0,#24
beq 1f @ zero?
cmp r2,#0xff @ Inf?
beq 2f
subs r1,r2,#1
subs r2,#0x7f @ remove exponent bias
lsls r1,#24
subs r0,r1 @ insert implied 1
eors r0,r3
subs r0,r3 @ top two's complement
asrs r1,r0,#4 @ convert to double format
lsls r0,#28
b d2fix_a
1:
movs r0,#0
movs r1,r0
movs r3,r0
pop {r4,r15}
2:
mvns r0,r3 @ return max/min value
mvns r1,r3
pop {r4,r15}
.thumb_func
@ double -> signed 64-bit integer (round towards -Inf)
_ll_sfrom_d:
movs r2,#0 @ and fall through
.thumb_func
@ double -> signed 64-bit fixed point, r2 fractional bits
qfp_double2fix64:
push {r14}
bl d2fix
@ shared tail (also used by the float conversions above): clamp to the
@ extreme 64-bit value if the sign-extension bits r3 don't match the result
d2f64_a:
asrs r2,r1,#31
cmp r2,r3
bne 1f @ sign extension bits fail to match sign of result?
pop {r15}
1:
mvns r0,r3
movs r1,#1
lsls r1,#31
eors r1,r1,r0 @ generate extreme fixed-point values
pop {r15}
.thumb_func
@ double -> unsigned 64-bit integer (round towards -Inf)
_ll_ufrom_d:
movs r2,#0 @ and fall through
.thumb_func
@ double -> unsigned 64-bit fixed point, r2 fractional bits
qfp_double2ufix64:
asrs r3,r1,#20 @ negative? return 0
bmi ret_dzero
@ and fall through
@ convert double in r0:r1 to signed fixed point in r0:r1:r3, r2 places after point, rounding towards -Inf
@ result clamped so that r3 can only be 0 or -1
@ trashes r12
.thumb_func
d2fix:
push {r4,r14}
mov r12,r2
bl dunpacks
asrs r4,r2,#16
adds r4,#1
bge 1f
movs r1,#0 @ -0 -> +0
1:
asrs r3,r1,#31
d2fix_a:
@ here
@ r0:r1 two's complement mantissa
@ r2 unbiased exponent
@ r3 mantissa sign extension bits
add r2,r12 @ exponent plus offset for required binary point position
subs r2,#52 @ required shift
bmi 1f @ shift down?
@ here a shift up by r2 places
cmp r2,#12 @ will clamp?
bge 2f
movs r4,r0
lsls r1,r2
lsls r0,r2
rsbs r2,#0
adds r2,#32 @ complementary shift
lsrs r4,r2
orrs r1,r4
pop {r4,r15}
2:
mvns r0,r3
mvns r1,r3 @ overflow: clamp to extreme fixed-point values
pop {r4,r15}
1:
@ here a shift down by -r2 places
adds r2,#32
bmi 1f @ long shift?
mov r4,r1
lsls r4,r2
rsbs r2,#0
adds r2,#32 @ complementary shift
asrs r1,r2
lsrs r0,r2
orrs r0,r4
pop {r4,r15}
1:
@ here a long shift down
movs r0,r1
asrs r1,#31 @ shift down 32 places
adds r2,#32
bmi 1f @ very long shift?
rsbs r2,#0
adds r2,#32
asrs r0,r2
pop {r4,r15}
1:
movs r0,r3 @ result very near zero: use sign extension bits
movs r1,r3
pop {r4,r15}
@ float <-> double conversions
.thumb_func
@ float -> double (promotion)
@ In:  r0 = packed IEEE single; Out: r0:r1 = packed IEEE double
@ Denormal inputs take the zero path and NaNs become signed Inf,
@ consistent with the flushing behaviour elsewhere in this file.
__aeabi_f2d:
lsrs r3,r0,#31 @ sign bit
lsls r3,#31
lsls r1,r0,#1
lsrs r2,r1,#24 @ exponent
beq 1f @ zero?
cmp r2,#0xff @ Inf?
beq 2f
lsrs r1,#4 @ exponent and top 20 bits of mantissa
ldr r2,=#(0x3ff-0x7f)<<20 @ difference in exponent offsets
adds r1,r2
orrs r1,r3
lsls r0,#29 @ bottom 3 bits of mantissa
bx r14
1:
movs r1,r3 @ return signed zero
3:
movs r0,#0
bx r14
2:
ldr r1,=#0x7ff00000 @ return signed infinity
adds r1,r3
b 3b
.thumb_func
@ double -> float (demotion), round-to-nearest-even
@ In:  r0:r1 = packed IEEE double; Out: r0 = packed IEEE single
@ Overflow -> signed Inf; underflow -> signed zero, except a value whose
@ mantissa is all ones just below the smallest normal float, which rounds
@ up to the smallest normal (the 6f path below).
__aeabi_d2f:
lsls r2,r1,#1
lsrs r2,#21 @ exponent
ldr r3,=#0x3ff-0x7f
subs r2,r3 @ fix exponent bias
ble 1f @ underflow or zero
cmp r2,#0xff
bge 2f @ overflow or infinity
lsls r2,#23 @ position exponent of result
lsrs r3,r1,#31
lsls r3,#31
orrs r2,r3 @ insert sign
lsls r3,r0,#3 @ rounding bits
lsrs r0,#29
lsls r1,#12
lsrs r1,#9
orrs r0,r1 @ assemble mantissa
orrs r0,r2 @ insert exponent and sign
lsls r3,#1
bcc 3f @ no rounding
beq 4f @ all sticky bits 0?
5:
adds r0,#1
3:
bx r14
4:
lsrs r3,r0,#1 @ odd? then round up
bcs 5b
bx r14
1:
beq 6f @ check case where value is just less than smallest normal
7:
lsrs r0,r1,#31
lsls r0,#31
bx r14
6:
lsls r2,r1,#12 @ 20 1:s at top of mantissa?
asrs r2,#12
adds r2,#1
bne 7b
lsrs r2,r0,#29 @ and 3 more 1:s?
cmp r2,#7
bne 7b
movs r2,#1 @ return smallest normal with correct sign
b 8f
2:
movs r2,#0xff
8:
lsrs r0,r1,#31 @ return signed infinity
lsls r0,#8
adds r0,r2
lsls r0,#23
bx r14
@ convert signed/unsigned 32/64-bit integer/fixed-point value in r0:r1 [with r2 places after point] to packed double in r0:r1, with rounding
.thumb_func
@ unsigned 32-bit integer -> double
_dfltu:
movs r1,#0 @ and fall through
.thumb_func
@ unsigned 32-bit fixed point (r1 fractional bits) -> double
qfp_ufix2double:
movs r2,r1
movs r1,#0
b qfp_ufix642double
.thumb_func
@ signed 32-bit integer -> double
_dflt:
movs r1,#0 @ and fall through
.thumb_func
@ signed 32-bit fixed point (r1 fractional bits) -> double
qfp_fix2double:
movs r2,r1
asrs r1,r0,#31 @ sign extend
b qfp_fix642double
.thumb_func
@ unsigned 64-bit integer -> double
_ll_uto_d:
movs r2,#0 @ and fall through
.thumb_func
@ unsigned 64-bit fixed point (r2 fractional bits) -> double
qfp_ufix642double:
movs r3,#0
b uf2d
.thumb_func
@ signed 64-bit integer -> double
_ll_sto_d:
movs r2,#0 @ and fall through
.thumb_func
@ signed 64-bit fixed point (r2 fractional bits) -> double
qfp_fix642double:
asrs r3,r1,#31 @ sign bit across all bits
eors r0,r3
eors r1,r3
subs r0,r3
sbcs r1,r3
@ common tail: r0:r1 = magnitude, r2 = Q-value, r3 = sign across all bits
uf2d:
push {r4,r5,r14}
ldr r4,=#0x432
subs r2,r4,r2 @ form biased exponent
@ here
@ r0:r1 unnormalised mantissa
@ r2 -Q (will become exponent)
@ r3 sign across all bits
cmp r1,#0
bne 1f @ short normalising shift?
movs r1,r0
beq 2f @ zero? return it
movs r0,#0
subs r2,#32 @ fix exponent
1:
asrs r4,r1,#21
bne 3f @ will need shift down (and rounding?)
bcs 4f @ normalised already?
5:
subs r2,#1
adds r0,r0 @ shift up
adcs r1,r1
lsrs r4,r1,#21
bcc 5b
4:
ldr r4,=#0x7fe
cmp r2,r4
bhs 6f @ over/underflow? return signed zero/infinity
7:
lsls r2,#20 @ pack and return
adds r1,r2
lsls r3,#31
adds r1,r3
2:
pop {r4,r5,r15}
6: @ return signed zero/infinity according to unclamped exponent in r2
mvns r2,r2
lsrs r2,#21
movs r0,#0
movs r1,#0
b 7b
3:
@ here we need to shift down to normalise and possibly round
bmi 1f @ already normalised to Q63?
2:
subs r2,#1
adds r0,r0 @ shift up
adcs r1,r1
bpl 2b
1:
@ here we have a 1 in b63 of r0:r1
adds r2,#11 @ correct exponent for subsequent shift down
lsls r4,r0,#21 @ save bits for rounding
lsrs r0,#11
lsls r5,r1,#21
orrs r0,r5
lsrs r1,#11
lsls r4,#1
beq 1f @ sticky bits are zero?
8:
movs r4,#0
adcs r0,r4
adcs r1,r4
b 4b
1:
bcc 4b @ sticky bits are zero but not on rounding boundary
lsrs r4,r0,#1 @ increment if odd (force round to even)
b 8b
.ltorg
@ unpack IEEE double in r0:r1 to signed mantissa (Q52) in r0:r1 and
@ unbiased exponent in r2, via the mdunpacks macro (defined earlier in the
@ file, outside this view — presumably the double analogue of unpackx; confirm there)
.thumb_func
dunpacks:
mdunpacks r0,r1,r2,r3,r4
ldr r3,=#0x3ff
subs r2,r3 @ exponent without offset (remove IEEE bias 1023)
bx r14
@ r0:r1 signed mantissa Q52
@ r2 unbiased exponent < 10 (i.e., |x|<2^10)
@ r4 pointer to:
@ - divisor reciprocal approximation r=1/d Q15
@ - divisor d Q62 0..20
@ - divisor d Q62 21..41
@ - divisor d Q62 42..62
@ returns:
@ r0:r1 reduced result y Q62, -0.6 d < y < 0.6 d (better in practice)
@ r2 quotient q (number of reductions)
@ if exponent >=10, returns r0:r1=0, r2=1024*mantissa sign
@ designed to work for 0.5<d<2, in particular d=ln2 (~0.7) and d=π/2 (~1.6)
@ .thumb_func
@ dreduce:
@ adds r2,#2 @ e+2
@ bmi 1f @ |x|<0.25, too small to need adjustment
@ cmp r2,#12
@ bge 4f
@ 2:
@ movs r5,#17
@ subs r5,r2 @ 15-e
@ movs r3,r1 @ Q20
@ asrs r3,r5 @ x Q5
@ adds r2,#8 @ e+10
@ adds r5,#7 @ 22-e = 32-(e+10)
@ movs r6,r0
@ lsrs r6,r5
@ lsls r0,r2
@ lsls r1,r2
@ orrs r1,r6 @ r0:r1 x Q62
@ ldmia r4,{r4-r7}
@ muls r3,r4 @ rx Q20
@ asrs r2,r3,#20
@ movs r3,#0
@ adcs r2,r3 @ rx Q0 rounded = q; for e.g. r=1.5 |q|<1.5*2^10
@ muls r5,r2 @ qd in pieces: L Q62
@ muls r6,r2 @ M Q41
@ muls r7,r2 @ H Q20
@ lsls r7,#10
@ asrs r4,r6,#11
@ lsls r6,#21
@ adds r6,r5
@ adcs r7,r4
@ asrs r5,#31
@ adds r7,r5 @ r6:r7 qd Q62
@ subs r0,r6
@ sbcs r1,r7 @ remainder Q62
@ bx r14
@ 4:
@ movs r2,#12 @ overflow: clamp to +/-1024
@ movs r0,#0
@ asrs r1,#31
@ lsls r1,#1
@ adds r1,#1
@ lsls r1,#20
@ b 2b
@ 1:
@ lsls r1,#8
@ lsrs r3,r0,#24
@ orrs r1,r3
@ lsls r0,#8 @ r0:r1 Q60, to be shifted down -r2 places
@ rsbs r3,r2,#0
@ adds r2,#32 @ shift down in r3, complementary shift in r2
@ bmi 1f @ long shift?
@ 2:
@ movs r4,r1
@ asrs r1,r3
@ lsls r4,r2
@ lsrs r0,r3
@ orrs r0,r4
@ movs r2,#0 @ rounding
@ adcs r0,r2
@ adcs r1,r2
@ bx r14
@ 1:
@ movs r0,r1 @ down 32 places
@ asrs r1,#31
@ subs r3,#32
@ adds r2,#32
@ bpl 2b
@ movs r0,#0 @ very long shift? return 0
@ movs r1,#0
@ movs r2,#0
@ bx r14
@ .thumb_func
@ qfp_dtan:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ mov r12,r0 @ save ε
@ bl dcos_finish
@ push {r0,r1}
@ mov r0,r12
@ bl dsin_finish
@ pop {r2,r3}
@ bl pop_r8_r11
@ b ddiv0 @ compute sin θ/cos θ
@ .thumb_func
@ qfp_dcos:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ bl dcos_finish
@ b 1f
@ .thumb_func
@ qfp_dsin:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ bl dsin_finish
@ 1:
@ bl pop_r8_r11
@ pop {r4-r7,r15}
@ @ unpack double θ in r0:r1, range reduce and calculate ε, cos α and sin α such that
@ @ θ=α+ε and |ε|≤2^-32
@ @ on return:
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ .thumb_func
@ dsincos:
@ push {r14}
@ bl dunpacks
@ adr r4,dreddata0
@ bl dreduce
@ movs r4,#0
@ ldr r5,=#0x9df04dbb @ this value compensates for the non-unity scaling of the CORDIC rotations
@ ldr r6,=#0x36f656c5
@ lsls r2,#31
@ bcc 1f
@ @ quadrant 2 or 3
@ mvns r6,r6
@ rsbs r5,r5,#0
@ adcs r6,r4
@ 1:
@ lsls r2,#1
@ bcs 1f
@ @ even quadrant
@ mov r10,r4
@ mov r11,r4
@ mov r8,r5
@ mov r9,r6
@ b 2f
@ 1:
@ @ odd quadrant
@ mov r8,r4
@ mov r9,r4
@ mov r10,r5
@ mov r11,r6
@ 2:
@ adr r4,dtab_cc
@ mov r12,r4
@ movs r7,#1
@ movs r6,#31
@ 1:
@ bl dcordic_rot_step
@ adds r7,#1
@ subs r6,#1
@ cmp r7,#33
@ bne 1b
@ pop {r15}
@ dcos_finish:
@ @ here
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ @ and we wish to calculate cos θ=cos(α+ε)~cos α - ε sin α
@ mov r1,r11
@ @ mov r2,r10
@ @ lsrs r2,#31
@ @ adds r1,r2 @ rounding improves accuracy very slightly
@ muls32_s32_64 r0,r1, r2,r3, r4,r5,r6,r2,r3
@ @ r2:r3 ε sin α Q(62+62-32)=Q92
@ mov r0,r8
@ mov r1,r9
@ lsls r5,r3,#2
@ asrs r3,r3,#30
@ lsrs r2,r2,#30
@ orrs r2,r5
@ sbcs r0,r2 @ include rounding
@ sbcs r1,r3
@ movs r2,#62
@ b qfp_fix642double
@ dsin_finish:
@ @ here
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ @ and we wish to calculate sin θ=sin(α+ε)~sin α + ε cos α
@ mov r1,r9
@ muls32_s32_64 r0,r1, r2,r3, r4,r5,r6,r2,r3
@ @ r2:r3 ε cos α Q(62+62-32)=Q92
@ mov r0,r10
@ mov r1,r11
@ lsls r5,r3,#2
@ asrs r3,r3,#30
@ lsrs r2,r2,#30
@ orrs r2,r5
@ adcs r0,r2 @ include rounding
@ adcs r1,r3
@ movs r2,#62
@ b qfp_fix642double
@ .ltorg
@ .align 2
@ dreddata0:
@ .word 0x0000517d @ 2/π Q15
@ .word 0x0014611A @ π/2 Q62=6487ED5110B4611A split into 21-bit pieces
@ .word 0x000A8885
@ .word 0x001921FB
@ .thumb_func
@ qfp_datan2:
@ @ r0:r1 y
@ @ r2:r3 x
@ push {r4-r7,r14}
@ bl push_r8_r11
@ ldr r5,=#0x7ff00000
@ movs r4,r1
@ ands r4,r5 @ y==0?
@ beq 1f
@ cmp r4,r5 @ or Inf/NaN?
@ bne 2f
@ 1:
@ lsrs r1,#20 @ flush
@ lsls r1,#20
@ movs r0,#0
@ 2:
@ movs r4,r3
@ ands r4,r5 @ x==0?
@ beq 1f
@ cmp r4,r5 @ or Inf/NaN?
@ bne 2f
@ 1:
@ lsrs r3,#20 @ flush
@ lsls r3,#20
@ movs r2,#0
@ 2:
@ movs r6,#0 @ quadrant offset
@ lsls r5,#11 @ constant 0x80000000
@ cmp r3,#0
@ bpl 1f @ skip if x positive
@ movs r6,#2
@ eors r3,r5
@ eors r1,r5
@ bmi 1f @ quadrant offset=+2 if y was positive
@ rsbs r6,#0 @ quadrant offset=-2 if y was negative
@ 1:
@ @ now in quadrant 0 or 3
@ adds r7,r1,r5 @ r7=-r1
@ bpl 1f
@ @ y>=0: in quadrant 0
@ cmp r1,r3
@ ble 2f @ y<~x so 0≤θ<~π/4: skip
@ adds r6,#1
@ eors r1,r5 @ negate x
@ b 3f @ and exchange x and y = rotate by -π/2
@ 1:
@ cmp r3,r7
@ bge 2f @ -y<~x so -π/4<~θ≤0: skip
@ subs r6,#1
@ eors r3,r5 @ negate y and ...
@ 3:
@ movs r7,r0 @ exchange x and y
@ movs r0,r2
@ movs r2,r7
@ movs r7,r1
@ movs r1,r3
@ movs r3,r7
@ 2:
@ @ here -π/4<~θ<~π/4
@ @ r6 has quadrant offset
@ push {r6}
@ cmp r2,#0
@ bne 1f
@ cmp r3,#0
@ beq 10f @ x==0 going into division?
@ lsls r4,r3,#1
@ asrs r4,#21
@ adds r4,#1
@ bne 1f @ x==Inf going into division?
@ lsls r4,r1,#1
@ asrs r4,#21
@ adds r4,#1 @ y also ±Inf?
@ bne 10f
@ subs r1,#1 @ make them both just finite
@ subs r3,#1
@ b 1f
@ 10:
@ movs r0,#0
@ movs r1,#0
@ b 12f
@ 1:
@ bl __aeabi_ddiv
@ movs r2,#62
@ bl qfp_double2fix64
@ @ r0:r1 y/x
@ mov r10,r0
@ mov r11,r1
@ movs r0,#0 @ ω=0
@ movs r1,#0
@ mov r8,r0
@ movs r2,#1
@ lsls r2,#30
@ mov r9,r2 @ x=1
@ adr r4,dtab_cc
@ mov r12,r4
@ movs r7,#1
@ movs r6,#31
@ 1:
@ bl dcordic_vec_step
@ adds r7,#1
@ subs r6,#1
@ cmp r7,#33
@ bne 1b
@ @ r0:r1 atan(y/x) Q62
@ @ r8:r9 x residual Q62
@ @ r10:r11 y residual Q62
@ mov r2,r9
@ mov r3,r10
@ subs r2,#12 @ this makes atan(0)==0
@ @ the following is basically a division residual y/x ~ atan(residual y/x)
@ movs r4,#1
@ lsls r4,#29
@ movs r7,#0
@ 2:
@ lsrs r2,#1
@ movs r3,r3 @ preserve carry
@ bmi 1f
@ sbcs r3,r2
@ adds r0,r4
@ adcs r1,r7
@ lsrs r4,#1
@ bne 2b
@ b 3f
@ 1:
@ adcs r3,r2
@ subs r0,r4
@ sbcs r1,r7
@ lsrs r4,#1
@ bne 2b
@ 3:
@ lsls r6,r1,#31
@ asrs r1,#1
@ lsrs r0,#1
@ orrs r0,r6 @ Q61
@ 12:
@ pop {r6}
@ cmp r6,#0
@ beq 1f
@ ldr r4,=#0x885A308D @ π/2 Q61
@ ldr r5,=#0x3243F6A8
@ bpl 2f
@ mvns r4,r4 @ negative quadrant offset
@ mvns r5,r5
@ 2:
@ lsls r6,#31
@ bne 2f @ skip if quadrant offset is ±1
@ adds r0,r4
@ adcs r1,r5
@ 2:
@ adds r0,r4
@ adcs r1,r5
@ 1:
@ movs r2,#61
@ bl qfp_fix642double
@ bl pop_r8_r11
@ pop {r4-r7,r15}
@ .ltorg
@ dtab_cc:
@ .word 0x61bb4f69, 0x1dac6705 @ atan 2^-1 Q62
@ .word 0x96406eb1, 0x0fadbafc @ atan 2^-2 Q62
@ .word 0xab0bdb72, 0x07f56ea6 @ atan 2^-3 Q62
@ .word 0xe59fbd39, 0x03feab76 @ atan 2^-4 Q62
@ .word 0xba97624b, 0x01ffd55b @ atan 2^-5 Q62
@ .word 0xdddb94d6, 0x00fffaaa @ atan 2^-6 Q62
@ .word 0x56eeea5d, 0x007fff55 @ atan 2^-7 Q62
@ .word 0xaab7776e, 0x003fffea @ atan 2^-8 Q62
@ .word 0x5555bbbc, 0x001ffffd @ atan 2^-9 Q62
@ .word 0xaaaaadde, 0x000fffff @ atan 2^-10 Q62
@ .word 0xf555556f, 0x0007ffff @ atan 2^-11 Q62
@ .word 0xfeaaaaab, 0x0003ffff @ atan 2^-12 Q62
@ .word 0xffd55555, 0x0001ffff @ atan 2^-13 Q62
@ .word 0xfffaaaab, 0x0000ffff @ atan 2^-14 Q62
@ .word 0xffff5555, 0x00007fff @ atan 2^-15 Q62
@ .word 0xffffeaab, 0x00003fff @ atan 2^-16 Q62
@ .word 0xfffffd55, 0x00001fff @ atan 2^-17 Q62
@ .word 0xffffffab, 0x00000fff @ atan 2^-18 Q62
@ .word 0xfffffff5, 0x000007ff @ atan 2^-19 Q62
@ .word 0xffffffff, 0x000003ff @ atan 2^-20 Q62
@ .word 0x00000000, 0x00000200 @ atan 2^-21 Q62 @ consider optimising these
@ .word 0x00000000, 0x00000100 @ atan 2^-22 Q62
@ .word 0x00000000, 0x00000080 @ atan 2^-23 Q62
@ .word 0x00000000, 0x00000040 @ atan 2^-24 Q62
@ .word 0x00000000, 0x00000020 @ atan 2^-25 Q62
@ .word 0x00000000, 0x00000010 @ atan 2^-26 Q62
@ .word 0x00000000, 0x00000008 @ atan 2^-27 Q62
@ .word 0x00000000, 0x00000004 @ atan 2^-28 Q62
@ .word 0x00000000, 0x00000002 @ atan 2^-29 Q62
@ .word 0x00000000, 0x00000001 @ atan 2^-30 Q62
@ .word 0x80000000, 0x00000000 @ atan 2^-31 Q62
@ .word 0x40000000, 0x00000000 @ atan 2^-32 Q62
@ .thumb_func
@ qfp_dexp:
@ push {r4-r7,r14}
@ bl dunpacks
@ adr r4,dreddata1
@ bl dreduce
@ cmp r1,#0
@ bge 1f
@ ldr r4,=#0xF473DE6B
@ ldr r5,=#0x2C5C85FD @ ln2 Q62
@ adds r0,r4
@ adcs r1,r5
@ subs r2,#1
@ 1:
@ push {r2}
@ movs r7,#1 @ shift
@ adr r6,dtab_exp
@ movs r2,#0
@ movs r3,#1
@ lsls r3,#30 @ x=1 Q62
@ 3:
@ ldmia r6!,{r4,r5}
@ mov r12,r6
@ subs r0,r4
@ sbcs r1,r5
@ bmi 1f
@ rsbs r6,r7,#0
@ adds r6,#32 @ complementary shift
@ movs r5,r3
@ asrs r5,r7
@ movs r4,r3
@ lsls r4,r6
@ movs r6,r2
@ lsrs r6,r7 @ rounding bit in carry
@ orrs r4,r6
@ adcs r2,r4
@ adcs r3,r5 @ x+=x>>i
@ b 2f
@ 1:
@ adds r0,r4 @ restore argument
@ adcs r1,r5
@ 2:
@ mov r6,r12
@ adds r7,#1
@ cmp r7,#33
@ bne 3b
@ @ here
@ @ r0:r1 ε (residual x, where x=a+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r2:r3 exp a Q62
@ @ and we wish to calculate exp x=exp a exp ε~(exp a)(1+ε)
@ muls32_32_64 r0,r3, r4,r1, r5,r6,r7,r4,r1
@ @ r4:r1 ε exp a Q(62+62-32)=Q92
@ lsrs r4,#30
@ lsls r0,r1,#2
@ orrs r0,r4
@ asrs r1,#30
@ adds r0,r2
@ adcs r1,r3
@ pop {r2}
@ rsbs r2,#0
@ adds r2,#62
@ bl qfp_fix642double @ in principle we can pack faster than this because we know the exponent
@ pop {r4-r7,r15}
@ .ltorg
@ .thumb_func
@ qfp_dln:
@ push {r4-r7,r14}
@ lsls r7,r1,#1
@ bcs 5f @ <0 ...
@ asrs r7,#21
@ beq 5f @ ... or =0? return -Inf
@ adds r7,#1
@ beq 6f @ Inf/NaN? return +Inf
@ bl dunpacks
@ push {r2}
@ lsls r1,#9
@ lsrs r2,r0,#23
@ orrs r1,r2
@ lsls r0,#9
@ @ r0:r1 m Q61 = m/2 Q62 0.5≤m/2<1
@ movs r7,#1 @ shift
@ adr r6,dtab_exp
@ mov r12,r6
@ movs r2,#0
@ movs r3,#0 @ y=0 Q62
@ 3:
@ rsbs r6,r7,#0
@ adds r6,#32 @ complementary shift
@ movs r5,r1
@ asrs r5,r7
@ movs r4,r1
@ lsls r4,r6
@ movs r6,r0
@ lsrs r6,r7
@ orrs r4,r6 @ x>>i, rounding bit in carry
@ adcs r4,r0
@ adcs r5,r1 @ x+(x>>i)
@ lsrs r6,r5,#30
@ bne 1f @ x+(x>>i)>1?
@ movs r0,r4
@ movs r1,r5 @ x+=x>>i
@ mov r6,r12
@ ldmia r6!,{r4,r5}
@ subs r2,r4
@ sbcs r3,r5
@ 1:
@ movs r4,#8
@ add r12,r4
@ adds r7,#1
@ cmp r7,#33
@ bne 3b
@ @ here:
@ @ r0:r1 residual x, nearly 1 Q62
@ @ r2:r3 y ~ ln m/2 = ln m - ln2 Q62
@ @ result is y + ln2 + ln x ~ y + ln2 + (x-1)
@ lsls r1,#2
@ asrs r1,#2 @ x-1
@ adds r2,r0
@ adcs r3,r1
@ pop {r7}
@ @ here:
@ @ r2:r3 ln m/2 = ln m - ln2 Q62
@ @ r7 unbiased exponent
@ adr r4,dreddata1+4
@ ldmia r4,{r0,r1,r4}
@ adds r7,#1
@ muls r0,r7 @ Q62
@ muls r1,r7 @ Q41
@ muls r4,r7 @ Q20
@ lsls r7,r1,#21
@ asrs r1,#11
@ asrs r5,r1,#31
@ adds r0,r7
@ adcs r1,r5
@ lsls r7,r4,#10
@ asrs r4,#22
@ asrs r5,r1,#31
@ adds r1,r7
@ adcs r4,r5
@ @ r0:r1:r4 exponent*ln2 Q62
@ asrs r5,r3,#31
@ adds r0,r2
@ adcs r1,r3
@ adcs r4,r5
@ @ r0:r1:r4 result Q62
@ movs r2,#62
@ 1:
@ asrs r5,r1,#31
@ cmp r4,r5
@ beq 2f @ r4 a sign extension of r1?
@ lsrs r0,#4 @ no: shift down 4 places and try again
@ lsls r6,r1,#28
@ orrs r0,r6
@ lsrs r1,#4
@ lsls r6,r4,#28
@ orrs r1,r6
@ asrs r4,#4
@ subs r2,#4
@ b 1b
@ 2:
@ bl qfp_fix642double
@ pop {r4-r7,r15}
@ 5:
@ ldr r1,=#0xfff00000
@ movs r0,#0
@ pop {r4-r7,r15}
@ 6:
@ ldr r1,=#0x7ff00000
@ movs r0,#0
@ pop {r4-r7,r15}
@ .ltorg
@ .align 2
@ dreddata1:
@ .word 0x0000B8AA @ 1/ln2 Q15
@ .word 0x0013DE6B @ ln2 Q62 Q62=2C5C85FDF473DE6B split into 21-bit pieces
@ .word 0x000FEFA3
@ .word 0x000B1721
@ dtab_exp:
@ .word 0xbf984bf3, 0x19f323ec @ log 1+2^-1 Q62
@ .word 0xcd4d10d6, 0x0e47fbe3 @ log 1+2^-2 Q62
@ .word 0x8abcb97a, 0x0789c1db @ log 1+2^-3 Q62
@ .word 0x022c54cc, 0x03e14618 @ log 1+2^-4 Q62
@ .word 0xe7833005, 0x01f829b0 @ log 1+2^-5 Q62
@ .word 0x87e01f1e, 0x00fe0545 @ log 1+2^-6 Q62
@ .word 0xac419e24, 0x007f80a9 @ log 1+2^-7 Q62
@ .word 0x45621781, 0x003fe015 @ log 1+2^-8 Q62
@ .word 0xa9ab10e6, 0x001ff802 @ log 1+2^-9 Q62
@ .word 0x55455888, 0x000ffe00 @ log 1+2^-10 Q62
@ .word 0x0aa9aac4, 0x0007ff80 @ log 1+2^-11 Q62
@ .word 0x01554556, 0x0003ffe0 @ log 1+2^-12 Q62
@ .word 0x002aa9ab, 0x0001fff8 @ log 1+2^-13 Q62
@ .word 0x00055545, 0x0000fffe @ log 1+2^-14 Q62
@ .word 0x8000aaaa, 0x00007fff @ log 1+2^-15 Q62
@ .word 0xe0001555, 0x00003fff @ log 1+2^-16 Q62
@ .word 0xf80002ab, 0x00001fff @ log 1+2^-17 Q62
@ .word 0xfe000055, 0x00000fff @ log 1+2^-18 Q62
@ .word 0xff80000b, 0x000007ff @ log 1+2^-19 Q62
@ .word 0xffe00001, 0x000003ff @ log 1+2^-20 Q62
@ .word 0xfff80000, 0x000001ff @ log 1+2^-21 Q62
@ .word 0xfffe0000, 0x000000ff @ log 1+2^-22 Q62
@ .word 0xffff8000, 0x0000007f @ log 1+2^-23 Q62
@ .word 0xffffe000, 0x0000003f @ log 1+2^-24 Q62
@ .word 0xfffff800, 0x0000001f @ log 1+2^-25 Q62
@ .word 0xfffffe00, 0x0000000f @ log 1+2^-26 Q62
@ .word 0xffffff80, 0x00000007 @ log 1+2^-27 Q62
@ .word 0xffffffe0, 0x00000003 @ log 1+2^-28 Q62
@ .word 0xfffffff8, 0x00000001 @ log 1+2^-29 Q62
@ .word 0xfffffffe, 0x00000000 @ log 1+2^-30 Q62
@ .word 0x80000000, 0x00000000 @ log 1+2^-31 Q62
@ .word 0x40000000, 0x00000000 @ log 1+2^-32 Q62
qfp_lib_end:
@ | (extraction artifact: stray dataset-join metadata between two concatenated
@ copies of this file; commented out so it cannot be mistaken for source)
@ Air-duino/Arduino-AirMCU
@ | 88,395
@ |
@ variants/AIR001/AIR001_DEV/qfplib-m0-full.S
@ |
@ Copyright 2019-2020 Mark Owen
@ http://www.quinapalus.com
@ E-mail: qfp@quinapalus.com
@
@ This file is free software: you can redistribute it and/or modify
@ it under the terms of version 2 of the GNU General Public License
@ as published by the Free Software Foundation.
@
@ This file is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
@ GNU General Public License for more details.
@
@ You should have received a copy of the GNU General Public License
@ along with this file. If not, see <http://www.gnu.org/licenses/> or
@ write to the Free Software Foundation, Inc., 51 Franklin Street,
@ Fifth Floor, Boston, MA 02110-1301, USA.
.syntax unified
.cpu cortex-m0plus
.thumb
@ exported symbols
.global __aeabi_fadd
.global __aeabi_fsub
.global __aeabi_fmul
.global __aeabi_fdiv
.global qfp_fcmp
.global _fsqrt
.global _ffix
.global qfp_float2fix
.global _ffixu
.global qfp_float2ufix
.global _fflt
.global qfp_fix2float
.global _ffltu
.global qfp_ufix2float
.global _ll_sto_f
.global qfp_fix642float
.global _ll_uto_f
.global qfp_ufix642float
@ .global qfp_fcos
@ .global qfp_fsin
@ .global qfp_ftan
@ .global qfp_fatan2
@ .global qfp_fexp
@ .global qfp_fln
.global __aeabi_dadd
.global __aeabi_dsub
.global __aeabi_dmul
.global __aeabi_ddiv
.global _dsqrt
@ .global qfp_dcos
@ .global qfp_dsin
@ .global qfp_dtan
@ .global qfp_datan2
@ .global qfp_dexp
@ .global qfp_dln
.global qfp_dcmp
.global _ll_sfrom_f
.global qfp_float2fix64
.global _ll_ufrom_f
.global qfp_float2ufix64
.global _dfix
.global qfp_double2fix
.global _dfixu
.global qfp_double2ufix
.global _ll_sfrom_d
.global qfp_double2fix64
.global _ll_ufrom_d
.global qfp_double2ufix64
.global _dflt
.global qfp_fix2double
.global _dfltu
.global qfp_ufix2double
.global _ll_sto_d
.global qfp_fix642double
.global _ll_uto_d
.global qfp_ufix642double
.global __aeabi_d2f
.global __aeabi_f2d
qfp_lib_start:
@ exchange r0<->r1, r2<->r3
@ Old r0/r2 are parked on the stack while r1/r3 are copied over, then the
@ saved values are popped into r1/r3; the saved r14 pops into r15 (pc) to return.
xchxy:
push {r0,r2,r14}
mov r0,r1
mov r2,r3
pop {r1,r3,r15}
@ IEEE single in r0-> signed (two's complement) mantissa in r0 9Q23 (24 significant bits), signed exponent (bias removed) in r2
@ trashes r4; zero, denormal -> mantissa=+/-1, exponent=-380; Inf, NaN -> mantissa=+/-1, exponent=+640
@ The far-out-of-range exponents for the special cases let later code detect
@ them with simple magnitude comparisons instead of per-case tests.
unpackx:
lsrs r2,r0,#23 @ save exponent and sign (9 bits: sign in b8)
lsls r0,#9 @ extract mantissa
lsrs r0,#9
movs r4,#1
lsls r4,#23
orrs r0,r4 @ reinstate implied leading 1
cmp r2,#255 @ test sign bit (r2>255 iff sign set)
uxtb r2,r2 @ clear it
bls 1f @ branch on positive
rsbs r0,#0 @ negate mantissa
1:
subs r2,#1
cmp r2,#254 @ zero/denormal/Inf/NaN? (biased exp was 0 or 255)
bhs 2f
subs r2,#126 @ remove exponent bias: can now be -126..+127
bx r14
2: @ here with special-case values
cmp r0,#0 @ N = sign of the (possibly negated) mantissa
mov r0,r4 @ set mantissa to +1
bpl 3f
rsbs r0,#0 @ zero/denormal/Inf/NaN: mantissa=+/-1
3:
subs r2,#126 @ zero/denormal: exponent -> -127; Inf, NaN: exponent -> 128
lsls r2,#2 @ zero/denormal: exponent -> -508; Inf, NaN: exponent -> 512
adds r2,#128 @ zero/denormal: exponent -> -380; Inf, NaN: exponent -> 640
bx r14
@ normalise and pack signed mantissa in r0 nominally 3Q29, signed exponent in r2-> IEEE single in r0
@ trashes r4, preserves r1,r3
@ r5: "sticky bits", must be zero iff all result bits below r0 are zero for correct rounding
@ Rounds to nearest-even; overflow returns signed infinity, underflow signed zero
@ (denormal results are flushed to zero — see the "10:" path).
packx:
lsrs r4,r0,#31 @ save sign bit
lsls r4,r4,#31 @ sign now in b31
bpl 2f @ skip if positive
cmp r5,#0
beq 11f
adds r0,#1 @ fiddle carry in to following rsb if sticky bits are non-zero
11:
rsbs r0,#0 @ can now treat r0 as unsigned
packx0: @ alternate entry: r0 already an unsigned magnitude
bmi 3f @ catch r0=0x80000000 case
2:
subs r2,#1 @ normalisation loop
adds r0,r0
beq 1f @ zero? special case
bpl 2b @ normalise so leading "1" in bit 31
3:
adds r2,#129 @ (mis-)offset exponent
bne 12f @ special case: highest denormal can round to lowest normal
adds r0,#0x80 @ in special case, need to add 256 to r0 for rounding
bcs 4f @ tripped carry? then have leading 1 in C as required
12:
adds r0,#0x80 @ rounding
bcs 4f @ tripped carry? then have leading 1 in C as required (and result is even so can ignore sticky bits)
cmp r5,#0
beq 7f @ sticky bits zero? may need round-to-even fixup
8:
lsls r0,#1 @ remove leading 1
9:
subs r2,#1 @ compensate exponent on this path
4:
cmp r2,#254
bge 5f @ overflow?
adds r2,#1 @ correct exponent offset
ble 10f @ denormal/underflow?
lsrs r0,#9 @ align mantissa
lsls r2,#23 @ align exponent
orrs r0,r2 @ assemble exponent and mantissa
6:
orrs r0,r4 @ apply sign
1:
bx r14
5:
movs r0,#0xff @ create infinity
lsls r0,#23
b 6b
10:
movs r0,#0 @ create zero (denormals flushed)
bx r14
7: @ sticky bit rounding case
lsls r5,r0,#24 @ check bottom 8 bits of r0
bne 8b @ not an exact tie? rounding already correct
lsrs r0,#9 @ exact tie: clear low bit to ensure even result
lsls r0,#10
b 9b
.align 2
.ltorg
@ signed multiply r0 1Q23 by r1 4Q23, result in r0 7Q25, sticky bits in r5
@ trashes r3,r4
@ Schoolbook 32x32 multiply on 16-bit halves (Cortex-M0 muls is 32x32->32):
@ the low 16 bits of the L*L partial product and bits shifted out of the
@ middle sum are collected in r5 so the caller can round correctly.
mul0:
uxth r3,r0 @ Q23 (low half of x)
asrs r4,r1,#16 @ Q7 (high half of y, signed)
muls r3,r4 @ L*H, Q30 signed
asrs r4,r0,#16 @ Q7 (high half of x, signed)
uxth r5,r1 @ Q23 (low half of y)
muls r4,r5 @ H*L, Q30 signed
adds r3,r4 @ sum of middle partial products
uxth r4,r0
muls r4,r5 @ L*L, Q46 unsigned
lsls r5,r4,#16 @ initialise sticky bits from low half of low partial product
lsrs r4,#16 @ Q25
adds r3,r4 @ add high half of low partial product to sum of middle partial products
@ (cannot generate carry by limits on input arguments)
asrs r0,#16 @ Q7
asrs r1,#16 @ Q7
muls r0,r1 @ H*H, Q14 signed
lsls r0,#11 @ high partial product Q25
lsls r1,r3,#27 @ bottom 5 bits of middle sum are sticky
orrs r5,r1 @ collect further sticky bits
asrs r1,r3,#5 @ middle partial products Q25
adds r0,r1 @ final result
bx r14
@ compare two IEEE singles: x in r0, y in r1
@ returns r0 = +1 if x>y, 0 if x==y, -1 if x<y; flags set from the result
@ (the final subs). Denormals are flushed to signed zero and the mantissa of
@ NaN/Inf is cleared first, so +0==-0 and NaNs compare like infinities —
@ NOTE(review): this is not IEEE-compliant NaN ordering; presumably deliberate
@ for a small library — confirm against qfplib's documented semantics.
.thumb_func
qfp_fcmp:
lsls r2,r0,#1
lsrs r2,#24 @ biased exponent of x
beq 1f @ zero/denormal?
cmp r2,#0xff
bne 2f @ not Inf/NaN? leave x alone
1:
lsrs r0,#23 @ clear mantissa if NaN or denormal
lsls r0,#23
2:
lsls r2,r1,#1
lsrs r2,#24 @ same treatment for y
beq 1f
cmp r2,#0xff
bne 2f
1:
lsrs r1,#23 @ clear mantissa if NaN or denormal
lsls r1,#23
2:
movs r2,#1 @ initialise result
eors r1,r0
bmi 4f @ opposite signs? then can proceed on basis of sign of x
eors r1,r0 @ restore y (flags now reflect common sign)
bpl 1f
rsbs r2,#0 @ both negative? flip comparison
1:
cmp r0,r1 @ same-sign floats compare like their bit patterns
bgt 2f
blt 3f
5:
movs r2,#0 @ equal
3:
rsbs r2,#0
2:
subs r0,r2,#0 @ move result to r0, setting flags
bx r14
4: @ opposite signs
orrs r1,r0
adds r1,r1 @ both magnitudes zero (+0 vs -0)?
beq 5b @ then equal
cmp r0,#0
bge 2b @ x non-negative -> x>y -> +1
b 3b @ x negative -> x<y -> -1
@ convert float to signed int, rounding towards -Inf, clamping
.thumb_func
_ffix:
movs r1,#0 @ fall through
@ convert float in r0 to signed fixed point in r0, clamping; r1 = bits after point
@ out-of-range positive -> 0x7fffffff, negative -> 0x80000000
@ (arithmetic right shifts give the round-towards--Inf behaviour)
.thumb_func
qfp_float2fix:
push {r4,r14}
bl unpackx
movs r3,r2
adds r3,#130
bmi 6f @ exponent -380 (zero/denormal from unpackx)? return 0
add r2,r1 @ incorporate binary point position into exponent
subs r2,#23 @ r2 is now amount of left shift required
blt 1f @ requires right shift?
cmp r2,#7 @ overflow? (mantissa is 9Q23, so >7 shifts out)
ble 4f @ fits: shared left-shift exit in qfp_float2ufix below
3: @ overflow
asrs r1,r0,#31 @ +ve:0 -ve:0xffffffff
mvns r1,r1 @ +ve:0xffffffff -ve:0
movs r0,#1
lsls r0,#31
5:
eors r0,r1 @ +ve:0x7fffffff -ve:0x80000000 (unsigned path: 0xffffffff)
pop {r4,r15}
1:
rsbs r2,#0 @ right shift for r0, >0
cmp r2,#32
blt 2f @ more than 32 bits of right shift?
movs r2,#32 @ clamp: asrs by 32 leaves just the sign
2:
asrs r0,r0,r2 @ arithmetic shift rounds towards -Inf
pop {r4,r15}
6:
movs r0,#0
pop {r4,r15}
@ unsigned version of the above; negative input -> 0, overflow -> 0xffffffff
@ shares labels 1/4/5 with qfp_float2fix above
.thumb_func
_ffixu:
movs r1,#0 @ fall through
.thumb_func
qfp_float2ufix:
push {r4,r14}
bl unpackx
add r2,r1 @ incorporate binary point position into exponent
movs r1,r0 @ negative? r1==r0 so "eors r0,r1" at 5 above
bmi 5b @ yields 0
subs r2,#23 @ r2 is now amount of left shift required
blt 1b @ requires right shift?
mvns r1,r0 @ ready to return 0xffffffff (r0 eor r1 at 5 above)
cmp r2,#8 @ overflow? (one more shift allowed than signed case)
bgt 5b
4: @ also the exit for the in-range signed case
lsls r0,r0,r2 @ result fits, left shifted
pop {r4,r15}
@ convert uint64 to float, rounding
.thumb_func
_ll_uto_f:
movs r2,#0 @ fall through
@ convert unsigned 64-bit fix to float, rounding; number of r0:r1 bits after point in r2
.thumb_func
qfp_ufix642float:
push {r4,r5,r14}
cmp r1,#0
bpl 3f @ positive? we can use signed code
lsls r5,r1,#31 @ b63 set: shift down one place; b0 of r1 -> sticky
orrs r5,r0 @ along with all of r0
lsrs r0,r1,#1 @ keep top word only (r0:r1 treated as r0=mantissa)
subs r2,#1 @ compensate exponent for the shift
b 4f
@ convert int64 to float, rounding
.thumb_func
_ll_sto_f:
movs r2,#0 @ fall through
@ convert signed 64-bit fix to float, rounding; number of r0:r1 bits after point in r2
.thumb_func
qfp_fix642float:
push {r4,r5,r14}
3:
movs r5,r0
orrs r5,r1
beq ret_pop45 @ zero? return +0 (shared epilogue below)
asrs r5,r1,#31 @ sign bits
2:
asrs r4,r1,#24 @ try shifting 7 bits at a time
cmp r4,r5 @ still all sign bits in the top 8?
bne 1f @ next shift will overflow?
lsls r1,#7 @ 64-bit left shift by 7
lsrs r4,r0,#25
orrs r1,r4
lsls r0,#7
adds r2,#7 @ track shifts in the exponent adjustment
b 2b
1:
movs r5,r0 @ low word becomes the sticky bits
movs r0,r1 @ high word becomes the mantissa
4:
rsbs r2,#0 @ exponent = 29+32-Q-(shifts applied)
adds r2,#32+29 @ (packx expects a 3Q29-scaled mantissa)
b packret @ pack with sticky bits in r5 (in qfp_fix2float below)
@ convert signed int to float, rounding
.thumb_func
_fflt:
movs r1,#0 @ fall through
@ convert signed fix to float, rounding; number of r0 bits after point in r1
@ also hosts packretns/packret/ret_pop45, the shared pack-and-return tail
@ used by the other int->float entry points
.thumb_func
qfp_fix2float:
push {r4,r5,r14}
1:
movs r2,#29
subs r2,r1 @ fix exponent (mantissa treated as 3Q29 by packx)
packretns: @ pack and return, sticky bits=0
movs r5,#0
packret: @ common return point: "pack and return"
bl packx
ret_pop45:
pop {r4,r5,r15}
@ unsigned version
.thumb_func
_ffltu:
movs r1,#0 @ fall through
.thumb_func
qfp_ufix2float:
push {r4,r5,r14}
cmp r0,#0
bge 1b @ treat <2^31 as signed
movs r2,#30 @ top bit set: halve and bump exponent instead
subs r2,r1 @ fix exponent
lsls r5,r0,#31 @ one sticky bit (the bit shifted out)
lsrs r0,#1
b packret
@ All the scientific functions are implemented using the CORDIC algorithm. For notation,
@ details not explained in the comments below, and a good overall survey see
@ "50 Years of CORDIC: Algorithms, Architectures, and Applications" by Meher et al.,
@ IEEE Transactions on Circuits and Systems Part I, Volume 56 Issue 9.
@ Register use:
@ r0: x
@ r1: y
@ r2: z/omega
@ r3: coefficient pointer
@ r4,r12: m
@ r5: i (shift)
@ Table entries (see tab_cc elsewhere in the file) are angle*4, with b0 set to
@ suppress the shift increment (repeated angle) and b1 set to mark end of table;
@ the two lsrs below move those flag bits into C in turn.
cordic_start: @ initialisation
movs r5,#0 @ initial shift=0
mov r12,r4 @ stash m (+1 circular / -1 hyperbolic)
b 5f
cordic_vstep: @ one step of algorithm in vector mode: drive y towards 0
cmp r1,#0 @ check sign of y
bgt 4f
b 1f
cordic_rstep: @ one step of algorithm in rotation mode: drive z towards 0
cmp r2,#0 @ check sign of angle
bge 1f
4:
subs r1,r6 @ negative rotation: y=y-(x>>i)
rsbs r7,#0 @ flip (y>>i) so x update below goes the other way
adds r2,r4 @ accumulate angle
b 2f
1:
adds r1,r6 @ positive rotation: y=y+(x>>i)
subs r2,r4 @ accumulate angle
2:
mov r4,r12
muls r7,r4 @ apply sign from m
subs r0,r7 @ finish rotation: x=x{+/-}(y>>i)
5:
ldmia r3!,{r4} @ fetch next angle from table and bump pointer
lsrs r4,#1 @ repeated angle? (b0 of entry)
bcs 3f
adds r5,#1 @ adjust shift if not
3:
mov r6,r0
asrs r6,r5 @ x>>i, ready for the next step
mov r7,r1
asrs r7,r5 @ y>>i
lsrs r4,#1 @ shift end flag (b1 of entry) into carry; r4 = angle
bx r14
@ CORDIC rotation mode: rotate (x,y) by angle z; returns x~cos, y~sin terms
cordic_rot:
push {r6,r7,r14}
bl cordic_start @ initialise
1:
bl cordic_rstep
bcc 1b @ step until table finished
asrs r6,r0,#14 @ remaining small rotations can be linearised: see IV.B of paper referenced above
asrs r7,r1,#14
asrs r2,#3
muls r6,r2 @ all remaining CORDIC steps in a multiplication
muls r7,r2
mov r4,r12
muls r7,r4 @ apply sign from m
asrs r6,#12
asrs r7,#12
subs r0,r7 @ x=x{+/-}(yz>>k)
adds r1,r6 @ y=y+(xz>>k)
cordic_exit:
pop {r6,r7,r15}
@ CORDIC vector mode: rotate (x,y) until y~0, accumulating the angle in z
cordic_vec:
push {r6,r7,r14}
bl cordic_start @ initialise
1:
bl cordic_vstep
bcc 1b @ step until table finished
4:
cmp r1,#0 @ continue as in cordic_vstep but without using table; x is not affected as y is small
bgt 2f @ check sign of y
adds r1,r6 @ positive rotation: y=y+(x>>i)
subs r2,r4 @ accumulate angle
b 3f
2:
subs r1,r6 @ negative rotation: y=y-(x>>i)
adds r2,r4 @ accumulate angle
3:
asrs r6,#1 @ halve both the rotation term and the
asrs r4,#1 @ "table entry" angle each iteration
bne 4b @ until the angle underflows to 0
b cordic_exit
@ .thumb_func
@ qfp_fsin: @ calculate sin and cos using CORDIC rotation method
@ push {r4,r5,r14}
@ movs r1,#24
@ bl qfp_float2fix @ range reduction by repeated subtraction/addition in fixed point
@ ldr r4,pi_q29
@ lsrs r4,#4 @ 2pi Q24
@ 1:
@ subs r0,r4
@ bge 1b
@ 1:
@ adds r0,r4
@ bmi 1b @ now in range 0..2pi
@ lsls r2,r0,#2 @ z Q26
@ lsls r5,r4,#1 @ pi Q26 (r4=pi/2 Q26)
@ ldr r0,=#0x136e9db4 @ initialise CORDIC x,y with scaling
@ movs r1,#0
@ 1:
@ cmp r2,r4 @ >pi/2?
@ blt 2f
@ subs r2,r5 @ reduce range to -pi/2..pi/2
@ rsbs r0,#0 @ rotate vector by pi
@ b 1b
@ 2:
@ lsls r2,#3 @ Q29
@ adr r3,tab_cc @ circular coefficients
@ movs r4,#1 @ m=1
@ bl cordic_rot
@ adds r1,#9 @ fiddle factor to make sin(0)==0
@ movs r2,#0 @ exponents to zero
@ movs r3,#0
@ movs r5,#0 @ no sticky bits
@ bl clampx
@ bl packx @ pack cosine
@ bl xchxy
@ bl clampx
@ b packretns @ pack sine
@ .thumb_func
@ qfp_fcos:
@ push {r14}
@ bl qfp_fsin
@ mov r0,r1 @ extract cosine result
@ pop {r15}
@ @ force r0 to lie in range [-1,1] Q29
@ clampx:
@ movs r4,#1
@ lsls r4,#29
@ cmp r0,r4
@ bgt 1f
@ rsbs r4,#0
@ cmp r0,r4
@ ble 1f
@ bx r14
@ 1:
@ movs r0,r4
@ bx r14
@ .thumb_func
@ qfp_ftan:
@ push {r4,r5,r6,r14}
@ bl qfp_fsin @ sine in r0/r2, cosine in r1/r3
@ b fdiv_n @ sin/cos
@ .thumb_func
@ qfp_fexp:
@ push {r4,r5,r14}
@ movs r1,#24
@ bl qfp_float2fix @ Q24: covers entire valid input range
@ asrs r1,r0,#16 @ Q8
@ ldr r2,=#5909 @ log_2(e) Q12
@ muls r2,r1 @ estimate exponent of result Q20 (always an underestimate)
@ asrs r2,#20 @ Q0
@ lsls r1,r0,#6 @ Q30
@ ldr r0,=#0x2c5c85fe @ ln(2) Q30
@ muls r0,r2 @ accurate contribution of estimated exponent
@ subs r1,r0 @ residual to be exponentiated, guaranteed ≥0, < about 0.75 Q30
@ @ here
@ @ r1: mantissa to exponentiate, 0...~0.75 Q30
@ @ r2: first exponent estimate
@ movs r5,#1 @ shift
@ adr r3,ftab_exp @ could use alternate words from dtab_exp to save space if required
@ movs r0,#1
@ lsls r0,#29 @ x=1 Q29
@ 3:
@ ldmia r3!,{r4}
@ subs r4,r1,r4
@ bmi 1f
@ movs r1,r4 @ keep result of subtraction
@ movs r4,r0
@ lsrs r4,r5
@ adcs r0,r4 @ x+=x>>i with rounding
@ 1:
@ adds r5,#1
@ cmp r5,#15
@ bne 3b
@ @ here
@ @ r0: exp a Q29 1..2+
@ @ r1: ε (residual x where x=a+ε), < 2^-14 Q30
@ @ r2: first exponent estimate
@ @ and we wish to calculate exp x=exp a exp ε~(exp a)(1+ε)
@ lsrs r3,r0,#15 @ exp a Q14
@ muls r3,r1 @ ε exp a Q44
@ lsrs r3,#15 @ ε exp a Q29
@ adcs r0,r3 @ (1+ε) exp a Q29 with rounding
@ b packretns @ pack result
@ .thumb_func
@ qfp_fln:
@ push {r4,r5,r14}
@ asrs r1,r0,#23
@ bmi 3f @ -ve argument?
@ beq 3f @ 0 argument?
@ cmp r1,#0xff
@ beq 4f @ +Inf/NaN
@ bl unpackx
@ adds r2,#1
@ ldr r3,=#0x2c5c85fe @ ln(2) Q30
@ lsrs r1,r3,#14 @ ln(2) Q16
@ muls r1,r2 @ result estimate Q16
@ asrs r1,#16 @ integer contribution to result
@ muls r3,r2
@ lsls r4,r1,#30
@ subs r3,r4 @ fractional contribution to result Q30, signed
@ lsls r0,#8 @ Q31
@ @ here
@ @ r0: mantissa Q31
@ @ r1: integer contribution to result
@ @ r3: fractional contribution to result Q30, signed
@ movs r5,#1 @ shift
@ adr r4,ftab_exp @ could use alternate words from dtab_exp to save space if required
@ 2:
@ movs r2,r0
@ lsrs r2,r5
@ adcs r2,r0 @ x+(x>>i) with rounding
@ bcs 1f @ >=2?
@ movs r0,r2 @ keep result
@ ldr r2,[r4]
@ subs r3,r2
@ 1:
@ adds r4,#4
@ adds r5,#1
@ cmp r5,#15
@ bne 2b
@ @ here
@ @ r0: residual x, nearly 2 Q31
@ @ r1: integer contribution to result
@ @ r3: fractional part of result Q30
@ asrs r0,#2
@ adds r0,r3,r0
@ cmp r1,#0
@ bne 2f
@ asrs r0,#1
@ lsls r1,#29
@ adds r0,r1
@ movs r2,#0
@ b packretns
@ 2:
@ lsls r1,#24
@ asrs r0,#6 @ Q24
@ adcs r0,r1 @ with rounding
@ movs r2,#5
@ b packretns
@ 3:
@ ldr r0,=#0xff800000 @ -Inf
@ pop {r4,r5,r15}
@ 4:
@ ldr r0,=#0x7f800000 @ +Inf
@ pop {r4,r5,r15}
@ .align 2
@ ftab_exp:
@ .word 0x19f323ed @ log 1+2^-1 Q30
@ .word 0x0e47fbe4 @ log 1+2^-2 Q30
@ .word 0x0789c1dc @ log 1+2^-3 Q30
@ .word 0x03e14618 @ log 1+2^-4 Q30
@ .word 0x01f829b1 @ log 1+2^-5 Q30
@ .word 0x00fe0546 @ log 1+2^-6 Q30
@ .word 0x007f80aa @ log 1+2^-7 Q30
@ .word 0x003fe015 @ log 1+2^-8 Q30
@ .word 0x001ff803 @ log 1+2^-9 Q30
@ .word 0x000ffe00 @ log 1+2^-10 Q30
@ .word 0x0007ff80 @ log 1+2^-11 Q30
@ .word 0x0003ffe0 @ log 1+2^-12 Q30
@ .word 0x0001fff8 @ log 1+2^-13 Q30
@ .word 0x0000fffe @ log 1+2^-14 Q30
@ .thumb_func
@ qfp_fatan2:
@ push {r4,r5,r14}
@ @ unpack arguments and shift one down to have common exponent
@ bl unpackx
@ bl xchxy
@ bl unpackx
@ lsls r0,r0,#5 @ Q28
@ lsls r1,r1,#5 @ Q28
@ adds r4,r2,r3 @ this is -760 if both arguments are 0 and at least -380-126=-506 otherwise
@ asrs r4,#9
@ adds r4,#1
@ bmi 2f @ force y to 0 proper, so result will be zero
@ subs r4,r2,r3 @ calculate shift
@ bge 1f @ ex>=ey?
@ rsbs r4,#0 @ make shift positive
@ asrs r0,r4
@ cmp r4,#28
@ blo 3f
@ asrs r0,#31
@ b 3f
@ 1:
@ asrs r1,r4
@ cmp r4,#28
@ blo 3f
@ 2:
@ @ here |x|>>|y| or both x and y are ±0
@ cmp r0,#0
@ bge 4f @ x positive, return signed 0
@ ldr r0,pi_q29 @ x negative, return +/- pi
@ asrs r1,#31
@ eors r0,r1
@ b 7f
@ 4:
@ asrs r0,r1,#31
@ b 7f
@ 3:
@ movs r2,#0 @ initial angle
@ cmp r0,#0 @ x negative
@ bge 5f
@ rsbs r0,#0 @ rotate to 1st/4th quadrants
@ rsbs r1,#0
@ ldr r2,pi_q29 @ pi Q29
@ 5:
@ adr r3,tab_cc @ circular coefficients
@ movs r4,#1 @ m=1
@ bl cordic_vec @ also produces magnitude (with scaling factor 1.646760119), which is discarded
@ mov r0,r2 @ result here is -pi/2..3pi/2 Q29
@ @ asrs r2,#29
@ @ subs r0,r2
@ ldr r2,pi_q29 @ pi Q29
@ adds r4,r0,r2 @ attempt to fix -3pi/2..-pi case
@ bcs 6f @ -pi/2..0? leave result as is
@ subs r4,r0,r2 @ <pi? leave as is
@ bmi 6f
@ subs r0,r4,r2 @ >pi: take off 2pi
@ 6:
@ subs r0,#1 @ fiddle factor so atan2(0,1)==0
@ 7:
@ movs r2,#0 @ exponent for pack
@ b packretns
@ .align 2
@ .ltorg
@ @ first entry in following table is pi Q29
@ pi_q29:
@ @ circular CORDIC coefficients: atan(2^-i), b0=flag for preventing shift, b1=flag for end of table
@ tab_cc:
@ .word 0x1921fb54*4+1 @ no shift before first iteration
@ .word 0x0ed63383*4+0
@ .word 0x07d6dd7e*4+0
@ .word 0x03fab753*4+0
@ .word 0x01ff55bb*4+0
@ .word 0x00ffeaae*4+0
@ .word 0x007ffd55*4+0
@ .word 0x003fffab*4+0
@ .word 0x001ffff5*4+0
@ .word 0x000fffff*4+0
@ .word 0x0007ffff*4+0
@ .word 0x00040000*4+0
@ .word 0x00020000*4+0+2 @ +2 marks end
.align 2
@ float subtraction: x - y computed as x + (-y) by flipping y's sign bit.
@ Must be immediately followed by __aeabi_fadd (falls through; no branch).
.thumb_func
__aeabi_fsub:
ldr r2,=#0x80000000
eors r1,r2 @ flip sign on second argument
@ drop into fadd, on .align2:ed boundary
.thumb_func
__aeabi_fadd:
push {r4,r5,r6,r14}
asrs r4,r0,#31
lsls r2,r0,#1
lsrs r2,#24 @ x exponent
beq fa_xe0
cmp r2,#255
beq fa_xe255
fa_xe:
asrs r5,r1,#31
lsls r3,r1,#1
lsrs r3,#24 @ y exponent
beq fa_ye0
cmp r3,#255
beq fa_ye255
fa_ye:
ldr r6,=#0x007fffff
ands r0,r0,r6 @ extract mantissa bits
ands r1,r1,r6
adds r6,#1 @ r6=0x00800000
orrs r0,r0,r6 @ set implied 1
orrs r1,r1,r6
eors r0,r0,r4 @ complement...
eors r1,r1,r5
subs r0,r0,r4 @ ... and add 1 if sign bit is set: 2's complement
subs r1,r1,r5
subs r5,r3,r2 @ ye-xe
subs r4,r2,r3 @ xe-ye
bmi fa_ygtx
@ here xe>=ye
cmp r4,#30
bge fa_xmgty @ xe much greater than ye?
adds r5,#32
movs r3,r2 @ save exponent
@ here y in r1 must be shifted down r4 places to align with x in r0
movs r2,r1
lsls r2,r2,r5 @ keep the bits we will shift off the bottom of r1
asrs r1,r1,r4
b fa_0
.ltorg
fa_ymgtx:
movs r2,#0 @ result is just y
movs r0,r1
b fa_1
fa_xmgty:
movs r3,r2 @ result is just x
movs r2,#0
b fa_1
fa_ygtx:
@ here ye>xe
cmp r5,#30
bge fa_ymgtx @ ye much greater than xe?
adds r4,#32
@ here x in r0 must be shifted down r5 places to align with y in r1
movs r2,r0
lsls r2,r2,r4 @ keep the bits we will shift off the bottom of r0
asrs r0,r0,r5
fa_0:
adds r0,r1 @ result is now in r0:r2, possibly highly denormalised or zero; exponent in r3
beq fa_9 @ if zero, inputs must have been of identical magnitude and opposite sign, so return +0
fa_1:
lsrs r1,r0,#31 @ sign bit
beq fa_8
mvns r0,r0
rsbs r2,r2,#0
bne fa_8
adds r0,#1
fa_8:
adds r6,r6
@ r6=0x01000000
cmp r0,r6
bhs fa_2
fa_3:
adds r2,r2 @ normalisation loop
adcs r0,r0
subs r3,#1 @ adjust exponent
cmp r0,r6
blo fa_3
fa_2:
@ here r0:r2 is the result mantissa 0x01000000<=r0<0x02000000, r3 the exponent, and r1 the sign bit
lsrs r0,#1
bcc fa_4
@ rounding bits here are 1:r2
adds r0,#1 @ round up
cmp r2,#0
beq fa_5 @ sticky bits all zero?
fa_4:
cmp r3,#254
bhs fa_6 @ exponent too large or negative?
lsls r1,#31 @ pack everything
add r0,r1
lsls r3,#23
add r0,r3
fa_end:
pop {r4,r5,r6,r15}
fa_9:
cmp r2,#0 @ result zero?
beq fa_end @ return +0
b fa_1
fa_5:
lsrs r0,#1
lsls r0,#1 @ round to even
b fa_4
fa_6:
bge fa_7
@ underflow
@ can handle denormals here
lsls r0,r1,#31 @ result is signed zero
pop {r4,r5,r6,r15}
fa_7:
@ overflow
lsls r0,r1,#8
adds r0,#255
lsls r0,#23 @ result is signed infinity
pop {r4,r5,r6,r15}
fa_xe0:
@ can handle denormals here
subs r2,#32
adds r2,r4 @ exponent -32 for +Inf, -33 for -Inf
b fa_xe
fa_xe255:
@ can handle NaNs here
lsls r2,#8
add r2,r2,r4 @ exponent ~64k for +Inf, ~64k-1 for -Inf
b fa_xe
fa_ye0:
@ can handle denormals here
subs r3,#32
adds r3,r5 @ exponent -32 for +Inf, -33 for -Inf
b fa_ye
fa_ye255:
@ can handle NaNs here
lsls r3,#8
add r3,r3,r5 @ exponent ~64k for +Inf, ~64k-1 for -Inf
b fa_ye
@ -----------------------------------------------------------------------
@ float __aeabi_fmul(float x, float y)
@ EABI single-precision multiply.
@ In:   r0 = x, r1 = y (packed IEEE-754 singles)
@ Out:  r0 = x*y (packed); r7 preserved; r1-r4, r12 and r14 trashed
@ Method: result sign is kept in r14.  The 24x24-bit mantissa product is
@ assembled from two 32-bit muls (a truncated high partial product in r2,
@ an exact low product in r0) with a carry fix-up, and the implied-1
@ cross terms (x+y, saved in r12) are added separately.  The product is
@ then normalised (fm_0 path when >=2), rounded to nearest-even and
@ packed.  Zero/denormal and Inf/NaN exponents are biased on entry
@ (fm_xe0/fm_xe255/fm_ye0/fm_ye255) so the shared fm_3a/fm_3b/fm_4 paths
@ return the conventional flushed results (signed zero/infinity, with
@ the borderline denormal-rounds-to-smallest-normal case trapped).
@ -----------------------------------------------------------------------
.align 2
.thumb_func
__aeabi_fmul:
push {r7,r14}
mov r2,r0
eors r2,r1 @ sign of result
lsrs r2,#31
lsls r2,#31
mov r14,r2 @ stash result sign in lr for the duration
lsls r0,#1
lsls r1,#1
lsrs r2,r0,#24 @ xe
beq fm_xe0
cmp r2,#255
beq fm_xe255
fm_xe:
lsrs r3,r1,#24 @ ye
beq fm_ye0
cmp r3,#255
beq fm_ye255
fm_ye:
adds r7,r2,r3 @ exponent of result (will possibly be incremented)
subs r7,#128 @ adjust bias for packing
lsls r0,#8 @ x mantissa
lsls r1,#8 @ y mantissa
lsrs r0,#9
lsrs r1,#9
adds r2,r0,r1 @ for later
mov r12,r2
lsrs r2,r0,#7 @ x[22..7] Q16
lsrs r3,r1,#7 @ y[22..7] Q16
muls r2,r2,r3 @ result [45..14] Q32: never an overestimate and worst case error is 2*(2^7-1)*(2^23-2^7)+(2^7-1)^2 = 2130690049 < 2^31
muls r0,r0,r1 @ result [31..0] Q46
lsrs r2,#18 @ result [45..32] Q14
bcc 1f
cmp r0,#0
bmi 1f
adds r2,#1 @ fix error in r2
1:
lsls r3,r0,#9 @ bits off bottom of result
lsrs r0,#23 @ Q23
lsls r2,#9
adds r0,r2 @ cut'n'shut
add r0,r12 @ implied 1*(x+y) to compensate for no insertion of implied 1s
@ result-1 in r3:r0 Q23+32, i.e., in range [0,3)
lsrs r1,r0,#23
bne fm_0 @ branch if we need to shift down one place
@ here 1<=result<2
cmp r7,#254
bhs fm_3a @ catches both underflow and overflow
lsls r3,#1 @ sticky bits at top of R3, rounding bit in carry
bcc fm_1 @ no rounding
beq fm_2 @ rounding tie?
adds r0,#1 @ round up
fm_1:
adds r7,#1 @ for implied 1
lsls r7,#23 @ pack result
add r0,r7
add r0,r14 @ insert sign
pop {r7,r15}
fm_2: @ rounding tie
adds r0,#1
fm_3:
lsrs r0,#1
lsls r0,#1 @ clear bottom bit
b fm_1
@ here 1<=result-1<3
fm_0:
adds r7,#1 @ increment exponent
cmp r7,#254
bhs fm_3b @ catches both underflow and overflow
lsrs r0,#1 @ shift mantissa down
bcc fm_1a @ no rounding
adds r0,#1 @ assume we will round up
cmp r3,#0 @ sticky bits
beq fm_3c @ rounding tie?
fm_1a:
adds r7,r7
adds r7,#1 @ for implied 1
lsls r7,#22 @ pack result
add r0,r7
add r0,r14 @ insert sign
pop {r7,r15}
fm_3c:
lsrs r0,#1
lsls r0,#1 @ clear bottom bit
b fm_1a
fm_xe0:
subs r2,#16
fm_xe255:
lsls r2,#8
b fm_xe
fm_ye0:
subs r3,#16
fm_ye255:
lsls r3,#8
b fm_ye
@ here the result is under- or overflowing
fm_3b:
bge fm_4 @ branch on overflow
@ trap case where result is denormal 0x007fffff + 0.5ulp or more
adds r7,#1 @ exponent=-1?
bne fm_5
@ corrected mantissa will be >= 3.FFFFFC (0x1fffffe Q23)
@ so r0 >= 2.FFFFFC (0x17ffffe Q23)
adds r0,#2
lsrs r0,#23
cmp r0,#3
bne fm_5
b fm_6
fm_3a:
bge fm_4 @ branch on overflow
@ trap case where result is denormal 0x007fffff + 0.5ulp or more
adds r7,#1 @ exponent=-1?
bne fm_5
adds r0,#1 @ mantissa=0xffffff (i.e., r0=0x7fffff)?
lsrs r0,#23
beq fm_5
fm_6:
movs r0,#1 @ return smallest normal
lsls r0,#23
add r0,r14
pop {r7,r15}
fm_5:
mov r0,r14 @ underflow: return signed zero
pop {r7,r15}
fm_4:
movs r0,#0xff @ overflow: return signed infinity
lsls r0,#23
add r0,r14
pop {r7,r15}
@ This version of the division algorithm uses a small reciprocal lookup table
@ (rcpapp, below) refined by a Newton step to estimate the
@ reciprocal of the divisor to about 14 bits; then a multiplication step to get a first
@ quotient estimate; then the remainder based on this estimate is used to calculate a
@ correction to the quotient. The result is good to about 27 bits and so we only need
@ to calculate the exact remainder when close to a rounding boundary.
@ -----------------------------------------------------------------------
@ float __aeabi_fdiv(float n, float d)
@ In:   r0 = dividend n, r1 = divisor d (packed IEEE-754 singles)
@ Out:  r0 = n/d (packed); r4-r6 preserved.
@ fdiv_n is an alternative entry point for callers that have already
@ saved r4-r6/lr.  Zeros, infinities and NaNs are flushed to the
@ conventional signed zero/infinity results via retzero/retinf.
@ -----------------------------------------------------------------------
.align 2
.thumb_func
__aeabi_fdiv:
push {r4,r5,r6,r14}
fdiv_n:
movs r4,#1
lsls r4,#23 @ implied 1 position
lsls r2,r1,#9 @ clear out sign and exponent
lsrs r2,r2,#9
orrs r2,r2,r4 @ divisor mantissa Q23 with implied 1
@ here
@ r0=packed dividend
@ r1=packed divisor
@ r2=divisor mantissa Q23
@ r4=1<<23
// see divtest.c
lsrs r3,r2,#18 @ x2=x>>18; // Q5 32..63
adr r5,rcpapp-32 @ index table by top mantissa bits (leading 1 makes x2>=32)
ldrb r3,[r5,r3] @ u=lut5[x2-32]; // Q8
lsls r5,r2,#5
muls r5,r5,r3
asrs r5,#14 @ e=(i32)(u*(x<<5))>>14; // Q22
asrs r6,r5,#11
muls r6,r6,r6 @ e2=(e>>11)*(e>>11); // Q22
subs r5,r6
muls r5,r5,r3 @ c=(e-e2)*u; // Q30
lsls r6,r3,#8
asrs r5,#13
adds r5,#1
asrs r5,#1
subs r5,r6,r5 @ u0=(u<<8)-((c+0x2000)>>14); // Q16
@ here
@ r0=packed dividend
@ r1=packed divisor
@ r2=divisor mantissa Q23
@ r4=1<<23
@ r5=reciprocal estimate Q16
lsrs r6,r0,#23
uxtb r3,r6 @ dividend exponent
lsls r0,#9
lsrs r0,#9
orrs r0,r0,r4 @ dividend mantissa Q23
lsrs r1,#23
eors r6,r1 @ sign of result in bit 8
lsrs r6,#8
lsls r6,#31 @ sign of result in bit 31, other bits clear
@ here
@ r0=dividend mantissa Q23
@ r1=divisor sign+exponent
@ r2=divisor mantissa Q23
@ r3=dividend exponent
@ r5=reciprocal estimate Q16
@ r6b31=sign of result
uxtb r1,r1 @ divisor exponent
cmp r1,#0
beq retinf @ divisor zero/denormal -> signed infinity
cmp r1,#255
beq 20f @ divisor is infinite
cmp r3,#0
beq retzero @ dividend zero/denormal -> signed zero
cmp r3,#255
beq retinf @ dividend Inf/NaN -> signed infinity
subs r3,r1 @ initial result exponent (no bias)
adds r3,#125 @ add bias
lsrs r1,r0,#8 @ dividend mantissa Q15
@ here
@ r0=dividend mantissa Q23
@ r1=dividend mantissa Q15
@ r2=divisor mantissa Q23
@ r3=initial result exponent
@ r5=reciprocal estimate Q16
@ r6b31=sign of result
muls r1,r5
lsrs r1,#16 @ Q15 qu0=(q15)(u*y0);
lsls r0,r0,#15 @ dividend Q38
movs r4,r2
muls r4,r1 @ Q38 qu0*x
subs r4,r0,r4 @ Q38 re0=(y<<15)-qu0*x; note this remainder is signed
asrs r4,#10
muls r4,r5 @ Q44 qu1=(re0>>10)*u; this quotient correction is also signed
asrs r4,#16 @ Q28
lsls r1,#13
adds r1,r1,r4 @ Q28 qu=(qu0<<13)+(qu1>>16);
@ here
@ r0=dividend mantissa Q38
@ r1=quotient Q28
@ r2=divisor mantissa Q23
@ r3=initial result exponent
@ r6b31=sign of result
lsrs r4,r1,#28
bne 1f
@ here the quotient is less than 1<<28 (i.e., result mantissa <1.0)
adds r1,#5
lsrs r4,r1,#4 @ rounding + small reduction in systematic bias
bcc 2f @ skip if we are not near a rounding boundary
lsrs r1,#3 @ quotient Q25
lsls r0,#10 @ dividend mantissa Q48
muls r1,r1,r2 @ quotient*divisor Q48
subs r0,r0,r1 @ remainder Q48
bmi 2f
b 3f
1:
@ here the quotient is at least 1<<28 (i.e., result mantissa >=1.0)
adds r3,#1 @ bump exponent (and shift mantissa down one more place)
adds r1,#9
lsrs r4,r1,#5 @ rounding + small reduction in systematic bias
bcc 2f @ skip if we are not near a rounding boundary
lsrs r1,#4 @ quotient Q24
lsls r0,#9 @ dividend mantissa Q47
muls r1,r1,r2 @ quotient*divisor Q47
subs r0,r0,r1 @ remainder Q47
bmi 2f
3:
adds r4,#1 @ increment quotient as we are above the rounding boundary
@ here
@ r3=result exponent
@ r4=correctly rounded quotient Q23 in range [1,2] *note closed interval*
@ r6b31=sign of result
2:
cmp r3,#254
bhs 10f @ this catches both underflow and overflow
lsls r1,r3,#23
adds r0,r4,r1 @ pack exponent
adds r0,r6 @ pack sign
pop {r4,r5,r6,r15}
@ here divisor is infinite; dividend exponent in r3
20:
cmp r3,#255
bne retzero @ finite/infinite -> signed zero
retinf:
movs r0,#255
21:
lsls r0,#23
orrs r0,r6
pop {r4,r5,r6,r15}
10:
bge retinf @ overflow?
adds r1,r3,#1
bne retzero @ exponent <-1? return 0
@ here exponent is exactly -1
lsrs r1,r4,#25
bcc retzero @ mantissa is not 01000000?
@ return minimum normal
movs r0,#1
lsls r0,#23
orrs r0,r6
pop {r4,r5,r6,r15}
retzero:
movs r0,r6 @ signed zero
pop {r4,r5,r6,r15}
@ 5-bit reciprocal table, indexed by the top mantissa bits of the divisor:
@ x2=[32:1:63]/32;
@ round(256 ./(x2+1/64))
.align 2
rcpapp:
.byte 252,245,237,231,224,218,213,207,202,197,193,188,184,180,176,172
.byte 169,165,162,159,156,153,150,148,145,142,140,138,135,133,131,129
@ The square root routine uses an initial approximation to the reciprocal of the square root of the argument based
@ on the top four bits of the mantissa (possibly shifted one place to make the exponent even). It then performs two
@ Newton-Raphson iterations, resulting in about 14 bits of accuracy. This reciprocal is then multiplied by
@ the original argument to produce an approximation to the result, again with about 14 bits of accuracy.
@ Then a remainder is calculated, and multiplied by the reciprocal estiamte to generate a correction term
@ giving a final answer to about 28 bits of accuracy. A final remainder calculation rounds to the correct
@ result if necessary.
@ Again, the fixed-point calculation is carefully implemented to preserve accuracy, and similar comments to those
@ made above on the fast division routine apply.
@ The reciprocal square root calculation has been tested for all possible (possibly shifted) input mantissa values.
@ -----------------------------------------------------------------------
@ float _fsqrt(float x)
@ In:   r0 = x (packed IEEE-754 single)
@ Out:  r0 = sqrt(x) (packed); r4 preserved; r1-r3 trashed
@ Special cases: +0/-0 returned unchanged; +Inf returns +Inf; any other
@ negative input returns -Inf (sq_0 path).  drsqrtapp-style 4-bit seed
@ table is rsqrtapp below.
@ -----------------------------------------------------------------------
.align 2
.thumb_func
_fsqrt:
push {r4}
lsls r1,r0,#1
bcs sq_0 @ negative?
lsls r1,#8
lsrs r1,#9 @ mantissa
movs r2,#1
lsls r2,#23
adds r1,r2 @ insert implied 1
lsrs r2,r0,#23 @ extract exponent
beq sq_2 @ zero?
cmp r2,#255 @ infinite?
beq sq_1
adds r2,#125 @ correction for packing
asrs r2,#1 @ exponent/2, LSB into carry
bcc 1f
lsls r1,#1 @ was even: double mantissa; mantissa y now 1..4 Q23
1:
adr r4,rsqrtapp-4@ first four table entries are never accessed because of the mantissa's leading 1
lsrs r3,r1,#21 @ y Q2
ldrb r4,[r4,r3] @ initial approximation to reciprocal square root a0 Q8
lsrs r0,r1,#7 @ y Q16: first Newton-Raphson iteration
muls r0,r4 @ a0*y Q24
muls r0,r4 @ r0=p0=a0*y*y Q32
asrs r0,#12 @ r0 Q20
muls r0,r4 @ dy0=a0*r0 Q28
asrs r0,#13 @ dy0 Q15
lsls r4,#8 @ a0 Q16
subs r4,r0 @ a1=a0-dy0/2 Q16-Q15/2 -> Q16
adds r4,#170 @ mostly remove systematic error in this approximation: gains approximately 1 bit
movs r0,r4 @ second Newton-Raphson iteration
muls r0,r0 @ a1*a1 Q32
lsrs r0,#15 @ a1*a1 Q17
lsrs r3,r1,#8 @ y Q15
muls r0,r3 @ r1=p1=a1*a1*y Q32
asrs r0,#12 @ r1 Q20
muls r0,r4 @ dy1=a1*r1 Q36
asrs r0,#21 @ dy1 Q15
subs r4,r0 @ a2=a1-dy1/2 Q16-Q15/2 -> Q16
muls r3,r4 @ a3=y*a2 Q31
lsrs r3,#15 @ a3 Q16
@ here a2 is an approximation to the reciprocal square root
@ and a3 is an approximation to the square root
movs r0,r3
muls r0,r0 @ a3*a3 Q32
lsls r1,#9 @ y Q32
subs r0,r1,r0 @ r2=y-a3*a3 Q32 remainder
asrs r0,#5 @ r2 Q27
muls r4,r0 @ r2*a2 Q43
lsls r3,#7 @ a3 Q23
asrs r0,r4,#15 @ r2*a2 Q28
adds r0,#16 @ rounding to Q24
asrs r0,r0,#6 @ r2*a2 Q22
add r3,r0 @ a4 Q23: candidate final result
bcc sq_3 @ near rounding boundary? skip if no rounding needed
@ within 0.5ulp of a boundary: compute the exact remainder y-(a4+0.5ulp)^2 to decide
mov r4,r3
adcs r4,r4 @ a4+0.5ulp Q24
muls r4,r4 @ Q48
lsls r1,#16 @ y Q48
subs r1,r4 @ remainder Q48
bmi sq_3
adds r3,#1 @ round up
sq_3:
lsls r2,#23 @ pack exponent
adds r0,r2,r3
sq_6:
pop {r4}
bx r14
sq_0:
lsrs r1,#24
beq sq_2 @ -0: return it
@ here negative and not -0: return -Inf
asrs r0,#31
sq_5:
lsls r0,#23
b sq_6
sq_1: @ +Inf
lsrs r0,#23
b sq_5
sq_2:
lsrs r0,#31
lsls r0,#31 @ signed zero: return it unchanged
b sq_6
@ 4-bit reciprocal-square-root seed table:
@ round(sqrt(2^22./[72:16:248]))
rsqrtapp:
.byte 0xf1,0xda,0xc9,0xbb, 0xb0,0xa6,0x9e,0x97, 0x91,0x8b,0x86,0x82
@ Double-precision unpacking helper macros used by the routines below.
@ Notation:
@ rx:ry means the concatenation of rx and ry with rx having the less significant bits
@ IEEE double in ra:rb ->
@ mantissa in ra:rb 12Q52 (53 significant bits) with implied 1 set
@ exponent in re
@ sign in rs
@ trashes rt
@ Zero/denormal and Inf/NaN exponents are replaced by large-magnitude
@ biased values (the "fiddle" in l\@_1) so callers' shared paths flush
@ them to conventional results without separate special-casing.
.macro mdunpack ra,rb,re,rs,rt
lsrs \re,\rb,#20 @ extract sign and exponent
subs \rs,\re,#1
lsls \rs,#20
subs \rb,\rs @ clear sign and exponent in mantissa; insert implied 1
lsrs \rs,\re,#11 @ sign
lsls \re,#21
lsrs \re,#21 @ exponent
beq l\@_1 @ zero exponent?
adds \rt,\re,#1
lsrs \rt,#11
beq l\@_2 @ exponent != 0x7ff? then done
l\@_1:
movs \ra,#0
movs \rb,#1
lsls \rb,#20 @ force mantissa to exactly 1.0
subs \re,#128
lsls \re,#12 @ drive exponent far out of range
l\@_2:
.endm
@ IEEE double in ra:rb ->
@ signed mantissa in ra:rb 12Q52 (53 significant bits) with implied 1
@ exponent in re
@ trashes rt0 and rt1
@ +zero, +denormal -> exponent=-0x80000
@ -zero, -denormal -> exponent=-0x80000
@ +Inf, +NaN -> exponent=+0x77f000
@ -Inf, -NaN -> exponent=+0x77e000
.macro mdunpacks ra,rb,re,rt0,rt1
lsrs \re,\rb,#20 @ extract sign and exponent
lsrs \rt1,\rb,#31 @ sign only
subs \rt0,\re,#1
lsls \rt0,#20
subs \rb,\rt0 @ clear sign and exponent in mantissa; insert implied 1
lsls \re,#21
bcc l\@_1 @ skip on positive
mvns \rb,\rb @ negate mantissa
rsbs \ra,#0
bcc l\@_1
adds \rb,#1
l\@_1:
lsrs \re,#21
beq l\@_2 @ zero exponent?
adds \rt0,\re,#1
lsrs \rt0,#11
beq l\@_3 @ exponent != 0x7ff? then done
subs \re,\rt1
l\@_2:
movs \ra,#0
lsls \rt1,#1 @ +ve: 0 -ve: 2
adds \rb,\rt1,#1 @ +ve: 1 -ve: 3
lsls \rb,#30 @ create +/-1 mantissa
asrs \rb,#10
subs \re,#128
lsls \re,#12
l\@_3:
.endm
@ -----------------------------------------------------------------------
@ double __aeabi_dsub(double x, double y)   returns x - y
@ double __aeabi_dadd(double x, double y)   returns x + y
@ EABI double-precision add/subtract.
@ In:   r0:r1 = x, r2:r3 = y (packed IEEE-754 doubles, low word in the
@       lower-numbered register)
@ Out:  r0:r1 = result; r4-r7 preserved
@ Method: mdunpacks yields signed 2's-complement mantissas; the smaller
@ operand is aligned with the shifted-off bits kept in r4 as
@ rounding+sticky information; the sum is renormalised and rounded to
@ nearest-even at da_pack.  Denormals are treated as zero and NaNs are
@ flushed to infinities by the unpacking exponent fiddle.
@ -----------------------------------------------------------------------
.align 2
.thumb_func
__aeabi_dsub:
push {r4-r7,r14}
movs r4,#1
lsls r4,#31
eors r3,r4 @ flip sign on second argument
b da_entry @ continue in dadd
.align 2
.thumb_func
__aeabi_dadd:
push {r4-r7,r14}
da_entry:
mdunpacks r0,r1,r4,r6,r7
mdunpacks r2,r3,r5,r6,r7
subs r7,r5,r4 @ ye-xe
subs r6,r4,r5 @ xe-ye
bmi da_ygtx
@ here xe>=ye: need to shift y down r6 places
mov r12,r4 @ save exponent
cmp r6,#32
bge da_xrgty @ xe rather greater than ye?
adds r7,#32
movs r4,r2
lsls r4,r4,r7 @ rounding bit + sticky bits
da_xgty0:
movs r5,r3
lsls r5,r5,r7
lsrs r2,r6
asrs r3,r6
orrs r2,r5
da_add:
adds r0,r2
adcs r1,r3
da_pack:
@ here unnormalised signed result (possibly 0) is in r0:r1 with exponent r12, rounding + sticky bits in r4
@ Note that if a large normalisation shift is required then the arguments were close in magnitude and so we
@ cannot have not gone via the xrgty/yrgtx paths. There will therefore always be enough high bits in r4
@ to provide a correct continuation of the exact result.
@ now pack result back up
lsrs r3,r1,#31 @ get sign bit
beq 1f @ skip on positive
mvns r1,r1 @ negate mantissa
mvns r0,r0
movs r2,#0
rsbs r4,#0
adcs r0,r2
adcs r1,r2
1:
mov r2,r12 @ get exponent
lsrs r5,r1,#21
bne da_0 @ shift down required?
lsrs r5,r1,#20
bne da_1 @ normalised?
cmp r0,#0
beq da_5 @ could mantissa be zero?
da_2:
adds r4,r4 @ normalisation loop: shift r4:r0:r1 up one place
adcs r0,r0
adcs r1,r1
subs r2,#1 @ adjust exponent
lsrs r5,r1,#20
beq da_2
da_1:
lsls r4,#1 @ check rounding bit
bcc da_3
da_4:
adds r0,#1 @ round up
bcc 2f
adds r1,#1
2:
cmp r4,#0 @ sticky bits zero?
bne da_3
lsrs r0,#1 @ round to even
lsls r0,#1
da_3:
subs r2,#1
bmi da_6
adds r4,r2,#2 @ check if exponent is overflowing
lsrs r4,#11
bne da_7
lsls r2,#20 @ pack exponent and sign
add r1,r2
lsls r3,#31
add r1,r3
pop {r4-r7,r15}
da_7:
@ here exponent overflow: return signed infinity
lsls r1,r3,#31
ldr r3,=#0x7ff00000
orrs r1,r3
b 1f
da_6:
@ here exponent underflow: return signed zero
lsls r1,r3,#31
1:
movs r0,#0
pop {r4-r7,r15}
da_5:
@ here mantissa could be zero
cmp r1,#0
bne da_2
cmp r4,#0
bne da_2
@ inputs must have been of identical magnitude and opposite sign, so return +0
pop {r4-r7,r15}
da_0:
@ here a shift down by one place is required for normalisation
adds r2,#1 @ adjust exponent
lsls r6,r0,#31 @ save rounding bit
lsrs r0,#1
lsls r5,r1,#31
orrs r0,r5
lsrs r1,#1
cmp r6,#0
beq da_3
b da_4
da_xrgty: @ xe>ye and shift>=32 places
cmp r6,#60
bge da_xmgty @ xe much greater than ye?
subs r6,#32
adds r7,#64
movs r4,r2
lsls r4,r4,r7 @ these would be shifted off the bottom of the sticky bits
beq 1f
movs r4,#1
1:
lsrs r2,r2,r6
orrs r4,r2
movs r2,r3
lsls r3,r3,r7
orrs r4,r3
asrs r3,r2,#31 @ propagate sign bit
b da_xgty0
da_ygtx:
@ here ye>xe: need to shift x down r7 places
mov r12,r5 @ save exponent
cmp r7,#32
bge da_yrgtx @ ye rather greater than xe?
adds r6,#32
movs r4,r0
lsls r4,r4,r6 @ rounding bit + sticky bits
da_ygtx0:
movs r5,r1
lsls r5,r5,r6
lsrs r0,r7
asrs r1,r7
orrs r0,r5
b da_add
da_yrgtx:
cmp r7,#60
bge da_ymgtx @ ye much greater than xe?
subs r7,#32
adds r6,#64
movs r4,r0
lsls r4,r4,r6 @ these would be shifted off the bottom of the sticky bits
beq 1f
movs r4,#1
1:
lsrs r0,r0,r7
orrs r4,r0
movs r0,r1
lsls r1,r1,r6
orrs r4,r1
asrs r1,r0,#31 @ propagate sign bit
b da_ygtx0
da_ymgtx: @ result is just y
movs r0,r2
movs r1,r3
da_xmgty: @ result is just x
movs r4,#0 @ clear sticky bits
b da_pack
.ltorg
@ 32x32->64-bit multiply macros.  These synthesise wide products from
@ 16x16->32 muls, since this instruction set has no long-multiply
@ instruction (see "equivalent of UMULL" below); the signed variants
@ handle sign extension of the cross terms explicitly.
@ equivalent of UMULL
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
.macro mul32_32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ (x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
lsrs \rt2,\rx,#16 @ xh
muls \rt1,\rt2 @ xhyl=M0
lsrs \rt4,\ry,#16 @ yh
muls \rt2,\rt4 @ xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ xlyh=M1
adds \rt1,\rt3 @ M0+M1=M
bcc l\@_1 @ addition of the two cross terms can overflow, so add carry into H
movs \rt3,#1 @ 1
lsls \rt3,#16 @ 0x10000
adds \rt2,\rt3 @ H'
l\@_1:
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ ML
lsrs \rzh,\rt1,#16 @ MH
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ZH
.endm
@ SUMULL: x signed, y unsigned
@ in table below ¯ means signed variable
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
.macro muls32_32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ ¯(x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
asrs \rt2,\rx,#16 @ ¯xh
muls \rt1,\rt2 @ ¯xhyl=M0
lsrs \rt4,\ry,#16 @ yh
muls \rt2,\rt4 @ ¯xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ xlyh=M1
asrs \rt4,\rt1,#31 @ M0sx (M1 sign extension is zero)
adds \rt1,\rt3 @ M0+M1=M
movs \rt3,#0 @ 0
adcs \rt4,\rt3 @ ¯Msx
lsls \rt4,#16 @ ¯Msx<<16
adds \rt2,\rt4 @ H'
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ M~
lsrs \rzh,\rt1,#16 @ M~
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ¯ZH
.endm
@ SSMULL: x signed, y signed
@ in table below ¯ means signed variable
@ needs five temporary registers
@ can have rt3==rx, in which case rx trashed
@ can have rt4==ry, in which case ry trashed
@ can have rzl==rx
@ can have rzh==ry
@ can have rzl,rzh==rt3,rt4
.macro muls32_s32_64 rx,ry,rzl,rzh,rt0,rt1,rt2,rt3,rt4
@ t0 t1 t2 t3 t4
@ ¯(x) (y)
uxth \rt0,\rx @ xl
uxth \rt1,\ry @ yl
muls \rt0,\rt1 @ xlyl=L
asrs \rt2,\rx,#16 @ ¯xh
muls \rt1,\rt2 @ ¯xhyl=M0
asrs \rt4,\ry,#16 @ ¯yh
muls \rt2,\rt4 @ ¯xhyh=H
uxth \rt3,\rx @ xl
muls \rt3,\rt4 @ ¯xlyh=M1
adds \rt1,\rt3 @ ¯M0+M1=M
asrs \rt3,\rt1,#31 @ Msx
bvc l\@_1 @
mvns \rt3,\rt3 @ ¯Msx flip sign extension bits if overflow
l\@_1:
lsls \rt3,#16 @ ¯Msx<<16
adds \rt2,\rt3 @ H'
@ t0 t1 t2 t3 t4
@ (zl) (zh)
lsls \rzl,\rt1,#16 @ M~
lsrs \rzh,\rt1,#16 @ M~
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ¯ZH
.endm
@ 32-bit unsigned square -> 64-bit result (cross term M appears twice,
@ hence the <<17 / >>15 shifts)
@ can have rt2==rx, in which case rx trashed
@ can have rzl==rx
@ can have rzh==rt1
.macro square32_64 rx,rzl,rzh,rt0,rt1,rt2
@ t0 t1 t2 zl zh
uxth \rt0,\rx @ xl
muls \rt0,\rt0 @ xlxl=L
uxth \rt1,\rx @ xl
lsrs \rt2,\rx,#16 @ xh
muls \rt1,\rt2 @ xlxh=M
muls \rt2,\rt2 @ xhxh=H
lsls \rzl,\rt1,#17 @ ML
lsrs \rzh,\rt1,#15 @ MH
adds \rzl,\rt0 @ ZL
adcs \rzh,\rt2 @ ZH
.endm
@ -----------------------------------------------------------------------
@ double __aeabi_dmul(double x, double y)
@ In:   r0:r1 = x, r2:r3 = y (packed IEEE-754 doubles)
@ Out:  r0:r1 = x*y; r4-r7 preserved
@ Method: mdunpack both operands (denormals flushed, NaNs driven to the
@ over/underflow paths via the exponent fiddle); accumulate the full
@ 53x53-bit product from four 32x32->64 partial products, the lowest
@ 32 bits being kept in r12 as sticky information; normalise to [1,2),
@ round to nearest-even and pack.  Exponent overflow returns signed
@ infinity (dm_1); underflow returns signed zero except for the
@ borderline case that rounds up to the smallest normal (dm_0).
@ -----------------------------------------------------------------------
.align 2
.thumb_func
__aeabi_dmul:
push {r4-r7,r14}
mdunpack r0,r1,r4,r6,r5
mov r12,r4
mdunpack r2,r3,r4,r7,r5
eors r7,r6 @ sign of result
add r4,r12 @ exponent of result
push {r0-r2,r4,r7}
@ accumulate full product in r12:r5:r6:r7
mul32_32_64 r0,r2, r0,r5, r4,r6,r7,r0,r5 @ XL*YL
mov r12,r0 @ save LL bits
mul32_32_64 r1,r3, r6,r7, r0,r2,r4,r6,r7 @ XH*YH
pop {r0} @ XL
mul32_32_64 r0,r3, r0,r3, r1,r2,r4,r0,r3 @ XL*YH
adds r5,r0
adcs r6,r3
movs r0,#0
adcs r7,r0
pop {r1,r2} @ XH,YL
mul32_32_64 r1,r2, r1,r2, r0,r3,r4, r1,r2 @ XH*YL
adds r5,r1
adcs r6,r2
movs r0,#0
adcs r7,r0
@ here r5:r6:r7 holds the product [1..4) in Q(104-32)=Q72, with extra LSBs in r12
pop {r3,r4} @ exponent in r3, sign in r4
lsls r1,r7,#11
lsrs r2,r6,#21
orrs r1,r2
lsls r0,r6,#11
lsrs r2,r5,#21
orrs r0,r2
lsls r5,#11 @ now r5:r0:r1 Q83=Q(51+32), extra LSBs in r12
lsrs r2,r1,#20
bne 1f @ skip if in range [2..4)
adds r5,r5 @ shift up so always [2..4) Q83, i.e. [1..2) Q84=Q(52+32)
adcs r0,r0
adcs r1,r1
subs r3,#1 @ correct exponent
1:
ldr r6,=#0x3ff
subs r3,r6 @ correct for exponent bias
lsls r6,#1 @ 0x7fe
cmp r3,r6
bhs dm_0 @ exponent over- or underflow
lsls r5,#1 @ rounding bit to carry
bcc 1f @ result is correctly rounded
adds r0,#1
movs r6,#0
adcs r1,r6 @ round up
mov r6,r12 @ remaining sticky bits
orrs r5,r6
bne 1f @ some sticky bits set?
lsrs r0,#1
lsls r0,#1 @ round to even
1:
lsls r3,#20
adds r1,r3 @ pack exponent
dm_2:
lsls r4,#31
add r1,r4 @ pack sign
pop {r4-r7,r15}
@ here for exponent over- or underflow
dm_0:
bge dm_1 @ overflow?
adds r3,#1 @ would-be zero exponent?
bne 1f
adds r0,#1
bne 1f @ all-ones mantissa?
adds r1,#1
lsrs r7,r1,#21
beq 1f
lsrs r1,#1 @ borderline case: round up to smallest normal
b dm_2
1:
lsls r1,r4,#31 @ underflow: signed zero
movs r0,#0
pop {r4-r7,r15}
@ here for exponent overflow
dm_1:
adds r6,#1 @ 0x7ff
lsls r1,r6,#20 @ signed infinity
movs r0,#0
b dm_2
.ltorg
@ Approach to division y/x is as follows.
@
@ First generate u1, an approximation to 1/x to about 29 bits. Multiply this by the top
@ 32 bits of y to generate a0, a first approximation to the result (good to 28 bits or so).
@ Calculate the exact remainder r0=y-a0*x, which will be about 0. Calculate a correction
@ d0=r0*u1, and then write a1=a0+d0. If near a rounding boundary, compute the exact
@ remainder r1=y-a1*x (which can be done using r0 as a basis) to determine whether to
@ round up or down.
@
@ The calculation of 1/x is as given in dreciptest.c. That code verifies exhaustively
@ that | u1*x-1 | < 10*2^-32.
@
@ More precisely:
@
@ x0=(q16)x;
@ x1=(q30)x;
@ y0=(q31)y;
@ u0=(q15~)"(0xffffffffU/(unsigned int)roundq(x/x_ulp))/powq(2,16)"(x0); // q15 approximation to 1/x; "~" denotes rounding rather than truncation
@ v=(q30)(u0*x1-1);
@ u1=(q30)u0-(q30~)(u0*v);
@
@ a0=(q30)(u1*y0);
@ r0=(q82)y-a0*x;
@ r0x=(q57)r0;
@ d0=r0x*u1;
@ a1=d0+a0;
@
@ Error analysis
@
@ Use Greek letters to represent the errors introduced by rounding and truncation.
@
@ r₀ = y - a₀x
@    = y - [ u₁ ( y - α ) - β ] x where 0 ≤ α < 2^-31, 0 ≤ β < 2^-30
@    = y ( 1 - u₁x ) + ( u₁α + β ) x
@
@ Hence
@
@ | r₀ / x | < 2 * 10*2^-32 + 2^-31 + 2^-30
@            = 26*2^-32
@
@ r₁ = y - a₁x
@    = y - a₀x - d₀x
@    = r₀ - d₀x
@    = r₀ - u₁ ( r₀ - γ ) x where 0 ≤ γ < 2^-57
@    = r₀ ( 1 - u₁x ) + u₁γx
@
@ Hence
@
@ | r₁ / x | < 26*2^-32 * 10*2^-32 + 2^-57
@            = (260+128)*2^-64
@            < 2^-55
@
@ Empirically it seems to be nearly twice as good as this.
@
@ To determine correctly whether the exact remainder calculation can be skipped we need a result
@ accurate to < 0.25ulp. In the case where x>y the quotient will be shifted up one place for normalisation
@ and so 1ulp is 2^-53 and so the calculation above suffices.
@ -----------------------------------------------------------------------
@ double __aeabi_ddiv(double y, double x)
@ In:   r0:r1 = dividend y, r2:r3 = divisor x (packed IEEE-754 doubles)
@ Out:  r0:r1 = y/x; r4-r7 preserved.
@ ddiv0 is an alternative entry point used by dtan (callers must have
@ pushed r4-r7/lr already).  Zeros, denormals, infinities and NaNs are
@ steered to signed zero/infinity by the exponent fiddles below.
@ -----------------------------------------------------------------------
.align 2
.thumb_func
__aeabi_ddiv:
push {r4-r7,r14}
ddiv0: @ entry point from dtan
mdunpack r2,r3,r4,r7,r6 @ unpack divisor
@ unpack dividend by hand to save on register use
lsrs r6,r1,#31
adds r6,r7
mov r12,r6 @ result sign in r12b0; r12b1 trashed
lsls r1,#1
lsrs r7,r1,#21 @ exponent
beq 1f @ zero exponent?
adds r6,r7,#1
lsrs r6,#11
beq 2f @ exponent != 0x7ff? then done
1:
movs r0,#0
movs r1,#0
subs r7,#64 @ less drastic fiddling of exponents to get 0/0, Inf/Inf correct
lsls r7,#12
2:
subs r6,r7,r4
lsls r6,#2
add r12,r12,r6 @ (signed) exponent in r12[31..8]
subs r7,#1 @ implied 1
lsls r7,#21
subs r1,r7
lsrs r1,#1
// see dreciptest-boxc.c
lsrs r4,r3,#15 @ x2=x>>15; // Q5 32..63
ldr r5,=#(rcpapp-32)
ldrb r4,[r5,r4] @ u=lut5[x2-32]; // Q8
lsls r5,r3,#8
muls r5,r5,r4
asrs r5,#14 @ e=(i32)(u*(x<<8))>>14; // Q22
asrs r6,r5,#11
muls r6,r6,r6 @ e2=(e>>11)*(e>>11); // Q22
subs r5,r6
muls r5,r5,r4 @ c=(e-e2)*u; // Q30
lsls r6,r4,#7
asrs r5,#14
adds r5,#1
asrs r5,#1
subs r6,r5 @ u0=(u<<7)-((c+0x4000)>>15); // Q15
@ here
@ r0:r1 y mantissa
@ r2:r3 x mantissa
@ r6 u0, first approximation to 1/x Q15
@ r12: result sign, exponent
lsls r4,r3,#10
lsrs r5,r2,#22
orrs r5,r4 @ x1=(q30)x
muls r5,r6 @ u0*x1 Q45
asrs r5,#15 @ v=u0*x1-1 Q30
muls r5,r6 @ u0*v Q45
asrs r5,#14
adds r5,#1
asrs r5,#1 @ round u0*v to Q30
lsls r6,#15
subs r6,r5 @ u1 Q30
@ here
@ r0:r1 y mantissa
@ r2:r3 x mantissa
@ r6 u1, second approximation to 1/x Q30
@ r12: result sign, exponent
push {r2,r3}
lsls r4,r1,#11
lsrs r5,r0,#21
orrs r4,r5 @ y0=(q31)y
mul32_32_64 r4,r6, r4,r5, r2,r3,r7,r4,r5 @ y0*u1 Q61
adds r4,r4
adcs r5,r5 @ a0=(q30)(y0*u1)
@ here
@ r0:r1 y mantissa
@ r5 a0, first approximation to y/x Q30
@ r6 u1, second approximation to 1/x Q30
@ r12 result sign, exponent
ldr r2,[r13,#0] @ xL
mul32_32_64 r2,r5, r2,r3, r1,r4,r7,r2,r3 @ xL*a0
ldr r4,[r13,#4] @ xH
muls r4,r5 @ xH*a0
adds r3,r4 @ r2:r3 now x*a0 Q82
lsrs r2,#25
lsls r1,r3,#7
orrs r2,r1 @ r2 now x*a0 Q57; r7:r2 is x*a0 Q89
lsls r4,r0,#5 @ y Q57
subs r0,r4,r2 @ r0x=y-x*a0 Q57 (signed)
@ here
@ r0 r0x Q57
@ r5 a0, first approximation to y/x Q30
@ r4 yL Q57
@ r6 u1 Q30
@ r12 result sign, exponent
muls32_32_64 r0,r6, r7,r6, r1,r2,r3, r7,r6 @ r7:r6 r0x*u1 Q87
asrs r3,r6,#25
adds r5,r3
lsls r3,r6,#7 @ r3:r5 a1 Q62 (but bottom 7 bits are zero so 55 bits of precision after binary point)
@ here we could recover another 7 bits of precision (but not accuracy) from the top of r7
@ but these bits are thrown away in the rounding and conversion to Q52 below
@ here
@ r3:r5 a1 Q62 candidate quotient [0.5,2) or so
@ r4 yL Q57
@ r12 result sign, exponent
movs r6,#0
adds r3,#128 @ for initial rounding to Q53
adcs r5,r5,r6
lsrs r1,r5,#30
bne dd_0
@ here candidate quotient a1 is in range [0.5,1)
@ so 30 significant bits in r5
lsls r4,#1 @ y now Q58
lsrs r1,r5,#9 @ to Q52
lsls r0,r5,#23
lsrs r3,#9 @ 0.5ulp-significance bit in carry: if this is 1 we may need to correct result
orrs r0,r3
bcs dd_1
b dd_2
dd_0:
@ here candidate quotient a1 is in range [1,2)
@ so 31 significant bits in r5
movs r2,#4
add r12,r12,r2 @ fix exponent; r3:r5 now effectively Q61
adds r3,#128 @ complete rounding to Q53
adcs r5,r5,r6
lsrs r1,r5,#10
lsls r0,r5,#22
lsrs r3,#10 @ 0.5ulp-significance bit in carry: if this is 1 we may need to correct result
orrs r0,r3
bcc dd_2
dd_1:
@ here
@ r0:r1 rounded result Q53 [0.5,1) or Q52 [1,2), but may not be correctly rounded-to-nearest
@ r4 yL Q58 or Q57
@ r12 result sign, exponent
@ carry set
adcs r0,r0,r0
adcs r1,r1,r1 @ z Q53 with 1 in LSB
lsls r4,#16 @ Q105-32=Q73
ldr r2,[r13,#0] @ xL Q52
ldr r3,[r13,#4] @ xH Q20
movs r5,r1 @ zH Q21
muls r5,r2 @ zH*xL Q73
subs r4,r5
muls r3,r0 @ zL*xH Q73
subs r4,r3
mul32_32_64 r2,r0, r2,r3, r5,r6,r7,r2,r3 @ xL*zL
rsbs r2,#0 @ borrow from low half?
sbcs r4,r3 @ y-xz Q73 (remainder bits 52..73)
cmp r4,#0
bmi 1f
movs r2,#0 @ round up
adds r0,#1
adcs r1,r2
1:
lsrs r0,#1 @ shift back down to Q52
lsls r2,r1,#31
orrs r0,r2
lsrs r1,#1
dd_2:
add r13,#8 @ discard saved x mantissa
mov r2,r12
lsls r7,r2,#31 @ result sign
asrs r2,#2 @ result exponent
ldr r3,=#0x3fd
adds r2,r3
ldr r3,=#0x7fe
cmp r2,r3
bhs dd_3 @ over- or underflow?
lsls r2,#20
adds r1,r2 @ pack exponent
dd_5:
adds r1,r7 @ pack sign
pop {r4-r7,r15}
dd_3:
movs r0,#0
cmp r2,#0
bgt dd_4 @ overflow?
movs r1,r7 @ underflow: signed zero
pop {r4-r7,r15}
dd_4:
adds r3,#1 @ 0x7ff
lsls r1,r3,#20 @ signed infinity
b dd_5
/*
Approach to square root x=sqrt(y) is as follows.
First generate a3, an approximation to 1/sqrt(y) to about 30 bits. Multiply this by y
to give a4~sqrt(y) to about 28 bits and a remainder r4=y-a4^2. Then, because
d sqrt(y) / dy = 1 / (2 sqrt(y)) let d4=r4*a3/2 and then the value a5=a4+d4 is
a better approximation to sqrt(y). If this is near a rounding boundary we
compute an exact remainder y-a5*a5 to decide whether to round up or down.
The calculation of a3 and a4 is as given in dsqrttest.c. That code verifies exhaustively
that | 1 - a3a4 | < 10*2^-32, | r4 | < 40*2^-32 and | r4/y | < 20*2^-32.
More precisely, with "y" representing y truncated to 30 binary places:
 u=(q3)y; // 24-entry table
 a0=(q8~)"1/sqrtq(x+x_ulp/2)"(u); // first approximation from table
 p0=(q16)(a0*a0) * (q16)y;
 r0=(q20)(p0-1);
 dy0=(q15)(r0*a0); // Newton-Raphson correction term
 a1=(q16)a0-dy0/2; // good to ~9 bits
 p1=(q19)(a1*a1)*(q19)y;
 r1=(q23)(p1-1);
 dy1=(q15~)(r1*a1); // second Newton-Raphson correction
 a2x=(q16)a1-dy1/2; // good to ~16 bits
 a2=a2x-a2x/1t16; // prevent overflow of a2*a2 in 32 bits
 p2=(a2*a2)*(q30)y; // Q62
 r2=(q36)(p2-1+1t-31);
 dy2=(q30)(r2*a2); // Q52->Q30
 a3=(q31)a2-dy2/2; // good to about 30 bits
 a4=(q30)(a3*(q30)y+1t-31); // good to about 28 bits
Error analysis
 r₄ = y - a₄²
 d₄ = 1/2 a₃r₄
 a₅ = a₄ + d₄
 r₅ = y - a₅²
    = y - ( a₄ + d₄ )²
    = y - a₄² - a₃a₄r₄ - 1/4 a₃²r₄²
    = r₄ - a₃a₄r₄ - 1/4 a₃²r₄²
 | r₅ | < | r₄ | | 1 - a₃a₄ | + 1/4 r₄²
 a₅ = √y √( 1 - r₅/y )
    = √y ( 1 - 1/2 r₅/y + ... )
So to first order (second order being very tiny)
 √y - a₅ = 1/2 r₅/y
and
 | √y - a₅ | < 1/2 ( | r₄/y | | 1 - a₃a₄ | + 1/4 r₄²/y )
From dsqrttest.c (conservatively):
             < 1/2 ( 20*2^-32 * 10*2^-32 + 1/4 * 40*2^-32*20*2^-32 )
             = 1/2 ( 200 + 200 ) * 2^-64
             < 2^-56
Empirically we see about 1ulp worst-case error including rounding at Q57.
To determine correctly whether the exact remainder calculation can be skipped we need a result
accurate to < 0.25ulp at Q52, or 2^-54.
*/
@ Shared special-case exit paths for _dsqrt (placed before the entry
@ point so the conditional branches below reach them).
dq_2:
bge dq_3 @ +Inf?
movs r1,#0 @ +0: return +0
b dq_4
dq_0:
lsrs r1,#31
lsls r1,#31 @ preserve sign bit
lsrs r2,#21 @ extract exponent
beq dq_4 @ -0? return it
asrs r1,#11 @ make -Inf
b dq_4
dq_3:
ldr r1,=#0x7ff
lsls r1,#20 @ return +Inf
dq_4:
movs r0,#0
dq_1:
bx r14
@ -----------------------------------------------------------------------
@ double _dsqrt(double y)
@ In:   r0:r1 = y (packed IEEE-754 double)
@ Out:  r0:r1 = sqrt(y) (packed); r4-r7 preserved on the main path.
@ Special cases: +0/-0 returned unchanged; +Inf returns +Inf; other
@ negative inputs return -Inf (dq_0).  drsqrtapp below supplies the
@ 3-bit reciprocal-square-root seed.
@ -----------------------------------------------------------------------
.align 2
.thumb_func
_dsqrt:
lsls r2,r1,#1
bcs dq_0 @ negative?
lsrs r2,#21 @ extract exponent
subs r2,#1
ldr r3,=#0x7fe
cmp r2,r3
bhs dq_2 @ catches 0 and +Inf
push {r4-r7,r14}
lsls r4,r2,#20
subs r1,r4 @ insert implied 1
lsrs r2,#1
bcc 1f @ even exponent? skip
adds r0,r0,r0 @ odd exponent: shift up mantissa
adcs r1,r1,r1
1:
lsrs r3,#2
adds r2,r3
lsls r2,#20
mov r12,r2 @ save result exponent
@ here
@ r0:r1 y mantissa Q52 [1,4)
@ r12 result exponent
adr r4,drsqrtapp-8 @ first eight table entries are never accessed because of the mantissa's leading 1
lsrs r2,r1,#17 @ y Q3
ldrb r2,[r4,r2] @ initial approximation to reciprocal square root a0 Q8
lsrs r3,r1,#4 @ first Newton-Raphson iteration
muls r3,r2
muls r3,r2 @ i32 p0=a0*a0*(y>>14); // Q32
asrs r3,r3,#12 @ i32 r0=p0>>12; // Q20
muls r3,r2
asrs r3,#13 @ i32 dy0=(r0*a0)>>13; // Q15
lsls r2,#8
subs r2,r3 @ i32 a1=(a0<<8)-dy0; // Q16
movs r3,r2
muls r3,r3
lsrs r3,#13
lsrs r4,r1,#1
muls r3,r4 @ i32 p1=((a1*a1)>>11)*(y>>11); // Q19*Q19=Q38
asrs r3,#15 @ i32 r1=p1>>15; // Q23
muls r3,r2
asrs r3,#23
adds r3,#1
asrs r3,#1 @ i32 dy1=(r1*a1+(1<<23))>>24; // Q23*Q16=Q39; Q15
subs r2,r3 @ i32 a2=a1-dy1; // Q16
lsrs r3,r2,#16
subs r2,r3 @ if(a2>=0x10000) a2=0xffff; to prevent overflow of a2*a2
@ here
@ r0:r1 y mantissa
@ r2 a2 ~ 1/sqrt(y) Q16
@ r12 result exponent
movs r3,r2
muls r3,r3
lsls r1,#10
lsrs r4,r0,#22
orrs r1,r4 @ y Q30
mul32_32_64 r1,r3, r4,r3, r5,r6,r7,r4,r3 @ i64 p2=(ui64)(a2*a2)*(ui64)y; // Q62 r4:r3
lsls r5,r3,#6
lsrs r4,#26
orrs r4,r5
adds r4,#0x20 @ i32 r2=(p2>>26)+0x20; // Q36 r4
uxth r5,r4
muls r5,r2
asrs r4,#16
muls r4,r2
lsrs r5,#16
adds r4,r5
asrs r4,#6 @ i32 dy2=((i64)r2*(i64)a2)>>22; // Q36*Q16=Q52; Q30
lsls r2,#15
subs r2,r4
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 a3 ~ 1/sqrt(y) Q31
@ r12 result exponent
mul32_32_64 r2,r1, r3,r4, r5,r6,r7,r3,r4
adds r3,r3,r3
adcs r4,r4,r4
adds r3,r3,r3
movs r3,#0
adcs r3,r4 @ ui32 a4=((ui64)a3*(ui64)y+(1U<<31))>>31; // Q30
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 a3 Q31 ~ 1/sqrt(y)
@ r3 a4 Q30 ~ sqrt(y)
@ r12 result exponent
square32_64 r3, r4,r5, r6,r5,r7
lsls r6,r0,#8
lsrs r7,r1,#2
subs r6,r4
sbcs r7,r5 @ r4=(q60)y-a4*a4
@ by exhaustive testing, r4 = fffffffc0e134fdc .. 00000003c2bf539c Q60
lsls r5,r7,#29
lsrs r6,#3
adcs r6,r5 @ r4 Q57 with rounding
muls32_32_64 r6,r2, r6,r2, r4,r5,r7,r6,r2 @ d4=a3*r4/2 Q89
@ r4+d4 is correct to 1ULP at Q57, tested on ~9bn cases including all extreme values of r4 for each possible y Q30
adds r2,#8
asrs r2,#5 @ d4 Q52, rounded to Q53 with spare bit in carry
@ here
@ r0 y low bits
@ r1 y Q30
@ r2 d4 Q52, rounded to Q53
@ C flag contains d4_b53
@ r3 a4 Q30
bcs dq_5
lsrs r5,r3,#10 @ a4 Q52
lsls r4,r3,#22
asrs r1,r2,#31
adds r0,r2,r4
adcs r1,r5 @ a4+d4
add r1,r12 @ pack exponent
pop {r4-r7,r15}
.ltorg
@ 3-bit reciprocal-square-root seed table:
@ round(sqrt(2^22./[68:8:252]))
drsqrtapp:
.byte 0xf8,0xeb,0xdf,0xd6,0xcd,0xc5,0xbe,0xb8
.byte 0xb2,0xad,0xa8,0xa4,0xa0,0x9c,0x99,0x95
.byte 0x92,0x8f,0x8d,0x8a,0x88,0x85,0x83,0x81
dq_5:
@ here we are near a rounding boundary, C is set
@ compute the exact remainder y-a5^2 to decide whether to round up
adcs r2,r2,r2 @ d4 Q53+1ulp
lsrs r5,r3,#9
lsls r4,r3,#23 @ r4:r5 a4 Q53
asrs r1,r2,#31
adds r4,r2,r4
adcs r5,r1 @ r4:r5 a5=a4+d4 Q53+1ulp
movs r3,r5
muls r3,r4
square32_64 r4,r1,r2,r6,r2,r7
adds r2,r3
adds r2,r3 @ r1:r2 a5^2 Q106
lsls r0,#22 @ y Q84
rsbs r1,#0
sbcs r0,r2 @ remainder y-a5^2
bmi 1f @ y<a5^2: no need to increment a5
movs r3,#0
adds r4,#1
adcs r5,r3 @ bump a5 if over rounding boundary
1:
lsrs r0,r4,#1
lsrs r1,r5,#1
lsls r5,#31
orrs r0,r5 @ shift a5 down to Q52 and pack
add r1,r12
pop {r4-r7,r15}
@ compare packed double x in r0:r1 against packed double y in r2:r3,
@ returning -1/0/1 in r0 for x<y, x==y, x>y
@ also set flags accordingly (final subs r0,r6,#0), so callers may branch directly
@ NaNs and denormals are flushed to signed zero/infinity before comparing,
@ and -0 compares equal to +0
.thumb_func
qfp_dcmp:
push {r4,r6,r7,r14}
ldr r7,=#0x7ff @ flush NaNs and denormals
lsls r4,r1,#1
lsrs r4,#21 @ exponent field of x
beq 1f @ zero/denormal? flush mantissa
cmp r4,r7
bne 2f @ normal finite: leave x alone
1:
movs r0,#0 @ keep only sign+exponent of x
lsrs r1,#20
lsls r1,#20
2:
lsls r4,r3,#1
lsrs r4,#21 @ exponent field of y
beq 1f @ zero/denormal? flush mantissa
cmp r4,r7
bne 2f @ normal finite: leave y alone
1:
movs r2,#0 @ keep only sign+exponent of y
lsrs r3,#20
lsls r3,#20
2:
dcmp_fast_entry:
movs r6,#1 @ provisional result +1
eors r3,r1
bmi 4f @ opposite signs? then can proceed on basis of sign of x
eors r3,r1 @ restore r3
bpl 1f
rsbs r6,#0 @ negative? flip comparison
1:
cmp r1,r3 @ compare high words (signed compare orders same-sign doubles by magnitude)
bne 1f
cmp r0,r2 @ high words equal: compare low words, unsigned
bhi 2f
blo 3f
5:
movs r6,#0 @ equal? result is 0
1:
bgt 2f
3:
rsbs r6,#0 @ x below y: negate provisional result
2:
subs r0,r6,#0 @ copy and set flags
pop {r4,r6,r7,r15}
4:
orrs r3,r1 @ make -0==+0
adds r3,r3 @ drop sign bit...
orrs r3,r0
orrs r3,r2 @ ...and gather all remaining bits of both operands
beq 5b @ both zero: equal
cmp r1,#0
bge 2b @ x non-negative: x>y
b 3b @ x negative: x<y
@ "scientific" functions start here
@ helper: save high registers r8-r11 on the stack, staged through r4-r7
@ (so the caller must have saved r4-r7 already); paired with pop_r8_r11
.thumb_func
push_r8_r11:
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
push {r4-r7}
bx r14
@ helper: restore high registers r8-r11 pushed by push_r8_r11
@ (trashes r4-r7 in the process)
.thumb_func
pop_r8_r11:
pop {r4-r7}
mov r8,r4
mov r9,r5
mov r10,r6
mov r11,r7
bx r14
@ double-length CORDIC rotation step
@ r0:r1 ω (angle accumulator)
@ r6 32-i (complementary shift)
@ r7 i (shift)
@ r8:r9 x
@ r10:r11 y
@ r12 coefficient pointer (two words per step: dω = atan 2^-i, low:high)
@ vectoring mode (dcordic_vec_step) steers by the sign of y; rotation mode
@ (dcordic_rot_step) steers by the sign of ω — both fall into a shared body
@ an option in rotation mode would be to compute the sequence of σ values
@ in one pass, rotate the initial vector by the residual ω and then run a
@ second pass to compute the final x and y. This would relieve pressure
@ on registers and hence possibly be faster. The same trick does not work
@ in vectoring mode (but perhaps one could work to single precision in
@ a first pass and then double precision in a second pass?).
.thumb_func
dcordic_vec_step:
mov r2,r12
ldmia r2!,{r3,r4} @ fetch dω for this step, advance table pointer
mov r12,r2
mov r2,r11
cmp r2,#0 @ steer on sign of y
blt 1f
b 2f
.thumb_func
dcordic_rot_step:
mov r2,r12
ldmia r2!,{r3,r4} @ fetch dω for this step, advance table pointer
mov r12,r2
cmp r1,#0 @ steer on sign of ω
bge 1f
2:
@ ω<0 / y>=0
@ ω+=dω
@ x+=y>>i, y-=x>>i
adds r0,r3
adcs r1,r4
mov r3,r11
asrs r3,r7
mov r4,r11
lsls r4,r6
mov r2,r10
lsrs r2,r7 @ carry = last bit shifted out (rounding bit)
orrs r2,r4 @ r2:r3 y>>i, rounding in carry
mov r4,r8
mov r5,r9 @ r4:r5 x
adcs r2,r4
adcs r3,r5 @ r2:r3 x+(y>>i)
mov r8,r2
mov r9,r3
mov r3,r5
lsls r3,r6
asrs r5,r7
lsrs r4,r7 @ carry = rounding bit
orrs r4,r3 @ r4:r5 x>>i, rounding in carry
mov r2,r10
mov r3,r11
sbcs r2,r4
sbcs r3,r5 @ r2:r3 y-(x>>i)
mov r10,r2
mov r11,r3
bx r14
@ ω>0 / y<0
@ ω-=dω
@ x-=y>>i, y+=x>>i
1:
subs r0,r3
sbcs r1,r4
mov r3,r9
asrs r3,r7
mov r4,r9
lsls r4,r6
mov r2,r8
lsrs r2,r7 @ carry = rounding bit
orrs r2,r4 @ r2:r3 x>>i, rounding in carry
mov r4,r10
mov r5,r11 @ r4:r5 y
adcs r2,r4
adcs r3,r5 @ r2:r3 y+(x>>i)
mov r10,r2
mov r11,r3
mov r3,r5
lsls r3,r6
asrs r5,r7
lsrs r4,r7 @ carry = rounding bit
orrs r4,r3 @ r4:r5 y>>i, rounding in carry
mov r2,r8
mov r3,r9
sbcs r2,r4
sbcs r3,r5 @ r2:r3 x-(y>>i)
mov r8,r2
mov r9,r3
bx r14
@ shared exit: return packed double +0.0 in r0:r1
ret_dzero:
movs r0,#0
movs r1,#0
bx r14
@ convert packed double in r0:r1 to signed/unsigned 32/64-bit integer/fixed-point value in r0:r1 [with r2 places after point], with rounding towards -Inf
@ fixed-point versions only work with reasonable values in r2 because of the way dunpacks works
@ _dfix: double -> int32 (r2 forced to 0 binary places, falls through)
.thumb_func
_dfix:
movs r2,#0 @ and fall through
.thumb_func
qfp_double2fix:
push {r14}
adds r2,#32 @ compute the 64-bit result with 32 extra fraction bits...
bl qfp_double2fix64
movs r0,r1 @ ...then keep the high word as the 32-bit result
pop {r15}
@ unsigned variants of the above: double -> uint32 / 32-bit unsigned fixed point
@ _dfixu: double -> uint32 (r2 forced to 0 binary places, falls through)
.thumb_func
_dfixu:
movs r2,#0 @ and fall through
.thumb_func
qfp_double2ufix:
push {r14}
adds r2,#32 @ compute the 64-bit result with 32 extra fraction bits...
bl qfp_double2ufix64
movs r0,r1 @ ...then keep the high word as the 32-bit result
pop {r15}
@ float -> signed 64-bit integer/fixed point in r0:r1 (r1 = places after point)
@ _ll_sfrom_f: float -> int64 (r1 forced to 0 binary places, falls through)
.thumb_func
_ll_sfrom_f:
movs r1,#0 @ and fall through
.thumb_func
qfp_float2fix64:
push {r14}
bl f2fix
b d2f64_a @ shared clamp/epilogue with the double path
@ float -> unsigned 64-bit integer/fixed point in r0:r1 (r1 = places after point)
@ negative inputs return 0
@ _ll_ufrom_f: float -> uint64 (r1 forced to 0 binary places, falls through)
.thumb_func
_ll_ufrom_f:
movs r1,#0 @ and fall through
.thumb_func
qfp_float2ufix64:
asrs r3,r0,#23 @ negative? return 0
bmi ret_dzero
@ and fall through
@ convert float in r0 to signed fixed point in r0:r1:r3, r1 places after point, rounding towards -Inf
@ result clamped so that r3 can only be 0 or -1
@ trashes r12
f2fix:
push {r4,r14}
mov r12,r1 @ save requested binary point position
asrs r3,r0,#31 @ r3 = sign extension bits
lsls r0,#1
lsrs r2,r0,#24 @ biased exponent
beq 1f @ zero?
cmp r2,#0xff @ Inf?
beq 2f
subs r1,r2,#1
subs r2,#0x7f @ remove exponent bias
lsls r1,#24
subs r0,r1 @ insert implied 1
eors r0,r3
subs r0,r3 @ top two's complement
asrs r1,r0,#4 @ convert to double format
lsls r0,#28
b d2fix_a @ shared shifting/clamping code with d2fix
1:
movs r0,#0 @ zero (or denormal): return 0
movs r1,r0
movs r3,r0
pop {r4,r15}
2:
mvns r0,r3 @ return max/min value
mvns r1,r3
pop {r4,r15}
@ double -> signed 64-bit integer/fixed point in r0:r1 (r2 = places after point)
@ _ll_sfrom_d: double -> int64 (r2 forced to 0 binary places, falls through)
.thumb_func
_ll_sfrom_d:
movs r2,#0 @ and fall through
.thumb_func
qfp_double2fix64:
push {r14}
bl d2fix
d2f64_a:
@ shared epilogue: r0:r1 result, r3 expected sign extension (0 or -1)
asrs r2,r1,#31
cmp r2,r3
bne 1f @ sign extension bits fail to match sign of result?
pop {r15}
1:
mvns r0,r3
movs r1,#1
lsls r1,#31
eors r1,r1,r0 @ generate extreme fixed-point values
pop {r15}
@ double -> unsigned 64-bit integer/fixed point in r0:r1 (r2 = places after point)
@ negative inputs return 0
@ _ll_ufrom_d: double -> uint64 (r2 forced to 0 binary places, falls through)
.thumb_func
_ll_ufrom_d:
movs r2,#0 @ and fall through
.thumb_func
qfp_double2ufix64:
asrs r3,r1,#20 @ negative? return 0
bmi ret_dzero
@ and fall through
@ convert double in r0:r1 to signed fixed point in r0:r1:r3, r2 places after point, rounding towards -Inf
@ result clamped so that r3 can only be 0 or -1
@ trashes r12
.thumb_func
d2fix:
push {r4,r14}
mov r12,r2 @ save requested binary point position
bl dunpacks
asrs r4,r2,#16
adds r4,#1
bge 1f @ exponent not hugely negative? keep mantissa
movs r1,#0 @ -0 -> +0
1:
asrs r3,r1,#31 @ r3 = mantissa sign extension bits
d2fix_a:
@ here
@ r0:r1 two's complement mantissa
@ r2 unbiased exponent
@ r3 mantissa sign extension bits
add r2,r12 @ exponent plus offset for required binary point position
subs r2,#52 @ required shift
bmi 1f @ shift down?
@ here a shift up by r2 places
cmp r2,#12 @ will clamp?
bge 2f
movs r4,r0
lsls r1,r2
lsls r0,r2
rsbs r2,#0
adds r2,#32 @ complementary shift
lsrs r4,r2
orrs r1,r4 @ carry low-word bits up into the high word
pop {r4,r15}
2:
mvns r0,r3
mvns r1,r3 @ overflow: clamp to extreme fixed-point values
pop {r4,r15}
1:
@ here a shift down by -r2 places
adds r2,#32
bmi 1f @ long shift?
mov r4,r1
lsls r4,r2
rsbs r2,#0
adds r2,#32 @ complementary shift
asrs r1,r2
lsrs r0,r2
orrs r0,r4 @ bits discarded here implement rounding towards -Inf
pop {r4,r15}
1:
@ here a long shift down
movs r0,r1
asrs r1,#31 @ shift down 32 places
adds r2,#32
bmi 1f @ very long shift?
rsbs r2,#0
adds r2,#32
asrs r0,r2
pop {r4,r15}
1:
movs r0,r3 @ result very near zero: use sign extension bits
movs r1,r3
pop {r4,r15}
@ float <-> double conversions
@ __aeabi_f2d: widen single in r0 to packed double in r0:r1 (EABI helper)
@ exact except that denormal singles are flushed to signed zero (exponent-0
@ path below discards the mantissa) and NaNs come out as signed infinity
.thumb_func
__aeabi_f2d:
lsrs r3,r0,#31 @ sign bit
lsls r3,#31
lsls r1,r0,#1
lsrs r2,r1,#24 @ exponent
beq 1f @ zero?
cmp r2,#0xff @ Inf?
beq 2f
lsrs r1,#4 @ exponent and top 20 bits of mantissa
ldr r2,=#(0x3ff-0x7f)<<20 @ difference in exponent offsets
adds r1,r2
orrs r1,r3 @ reattach sign
lsls r0,#29 @ bottom 3 bits of mantissa
bx r14
1:
movs r1,r3 @ return signed zero
3:
movs r0,#0
bx r14
2:
ldr r1,=#0x7ff00000 @ return signed infinity
adds r1,r3
b 3b
@ __aeabi_d2f: narrow packed double in r0:r1 to single in r0 (EABI helper)
@ rounds to nearest, ties to even; overflow and NaN return signed infinity
@ (payload not preserved); results below the single range are flushed to
@ signed zero, except a value a hair under the smallest normal, which is
@ returned as the smallest normal with the correct sign
.thumb_func
__aeabi_d2f:
lsls r2,r1,#1
lsrs r2,#21 @ exponent
ldr r3,=#0x3ff-0x7f
subs r2,r3 @ fix exponent bias
ble 1f @ underflow or zero
cmp r2,#0xff
bge 2f @ overflow or infinity
lsls r2,#23 @ position exponent of result
lsrs r3,r1,#31
lsls r3,#31
orrs r2,r3 @ insert sign
lsls r3,r0,#3 @ rounding bits
lsrs r0,#29
lsls r1,#12
lsrs r1,#9
orrs r0,r1 @ assemble mantissa
orrs r0,r2 @ insert exponent and sign
lsls r3,#1 @ round bit into carry, sticky bits into Z
bcc 3f @ no rounding
beq 4f @ all sticky bits 0?
5:
adds r0,#1 @ round up
3:
bx r14
4:
lsrs r3,r0,#1 @ odd? then round up (ties to even)
bcs 5b
bx r14
1:
beq 6f @ check case where value is just less than smallest normal
7:
lsrs r0,r1,#31 @ return signed zero
lsls r0,#31
bx r14
6:
lsls r2,r1,#12 @ 20 1:s at top of mantissa?
asrs r2,#12
adds r2,#1
bne 7b
lsrs r2,r0,#29 @ and 3 more 1:s?
cmp r2,#7
bne 7b
movs r2,#1 @ return smallest normal with correct sign
b 8f
2:
movs r2,#0xff
8:
lsrs r0,r1,#31 @ return signed infinity (or, from 8f, smallest normal)
lsls r0,#8
adds r0,r2
lsls r0,#23
bx r14
@ convert signed/unsigned 32/64-bit integer/fixed-point value in r0:r1 [with r2 places after point] to packed double in r0:r1, with rounding
@ _dfltu: uint32 in r0 -> double (forces 0 binary places, falls through)
.thumb_func
_dfltu:
movs r1,#0 @ and fall through
.thumb_func
qfp_ufix2double:
movs r2,r1 @ binary point position moves from r1 to r2...
movs r1,#0 @ ...and the 32-bit value is zero-extended to 64 bits
b qfp_ufix642double
@ _dflt: int32 in r0 -> double (forces 0 binary places, falls through)
.thumb_func
_dflt:
movs r1,#0 @ and fall through
.thumb_func
qfp_fix2double:
movs r2,r1 @ binary point position moves from r1 to r2...
asrs r1,r0,#31 @ ...and the 32-bit value is sign-extended to 64 bits
b qfp_fix642double
@ _ll_uto_d: uint64 in r0:r1 -> double (forces 0 binary places, falls through)
.thumb_func
_ll_uto_d:
movs r2,#0 @ and fall through
.thumb_func
qfp_ufix642double:
movs r3,#0 @ result sign = positive
b uf2d
@ _ll_sto_d: int64 in r0:r1 -> double (forces 0 binary places, falls through)
.thumb_func
_ll_sto_d:
movs r2,#0 @ and fall through
.thumb_func
qfp_fix642double:
asrs r3,r1,#31 @ sign bit across all bits
eors r0,r3 @ take absolute value of r0:r1...
eors r1,r3
subs r0,r3
sbcs r1,r3 @ ...then fall into the unsigned path with sign in r3
@ common tail: pack |value| in r0:r1 (Q r2) with sign r3 into a double
@ rounds to nearest, ties to even; out-of-range exponents give signed
@ zero/infinity
uf2d:
push {r4,r5,r14}
ldr r4,=#0x432
subs r2,r4,r2 @ form biased exponent
@ here
@ r0:r1 unnormalised mantissa
@ r2 -Q (will become exponent)
@ r3 sign across all bits
cmp r1,#0
bne 1f @ short normalising shift?
movs r1,r0
beq 2f @ zero? return it
movs r0,#0
subs r2,#32 @ fix exponent
1:
asrs r4,r1,#21
bne 3f @ will need shift down (and rounding?)
bcs 4f @ normalised already?
5:
subs r2,#1
adds r0,r0 @ shift up
adcs r1,r1
lsrs r4,r1,#21
bcc 5b
4:
ldr r4,=#0x7fe
cmp r2,r4
bhs 6f @ over/underflow? return signed zero/infinity
7:
lsls r2,#20 @ pack and return
adds r1,r2
lsls r3,#31
adds r1,r3
2:
pop {r4,r5,r15}
6: @ return signed zero/infinity according to unclamped exponent in r2
mvns r2,r2
lsrs r2,#21 @ 0x7ff if exponent overflowed high, 0 if it underflowed
movs r0,#0
movs r1,#0
b 7b
3:
@ here we need to shift down to normalise and possibly round
bmi 1f @ already normalised to Q63?
2:
subs r2,#1
adds r0,r0 @ shift up
adcs r1,r1
bpl 2b
1:
@ here we have a 1 in b63 of r0:r1
adds r2,#11 @ correct exponent for subsequent shift down
lsls r4,r0,#21 @ save bits for rounding
lsrs r0,#11
lsls r5,r1,#21
orrs r0,r5
lsrs r1,#11
lsls r4,#1 @ round bit to carry, sticky bits to Z
beq 1f @ sticky bits are zero?
8:
movs r4,#0
adcs r0,r4 @ add round bit (carry)
adcs r1,r4
b 4b
1:
bcc 4b @ sticky bits are zero but not on rounding boundary
lsrs r4,r0,#1 @ increment if odd (force round to even)
b 8b
.ltorg
@ unpack packed double r0:r1 via the mdunpacks macro (defined elsewhere in
@ this file — presumably yields mantissa in r0:r1 and biased exponent in r2;
@ confirm against the macro definition), then remove the exponent bias
.thumb_func
dunpacks:
mdunpacks r0,r1,r2,r3,r4
ldr r3,=#0x3ff
subs r2,r3 @ exponent without offset
bx r14
@ r0:r1 signed mantissa Q52
@ r2 unbiased exponent < 10 (i.e., |x|<2^10)
@ r4 pointer to:
@ - divisor reciprocal approximation r=1/d Q15
@ - divisor d Q62 0..20
@ - divisor d Q62 21..41
@ - divisor d Q62 42..62
@ returns:
@ r0:r1 reduced result y Q62, -0.6 d < y < 0.6 d (better in practice)
@ r2 quotient q (number of reductions)
@ if exponent >=10, returns r0:r1=0, r2=1024*mantissa sign
@ designed to work for 0.5<d<2, in particular d=ln2 (~0.7) and d=π/2 (~1.6)
@ .thumb_func
@ dreduce:
@ adds r2,#2 @ e+2
@ bmi 1f @ |x|<0.25, too small to need adjustment
@ cmp r2,#12
@ bge 4f
@ 2:
@ movs r5,#17
@ subs r5,r2 @ 15-e
@ movs r3,r1 @ Q20
@ asrs r3,r5 @ x Q5
@ adds r2,#8 @ e+10
@ adds r5,#7 @ 22-e = 32-(e+10)
@ movs r6,r0
@ lsrs r6,r5
@ lsls r0,r2
@ lsls r1,r2
@ orrs r1,r6 @ r0:r1 x Q62
@ ldmia r4,{r4-r7}
@ muls r3,r4 @ rx Q20
@ asrs r2,r3,#20
@ movs r3,#0
@ adcs r2,r3 @ rx Q0 rounded = q; for e.g. r=1.5 |q|<1.5*2^10
@ muls r5,r2 @ qd in pieces: L Q62
@ muls r6,r2 @ M Q41
@ muls r7,r2 @ H Q20
@ lsls r7,#10
@ asrs r4,r6,#11
@ lsls r6,#21
@ adds r6,r5
@ adcs r7,r4
@ asrs r5,#31
@ adds r7,r5 @ r6:r7 qd Q62
@ subs r0,r6
@ sbcs r1,r7 @ remainder Q62
@ bx r14
@ 4:
@ movs r2,#12 @ overflow: clamp to +/-1024
@ movs r0,#0
@ asrs r1,#31
@ lsls r1,#1
@ adds r1,#1
@ lsls r1,#20
@ b 2b
@ 1:
@ lsls r1,#8
@ lsrs r3,r0,#24
@ orrs r1,r3
@ lsls r0,#8 @ r0:r1 Q60, to be shifted down -r2 places
@ rsbs r3,r2,#0
@ adds r2,#32 @ shift down in r3, complementary shift in r2
@ bmi 1f @ long shift?
@ 2:
@ movs r4,r1
@ asrs r1,r3
@ lsls r4,r2
@ lsrs r0,r3
@ orrs r0,r4
@ movs r2,#0 @ rounding
@ adcs r0,r2
@ adcs r1,r2
@ bx r14
@ 1:
@ movs r0,r1 @ down 32 places
@ asrs r1,#31
@ subs r3,#32
@ adds r2,#32
@ bpl 2b
@ movs r0,#0 @ very long shift? return 0
@ movs r1,#0
@ movs r2,#0
@ bx r14
@ .thumb_func
@ qfp_dtan:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ mov r12,r0 @ save ε
@ bl dcos_finish
@ push {r0,r1}
@ mov r0,r12
@ bl dsin_finish
@ pop {r2,r3}
@ bl pop_r8_r11
@ b ddiv0 @ compute sin θ/cos θ
@ .thumb_func
@ qfp_dcos:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ bl dcos_finish
@ b 1f
@ .thumb_func
@ qfp_dsin:
@ push {r4-r7,r14}
@ bl push_r8_r11
@ bl dsincos
@ bl dsin_finish
@ 1:
@ bl pop_r8_r11
@ pop {r4-r7,r15}
@ @ unpack double θ in r0:r1, range reduce and calculate ε, cos α and sin α such that
@ @ θ=α+ε and |ε|≤2^-32
@ @ on return:
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ .thumb_func
@ dsincos:
@ push {r14}
@ bl dunpacks
@ adr r4,dreddata0
@ bl dreduce
@ movs r4,#0
@ ldr r5,=#0x9df04dbb @ this value compensates for the non-unity scaling of the CORDIC rotations
@ ldr r6,=#0x36f656c5
@ lsls r2,#31
@ bcc 1f
@ @ quadrant 2 or 3
@ mvns r6,r6
@ rsbs r5,r5,#0
@ adcs r6,r4
@ 1:
@ lsls r2,#1
@ bcs 1f
@ @ even quadrant
@ mov r10,r4
@ mov r11,r4
@ mov r8,r5
@ mov r9,r6
@ b 2f
@ 1:
@ @ odd quadrant
@ mov r8,r4
@ mov r9,r4
@ mov r10,r5
@ mov r11,r6
@ 2:
@ adr r4,dtab_cc
@ mov r12,r4
@ movs r7,#1
@ movs r6,#31
@ 1:
@ bl dcordic_rot_step
@ adds r7,#1
@ subs r6,#1
@ cmp r7,#33
@ bne 1b
@ pop {r15}
@ dcos_finish:
@ @ here
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ @ and we wish to calculate cos θ=cos(α+ε)~cos α - ε sin α
@ mov r1,r11
@ @ mov r2,r10
@ @ lsrs r2,#31
@ @ adds r1,r2 @ rounding improves accuracy very slightly
@ muls32_s32_64 r0,r1, r2,r3, r4,r5,r6,r2,r3
@ @ r2:r3 ε sin α Q(62+62-32)=Q92
@ mov r0,r8
@ mov r1,r9
@ lsls r5,r3,#2
@ asrs r3,r3,#30
@ lsrs r2,r2,#30
@ orrs r2,r5
@ sbcs r0,r2 @ include rounding
@ sbcs r1,r3
@ movs r2,#62
@ b qfp_fix642double
@ dsin_finish:
@ @ here
@ @ r0:r1 ε (residual ω, where θ=α+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r8:r9 cos α Q62
@ @ r10:r11 sin α Q62
@ @ and we wish to calculate sin θ=sin(α+ε)~sin α + ε cos α
@ mov r1,r9
@ muls32_s32_64 r0,r1, r2,r3, r4,r5,r6,r2,r3
@ @ r2:r3 ε cos α Q(62+62-32)=Q92
@ mov r0,r10
@ mov r1,r11
@ lsls r5,r3,#2
@ asrs r3,r3,#30
@ lsrs r2,r2,#30
@ orrs r2,r5
@ adcs r0,r2 @ include rounding
@ adcs r1,r3
@ movs r2,#62
@ b qfp_fix642double
@ .ltorg
@ .align 2
@ dreddata0:
@ .word 0x0000517d @ 2/π Q15
@ .word 0x0014611A @ π/2 Q62=6487ED5110B4611A split into 21-bit pieces
@ .word 0x000A8885
@ .word 0x001921FB
@ .thumb_func
@ qfp_datan2:
@ @ r0:r1 y
@ @ r2:r3 x
@ push {r4-r7,r14}
@ bl push_r8_r11
@ ldr r5,=#0x7ff00000
@ movs r4,r1
@ ands r4,r5 @ y==0?
@ beq 1f
@ cmp r4,r5 @ or Inf/NaN?
@ bne 2f
@ 1:
@ lsrs r1,#20 @ flush
@ lsls r1,#20
@ movs r0,#0
@ 2:
@ movs r4,r3
@ ands r4,r5 @ x==0?
@ beq 1f
@ cmp r4,r5 @ or Inf/NaN?
@ bne 2f
@ 1:
@ lsrs r3,#20 @ flush
@ lsls r3,#20
@ movs r2,#0
@ 2:
@ movs r6,#0 @ quadrant offset
@ lsls r5,#11 @ constant 0x80000000
@ cmp r3,#0
@ bpl 1f @ skip if x positive
@ movs r6,#2
@ eors r3,r5
@ eors r1,r5
@ bmi 1f @ quadrant offset=+2 if y was positive
@ rsbs r6,#0 @ quadrant offset=-2 if y was negative
@ 1:
@ @ now in quadrant 0 or 3
@ adds r7,r1,r5 @ r7=-r1
@ bpl 1f
@ @ y>=0: in quadrant 0
@ cmp r1,r3
@ ble 2f @ y<~x so 0≤θ<~π/4: skip
@ adds r6,#1
@ eors r1,r5 @ negate x
@ b 3f @ and exchange x and y = rotate by -π/2
@ 1:
@ cmp r3,r7
@ bge 2f @ -y<~x so -π/4<~θ≤0: skip
@ subs r6,#1
@ eors r3,r5 @ negate y and ...
@ 3:
@ movs r7,r0 @ exchange x and y
@ movs r0,r2
@ movs r2,r7
@ movs r7,r1
@ movs r1,r3
@ movs r3,r7
@ 2:
@ @ here -π/4<~θ<~π/4
@ @ r6 has quadrant offset
@ push {r6}
@ cmp r2,#0
@ bne 1f
@ cmp r3,#0
@ beq 10f @ x==0 going into division?
@ lsls r4,r3,#1
@ asrs r4,#21
@ adds r4,#1
@ bne 1f @ x==Inf going into division?
@ lsls r4,r1,#1
@ asrs r4,#21
@ adds r4,#1 @ y also ±Inf?
@ bne 10f
@ subs r1,#1 @ make them both just finite
@ subs r3,#1
@ b 1f
@ 10:
@ movs r0,#0
@ movs r1,#0
@ b 12f
@ 1:
@ bl __aeabi_ddiv
@ movs r2,#62
@ bl qfp_double2fix64
@ @ r0:r1 y/x
@ mov r10,r0
@ mov r11,r1
@ movs r0,#0 @ ω=0
@ movs r1,#0
@ mov r8,r0
@ movs r2,#1
@ lsls r2,#30
@ mov r9,r2 @ x=1
@ adr r4,dtab_cc
@ mov r12,r4
@ movs r7,#1
@ movs r6,#31
@ 1:
@ bl dcordic_vec_step
@ adds r7,#1
@ subs r6,#1
@ cmp r7,#33
@ bne 1b
@ @ r0:r1 atan(y/x) Q62
@ @ r8:r9 x residual Q62
@ @ r10:r11 y residual Q62
@ mov r2,r9
@ mov r3,r10
@ subs r2,#12 @ this makes atan(0)==0
@ @ the following is basically a division residual y/x ~ atan(residual y/x)
@ movs r4,#1
@ lsls r4,#29
@ movs r7,#0
@ 2:
@ lsrs r2,#1
@ movs r3,r3 @ preserve carry
@ bmi 1f
@ sbcs r3,r2
@ adds r0,r4
@ adcs r1,r7
@ lsrs r4,#1
@ bne 2b
@ b 3f
@ 1:
@ adcs r3,r2
@ subs r0,r4
@ sbcs r1,r7
@ lsrs r4,#1
@ bne 2b
@ 3:
@ lsls r6,r1,#31
@ asrs r1,#1
@ lsrs r0,#1
@ orrs r0,r6 @ Q61
@ 12:
@ pop {r6}
@ cmp r6,#0
@ beq 1f
@ ldr r4,=#0x885A308D @ π/2 Q61
@ ldr r5,=#0x3243F6A8
@ bpl 2f
@ mvns r4,r4 @ negative quadrant offset
@ mvns r5,r5
@ 2:
@ lsls r6,#31
@ bne 2f @ skip if quadrant offset is ±1
@ adds r0,r4
@ adcs r1,r5
@ 2:
@ adds r0,r4
@ adcs r1,r5
@ 1:
@ movs r2,#61
@ bl qfp_fix642double
@ bl pop_r8_r11
@ pop {r4-r7,r15}
@ .ltorg
@ dtab_cc:
@ .word 0x61bb4f69, 0x1dac6705 @ atan 2^-1 Q62
@ .word 0x96406eb1, 0x0fadbafc @ atan 2^-2 Q62
@ .word 0xab0bdb72, 0x07f56ea6 @ atan 2^-3 Q62
@ .word 0xe59fbd39, 0x03feab76 @ atan 2^-4 Q62
@ .word 0xba97624b, 0x01ffd55b @ atan 2^-5 Q62
@ .word 0xdddb94d6, 0x00fffaaa @ atan 2^-6 Q62
@ .word 0x56eeea5d, 0x007fff55 @ atan 2^-7 Q62
@ .word 0xaab7776e, 0x003fffea @ atan 2^-8 Q62
@ .word 0x5555bbbc, 0x001ffffd @ atan 2^-9 Q62
@ .word 0xaaaaadde, 0x000fffff @ atan 2^-10 Q62
@ .word 0xf555556f, 0x0007ffff @ atan 2^-11 Q62
@ .word 0xfeaaaaab, 0x0003ffff @ atan 2^-12 Q62
@ .word 0xffd55555, 0x0001ffff @ atan 2^-13 Q62
@ .word 0xfffaaaab, 0x0000ffff @ atan 2^-14 Q62
@ .word 0xffff5555, 0x00007fff @ atan 2^-15 Q62
@ .word 0xffffeaab, 0x00003fff @ atan 2^-16 Q62
@ .word 0xfffffd55, 0x00001fff @ atan 2^-17 Q62
@ .word 0xffffffab, 0x00000fff @ atan 2^-18 Q62
@ .word 0xfffffff5, 0x000007ff @ atan 2^-19 Q62
@ .word 0xffffffff, 0x000003ff @ atan 2^-20 Q62
@ .word 0x00000000, 0x00000200 @ atan 2^-21 Q62 @ consider optimising these
@ .word 0x00000000, 0x00000100 @ atan 2^-22 Q62
@ .word 0x00000000, 0x00000080 @ atan 2^-23 Q62
@ .word 0x00000000, 0x00000040 @ atan 2^-24 Q62
@ .word 0x00000000, 0x00000020 @ atan 2^-25 Q62
@ .word 0x00000000, 0x00000010 @ atan 2^-26 Q62
@ .word 0x00000000, 0x00000008 @ atan 2^-27 Q62
@ .word 0x00000000, 0x00000004 @ atan 2^-28 Q62
@ .word 0x00000000, 0x00000002 @ atan 2^-29 Q62
@ .word 0x00000000, 0x00000001 @ atan 2^-30 Q62
@ .word 0x80000000, 0x00000000 @ atan 2^-31 Q62
@ .word 0x40000000, 0x00000000 @ atan 2^-32 Q62
@ .thumb_func
@ qfp_dexp:
@ push {r4-r7,r14}
@ bl dunpacks
@ adr r4,dreddata1
@ bl dreduce
@ cmp r1,#0
@ bge 1f
@ ldr r4,=#0xF473DE6B
@ ldr r5,=#0x2C5C85FD @ ln2 Q62
@ adds r0,r4
@ adcs r1,r5
@ subs r2,#1
@ 1:
@ push {r2}
@ movs r7,#1 @ shift
@ adr r6,dtab_exp
@ movs r2,#0
@ movs r3,#1
@ lsls r3,#30 @ x=1 Q62
@ 3:
@ ldmia r6!,{r4,r5}
@ mov r12,r6
@ subs r0,r4
@ sbcs r1,r5
@ bmi 1f
@ rsbs r6,r7,#0
@ adds r6,#32 @ complementary shift
@ movs r5,r3
@ asrs r5,r7
@ movs r4,r3
@ lsls r4,r6
@ movs r6,r2
@ lsrs r6,r7 @ rounding bit in carry
@ orrs r4,r6
@ adcs r2,r4
@ adcs r3,r5 @ x+=x>>i
@ b 2f
@ 1:
@ adds r0,r4 @ restore argument
@ adcs r1,r5
@ 2:
@ mov r6,r12
@ adds r7,#1
@ cmp r7,#33
@ bne 3b
@ @ here
@ @ r0:r1 ε (residual x, where x=a+ε) Q62, |ε|≤2^-32 (so fits in r0)
@ @ r2:r3 exp a Q62
@ @ and we wish to calculate exp x=exp a exp ε~(exp a)(1+ε)
@ muls32_32_64 r0,r3, r4,r1, r5,r6,r7,r4,r1
@ @ r4:r1 ε exp a Q(62+62-32)=Q92
@ lsrs r4,#30
@ lsls r0,r1,#2
@ orrs r0,r4
@ asrs r1,#30
@ adds r0,r2
@ adcs r1,r3
@ pop {r2}
@ rsbs r2,#0
@ adds r2,#62
@ bl qfp_fix642double @ in principle we can pack faster than this because we know the exponent
@ pop {r4-r7,r15}
@ .ltorg
@ .thumb_func
@ qfp_dln:
@ push {r4-r7,r14}
@ lsls r7,r1,#1
@ bcs 5f @ <0 ...
@ asrs r7,#21
@ beq 5f @ ... or =0? return -Inf
@ adds r7,#1
@ beq 6f @ Inf/NaN? return +Inf
@ bl dunpacks
@ push {r2}
@ lsls r1,#9
@ lsrs r2,r0,#23
@ orrs r1,r2
@ lsls r0,#9
@ @ r0:r1 m Q61 = m/2 Q62 0.5≤m/2<1
@ movs r7,#1 @ shift
@ adr r6,dtab_exp
@ mov r12,r6
@ movs r2,#0
@ movs r3,#0 @ y=0 Q62
@ 3:
@ rsbs r6,r7,#0
@ adds r6,#32 @ complementary shift
@ movs r5,r1
@ asrs r5,r7
@ movs r4,r1
@ lsls r4,r6
@ movs r6,r0
@ lsrs r6,r7
@ orrs r4,r6 @ x>>i, rounding bit in carry
@ adcs r4,r0
@ adcs r5,r1 @ x+(x>>i)
@ lsrs r6,r5,#30
@ bne 1f @ x+(x>>i)>1?
@ movs r0,r4
@ movs r1,r5 @ x+=x>>i
@ mov r6,r12
@ ldmia r6!,{r4,r5}
@ subs r2,r4
@ sbcs r3,r5
@ 1:
@ movs r4,#8
@ add r12,r4
@ adds r7,#1
@ cmp r7,#33
@ bne 3b
@ @ here:
@ @ r0:r1 residual x, nearly 1 Q62
@ @ r2:r3 y ~ ln m/2 = ln m - ln2 Q62
@ @ result is y + ln2 + ln x ~ y + ln2 + (x-1)
@ lsls r1,#2
@ asrs r1,#2 @ x-1
@ adds r2,r0
@ adcs r3,r1
@ pop {r7}
@ @ here:
@ @ r2:r3 ln m/2 = ln m - ln2 Q62
@ @ r7 unbiased exponent
@ adr r4,dreddata1+4
@ ldmia r4,{r0,r1,r4}
@ adds r7,#1
@ muls r0,r7 @ Q62
@ muls r1,r7 @ Q41
@ muls r4,r7 @ Q20
@ lsls r7,r1,#21
@ asrs r1,#11
@ asrs r5,r1,#31
@ adds r0,r7
@ adcs r1,r5
@ lsls r7,r4,#10
@ asrs r4,#22
@ asrs r5,r1,#31
@ adds r1,r7
@ adcs r4,r5
@ @ r0:r1:r4 exponent*ln2 Q62
@ asrs r5,r3,#31
@ adds r0,r2
@ adcs r1,r3
@ adcs r4,r5
@ @ r0:r1:r4 result Q62
@ movs r2,#62
@ 1:
@ asrs r5,r1,#31
@ cmp r4,r5
@ beq 2f @ r4 a sign extension of r1?
@ lsrs r0,#4 @ no: shift down 4 places and try again
@ lsls r6,r1,#28
@ orrs r0,r6
@ lsrs r1,#4
@ lsls r6,r4,#28
@ orrs r1,r6
@ asrs r4,#4
@ subs r2,#4
@ b 1b
@ 2:
@ bl qfp_fix642double
@ pop {r4-r7,r15}
@ 5:
@ ldr r1,=#0xfff00000
@ movs r0,#0
@ pop {r4-r7,r15}
@ 6:
@ ldr r1,=#0x7ff00000
@ movs r0,#0
@ pop {r4-r7,r15}
@ .ltorg
@ .align 2
@ dreddata1:
@ .word 0x0000B8AA @ 1/ln2 Q15
@ .word 0x0013DE6B @ ln2 Q62 Q62=2C5C85FDF473DE6B split into 21-bit pieces
@ .word 0x000FEFA3
@ .word 0x000B1721
@ dtab_exp:
@ .word 0xbf984bf3, 0x19f323ec @ log 1+2^-1 Q62
@ .word 0xcd4d10d6, 0x0e47fbe3 @ log 1+2^-2 Q62
@ .word 0x8abcb97a, 0x0789c1db @ log 1+2^-3 Q62
@ .word 0x022c54cc, 0x03e14618 @ log 1+2^-4 Q62
@ .word 0xe7833005, 0x01f829b0 @ log 1+2^-5 Q62
@ .word 0x87e01f1e, 0x00fe0545 @ log 1+2^-6 Q62
@ .word 0xac419e24, 0x007f80a9 @ log 1+2^-7 Q62
@ .word 0x45621781, 0x003fe015 @ log 1+2^-8 Q62
@ .word 0xa9ab10e6, 0x001ff802 @ log 1+2^-9 Q62
@ .word 0x55455888, 0x000ffe00 @ log 1+2^-10 Q62
@ .word 0x0aa9aac4, 0x0007ff80 @ log 1+2^-11 Q62
@ .word 0x01554556, 0x0003ffe0 @ log 1+2^-12 Q62
@ .word 0x002aa9ab, 0x0001fff8 @ log 1+2^-13 Q62
@ .word 0x00055545, 0x0000fffe @ log 1+2^-14 Q62
@ .word 0x8000aaaa, 0x00007fff @ log 1+2^-15 Q62
@ .word 0xe0001555, 0x00003fff @ log 1+2^-16 Q62
@ .word 0xf80002ab, 0x00001fff @ log 1+2^-17 Q62
@ .word 0xfe000055, 0x00000fff @ log 1+2^-18 Q62
@ .word 0xff80000b, 0x000007ff @ log 1+2^-19 Q62
@ .word 0xffe00001, 0x000003ff @ log 1+2^-20 Q62
@ .word 0xfff80000, 0x000001ff @ log 1+2^-21 Q62
@ .word 0xfffe0000, 0x000000ff @ log 1+2^-22 Q62
@ .word 0xffff8000, 0x0000007f @ log 1+2^-23 Q62
@ .word 0xffffe000, 0x0000003f @ log 1+2^-24 Q62
@ .word 0xfffff800, 0x0000001f @ log 1+2^-25 Q62
@ .word 0xfffffe00, 0x0000000f @ log 1+2^-26 Q62
@ .word 0xffffff80, 0x00000007 @ log 1+2^-27 Q62
@ .word 0xffffffe0, 0x00000003 @ log 1+2^-28 Q62
@ .word 0xfffffff8, 0x00000001 @ log 1+2^-29 Q62
@ .word 0xfffffffe, 0x00000000 @ log 1+2^-30 Q62
@ .word 0x80000000, 0x00000000 @ log 1+2^-31 Q62
@ .word 0x40000000, 0x00000000 @ log 1+2^-32 Q62
qfp_lib_end:
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,314
|
usr/initramfs_data.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
initramfs_data includes the compressed binary that is the
filesystem used for early user space.
Note: Older versions of "as" (prior to binutils 2.11.90.0.23
released on 2001-07-14) did not support .incbin.
If you are forced to use older binutils than that then the
following trick can be applied to create the resulting binary:
ld -m elf_i386 --format binary --oformat elf32-i386 -r \
-T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
ld -m elf_i386 -r -o built-in.a initramfs_data.o
For including the .init.ramfs sections, see include/asm-generic/vmlinux.lds.
The above example is for i386 - the parameters vary from architectures.
Eventually look up LDFLAGS_BLOB in an older version of the
arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
Using .incbin has the advantage over ld that the correct flags are set
in the ELF header, as required by certain architectures.
*/
#include <linux/stringify.h>
#include <asm-generic/vmlinux.lds.h>
.section .init.ramfs,"a"
__irf_start:
.incbin __stringify(INITRAMFS_IMAGE)
__irf_end:
.section .init.ramfs.info,"a"
.globl __initramfs_size
__initramfs_size:
#ifdef CONFIG_64BIT
.quad __irf_end - __irf_start
#else
.long __irf_end - __irf_start
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,309
|
Documentation/EDID/1680x1050.S
|
/*
1680x1050.S: EDID data set for standard 1680x1050 60 Hz monitor
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 146250 /* kHz */
#define XPIX 1680
#define YPIX 1050
#define XY_RATIO XY_RATIO_16_10
#define XBLANK 560
#define YBLANK 39
#define XOFFSET 104
#define XPULSE 176
#define YOFFSET (63+3)
#define YPULSE (63+6)
#define DPI 96
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux WSXGA"
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0x26
#include "edid.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,155
|
Documentation/EDID/800x600.S
|
/*
800x600.S: EDID data set for standard 800x600 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
Copyright (C) 2014 Linaro Limited
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 40000 /* kHz */
#define XPIX 800
#define YPIX 600
#define XY_RATIO XY_RATIO_4_3
#define XBLANK 256
#define YBLANK 28
#define XOFFSET 40
#define XPULSE 128
#define YOFFSET (63+1)
#define YPULSE (63+4)
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux SVGA"
#define ESTABLISHED_TIMING1_BITS 0x01 /* Bit 0: 800x600 @ 60Hz */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0xc2
#include "edid.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,305
|
Documentation/EDID/1600x1200.S
|
/*
1600x1200.S: EDID data set for standard 1600x1200 60 Hz monitor
Copyright (C) 2013 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 162000 /* kHz */
#define XPIX 1600
#define YPIX 1200
#define XY_RATIO XY_RATIO_4_3
#define XBLANK 560
#define YBLANK 50
#define XOFFSET 64
#define XPULSE 192
#define YOFFSET (63+1)
#define YPULSE (63+3)
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux UXGA"
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0x9d
#include "edid.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,305
|
Documentation/EDID/1280x1024.S
|
/*
1280x1024.S: EDID data set for standard 1280x1024 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 108000 /* kHz */
#define XPIX 1280
#define YPIX 1024
#define XY_RATIO XY_RATIO_5_4
#define XBLANK 408
#define YBLANK 42
#define XOFFSET 48
#define XPULSE 112
#define YOFFSET (63+1)
#define YPULSE (63+3)
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux SXGA"
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
#define CRC 0xa0
#include "edid.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,709
|
Documentation/EDID/edid.S
|
/*
edid.S: EDID data template
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Manufacturer */
#define MFG_LNX1 'L'
#define MFG_LNX2 'N'
#define MFG_LNX3 'X'
#define SERIAL 0
#define YEAR 2012
#define WEEK 5
/* EDID 1.3 standard definitions */
#define XY_RATIO_16_10 0b00
#define XY_RATIO_4_3 0b01
#define XY_RATIO_5_4 0b10
#define XY_RATIO_16_9 0b11
/* Provide defaults for the timing bits */
#ifndef ESTABLISHED_TIMING1_BITS
#define ESTABLISHED_TIMING1_BITS 0x00
#endif
#ifndef ESTABLISHED_TIMING2_BITS
#define ESTABLISHED_TIMING2_BITS 0x00
#endif
#ifndef ESTABLISHED_TIMING3_BITS
#define ESTABLISHED_TIMING3_BITS 0x00
#endif
#define mfgname2id(v1,v2,v3) \
((((v1-'@')&0x1f)<<10)+(((v2-'@')&0x1f)<<5)+((v3-'@')&0x1f))
#define swap16(v1) ((v1>>8)+((v1&0xff)<<8))
#define msbs2(v1,v2) ((((v1>>8)&0x0f)<<4)+((v2>>8)&0x0f))
#define msbs4(v1,v2,v3,v4) \
(((v1&0x03)>>2)+((v2&0x03)>>4)+((v3&0x03)>>6)+((v4&0x03)>>8))
#define pixdpi2mm(pix,dpi) ((pix*25)/dpi)
#define xsize pixdpi2mm(XPIX,DPI)
#define ysize pixdpi2mm(YPIX,DPI)
.data
/* Fixed header pattern */
header: .byte 0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x00
mfg_id: .hword swap16(mfgname2id(MFG_LNX1, MFG_LNX2, MFG_LNX3))
prod_code: .hword 0
/* Serial number. 32 bits, little endian. */
serial_number: .long SERIAL
/* Week of manufacture */
week: .byte WEEK
/* Year of manufacture, less 1990. (1990-2245)
If week=255, it is the model year instead */
year: .byte YEAR-1990
version: .byte VERSION /* EDID version, usually 1 (for 1.3) */
revision: .byte REVISION /* EDID revision, usually 3 (for 1.3) */
/* If Bit 7=1 Digital input. If set, the following bit definitions apply:
Bits 6-1 Reserved, must be 0
Bit 0 Signal is compatible with VESA DFP 1.x TMDS CRGB,
1 pixel per clock, up to 8 bits per color, MSB aligned,
If Bit 7=0 Analog input. If clear, the following bit definitions apply:
Bits 6-5 Video white and sync levels, relative to blank
00=+0.7/-0.3 V; 01=+0.714/-0.286 V;
10=+1.0/-0.4 V; 11=+0.7/0 V
Bit 4 Blank-to-black setup (pedestal) expected
Bit 3 Separate sync supported
Bit 2 Composite sync (on HSync) supported
Bit 1 Sync on green supported
Bit 0 VSync pulse must be serrated when somposite or
sync-on-green is used. */
video_parms: .byte 0x6d
/* Maximum horizontal image size, in centimetres
(max 292 cm/115 in at 16:9 aspect ratio) */
max_hor_size: .byte xsize/10
/* Maximum vertical image size, in centimetres.
If either byte is 0, undefined (e.g. projector) */
max_vert_size: .byte ysize/10
/* Display gamma, minus 1, times 100 (range 1.00-3.5 */
gamma: .byte 120
/* Bit 7 DPMS standby supported
Bit 6 DPMS suspend supported
Bit 5 DPMS active-off supported
Bits 4-3 Display type: 00=monochrome; 01=RGB colour;
10=non-RGB multicolour; 11=undefined
Bit 2 Standard sRGB colour space. Bytes 25-34 must contain
sRGB standard values.
Bit 1 Preferred timing mode specified in descriptor block 1.
Bit 0 GTF supported with default parameter values. */
dsp_features: .byte 0xea
/* Chromaticity coordinates. */
/* Red and green least-significant bits
Bits 7-6 Red x value least-significant 2 bits
Bits 5-4 Red y value least-significant 2 bits
Bits 3-2 Green x value lst-significant 2 bits
Bits 1-0 Green y value least-significant 2 bits */
red_green_lsb: .byte 0x5e
/* Blue and white least-significant 2 bits */
blue_white_lsb: .byte 0xc0
/* Red x value most significant 8 bits.
0-255 encodes 0-0.996 (255/256); 0-0.999 (1023/1024) with lsbits */
red_x_msb: .byte 0xa4
/* Red y value most significant 8 bits */
red_y_msb: .byte 0x59
/* Green x and y value most significant 8 bits */
green_x_y_msb: .byte 0x4a,0x98
/* Blue x and y value most significant 8 bits */
blue_x_y_msb: .byte 0x25,0x20
/* Default white point x and y value most significant 8 bits */
white_x_y_msb: .byte 0x50,0x54
/* Established timings */
/* Bit 7 720x400 @ 70 Hz
Bit 6 720x400 @ 88 Hz
Bit 5 640x480 @ 60 Hz
Bit 4 640x480 @ 67 Hz
Bit 3 640x480 @ 72 Hz
Bit 2 640x480 @ 75 Hz
Bit 1 800x600 @ 56 Hz
Bit 0 800x600 @ 60 Hz */
estbl_timing1: .byte ESTABLISHED_TIMING1_BITS
/* Bit 7 800x600 @ 72 Hz
Bit 6 800x600 @ 75 Hz
Bit 5 832x624 @ 75 Hz
Bit 4 1024x768 @ 87 Hz, interlaced (1024x768)
Bit 3 1024x768 @ 60 Hz
Bit 2 1024x768 @ 72 Hz
Bit 1 1024x768 @ 75 Hz
Bit 0 1280x1024 @ 75 Hz */
estbl_timing2: .byte ESTABLISHED_TIMING2_BITS
/* Bit 7 1152x870 @ 75 Hz (Apple Macintosh II)
Bits 6-0 Other manufacturer-specific display mod */
estbl_timing3: .byte ESTABLISHED_TIMING3_BITS
/* Standard timing */
/* X resolution, less 31, divided by 8 (256-2288 pixels) */
std_xres: .byte (XPIX/8)-31
/* Y resolution, X:Y pixel ratio
Bits 7-6 X:Y pixel ratio: 00=16:10; 01=4:3; 10=5:4; 11=16:9.
Bits 5-0 Vertical frequency, less 60 (60-123 Hz) */
std_vres: .byte (XY_RATIO<<6)+VFREQ-60
.fill 7,2,0x0101 /* Unused */
descriptor1:
/* Pixel clock in 10 kHz units. (0.-655.35 MHz, little-endian) */
clock: .hword CLOCK/10
/* Horizontal active pixels 8 lsbits (0-4095) */
x_act_lsb: .byte XPIX&0xff
/* Horizontal blanking pixels 8 lsbits (0-4095)
End of active to start of next active. */
x_blk_lsb: .byte XBLANK&0xff
/* Bits 7-4 Horizontal active pixels 4 msbits
Bits 3-0 Horizontal blanking pixels 4 msbits */
x_msbs: .byte msbs2(XPIX,XBLANK)
/* Vertical active lines 8 lsbits (0-4095) */
y_act_lsb: .byte YPIX&0xff
/* Vertical blanking lines 8 lsbits (0-4095) */
y_blk_lsb: .byte YBLANK&0xff
/* Bits 7-4 Vertical active lines 4 msbits
Bits 3-0 Vertical blanking lines 4 msbits */
y_msbs: .byte msbs2(YPIX,YBLANK)
/* Horizontal sync offset pixels 8 lsbits (0-1023) From blanking start */
x_snc_off_lsb: .byte XOFFSET&0xff
/* Horizontal sync pulse width pixels 8 lsbits (0-1023) */
x_snc_pls_lsb: .byte XPULSE&0xff
/* Bits 7-4 Vertical sync offset lines 4 lsbits -63)
Bits 3-0 Vertical sync pulse width lines 4 lsbits -63) */
y_snc_lsb: .byte ((YOFFSET-63)<<4)+(YPULSE-63)
/* Bits 7-6 Horizontal sync offset pixels 2 msbits
Bits 5-4 Horizontal sync pulse width pixels 2 msbits
Bits 3-2 Vertical sync offset lines 2 msbits
Bits 1-0 Vertical sync pulse width lines 2 msbits */
xy_snc_msbs: .byte msbs4(XOFFSET,XPULSE,YOFFSET,YPULSE)
/* Horizontal display size, mm, 8 lsbits (0-4095 mm, 161 in) */
x_dsp_size: .byte xsize&0xff
/* Vertical display size, mm, 8 lsbits (0-4095 mm, 161 in) */
y_dsp_size: .byte ysize&0xff
/* Bits 7-4 Horizontal display size, mm, 4 msbits
Bits 3-0 Vertical display size, mm, 4 msbits */
dsp_size_mbsb: .byte msbs2(xsize,ysize)
/* Horizontal border pixels (each side; total is twice this) */
x_border: .byte 0
/* Vertical border lines (each side; total is twice this) */
y_border: .byte 0
/* Bit 7 Interlaced
Bits 6-5 Stereo mode: 00=No stereo; other values depend on bit 0:
Bit 0=0: 01=Field sequential, sync=1 during right; 10=similar,
sync=1 during left; 11=4-way interleaved stereo
Bit 0=1 2-way interleaved stereo: 01=Right image on even lines;
10=Left image on even lines; 11=side-by-side
Bits 4-3 Sync type: 00=Analog composite; 01=Bipolar analog composite;
10=Digital composite (on HSync); 11=Digital separate
Bit 2 If digital separate: Vertical sync polarity (1=positive)
Other types: VSync serrated (HSync during VSync)
Bit 1 If analog sync: Sync on all 3 RGB lines (else green only)
Digital: HSync polarity (1=positive)
Bit 0 2-way line-interleaved stereo, if bits 4-3 are not 00. */
features: .byte 0x18+(VSYNC_POL<<2)+(HSYNC_POL<<1)
descriptor2: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xff /* Descriptor is monitor serial number (text) */
.byte 0 /* Must be zero */
start1: .ascii "Linux #0"
end1: .byte 0x0a /* End marker */
.fill 12-(end1-start1), 1, 0x20 /* Padded spaces */
descriptor3: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xfd /* Descriptor is monitor range limits */
.byte 0 /* Must be zero */
start2: .byte VFREQ-1 /* Minimum vertical field rate (1-255 Hz) */
.byte VFREQ+1 /* Maximum vertical field rate (1-255 Hz) */
.byte (CLOCK/(XPIX+XBLANK))-1 /* Minimum horizontal line rate
(1-255 kHz) */
.byte (CLOCK/(XPIX+XBLANK))+1 /* Maximum horizontal line rate
(1-255 kHz) */
.byte (CLOCK/10000)+1 /* Maximum pixel clock rate, rounded up
to 10 MHz multiple (10-2550 MHz) */
.byte 0 /* No extended timing information type */
end2: .byte 0x0a /* End marker */
.fill 12-(end2-start2), 1, 0x20 /* Padded spaces */
descriptor4: .byte 0,0 /* Not a detailed timing descriptor */
.byte 0 /* Must be zero */
.byte 0xfc /* Descriptor is text */
.byte 0 /* Must be zero */
start3: .ascii TIMING_NAME
end3: .byte 0x0a /* End marker */
.fill 12-(end3-start3), 1, 0x20 /* Padded spaces */
extensions: .byte 0 /* Number of extensions to follow */
checksum: .byte CRC /* Sum of all bytes must be 0 */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,334
|
Documentation/EDID/1024x768.S
|
/*
1024x768.S: EDID data set for standard 1024x768 60 Hz monitor
Copyright (C) 2011 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID structure version implemented by the shared template (1.3). */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 65000 /* kHz */
#define XPIX 1024
#define YPIX 768
#define XY_RATIO XY_RATIO_4_3
#define XBLANK 320
#define YBLANK 38
#define XOFFSET 8
#define XPULSE 144
/* The vertical sync values carry a +63 bias; edid.S subtracts it again
 * when building y_snc_lsb, so the real vsync offset is 3 lines and the
 * real pulse width is 6 lines. */
#define YOFFSET (63+3)
#define YPULSE (63+6)
#define DPI 72
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux XGA"
#define ESTABLISHED_TIMING2_BITS 0x08 /* Bit 3 -> 1024x768 @60 Hz */
#define HSYNC_POL 0
#define VSYNC_POL 0
/* Final byte: chosen so all 128 bytes of the block sum to 0 mod 256. */
#define CRC 0x55
#include "edid.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,304
|
Documentation/EDID/1920x1080.S
|
/*
1920x1080.S: EDID data set for standard 1920x1080 60 Hz monitor
Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* EDID structure version implemented by the shared template (1.3). */
#define VERSION 1
#define REVISION 3
/* Display */
#define CLOCK 148500 /* kHz */
#define XPIX 1920
#define YPIX 1080
#define XY_RATIO XY_RATIO_16_9
#define XBLANK 280
#define YBLANK 45
#define XOFFSET 88
#define XPULSE 44
/* The vertical sync values carry a +63 bias; edid.S subtracts it again
 * when building y_snc_lsb, so the real vsync offset is 4 lines and the
 * real pulse width is 5 lines. */
#define YOFFSET (63+4)
#define YPULSE (63+5)
#define DPI 96
#define VFREQ 60 /* Hz */
#define TIMING_NAME "Linux FHD"
/* No ESTABLISHED_TIMINGx_BITS */
#define HSYNC_POL 1
#define VSYNC_POL 1
/* Final byte: chosen so all 128 bytes of the block sum to 0 mod 256. */
#define CRC 0x05
#include "edid.S"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,154
|
drivers/memory/ti-emif-sram-pm.S
|
/*
* Low level PM code for TI EMIF
*
* Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <generated/ti-emif-asm-offsets.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include "emif.h"
#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES 0x00a0
#define EMIF_POWER_MGMT_SR_TIMER_MASK 0x00f0
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE 0x0200
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK 0x0700
#define EMIF_SDCFG_TYPE_DDR2 0x2 << SDRAM_TYPE_SHIFT
#define EMIF_STATUS_READY 0x4
#define AM43XX_EMIF_PHY_CTRL_REG_COUNT 0x120
#define EMIF_AM437X_REGISTERS 0x1
.arm
.align 3
ENTRY(ti_emif_sram)
/*
* void ti_emif_save_context(void)
*
* Used during suspend to save the context of all required EMIF registers
* to local memory if the EMIF is going to lose context during the sleep
* transition. Operates on the VIRTUAL address of the EMIF.
*/
ENTRY(ti_emif_save_context)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
@ r4 = PC-relative handle to the SRAM data area;
@ r0 = EMIF base (VIRTUAL), r2 = register save area (VIRTUAL)
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
/* Save EMIF configuration: each ldr/str pair copies one live EMIF
 * register (via r1) into its slot in the save area. */
ldr r1, [r0, #EMIF_SDRAM_CONFIG]
str r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
str r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_1]
str r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_2]
str r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_3]
str r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
str r1, [r2, #EMIF_PMCR_VAL_OFFSET]
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
str r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
str r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
ldr r1, [r0, #EMIF_DDR_PHY_CTRL_1]
str r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
ldr r1, [r0, #EMIF_COS_CONFIG]
str r1, [r2, #EMIF_COS_CONFIG_OFFSET]
ldr r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
str r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
ldr r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
str r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
ldr r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
str r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
ldr r1, [r0, #EMIF_OCP_CONFIG]
str r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
@ AM437x register layout has extra registers to preserve
ldr r5, [r4, #EMIF_PM_CONFIG_OFFSET]
cmp r5, #EMIF_SRAM_AM43_REG_LAYOUT
bne emif_skip_save_extra_regs
ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
str r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
ldr r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
str r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
ldr r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
str r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
ldr r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
str r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
ldr r1, [r0, #EMIF_DLL_CALIB_CTRL]
str r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
ldr r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
str r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
/* Loop and save entire block of emif phy regs */
mov r5, #0x0 @ r5 = byte offset into the PHY register block
add r4, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET @ r4 = destination (save area)
add r3, r0, #EMIF_EXT_PHY_CTRL_1 @ r3 = source (EMIF PHY registers)
ddr_phy_ctrl_save:
ldr r1, [r3, r5]
str r1, [r4, r5]
add r5, r5, #0x4
cmp r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
bne ddr_phy_ctrl_save
emif_skip_save_extra_regs:
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_save_context)
/*
* void ti_emif_restore_context(void)
*
* Used during resume to restore the context of all required EMIF registers
* from local memory after the EMIF has lost context during a sleep transition.
* Operates on the PHYSICAL address of the EMIF.
*/
ENTRY(ti_emif_restore_context)
@ Leaf routine (returns via mov pc, lr); uses no stack because it may
@ run before DDR is usable. Operates on PHYSICAL addresses.
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET] @ r0 = EMIF base (physical)
ldr r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET] @ r2 = saved-register area (physical)
/* Config EMIF Timings: write each saved value back to both the live
 * register and, where one exists, its shadow register. */
ldr r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
str r1, [r0, #EMIF_DDR_PHY_CTRL_1]
str r1, [r0, #EMIF_DDR_PHY_CTRL_1_SHDW]
ldr r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_1]
str r1, [r0, #EMIF_SDRAM_TIMING_1_SHDW]
ldr r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_2]
str r1, [r0, #EMIF_SDRAM_TIMING_2_SHDW]
ldr r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_3]
str r1, [r0, #EMIF_SDRAM_TIMING_3_SHDW]
ldr r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
str r1, [r0, #EMIF_SDRAM_REFRESH_CTRL_SHDW]
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
ldr r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
str r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
ldr r1, [r2, #EMIF_COS_CONFIG_OFFSET]
str r1, [r0, #EMIF_COS_CONFIG]
ldr r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
str r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
ldr r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
str r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
ldr r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
str r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
ldr r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
str r1, [r0, #EMIF_OCP_CONFIG]
@ AM437x register layout has extra registers to restore
ldr r5, [r4, #EMIF_PM_CONFIG_OFFSET]
cmp r5, #EMIF_SRAM_AM43_REG_LAYOUT
bne emif_skip_restore_extra_regs
ldr r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
str r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
ldr r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
str r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
ldr r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
str r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
ldr r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
str r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
ldr r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
str r1, [r0, #EMIF_DLL_CALIB_CTRL]
ldr r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
str r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
ldr r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
/* Loop and restore entire block of emif phy regs */
mov r5, #0x0 @ r5 = byte offset into the PHY register block
/* Load ti_emif_regs_amx3 + EMIF_EXT_PHY_CTRL_VALS_OFFSET for address
 * to phy register save space
 */
add r3, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET @ r3 = source (save area)
add r4, r0, #EMIF_EXT_PHY_CTRL_1 @ r4 = destination (EMIF PHY registers)
ddr_phy_ctrl_restore:
ldr r1, [r3, r5]
str r1, [r4, r5]
add r5, r5, #0x4
cmp r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
bne ddr_phy_ctrl_restore
emif_skip_restore_extra_regs:
/*
 * Output impedence calib needed only for DDR3
 * but since the initial state of this will be
 * disabled for DDR2 no harm in restoring the
 * old configuration
 */
ldr r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
/* Write to sdcfg last for DDR2 only (r2 is repurposed here to hold the
 * SDRAM type field extracted from the saved config word). */
ldr r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
and r2, r1, #SDRAM_TYPE_MASK
cmp r2, #EMIF_SDCFG_TYPE_DDR2
streq r1, [r0, #EMIF_SDRAM_CONFIG]
mov pc, lr
ENDPROC(ti_emif_restore_context)
/*
* void ti_emif_enter_sr(void)
*
* Programs the EMIF to tell the SDRAM to enter into self-refresh
* mode during a sleep transition. Operates on the VIRTUAL address
* of the EMIF.
*/
ENTRY(ti_emif_enter_sr)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET] @ r0 = EMIF base (virtual)
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
@ Read-modify-write PWR_MGT_CTRL: clear the LP-mode field, then select
@ self-refresh so the SDRAM preserves contents across the sleep state.
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
orr r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_enter_sr)
/*
* void ti_emif_exit_sr(void)
*
* Programs the EMIF to tell the SDRAM to exit self-refresh mode
* after a sleep transition. Operates on the PHYSICAL address of
* the EMIF.
*/
ENTRY(ti_emif_exit_sr)
@ Leaf routine, no stack (DDR may not be usable yet); PHYSICAL addresses.
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET] @ r0 = EMIF base (physical)
ldr r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET] @ r2 = saved-register area (physical)
/*
 * Toggle EMIF to exit refresh mode:
 * if EMIF lost context, PWR_MGT_CTRL is currently 0, writing disable
 * (0x0), wont do diddly squat! so do a toggle from SR(0x2) to disable
 * (0x0) here.
 * *If* EMIF did not lose context, nothing broken as we write the same
 * value(0x2) to reg before we write a disable (0x0).
 */
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
orr r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
/* Wait for EMIF to become ready (busy-poll the READY status bit) */
1: ldr r1, [r0, #EMIF_STATUS]
tst r1, #EMIF_STATUS_READY
beq 1b
mov pc, lr
ENDPROC(ti_emif_exit_sr)
/*
* void ti_emif_abort_sr(void)
*
* Disables self-refresh after a failed transition to a low-power
* state so the kernel can jump back to DDR and follow abort path.
* Operates on the VIRTUAL address of the EMIF.
*/
ENTRY(ti_emif_abort_sr)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET] @ r0 = EMIF base (virtual)
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET] @ r2 = saved-register area (virtual)
@ Restore the saved PWR_MGT_CTRL value with the self-refresh field
@ cleared, taking the SDRAM back out of self-refresh.
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
/* Wait for EMIF to become ready (busy-poll the READY status bit) */
1: ldr r1, [r0, #EMIF_STATUS]
tst r1, #EMIF_STATUS_READY
beq 1b
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_abort_sr)
.align 3
ENTRY(ti_emif_pm_sram_data)
.space EMIF_PM_DATA_SIZE
ENTRY(ti_emif_sram_sz)
.word . - ti_emif_save_context
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,692
|
drivers/block/swim_asm.S
|
/*
* low-level functions for the SWIM floppy controller
*
* needs assembly language because is very timing dependent
* this controller exists only on macintosh 680x0 based
*
* Copyright (C) 2004,2008 Laurent Vivier <Laurent@lvivier.info>
*
* based on Alastair Bridgewater SWIM analysis, 2001
* based on netBSD IWM driver (c) 1997, 1998 Hauke Fath.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* 2004-08-21 (lv) - Initial implementation
* 2008-11-05 (lv) - add get_swim_mode
*/
/* SWIM register file: each register is selected by its address offset
 * from the chip base (write registers 0x0000-0x0e00, read registers
 * 0x1000-0x1e00). */
.equ write_data, 0x0000
.equ write_mark, 0x0200
.equ write_CRC, 0x0400
.equ write_parameter,0x0600
.equ write_phase, 0x0800
.equ write_setup, 0x0a00
.equ write_mode0, 0x0c00
.equ write_mode1, 0x0e00
.equ read_data, 0x1000
.equ read_mark, 0x1200
.equ read_error, 0x1400
.equ read_parameter, 0x1600
.equ read_phase, 0x1800
.equ read_setup, 0x1a00
.equ read_status, 0x1c00
.equ read_handshake, 0x1e00
/* Byte offsets into the sector-header buffer filled by
 * mfm_read_addrmark. */
.equ o_side, 0
.equ o_track, 1
.equ o_sector, 2
.equ o_size, 3
.equ o_crc0, 4
.equ o_crc1, 5
/* Polling budgets: seek_time bounds the hunt for an address/data mark,
 * max_retry bounds the wait for each subsequent byte. */
.equ seek_time, 30000
.equ max_retry, 40
.equ sector_size, 512
.global swim_read_sector_header
/* C-callable wrapper; presumably called as (base, header) — the first
 * stack argument (%a6@(0x08)) is read as the SWIM base by
 * mfm_read_addrmark and the second becomes the output buffer in %a4.
 * TODO confirm signature against the caller in swim.c.
 * Result in %d0: 0 on success, -1 on error. Preserves %d1-%d5/%a0-%a4. */
swim_read_sector_header:
link %a6, #0
moveml %d1-%d5/%a0-%a4,%sp@-
movel %a6@(0x0c), %a4
bsr mfm_read_addrmark
moveml %sp@+, %d1-%d5/%a0-%a4
unlk %a6
rts
sector_address_mark:
.byte 0xa1, 0xa1, 0xa1, 0xfe /* MFM ID address mark: 3 sync bytes + 0xfe */
sector_data_mark:
.byte 0xa1, 0xa1, 0xa1, 0xfb /* MFM data address mark: 3 sync bytes + 0xfb */
/* mfm_read_addrmark: hunt for a sector ID address mark and copy the ID
 * field (track, side, sector, size, CRC) into the buffer at %a4.
 * In:  %a6@(0x08) = SWIM base, %a4 = 6-byte header buffer (o_* offsets).
 * Out: %d0 = 0 on success, -1 on per-byte timeout.
 * %a2 -> read_handshake register, %a3 -> read_mark register. */
mfm_read_addrmark:
movel %a6@(0x08), %a3
lea %a3@(read_handshake), %a2
lea %a3@(read_mark), %a2
.global swim_read_sector_data
/* C-callable wrapper; presumably called as (base, buffer) — the first
 * stack argument (%a6@(0x08)) is read as the SWIM base by mfm_read_data
 * and the second becomes the destination pointer in %a4.
 * TODO confirm signature against the caller in swim.c.
 * Result in %d0: bytes read, or -1 on error. Preserves %d1-%d5/%a0-%a5. */
swim_read_sector_data:
link %a6, #0
moveml %d1-%d5/%a0-%a5,%sp@-
movel %a6@(0x0c), %a4
bsr mfm_read_data
moveml %sp@+, %d1-%d5/%a0-%a5
unlk %a6
rts
/* mfm_read_data: hunt for a data address mark and copy one sector of
 * payload bytes into the buffer at %a4.
 * In:  %a6@(0x08) = SWIM base, %a4 = destination buffer (sector_size).
 * Out: %d0 = number of bytes read, or -1 on timeout/underrun.
 * %a2 -> read_handshake, %a5 -> read_data, %a3 -> read_mark. */
mfm_read_data:
movel %a6@(0x08), %a3
lea %a3@(read_handshake), %a2
lea %a3@(read_data), %a5
lea %a3@(read_mark), %a3
movew #seek_time, %d2 /* %d2 = mark-search timeout budget */
wait_data_init:
/* Re-arm the SWIM read logic: clear a pending error, pulse the mode
 * registers, then enable reading (chip-specific sequence — see the
 * SWIM datasheet / netBSD IWM driver this was based on). */
tstb %a3@(read_error - read_mark)
moveb #0x18, %a3@(write_mode0 - read_mark)
moveb #0x01, %a3@(write_mode1 - read_mark)
moveb #0x01, %a3@(write_mode0 - read_mark)
tstb %a3@(read_error - read_mark)
moveb #0x08, %a3@(write_mode1 - read_mark)
lea sector_data_mark, %a0
moveq #3, %d1 /* %d1 = mark bytes still to match (4 total) */
/* wait data address mark */
wait_data_mark_byte:
tstb %a2@
dbmi %d2, wait_data_mark_byte
bpl data_exit
moveb %a3@, %d3
cmpb %a0@+, %d3
dbne %d1, wait_data_mark_byte
bne wait_data_init
/* read data */
tstb %a3@(read_error - read_mark)
movel #sector_size-1, %d4 /* %d4 = bytes remaining - 1 (dbcc counter) */
read_new_data:
movew #max_retry, %d2
read_data_loop:
/* Poll the handshake top bits; presumably bit7 = byte ready and
 * bit6 = a second byte is already buffered — TODO confirm. */
moveb %a2@, %d5
andb #0xc0, %d5
dbne %d2, read_data_loop
beq data_exit
moveb %a5@, %a4@+
andb #0x40, %d5
dbne %d4, read_new_data
beq exit_loop
moveb %a5@, %a4@+ /* drain the buffered second byte in the same pass */
dbra %d4, read_new_data
exit_loop:
/* read CRC (two bytes, consumed but not checked here) */
movew #max_retry, %d2
data_crc0:
tstb %a2@
dbmi %d2, data_crc0
bpl data_exit
moveb %a3@, %d5
moveq #max_retry, %d2
data_crc1:
tstb %a2@
dbmi %d2, data_crc1
bpl data_exit
moveb %a3@, %d5
tstb %a3@(read_error - read_mark)
moveb #0x18, %a3@(write_mode0 - read_mark)
/* return number of bytes read */
movel #sector_size, %d0
addw #1, %d4
subl %d4, %d0
rts
data_exit:
moveb #0x18, %a3@(write_mode0 - read_mark) /* disable read logic */
moveq #-1, %d0
rts
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,814
|
drivers/crypto/n2_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* n2_asm.S: Hypervisor calls for NCS support.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
#include "n2_core.h"
/* All routines below are thin sun4v hypervisor fast-trap wrappers:
 * the service number goes in %o5, "ta HV_FAST_TRAP" performs the call,
 * and the hypervisor's status comes back in %o0 (left there for the C
 * caller). Because results are returned in %o1-%o3, any caller-provided
 * result pointers are parked in spare registers before the trap.
 * "nop" fills the delay slot of "retl". */
/* o0: queue type
 * o1: RA of queue
 * o2: num entries in queue
 * o3: address of queue handle return
 */
ENTRY(sun4v_ncs_qconf)
mov HV_FAST_NCS_QCONF, %o5
ta HV_FAST_TRAP
stx %o1, [%o3] /* store returned queue handle */
retl
nop
ENDPROC(sun4v_ncs_qconf)
/* %o0: queue handle
 * %o1: address of queue type return
 * %o2: address of queue base address return
 * %o3: address of queue num entries return
 */
ENTRY(sun4v_ncs_qinfo)
mov %o1, %g1 /* park result pointers: trap overwrites %o1-%o3 */
mov %o2, %g2
mov %o3, %g3
mov HV_FAST_NCS_QINFO, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
stx %o3, [%g3]
retl
nop
ENDPROC(sun4v_ncs_qinfo)
/* %o0: queue handle
 * %o1: address of head offset return
 */
ENTRY(sun4v_ncs_gethead)
mov %o1, %o2
mov HV_FAST_NCS_GETHEAD, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_gethead)
/* %o0: queue handle
 * %o1: address of tail offset return
 */
ENTRY(sun4v_ncs_gettail)
mov %o1, %o2
mov HV_FAST_NCS_GETTAIL, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_gettail)
/* %o0: queue handle
 * %o1: new tail offset
 */
ENTRY(sun4v_ncs_settail)
mov HV_FAST_NCS_SETTAIL, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ncs_settail)
/* %o0: queue handle
 * %o1: address of devino return
 */
ENTRY(sun4v_ncs_qhandle_to_devino)
mov %o1, %o2
mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
nop
ENDPROC(sun4v_ncs_qhandle_to_devino)
/* %o0: queue handle
 * %o1: new head offset
 */
ENTRY(sun4v_ncs_sethead_marker)
mov HV_FAST_NCS_SETHEAD_MARKER, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ncs_sethead_marker)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,882
|
drivers/spi/spi-s3c24xx-fiq.S
|
/* linux/drivers/spi/spi_s3c24xx_fiq.S
*
* Copyright 2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX SPI - FIQ pseudo-DMA transfer code
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/map.h>
#include <mach/regs-irq.h>
#include <plat/regs-spi.h>
#include "spi-s3c24xx-fiq.h"
.text
@ entry to these routines is as follows, with the register names
@ defined in fiq.h so that they can be shared with the C files which
@ setup the calling registers.
@
@ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
@ fiq_rtmp Temporary register to hold tx/rx data
@ fiq_rspi The base of the SPI register block
@ fiq_rtx The tx buffer pointer
@ fiq_rrx The rx buffer pointer
@ fiq_rcount The number of bytes to move
@ each entry starts with a word entry of how long it is
@ and an offset to the irq acknowledgment word
@ Receive-only transfer: ack the FIQ, read one byte from the SPI data
@ register, store it to the rx buffer, and write 0xff as a dummy tx
@ byte to keep the clock running.
ENTRY(s3c24xx_spi_fiq_rx)
.word fiq_rx_end - fiq_rx_start
.word fiq_rx_irq_ack - fiq_rx_start
fiq_rx_start:
ldr fiq_rtmp, fiq_rx_irq_ack
str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] @ ack source
ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
strb fiq_rtmp, [ fiq_rrx ], #1 @ *rx++ = data
mov fiq_rtmp, #0xff @ dummy tx byte keeps the transfer clocking
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
subsne pc, lr, #4 @@ return, still have work to do
@@ set IRQ controller so that next op will trigger IRQ
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
subs pc, lr, #4 @ FIQ return to the interrupted instruction
fiq_rx_irq_ack:
.word 0 @ patched by the C setup code with the ack bitmask
fiq_rx_end:
@ Full-duplex transfer: read one rx byte and send the next tx byte.
ENTRY(s3c24xx_spi_fiq_txrx)
.word fiq_txrx_end - fiq_txrx_start
.word fiq_txrx_irq_ack - fiq_txrx_start
fiq_txrx_start:
ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
strb fiq_rtmp, [ fiq_rrx ], #1 @ *rx++ = data
ldr fiq_rtmp, fiq_txrx_irq_ack
str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] @ ack source
ldrb fiq_rtmp, [ fiq_rtx ], #1 @ next tx byte
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
subsne pc, lr, #4 @@ return, still have work to do
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
subs pc, lr, #4 @ FIQ return to the interrupted instruction
fiq_txrx_irq_ack:
.word 0 @ patched by the C setup code with the ack bitmask
fiq_txrx_end:
@ Transmit-only transfer: rx byte is read to drain the register but
@ discarded, then the next tx byte is sent.
ENTRY(s3c24xx_spi_fiq_tx)
.word fiq_tx_end - fiq_tx_start
.word fiq_tx_irq_ack - fiq_tx_start
fiq_tx_start:
ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ] @ drain rx, value unused
ldr fiq_rtmp, fiq_tx_irq_ack
str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ] @ ack source
ldrb fiq_rtmp, [ fiq_rtx ], #1 @ next tx byte
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
subsne pc, lr, #4 @@ return, still have work to do
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
subs pc, lr, #4 @ FIQ return to the interrupted instruction
fiq_tx_irq_ack:
.word 0 @ patched by the C setup code with the ack bitmask
fiq_tx_end:
.end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,042
|
drivers/watchdog/octeon-wdt-nmi.S
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007-2017 Cavium, Inc.
*/
#include <asm/asm.h>
#include <asm/regdef.h>
#define CVMSEG_BASE -32768
#define CVMSEG_SIZE 6912
#define SAVE_REG(r) sd $r, CVMSEG_BASE + CVMSEG_SIZE - ((32 - r) * 8)($0)
/* Second-stage NMI handler: grow CVMSEG, dump all 32 GPRs into the top
 * of CVMSEG, build a stack there, then tail into the C third stage with
 * a0 pointing at the register dump. Never returns. */
NESTED(octeon_wdt_nmi_stage2, 0, sp)
.set push
.set noreorder
.set noat
/* Clear Dcache so cvmseg works right. */
cache 1,0($0)
/* Use K0 to do a read/modify/write of CVMMEMCTL */
dmfc0 k0, $11, 7
/* Clear out the size of CVMSEG */
dins k0, $0, 0, 6
/* Set CVMSEG to its largest value */
ori k0, k0, 0x1c0 | 54
/* Store the CVMMEMCTL value */
dmtc0 k0, $11, 7
/*
 * Restore K0 from the debug scratch register, it was saved in
 * the boot-vector code.
 */
dmfc0 k0, $31
/*
 * Save all registers to the top CVMSEG. This shouldn't
 * corrupt any state used by the kernel. Also all registers
 * should have the value right before the NMI.
 */
SAVE_REG(0)
SAVE_REG(1)
SAVE_REG(2)
SAVE_REG(3)
SAVE_REG(4)
SAVE_REG(5)
SAVE_REG(6)
SAVE_REG(7)
SAVE_REG(8)
SAVE_REG(9)
SAVE_REG(10)
SAVE_REG(11)
SAVE_REG(12)
SAVE_REG(13)
SAVE_REG(14)
SAVE_REG(15)
SAVE_REG(16)
SAVE_REG(17)
SAVE_REG(18)
SAVE_REG(19)
SAVE_REG(20)
SAVE_REG(21)
SAVE_REG(22)
SAVE_REG(23)
SAVE_REG(24)
SAVE_REG(25)
SAVE_REG(26)
SAVE_REG(27)
SAVE_REG(28)
SAVE_REG(29)
SAVE_REG(30)
SAVE_REG(31)
/* Write zero to all CVMSEG locations per Core-15169 */
dli a0, CVMSEG_SIZE - (33 * 8)
1: sd zero, CVMSEG_BASE(a0)
daddiu a0, a0, -8
bgez a0, 1b
nop /* branch delay slot */
/* Set the stack to begin right below the registers */
dli sp, CVMSEG_BASE + CVMSEG_SIZE - (32 * 8)
/* Load the address of the third stage handler */
dla $25, octeon_wdt_nmi_stage3
/* Call the third stage handler */
jal $25
/* a0 is the address of the saved registers (set in the delay slot) */
move a0, sp
/* Loop forvever if we get here. */
2: b 2b
nop /* branch delay slot */
.set pop
END(octeon_wdt_nmi_stage2)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,538
|
drivers/char/hw_random/n2-asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* n2-asm.S: Niagara2 RNG hypervisor call assembler.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
#include "n2rng.h"
.text
/*
 * Hypervisor fast-trap stubs for the Niagara2 RNG.  Each stub loads the
 * function number into %o5, traps via HV_FAST_TRAP, and returns the
 * hypervisor status in %o0; additional output values are stored through
 * caller-supplied pointers (stashed in scratch regs before the trap,
 * since the trap overwrites %o0-%o4).
 */
ENTRY(sun4v_rng_get_diag_ctl)
	mov	HV_FAST_RNG_GET_DIAG_CTL, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_rng_get_diag_ctl)
/* v1 read: HV outputs %o1/%o2 stored through original %o1/%o2 pointers. */
ENTRY(sun4v_rng_ctl_read_v1)
	mov	%o1, %o3
	mov	%o2, %o4
	mov	HV_FAST_RNG_CTL_READ, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%o3]
	retl
	 stx	%o2, [%o4]
ENDPROC(sun4v_rng_ctl_read_v1)
/* v2 read: needs a register window so four result pointers (%i2-%i5)
 * survive the trap; status is propagated through the restore. */
ENTRY(sun4v_rng_ctl_read_v2)
	save	%sp, -192, %sp
	mov	%i0, %o0
	mov	%i1, %o1
	mov	HV_FAST_RNG_CTL_READ, %o5
	ta	HV_FAST_TRAP
	stx	%o1, [%i2]
	stx	%o2, [%i3]
	stx	%o3, [%i4]
	stx	%o4, [%i5]
	ret
	 restore %g0, %o0, %o0
ENDPROC(sun4v_rng_ctl_read_v2)
/* v1 write: HV output %o1 stored through original %o3 pointer. */
ENTRY(sun4v_rng_ctl_write_v1)
	mov	%o3, %o4
	mov	HV_FAST_RNG_CTL_WRITE, %o5
	ta	HV_FAST_TRAP
	retl
	 stx	%o1, [%o4]
ENDPROC(sun4v_rng_ctl_write_v1)
/* v2 write: no outputs beyond the status in %o0. */
ENTRY(sun4v_rng_ctl_write_v2)
	mov	HV_FAST_RNG_CTL_WRITE, %o5
	ta	HV_FAST_TRAP
	retl
	 nop
ENDPROC(sun4v_rng_ctl_write_v2)
/* Diagnostic data read, v1/v2: result word stored through the pointer arg. */
ENTRY(sun4v_rng_data_read_diag_v1)
	mov	%o2, %o4
	mov	HV_FAST_RNG_DATA_READ_DIAG, %o5
	ta	HV_FAST_TRAP
	retl
	 stx	%o1, [%o4]
ENDPROC(sun4v_rng_data_read_diag_v1)
ENTRY(sun4v_rng_data_read_diag_v2)
	mov	%o3, %o4
	mov	HV_FAST_RNG_DATA_READ_DIAG, %o5
	ta	HV_FAST_TRAP
	retl
	 stx	%o1, [%o4]
ENDPROC(sun4v_rng_data_read_diag_v2)
/* Normal data read: 64-bit random value stored through original %o1. */
ENTRY(sun4v_rng_data_read)
	mov	%o1, %o4
	mov	HV_FAST_RNG_DATA_READ, %o5
	ta	HV_FAST_TRAP
	retl
	 stx	%o1, [%o4]
ENDPROC(sun4v_rng_data_read)
|
airspy/airspyone_firmware
| 3,652
|
libopencm3/lib/linker.ld.S
|
/*
* This file is part of the libopencm3 project.
*
* Copyright (C) 2009 Uwe Hermann <uwe@hermann-uwe.de>
* Copyright (C) 2013 Frantisek Burian <BuFran@seznam.cz>
*
* This library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library. If not, see <http://www.gnu.org/licenses/>.
*/
/* Generic linker script for all targets using libopencm3.
 * Preprocessed with cpp: the build system defines RAM/ROM (and optional
 * extra regions) plus their *_OFF origins before this file is expanded. */
/* Enforce emission of the vector table. */
EXTERN(vector_table)
/* Define the entry point of the output file. */
ENTRY(reset_handler)
/* Define memory regions. */
MEMORY
{
	/* RAM is always used */
	ram (rwx) : ORIGIN = RAM_OFF, LENGTH = RAM
#if defined(ROM)
	rom (rx) : ORIGIN = ROM_OFF, LENGTH = ROM
#endif
#if defined(ROM2)
	rom2 (rx) : ORIGIN = ROM2_OFF, LENGTH = ROM2
#endif
#if defined(RAM1)
	ram1 (rwx) : ORIGIN = RAM1_OFF, LENGTH = RAM1
#endif
#if defined(RAM2)
	ram2 (rwx) : ORIGIN = RAM2_OFF, LENGTH = RAM2
#endif
#if defined(CCM)
	ccm (rwx) : ORIGIN = CCM_OFF, LENGTH = CCM
#endif
#if defined(EEP)
	eep (r) : ORIGIN = EEP_OFF, LENGTH = EEP
#endif
#if defined(XSRAM)
	xsram (rw) : ORIGIN = XSRAM_OFF, LENGTH = XSRAM
#endif
#if defined(XDRAM)
	xdram (rw) : ORIGIN = XDRAM_OFF, LENGTH = XDRAM
#endif
}
/* Define sections. */
SECTIONS
{
	.text : {
		*(.vectors)	/* Vector table */
		*(.text*)	/* Program code */
		. = ALIGN(4);
		*(.rodata*)	/* Read-only data */
		. = ALIGN(4);
	} >rom
	/* C++ Static constructors/destructors, also used for
	 * __attribute__((constructor)) and the likes.
	 */
	.preinit_array : {
		. = ALIGN(4);
		__preinit_array_start = .;
		KEEP (*(.preinit_array))
		__preinit_array_end = .;
	} >rom
	.init_array : {
		. = ALIGN(4);
		__init_array_start = .;
		KEEP (*(SORT(.init_array.*)))
		KEEP (*(.init_array))
		__init_array_end = .;
	} >rom
	.fini_array : {
		. = ALIGN(4);
		__fini_array_start = .;
		KEEP (*(.fini_array))
		KEEP (*(SORT(.fini_array.*)))
		__fini_array_end = .;
	} >rom
	/*
	 * Another section used by C++ stuff, appears when using newlib with
	 * 64bit (long long) printf support
	 */
	.ARM.extab : {
		*(.ARM.extab*)
	} >rom
	/* Exception index table; __exidx_* bound it for the unwinder. */
	.ARM.exidx : {
		__exidx_start = .;
		*(.ARM.exidx*)
		__exidx_end = .;
	} >rom
	. = ALIGN(4);
	_etext = .;
	/* Initialized data lives in RAM at runtime but is loaded from ROM;
	 * startup code copies [_data, _edata) from _data_loadaddr. */
	.data : {
		_data = .;
		*(.data*)	/* Read-write initialized data */
		. = ALIGN(4);
		_edata = .;
	} >ram AT >rom
	_data_loadaddr = LOADADDR(.data);
	.bss : {
		*(.bss*)	/* Read-write zero initialized data */
		*(COMMON)
		. = ALIGN(4);
		_ebss = .;
	} >ram
#if defined(EEP)
	.eep : {
		*(.eeprom*)
		. = ALIGN(4);
	} >eep
#endif
#if defined(CCM)
	.ccm : {
		*(.ccmram*)
		. = ALIGN(4);
	} >ccm
#endif
#if defined(RAM1)
	.ram1 : {
		*(.ram1*)
		. = ALIGN(4);
	} >ram1
#endif
#if defined(RAM2)
	.ram2 : {
		*(.ram2*)
		. = ALIGN(4);
	} >ram2
#endif
#if defined(XSRAM)
	.xsram : {
		*(.xsram*)
		. = ALIGN(4);
	} >xsram
#endif
#if defined(XDRAM)
	.xdram : {
		*(.xdram*)
		. = ALIGN(4);
	} >xdram
#endif
	/*
	 * The .eh_frame section appears to be used for C++ exception handling.
	 * You may need to fix this if you're using C++.
	 */
	/DISCARD/ : { *(.eh_frame) }
	. = ALIGN(4);
	end = .;
}
/* Initial stack pointer: top of the main RAM region. */
PROVIDE(_stack = ORIGIN(ram) + LENGTH(ram));
AirFortressIlikara/LS2K0300-linux-4.19
| 1,327
|
drivers/misc/sgi-xp/xp_nofault.S
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* The xp_nofault_PIOR function takes a pointer to a remote PIO register
* and attempts to load and consume a value from it. This function
* will be registered as a nofault code block. In the event that the
* PIO read fails, the MCA handler will force the error to look
* corrected and vector to the xp_error_PIOR which will return an error.
*
* The definition of "consumption" and the time it takes for an MCA
* to surface is processor implementation specific. This code
* is sufficient on Itanium through the Montvale processor family.
* It may need to be adjusted for future processor implementations.
*
* extern int xp_nofault_PIOR(void *remote_register);
*/
.global xp_nofault_PIOR
// Returns 0 on success; registered as a nofault block (see header above).
xp_nofault_PIOR:
	mov	r8=r0			// Stage a success return value
	ld8.acq	r9=[r32];;		// PIO Read the specified register
	adds	r9=1,r9;;		// Add to force consumption
	srlz.i;;			// Allow time for MCA to surface
	br.ret.sptk.many b0;;		// Return success
.global xp_error_PIOR
// MCA handler vectors here when the PIO read faults; returns nonzero.
xp_error_PIOR:
	mov	r8=1			// Return value of 1
	br.ret.sptk.many b0;;		// Return failure
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,546
|
drivers/soc/bcm/brcmstb/pm/s3-mips.S
|
/*
* Copyright (C) 2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/bmips.h>
#include "pm.h"
.text
.set noreorder
.align 5
.global s3_reentry
/*
* a0: AON_CTRL base register
* a1: D-Cache line size
*/
/*
 * Enter S3 deep standby.  Saves callee-state to the static gp_regs
 * buffer (flushed from D-cache since DRAM/cache contents are lost),
 * programs AON_CTRL for power-down and waits for the wake interrupt.
 * On wake, execution resumes at s3_reentry, which re-initializes the
 * core (branch predictor, JTB, MMU) and restores the saved state.
 */
LEAF(brcm_pm_do_s3)
	/* Get the address of s3_context */
	la	t0, gp_regs
	sw	ra, 0(t0)
	sw	s0, 4(t0)
	sw	s1, 8(t0)
	sw	s2, 12(t0)
	sw	s3, 16(t0)
	sw	s4, 20(t0)
	sw	s5, 24(t0)
	sw	s6, 28(t0)
	sw	s7, 32(t0)
	sw	gp, 36(t0)
	sw	sp, 40(t0)
	sw	fp, 44(t0)
	/* Save CP0 Status */
	mfc0	t1, CP0_STATUS
	sw	t1, 48(t0)
	/* Write-back gp registers - cache will be gone */
	/* t1 = ~(line_size - 1): mask to align t0 down to a cache line */
	addiu	t1, a1, -1
	not	t1
	and	t0, t1
	/* Flush at least 64 bytes */
	addiu	t2, t0, 64
	and	t2, t1
1:	cache	0x17, 0(t0)
	bne	t0, t2, 1b
	 addu	t0, a1
	/* Drop to deep standby */
	li	t1, PM_WARM_CONFIG
	sw	zero, AON_CTRL_PM_CTRL(a0)
	lw	zero, AON_CTRL_PM_CTRL(a0)
	sw	t1, AON_CTRL_PM_CTRL(a0)
	lw	t1, AON_CTRL_PM_CTRL(a0)
	li	t1, (PM_WARM_CONFIG | PM_PWR_DOWN)
	sw	t1, AON_CTRL_PM_CTRL(a0)
	lw	t1, AON_CTRL_PM_CTRL(a0)
	/* Enable CP0 interrupt 2 and wait for interrupt */
	mfc0	t0, CP0_STATUS
	li	t1, ~(ST0_IM | ST0_IE)
	and	t0, t1
	ori	t0, STATUSF_IP2
	mtc0	t0, CP0_STATUS
	nop
	nop
	nop
	ori	t0, ST0_IE
	mtc0	t0, CP0_STATUS
	/* Wait for interrupt */
	wait
	nop
s3_reentry:
	/* Clear call/return stack */
	li	t0, (0x06 << 16)
	mtc0	t0, $22, 2
	ssnop
	ssnop
	ssnop
	/* Clear jump target buffer */
	li	t0, (0x04 << 16)
	mtc0	t0, $22, 2
	ssnop
	ssnop
	ssnop
	sync
	nop
	/* Setup mmu defaults */
	mtc0	zero, CP0_WIRED
	mtc0	zero, CP0_ENTRYHI
	li	k0, PM_DEFAULT_MASK
	mtc0	k0, CP0_PAGEMASK
	/* Temporary stack while rebuilding the wired TLB entries */
	li	sp, BMIPS_WARM_RESTART_VEC
	la	k0, plat_wired_tlb_setup
	jalr	k0
	 nop
	/* Restore general purpose registers */
	la	t0, gp_regs
	lw	fp, 44(t0)
	lw	sp, 40(t0)
	lw	gp, 36(t0)
	lw	s7, 32(t0)
	lw	s6, 28(t0)
	lw	s5, 24(t0)
	lw	s4, 20(t0)
	lw	s3, 16(t0)
	lw	s2, 12(t0)
	lw	s1, 8(t0)
	lw	s0, 4(t0)
	lw	ra, 0(t0)
	/* Restore CP0 status */
	lw	t1, 48(t0)
	mtc0	t1, CP0_STATUS
	/* Return to caller */
	li	v0, 0
	jr	ra
	 nop
END(brcm_pm_do_s3)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,699
|
drivers/soc/bcm/brcmstb/pm/s2-arm.S
|
/*
* Copyright © 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include "pm.h"
.text
.align 3
#define AON_CTRL_REG r10
#define DDR_PHY_STATUS_REG r11
/*
* r0: AON_CTRL base address
* r1: DDRY PHY PLL status register address
*/
/*
 * Enter S2 standby: request power-down via AON_CTRL, wfi until wake,
 * then poll the DDR PHY PLL status until the memory controller is back
 * and complete the power-up handshake.  Returns 0 in r0.
 * This routine is copied to on-chip memory, hence the trailing .ltorg
 * and the exported size word brcmstb_pm_do_s2_sz.
 */
ENTRY(brcmstb_pm_do_s2)
	stmfd	sp!, {r4-r11, lr}
	mov	AON_CTRL_REG, r0
	mov	DDR_PHY_STATUS_REG, r1
	/* Flush memory transactions */
	dsb
	/* Cache DDR_PHY_STATUS_REG translation (touch it before DRAM sleeps) */
	ldr	r0, [DDR_PHY_STATUS_REG]
	/* power down request */
	ldr	r0, =PM_S2_COMMAND
	ldr	r1, =0
	str	r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
	ldr	r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]	@ read back to post the write
	str	r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
	ldr	r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
	/* Wait for interrupt */
	wfi
	nop
	/* Bring MEMC back up: spin until DDR PHY PLL reports lock (bit 0) */
1:	ldr	r0, [DDR_PHY_STATUS_REG]
	ands	r0, #1
	beq	1b
	/* Power-up handshake */
	ldr	r0, =1
	str	r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
	ldr	r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
	ldr	r0, =0
	str	r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
	ldr	r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
	/* Return to caller */
	ldr	r0, =0
	ldmfd	sp!, {r4-r11, pc}
ENDPROC(brcmstb_pm_do_s2)
	/* Place literal pool here */
	.ltorg
ENTRY(brcmstb_pm_do_s2_sz)
	.word	. - brcmstb_pm_do_s2
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,341
|
drivers/soc/bcm/brcmstb/pm/s2-mips.S
|
/*
* Copyright (C) 2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include "pm.h"
.text
.set noreorder
.align 5
/*
* a0: u32 params array
*/
/*
 * Enter S2 standby (MIPS).  Locks this code and the interrupt vector
 * into the I-cache (DRAM is unavailable during the transition), issues
 * the power-down command, waits for the wake interrupt, then waits for
 * the DDR PHY PLL plus a 1 ms settle delay before handshaking power-up
 * and unlocking the I-cache.  Returns 0 in v0.
 */
LEAF(brcm_pm_do_s2)
	subu	sp, 64
	sw	ra, 0(sp)
	sw	s0, 4(sp)
	sw	s1, 8(sp)
	sw	s2, 12(sp)
	sw	s3, 16(sp)
	sw	s4, 20(sp)
	sw	s5, 24(sp)
	sw	s6, 28(sp)
	sw	s7, 32(sp)
	/*
	 * Dereference the params array
	 * s0: AON_CTRL base register
	 * s1: DDR_PHY base register
	 * s2: TIMERS base register
	 * s3: I-Cache line size
	 * s4: Restart vector address
	 * s5: Restart vector size
	 */
	move	t0, a0
	lw	s0, 0(t0)
	lw	s1, 4(t0)
	lw	s2, 8(t0)
	lw	s3, 12(t0)
	lw	s4, 16(t0)
	lw	s5, 20(t0)
	/* Lock this asm section into the I-cache */
	addiu	t1, s3, -1
	not	t1
	la	t0, brcm_pm_do_s2
	and	t0, t1
	la	t2, asm_end
	and	t2, t1
1:	cache	0x1c, 0(t0)
	bne	t0, t2, 1b
	 addu	t0, s3
	/* Lock the interrupt vector into the I-cache */
	move	t0, zero
2:	move	t1, s4
	cache	0x1c, 0(t1)
	addu	t1, s3
	addu	t0, s3
	ble	t0, s5, 2b
	 nop
	sync
	/* Power down request */
	li	t0, PM_S2_COMMAND
	sw	zero, AON_CTRL_PM_CTRL(s0)
	lw	zero, AON_CTRL_PM_CTRL(s0)
	sw	t0, AON_CTRL_PM_CTRL(s0)
	lw	t0, AON_CTRL_PM_CTRL(s0)
	/* Enable CP0 interrupt 2 and wait for interrupt */
	mfc0	t0, CP0_STATUS
	/* Save cp0 sr for restoring later */
	move	s6, t0
	li	t1, ~(ST0_IM | ST0_IE)
	and	t0, t1
	ori	t0, STATUSF_IP2
	mtc0	t0, CP0_STATUS
	nop
	nop
	nop
	ori	t0, ST0_IE
	mtc0	t0, CP0_STATUS
	/* Wait for interrupt */
	wait
	nop
	/* Wait for memc0: spin on DDR PHY PLL lock (bit 0) */
1:	lw	t0, DDR40_PHY_CONTROL_REGS_0_PLL_STATUS(s1)
	andi	t0, 1
	beqz	t0, 1b
	 nop
	/* 1ms delay needed for stable recovery */
	/* Use TIMER1 to count 1 ms */
	li	t0, RESET_TIMER
	sw	t0, TIMER_TIMER1_CTRL(s2)
	lw	t0, TIMER_TIMER1_CTRL(s2)
	li	t0, START_TIMER
	sw	t0, TIMER_TIMER1_CTRL(s2)
	lw	t0, TIMER_TIMER1_CTRL(s2)
	/* Prepare delay */
	li	t0, TIMER_MASK
	lw	t1, TIMER_TIMER1_STAT(s2)
	and	t1, t0
	/* 1ms delay (27000 timer ticks) */
	addi	t1, 27000
	/* Wait for the timer value to exceed t1 */
1:	lw	t0, TIMER_TIMER1_STAT(s2)
	sgtu	t2, t1, t0
	bnez	t2, 1b
	 nop
	/* Power back up */
	li	t1, 1
	sw	t1, AON_CTRL_HOST_MISC_CMDS(s0)
	lw	t1, AON_CTRL_HOST_MISC_CMDS(s0)
	sw	zero, AON_CTRL_PM_CTRL(s0)
	lw	zero, AON_CTRL_PM_CTRL(s0)
	/* Unlock I-cache */
	addiu	t1, s3, -1
	not	t1
	la	t0, brcm_pm_do_s2
	and	t0, t1
	la	t2, asm_end
	and	t2, t1
1:	cache	0x00, 0(t0)
	bne	t0, t2, 1b
	 addu	t0, s3
	/* Unlock interrupt vector */
	move	t0, zero
2:	move	t1, s4
	cache	0x00, 0(t1)
	addu	t1, s3
	addu	t0, s3
	ble	t0, s5, 2b
	 nop
	/* Restore cp0 sr */
	sync
	nop
	mtc0	s6, CP0_STATUS
	nop
	/* Set return value to success */
	li	v0, 0
	/* Return to caller */
	lw	s7, 32(sp)
	lw	s6, 28(sp)
	lw	s5, 24(sp)
	lw	s4, 20(sp)
	lw	s3, 16(sp)
	lw	s2, 12(sp)
	lw	s1, 8(sp)
	lw	s0, 4(sp)
	lw	ra, 0(sp)
	addiu	sp, 64
	jr	ra
	 nop
END(brcm_pm_do_s2)
/* End marker used above to bound the I-cache lock/unlock loops. */
.globl asm_end
asm_end:
	nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 23,715
|
drivers/net/wan/wanxlfw.S
|
.psize 0
/*
wanXL serial card driver for Linux
card firmware part
Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License
as published by the Free Software Foundation.
DPRAM BDs:
0x000 - 0x050 TX#0 0x050 - 0x140 RX#0
0x140 - 0x190 TX#1 0x190 - 0x280 RX#1
0x280 - 0x2D0 TX#2 0x2D0 - 0x3C0 RX#2
0x3C0 - 0x410 TX#3 0x410 - 0x500 RX#3
000 5FF 1536 Bytes Dual-Port RAM User Data / BDs
600 6FF 256 Bytes Dual-Port RAM User Data / BDs
700 7FF 256 Bytes Dual-Port RAM User Data / BDs
C00 CBF 192 Bytes Dual-Port RAM Parameter RAM Page 1
D00 DBF 192 Bytes Dual-Port RAM Parameter RAM Page 2
E00 EBF 192 Bytes Dual-Port RAM Parameter RAM Page 3
F00 FBF 192 Bytes Dual-Port RAM Parameter RAM Page 4
local interrupts level
NMI 7
PIT timer, CPM (RX/TX complete) 4
PCI9060 DMA and PCI doorbells 3
Cable - not used 1
*/
#include <linux/hdlc.h>
#include <linux/hdlc/ioctl.h>
#include "wanxl.h"
/* memory addresses and offsets */
MAX_RAM_SIZE = 16 * 1024 * 1024 // max RAM supported by hardware
PCI9060_VECTOR = 0x0000006C
CPM_IRQ_BASE = 0x40
ERROR_VECTOR = CPM_IRQ_BASE * 4
SCC1_VECTOR = (CPM_IRQ_BASE + 0x1E) * 4
SCC2_VECTOR = (CPM_IRQ_BASE + 0x1D) * 4
SCC3_VECTOR = (CPM_IRQ_BASE + 0x1C) * 4
SCC4_VECTOR = (CPM_IRQ_BASE + 0x1B) * 4
CPM_IRQ_LEVEL = 4
TIMER_IRQ = 128
TIMER_IRQ_LEVEL = 4
PITR_CONST = 0x100 + 16 // 1 Hz timer
MBAR = 0x0003FF00
VALUE_WINDOW = 0x40000000
ORDER_WINDOW = 0xC0000000
PLX = 0xFFF90000
CSRA = 0xFFFB0000
CSRB = 0xFFFB0002
CSRC = 0xFFFB0004
CSRD = 0xFFFB0006
STATUS_CABLE_LL = 0x2000
STATUS_CABLE_DTR = 0x1000
DPRBASE = 0xFFFC0000
SCC1_BASE = DPRBASE + 0xC00
MISC_BASE = DPRBASE + 0xCB0
SCC2_BASE = DPRBASE + 0xD00
SCC3_BASE = DPRBASE + 0xE00
SCC4_BASE = DPRBASE + 0xF00
// offset from SCCx_BASE
// SCC_xBASE contain offsets from DPRBASE and must be divisible by 8
SCC_RBASE = 0 // 16-bit RxBD base address
SCC_TBASE = 2 // 16-bit TxBD base address
SCC_RFCR = 4 // 8-bit Rx function code
SCC_TFCR = 5 // 8-bit Tx function code
SCC_MRBLR = 6 // 16-bit maximum Rx buffer length
SCC_C_MASK = 0x34 // 32-bit CRC constant
SCC_C_PRES = 0x38 // 32-bit CRC preset
SCC_MFLR = 0x46 // 16-bit max Rx frame length (without flags)
REGBASE = DPRBASE + 0x1000
PICR = REGBASE + 0x026 // 16-bit periodic irq control
PITR = REGBASE + 0x02A // 16-bit periodic irq timing
OR1 = REGBASE + 0x064 // 32-bit RAM bank #1 options
CICR = REGBASE + 0x540 // 32(24)-bit CP interrupt config
CIMR = REGBASE + 0x548 // 32-bit CP interrupt mask
CISR = REGBASE + 0x54C // 32-bit CP interrupts in-service
PADIR = REGBASE + 0x550 // 16-bit PortA data direction bitmap
PAPAR = REGBASE + 0x552 // 16-bit PortA pin assignment bitmap
PAODR = REGBASE + 0x554 // 16-bit PortA open drain bitmap
PADAT = REGBASE + 0x556 // 16-bit PortA data register
PCDIR = REGBASE + 0x560 // 16-bit PortC data direction bitmap
PCPAR = REGBASE + 0x562 // 16-bit PortC pin assignment bitmap
PCSO = REGBASE + 0x564 // 16-bit PortC special options
PCDAT = REGBASE + 0x566 // 16-bit PortC data register
PCINT = REGBASE + 0x568 // 16-bit PortC interrupt control
CR = REGBASE + 0x5C0 // 16-bit Command register
SCC1_REGS = REGBASE + 0x600
SCC2_REGS = REGBASE + 0x620
SCC3_REGS = REGBASE + 0x640
SCC4_REGS = REGBASE + 0x660
SICR = REGBASE + 0x6EC // 32-bit SI clock route
// offset from SCCx_REGS
SCC_GSMR_L = 0x00 // 32 bits
SCC_GSMR_H = 0x04 // 32 bits
SCC_PSMR = 0x08 // 16 bits
SCC_TODR = 0x0C // 16 bits
SCC_DSR = 0x0E // 16 bits
SCC_SCCE = 0x10 // 16 bits
SCC_SCCM = 0x14 // 16 bits
SCC_SCCS = 0x17 // 8 bits
#if QUICC_MEMCPY_USES_PLX
// PLX PCI9060 DMA-based copies: program the DMA channel registers and
// block in memcpy_*_run until the transfer-done bit is set.
.macro memcpy_from_pci src, dest, len	// len must be < 8 MB
	addl #3, \len
	andl #0xFFFFFFFC, \len		// always copy n * 4 bytes
	movel \src, PLX_DMA_0_PCI
	movel \dest, PLX_DMA_0_LOCAL
	movel \len, PLX_DMA_0_LENGTH
	movel #0x0103, PLX_DMA_CMD_STS	// start channel 0 transfer
	bsr memcpy_from_pci_run
.endm
.macro memcpy_to_pci src, dest, len
	addl #3, \len
	andl #0xFFFFFFFC, \len		// always copy n * 4 bytes
	movel \src, PLX_DMA_1_LOCAL
	movel \dest, PLX_DMA_1_PCI
	movel \len, PLX_DMA_1_LENGTH
	movel #0x0301, PLX_DMA_CMD_STS	// start channel 1 transfer
	bsr memcpy_to_pci_run
.endm
#else
// CPU-driven copy: longwords first, then the 0-3 remaining bytes.
// Clobbers \src, \dest, \len; preserves %d7 across the copy.
.macro memcpy src, dest, len		// len must be < 65536 bytes
	movel %d7, -(%sp)		// src and dest must be < 256 MB
	movel \len, %d7			// bits 0 and 1
	lsrl #2, \len
	andl \len, \len
	beq 99f				// only 0 - 3 bytes
	subl #1, \len			// for dbf
98:	movel (\src)+, (\dest)+
	dbfw \len, 98b
99:	movel %d7, \len
	btstl #1, \len
	beq 99f
	movew (\src)+, (\dest)+
99:	btstl #0, \len
	beq 99f
	moveb (\src)+, (\dest)+
99:
	movel (%sp)+, %d7
.endm
.macro memcpy_from_pci src, dest, len
	addl #VALUE_WINDOW, \src	// map PCI address into local window
	memcpy \src, \dest, \len
.endm
.macro memcpy_to_pci src, dest, len
	addl #VALUE_WINDOW, \dest	// map PCI address into local window
	memcpy \src, \dest, \len
.endm
#endif
// Spin until the CP command register's busy bit (bit 0) clears.
.macro wait_for_command
99:	btstl #0, CR
	bne 99b
.endm
/****************************** card initialization *******************/
// Entry point: _start branches over the shared-data words (ch_status_addr
// per port, rx_descs_addr) that the host patches in at fixed offset 4.
// init translates those PCI addresses into the local VALUE_WINDOW,
// installs interrupt vectors, programs the CPM/PLX and falls through
// to the main loop.
.text
.global _start
_start: bra init
.org _start + 4
ch_status_addr: .long 0, 0, 0, 0
rx_descs_addr: .long 0
init:
#if DETECT_RAM
	movel OR1, %d0
	andl #0xF00007FF, %d0		// mask AMxx bits
	orl #0xFFFF800 & ~(MAX_RAM_SIZE - 1), %d0 // update RAM bank size
	movel %d0, OR1
#endif
	addl #VALUE_WINDOW, rx_descs_addr // PCI addresses of shared data
	clrl %d0			// D0 = 4 * port
init_1: tstl ch_status_addr(%d0)	// skip ports with no status block
	beq init_2
	addl #VALUE_WINDOW, ch_status_addr(%d0)
init_2: addl #4, %d0
	cmpl #4 * 4, %d0
	bne init_1
	movel #pci9060_interrupt, PCI9060_VECTOR
	movel #error_interrupt, ERROR_VECTOR
	movel #port_interrupt_1, SCC1_VECTOR
	movel #port_interrupt_2, SCC2_VECTOR
	movel #port_interrupt_3, SCC3_VECTOR
	movel #port_interrupt_4, SCC4_VECTOR
	movel #timer_interrupt, TIMER_IRQ * 4
	movel #0x78000000, CIMR		// only SCCx IRQs from CPM
	movew #(TIMER_IRQ_LEVEL << 8) + TIMER_IRQ, PICR // interrupt from PIT
	movew #PITR_CONST, PITR
	// SCC1=SCCa SCC2=SCCb SCC3=SCCc SCC4=SCCd prio=4 HP=-1 IRQ=64-79
	movel #0xD41F40 + (CPM_IRQ_LEVEL << 13), CICR
	movel #0x543, PLX_DMA_0_MODE	// 32-bit, Ready, Burst, IRQ
	movel #0x543, PLX_DMA_1_MODE
	movel #0x0, PLX_DMA_0_DESC	// from PCI to local
	movel #0x8, PLX_DMA_1_DESC	// from local to PCI
	movel #0x101, PLX_DMA_CMD_STS	// enable both DMA channels
	// enable local IRQ, DMA, doorbells and PCI IRQ
	orl #0x000F0300, PLX_INTERRUPT_CS
#if DETECT_RAM
	bsr ram_test
#else
	movel #1, PLX_MAILBOX_5		// non-zero value = init complete
#endif
	bsr check_csr
	movew #0xFFFF, PAPAR		// all pins are clocks/data
	clrw PADIR			// first function
	clrw PCSO			// CD and CTS always active
/****************************** main loop *****************************/
// Event loop: channel_stats accumulates doorbell requests (from the
// PCI9060 handler) and SCC task bits (from the port handlers).  Each
// iteration snapshots-and-clears it, then walks the four ports,
// shifting the per-port bits down as it goes.  D6 collects doorbell
// bits to raise back to the host at the end of the pass.
main:	movel channel_stats, %d7	// D7 = doorbell + irq status
	clrl channel_stats
	tstl %d7
	bne main_1
	// nothing to do - wait for next event
	stop #0x2200			// supervisor + IRQ level 2
	movew #0x2700, %sr		// disable IRQs again
	bra main
main_1: clrl %d0			// D0 = 4 * port
	clrl %d6			// D6 = doorbell to host value
main_l: btstl #DOORBELL_TO_CARD_CLOSE_0, %d7
	beq main_op
	bclrl #DOORBELL_TO_CARD_OPEN_0, %d7 // in case both bits are set
	bsr close_port
main_op:
	btstl #DOORBELL_TO_CARD_OPEN_0, %d7
	beq main_cl
	bsr open_port
main_cl:
	btstl #DOORBELL_TO_CARD_TX_0, %d7
	beq main_txend
	bsr tx
main_txend:
	btstl #TASK_SCC_0, %d7
	beq main_next
	bsr tx_end
	bsr rx
main_next:
	lsrl #1, %d7			// port status for next port
	addl #4, %d0			// D0 = 4 * next port
	cmpl #4 * 4, %d0
	bne main_l
	movel %d6, PLX_DOORBELL_FROM_CARD // signal the host
	bra main
/****************************** open port *****************************/
// Bring a port up: reset ring indices, select clocking, raise DTR,
// build the TX/RX buffer-descriptor rings, program the SCC parameter
// RAM (CRC mode per requested "parity"), issue the CP init command and
// finally enable the SCC with its interrupts.
open_port:				// D0 = 4 * port, D6 = doorbell to host
	movel ch_status_addr(%d0), %a0	// A0 = port status address
	tstl STATUS_OPEN(%a0)
	bne open_port_ret		// port already open
	movel #1, STATUS_OPEN(%a0)	// confirm the port is open
	// setup BDs
	clrl tx_in(%d0)
	clrl tx_out(%d0)
	clrl tx_count(%d0)
	clrl rx_in(%d0)
	movel SICR, %d1			// D1 = clock settings in SICR
	andl clocking_mask(%d0), %d1
	cmpl #CLOCK_TXFROMRX, STATUS_CLOCKING(%a0)
	bne open_port_clock_ext
	orl clocking_txfromrx(%d0), %d1
	bra open_port_set_clock
open_port_clock_ext:
	orl clocking_ext(%d0), %d1
open_port_set_clock:
	movel %d1, SICR			// update clock settings in SICR
	orw #STATUS_CABLE_DTR, csr_output(%d0) // DTR on
	bsr check_csr			// call with disabled timer interrupt
	// Setup TX descriptors
	movel first_buffer(%d0), %d1	// D1 = starting buffer address
	movel tx_first_bd(%d0), %a1	// A1 = starting TX BD address
	movel #TX_BUFFERS - 2, %d2	// D2 = TX_BUFFERS - 1 counter
	movel #0x18000000, %d3		// D3 = initial TX BD flags: Int + Last
	cmpl #PARITY_NONE, STATUS_PARITY(%a0)
	beq open_port_tx_loop
	bsetl #26, %d3			// TX BD flag: Transmit CRC
open_port_tx_loop:
	movel %d3, (%a1)+		// TX flags + length
	movel %d1, (%a1)+		// buffer address
	addl #BUFFER_LENGTH, %d1
	dbfw %d2, open_port_tx_loop
	bsetl #29, %d3			// TX BD flag: Wrap (last BD)
	movel %d3, (%a1)+		// Final TX flags + length
	movel %d1, (%a1)+		// buffer address
	// Setup RX descriptors	// A1 = starting RX BD address
	movel #RX_BUFFERS - 2, %d2	// D2 = RX_BUFFERS - 1 counter
open_port_rx_loop:
	movel #0x90000000, (%a1)+	// RX flags + length
	movel %d1, (%a1)+		// buffer address
	addl #BUFFER_LENGTH, %d1
	dbfw %d2, open_port_rx_loop
	movel #0xB0000000, (%a1)+	// Final RX flags + length
	movel %d1, (%a1)+		// buffer address
	// Setup port parameters
	movel scc_base_addr(%d0), %a1	// A1 = SCC_BASE address
	movel scc_reg_addr(%d0), %a2	// A2 = SCC_REGS address
	movel #0xFFFF, SCC_SCCE(%a2)	// clear status bits
	movel #0x0000, SCC_SCCM(%a2)	// interrupt mask
	movel tx_first_bd(%d0), %d1
	movew %d1, SCC_TBASE(%a1)	// D1 = offset of first TxBD
	addl #TX_BUFFERS * 8, %d1
	movew %d1, SCC_RBASE(%a1)	// D1 = offset of first RxBD
	moveb #0x8, SCC_RFCR(%a1)	// Intel mode, 1000
	moveb #0x8, SCC_TFCR(%a1)
	// Parity settings
	cmpl #PARITY_CRC16_PR1_CCITT, STATUS_PARITY(%a0)
	bne open_port_parity_1
	clrw SCC_PSMR(%a2)		// CRC16-CCITT
	movel #0xF0B8, SCC_C_MASK(%a1)
	movel #0xFFFF, SCC_C_PRES(%a1)
	movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
	movew #2, parity_bytes(%d0)
	bra open_port_2
open_port_parity_1:
	cmpl #PARITY_CRC32_PR1_CCITT, STATUS_PARITY(%a0)
	bne open_port_parity_2
	movew #0x0800, SCC_PSMR(%a2)	// CRC32-CCITT
	movel #0xDEBB20E3, SCC_C_MASK(%a1)
	movel #0xFFFFFFFF, SCC_C_PRES(%a1)
	movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
	movew #4, parity_bytes(%d0)
	bra open_port_2
open_port_parity_2:
	cmpl #PARITY_CRC16_PR0_CCITT, STATUS_PARITY(%a0)
	bne open_port_parity_3
	clrw SCC_PSMR(%a2)		// CRC16-CCITT preset 0
	movel #0xF0B8, SCC_C_MASK(%a1)
	clrl SCC_C_PRES(%a1)
	movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
	movew #2, parity_bytes(%d0)
	bra open_port_2
open_port_parity_3:
	cmpl #PARITY_CRC32_PR0_CCITT, STATUS_PARITY(%a0)
	bne open_port_parity_4
	movew #0x0800, SCC_PSMR(%a2)	// CRC32-CCITT preset 0
	movel #0xDEBB20E3, SCC_C_MASK(%a1)
	clrl SCC_C_PRES(%a1)
	movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
	movew #4, parity_bytes(%d0)
	bra open_port_2
open_port_parity_4:
	clrw SCC_PSMR(%a2)		// no parity
	movel #0xF0B8, SCC_C_MASK(%a1)
	movel #0xFFFF, SCC_C_PRES(%a1)
	movew #HDLC_MAX_MRU, SCC_MFLR(%a1) // 0 bytes for CRC
	clrw parity_bytes(%d0)
open_port_2:
	movel #0x00000003, SCC_GSMR_H(%a2) // RTSM
	cmpl #ENCODING_NRZI, STATUS_ENCODING(%a0)
	bne open_port_nrz
	movel #0x10040900, SCC_GSMR_L(%a2) // NRZI: TCI Tend RECN+TENC=1
	bra open_port_3
open_port_nrz:
	movel #0x10040000, SCC_GSMR_L(%a2) // NRZ: TCI Tend RECN+TENC=0
open_port_3:
	movew #BUFFER_LENGTH, SCC_MRBLR(%a1)
	movel %d0, %d1
	lsll #4, %d1			// D1 bits 7 and 6 = port
	orl #1, %d1
	movew %d1, CR			// Init SCC RX and TX params
	wait_for_command
	// TCI Tend ENR ENT
	movew #0x001F, SCC_SCCM(%a2)	// TXE RXF BSY TXB RXB interrupts
	orl #0x00000030, SCC_GSMR_L(%a2) // enable SCC
open_port_ret:
	rts
/****************************** close port ****************************/
// Bring a port down: mask and disable the SCC, drop DTR, and clear the
// host-visible open flag.
close_port:				// D0 = 4 * port, D6 = doorbell to host
	movel scc_reg_addr(%d0), %a0	// A0 = SCC_REGS address
	clrw SCC_SCCM(%a0)		// no SCC interrupts
	andl #0xFFFFFFCF, SCC_GSMR_L(%a0) // Disable ENT and ENR
	andw #~STATUS_CABLE_DTR, csr_output(%d0) // DTR off
	bsr check_csr			// call with disabled timer interrupt
	movel ch_status_addr(%d0), %d1
	clrl STATUS_OPEN(%d1)		// confirm the port is closed
	rts
/****************************** transmit packet ***********************/
// queue packets for transmission
// Drain host TX descriptors marked PACKET_FULL into free BDs: copy the
// payload from PCI into the BD's local buffer, hand the BD to the CP,
// and advance tx_out/tx_count.  Stops when BDs run out or the next
// descriptor is not ready.
tx:					// D0 = 4 * port, D6 = doorbell to host
	cmpl #TX_BUFFERS, tx_count(%d0)
	beq tx_ret			// all DB's = descs in use
	movel tx_out(%d0), %d1
	movel %d1, %d2			// D1 = D2 = tx_out BD# = desc#
	mulul #DESC_LENGTH, %d2		// D2 = TX desc offset
	addl ch_status_addr(%d0), %d2
	addl #STATUS_TX_DESCS, %d2	// D2 = TX desc address
	cmpl #PACKET_FULL, (%d2)	// desc status
	bne tx_ret
	// queue it
	movel 4(%d2), %a0		// PCI address
	lsll #3, %d1			// BD is 8-bytes long
	addl tx_first_bd(%d0), %d1	// D1 = current tx_out BD addr
	movel 4(%d1), %a1		// A1 = dest address
	movel 8(%d2), %d2		// D2 = length
	movew %d2, 2(%d1)		// length into BD
	memcpy_from_pci %a0, %a1, %d2
	bsetl #31, (%d1)		// CP go ahead
	// update tx_out and tx_count
	movel tx_out(%d0), %d1
	addl #1, %d1
	cmpl #TX_BUFFERS, %d1
	bne tx_1
	clrl %d1
tx_1:	movel %d1, tx_out(%d0)
	addl #1, tx_count(%d0)
	bra tx
tx_ret: rts
/****************************** packet received ***********************/
// Service receive buffers	// D0 = 4 * port, D6 = doorbell to host
// Walk filled RX BDs starting at rx_in: validate status bits and
// length, copy good frames to the host's RX descriptor ring over PCI,
// count overruns/frame errors, then recycle the BD and advance rx_in.
rx:	movel rx_in(%d0), %d1		// D1 = rx_in BD#
	lsll #3, %d1			// BD is 8-bytes long
	addl rx_first_bd(%d0), %d1	// D1 = current rx_in BD address
	movew (%d1), %d2		// D2 = RX BD flags
	btstl #15, %d2			// Empty bit still set?
	bne rx_ret			// BD still empty
	btstl #1, %d2			// overrun bit
	bne rx_overrun
	tstw parity_bytes(%d0)
	bne rx_parity
	bclrl #2, %d2			// do not test for CRC errors
rx_parity:
	andw #0x0CBC, %d2		// mask status bits
	cmpw #0x0C00, %d2		// correct frame
	bne rx_bad_frame
	clrl %d3
	movew 2(%d1), %d3
	subw parity_bytes(%d0), %d3	// D3 = packet length
	cmpw #HDLC_MAX_MRU, %d3
	bgt rx_bad_frame
rx_good_frame:
	movel rx_out, %d2
	mulul #DESC_LENGTH, %d2
	addl rx_descs_addr, %d2		// D2 = RX desc address
	cmpl #PACKET_EMPTY, (%d2)	// desc stat
	bne rx_overrun			// host ring full -> count as overrun
	movel %d3, 8(%d2)
	movel 4(%d1), %a0		// A0 = source address
	movel 4(%d2), %a1
	tstl %a1
	beq rx_ignore_data		// NULL dest = host discards data
	memcpy_to_pci %a0, %a1, %d3
rx_ignore_data:
	movel packet_full(%d0), (%d2)	// update desc stat
	// update D6 and rx_out
	bsetl #DOORBELL_FROM_CARD_RX, %d6 // signal host that RX completed
	movel rx_out, %d2
	addl #1, %d2
	cmpl #RX_QUEUE_LENGTH, %d2
	bne rx_1
	clrl %d2
rx_1:	movel %d2, rx_out
rx_free_bd:
	andw #0xF000, (%d1)		// clear CM and error bits
	bsetl #31, (%d1)		// free BD
	// update rx_in
	movel rx_in(%d0), %d1
	addl #1, %d1
	cmpl #RX_BUFFERS, %d1
	bne rx_2
	clrl %d1
rx_2:	movel %d1, rx_in(%d0)
	bra rx
rx_overrun:
	movel ch_status_addr(%d0), %d2
	addl #1, STATUS_RX_OVERRUNS(%d2)
	bra rx_free_bd
rx_bad_frame:
	movel ch_status_addr(%d0), %d2
	addl #1, STATUS_RX_FRAME_ERRORS(%d2)
	bra rx_free_bd
rx_ret: rts
/****************************** packet transmitted ********************/
// Service transmit buffers	// D0 = 4 * port, D6 = doorbell to host
// Reap BDs the CP has finished sending (Ready bit clear), starting at
// tx_in: release the matching host descriptor as SENT or UNDERRUN and
// raise the per-port TX doorbell bit in D6.
tx_end: tstl tx_count(%d0)
	beq tx_end_ret			// TX buffers already empty
	movel tx_in(%d0), %d1
	movel %d1, %d2			// D1 = D2 = tx_in BD# = desc#
	lsll #3, %d1			// BD is 8-bytes long
	addl tx_first_bd(%d0), %d1	// D1 = current tx_in BD address
	movew (%d1), %d3		// D3 = TX BD flags
	btstl #15, %d3			// Ready bit still set?
	bne tx_end_ret			// BD still being transmitted
	// update D6, tx_in and tx_count
	orl bell_tx(%d0), %d6		// signal host that TX desc freed
	subl #1, tx_count(%d0)
	movel tx_in(%d0), %d1
	addl #1, %d1
	cmpl #TX_BUFFERS, %d1
	bne tx_end_1
	clrl %d1
tx_end_1:
	movel %d1, tx_in(%d0)
	// free host's descriptor
	mulul #DESC_LENGTH, %d2		// D2 = TX desc offset
	addl ch_status_addr(%d0), %d2
	addl #STATUS_TX_DESCS, %d2	// D2 = TX desc address
	btstl #1, %d3			// underrun bit
	bne tx_end_underrun
	movel #PACKET_SENT, (%d2)
	bra tx_end
tx_end_underrun:
	movel #PACKET_UNDERRUN, (%d2)
	bra tx_end
tx_end_ret: rts
/****************************** PLX PCI9060 DMA memcpy ****************/
#if QUICC_MEMCPY_USES_PLX
// called with interrupts disabled
// Sleep (stop) until the respective PLX DMA channel's done bit is set;
// each wakeup re-reads PLX_DMA_CMD_STS via a register because the PLX
// register must not be bit-tested directly.  Preserves %d0 and %sr.
memcpy_from_pci_run:
	movel %d0, -(%sp)
	movew %sr, -(%sp)
memcpy_1:
	movel PLX_DMA_CMD_STS, %d0	// do not btst PLX register directly
	btstl #4, %d0			// transfer done?
	bne memcpy_end
	stop #0x2200			// enable PCI9060 interrupts
	movew #0x2700, %sr		// disable interrupts again
	bra memcpy_1
memcpy_to_pci_run:
	movel %d0, -(%sp)
	movew %sr, -(%sp)
memcpy_2:
	movel PLX_DMA_CMD_STS, %d0	// do not btst PLX register directly
	btstl #12, %d0			// transfer done?
	bne memcpy_end
	stop #0x2200			// enable PCI9060 interrupts
	movew #0x2700, %sr		// disable interrupts again
	bra memcpy_2
memcpy_end:
	movew (%sp)+, %sr
	movel (%sp)+, %d0
	rts
#endif
/****************************** PLX PCI9060 interrupt *****************/
// Host doorbell / DMA-complete IRQ: latch the doorbell bits into
// channel_stats for the main loop, acknowledge them, and clear both
// DMA channel interrupts.
pci9060_interrupt:
	movel %d0, -(%sp)
	movel PLX_DOORBELL_TO_CARD, %d0
	movel %d0, PLX_DOORBELL_TO_CARD	// confirm all requests
	orl %d0, channel_stats
	movel #0x0909, PLX_DMA_CMD_STS	// clear DMA ch #0 and #1 interrupts
	movel (%sp)+, %d0
	rte
/****************************** SCC interrupts ************************/
// One handler per SCC: write SCCE back to itself to acknowledge the
// events, set the port's TASK_SCC_n bit for the main loop, and clear
// the in-service bit in CISR.
port_interrupt_1:
	orl #0, SCC1_REGS + SCC_SCCE;	// confirm SCC events
	orl #1 << TASK_SCC_0, channel_stats
	movel #0x40000000, CISR
	rte
port_interrupt_2:
	orl #0, SCC2_REGS + SCC_SCCE;	// confirm SCC events
	orl #1 << TASK_SCC_1, channel_stats
	movel #0x20000000, CISR
	rte
port_interrupt_3:
	orl #0, SCC3_REGS + SCC_SCCE;	// confirm SCC events
	orl #1 << TASK_SCC_2, channel_stats
	movel #0x10000000, CISR
	rte
port_interrupt_4:
	orl #0, SCC4_REGS + SCC_SCCE;	// confirm SCC events
	orl #1 << TASK_SCC_3, channel_stats
	movel #0x08000000, CISR
	rte
// CPM error interrupt: nothing to do, just return.
error_interrupt:
	rte
/****************************** cable and PM routine ******************/
// Scan all four ports' CSR registers: decode the cable-sense and PM
// input bits into the mode/IRQ output bits to program, merge in the
// host-requested LL/DTR bits, sample DCD from PCDAT, and when the
// visible cable status changes, update the per-channel status block
// and ring the host doorbell.
// modified registers: none
check_csr:
movel %d0, -(%sp)
movel %d1, -(%sp)
movel %d2, -(%sp)
movel %a0, -(%sp)
movel %a1, -(%sp)
clrl %d0 // D0 = 4 * port
movel #CSRA, %a0 // A0 = CSR address
check_csr_loop:
movew (%a0), %d1 // D1 = CSR input bits
andl #0xE7, %d1 // PM and cable sense bits (no DCE bit)
// map each recognized cable type (with PM present) to its output word
cmpw #STATUS_CABLE_V35 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
bne check_csr_1
movew #0x0E08, %d1
bra check_csr_valid
check_csr_1:
cmpw #STATUS_CABLE_X21 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
bne check_csr_2
movew #0x0408, %d1
bra check_csr_valid
check_csr_2:
cmpw #STATUS_CABLE_V24 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
bne check_csr_3
movew #0x0208, %d1
bra check_csr_valid
check_csr_3:
cmpw #STATUS_CABLE_EIA530 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
bne check_csr_disable
movew #0x0D08, %d1
bra check_csr_valid
// unknown/absent cable: disable outputs and ignore DSR on input
check_csr_disable:
movew #0x0008, %d1 // D1 = disable everything
movew #0x80E7, %d2 // D2 = input mask: ignore DSR
bra check_csr_write
check_csr_valid: // D1 = mode and IRQ bits
movew csr_output(%d0), %d2
andw #0x3000, %d2 // D2 = requested LL and DTR bits
orw %d2, %d1 // D1 = all requested output bits
movew #0x80FF, %d2 // D2 = input mask: include DSR
check_csr_write:
// only touch the hardware when the output word actually changed
cmpw old_csr_output(%d0), %d1
beq check_csr_input
movew %d1, old_csr_output(%d0)
movew %d1, (%a0) // Write CSR output bits
check_csr_input:
// derive DCD from the parallel port C data register
movew (PCDAT), %d1
andw dcd_mask(%d0), %d1
beq check_csr_dcd_on // DCD and CTS signals are negated
movew (%a0), %d1 // D1 = CSR input bits
andw #~STATUS_CABLE_DCD, %d1 // DCD off
bra check_csr_previous
check_csr_dcd_on:
movew (%a0), %d1 // D1 = CSR input bits
orw #STATUS_CABLE_DCD, %d1 // DCD on
check_csr_previous:
andw %d2, %d1 // input mask
movel ch_status_addr(%d0), %a1
cmpl STATUS_CABLE(%a1), %d1 // check for change
beq check_csr_next
movel %d1, STATUS_CABLE(%a1) // update status
movel bell_cable(%d0), PLX_DOORBELL_FROM_CARD // signal the host
check_csr_next:
addl #2, %a0 // next CSR register
addl #4, %d0 // D0 = 4 * next port
cmpl #4 * 4, %d0
bne check_csr_loop
movel (%sp)+, %a1
movel (%sp)+, %a0
movel (%sp)+, %d2
movel (%sp)+, %d1
movel (%sp)+, %d0
rts
/****************************** timer interrupt ***********************/
// Periodic timer tick: just rescan cable/PM status on all ports.
timer_interrupt:
bsr check_csr
rte
/****************************** RAM sizing and test *******************/
#if DETECT_RAM
// Detect RAM size by writing a marker at the top of the first 128KB
// and probing at doubling sizes until the marker aliases (or
// MAX_RAM_SIZE is reached); the alias is double-checked with the
// complemented pattern. Then fill all memory above firmware_end with
// each word's own address and verify it; the result (first bad
// address, or top of RAM if all OK) is reported in PLX_MAILBOX_5.
ram_test:
movel #0x12345678, %d1 // D1 = test value
movel %d1, (128 * 1024 - 4)
movel #128 * 1024, %d0 // D0 = RAM size tested
ram_test_size:
cmpl #MAX_RAM_SIZE, %d0
beq ram_test_size_found
movel %d0, %a0
addl #128 * 1024 - 4, %a0
cmpl (%a0), %d1
beq ram_test_size_check
ram_test_next_size:
lsll #1, %d0
bra ram_test_size
ram_test_size_check:
// marker seen at the aliased address: confirm with inverted pattern
eorl #0xFFFFFFFF, %d1
movel %d1, (128 * 1024 - 4)
cmpl (%a0), %d1
bne ram_test_next_size
ram_test_size_found: // D0 = RAM size
movel %a0, %d0 // A0 = fill ptr
subl #firmware_end + 4, %d0
lsrl #2, %d0
movel %d0, %d1 // D1 = DBf counter
// dbf only counts 16 bits, so the outer subl/cmpl handles the rest
ram_test_fill:
movel %a0, -(%a0)
dbfw %d1, ram_test_fill
subl #0x10000, %d1
cmpl #0xFFFFFFFF, %d1
bne ram_test_fill
ram_test_loop: // D0 = DBf counter
cmpl (%a0)+, %a0
dbnew %d0, ram_test_loop
bne ram_test_found_bad
subl #0x10000, %d0
cmpl #0xFFFFFFFF, %d0
bne ram_test_loop
bra ram_test_all_ok
ram_test_found_bad:
subl #4, %a0 // back up to the failing word
ram_test_all_ok:
movel %a0, PLX_MAILBOX_5
rts
#endif
/****************************** constants *****************************/
// Per-port lookup tables, indexed by D0 = 4 * port number.
scc_reg_addr:
.long SCC1_REGS, SCC2_REGS, SCC3_REGS, SCC4_REGS
scc_base_addr:
.long SCC1_BASE, SCC2_BASE, SCC3_BASE, SCC4_BASE
// buffer descriptor rings in dual-port RAM: TX ring first, then RX
tx_first_bd:
.long DPRBASE
.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8
.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
.long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
rx_first_bd:
.long DPRBASE + TX_BUFFERS * 8
.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8
.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
.long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
first_buffer:
.long BUFFERS_ADDR
.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH
.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 2
.long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 3
// doorbell bit masks used to signal the host per port
bell_tx:
.long 1 << DOORBELL_FROM_CARD_TX_0, 1 << DOORBELL_FROM_CARD_TX_1
.long 1 << DOORBELL_FROM_CARD_TX_2, 1 << DOORBELL_FROM_CARD_TX_3
bell_cable:
.long 1 << DOORBELL_FROM_CARD_CABLE_0, 1 << DOORBELL_FROM_CARD_CABLE_1
.long 1 << DOORBELL_FROM_CARD_CABLE_2, 1 << DOORBELL_FROM_CARD_CABLE_3
packet_full:
.long PACKET_FULL, PACKET_FULL + 1, PACKET_FULL + 2, PACKET_FULL + 3
// per-port SICR clock-routing values and masks
clocking_ext:
.long 0x0000002C, 0x00003E00, 0x002C0000, 0x3E000000
clocking_txfromrx:
.long 0x0000002D, 0x00003F00, 0x002D0000, 0x3F000000
clocking_mask:
.long 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
// word table, still indexed by 4 * port; odd word slots are padding
dcd_mask:
.word 0x020, 0, 0x080, 0, 0x200, 0, 0x800
.ascii "wanXL firmware\n"
.asciz "Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>\n"
/****************************** variables *****************************/
.align 4
channel_stats: .long 0
tx_in: .long 0, 0, 0, 0 // transmitted
tx_out: .long 0, 0, 0, 0 // received from host for transmission
tx_count: .long 0, 0, 0, 0 // currently in transmit queue
rx_in: .long 0, 0, 0, 0 // received from port
rx_out: .long 0 // transmitted to host
parity_bytes: .word 0, 0, 0, 0, 0, 0, 0 // only 4 words are used
csr_output: .word 0
old_csr_output: .word 0, 0, 0, 0, 0, 0, 0
.align 4
firmware_end: // must be dword-aligned
|
airtrack/airix
| 5,628
|
kernel/kernel.s
|
[bits 32]
extern init_paging
extern kernel_entry
extern pic_interrupt
extern exception_handles
extern syscall
global _start
global set_gdtr
global set_idtr
global set_cr3
global set_tss
global in_byte
global in_dword
global insw
global out_byte
global out_dword
global close_int
global start_int
global halt
global switch_kcontext
global ret_user_space
global syscall_entry
global isr_entry0
global isr_entry1
global isr_entry2
global isr_entry3
global isr_entry4
global isr_entry5
global isr_entry6
global isr_entry7
global isr_entry8
global isr_entry9
global isr_entry10
global isr_entry11
global isr_entry12
global isr_entry13
global isr_entry14
global isr_entry15
global divide_by_zero_entry
global debug_entry
global non_maskable_int_entry
global breakpoint_entry
global overflow_entry
global bound_range_exceeded_entry
global invalid_opcode_entry
global device_not_available_entry
global double_fault_entry
global invalid_tss_entry
global segment_not_present_entry
global stack_segment_fault_entry
global general_protection_fault_entry
global page_fault_entry
global fp_exception_entry
global alignment_check_entry
global machine_check_entry
global simd_fp_exception_entry
global virtualization_entry
global security_exception_entry
_start:
; Kernel entry. ebx carries the bootloader information pointer
; (see bootloader: 0x7E00 block); preserve it for kernel_entry.
push ebx
; The kernel is linked at 0xC0000000 but runs at its physical load
; address until paging is on, so call init_paging via its physical
; address (virtual - 0xC0000000).
mov eax, init_paging
sub eax, 0xC0000000
call eax
; Paging is now enabled: switch to the kernel stack and enter C code.
mov esp, 0xC0010000
mov ebp, esp
mov eax, kernel_entry
jmp eax
; void set_gdtr(void *gdtr) -- load GDTR from a 6-byte limit/base operand.
set_gdtr:
mov eax, dword [esp + 4]
lgdt [eax]
ret
; void set_idtr(void *idtr) -- load IDTR from a 6-byte limit/base operand.
set_idtr:
mov eax, dword [esp + 4]
lidt [eax]
ret
; void set_cr3(addr) -- load the page directory base and enable paging
; by setting CR0.PG (bit 31).
set_cr3:
mov eax, dword [esp + 4]
mov cr3, eax
mov eax, cr0
or eax, 0x80000000
mov cr0, eax
ret
; void set_tss(uint16_t selector) -- load the task register.
set_tss:
mov ax, word [esp + 4]
ltr ax
ret
; uint8_t in_byte(uint16_t port) -- read one byte from an I/O port.
in_byte:
mov edx, dword [esp + 4]
xor eax, eax
in al, dx
; nops give slow devices a short recovery delay
nop
nop
ret
; uint32_t in_dword(uint16_t port) -- read one dword from an I/O port.
in_dword:
mov edx, dword [esp + 4]
xor eax, eax
in eax, dx
nop
nop
ret
; void insw(uint16_t port, uint32_t count, void *buf)
; -- read `count` 16-bit words from the port into buf.
insw:
push edi
mov dx, word [esp + 8]
mov ecx, dword [esp + 12]
mov edi, dword [esp + 16]
rep insw
pop edi
ret
; void out_byte(uint16_t port, uint8_t value)
out_byte:
mov edx, dword [esp + 4]
mov al, byte [esp + 8]
out dx, al
nop
nop
ret
; void out_dword(uint16_t port, uint32_t value)
out_dword:
mov edx, dword [esp + 4]
mov eax, dword [esp + 8]
out dx, eax
nop
nop
ret
; void close_int(void) -- disable maskable interrupts.
close_int:
cli
ret
; void start_int(void) -- enable maskable interrupts.
start_int:
sti
ret
; halt() -- wait for the next interrupt.
halt:
hlt
; NOTE(review): there is no `ret` after hlt, so when an interrupt wakes
; the CPU, execution falls through into switch_kcontext below. Confirm
; callers never expect halt to return (otherwise add `ret` or loop).
; Switch kernel stack, prototype in c:
; void switch_kcontext(struct kstack_context **cur,
;                      struct kstack_context *new);
; The four pushes/pops below must match the layout of
; struct kstack_context (edi, esi, ebp, ebx plus the return eip).
switch_kcontext:
mov ecx, dword [esp + 4] ; cur
mov eax, dword [esp + 8] ; new
; Save registers of struct kstack_context
push edi
push esi
push ebp
push ebx
; Switch stack
mov [ecx], esp ; Save esp to *cur
mov esp, eax ; Restore esp from new
; Restore registers of struct kstack_context
pop ebx
pop ebp
pop esi
pop edi
ret
; Build the interrupt register frame: all GP registers plus the data
; segment registers, then switch to kernel data segments. Assumes an
; error code (real or dummy 0) has already been pushed.
%macro int_enter 0
pushad
push ds
push es
push fs
push gs
; 0x10 is the kernel data segment selector
mov ax, 0x10
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
%endmacro
; Unwind the frame built by int_enter and return from the interrupt.
%macro int_ret 0
pop gs
pop fs
pop es
pop ds
popad
; Skip error code
add esp, 4
iret
%endmacro
; Enter user space by pointing esp at a prepared interrupt frame and
; unwinding it (never returns to the caller).
ret_user_space:
mov esp, dword [esp + 4]
int_ret
; System call gate: push a dummy error code, build the frame, and pass
; a pointer to the saved registers to the C syscall dispatcher.
syscall_entry:
push 0
int_enter
push esp
call syscall
add esp, 4
int_ret
; Generic IRQ stub: dummy error code, register frame, then dispatch to
; the C PIC handler with the IRQ number as argument.
%macro isr_entry 1
isr_entry%1:
push 0
int_enter
push %1
call pic_interrupt
add esp, 4
int_ret
%endmacro
; IRQ 7 is instantiated by hand below to filter spurious interrupts.
isr_entry 0
isr_entry 1
isr_entry 2
isr_entry 3
isr_entry 4
isr_entry 5
isr_entry 6
isr_entry 8
isr_entry 9
isr_entry 10
isr_entry 11
isr_entry 12
isr_entry 13
isr_entry 14
isr_entry 15
isr_entry7:
push 0
int_enter
; Read master PIC IRR(Interrupt Request Register)
mov al, 0x0A
out 0x20, al
nop
nop
in al, 0x20
nop
nop
; Check bit 7
and al, 0x80
; If it is spurious IRQ, then just ignore it
jz .spurious
push 7
call pic_interrupt
add esp, 4
.spurious:
int_ret
; CPU exceptions without an error code: push a dummy 0 so the frame
; layout matches, then call the registered handler.
%macro excep 2
%2:
push 0
int_enter
call [exception_handles + %1 * 4]
int_ret
%endmacro
; CPU exceptions that push an error code: after int_enter the error
; code sits at esp+48 (32 bytes pushad + 16 bytes segment registers).
%macro excep_error_code 2
%2:
int_enter
; Push error code
push dword [esp + 48]
call [exception_handles + %1 * 4]
add esp, 4
int_ret
%endmacro
; Exceptions entry
excep 0, divide_by_zero_entry
excep 1, debug_entry
excep 2, non_maskable_int_entry
excep 3, breakpoint_entry
excep 4, overflow_entry
excep 5, bound_range_exceeded_entry
excep 6, invalid_opcode_entry
excep 7, device_not_available_entry
excep 16, fp_exception_entry
excep 18, machine_check_entry
excep 19, simd_fp_exception_entry
excep 20, virtualization_entry
excep_error_code 8, double_fault_entry
excep_error_code 10, invalid_tss_entry
excep_error_code 11, segment_not_present_entry
excep_error_code 12, stack_segment_fault_entry
excep_error_code 13, general_protection_fault_entry
excep_error_code 17, alignment_check_entry
excep_error_code 30, security_exception_entry
; Page fault (#14) is special: it also passes the faulting linear
; address from CR2 to the handler.
page_fault_entry:
int_enter
; Push error code
push dword [esp + 48]
; Push the virtual address which caused the page fault
mov eax, cr2
push eax
call [exception_handles + 14 * 4]
add esp, 8
int_ret
|
airtrack/airix
| 1,226
|
lib/memory.s
|
[bits 32]
global memcpy
global memset
global memcmp
; void * memcpy(void *dst, const void *src, size_t n);
; Frameless cdecl variant: no ebp frame; arguments are addressed
; relative to esp after the two callee-saved pushes, so the return
; address sits at esp+8 and the arguments start at esp+12.
memcpy:
        push    edi
        push    esi
        mov     edi, dword [esp + 12]   ; dst
        mov     esi, dword [esp + 16]   ; src
        mov     ecx, dword [esp + 20]   ; n
        mov     eax, edi                ; keep dst for the return value
        cld                             ; forward copy
        rep movsb                       ; copy n bytes
        pop     esi
        pop     edi
        ret
; void * memset(void *b, int c, size_t len);
; Frameless cdecl variant: addresses arguments through esp (one push,
; so the first argument is at esp+8).
memset:
        push    edi
        mov     edi, dword [esp + 8]    ; b
        mov     eax, dword [esp + 12]   ; c (only al is stored)
        mov     ecx, dword [esp + 16]   ; len
        cld                             ; forward fill
        rep stosb                       ; store al, len times
        mov     eax, dword [esp + 8]    ; return b
        pop     edi
        ret
; int memcmp(const void *s1, const void *s2, size_t n);
; Returns 0 if the regions match, otherwise the difference of the
; first mismatching bytes (s1[i] - s2[i], as unsigned byte values).
memcmp:
push ebp
mov ebp, esp
push esi
push edi
mov esi, dword [ebp + 8]
mov edi, dword [ebp + 12]
mov ecx, dword [ebp + 16]
xor eax, eax
cld
; `cmp ecx, ecx` pre-sets ZF=1: if n == 0 the rep below runs zero
; iterations and leaves the flags untouched, so `je .match` still
; correctly returns 0.
cmp ecx, ecx
repe cmpsb
je .match
; cmpsb already advanced past the mismatching byte; step back and
; recompute the byte difference as the return value.
dec esi
dec edi
xor ecx, ecx
mov al, byte [esi]
mov cl, byte [edi]
sub eax, ecx
.match:
pop edi
pop esi
pop ebp
ret
|
airtrack/airix
| 4,880
|
bootloader/bootloader.s
|
%define SMAP 0x534D4150
%define BOOT_ADDRESS 0x7C00
%define BOOT_INFO_ADDRESS 0x7E00
%define NUM_MMAP_ADDRESS 0x7E04
%define KERNEL_BASE 0xC0000000
[bits 16]
org BOOT_ADDRESS
; Real-mode boot sector entry: set flat segments, put the stack just
; below the boot sector, collect the E820 memory map, load the kernel
; image from disk, and switch to protected mode.
mov ax, cs
mov ds, ax
mov es, ax
mov ss, ax
mov esp, BOOT_ADDRESS
call get_memory_map
call load_kernel
jmp goto_pm
; Query the BIOS E820 memory map. The entry count is stored at
; NUM_MMAP_ADDRESS (0x7E04) and the 24-byte entries follow at 0x7E08.
; Halts on a BIOS error.
get_memory_map:
mov di, NUM_MMAP_ADDRESS
mov dword [es:di], 0
add di, 4
xor ebx, ebx
.getting_mm:
; Call int 0x15 0xE820 function, edx = 'SMAP'
mov edx, SMAP
mov eax, 0xE820
mov ecx, 24
int 0x15
; Check result
jc .get_mm_error
cmp eax, SMAP
jne .get_mm_error
; ecx = returned entry size; a zero-length entry is skipped
jecxz .skip_entry
; Get entry success
add di, 24
inc dword [es:NUM_MMAP_ADDRESS]
.skip_entry:
; There is no more entry when ebx == 0
cmp ebx, 0
je .get_mm_done
jmp .getting_mm
.get_mm_error:
hlt
.get_mm_done:
ret
; Load the kernel image: read disk sectors 1..128 (64KB) one at a time
; into segment 0x1000 (physical 0x10000). Locals: [bp-4] = current
; sector number, [bp-2] = current buffer offset.
load_kernel:
push bp
mov bp, sp
sub sp, 4
mov ax, 0x1000
mov es, ax
mov ax, 1
mov bx, 0
.reading_sectors:
mov word [bp - 4], ax
mov word [bp - 2], bx
mov cl, 1
call read_sector
mov ax, word [bp - 4]
cmp ax, 128
jz .load_success
inc ax
mov bx, word [bp - 2]
add bx, 512
jmp .reading_sectors
.load_success:
add sp, 4
pop bp
ret
; ax: start sector number
; cl: sector count
; es:bx pointer to buffer
; Converts the linear sector number to CHS for a 1.44MB floppy
; (18 sectors/track, 2 heads, drive 0) and reads via int 13h/AH=2.
; Retries indefinitely on a read error.
read_sector:
push bp
mov bp, sp
sub sp, 2
mov byte [bp - 2], cl
push bx
; al = ax / 18 (track*head), ah = ax % 18; sector numbers are 1-based
mov bl, 18
div bl
inc ah
mov dh, al
and dh, 1
shr al, 1
mov ch, al
pop bx
mov cl, ah
mov dl, 0
.retry_reading:
mov ah, 2
mov al, byte [bp - 2]
int 13h
jc .retry_reading
add sp, 2
pop bp
ret
; Switch to protected mode: mask interrupts, load the GDT, enable the
; A20 gate via port 0x92, set CR0.PE, and far-jump to flush the
; prefetch queue and load the 32-bit code selector (0x08).
goto_pm:
cli
lgdt [gdtr]
; Open A20
in al, 0x92
or al, 2
out 0x92, al
; Enable PE
mov eax, cr0
or al, 1
mov cr0, eax
jmp 0x8:protected_mode
[bits 32]
; 32-bit continuation: load flat data selectors, expand the kernel ELF
; image into place, and jump to its entry point (physical address
; returned in eax by expand_kernel) with ebx = boot info pointer.
protected_mode:
; Set data selectors
mov ax, 0x10
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
call expand_kernel
; Store address of information which is passed to the kernel
; | 0x7E00: the end address of expanded kernel
; | 0x7E04: number of memory map entries
; | 0x7E08: start address of memory map entries
mov ebx, BOOT_INFO_ADDRESS
; Jump into kernel
jmp eax
; eax: return value, store entry address
; Walk the ELF32 program headers of the kernel image loaded at 0x10000
; and expand each relevant segment via expand_segment.
; ELF header fields used: e_entry (+24), e_phoff (+28),
; e_phentsize (+42), e_phnum (+44).
; Locals: [ebp-4] = entry, [ebp-8] = current phdr offset,
; [ebp-10] = phdr size, [ebp-12] = remaining phdr count.
expand_kernel:
push ebp
mov ebp, esp
sub esp, 12
; Kernel file pointer
mov eax, 0x10000
; Entry address
mov ebx, dword [eax + 24]
mov dword [ebp - 4], ebx
; Program head offset
mov ebx, dword [eax + 28]
; Program head size
mov dx, word [eax + 42]
mov word [ebp - 10], dx
; Program head number
mov cx, word [eax + 44]
.expand_all_segments:
mov dword [ebp - 8], ebx
mov word [ebp - 12], cx
call expand_segment
mov ebx, dword [ebp - 8]
movzx edx, word [ebp - 10]
add ebx, edx
mov cx, word [ebp - 12]
dec cx
jnz .expand_all_segments
; Expand kernel success
; Store the end address of expanded kernel
; NOTE(review): edi is only meaningful if the last iterated segment
; was expandable (see expand_segment) -- confirm phdr ordering.
mov dword [BOOT_INFO_ADDRESS], edi
; Entry address as return value
mov eax, dword [ebp - 4]
sub eax, KERNEL_BASE
add esp, 12
pop ebp
ret
; eax: elf file buffer pointer
; ebx: current program head offset
; Expand one program header: copy p_filesz bytes from the file to the
; physical address (p_vaddr - KERNEL_BASE) and zero-fill up to
; p_memsz. Only types 1 (PT_LOAD) and 6 (PT_PHDR) are expanded.
; Phdr fields used: p_type (+0), p_offset (+4), p_vaddr (+8),
; p_filesz (+16), p_memsz (+20). Leaves edi past the segment end.
expand_segment:
add ebx, eax
; Type
mov ecx, dword [ebx]
cmp ecx, 1
jz .expand
cmp ecx, 6
jz .expand
jmp .expand_segment_success
.expand:
; File Offset
mov ecx, dword [ebx + 4]
lea esi, [eax + ecx]
; Virtual address
mov edi, dword [ebx + 8]
sub edi, KERNEL_BASE
; File size
mov ecx, dword [ebx + 16]
cld
rep movsb
; Clear remain memory
mov ecx, dword [ebx + 20]
sub ecx, dword [ebx + 16]
jz .expand_segment_success
.fill_zero:
mov byte [edi], 0
inc edi
loop .fill_zero
.expand_segment_success:
ret
; Flat 4GB GDT: null descriptor, ring-0 code (selector 0x08) and
; ring-0 data (selector 0x10), both base 0, limit 0xFFFFF with 4KB
; granularity and 32-bit default operand size.
gdt_start:
; Unused
dd 0
dd 0
; Code descriptor
dw 0xFFFF
dw 0x0
db 0x0
db 0x9a
db 0xCF
db 0x0
; Data descriptor
dw 0xFFFF
dw 0x0
db 0x0
db 0x92
db 0xCF
db 0x0
gdt_len equ $ - gdt_start
gdtr:
dw gdt_len - 1
dd gdt_start
; Pad to 510 bytes and append the BIOS boot signature.
times 510 - ($ - $$) db 0
dw 0xAA55
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,320
|
drivers/scsi/arm/acornscsi-io.S
|
/*
* linux/drivers/acorn/scsi/acornscsi-io.S: Acorn SCSI card IO
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>
#if defined(__APCS_32__)
#define LOADREGS(t,r,l...) ldm##t r, l
#elif defined(__APCS_26__)
#define LOADREGS(t,r,l...) ldm##t r, l##^
#endif
@ Purpose: transfer a block of data from the acorn scsi card to memory
@ Proto : void acornscsi_in(unsigned int addr_start, char *buffer, int length)
@ Returns: nothing
@ The card presents 16 bits of data per 32-bit word, so pairs of
@ card words are masked with lr (0x0000FFFF) and packed into one
@ memory word. Main loop moves 16 bytes per iteration, then 8/4/2
@ byte tails handle the remainder.
.align
ENTRY(__acornscsi_in)
stmfd sp!, {r4 - r7, lr}
bic r0, r0, #3
mov lr, #0xff
orr lr, lr, #0xff00
acornscsi_in16lp:
subs r2, r2, #16
bmi acornscsi_in8
ldmia r0!, {r3, r4, r5, r6}
and r3, r3, lr
orr r3, r3, r4, lsl #16
and r4, r5, lr
orr r4, r4, r6, lsl #16
ldmia r0!, {r5, r6, r7, ip}
and r5, r5, lr
orr r5, r5, r6, lsl #16
and r6, r7, lr
orr r6, r6, ip, lsl #16
stmia r1!, {r3 - r6}
bne acornscsi_in16lp
LOADREGS(fd, sp!, {r4 - r7, pc})
acornscsi_in8: adds r2, r2, #8
bmi acornscsi_in4
ldmia r0!, {r3, r4, r5, r6}
and r3, r3, lr
orr r3, r3, r4, lsl #16
and r4, r5, lr
orr r4, r4, r6, lsl #16
stmia r1!, {r3 - r4}
@ conditional return when the count is now exactly zero
LOADREGS(eqfd, sp!, {r4 - r7, pc})
sub r2, r2, #8
acornscsi_in4: adds r2, r2, #4
bmi acornscsi_in2
ldmia r0!, {r3, r4}
and r3, r3, lr
orr r3, r3, r4, lsl #16
str r3, [r1], #4
LOADREGS(eqfd, sp!, {r4 - r7, pc})
sub r2, r2, #4
acornscsi_in2: adds r2, r2, #2
ldr r3, [r0], #4
and r3, r3, lr
strb r3, [r1], #1
mov r3, r3, lsr #8
@ second byte stored only if a full 2 bytes remained (plus condition)
strplb r3, [r1], #1
LOADREGS(fd, sp!, {r4 - r7, pc})
@ Purpose: transfer a block of data from memory to the acorn scsi card
@ Proto : void acornscsi_in(unsigned int addr_start, char *buffer, int length)
@ Returns: nothing
@ Inverse of __acornscsi_in: each 16-bit half of a memory word is
@ duplicated into both halves of a card word before storing. Main
@ loop writes 16 bytes per iteration, then 8/4/2 byte tails.
ENTRY(__acornscsi_out)
stmfd sp!, {r4 - r6, lr}
bic r0, r0, #3
acornscsi_out16lp:
subs r2, r2, #16
bmi acornscsi_out8
ldmia r1!, {r4, r6, ip, lr}
mov r3, r4, lsl #16
orr r3, r3, r3, lsr #16
mov r4, r4, lsr #16
orr r4, r4, r4, lsl #16
mov r5, r6, lsl #16
orr r5, r5, r5, lsr #16
mov r6, r6, lsr #16
orr r6, r6, r6, lsl #16
stmia r0!, {r3, r4, r5, r6}
mov r3, ip, lsl #16
orr r3, r3, r3, lsr #16
mov r4, ip, lsr #16
orr r4, r4, r4, lsl #16
mov ip, lr, lsl #16
orr ip, ip, ip, lsr #16
mov lr, lr, lsr #16
orr lr, lr, lr, lsl #16
stmia r0!, {r3, r4, ip, lr}
bne acornscsi_out16lp
LOADREGS(fd, sp!, {r4 - r6, pc})
acornscsi_out8: adds r2, r2, #8
bmi acornscsi_out4
ldmia r1!, {r4, r6}
mov r3, r4, lsl #16
orr r3, r3, r3, lsr #16
mov r4, r4, lsr #16
orr r4, r4, r4, lsl #16
mov r5, r6, lsl #16
orr r5, r5, r5, lsr #16
mov r6, r6, lsr #16
orr r6, r6, r6, lsl #16
stmia r0!, {r3, r4, r5, r6}
@ conditional return when the count is now exactly zero
LOADREGS(eqfd, sp!, {r4 - r6, pc})
sub r2, r2, #8
acornscsi_out4: adds r2, r2, #4
bmi acornscsi_out2
ldr r4, [r1], #4
mov r3, r4, lsl #16
orr r3, r3, r3, lsr #16
mov r4, r4, lsr #16
orr r4, r4, r4, lsl #16
stmia r0!, {r3, r4}
LOADREGS(eqfd, sp!, {r4 - r6, pc})
sub r2, r2, #4
acornscsi_out2: adds r2, r2, #2
ldr r3, [r1], #2
strb r3, [r0], #1
mov r3, r3, lsr #8
strplb r3, [r0], #1
LOADREGS(fd, sp!, {r4 - r6, pc})
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,542
|
tools/perf/arch/arm/tests/regs_load.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#define R0 0x00
#define R1 0x08
#define R2 0x10
#define R3 0x18
#define R4 0x20
#define R5 0x28
#define R6 0x30
#define R7 0x38
#define R8 0x40
#define R9 0x48
#define SL 0x50
#define FP 0x58
#define IP 0x60
#define SP 0x68
#define LR 0x70
#define PC 0x78
/*
* Implementation of void perf_regs_load(u64 *regs);
*
* This functions fills in the 'regs' buffer from the actual registers values,
* in the way the perf built-in unwinding test expects them:
* - the PC at the time at the call to this function. Since this function
* is called using a bl instruction, the PC value is taken from LR.
* The built-in unwinding test then unwinds the call stack from the dwarf
* information in unwind__get_entries.
*
* Notes:
* - the 8 bytes stride in the registers offsets comes from the fact
* that the registers are stored in an u64 array (u64 *regs),
* - the regs buffer needs to be zeroed before the call to this function,
* in this case using a calloc in dwarf-unwind.c.
*/
.text
.type perf_regs_load,%function
// Dump all general-purpose registers into the u64 array at r0, using
// the offsets defined above. Clobbers nothing; the stored r0 value is
// the buffer pointer itself (r0 at entry).
ENTRY(perf_regs_load)
	str r0, [r0, #R0]
	str r1, [r0, #R1]
	str r2, [r0, #R2]
	str r3, [r0, #R3]
	str r4, [r0, #R4]
	str r5, [r0, #R5]
	str r6, [r0, #R6]
	str r7, [r0, #R7]
	str r8, [r0, #R8]
	str r9, [r0, #R9]
	str sl, [r0, #SL]
	str fp, [r0, #FP]
	str ip, [r0, #IP]
	str sp, [r0, #SP]
	str lr, [r0, #LR]
	str lr, [r0, #PC]	// store pc as lr in order to skip the call
				//  to this function
	// classic ARM return (works on pre-v5 cores too)
	mov pc, lr
ENDPROC(perf_regs_load)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,909
|
tools/perf/arch/x86/tests/regs_load.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#define AX 0
#define BX 1 * 8
#define CX 2 * 8
#define DX 3 * 8
#define SI 4 * 8
#define DI 5 * 8
#define BP 6 * 8
#define SP 7 * 8
#define IP 8 * 8
#define FLAGS 9 * 8
#define CS 10 * 8
#define SS 11 * 8
#define DS 12 * 8
#define ES 13 * 8
#define FS 14 * 8
#define GS 15 * 8
#define R8 16 * 8
#define R9 17 * 8
#define R10 18 * 8
#define R11 19 * 8
#define R12 20 * 8
#define R13 21 * 8
#define R14 22 * 8
#define R15 23 * 8
.text
#ifdef HAVE_ARCH_X86_64_SUPPORT
/*
 * void perf_regs_load(u64 *regs) -- 64-bit variant.
 * Fills the buffer at %rdi with the current register values; SP and IP
 * are adjusted to exclude this call (SP above the return address, IP =
 * the return address). Segment/flags slots are zeroed.
 */
ENTRY(perf_regs_load)
	movq %rax, AX(%rdi)
	movq %rbx, BX(%rdi)
	movq %rcx, CX(%rdi)
	movq %rdx, DX(%rdi)
	movq %rsi, SI(%rdi)
	movq %rdi, DI(%rdi)
	movq %rbp, BP(%rdi)
	leaq 8(%rsp), %rax /* exclude this call.  */
	movq %rax, SP(%rdi)
	movq 0(%rsp), %rax
	movq %rax, IP(%rdi)
	movq $0, FLAGS(%rdi)
	movq $0, CS(%rdi)
	movq $0, SS(%rdi)
	movq $0, DS(%rdi)
	movq $0, ES(%rdi)
	movq $0, FS(%rdi)
	movq $0, GS(%rdi)
	movq %r8, R8(%rdi)
	movq %r9, R9(%rdi)
	movq %r10, R10(%rdi)
	movq %r11, R11(%rdi)
	movq %r12, R12(%rdi)
	movq %r13, R13(%rdi)
	movq %r14, R14(%rdi)
	movq %r15, R15(%rdi)
	ret
ENDPROC(perf_regs_load)
#else
/*
 * 32-bit variant: the buffer pointer is on the stack. %edi is saved
 * with a push, and its original value is recovered via the later
 * `pop %eax` so it can be stored into the DI slot.
 */
ENTRY(perf_regs_load)
	push %edi
	movl 8(%esp), %edi
	movl %eax, AX(%edi)
	movl %ebx, BX(%edi)
	movl %ecx, CX(%edi)
	movl %edx, DX(%edi)
	movl %esi, SI(%edi)
	pop %eax	/* restore original %edi value */
	movl %eax, DI(%edi)
	movl %ebp, BP(%edi)
	leal 4(%esp), %eax /* exclude this call. */
	movl %eax, SP(%edi)
	movl 0(%esp), %eax
	movl %eax, IP(%edi)
	movl $0, FLAGS(%edi)
	movl $0, CS(%edi)
	movl $0, SS(%edi)
	movl $0, DS(%edi)
	movl $0, ES(%edi)
	movl $0, FS(%edi)
	movl $0, GS(%edi)
	ret
ENDPROC(perf_regs_load)
#endif
/*
* We need to provide note.GNU-stack section, saying that we want
* NOT executable stack. Otherwise the final linking will assume that
* the ELF stack should not be restricted at all and set it RWX.
*/
.section .note.GNU-stack,"",@progbits
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,560
|
tools/perf/arch/powerpc/tests/regs_load.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/* Offset is based on macros from arch/powerpc/include/uapi/asm/ptrace.h. */
#define R0 0
#define R1 1 * 8
#define R2 2 * 8
#define R3 3 * 8
#define R4 4 * 8
#define R5 5 * 8
#define R6 6 * 8
#define R7 7 * 8
#define R8 8 * 8
#define R9 9 * 8
#define R10 10 * 8
#define R11 11 * 8
#define R12 12 * 8
#define R13 13 * 8
#define R14 14 * 8
#define R15 15 * 8
#define R16 16 * 8
#define R17 17 * 8
#define R18 18 * 8
#define R19 19 * 8
#define R20 20 * 8
#define R21 21 * 8
#define R22 22 * 8
#define R23 23 * 8
#define R24 24 * 8
#define R25 25 * 8
#define R26 26 * 8
#define R27 27 * 8
#define R28 28 * 8
#define R29 29 * 8
#define R30 30 * 8
#define R31 31 * 8
#define NIP 32 * 8
#define CTR 35 * 8
#define LINK 36 * 8
#define XER 37 * 8
.globl perf_regs_load
/*
 * void perf_regs_load(u64 *regs) -- r3 points at the buffer.
 * Stores all 32 GPRs plus NIP/LINK (both taken from LR, so the
 * recorded PC skips this call), XER and CTR. r4 is clobbered as a
 * scratch register and restored from its own saved slot at the end.
 */
perf_regs_load:
	std 0, R0(3)
	std 1, R1(3)
	std 2, R2(3)
	std 3, R3(3)
	std 4, R4(3)
	std 5, R5(3)
	std 6, R6(3)
	std 7, R7(3)
	std 8, R8(3)
	std 9, R9(3)
	std 10, R10(3)
	std 11, R11(3)
	std 12, R12(3)
	std 13, R13(3)
	std 14, R14(3)
	std 15, R15(3)
	std 16, R16(3)
	std 17, R17(3)
	std 18, R18(3)
	std 19, R19(3)
	std 20, R20(3)
	std 21, R21(3)
	std 22, R22(3)
	std 23, R23(3)
	std 24, R24(3)
	std 25, R25(3)
	std 26, R26(3)
	std 27, R27(3)
	std 28, R28(3)
	std 29, R29(3)
	std 30, R30(3)
	std 31, R31(3)
	/* store NIP */
	mflr 4
	std 4, NIP(3)
	/* Store LR (same value: the return address of this call) */
	std 4, LINK(3)
	/* Store XER */
	mfxer 4
	std 4, XER(3)
	/* Store CTR */
	mfctr 4
	std 4, CTR(3)
	/* Restore original value of r4 */
	ld 4, R4(3)
	blr
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,250
|
tools/testing/selftests/x86/thunks_32.S
|
/*
* thunks_32.S - assembly helpers for mixed-bitness code
* Copyright (c) 2015 Denys Vlasenko
*
* This program is free software; you can redistribute it and/or modify
* it under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* These are little helpers that make it easier to switch bitness on
* the fly.
*/
.text
.code32
.global call64_from_32
.type call32_from_64, @function
// 4(%esp): function to call
call64_from_32:
// Fetch function address
mov 4(%esp), %eax
// Save registers which are callee-clobbered by 64-bit ABI
push %ecx
push %edx
push %esi
push %edi
// Switch to long mode
jmp $0x33,$1f
1: .code64
// Call the function
call *%rax
// Switch to compatibility mode
push $0x23 /* USER32_CS */
.code32; push $1f; .code64 /* hack: can't have X86_64_32S relocation in 32-bit ELF */
lretq
1: .code32
pop %edi
pop %esi
pop %edx
pop %ecx
ret
.size call64_from_32, .-call64_from_32
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,284
|
tools/testing/selftests/x86/thunks.S
|
/*
* thunks.S - assembly helpers for mixed-bitness code
* Copyright (c) 2015 Andrew Lutomirski
*
* This program is free software; you can redistribute it and/or modify
* it under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* These are little helpers that make it easier to switch bitness on
* the fly.
*/
.text
.global call32_from_64
.type call32_from_64, @function
// Call a 32-bit (compat-mode) function from 64-bit code.
// Switches to a caller-supplied stack (its old rsp is stashed at the
// top of the new stack so it can be restored), drops to compat mode
// with a far return through selector 0x23, calls the function, then
// far-jumps back through selector 0x33 into long mode.
call32_from_64:
	// rdi: stack to use
	// esi: function to call

	// Save registers (all 64-bit callee-saved regs plus flags)
	pushq %rbx
	pushq %rbp
	pushq %r12
	pushq %r13
	pushq %r14
	pushq %r15
	pushfq

	// Switch stacks
	mov %rsp,(%rdi)
	mov %rdi,%rsp

	// Switch to compatibility mode
	pushq $0x23	/* USER32_CS */
	pushq $1f
	lretq
1:
	.code32
	// Call the function
	call *%esi
	// Switch back to long mode
	jmp $0x33,$1f
	.code64
1:
	// Restore the stack
	mov (%rsp),%rsp

	// Restore registers
	popfq
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx

	ret

.size call32_from_64, .-call32_from_64
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,880
|
tools/testing/selftests/powerpc/copyloops/memcpy_power7.S
|
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2012
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
#include <asm/ppc_asm.h>
#ifndef SELFTEST_CASE
/* 0 == don't use VMX, 1 == use VMX */
#define SELFTEST_CASE 0
#endif
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
#else
#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
#endif
_GLOBAL(memcpy_power7)
cmpldi r5,16
cmpldi cr1,r5,4096
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
blt .Lshort_copy
#ifdef CONFIG_ALTIVEC
test_feature = SELFTEST_CASE
BEGIN_FTR_SECTION
bgt cr1, .Lvmx_copy
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
.Lnonvmx_copy:
/* Get the source 8B aligned */
neg r6,r4
mtocrf 0x01,r6
clrldi r6,r6,(64-3)
bf cr7*4+3,1f
lbz r0,0(r4)
addi r4,r4,1
stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
3: sub r5,r5,r6
cmpldi r5,128
blt 5f
mflr r0
stdu r1,-STACKFRAMESIZE(r1)
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
std r17,STK_REG(R17)(r1)
std r18,STK_REG(R18)(r1)
std r19,STK_REG(R19)(r1)
std r20,STK_REG(R20)(r1)
std r21,STK_REG(R21)(r1)
std r22,STK_REG(R22)(r1)
std r0,STACKFRAMESIZE+16(r1)
srdi r6,r5,7
mtctr r6
/* Now do cacheline (128B) sized loads and stores. */
.align 5
4:
ld r0,0(r4)
ld r6,8(r4)
ld r7,16(r4)
ld r8,24(r4)
ld r9,32(r4)
ld r10,40(r4)
ld r11,48(r4)
ld r12,56(r4)
ld r14,64(r4)
ld r15,72(r4)
ld r16,80(r4)
ld r17,88(r4)
ld r18,96(r4)
ld r19,104(r4)
ld r20,112(r4)
ld r21,120(r4)
addi r4,r4,128
std r0,0(r3)
std r6,8(r3)
std r7,16(r3)
std r8,24(r3)
std r9,32(r3)
std r10,40(r3)
std r11,48(r3)
std r12,56(r3)
std r14,64(r3)
std r15,72(r3)
std r16,80(r3)
std r17,88(r3)
std r18,96(r3)
std r19,104(r3)
std r20,112(r3)
std r21,120(r3)
addi r3,r3,128
bdnz 4b
clrldi r5,r5,(64-7)
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
ld r17,STK_REG(R17)(r1)
ld r18,STK_REG(R18)(r1)
ld r19,STK_REG(R19)(r1)
ld r20,STK_REG(R20)(r1)
ld r21,STK_REG(R21)(r1)
ld r22,STK_REG(R22)(r1)
addi r1,r1,STACKFRAMESIZE
/* Up to 127B to go */
5: srdi r6,r5,4
mtocrf 0x01,r6
6: bf cr7*4+1,7f
ld r0,0(r4)
ld r6,8(r4)
ld r7,16(r4)
ld r8,24(r4)
ld r9,32(r4)
ld r10,40(r4)
ld r11,48(r4)
ld r12,56(r4)
addi r4,r4,64
std r0,0(r3)
std r6,8(r3)
std r7,16(r3)
std r8,24(r3)
std r9,32(r3)
std r10,40(r3)
std r11,48(r3)
std r12,56(r3)
addi r3,r3,64
/* Up to 63B to go */
7: bf cr7*4+2,8f
ld r0,0(r4)
ld r6,8(r4)
ld r7,16(r4)
ld r8,24(r4)
addi r4,r4,32
std r0,0(r3)
std r6,8(r3)
std r7,16(r3)
std r8,24(r3)
addi r3,r3,32
/* Up to 31B to go */
8: bf cr7*4+3,9f
ld r0,0(r4)
ld r6,8(r4)
addi r4,r4,16
std r0,0(r3)
std r6,8(r3)
addi r3,r3,16
9: clrldi r5,r5,(64-4)
/* Up to 15B to go */
.Lshort_copy:
mtocrf 0x01,r5
bf cr7*4+0,12f
lwz r0,0(r4) /* Less chance of a reject with word ops */
lwz r6,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r6,4(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
lbz r0,0(r4)
stb r0,0(r3)
15: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
blr
.Lunwind_stack_nonvmx_copy:
addi r1,r1,STACKFRAMESIZE
b .Lnonvmx_copy
.Lvmx_copy:
#ifdef CONFIG_ALTIVEC
mflr r0
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl enter_vmx_ops
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
ld r4,STK_REG(R30)(r1)
ld r5,STK_REG(R29)(r1)
mtlr r0
/*
* We prefetch both the source and destination using enhanced touch
* instructions. We use a stream ID of 0 for the load side and
* 1 for the store side.
*/
clrrdi r6,r4,7
clrrdi r9,r3,7
ori r9,r9,1 /* stream=1 */
srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
cmpldi r7,0x3FF
ble 1f
li r7,0x3FF
1: lis r0,0x0E00 /* depth=7 */
sldi r7,r7,7
or r7,r7,r0
ori r10,r7,1 /* stream=1 */
lis r8,0x8000 /* GO=1 */
clrldi r8,r8,32
dcbt 0,r6,0b01000
dcbt 0,r7,0b01010
dcbtst 0,r9,0b01000
dcbtst 0,r10,0b01010
eieio
dcbt 0,r8,0b01010 /* GO */
beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
* slower permute loop.
*/
xor r6,r4,r3
rldicl. r6,r6,0,(64-4)
bne .Lvmx_unaligned_copy
/* Get the destination 16B aligned */
neg r6,r3
mtocrf 0x01,r6
clrldi r6,r6,(64-4)
bf cr7*4+3,1f
lbz r0,0(r4)
addi r4,r4,1
stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
3: bf cr7*4+0,4f
ld r0,0(r4)
addi r4,r4,8
std r0,0(r3)
addi r3,r3,8
4: sub r5,r5,r6
/* Get the desination 128B aligned */
neg r6,r3
srdi r7,r6,4
mtocrf 0x01,r7
clrldi r6,r6,(64-7)
li r9,16
li r10,32
li r11,48
bf cr7*4+3,5f
lvx v1,0,r4
addi r4,r4,16
stvx v1,0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
lvx v1,0,r4
lvx v0,r4,r9
addi r4,r4,32
stvx v1,0,r3
stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
lvx v3,0,r4
lvx v2,r4,r9
lvx v1,r4,r10
lvx v0,r4,r11
addi r4,r4,64
stvx v3,0,r3
stvx v2,r3,r9
stvx v1,r3,r10
stvx v0,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
srdi r6,r5,7
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
li r15,96
li r16,112
mtctr r6
/*
* Now do cacheline sized loads and stores. By this stage the
* cacheline stores are also cacheline aligned.
*/
.align 5
8:
lvx v7,0,r4
lvx v6,r4,r9
lvx v5,r4,r10
lvx v4,r4,r11
lvx v3,r4,r12
lvx v2,r4,r14
lvx v1,r4,r15
lvx v0,r4,r16
addi r4,r4,128
stvx v7,0,r3
stvx v6,r3,r9
stvx v5,r3,r10
stvx v4,r3,r11
stvx v3,r3,r12
stvx v2,r3,r14
stvx v1,r3,r15
stvx v0,r3,r16
addi r3,r3,128
bdnz 8b
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
srdi r6,r5,4
mtocrf 0x01,r6
bf cr7*4+1,9f
lvx v3,0,r4
lvx v2,r4,r9
lvx v1,r4,r10
lvx v0,r4,r11
addi r4,r4,64
stvx v3,0,r3
stvx v2,r3,r9
stvx v1,r3,r10
stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
lvx v1,0,r4
lvx v0,r4,r9
addi r4,r4,32
stvx v1,0,r3
stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
lvx v1,0,r4
addi r4,r4,16
stvx v1,0,r3
addi r3,r3,16
/* Up to 15B to go */
11: clrldi r5,r5,(64-4)
mtocrf 0x01,r5
bf cr7*4+0,12f
ld r0,0(r4)
addi r4,r4,8
std r0,0(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
lbz r0,0(r4)
stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
b exit_vmx_ops /* tail call optimise */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
neg r6,r3
mtocrf 0x01,r6
clrldi r6,r6,(64-4)
bf cr7*4+3,1f
lbz r0,0(r4)
addi r4,r4,1
stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
3: bf cr7*4+0,4f
lwz r0,0(r4) /* Less chance of a reject with word ops */
lwz r7,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r7,4(r3)
addi r3,r3,8
4: sub r5,r5,r6
/* Get the desination 128B aligned */
neg r6,r3
srdi r7,r6,4
mtocrf 0x01,r7
clrldi r6,r6,(64-7)
li r9,16
li r10,32
li r11,48
LVS(v16,0,r4) /* Setup permute control vector */
lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
stvx v8,0,r3
addi r3,r3,16
vor v0,v1,v1
5: bf cr7*4+2,6f
lvx v1,0,r4
VPERM(v8,v0,v1,v16)
lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
stvx v8,0,r3
stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
lvx v3,0,r4
VPERM(v8,v0,v3,v16)
lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
lvx v1,r4,r10
VPERM(v10,v2,v1,v16)
lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
srdi r6,r5,7
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
li r15,96
li r16,112
mtctr r6
/*
* Now do cacheline sized loads and stores. By this stage the
* cacheline stores are also cacheline aligned.
*/
.align 5
8:
lvx v7,0,r4
VPERM(v8,v0,v7,v16)
lvx v6,r4,r9
VPERM(v9,v7,v6,v16)
lvx v5,r4,r10
VPERM(v10,v6,v5,v16)
lvx v4,r4,r11
VPERM(v11,v5,v4,v16)
lvx v3,r4,r12
VPERM(v12,v4,v3,v16)
lvx v2,r4,r14
VPERM(v13,v3,v2,v16)
lvx v1,r4,r15
VPERM(v14,v2,v1,v16)
lvx v0,r4,r16
VPERM(v15,v1,v0,v16)
addi r4,r4,128
stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
stvx v12,r3,r12
stvx v13,r3,r14
stvx v14,r3,r15
stvx v15,r3,r16
addi r3,r3,128
bdnz 8b
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
srdi r6,r5,4
mtocrf 0x01,r6
bf cr7*4+1,9f
lvx v3,0,r4
VPERM(v8,v0,v3,v16)
lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
lvx v1,r4,r10
VPERM(v10,v2,v1,v16)
lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
lvx v1,0,r4
VPERM(v8,v0,v1,v16)
lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
stvx v8,0,r3
stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
stvx v8,0,r3
addi r3,r3,16
/* Up to 15B to go */
11: clrldi r5,r5,(64-4)
addi r4,r4,-16 /* Unwind the +16 load offset */
mtocrf 0x01,r5
bf cr7*4+0,12f
lwz r0,0(r4) /* Less chance of a reject with word ops */
lwz r6,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r6,4(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
lbz r0,0(r4)
stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
b exit_vmx_ops /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,422
|
tools/testing/selftests/powerpc/copyloops/memcpy_64.S
|
/*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#ifndef SELFTEST_CASE
/* For big-endian, 0 == most CPUs, 1 == POWER6, 2 == Cell */
#define SELFTEST_CASE 0
#endif
.align 7
_GLOBAL_TOC(memcpy)
BEGIN_FTR_SECTION
#ifdef __LITTLE_ENDIAN__
cmpdi cr7,r5,0
#else
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */
#endif
FTR_SECTION_ELSE
#ifdef CONFIG_PPC_BOOK3S_64
b memcpy_power7
#endif
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#ifdef __LITTLE_ENDIAN__
/* dumb little-endian memcpy that will get replaced at runtime */
addi r9,r3,-1
addi r4,r4,-1
beqlr cr7
mtctr r5
1: lbzu r10,1(r4)
stbu r10,1(r9)
bdnz 1b
blr
#else
PPC_MTOCRF(0x01,r5)
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
andi. r6,r6,7
dcbt 0,r4
blt cr1,.Lshort_copy
/* Below we want to nop out the bne if we're on a CPU that has the
CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
cleared.
At the time of writing the only CPU that has this combination of bits
set is Power6. */
test_feature = (SELFTEST_CASE == 1)
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
bne .Ldst_unaligned
ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
addi r3,r3,-16
test_feature = (SELFTEST_CASE == 0)
BEGIN_FTR_SECTION
andi. r0,r4,7
bne .Lsrc_unaligned
END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
srdi r7,r5,4
ld r9,0(r4)
addi r4,r4,-8
mtctr r7
andi. r5,r5,7
bf cr7*4+0,2f
addi r3,r3,8
addi r4,r4,8
mr r8,r9
blt cr1,3f
1: ld r9,8(r4)
std r8,8(r3)
2: ldu r8,16(r4)
stdu r9,16(r3)
bdnz 1b
3: std r8,8(r3)
beq 3f
addi r3,r3,16
.Ldo_tail:
bf cr7*4+1,1f
lwz r9,8(r4)
addi r4,r4,4
stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
lhz r9,8(r4)
addi r4,r4,2
sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
lbz r9,8(r4)
stb r9,0(r3)
3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr
.Lsrc_unaligned:
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpdi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,0f
ld r9,0(r4) # 3+2n loads, 2+2n stores
ld r0,8(r4)
sld r6,r9,r10
ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
or r7,r7,r6
blt cr6,4f
ld r0,8(r4)
# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
b 2f
0: ld r0,0(r4) # 4+2n loads, 3+2n stores
ldu r9,8(r4)
sld r8,r0,r10
addi r3,r3,-8
blt cr6,5f
ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
addi r3,r3,16
beq cr6,3f
# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1: or r7,r7,r6
ld r0,8(r4)
std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
bdnz 1b
3: std r12,8(r3)
or r7,r7,r6
4: std r7,16(r3)
5: srd r12,r9,r11
or r12,r8,r12
std r12,24(r3)
beq 4f
cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
ble cr1,6f
ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
6:
bf cr7*4+1,1f
rotldi r9,r9,32
stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
rotldi r9,r9,16
sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
rotldi r9,r9,8
stb r9,0(r3)
3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr
.Ldst_unaligned:
PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
bf cr7*4+3,1f
lbz r0,0(r4)
stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
lhzx r0,r7,r4
sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
lwzx r0,r7,r4
stwx r0,r7,r3
3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
bf cr7*4+0,1f
lwz r0,0(r4)
lwz r9,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
lbz r0,0(r4)
stb r0,0(r3)
4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr
#endif
EXPORT_SYMBOL(memcpy)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 12,816
|
tools/testing/selftests/powerpc/copyloops/copyuser_power7.S
|
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2011
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
#include <asm/ppc_asm.h>
#ifndef SELFTEST_CASE
/* 0 == don't use VMX, 1 == use VMX */
#define SELFTEST_CASE 0
#endif
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
#else
#define LVS(VRT,RA,RB) lvsr VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC
#endif
.macro err1
100:
EX_TABLE(100b,.Ldo_err1)
.endm
.macro err2
200:
EX_TABLE(200b,.Ldo_err2)
.endm
#ifdef CONFIG_ALTIVEC
.macro err3
300:
EX_TABLE(300b,.Ldo_err3)
.endm
.macro err4
400:
EX_TABLE(400b,.Ldo_err4)
.endm
.Ldo_err4:
ld r16,STK_REG(R16)(r1)
ld r15,STK_REG(R15)(r1)
ld r14,STK_REG(R14)(r1)
.Ldo_err3:
bl exit_vmx_usercopy
ld r0,STACKFRAMESIZE+16(r1)
mtlr r0
b .Lexit
#endif /* CONFIG_ALTIVEC */
.Ldo_err2:
ld r22,STK_REG(R22)(r1)
ld r21,STK_REG(R21)(r1)
ld r20,STK_REG(R20)(r1)
ld r19,STK_REG(R19)(r1)
ld r18,STK_REG(R18)(r1)
ld r17,STK_REG(R17)(r1)
ld r16,STK_REG(R16)(r1)
ld r15,STK_REG(R15)(r1)
ld r14,STK_REG(R14)(r1)
.Lexit:
addi r1,r1,STACKFRAMESIZE
.Ldo_err1:
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
ld r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
ld r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
b __copy_tofrom_user_base
_GLOBAL(__copy_tofrom_user_power7)
cmpldi r5,16
cmpldi cr1,r5,3328
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
blt .Lshort_copy
#ifdef CONFIG_ALTIVEC
test_feature = SELFTEST_CASE
BEGIN_FTR_SECTION
bgt cr1,.Lvmx_copy
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
.Lnonvmx_copy:
/* Get the source 8B aligned */
neg r6,r4
mtocrf 0x01,r6
clrldi r6,r6,(64-3)
bf cr7*4+3,1f
err1; lbz r0,0(r4)
addi r4,r4,1
err1; stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
err1; lhz r0,0(r4)
addi r4,r4,2
err1; sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
err1; lwz r0,0(r4)
addi r4,r4,4
err1; stw r0,0(r3)
addi r3,r3,4
3: sub r5,r5,r6
cmpldi r5,128
blt 5f
mflr r0
stdu r1,-STACKFRAMESIZE(r1)
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
std r17,STK_REG(R17)(r1)
std r18,STK_REG(R18)(r1)
std r19,STK_REG(R19)(r1)
std r20,STK_REG(R20)(r1)
std r21,STK_REG(R21)(r1)
std r22,STK_REG(R22)(r1)
std r0,STACKFRAMESIZE+16(r1)
srdi r6,r5,7
mtctr r6
/* Now do cacheline (128B) sized loads and stores. */
.align 5
4:
err2; ld r0,0(r4)
err2; ld r6,8(r4)
err2; ld r7,16(r4)
err2; ld r8,24(r4)
err2; ld r9,32(r4)
err2; ld r10,40(r4)
err2; ld r11,48(r4)
err2; ld r12,56(r4)
err2; ld r14,64(r4)
err2; ld r15,72(r4)
err2; ld r16,80(r4)
err2; ld r17,88(r4)
err2; ld r18,96(r4)
err2; ld r19,104(r4)
err2; ld r20,112(r4)
err2; ld r21,120(r4)
addi r4,r4,128
err2; std r0,0(r3)
err2; std r6,8(r3)
err2; std r7,16(r3)
err2; std r8,24(r3)
err2; std r9,32(r3)
err2; std r10,40(r3)
err2; std r11,48(r3)
err2; std r12,56(r3)
err2; std r14,64(r3)
err2; std r15,72(r3)
err2; std r16,80(r3)
err2; std r17,88(r3)
err2; std r18,96(r3)
err2; std r19,104(r3)
err2; std r20,112(r3)
err2; std r21,120(r3)
addi r3,r3,128
bdnz 4b
clrldi r5,r5,(64-7)
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
ld r17,STK_REG(R17)(r1)
ld r18,STK_REG(R18)(r1)
ld r19,STK_REG(R19)(r1)
ld r20,STK_REG(R20)(r1)
ld r21,STK_REG(R21)(r1)
ld r22,STK_REG(R22)(r1)
addi r1,r1,STACKFRAMESIZE
/* Up to 127B to go */
5: srdi r6,r5,4
mtocrf 0x01,r6
6: bf cr7*4+1,7f
err1; ld r0,0(r4)
err1; ld r6,8(r4)
err1; ld r7,16(r4)
err1; ld r8,24(r4)
err1; ld r9,32(r4)
err1; ld r10,40(r4)
err1; ld r11,48(r4)
err1; ld r12,56(r4)
addi r4,r4,64
err1; std r0,0(r3)
err1; std r6,8(r3)
err1; std r7,16(r3)
err1; std r8,24(r3)
err1; std r9,32(r3)
err1; std r10,40(r3)
err1; std r11,48(r3)
err1; std r12,56(r3)
addi r3,r3,64
/* Up to 63B to go */
7: bf cr7*4+2,8f
err1; ld r0,0(r4)
err1; ld r6,8(r4)
err1; ld r7,16(r4)
err1; ld r8,24(r4)
addi r4,r4,32
err1; std r0,0(r3)
err1; std r6,8(r3)
err1; std r7,16(r3)
err1; std r8,24(r3)
addi r3,r3,32
/* Up to 31B to go */
8: bf cr7*4+3,9f
err1; ld r0,0(r4)
err1; ld r6,8(r4)
addi r4,r4,16
err1; std r0,0(r3)
err1; std r6,8(r3)
addi r3,r3,16
9: clrldi r5,r5,(64-4)
/* Up to 15B to go */
.Lshort_copy:
mtocrf 0x01,r5
bf cr7*4+0,12f
err1; lwz r0,0(r4) /* Less chance of a reject with word ops */
err1; lwz r6,4(r4)
addi r4,r4,8
err1; stw r0,0(r3)
err1; stw r6,4(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
err1; lwz r0,0(r4)
addi r4,r4,4
err1; stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
err1; lhz r0,0(r4)
addi r4,r4,2
err1; sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
err1; lbz r0,0(r4)
err1; stb r0,0(r3)
15: li r3,0
blr
.Lunwind_stack_nonvmx_copy:
addi r1,r1,STACKFRAMESIZE
b .Lnonvmx_copy
.Lvmx_copy:
#ifdef CONFIG_ALTIVEC
mflr r0
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl enter_vmx_usercopy
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
ld r4,STK_REG(R30)(r1)
ld r5,STK_REG(R29)(r1)
mtlr r0
/*
* We prefetch both the source and destination using enhanced touch
* instructions. We use a stream ID of 0 for the load side and
* 1 for the store side.
*/
clrrdi r6,r4,7
clrrdi r9,r3,7
ori r9,r9,1 /* stream=1 */
srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
cmpldi r7,0x3FF
ble 1f
li r7,0x3FF
1: lis r0,0x0E00 /* depth=7 */
sldi r7,r7,7
or r7,r7,r0
ori r10,r7,1 /* stream=1 */
lis r8,0x8000 /* GO=1 */
clrldi r8,r8,32
/* setup read stream 0 */
dcbt 0,r6,0b01000 /* addr from */
dcbt 0,r7,0b01010 /* length and depth from */
/* setup write stream 1 */
dcbtst 0,r9,0b01000 /* addr to */
dcbtst 0,r10,0b01010 /* length and depth to */
eieio
dcbt 0,r8,0b01010 /* all streams GO */
beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
* slower permute loop.
*/
xor r6,r4,r3
rldicl. r6,r6,0,(64-4)
bne .Lvmx_unaligned_copy
/* Get the destination 16B aligned */
neg r6,r3
mtocrf 0x01,r6
clrldi r6,r6,(64-4)
bf cr7*4+3,1f
err3; lbz r0,0(r4)
addi r4,r4,1
err3; stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
err3; lhz r0,0(r4)
addi r4,r4,2
err3; sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
err3; lwz r0,0(r4)
addi r4,r4,4
err3; stw r0,0(r3)
addi r3,r3,4
3: bf cr7*4+0,4f
err3; ld r0,0(r4)
addi r4,r4,8
err3; std r0,0(r3)
addi r3,r3,8
4: sub r5,r5,r6
/* Get the desination 128B aligned */
neg r6,r3
srdi r7,r6,4
mtocrf 0x01,r7
clrldi r6,r6,(64-7)
li r9,16
li r10,32
li r11,48
bf cr7*4+3,5f
err3; lvx v1,0,r4
addi r4,r4,16
err3; stvx v1,0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
err3; lvx v1,0,r4
err3; lvx v0,r4,r9
addi r4,r4,32
err3; stvx v1,0,r3
err3; stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
err3; lvx v3,0,r4
err3; lvx v2,r4,r9
err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
addi r4,r4,64
err3; stvx v3,0,r3
err3; stvx v2,r3,r9
err3; stvx v1,r3,r10
err3; stvx v0,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
srdi r6,r5,7
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
li r15,96
li r16,112
mtctr r6
/*
* Now do cacheline sized loads and stores. By this stage the
* cacheline stores are also cacheline aligned.
*/
.align 5
8:
err4; lvx v7,0,r4
err4; lvx v6,r4,r9
err4; lvx v5,r4,r10
err4; lvx v4,r4,r11
err4; lvx v3,r4,r12
err4; lvx v2,r4,r14
err4; lvx v1,r4,r15
err4; lvx v0,r4,r16
addi r4,r4,128
err4; stvx v7,0,r3
err4; stvx v6,r3,r9
err4; stvx v5,r3,r10
err4; stvx v4,r3,r11
err4; stvx v3,r3,r12
err4; stvx v2,r3,r14
err4; stvx v1,r3,r15
err4; stvx v0,r3,r16
addi r3,r3,128
bdnz 8b
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
srdi r6,r5,4
mtocrf 0x01,r6
bf cr7*4+1,9f
err3; lvx v3,0,r4
err3; lvx v2,r4,r9
err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
addi r4,r4,64
err3; stvx v3,0,r3
err3; stvx v2,r3,r9
err3; stvx v1,r3,r10
err3; stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
err3; lvx v1,0,r4
err3; lvx v0,r4,r9
addi r4,r4,32
err3; stvx v1,0,r3
err3; stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
err3; lvx v1,0,r4
addi r4,r4,16
err3; stvx v1,0,r3
addi r3,r3,16
/* Up to 15B to go */
11: clrldi r5,r5,(64-4)
mtocrf 0x01,r5
bf cr7*4+0,12f
err3; ld r0,0(r4)
addi r4,r4,8
err3; std r0,0(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
err3; lwz r0,0(r4)
addi r4,r4,4
err3; stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
err3; lhz r0,0(r4)
addi r4,r4,2
err3; sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
b exit_vmx_usercopy /* tail call optimise */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
neg r6,r3
mtocrf 0x01,r6
clrldi r6,r6,(64-4)
bf cr7*4+3,1f
err3; lbz r0,0(r4)
addi r4,r4,1
err3; stb r0,0(r3)
addi r3,r3,1
1: bf cr7*4+2,2f
err3; lhz r0,0(r4)
addi r4,r4,2
err3; sth r0,0(r3)
addi r3,r3,2
2: bf cr7*4+1,3f
err3; lwz r0,0(r4)
addi r4,r4,4
err3; stw r0,0(r3)
addi r3,r3,4
3: bf cr7*4+0,4f
err3; lwz r0,0(r4) /* Less chance of a reject with word ops */
err3; lwz r7,4(r4)
addi r4,r4,8
err3; stw r0,0(r3)
err3; stw r7,4(r3)
addi r3,r3,8
4: sub r5,r5,r6
/* Get the desination 128B aligned */
neg r6,r3
srdi r7,r6,4
mtocrf 0x01,r7
clrldi r6,r6,(64-7)
li r9,16
li r10,32
li r11,48
LVS(v16,0,r4) /* Setup permute control vector */
err3; lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
err3; stvx v8,0,r3
addi r3,r3,16
vor v0,v1,v1
5: bf cr7*4+2,6f
err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
err3; stvx v8,0,r3
err3; stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
err3; lvx v1,r4,r10
VPERM(v10,v2,v1,v16)
err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
err3; stvx v8,0,r3
err3; stvx v9,r3,r9
err3; stvx v10,r3,r10
err3; stvx v11,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
srdi r6,r5,7
std r14,STK_REG(R14)(r1)
std r15,STK_REG(R15)(r1)
std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
li r15,96
li r16,112
mtctr r6
/*
* Now do cacheline sized loads and stores. By this stage the
* cacheline stores are also cacheline aligned.
*/
.align 5
8:
err4; lvx v7,0,r4
VPERM(v8,v0,v7,v16)
err4; lvx v6,r4,r9
VPERM(v9,v7,v6,v16)
err4; lvx v5,r4,r10
VPERM(v10,v6,v5,v16)
err4; lvx v4,r4,r11
VPERM(v11,v5,v4,v16)
err4; lvx v3,r4,r12
VPERM(v12,v4,v3,v16)
err4; lvx v2,r4,r14
VPERM(v13,v3,v2,v16)
err4; lvx v1,r4,r15
VPERM(v14,v2,v1,v16)
err4; lvx v0,r4,r16
VPERM(v15,v1,v0,v16)
addi r4,r4,128
err4; stvx v8,0,r3
err4; stvx v9,r3,r9
err4; stvx v10,r3,r10
err4; stvx v11,r3,r11
err4; stvx v12,r3,r12
err4; stvx v13,r3,r14
err4; stvx v14,r3,r15
err4; stvx v15,r3,r16
addi r3,r3,128
bdnz 8b
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
srdi r6,r5,4
mtocrf 0x01,r6
bf cr7*4+1,9f
err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
err3; lvx v1,r4,r10
VPERM(v10,v2,v1,v16)
err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
err3; stvx v8,0,r3
err3; stvx v9,r3,r9
err3; stvx v10,r3,r10
err3; stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
err3; stvx v8,0,r3
err3; stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
err3; stvx v8,0,r3
addi r3,r3,16
/* Up to 15B to go */
11: clrldi r5,r5,(64-4)
addi r4,r4,-16 /* Unwind the +16 load offset */
mtocrf 0x01,r5
bf cr7*4+0,12f
err3; lwz r0,0(r4) /* Less chance of a reject with word ops */
err3; lwz r6,4(r4)
addi r4,r4,8
err3; stw r0,0(r3)
err3; stw r6,4(r3)
addi r3,r3,8
12: bf cr7*4+1,13f
err3; lwz r0,0(r4)
addi r4,r4,4
err3; stw r0,0(r3)
addi r3,r3,4
13: bf cr7*4+2,14f
err3; lhz r0,0(r4)
addi r4,r4,2
err3; sth r0,0(r3)
addi r3,r3,2
14: bf cr7*4+3,15f
err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
b exit_vmx_usercopy /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 11,358
|
tools/testing/selftests/powerpc/copyloops/copyuser_64.S
|
/*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#ifndef SELFTEST_CASE
/* 0 == most CPUs, 1 == POWER6, 2 == Cell */
#define SELFTEST_CASE 0
#endif
#ifdef __BIG_ENDIAN__
#define sLd sld /* Shift towards low-numbered address. */
#define sHd srd /* Shift towards high-numbered address. */
#else
#define sLd srd /* Shift towards low-numbered address. */
#define sHd sld /* Shift towards high-numbered address. */
#endif
/*
* These macros are used to generate exception table entries.
* The exception handlers below use the original arguments
* (stored on the stack) and the point where we're up to in
* the destination buffer, i.e. the address of the first
* unmodified byte. Generally r3 points into the destination
* buffer, but the first unmodified byte is at a variable
* offset from r3. In the code below, the symbol r3_offset
* is set to indicate the current offset at each point in
* the code. This offset is then used as a negative offset
* from the exception handler code, and those instructions
* before the exception handlers are addi instructions that
* adjust r3 to point to the correct place.
*/
.macro lex /* exception handler for load */
100: EX_TABLE(100b, .Lld_exc - r3_offset)
.endm
.macro stex /* exception handler for store */
100: EX_TABLE(100b, .Lst_exc - r3_offset)
.endm
.align 7
_GLOBAL_TOC(__copy_tofrom_user)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
b __copy_tofrom_user_power7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
#endif
_GLOBAL(__copy_tofrom_user_base)
/* first check for a 4kB copy on a 4kB boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
or r0,r3,r4
neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
andi. r0,r0,4095
std r3,-24(r1)
crand cr0*4+2,cr0*4+2,cr6*4+2
std r4,-16(r1)
std r5,-8(r1)
dcbt 0,r4
beq .Lcopy_page_4K
andi. r6,r6,7
PPC_MTOCRF(0x01,r5)
blt cr1,.Lshort_copy
/* Below we want to nop out the bne if we're on a CPU that has the
* CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
* cleared.
* At the time of writing the only CPU that has this combination of bits
* set is Power6.
*/
test_feature = (SELFTEST_CASE == 1)
BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
bne .Ldst_unaligned
ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
addi r3,r3,-16
r3_offset = 16
test_feature = (SELFTEST_CASE == 0)
BEGIN_FTR_SECTION
andi. r0,r4,7
bne .Lsrc_unaligned
END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blt cr1,.Ldo_tail /* if < 16 bytes to copy */
srdi r0,r5,5
cmpdi cr1,r0,0
lex; ld r7,0(r4)
lex; ld r6,8(r4)
addi r4,r4,16
mtctr r0
andi. r0,r5,0x10
beq 22f
addi r3,r3,16
r3_offset = 0
addi r4,r4,-16
mr r9,r7
mr r8,r6
beq cr1,72f
21:
lex; ld r7,16(r4)
lex; ld r6,24(r4)
addi r4,r4,32
stex; std r9,0(r3)
r3_offset = 8
stex; std r8,8(r3)
r3_offset = 16
22:
lex; ld r9,0(r4)
lex; ld r8,8(r4)
stex; std r7,16(r3)
r3_offset = 24
stex; std r6,24(r3)
addi r3,r3,32
r3_offset = 0
bdnz 21b
72:
stex; std r9,0(r3)
r3_offset = 8
stex; std r8,8(r3)
r3_offset = 16
andi. r5,r5,0xf
beq+ 3f
addi r4,r4,16
.Ldo_tail:
addi r3,r3,16
r3_offset = 0
bf cr7*4+0,246f
lex; ld r9,0(r4)
addi r4,r4,8
stex; std r9,0(r3)
addi r3,r3,8
246: bf cr7*4+1,1f
lex; lwz r9,0(r4)
addi r4,r4,4
stex; stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
lex; lhz r9,0(r4)
addi r4,r4,2
stex; sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
lex; lbz r9,0(r4)
stex; stb r9,0(r3)
3: li r3,0
blr
.Lsrc_unaligned:
r3_offset = 16
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpldi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,28f
lex; ld r9,0(r4) /* 3+2n loads, 2+2n stores */
lex; ld r0,8(r4)
sLd r6,r9,r10
lex; ldu r9,16(r4)
sHd r7,r0,r11
sLd r8,r0,r10
or r7,r7,r6
blt cr6,79f
lex; ld r0,8(r4)
b 2f
28:
lex; ld r0,0(r4) /* 4+2n loads, 3+2n stores */
lex; ldu r9,8(r4)
sLd r8,r0,r10
addi r3,r3,-8
r3_offset = 24
blt cr6,5f
lex; ld r0,8(r4)
sHd r12,r9,r11
sLd r6,r9,r10
lex; ldu r9,16(r4)
or r12,r8,r12
sHd r7,r0,r11
sLd r8,r0,r10
addi r3,r3,16
r3_offset = 8
beq cr6,78f
1: or r7,r7,r6
lex; ld r0,8(r4)
stex; std r12,8(r3)
r3_offset = 16
2: sHd r12,r9,r11
sLd r6,r9,r10
lex; ldu r9,16(r4)
or r12,r8,r12
stex; stdu r7,16(r3)
r3_offset = 8
sHd r7,r0,r11
sLd r8,r0,r10
bdnz 1b
78:
stex; std r12,8(r3)
r3_offset = 16
or r7,r7,r6
79:
stex; std r7,16(r3)
r3_offset = 24
5: sHd r12,r9,r11
or r12,r8,r12
stex; std r12,24(r3)
r3_offset = 32
bne 6f
li r3,0
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
r3_offset = 0
sLd r9,r9,r10
ble cr1,7f
lex; ld r0,8(r4)
sHd r7,r0,r11
or r9,r7,r9
7:
bf cr7*4+1,1f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,32
#endif
stex; stw r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,32
#endif
addi r3,r3,4
1: bf cr7*4+2,2f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,16
#endif
stex; sth r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,16
#endif
addi r3,r3,2
2: bf cr7*4+3,3f
#ifdef __BIG_ENDIAN__
rotldi r9,r9,8
#endif
stex; stb r9,0(r3)
#ifdef __LITTLE_ENDIAN__
rotrdi r9,r9,8
#endif
3: li r3,0
blr
.Ldst_unaligned:
r3_offset = 0
PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
bf cr7*4+3,1f
100: EX_TABLE(100b, .Lld_exc_r7)
lbz r0,0(r4)
100: EX_TABLE(100b, .Lst_exc_r7)
stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
100: EX_TABLE(100b, .Lld_exc_r7)
lhzx r0,r7,r4
100: EX_TABLE(100b, .Lst_exc_r7)
sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
100: EX_TABLE(100b, .Lld_exc_r7)
lwzx r0,r7,r4
100: EX_TABLE(100b, .Lst_exc_r7)
stwx r0,r7,r3
3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
r3_offset = 0
bf cr7*4+0,1f
lex; lwz r0,0(r4)
lex; lwz r9,4(r4)
addi r4,r4,8
stex; stw r0,0(r3)
stex; stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
lex; lwz r0,0(r4)
addi r4,r4,4
stex; stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
lex; lhz r0,0(r4)
addi r4,r4,2
stex; sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
lex; lbz r0,0(r4)
stex; stb r0,0(r3)
4: li r3,0
blr
/*
* exception handlers follow
* we have to return the number of bytes not copied
* for an exception on a load, we set the rest of the destination to 0
* Note that the number of bytes of instructions for adjusting r3 needs
* to equal the amount of the adjustment, due to the trick of using
* .Lld_exc - r3_offset as the handler address.
*/
.Lld_exc_r7:
add r3,r3,r7
b .Lld_exc
/* adjust by 24 */
addi r3,r3,8
nop
/* adjust by 16 */
addi r3,r3,8
nop
/* adjust by 8 */
addi r3,r3,8
nop
/*
* Here we have had a fault on a load and r3 points to the first
* unmodified byte of the destination. We use the original arguments
* and r3 to work out how much wasn't copied. Since we load some
* distance ahead of the stores, we continue copying byte-by-byte until
* we hit the load fault again in order to copy as much as possible.
*/
.Lld_exc:
ld r6,-24(r1)
ld r4,-16(r1)
ld r5,-8(r1)
subf r6,r6,r3
add r4,r4,r6
subf r5,r6,r5 /* #bytes left to go */
/*
* first see if we can copy any more bytes before hitting another exception
*/
mtctr r5
r3_offset = 0
100: EX_TABLE(100b, .Ldone)
43: lbz r0,0(r4)
addi r4,r4,1
stex; stb r0,0(r3)
addi r3,r3,1
bdnz 43b
li r3,0 /* huh? all copied successfully this time? */
blr
/*
* here we have trapped again, amount remaining is in ctr.
*/
.Ldone:
mfctr r3
blr
/*
* exception handlers for stores: we need to work out how many bytes
* weren't copied, and we may need to copy some more.
* Note that the number of bytes of instructions for adjusting r3 needs
* to equal the amount of the adjustment, due to the trick of using
* .Lst_exc - r3_offset as the handler address.
*/
.Lst_exc_r7:
add r3,r3,r7
b .Lst_exc
/* adjust by 24 */
addi r3,r3,8
nop
/* adjust by 16 */
addi r3,r3,8
nop
/* adjust by 8 */
addi r3,r3,4
/* adjust by 4 */
addi r3,r3,4
.Lst_exc:
ld r6,-24(r1) /* original destination pointer */
ld r4,-16(r1) /* original source pointer */
ld r5,-8(r1) /* original number of bytes */
add r7,r6,r5
/*
* If the destination pointer isn't 8-byte aligned,
* we may have got the exception as a result of a
* store that overlapped a page boundary, so we may be
* able to copy a few more bytes.
*/
17: andi. r0,r3,7
beq 19f
subf r8,r6,r3 /* #bytes copied */
100: EX_TABLE(100b,19f)
lbzx r0,r8,r4
100: EX_TABLE(100b,19f)
stb r0,0(r3)
addi r3,r3,1
cmpld r3,r7
blt 17b
19: subf r3,r3,r7 /* #bytes not copied in r3 */
blr
/*
* Routine to copy a whole page of data, optimized for POWER4.
* On POWER4 it is more than 50% faster than the simple loop
* above (following the .Ldst_aligned label).
*/
.macro exc
100: EX_TABLE(100b, .Labort)
.endm
.Lcopy_page_4K:
std r31,-32(1)
std r30,-40(1)
std r29,-48(1)
std r28,-56(1)
std r27,-64(1)
std r26,-72(1)
std r25,-80(1)
std r24,-88(1)
std r23,-96(1)
std r22,-104(1)
std r21,-112(1)
std r20,-120(1)
li r5,4096/32 - 1
addi r3,r3,-8
li r0,5
0: addi r5,r5,-24
mtctr r0
exc; ld r22,640(4)
exc; ld r21,512(4)
exc; ld r20,384(4)
exc; ld r11,256(4)
exc; ld r9,128(4)
exc; ld r7,0(4)
exc; ld r25,648(4)
exc; ld r24,520(4)
exc; ld r23,392(4)
exc; ld r10,264(4)
exc; ld r8,136(4)
exc; ldu r6,8(4)
cmpwi r5,24
1:
exc; std r22,648(3)
exc; std r21,520(3)
exc; std r20,392(3)
exc; std r11,264(3)
exc; std r9,136(3)
exc; std r7,8(3)
exc; ld r28,648(4)
exc; ld r27,520(4)
exc; ld r26,392(4)
exc; ld r31,264(4)
exc; ld r30,136(4)
exc; ld r29,8(4)
exc; std r25,656(3)
exc; std r24,528(3)
exc; std r23,400(3)
exc; std r10,272(3)
exc; std r8,144(3)
exc; std r6,16(3)
exc; ld r22,656(4)
exc; ld r21,528(4)
exc; ld r20,400(4)
exc; ld r11,272(4)
exc; ld r9,144(4)
exc; ld r7,16(4)
exc; std r28,664(3)
exc; std r27,536(3)
exc; std r26,408(3)
exc; std r31,280(3)
exc; std r30,152(3)
exc; stdu r29,24(3)
exc; ld r25,664(4)
exc; ld r24,536(4)
exc; ld r23,408(4)
exc; ld r10,280(4)
exc; ld r8,152(4)
exc; ldu r6,24(4)
bdnz 1b
exc; std r22,648(3)
exc; std r21,520(3)
exc; std r20,392(3)
exc; std r11,264(3)
exc; std r9,136(3)
exc; std r7,8(3)
addi r4,r4,640
addi r3,r3,648
bge 0b
mtctr r5
exc; ld r7,0(4)
exc; ld r8,8(4)
exc; ldu r9,16(4)
3:
exc; ld r10,8(4)
exc; std r7,8(3)
exc; ld r7,16(4)
exc; std r8,16(3)
exc; ld r8,24(4)
exc; std r9,24(3)
exc; ldu r9,32(4)
exc; stdu r10,32(3)
bdnz 3b
4:
exc; ld r10,8(4)
exc; std r7,8(3)
exc; std r8,16(3)
exc; std r9,24(3)
exc; std r10,32(3)
9: ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
ld r24,-88(1)
ld r25,-80(1)
ld r26,-72(1)
ld r27,-64(1)
ld r28,-56(1)
ld r29,-48(1)
ld r30,-40(1)
ld r31,-32(1)
li r3,0
blr
/*
* on an exception, reset to the beginning and jump back into the
* standard __copy_tofrom_user
*/
.Labort:
ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
ld r24,-88(1)
ld r25,-80(1)
ld r26,-72(1)
ld r27,-64(1)
ld r28,-56(1)
ld r29,-48(1)
ld r30,-40(1)
ld r31,-32(1)
ld r3,-24(r1)
ld r4,-16(r1)
li r5,4096
b .Ldst_aligned
EXPORT_SYMBOL(__copy_tofrom_user)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,691
|
tools/testing/selftests/powerpc/stringloops/strlen_32.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* strlen() for PPC32
*
* Copyright (C) 2018 Christophe Leroy CS Systemes d'Information.
*
* Inspired from glibc implementation
*/
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/cache.h>
.text
/*
* Algorithm:
*
* 1) Given a word 'x', we can test to see if it contains any 0 bytes
* by subtracting 0x01010101, and seeing if any of the high bits of each
* byte changed from 0 to 1. This works because the least significant
* 0 byte must have had no incoming carry (otherwise it's not the least
* significant), so it is 0x00 - 0x01 == 0xff. For all other
* byte values, either they have the high bit set initially, or when
* 1 is subtracted you get a value in the range 0x00-0x7f, none of which
* have their high bit set. The expression here is
* (x - 0x01010101) & ~x & 0x80808080), which gives 0x00000000 when
* there were no 0x00 bytes in the word. You get 0x80 in bytes that
* match, but possibly false 0x80 matches in the next more significant
* byte to a true match due to carries. For little-endian this is
* of no consequence since the least significant match is the one
* we're interested in, but big-endian needs method 2 to find which
* byte matches.
* 2) Given a word 'x', we can test to see _which_ byte was zero by
* calculating ~(((x & ~0x80808080) - 0x80808080 - 1) | x | ~0x80808080).
* This produces 0x80 in each byte that was zero, and 0x00 in all
* the other bytes. The '| ~0x80808080' clears the low 7 bits in each
* byte, and the '| x' part ensures that bytes with the high bit set
* produce 0x00. The addition will carry into the high bit of each byte
* iff that byte had one of its low 7 bits set. We can then just see
* which was the most significant bit set and divide by 8 to find how
* many to add to the index.
* This is from the book 'The PowerPC Compiler Writer's Guide',
* by Steve Hoxey, Faraydon Karim, Bill Hay and Hank Warren.
*/
_GLOBAL(strlen)
	/*
	 * size_t strlen(const char *s) — PPC32.
	 * In:   r3 = s
	 * Out:  r3 = length (bytes before the terminating NUL)
	 * Uses: r0, r6, r7, r8, r9, r10, cr0 (see algorithm notes above)
	 */
	andi. r0, r3, 3		/* r0 = s & 3: offset within a word */
	lis r7, 0x0101
	addi r10, r3, -4	/* r10 = scan ptr, pre-biased for lwzu */
	addic r7, r7, 0x0101 /* r7 = 0x01010101 (lomagic) & clear XER[CA] */
	rotlwi r6, r7, 31 /* r6 = 0x80808080 (himagic) */
	bne- 3f			/* unaligned start: mask leading bytes first */
	.balign IFETCH_ALIGN_BYTES
1: lwzu r9, 4(r10)		/* fetch next word */
2: subf r8, r7, r9		/* method 1: (x - lomagic) ... */
	and. r8, r8, r6		/* ... & himagic — zero means no NUL candidate */
	beq+ 1b
	andc. r8, r8, r9	/* ... & ~x — discard false positives */
	beq+ 1b
	/* A NUL byte is in r9: method 2 locates the first (most significant) one. */
	andc r8, r9, r6
	orc r9, r9, r6
	subfe r8, r6, r8	/* relies on XER[CA] state set up above */
	nor r8, r8, r9		/* 0x80 in each byte of r9 that was zero */
	cntlzw r8, r8		/* bit index of first zero byte */
	subf r3, r3, r10	/* bytes fully scanned before this word */
	srwi r8, r8, 3		/* bit index -> byte index */
	add r3, r3, r8
	blr
	/* Misaligned string: make sure bytes before string are seen not 0 */
3: xor r10, r10, r0	/* clear low bits: round scan ptr down to word */
	orc r8, r8, r8		/* r8 = all ones */
	lwzu r9, 4(r10)
	slwi r0, r0, 3		/* alignment offset in bits */
	srw r8, r8, r0		/* mask covering only the in-string bytes */
	orc r9, r9, r8		/* force leading out-of-string bytes non-zero */
	b 2b
EXPORT_SYMBOL(strlen)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 11,899
|
tools/testing/selftests/powerpc/stringloops/memcmp_64.S
|
/*
* Author: Anton Blanchard <anton@au.ibm.com>
* Copyright 2015 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/ppc-opcode.h>
#define off8 r6
#define off16 r7
#define off24 r8
#define rA r9
#define rB r10
#define rC r11
#define rD r27
#define rE r28
#define rF r29
#define rG r30
#define rH r31
#ifdef __LITTLE_ENDIAN__
#define LH lhbrx
#define LW lwbrx
#define LD ldbrx
#define LVS lvsr
#define VPERM(_VRT,_VRA,_VRB,_VRC) \
vperm _VRT,_VRB,_VRA,_VRC
#else
#define LH lhzx
#define LW lwzx
#define LD ldx
#define LVS lvsl
#define VPERM(_VRT,_VRA,_VRB,_VRC) \
vperm _VRT,_VRA,_VRB,_VRC
#endif
#define VMX_THRESH 4096
#define ENTER_VMX_OPS \
mflr r0; \
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
std r0,16(r1); \
stdu r1,-STACKFRAMESIZE(r1); \
bl enter_vmx_ops; \
cmpwi cr1,r3,0; \
ld r0,STACKFRAMESIZE+16(r1); \
ld r3,STK_REG(R31)(r1); \
ld r4,STK_REG(R30)(r1); \
ld r5,STK_REG(R29)(r1); \
addi r1,r1,STACKFRAMESIZE; \
mtlr r0
#define EXIT_VMX_OPS \
mflr r0; \
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
std r0,16(r1); \
stdu r1,-STACKFRAMESIZE(r1); \
bl exit_vmx_ops; \
ld r0,STACKFRAMESIZE+16(r1); \
ld r3,STK_REG(R31)(r1); \
ld r4,STK_REG(R30)(r1); \
ld r5,STK_REG(R29)(r1); \
addi r1,r1,STACKFRAMESIZE; \
mtlr r0
/*
* LD_VSR_CROSS16B load the 2nd 16 bytes for _vaddr which is unaligned with
* 16 bytes boundary and permute the result with the 1st 16 bytes.
* | y y y y y y y y y y y y y 0 1 2 | 3 4 5 6 7 8 9 a b c d e f z z z |
* ^ ^ ^
* 0xbbbb10 0xbbbb20 0xbbb30
* ^
* _vaddr
*
*
* _vmask is the mask generated by LVS
* _v1st_qw is the 1st aligned QW of current addr which is already loaded.
* for example: 0xyyyyyyyyyyyyy012 for big endian
* _v2nd_qw is the 2nd aligned QW of cur _vaddr to be loaded.
* for example: 0x3456789abcdefzzz for big endian
* The permute result is saved in _v_res.
* for example: 0x0123456789abcdef for big endian.
*/
#define LD_VSR_CROSS16B(_vaddr,_vmask,_v1st_qw,_v2nd_qw,_v_res) \
lvx _v2nd_qw,_vaddr,off16; \
VPERM(_v_res,_v1st_qw,_v2nd_qw,_vmask)
/*
* There are 2 categories for memcmp:
* 1) src/dst has the same offset to the 8 bytes boundary. The handlers
* are named like .Lsameoffset_xxxx
* 2) src/dst has different offset to the 8 bytes boundary. The handlers
* are named like .Ldiffoffset_xxxx
*/
_GLOBAL_TOC(memcmp)
	/*
	 * int memcmp(const void *s1, const void *s2, size_t n) — PPC64.
	 * In:   r3 = s1, r4 = s2, r5 = n
	 * Out:  r3 = 0 if equal, 1 if s1 > s2, -1 if s1 < s2 at the first
	 *       differing position (byte path returns the byte difference
	 *       via .Lnon_zero).
	 * rA..rH (r9..r11, r27..r31, see #defines above) hold loaded data;
	 * r27-r31 are callee-saved and are spilled below r1 before .Llong.
	 */
	cmpdi cr1,r5,0
	/* Use the short loop if the src/dst addresses are not
	 * with the same offset of 8 bytes align boundary.
	 */
	xor r6,r3,r4
	andi. r6,r6,7
	/* Fall back to short loop if compare at aligned addrs
	 * with less than 8 bytes.
	 */
	cmpdi cr6,r5,7
	beq cr1,.Lzero
	bgt cr6,.Lno_short
.Lshort:
	/* Byte-by-byte loop, unrolled x4; CTR = remaining byte count. */
	mtctr r5
1: lbz rA,0(r3)
	lbz rB,0(r4)
	subf. rC,rB,rA
	bne .Lnon_zero
	bdz .Lzero
	lbz rA,1(r3)
	lbz rB,1(r4)
	subf. rC,rB,rA
	bne .Lnon_zero
	bdz .Lzero
	lbz rA,2(r3)
	lbz rB,2(r4)
	subf. rC,rB,rA
	bne .Lnon_zero
	bdz .Lzero
	lbz rA,3(r3)
	lbz rB,3(r4)
	subf. rC,rB,rA
	bne .Lnon_zero
	addi r3,r3,4
	addi r4,r4,4
	bdnz 1b
.Lzero:
	li r3,0
	blr
.Lno_short:
	dcbt 0,r3
	dcbt 0,r4
	bne .Ldiffoffset_8bytes_make_align_start
.Lsameoffset_8bytes_make_align_start:
	/* attempt to compare bytes not aligned with 8 bytes so that
	 * rest comparison can run based on 8 bytes alignment.
	 */
	andi. r6,r3,7
	/* Try to compare the first double word which is not 8 bytes aligned:
	 * load the first double word at (src & ~7UL) and shift left appropriate
	 * bits before comparision.
	 */
	rlwinm r6,r3,3,26,28	/* r6 = (r3 & 7) * 8 = leading bit count */
	beq .Lsameoffset_8bytes_aligned
	clrrdi r3,r3,3
	clrrdi r4,r4,3
	LD rA,0,r3
	LD rB,0,r4
	sld rA,rA,r6		/* shift out the bytes before the buffers */
	sld rB,rB,r6
	cmpld cr0,rA,rB
	srwi r6,r6,3
	bne cr0,.LcmpAB_lightweight
	subfic r6,r6,8		/* r6 = bytes consumed by this partial word */
	subf. r5,r6,r5
	addi r3,r3,8
	addi r4,r4,8
	beq .Lzero
.Lsameoffset_8bytes_aligned:
	/* now we are aligned with 8 bytes.
	 * Use .Llong loop if left cmp bytes are equal or greater than 32B.
	 */
	cmpdi cr6,r5,31
	bgt cr6,.Llong
.Lcmp_lt32bytes:
	/* compare 1 ~ 31 bytes, at least r3 addr is 8 bytes aligned now */
	cmpdi cr5,r5,7
	srdi r0,r5,3		/* r0 = number of whole double words */
	ble cr5,.Lcmp_rest_lt8bytes
	/* handle 8 ~ 31 bytes */
	clrldi r5,r5,61		/* r5 = leftover bytes (< 8) */
	mtctr r0
2:
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr0,rA,rB
	addi r3,r3,8
	addi r4,r4,8
	bne cr0,.LcmpAB_lightweight
	bdnz 2b
	cmpwi r5,0
	beq .Lzero
.Lcmp_rest_lt8bytes:
	/*
	 * Here we have less than 8 bytes to compare. At least s1 is aligned to
	 * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
	 * page boundary, otherwise we might read past the end of the buffer and
	 * trigger a page fault. We use 4K as the conservative minimum page
	 * size. If we detect that case we go to the byte-by-byte loop.
	 *
	 * Otherwise the next double word is loaded from s1 and s2, and shifted
	 * right to compare the appropriate bits.
	 */
	clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
	cmpdi r6,0xff8
	bgt .Lshort
	subfic r6,r5,8
	slwi r6,r6,3		/* r6 = unused trailing bits */
	LD rA,0,r3
	LD rB,0,r4
	srd rA,rA,r6
	srd rB,rB,r6
	cmpld cr0,rA,rB
	bne cr0,.LcmpAB_lightweight
	b .Lzero
.Lnon_zero:
	/* byte path: return the (sign-carrying) byte difference */
	mr r3,rC
	blr
.Llong:
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	/* Try to use vmx loop if length is equal or greater than 4K */
	cmpldi cr6,r5,VMX_THRESH
	bge cr6,.Lsameoffset_vmx_cmp
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
.Llong_novmx_cmp:
#endif
	/* At least s1 addr is aligned with 8 bytes */
	li off8,8
	li off16,16
	li off24,24
	/* spill the non-volatile GPRs used as rD..rH */
	std r31,-8(r1)
	std r30,-16(r1)
	std r29,-24(r1)
	std r28,-32(r1)
	std r27,-40(r1)
	srdi r0,r5,5		/* CTR = number of 32-byte chunks */
	mtctr r0
	andi. r5,r5,31		/* r5 = trailing bytes for .Ltail */
	/* software-pipelined 32B/iteration loop: loads of chunk N+1
	 * overlap compares of chunk N (cr0/cr1/cr6/cr7 carry results) */
	LD rA,0,r3
	LD rB,0,r4
	LD rC,off8,r3
	LD rD,off8,r4
	LD rE,off16,r3
	LD rF,off16,r4
	LD rG,off24,r3
	LD rH,off24,r4
	cmpld cr0,rA,rB
	addi r3,r3,32
	addi r4,r4,32
	bdz .Lfirst32
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr1,rC,rD
	LD rC,off8,r3
	LD rD,off8,r4
	cmpld cr6,rE,rF
	LD rE,off16,r3
	LD rF,off16,r4
	cmpld cr7,rG,rH
	bne cr0,.LcmpAB
	LD rG,off24,r3
	LD rH,off24,r4
	cmpld cr0,rA,rB
	bne cr1,.LcmpCD
	addi r3,r3,32
	addi r4,r4,32
	bdz .Lsecond32
	.balign 16
1: LD rA,0,r3
	LD rB,0,r4
	cmpld cr1,rC,rD
	bne cr6,.LcmpEF
	LD rC,off8,r3
	LD rD,off8,r4
	cmpld cr6,rE,rF
	bne cr7,.LcmpGH
	LD rE,off16,r3
	LD rF,off16,r4
	cmpld cr7,rG,rH
	bne cr0,.LcmpAB
	LD rG,off24,r3
	LD rH,off24,r4
	cmpld cr0,rA,rB
	bne cr1,.LcmpCD
	addi r3,r3,32
	addi r4,r4,32
	bdnz 1b
.Lsecond32:
	/* drain the pipeline: compare the last loaded chunk */
	cmpld cr1,rC,rD
	bne cr6,.LcmpEF
	cmpld cr6,rE,rF
	bne cr7,.LcmpGH
	cmpld cr7,rG,rH
	bne cr0,.LcmpAB
	bne cr1,.LcmpCD
	bne cr6,.LcmpEF
	bne cr7,.LcmpGH
.Ltail:
	/* restore NV GPRs, then handle the < 32B remainder byte-wise */
	ld r31,-8(r1)
	ld r30,-16(r1)
	ld r29,-24(r1)
	ld r28,-32(r1)
	ld r27,-40(r1)
	cmpdi r5,0
	beq .Lzero
	b .Lshort
.Lfirst32:
	/* only one 32B chunk was loaded: compare it and finish */
	cmpld cr1,rC,rD
	cmpld cr6,rE,rF
	cmpld cr7,rG,rH
	bne cr0,.LcmpAB
	bne cr1,.LcmpCD
	bne cr6,.LcmpEF
	bne cr7,.LcmpGH
	b .Ltail
.LcmpAB:
	li r3,1
	bgt cr0,.Lout
	li r3,-1
	b .Lout
.LcmpCD:
	li r3,1
	bgt cr1,.Lout
	li r3,-1
	b .Lout
.LcmpEF:
	li r3,1
	bgt cr6,.Lout
	li r3,-1
	b .Lout
.LcmpGH:
	li r3,1
	bgt cr7,.Lout
	li r3,-1
.Lout:
	ld r31,-8(r1)
	ld r30,-16(r1)
	ld r29,-24(r1)
	ld r28,-32(r1)
	ld r27,-40(r1)
	blr
.LcmpAB_lightweight: /* skip NV GPRS restore */
	li r3,1
	bgtlr
	li r3,-1
	blr
#ifdef CONFIG_ALTIVEC
.Lsameoffset_vmx_cmp:
	/* Enter with src/dst addrs has the same offset with 8 bytes
	 * align boundary.
	 *
	 * There is an optimization based on following fact: memcmp()
	 * prones to fail early at the first 32 bytes.
	 * Before applying VMX instructions which will lead to 32x128bits
	 * VMX regs load/restore penalty, we compare the first 32 bytes
	 * so that we can catch the ~80% fail cases.
	 */
	li r0,4
	mtctr r0
.Lsameoffset_prechk_32B_loop:
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr0,rA,rB
	addi r3,r3,8
	addi r4,r4,8
	bne cr0,.LcmpAB_lightweight
	addi r5,r5,-8
	bdnz .Lsameoffset_prechk_32B_loop
	ENTER_VMX_OPS
	beq cr1,.Llong_novmx_cmp	/* enter_vmx_ops returned 0: no VMX */
3:
	/* need to check whether r4 has the same offset with r3
	 * for 16 bytes boundary.
	 */
	xor r0,r3,r4
	andi. r0,r0,0xf
	bne .Ldiffoffset_vmx_cmp_start
	/* len is no less than 4KB. Need to align with 16 bytes further.
	 */
	andi. rA,r3,8
	LD rA,0,r3
	beq 4f
	LD rB,0,r4
	cmpld cr0,rA,rB
	addi r3,r3,8
	addi r4,r4,8
	addi r5,r5,-8
	beq cr0,4f
	/* save and restore cr0 */
	mfocrf r5,128
	EXIT_VMX_OPS
	mtocrf 128,r5
	b .LcmpAB_lightweight
4:
	/* compare 32 bytes for each loop */
	srdi r0,r5,5
	mtctr r0
	clrldi r5,r5,59		/* r5 = trailing bytes (< 32) */
	li off16,16
	.balign 16
5:
	lvx v0,0,r3
	lvx v1,0,r4
	VCMPEQUD_RC(v0,v0,v1)
	bnl cr6,7f		/* cr6 not-all-equal: diff in 1st 16B */
	lvx v0,off16,r3
	lvx v1,off16,r4
	VCMPEQUD_RC(v0,v0,v1)
	bnl cr6,6f		/* diff in 2nd 16B */
	addi r3,r3,32
	addi r4,r4,32
	bdnz 5b
	EXIT_VMX_OPS
	cmpdi r5,0
	beq .Lzero
	b .Lcmp_lt32bytes
6:
	addi r3,r3,16
	addi r4,r4,16
7:
	/* diff the last 16 bytes */
	EXIT_VMX_OPS
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr0,rA,rB
	li off8,8
	bne cr0,.LcmpAB_lightweight
	LD rA,off8,r3
	LD rB,off8,r4
	cmpld cr0,rA,rB
	bne cr0,.LcmpAB_lightweight
	b .Lzero
#endif
.Ldiffoffset_8bytes_make_align_start:
	/* now try to align s1 with 8 bytes */
	rlwinm r6,r3,3,26,28	/* r6 = (r3 & 7) * 8 */
	beq .Ldiffoffset_align_s1_8bytes
	clrrdi r3,r3,3
	LD rA,0,r3
	LD rB,0,r4 /* unaligned load */
	sld rA,rA,r6		/* zero the bytes before the s1 buffer */
	srd rA,rA,r6
	srd rB,rB,r6		/* keep only the matching byte count of s2 */
	cmpld cr0,rA,rB
	srwi r6,r6,3
	bne cr0,.LcmpAB_lightweight
	subfic r6,r6,8
	subf. r5,r6,r5
	addi r3,r3,8
	add r4,r4,r6
	beq .Lzero
.Ldiffoffset_align_s1_8bytes:
	/* now s1 is aligned with 8 bytes. */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	/* only do vmx ops when the size equal or greater than 4K bytes */
	cmpdi cr5,r5,VMX_THRESH
	bge cr5,.Ldiffoffset_vmx_cmp
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
.Ldiffoffset_novmx_cmp:
#endif
	cmpdi cr5,r5,31
	ble cr5,.Lcmp_lt32bytes
#ifdef CONFIG_ALTIVEC
	b .Llong_novmx_cmp
#else
	b .Llong
#endif
#ifdef CONFIG_ALTIVEC
.Ldiffoffset_vmx_cmp:
	/* perform a 32 bytes pre-checking before
	 * enable VMX operations.
	 */
	li r0,4
	mtctr r0
.Ldiffoffset_prechk_32B_loop:
	LD rA,0,r3
	LD rB,0,r4
	cmpld cr0,rA,rB
	addi r3,r3,8
	addi r4,r4,8
	bne cr0,.LcmpAB_lightweight
	addi r5,r5,-8
	bdnz .Ldiffoffset_prechk_32B_loop
	ENTER_VMX_OPS
	beq cr1,.Ldiffoffset_novmx_cmp
.Ldiffoffset_vmx_cmp_start:
	/* Firstly try to align r3 with 16 bytes */
	andi. r6,r3,0xf
	li off16,16
	beq .Ldiffoffset_vmx_s1_16bytes_align
	/* permute-based unaligned 16B compare (see LD_VSR_CROSS16B above) */
	LVS v3,0,r3
	LVS v4,0,r4
	lvx v5,0,r3
	lvx v6,0,r4
	LD_VSR_CROSS16B(r3,v3,v5,v7,v9)
	LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
	VCMPEQUB_RC(v7,v9,v10)
	bnl cr6,.Ldiffoffset_vmx_diff_found
	subfic r6,r6,16
	subf r5,r6,r5
	add r3,r3,r6
	add r4,r4,r6
.Ldiffoffset_vmx_s1_16bytes_align:
	/* now s1 is aligned with 16 bytes */
	lvx v6,0,r4
	LVS v4,0,r4
	srdi r6,r5,5 /* loop for 32 bytes each */
	clrldi r5,r5,59
	mtctr r6
	.balign 16
.Ldiffoffset_vmx_32bytesloop:
	/* the first qw of r4 was saved in v6 */
	lvx v9,0,r3
	LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
	VCMPEQUB_RC(v7,v9,v10)
	vor v6,v8,v8		/* carry the loaded qw into next iteration */
	bnl cr6,.Ldiffoffset_vmx_diff_found
	addi r3,r3,16
	addi r4,r4,16
	lvx v9,0,r3
	LD_VSR_CROSS16B(r4,v4,v6,v8,v10)
	VCMPEQUB_RC(v7,v9,v10)
	vor v6,v8,v8
	bnl cr6,.Ldiffoffset_vmx_diff_found
	addi r3,r3,16
	addi r4,r4,16
	bdnz .Ldiffoffset_vmx_32bytesloop
	EXIT_VMX_OPS
	cmpdi r5,0
	beq .Lzero
	b .Lcmp_lt32bytes
.Ldiffoffset_vmx_diff_found:
	EXIT_VMX_OPS
	/* anyway, the diff will appear in next 16 bytes */
	li r5,16
	b .Lcmp_lt32bytes
#endif
EXPORT_SYMBOL(memcmp)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,216
|
tools/testing/selftests/powerpc/lib/reg.S
|
/*
* test helper assembly functions
*
* Copyright (C) 2016 Simon Guo, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <ppc-asm.h>
#include "reg.h"
/* Non volatile GPR - unsigned long buf[18] */
FUNC_START(load_gpr)
	/*
	 * void load_gpr(unsigned long buf[18])
	 * In: r3 = buf. Loads the non-volatile GPRs r14-r31 from buf[0..17].
	 */
	ld 14, 0*8(3)
	ld 15, 1*8(3)
	ld 16, 2*8(3)
	ld 17, 3*8(3)
	ld 18, 4*8(3)
	ld 19, 5*8(3)
	ld 20, 6*8(3)
	ld 21, 7*8(3)
	ld 22, 8*8(3)
	ld 23, 9*8(3)
	ld 24, 10*8(3)
	ld 25, 11*8(3)
	ld 26, 12*8(3)
	ld 27, 13*8(3)
	ld 28, 14*8(3)
	ld 29, 15*8(3)
	ld 30, 16*8(3)
	ld 31, 17*8(3)
	blr
FUNC_END(load_gpr)
FUNC_START(store_gpr)
	/*
	 * void store_gpr(unsigned long buf[18])
	 * In: r3 = buf. Stores the non-volatile GPRs r14-r31 into buf[0..17].
	 */
	std 14, 0*8(3)
	std 15, 1*8(3)
	std 16, 2*8(3)
	std 17, 3*8(3)
	std 18, 4*8(3)
	std 19, 5*8(3)
	std 20, 6*8(3)
	std 21, 7*8(3)
	std 22, 8*8(3)
	std 23, 9*8(3)
	std 24, 10*8(3)
	std 25, 11*8(3)
	std 26, 12*8(3)
	std 27, 13*8(3)
	std 28, 14*8(3)
	std 29, 15*8(3)
	std 30, 16*8(3)
	std 31, 17*8(3)
	blr
FUNC_END(store_gpr)
/* Single Precision Float - float buf[32] */
FUNC_START(load_fpr_single_precision)
	/*
	 * void load_fpr_single_precision(float buf[32])
	 * In: r3 = buf. Loads all 32 FP registers from 4-byte (single
	 * precision) slots; lfs converts each value to double in the FPR.
	 */
	lfs 0, 0*4(3)
	lfs 1, 1*4(3)
	lfs 2, 2*4(3)
	lfs 3, 3*4(3)
	lfs 4, 4*4(3)
	lfs 5, 5*4(3)
	lfs 6, 6*4(3)
	lfs 7, 7*4(3)
	lfs 8, 8*4(3)
	lfs 9, 9*4(3)
	lfs 10, 10*4(3)
	lfs 11, 11*4(3)
	lfs 12, 12*4(3)
	lfs 13, 13*4(3)
	lfs 14, 14*4(3)
	lfs 15, 15*4(3)
	lfs 16, 16*4(3)
	lfs 17, 17*4(3)
	lfs 18, 18*4(3)
	lfs 19, 19*4(3)
	lfs 20, 20*4(3)
	lfs 21, 21*4(3)
	lfs 22, 22*4(3)
	lfs 23, 23*4(3)
	lfs 24, 24*4(3)
	lfs 25, 25*4(3)
	lfs 26, 26*4(3)
	lfs 27, 27*4(3)
	lfs 28, 28*4(3)
	lfs 29, 29*4(3)
	lfs 30, 30*4(3)
	lfs 31, 31*4(3)
	blr
FUNC_END(load_fpr_single_precision)
/* Single Precision Float - float buf[32] */
FUNC_START(store_fpr_single_precision)
	/*
	 * void store_fpr_single_precision(float buf[32])
	 * In: r3 = buf. Stores all 32 FP registers as 4-byte (single
	 * precision) values; stfs rounds each FPR to single precision.
	 */
	stfs 0, 0*4(3)
	stfs 1, 1*4(3)
	stfs 2, 2*4(3)
	stfs 3, 3*4(3)
	stfs 4, 4*4(3)
	stfs 5, 5*4(3)
	stfs 6, 6*4(3)
	stfs 7, 7*4(3)
	stfs 8, 8*4(3)
	stfs 9, 9*4(3)
	stfs 10, 10*4(3)
	stfs 11, 11*4(3)
	stfs 12, 12*4(3)
	stfs 13, 13*4(3)
	stfs 14, 14*4(3)
	stfs 15, 15*4(3)
	stfs 16, 16*4(3)
	stfs 17, 17*4(3)
	stfs 18, 18*4(3)
	stfs 19, 19*4(3)
	stfs 20, 20*4(3)
	stfs 21, 21*4(3)
	stfs 22, 22*4(3)
	stfs 23, 23*4(3)
	stfs 24, 24*4(3)
	stfs 25, 25*4(3)
	stfs 26, 26*4(3)
	stfs 27, 27*4(3)
	stfs 28, 28*4(3)
	stfs 29, 29*4(3)
	stfs 30, 30*4(3)
	stfs 31, 31*4(3)
	blr
FUNC_END(store_fpr_single_precision)
/* VMX/VSX registers - unsigned long buf[128] */
FUNC_START(loadvsx)
	/*
	 * void loadvsx(unsigned long buf[128])
	 * In: r3 = buf (16-byte granules). Loads all 64 VSX registers
	 * vs0..vs63 sequentially, 16 bytes each; r4 is the running byte
	 * offset (clobbered). LXVD2X is the raw-encoded macro from the
	 * test headers.
	 */
	lis 4, 0
	LXVD2X (0,(4),(3))
	addi 4, 4, 16
	LXVD2X (1,(4),(3))
	addi 4, 4, 16
	LXVD2X (2,(4),(3))
	addi 4, 4, 16
	LXVD2X (3,(4),(3))
	addi 4, 4, 16
	LXVD2X (4,(4),(3))
	addi 4, 4, 16
	LXVD2X (5,(4),(3))
	addi 4, 4, 16
	LXVD2X (6,(4),(3))
	addi 4, 4, 16
	LXVD2X (7,(4),(3))
	addi 4, 4, 16
	LXVD2X (8,(4),(3))
	addi 4, 4, 16
	LXVD2X (9,(4),(3))
	addi 4, 4, 16
	LXVD2X (10,(4),(3))
	addi 4, 4, 16
	LXVD2X (11,(4),(3))
	addi 4, 4, 16
	LXVD2X (12,(4),(3))
	addi 4, 4, 16
	LXVD2X (13,(4),(3))
	addi 4, 4, 16
	LXVD2X (14,(4),(3))
	addi 4, 4, 16
	LXVD2X (15,(4),(3))
	addi 4, 4, 16
	LXVD2X (16,(4),(3))
	addi 4, 4, 16
	LXVD2X (17,(4),(3))
	addi 4, 4, 16
	LXVD2X (18,(4),(3))
	addi 4, 4, 16
	LXVD2X (19,(4),(3))
	addi 4, 4, 16
	LXVD2X (20,(4),(3))
	addi 4, 4, 16
	LXVD2X (21,(4),(3))
	addi 4, 4, 16
	LXVD2X (22,(4),(3))
	addi 4, 4, 16
	LXVD2X (23,(4),(3))
	addi 4, 4, 16
	LXVD2X (24,(4),(3))
	addi 4, 4, 16
	LXVD2X (25,(4),(3))
	addi 4, 4, 16
	LXVD2X (26,(4),(3))
	addi 4, 4, 16
	LXVD2X (27,(4),(3))
	addi 4, 4, 16
	LXVD2X (28,(4),(3))
	addi 4, 4, 16
	LXVD2X (29,(4),(3))
	addi 4, 4, 16
	LXVD2X (30,(4),(3))
	addi 4, 4, 16
	LXVD2X (31,(4),(3))
	addi 4, 4, 16
	/* vs32..vs63 follow (upper half of the VSX register file) */
	LXVD2X (32,(4),(3))
	addi 4, 4, 16
	LXVD2X (33,(4),(3))
	addi 4, 4, 16
	LXVD2X (34,(4),(3))
	addi 4, 4, 16
	LXVD2X (35,(4),(3))
	addi 4, 4, 16
	LXVD2X (36,(4),(3))
	addi 4, 4, 16
	LXVD2X (37,(4),(3))
	addi 4, 4, 16
	LXVD2X (38,(4),(3))
	addi 4, 4, 16
	LXVD2X (39,(4),(3))
	addi 4, 4, 16
	LXVD2X (40,(4),(3))
	addi 4, 4, 16
	LXVD2X (41,(4),(3))
	addi 4, 4, 16
	LXVD2X (42,(4),(3))
	addi 4, 4, 16
	LXVD2X (43,(4),(3))
	addi 4, 4, 16
	LXVD2X (44,(4),(3))
	addi 4, 4, 16
	LXVD2X (45,(4),(3))
	addi 4, 4, 16
	LXVD2X (46,(4),(3))
	addi 4, 4, 16
	LXVD2X (47,(4),(3))
	addi 4, 4, 16
	LXVD2X (48,(4),(3))
	addi 4, 4, 16
	LXVD2X (49,(4),(3))
	addi 4, 4, 16
	LXVD2X (50,(4),(3))
	addi 4, 4, 16
	LXVD2X (51,(4),(3))
	addi 4, 4, 16
	LXVD2X (52,(4),(3))
	addi 4, 4, 16
	LXVD2X (53,(4),(3))
	addi 4, 4, 16
	LXVD2X (54,(4),(3))
	addi 4, 4, 16
	LXVD2X (55,(4),(3))
	addi 4, 4, 16
	LXVD2X (56,(4),(3))
	addi 4, 4, 16
	LXVD2X (57,(4),(3))
	addi 4, 4, 16
	LXVD2X (58,(4),(3))
	addi 4, 4, 16
	LXVD2X (59,(4),(3))
	addi 4, 4, 16
	LXVD2X (60,(4),(3))
	addi 4, 4, 16
	LXVD2X (61,(4),(3))
	addi 4, 4, 16
	LXVD2X (62,(4),(3))
	addi 4, 4, 16
	LXVD2X (63,(4),(3))
	blr
FUNC_END(loadvsx)
FUNC_START(storevsx)
	/*
	 * void storevsx(unsigned long buf[128])
	 * In: r3 = buf (16-byte granules). Stores all 64 VSX registers
	 * vs0..vs63 sequentially, 16 bytes each; r4 is the running byte
	 * offset (clobbered). Mirror of loadvsx above.
	 */
	lis 4, 0
	STXVD2X (0,(4),(3))
	addi 4, 4, 16
	STXVD2X (1,(4),(3))
	addi 4, 4, 16
	STXVD2X (2,(4),(3))
	addi 4, 4, 16
	STXVD2X (3,(4),(3))
	addi 4, 4, 16
	STXVD2X (4,(4),(3))
	addi 4, 4, 16
	STXVD2X (5,(4),(3))
	addi 4, 4, 16
	STXVD2X (6,(4),(3))
	addi 4, 4, 16
	STXVD2X (7,(4),(3))
	addi 4, 4, 16
	STXVD2X (8,(4),(3))
	addi 4, 4, 16
	STXVD2X (9,(4),(3))
	addi 4, 4, 16
	STXVD2X (10,(4),(3))
	addi 4, 4, 16
	STXVD2X (11,(4),(3))
	addi 4, 4, 16
	STXVD2X (12,(4),(3))
	addi 4, 4, 16
	STXVD2X (13,(4),(3))
	addi 4, 4, 16
	STXVD2X (14,(4),(3))
	addi 4, 4, 16
	STXVD2X (15,(4),(3))
	addi 4, 4, 16
	STXVD2X (16,(4),(3))
	addi 4, 4, 16
	STXVD2X (17,(4),(3))
	addi 4, 4, 16
	STXVD2X (18,(4),(3))
	addi 4, 4, 16
	STXVD2X (19,(4),(3))
	addi 4, 4, 16
	STXVD2X (20,(4),(3))
	addi 4, 4, 16
	STXVD2X (21,(4),(3))
	addi 4, 4, 16
	STXVD2X (22,(4),(3))
	addi 4, 4, 16
	STXVD2X (23,(4),(3))
	addi 4, 4, 16
	STXVD2X (24,(4),(3))
	addi 4, 4, 16
	STXVD2X (25,(4),(3))
	addi 4, 4, 16
	STXVD2X (26,(4),(3))
	addi 4, 4, 16
	STXVD2X (27,(4),(3))
	addi 4, 4, 16
	STXVD2X (28,(4),(3))
	addi 4, 4, 16
	STXVD2X (29,(4),(3))
	addi 4, 4, 16
	STXVD2X (30,(4),(3))
	addi 4, 4, 16
	STXVD2X (31,(4),(3))
	addi 4, 4, 16
	/* vs32..vs63 follow (upper half of the VSX register file) */
	STXVD2X (32,(4),(3))
	addi 4, 4, 16
	STXVD2X (33,(4),(3))
	addi 4, 4, 16
	STXVD2X (34,(4),(3))
	addi 4, 4, 16
	STXVD2X (35,(4),(3))
	addi 4, 4, 16
	STXVD2X (36,(4),(3))
	addi 4, 4, 16
	STXVD2X (37,(4),(3))
	addi 4, 4, 16
	STXVD2X (38,(4),(3))
	addi 4, 4, 16
	STXVD2X (39,(4),(3))
	addi 4, 4, 16
	STXVD2X (40,(4),(3))
	addi 4, 4, 16
	STXVD2X (41,(4),(3))
	addi 4, 4, 16
	STXVD2X (42,(4),(3))
	addi 4, 4, 16
	STXVD2X (43,(4),(3))
	addi 4, 4, 16
	STXVD2X (44,(4),(3))
	addi 4, 4, 16
	STXVD2X (45,(4),(3))
	addi 4, 4, 16
	STXVD2X (46,(4),(3))
	addi 4, 4, 16
	STXVD2X (47,(4),(3))
	addi 4, 4, 16
	STXVD2X (48,(4),(3))
	addi 4, 4, 16
	STXVD2X (49,(4),(3))
	addi 4, 4, 16
	STXVD2X (50,(4),(3))
	addi 4, 4, 16
	STXVD2X (51,(4),(3))
	addi 4, 4, 16
	STXVD2X (52,(4),(3))
	addi 4, 4, 16
	STXVD2X (53,(4),(3))
	addi 4, 4, 16
	STXVD2X (54,(4),(3))
	addi 4, 4, 16
	STXVD2X (55,(4),(3))
	addi 4, 4, 16
	STXVD2X (56,(4),(3))
	addi 4, 4, 16
	STXVD2X (57,(4),(3))
	addi 4, 4, 16
	STXVD2X (58,(4),(3))
	addi 4, 4, 16
	STXVD2X (59,(4),(3))
	addi 4, 4, 16
	STXVD2X (60,(4),(3))
	addi 4, 4, 16
	STXVD2X (61,(4),(3))
	addi 4, 4, 16
	STXVD2X (62,(4),(3))
	addi 4, 4, 16
	STXVD2X (63,(4),(3))
	blr
FUNC_END(storevsx)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,180
|
tools/testing/selftests/powerpc/signal/signal.S
|
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
/* long signal_self(pid_t pid, int sig); */
FUNC_START(signal_self)
	/*
	 * long signal_self(pid_t pid, int sig)
	 * Sends sig to pid via raw sys_kill. Returns 0 on success,
	 * -errno on failure (errno negation done below, no libc errno).
	 */
	li r0,37 /* sys_kill */
	/* r3 already has our pid in it */
	/* r4 already has signal type in it */
	sc
	bc 4,3,1f	/* branch if cr0.SO clear: syscall succeeded */
	subfze r3,r3	/* error path: r3 = -errno */
1: blr
FUNC_END(signal_self)
/* long tm_signal_self(pid_t pid, int sig, int *ret); */
FUNC_START(tm_signal_self)
	/*
	 * long tm_signal_self(pid_t pid, int sig, int *ret)
	 * Starts a transaction, suspends it, sends sig to pid (storing the
	 * syscall result through *ret), then aborts/resumes so the doomed
	 * transaction rolls back to tbegin. Returns 0 when the transaction
	 * failed as expected (path 1:), 1 if execution impossibly continued.
	 */
	PUSH_BASIC_STACK(8)
	std r5,STACK_FRAME_PARAM(0)(sp) /* ret */
	tbegin.
	beq 1f		/* transaction failure path */
	tsuspend.	/* can't make a syscall while transactional */
	li r0,37 /* sys_kill */
	/* r3 already has our pid in it */
	/* r4 already has signal type in it */
	sc
	ld r5,STACK_FRAME_PARAM(0)(sp) /* ret */
	bc 4,3,2f	/* branch if syscall succeeded (cr0.SO clear) */
	subfze r3,r3	/* error: r3 = -errno */
2: std r3,0(r5)	/* publish syscall result through *ret */
	tabort. 0
	tresume. /* Be nice to some cleanup, jumps back to tbegin then to 1: */
	/*
	 * Transaction should be proper doomed and we should never get
	 * here
	 */
	li r3,1
	POP_BASIC_STACK(8)
	blr
1: li r3,0
	POP_BASIC_STACK(8)
	blr
FUNC_END(tm_signal_self)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,375
|
tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
.data
.balign 8
message:
.ascii "success: switch_endian_test\n\0"
.section ".toc"
.balign 8
pattern:
.8byte 0x5555AAAA5555AAAA
.text
FUNC_START(_start)
	/*
	 * switch_endian selftest entry point. Fills the registers with a
	 * known pattern, switches endian via the syscall, verifies the
	 * registers with the included check fragments, prints a success
	 * message and exits.
	 */
	/* Load the pattern */
	ld r15, pattern@TOC(%r2)
	/* Setup CR, only CR2-CR4 are maintained */
	lis r3, 0x00FF
	ori r3, r3, 0xF000
	mtcr r3
	/* Load the pattern slightly modified into the registers */
	mr r3, r15
	addi r4, r15, 4
	addi r5, r15, 32
	mtlr r5		/* pattern+32 goes into LR, checked later */
	addi r5, r15, 5
	addi r6, r15, 6
	addi r7, r15, 7
	addi r8, r15, 8
	/* r9 - r12 are clobbered */
	addi r13, r15, 13
	addi r14, r15, 14
	/* Skip r15 we're using it */
	addi r16, r15, 16
	addi r17, r15, 17
	addi r18, r15, 18
	addi r19, r15, 19
	addi r20, r15, 20
	addi r21, r15, 21
	addi r22, r15, 22
	addi r23, r15, 23
	addi r24, r15, 24
	addi r25, r15, 25
	addi r26, r15, 26
	addi r27, r15, 27
	addi r28, r15, 28
	addi r29, r15, 29
	addi r30, r15, 30
	addi r31, r15, 31
	/*
	 * Call the syscall to switch endian.
	 * It clobbers r9-r12, XER, CTR and CR0-1,5-7.
	 */
	li r0, __NR_switch_endian
	sc
#include "check-reversed.S"
	/* Flip back, r0 already has the switch syscall number */
	.long 0x02000044 /* sc */
#include "check.S"
	li r0, __NR_write
	li r3, 1 /* stdout */
	ld r4, message@got(%r2)
	li r5, 28 /* strlen(message) */
	sc
	li r0, __NR_exit
	li r3, 0
	sc
	b .	/* not reached: exit does not return */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,977
|
tools/testing/selftests/powerpc/switch_endian/check.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
/*
* Checks that registers contain what we expect, ie. they were not clobbered by
* the syscall.
*
* r15: pattern to check registers against.
*
* At the end r3 == 0 if everything's OK.
*/
nop # guaranteed to be illegal in reverse-endian
	# r9 = expected value, compared against each live register in turn.
	# On mismatch, jump to 1: with r9 holding a distinct error code
	# (pattern + offset) which becomes the exit status.
	mr r9,r15
	cmpd r9,r3 # check r3
	bne 1f
	addi r9,r15,4 # check r4
	cmpd r9,r4
	bne 1f
	lis r9,0x00FF # check CR
	ori r9,r9,0xF000
	mfcr r10
	and r10,r10,r9 # keep only the maintained CR fields
	cmpw r9,r10
	addi r9,r15,34 # error code for CR mismatch (addi leaves cr0 intact)
	bne 1f
	addi r9,r15,32 # check LR
	mflr r10
	cmpd r9,r10
	bne 1f
	addi r9,r15,5 # check r5
	cmpd r9,r5
	bne 1f
	addi r9,r15,6 # check r6
	cmpd r9,r6
	bne 1f
	addi r9,r15,7 # check r7
	cmpd r9,r7
	bne 1f
	addi r9,r15,8 # check r8
	cmpd r9,r8
	bne 1f
	addi r9,r15,13 # check r13
	cmpd r9,r13
	bne 1f
	addi r9,r15,14 # check r14
	cmpd r9,r14
	bne 1f
	addi r9,r15,16 # check r16
	cmpd r9,r16
	bne 1f
	addi r9,r15,17 # check r17
	cmpd r9,r17
	bne 1f
	addi r9,r15,18 # check r18
	cmpd r9,r18
	bne 1f
	addi r9,r15,19 # check r19
	cmpd r9,r19
	bne 1f
	addi r9,r15,20 # check r20
	cmpd r9,r20
	bne 1f
	addi r9,r15,21 # check r21
	cmpd r9,r21
	bne 1f
	addi r9,r15,22 # check r22
	cmpd r9,r22
	bne 1f
	addi r9,r15,23 # check r23
	cmpd r9,r23
	bne 1f
	addi r9,r15,24 # check r24
	cmpd r9,r24
	bne 1f
	addi r9,r15,25 # check r25
	cmpd r9,r25
	bne 1f
	addi r9,r15,26 # check r26
	cmpd r9,r26
	bne 1f
	addi r9,r15,27 # check r27
	cmpd r9,r27
	bne 1f
	addi r9,r15,28 # check r28
	cmpd r9,r28
	bne 1f
	addi r9,r15,29 # check r29
	cmpd r9,r29
	bne 1f
	addi r9,r15,30 # check r30
	cmpd r9,r30
	bne 1f
	addi r9,r15,31 # check r31
	cmpd r9,r31
	bne 1f
	b 2f
	# failure: exit with the mismatching value as status
1: mr r3, r9
	li r0, __NR_exit
	sc
	# success: leave the switch-endian syscall number in r0 for the caller
2: li r0, __NR_switch_endian
	nop
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,025
|
tools/testing/selftests/powerpc/tm/tm-signal.S
|
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
#include "gpr_asm.h"
#include "fpu_asm.h"
#include "vmx_asm.h"
#include "vsx_asm.h"
/*
* Large caveat here being that the caller cannot expect the
* signal to always be sent! The hardware can (AND WILL!) abort
* the transaction between the tbegin and the tsuspend (however
* unlikely it seems or infrequently it actually happens).
* You have been warned.
*/
/* long tm_signal_self(pid_t pid, long *gprs, double *fps, vector *vms, vector *vss); */
FUNC_START(tm_signal_self_context_load)
	/*
	 * long tm_signal_self_context_load(pid_t pid, long *gprs,
	 *                                  double *fps, vector *vms,
	 *                                  vector *vss)
	 * Loads the first half of each non-NULL array pre-transaction, the
	 * second half transactionally, then takes SIGUSR1 from inside the
	 * suspended transaction. Returns pid on the expected failure path,
	 * 0 only if the doomed transaction somehow completed.
	 */
	PUSH_BASIC_STACK(512)
	/*
	 * Don't strictly need to save and restore as it depends on if
	 * we're going to use them, however this reduces messy logic
	 */
	PUSH_VMX(STACK_FRAME_LOCAL(5,0),r8)
	PUSH_FPU(512)
	PUSH_NVREGS_BELOW_FPU(512)
	std r3, STACK_FRAME_PARAM(0)(sp) /* pid */
	std r4, STACK_FRAME_PARAM(1)(sp) /* gps */
	std r5, STACK_FRAME_PARAM(2)(sp) /* fps */
	std r6, STACK_FRAME_PARAM(3)(sp) /* vms */
	std r7, STACK_FRAME_PARAM(4)(sp) /* vss */
	/* Pre-transactional loads: first half of each non-NULL array */
	ld r3, STACK_FRAME_PARAM(1)(sp)
	cmpdi r3, 0
	beq skip_gpr_lc
	bl load_gpr
skip_gpr_lc:
	ld r3, STACK_FRAME_PARAM(2)(sp)
	cmpdi r3, 0
	beq skip_fpu_lc
	bl load_fpu
skip_fpu_lc:
	ld r3, STACK_FRAME_PARAM(3)(sp)
	cmpdi r3, 0
	beq skip_vmx_lc
	bl load_vmx
skip_vmx_lc:
	ld r3, STACK_FRAME_PARAM(4)(sp)
	cmpdi r3, 0
	beq skip_vsx_lc
	bl load_vsx
skip_vsx_lc:
	/*
	 * Set r3 (return value) before tbegin. Use the pid as a known
	 * 'all good' return value, zero is used to indicate a non-doomed
	 * transaction.
	 */
	ld r3, STACK_FRAME_PARAM(0)(sp)
	tbegin.
	beq 1f		/* transaction failed/rolled back: done */
	tsuspend. /* Can't enter a syscall transactionally */
	/* Transactional loads: second half of each non-NULL array */
	ld r3, STACK_FRAME_PARAM(1)(sp)
	cmpdi r3, 0
	beq skip_gpr_lt
	/* Get the second half of the array */
	addi r3, r3, 8 * 18
	bl load_gpr
skip_gpr_lt:
	ld r3, STACK_FRAME_PARAM(2)(sp)
	cmpdi r3, 0
	beq skip_fpu_lt
	/* Get the second half of the array */
	addi r3, r3, 8 * 18
	bl load_fpu
skip_fpu_lt:
	ld r3, STACK_FRAME_PARAM(3)(sp)
	cmpdi r3, 0
	beq skip_vmx_lt
	/* Get the second half of the array */
	addi r3, r3, 16 * 12
	bl load_vmx
skip_vmx_lt:
	ld r3, STACK_FRAME_PARAM(4)(sp)
	cmpdi r3, 0
	beq skip_vsx_lt
	/* Get the second half of the array */
	addi r3, r3, 16 * 12
	bl load_vsx
skip_vsx_lt:
	li r0, 37 /* sys_kill */
	ld r3, STACK_FRAME_PARAM(0)(sp) /* pid */
	li r4, 10 /* SIGUSR1 */
	sc /* Taking the signal will doom the transaction */
	tabort. 0
	tresume. /* Be super sure we abort */
	/*
	 * This will cause us to resume doomed transaction and cause
	 * hardware to cleanup, we'll end up at 1: anything between
	 * tresume. and 1: shouldn't ever run.
	 */
	li r3, 0
1:
	POP_VMX(STACK_FRAME_LOCAL(5,0),r4)
	POP_FPU(512)
	POP_NVREGS_BELOW_FPU(512)
	POP_BASIC_STACK(512)
	blr
FUNC_END(tm_signal_self_context_load)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,566
|
tools/testing/selftests/powerpc/math/vsx_asm.S
|
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
#include "vsx_asm.h"
# long check_vsx(vector int *r3);
# This function wraps storing VSX regs to the end of an array and a
# call to a comparison function in C which boils down to a memcmp()
FUNC_START(check_vsx)
	# long check_vsx(vector int *r3)
	# Stores the live VSX registers into the second half of the array
	# at r3, then calls the C helper vsx_memcmp to compare halves.
	# Returns vsx_memcmp's result in r3.
	PUSH_BASIC_STACK(32)
	std r3,STACK_FRAME_PARAM(0)(sp)
	addi r3, r3, 16 * 12 #Second half of array
	bl store_vsx
	ld r3,STACK_FRAME_PARAM(0)(sp)
	bl vsx_memcmp
	POP_BASIC_STACK(32)
	blr
FUNC_END(check_vsx)
# int preempt_vsx(vector int *varray, int *threads_starting,
# int *running);
# On starting will (atomically) decrement threads_starting as a signal
# that the VSX have been loaded with varray. Will proceed to check the
# validity of the VSX registers while running is not zero.
FUNC_START(preempt_vsx)
	# int preempt_vsx(vector int *varray, int *threads_starting,
	#                 int *running)
	# Loads the VSX regs from varray, atomically decrements
	# *threads_starting, then repeatedly re-checks the regs with
	# check_vsx until *running becomes 0 or a mismatch is found.
	# Returns check_vsx's last result in r3.
	PUSH_BASIC_STACK(512)
	std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray
	std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
	std r5,STACK_FRAME_PARAM(2)(sp) # int *running
	bl load_vsx
	nop
	sync
	# Atomic DEC
	ld r3,STACK_FRAME_PARAM(1)(sp)
1: lwarx r4,0,r3
	addi r4,r4,-1
	stwcx. r4,0,r3
	bne- 1b		# lost the reservation: retry
2: ld r3,STACK_FRAME_PARAM(0)(sp)
	bl check_vsx
	nop
	cmpdi r3,0
	bne 3f		# mismatch: bail out with the error in r3
	ld r4,STACK_FRAME_PARAM(2)(sp)
	ld r5,0(r4)
	cmpwi r5,0
	bne 2b		# keep checking while *running != 0
3: POP_BASIC_STACK(512)
	blr
FUNC_END(preempt_vsx)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,591
|
tools/testing/selftests/powerpc/math/fpu_asm.S
|
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
#include "fpu_asm.h"
FUNC_START(check_fpu)
	# long check_fpu(double *darray)
	# Compares the non-volatile FPRs f14-f31 against darray[0..17].
	# Returns 0 in r3 if all match, 1 on the first mismatch.
	# Clobbers r4, f0, cr1.
	mr r4,r3
	li r3,1 # assume a bad result
	lfd f0,0(r4)
	fcmpu cr1,f0,f14
	bne cr1,1f
	lfd f0,8(r4)
	fcmpu cr1,f0,f15
	bne cr1,1f
	lfd f0,16(r4)
	fcmpu cr1,f0,f16
	bne cr1,1f
	lfd f0,24(r4)
	fcmpu cr1,f0,f17
	bne cr1,1f
	lfd f0,32(r4)
	fcmpu cr1,f0,f18
	bne cr1,1f
	lfd f0,40(r4)
	fcmpu cr1,f0,f19
	bne cr1,1f
	lfd f0,48(r4)
	fcmpu cr1,f0,f20
	bne cr1,1f
	lfd f0,56(r4)
	fcmpu cr1,f0,f21
	bne cr1,1f
	lfd f0,64(r4)
	fcmpu cr1,f0,f22
	bne cr1,1f
	lfd f0,72(r4)
	fcmpu cr1,f0,f23
	bne cr1,1f
	lfd f0,80(r4)
	fcmpu cr1,f0,f24
	bne cr1,1f
	lfd f0,88(r4)
	fcmpu cr1,f0,f25
	bne cr1,1f
	lfd f0,96(r4)
	fcmpu cr1,f0,f26
	bne cr1,1f
	lfd f0,104(r4)
	fcmpu cr1,f0,f27
	bne cr1,1f
	lfd f0,112(r4)
	fcmpu cr1,f0,f28
	bne cr1,1f
	lfd f0,120(r4)
	fcmpu cr1,f0,f29
	bne cr1,1f
	lfd f0,128(r4)
	fcmpu cr1,f0,f30
	bne cr1,1f
	lfd f0,136(r4)
	fcmpu cr1,f0,f31
	bne cr1,1f
	li r3,0 # Success!!!
1: blr
FUNC_START(test_fpu)
	# long test_fpu(double *darray, pid_t *pid)
	# r3 holds pointer to where to put the result of fork
	# r4 holds pointer to the pid
	# f14-f31 are non volatiles
	# Loads the FPRs from darray, forks (storing the fork result
	# through *pid), then verifies the FPRs survived. Returns
	# check_fpu's result in r3 (0 = registers intact).
	PUSH_BASIC_STACK(256)
	PUSH_FPU(256)
	std r3,STACK_FRAME_PARAM(0)(sp) # Address of darray
	std r4,STACK_FRAME_PARAM(1)(sp) # Address of pid
	bl load_fpu
	nop
	li r0,__NR_fork
	sc
	# pass the result of the fork to the caller
	ld r9,STACK_FRAME_PARAM(1)(sp)
	std r3,0(r9)
	ld r3,STACK_FRAME_PARAM(0)(sp)
	bl check_fpu
	nop
	POP_FPU(256)
	POP_BASIC_STACK(256)
	blr
FUNC_END(test_fpu)
# int preempt_fpu(double *darray, int *threads_starting, int *running)
# On starting will (atomically) decrement threads_starting as a signal that the
# FPU has been loaded with darray. Will proceed to check the validity of the
# FPU registers while running is not zero.
FUNC_START(preempt_fpu)
	# int preempt_fpu(double *darray, int *threads_starting,
	#                 int *running)
	# Loads the non-volatile FPRs from darray, atomically decrements
	# *threads_starting, then repeatedly re-checks the FPRs with
	# check_fpu until *running becomes 0 or a mismatch is found.
	# Returns check_fpu's last result in r3.
	PUSH_BASIC_STACK(256)
	PUSH_FPU(256)
	std r3,STACK_FRAME_PARAM(0)(sp) # double *darray
	std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
	std r5,STACK_FRAME_PARAM(2)(sp) # int *running
	bl load_fpu
	nop
	sync
	# Atomic DEC
	ld r3,STACK_FRAME_PARAM(1)(sp)
1: lwarx r4,0,r3
	addi r4,r4,-1
	stwcx. r4,0,r3
	bne- 1b		# lost the reservation: retry
2: ld r3,STACK_FRAME_PARAM(0)(sp)
	bl check_fpu
	nop
	cmpdi r3,0
	bne 3f		# mismatch: bail out with the error in r3
	ld r4,STACK_FRAME_PARAM(2)(sp)
	ld r5,0(r4)
	cmpwi r5,0
	bne 2b		# keep checking while *running != 0
3: POP_FPU(256)
	POP_BASIC_STACK(256)
	blr
FUNC_END(preempt_fpu)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,962
|
tools/testing/selftests/powerpc/math/vmx_asm.S
|
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
#include "vmx_asm.h"
# Should be safe from C, only touches r4, r5 and v0,v1,v2
# int check_vmx(vector int *varray)
# Compares v20-v31 against the 12 consecutive 16-byte values at r3.
# Returns 0 in r3 if all match, 1 otherwise.  Clobbers r4, r5, v0-v2.
FUNC_START(check_vmx)
PUSH_BASIC_STACK(32)
mr r4,r3 # r4 = varray base
li r3,1 # assume a bad result
li r5,0 # r5 = running byte offset into varray
# v2 accumulates the AND of every per-lane comparison result:
# it stays all-ones only if every register matched.
lvx v0,r5,r4
vcmpequd. v1,v0,v20
vmr v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v21
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v22
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v23
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v24
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v25
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v26
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v27
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v28
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v29
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v30
vand v2,v2,v1
addi r5,r5,16
lvx v0,r5,r4
vcmpequd. v1,v0,v31
vand v2,v2,v1
li r5,STACK_FRAME_LOCAL(0,0)
stvx v2,r5,sp # spill the accumulated result to the stack
# NOTE(review): only one doubleword of v2 is examined below; the other
# half of the accumulator is never checked -- confirm this is intended.
ldx r0,r5,sp
cmpdi r0,0xffffffffffffffff # all-ones (-1) means every lane compared equal
bne 1f
li r3,0
1: POP_BASIC_STACK(32)
blr
FUNC_END(check_vmx)
# Safe from C
# int test_vmx(vector int *varray, pid_t *pid)
# Loads varray into the non-volatile vector registers v20-v31, forks, then
# verifies (in both parent and child) that the registers survived the fork.
# Returns check_vmx()'s result in r3: 0 on success.  *pid receives the raw
# fork() return value.
FUNC_START(test_vmx)
# r3 holds pointer to where to put the result of fork
# r4 holds pointer to the pid
# v20-v31 are non-volatile
PUSH_BASIC_STACK(512)
std r3,STACK_FRAME_PARAM(0)(sp) # Address of varray
std r4,STACK_FRAME_PARAM(1)(sp) # address of pid
PUSH_VMX(STACK_FRAME_LOCAL(2,0),r4) # save caller's v20-v31 (r4 is scratch here)
bl load_vmx
nop
li r0,__NR_fork # raw fork syscall: r0 = syscall number
sc
# Pass the result of fork back to the caller
ld r9,STACK_FRAME_PARAM(1)(sp)
std r3,0(r9) # *pid = fork() return value
ld r3,STACK_FRAME_PARAM(0)(sp)
bl check_vmx
nop
POP_VMX(STACK_FRAME_LOCAL(2,0),r4)
POP_BASIC_STACK(512)
blr
FUNC_END(test_vmx)
# int preempt_vmx(vector int *varray, int *threads_starting, int *running)
# On starting will (atomically) decrement threads_starting as a signal that
# the VMX have been loaded with varray. Will proceed to check the validity of
# the VMX registers while running is not zero.
# int preempt_vmx(vector int *varray, int *threads_starting, int *running)
# Loads varray into v20-v31, atomically decrements *threads_starting to
# signal readiness, then repeatedly re-validates the vector registers until
# a mismatch is found (returns non-zero) or *running becomes 0 (returns 0).
FUNC_START(preempt_vmx)
PUSH_BASIC_STACK(512)
std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray
std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting
std r5,STACK_FRAME_PARAM(2)(sp) # int *running
# VMX need to write to 16 byte aligned addresses, skip STACK_FRAME_LOCAL(3,0)
PUSH_VMX(STACK_FRAME_LOCAL(4,0),r4)
bl load_vmx
nop
sync # order the register loads before publishing readiness
# Atomic DEC
ld r3,STACK_FRAME_PARAM(1)(sp)
1: lwarx r4,0,r3 # load-reserve *threads_starting
addi r4,r4,-1
stwcx. r4,0,r3 # store-conditional; retry if reservation lost
bne- 1b
2: ld r3,STACK_FRAME_PARAM(0)(sp)
bl check_vmx
nop
cmpdi r3,0 # non-zero = register corruption detected
bne 3f
ld r4,STACK_FRAME_PARAM(2)(sp)
ld r5,0(r4)
cmpwi r5,0 # keep checking while *running != 0
bne 2b
3: POP_VMX(STACK_FRAME_LOCAL(4,0),r4) # r3 still holds check_vmx()'s result
POP_BASIC_STACK(512)
blr
FUNC_END(preempt_vmx)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,262
|
tools/testing/selftests/powerpc/pmu/ebb/busy_loop.S
|
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <ppc-asm.h>
.text
FUNC_START(core_busy_loop)
stdu %r1, -168(%r1)
std r14, 160(%r1)
std r15, 152(%r1)
std r16, 144(%r1)
std r17, 136(%r1)
std r18, 128(%r1)
std r19, 120(%r1)
std r20, 112(%r1)
std r21, 104(%r1)
std r22, 96(%r1)
std r23, 88(%r1)
std r24, 80(%r1)
std r25, 72(%r1)
std r26, 64(%r1)
std r27, 56(%r1)
std r28, 48(%r1)
std r29, 40(%r1)
std r30, 32(%r1)
std r31, 24(%r1)
li r3, 0x3030
std r3, -96(%r1)
li r4, 0x4040
std r4, -104(%r1)
li r5, 0x5050
std r5, -112(%r1)
li r6, 0x6060
std r6, -120(%r1)
li r7, 0x7070
std r7, -128(%r1)
li r8, 0x0808
std r8, -136(%r1)
li r9, 0x0909
std r9, -144(%r1)
li r10, 0x1010
std r10, -152(%r1)
li r11, 0x1111
std r11, -160(%r1)
li r14, 0x1414
std r14, -168(%r1)
li r15, 0x1515
std r15, -176(%r1)
li r16, 0x1616
std r16, -184(%r1)
li r17, 0x1717
std r17, -192(%r1)
li r18, 0x1818
std r18, -200(%r1)
li r19, 0x1919
std r19, -208(%r1)
li r20, 0x2020
std r20, -216(%r1)
li r21, 0x2121
std r21, -224(%r1)
li r22, 0x2222
std r22, -232(%r1)
li r23, 0x2323
std r23, -240(%r1)
li r24, 0x2424
std r24, -248(%r1)
li r25, 0x2525
std r25, -256(%r1)
li r26, 0x2626
std r26, -264(%r1)
li r27, 0x2727
std r27, -272(%r1)
li r28, 0x2828
std r28, -280(%r1)
li r29, 0x2929
std r29, -288(%r1)
li r30, 0x3030
li r31, 0x3131
li r3, 0
0: addi r3, r3, 1
cmpwi r3, 100
blt 0b
/* Return 1 (fail) unless we get through all the checks */
li r3, 1
/* Check none of our registers have been corrupted */
cmpwi r4, 0x4040
bne 1f
cmpwi r5, 0x5050
bne 1f
cmpwi r6, 0x6060
bne 1f
cmpwi r7, 0x7070
bne 1f
cmpwi r8, 0x0808
bne 1f
cmpwi r9, 0x0909
bne 1f
cmpwi r10, 0x1010
bne 1f
cmpwi r11, 0x1111
bne 1f
cmpwi r14, 0x1414
bne 1f
cmpwi r15, 0x1515
bne 1f
cmpwi r16, 0x1616
bne 1f
cmpwi r17, 0x1717
bne 1f
cmpwi r18, 0x1818
bne 1f
cmpwi r19, 0x1919
bne 1f
cmpwi r20, 0x2020
bne 1f
cmpwi r21, 0x2121
bne 1f
cmpwi r22, 0x2222
bne 1f
cmpwi r23, 0x2323
bne 1f
cmpwi r24, 0x2424
bne 1f
cmpwi r25, 0x2525
bne 1f
cmpwi r26, 0x2626
bne 1f
cmpwi r27, 0x2727
bne 1f
cmpwi r28, 0x2828
bne 1f
cmpwi r29, 0x2929
bne 1f
cmpwi r30, 0x3030
bne 1f
cmpwi r31, 0x3131
bne 1f
/* Load junk into all our registers before we reload them from the stack. */
li r3, 0xde
li r4, 0xad
li r5, 0xbe
li r6, 0xef
li r7, 0xde
li r8, 0xad
li r9, 0xbe
li r10, 0xef
li r11, 0xde
li r14, 0xad
li r15, 0xbe
li r16, 0xef
li r17, 0xde
li r18, 0xad
li r19, 0xbe
li r20, 0xef
li r21, 0xde
li r22, 0xad
li r23, 0xbe
li r24, 0xef
li r25, 0xde
li r26, 0xad
li r27, 0xbe
li r28, 0xef
li r29, 0xdd
ld r3, -96(%r1)
cmpwi r3, 0x3030
bne 1f
ld r4, -104(%r1)
cmpwi r4, 0x4040
bne 1f
ld r5, -112(%r1)
cmpwi r5, 0x5050
bne 1f
ld r6, -120(%r1)
cmpwi r6, 0x6060
bne 1f
ld r7, -128(%r1)
cmpwi r7, 0x7070
bne 1f
ld r8, -136(%r1)
cmpwi r8, 0x0808
bne 1f
ld r9, -144(%r1)
cmpwi r9, 0x0909
bne 1f
ld r10, -152(%r1)
cmpwi r10, 0x1010
bne 1f
ld r11, -160(%r1)
cmpwi r11, 0x1111
bne 1f
ld r14, -168(%r1)
cmpwi r14, 0x1414
bne 1f
ld r15, -176(%r1)
cmpwi r15, 0x1515
bne 1f
ld r16, -184(%r1)
cmpwi r16, 0x1616
bne 1f
ld r17, -192(%r1)
cmpwi r17, 0x1717
bne 1f
ld r18, -200(%r1)
cmpwi r18, 0x1818
bne 1f
ld r19, -208(%r1)
cmpwi r19, 0x1919
bne 1f
ld r20, -216(%r1)
cmpwi r20, 0x2020
bne 1f
ld r21, -224(%r1)
cmpwi r21, 0x2121
bne 1f
ld r22, -232(%r1)
cmpwi r22, 0x2222
bne 1f
ld r23, -240(%r1)
cmpwi r23, 0x2323
bne 1f
ld r24, -248(%r1)
cmpwi r24, 0x2424
bne 1f
ld r25, -256(%r1)
cmpwi r25, 0x2525
bne 1f
ld r26, -264(%r1)
cmpwi r26, 0x2626
bne 1f
ld r27, -272(%r1)
cmpwi r27, 0x2727
bne 1f
ld r28, -280(%r1)
cmpwi r28, 0x2828
bne 1f
ld r29, -288(%r1)
cmpwi r29, 0x2929
bne 1f
/* Load 0 (success) to return */
li r3, 0
1: ld r14, 160(%r1)
ld r15, 152(%r1)
ld r16, 144(%r1)
ld r17, 136(%r1)
ld r18, 128(%r1)
ld r19, 120(%r1)
ld r20, 112(%r1)
ld r21, 104(%r1)
ld r22, 96(%r1)
ld r23, 88(%r1)
ld r24, 80(%r1)
ld r25, 72(%r1)
ld r26, 64(%r1)
ld r27, 56(%r1)
ld r28, 48(%r1)
ld r29, 40(%r1)
ld r30, 32(%r1)
ld r31, 24(%r1)
addi %r1, %r1, 168
blr
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,476
|
tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S
|
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <ppc-asm.h>
#include "reg.h"
/* ppc-asm.h defines most of the reg aliases, but not r1/r2. */
#define r1 1
#define r2 2
#define RFEBB .long 0x4c000924
/* Stack layout:
*
* ^
* User stack |
* Back chain ------+ <- r1 <-------+
* ... |
* Red zone / ABI Gap |
* ... |
* vr63 <+ |
* vr0 | |
* VSCR | |
* FSCR | |
* r31 | Save area |
* r0 | |
* XER | |
* CTR | |
* LR | |
* CCR <+ |
* ... <+ |
* LR | Caller frame |
* CCR | |
* Back chain <+ <- updated r1 --------+
*
*/
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define ABIGAP 512
#else
#define ABIGAP 288
#endif
#define NR_GPR 32
#define NR_SPR 6
#define NR_VSR 64
#define SAVE_AREA ((NR_GPR + NR_SPR) * 8 + (NR_VSR * 16))
#define CALLER_FRAME 112
#define STACK_FRAME (ABIGAP + SAVE_AREA + CALLER_FRAME)
#define CCR_SAVE (CALLER_FRAME)
#define LR_SAVE (CCR_SAVE + 8)
#define CTR_SAVE (LR_SAVE + 8)
#define XER_SAVE (CTR_SAVE + 8)
#define GPR_SAVE(n) (XER_SAVE + 8 + (8 * n))
#define FSCR_SAVE (GPR_SAVE(31) + 8)
#define VSCR_SAVE (FSCR_SAVE + 8)
#define VSR_SAVE(n) (VSCR_SAVE + 8 + (16 * n))
#define SAVE_GPR(n) std n,GPR_SAVE(n)(r1)
#define REST_GPR(n) ld n,GPR_SAVE(n)(r1)
#define TRASH_GPR(n) lis n,0xaaaa
#define SAVE_VSR(n, b) li b, VSR_SAVE(n); stxvd2x n,b,r1
#define LOAD_VSR(n, b) li b, VSR_SAVE(n); lxvd2x n,b,r1
#define LOAD_REG_IMMEDIATE(reg,expr) \
lis reg,(expr)@highest; \
ori reg,reg,(expr)@higher; \
rldicr reg,reg,32,31; \
oris reg,reg,(expr)@h; \
ori reg,reg,(expr)@l;
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define ENTRY_POINT(name) \
.type FUNC_NAME(name),@function; \
.globl FUNC_NAME(name); \
FUNC_NAME(name):
#define RESTORE_TOC(name) \
/* Restore our TOC pointer using our entry point */ \
LOAD_REG_IMMEDIATE(r12, name) \
0: addis r2,r12,(.TOC.-0b)@ha; \
addi r2,r2,(.TOC.-0b)@l;
#else
#define ENTRY_POINT(name) FUNC_START(name)
#define RESTORE_TOC(name) \
/* Restore our TOC pointer via our opd entry */ \
LOAD_REG_IMMEDIATE(r2, name) \
ld r2,8(r2);
#endif
.text
ENTRY_POINT(ebb_handler)
stdu r1,-STACK_FRAME(r1)
SAVE_GPR(0)
mflr r0
std r0,LR_SAVE(r1)
mfcr r0
std r0,CCR_SAVE(r1)
mfctr r0
std r0,CTR_SAVE(r1)
mfxer r0
std r0,XER_SAVE(r1)
SAVE_GPR(2)
SAVE_GPR(3)
SAVE_GPR(4)
SAVE_GPR(5)
SAVE_GPR(6)
SAVE_GPR(7)
SAVE_GPR(8)
SAVE_GPR(9)
SAVE_GPR(10)
SAVE_GPR(11)
SAVE_GPR(12)
SAVE_GPR(13)
SAVE_GPR(14)
SAVE_GPR(15)
SAVE_GPR(16)
SAVE_GPR(17)
SAVE_GPR(18)
SAVE_GPR(19)
SAVE_GPR(20)
SAVE_GPR(21)
SAVE_GPR(22)
SAVE_GPR(23)
SAVE_GPR(24)
SAVE_GPR(25)
SAVE_GPR(26)
SAVE_GPR(27)
SAVE_GPR(28)
SAVE_GPR(29)
SAVE_GPR(30)
SAVE_GPR(31)
SAVE_VSR(0, r3)
mffs f0
stfd f0, FSCR_SAVE(r1)
mfvscr f0
stfd f0, VSCR_SAVE(r1)
SAVE_VSR(1, r3)
SAVE_VSR(2, r3)
SAVE_VSR(3, r3)
SAVE_VSR(4, r3)
SAVE_VSR(5, r3)
SAVE_VSR(6, r3)
SAVE_VSR(7, r3)
SAVE_VSR(8, r3)
SAVE_VSR(9, r3)
SAVE_VSR(10, r3)
SAVE_VSR(11, r3)
SAVE_VSR(12, r3)
SAVE_VSR(13, r3)
SAVE_VSR(14, r3)
SAVE_VSR(15, r3)
SAVE_VSR(16, r3)
SAVE_VSR(17, r3)
SAVE_VSR(18, r3)
SAVE_VSR(19, r3)
SAVE_VSR(20, r3)
SAVE_VSR(21, r3)
SAVE_VSR(22, r3)
SAVE_VSR(23, r3)
SAVE_VSR(24, r3)
SAVE_VSR(25, r3)
SAVE_VSR(26, r3)
SAVE_VSR(27, r3)
SAVE_VSR(28, r3)
SAVE_VSR(29, r3)
SAVE_VSR(30, r3)
SAVE_VSR(31, r3)
SAVE_VSR(32, r3)
SAVE_VSR(33, r3)
SAVE_VSR(34, r3)
SAVE_VSR(35, r3)
SAVE_VSR(36, r3)
SAVE_VSR(37, r3)
SAVE_VSR(38, r3)
SAVE_VSR(39, r3)
SAVE_VSR(40, r3)
SAVE_VSR(41, r3)
SAVE_VSR(42, r3)
SAVE_VSR(43, r3)
SAVE_VSR(44, r3)
SAVE_VSR(45, r3)
SAVE_VSR(46, r3)
SAVE_VSR(47, r3)
SAVE_VSR(48, r3)
SAVE_VSR(49, r3)
SAVE_VSR(50, r3)
SAVE_VSR(51, r3)
SAVE_VSR(52, r3)
SAVE_VSR(53, r3)
SAVE_VSR(54, r3)
SAVE_VSR(55, r3)
SAVE_VSR(56, r3)
SAVE_VSR(57, r3)
SAVE_VSR(58, r3)
SAVE_VSR(59, r3)
SAVE_VSR(60, r3)
SAVE_VSR(61, r3)
SAVE_VSR(62, r3)
SAVE_VSR(63, r3)
TRASH_GPR(2)
TRASH_GPR(3)
TRASH_GPR(4)
TRASH_GPR(5)
TRASH_GPR(6)
TRASH_GPR(7)
TRASH_GPR(8)
TRASH_GPR(9)
TRASH_GPR(10)
TRASH_GPR(11)
TRASH_GPR(12)
TRASH_GPR(14)
TRASH_GPR(15)
TRASH_GPR(16)
TRASH_GPR(17)
TRASH_GPR(18)
TRASH_GPR(19)
TRASH_GPR(20)
TRASH_GPR(21)
TRASH_GPR(22)
TRASH_GPR(23)
TRASH_GPR(24)
TRASH_GPR(25)
TRASH_GPR(26)
TRASH_GPR(27)
TRASH_GPR(28)
TRASH_GPR(29)
TRASH_GPR(30)
TRASH_GPR(31)
RESTORE_TOC(ebb_handler)
/*
* r13 is our TLS pointer. We leave whatever value was in there when the
* EBB fired. That seems to be OK because once set the TLS pointer is not
* changed - but presumably that could change in future.
*/
bl ebb_hook
nop
/* r2 may be changed here but we don't care */
lfd f0, FSCR_SAVE(r1)
mtfsf 0xff,f0
lfd f0, VSCR_SAVE(r1)
mtvscr f0
LOAD_VSR(0, r3)
LOAD_VSR(1, r3)
LOAD_VSR(2, r3)
LOAD_VSR(3, r3)
LOAD_VSR(4, r3)
LOAD_VSR(5, r3)
LOAD_VSR(6, r3)
LOAD_VSR(7, r3)
LOAD_VSR(8, r3)
LOAD_VSR(9, r3)
LOAD_VSR(10, r3)
LOAD_VSR(11, r3)
LOAD_VSR(12, r3)
LOAD_VSR(13, r3)
LOAD_VSR(14, r3)
LOAD_VSR(15, r3)
LOAD_VSR(16, r3)
LOAD_VSR(17, r3)
LOAD_VSR(18, r3)
LOAD_VSR(19, r3)
LOAD_VSR(20, r3)
LOAD_VSR(21, r3)
LOAD_VSR(22, r3)
LOAD_VSR(23, r3)
LOAD_VSR(24, r3)
LOAD_VSR(25, r3)
LOAD_VSR(26, r3)
LOAD_VSR(27, r3)
LOAD_VSR(28, r3)
LOAD_VSR(29, r3)
LOAD_VSR(30, r3)
LOAD_VSR(31, r3)
LOAD_VSR(32, r3)
LOAD_VSR(33, r3)
LOAD_VSR(34, r3)
LOAD_VSR(35, r3)
LOAD_VSR(36, r3)
LOAD_VSR(37, r3)
LOAD_VSR(38, r3)
LOAD_VSR(39, r3)
LOAD_VSR(40, r3)
LOAD_VSR(41, r3)
LOAD_VSR(42, r3)
LOAD_VSR(43, r3)
LOAD_VSR(44, r3)
LOAD_VSR(45, r3)
LOAD_VSR(46, r3)
LOAD_VSR(47, r3)
LOAD_VSR(48, r3)
LOAD_VSR(49, r3)
LOAD_VSR(50, r3)
LOAD_VSR(51, r3)
LOAD_VSR(52, r3)
LOAD_VSR(53, r3)
LOAD_VSR(54, r3)
LOAD_VSR(55, r3)
LOAD_VSR(56, r3)
LOAD_VSR(57, r3)
LOAD_VSR(58, r3)
LOAD_VSR(59, r3)
LOAD_VSR(60, r3)
LOAD_VSR(61, r3)
LOAD_VSR(62, r3)
LOAD_VSR(63, r3)
ld r0,XER_SAVE(r1)
mtxer r0
ld r0,CTR_SAVE(r1)
mtctr r0
ld r0,LR_SAVE(r1)
mtlr r0
ld r0,CCR_SAVE(r1)
mtcr r0
REST_GPR(0)
REST_GPR(2)
REST_GPR(3)
REST_GPR(4)
REST_GPR(5)
REST_GPR(6)
REST_GPR(7)
REST_GPR(8)
REST_GPR(9)
REST_GPR(10)
REST_GPR(11)
REST_GPR(12)
REST_GPR(13)
REST_GPR(14)
REST_GPR(15)
REST_GPR(16)
REST_GPR(17)
REST_GPR(18)
REST_GPR(19)
REST_GPR(20)
REST_GPR(21)
REST_GPR(22)
REST_GPR(23)
REST_GPR(24)
REST_GPR(25)
REST_GPR(26)
REST_GPR(27)
REST_GPR(28)
REST_GPR(29)
REST_GPR(30)
REST_GPR(31)
addi r1,r1,STACK_FRAME
RFEBB
FUNC_END(ebb_handler)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,007
|
tools/arch/x86/lib/memcpy_64.S
|
/* Copyright 2002 Andi Kleen */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/mcsafe_test.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
* the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
* have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
*/
.weak memcpy
/*
* memcpy - Copy a memory block.
*
* Input:
* rdi destination
* rsi source
* rdx count
*
* Output:
* rax original destination
*/
/*
 * Default memcpy: patched at boot by the alternatives framework.
 * With REP_GOOD the inline rep-movs body below runs; with ERMS we jump
 * to memcpy_erms; otherwise we jump to memcpy_orig.
 * In: rdi = dest, rsi = src, rdx = count.  Out: rax = original dest.
 */
ENTRY(__memcpy)
ENTRY(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
movq %rdi, %rax /* return value: original destination */
movq %rdx, %rcx
shrq $3, %rcx /* rcx = number of whole qwords */
andl $7, %edx /* edx = leftover byte count */
rep movsq
movl %edx, %ecx
rep movsb /* copy the 0-7 remaining bytes */
ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)
/*
* memcpy_erms() - enhanced fast string memcpy. This is faster and
* simpler than memcpy. Use memcpy_erms when possible.
*/
ENTRY(memcpy_erms)
movq %rdi, %rax /* return value: original destination */
movq %rdx, %rcx /* rep movsb copies rcx bytes rsi -> rdi */
rep movsb
ret
ENDPROC(memcpy_erms)
/*
 * Fallback memcpy for CPUs without fast string support.
 * In: rdi = dest, rsi = src, rdx = count.  Out: rax = original dest.
 * Copies >= 0x20 bytes in unrolled 32-byte blocks (forward or backward
 * depending on a pointer heuristic), then dispatches the tail by size.
 */
ENTRY(memcpy_orig)
movq %rdi, %rax /* return value: original destination */
cmpq $0x20, %rdx
jb .Lhandle_tail
/*
* We check whether memory false dependence could occur,
* then jump to corresponding copy mode.
*/
cmp %dil, %sil /* heuristic: compares only the low bytes of the pointers */
jl .Lcopy_backward
subq $0x20, %rdx
.Lcopy_forward_loop:
subq $0x20, %rdx
/*
* Move in blocks of 4x8 bytes:
*/
movq 0*8(%rsi), %r8
movq 1*8(%rsi), %r9
movq 2*8(%rsi), %r10
movq 3*8(%rsi), %r11
leaq 4*8(%rsi), %rsi
movq %r8, 0*8(%rdi)
movq %r9, 1*8(%rdi)
movq %r10, 2*8(%rdi)
movq %r11, 3*8(%rdi)
leaq 4*8(%rdi), %rdi
jae .Lcopy_forward_loop /* loop while the subq above did not borrow */
addl $0x20, %edx /* undo the bias: edx = remaining 0..0x1f bytes */
jmp .Lhandle_tail
.Lcopy_backward:
/*
* Calculate copy position to tail.
*/
addq %rdx, %rsi
addq %rdx, %rdi
subq $0x20, %rdx
/*
* At most 3 ALU operations in one cycle,
* so append NOPS in the same 16 bytes trunk.
*/
.p2align 4
.Lcopy_backward_loop:
subq $0x20, %rdx
movq -1*8(%rsi), %r8
movq -2*8(%rsi), %r9
movq -3*8(%rsi), %r10
movq -4*8(%rsi), %r11
leaq -4*8(%rsi), %rsi
movq %r8, -1*8(%rdi)
movq %r9, -2*8(%rdi)
movq %r10, -3*8(%rdi)
movq %r11, -4*8(%rdi)
leaq -4*8(%rdi), %rdi
jae .Lcopy_backward_loop
/*
* Calculate copy position to head.
*/
addl $0x20, %edx
subq %rdx, %rsi
subq %rdx, %rdi
.Lhandle_tail: /* edx = remaining bytes, 0..0x1f */
cmpl $16, %edx
jb .Lless_16bytes
/*
* Move data from 16 bytes to 31 bytes.
* Overlapping 2x8-byte loads/stores from both ends cover any size.
*/
movq 0*8(%rsi), %r8
movq 1*8(%rsi), %r9
movq -2*8(%rsi, %rdx), %r10
movq -1*8(%rsi, %rdx), %r11
movq %r8, 0*8(%rdi)
movq %r9, 1*8(%rdi)
movq %r10, -2*8(%rdi, %rdx)
movq %r11, -1*8(%rdi, %rdx)
retq
.p2align 4
.Lless_16bytes:
cmpl $8, %edx
jb .Lless_8bytes
/*
* Move data from 8 bytes to 15 bytes.
*/
movq 0*8(%rsi), %r8
movq -1*8(%rsi, %rdx), %r9
movq %r8, 0*8(%rdi)
movq %r9, -1*8(%rdi, %rdx)
retq
.p2align 4
.Lless_8bytes:
cmpl $4, %edx
jb .Lless_3bytes
/*
* Move data from 4 bytes to 7 bytes.
*/
movl (%rsi), %ecx
movl -4(%rsi, %rdx), %r8d
movl %ecx, (%rdi)
movl %r8d, -4(%rdi, %rdx)
retq
.p2align 4
.Lless_3bytes:
subl $1, %edx /* edx < 4: borrow (CF) means count was 0 */
jb .Lend
/*
* Move data from 1 bytes to 3 bytes.
*/
movzbl (%rsi), %ecx
jz .Lstore_1byte /* movzx preserves flags: ZF is still from the subl above */
movzbq 1(%rsi), %r8
movzbq (%rsi, %rdx), %r9
movb %r8b, 1(%rdi)
movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
movb %cl, (%rdi)
.Lend:
retq
ENDPROC(memcpy_orig)
#ifndef CONFIG_UML
MCSAFE_TEST_CTL
/*
* __memcpy_mcsafe - memory copy with machine check exception handling
* Note that we only catch machine checks when reading the source addresses.
* Writes to target are posted and don't generate machine checks.
*/
/*
 * __memcpy_mcsafe(dest=rdi, src=rsi, count=rdx)
 * Copy with machine-check recovery on source reads.  Returns 0 (rax) on
 * success; on a fault the .fixup handlers (.E_*) return the number of
 * bytes NOT copied.  Aligns the source to 8 bytes, copies whole words,
 * then finishes byte-wise.
 */
ENTRY(__memcpy_mcsafe)
cmpl $8, %edx
/* Less than 8 bytes? Go to byte copy loop */
jb .L_no_whole_words
/* Check for bad alignment of source */
testl $7, %esi
/* Already aligned */
jz .L_8byte_aligned
/* Copy one byte at a time until source is 8-byte aligned */
movl %esi, %ecx
andl $7, %ecx
subl $8, %ecx
negl %ecx /* ecx = 8 - (src & 7): leading bytes needed to align */
subl %ecx, %edx
.L_read_leading_bytes:
movb (%rsi), %al
MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
.L_write_leading_bytes:
movb %al, (%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz .L_read_leading_bytes
.L_8byte_aligned:
movl %edx, %ecx
andl $7, %edx /* edx = trailing byte count */
shrl $3, %ecx /* ecx = whole-word count */
jz .L_no_whole_words
.L_read_words:
movq (%rsi), %r8
MCSAFE_TEST_SRC %rsi 8 .E_read_words
MCSAFE_TEST_DST %rdi 8 .E_write_words
.L_write_words:
movq %r8, (%rdi)
addq $8, %rsi
addq $8, %rdi
decl %ecx
jnz .L_read_words
/* Any trailing bytes? */
.L_no_whole_words:
andl %edx, %edx
jz .L_done_memcpy_trap
/* Copy trailing bytes */
movl %edx, %ecx
.L_read_trailing_bytes:
movb (%rsi), %al
MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
.L_write_trailing_bytes:
movb %al, (%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz .L_read_trailing_bytes
/* Copy successful. Return zero */
.L_done_memcpy_trap:
xorl %eax, %eax
ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
.section .fixup, "ax"
/*
* Return number of bytes not copied for any failure. Note that
* there is no "tail" handling since the source buffer is 8-byte
* aligned and poison is cacheline aligned.
*/
.E_read_words:
shll $3, %ecx
.E_leading_bytes:
addl %edx, %ecx
.E_trailing_bytes:
mov %ecx, %eax
ret
/*
* For write fault handling, given the destination is unaligned,
* we handle faults on multi-byte writes with a byte-by-byte
* copy up to the write-protected page.
*/
.E_write_words:
shll $3, %ecx
addl %edx, %ecx
movl %ecx, %edx
jmp mcsafe_handle_tail
.previous
_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
_ASM_EXTABLE(.L_write_words, .E_write_words)
_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,702
|
tools/arch/x86/lib/memset_64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
.weak memset
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
* simpler and shorter than the original function as well.
*
* rdi destination
* rsi value (char)
* rdx count (bytes)
*
* rax original destination
*/
/*
 * Default memset: patched at boot by the alternatives framework.
 * In: rdi = dest, sil = fill byte, rdx = count.  Out: rax = original dest.
 */
ENTRY(memset)
ENTRY(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
* to use it when possible. If not available, use fast string instructions.
*
* Otherwise, use original memset function.
*/
ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memset_erms", X86_FEATURE_ERMS
movq %rdi,%r9 /* preserve dest for the return value */
movq %rdx,%rcx
andl $7,%edx /* edx = leftover byte count */
shrq $3,%rcx /* rcx = number of whole qwords */
/* expand byte value */
movzbl %sil,%esi
movabs $0x0101010101010101,%rax
imulq %rsi,%rax /* rax = fill byte replicated into all 8 bytes */
rep stosq
movl %edx,%ecx
rep stosb /* store the 0-7 remaining bytes */
movq %r9,%rax
ret
ENDPROC(memset)
ENDPROC(__memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
* enhanced rep stosb to override the fast string function.
* The code is simpler and shorter than the fast string function as well.
*
* rdi destination
* rsi value (char)
* rdx count (bytes)
*
* rax original destination
*/
ENTRY(memset_erms)
movq %rdi,%r9 /* preserve dest for the return value */
movb %sil,%al /* rep stosb stores al */
movq %rdx,%rcx /* rcx = byte count */
rep stosb
movq %r9,%rax
ret
ENDPROC(memset_erms)
/*
 * Fallback memset for CPUs without fast string support.
 * In: rdi = dest, sil = fill byte, rdx = count.  Out: rax = original dest.
 * Aligns dest to 8 bytes, stores in unrolled 64-byte blocks, then
 * finishes with 8-byte and 1-byte loops.
 */
ENTRY(memset_orig)
movq %rdi,%r10 /* preserve dest for the return value */
/* expand byte value */
movzbl %sil,%ecx
movabs $0x0101010101010101,%rax
imulq %rcx,%rax /* rax = fill byte replicated into all 8 bytes */
/* align dst */
movl %edi,%r9d
andl $7,%r9d /* r9d = misalignment of dest */
jnz .Lbad_alignment
.Lafter_bad_alignment:
movq %rdx,%rcx
shrq $6,%rcx /* rcx = number of 64-byte blocks */
jz .Lhandle_tail
.p2align 4
.Lloop_64:
decq %rcx
movq %rax,(%rdi)
movq %rax,8(%rdi)
movq %rax,16(%rdi)
movq %rax,24(%rdi)
movq %rax,32(%rdi)
movq %rax,40(%rdi)
movq %rax,48(%rdi)
movq %rax,56(%rdi)
leaq 64(%rdi),%rdi /* lea preserves the flags the jnz below needs */
jnz .Lloop_64
/* Handle tail in loops. The loops should be faster than hard
to predict jump tables. */
.p2align 4
.Lhandle_tail:
movl %edx,%ecx
andl $63&(~7),%ecx /* whole qwords remaining below 64 bytes */
jz .Lhandle_7
shrl $3,%ecx
.p2align 4
.Lloop_8:
decl %ecx
movq %rax,(%rdi)
leaq 8(%rdi),%rdi
jnz .Lloop_8
.Lhandle_7:
andl $7,%edx /* final 0-7 bytes */
jz .Lende
.p2align 4
.Lloop_1:
decl %edx
movb %al,(%rdi)
leaq 1(%rdi),%rdi
jnz .Lloop_1
.Lende:
movq %r10,%rax
ret
.Lbad_alignment:
cmpq $7,%rdx
jbe .Lhandle_7 /* tiny buffer: byte loop handles it entirely */
movq %rax,(%rdi) /* unaligned store */
movq $8,%r8
subq %r9,%r8 /* r8 = 8 - misalignment = bytes just covered */
addq %r8,%rdi
subq %r8,%rdx
jmp .Lafter_bad_alignment
.Lfinal:
ENDPROC(memset_orig)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,109
|
arch/nios2/kernel/head.S
|
/*
* Copyright (C) 2009 Wind River Systems Inc
* Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
* Copyright (C) 2004 Microtronix Datacom Ltd
* Copyright (C) 2001 Vic Phillips, Microtronix Datacom Ltd.
*
* Based on head.S for Altera's Excalibur development board with nios processor
*
* Based on the following from the Excalibur sdk distribution:
* NA_MemoryMap.s, NR_JumpToStart.s, NR_Setup.s, NR_CWPManager.s
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-macros.h>
/*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
.data
.global empty_zero_page
.align 12
empty_zero_page:
.space PAGE_SIZE
/*
* This global variable is used as an extension to the nios'
* STATUS register to emulate a user/supervisor mode.
*/
.data
.align 2
.set noat
.global _current_thread
_current_thread:
.long 0
/*
* Input(s): passed from u-boot
* r4 - Optional pointer to a board information structure.
* r5 - Optional pointer to the physical starting address of the init RAM
* disk.
* r6 - Optional pointer to the physical ending address of the init RAM
* disk.
* r7 - Optional pointer to the physical starting address of any kernel
* command-line parameters.
*/
/*
* First executable code - detected and jumped to by the ROM bootstrap
* if the code resides in flash (looks for "Nios" at offset 0x0c from
* the potential executable image).
*/
__HEAD
ENTRY(_start)
wrctl status, r0 /* Disable interrupts */
/* Initialize all cache lines within the instruction cache */
movia r1, NIOS2_ICACHE_SIZE
movui r2, NIOS2_ICACHE_LINE_SIZE
icache_init:
initi r1
sub r1, r1, r2
bgt r1, r0, icache_init
br 1f
/*
* This is the default location for the exception handler. Code in jump
* to our handler
*/
ENTRY(exception_handler_hook)
movia r24, inthandler
jmp r24
ENTRY(fast_handler)
nextpc et
helper:
stw r3, r3save - helper(et)
rdctl r3 , pteaddr
srli r3, r3, 12
slli r3, r3, 2
movia et, pgd_current
ldw et, 0(et)
add r3, et, r3
ldw et, 0(r3)
rdctl r3, pteaddr
andi r3, r3, 0xfff
add et, r3, et
ldw et, 0(et)
wrctl tlbacc, et
nextpc et
helper2:
ldw r3, r3save - helper2(et)
subi ea, ea, 4
eret
r3save:
.word 0x0
ENTRY(fast_handler_end)
1:
/*
* After the instruction cache is initialized, the data cache must
* also be initialized.
*/
movia r1, NIOS2_DCACHE_SIZE
movui r2, NIOS2_DCACHE_LINE_SIZE
dcache_init:
initd 0(r1)
sub r1, r1, r2
bgt r1, r0, dcache_init
nextpc r1 /* Find out where we are */
chkadr:
movia r2, chkadr
beq r1, r2,finish_move /* We are running in RAM done */
addi r1, r1,(_start - chkadr) /* Source */
movia r2, _start /* Destination */
movia r3, __bss_start /* End of copy */
loop_move: /* r1: src, r2: dest, r3: last dest */
ldw r8, 0(r1) /* load a word from [r1] */
stw r8, 0(r2) /* store a word to dest [r2] */
flushd 0(r2) /* Flush cache for safety */
addi r1, r1, 4 /* inc the src addr */
addi r2, r2, 4 /* inc the dest addr */
blt r2, r3, loop_move
movia r1, finish_move /* VMA(_start)->l1 */
jmp r1 /* jmp to _start */
finish_move:
/* Mask off all possible interrupts */
wrctl ienable, r0
/* Clear .bss */
movia r2, __bss_start
movia r1, __bss_stop
1:
stb r0, 0(r2)
addi r2, r2, 1
bne r1, r2, 1b
movia r1, init_thread_union /* set stack at top of the task union */
addi sp, r1, THREAD_SIZE
movia r2, _current_thread /* Remember current thread */
stw r1, 0(r2)
movia r1, nios2_boot_init /* save args r4-r7 passed from u-boot */
callr r1
movia r1, start_kernel /* call start_kernel as a subroutine */
callr r1
/* If we return from start_kernel, break to the oci debugger and
* buggered we are.
*/
break
/* End of startup code */
.set at
|
AirFortressIlikara/LS2K0300-linux-4.19
| 14,821
|
arch/nios2/kernel/entry.S
|
/*
* linux/arch/nios2/kernel/entry.S
*
* Copyright (C) 2013-2014 Altera Corporation
* Copyright (C) 2009, Wind River Systems Inc
*
* Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
*
* Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
* Kenneth Albanowski <kjahds@kjahds.com>,
* Copyright (C) 2000 Lineo Inc. (www.lineo.com)
* Copyright (C) 2004 Microtronix Datacom Ltd.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
* ColdFire support by Greg Ungerer (gerg@snapgear.com)
* 5307 fixes by David W. Miller
* linux 2.4 support David McCullough <davidm@snapgear.com>
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asm-macros.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/unistd.h>
#include <asm/processor.h>
.macro GET_THREAD_INFO reg
.if THREAD_SIZE & 0xffff0000
andhi \reg, sp, %hi(~(THREAD_SIZE-1))
.else
addi \reg, r0, %lo(~(THREAD_SIZE-1))
and \reg, \reg, sp
.endif
.endm
.macro kuser_cmpxchg_check
/*
* Make sure our user space atomic helper is restarted if it was
* interrupted in a critical region.
* ea-4 = address of interrupted insn (ea must be preserved).
* sp = saved regs.
* cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
* If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
* cmpxchg_ldw + 4.
*/
/* et = cmpxchg_stw + 4 */
movui et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
bgtu ea, et, 1f
subi et, et, (cmpxchg_stw - cmpxchg_ldw) /* et = cmpxchg_ldw + 4 */
bltu ea, et, 1f
stw et, PT_EA(sp) /* fix up EA */
mov ea, et
1:
.endm
.section .rodata
.align 4
exception_table:
.word unhandled_exception /* 0 - Reset */
.word unhandled_exception /* 1 - Processor-only Reset */
.word external_interrupt /* 2 - Interrupt */
.word handle_trap /* 3 - Trap Instruction */
.word instruction_trap /* 4 - Unimplemented instruction */
.word handle_illegal /* 5 - Illegal instruction */
.word handle_unaligned /* 6 - Misaligned data access */
.word handle_unaligned /* 7 - Misaligned destination address */
.word handle_diverror /* 8 - Division error */
.word protection_exception_ba /* 9 - Supervisor-only instr. address */
.word protection_exception_instr /* 10 - Supervisor only instruction */
.word protection_exception_ba /* 11 - Supervisor only data address */
.word unhandled_exception /* 12 - Double TLB miss (data) */
.word protection_exception_pte /* 13 - TLB permission violation (x) */
.word protection_exception_pte /* 14 - TLB permission violation (r) */
.word protection_exception_pte /* 15 - TLB permission violation (w) */
.word unhandled_exception /* 16 - MPU region violation */
/*
 * Trap dispatch table, indexed by the 5-bit immediate (IMM5) field of
 * the "trap" instruction (decoded in handle_trap below). Slot 0 is the
 * system-call trap; slots 30/31 are debugger breakpoints; all other
 * slots are reserved and funneled to handle_trap_reserved.
 */
trap_table:
.word handle_system_call /* 0 */
.word handle_trap_1 /* 1 */
.word handle_trap_2 /* 2 */
.word handle_trap_3 /* 3 */
.word handle_trap_reserved /* 4 */
.word handle_trap_reserved /* 5 */
.word handle_trap_reserved /* 6 */
.word handle_trap_reserved /* 7 */
.word handle_trap_reserved /* 8 */
.word handle_trap_reserved /* 9 */
.word handle_trap_reserved /* 10 */
.word handle_trap_reserved /* 11 */
.word handle_trap_reserved /* 12 */
.word handle_trap_reserved /* 13 */
.word handle_trap_reserved /* 14 */
.word handle_trap_reserved /* 15 */
.word handle_trap_reserved /* 16 */
.word handle_trap_reserved /* 17 */
.word handle_trap_reserved /* 18 */
.word handle_trap_reserved /* 19 */
.word handle_trap_reserved /* 20 */
.word handle_trap_reserved /* 21 */
.word handle_trap_reserved /* 22 */
.word handle_trap_reserved /* 23 */
.word handle_trap_reserved /* 24 */
.word handle_trap_reserved /* 25 */
.word handle_trap_reserved /* 26 */
.word handle_trap_reserved /* 27 */
.word handle_trap_reserved /* 28 */
.word handle_trap_reserved /* 29 */
#ifdef CONFIG_KGDB
.word handle_kgdb_breakpoint /* 30 KGDB breakpoint */
#else
.word instruction_trap /* 30 */
#endif
.word handle_breakpoint /* 31 */
.text
.set noat
.set nobreak
/*
 * Common entry point for all exceptions and hardware interrupts.
 * Builds a struct pt_regs frame on the kernel stack (SAVE_ALL), then
 * dispatches through exception_table using the "exception" control
 * register (cause) as a word index. Handlers receive r4 = pt_regs.
 */
ENTRY(inthandler)
SAVE_ALL
kuser_cmpxchg_check
/* Clear EH bit before we get a new exception in the kernel
 * and after we have saved it to the exception frame. This is done
 * whether it's trap, tlb-miss or interrupt. If we don't do this,
 * estatus is not updated on the next exception.
 */
rdctl r24, status
movi r9, %lo(~STATUS_EH)
and r24, r24, r9
wrctl status, r24
/* Read cause and vector and branch to the associated handler */
mov r4, sp /* r4 = pt_regs for the handler */
rdctl r5, exception /* cause << 2, already a word offset */
movia r9, exception_table
add r24, r9, r5
ldw r24, 0(r24)
jmp r24
/***********************************************************************
 * Handle traps
 ***********************************************************************
 * Decode the IMM5 field of the trapping instruction (bits [10:6]) and
 * dispatch through trap_table. ldwio bypasses the data cache so the
 * actual instruction word at ea-4 is fetched from memory.
 */
ENTRY(handle_trap)
ldwio r24, -4(ea) /* instruction that caused the exception */
srli r24, r24, 4 /* IMM5 now in bits [6:2] ... */
andi r24, r24, 0x7c /* ... i.e. IMM5 * 4 = table byte offset */
movia r9,trap_table
add r24, r24, r9
ldw r24, 0(r24)
jmp r24
/***********************************************************************
 * Handle system calls
 ***********************************************************************
 * On entry: r2 = syscall number, r4-r9 = arguments (r4/r5 reloaded
 * from the pt_regs frame because the common entry path clobbered them).
 */
ENTRY(handle_system_call)
/* Enable interrupts */
rdctl r10, status
ori r10, r10, STATUS_PIE
wrctl status, r10
/* Reload registers destroyed by common code. */
ldw r4, PT_R4(sp)
ldw r5, PT_R5(sp)
local_restart:
/* Check that the requested system call is within limits */
movui r1, __NR_syscalls
bgeu r2, r1, ret_invsyscall
slli r1, r2, 2 /* r1 = syscall# * 4 (table offset) */
movhi r11, %hiadj(sys_call_table)
add r1, r1, r11
ldw r1, %lo(sys_call_table)(r1)
beq r1, r0, ret_invsyscall /* NULL table entry -> ENOSYS */
/* Check if we are being traced */
GET_THREAD_INFO r11
ldw r11,TI_FLAGS(r11)
BTBNZ r11,r11,TIF_SYSCALL_TRACE,traced_system_call
/* Execute the system call */
callr r1
/* If the syscall returns a negative result:
 * Set r7 to 1 to indicate error,
 * Negate r2 to get a positive error code
 * If the syscall returns zero or a positive value:
 * Set r7 to 0.
 * The sigreturn system calls will skip the code below by
 * adding (end_translate_rc_and_ret - translate_rc_and_ret)
 * to register ra, to avoid destroying the restored registers.
 */
translate_rc_and_ret:
movi r1, 0
bge r2, zero, 3f
sub r2, zero, r2
movi r1, 1
3:
stw r2, PT_R2(sp) /* return value */
stw r1, PT_R7(sp) /* error flag */
end_translate_rc_and_ret:
ret_from_exception:
ldw r1, PT_ESTATUS(sp)
/* Returning to user mode? Then handle resched/signals first;
 * a kernel-mode return falls through and skips them. */
TSTBNZ r1, r1, ESTATUS_EU, Luser_return
restore_all:
rdctl r10, status /* disable intrs */
andi r10, r10, %lo(~STATUS_PIE)
wrctl status, r10
RESTORE_ALL
eret
/* If the syscall number was invalid return ENOSYS */
ret_invsyscall:
movi r2, -ENOSYS
br translate_rc_and_ret
/* This implements the same as above, except it calls
 * do_syscall_trace_enter and do_syscall_trace_exit before and after the
 * syscall in order for utilities like strace and gdb to work.
 */
traced_system_call:
SAVE_SWITCH_STACK
call do_syscall_trace_enter
RESTORE_SWITCH_STACK
/* Create system call register arguments. The 5th and 6th
 arguments on stack are already in place at the beginning
 of pt_regs. The tracer may have rewritten any of these,
 so everything is reloaded from the frame. */
ldw r2, PT_R2(sp)
ldw r4, PT_R4(sp)
ldw r5, PT_R5(sp)
ldw r6, PT_R6(sp)
ldw r7, PT_R7(sp)
/* Fetch the syscall function, we don't need to check the boundaries
 * since this is already done.
 */
slli r1, r2, 2
movhi r11,%hiadj(sys_call_table)
add r1, r1, r11
ldw r1, %lo(sys_call_table)(r1)
callr r1
/* If the syscall returns a negative result:
 * Set r7 to 1 to indicate error,
 * Negate r2 to get a positive error code
 * If the syscall returns zero or a positive value:
 * Set r7 to 0.
 * The sigreturn system calls will skip the code below by
 * adding to register ra. To avoid destroying registers
 */
translate_rc_and_ret2:
movi r1, 0
bge r2, zero, 4f
sub r2, zero, r2
movi r1, 1
4:
stw r2, PT_R2(sp)
stw r1, PT_R7(sp)
end_translate_rc_and_ret2:
SAVE_SWITCH_STACK
call do_syscall_trace_exit
RESTORE_SWITCH_STACK
br ret_from_exception
/*
 * Return-to-user work loop: run pending reschedules and signal
 * delivery before finally dropping back to user mode via restore_all.
 */
Luser_return:
GET_THREAD_INFO r11 /* get thread_info pointer */
ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
ANDI32 r11, r10, _TIF_WORK_MASK
beq r11, r0, restore_all /* Nothing to do */
BTBZ r1, r10, TIF_NEED_RESCHED, Lsignal_return
/* Reschedule work */
call schedule
br ret_from_exception
Lsignal_return:
ANDI32 r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
beq r1, r0, restore_all
mov r4, sp /* pt_regs */
SAVE_SWITCH_STACK
call do_notify_resume
beq r2, r0, no_work_pending
RESTORE_SWITCH_STACK
/* prepare restart syscall here without leaving kernel */
ldw r2, PT_R2(sp) /* reload syscall number in r2 */
ldw r4, PT_R4(sp) /* reload syscall arguments r4-r9 */
ldw r5, PT_R5(sp)
ldw r6, PT_R6(sp)
ldw r7, PT_R7(sp)
ldw r8, PT_R8(sp)
ldw r9, PT_R9(sp)
br local_restart /* restart syscall */
no_work_pending:
RESTORE_SWITCH_STACK
br ret_from_exception
/***********************************************************************
 * Handle external interrupts.
 ***********************************************************************
 */
/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
external_interrupt:
rdctl r12, ipending
rdctl r9, ienable
and r12, r12, r9 /* only consider enabled, pending IRQs */
/* skip if no interrupt is pending */
beq r12, r0, ret_from_interrupt
movi r24, -1
stw r24, PT_ORIG_R2(sp) /* mark frame as "not a syscall" */
/*
 * Process an external hardware interrupt.
 */
addi ea, ea, -4 /* re-issue the interrupted instruction */
stw ea, PT_EA(sp)
2: movi r4, %lo(-1) /* Start from bit position 0,
highest priority */
/* This is the IRQ # for handler call */
1: andi r10, r12, 1 /* Isolate bit we are interested in */
srli r12, r12, 1 /* shift count is costly without hardware
multiplier */
addi r4, r4, 1
beq r10, r0, 1b
mov r5, sp /* Setup pt_regs pointer for handler call */
call do_IRQ
rdctl r12, ipending /* check again if irq still pending */
rdctl r9, ienable /* Isolate possible interrupts */
and r12, r12, r9
bne r12, r0, 2b
/* br ret_from_interrupt */ /* fall through to ret_from_interrupt */
ENTRY(ret_from_interrupt)
ldw r1, PT_ESTATUS(sp) /* check if returning to kernel */
TSTBNZ r1, r1, ESTATUS_EU, Luser_return
#ifdef CONFIG_PREEMPT
/* Kernel preemption: resched only if preempt_count == 0,
 * NEED_RESCHED is set and the interrupted context had IRQs on. */
GET_THREAD_INFO r1
ldw r4, TI_PREEMPT_COUNT(r1)
bne r4, r0, restore_all
ldw r4, TI_FLAGS(r1) /* ? Need resched set */
BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
andi r10, r4, ESTATUS_EPIE
beq r10, r0, restore_all
call preempt_schedule_irq
#endif
br restore_all
/***********************************************************************
 * A few syscall wrappers
 ***********************************************************************
 */
/*
 * int clone(unsigned long clone_flags, unsigned long newsp,
 * int __user * parent_tidptr, int __user * child_tidptr,
 * int tls_val)
 */
ENTRY(sys_clone)
SAVE_SWITCH_STACK
addi sp, sp, -4
stw r7, 0(sp) /* Pass 5th arg thru stack */
mov r7, r6 /* 4th arg is 3rd of clone() */
mov r6, zero /* 3rd arg always 0 */
call do_fork
addi sp, sp, 4
RESTORE_SWITCH_STACK
ret
ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK
mov r4, sp
call do_rt_sigreturn
RESTORE_SWITCH_STACK
/* Skip the error-translation stub on return: r2 already holds
 * the restored user r2 and must not be re-interpreted as an errno. */
addi ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
ret
/***********************************************************************
 * A few other wrappers and stubs
 ***********************************************************************
 * Thin trampolines from exception_table slots to the C handlers.
 * All of them enter with r4 = pt_regs (set up by inthandler) and
 * return through ret_from_exception.
 */
protection_exception_pte:
rdctl r6, pteaddr
slli r6, r6, 10 /* recover faulting virtual address from pteaddr */
call do_page_fault
br ret_from_exception
protection_exception_ba:
rdctl r6, badaddr /* faulting address supplied directly */
call do_page_fault
br ret_from_exception
protection_exception_instr:
call handle_supervisor_instr
br ret_from_exception
handle_breakpoint:
call breakpoint_c
br ret_from_exception
#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
handle_unaligned:
SAVE_SWITCH_STACK /* fixup code needs callee-saved regs too */
call handle_unaligned_c
RESTORE_SWITCH_STACK
br ret_from_exception
#else
handle_unaligned:
call handle_unaligned_c
br ret_from_exception
#endif
handle_illegal:
call handle_illegal_c
br ret_from_exception
handle_diverror:
call handle_diverror_c
br ret_from_exception
#ifdef CONFIG_KGDB
handle_kgdb_breakpoint:
call kgdb_breakpoint_c
br ret_from_exception
#endif
handle_trap_1:
call handle_trap_1_c
br ret_from_exception
handle_trap_2:
call handle_trap_2_c
br ret_from_exception
handle_trap_3:
handle_trap_reserved:
call handle_trap_3_c
br ret_from_exception
/*
 * Context switch (called from C switch_to()).
 *
 * Beware - when entering resume, prev (the current task) is
 * in r4, next (the new task) is in r5, don't change these
 * registers.
 */
ENTRY(resume)
rdctl r7, status /* save thread status reg */
stw r7, TASK_THREAD + THREAD_KPSR(r4)
andi r7, r7, %lo(~STATUS_PIE) /* disable interrupts */
wrctl status, r7
SAVE_SWITCH_STACK
stw sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
ldw sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
movia r24, _current_thread /* save thread */
GET_THREAD_INFO r1
stw r1, 0(r24)
RESTORE_SWITCH_STACK
ldw r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
wrctl status, r7
ret
ENTRY(ret_from_fork)
call schedule_tail
br ret_from_exception
ENTRY(ret_from_kernel_thread)
call schedule_tail
mov r4,r17 /* arg */
callr r16 /* function */
br ret_from_exception
/*
 * Kernel user helpers.
 *
 * Each segment is 64-byte aligned and will be mapped to the <User space>.
 * New segments (if ever needed) must be added after the existing ones.
 * This mechanism should be used only for things that are really small and
 * justified, and not be abused freely.
 *
 */
/* Filling pads with undefined instructions.
 * \sym is the segment's start label, \size its fixed byte size;
 * first word-align with zero bytes, then fill to \size with 0xdeadbeef.
 */
.macro kuser_pad sym size
.if ((. - \sym) & 3)
.rept (4 - (. - \sym) & 3)
.byte 0
.endr
.endif
.rept ((\size - (. - \sym)) / 4)
.word 0xdeadbeef
.endr
.endm
.align 6
.globl __kuser_helper_start
__kuser_helper_start:
/* First word advertises the number of 64-byte helper segments. */
__kuser_helper_version: /* @ 0x1000 */
.word ((__kuser_helper_end - __kuser_helper_start) >> 6)
__kuser_cmpxchg: /* @ 0x1004 */
/*
 * r4 pointer to exchange variable
 * r5 old value
 * r6 new value
 * Atomicity is provided by the kernel: the exception entry path runs
 * kuser_cmpxchg_check (see inthandler) when a task is interrupted here.
 */
cmpxchg_ldw:
ldw r2, 0(r4) /* load current value */
sub r2, r2, r5 /* compare with old value */
bne r2, zero, cmpxchg_ret
/* We had a match, store the new value */
cmpxchg_stw:
stw r6, 0(r4)
cmpxchg_ret:
ret
kuser_pad __kuser_cmpxchg, 64
/* Signal return trampoline placed on the user stack frame. */
.globl __kuser_sigtramp
__kuser_sigtramp:
movi r2, __NR_rt_sigreturn
trap
kuser_pad __kuser_sigtramp, 64
.globl __kuser_helper_end
__kuser_helper_end:
|
AirFortressIlikara/LS2K0300-linux-4.19
| 15,355
|
arch/nios2/kernel/insnemu.S
|
/*
* Copyright (C) 2003-2013 Altera Corporation
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <asm/entry.h>
.set noat
.set nobreak
/*
 * Explicitly allow the use of r1 (the assembler temporary register)
 * within this code. This register is normally reserved for the use of
 * the compiler.
 */
/*
 * Software emulation of the mul/div instruction family for cores that
 * do not implement them (entered via exception_table slot 4).
 * First undo the generic SAVE_ALL frame built by inthandler — restore
 * every register plus estatus and drop the pt_regs frame — so that the
 * emulation below can build its own 32-word register save area.
 * et deliberately ends up holding the pre-exception sp.
 */
ENTRY(instruction_trap)
ldw r1, PT_R1(sp) // Restore registers
ldw r2, PT_R2(sp)
ldw r3, PT_R3(sp)
ldw r4, PT_R4(sp)
ldw r5, PT_R5(sp)
ldw r6, PT_R6(sp)
ldw r7, PT_R7(sp)
ldw r8, PT_R8(sp)
ldw r9, PT_R9(sp)
ldw r10, PT_R10(sp)
ldw r11, PT_R11(sp)
ldw r12, PT_R12(sp)
ldw r13, PT_R13(sp)
ldw r14, PT_R14(sp)
ldw r15, PT_R15(sp)
ldw ra, PT_RA(sp)
ldw fp, PT_FP(sp)
ldw gp, PT_GP(sp)
ldw et, PT_ESTATUS(sp)
wrctl estatus, et
ldw ea, PT_EA(sp)
ldw et, PT_SP(sp) /* backup sp in et */
addi sp, sp, PT_REGS_SIZE
/* INSTRUCTION EMULATION
 * ---------------------
 *
 * Nios II processors generate exceptions for unimplemented instructions.
 * The routines below emulate these instructions. Depending on the
 * processor core, the only instructions that might need to be emulated
 * are div, divu, mul, muli, mulxss, mulxsu, and mulxuu.
 *
 * The emulations match the instructions, except for the following
 * limitations:
 *
 * 1) The emulation routines do not emulate the use of the exception
 * temporary register (et) as a source operand because the exception
 * handler already has modified it.
 *
 * 2) The routines do not emulate the use of the stack pointer (sp) or
 * the exception return address register (ea) as a destination because
 * modifying these registers crashes the exception handler or the
 * interrupted routine.
 *
 * Detailed Design
 * ---------------
 *
 * The emulation routines expect the contents of integer registers r0-r31
 * to be on the stack at addresses sp, 4(sp), 8(sp), ... 124(sp). The
 * routines retrieve source operands from the stack and modify the
 * destination register's value on the stack prior to the end of the
 * exception handler. Then all registers except the destination register
 * are restored to their previous values.
 *
 * The instruction that causes the exception is found at address -4(ea).
 * The instruction's OP and OPX fields identify the operation to be
 * performed.
 *
 * One instruction, muli, is an I-type instruction that is identified by
 * an OP field of 0x24.
 *
 * muli AAAAA,BBBBB,IIIIIIIIIIIIIIII,-0x24-
 * 27 22 6 0 <-- LSB of field
 *
 * The remaining emulated instructions are R-type and have an OP field
 * of 0x3a. Their OPX fields identify them.
 *
 * R-type AAAAA,BBBBB,CCCCC,XXXXXX,NNNNN,-0x3a-
 * 27 22 17 11 6 0 <-- LSB of field
 *
 *
 * Opcode Encoding. muli is identified by its OP value. Then OPX & 0x02
 * is used to differentiate between the division opcodes and the
 * remaining multiplication opcodes.
 *
 * Instruction OP OPX OPX & 0x02
 * ----------- ---- ---- ----------
 * muli 0x24
 * divu 0x3a 0x24 0
 * div 0x3a 0x25 0
 * mul 0x3a 0x27 != 0
 * mulxuu 0x3a 0x07 != 0
 * mulxsu 0x3a 0x17 != 0
 * mulxss 0x3a 0x1f != 0
 */
/*
 * Save everything on the stack to make it easy for the emulation
 * routines to retrieve the source register operands.
 * Layout: 4*N(sp) holds register rN; estatus lives in the et slot
 * (100(sp)) and the original sp (currently in et) in the sp slot.
 */
addi sp, sp, -128
stw zero, 0(sp) /* Save zero on stack to avoid special case for r0. */
stw r1, 4(sp)
stw r2, 8(sp)
stw r3, 12(sp)
stw r4, 16(sp)
stw r5, 20(sp)
stw r6, 24(sp)
stw r7, 28(sp)
stw r8, 32(sp)
stw r9, 36(sp)
stw r10, 40(sp)
stw r11, 44(sp)
stw r12, 48(sp)
stw r13, 52(sp)
stw r14, 56(sp)
stw r15, 60(sp)
stw r16, 64(sp)
stw r17, 68(sp)
stw r18, 72(sp)
stw r19, 76(sp)
stw r20, 80(sp)
stw r21, 84(sp)
stw r22, 88(sp)
stw r23, 92(sp)
/* Don't bother to save et. It's already been changed. */
rdctl r5, estatus
stw r5, 100(sp)
stw gp, 104(sp)
stw et, 108(sp) /* et contains previous sp value. */
stw fp, 112(sp)
stw ea, 116(sp)
stw ra, 120(sp)
/*
 * Split the instruction into its fields. We need 4*A, 4*B, and 4*C as
 * offsets to the stack pointer for access to the stored register values.
 */
ldw r2,-4(ea) /* r2 = AAAAA,BBBBB,IIIIIIIIIIIIIIII,PPPPPP */
roli r3, r2, 7 /* r3 = BBB,IIIIIIIIIIIIIIII,PPPPPP,AAAAA,BB */
roli r4, r3, 3 /* r4 = IIIIIIIIIIIIIIII,PPPPPP,AAAAA,BBBBB */
roli r5, r4, 2 /* r5 = IIIIIIIIIIIIII,PPPPPP,AAAAA,BBBBB,II */
srai r4, r4, 16 /* r4 = (sign-extended) IMM16 */
roli r6, r5, 5 /* r6 = XXXX,NNNNN,PPPPPP,AAAAA,BBBBB,CCCCC,XX */
andi r2, r2, 0x3f /* r2 = 00000000000000000000000000,PPPPPP */
andi r3, r3, 0x7c /* r3 = 0000000000000000000000000,AAAAA,00 */
andi r5, r5, 0x7c /* r5 = 0000000000000000000000000,BBBBB,00 */
andi r6, r6, 0x7c /* r6 = 0000000000000000000000000,CCCCC,00 */
/* Now
 * r2 = OP
 * r3 = 4*A
 * r4 = IMM16 (sign extended)
 * r5 = 4*B
 * r6 = 4*C
 */
/*
 * Get the operands.
 *
 * It is necessary to check for muli because it uses an I-type
 * instruction format, while the other instructions have an R-type
 * format.
 *
 * Prepare for either multiplication or division loop.
 * They both loop 32 times.
 */
movi r14, 32
add r3, r3, sp /* r3 = address of A-operand. */
ldw r3, 0(r3) /* r3 = A-operand. */
movi r7, 0x24 /* muli opcode (I-type instruction format) */
beq r2, r7, mul_immed /* muli doesn't use the B register as a source */
add r5, r5, sp /* r5 = address of B-operand. */
ldw r5, 0(r5) /* r5 = B-operand. */
/* r4 = SSSSSSSSSSSSSSSS,-----IMM16------ */
/* IMM16 not needed, align OPX portion */
/* r4 = SSSSSSSSSSSSSSSS,CCCCC,-OPX--,00000 */
srli r4, r4, 5 /* r4 = 00000,SSSSSSSSSSSSSSSS,CCCCC,-OPX-- */
andi r4, r4, 0x3f /* r4 = 00000000000000000000000000,-OPX-- */
/* Now
 * r2 = OP
 * r3 = src1
 * r5 = src2
 * r4 = OPX (no longer can be muli)
 * r6 = 4*C
 */
/*
 * Multiply or Divide? (see opcode-encoding table above)
 */
andi r7, r4, 0x02 /* For R-type multiply instructions,
 OPX & 0x02 != 0 */
bne r7, zero, multiply
/* DIVISION
 *
 * Divide an unsigned dividend by an unsigned divisor using
 * a shift-and-subtract algorithm. The example below shows
 * 43 div 7 = 6 for 8-bit integers. This classic algorithm uses a
 * single register to store both the dividend and the quotient,
 * allowing both values to be shifted with a single instruction.
 *
 * remainder dividend:quotient
 * --------- -----------------
 * initialize 00000000 00101011:
 * shift 00000000 0101011:_
 * remainder >= divisor? no 00000000 0101011:0
 * shift 00000000 101011:0_
 * remainder >= divisor? no 00000000 101011:00
 * shift 00000001 01011:00_
 * remainder >= divisor? no 00000001 01011:000
 * shift 00000010 1011:000_
 * remainder >= divisor? no 00000010 1011:0000
 * shift 00000101 011:0000_
 * remainder >= divisor? no 00000101 011:00000
 * shift 00001010 11:00000_
 * remainder >= divisor? yes 00001010 11:000001
 * remainder -= divisor - 00000111
 * ----------
 * 00000011 11:000001
 * shift 00000111 1:000001_
 * remainder >= divisor? yes 00000111 1:0000011
 * remainder -= divisor - 00000111
 * ----------
 * 00000000 1:0000011
 * shift 00000001 :0000011_
 * remainder >= divisor? no 00000001 :00000110
 *
 * The quotient is 00000110.
 */
divide:
/*
 * Prepare for division by assuming the result
 * is unsigned, and storing its "sign" as 0.
 */
movi r17, 0
/* Which division opcode? */
xori r7, r4, 0x25 /* OPX of div */
bne r7, zero, unsigned_division
/*
 * OPX is div. Determine and store the sign of the quotient.
 * Then take the absolute value of both operands.
 */
xor r17, r3, r5 /* MSB contains sign of quotient */
bge r3,zero,dividend_is_nonnegative
sub r3, zero, r3 /* -r3 */
dividend_is_nonnegative:
bge r5, zero, divisor_is_nonnegative
sub r5, zero, r5 /* -r5 */
divisor_is_nonnegative:
unsigned_division:
/* Initialize the unsigned-division loop. */
movi r13, 0 /* remainder = 0 */
/* Now
 * r3 = dividend : quotient
 * r4 = 0x25 for div, 0x24 for divu
 * r5 = divisor
 * r13 = remainder
 * r14 = loop counter (already initialized to 32)
 * r17 = MSB contains sign of quotient
 */
/*
 * for (count = 32; count > 0; --count)
 * {
 */
divide_loop:
/*
 * Division:
 *
 * (remainder:dividend:quotient) <<= 1;
 */
slli r13, r13, 1
cmplt r7, r3, zero /* r7 = MSB of r3 */
or r13, r13, r7
slli r3, r3, 1
/*
 * if (remainder >= divisor)
 * {
 * set LSB of quotient
 * remainder -= divisor;
 * }
 */
bltu r13, r5, div_skip
ori r3, r3, 1
sub r13, r13, r5
div_skip:
/*
 * }
 */
subi r14, r14, 1
bne r14, zero, divide_loop
/* Now
 * r3 = quotient
 * r4 = 0x25 for div, 0x24 for divu
 * r6 = 4*C
 * r17 = MSB contains sign of quotient
 */
/*
 * Conditionally negate signed quotient. If quotient is unsigned,
 * the sign already is initialized to 0.
 */
bge r17, zero, quotient_is_nonnegative
sub r3, zero, r3 /* -r3 */
quotient_is_nonnegative:
/*
 * Final quotient is in r3.
 * NOTE: division by zero is not trapped here; the loop simply
 * produces the hardware-compatible all-ones/undefined result.
 */
add r6, r6, sp
stw r3, 0(r6) /* write quotient to stack */
br restore_registers
/* MULTIPLICATION
 *
 * A "product" is the number that one gets by summing a "multiplicand"
 * several times. The "multiplier" specifies the number of copies of the
 * multiplicand that are summed.
 *
 * Actual multiplication algorithms don't use repeated addition, however.
 * Shift-and-add algorithms get the same answer as repeated addition, and
 * they are faster. To compute the lower half of a product (pppp below)
 * one shifts the product left before adding in each of the partial
 * products (a * mmmm) through (d * mmmm).
 *
 * To compute the upper half of a product (PPPP below), one adds in the
 * partial products (d * mmmm) through (a * mmmm), each time following
 * the add by a right shift of the product.
 *
 * mmmm
 * * abcd
 * ------
 * #### = d * mmmm
 * #### = c * mmmm
 * #### = b * mmmm
 * #### = a * mmmm
 * --------
 * PPPPpppp
 *
 * The example above shows 4 partial products. Computing actual Nios II
 * products requires 32 partials.
 *
 * It is possible to compute the result of mulxsu from the result of
 * mulxuu because the only difference between the results of these two
 * opcodes is the value of the partial product associated with the sign
 * bit of rA.
 *
 * mulxsu = mulxuu - (rA < 0) ? rB : 0;
 *
 * It is possible to compute the result of mulxss from the result of
 * mulxsu because the only difference between the results of these two
 * opcodes is the value of the partial product associated with the sign
 * bit of rB.
 *
 * mulxss = mulxsu - (rB < 0) ? rA : 0;
 *
 */
mul_immed:
/* Opcode is muli. Change it into mul for remainder of algorithm. */
mov r6, r5 /* Field B is dest register, not field C. */
mov r5, r4 /* Field IMM16 is src2, not field B. */
movi r4, 0x27 /* OPX of mul is 0x27 */
multiply:
/* Initialize the multiplication loop.
 * Low half (mul) and high half (mulxuu) are computed in parallel. */
movi r9, 0 /* mul_product = 0 */
movi r10, 0 /* mulxuu_product = 0 */
mov r11, r5 /* save original multiplier for mulxsu and mulxss */
mov r12, r5 /* mulxuu_multiplier (will be shifted) */
movi r16, 1 /* used to create "rori B,A,1" from "ror B,A,r16" */
/* Now
 * r3 = multiplicand
 * r5 = mul_multiplier
 * r6 = 4 * dest_register (used later as offset to sp)
 * r7 = temp
 * r9 = mul_product
 * r10 = mulxuu_product
 * r11 = original multiplier
 * r12 = mulxuu_multiplier
 * r14 = loop counter (already initialized)
 * r16 = 1
 */
/*
 * for (count = 32; count > 0; --count)
 * {
 */
multiply_loop:
/*
 * mul_product <<= 1;
 * lsb = multiplier & 1;
 */
slli r9, r9, 1
andi r7, r12, 1
/*
 * if (lsb == 1)
 * {
 * mulxuu_product += multiplicand;
 * }
 */
beq r7, zero, mulx_skip
add r10, r10, r3
cmpltu r7, r10, r3 /* Save the carry from the MSB of mulxuu_product. */
ror r7, r7, r16 /* r7 = 0x80000000 on carry, or else 0x00000000 */
mulx_skip:
/*
 * if (MSB of mul_multiplier == 1)
 * {
 * mul_product += multiplicand;
 * }
 */
bge r5, zero, mul_skip
add r9, r9, r3
mul_skip:
/*
 * mulxuu_product >>= 1; logical shift
 * mul_multiplier <<= 1; done with MSB
 * mulx_multiplier >>= 1; done with LSB
 */
srli r10, r10, 1
or r10, r10, r7 /* OR in the saved carry bit. */
slli r5, r5, 1
srli r12, r12, 1
/*
 * }
 */
subi r14, r14, 1
bne r14, zero, multiply_loop
/*
 * Multiply emulation loop done.
 */
/* Now
 * r3 = multiplicand
 * r4 = OPX
 * r6 = 4 * dest_register (used later as offset to sp)
 * r7 = temp
 * r9 = mul_product
 * r10 = mulxuu_product
 * r11 = original multiplier
 */
/* Calculate address for result from 4 * dest_register */
add r6, r6, sp
/*
 * Select/compute the result based on OPX.
 */
/* OPX == mul? Then store. */
xori r7, r4, 0x27
beq r7, zero, store_product
/* It's one of the mulx.. opcodes. Move over the result. */
mov r9, r10
/* OPX == mulxuu? Then store. */
xori r7, r4, 0x07
beq r7, zero, store_product
/* Compute mulxsu
 *
 * mulxsu = mulxuu - (rA < 0) ? rB : 0;
 */
bge r3, zero, mulxsu_skip
sub r9, r9, r11
mulxsu_skip:
/* OPX == mulxsu? Then store. */
xori r7, r4, 0x17
beq r7, zero, store_product
/* Compute mulxss
 *
 * mulxss = mulxsu - (rB < 0) ? rA : 0;
 */
bge r11,zero,mulxss_skip
sub r9, r9, r3
mulxss_skip:
/* At this point, assume that OPX is mulxss, so store*/
store_product:
stw r9, 0(r6)
/*
 * Restore every register except the emulated instruction's destination
 * (whose stack slot was overwritten above), then return past the
 * faulting instruction with eret. sp itself is reloaded last since all
 * other loads are relative to it.
 */
restore_registers:
/* No need to restore r0. */
ldw r5, 100(sp)
wrctl estatus, r5
ldw r1, 4(sp)
ldw r2, 8(sp)
ldw r3, 12(sp)
ldw r4, 16(sp)
ldw r5, 20(sp)
ldw r6, 24(sp)
ldw r7, 28(sp)
ldw r8, 32(sp)
ldw r9, 36(sp)
ldw r10, 40(sp)
ldw r11, 44(sp)
ldw r12, 48(sp)
ldw r13, 52(sp)
ldw r14, 56(sp)
ldw r15, 60(sp)
ldw r16, 64(sp)
ldw r17, 68(sp)
ldw r18, 72(sp)
ldw r19, 76(sp)
ldw r20, 80(sp)
ldw r21, 84(sp)
ldw r22, 88(sp)
ldw r23, 92(sp)
/* Does not need to restore et */
ldw gp, 104(sp)
ldw fp, 112(sp)
ldw ea, 116(sp)
ldw ra, 120(sp)
ldw sp, 108(sp) /* last restore sp */
eret
.set at
.set break
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,680
|
arch/nios2/kernel/vmlinux.lds.S
|
/*
* Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
OUTPUT_FORMAT("elf32-littlenios2", "elf32-littlenios2", "elf32-littlenios2")
OUTPUT_ARCH(nios)
ENTRY(_start) /* Defined in head.S */
/* Little-endian: jiffies aliases the low word of jiffies_64. */
jiffies = jiffies_64;
SECTIONS
{
	/* Kernel image is linked inside the kernel region of RAM. */
	. = CONFIG_NIOS2_MEM_BASE | CONFIG_NIOS2_KERNEL_REGION_BASE;
	_text = .;
	_stext = .;
	HEAD_TEXT_SECTION
	.text : {
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		KPROBES_TEXT
	} =0
	_etext = .;
	.got : {
		*(.got.plt)
		*(.igot.plt)
		*(.got)
		*(.igot)
	}
	EXCEPTION_TABLE(L1_CACHE_BYTES)
	. = ALIGN(PAGE_SIZE);
	/* Init text/data is page-aligned so it can be freed after boot. */
	__init_begin = .;
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(PAGE_SIZE)
	PERCPU_SECTION(L1_CACHE_BYTES)
	__init_end = .;
	_sdata = .;
	RO_DATA_SECTION(PAGE_SIZE)
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	_edata = .;
	BSS_SECTION(0, 0, 0)
	_end = .;
	STABS_DEBUG
	DWARF_DEBUG
	NOTES
	DISCARDS
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,803
|
arch/nios2/boot/compressed/head.S
|
/*
* Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
*
* Based on arch/nios2/kernel/head.S
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
/*
* This code can be loaded anywhere, eg FLASH ROM as reset vector,
* as long as output does not overlap it.
*/
#include <linux/linkage.h>
#include <asm/cache.h>
.text
.set noat
/*
 * Decompressor entry point. Invalidates caches, relocates itself to
 * its linked address if started elsewhere, clears .bss, sets up a
 * stack, decompresses the kernel and jumps to it. r4-r7 (possible
 * bootloader arguments) are preserved across decompress_kernel().
 */
ENTRY(_start)
wrctl status, r0 /* disable interrupt */
/* invalidate all instruction cache */
movia r1, NIOS2_ICACHE_SIZE
movui r2, NIOS2_ICACHE_LINE_SIZE
1: initi r1
sub r1, r1, r2
bgt r1, r0, 1b
/* invalidate all data cache */
movia r1, NIOS2_DCACHE_SIZE
movui r2, NIOS2_DCACHE_LINE_SIZE
1: initd 0(r1)
sub r1, r1, r2
bgt r1, r0, 1b
nextpc r1 /* Find out where we are */
chkadr:
movia r2, chkadr
beq r1, r2, finish_move /* We are running in correct address,
done */
/* move code, r1: src, r2: dest, r3: last dest */
addi r1, r1, (_start - chkadr) /* Source */
movia r2, _start /* Destination */
movia r3, __bss_start /* End of copy */
1: ldw r8, 0(r1) /* load a word from [r1] */
stw r8, 0(r2) /* store a word to dest [r2] */
addi r1, r1, 4 /* inc the src addr */
addi r2, r2, 4 /* inc the dest addr */
blt r2, r3, 1b
/* flush the data cache after moving */
movia r1, NIOS2_DCACHE_SIZE
movui r2, NIOS2_DCACHE_LINE_SIZE
1: flushd 0(r1)
sub r1, r1, r2
bgt r1, r0, 1b
movia r1, finish_move
jmp r1 /* jmp to linked address */
finish_move:
/* zero out the .bss segment (uninitialized common data) */
movia r2, __bss_start /* presume nothing is between */
movia r1, _end /* the .bss and _end. */
1: stb r0, 0(r2)
addi r2, r2, 1
bne r1, r2, 1b
/*
 * set up the stack pointer, somewhere higher than _end.
 * The stack space must be greater than 32K for decompress.
 */
movia sp, 0x10000
add sp, sp, r1
/* save args passed from u-boot, maybe */
addi sp, sp, -16
stw r4, 0(sp)
stw r5, 4(sp)
stw r6, 8(sp)
stw r7, 12(sp)
/* decompress the kernel */
call decompress_kernel
/* pass saved args to kernel */
ldw r4, 0(sp)
ldw r5, 4(sp)
ldw r6, 8(sp)
ldw r7, 12(sp)
/* flush all data cache after decompressing */
movia r1, NIOS2_DCACHE_SIZE
movui r2, NIOS2_DCACHE_LINE_SIZE
1: flushd 0(r1)
sub r1, r1, r2
bgt r1, r0, 1b
/* flush all instruction cache */
movia r1, NIOS2_ICACHE_SIZE
movui r2, NIOS2_ICACHE_LINE_SIZE
1: flushi r1
sub r1, r1, r2
bgt r1, r0, 1b
flushp
/* jump to start real kernel */
movia r1, (CONFIG_NIOS2_MEM_BASE | CONFIG_NIOS2_KERNEL_REGION_BASE)
jmp r1
.balign 512
/*
 * Minimal fake x86 boot-protocol ("HdrS") header so generic boot tools
 * that probe for a bzImage signature accept this image. The values are
 * placeholders; only the magic and version fields matter.
 */
fake_headers_as_bzImage:
.short 0
.ascii "HdrS" /* boot protocol magic */
.short 0x0202 /* boot protocol version */
.short 0
.short 0
.byte 0x00, 0x10
.short 0
.byte 0
.byte 1
.byte 0x00, 0x80
.long 0
.long 0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,408
|
arch/nios2/boot/compressed/vmlinux.lds.S
|
/*
* Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <asm-generic/vmlinux.lds.h>
OUTPUT_FORMAT("elf32-littlenios2", "elf32-littlenios2", "elf32-littlenios2")
OUTPUT_ARCH(nios)
ENTRY(_start) /* Defined in head.S */
SECTIONS
{
	/* Decompressor is linked at BOOT_LINK_OFFSET above the kernel base
	 * so it does not overlap the decompressed kernel image. */
	. = (CONFIG_NIOS2_MEM_BASE + CONFIG_NIOS2_BOOT_LINK_OFFSET) | \
		CONFIG_NIOS2_KERNEL_REGION_BASE;
	_text = .;
	.text : { *(.text) } = 0
	.rodata : { *(.rodata) *(.rodata.*) }
	_etext = .;
	. = ALIGN(32 / 8);
	.data : { *(.data) }
	. = ALIGN(32 / 8);
	_got = .;
	.got : {
		*(.got.plt)
		*(.igot.plt)
		*(.got)
		*(.igot)
	}
	_egot = .;
	_edata = .;
	. = ALIGN(32 / 8);
	/* __bss_start/_ebss bound the area head.S zeroes at boot. */
	__bss_start = .;
	.bss : { *(.bss) *(.sbss) }
	. = ALIGN(32 / 8);
	_ebss = .;
	end = . ;
	_end = . ;
	got_len = (_egot - _got);
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,624
|
arch/m68k/coldfire/head.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*****************************************************************************/
/*
* head.S -- common startup code for ColdFire CPUs.
*
* (C) Copyright 1999-2011, Greg Ungerer <gerg@snapgear.com>.
*/
/*****************************************************************************/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfmmu.h>
#include <asm/thread_info.h>
/*****************************************************************************/
/*
 * If we don't have a fixed memory size, then lets build in code
 * to auto detect the DRAM size. Obviously this is the preferred
 * method, and should work for most boards. It won't work for those
 * that do not have their RAM starting at address 0, and it only
 * works on SDRAM (not boards fitted with SRAM).
 */
/* Each variant leaves the total memory size in bytes in %d0. */
#if CONFIG_RAMSIZE != 0
.macro GET_MEM_SIZE
movel #CONFIG_RAMSIZE,%d0 /* hard coded memory size */
.endm
#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
 defined(CONFIG_M5249) || defined(CONFIG_M525x) || \
 defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
 defined(CONFIG_M5307) || defined(CONFIG_M5407)
/*
 * Not all these devices have exactly the same DRAM controller,
 * but the DCMR register is virtually identical - give or take
 * a couple of bits. The only exception is the 5272 devices, their
 * DRAM controller is quite different.
 */
.macro GET_MEM_SIZE
movel MCFSIM_DMR0,%d0 /* get mask for 1st bank */
btst #0,%d0 /* check if region enabled */
beq 1f
andl #0xfffc0000,%d0
beq 1f
addl #0x00040000,%d0 /* convert mask to size */
1:
movel MCFSIM_DMR1,%d1 /* get mask for 2nd bank */
btst #0,%d1 /* check if region enabled */
beq 2f
andl #0xfffc0000,%d1
beq 2f
addl #0x00040000,%d1
addl %d1,%d0 /* total mem size in d0 */
2:
.endm
#elif defined(CONFIG_M5272)
.macro GET_MEM_SIZE
movel MCFSIM_CSOR7,%d0 /* get SDRAM address mask */
andil #0xfffff000,%d0 /* mask out chip select options */
negl %d0 /* negate bits */
.endm
#elif defined(CONFIG_M520x)
.macro GET_MEM_SIZE
clrl %d0
movel MCFSIM_SDCS0, %d2 /* Get SDRAM chip select 0 config */
andl #0x1f, %d2 /* Get only the chip select size */
beq 3f /* Check if it is enabled */
addql #1, %d2 /* Form exponent */
moveql #1, %d0
lsll %d2, %d0 /* 2 ^ exponent */
3:
movel MCFSIM_SDCS1, %d2 /* Get SDRAM chip select 1 config */
andl #0x1f, %d2 /* Get only the chip select size */
beq 4f /* Check if it is enabled */
addql #1, %d2 /* Form exponent */
moveql #1, %d1
lsll %d2, %d1 /* 2 ^ exponent */
addl %d1, %d0 /* Total size of SDRAM in d0 */
4:
.endm
#else
#error "ERROR: I don't know how to probe your boards memory size?"
#endif
/*****************************************************************************/
/*
 * Boards and platforms can do specific early hardware setup if
 * they need to. Most don't need this, define away if not required.
 */
#ifndef PLATFORM_SETUP
#define PLATFORM_SETUP
#endif
/*****************************************************************************/
/*
 * Exported symbols: the boot entry point and the RAM layout variables
 * recorded during startup (defined in the .data section below).
 */
.global _start
.global _rambase
.global _ramvec
.global _ramstart
.global _ramend
#if defined(CONFIG_UBOOT)
.global _init_sp
#endif
/*****************************************************************************/
.data
/*
 * During startup we store away the RAM setup. These are not in the
 * bss, since their values are determined and written before the bss
 * has been cleared.
 */
_rambase: /* physical base address of RAM */
.long 0
_ramvec: /* address of the exception vector table */
.long 0
_ramstart: /* first free RAM address after kernel + bss (+ROMFS) */
.long 0
_ramend: /* end address of RAM (base + probed size) */
.long 0
#if defined(CONFIG_UBOOT)
_init_sp: /* stack pointer handed to us by U-Boot at entry */
.long 0
#endif
/*****************************************************************************/
__HEAD
#ifdef CONFIG_MMU
_start0:
jmp _start
.global kernel_pg_dir
/* kernel_pg_dir shares the first page of the image; the .equ on '.'
 * reserves 0x1000 bytes here so the page directory has a whole page. */
.equ kernel_pg_dir,_start0
.equ .,_start0+0x1000
#endif
/*
 * This is the codes first entry point. This is where it all
 * begins...
 */
/*
 * Boot sequence: mask interrupts, disable cache, optional platform
 * setup, record RAM layout, enable cache (and MMU identity map when
 * configured), relocate ROMFS, clear bss, set up the init task stack,
 * then call start_kernel. Note %a7 is used as a scratch register
 * early on - no stack exists until init_thread_union is loaded below.
 */
_start:
nop /* filler */
movew #0x2700, %sr /* no interrupts */
movel #CACHE_INIT,%d0 /* disable cache */
movec %d0,%CACR
nop
#if defined(CONFIG_UBOOT)
movel %sp,_init_sp /* save initial stack pointer */
#endif
#ifdef CONFIG_MBAR
movel #CONFIG_MBAR+1,%d0 /* configured MBAR address */
movec %d0,%MBAR /* set it */
#endif
/*
 * Do any platform or board specific setup now. Most boards
 * don't need anything. Those exceptions are define this in
 * their board specific includes.
 */
PLATFORM_SETUP
/*
 * Create basic memory configuration. Set VBR accordingly,
 * and size memory.
 */
movel #CONFIG_VECTORBASE,%a7
movec %a7,%VBR /* set vectors addr */
movel %a7,_ramvec
movel #CONFIG_RAMBASE,%a7 /* mark the base of RAM */
movel %a7,_rambase
GET_MEM_SIZE /* macro code determines size */
addl %a7,%d0 /* d0 = rambase + size */
movel %d0,_ramend /* set end ram addr */
/*
 * Now that we know what the memory is, lets enable cache
 * and get things moving. This is Coldfire CPU specific. Not
 * all version cores have identical cache register setup. But
 * it is very similar. Define the exact settings in the headers
 * then the code here is the same for all.
 */
movel #ACR0_MODE,%d0 /* set RAM region for caching */
movec %d0,%ACR0
movel #ACR1_MODE,%d0 /* anything else to cache? */
movec %d0,%ACR1
#ifdef ACR2_MODE
movel #ACR2_MODE,%d0
movec %d0,%ACR2
movel #ACR3_MODE,%d0
movec %d0,%ACR3
#endif
movel #CACHE_MODE,%d0 /* enable cache */
movec %d0,%CACR
nop
#ifdef CONFIG_MMU
/*
 * Identity mapping for the kernel region.
 */
movel #(MMUBASE+1),%d0 /* enable MMUBAR registers */
movec %d0,%MMUBAR
movel #MMUOR_CA,%d0 /* clear TLB entries */
movel %d0,MMUOR
movel #0,%d0 /* set ASID to 0 */
movec %d0,%asid
movel #MMUCR_EN,%d0 /* Enable the identity map */
movel %d0,MMUCR
nop /* sync i-pipeline */
movel #_vstart,%a0 /* jump to "virtual" space */
jmp %a0@
_vstart:
#endif /* CONFIG_MMU */
#ifdef CONFIG_ROMFS_FS
/*
 * Move ROM filesystem above bss :-)
 */
/* The copy runs backwards (predecrement) because the source and
 * destination regions may overlap. Word 2 of a ROMFS image holds
 * its size - NOTE(review): offset 8 assumed per ROMFS layout. */
lea __bss_start,%a0 /* get start of bss */
lea __bss_stop,%a1 /* set up destination */
movel %a0,%a2 /* copy of bss start */
movel 8(%a0),%d0 /* get size of ROMFS */
addql #8,%d0 /* allow for rounding */
andl #0xfffffffc, %d0 /* whole words */
addl %d0,%a0 /* copy from end */
addl %d0,%a1 /* copy from end */
movel %a1,_ramstart /* set start of ram */
_copy_romfs:
movel -(%a0),%d0 /* copy dword */
movel %d0,-(%a1)
cmpl %a0,%a2 /* check if at end */
bne _copy_romfs
#else /* CONFIG_ROMFS_FS */
lea __bss_stop,%a1
movel %a1,_ramstart
#endif /* CONFIG_ROMFS_FS */
/*
 * Zero out the bss region.
 */
lea __bss_start,%a0 /* get start of bss */
lea __bss_stop,%a1 /* get end of bss */
clrl %d0 /* set value */
_clear_bss:
movel %d0,(%a0)+ /* clear each word */
cmpl %a0,%a1 /* check if at end */
bne _clear_bss
/*
 * Load the current task pointer and stack.
 */
lea init_thread_union,%a0
lea THREAD_SIZE(%a0),%sp /* stack grows down from top of thread union */
#ifdef CONFIG_MMU
.global m68k_cputype
.global m68k_mmutype
.global m68k_fputype
.global m68k_machtype
movel #CPU_COLDFIRE,%d0
movel %d0,m68k_cputype /* Mark us as a ColdFire */
movel #MMU_COLDFIRE,%d0
movel %d0,m68k_mmutype
movel #FPUTYPE,%d0
movel %d0,m68k_fputype /* Mark FPU type */
movel #MACHINE,%d0
movel %d0,m68k_machtype /* Mark machine type */
lea init_task,%a2 /* Set "current" init task */
#endif
/*
 * Assembler start up done, start code proper.
 */
jsr start_kernel /* start Linux kernel */
_exit:
jmp _exit /* should never get here */
/*****************************************************************************/
/* ==== AirFortressIlikara/LS2K0300-linux-4.19 : arch/m68k/coldfire/entry.S (5,352 bytes) ==== */
/*
* entry.S -- interrupt and exception processing for ColdFire
*
* Copyright (C) 1999-2007, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
* Kenneth Albanowski <kjahds@kjahds.com>,
* Copyright (C) 2000 Lineo Inc. (www.lineo.com)
* Copyright (C) 2004-2006 Macq Electronique SA. (www.macqel.com)
*
* Based on:
*
* linux/arch/m68k/kernel/entry.S
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file README.legal in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
* ColdFire support by Greg Ungerer (gerg@snapgear.com)
* 5307 fixes by David W. Miller
* linux 2.4 support David McCullough <davidm@snapgear.com>
* Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
*/
#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
#ifdef CONFIG_COLDFIRE_SW_A7
/*
 * Define software copies of the supervisor and user stack pointers.
 */
/* NOTE(review): on ColdFire parts without separate hardware USP/SSP
 * registers the entry/exit macros presumably swap %a7 through these
 * variables - the macros live in asm/entry.h, not visible here. */
.bss
sw_ksp: /* saved kernel (supervisor) stack pointer */
.long 0
sw_usp: /* saved user stack pointer */
.long 0
#endif /* CONFIG_COLDFIRE_SW_A7 */
.text
.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl inthandler
/* Bad/unimplemented syscall: route through sys_ni_syscall. */
enosys:
mov.l #sys_ni_syscall,%d3
bra 1f
/*
 * System call trap entry.
 * On entry %d0 holds the syscall number; the handler address is
 * looked up in sys_call_table and kept in %d3 across the checks.
 */
ENTRY(system_call)
SAVE_ALL_SYS
move #0x2000,%sr /* enable intrs again */
GET_CURRENT(%d2)
cmpl #NR_syscalls,%d0
jcc enosys /* unsigned: d0 >= NR_syscalls is invalid */
lea sys_call_table,%a0
lsll #2,%d0 /* movel %a0@(%d0:l:4),%d3 */
movel %a0@(%d0),%d3
jeq enosys /* NULL table entry -> not implemented */
1:
movel %sp,%d2 /* get thread_info pointer */
andl #-THREAD_SIZE,%d2 /* at start of kernel stack */
movel %d2,%a0
movel %a0@,%a1 /* save top of frame */
movel %sp,%a1@(TASK_THREAD+THREAD_ESP0)
/* Byte-wise test of the TIF_SYSCALL_TRACE bit in thread_info->flags */
btst #(TIF_SYSCALL_TRACE%8),%a0@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
bnes 1f
/* Fast path: no tracer attached - call the handler directly. */
movel %d3,%a0
jbsr %a0@
movel %d0,%sp@(PT_OFF_D0) /* save the return value */
jra ret_from_exception
1:
/* Traced path: notify the tracer before and after the handler. */
movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */
movel %d2,PT_OFF_D0(%sp) /* on syscall entry */
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
movel %d3,%a0
jbsr %a0@
movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
/* Common return path; falls through into ret_from_exception. */
ret_from_signal:
RESTORE_SWITCH_STACK
addql #4,%sp
ret_from_exception:
move #0x2700,%sr /* disable intrs */
/* Test the S bit (bit 13 of saved SR, byte-addressed as bit 5 of the
 * high byte): clear means we interrupted user mode. */
btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */
jeq Luser_return /* if so, skip resched, signals */
#ifdef CONFIG_PREEMPT
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
movel %d1,%a0
movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
andl #(1<<TIF_NEED_RESCHED),%d1
jeq Lkernel_return
movel %a0@(TINFO_PREEMPT),%d1
cmpl #0,%d1
jne Lkernel_return /* preempt_count != 0: preemption disabled */
pea Lkernel_return
jmp preempt_schedule_irq /* preempt the kernel */
#endif
Lkernel_return:
/* Unwind the exception frame and return to kernel context. */
moveml %sp@,%d1-%d5/%a0-%a2
lea %sp@(32),%sp /* space for 8 regs */
movel %sp@+,%d0
addql #4,%sp /* orig d0 */
addl %sp@+,%sp /* stk adj */
rte
Luser_return:
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
movel %d1,%a0
/* moveb sets CC on the low byte of flags; nonzero means pending work */
moveb %a0@(TINFO_FLAGS+3),%d1 /* thread_info->flags (low 8 bits) */
jne Lwork_to_do /* still work to do */
Lreturn:
RESTORE_USER
Lwork_to_do:
movel %a0@(TINFO_FLAGS),%d1 /* get thread_info->flags */
move #0x2000,%sr /* enable intrs again */
btst #TIF_NEED_RESCHED,%d1
jne reschedule
Lsignal_return:
/* Deliver signals / run notify-resume work, then re-check flags. */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jsr do_notify_resume
addql #4,%sp
RESTORE_SWITCH_STACK
addql #4,%sp
jmp Luser_return
/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). Calls up to high level code to do all the work.
 */
/*
 * Extracts the vector number from the saved format/vector word
 * (bits 2-9 hold vector*4) and calls do_IRQ(vector, regs).
 */
ENTRY(inthandler)
SAVE_ALL_INT
GET_CURRENT(%d2)
movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
andl #0x03fc,%d0 /* mask out vector only */
movel %sp,%sp@- /* push regs arg */
lsrl #2,%d0 /* calculate real vector # */
movel %d0,%sp@- /* push vector number */
jbsr do_IRQ /* call high level irq handler */
lea %sp@(8),%sp /* pop args off stack */
bra ret_from_exception
/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
/*
 * Context switch: save prev's SR, kernel SP and user SP into its
 * task_struct, then load the same three from next and return on
 * next's kernel stack.
 */
ENTRY(resume)
movew %sr,%d1 /* save current status */
movew %d1,%a0@(TASK_THREAD+THREAD_SR)
movel %a0,%d1 /* get prev thread in d1 */
SAVE_SWITCH_STACK
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
RDUSP /* movel %usp,%a3 */
movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
#ifdef CONFIG_MMU
movel %a1,%a2 /* set new current */
#endif
movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
WRUSP /* movel %a3,%usp */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
movew %a1@(TASK_THREAD+THREAD_SR),%d7 /* restore new status */
movew %d7,%sr
RESTORE_SWITCH_STACK
rts
/* ==== AirFortressIlikara/LS2K0300-linux-4.19 : arch/m68k/kernel/head.S (89,771 bytes) ==== */
/* -*- mode: asm -*-
**
** head.S -- This file contains the initial boot code for the
** Linux/68k kernel.
**
** Copyright 1993 by Hamish Macdonald
**
** 68040 fixes by Michael Rausch
** 68060 fixes by Roman Hodek
** MMU cleanup by Randy Thelen
** Final MMU cleanup by Roman Zippel
**
** Atari support by Andreas Schwab, using ideas of Robert de Vries
** and Bjoern Brauel
** VME Support by Richard Hirst
**
** 94/11/14 Andreas Schwab: put kernel at PAGESIZE
** 94/11/18 Andreas Schwab: remove identity mapping of STRAM for Atari
** ++ Bjoern & Roman: ATARI-68040 support for the Medusa
** 95/11/18 Richard Hirst: Added MVME166 support
** 96/04/26 Guenther Kelleter: fixed identity mapping for Falcon with
** Magnum- and FX-alternate ram
** 98/04/25 Phil Blundell: added HP300 support
** 1998/08/30 David Kilzer: Added support for font_desc structures
** for linux-2.1.115
** 1999/02/11 Richard Zidlicky: added Q40 support (initial version 99/01/01)
** 2004/05/13 Kars de Jong: Finalised HP300 support
**
** This file is subject to the terms and conditions of the GNU General Public
** License. See the file README.legal in the main directory of this archive
** for more details.
**
*/
/*
* Linux startup code.
*
* At this point, the boot loader has:
* Disabled interrupts
* Disabled caches
* Put us in supervisor state.
*
* The kernel setup code takes the following steps:
* . Raise interrupt level
* . Set up initial kernel memory mapping.
* . This sets up a mapping of the 4M of memory the kernel is located in.
* . It also does a mapping of any initial machine specific areas.
* . Enable the MMU
* . Enable cache memories
* . Jump to kernel startup
*
* Much of the file restructuring was to accomplish:
* 1) Remove register dependency through-out the file.
* 2) Increase use of subroutines to perform functions
* 3) Increase readability of the code
*
* Of course, readability is a subjective issue, so it will never be
* argued that that goal was accomplished. It was merely a goal.
* A key way to help make code more readable is to give good
* documentation. So, the first thing you will find is exaustive
* write-ups on the structure of the file, and the features of the
* functional subroutines.
*
* General Structure:
* ------------------
* Without a doubt the single largest chunk of head.S is spent
* mapping the kernel and I/O physical space into the logical range
* for the kernel.
* There are new subroutines and data structures to make MMU
* support cleaner and easier to understand.
* First, you will find a routine call "mmu_map" which maps
* a logical to a physical region for some length given a cache
* type on behalf of the caller. This routine makes writing the
* actual per-machine specific code very simple.
* A central part of the code, but not a subroutine in itself,
* is the mmu_init code which is broken down into mapping the kernel
* (the same for all machines) and mapping machine-specific I/O
* regions.
* Also, there will be a description of engaging the MMU and
* caches.
* You will notice that there is a chunk of code which
* can emit the entire MMU mapping of the machine. This is present
* only in debug modes and can be very helpful.
* Further, there is a new console driver in head.S that is
* also only engaged in debug mode. Currently, it's only supported
* on the Macintosh class of machines. However, it is hoped that
* others will plug-in support for specific machines.
*
* ######################################################################
*
* mmu_map
* -------
* mmu_map was written for two key reasons. First, it was clear
* that it was very difficult to read the previous code for mapping
* regions of memory. Second, the Macintosh required such extensive
* memory allocations that it didn't make sense to propagate the
* existing code any further.
* mmu_map requires some parameters:
*
* mmu_map (logical, physical, length, cache_type)
*
* While this essentially describes the function in the abstract, you'll
* find more indepth description of other parameters at the implementation site.
*
* mmu_get_root_table_entry
* ------------------------
* mmu_get_ptr_table_entry
* -----------------------
* mmu_get_page_table_entry
* ------------------------
*
* These routines are used by other mmu routines to get a pointer into
* a table, if necessary a new table is allocated. These routines are working
* basically like pmd_alloc() and pte_alloc() in <asm/pgtable.h>. The root
* table needs of course only to be allocated once in mmu_get_root_table_entry,
* so that here also some mmu specific initialization is done. The second page
* at the start of the kernel (the first page is unmapped later) is used for
* the kernel_pg_dir. It must be at a position known at link time (as it's used
* to initialize the init task struct) and since it needs special cache
* settings, it's the easiest to use this page, the rest of the page is used
* for further pointer tables.
* mmu_get_page_table_entry allocates always a whole page for page tables, this
* means 1024 pages and so 4MB of memory can be mapped. It doesn't make sense
* to manage page tables in smaller pieces as nearly all mappings have that
* size.
*
* ######################################################################
*
*
* ######################################################################
*
* mmu_engage
* ----------
* Thanks to a small helping routine enabling the mmu got quite simple
* and there is only one way left. mmu_engage makes a complete a new mapping
* that only includes the absolute necessary to be able to jump to the final
* position and to restore the original mapping.
* As this code doesn't need a transparent translation register anymore this
* means all registers are free to be used by machines that needs them for
* other purposes.
*
* ######################################################################
*
* mmu_print
* ---------
* This algorithm will print out the page tables of the system as
* appropriate for an 030 or an 040. This is useful for debugging purposes
* and as such is enclosed in #ifdef MMU_PRINT/#endif clauses.
*
* ######################################################################
*
* console_init
* ------------
* The console is also able to be turned off. The console in head.S
* is specifically for debugging and can be very useful. It is surrounded by
* #ifdef / #endif clauses so it doesn't have to ship in known-good
* kernels. It's basic algorithm is to determine the size of the screen
* (in height/width and bit depth) and then use that information for
* displaying an 8x8 font or an 8x16 (widthxheight). I prefer the 8x8 for
* debugging so I can see more good data. But it was trivial to add support
* for both fonts, so I included it.
* Also, the algorithm for plotting pixels is abstracted so that in
* theory other platforms could add support for different kinds of frame
* buffers. This could be very useful.
*
* console_put_penguin
* -------------------
* An important part of any Linux bring up is the penguin and there's
* nothing like getting the Penguin on the screen! This algorithm will work
* on any machine for which there is a console_plot_pixel.
*
* console_scroll
* --------------
* My hope is that the scroll algorithm does the right thing on the
* various platforms, but it wouldn't be hard to add the test conditions
* and new code if it doesn't.
*
* console_putc
* -------------
*
* ######################################################################
*
* Register usage has greatly simplified within head.S. Every subroutine
* saves and restores all registers that it modifies (except it returns a
* value in there of course). So the only register that needs to be initialized
* is the stack pointer.
* All other init code and data is now placed in the init section, so it will
* be automatically freed at the end of the kernel initialization.
*
* ######################################################################
*
* options
* -------
* There are many options available in a build of this file. I've
* taken the time to describe them here to save you the time of searching
* for them and trying to understand what they mean.
*
* CONFIG_xxx: These are the obvious machine configuration defines created
* during configuration. These are defined in autoconf.h.
*
* CONSOLE_DEBUG: Only supports a Mac frame buffer but could easily be
* extended to support other platforms.
*
* TEST_MMU: This is a test harness for running on any given machine but
* getting an MMU dump for another class of machine. The classes of machines
* that can be tested are any of the makes (Atari, Amiga, Mac, VME, etc.)
* and any of the models (030, 040, 060, etc.).
*
* NOTE: TEST_MMU is NOT permanent! It is scheduled to be removed
* When head.S boots on Atari, Amiga, Macintosh, and VME
* machines. At that point the underlying logic will be
* believed to be solid enough to be trusted, and TEST_MMU
* can be dropped. Do note that that will clean up the
* head.S code significantly as large blocks of #if/#else
* clauses can be removed.
*
* MMU_NOCACHE_KERNEL: On the Macintosh platform there was an inquiry into
* determing why devices don't appear to work. A test case was to remove
* the cacheability of the kernel bits.
*
* MMU_PRINT: There is a routine built into head.S that can display the
* MMU data structures. It outputs its result through the serial_putc
* interface. So where ever that winds up driving data, that's where the
* mmu struct will appear.
*
* SERIAL_DEBUG: There are a series of putc() macro statements
* scattered through out the code to give progress of status to the
* person sitting at the console. This constant determines whether those
* are used.
*
* DEBUG: This is the standard DEBUG flag that can be set for building
* the kernel. It has the effect adding additional tests into
* the code.
*
* FONT_6x11:
* FONT_8x8:
* FONT_8x16:
* In theory these could be determined at run time or handed
* over by the booter. But, let's be real, it's a fine hard
* coded value. (But, you will notice the code is run-time
* flexible!) A pointer to the font's struct font_desc
* is kept locally in Lconsole_font. It is used to determine
* font size information dynamically.
*
* Atari constants:
* USE_PRINTER: Use the printer port for serial debug.
* USE_SCC_B: Use the SCC port A (Serial2) for serial debug.
* USE_SCC_A: Use the SCC port B (Modem2) for serial debug.
* USE_MFP: Use the ST-MFP port (Modem1) for serial debug.
*
* Macintosh constants:
* MAC_USE_SCC_A: Use SCC port A (modem) for serial debug.
* MAC_USE_SCC_B: Use SCC port B (printer) for serial debug.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/bootinfo-amiga.h>
#include <asm/bootinfo-atari.h>
#include <asm/bootinfo-hp300.h>
#include <asm/bootinfo-mac.h>
#include <asm/bootinfo-q40.h>
#include <asm/bootinfo-vme.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_MAC
# include <asm/machw.h>
#endif
#ifdef CONFIG_EARLY_PRINTK
# define SERIAL_DEBUG
# if defined(CONFIG_MAC) && defined(CONFIG_FONT_SUPPORT)
# define CONSOLE_DEBUG
# endif
#endif
#undef MMU_PRINT
#undef MMU_NOCACHE_KERNEL
#undef DEBUG
/*
* For the head.S console, there are three supported fonts, 6x11, 8x16 and 8x8.
* The 8x8 font is harder to read but fits more on the screen.
*/
#define FONT_8x8 /* default */
/* #define FONT_8x16 */ /* 2nd choice */
/* #define FONT_6x11 */ /* 3rd choice */
.globl kernel_pg_dir
.globl availmem
.globl m68k_init_mapped_size
.globl m68k_pgtable_cachemode
.globl m68k_supervisor_cachemode
#ifdef CONFIG_MVME16x
.globl mvme_bdid
#endif
#ifdef CONFIG_Q40
.globl q40_mem_cptr
#endif
/* CPU type flags stored in L(cputype): these are BIT NUMBERS used
 * with btst/bset (see the is_040/is_060/... macros), not masks. */
CPUTYPE_040 = 1 /* indicates an 040 */
CPUTYPE_060 = 2 /* indicates an 060 */
CPUTYPE_0460 = 3 /* if either above are set, this is set */
CPUTYPE_020 = 4 /* indicates an 020 */
/* Translation control register */
TC_ENABLE = 0x8000
TC_PAGE8K = 0x4000
TC_PAGE4K = 0x0000
/* Transparent translation registers */
TTR_ENABLE = 0x8000 /* enable transparent translation */
TTR_ANYMODE = 0x4000 /* user and kernel mode access */
TTR_KERNELMODE = 0x2000 /* only kernel mode access */
TTR_USERMODE = 0x0000 /* only user mode access */
TTR_CI = 0x0400 /* inhibit cache */
TTR_RW = 0x0200 /* read/write mode */
TTR_RWM = 0x0100 /* read/write mask */
TTR_FCB2 = 0x0040 /* function code base bit 2 */
TTR_FCB1 = 0x0020 /* function code base bit 1 */
TTR_FCB0 = 0x0010 /* function code base bit 0 */
TTR_FCM2 = 0x0004 /* function code mask bit 2 */
TTR_FCM1 = 0x0002 /* function code mask bit 1 */
TTR_FCM0 = 0x0001 /* function code mask bit 0 */
/* Cache Control registers */
CC6_ENABLE_D = 0x80000000 /* enable data cache (680[46]0) */
CC6_FREEZE_D = 0x40000000 /* freeze data cache (68060) */
CC6_ENABLE_SB = 0x20000000 /* enable store buffer (68060) */
CC6_PUSH_DPI = 0x10000000 /* disable CPUSH invalidation (68060) */
CC6_HALF_D = 0x08000000 /* half-cache mode for data cache (68060) */
CC6_ENABLE_B = 0x00800000 /* enable branch cache (68060) */
CC6_CLRA_B = 0x00400000 /* clear all entries in branch cache (68060) */
CC6_CLRU_B = 0x00200000 /* clear user entries in branch cache (68060) */
CC6_ENABLE_I = 0x00008000 /* enable instruction cache (680[46]0) */
CC6_FREEZE_I = 0x00004000 /* freeze instruction cache (68060) */
CC6_HALF_I = 0x00002000 /* half-cache mode for instruction cache (68060) */
CC3_ALLOC_WRITE = 0x00002000 /* write allocate mode(68030) */
CC3_ENABLE_DB = 0x00001000 /* enable data burst (68030) */
CC3_CLR_D = 0x00000800 /* clear data cache (68030) */
CC3_CLRE_D = 0x00000400 /* clear entry in data cache (68030) */
CC3_FREEZE_D = 0x00000200 /* freeze data cache (68030) */
CC3_ENABLE_D = 0x00000100 /* enable data cache (68030) */
CC3_ENABLE_IB = 0x00000010 /* enable instruction burst (68030) */
CC3_CLR_I = 0x00000008 /* clear instruction cache (68030) */
CC3_CLRE_I = 0x00000004 /* clear entry in instruction cache (68030) */
CC3_FREEZE_I = 0x00000002 /* freeze instruction cache (68030) */
CC3_ENABLE_I = 0x00000001 /* enable instruction cache (68030) */
/* Miscellaneous definitions */
PAGESIZE = 4096
PAGESHIFT = 12
ROOT_TABLE_SIZE = 128
PTR_TABLE_SIZE = 128
PAGE_TABLE_SIZE = 64
ROOT_INDEX_SHIFT = 25
PTR_INDEX_SHIFT = 18
PAGE_INDEX_SHIFT = 12
#ifdef DEBUG
/* When debugging use readable names for labels */
#ifdef __STDC__
#define L(name) .head.S.##name
#else
#define L(name) .head.S./**/name
#endif
#else
/* Release builds use a .L prefix so the labels stay file-local. */
#ifdef __STDC__
#define L(name) .L##name
#else
#define L(name) .L/**/name
#endif
#endif
/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
#ifndef __INITDATA
#define __INITDATA .data
#define __FINIT .previous
#endif
/* Several macros to make the writing of subroutines easier:
 * - func_start marks the beginning of the routine which setups the frame
 *   register and saves the registers, it also defines another macro
 *   to automatically restore the registers again.
 * - func_return marks the end of the routine and simply calls the prepared
 *   macro to restore registers and jump back to the caller.
 * - func_define generates another macro to automatically put arguments
 *   onto the stack call the subroutine and cleanup the stack again.
 */
/* Within subroutines these macros can be used to access the arguments
 * on the stack. With STACK some allocated memory on the stack can be
 * accessed and ARG0 points to the return address (used by mmu_engage).
 */
#define STACK %a6@(stackstart)
#define ARG0 %a6@(4)
#define ARG1 %a6@(8)
#define ARG2 %a6@(12)
#define ARG3 %a6@(16)
#define ARG4 %a6@(20)
/* Open a subroutine: frame pointer in %a6, \stack bytes of locals,
 * \saveregs pushed. Also defines the matching func_return_\name. */
.macro func_start name,saveregs,stack=0
L(\name):
linkw %a6,#-\stack
moveml \saveregs,%sp@-
.set stackstart,-\stack
.macro func_return_\name
moveml %sp@+,\saveregs
unlk %a6
rts
.endm
.endm
.macro func_return name
func_return_\name
.endm
.macro func_call name
jbsr L(\name)
.endm
/* Recursively push up to \nr arguments right-to-left onto the stack. */
.macro move_stack nr,arg1,arg2,arg3,arg4
.if \nr
move_stack "(\nr-1)",\arg2,\arg3,\arg4
movel \arg1,%sp@-
.endif
.endm
/* Generate a caller macro "\name argN..." that pushes \nr arguments,
 * calls L(\name), and pops the arguments again. */
.macro func_define name,nr=0
.macro \name arg1,arg2,arg3,arg4
move_stack \nr,\arg1,\arg2,\arg3,\arg4
func_call \name
.if \nr
lea %sp@(\nr*4),%sp
.endif
.endm
.endm
/* Caller macros for the MMU helper subroutines defined later on. */
func_define mmu_map,4
func_define mmu_map_tt,4
func_define mmu_fixup_page_mmu_cache,1
func_define mmu_temp_map,2
func_define mmu_engage
func_define mmu_get_root_table_entry,1
func_define mmu_get_ptr_table_entry,2
func_define mmu_get_page_table_entry,2
func_define mmu_print
func_define get_new_page
#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
func_define set_leds
#endif
/* Identity mapping shorthand: logical address == physical address. */
.macro mmu_map_eq arg1,arg2,arg3
mmu_map \arg1,\arg1,\arg2,\arg3
.endm
/* Look up a bootinfo record by tag; result address returned by the
 * get_bi_record subroutine (in %a0, per the uses in __start). */
.macro get_bi_record record
pea \record
func_call get_bi_record
addql #4,%sp
.endm
/* Debug console output helpers - compiled away unless the matching
 * CONSOLE_DEBUG / SERIAL_DEBUG / DEBUG switches are enabled. */
func_define serial_putc,1
func_define console_putc,1
func_define console_init
func_define console_put_penguin
func_define console_plot_pixel,3
func_define console_scroll
/* Emit one character to the enabled debug channel(s). */
.macro putc ch
#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
pea \ch
#endif
#ifdef CONSOLE_DEBUG
func_call console_putc
#endif
#ifdef SERIAL_DEBUG
func_call serial_putc
#endif
#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
addql #4,%sp
#endif
.endm
.macro dputc ch
#ifdef DEBUG
putc \ch
#endif
.endm
func_define putn,1
.macro dputn nr
#ifdef DEBUG
putn \nr
#endif
.endm
/* Emit a string: the literal is placed in init data (unique .Lstr\@
 * label per expansion) and its address passed to the puts subroutine. */
.macro puts string
#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
__INITDATA
.Lstr\@:
.string "\string"
__FINIT
pea %pc@(.Lstr\@)
func_call puts
addql #4,%sp
#endif
.endm
.macro dputs string
#ifdef DEBUG
puts "\string"
#endif
.endm
/* Machine-type dispatch: compare m68k_machtype / the L(cputype) bits
 * and branch. '&' is the immediate prefix in this (MIT) syntax. */
#define is_not_amiga(lab) cmpl &MACH_AMIGA,%pc@(m68k_machtype); jne lab
#define is_not_atari(lab) cmpl &MACH_ATARI,%pc@(m68k_machtype); jne lab
#define is_not_mac(lab) cmpl &MACH_MAC,%pc@(m68k_machtype); jne lab
#define is_not_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jne lab
#define is_not_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jne lab
#define is_not_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jne lab
#define is_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jeq lab
#define is_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jeq lab
#define is_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jeq lab
#define is_not_hp300(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); jne lab
#define is_not_apollo(lab) cmpl &MACH_APOLLO,%pc@(m68k_machtype); jne lab
#define is_not_q40(lab) cmpl &MACH_Q40,%pc@(m68k_machtype); jne lab
#define is_not_sun3x(lab) cmpl &MACH_SUN3X,%pc@(m68k_machtype); jne lab
#define hasnt_leds(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); \
jeq 42f; \
cmpl &MACH_APOLLO,%pc@(m68k_machtype); \
jne lab ;\
42:\
#define is_040_or_060(lab) btst &CPUTYPE_0460,%pc@(L(cputype)+3); jne lab
#define is_not_040_or_060(lab) btst &CPUTYPE_0460,%pc@(L(cputype)+3); jeq lab
#define is_040(lab) btst &CPUTYPE_040,%pc@(L(cputype)+3); jne lab
#define is_060(lab) btst &CPUTYPE_060,%pc@(L(cputype)+3); jne lab
#define is_not_060(lab) btst &CPUTYPE_060,%pc@(L(cputype)+3); jeq lab
#define is_020(lab) btst &CPUTYPE_020,%pc@(L(cputype)+3); jne lab
#define is_not_020(lab) btst &CPUTYPE_020,%pc@(L(cputype)+3); jeq lab
/* On the HP300 we use the on-board LEDs for debug output before
   the console is running. Writing a 1 bit turns the corresponding LED
   _off_ - on the 340 bit 7 is towards the back panel of the machine. */
.macro leds mask
#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
hasnt_leds(.Lled\@)
pea \mask
func_call set_leds
addql #4,%sp
.Lled\@:
#endif
.endm
__HEAD
ENTRY(_stext)
/*
* Version numbers of the bootinfo interface
* The area from _stext to _start will later be used as kernel pointer table
*/
bras 1f /* Jump over bootinfo version numbers */
.long BOOTINFOV_MAGIC
.long MACH_AMIGA, AMIGA_BOOTI_VERSION
.long MACH_ATARI, ATARI_BOOTI_VERSION
.long MACH_MVME147, MVME147_BOOTI_VERSION
.long MACH_MVME16x, MVME16x_BOOTI_VERSION
.long MACH_BVME6000, BVME6000_BOOTI_VERSION
.long MACH_MAC, MAC_BOOTI_VERSION
.long MACH_Q40, Q40_BOOTI_VERSION
.long MACH_HP300, HP300_BOOTI_VERSION
.long 0
1: jra __start
.equ kernel_pg_dir,_stext
.equ .,_stext+PAGESIZE
ENTRY(_start)
jra __start
__INIT
ENTRY(__start)
/*
* Setup initial stack pointer
*/
/* Temporary boot stack grows downward from _stext; pc-relative because we
   are still running at the physical load address, not the link address. */
lea %pc@(_stext),%sp
/*
* Record the CPU and machine type.
*/
/* get_bi_record (defined below) returns a pointer to the tag's data in %a0
   and the size (or -1 if missing) in %d0; copy each value into its
   kernel variable. */
get_bi_record BI_MACHTYPE
lea %pc@(m68k_machtype),%a1
movel %a0@,%a1@
get_bi_record BI_FPUTYPE
lea %pc@(m68k_fputype),%a1
movel %a0@,%a1@
get_bi_record BI_MMUTYPE
lea %pc@(m68k_mmutype),%a1
movel %a0@,%a1@
get_bi_record BI_CPUTYPE
lea %pc@(m68k_cputype),%a1
movel %a0@,%a1@
leds 0x1
#ifdef CONFIG_MAC
/*
* For Macintosh, we need to determine the display parameters early (at least
* while debugging it).
*/
is_not_mac(L(test_notmac))
get_bi_record BI_MAC_VADDR
lea %pc@(L(mac_videobase)),%a1
movel %a0@,%a1@
get_bi_record BI_MAC_VDEPTH
lea %pc@(L(mac_videodepth)),%a1
movel %a0@,%a1@
get_bi_record BI_MAC_VDIM
lea %pc@(L(mac_dimensions)),%a1
movel %a0@,%a1@
get_bi_record BI_MAC_VROW
lea %pc@(L(mac_rowbytes)),%a1
movel %a0@,%a1@
get_bi_record BI_MAC_SCCBASE
lea %pc@(L(mac_sccbase)),%a1
movel %a0@,%a1@
L(test_notmac):
#endif /* CONFIG_MAC */
/*
* There are ultimately two pieces of information we want for all kinds of
* processors CpuType and CacheBits. The CPUTYPE was passed in from booter
* and is converted here from a booter type definition to a separate bit
* number which allows for the standard is_0x0 macro tests.
*/
movel %pc@(m68k_cputype),%d0
/*
* Assume it's an 030
*/
/* %d1 accumulates the CPUTYPE_* bit mask; 0 (no bits set) means 020/030. */
clrl %d1
/*
* Test the BootInfo cputype for 060
*/
btst #CPUB_68060,%d0
jeq 1f
bset #CPUTYPE_060,%d1
bset #CPUTYPE_0460,%d1
jra 3f
1:
/*
* Test the BootInfo cputype for 040
*/
btst #CPUB_68040,%d0
jeq 2f
bset #CPUTYPE_040,%d1
bset #CPUTYPE_0460,%d1
jra 3f
2:
/*
* Test the BootInfo cputype for 020
*/
btst #CPUB_68020,%d0
jeq 3f
bset #CPUTYPE_020,%d1
jra 3f
3:
/*
* Record the cpu type
*/
lea %pc@(L(cputype)),%a0
movel %d1,%a0@
/*
* NOTE:
*
* Now the macros are valid:
* is_040_or_060
* is_not_040_or_060
* is_040
* is_060
* is_not_060
*/
/*
* Determine the cache mode for pages holding MMU tables
* and for supervisor mode, unused for '020 and '030
*/
/* %d0 = supervisor-mode cache bits, %d1 = page-table cache bits;
   both stay 0 on 020/030. */
clrl %d0
clrl %d1
is_not_040_or_060(L(save_cachetype))
/*
* '040 or '060
* d1 := cacheable write-through
* NOTE: The 68040 manual strongly recommends non-cached for MMU tables,
* but we have been using write-through since at least 2.0.29 so I
* guess it is OK.
*/
#ifdef CONFIG_060_WRITETHROUGH
/*
* If this is a 68060 board using drivers with cache coherency
* problems, then supervisor memory accesses need to be write-through
* also; otherwise, we want copyback.
*/
/* NOTE(review): this path reaches L(save_cachetype) without executing the
   movel to %d1 below, so %d1 is still 0 from the clrl above -- confirm that
   _PAGE_CACHE040W equals 0 so the stored page-table mode is equivalent. */
is_not_060(1f)
movel #_PAGE_CACHE040W,%d0
jra L(save_cachetype)
#endif /* CONFIG_060_WRITETHROUGH */
1:
movew #_PAGE_CACHE040,%d0
movel #_PAGE_CACHE040W,%d1
L(save_cachetype):
/* Save cache mode for supervisor mode and page tables
*/
lea %pc@(m68k_supervisor_cachemode),%a0
movel %d0,%a0@
lea %pc@(m68k_pgtable_cachemode),%a0
movel %d1,%a0@
/*
* raise interrupt level
*/
/* supervisor mode, interrupt priority mask 7: everything masked during setup */
movew #0x2700,%sr
/*
If running on an Atari, determine the I/O base of the
serial port and test if we are running on a Medusa or Hades.
This test is necessary here, because on the Hades the serial
port is only accessible in the high I/O memory area.
The test whether it is a Medusa is done by writing to the byte at
phys. 0x0. This should result in a bus error on all other machines.
...should, but doesn't. The Afterburner040 for the Falcon has the
same behaviour (0x0..0x7 are no ROM shadow). So we have to do
another test to distinguish Medusa and AB040. This is a
read attempt for 0x00ff82fe phys. that should bus error on a Falcon
(+AB040), but is in the range where the Medusa always asserts DTACK.
The test for the Hades is done by reading address 0xb0000000. This
should give a bus error on the Medusa.
*/
#ifdef CONFIG_ATARI
is_not_atari(L(notypetest))
/* get special machine type (Medusa/Hades/AB40) */
moveq #0,%d3 /* default if tag doesn't exist */
get_bi_record BI_ATARI_MCH_TYPE
tstl %d0
jbmi 1f
movel %a0@,%d3
lea %pc@(atari_mch_type),%a0
movel %d3,%a0@
1:
/* On the Hades, the iobase must be set up before opening the
* serial port. There are no I/O regs at 0x00ffxxxx at all. */
moveq #0,%d0
cmpl #ATARI_MACH_HADES,%d3
jbne 1f
movel #0xff000000,%d0 /* Hades I/O base addr: 0xff000000 */
1: lea %pc@(L(iobase)),%a0
movel %d0,%a0@
L(notypetest):
#endif
#ifdef CONFIG_VME
is_mvme147(L(getvmetype))
is_bvme6000(L(getvmetype))
is_not_mvme16x(L(gvtdone))
/* See if the loader has specified the BI_VME_TYPE tag. Recent
* versions of VMELILO and TFTPLILO do this. We have to do this
* early so we know how to handle console output. If the tag
* doesn't exist then we use the Bug for output on MVME16x.
*/
L(getvmetype):
get_bi_record BI_VME_TYPE
tstl %d0
jbmi 1f
movel %a0@,%d3
lea %pc@(vme_brdtype),%a0
movel %d3,%a0@
1:
#ifdef CONFIG_MVME16x
is_not_mvme16x(L(gvtdone))
/* Need to get the BRD_ID info to differentiate between 162, 167,
* etc. This is available as a BI_VME_BRDINFO tag with later
* versions of VMELILO and TFTPLILO, otherwise we call the Bug.
*/
get_bi_record BI_VME_BRDINFO
tstl %d0
jpl 1f
/* Get pointer to board ID data from Bug */
movel %d2,%sp@-
trap #15
.word 0x70 /* trap 0x70 - .BRD_ID */
movel %sp@+,%a0
1:
lea %pc@(mvme_bdid),%a1
/* Structure is 32 bytes long */
/* copy it a longword at a time (8 x 4 bytes) into mvme_bdid */
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
movel %a0@+,%a1@+
#endif
L(gvtdone):
#endif
#ifdef CONFIG_HP300
is_not_hp300(L(nothp))
/* Get the address of the UART for serial debugging */
get_bi_record BI_HP300_UART_ADDR
tstl %d0
jbmi 1f
movel %a0@,%d3
lea %pc@(L(uartbase)),%a0
movel %d3,%a0@
get_bi_record BI_HP300_UART_SCODE
tstl %d0
jbmi 1f
movel %a0@,%d3
lea %pc@(L(uart_scode)),%a0
movel %d3,%a0@
1:
L(nothp):
#endif
/*
* Initialize serial port
*/
jbsr L(serial_init)
/*
* Initialize console
*/
#ifdef CONFIG_MAC
is_not_mac(L(nocon))
# ifdef CONSOLE_DEBUG
console_init
# ifdef CONFIG_LOGO
console_put_penguin
# endif /* CONFIG_LOGO */
# endif /* CONSOLE_DEBUG */
L(nocon):
#endif /* CONFIG_MAC */
/* From here on the single putc letters ('A'..'K') trace boot progress
   on the debug console. */
putc '\n'
putc 'A'
leds 0x2
dputn %pc@(L(cputype))
dputn %pc@(m68k_supervisor_cachemode)
dputn %pc@(m68k_pgtable_cachemode)
dputc '\n'
/*
* Save physical start address of kernel
*/
/* %a1 = (runtime address of _stext) - (link address of _stext) + PAGE_OFFSET,
   i.e. the kernel's physical load address expressed relative to PAGE_OFFSET. */
lea %pc@(L(phys_kernel_start)),%a0
lea %pc@(_stext),%a1
subl #_stext,%a1
addl #PAGE_OFFSET,%a1
movel %a1,%a0@
putc 'B'
leds 0x4
/*
* mmu_init
*
* This block of code does what's necessary to map in the various kinds
* of machines for execution of Linux.
* First map the first 4, 8, or 16 MB of kernel code & data
*/
/* Choose 16, 8 or 4 MB depending on the size of the first memory chunk
   reported by the BI_MEMCHUNK bootinfo record. */
get_bi_record BI_MEMCHUNK
movel %a0@(4),%d0
movel #16*1024*1024,%d1
cmpl %d0,%d1
jls 1f
lsrl #1,%d1
cmpl %d0,%d1
jls 1f
lsrl #1,%d1
1:
lea %pc@(m68k_init_mapped_size),%a0
movel %d1,%a0@
mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\
%pc@(m68k_supervisor_cachemode)
putc 'C'
#ifdef CONFIG_AMIGA
L(mmu_init_amiga):
is_not_amiga(L(mmu_init_not_amiga))
/*
* mmu_init_amiga
*/
putc 'D'
is_not_040_or_060(1f)
/*
* 040: Map the 16Meg range physical 0x0 up to logical 0x8000.0000
*/
mmu_map #0x80000000,#0,#0x01000000,#_PAGE_NOCACHE_S
/*
* Map the Zorro III I/O space with transparent translation
* for frame buffer memory etc.
*/
mmu_map_tt #1,#0x40000000,#0x20000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
1:
/*
* 030: Map the 32Meg range physical 0x0 up to logical 0x8000.0000
*/
mmu_map #0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
mmu_map_tt #1,#0x40000000,#0x20000000,#_PAGE_NOCACHE030
jbra L(mmu_init_done)
L(mmu_init_not_amiga):
#endif
#ifdef CONFIG_ATARI
L(mmu_init_atari):
is_not_atari(L(mmu_init_not_atari))
putc 'E'
/* On the Atari, we map the I/O region (phys. 0x00ffxxxx) by mapping
the last 16 MB of virtual address space to the first 16 MB (i.e.
0xffxxxxxx -> 0x00xxxxxx). For this, an additional pointer table is
needed. I/O ranges are marked non-cacheable.
For the Medusa it is better to map the I/O region transparently
(i.e. 0xffxxxxxx -> 0xffxxxxxx), because some I/O registers are
accessible only in the high area.
On the Hades all I/O registers are only accessible in the high
area.
*/
/* I/O base addr for non-Medusa, non-Hades: 0x00000000 */
moveq #0,%d0
movel %pc@(atari_mch_type),%d3
cmpl #ATARI_MACH_MEDUSA,%d3
jbeq 2f
cmpl #ATARI_MACH_HADES,%d3
jbne 1f
2: movel #0xff000000,%d0 /* Medusa/Hades base addr: 0xff000000 */
1: movel %d0,%d3
is_040_or_060(L(spata68040))
/* Map everything non-cacheable, though not all parts really
* need to disable caches (crucial only for 0xff8000..0xffffff
* (standard I/O) and 0xf00000..0xf3ffff (IDE)). The remainder
* isn't really used, except for sometimes peeking into the
* ROMs (mirror at phys. 0x0), so caching isn't necessary for
* this. */
mmu_map #0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE030
jbra L(mmu_init_done)
L(spata68040):
mmu_map #0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(mmu_init_not_atari):
#endif
#ifdef CONFIG_Q40
is_not_q40(L(notq40))
/*
* add transparent mapping for 0xff00 0000 - 0xffff ffff
* non-cached serialized etc..
* this includes master chip, DAC, RTC and ISA ports
* 0xfe000000-0xfeffffff is for screen and ROM
*/
putc 'Q'
mmu_map_tt #0,#0xfe000000,#0x01000000,#_PAGE_CACHE040W
mmu_map_tt #1,#0xff000000,#0x01000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(notq40):
#endif
#ifdef CONFIG_HP300
is_not_hp300(L(nothp300))
/* On the HP300, we map the ROM, INTIO and DIO regions (phys. 0x00xxxxxx)
* by mapping 32MB (on 020/030) or 16 MB (on 040) from 0xf0xxxxxx -> 0x00xxxxxx).
* The ROM mapping is needed because the LEDs are mapped there too.
*/
is_040(1f)
/*
* 030: Map the 32Meg range physical 0x0 up to logical 0xf000.0000
*/
mmu_map #0xf0000000,#0,#0x02000000,#_PAGE_NOCACHE030
jbra L(mmu_init_done)
1:
/*
* 040: Map the 16Meg range physical 0x0 up to logical 0xf000.0000
*/
mmu_map #0xf0000000,#0,#0x01000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(nothp300):
#endif /* CONFIG_HP300 */
#ifdef CONFIG_MVME147
is_not_mvme147(L(not147))
/*
* On MVME147 we have already created kernel page tables for
* 4MB of RAM at address 0, so now need to do a transparent
* mapping of the top of memory space. Make it 0.5GByte for now,
* so we can access on-board i/o areas.
*/
mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE030
jbra L(mmu_init_done)
L(not147):
#endif /* CONFIG_MVME147 */
#ifdef CONFIG_MVME16x
is_not_mvme16x(L(not16x))
/*
* On MVME16x we have already created kernel page tables for
* 4MB of RAM at address 0, so now need to do a transparent
* mapping of the top of memory space. Make it 0.5GByte for now.
* Supervisor only access, so transparent mapping doesn't
* clash with User code virtual address space.
* this covers IO devices, PROM and SRAM. The PROM and SRAM
* mapping is needed to allow 167Bug to run.
* IO is in the range 0xfff00000 to 0xfffeffff.
* PROM is 0xff800000->0xffbfffff and SRAM is
* 0xffe00000->0xffe1ffff.
*/
mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(not16x):
#endif /* CONFIG_MVME162 | CONFIG_MVME167 */
#ifdef CONFIG_BVME6000
is_not_bvme6000(L(not6000))
/*
* On BVME6000 we have already created kernel page tables for
* 4MB of RAM at address 0, so now need to do a transparent
* mapping of the top of memory space. Make it 0.5GByte for now,
* so we can access on-board i/o areas.
* Supervisor only access, so transparent mapping doesn't
* clash with User code virtual address space.
*/
mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(not6000):
#endif /* CONFIG_BVME6000 */
/*
* mmu_init_mac
*
* The Macintosh mappings are less clear.
*
* Even as of this writing, it is unclear how the
* Macintosh mappings will be done. However, as
* the first author of this code I'm proposing the
* following model:
*
* Map the kernel (that's already done),
* Map the I/O (on most machines that's the
* 0x5000.0000 ... 0x5300.0000 range,
* Map the video frame buffer using as few pages
* as absolutely (this requirement mostly stems from
* the fact that when the frame buffer is at
* 0x0000.0000 then we know there is valid RAM just
* above the screen that we don't want to waste!).
*
* By the way, if the frame buffer is at 0x0000.0000
* then the Macintosh is known as an RBV based Mac.
*
* By the way 2, the code currently maps in a bunch of
* regions. But I'd like to cut that out. (And move most
* of the mappings up into the kernel proper ... or only
* map what's necessary.)
*/
#ifdef CONFIG_MAC
L(mmu_init_mac):
is_not_mac(L(mmu_init_not_mac))
putc 'F'
/* %d3 = non-cache page attribute appropriate for this CPU family */
is_not_040_or_060(1f)
moveq #_PAGE_NOCACHE_S,%d3
jbra 2f
1:
moveq #_PAGE_NOCACHE030,%d3
2:
/*
* Mac Note: screen address of logical 0xF000.0000 -> <screen physical>
* we simply map the 4MB that contains the videomem
*/
movel #VIDEOMEMMASK,%d0
andl %pc@(L(mac_videobase)),%d0
mmu_map #VIDEOMEMBASE,%d0,#VIDEOMEMSIZE,%d3
/* ROM from 4000 0000 to 4200 0000 (only for mac_reset()) */
mmu_map_eq #0x40000000,#0x02000000,%d3
/* IO devices (incl. serial port) from 5000 0000 to 5300 0000 */
mmu_map_eq #0x50000000,#0x03000000,%d3
/* Nubus slot space (video at 0xF0000000, rom at 0xF0F80000) */
mmu_map_tt #1,#0xf8000000,#0x08000000,%d3
jbra L(mmu_init_done)
L(mmu_init_not_mac):
#endif
#ifdef CONFIG_SUN3X
is_not_sun3x(L(notsun3x))
/* oh, the pain.. We're gonna want the prom code after
* starting the MMU, so we copy the mappings, translating
* from 8k -> 4k pages as we go.
*/
/* copy maps from 0xfee00000 to 0xff000000 */
movel #0xfee00000, %d0
moveq #ROOT_INDEX_SHIFT, %d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
movel #0xfee00000, %d0
moveq #PTR_INDEX_SHIFT, %d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1, %d0
mmu_get_ptr_table_entry %a0,%d0
movel #0xfee00000, %d0
moveq #PAGE_INDEX_SHIFT, %d1
lsrl %d1,%d0
andl #PAGE_TABLE_SIZE-1, %d0
mmu_get_page_table_entry %a0,%d0
/* this is where the prom page table lives */
movel 0xfefe00d4, %a1
movel %a1@, %a1
/* each 8k PROM pte becomes two consecutive 4k ptes (second at +0x1000) */
movel #((0x200000 >> 13)-1), %d1
1:
movel %a1@+, %d3
movel %d3,%a0@+
addl #0x1000,%d3
movel %d3,%a0@+
dbra %d1,1b
/* setup tt1 for I/O */
mmu_map_tt #1,#0x40000000,#0x40000000,#_PAGE_NOCACHE_S
jbra L(mmu_init_done)
L(notsun3x):
#endif
#ifdef CONFIG_APOLLO
is_not_apollo(L(notapollo))
putc 'P'
mmu_map #0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
L(notapollo):
jbra L(mmu_init_done)
#endif
L(mmu_init_done):
putc 'G'
leds 0x8
/*
* mmu_fixup
*
* On the 040 class machines, all pages that are used for the
* mmu have to be fixed up. According to Motorola, pages holding mmu
* tables should be non-cacheable on a '040 and write-through on a
* '060. But analysis of the reasons for this, and practical
* experience, showed that write-through also works on a '040.
*
* Allocated memory so far goes from kernel_end to memory_start that
* is used for all kind of tables, for that the cache attributes
* are now fixed.
*/
L(mmu_fixup):
is_not_040_or_060(L(mmu_fixup_done))
#ifdef MMU_NOCACHE_KERNEL
jbra L(mmu_fixup_done)
#endif
/* first fix the page at the start of the kernel, that
* contains also kernel_pg_dir.
*/
/* %d0 = physical load offset; subtracting it converts the pc-relative
   (physical) addresses below into the logical addresses used by the
   page tables. */
movel %pc@(L(phys_kernel_start)),%d0
subl #PAGE_OFFSET,%d0
lea %pc@(_stext),%a0
subl %d0,%a0
mmu_fixup_page_mmu_cache %a0
/* then walk every page from kernel_end up to memory_start */
movel %pc@(L(kernel_end)),%a0
subl %d0,%a0
movel %pc@(L(memory_start)),%a1
subl %d0,%a1
bra 2f
1:
mmu_fixup_page_mmu_cache %a0
addw #PAGESIZE,%a0
2:
cmpl %a0,%a1
jgt 1b
L(mmu_fixup_done):
/*
* mmu_engage
*
* This chunk of code performs the gruesome task of engaging the MMU.
* The reason its gruesome is because when the MMU becomes engaged it
* maps logical addresses to physical addresses. The Program Counter
* register is then passed through the MMU before the next instruction
* is fetched (the instruction following the engage MMU instruction).
* This may mean one of two things:
* 1. The Program Counter falls within the logical address space of
* the kernel of which there are two sub-possibilities:
* A. The PC maps to the correct instruction (logical PC == physical
* code location), or
* B. The PC does not map through and the processor will read some
* data (or instruction) which is not the logically next instr.
* As you can imagine, A is good and B is bad.
* Alternatively,
* 2. The Program Counter does not map through the MMU. The processor
* will take a Bus Error.
* Clearly, 2 is bad.
* It doesn't take a wiz kid to figure you want 1.A.
* This code creates that possibility.
* There are two possible 1.A. states (we now ignore the other above states):
* A. The kernel is located at physical memory addressed the same as
* the logical memory for the kernel, i.e., 0x01000.
* B. The kernel is located some where else. e.g., 0x0400.0000
*
* Under some conditions the Macintosh can look like A or B.
* [A friend and I once noted that Apple hardware engineers should be
* wacked twice each day: once when they show up at work (as in, Whack!,
* "This is for the screwy hardware we know you're going to design today."),
* and also at the end of the day (as in, Whack! "I don't know what
* you designed today, but I'm sure it wasn't good."). -- rst]
*
* This code works on the following premise:
* If the kernel start (%d5) is within the first 16 Meg of RAM,
* then create a mapping for the kernel at logical 0x8000.0000 to
* the physical location of the pc. And, create a transparent
* translation register for the first 16 Meg. Then, after the MMU
* is engaged, the PC can be moved up into the 0x8000.0000 range
* and then the transparent translation can be turned off and then
* the PC can jump to the correct logical location and it will be
* home (finally). This is essentially the code that the Amiga used
* to use. Now, it's generalized for all processors. Which means
* that a fresh (but temporary) mapping has to be created. The mapping
* is made in page 0 (an as of yet unused location -- except for the
* stack!). This temporary mapping will only require 1 pointer table
* and a single page table (it can map 256K).
*
* OK, alternatively, imagine that the Program Counter is not within
* the first 16 Meg. Then, just use Transparent Translation registers
* to do the right thing.
*
* Last, if _start is already at 0x01000, then there's nothing special
* to do (in other words, in a degenerate case of the first case above,
* do nothing).
*
* Let's do it.
*
*
*/
putc 'H'
mmu_engage
/*
* After this point no new memory is allocated and
* the start of available memory is stored in availmem.
* (The bootmem allocator requires now the physicall address.)
*/
movel L(memory_start),availmem
#ifdef CONFIG_AMIGA
is_not_amiga(1f)
/* fixup the Amiga custom register location before printing */
clrl L(custom)
1:
#endif
#ifdef CONFIG_ATARI
is_not_atari(1f)
/* fixup the Atari iobase register location before printing */
movel #0xff000000,L(iobase)
1:
#endif
#ifdef CONFIG_MAC
is_not_mac(1f)
movel #~VIDEOMEMMASK,%d0
andl L(mac_videobase),%d0
addl #VIDEOMEMBASE,%d0
movel %d0,L(mac_videobase)
#ifdef CONSOLE_DEBUG
movel %pc@(L(phys_kernel_start)),%d0
subl #PAGE_OFFSET,%d0
subl %d0,L(console_font)
subl %d0,L(console_font_data)
#endif
orl #0x50000000,L(mac_sccbase)
1:
#endif
#ifdef CONFIG_HP300
is_not_hp300(2f)
/*
* Fix up the iobase register to point to the new location of the LEDs.
*/
movel #0xf0000000,L(iobase)
/*
* Energise the FPU and caches.
*/
is_040(1f)
movel #0x60,0xf05f400c
jbra 2f
/*
* 040: slightly different, apparently.
*/
1: movew #0,0xf05f400e
movew #0x64,0xf05f400e
2:
#endif
#ifdef CONFIG_SUN3X
is_not_sun3x(1f)
/* enable copro */
oriw #0x4000,0x61000000
1:
#endif
#ifdef CONFIG_APOLLO
is_not_apollo(1f)
/*
* Fix up the iobase before printing
*/
movel #0x80000000,L(iobase)
1:
#endif
putc 'I'
leds 0x10
/*
* Enable caches
*/
is_not_040_or_060(L(cache_not_680460))
L(cache680460):
.chip 68040
nop
cpusha %bc
nop
is_060(L(cache68060))
movel #CC6_ENABLE_D+CC6_ENABLE_I,%d0
/* MMU stuff works in copyback mode now, so enable the cache */
movec %d0,%cacr
jra L(cache_done)
L(cache68060):
movel #CC6_ENABLE_D+CC6_ENABLE_I+CC6_ENABLE_SB+CC6_PUSH_DPI+CC6_ENABLE_B+CC6_CLRA_B,%d0
/* MMU stuff works in copyback mode now, so enable the cache */
movec %d0,%cacr
/* enable superscalar dispatch in PCR */
moveq #1,%d0
.chip 68060
movec %d0,%pcr
jbra L(cache_done)
L(cache_not_680460):
L(cache68030):
.chip 68030
movel #CC3_ENABLE_DB+CC3_CLR_D+CC3_ENABLE_D+CC3_ENABLE_IB+CC3_CLR_I+CC3_ENABLE_I,%d0
movec %d0,%cacr
jra L(cache_done)
.chip 68k
L(cache_done):
putc 'J'
/*
* Setup initial stack pointer
*/
lea init_task,%curptr
lea init_thread_union+THREAD_SIZE,%sp
putc 'K'
subl %a6,%a6 /* clear a6 for gdb */
/*
* The new 64bit printf support requires an early exception initialization.
*/
jbsr base_trap_init
/* jump to the kernel start */
putc '\n'
leds 0x55
jbsr start_kernel
/*
* Find a tag record in the bootinfo structure
* The bootinfo structure is located right after the kernel
* Returns: d0: size (-1 if not found)
* a0: data pointer (end-of-records if not found)
*/
/* In: ARG1 = tag id to search for. Walks the record list starting at _end;
   a record with tag 0 terminates the list. Clobbers %d1 (saved/restored
   by func_start/func_return). */
func_start get_bi_record,%d1
movel ARG1,%d0
lea %pc@(_end),%a0
1: tstw %a0@(BIR_TAG)
jeq 3f
cmpw %a0@(BIR_TAG),%d0
jeq 2f
addw %a0@(BIR_SIZE),%a0 /* advance to next record (size includes header) */
jra 1b
2: moveq #0,%d0
movew %a0@(BIR_SIZE),%d0
lea %a0@(BIR_DATA),%a0
jra 4f
3: moveq #-1,%d0
lea %a0@(BIR_SIZE),%a0
4:
func_return get_bi_record
/*
* MMU Initialization Begins Here
*
* The structure of the MMU tables on the 68k machines
* is thus:
* Root Table
* Logical addresses are translated through
* a hierarchical translation mechanism where the high-order
* seven bits of the logical address (LA) are used as an
* index into the "root table." Each entry in the root
* table has a bit which specifies if it's a valid pointer to a
pointer table. Each entry defines a 32 Meg range of memory.
* If an entry is invalid then that logical range of 32M is
* invalid and references to that range of memory (when the MMU
* is enabled) will fault. If the entry is valid, then it does
* one of two things. On 040/060 class machines, it points to
* a pointer table which then describes more finely the memory
* within that 32M range. On 020/030 class machines, a technique
* called "early terminating descriptors" are used. This technique
* allows an entire 32Meg to be described by a single entry in the
* root table. Thus, this entry in the root table, contains the
* physical address of the memory or I/O at the logical address
* which the entry represents and it also contains the necessary
* cache bits for this region.
*
* Pointer Tables
* Per the Root Table, there will be one or more
* pointer tables. Each pointer table defines a 32M range.
* Not all of the 32M range need be defined. Again, the next
seven bits of the logical address are used as an index into
* the pointer table to point to page tables (if the pointer
* is valid). There will undoubtedly be more than one
* pointer table for the kernel because each pointer table
* defines a range of only 32M. Valid pointer table entries
* point to page tables, or are early terminating entries
* themselves.
*
* Page Tables
* Per the Pointer Tables, each page table entry points
* to the physical page in memory that supports the logical
* address that translates to the particular index.
*
* In short, the Logical Address gets translated as follows:
* bits 31..25 - index into the Root Table
* bits 24..18 - index into the Pointer Table
* bits 17..12 - index into the Page Table
* bits 11..0 - offset into a particular 4K page
*
* The algorithms which follows do one thing: they abstract
* the MMU hardware. For example, there are three kinds of
* cache settings that are relevant. Either, memory is
* being mapped in which case it is either Kernel Code (or
* the RamDisk) or it is MMU data. On the 030, the MMU data
* option also describes the kernel. Or, I/O is being mapped
* in which case it has its own kind of cache bits. There
* are constants which abstract these notions from the code that
* actually makes the call to map some range of memory.
*
*
*
*/
#ifdef MMU_PRINT
/*
* mmu_print
*
* This algorithm will print out the current MMU mappings.
*
* Input:
* %a5 points to the root table. Everything else is calculated
* from this.
*/
/* Debug-only (MMU_PRINT) dump of the translation tables; never built into
   a production kernel. */
#define mmu_next_valid 0
#define mmu_start_logical 4
#define mmu_next_logical 8
#define mmu_start_physical 12
#define mmu_next_physical 16
#define MMU_PRINT_INVALID -1
#define MMU_PRINT_VALID 1
#define MMU_PRINT_UNINITED 0
/* print z if the last test set Z clear (jbne taken path prints n) */
#define putZc(z,n) jbne 1f; putc z; jbra 2f; 1: putc n; 2:
func_start mmu_print,%a0-%a6/%d0-%d7
movel %pc@(L(kernel_pgdir_ptr)),%a5
lea %pc@(L(mmu_print_data)),%a0
movel #MMU_PRINT_UNINITED,%a0@(mmu_next_valid)
is_not_040_or_060(mmu_030_print)
mmu_040_print:
puts "\nMMU040\n"
puts "rp:"
putn %a5
putc '\n'
#if 0
/*
* The following #if/#endif block is a tight algorithm for dumping the 040
* MMU Map in gory detail. It really isn't that practical unless the
* MMU Map algorithm appears to go awry and you need to debug it at the
* entry per entry level.
*/
movel #ROOT_TABLE_SIZE,%d5
#if 0
movel %a5@+,%d7 | Burn an entry to skip the kernel mappings,
subql #1,%d5 | they (might) work
#endif
1: tstl %d5
jbeq mmu_print_done
subq #1,%d5
movel %a5@+,%d7
btst #1,%d7
jbeq 1b
2: putn %d7
andil #0xFFFFFE00,%d7
movel %d7,%a4
movel #PTR_TABLE_SIZE,%d4
putc ' '
3: tstl %d4
jbeq 11f
subq #1,%d4
movel %a4@+,%d7
btst #1,%d7
jbeq 3b
4: putn %d7
andil #0xFFFFFF00,%d7
movel %d7,%a3
movel #PAGE_TABLE_SIZE,%d3
5: movel #8,%d2
6: tstl %d3
jbeq 31f
subq #1,%d3
movel %a3@+,%d6
btst #0,%d6
jbeq 6b
7: tstl %d2
jbeq 8f
subq #1,%d2
putc ' '
jbra 91f
8: putc '\n'
movel #8+1+8+1+1,%d2
9: putc ' '
dbra %d2,9b
movel #7,%d2
91: putn %d6
jbra 6b
31: putc '\n'
movel #8+1,%d2
32: putc ' '
dbra %d2,32b
jbra 3b
11: putc '\n'
jbra 1b
#endif /* MMU 040 Dumping code that's gory and detailed */
/* Compact walk: %a4 tracks the logical address, %d5 the next logical
   address; %d0/%d1/%d2 count root/pointer/page table indices. */
lea %pc@(kernel_pg_dir),%a5
movel %a5,%a0 /* a0 has the address of the root table ptr */
movel #0x00000000,%a4 /* logical address */
moveql #0,%d0
40:
/* Increment the logical address and preserve in d5 */
movel %a4,%d5
addil #PAGESIZE<<13,%d5
movel %a0@+,%d6
btst #1,%d6
jbne 41f
jbsr mmu_print_tuple_invalidate
jbra 48f
41:
movel #0,%d1
andil #0xfffffe00,%d6
movel %d6,%a1
42:
movel %a4,%d5
addil #PAGESIZE<<6,%d5
movel %a1@+,%d6
btst #1,%d6
jbne 43f
jbsr mmu_print_tuple_invalidate
jbra 47f
43:
movel #0,%d2
andil #0xffffff00,%d6
movel %d6,%a2
44:
movel %a4,%d5
addil #PAGESIZE,%d5
movel %a2@+,%d6
btst #0,%d6
jbne 45f
jbsr mmu_print_tuple_invalidate
jbra 46f
45:
moveml %d0-%d1,%sp@-
movel %a4,%d0
movel %d6,%d1
andil #0xfffff4e0,%d1
lea %pc@(mmu_040_print_flags),%a6
jbsr mmu_print_tuple
moveml %sp@+,%d0-%d1
46:
movel %d5,%a4
addq #1,%d2
cmpib #64,%d2
jbne 44b
47:
movel %d5,%a4
addq #1,%d1
cmpib #128,%d1
jbne 42b
48:
movel %d5,%a4 /* move to the next logical address */
addq #1,%d0
cmpib #128,%d0
jbne 40b
/* Also dump the two 040 data transparent-translation registers if enabled. */
.chip 68040
movec %dtt1,%d0
movel %d0,%d1
andiw #0x8000,%d1 /* is it valid ? */
jbeq 1f /* No, bail out */
movel %d0,%d1
andil #0xff000000,%d1 /* Get the address */
putn %d1
puts "=="
putn %d1
movel %d0,%d6
jbsr mmu_040_print_flags_tt
1:
movec %dtt0,%d0
movel %d0,%d1
andiw #0x8000,%d1 /* is it valid ? */
jbeq 1f /* No, bail out */
movel %d0,%d1
andil #0xff000000,%d1 /* Get the address */
putn %d1
puts "=="
putn %d1
movel %d0,%d6
jbsr mmu_040_print_flags_tt
1:
.chip 68k
jbra mmu_print_done
mmu_040_print_flags:
btstl #10,%d6
putZc(' ','G') /* global bit */
btstl #7,%d6
putZc(' ','S') /* supervisor bit */
mmu_040_print_flags_tt:
btstl #6,%d6
jbne 3f
putc 'C'
btstl #5,%d6
putZc('w','c') /* write through or copy-back */
jbra 4f
3:
putc 'N'
btstl #5,%d6
putZc('s',' ') /* serialized non-cacheable, or non-cacheable */
4:
rts
mmu_030_print_flags:
btstl #6,%d6
putZc('C','I') /* write through or copy-back */
rts
mmu_030_print:
puts "\nMMU030\n"
puts "\nrp:"
putn %a5
putc '\n'
movel %a5,%d0
andil #0xfffffff0,%d0
movel %d0,%a0
movel #0x00000000,%a4 /* logical address */
movel #0,%d0
30:
movel %a4,%d5
addil #PAGESIZE<<13,%d5
movel %a0@+,%d6
btst #1,%d6 /* is it a table ptr? */
jbne 31f /* yes */
btst #0,%d6 /* is it early terminating? */
jbeq 1f /* no */
jbsr mmu_030_print_helper
jbra 38f
1:
jbsr mmu_print_tuple_invalidate
jbra 38f
31:
movel #0,%d1
andil #0xfffffff0,%d6
movel %d6,%a1
32:
movel %a4,%d5
addil #PAGESIZE<<6,%d5
movel %a1@+,%d6
btst #1,%d6 /* is it a table ptr? */
jbne 33f /* yes */
btst #0,%d6 /* is it a page descriptor? */
jbeq 1f /* no */
jbsr mmu_030_print_helper
jbra 37f
1:
jbsr mmu_print_tuple_invalidate
jbra 37f
33:
movel #0,%d2
andil #0xfffffff0,%d6
movel %d6,%a2
34:
movel %a4,%d5
addil #PAGESIZE,%d5
movel %a2@+,%d6
btst #0,%d6
jbne 35f
jbsr mmu_print_tuple_invalidate
jbra 36f
35:
jbsr mmu_030_print_helper
36:
movel %d5,%a4
addq #1,%d2
cmpib #64,%d2
jbne 34b
37:
movel %d5,%a4
addq #1,%d1
cmpib #128,%d1
jbne 32b
38:
movel %d5,%a4 /* move to the next logical address */
addq #1,%d0
cmpib #128,%d0
jbne 30b
mmu_print_done:
puts "\n"
func_return mmu_print
mmu_030_print_helper:
moveml %d0-%d1,%sp@-
movel %a4,%d0
movel %d6,%d1
lea %pc@(mmu_030_print_flags),%a6
jbsr mmu_print_tuple
moveml %sp@+,%d0-%d1
rts
/* Print a single "##" marker for a run of invalid entries (suppresses
   repeats via the mmu_next_valid state). */
mmu_print_tuple_invalidate:
moveml %a0/%d7,%sp@-
lea %pc@(L(mmu_print_data)),%a0
tstl %a0@(mmu_next_valid)
jbmi mmu_print_tuple_invalidate_exit
movel #MMU_PRINT_INVALID,%a0@(mmu_next_valid)
putn %a4
puts "##\n"
mmu_print_tuple_invalidate_exit:
moveml %sp@+,%a0/%d7
rts
/* Print "logical->physical" plus flags (via %a6 callback); contiguous
   physical runs are coalesced by tracking mmu_next_physical. */
mmu_print_tuple:
moveml %d0-%d7/%a0,%sp@-
lea %pc@(L(mmu_print_data)),%a0
tstl %a0@(mmu_next_valid)
jble mmu_print_tuple_print
cmpl %a0@(mmu_next_physical),%d1
jbeq mmu_print_tuple_increment
mmu_print_tuple_print:
putn %d0
puts "->"
putn %d1
movel %d1,%d6
jbsr %a6@
mmu_print_tuple_record:
movel #MMU_PRINT_VALID,%a0@(mmu_next_valid)
movel %d1,%a0@(mmu_next_physical)
mmu_print_tuple_increment:
movel %d5,%d7
subl %a4,%d7
addl %d7,%a0@(mmu_next_physical)
mmu_print_tuple_exit:
moveml %sp@+,%d0-%d7/%a0
rts
mmu_print_machine_cpu_types:
puts "machine: "
is_not_amiga(1f)
puts "amiga"
jbra 9f
1:
is_not_atari(2f)
puts "atari"
jbra 9f
2:
is_not_mac(3f)
puts "macintosh"
jbra 9f
3: puts "unknown"
9: putc '\n'
puts "cputype: 0"
is_not_060(1f)
putc '6'
jbra 9f
1:
is_not_040_or_060(2f)
putc '4'
jbra 9f
2: putc '3'
9: putc '0'
putc '\n'
rts
#endif /* MMU_PRINT */
/*
 * mmu_map_tt
 *
 * This is a specific function which works on all 680x0 machines.
 * On 030, 040 & 060 it will attempt to use Transparent Translation
 * registers (tt1).
 * On 020 it will call the standard mmu_map which will use early
 * terminating descriptors.
 *
 * In:  ARG1 = which TT register pair to use (0 -> tt0/itt0+dtt0,
 *             nonzero -> tt1/itt1+dtt1)
 *      ARG2 = base address of the region
 *      ARG3 = size of the region (used to derive the TT address mask)
 *      ARG4 = cache-mode bits (030: bit 6 selects cache-inhibit;
 *             040/060: OR-ed directly into the TT register)
 * Falls back to mmu_map_eq when the region is too small for a TT
 * register (mask would need more than the TT granularity).
 */
func_start mmu_map_tt,%d0/%d1/%a0,4
dputs "mmu_map_tt:"
dputn ARG1
dputn ARG2
dputn ARG3
dputn ARG4
dputc '\n'
is_020(L(do_map))
/* Extract the highest bit set
*/
bfffo ARG3{#0,#32},%d1
cmpw #8,%d1
jcc L(do_map)
/* And get the mask
*/
moveq #-1,%d0
lsrl %d1,%d0
lsrl #1,%d0
/* Mask the address
*/
movel %d0,%d1
notl %d1
andl ARG2,%d1
/* Generate the upper 16bit of the tt register
*/
lsrl #8,%d0
orl %d0,%d1
clrw %d1
is_040_or_060(L(mmu_map_tt_040))
/* set 030 specific bits (read/write access for supervisor mode
* (highest function code set, lower two bits masked))
*/
orw #TTR_ENABLE+TTR_RWM+TTR_FCB2+TTR_FCM1+TTR_FCM0,%d1
movel ARG4,%d0
btst #6,%d0
jeq 1f
orw #TTR_CI,%d1
1: lea STACK,%a0
dputn %d1
movel %d1,%a0@
.chip 68030
tstl ARG1
jne 1f
pmove %a0@,%tt0
jra 2f
1: pmove %a0@,%tt1
2: .chip 68k
jra L(mmu_map_tt_done)
/* set 040 specific bits
*/
L(mmu_map_tt_040):
orw #TTR_ENABLE+TTR_KERNELMODE,%d1
orl ARG4,%d1
dputn %d1
.chip 68040
tstl ARG1
jne 1f
movec %d1,%itt0
movec %d1,%dtt0
jra 2f
1: movec %d1,%itt1
movec %d1,%dtt1
2: .chip 68k
jra L(mmu_map_tt_done)
L(do_map):
mmu_map_eq ARG2,ARG3,ARG4
L(mmu_map_tt_done):
func_return mmu_map_tt
/*
 * mmu_map
 *
 * This routine will map a range of memory using a pointer
 * table and allocating the pages on the fly from the kernel.
 * The pointer table does not have to be already linked into
 * the root table, this routine will do that if necessary.
 *
 * In:  ARG1 = logical start address, ARG2 = physical start address,
 *      ARG3 = length, ARG4 = page attribute bits.
 *
 * NOTE
 * This routine will assert failure and use the serial_putc
 * routines in the case of a run-time error. For example,
 * if the address is already mapped.
 *
 * NOTE-2
 * This routine will use early terminating descriptors
 * where possible for the 68020+68851 and 68030 type
 * processors.
 */
func_start mmu_map,%d0-%d4/%a0-%a4
dputs "\nmmu_map:"
dputn ARG1
dputn ARG2
dputn ARG3
dputn ARG4
dputc '\n'
/* Get logical address and round it down to 256KB
*/
movel ARG1,%d0
andl #-(PAGESIZE*PAGE_TABLE_SIZE),%d0
movel %d0,%a3
/* Get the end address
*/
movel ARG1,%a4
addl ARG3,%a4
subql #1,%a4
/* Get physical address and round it down to 256KB
*/
movel ARG2,%d0
andl #-(PAGESIZE*PAGE_TABLE_SIZE),%d0
movel %d0,%a2
/* Add page attributes to the physical address
*/
movel ARG4,%d0
orw #_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
addw %d0,%a2
dputn %a2
dputn %a3
dputn %a4
is_not_040_or_060(L(mmu_map_030))
addw #_PAGE_GLOBAL040,%a2
/*
* MMU 040 & 060 Support
*
* The MMU usage for the 040 and 060 is different enough from
* the 030 and 68851 that there is separate code. This comment
* block describes the data structures and algorithms built by
* this code.
*
* The 040 does not support early terminating descriptors, as
* the 030 does. Therefore, a third level of table is needed
* for the 040, and that would be the page table. In Linux,
* page tables are allocated directly from the memory above the
* kernel.
*
*/
L(mmu_map_040):
/* Calculate the offset into the root table
*/
movel %a3,%d0
moveq #ROOT_INDEX_SHIFT,%d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
/* Calculate the offset into the pointer table
*/
movel %a3,%d0
moveq #PTR_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1,%d0
mmu_get_ptr_table_entry %a0,%d0
/* Calculate the offset into the page table
*/
movel %a3,%d0
moveq #PAGE_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PAGE_TABLE_SIZE-1,%d0
mmu_get_page_table_entry %a0,%d0
/* The page table entry must not be busy
*/
tstl %a0@
jne L(mmu_map_error)
/* Do the mapping and advance the pointers
*/
movel %a2,%a0@
2:
addw #PAGESIZE,%a2
addw #PAGESIZE,%a3
/* Ready with mapping?
*/
lea %a3@(-1),%a0
cmpl %a0,%a4
jhi L(mmu_map_040)
jra L(mmu_map_done)
L(mmu_map_030):
/* Calculate the offset into the root table
*/
movel %a3,%d0
moveq #ROOT_INDEX_SHIFT,%d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
/* Check if logical address 32MB aligned,
* so we can try to map it once
*/
movel %a3,%d0
andl #(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1)&(-ROOT_TABLE_SIZE),%d0
jne 1f
/* Is there enough to map for 32MB at once
*/
lea %a3@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1),%a1
cmpl %a1,%a4
jcs 1f
addql #1,%a1
/* The root table entry must not be busy
*/
tstl %a0@
jne L(mmu_map_error)
/* Do the mapping and advance the pointers
*/
dputs "early term1"
dputn %a2
dputn %a3
dputn %a1
dputc '\n'
movel %a2,%a0@
movel %a1,%a3
lea %a2@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE),%a2
jra L(mmu_mapnext_030)
1:
/* Calculate the offset into the pointer table
*/
movel %a3,%d0
moveq #PTR_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1,%d0
mmu_get_ptr_table_entry %a0,%d0
/* The pointer table entry must not be busy
*/
tstl %a0@
jne L(mmu_map_error)
/* Do the mapping and advance the pointers
*/
dputs "early term2"
dputn %a2
dputn %a3
dputc '\n'
movel %a2,%a0@
addl #PAGE_TABLE_SIZE*PAGESIZE,%a2
addl #PAGE_TABLE_SIZE*PAGESIZE,%a3
L(mmu_mapnext_030):
/* Ready with mapping?
*/
lea %a3@(-1),%a0
cmpl %a0,%a4
jhi L(mmu_map_030)
jra L(mmu_map_done)
L(mmu_map_error):
dputs "mmu_map error:"
dputn %a2
dputn %a3
dputc '\n'
L(mmu_map_done):
func_return mmu_map
/*
 * mmu_fixup_page_mmu_cache
 *
 * On the 040 class machines, all pages that are used for the
 * mmu have to be fixed up: the cache-mode bits of the page's PTE
 * are replaced with the value in m68k_pgtable_cachemode.
 * In: ARG1 = logical address of the page to fix up.
 */
func_start mmu_fixup_page_mmu_cache,%d0/%a0
dputs "mmu_fixup_page_mmu_cache"
dputn ARG1
/* Calculate the offset into the root table
*/
movel ARG1,%d0
moveq #ROOT_INDEX_SHIFT,%d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
/* Calculate the offset into the pointer table
*/
movel ARG1,%d0
moveq #PTR_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1,%d0
mmu_get_ptr_table_entry %a0,%d0
/* Calculate the offset into the page table
*/
movel ARG1,%d0
moveq #PAGE_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PAGE_TABLE_SIZE-1,%d0
mmu_get_page_table_entry %a0,%d0
/* mask off the old cache bits and merge in the configured mode */
movel %a0@,%d0
andil #_CACHEMASK040,%d0
orl %pc@(m68k_pgtable_cachemode),%d0
movel %d0,%a0@
dputc '\n'
func_return mmu_fixup_page_mmu_cache
/*
 * mmu_temp_map
 *
 * create a temporary mapping to enable the mmu,
 * thus we don't need any transparent translation tricks.
 * In: ARG1 = physical address, ARG2 = logical address to map it at.
 * Tables are carved out of the memory at L(temp_mmap_mem).
 */
func_start mmu_temp_map,%d0/%d1/%a0/%a1
dputs "mmu_temp_map"
dputn ARG1
dputn ARG2
dputc '\n'
lea %pc@(L(temp_mmap_mem)),%a1
/* Calculate the offset in the root table
*/
movel ARG2,%d0
moveq #ROOT_INDEX_SHIFT,%d1
lsrl %d1,%d0
mmu_get_root_table_entry %d0
/* Check if the table is temporary allocated, so we have to reuse it
*/
movel %a0@,%d0
cmpl %pc@(L(memory_start)),%d0
jcc 1f
/* Temporary allocate a ptr table and insert it into the root table
*/
movel %a1@,%d0
addl #PTR_TABLE_SIZE*4,%a1@
orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
movel %d0,%a0@
dputs " (new)"
1:
dputn %d0
/* Mask the root table entry for the ptr table
*/
andw #-ROOT_TABLE_SIZE,%d0
movel %d0,%a0
/* Calculate the offset into the pointer table
*/
movel ARG2,%d0
moveq #PTR_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PTR_TABLE_SIZE-1,%d0
lea %a0@(%d0*4),%a0
dputn %a0
/* Check if a temporary page table is already allocated
*/
movel %a0@,%d0
jne 1f
/* Temporary allocate a page table and insert it into the ptr table
*/
movel %a1@,%d0
/* The 512 should be PAGE_TABLE_SIZE*4, but that violates the
alignment restriction for pointer tables on the '0[46]0. */
addl #512,%a1@
orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
movel %d0,%a0@
dputs " (new)"
1:
dputn %d0
/* Mask the ptr table entry for the page table
*/
andw #-PTR_TABLE_SIZE,%d0
movel %d0,%a0
/* Calculate the offset into the page table
*/
movel ARG2,%d0
moveq #PAGE_INDEX_SHIFT,%d1
lsrl %d1,%d0
andl #PAGE_TABLE_SIZE-1,%d0
lea %a0@(%d0*4),%a0
dputn %a0
/* Insert the address into the page table
*/
movel ARG1,%d0
andw #-PAGESIZE,%d0
orw #_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
movel %d0,%a0@
dputn %d0
dputc '\n'
func_return mmu_temp_map
/*
 * mmu_engage
 *
 * Turn on the MMU.  A copy of the kernel root table is made at
 * L(memory_start) and used as the temporary root; temporary identity
 * mappings cover the transition code (unless phys == virt), the MMU is
 * enabled, the real root table is installed, and finally the frame
 * pointer, stack pointer, return address and kernel_pgdir_ptr are
 * relocated from physical to virtual addresses.
 */
func_start mmu_engage,%d0-%d2/%a0-%a3
moveq #ROOT_TABLE_SIZE-1,%d0
/* Temporarily use a different root table. */
lea %pc@(L(kernel_pgdir_ptr)),%a0
movel %a0@,%a2
movel %pc@(L(memory_start)),%a1
movel %a1,%a0@
movel %a2,%a0
1:
movel %a0@+,%a1@+
dbra %d0,1b
/* the page after the root-table copy feeds mmu_temp_map's tables */
lea %pc@(L(temp_mmap_mem)),%a0
movel %a1,%a0@
movew #PAGESIZE-1,%d0
1:
clrl %a1@+
dbra %d0,1b
lea %pc@(1b),%a0
movel #1b,%a1
/* Skip temp mappings if phys == virt */
cmpl %a0,%a1
jeq 1f
mmu_temp_map %a0,%a0
mmu_temp_map %a0,%a1
addw #PAGESIZE,%a0
addw #PAGESIZE,%a1
mmu_temp_map %a0,%a0
mmu_temp_map %a0,%a1
1:
movel %pc@(L(memory_start)),%a3
movel %pc@(L(phys_kernel_start)),%d2
is_not_040_or_060(L(mmu_engage_030))
L(mmu_engage_040):
.chip 68040
nop
cinva %bc
nop
pflusha
nop
movec %a3,%srp
movel #TC_ENABLE+TC_PAGE4K,%d0
movec %d0,%tc /* enable the MMU */
jmp 1f:l
1: nop
movec %a2,%srp
nop
cinva %bc
nop
pflusha
.chip 68k
jra L(mmu_engage_cleanup)
L(mmu_engage_030_temp):
.space 12
L(mmu_engage_030):
.chip 68030
lea %pc@(L(mmu_engage_030_temp)),%a0
movel #0x80000002,%a0@
movel %a3,%a0@(4)
movel #0x0808,%d0
movec %d0,%cacr
pmove %a0@,%srp
pflusha
/*
* enable,super root enable,4096 byte pages,7 bit root index,
* 7 bit pointer index, 6 bit page table index.
*/
movel #0x82c07760,%a0@(8)
pmove %a0@(8),%tc /* enable the MMU */
jmp 1f:l
1: movel %a2,%a0@(4)
movel #0x0808,%d0
movec %d0,%cacr
pmove %a0@,%srp
pflusha
.chip 68k
L(mmu_engage_cleanup):
/* relocate pointers by the phys->virt delta in %d2 */
subl #PAGE_OFFSET,%d2
subl %d2,%a2
movel %a2,L(kernel_pgdir_ptr)
subl %d2,%fp
subl %d2,%sp
subl %d2,ARG0
func_return mmu_engage
/*
 * mmu_get_root_table_entry
 *
 * Return in %a0 the address of root-table entry ARG1.  On first use
 * the kernel page directory is set up at _stext and the free-memory
 * bookkeeping (memory_start/kernel_end) is initialized from the
 * bootinfo records.
 */
func_start mmu_get_root_table_entry,%d0/%a1
#if 0
dputs "mmu_get_root_table_entry:"
dputn ARG1
dputs " ="
#endif
movel %pc@(L(kernel_pgdir_ptr)),%a0
tstl %a0
jne 2f
dputs "\nmmu_init:"
/* Find the start of free memory, get_bi_record does this for us,
* as the bootinfo structure is located directly behind the kernel
* and we simply search for the last entry.
*/
get_bi_record BI_LAST
addw #PAGESIZE-1,%a0
movel %a0,%d0
andw #-PAGESIZE,%d0
dputn %d0
lea %pc@(L(memory_start)),%a0
movel %d0,%a0@
lea %pc@(L(kernel_end)),%a0
movel %d0,%a0@
/* we have to return the first page at _stext since the init code
* in mm/init.c simply expects kernel_pg_dir there, the rest of
* page is used for further ptr tables in get_ptr_table.
*/
lea %pc@(_stext),%a0
lea %pc@(L(mmu_cached_pointer_tables)),%a1
movel %a0,%a1@
addl #ROOT_TABLE_SIZE*4,%a1@
lea %pc@(L(mmu_num_pointer_tables)),%a1
addql #1,%a1@
/* clear the page
*/
movel %a0,%a1
movew #PAGESIZE/4-1,%d0
1:
clrl %a1@+
dbra %d0,1b
lea %pc@(L(kernel_pgdir_ptr)),%a1
movel %a0,%a1@
dputn %a0
dputc '\n'
2:
movel ARG1,%d0
lea %a0@(%d0*4),%a0
#if 0
dputn %a0
dputc '\n'
#endif
func_return mmu_get_root_table_entry
/*
 * mmu_get_ptr_table_entry
 *
 * Return in %a0 the address of entry ARG2 of the pointer table hung
 * off the root-table slot at ARG1, allocating a new pointer table
 * (from a page-sized cache of 8 tables) and linking it into the root
 * table if the slot is empty.
 */
func_start mmu_get_ptr_table_entry,%d0/%a1
#if 0
dputs "mmu_get_ptr_table_entry:"
dputn ARG1
dputn ARG2
dputs " ="
#endif
movel ARG1,%a0
movel %a0@,%d0
jne 2f
/* Keep track of the number of pointer tables we use
*/
dputs "\nmmu_get_new_ptr_table:"
lea %pc@(L(mmu_num_pointer_tables)),%a0
movel %a0@,%d0
addql #1,%a0@
/* See if there is a free pointer table in our cache of pointer tables
*/
lea %pc@(L(mmu_cached_pointer_tables)),%a1
andw #7,%d0
jne 1f
/* Get a new pointer table page from above the kernel memory
*/
get_new_page
movel %a0,%a1@
1:
/* There is an unused pointer table in our cache... use it
*/
movel %a1@,%d0
addl #PTR_TABLE_SIZE*4,%a1@
dputn %d0
dputc '\n'
/* Insert the new pointer table into the root table
*/
movel ARG1,%a0
orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
movel %d0,%a0@
2:
/* Extract the pointer table entry
*/
andw #-PTR_TABLE_SIZE,%d0
movel %d0,%a0
movel ARG2,%d0
lea %a0@(%d0*4),%a0
#if 0
dputn %a0
dputc '\n'
#endif
func_return mmu_get_ptr_table_entry
/*
 * mmu_get_page_table_entry
 *
 * Return in %a0 the address of entry ARG2 of the page table hung off
 * the pointer-table slot at ARG1, allocating a whole page of page
 * tables and linking them into the aligned run of pointer entries if
 * the slot is empty.
 */
func_start mmu_get_page_table_entry,%d0/%a1
#if 0
dputs "mmu_get_page_table_entry:"
dputn ARG1
dputn ARG2
dputs " ="
#endif
movel ARG1,%a0
movel %a0@,%d0
jne 2f
/* If the page table entry doesn't exist, we allocate a complete new
* page and use it as one contiguous big page table which can cover
* 4MB of memory, nearly almost all mappings have that alignment.
*/
get_new_page
addw #_PAGE_TABLE+_PAGE_ACCESSED,%a0
/* align pointer table entry for a page of page tables
*/
movel ARG1,%d0
andw #-(PAGESIZE/PAGE_TABLE_SIZE),%d0
movel %d0,%a1
/* Insert the page tables into the pointer entries
*/
moveq #PAGESIZE/PAGE_TABLE_SIZE/4-1,%d0
1:
movel %a0,%a1@+
lea %a0@(PAGE_TABLE_SIZE*4),%a0
dbra %d0,1b
/* Now we can get the initialized pointer table entry
*/
movel ARG1,%a0
movel %a0@,%d0
2:
/* Extract the page table entry
*/
andw #-PAGE_TABLE_SIZE,%d0
movel %d0,%a0
movel ARG2,%d0
lea %a0@(%d0*4),%a0
#if 0
dputn %a0
dputc '\n'
#endif
func_return mmu_get_page_table_entry
/*
 * get_new_page
 *
 * Return a new page from the memory start and clear it.
 * Out: %a0 = address of the freshly zeroed page;
 *      L(memory_start) is advanced by PAGESIZE.
 */
func_start get_new_page,%d0/%a1
dputs "\nget_new_page:"
/* allocate the page and adjust memory_start
*/
lea %pc@(L(memory_start)),%a0
movel %a0@,%a1
addl #PAGESIZE,%a0@
/* clear the new page
*/
movel %a1,%a0
movew #PAGESIZE/4-1,%d0
1:
clrl %a1@+
dbra %d0,1b
dputn %a0
dputc '\n'
func_return get_new_page
/*
 * Debug output support
 * Atarians have a choice between the parallel port, the serial port
 * from the MFP or a serial port of the SCC
 */
#ifdef CONFIG_MAC
/* You may define either or both of these. */
#define MAC_USE_SCC_A /* Modem port */
#define MAC_USE_SCC_B /* Printer port */
#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
/* Initialisation table for SCC with 3.6864 MHz PCLK.
 * Pairs of (register number, value); terminated by -1. */
L(scc_initable_mac):
.byte 4,0x44 /* x16, 1 stopbit, no parity */
.byte 3,0xc0 /* receiver: 8 bpc */
.byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
.byte 10,0 /* NRZ */
.byte 11,0x50 /* use baud rate generator */
.byte 12,1,13,0 /* 38400 baud */
.byte 14,1 /* Baud rate generator enable */
.byte 3,0xc1 /* enable receiver */
.byte 5,0xea /* enable transmitter */
.byte -1
.even
#endif
#endif /* CONFIG_MAC */
#ifdef CONFIG_ATARI
/* #define USE_PRINTER */
/* #define USE_SCC_B */
/* #define USE_SCC_A */
#define USE_MFP
#if defined(USE_SCC_A) || defined(USE_SCC_B)
/* Initialisation table for SCC with 7.9872 MHz PCLK */
/* PCLK == 8.0539 gives baud == 9680.1 */
L(scc_initable_atari):
.byte 4,0x44 /* x16, 1 stopbit, no parity */
.byte 3,0xc0 /* receiver: 8 bpc */
.byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
.byte 10,0 /* NRZ */
.byte 11,0x50 /* use baud rate generator */
.byte 12,24,13,0 /* 9600 baud */
.byte 14,2,14,3 /* use master clock for BRG, enable */
.byte 3,0xc1 /* enable receiver */
.byte 5,0xea /* enable transmitter */
.byte -1
.even
#endif
#ifdef USE_PRINTER
/* Atari parallel-port (via PSG + ST-MFP) register offsets */
LPSG_SELECT = 0xff8800
LPSG_READ = 0xff8800
LPSG_WRITE = 0xff8802
LPSG_IO_A = 14
LPSG_IO_B = 15
LPSG_CONTROL = 7
LSTMFP_GPIP = 0xfffa01
LSTMFP_DDR = 0xfffa05
LSTMFP_IERB = 0xfffa09
#elif defined(USE_SCC_B)
LSCC_CTRL = 0xff8c85
LSCC_DATA = 0xff8c87
#elif defined(USE_SCC_A)
LSCC_CTRL = 0xff8c81
LSCC_DATA = 0xff8c83
#elif defined(USE_MFP)
/* Atari MFP 68901 USART register offsets */
LMFP_UCR = 0xfffa29
LMFP_TDCDR = 0xfffa1d
LMFP_TDDR = 0xfffa25
LMFP_TSR = 0xfffa2d
LMFP_UDR = 0xfffa2f
#endif
#endif /* CONFIG_ATARI */
/*
 * Serial port output support.
 */
/*
 * Initialize serial port hardware for the detected machine so that
 * serial_putc can be used for early debug output.
 */
func_start serial_init,%d0/%d1/%a0/%a1
/*
* Some of the register usage that follows
* CONFIG_AMIGA
* a0 = pointer to boot info record
* d0 = boot info offset
* CONFIG_ATARI
* a0 = address of SCC
* a1 = Liobase address/address of scc_initable_atari
* d0 = init data for serial port
* CONFIG_MAC
* a0 = address of SCC
* a1 = address of scc_initable_mac
* d0 = init data for serial port
*/
#ifdef CONFIG_AMIGA
#define SERIAL_DTR 7
#define SERIAL_CNTRL CIABBASE+C_PRA
is_not_amiga(1f)
lea %pc@(L(custom)),%a0
movel #-ZTWOBASE,%a0@
bclr #SERIAL_DTR,SERIAL_CNTRL-ZTWOBASE
get_bi_record BI_AMIGA_SERPER
movew %a0@,CUSTOMBASE+C_SERPER-ZTWOBASE
| movew #61,CUSTOMBASE+C_SERPER-ZTWOBASE
1:
#endif
#ifdef CONFIG_ATARI
is_not_atari(4f)
movel %pc@(L(iobase)),%a1
#if defined(USE_PRINTER)
bclr #0,%a1@(LSTMFP_IERB)
bclr #0,%a1@(LSTMFP_DDR)
moveb #LPSG_CONTROL,%a1@(LPSG_SELECT)
moveb #0xff,%a1@(LPSG_WRITE)
moveb #LPSG_IO_B,%a1@(LPSG_SELECT)
clrb %a1@(LPSG_WRITE)
moveb #LPSG_IO_A,%a1@(LPSG_SELECT)
moveb %a1@(LPSG_READ),%d0
bset #5,%d0
moveb %d0,%a1@(LPSG_WRITE)
#elif defined(USE_SCC_A) || defined(USE_SCC_B)
lea %a1@(LSCC_CTRL),%a0
/* Reset SCC register pointer */
moveb %a0@,%d0
/* Reset SCC device: write register pointer then register value */
moveb #9,%a0@
moveb #0xc0,%a0@
/* Wait for 5 PCLK cycles, which is about 63 CPU cycles */
/* 5 / 7.9872 MHz = approx. 0.63 us = 63 / 100 MHz */
movel #32,%d0
2:
subq #1,%d0
jne 2b
/* Initialize channel */
lea %pc@(L(scc_initable_atari)),%a1
2: moveb %a1@+,%d0
jmi 3f
moveb %d0,%a0@
moveb %a1@+,%a0@
jra 2b
3: clrb %a0@
#elif defined(USE_MFP)
bclr #1,%a1@(LMFP_TSR)
moveb #0x88,%a1@(LMFP_UCR)
andb #0x70,%a1@(LMFP_TDCDR)
moveb #2,%a1@(LMFP_TDDR)
orb #1,%a1@(LMFP_TDCDR)
bset #1,%a1@(LMFP_TSR)
#endif
jra L(serial_init_done)
4:
#endif
#ifdef CONFIG_MAC
is_not_mac(L(serial_init_not_mac))
#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
#define mac_scc_cha_b_ctrl_offset 0x0
#define mac_scc_cha_a_ctrl_offset 0x2
#define mac_scc_cha_b_data_offset 0x4
#define mac_scc_cha_a_data_offset 0x6
movel %pc@(L(mac_sccbase)),%a0
/* Reset SCC register pointer */
moveb %a0@(mac_scc_cha_a_ctrl_offset),%d0
/* Reset SCC device: write register pointer then register value */
moveb #9,%a0@(mac_scc_cha_a_ctrl_offset)
moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
/* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
/* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
movel #35,%d0
5:
subq #1,%d0
jne 5b
#endif
#ifdef MAC_USE_SCC_A
/* Initialize channel A */
lea %pc@(L(scc_initable_mac)),%a1
5: moveb %a1@+,%d0
jmi 6f
moveb %d0,%a0@(mac_scc_cha_a_ctrl_offset)
moveb %a1@+,%a0@(mac_scc_cha_a_ctrl_offset)
jra 5b
6:
#endif /* MAC_USE_SCC_A */
#ifdef MAC_USE_SCC_B
/* Initialize channel B */
lea %pc@(L(scc_initable_mac)),%a1
7: moveb %a1@+,%d0
jmi 8f
moveb %d0,%a0@(mac_scc_cha_b_ctrl_offset)
moveb %a1@+,%a0@(mac_scc_cha_b_ctrl_offset)
jra 7b
8:
#endif /* MAC_USE_SCC_B */
jra L(serial_init_done)
L(serial_init_not_mac):
#endif /* CONFIG_MAC */
#ifdef CONFIG_Q40
is_not_q40(2f)
/* debug output goes into SRAM, so we don't do it unless requested
- check for '%LX$' signature in SRAM */
lea %pc@(q40_mem_cptr),%a1
move.l #0xff020010,%a1@ /* must be inited - also used by debug=mem */
move.l #0xff020000,%a1
cmp.b #'%',%a1@
bne 2f /*nodbg*/
addq.w #4,%a1
cmp.b #'L',%a1@
bne 2f /*nodbg*/
addq.w #4,%a1
cmp.b #'X',%a1@
bne 2f /*nodbg*/
addq.w #4,%a1
cmp.b #'$',%a1@
bne 2f /*nodbg*/
/* signature OK */
lea %pc@(L(q40_do_debug)),%a1
tas %a1@
/*nodbg: q40_do_debug is 0 by default*/
2:
#endif
#ifdef CONFIG_MVME16x
is_not_mvme16x(L(serial_init_not_mvme16x))
moveb #0x10,M167_PCSCCMICR
moveb #0x10,M167_PCSCCTICR
moveb #0x10,M167_PCSCCRICR
jra L(serial_init_done)
L(serial_init_not_mvme16x):
#endif
#ifdef CONFIG_APOLLO
/* We count on the PROM initializing SIO1 */
#endif
#ifdef CONFIG_HP300
/* We count on the boot loader initialising the UART */
#endif
L(serial_init_done):
func_return serial_init
/*
 * Output character on serial port.
 * In: ARG1 = character; '\n' is expanded to "\r\n" via safe recursion.
 * Busy-waits on the transmitter-ready bit of whichever machine's
 * serial hardware is compiled in.
 */
func_start serial_putc,%d0/%d1/%a0/%a1
movel ARG1,%d0
cmpib #'\n',%d0
jbne 1f
/* A little safe recursion is good for the soul */
serial_putc #'\r'
1:
#ifdef CONFIG_AMIGA
is_not_amiga(2f)
andw #0x00ff,%d0
oriw #0x0100,%d0
movel %pc@(L(custom)),%a0
movew %d0,%a0@(CUSTOMBASE+C_SERDAT)
1: movew %a0@(CUSTOMBASE+C_SERDATR),%d0
andw #0x2000,%d0
jeq 1b
jra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_MAC
is_not_mac(5f)
#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
movel %pc@(L(mac_sccbase)),%a1
#endif
#ifdef MAC_USE_SCC_A
3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset)
jeq 3b
moveb %d0,%a1@(mac_scc_cha_a_data_offset)
#endif /* MAC_USE_SCC_A */
#ifdef MAC_USE_SCC_B
4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset)
jeq 4b
moveb %d0,%a1@(mac_scc_cha_b_data_offset)
#endif /* MAC_USE_SCC_B */
jra L(serial_putc_done)
5:
#endif /* CONFIG_MAC */
#ifdef CONFIG_ATARI
is_not_atari(4f)
movel %pc@(L(iobase)),%a1
#if defined(USE_PRINTER)
3: btst #0,%a1@(LSTMFP_GPIP)
jne 3b
moveb #LPSG_IO_B,%a1@(LPSG_SELECT)
moveb %d0,%a1@(LPSG_WRITE)
moveb #LPSG_IO_A,%a1@(LPSG_SELECT)
moveb %a1@(LPSG_READ),%d0
bclr #5,%d0
moveb %d0,%a1@(LPSG_WRITE)
nop
nop
bset #5,%d0
moveb %d0,%a1@(LPSG_WRITE)
#elif defined(USE_SCC_A) || defined(USE_SCC_B)
3: btst #2,%a1@(LSCC_CTRL)
jeq 3b
moveb %d0,%a1@(LSCC_DATA)
#elif defined(USE_MFP)
3: btst #7,%a1@(LMFP_TSR)
jeq 3b
moveb %d0,%a1@(LMFP_UDR)
#endif
jra L(serial_putc_done)
4:
#endif /* CONFIG_ATARI */
#ifdef CONFIG_MVME147
is_not_mvme147(2f)
1: btst #2,M147_SCC_CTRL_A
jeq 1b
moveb %d0,M147_SCC_DATA_A
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_MVME16x
is_not_mvme16x(2f)
/*
* If the loader gave us a board type then we can use that to
* select an appropriate output routine; otherwise we just use
* the Bug code. If we have to use the Bug that means the Bug
* workspace has to be valid, which means the Bug has to use
* the SRAM, which is non-standard.
*/
moveml %d0-%d7/%a2-%a6,%sp@-
movel vme_brdtype,%d1
jeq 1f | No tag - use the Bug
cmpi #VME_TYPE_MVME162,%d1
jeq 6f
cmpi #VME_TYPE_MVME172,%d1
jne 5f
/* 162/172; it's an SCC */
6: btst #2,M162_SCC_CTRL_A
nop
nop
nop
jeq 6b
moveb #8,M162_SCC_CTRL_A
nop
nop
nop
moveb %d0,M162_SCC_CTRL_A
jra 3f
5:
/* 166/167/177; it's a CD2401 */
moveb #0,M167_CYCAR
moveb M167_CYIER,%d2
moveb #0x02,M167_CYIER
7:
btst #5,M167_PCSCCTICR
jeq 7b
moveb M167_PCTPIACKR,%d1
moveb M167_CYLICR,%d1
jeq 8f
moveb #0x08,M167_CYTEOIR
jra 7b
8:
moveb %d0,M167_CYTDR
moveb #0,M167_CYTEOIR
moveb %d2,M167_CYIER
jra 3f
1:
moveb %d0,%sp@-
trap #15
.word 0x0020 /* TRAP 0x020 */
3:
moveml %sp@+,%d0-%d7/%a2-%a6
jbra L(serial_putc_done)
2:
#endif /* CONFIG_MVME16x */
#ifdef CONFIG_BVME6000
is_not_bvme6000(2f)
/*
* The BVME6000 machine has a serial port ...
*/
1: btst #2,BVME_SCC_CTRL_A
jeq 1b
moveb %d0,BVME_SCC_DATA_A
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_SUN3X
is_not_sun3x(2f)
/* call the PROM output routine at 0xFEFE0018 */
movel %d0,-(%sp)
movel 0xFEFE0018,%a1
jbsr (%a1)
addq #4,%sp
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_Q40
is_not_q40(2f)
tst.l %pc@(L(q40_do_debug)) /* only debug if requested */
beq 2f
lea %pc@(q40_mem_cptr),%a1
move.l %a1@,%a0
move.b %d0,%a0@
addq.l #4,%a0
move.l %a0,%a1@
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_APOLLO
is_not_apollo(2f)
movl %pc@(L(iobase)),%a1
moveb %d0,%a1@(LTHRB0)
1: moveb %a1@(LSRB0),%d0
andb #0x4,%d0
beq 1b
jbra L(serial_putc_done)
2:
#endif
#ifdef CONFIG_HP300
is_not_hp300(3f)
movl %pc@(L(iobase)),%a1
addl %pc@(L(uartbase)),%a1
movel %pc@(L(uart_scode)),%d1 /* Check the scode */
jmi 3f /* Unset? Exit */
cmpi #256,%d1 /* APCI scode? */
jeq 2f
1: moveb %a1@(DCALSR),%d1 /* Output to DCA */
andb #0x20,%d1
beq 1b
moveb %d0,%a1@(DCADATA)
jbra L(serial_putc_done)
2: moveb %a1@(APCILSR),%d1 /* Output to APCI */
andb #0x20,%d1
beq 2b
moveb %d0,%a1@(APCIDATA)
jbra L(serial_putc_done)
3:
#endif
L(serial_putc_done):
func_return serial_putc
/*
 * Output a NUL-terminated string.
 * In: ARG1 = pointer to the string; each character goes to the
 * console and/or serial output, whichever is compiled in.
 */
func_start puts,%d0/%a0
movel ARG1,%a0
jra 2f
1:
#ifdef CONSOLE_DEBUG
console_putc %d0
#endif
#ifdef SERIAL_DEBUG
serial_putc %d0
#endif
2: moveb %a0@+,%d0
jne 1b
func_return puts
/*
 * Output number in hex notation.
 * In: ARG1 = 32-bit value; printed as a leading space followed by
 * 8 uppercase hex digits, most significant nibble first.
 */
func_start putn,%d0-%d2
putc ' '
movel ARG1,%d0
moveq #7,%d1
1: roll #4,%d0 /* rotate next nibble into the low 4 bits */
move %d0,%d2
andb #0x0f,%d2
addb #'0',%d2
cmpb #'9',%d2
jls 2f
addb #'A'-('9'+1),%d2 /* adjust A-F past the '9'..'A' ASCII gap */
2:
#ifdef CONSOLE_DEBUG
console_putc %d2
#endif
#ifdef SERIAL_DEBUG
serial_putc %d2
#endif
dbra %d1,1b
func_return putn
#ifdef CONFIG_EARLY_PRINTK
/*
 * This routine takes its parameters on the stack. It then
 * turns around and calls the internal routines. This routine
 * is used by the boot console.
 *
 * The calling parameters are:
 * void debug_cons_nputs(const char *str, unsigned length)
 *
 * This routine does NOT understand variable arguments, only
 * simple strings!  Output stops at the length limit or at the
 * first NUL byte, whichever comes first.  Interrupts are masked
 * (sr | 0x0700) for the duration.
 */
ENTRY(debug_cons_nputs)
moveml %d0/%d1/%a0,%sp@-
movew %sr,%sp@-
ori #0x0700,%sr
movel %sp@(18),%a0 /* fetch parameter */
movel %sp@(22),%d1 /* fetch parameter */
jra 2f
1:
#ifdef CONSOLE_DEBUG
console_putc %d0
#endif
#ifdef SERIAL_DEBUG
serial_putc %d0
#endif
subq #1,%d1
2: jeq 3f
moveb %a0@+,%d0
jne 1b
3:
movew %sp@+,%sr
moveml %sp@+,%d0/%d1/%a0
rts
#endif /* CONFIG_EARLY_PRINTK */
#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
/*
 * set_leds
 *
 * Display ARG1 on the front-panel LED register: HP300 writes the raw
 * byte at iobase+0x1ffff; Apollo shifts it up and inverts the bits
 * before writing LCPUCTRL (LEDs are presumably active-low there).
 */
func_start set_leds,%d0/%a0
movel ARG1,%d0
#ifdef CONFIG_HP300
is_not_hp300(1f)
movel %pc@(L(iobase)),%a0
moveb %d0,%a0@(0x1ffff)
jra 2f
#endif
1:
#ifdef CONFIG_APOLLO
movel %pc@(L(iobase)),%a0
lsll #8,%d0
eorw #0xff00,%d0
moveb %d0,%a0@(LCPUCTRL)
#endif
2:
func_return set_leds
#endif
#ifdef CONSOLE_DEBUG
/*
 * Byte offsets into L(console_globals) (see the data section);
 * keep these in step with the .long layout there.
 */
#define Lconsole_struct_cur_column 0
#define Lconsole_struct_cur_row 4
#define Lconsole_struct_num_columns 8
#define Lconsole_struct_num_rows 12
#define Lconsole_struct_left_edge 16
/*
 * console_init
 *
 * Set up the early framebuffer console (Mac): clear the screen to
 * Mac_black, pick a compiled-in font, and compute the character-cell
 * dimensions into L(console_globals).
 */
func_start console_init,%a0-%a4/%d0-%d7
/*
* Some of the register usage that follows
* a0 = pointer to boot_info
* a1 = pointer to screen
* a2 = pointer to console_globals
* d3 = pixel width of screen
* d4 = pixel height of screen
* (d3,d4) ~= (x,y) of a point just below
* and to the right of the screen
* NOT on the screen!
* d5 = number of bytes per scan line
* d6 = number of bytes on the entire screen
*/
lea %pc@(L(console_globals)),%a2
movel %pc@(L(mac_videobase)),%a1
movel %pc@(L(mac_rowbytes)),%d5
movel %pc@(L(mac_dimensions)),%d3 /* -> low byte */
movel %d3,%d4
swap %d4 /* -> high byte */
andl #0xffff,%d3 /* d3 = screen width in pixels */
andl #0xffff,%d4 /* d4 = screen height in pixels */
movel %d5,%d6
| subl #20,%d6
mulul %d4,%d6 /* scan line bytes x num scan lines */
divul #8,%d6 /* we'll clear 8 bytes at a time */
moveq #-1,%d0 /* Mac_black */
subq #1,%d6
L(console_clear_loop):
movel %d0,%a1@+
movel %d0,%a1@+
dbra %d6,L(console_clear_loop)
/* Calculate font size */
#if defined(FONT_8x8) && defined(CONFIG_FONT_8x8)
lea %pc@(font_vga_8x8),%a0
#elif defined(FONT_8x16) && defined(CONFIG_FONT_8x16)
lea %pc@(font_vga_8x16),%a0
#elif defined(FONT_6x11) && defined(CONFIG_FONT_6x11)
lea %pc@(font_vga_6x11),%a0
#elif defined(CONFIG_FONT_8x8) /* default */
lea %pc@(font_vga_8x8),%a0
#else /* no compiled-in font */
lea 0,%a0
#endif
/*
* At this point we make a shift in register usage
* a1 = address of console_font pointer
*/
lea %pc@(L(console_font)),%a1
movel %a0,%a1@ /* store pointer to struct fbcon_font_desc in console_font */
tstl %a0
jeq 1f
lea %pc@(L(console_font_data)),%a4
movel %a0@(FONT_DESC_DATA),%d0
subl #L(console_font),%a1
addl %a1,%d0
movel %d0,%a4@
/*
* Calculate global maxs
* Note - we can use either an
* 8 x 16 or 8 x 8 character font
* 6 x 11 also supported
*/
/* ASSERT: a0 = contents of Lconsole_font */
movel %d3,%d0 /* screen width in pixels */
divul %a0@(FONT_DESC_WIDTH),%d0 /* d0 = max num chars per row */
movel %d4,%d1 /* screen height in pixels */
divul %a0@(FONT_DESC_HEIGHT),%d1 /* d1 = max num rows */
movel %d0,%a2@(Lconsole_struct_num_columns)
movel %d1,%a2@(Lconsole_struct_num_rows)
/*
* Clear the current row and column
*/
clrl %a2@(Lconsole_struct_cur_column)
clrl %a2@(Lconsole_struct_cur_row)
clrl %a2@(Lconsole_struct_left_edge)
/*
* Initialization is complete
*/
1:
func_return console_init
#ifdef CONFIG_LOGO
func_start console_put_penguin,%a0-%a1/%d0-%d7
/*
* Get 'that_penguin' onto the screen in the upper right corner
* penguin is 64 x 74 pixels, align against right edge of screen.
* The bitmap stores two 4-bit pixels per byte, high nibble first.
*/
lea %pc@(L(mac_dimensions)),%a0
movel %a0@,%d0
andil #0xffff,%d0
subil #64,%d0 /* snug up against the right edge */
clrl %d1 /* start at the top */
movel #73,%d7 /* 74 rows */
lea %pc@(L(that_penguin)),%a1
L(console_penguin_row):
movel #31,%d6 /* 32 byte pairs = 64 pixels per row */
L(console_penguin_pixel_pair):
moveb %a1@,%d2
lsrb #4,%d2 /* high nibble first */
console_plot_pixel %d0,%d1,%d2
addq #1,%d0
moveb %a1@+,%d2 /* then low nibble */
console_plot_pixel %d0,%d1,%d2
addq #1,%d0
dbra %d6,L(console_penguin_pixel_pair)
subil #64,%d0 /* back to the row's left edge */
addq #1,%d1
dbra %d7,L(console_penguin_row)
func_return console_put_penguin
/* include penguin bitmap */
L(that_penguin):
#include "../mac/mac_penguin.S"
#endif
/*
 * console_scroll
 *
 * Scroll the framebuffer up by one font height (copying 32 bytes per
 * iteration), then clear the newly exposed bottom character row to
 * Mac_black.
 *
 * Calculate source and destination addresses
 * output a1 = dest
 * a2 = source
 */
func_start console_scroll,%a0-%a4/%d0-%d7
lea %pc@(L(mac_videobase)),%a0
movel %a0@,%a1
movel %a1,%a2
lea %pc@(L(mac_rowbytes)),%a0
movel %a0@,%d5
movel %pc@(L(console_font)),%a0
tstl %a0
jeq 1f
mulul %a0@(FONT_DESC_HEIGHT),%d5 /* account for # scan lines per character */
addal %d5,%a2
/*
* Get dimensions
*/
lea %pc@(L(mac_dimensions)),%a0
movel %a0@,%d3
movel %d3,%d4
swap %d4
andl #0xffff,%d3 /* d3 = screen width in pixels */
andl #0xffff,%d4 /* d4 = screen height in pixels */
/*
* Calculate number of bytes to move
*/
lea %pc@(L(mac_rowbytes)),%a0
movel %a0@,%d6
movel %pc@(L(console_font)),%a0
subl %a0@(FONT_DESC_HEIGHT),%d4 /* we're not scrolling the top row! */
mulul %d4,%d6 /* scan line bytes x num scan lines */
divul #32,%d6 /* we'll move 8 longs at a time */
subq #1,%d6
L(console_scroll_loop):
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
movel %a2@+,%a1@+
dbra %d6,L(console_scroll_loop)
lea %pc@(L(mac_rowbytes)),%a0
movel %a0@,%d6
movel %pc@(L(console_font)),%a0
mulul %a0@(FONT_DESC_HEIGHT),%d6 /* scan line bytes x font height */
divul #32,%d6 /* we'll move 8 words at a time */
subq #1,%d6
moveq #-1,%d0 /* Mac_black */
L(console_scroll_clear_loop):
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
movel %d0,%a1@+
dbra %d6,L(console_scroll_clear_loop)
1:
func_return console_scroll
/*
 * console_putc
 *
 * Render one character (ARG1) on the Mac framebuffer console.
 * Handles '\n' (expanded to "\r\n" and scrolls at the last row),
 * '\r' (column reset), and ^A (home); anything else is drawn with
 * the compiled-in font, wrapping to a new line at the right edge.
 * No-op when not on a Mac or no font is available.
 */
func_start console_putc,%a0/%a1/%d0-%d7
is_not_mac(L(console_exit))
tstl %pc@(L(console_font))
jeq L(console_exit)
/* Output character in d7 on console.
*/
movel ARG1,%d7
cmpib #'\n',%d7
jbne 1f
/* A little safe recursion is good for the soul */
console_putc #'\r'
1:
lea %pc@(L(console_globals)),%a0
cmpib #10,%d7
jne L(console_not_lf)
movel %a0@(Lconsole_struct_cur_row),%d0
addil #1,%d0
movel %d0,%a0@(Lconsole_struct_cur_row)
movel %a0@(Lconsole_struct_num_rows),%d1
cmpl %d1,%d0
jcs 1f
subil #1,%d0
movel %d0,%a0@(Lconsole_struct_cur_row)
console_scroll
1:
jra L(console_exit)
L(console_not_lf):
cmpib #13,%d7
jne L(console_not_cr)
clrl %a0@(Lconsole_struct_cur_column)
jra L(console_exit)
L(console_not_cr):
cmpib #1,%d7
jne L(console_not_home)
clrl %a0@(Lconsole_struct_cur_row)
clrl %a0@(Lconsole_struct_cur_column)
jra L(console_exit)
/*
* At this point we know that the %d7 character is going to be
* rendered on the screen. Register usage is -
* a0 = pointer to console globals
* a1 = font data
* d0 = cursor column
* d1 = cursor row to draw the character
* d7 = character number
*/
L(console_not_home):
movel %a0@(Lconsole_struct_cur_column),%d0
addql #1,%a0@(Lconsole_struct_cur_column)
movel %a0@(Lconsole_struct_num_columns),%d1
cmpl %d1,%d0
jcs 1f
console_putc #'\n' /* recursion is OK! */
1:
movel %a0@(Lconsole_struct_cur_row),%d1
/*
* At this point we make a shift in register usage
* a0 = address of pointer to font data (fbcon_font_desc)
*/
movel %pc@(L(console_font)),%a0
movel %pc@(L(console_font_data)),%a1 /* Load fbcon_font_desc.data into a1 */
andl #0x000000ff,%d7
/* ASSERT: a0 = contents of Lconsole_font */
mulul %a0@(FONT_DESC_HEIGHT),%d7 /* d7 = index into font data */
addl %d7,%a1 /* a1 = points to char image */
/*
* At this point we make a shift in register usage
* d0 = pixel coordinate, x
* d1 = pixel coordinate, y
* d2 = (bit 0) 1/0 for white/black (!) pixel on screen
* d3 = font scan line data (8 pixels)
* d6 = count down for the font's pixel width (8)
* d7 = count down for the font's pixel count in height
*/
/* ASSERT: a0 = contents of Lconsole_font */
mulul %a0@(FONT_DESC_WIDTH),%d0
mulul %a0@(FONT_DESC_HEIGHT),%d1
movel %a0@(FONT_DESC_HEIGHT),%d7 /* Load fbcon_font_desc.height into d7 */
subq #1,%d7
L(console_read_char_scanline):
moveb %a1@+,%d3
/* ASSERT: a0 = contents of Lconsole_font */
movel %a0@(FONT_DESC_WIDTH),%d6 /* Load fbcon_font_desc.width into d6 */
subql #1,%d6
L(console_do_font_scanline):
lslb #1,%d3
scsb %d2 /* convert 1 bit into a byte */
console_plot_pixel %d0,%d1,%d2
addq #1,%d0
dbra %d6,L(console_do_font_scanline)
/* ASSERT: a0 = contents of Lconsole_font */
subl %a0@(FONT_DESC_WIDTH),%d0
addq #1,%d1
dbra %d7,L(console_read_char_scanline)
L(console_exit):
func_return console_putc
/*
 * console_plot_pixel
 *
 * Plot one pixel on the Mac framebuffer at the current video depth
 * (1, 2, 4, 8 or 16 bits per pixel; other depths are ignored).
 * Input:
 * d0 = x coordinate
 * d1 = y coordinate
 * d2 = (bit 0) 1/0 for white/black (!)
 * All registers are preserved
 */
func_start console_plot_pixel,%a0-%a1/%d0-%d4
movel %pc@(L(mac_videobase)),%a1
movel %pc@(L(mac_videodepth)),%d3
movel ARG1,%d0
movel ARG2,%d1
mulul %pc@(L(mac_rowbytes)),%d1
movel ARG3,%d2
/*
* Register usage:
* d0 = x coord becomes byte offset into frame buffer
* d1 = y coord
* d2 = black or white (0/1)
* d3 = video depth
* d4 = temp of x (d0) for many bit depths
*/
L(test_1bit):
cmpb #1,%d3
jbne L(test_2bit)
movel %d0,%d4 /* we need the low order 3 bits! */
divul #8,%d0
addal %d0,%a1
addal %d1,%a1
andb #7,%d4
eorb #7,%d4 /* reverse the x-coordinate w/ screen-bit # */
andb #1,%d2
jbne L(white_1)
bsetb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(white_1):
bclrb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(test_2bit):
cmpb #2,%d3
jbne L(test_4bit)
movel %d0,%d4 /* we need the low order 2 bits! */
divul #4,%d0
addal %d0,%a1
addal %d1,%a1
andb #3,%d4
eorb #3,%d4 /* reverse the x-coordinate w/ screen-bit # */
lsll #1,%d4 /* ! */
andb #1,%d2
jbne L(white_2)
bsetb %d4,%a1@
addq #1,%d4
bsetb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(white_2):
bclrb %d4,%a1@
addq #1,%d4
bclrb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(test_4bit):
cmpb #4,%d3
jbne L(test_8bit)
movel %d0,%d4 /* we need the low order bit! */
divul #2,%d0
addal %d0,%a1
addal %d1,%a1
andb #1,%d4
eorb #1,%d4
lsll #2,%d4 /* ! */
andb #1,%d2
jbne L(white_4)
bsetb %d4,%a1@
addq #1,%d4
bsetb %d4,%a1@
addq #1,%d4
bsetb %d4,%a1@
addq #1,%d4
bsetb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(white_4):
bclrb %d4,%a1@
addq #1,%d4
bclrb %d4,%a1@
addq #1,%d4
bclrb %d4,%a1@
addq #1,%d4
bclrb %d4,%a1@
jbra L(console_plot_pixel_exit)
L(test_8bit):
cmpb #8,%d3
jbne L(test_16bit)
addal %d0,%a1
addal %d1,%a1
andb #1,%d2
jbne L(white_8)
moveb #0xff,%a1@
jbra L(console_plot_pixel_exit)
L(white_8):
clrb %a1@
jbra L(console_plot_pixel_exit)
L(test_16bit):
cmpb #16,%d3
jbne L(console_plot_pixel_exit)
addal %d0,%a1
addal %d0,%a1
addal %d1,%a1
andb #1,%d2
jbne L(white_16)
clrw %a1@
jbra L(console_plot_pixel_exit)
L(white_16):
movew #0x0fff,%a1@
jbra L(console_plot_pixel_exit)
L(console_plot_pixel_exit):
func_return console_plot_pixel
#endif /* CONSOLE_DEBUG */
__INITDATA
.align 4
m68k_init_mapped_size:
.long 0
#if defined(CONFIG_ATARI) || defined(CONFIG_AMIGA) || \
defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
L(custom):
L(iobase):
.long 0
#endif
#ifdef CONSOLE_DEBUG
L(console_globals):
.long 0 /* cursor column */
.long 0 /* cursor row */
.long 0 /* max num columns */
.long 0 /* max num rows */
.long 0 /* left edge */
L(console_font):
.long 0 /* pointer to console font (struct font_desc) */
L(console_font_data):
.long 0 /* pointer to console font data */
#endif /* CONSOLE_DEBUG */
#if defined(MMU_PRINT)
L(mmu_print_data):
.long 0 /* valid flag */
.long 0 /* start logical */
.long 0 /* next logical */
.long 0 /* start physical */
.long 0 /* next physical */
#endif /* MMU_PRINT */
L(cputype):
.long 0
L(mmu_cached_pointer_tables):
.long 0
L(mmu_num_pointer_tables):
.long 0
L(phys_kernel_start):
.long 0
L(kernel_end):
.long 0
L(memory_start):
.long 0
L(kernel_pgdir_ptr):
.long 0
L(temp_mmap_mem):
.long 0
#if defined (CONFIG_MVME147)
M147_SCC_CTRL_A = 0xfffe3002
M147_SCC_DATA_A = 0xfffe3003
#endif
#if defined (CONFIG_MVME16x)
M162_SCC_CTRL_A = 0xfff45005
M167_CYCAR = 0xfff450ee
M167_CYIER = 0xfff45011
M167_CYLICR = 0xfff45026
M167_CYTEOIR = 0xfff45085
M167_CYTDR = 0xfff450f8
M167_PCSCCMICR = 0xfff4201d
M167_PCSCCTICR = 0xfff4201e
M167_PCSCCRICR = 0xfff4201f
M167_PCTPIACKR = 0xfff42025
#endif
#if defined (CONFIG_BVME6000)
BVME_SCC_CTRL_A = 0xffb0000b
BVME_SCC_DATA_A = 0xffb0000f
#endif
#if defined(CONFIG_MAC)
L(mac_videobase):
.long 0
L(mac_videodepth):
.long 0
L(mac_dimensions):
.long 0
L(mac_rowbytes):
.long 0
L(mac_sccbase):
.long 0
#endif /* CONFIG_MAC */
#if defined (CONFIG_APOLLO)
LSRB0 = 0x10412
LTHRB0 = 0x10416
LCPUCTRL = 0x10100
#endif
#if defined(CONFIG_HP300)
DCADATA = 0x11
DCALSR = 0x1b
APCIDATA = 0x00
APCILSR = 0x14
L(uartbase):
.long 0
L(uart_scode):
.long -1
#endif
__FINIT
.data
.align 4
availmem:
.long 0
m68k_pgtable_cachemode:
.long 0
m68k_supervisor_cachemode:
.long 0
#if defined(CONFIG_MVME16x)
mvme_bdid:
.long 0,0,0,0,0,0,0,0
#endif
#if defined(CONFIG_Q40)
q40_mem_cptr:
.long 0
L(q40_do_debug):
.long 0
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,085
|
arch/m68k/kernel/sun3-head.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/entry.h>
#include <asm/page.h>
#include <asm/contregs.h>
#include <asm/sun3-head.h>
PSL_HIGHIPL = 0x2700
NBSG = 0x20000
ICACHE_ONLY = 0x00000009
CACHES_OFF = 0x00000008 | actually a clear and disable --m
#define MAS_STACK INT_STACK
ROOT_TABLE_SIZE = 128
PAGESIZE = 8192
SUN3_INVALID_PMEG = 255
.globl bootup_user_stack
.globl bootup_kernel_stack
.globl pg0
.globl swapper_pg_dir
.globl kernel_pmd_table
.globl availmem
.global m68k_pgtable_cachemode
.global kpt
| todo: all these should be in bss!
swapper_pg_dir: .skip 0x2000
pg0: .skip 0x2000
kernel_pmd_table: .skip 0x2000
.globl kernel_pg_dir
.equ kernel_pg_dir,kernel_pmd_table
__HEAD
ENTRY(_stext)
ENTRY(_start)
/*
 * Sun-3 kernel entry point.  Runs with the MMU mappings the boot
 * loader left behind; duplicates those mappings at KERNBASE, then
 * jumps to the high (virtual) copy of itself before calling into C.
 */
/* Firstly, disable interrupts and set up function codes. */
	movew	#PSL_HIGHIPL, %sr
	moveq	#FC_CONTROL, %d0
	movec	%d0, %sfc
	movec	%d0, %dfc
/* Make sure we're in context zero. */
	moveq	#0, %d0
	movsb	%d0, AC_CONTEXT
/* map everything the bootloader left us into high memory, clean up the
   excess later */
	lea	(AC_SEGMAP+0),%a0		| src: segment map at 0
	lea	(AC_SEGMAP+KERNBASE),%a1	| dst: segment map at KERNBASE
1:
	movsb	%a0@, %d1			| copy one PMEG entry low -> high
	movsb	%d1, %a1@
	cmpib	#SUN3_INVALID_PMEG, %d1		| stop at first unmapped segment
	beq	2f
	addl	#NBSG,%a0
	addl	#NBSG,%a1
	jmp	1b
2:
/* Disable caches and jump to high code. */
	moveq	#ICACHE_ONLY,%d0	| Cache disabled until we're ready to enable it
	movc	%d0, %cacr		| is this the right value? (yes --m)
	jmp	1f:l			| long absolute jump into the KERNBASE alias
/* Following code executes at high addresses (0xE000xxx). */
1:	lea	init_task,%curptr	| get initial thread...
	lea	init_thread_union+THREAD_SIZE,%sp | ...and its stack.
/* Point MSP at an invalid page to trap if it's used. --m */
	movl	#(PAGESIZE),%d0
	movc	%d0,%msp
	moveq	#-1,%d0
	movsb	%d0,(AC_SEGMAP+0x0)	| unmap segment 0 (the low alias)
	jbsr	sun3_init
	jbsr	base_trap_init
	jbsr	start_kernel		| never returns in normal operation
	trap	#15			| safety net if start_kernel returns
	.data
	.even
| Kernel page-table pointer, filled in by early Sun-3 init code.
kpt:
	.long 0
| Amount of available memory discovered at boot.
availmem:
	.long 0
| todo: remove next two. --m
is_medusa:
	.long 0
m68k_pgtable_cachemode:
	.long 0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,447
|
arch/m68k/kernel/entry.S
|
/* -*- mode: asm -*-
*
* linux/arch/m68k/kernel/entry.S
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file README.legal in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*
*/
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
*/
/*
* 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
* all pointers that used to be 'current' are now entry
* number 0 in the 'current_set' list.
*
 * 6/05/00 RZ: added writeback completion after return from sighandler
* for 68040
*/
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup
.text
| Fork-family syscall stubs.  Each one saves the extra call-saved
| registers with SAVE_SWITCH_STACK (so the child starts with a complete
| register image), calls the C implementation, then pops the saved
| frame (24 bytes, per the lea adjustments below) before returning to
| the syscall exit path.
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp		| drop the switch-stack frame
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| extra argument: pt_regs pointer
	jbsr	m68k_clone
	lea	%sp@(28),%sp		| frame (24) + pushed argument (4)
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp		| drop the switch-stack frame
	rts
| Signal-return syscalls.  Both hand the C handler two pointers:
| the saved switch-stack frame and the pt_regs frame above it, so the
| handler can restore the full pre-signal register state.
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		| switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_sigreturn
	addql	#8,%sp			| pop the two pointer arguments
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		| switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_rt_sigreturn
	addql	#8,%sp			| pop the two pointer arguments
	RESTORE_SWITCH_STACK
	rts
| Bus-error and generic trap entry points: save the full interrupt
| frame and dispatch to the C handlers with the frame pointer as the
| single argument.
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@		- | stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| arg: previous task, for schedule_tail
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

| First schedule of a kernel thread: finish the context switch, then
| invoke the thread payload with its argument.
ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-		| arg: previous task, for schedule_tail
	jsr	schedule_tail
	movel	%d7,(%sp)		| reuse the slot for the payload argument
	jsr	%a3@			| call the thread function
	addql	#4,%sp
	jra	ret_from_exception
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
#ifdef TRAP_DBG_INTERRUPT
.globl dbginterrupt
| Debug-interrupt entry (Coldfire/no-MMU builds only): save the frame
| and hand it to the C handler.
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@- /* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

| Record the frame top, then tail-call schedule() with
| ret_from_exception pushed as its return address.
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

| Userspace trampolines: re-enter the kernel via the sigreturn /
| rt_sigreturn syscalls after a signal handler finishes.
ENTRY(ret_from_user_signal)
	moveq #__NR_sigreturn,%d0
	trap #0

ENTRY(ret_from_user_rt_signal)
	movel #__NR_rt_sigreturn,%d0
	trap #0
#else
| Syscall entry tracing: let the tracer see (and possibly rewrite) the
| syscall, then re-validate the (possibly changed) syscall number.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0 | reload syscall nr (tracer may change it)
	cmpl	#NR_syscalls,%d0
	jcs	syscall			| in range: dispatch it
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

| Syscall exit tracing: report the completed syscall to the tracer.
do_trace_exit:
	subql	#4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

| Return path after signal delivery; notifies the tracer first when
| the thread is being traced (flags byte tested below is negative).
ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)
	jge	1f			| not traced: skip the trace call
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0			| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception
| Main syscall entry (trap #0).  %d0 carries the syscall number;
| dispatches through sys_call_table and stores the return value back
| into the saved pt_regs.
ENTRY(system_call)
	SAVE_ALL_SYS
	GET_CURRENT(%d1)
	movel	%d1,%a1			| a1 = thread_info, for flag tests
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)	| sign bit set -> tracing active
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys			| number out of range
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0) | indirect call via the table
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0	| any exit-work flags pending?
	jne	syscall_exit_work
1:	RESTORE_ALL
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0			| shift flag bits into C and N
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace	| reschedule, then retry exit work
	jra	schedule
| Common exception/interrupt exit path.  When returning to user mode,
| loops through pending work (signals, delayed trace, rescheduling)
| before the final RESTORE_ALL.
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr
resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0	| low flags byte: pending work?
	jne	exit_work
1:	RESTORE_ALL
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0			| shift flag bit out for the test below
	jne	do_signal_return
	pea	resume_userspace	| reschedule, then retry exit work
	jra	schedule
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: pt_regs pointer
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| drop the dummy return address
	jbra	resume_userspace
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace
/* This is the main interrupt handler for autovector interrupts */
ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0 | extract vector from frame
	subw	#VEC_SPUR,%d0		| rebase so IRQ numbers start at 0
	movel	%sp,%sp@-		| arg 2: pt_regs pointer
	movel	%d0,%sp@-		| put vector # on stack
auto_irqhandler_fixup = . + 2		| patch point for the do_IRQ target
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */
ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0 | extract vector from frame
user_irqvec_fixup = . + 2		| patch point for the vector base
	subw	#VEC_USER,%d0		| rebase user vectors to start at 0
	movel	%sp,%sp@-		| arg 2: pt_regs pointer
	movel	%d0,%sp@-		| put vector # on stack
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */
ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| arg: pt_regs pointer
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception
| Context switch: save the outgoing task's state into its thread
| struct, swap %curptr and the kernel stack, and restore the incoming
| task's state.  Returns on the NEW task's stack.
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */
	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)
	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)
	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)
	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK
	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)
	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU: skip FP save
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: no FP regs to save
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: no FP regs to save
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1
	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr
	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU: skip FP restore
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: only frestore needed
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: only frestore needed
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp
	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK
	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp
	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc
	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr
	rts
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,999
|
arch/m68k/kernel/syscalltable.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com)
*
* Based on older entry.S files, the following copyrights apply:
*
* Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
* Kenneth Albanowski <kjahds@kjahds.com>,
* Copyright (C) 2000 Lineo Inc. (www.lineo.com)
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Linux/m68k support by Hamish Macdonald
*/
#include <linux/linkage.h>
#ifndef CONFIG_MMU
#define sys_mmap2 sys_mmap_pgoff
#endif
.section .rodata
ALIGN
ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_exit
.long __sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_chown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 - old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_old_select
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long sys_old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm for i386 */
.long sys_socketcall
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_ni_syscall
.long sys_ni_syscall /* 110 - iopl for i386 */
.long sys_vhangup
.long sys_ni_syscall /* obsolete idle() syscall */
.long sys_ni_syscall /* vm86old for i386 */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc
.long sys_fsync
.long sys_sigreturn
.long __sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* modify_ldt for i386 */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130 - old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_getpagesize
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
.long sys_ni_syscall /* old nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_lchown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* streams1 */
.long sys_ni_syscall /* streams2 */
.long __sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_chown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_lchown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_getdents64 /* 220 */
.long sys_gettid
.long sys_tkill
.long sys_setxattr
.long sys_lsetxattr
.long sys_fsetxattr /* 225 */
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr
.long sys_llistxattr /* 230 */
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr
.long sys_futex /* 235 */
.long sys_sendfile64
.long sys_mincore
.long sys_madvise
.long sys_fcntl64
.long sys_readahead /* 240 */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents
.long sys_io_submit
.long sys_io_cancel /* 245 */
.long sys_fadvise64
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl /* 250 */
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime /* 255 */
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime /* 260 */
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill /* 265 */
.long sys_utimes
.long sys_fadvise64_64
.long sys_mbind
.long sys_get_mempolicy
.long sys_set_mempolicy /* 270 */
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive
.long sys_mq_notify /* 275 */
.long sys_mq_getsetattr
.long sys_waitid
.long sys_ni_syscall /* for sys_vserver */
.long sys_add_key
.long sys_request_key /* 280 */
.long sys_keyctl
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init
.long sys_inotify_add_watch /* 285 */
.long sys_inotify_rm_watch
.long sys_migrate_pages
.long sys_openat
.long sys_mkdirat
.long sys_mknodat /* 290 */
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat /* 295 */
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat /* 300 */
.long sys_pselect6
.long sys_ppoll
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list /* 305 */
.long sys_splice
.long sys_sync_file_range
.long sys_tee
.long sys_vmsplice
.long sys_move_pages /* 310 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_kexec_load
.long sys_getcpu
.long sys_epoll_pwait /* 315 */
.long sys_utimensat
.long sys_signalfd
.long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate /* 320 */
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_signalfd4
.long sys_eventfd2
.long sys_epoll_create1 /* 325 */
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_get_thread_area
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
.long sys_name_to_handle_at /* 340 */
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
.long sys_setns
.long sys_process_vm_readv /* 345 */
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
.long sys_sched_setattr
.long sys_sched_getattr /* 350 */
.long sys_renameat2
.long sys_getrandom
.long sys_memfd_create
.long sys_bpf
.long sys_execveat /* 355 */
.long sys_socket
.long sys_socketpair
.long sys_bind
.long sys_connect
.long sys_listen /* 360 */
.long sys_accept4
.long sys_getsockopt
.long sys_setsockopt
.long sys_getsockname
.long sys_getpeername /* 365 */
.long sys_sendto
.long sys_sendmsg
.long sys_recvfrom
.long sys_recvmsg
.long sys_shutdown /* 370 */
.long sys_recvmmsg
.long sys_sendmmsg
.long sys_userfaultfd
.long sys_membarrier
.long sys_mlock2 /* 375 */
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2
.long sys_statx
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,877
|
arch/m68k/kernel/relocate_kernel.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/setup.h>
#define MMU_BASE 8 /* MMU flags base in cpu_mmu_flags */
.text
| kexec trampoline: relocate_new_kernel(ptr, start, cpu_mmu_flags).
| Disables the MMU for the running CPU type, walks the kexec
| indirection-page list copying segments into place, flushes the
| caches, and jumps to the new kernel's entry point.
ENTRY(relocate_new_kernel)
	movel %sp@(4),%a0		/* a0 = ptr */
	movel %sp@(8),%a1		/* a1 = start */
	movel %sp@(12),%d1		/* d1 = cpu_mmu_flags */
	movew #PAGE_MASK,%d2		/* d2 = PAGE_MASK */

	/* Disable MMU */

	btst #MMU_BASE + MMUB_68851,%d1
	jeq 3f

1:	/* 68851 or 68030 */

	lea %pc@(.Lcopy),%a4
2:	addl #0x00000000,%a4		/* virt_to_phys() */
					/* immediate is patched at boot via the fixup below */

	.section ".m68k_fixup","aw"
	.long M68K_FIXUP_MEMOFFSET, 2b+2
	.previous

	.chip 68030
	pmove %tc,%d0			/* Disable MMU */
	bclr #7,%d0
	pmove %d0,%tc
	jmp %a4@			/* Jump to physical .Lcopy */
	.chip 68k

3:
	btst #MMU_BASE + MMUB_68030,%d1
	jne 1b

	btst #MMU_BASE + MMUB_68040,%d1
	jeq 6f

4:	/* 68040 or 68060 */

	lea %pc@(.Lcont040),%a4
5:	addl #0x00000000,%a4		/* virt_to_phys() */
					/* immediate is patched at boot via the fixup below */

	.section ".m68k_fixup","aw"
	.long M68K_FIXUP_MEMOFFSET, 5b+2
	.previous

	movel %a4,%d0
	andl #0xff000000,%d0
	orw #0xe020,%d0			/* Map 16 MiB, enable, cacheable */
	.chip 68040
	movec %d0,%itt0			/* transparent window so the jump survives */
	movec %d0,%dtt0
	.chip 68k
	jmp %a4@			/* Jump to physical .Lcont040 */

.Lcont040:
	moveq #0,%d0
	.chip 68040
	movec %d0,%tc			/* Disable MMU */
	movec %d0,%itt0
	movec %d0,%itt1
	movec %d0,%dtt0
	movec %d0,%dtt1
	.chip 68k
	jra .Lcopy

6:
	btst #MMU_BASE + MMUB_68060,%d1
	jne 4b

| Walk the kexec entry list: each longword is a page address tagged in
| its low bits with IND_DESTINATION/IND_INDIRECTION/IND_DONE/IND_SOURCE.
.Lcopy:
	movel %a0@+,%d0			/* d0 = entry = *ptr */
	jeq .Lflush

	btst #2,%d0			/* entry & IND_DONE? */
	jne .Lflush

	btst #1,%d0			/* entry & IND_INDIRECTION? */
	jeq 1f
	andw %d2,%d0
	movel %d0,%a0			/* ptr = entry & PAGE_MASK */
	jra .Lcopy

1:
	btst #0,%d0			/* entry & IND_DESTINATION? */
	jeq 2f
	andw %d2,%d0
	movel %d0,%a2			/* a2 = dst = entry & PAGE_MASK */
	jra .Lcopy

2:
	btst #3,%d0			/* entry & IND_SOURCE? */
	jeq .Lcopy
	andw %d2,%d0
	movel %d0,%a3			/* a3 = src = entry & PAGE_MASK */
	movew #PAGE_SIZE/32 - 1,%d0	/* d0 = PAGE_SIZE/32 - 1 */
3:					/* copy one page, 32 bytes per pass */
	movel %a3@+,%a2@+		/* *dst++ = *src++ */
	movel %a3@+,%a2@+		/* *dst++ = *src++ */
	movel %a3@+,%a2@+		/* *dst++ = *src++ */
	movel %a3@+,%a2@+		/* *dst++ = *src++ */
	movel %a3@+,%a2@+		/* *dst++ = *src++ */
	movel %a3@+,%a2@+		/* *dst++ = *src++ */
	movel %a3@+,%a2@+		/* *dst++ = *src++ */
	movel %a3@+,%a2@+		/* *dst++ = *src++ */
	dbf %d0, 3b
	jra .Lcopy

.Lflush:
	/* Flush all caches */

	btst #CPUB_68020,%d1
	jeq 2f

1:	/* 68020 or 68030 */
	.chip 68030
	movec %cacr,%d0
	orw #0x808,%d0			/* set the clear-I and clear-D bits */
	movec %d0,%cacr
	.chip 68k
	jra .Lreincarnate

2:
	btst #CPUB_68030,%d1
	jne 1b

	btst #CPUB_68040,%d1
	jeq 4f

3:	/* 68040 or 68060 */
	.chip 68040
	nop
	cpusha %bc			/* push and invalidate both caches */
	nop
	cinva %bc
	nop
	.chip 68k
	jra .Lreincarnate

4:
	btst #CPUB_68060,%d1
	jne 3b

.Lreincarnate:
	jmp %a1@			/* enter the new kernel; no return */

relocate_new_kernel_end:

ENTRY(relocate_new_kernel_size)
	.long relocate_new_kernel_end - relocate_new_kernel
|
AirFortressIlikara/LS2K0300-linux-4.19
| 28,388
|
arch/m68k/fpsp040/setox.S
|
|
| setox.sa 3.1 12/10/90
|
| The entry point setox computes the exponential of a value.
| setoxd does the same except the input value is a denormalized
| number. setoxm1 computes exp(X)-1, and setoxm1d computes
| exp(X)-1 for denormalized X.
|
| INPUT
| -----
| Double-extended value in memory location pointed to by address
| register a0.
|
| OUTPUT
| ------
| exp(X) or exp(X)-1 returned in floating-point register fp0.
|
| ACCURACY and MONOTONICITY
| -------------------------
| The returned result is within 0.85 ulps in 64 significant bit, i.e.
| within 0.5001 ulp to 53 bits if the result is subsequently rounded
| to double precision. The result is provably monotonic in double
| precision.
|
| SPEED
| -----
| Two timings are measured, both in the copy-back mode. The
| first one is measured when the function is invoked the first time
| (so the instructions and data are not in cache), and the
| second one is measured when the function is reinvoked at the same
| input argument.
|
| The program setox takes approximately 210/190 cycles for input
| argument X whose magnitude is less than 16380 log2, which
| is the usual situation. For the less common arguments,
| depending on their values, the program may run faster or slower --
| but no worse than 10% slower even in the extreme cases.
|
| The program setoxm1 takes approximately ??? / ??? cycles for input
| argument X, 0.25 <= |X| < 70log2. For |X| < 0.25, it takes
| approximately ??? / ??? cycles. For the less common arguments,
| depending on their values, the program may run faster or slower --
| but no worse than 10% slower even in the extreme cases.
|
| ALGORITHM and IMPLEMENTATION NOTES
| ----------------------------------
|
| setoxd
| ------
| Step 1. Set ans := 1.0
|
| Step 2. Return ans := ans + sign(X)*2^(-126). Exit.
| Notes: This will always generate one exception -- inexact.
|
|
| setox
| -----
|
| Step 1. Filter out extreme cases of input argument.
| 1.1 If |X| >= 2^(-65), go to Step 1.3.
| 1.2 Go to Step 7.
| 1.3 If |X| < 16380 log(2), go to Step 2.
| 1.4 Go to Step 8.
| Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.
| To avoid the use of floating-point comparisons, a
| compact representation of |X| is used. This format is a
| 32-bit integer, the upper (more significant) 16 bits are
| the sign and biased exponent field of |X|; the lower 16
| bits are the 16 most significant fraction (including the
| explicit bit) bits of |X|. Consequently, the comparisons
| in Steps 1.1 and 1.3 can be performed by integer comparison.
| Note also that the constant 16380 log(2) used in Step 1.3
| is also in the compact form. Thus taking the branch
| to Step 2 guarantees |X| < 16380 log(2). There is no harm
| to have a small number of cases where |X| is less than,
| but close to, 16380 log(2) and the branch to Step 9 is
| taken.
|
| Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ).
| 2.1 Set AdjFlag := 0 (indicates the branch 1.3 -> 2 was taken)
| 2.2 N := round-to-nearest-integer( X * 64/log2 ).
| 2.3 Calculate J = N mod 64; so J = 0,1,2,..., or 63.
| 2.4 Calculate M = (N - J)/64; so N = 64M + J.
| 2.5 Calculate the address of the stored value of 2^(J/64).
| 2.6 Create the value Scale = 2^M.
| Notes: The calculation in 2.2 is really performed by
|
| Z := X * constant
| N := round-to-nearest-integer(Z)
|
| where
|
| constant := single-precision( 64/log 2 ).
|
| Using a single-precision constant avoids memory access.
| Another effect of using a single-precision "constant" is
| that the calculated value Z is
|
| Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24).
|
| This error has to be considered later in Steps 3 and 4.
|
| Step 3. Calculate X - N*log2/64.
| 3.1 R := X + N*L1, where L1 := single-precision(-log2/64).
| 3.2 R := R + N*L2, L2 := extended-precision(-log2/64 - L1).
| Notes: a) The way L1 and L2 are chosen ensures L1+L2 approximate
| the value -log2/64 to 88 bits of accuracy.
| b) N*L1 is exact because N is no longer than 22 bits and
| L1 is no longer than 24 bits.
| c) The calculation X+N*L1 is also exact due to cancellation.
| Thus, R is practically X+N(L1+L2) to full 64 bits.
| d) It is important to estimate how large can |R| be after
| Step 3.2.
|
| N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24)
| X*64/log2 (1+eps) = N + f, |f| <= 0.5
| X*64/log2 - N = f - eps*X 64/log2
| X - N*log2/64 = f*log2/64 - eps*X
|
|
| Now |X| <= 16446 log2, thus
|
| |X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64
| <= 0.57 log2/64.
| This bound will be used in Step 4.
|
| Step 4. Approximate exp(R)-1 by a polynomial
| p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
| Notes: a) In order to reduce memory access, the coefficients are
| made as "short" as possible: A1 (which is 1/2), A4 and A5
| are single precision; A2 and A3 are double precision.
| b) Even with the restrictions above,
| |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062.
| Note that 0.0062 is slightly bigger than 0.57 log2/64.
| c) To fully utilize the pipeline, p is separated into
| two independent pieces of roughly equal complexities
| p = [ R + R*S*(A2 + S*A4) ] +
| [ S*(A1 + S*(A3 + S*A5)) ]
| where S = R*R.
|
| Step 5. Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by
| ans := T + ( T*p + t)
| where T and t are the stored values for 2^(J/64).
| Notes: 2^(J/64) is stored as T and t where T+t approximates
| 2^(J/64) to roughly 85 bits; T is in extended precision
| and t is in single precision. Note also that T is rounded
| to 62 bits so that the last two bits of T are zero. The
| reason for such a special form is that T-1, T-2, and T-8
| will all be exact --- a property that will give much
| more accurate computation of the function EXPM1.
|
| Step 6. Reconstruction of exp(X)
| exp(X) = 2^M * 2^(J/64) * exp(R).
| 6.1 If AdjFlag = 0, go to 6.3
| 6.2 ans := ans * AdjScale
| 6.3 Restore the user FPCR
| 6.4 Return ans := ans * Scale. Exit.
| Notes: If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R,
| |M| <= 16380, and Scale = 2^M. Moreover, exp(X) will
| neither overflow nor underflow. If AdjFlag = 1, that
| means that
| X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380.
| Hence, exp(X) may overflow or underflow or neither.
| When that is the case, AdjScale = 2^(M1) where M1 is
| approximately M. Thus 6.2 will never cause over/underflow.
| Possible exception in 6.4 is overflow or underflow.
|              The inexact exception is not generated in 6.4. Although
|              one can argue that the inexact flag should always be
|              raised, simulating that exception would cost more than
|              the flag is worth in practical uses.
|
| Step 7. Return 1 + X.
| 7.1 ans := X
| 7.2 Restore user FPCR.
| 7.3 Return ans := 1 + ans. Exit
| Notes: For non-zero X, the inexact exception will always be
| raised by 7.3. That is the only exception raised by 7.3.
| Note also that we use the FMOVEM instruction to move X
| in Step 7.1 to avoid unnecessary trapping. (Although
| the FMOVEM may not seem relevant since X is normalized,
| the precaution will be useful in the library version of
| this code where the separate entry for denormalized inputs
| will be done away with.)
|
| Step 8. Handle exp(X) where |X| >= 16380log2.
| 8.1 If |X| > 16480 log2, go to Step 9.
| (mimic 2.2 - 2.6)
| 8.2 N := round-to-integer( X * 64/log2 )
| 8.3 Calculate J = N mod 64, J = 0,1,...,63
| 8.4 K := (N-J)/64, M1 := truncate(K/2), M = K-M1, AdjFlag := 1.
| 8.5 Calculate the address of the stored value 2^(J/64).
| 8.6 Create the values Scale = 2^M, AdjScale = 2^M1.
| 8.7 Go to Step 3.
| Notes: Refer to notes for 2.2 - 2.6.
|
| Step 9. Handle exp(X), |X| > 16480 log2.
| 9.1 If X < 0, go to 9.3
| 9.2 ans := Huge, go to 9.4
| 9.3 ans := Tiny.
| 9.4 Restore user FPCR.
| 9.5 Return ans := ans * ans. Exit.
| Notes: Exp(X) will surely overflow or underflow, depending on
| X's sign. "Huge" and "Tiny" are respectively large/tiny
| extended-precision numbers whose square over/underflow
| with an inexact result. Thus, 9.5 always raises the
| inexact together with either overflow or underflow.
|
|
| setoxm1d
| --------
|
| Step 1. Set ans := 0
|
| Step 2. Return ans := X + ans. Exit.
| Notes: This will return X with the appropriate rounding
| precision prescribed by the user FPCR.
|
| setoxm1
| -------
|
| Step 1. Check |X|
| 1.1 If |X| >= 1/4, go to Step 1.3.
| 1.2 Go to Step 7.
| 1.3 If |X| < 70 log(2), go to Step 2.
| 1.4 Go to Step 10.
| Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.
| However, it is conceivable |X| can be small very often
| because EXPM1 is intended to evaluate exp(X)-1 accurately
| when |X| is small. For further details on the comparisons,
| see the notes on Step 1 of setox.
|
| Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ).
| 2.1 N := round-to-nearest-integer( X * 64/log2 ).
| 2.2 Calculate J = N mod 64; so J = 0,1,2,..., or 63.
| 2.3 Calculate M = (N - J)/64; so N = 64M + J.
| 2.4 Calculate the address of the stored value of 2^(J/64).
| 2.5 Create the values Sc = 2^M and OnebySc := -2^(-M).
| Notes: See the notes on Step 2 of setox.
|
| Step 3. Calculate X - N*log2/64.
| 3.1 R := X + N*L1, where L1 := single-precision(-log2/64).
| 3.2 R := R + N*L2, L2 := extended-precision(-log2/64 - L1).
| Notes: Applying the analysis of Step 3 of setox in this case
| shows that |R| <= 0.0055 (note that |X| <= 70 log2 in
| this case).
|
| Step 4. Approximate exp(R)-1 by a polynomial
| p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6)))))
| Notes: a) In order to reduce memory access, the coefficients are
| made as "short" as possible: A1 (which is 1/2), A5 and A6
| are single precision; A2, A3 and A4 are double precision.
| b) Even with the restriction above,
| |p - (exp(R)-1)| < |R| * 2^(-72.7)
| for all |R| <= 0.0055.
| c) To fully utilize the pipeline, p is separated into
| two independent pieces of roughly equal complexity
| p = [ R*S*(A2 + S*(A4 + S*A6)) ] +
| [ R + S*(A1 + S*(A3 + S*A5)) ]
| where S = R*R.
|
| Step 5. Compute 2^(J/64)*p by
| p := T*p
| where T and t are the stored values for 2^(J/64).
| Notes: 2^(J/64) is stored as T and t where T+t approximates
| 2^(J/64) to roughly 85 bits; T is in extended precision
| and t is in single precision. Note also that T is rounded
| to 62 bits so that the last two bits of T are zero. The
| reason for such a special form is that T-1, T-2, and T-8
| will all be exact --- a property that will be exploited
| in Step 6 below. The total relative error in p is no
| bigger than 2^(-67.7) compared to the final result.
|
| Step 6. Reconstruction of exp(X)-1
| exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ).
| 6.1 If M <= 63, go to Step 6.3.
| 6.2 ans := T + (p + (t + OnebySc)). Go to 6.6
| 6.3 If M >= -3, go to 6.5.
| 6.4 ans := (T + (p + t)) + OnebySc. Go to 6.6
| 6.5 ans := (T + OnebySc) + (p + t).
| 6.6 Restore user FPCR.
| 6.7 Return ans := Sc * ans. Exit.
| Notes: The various arrangements of the expressions give accurate
| evaluations.
|
| Step 7. exp(X)-1 for |X| < 1/4.
| 7.1 If |X| >= 2^(-65), go to Step 9.
| 7.2 Go to Step 8.
|
| Step 8. Calculate exp(X)-1, |X| < 2^(-65).
| 8.1 If |X| < 2^(-16312), goto 8.3
| 8.2 Restore FPCR; return ans := X - 2^(-16382). Exit.
| 8.3 X := X * 2^(140).
| 8.4 Restore FPCR; ans := ans - 2^(-16382).
| Return ans := ans*2^(140). Exit
| Notes: The idea is to return "X - tiny" under the user
| precision and rounding modes. To avoid unnecessary
| inefficiency, we stay away from denormalized numbers the
| best we can. For |X| >= 2^(-16312), the straightforward
| 8.2 generates the inexact exception as the case warrants.
|
| Step 9. Calculate exp(X)-1, |X| < 1/4, by a polynomial
| p = X + X*X*(B1 + X*(B2 + ... + X*B12))
| Notes: a) In order to reduce memory access, the coefficients are
| made as "short" as possible: B1 (which is 1/2), B9 to B12
| are single precision; B3 to B8 are double precision; and
| B2 is double extended.
| b) Even with the restriction above,
| |p - (exp(X)-1)| < |X| 2^(-70.6)
| for all |X| <= 0.251.
| Note that 0.251 is slightly bigger than 1/4.
| c) To fully preserve accuracy, the polynomial is computed
| as X + ( S*B1 + Q ) where S = X*X and
| Q = X*S*(B2 + X*(B3 + ... + X*B12))
| d) To fully utilize the pipeline, Q is separated into
| two independent pieces of roughly equal complexity
| Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] +
| [ S*S*(B3 + S*(B5 + ... + S*B11)) ]
|
| Step 10. Calculate exp(X)-1 for |X| >= 70 log 2.
| 10.1 If X >= 70log2 , exp(X) - 1 = exp(X) for all practical
| purposes. Therefore, go to Step 1 of setox.
| 10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical purposes.
| ans := -1
| Restore user FPCR
| Return ans := ans + 2^(-126). Exit.
| Notes: 10.2 will always create an inexact and return -1 + tiny
| in the user rounding precision and mode.
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|setox idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
L2: .long 0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
EXPA3: .long 0x3FA55555,0x55554431
EXPA2: .long 0x3FC55555,0x55554018
HUGE: .long 0x7FFE0000,0xFFFFFFFF,0xFFFFFFFF,0x00000000
TINY: .long 0x00010000,0xFFFFFFFF,0xFFFFFFFF,0x00000000
EM1A4: .long 0x3F811111,0x11174385
EM1A3: .long 0x3FA55555,0x55554F5A
EM1A2: .long 0x3FC55555,0x55555555,0x00000000,0x00000000
EM1B8: .long 0x3EC71DE3,0xA5774682
EM1B7: .long 0x3EFA01A0,0x19D7CB68
EM1B6: .long 0x3F2A01A0,0x1A019DF3
EM1B5: .long 0x3F56C16C,0x16C170E2
EM1B4: .long 0x3F811111,0x11111111
EM1B3: .long 0x3FA55555,0x55555555
EM1B2: .long 0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
.long 0x00000000
TWO140: .long 0x48B00000,0x00000000
TWON140: .long 0x37300000,0x00000000
EXPTBL:
.long 0x3FFF0000,0x80000000,0x00000000,0x00000000
.long 0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
.long 0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
.long 0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
.long 0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
.long 0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
.long 0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
.long 0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
.long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
.long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
.long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
.long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
.long 0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
.long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
.long 0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
.long 0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
.long 0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
.long 0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
.long 0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
.long 0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
.long 0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
.long 0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
.long 0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
.long 0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
.long 0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
.long 0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
.long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
.long 0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
.long 0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
.long 0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
.long 0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
.long 0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
.long 0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
.long 0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
.long 0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
.long 0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
.long 0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
.long 0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
.long 0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
.long 0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
.long 0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
.long 0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
.long 0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
.long 0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
.long 0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
.long 0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
.long 0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
.long 0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
.long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
.long 0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
.long 0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
.long 0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
.long 0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
.long 0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
.long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
.long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
.long 0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
.long 0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
.long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
.long 0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
.long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
.long 0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
.long 0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
.long 0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
.set ADJFLAG,L_SCR2
.set SCALE,FP_SCR1
.set ADJSCALE,FP_SCR2
.set SC,FP_SCR3
.set ONEBYSC,FP_SCR4
| xref t_frcinx
|xref t_extdnrm
|xref t_unfl
|xref t_ovfl
.global setoxd
setoxd:
|--entry point for EXP(X), X is denormalized
movel (%a0),%d0
andil #0x80000000,%d0
oril #0x00800000,%d0 | ...sign(X)*2^(-126)
movel %d0,-(%sp)
fmoves #0x3F800000,%fp0
fmovel %d1,%fpcr
fadds (%sp)+,%fp0
bra t_frcinx
.global setox
setox:
|--entry point for EXP(X), here X is finite, non-zero, and not NaN's
|--Step 1.
movel (%a0),%d0 | ...load part of input X
andil #0x7FFF0000,%d0 | ...biased expo. of X
cmpil #0x3FBE0000,%d0 | ...2^(-65)
bges EXPC1 | ...normal case
bra EXPSM
EXPC1:
|--The case |X| >= 2^(-65)
movew 4(%a0),%d0 | ...expo. and partial sig. of |X|
cmpil #0x400CB167,%d0 | ...16380 log2 trunc. 16 bits
blts EXPMAIN | ...normal case
bra EXPBIG
EXPMAIN:
|--Step 2.
|--This is the normal branch: 2^(-65) <= |X| < 16380 log2.
fmovex (%a0),%fp0 | ...load input from (a0)
fmovex %fp0,%fp1
fmuls #0x42B8AA3B,%fp0 | ...64/log2 * X
fmovemx %fp2-%fp2/%fp3,-(%a7) | ...save fp2
movel #0,ADJFLAG(%a6)
fmovel %fp0,%d0 | ...N = int( X * 64/log2 )
lea EXPTBL,%a1
fmovel %d0,%fp0 | ...convert to floating-format
movel %d0,L_SCR1(%a6) | ...save N temporarily
andil #0x3F,%d0 | ...D0 is J = N mod 64
lsll #4,%d0
addal %d0,%a1 | ...address of 2^(J/64)
movel L_SCR1(%a6),%d0
asrl #6,%d0 | ...D0 is M
addiw #0x3FFF,%d0 | ...biased expo. of 2^(M)
movew L2,L_SCR1(%a6) | ...prefetch L2, no need in CB
EXPCONT1:
|--Step 3.
|--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
|--a0 points to 2^(J/64), D0 is biased expo. of 2^(M)
fmovex %fp0,%fp2
fmuls #0xBC317218,%fp0 | ...N * L1, L1 = lead(-log2/64)
fmulx L2,%fp2 | ...N * L2, L1+L2 = -log2/64
faddx %fp1,%fp0 | ...X + N*L1
faddx %fp2,%fp0 | ...fp0 is R, reduced arg.
| MOVE.W #$3FA5,EXPA3 ...load EXPA3 in cache
|--Step 4.
|--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
|-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
|--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
|--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
fmovex %fp0,%fp1
fmulx %fp1,%fp1 | ...fp1 IS S = R*R
fmoves #0x3AB60B70,%fp2 | ...fp2 IS A5
| MOVE.W #0,2(%a1) ...load 2^(J/64) in cache
fmulx %fp1,%fp2 | ...fp2 IS S*A5
fmovex %fp1,%fp3
fmuls #0x3C088895,%fp3 | ...fp3 IS S*A4
faddd EXPA3,%fp2 | ...fp2 IS A3+S*A5
faddd EXPA2,%fp3 | ...fp3 IS A2+S*A4
fmulx %fp1,%fp2 | ...fp2 IS S*(A3+S*A5)
movew %d0,SCALE(%a6) | ...SCALE is 2^(M) in extended
clrw SCALE+2(%a6)
movel #0x80000000,SCALE+4(%a6)
clrl SCALE+8(%a6)
fmulx %fp1,%fp3 | ...fp3 IS S*(A2+S*A4)
fadds #0x3F000000,%fp2 | ...fp2 IS A1+S*(A3+S*A5)
fmulx %fp0,%fp3 | ...fp3 IS R*S*(A2+S*A4)
fmulx %fp1,%fp2 | ...fp2 IS S*(A1+S*(A3+S*A5))
faddx %fp3,%fp0 | ...fp0 IS R+R*S*(A2+S*A4),
| ...fp3 released
fmovex (%a1)+,%fp1 | ...fp1 is lead. pt. of 2^(J/64)
faddx %fp2,%fp0 | ...fp0 is EXP(R) - 1
| ...fp2 released
|--Step 5
|--final reconstruction process
|--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
fmulx %fp1,%fp0 | ...2^(J/64)*(Exp(R)-1)
fmovemx (%a7)+,%fp2-%fp2/%fp3 | ...fp2 restored
fadds (%a1),%fp0 | ...accurate 2^(J/64)
faddx %fp1,%fp0 | ...2^(J/64) + 2^(J/64)*...
movel ADJFLAG(%a6),%d0
|--Step 6
tstl %d0
beqs NORMAL
ADJUST:
fmulx ADJSCALE(%a6),%fp0
NORMAL:
fmovel %d1,%FPCR | ...restore user FPCR
fmulx SCALE(%a6),%fp0 | ...multiply 2^(M)
bra t_frcinx
EXPSM:
|--Step 7
fmovemx (%a0),%fp0-%fp0 | ...in case X is denormalized
fmovel %d1,%FPCR
fadds #0x3F800000,%fp0 | ...1+X in user mode
bra t_frcinx
EXPBIG:
|--Step 8
cmpil #0x400CB27C,%d0 | ...16480 log2
bgts EXP2BIG
|--Steps 8.2 -- 8.6
fmovex (%a0),%fp0 | ...load input from (a0)
fmovex %fp0,%fp1
fmuls #0x42B8AA3B,%fp0 | ...64/log2 * X
fmovemx %fp2-%fp2/%fp3,-(%a7) | ...save fp2
movel #1,ADJFLAG(%a6)
fmovel %fp0,%d0 | ...N = int( X * 64/log2 )
lea EXPTBL,%a1
fmovel %d0,%fp0 | ...convert to floating-format
movel %d0,L_SCR1(%a6) | ...save N temporarily
andil #0x3F,%d0 | ...D0 is J = N mod 64
lsll #4,%d0
addal %d0,%a1 | ...address of 2^(J/64)
movel L_SCR1(%a6),%d0
asrl #6,%d0 | ...D0 is K
movel %d0,L_SCR1(%a6) | ...save K temporarily
asrl #1,%d0 | ...D0 is M1
subl %d0,L_SCR1(%a6) | ...a1 is M
addiw #0x3FFF,%d0 | ...biased expo. of 2^(M1)
movew %d0,ADJSCALE(%a6) | ...ADJSCALE := 2^(M1)
clrw ADJSCALE+2(%a6)
movel #0x80000000,ADJSCALE+4(%a6)
clrl ADJSCALE+8(%a6)
movel L_SCR1(%a6),%d0 | ...D0 is M
addiw #0x3FFF,%d0 | ...biased expo. of 2^(M)
bra EXPCONT1 | ...go back to Step 3
EXP2BIG:
|--Step 9
fmovel %d1,%FPCR
movel (%a0),%d0
bclrb #sign_bit,(%a0) | ...setox always returns positive
cmpil #0,%d0
blt t_unfl
bra t_ovfl
.global setoxm1d
setoxm1d:
|--entry point for EXPM1(X), here X is denormalized
|--Step 0.
bra t_extdnrm
.global setoxm1
setoxm1:
|--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
|--Step 1.
|--Step 1.1
movel (%a0),%d0 | ...load part of input X
andil #0x7FFF0000,%d0 | ...biased expo. of X
cmpil #0x3FFD0000,%d0 | ...1/4
bges EM1CON1 | ...|X| >= 1/4
bra EM1SM
EM1CON1:
|--Step 1.3
|--The case |X| >= 1/4
movew 4(%a0),%d0 | ...expo. and partial sig. of |X|
cmpil #0x4004C215,%d0 | ...70log2 rounded up to 16 bits
bles EM1MAIN | ...1/4 <= |X| <= 70log2
bra EM1BIG
EM1MAIN:
|--Step 2.
|--This is the case: 1/4 <= |X| <= 70 log2.
fmovex (%a0),%fp0 | ...load input from (a0)
fmovex %fp0,%fp1
fmuls #0x42B8AA3B,%fp0 | ...64/log2 * X
fmovemx %fp2-%fp2/%fp3,-(%a7) | ...save fp2
| MOVE.W #$3F81,EM1A4 ...prefetch in CB mode
fmovel %fp0,%d0 | ...N = int( X * 64/log2 )
lea EXPTBL,%a1
fmovel %d0,%fp0 | ...convert to floating-format
movel %d0,L_SCR1(%a6) | ...save N temporarily
andil #0x3F,%d0 | ...D0 is J = N mod 64
lsll #4,%d0
addal %d0,%a1 | ...address of 2^(J/64)
movel L_SCR1(%a6),%d0
asrl #6,%d0 | ...D0 is M
movel %d0,L_SCR1(%a6) | ...save a copy of M
| MOVE.W #$3FDC,L2 ...prefetch L2 in CB mode
|--Step 3.
|--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
|--a0 points to 2^(J/64), D0 and a1 both contain M
fmovex %fp0,%fp2
fmuls #0xBC317218,%fp0 | ...N * L1, L1 = lead(-log2/64)
fmulx L2,%fp2 | ...N * L2, L1+L2 = -log2/64
faddx %fp1,%fp0 | ...X + N*L1
faddx %fp2,%fp0 | ...fp0 is R, reduced arg.
| MOVE.W #$3FC5,EM1A2 ...load EM1A2 in cache
addiw #0x3FFF,%d0 | ...D0 is biased expo. of 2^M
|--Step 4.
|--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
|-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
|--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
|--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
fmovex %fp0,%fp1
fmulx %fp1,%fp1 | ...fp1 IS S = R*R
fmoves #0x3950097B,%fp2 | ...fp2 IS a6
| MOVE.W #0,2(%a1) ...load 2^(J/64) in cache
fmulx %fp1,%fp2 | ...fp2 IS S*A6
fmovex %fp1,%fp3
fmuls #0x3AB60B6A,%fp3 | ...fp3 IS S*A5
faddd EM1A4,%fp2 | ...fp2 IS A4+S*A6
faddd EM1A3,%fp3 | ...fp3 IS A3+S*A5
movew %d0,SC(%a6) | ...SC is 2^(M) in extended
clrw SC+2(%a6)
movel #0x80000000,SC+4(%a6)
clrl SC+8(%a6)
fmulx %fp1,%fp2 | ...fp2 IS S*(A4+S*A6)
movel L_SCR1(%a6),%d0 | ...D0 is M
negw %d0 | ...D0 is -M
fmulx %fp1,%fp3 | ...fp3 IS S*(A3+S*A5)
addiw #0x3FFF,%d0 | ...biased expo. of 2^(-M)
faddd EM1A2,%fp2 | ...fp2 IS A2+S*(A4+S*A6)
fadds #0x3F000000,%fp3 | ...fp3 IS A1+S*(A3+S*A5)
fmulx %fp1,%fp2 | ...fp2 IS S*(A2+S*(A4+S*A6))
oriw #0x8000,%d0 | ...signed/expo. of -2^(-M)
movew %d0,ONEBYSC(%a6) | ...OnebySc is -2^(-M)
clrw ONEBYSC+2(%a6)
movel #0x80000000,ONEBYSC+4(%a6)
clrl ONEBYSC+8(%a6)
fmulx %fp3,%fp1 | ...fp1 IS S*(A1+S*(A3+S*A5))
| ...fp3 released
fmulx %fp0,%fp2 | ...fp2 IS R*S*(A2+S*(A4+S*A6))
faddx %fp1,%fp0 | ...fp0 IS R+S*(A1+S*(A3+S*A5))
| ...fp1 released
faddx %fp2,%fp0 | ...fp0 IS EXP(R)-1
| ...fp2 released
fmovemx (%a7)+,%fp2-%fp2/%fp3 | ...fp2 restored
|--Step 5
|--Compute 2^(J/64)*p
fmulx (%a1),%fp0 | ...2^(J/64)*(Exp(R)-1)
|--Step 6
|--Step 6.1
movel L_SCR1(%a6),%d0 | ...retrieve M
cmpil #63,%d0
bles MLE63
|--Step 6.2 M >= 64
fmoves 12(%a1),%fp1 | ...fp1 is t
faddx ONEBYSC(%a6),%fp1 | ...fp1 is t+OnebySc
faddx %fp1,%fp0 | ...p+(t+OnebySc), fp1 released
faddx (%a1),%fp0 | ...T+(p+(t+OnebySc))
bras EM1SCALE
MLE63:
|--Step 6.3 M <= 63
cmpil #-3,%d0
bges MGEN3
MLTN3:
|--Step 6.4 M <= -4
fadds 12(%a1),%fp0 | ...p+t
faddx (%a1),%fp0 | ...T+(p+t)
faddx ONEBYSC(%a6),%fp0 | ...OnebySc + (T+(p+t))
bras EM1SCALE
MGEN3:
|--Step 6.5 -3 <= M <= 63
fmovex (%a1)+,%fp1 | ...fp1 is T
fadds (%a1),%fp0 | ...fp0 is p+t
faddx ONEBYSC(%a6),%fp1 | ...fp1 is T+OnebySc
faddx %fp1,%fp0 | ...(T+OnebySc)+(p+t)
EM1SCALE:
|--Step 6.6
fmovel %d1,%FPCR
fmulx SC(%a6),%fp0
bra t_frcinx
EM1SM:
|--Step 7 |X| < 1/4.
cmpil #0x3FBE0000,%d0 | ...2^(-65)
bges EM1POLY
EM1TINY:
|--Step 8 |X| < 2^(-65)
cmpil #0x00330000,%d0 | ...2^(-16312)
blts EM12TINY
|--Step 8.2
movel #0x80010000,SC(%a6) | ...SC is -2^(-16382)
movel #0x80000000,SC+4(%a6)
clrl SC+8(%a6)
fmovex (%a0),%fp0
fmovel %d1,%FPCR
faddx SC(%a6),%fp0
bra t_frcinx
EM12TINY:
|--Step 8.3
fmovex (%a0),%fp0
fmuld TWO140,%fp0
movel #0x80010000,SC(%a6)
movel #0x80000000,SC+4(%a6)
clrl SC+8(%a6)
faddx SC(%a6),%fp0
fmovel %d1,%FPCR
fmuld TWON140,%fp0
bra t_frcinx
EM1POLY:
|--Step 9 exp(X)-1 by a simple polynomial
fmovex (%a0),%fp0 | ...fp0 is X
fmulx %fp0,%fp0 | ...fp0 is S := X*X
fmovemx %fp2-%fp2/%fp3,-(%a7) | ...save fp2
fmoves #0x2F30CAA8,%fp1 | ...fp1 is B12
fmulx %fp0,%fp1 | ...fp1 is S*B12
fmoves #0x310F8290,%fp2 | ...fp2 is B11
fadds #0x32D73220,%fp1 | ...fp1 is B10+S*B12
fmulx %fp0,%fp2 | ...fp2 is S*B11
fmulx %fp0,%fp1 | ...fp1 is S*(B10 + ...
fadds #0x3493F281,%fp2 | ...fp2 is B9+S*...
faddd EM1B8,%fp1 | ...fp1 is B8+S*...
fmulx %fp0,%fp2 | ...fp2 is S*(B9+...
fmulx %fp0,%fp1 | ...fp1 is S*(B8+...
faddd EM1B7,%fp2 | ...fp2 is B7+S*...
faddd EM1B6,%fp1 | ...fp1 is B6+S*...
fmulx %fp0,%fp2 | ...fp2 is S*(B7+...
fmulx %fp0,%fp1 | ...fp1 is S*(B6+...
faddd EM1B5,%fp2 | ...fp2 is B5+S*...
faddd EM1B4,%fp1 | ...fp1 is B4+S*...
fmulx %fp0,%fp2 | ...fp2 is S*(B5+...
fmulx %fp0,%fp1 | ...fp1 is S*(B4+...
faddd EM1B3,%fp2 | ...fp2 is B3+S*...
faddx EM1B2,%fp1 | ...fp1 is B2+S*...
fmulx %fp0,%fp2 | ...fp2 is S*(B3+...
fmulx %fp0,%fp1 | ...fp1 is S*(B2+...
fmulx %fp0,%fp2 | ...fp2 is S*S*(B3+...)
fmulx (%a0),%fp1 | ...fp1 is X*S*(B2...
fmuls #0x3F000000,%fp0 | ...fp0 is S*B1
faddx %fp2,%fp1 | ...fp1 is Q
| ...fp2 released
fmovemx (%a7)+,%fp2-%fp2/%fp3 | ...fp2 restored
faddx %fp1,%fp0 | ...fp0 is S*B1+Q
| ...fp1 released
fmovel %d1,%FPCR
faddx (%a0),%fp0
bra t_frcinx
EM1BIG:
|--Step 10 |X| > 70 log2
movel (%a0),%d0
cmpil #0,%d0
bgt EXPC1
|--Step 10.2
fmoves #0xBF800000,%fp0 | ...fp0 is -1
fmovel %d1,%FPCR
fadds #0x00800000,%fp0 | ...-1 + 2^(-126)
bra t_frcinx
|end
|
| file boundary: AirFortressIlikara/LS2K0300-linux-4.19, 12,262 bytes
| arch/m68k/fpsp040/stwotox.S
|
|
| stwotox.sa 3.1 12/10/90
|
| stwotox --- 2**X
| stwotoxd --- 2**X for denormalized X
| stentox --- 10**X
| stentoxd --- 10**X for denormalized X
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The function values are returned in Fp0.
|
| Accuracy and Monotonicity: The returned result is within 2 ulps in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program stwotox takes approximately 190 cycles and the
| program stentox takes approximately 200 cycles.
|
| Algorithm:
|
| twotox
| 1. If |X| > 16480, go to ExpBig.
|
| 2. If |X| < 2**(-70), go to ExpSm.
|
| 3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore
| decompose N as
| N = 64(M + M') + j, j = 0,1,2,...,63.
|
| 4. Overwrite r := r * log2. Then
| 2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).
| Go to expr to compute that expression.
|
| tentox
| 1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig.
|
| 2. If |X| < 2**(-70), go to ExpSm.
|
| 3. Set y := X*log_2(10)*64 (base 2 log of 10). Set
| N := round-to-int(y). Decompose N as
| N = 64(M + M') + j, j = 0,1,2,...,63.
|
| 4. Define r as
| r := ((X - N*L1)-N*L2) * L10
| where L1, L2 are the leading and trailing parts of log_10(2)/64
| and L10 is the natural log of 10. Then
| 10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).
| Go to expr to compute that expression.
|
| expr
| 1. Fetch 2**(j/64) from table as Fact1 and Fact2.
|
| 2. Overwrite Fact1 and Fact2 by
| Fact1 := 2**(M) * Fact1
| Fact2 := 2**(M) * Fact2
| Thus Fact1 + Fact2 = 2**(M) * 2**(j/64).
|
| 3. Calculate P where 1 + P approximates exp(r):
| P = r + r*r*(A1+r*(A2+...+r*A5)).
|
| 4. Let AdjFact := 2**(M'). Return
| AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ).
| Exit.
|
| ExpBig
| 1. Generate overflow by Huge * Huge if X > 0; otherwise, generate
| underflow by Tiny * Tiny.
|
| ExpSm
| 1. Return 1 + X.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|STWOTOX idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
BOUNDS1: .long 0x3FB98000,0x400D80C0 | ... 2^(-70),16480
BOUNDS2: .long 0x3FB98000,0x400B9B07 | ... 2^(-70),16480 LOG2/LOG10
L2TEN64: .long 0x406A934F,0x0979A371 | ... 64LOG10/LOG2
L10TWO1: .long 0x3F734413,0x509F8000 | ... LOG2/64LOG10
L10TWO2: .long 0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
LOG10: .long 0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
LOG2: .long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
EXPA5: .long 0x3F56C16D,0x6F7BD0B2
EXPA4: .long 0x3F811112,0x302C712C
EXPA3: .long 0x3FA55555,0x55554CC1
EXPA2: .long 0x3FC55555,0x55554A54
EXPA1: .long 0x3FE00000,0x00000000,0x00000000,0x00000000
HUGE: .long 0x7FFE0000,0xFFFFFFFF,0xFFFFFFFF,0x00000000
TINY: .long 0x00010000,0xFFFFFFFF,0xFFFFFFFF,0x00000000
EXPTBL:
.long 0x3FFF0000,0x80000000,0x00000000,0x3F738000
.long 0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
.long 0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
.long 0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
.long 0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
.long 0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
.long 0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
.long 0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
.long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
.long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
.long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
.long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
.long 0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
.long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
.long 0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
.long 0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
.long 0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
.long 0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
.long 0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
.long 0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
.long 0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
.long 0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
.long 0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
.long 0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
.long 0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
.long 0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
.long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
.long 0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
.long 0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
.long 0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
.long 0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
.long 0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
.long 0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
.long 0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
.long 0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
.long 0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
.long 0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
.long 0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
.long 0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
.long 0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
.long 0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
.long 0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
.long 0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
.long 0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
.long 0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
.long 0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
.long 0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
.long 0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
.long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
.long 0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
.long 0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
.long 0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
.long 0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
.long 0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
.long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
.long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
.long 0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
.long 0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
.long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
.long 0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
.long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
.long 0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
.long 0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
.long 0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
.set N,L_SCR1
.set X,FP_SCR1
.set XDCARE,X+2
.set XFRAC,X+4
.set ADJFACT,FP_SCR2
.set FACT1,FP_SCR3
.set FACT1HI,FACT1+4
.set FACT1LOW,FACT1+8
.set FACT2,FP_SCR4
.set FACT2HI,FACT2+4
.set FACT2LOW,FACT2+8
| xref t_unfl
|xref t_ovfl
|xref t_frcinx
.global stwotoxd
stwotoxd:
|--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
fmovel %d1,%fpcr | ...set user's rounding mode/precision
fmoves #0x3F800000,%fp0 | ...RETURN 1 + X
movel (%a0),%d0
orl #0x00800001,%d0
fadds %d0,%fp0
bra t_frcinx
.global stwotox
stwotox:
|--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
fmovemx (%a0),%fp0-%fp0 | ...LOAD INPUT, do not set cc's
movel (%a0),%d0
movew 4(%a0),%d0
fmovex %fp0,X(%a6)
andil #0x7FFFFFFF,%d0
cmpil #0x3FB98000,%d0 | ...|X| >= 2**(-70)?
bges TWOOK1
bra EXPBORS
TWOOK1:
cmpil #0x400D80C0,%d0 | ...|X| > 16480?
bles TWOMAIN
bra EXPBORS
TWOMAIN:
|--USUAL CASE, 2^(-70) <= |X| <= 16480
fmovex %fp0,%fp1
fmuls #0x42800000,%fp1 | ...64 * X
fmovel %fp1,N(%a6) | ...N = ROUND-TO-INT(64 X)
movel %d2,-(%sp)
lea EXPTBL,%a1 | ...LOAD ADDRESS OF TABLE OF 2^(J/64)
fmovel N(%a6),%fp1 | ...N --> FLOATING FMT
movel N(%a6),%d0
movel %d0,%d2
andil #0x3F,%d0 | ...D0 IS J
asll #4,%d0 | ...DISPLACEMENT FOR 2^(J/64)
addal %d0,%a1 | ...ADDRESS FOR 2^(J/64)
asrl #6,%d2 | ...d2 IS L, N = 64L + J
movel %d2,%d0
asrl #1,%d0 | ...D0 IS M
subl %d0,%d2 | ...d2 IS M', N = 64(M+M') + J
addil #0x3FFF,%d2
movew %d2,ADJFACT(%a6) | ...ADJFACT IS 2^(M')
movel (%sp)+,%d2
|--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
|--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
|--ADJFACT = 2^(M').
|--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
fmuls #0x3C800000,%fp1 | ...(1/64)*N
movel (%a1)+,FACT1(%a6)
movel (%a1)+,FACT1HI(%a6)
movel (%a1)+,FACT1LOW(%a6)
movew (%a1)+,FACT2(%a6)
clrw FACT2+2(%a6)
fsubx %fp1,%fp0 | ...X - (1/64)*INT(64 X)
movew (%a1)+,FACT2HI(%a6)
clrw FACT2HI+2(%a6)
clrl FACT2LOW(%a6)
addw %d0,FACT1(%a6)
fmulx LOG2,%fp0 | ...FP0 IS R
addw %d0,FACT2(%a6)
bra expr
EXPBORS:
|--FPCR, D0 SAVED
cmpil #0x3FFF8000,%d0
bgts EXPBIG
EXPSM:
|--|X| IS SMALL, RETURN 1 + X
fmovel %d1,%FPCR |restore users exceptions
fadds #0x3F800000,%fp0 | ...RETURN 1 + X
bra t_frcinx
EXPBIG:
|--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
|--REGISTERS SAVE SO FAR ARE FPCR AND D0
movel X(%a6),%d0
cmpil #0,%d0
blts EXPNEG
bclrb #7,(%a0) |t_ovfl expects positive value
bra t_ovfl
EXPNEG:
bclrb #7,(%a0) |t_unfl expects positive value
bra t_unfl
.global stentoxd
stentoxd:
|--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
fmovel %d1,%fpcr | ...set user's rounding mode/precision
fmoves #0x3F800000,%fp0 | ...RETURN 1 + X
movel (%a0),%d0
orl #0x00800001,%d0
fadds %d0,%fp0
bra t_frcinx
.global stentox
stentox:
|--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
fmovemx (%a0),%fp0-%fp0 | ...LOAD INPUT, do not set cc's
movel (%a0),%d0
movew 4(%a0),%d0
fmovex %fp0,X(%a6)
andil #0x7FFFFFFF,%d0
cmpil #0x3FB98000,%d0 | ...|X| >= 2**(-70)?
bges TENOK1
bra EXPBORS
TENOK1:
cmpil #0x400B9B07,%d0 | ...|X| <= 16480*log2/log10 ?
bles TENMAIN
bra EXPBORS
TENMAIN:
|--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
fmovex %fp0,%fp1
fmuld L2TEN64,%fp1 | ...X*64*LOG10/LOG2
fmovel %fp1,N(%a6) | ...N=INT(X*64*LOG10/LOG2)
movel %d2,-(%sp)
lea EXPTBL,%a1 | ...LOAD ADDRESS OF TABLE OF 2^(J/64)
fmovel N(%a6),%fp1 | ...N --> FLOATING FMT
movel N(%a6),%d0
movel %d0,%d2
andil #0x3F,%d0 | ...D0 IS J
asll #4,%d0 | ...DISPLACEMENT FOR 2^(J/64)
addal %d0,%a1 | ...ADDRESS FOR 2^(J/64)
asrl #6,%d2 | ...d2 IS L, N = 64L + J
movel %d2,%d0
asrl #1,%d0 | ...D0 IS M
subl %d0,%d2 | ...d2 IS M', N = 64(M+M') + J
addil #0x3FFF,%d2
movew %d2,ADJFACT(%a6) | ...ADJFACT IS 2^(M')
movel (%sp)+,%d2
|--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
|--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
|--ADJFACT = 2^(M').
|--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
fmovex %fp1,%fp2
fmuld L10TWO1,%fp1 | ...N*(LOG2/64LOG10)_LEAD
movel (%a1)+,FACT1(%a6)
fmulx L10TWO2,%fp2 | ...N*(LOG2/64LOG10)_TRAIL
movel (%a1)+,FACT1HI(%a6)
movel (%a1)+,FACT1LOW(%a6)
fsubx %fp1,%fp0 | ...X - N L_LEAD
movew (%a1)+,FACT2(%a6)
fsubx %fp2,%fp0 | ...X - N L_TRAIL
clrw FACT2+2(%a6)
movew (%a1)+,FACT2HI(%a6)
clrw FACT2HI+2(%a6)
clrl FACT2LOW(%a6)
fmulx LOG10,%fp0 | ...FP0 IS R
addw %d0,FACT1(%a6)
addw %d0,FACT2(%a6)
expr:
|--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
|--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
|--FP0 IS R. THE FOLLOWING CODE COMPUTES
|-- 2**(M'+M) * 2**(J/64) * EXP(R)
fmovex %fp0,%fp1
fmulx %fp1,%fp1 | ...FP1 IS S = R*R
fmoved EXPA5,%fp2 | ...FP2 IS A5
fmoved EXPA4,%fp3 | ...FP3 IS A4
fmulx %fp1,%fp2 | ...FP2 IS S*A5
fmulx %fp1,%fp3 | ...FP3 IS S*A4
faddd EXPA3,%fp2 | ...FP2 IS A3+S*A5
faddd EXPA2,%fp3 | ...FP3 IS A2+S*A4
fmulx %fp1,%fp2 | ...FP2 IS S*(A3+S*A5)
fmulx %fp1,%fp3 | ...FP3 IS S*(A2+S*A4)
faddd EXPA1,%fp2 | ...FP2 IS A1+S*(A3+S*A5)
fmulx %fp0,%fp3 | ...FP3 IS R*S*(A2+S*A4)
fmulx %fp1,%fp2 | ...FP2 IS S*(A1+S*(A3+S*A5))
faddx %fp3,%fp0 | ...FP0 IS R+R*S*(A2+S*A4)
faddx %fp2,%fp0 | ...FP0 IS EXP(R) - 1
|--FINAL RECONSTRUCTION PROCESS
|--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1) - (1 OR 0)
fmulx FACT1(%a6),%fp0
faddx FACT2(%a6),%fp0
faddx FACT1(%a6),%fp0
fmovel %d1,%FPCR |restore users exceptions
clrw ADJFACT+2(%a6)
movel #0x80000000,ADJFACT+4(%a6)
clrl ADJFACT+8(%a6)
fmulx ADJFACT(%a6),%fp0 | ...FINAL ADJUSTMENT
bra t_frcinx
|end
|
| file boundary: AirFortressIlikara/LS2K0300-linux-4.19, 19,468 bytes
| arch/m68k/fpsp040/slogn.S
|
|
| slogn.sa 3.1 12/10/90
|
| slogn computes the natural logarithm of an
| input value. slognd does the same except the input value is a
| denormalized number. slognp1 computes log(1+X), and slognp1d
| computes log(1+X) for denormalized X.
|
| Input: Double-extended value in memory location pointed to by address
| register a0.
|
| Output: log(X) or log(1+X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 2 ulps in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program slogn takes approximately 190 cycles for input
| argument X such that |X-1| >= 1/16, which is the usual
| situation. For those arguments, slognp1 takes approximately
| 210 cycles. For the less common arguments, the program will
| run no worse than 10% slower.
|
| Algorithm:
| LOGN:
| Step 1. If |X-1| < 1/16, approximate log(X) by an odd polynomial in
| u, where u = 2(X-1)/(X+1). Otherwise, move on to Step 2.
|
| Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first seven
| significant bits of Y plus 2**(-7), i.e. F = 1.xxxxxx1 in base
| 2 where the six "x" match those of Y. Note that |Y-F| <= 2**(-7).
|
| Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a polynomial in u,
| log(1+u) = poly.
|
| Step 4. Reconstruct log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u)
| by k*log(2) + (log(F) + poly). The values of log(F) are calculated
| beforehand and stored in the program.
|
| lognp1:
| Step 1: If |X| < 1/16, approximate log(1+X) by an odd polynomial in
| u where u = 2X/(2+X). Otherwise, move on to Step 2.
|
| Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done in Step 2
| of the algorithm for LOGN and compute log(1+X) as
| k*log(2) + log(F) + poly where poly approximates log(1+u),
| u = (Y-F)/F.
|
| Implementation Notes:
| Note 1. There are 64 different possible values for F, thus 64 log(F)'s
| need to be tabulated. Moreover, the values of 1/F are also
| tabulated so that the division in (Y-F)/F can be performed by a
| multiplication.
|
| Note 2. In Step 2 of lognp1, in order to preserved accuracy, the value
| Y-F has to be calculated carefully when 1/2 <= X < 3/2.
|
| Note 3. To fully exploit the pipeline, polynomials are usually separated
| into two parts evaluated independently before being added up.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|slogn idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|--Constants for slogn/slognp1. Extended-precision values are three
|--longwords (sign/exponent word, two mantissa words); singles/doubles
|--as noted. All values are read-only.
BOUNDS1: .long 0x3FFEF07D,0x3FFF8841 | compact-form bounds, roughly [15/16, 17/16]
BOUNDS2: .long 0x3FFE8000,0x3FFFC000 | compact-form bounds [1/2, 3/2] (lognp1)
LOGOF2: .long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000 | log(2), extended
one: .long 0x3F800000 | 1.0 single
zero: .long 0x00000000 | 0.0 single
infty: .long 0x7F800000 | +infinity single
negone: .long 0xBF800000 | -1.0 single
|--LOGA1..LOGA6: double coefficients of the log(1+u) polynomial used on
|--the table-driven path (see LP1CONT1).
LOGA6: .long 0x3FC2499A,0xB5E4040B
LOGA5: .long 0xBFC555B5,0x848CB7DB
LOGA4: .long 0x3FC99999,0x987D8730
LOGA3: .long 0xBFCFFFFF,0xFF6F7E97
LOGA2: .long 0x3FD55555,0x555555a4
LOGA1: .long 0xBFE00000,0x00000008
|--LOGB1..LOGB5: double coefficients of the odd polynomial in u used on
|--the |X-1| < 1/16 path (see LP1CONT2).
LOGB5: .long 0x3F175496,0xADD7DAD6
LOGB4: .long 0x3F3C71C2,0xFE80C7E0
LOGB3: .long 0x3F624924,0x928BCCFF
LOGB2: .long 0x3F899999,0x999995EC
LOGB1: .long 0x3FB55555,0x55555555
TWO: .long 0x40000000,0x00000000 | 2.0 double
LTHOLD: .long 0x3f990000,0x80000000,0x00000000,0x00000000 | 2**(-102), lognp1 tiny-arg cutoff
|--LOGTBL: 64 entries, one per F = 1.xxxxxx1 (the leading seven bits of Y
|--with a 1 attached at the eighth bit -- see LOGMAIN). Each entry is two
|--extended-precision values, 16 bytes apiece: 1/F followed by log(F).
|--The code computes the entry displacement from FFRAC (32-byte stride)
|--and adds 16 to step from the 1/F cell to the log(F) cell.
LOGTBL:
.long 0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
.long 0x3FF70000,0xFF015358,0x833C47E2,0x00000000
.long 0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
.long 0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
.long 0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
.long 0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
.long 0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
.long 0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
.long 0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
.long 0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
.long 0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
.long 0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
.long 0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
.long 0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
.long 0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
.long 0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
.long 0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
.long 0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
.long 0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
.long 0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
.long 0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
.long 0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
.long 0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
.long 0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
.long 0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
.long 0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
.long 0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
.long 0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
.long 0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
.long 0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
.long 0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
.long 0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
.long 0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
.long 0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
.long 0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
.long 0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
.long 0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
.long 0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
.long 0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
.long 0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
.long 0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
.long 0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
.long 0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
.long 0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
.long 0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
.long 0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
.long 0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
.long 0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
.long 0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
.long 0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
.long 0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
.long 0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
.long 0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
.long 0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
.long 0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
.long 0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
.long 0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
.long 0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
.long 0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
.long 0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
.long 0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
.long 0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
.long 0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
.long 0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
.long 0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
.long 0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
.long 0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
.long 0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
.long 0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
.long 0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
.long 0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
.long 0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
.long 0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
.long 0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
.long 0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
.long 0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
.long 0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
.long 0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
.long 0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
.long 0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
.long 0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
.long 0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
.long 0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
.long 0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
.long 0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
.long 0x3FFE0000,0x825EFCED,0x49369330,0x00000000
.long 0x3FFE0000,0x9868C809,0x868C8098,0x00000000
.long 0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
.long 0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
.long 0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
.long 0x3FFE0000,0x95A02568,0x095A0257,0x00000000
.long 0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
.long 0x3FFE0000,0x94458094,0x45809446,0x00000000
.long 0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
.long 0x3FFE0000,0x92F11384,0x0497889C,0x00000000
.long 0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
.long 0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
.long 0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
.long 0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
.long 0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
.long 0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
.long 0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
.long 0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
.long 0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
.long 0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
.long 0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
.long 0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
.long 0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
.long 0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
.long 0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
.long 0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
.long 0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
.long 0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
.long 0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
.long 0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
.long 0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
.long 0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
.long 0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
.long 0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
.long 0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
.long 0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
.long 0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
.long 0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
.long 0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
.long 0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
.long 0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
.long 0x3FFE0000,0x80808080,0x80808081,0x00000000
.long 0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
|--Aliases for per-frame scratch cells (L_SCR*/FP_SCR* offsets are
|--defined in fpsp.h; all are addressed off %a6).
.set ADJK,L_SCR1 | exponent adjustment k (non-zero only for denorm input)
.set X,FP_SCR1 | working copy of the argument / of Y = 2**(-k)*X
.set XDCARE,X+2 | don't-care word of X
.set XFRAC,X+4 | high mantissa longword of X
.set F,FP_SCR2 | extended-precision F = 1.xxxxxx1
.set FFRAC,F+4 | high mantissa longword of F
.set KLOG2,FP_SCR3 | holds K*log(2)
.set SAVEU,FP_SCR4 | holds U on the near-1 path
| xref t_frcinx
|xref t_extdnrm
|xref t_operr
|xref t_dz
.global slognd
slognd:
|--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
|--Left-justifies the denormalized mantissa (k-bit shift), stores -k in
|--ADJK so that input = 2**(ADJK) * (normalized X), then falls into the
|--regular algorithm at LOGBGN, which folds ADJK into K.
 movel #-100,ADJK(%a6) | ...INPUT = 2^(ADJK) * FP0
|----normalize the input value by left shifting k bits (k to be determined
|----below), adjusting exponent and storing -k to ADJK
|----the value TWOTO100 is no longer needed.
|----Note that this code assumes the denormalized input is NON-ZERO.
 moveml %d2-%d7,-(%a7) | ...save some registers
 movel #0x00000000,%d3 | ...D3 is exponent of smallest norm. #
 movel 4(%a0),%d4
 movel 8(%a0),%d5 | ...(D4,D5) is (Hi_X,Lo_X)
 clrl %d2 | ...D2 used for holding K
 tstl %d4
 bnes HiX_not0
HiX_0:
|--High mantissa longword is zero: shift the low longword up 32 bits
|--first, then finish with bfffo on what remains.
 movel %d5,%d4
 clrl %d5
 movel #32,%d2
 clrl %d6
 bfffo %d4{#0:#32},%d6 | ...position of the first set bit
 lsll %d6,%d4
 addl %d6,%d2 | ...(D3,D4,D5) is normalized
 movel %d3,X(%a6)
 movel %d4,XFRAC(%a6)
 movel %d5,XFRAC+4(%a6)
 negl %d2 | ...ADJK := -k (undoes the normalization)
 movel %d2,ADJK(%a6)
 fmovex X(%a6),%fp0
 moveml (%a7)+,%d2-%d7 | ...restore registers
 lea X(%a6),%a0
 bras LOGBGN | ...begin regular log(X)
HiX_not0:
|--High mantissa longword non-zero: shift (D4,D5) left as a 64-bit unit
|--by the leading-zero count of D4.
 clrl %d6
 bfffo %d4{#0:#32},%d6 | ...find first 1
 movel %d6,%d2 | ...get k
 lsll %d6,%d4
 movel %d5,%d7 | ...a copy of D5
 lsll %d6,%d5
 negl %d6
 addil #32,%d6
 lsrl %d6,%d7 | ...bits of D5 that move into D4
 orl %d7,%d4 | ...(D3,D4,D5) normalized
 movel %d3,X(%a6)
 movel %d4,XFRAC(%a6)
 movel %d5,XFRAC+4(%a6)
 negl %d2 | ...ADJK := -k
 movel %d2,ADJK(%a6)
 fmovex X(%a6),%fp0
 moveml (%a7)+,%d2-%d7 | ...restore registers
 lea X(%a6),%a0
 bras LOGBGN | ...begin regular log(X)
.global slogn
slogn:
|--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
|--In: (%a0) = extended-precision X; %d1 = caller's FPCR image.
|--Out: fp0 = log(X); exits through t_frcinx (or t_operr for X < 0).
 fmovex (%a0),%fp0 | ...LOAD INPUT
 movel #0x00000000,ADJK(%a6)
LOGBGN:
|--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
|--A FINITE, NON-ZERO, NORMALIZED NUMBER.
|--Build the "compact form" of X in D0: sign/exponent word in the upper
|--half, top 16 mantissa bits in the lower half. All range tests below
|--(cmp2l against BOUNDS*) operate on this compact form.
 movel (%a0),%d0
 movew 4(%a0),%d0
 movel (%a0),X(%a6)
 movel 4(%a0),X+4(%a6)
 movel 8(%a0),X+8(%a6)
 cmpil #0,%d0 | ...CHECK IF X IS NEGATIVE
 blt LOGNEG | ...LOG OF NEGATIVE ARGUMENT IS INVALID
 cmp2l BOUNDS1,%d0 | ...X IS POSITIVE, CHECK IF X IS NEAR 1
 bcc LOGNEAR1 | ...BOUNDS IS ROUGHLY [15/16, 17/16]
LOGMAIN:
|--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
|--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
|--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
|--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
|-- = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
|--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
|--LOG(1+U) CAN BE VERY EFFICIENT.
|--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
|--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
|--GET K, Y, F, AND ADDRESS OF 1/F.
 asrl #8,%d0
 asrl #8,%d0 | ...SHIFTED 16 BITS, BIASED EXPO. OF X
 subil #0x3FFF,%d0 | ...THIS IS K
 addl ADJK(%a6),%d0 | ...ADJUST K, ORIGINAL INPUT MAY BE DENORM.
 lea LOGTBL,%a0 | ...BASE ADDRESS OF 1/F AND LOG(F)
 fmovel %d0,%fp1 | ...CONVERT K TO FLOATING-POINT FORMAT
|--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
 movel #0x3FFF0000,X(%a6) | ...X IS NOW Y, I.E. 2^(-K)*X
 movel XFRAC(%a6),FFRAC(%a6)
 andil #0xFE000000,FFRAC(%a6) | ...FIRST 7 BITS OF Y
 oril #0x01000000,FFRAC(%a6) | ...GET F: ATTACH A 1 AT THE EIGHTH BIT
 movel FFRAC(%a6),%d0 | ...READY TO GET ADDRESS OF 1/F
 andil #0x7E000000,%d0
 asrl #8,%d0
 asrl #8,%d0
 asrl #4,%d0 | ...SHIFTED 20, D0 IS THE DISPLACEMENT
 addal %d0,%a0 | ...A0 IS THE ADDRESS FOR 1/F
 fmovex X(%a6),%fp0
 movel #0x3fff0000,F(%a6) | ...F gets exponent 0 (value in [1,2))
 clrl F+8(%a6)
 fsubx F(%a6),%fp0 | ...Y-F
 fmovemx %fp2-%fp2/%fp3,-(%sp) | ...SAVE FP2 WHILE FP0 IS NOT READY
|--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
|--REGISTERS SAVED: FPCR, FP1, FP2
LP1CONT1:
|--AN RE-ENTRY POINT FOR LOGNP1
|--Requires: FP0 = Y-F, A0 -> 1/F table entry, FP1 = K, FP2/FP3 saved.
 fmulx (%a0),%fp0 | ...FP0 IS U = (Y-F)/F
 fmulx LOGOF2,%fp1 | ...GET K*LOG2 WHILE FP0 IS NOT READY
 fmovex %fp0,%fp2
 fmulx %fp2,%fp2 | ...FP2 IS V=U*U
 fmovex %fp1,KLOG2(%a6) | ...PUT K*LOG2 IN MEMORY, FREE FP1
|--LOG(1+U) IS APPROXIMATED BY
|--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
|--[U + V*(A1+V*(A3+V*A5))] + [U*V*(A2+V*(A4+V*A6))]
|--(the two halves are evaluated in FP2 and FP1 independently so the
|--FPU pipeline stays busy)
 fmovex %fp2,%fp3
 fmovex %fp2,%fp1
 fmuld LOGA6,%fp1 | ...V*A6
 fmuld LOGA5,%fp2 | ...V*A5
 faddd LOGA4,%fp1 | ...A4+V*A6
 faddd LOGA3,%fp2 | ...A3+V*A5
 fmulx %fp3,%fp1 | ...V*(A4+V*A6)
 fmulx %fp3,%fp2 | ...V*(A3+V*A5)
 faddd LOGA2,%fp1 | ...A2+V*(A4+V*A6)
 faddd LOGA1,%fp2 | ...A1+V*(A3+V*A5)
 fmulx %fp3,%fp1 | ...V*(A2+V*(A4+V*A6))
 addal #16,%a0 | ...ADDRESS OF LOG(F)
 fmulx %fp3,%fp2 | ...V*(A1+V*(A3+V*A5)), FP3 RELEASED
 fmulx %fp0,%fp1 | ...U*V*(A2+V*(A4+V*A6))
 faddx %fp2,%fp0 | ...U+V*(A1+V*(A3+V*A5)), FP2 RELEASED
 faddx (%a0),%fp1 | ...LOG(F)+U*V*(A2+V*(A4+V*A6))
 fmovemx (%sp)+,%fp2-%fp2/%fp3 | ...RESTORE FP2
 faddx %fp1,%fp0 | ...FP0 IS LOG(F) + LOG(1+U)
 fmovel %d1,%fpcr | ...restore the user's rounding/exception modes
 faddx KLOG2(%a6),%fp0 | ...FINAL ADD
 bra t_frcinx
LOGNEAR1:
|--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
|--|X-1| < 1/16: log(X) is computed as an odd polynomial in
|--u = 2(X-1)/(X+1) instead of the table-driven path.
 fmovex %fp0,%fp1
 fsubs one,%fp1 | ...FP1 IS X-1
 fadds one,%fp0 | ...FP0 IS X+1
 faddx %fp1,%fp1 | ...FP1 IS 2(X-1)
|--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
|--IN U, U = 2(X-1)/(X+1) = FP1/FP0
LP1CONT2:
|--THIS IS AN RE-ENTRY POINT FOR LOGNP1
|--Requires: FP1/FP0 = u (numerator/denominator); computes
|--u + u*v*(B1 + v*(B2 + v*(B3 + v*(B4 + v*B5)))), v = u*u.
 fdivx %fp0,%fp1 | ...FP1 IS U
 fmovemx %fp2-%fp2/%fp3,-(%sp) | ...SAVE FP2
|--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
|--LET V=U*U, W=V*V, CALCULATE
|--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
|--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
 fmovex %fp1,%fp0
 fmulx %fp0,%fp0 | ...FP0 IS V
 fmovex %fp1,SAVEU(%a6) | ...STORE U IN MEMORY, FREE FP1
 fmovex %fp0,%fp1
 fmulx %fp1,%fp1 | ...FP1 IS W
 fmoved LOGB5,%fp3
 fmoved LOGB4,%fp2
 fmulx %fp1,%fp3 | ...W*B5
 fmulx %fp1,%fp2 | ...W*B4
 faddd LOGB3,%fp3 | ...B3+W*B5
 faddd LOGB2,%fp2 | ...B2+W*B4
 fmulx %fp3,%fp1 | ...W*(B3+W*B5), FP3 RELEASED
 fmulx %fp0,%fp2 | ...V*(B2+W*B4)
 faddd LOGB1,%fp1 | ...B1+W*(B3+W*B5)
 fmulx SAVEU(%a6),%fp0 | ...FP0 IS U*V
 faddx %fp2,%fp1 | ...B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
 fmovemx (%sp)+,%fp2-%fp2/%fp3 | ...FP2 RESTORED
 fmulx %fp1,%fp0 | ...U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
 fmovel %d1,%fpcr | ...restore the user's modes before the final op
 faddx SAVEU(%a6),%fp0
 bra t_frcinx
 rts | NOTE(review): unreachable -- the bra above is always taken
LOGNEG:
|--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
|--Dispatch the operand-error handler.
 bra t_operr
.global slognp1d
slognp1d:
|--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
| Simply return the denorm
| (for |Z| that tiny, log(1+Z) rounds to Z)
 bra t_extdnrm
.global slognp1
slognp1:
|--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
|--In: (%a0) = extended-precision X; %d1 = caller's FPCR image.
 fmovex (%a0),%fp0 | ...LOAD INPUT
 fabsx %fp0 |test magnitude
 fcmpx LTHOLD,%fp0 |compare with min threshold (2**(-102))
 fbgt LP1REAL |if greater, continue
 fmovel #0,%fpsr |clr N flag from compare
 fmovel %d1,%fpcr
 fmovex (%a0),%fp0 |return signed argument (log(1+X) ~= X)
 bra t_frcinx
LP1REAL:
 fmovex (%a0),%fp0 | ...LOAD INPUT
 movel #0x00000000,ADJK(%a6)
 fmovex %fp0,%fp1 | ...FP1 IS INPUT Z
 fadds one,%fp0 | ...X := ROUND(1+Z)
 fmovex %fp0,X(%a6)
 movew XFRAC(%a6),XDCARE(%a6)
 movel X(%a6),%d0 | ...compact form of X (see LOGBGN)
 cmpil #0,%d0
 ble LP1NEG0 | ...LOG OF ZERO OR -VE
 cmp2l BOUNDS2,%d0
 bcs LOGMAIN | ...BOUNDS2 IS [1/2,3/2]
|--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, WHICH IS ROUNDING 1+Z,
|--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
|--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
LP1NEAR1:
|--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
 cmp2l BOUNDS1,%d0
 bcss LP1CARE
LP1ONE16:
|--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
|--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
 faddx %fp1,%fp1 | ...FP1 IS 2Z
 fadds one,%fp0 | ...FP0 IS 1+X
|--U = FP1/FP0
 bra LP1CONT2
LP1CARE:
|--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
|--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
|--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
|--THERE ARE ONLY TWO CASES.
|--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
|--CASE 2: 1+Z > 1, THEN K = 0 AND Y-F = (1-F) + Z
|--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
|--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
 movel XFRAC(%a6),FFRAC(%a6)
 andil #0xFE000000,FFRAC(%a6)
 oril #0x01000000,FFRAC(%a6) | ...F OBTAINED
 cmpil #0x3FFF8000,%d0 | ...SEE IF 1+Z > 1
 bges KISZERO
KISNEG1:
|--1+Z < 1: K = -1, so Y = 2*(1+Z) and Y-F = (2-F)+2Z exactly.
 fmoves TWO,%fp0
 movel #0x3fff0000,F(%a6)
 clrl F+8(%a6)
 fsubx F(%a6),%fp0 | ...2-F
 movel FFRAC(%a6),%d0
 andil #0x7E000000,%d0
 asrl #8,%d0
 asrl #8,%d0
 asrl #4,%d0 | ...D0 CONTAINS DISPLACEMENT FOR 1/F
 faddx %fp1,%fp1 | ...GET 2Z
 fmovemx %fp2-%fp2/%fp3,-(%sp) | ...SAVE FP2
 faddx %fp1,%fp0 | ...FP0 IS Y-F = (2-F)+2Z
 lea LOGTBL,%a0 | ...A0 IS ADDRESS OF 1/F
 addal %d0,%a0
 fmoves negone,%fp1 | ...FP1 IS K = -1
 bra LP1CONT1
KISZERO:
|--1+Z >= 1: K = 0, so Y = 1+Z and Y-F = (1-F)+Z exactly.
 fmoves one,%fp0
 movel #0x3fff0000,F(%a6)
 clrl F+8(%a6)
 fsubx F(%a6),%fp0 | ...1-F
 movel FFRAC(%a6),%d0
 andil #0x7E000000,%d0
 asrl #8,%d0
 asrl #8,%d0
 asrl #4,%d0 | ...D0 CONTAINS DISPLACEMENT FOR 1/F
 faddx %fp1,%fp0 | ...FP0 IS Y-F
 fmovemx %fp2-%fp2/%fp3,-(%sp) | ...FP2 SAVED
 lea LOGTBL,%a0
 addal %d0,%a0 | ...A0 IS ADDRESS OF 1/F
 fmoves zero,%fp1 | ...FP1 IS K = 0
 bra LP1CONT1
LP1NEG0:
|--FPCR SAVED. D0 IS X IN COMPACT FORM.
|--X = round(1+Z) <= 0: zero means Z = -1 (log(0)), negative means
|--Z < -1 (log of a negative number).
 cmpil #0,%d0
 blts LP1NEG
LP1ZERO:
 fmoves negone,%fp0
 fmovel %d1,%fpcr
 bra t_dz | ...log(0): dispatch the divide-by-zero handler
LP1NEG:
 fmoves zero,%fp0
 fmovel %d1,%fpcr
 bra t_operr | ...log(negative): operand error
|end
|
| (Concatenated source file boundary:
|  arch/m68k/fpsp040/stan.S, 13,349 bytes,
|  from AirFortressIlikara/LS2K0300-linux-4.19)
|
|
| stan.sa 3.3 7/29/91
|
| The entry point stan computes the tangent of
| an input argument;
| stand does the same except for denormalized input.
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The value tan(X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 3 ulp in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program sTAN takes approximately 170 cycles for
| input argument X such that |X| < 15Pi, which is the usual
| situation.
|
| Algorithm:
|
| 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.
|
| 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let
| k = N mod 2, so in particular, k = 0 or 1.
|
| 3. If k is odd, go to 5.
|
| 4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a
| rational function U/V where
| U = r + r*s*(P1 + s*(P2 + s*P3)), and
| V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r.
| Exit.
|
| 5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by a
| rational function U/V where
| U = r + r*s*(P1 + s*(P2 + s*P3)), and
| V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r,
| -Cot(r) = -V/U. Exit.
|
| 6. If |X| > 1, go to 8.
|
| 7. (|X|<2**(-40)) Tan(X) = X. Exit.
|
| 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back to 2.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|STAN idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|--Constants for stan. NOTE(review): this label BOUNDS1 duplicates the
|--BOUNDS1 defined earlier in this concatenated dump (the two originally
|--lived in separate source files) -- assembling the dump as one file
|--would clash; verify before building.
BOUNDS1: .long 0x3FD78000,0x4004BC7E | compact bounds: [2**(-40), 15*PI)
TWOBYPI: .long 0x3FE45F30,0x6DC9C883 | 2/PI, double
|--TANP1..TANP3 / TANQ1..TANQ4: numerator/denominator coefficients of
|--the rational approximation tan(r) ~= U/V (see TANCONT). P3/Q4 are
|--doubles; the rest are extended precision.
TANQ4: .long 0x3EA0B759,0xF50F8688
TANP3: .long 0xBEF2BAA5,0xA8924F04
TANQ3: .long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
TANP2: .long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
TANQ2: .long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
TANP1: .long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
TANQ1: .long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
INVTWOPI: .long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
TWOPI1: .long 0x40010000,0xC90FDAA2,0x00000000,0x00000000 | high part of 2*PI split
TWOPI2: .long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000 | low part of 2*PI split
|--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
|--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
|--MOST 69 BITS LONG.
|--Each entry is 16 bytes: 12 bytes of extended-precision leading term
|--followed by a 4-byte single-precision trailing term; TANMAIN indexes
|--PITBL+0x200 (the N=0 row) by N*16.
.global PITBL
PITBL:
.long 0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
.long 0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
.long 0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
.long 0xC0040000,0xB6365E22,0xEE46F000,0x21480000
.long 0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
.long 0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
.long 0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
.long 0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
.long 0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
.long 0xC0040000,0x90836524,0x88034B96,0x20B00000
.long 0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
.long 0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
.long 0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
.long 0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
.long 0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
.long 0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
.long 0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
.long 0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
.long 0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
.long 0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
.long 0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
.long 0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
.long 0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
.long 0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
.long 0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
.long 0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
.long 0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
.long 0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
.long 0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
.long 0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
.long 0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
.long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
.long 0x00000000,0x00000000,0x00000000,0x00000000
.long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
.long 0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
.long 0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
.long 0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
.long 0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
.long 0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
.long 0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
.long 0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
.long 0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
.long 0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
.long 0x40030000,0x8A3AE64F,0x76F80584,0x21080000
.long 0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
.long 0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
.long 0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
.long 0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
.long 0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
.long 0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
.long 0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
.long 0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
.long 0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
.long 0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
.long 0x40040000,0x8A3AE64F,0x76F80584,0x21880000
.long 0x40040000,0x90836524,0x88034B96,0xA0B00000
.long 0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
.long 0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
.long 0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
.long 0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
.long 0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
.long 0x40040000,0xB6365E22,0xEE46F000,0xA1480000
.long 0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
.long 0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
.long 0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
|--Scratch-cell aliases for stan (offsets from fpsp.h, addressed off %a6):
.set INARG,FP_SCR4 | copy of the argument inside REDUCEX's LOOP
.set TWOTO63,L_SCR1 | sign(INARG)*2**63 in single precision
.set ENDFLAG,L_SCR2 | 1 on the final reduction iteration, else 0
.set N,L_SCR3 | integer quotient from the reduction
| xref t_frcinx
|xref t_extdnrm
.global stand
stand:
|--TAN(X) = X FOR DENORMALIZED X
 bra t_extdnrm
.global stan
stan:
|--ENTRY POINT FOR TAN(X), X FINITE, NON-ZERO, NOT NAN.
|--In: (%a0) = extended-precision X; %d1 = caller's FPCR image.
|--Out: fp0 = tan(X); exits through t_frcinx.
 fmovex (%a0),%fp0 | ...LOAD INPUT
 movel (%a0),%d0
 movew 4(%a0),%d0 | ...compact form of X in D0
 andil #0x7FFFFFFF,%d0 | ...strip the sign: compare |X|
 cmpil #0x3FD78000,%d0 | ...|X| >= 2**(-40)?
 bges TANOK1
 bra TANSM | ...tiny argument: tan(X) ~= X
TANOK1:
 cmpil #0x4004BC7E,%d0 | ...|X| < 15 PI?
 blts TANMAIN
 bra REDUCEX | ...large argument: general reduction first
TANMAIN:
|--THIS IS THE USUAL CASE, |X| <= 15 PI.
|--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
 fmovex %fp0,%fp1
 fmuld TWOBYPI,%fp1 | ...X*2/PI
|--HIDE THE NEXT TWO INSTRUCTIONS
 leal PITBL+0x200,%a1 | ...TABLE OF N*PI/2, N = -32,...,32 (0x200 = N=0 row)
|--FP1 IS NOW READY
 fmovel %fp1,%d0 | ...CONVERT TO INTEGER (N)
 asll #4,%d0 | ...N*16: byte offset into PITBL
 addal %d0,%a1 | ...ADDRESS N*PIBY2 IN Y1, Y2
 fsubx (%a1)+,%fp0 | ...X-Y1
|--HIDE THE NEXT ONE
 fsubs (%a1),%fp0 | ...FP0 IS R = (X-Y1)-Y2
 rorl #5,%d0 | ...rotate N's parity bit (bit 4 of N*16) to bit 31
 andil #0x80000000,%d0 | ...D0 WAS ODD IFF D0 < 0
TANCONT:
|--D0 < 0 selects the odd-N branch. Even N: tan(X) = tan(r) = U/V.
|--Odd N: tan(X) = -cot(r) = -V/U. Same U, V in both branches:
|--U = r + r*s*(P1+s*(P2+s*P3)), V = 1 + s*(Q1+s*(Q2+s*(Q3+s*Q4))), s=r*r.
 cmpil #0,%d0
 blt NODD
 fmovex %fp0,%fp1
 fmulx %fp1,%fp1 | ...S = R*R
 fmoved TANQ4,%fp3
 fmoved TANP3,%fp2
 fmulx %fp1,%fp3 | ...SQ4
 fmulx %fp1,%fp2 | ...SP3
 faddd TANQ3,%fp3 | ...Q3+SQ4
 faddx TANP2,%fp2 | ...P2+SP3
 fmulx %fp1,%fp3 | ...S(Q3+SQ4)
 fmulx %fp1,%fp2 | ...S(P2+SP3)
 faddx TANQ2,%fp3 | ...Q2+S(Q3+SQ4)
 faddx TANP1,%fp2 | ...P1+S(P2+SP3)
 fmulx %fp1,%fp3 | ...S(Q2+S(Q3+SQ4))
 fmulx %fp1,%fp2 | ...S(P1+S(P2+SP3))
 faddx TANQ1,%fp3 | ...Q1+S(Q2+S(Q3+SQ4))
 fmulx %fp0,%fp2 | ...RS(P1+S(P2+SP3))
 fmulx %fp3,%fp1 | ...S(Q1+S(Q2+S(Q3+SQ4)))
 faddx %fp2,%fp0 | ...R+RS(P1+S(P2+SP3)) = U
 fadds #0x3F800000,%fp1 | ...1+S(Q1+...) = V
 fmovel %d1,%fpcr |restore users exceptions
 fdivx %fp1,%fp0 |last inst - possible exception set (U/V)
 bra t_frcinx
NODD:
|--Odd N: compute V/U with roles of fp0/fp1 swapped, then negate U's
|--sign on the stack so the division yields -V/U = -cot(r).
 fmovex %fp0,%fp1
 fmulx %fp0,%fp0 | ...S = R*R
 fmoved TANQ4,%fp3
 fmoved TANP3,%fp2
 fmulx %fp0,%fp3 | ...SQ4
 fmulx %fp0,%fp2 | ...SP3
 faddd TANQ3,%fp3 | ...Q3+SQ4
 faddx TANP2,%fp2 | ...P2+SP3
 fmulx %fp0,%fp3 | ...S(Q3+SQ4)
 fmulx %fp0,%fp2 | ...S(P2+SP3)
 faddx TANQ2,%fp3 | ...Q2+S(Q3+SQ4)
 faddx TANP1,%fp2 | ...P1+S(P2+SP3)
 fmulx %fp0,%fp3 | ...S(Q2+S(Q3+SQ4))
 fmulx %fp0,%fp2 | ...S(P1+S(P2+SP3))
 faddx TANQ1,%fp3 | ...Q1+S(Q2+S(Q3+SQ4))
 fmulx %fp1,%fp2 | ...RS(P1+S(P2+SP3))
 fmulx %fp3,%fp0 | ...S(Q1+S(Q2+S(Q3+SQ4)))
 faddx %fp2,%fp1 | ...R+RS(P1+S(P2+SP3)) = U
 fadds #0x3F800000,%fp0 | ...1+S(Q1+...) = V
 fmovex %fp1,-(%sp)
 eoril #0x80000000,(%sp) | ...flip U's sign bit: compute -U
 fmovel %d1,%fpcr |restore users exceptions
 fdivx (%sp)+,%fp0 |last inst - possible exception set (V/(-U))
 bra t_frcinx
TANBORS:
|--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
|--IF |X| < 2**(-40), RETURN X OR 1.
|--NOTE(review): no branch targets TANBORS within this file; the entry
|--dispatch above jumps straight to TANSM/REDUCEX. Apparently vestigial.
 cmpil #0x3FFF8000,%d0
 bgts REDUCEX
TANSM:
|--Tiny |X|: tan(X) ~= X; reload through memory so the final fmove can
|--raise the user's enabled exceptions (e.g. inexact) correctly.
 fmovex %fp0,-(%sp)
 fmovel %d1,%fpcr |restore users exceptions
 fmovex (%sp)+,%fp0 |last inst - possible exception set
 bra t_frcinx
REDUCEX:
|--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
|--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
|--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
|--Repeatedly subtracts N * 2**(L)*(PI/2) until the quotient fits in
|--29 bits; the remainder is kept as a double-extended pair (R,r).
 fmovemx %fp2-%fp5,-(%a7) | ...save FP2 through FP5
 movel %d2,-(%a7)
 fmoves #0x00000000,%fp1 | ...r := 0
|--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
|--there is a danger of unwanted overflow in first LOOP iteration. In this
|--case, reduce argument by one remainder step to make subsequent reduction
|--safe.
 cmpil #0x7ffeffff,%d0 |is argument dangerously large?
 bnes LOOP
 movel #0x7ffe0000,FP_SCR2(%a6) |yes
| ;create 2**16383*PI/2
 movel #0xc90fdaa2,FP_SCR2+4(%a6)
 clrl FP_SCR2+8(%a6)
 ftstx %fp0 |test sign of argument
 movel #0x7fdc0000,FP_SCR3(%a6) |create low half of 2**16383*
| ;PI/2 at FP_SCR3
 movel #0x85a308d3,FP_SCR3+4(%a6)
 clrl FP_SCR3+8(%a6)
 fblt red_neg
 orw #0x8000,FP_SCR2(%a6) |positive arg: subtract (add negated const)
 orw #0x8000,FP_SCR3(%a6)
red_neg:
 faddx FP_SCR2(%a6),%fp0 |high part of reduction is exact
 fmovex %fp0,%fp1 |save high result in fp1
 faddx FP_SCR3(%a6),%fp0 |low part of reduction
 fsubx %fp0,%fp1 |determine low component of result
 faddx FP_SCR3(%a6),%fp1 |fp0/fp1 are reduced argument.
|--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
|--integer quotient will be stored in N
|--Intermediate remainder is 66-bit long; (R,r) in (FP0,FP1)
LOOP:
 fmovex %fp0,INARG(%a6) | ...+-2**K * F, 1 <= F < 2
 movew INARG(%a6),%d0 | ...sign+exponent word of R
 movel %d0,%a1 | ...save a copy of D0
 andil #0x00007FFF,%d0
 subil #0x00003FFF,%d0 | ...D0 IS K (unbiased exponent)
 cmpil #28,%d0
 bles LASTLOOP | ...quotient already < 2**29: final pass
CONTLOOP:
 subil #27,%d0 | ...D0 IS L := K-27
 movel #0,ENDFLAG(%a6)
 bras WORK
LASTLOOP:
 clrl %d0 | ...D0 IS L := 0
 movel #1,ENDFLAG(%a6)
WORK:
|--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
|--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
|--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
|--2**L * (PIby2_1), 2**L * (PIby2_2)
 movel #0x00003FFE,%d2 | ...BIASED EXPO OF 2/PI
 subl %d0,%d2 | ...BIASED EXPO OF 2**(-L)*(2/PI)
 movel #0xA2F9836E,FP_SCR1+4(%a6)
 movel #0x4E44152A,FP_SCR1+8(%a6)
 movew %d2,FP_SCR1(%a6) | ...FP_SCR1 is 2**(-L)*(2/PI)
 fmovex %fp0,%fp2
 fmulx FP_SCR1(%a6),%fp2 | ...FP2 = R * 2**(-L)*(2/PI)
|--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
|--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
|--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
|--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
|--US THE DESIRED VALUE IN FLOATING POINT.
|--HIDE SIX CYCLES OF INSTRUCTION
 movel %a1,%d2
 swap %d2
 andil #0x80000000,%d2
 oril #0x5F000000,%d2 | ...D2 IS SIGN(INARG)*2**63 IN SGL
 movel %d2,TWOTO63(%a6)
 movel %d0,%d2
 addil #0x00003FFF,%d2 | ...BIASED EXPO OF 2**L * (PI/2)
|--FP2 IS READY
 fadds TWOTO63(%a6),%fp2 | ...THE FRACTIONAL PART OF FP1 IS ROUNDED
|--HIDE 4 CYCLES OF INSTRUCTION; creating 2**(L)*Piby2_1 and 2**(L)*Piby2_2
 movew %d2,FP_SCR2(%a6)
 clrw FP_SCR2+2(%a6)
 movel #0xC90FDAA2,FP_SCR2+4(%a6)
 clrl FP_SCR2+8(%a6) | ...FP_SCR2 is 2**(L) * Piby2_1
|--FP2 IS READY
 fsubs TWOTO63(%a6),%fp2 | ...FP2 is N
 addil #0x00003FDD,%d0
 movew %d0,FP_SCR3(%a6)
 clrw FP_SCR3+2(%a6)
 movel #0x85A308D3,FP_SCR3+4(%a6)
 clrl FP_SCR3+8(%a6) | ...FP_SCR3 is 2**(L) * Piby2_2
 movel ENDFLAG(%a6),%d0
|--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
|--P2 = 2**(L) * Piby2_2
 fmovex %fp2,%fp4
 fmulx FP_SCR2(%a6),%fp4 | ...W = N*P1
 fmovex %fp2,%fp5
 fmulx FP_SCR3(%a6),%fp5 | ...w = N*P2
 fmovex %fp4,%fp3
|--we want P+p = W+w but |p| <= half ulp of P
|--Then, we need to compute A := R-P and a := r-p
 faddx %fp5,%fp3 | ...FP3 is P
 fsubx %fp3,%fp4 | ...W-P
 fsubx %fp3,%fp0 | ...FP0 is A := R - P
 faddx %fp5,%fp4 | ...FP4 is p = (W-P)+w
 fmovex %fp0,%fp3 | ...FP3 A
 fsubx %fp4,%fp1 | ...FP1 is a := r - p
|--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
|--|r| <= half ulp of R.
 faddx %fp1,%fp0 | ...FP0 is R := A+a
|--No need to calculate r if this is the last loop
 cmpil #0,%d0
 bgt RESTORE
|--Need to calculate r
 fsubx %fp0,%fp3 | ...A-R
 faddx %fp3,%fp1 | ...FP1 is r := (A-R)+a
 bra LOOP
RESTORE:
 fmovel %fp2,N(%a6) | ...store the final integer quotient N
 movel (%a7)+,%d2
 fmovemx (%a7)+,%fp2-%fp5
 movel N(%a6),%d0
 rorl #1,%d0 | ...N's parity bit into bit 31, as TANCONT expects
 bra TANCONT
|end
|
| (Concatenated source file boundary:
|  arch/m68k/fpsp040/decbin.S, 15,728 bytes,
|  from AirFortressIlikara/LS2K0300-linux-4.19)
|
|
| decbin.sa 3.3 12/19/90
|
| Description: Converts normalized packed bcd value pointed to by
| register A6 to extended-precision value in FP0.
|
| Input: Normalized packed bcd value in ETEMP(a6).
|
| Output: Exact floating-point representation of the packed bcd value.
|
| Saves and Modifies: D2-D5
|
| Speed: The program decbin takes ??? cycles to execute.
|
| Object Size:
|
| External Reference(s): None.
|
| Algorithm:
| Expected is a normal bcd (i.e. non-exceptional; all inf, zero,
| and NaN operands are dispatched without entering this routine)
| value in 68881/882 format at location ETEMP(A6).
|
| A1. Convert the bcd exponent to binary by successive adds and muls.
| Set the sign according to SE. Subtract 16 to compensate
| for the mantissa which is to be interpreted as 17 integer
| digits, rather than 1 integer and 16 fraction digits.
| Note: this operation can never overflow.
|
| A2. Convert the bcd mantissa to binary by successive
| adds and muls in FP0. Set the sign according to SM.
| The mantissa digits will be converted with the decimal point
| assumed following the least-significant digit.
| Note: this operation can never overflow.
|
| A3. Count the number of leading/trailing zeros in the
| bcd string. If SE is positive, count the leading zeros;
| if negative, count the trailing zeros. Set the adjusted
| exponent equal to the exponent from A1 and the zero count
| added if SM = 1 and subtracted if SM = 0. Scale the
| mantissa the equivalent of forcing in the bcd value:
|
| SM = 0 a non-zero digit in the integer position
| SM = 1 a non-zero digit in Mant0, lsd of the fraction
|
| this will insure that any value, regardless of its
| representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted
| consistently.
|
| A4. Calculate the factor 10^exp in FP1 using a table of
| 10^(2^n) values. To reduce the error in forming factors
| greater than 10^27, a directed rounding scheme is used with
| tables rounded to RN, RM, and RP, according to the table
| in the comments of the pwrten section.
|
| A5. Form the final binary number by scaling the mantissa by
| the exponent factor. This is done by multiplying the
| mantissa in FP0 by the factor in FP1 if the adjusted
| exponent sign is positive, and dividing FP0 by FP1 if
| it is negative.
|
| Clean up and return. Check if the final mul or div resulted
| in an inex2 exception. If so, set inex1 in the fpsr and
| check if the inex1 exception is enabled. If so, set d7 upper
| word to $0100. This will signal unimp.sa that an enabled inex1
| exception occurred. Unimp will fix the stack.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|DECBIN idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|
| PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
| to nearest, minus, and plus, respectively. The tables include
| 10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}. No rounding
| is required until the power is greater than 27, however, all
| tables include the first 5 for ease of indexing.
|
|xref PTENRN
|xref PTENRM
|xref PTENRP
|
| RTABLE: 16-entry map from the index {FPCR[6],FPCR[5],SM,SE} (built in
| pwrten below) to the rounding-mode bits used while computing the
| 10^exp scale factor.  Values use the FPCR rounding-mode encoding:
| 0 = round-to-nearest, 2 = round-to-minus, 3 = round-to-plus
| (see the PWRTEN rounding-mode table in the comments above pwrten).
RTABLE: .byte 0,0,0,0
	.byte 2,3,2,3
	.byte 2,3,3,2
	.byte 3,2,2,3
	.global decbin
	.global calc_e
	.global pwrten
	.global calc_m
	.global norm
	.global ap_st_z
	.global ap_st_n
|
| FNIBS/FSTRT: dbf-style digit count (7 -> 8 digits) and starting bit
| offset for each mantissa longword; ESTRT/EDIGITS: starting bit offset
| and dbf count (2 -> 3 digits) for the exponent field of lword 1.
	.set FNIBS,7
	.set FSTRT,0
|
	.set ESTRT,4
	.set EDIGITS,2 |
|
| Constants in single precision
FZERO:	.long 0x00000000
FONE:	.long 0x3F800000
FTEN:	.long 0x41200000
	.set TEN,10
|
|
| decbin: entry point.  Converts the packed-bcd value in ETEMP(a6)
| (working copy kept in FP_SCR1) to an extended-precision binary value
| returned in fp0, per the algorithm in the header comments above.
| d2-d5 are saved here and restored in end_dec before the rts.
|
decbin:
|	fmovel	#0,FPCR		;clr real fpcr
	moveml	%d2-%d5,-(%a7)
|
| Calculate exponent:
|  1. Copy bcd value in memory for use as a working copy.
|  2. Calculate absolute value of exponent in d1 by mul and add.
|  3. Correct for exponent sign.
|  4. Subtract 16 to compensate for interpreting the mant as all integer digits.
|     (i.e., all digits assumed left of the decimal point.)
|
| Register usage:
|
|  calc_e:
|	(*)  d0: temp digit storage
|	(*)  d1: accumulator for binary exponent
|	(*)  d2: digit count
|	(*)  d3: offset pointer
|	( )  d4: first word of bcd
|	( )  a0: pointer to working bcd value
|	( )  a6: pointer to original bcd value
|	(*)  FP_SCR1: working copy of original bcd value
|	(*)  L_SCR1: copy of original exponent word
|
calc_e:
	movel	#EDIGITS,%d2	|# of nibbles (digits) in fraction part
	moveql	#ESTRT,%d3	|counter to pick up digits
	leal	FP_SCR1(%a6),%a0 |load tmp bcd storage address
	movel	ETEMP(%a6),(%a0) |save input bcd value
	movel	ETEMP_HI(%a6),4(%a0) |save words 2 and 3
	movel	ETEMP_LO(%a6),8(%a0) |and work with these
	movel	(%a0),%d4	|get first word of bcd
	clrl	%d1		|zero d1 for accumulator
e_gd:
	mulul	#TEN,%d1	|mul partial product by one digit place
	bfextu	%d4{%d3:#4},%d0	|get the digit and zero extend into d0
	addl	%d0,%d1		|d1 = d1 + d0
	addqb	#4,%d3		|advance d3 to the next digit
	dbf	%d2,e_gd	|if we have used all 3 digits, exit loop
	btst	#30,%d4		|get SE
	beqs	e_pos		|don't negate if pos
	negl	%d1		|negate before subtracting
e_pos:
	subl	#16,%d1		|sub to compensate for shift of mant
	bges	e_save		|if still pos, do not neg
	negl	%d1		|now negative, make pos and set SE
	orl	#0x40000000,%d4	|set SE in d4,
	orl	#0x40000000,(%a0) |and in working bcd
e_save:
	movel	%d1,L_SCR1(%a6)	|save exp in memory
|
|
| Calculate mantissa:
| 1. Calculate absolute value of mantissa in fp0 by mul and add.
| 2. Correct for mantissa sign.
| (i.e., all digits assumed left of the decimal point.)
|
| Register usage:
|
| calc_m:
| (*) d0: temp digit storage
| (*) d1: lword counter
| (*) d2: digit count
| (*) d3: offset pointer
| ( ) d4: words 2 and 3 of bcd
| ( ) a0: pointer to working bcd value
| ( ) a6: pointer to original bcd value
| (*) fp0: mantissa accumulator
| ( ) FP_SCR1: working copy of original bcd value
| ( ) L_SCR1: copy of original exponent word
|
|
| calc_m: accumulate the 17 bcd mantissa digits into fp0 by repeated
| multiply-by-ten and add (integer digit first, then the two fraction
| longwords), then give fp0 the sign of the bcd mantissa (SM, bit 31
| of the first working longword).  Falls through to ap_st_z.
|
calc_m:
	moveql	#1,%d1		|word counter, init to 1
	fmoves	FZERO,%fp0	|accumulator
|
|
| Since the packed number has a long word between the first & second parts,
| get the integer digit then skip down & get the rest of the
| mantissa.  We will unroll the loop once.
|
	bfextu	(%a0){#28:#4},%d0 |integer part is ls digit in long word
	faddb	%d0,%fp0	|add digit to sum in fp0
|
|
| Get the rest of the mantissa.
|
loadlw:
	movel	(%a0,%d1.L*4),%d4 |load mantissa longword into d4
	moveql	#FSTRT,%d3	|counter to pick up digits
	moveql	#FNIBS,%d2	|reset number of digits per a0 ptr
md2b:
	fmuls	FTEN,%fp0	|fp0 = fp0 * 10
	bfextu	%d4{%d3:#4},%d0	|get the digit and zero extend
	faddb	%d0,%fp0	|fp0 = fp0 + digit
|
|
| If all the digits (8) in that long word have been converted (d2=0),
| then inc d1 (=2) to point to the next long word and reset d3 to 0
| to initialize the digit offset, and set d2 to 7 for the digit count;
| else continue with this long word.
|
	addqb	#4,%d3		|advance d3 to the next digit
	dbf	%d2,md2b	|check for last digit in this lw
nextlw:
	addql	#1,%d1		|inc lw pointer in mantissa
	cmpl	#2,%d1		|test for last lw
	ble	loadlw		|if not, get last one
|
| Check the sign of the mant and make the value in fp0 the same sign.
|
m_sign:
	btst	#31,(%a0)	|test sign of the mantissa
	beq	ap_st_z		|if clear, go to append/strip zeros
	fnegx	%fp0		|if set, negate fp0
|
| Append/strip zeros:
|
| For adjusted exponents which have an absolute value greater than 27*,
| this routine calculates the amount needed to normalize the mantissa
| for the adjusted exponent. That number is subtracted from the exp
| if the exp was positive, and added if it was negative. The purpose
| of this is to reduce the value of the exponent and the possibility
| of error in calculation of pwrten.
|
| 1. Branch on the sign of the adjusted exponent.
| 2p.(positive exp)
| 2. Check M16 and the digits in lwords 2 and 3 in descending order.
| 3. Add one for each zero encountered until a non-zero digit.
| 4. Subtract the count from the exp.
| 5. Check if the exp has crossed zero in #3 above; make the exp abs
| and set SE.
| 6. Multiply the mantissa by 10**count.
| 2n.(negative exp)
| 2. Check the digits in lwords 3 and 2 in descending order.
| 3. Add one for each zero encountered until a non-zero digit.
| 4. Add the count to the exp.
| 5. Check if the exp has crossed zero in #3 above; clear SE.
| 6. Divide the mantissa by 10**count.
|
| *Why 27? If the adjusted exponent is within -28 < expA < 28, than
| any adjustment due to append/strip zeros will drive the resultant
| exponent towards zero. Since all pwrten constants with a power
| of 27 or less are exact, there is no need to use this routine to
| attempt to lessen the resultant exponent.
|
| Register usage:
|
| ap_st_z:
| (*) d0: temp digit storage
| (*) d1: zero count
| (*) d2: digit count
| (*) d3: offset pointer
| ( ) d4: first word of bcd
| (*) d5: lword counter
| ( ) a0: pointer to working bcd value
| ( ) FP_SCR1: working copy of original bcd value
| ( ) L_SCR1: copy of original exponent word
|
|
| First check the absolute value of the exponent to see if this
| routine is necessary. If so, then check the sign of the exponent
| and do append (+) or strip (-) zeros accordingly.
| This section handles a positive adjusted exponent.
|
|
| ap_st_z: positive-adjusted-exponent side of append/strip zeros (see
| the long description above).  Skips straight to pwrten for
| abs(expA) <= 27.  Otherwise counts leading zero digits (M16, then
| lwords 2 and 3), subtracts the count from the exponent, and scales
| fp0 up by 10^count using the round-to-nearest table PTENRN.
|
ap_st_z:
	movel	L_SCR1(%a6),%d1	|load expA for range test
	cmpl	#27,%d1		|test is with 27
	ble	pwrten		|if abs(expA) <28, skip ap/st zeros
	btst	#30,(%a0)	|check sign of exp
	bne	ap_st_n		|if neg, go to neg side
	clrl	%d1		|zero count reg
	movel	(%a0),%d4	|load lword 1 to d4
	bfextu	%d4{#28:#4},%d0	|get M16 in d0
	bnes	ap_p_fx		|if M16 is non-zero, go fix exp
	addql	#1,%d1		|inc zero count
	moveql	#1,%d5		|init lword counter
	movel	(%a0,%d5.L*4),%d4 |get lword 2 to d4
	bnes	ap_p_cl		|if non-zero, go scan its digits
	addql	#8,%d1		|lw 2 is all zero: count its 8 digits
	addql	#1,%d5		|inc lword counter
	movel	(%a0,%d5.L*4),%d4 |get lword 3 to d4
ap_p_cl:
	clrl	%d3		|init offset reg
	moveql	#7,%d2		|init digit counter
ap_p_gd:
	bfextu	%d4{%d3:#4},%d0	|get digit
	bnes	ap_p_fx		|if non-zero, go to fix exp
	addql	#4,%d3		|point to next digit
	addql	#1,%d1		|inc digit counter
	dbf	%d2,ap_p_gd	|get next digit
ap_p_fx:
	movel	%d1,%d0		|copy zero count to d0
	movel	L_SCR1(%a6),%d1	|get adjusted exp from memory
	subl	%d0,%d1		|subtract count from exp
	bges	ap_p_fm		|if still pos, go to pwrten
	negl	%d1		|now its neg; get abs
	movel	(%a0),%d4	|load lword 1 to d4
	orl	#0x40000000,%d4	| and set SE in d4
	orl	#0x40000000,(%a0) | and in memory
|
| Calculate the mantissa multiplier to compensate for the striping of
| zeros from the mantissa.
|
ap_p_fm:
	movel	#PTENRN,%a1	|get address of power-of-ten table
	clrl	%d3		|init table index
	fmoves	FONE,%fp1	|init fp1 to 1
	moveql	#3,%d2		|init d2 to count bits in counter
ap_p_el:
	asrl	#1,%d0		|shift lsb into carry
	bccs	ap_p_en		|if bit clear, skip this factor
	fmulx	(%a1,%d3),%fp1	|mul by 10**(d3_bit_no)
ap_p_en:
	addl	#12,%d3		|inc d3 to next table entry (12-byte ext.)
	tstl	%d0		|check if d0 is zero
	bnes	ap_p_el		|if not, get next bit
	fmulx	%fp1,%fp0	|mul mantissa by 10**(no_bits_shifted)
	bra	pwrten		|go calc pwrten
|
| This section handles a negative adjusted exponent.
|
|
| ap_st_n: negative-adjusted-exponent side of append/strip zeros.
| Counts trailing zero digits (scanning lword 3 backwards, then
| lword 2), subtracts the count from abs(exp), and scales fp0 down by
| 10^count using PTENRN.  Falls through to pwrten.
|
ap_st_n:
	clrl	%d1		|clr counter
	moveql	#2,%d5		|set up d5 to point to lword 3
	movel	(%a0,%d5.L*4),%d4 |get lword 3
	bnes	ap_n_cl		|if not zero, check digits
	subl	#1,%d5		|dec d5 to point to lword 2
	addql	#8,%d1		|inc counter by 8
	movel	(%a0,%d5.L*4),%d4 |get lword 2
ap_n_cl:
	movel	#28,%d3		|point to last digit
	moveql	#7,%d2		|init digit counter
ap_n_gd:
	bfextu	%d4{%d3:#4},%d0	|get digit
	bnes	ap_n_fx		|if non-zero, go to exp fix
	subql	#4,%d3		|point to previous digit
	addql	#1,%d1		|inc digit counter
	dbf	%d2,ap_n_gd	|get next digit
ap_n_fx:
	movel	%d1,%d0		|copy zero count to d0
	movel	L_SCR1(%a6),%d1	|get adjusted exp from memory
	subl	%d0,%d1		|subtract count from exp
	bgts	ap_n_fm		|if still pos, go fix mantissa
	negl	%d1		|take abs of exp and clr SE
	movel	(%a0),%d4	|load lword 1 to d4
	andl	#0xbfffffff,%d4	| and clr SE in d4
	andl	#0xbfffffff,(%a0) | and in memory
|
| Calculate the mantissa multiplier to compensate for the appending of
| zeros to the mantissa.
|
ap_n_fm:
	movel	#PTENRN,%a1	|get address of power-of-ten table
	clrl	%d3		|init table index
	fmoves	FONE,%fp1	|init fp1 to 1
	moveql	#3,%d2		|init d2 to count bits in counter
ap_n_el:
	asrl	#1,%d0		|shift lsb into carry
	bccs	ap_n_en		|if bit clear, skip this factor
	fmulx	(%a1,%d3),%fp1	|mul by 10**(d3_bit_no)
ap_n_en:
	addl	#12,%d3		|inc d3 to next table entry (12-byte ext.)
	tstl	%d0		|check if d0 is zero
	bnes	ap_n_el		|if not, get next bit
	fdivx	%fp1,%fp0	|div mantissa by 10**(no_bits_shifted)
|
|
| Calculate power-of-ten factor from adjusted and shifted exponent.
|
| Register usage:
|
| pwrten:
| (*) d0: temp
| ( ) d1: exponent
| (*) d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
| (*) d3: FPCR work copy
| ( ) d4: first word of bcd
| (*) a1: RTABLE pointer
| calc_p:
| (*) d0: temp
| ( ) d1: exponent
| (*) d3: PWRTxx table index
| ( ) a0: pointer to working copy of bcd
| (*) a1: PWRTxx pointer
| (*) fp1: power-of-ten accumulator
|
| Pwrten calculates the exponent factor in the selected rounding mode
| according to the following table:
|
| Sign of Mant Sign of Exp Rounding Mode PWRTEN Rounding Mode
|
| ANY ANY RN RN
|
| + + RP RP
| - + RP RM
| + - RP RM
| - - RP RP
|
| + + RM RM
| - + RM RP
| + - RM RP
| - - RM RM
|
| + + RZ RM
| - + RZ RM
| + - RZ RP
| - - RZ RP
|
|
|
| pwrten: select the correctly-rounded power-of-ten table (PTENRN /
| PTENRM / PTENRP) via RTABLE, indexed by {FPCR[6],FPCR[5],SM,SE}
| (see the rounding-mode table in the comments above), then in calc_p
| build 10^abs(exp) in fp1 by multiplying together the table's
| 10^(2^n) factors selected by the exponent's set bits.
| Falls through to norm.
|
pwrten:
	movel	USER_FPCR(%a6),%d3 |get user's FPCR
	bfextu	%d3{#26:#2},%d2	|isolate rounding mode bits
	movel	(%a0),%d4	|reload 1st bcd word to d4
	asll	#2,%d2		|format d2 to be
	bfextu	%d4{#0:#2},%d0	| {FPCR[6],FPCR[5],SM,SE}
	addl	%d0,%d2		|in d2 as index into RTABLE
	leal	RTABLE,%a1	|load rtable base
	moveb	(%a1,%d2),%d0	|load new rounding bits from table
	clrl	%d3		|clear d3 to force no exc and extended
	bfins	%d0,%d3{#26:#2}	|stuff new rounding bits in FPCR
	fmovel	%d3,%FPCR	|write new FPCR
	asrl	#1,%d0		|write correct PTENxx table
	bccs	not_rp		|to a1
	leal	PTENRP,%a1	|it is RP
	bras	calc_p		|go to init section
not_rp:
	asrl	#1,%d0		|keep checking
	bccs	not_rm
	leal	PTENRM,%a1	|it is RM
	bras	calc_p		|go to init section
not_rm:
	leal	PTENRN,%a1	|it is RN
calc_p:
	movel	%d1,%d0		|copy exp to d0;use d0
	bpls	no_neg		|if exp is negative,
	negl	%d0		|invert it
	orl	#0x40000000,(%a0) |and set SE bit
no_neg:
	clrl	%d3		|table index
	fmoves	FONE,%fp1	|init fp1 to 1
e_loop:
	asrl	#1,%d0		|shift next bit into carry
	bccs	e_next		|if zero, skip the mul
	fmulx	(%a1,%d3),%fp1	|mul by 10**(d3_bit_no)
e_next:
	addl	#12,%d3		|inc d3 to next table entry (12-byte ext.)
	tstl	%d0		|check if d0 is zero
	bnes	e_loop		|not zero, continue shifting
|
|
| Check the sign of the adjusted exp and make the value in fp0 the
| same sign. If the exp was pos then multiply fp1*fp0;
| else divide fp0/fp1.
|
| Register Usage:
| norm:
| ( ) a0: pointer to working bcd value
| (*) fp0: mantissa accumulator
| ( ) fp1: scaling factor - 10**(abs(exp))
|
|
| norm: apply the scale factor fp1 = 10^abs(exp) to the mantissa in
| fp0 - multiply if the adjusted exponent was positive (SE clear),
| divide if negative (SE set).  end_dec then converts a resulting
| inex2 into inex1/ainex in USER_FPSR and returns with the result
| in fp0, restoring d2-d5 saved at decbin entry.
|
norm:
	btst	#30,(%a0)	|test the sign of the exponent
	beqs	mul		|if clear, go to multiply
div:
	fdivx	%fp1,%fp0	|exp is negative, so divide mant by exp
	bras	end_dec
mul:
	fmulx	%fp1,%fp0	|exp is positive, so multiply by exp
|
|
| Clean up and return with result in fp0.
|
| If the final mul/div in decbin incurred an inex exception,
| it will be inex2, but will be reported as inex1 by get_op.
|
end_dec:
	fmovel	%FPSR,%d0	|get status register
	bclrl	#inex2_bit+8,%d0 |test for inex2 and clear it
	fmovel	%d0,%FPSR	|return status reg w/o inex2
	beqs	no_exc		|skip this if no exc
	orl	#inx1a_mask,USER_FPSR(%a6) |set inex1/ainex
no_exc:
	moveml	(%a7)+,%d2-%d5
	rts
|end
|
| AirFortressIlikara/LS2K0300-linux-4.19
| 1,974
|
| arch/m68k/fpsp040/sto_res.S
|
|
| sto_res.sa 3.1 12/10/90
|
| Takes the result and puts it in where the user expects it.
| Library functions return result in fp0. If fp0 is not the
| users destination register then fp0 is moved to the
| correct floating-point destination register. fp0 and fp1
| are then restored to the original contents.
|
| Input: result in fp0,fp1
|
| d2 & a0 should be kept unmodified
|
| Output: moves the result to the true destination reg or mem
|
| Modifies: destination floating point register
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
STO_RES: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
	.global sto_cos
|
| sto_cos: store fp1 to the "cos destination" register encoded in
| CMDREG1B bits {13:3}.  fp0-fp3 are shadowed in the USER_FPx save
| area and written there directly; fp4-fp7 are still live in the FPU,
| so they are written via fmovem with a dynamic register mask
| (bit 7-n selects fpn in the dynamic-mask encoding).
|
sto_cos:
	bfextu	CMDREG1B(%a6){#13:#3},%d0 |extract cos destination
	cmpib	#3,%d0		|check for fp0/fp1 cases
	bles	c_fp0123
	fmovemx	%fp1-%fp1,-(%a7)
	moveql	#7,%d1
	subl	%d0,%d1		|d1 = 7- (dest. reg. no.)
	clrl	%d0
	bsetl	%d1,%d0		|d0 is dynamic register mask
	fmovemx	(%a7)+,%d0
	rts
c_fp0123:
	cmpib	#0,%d0
	beqs	c_is_fp0
	cmpib	#1,%d0
	beqs	c_is_fp1
	cmpib	#2,%d0
	beqs	c_is_fp2
c_is_fp3:
	fmovemx %fp1-%fp1,USER_FP3(%a6)
	rts
c_is_fp2:
	fmovemx %fp1-%fp1,USER_FP2(%a6)
	rts
c_is_fp1:
	fmovemx %fp1-%fp1,USER_FP1(%a6)
	rts
c_is_fp0:
	fmovemx %fp1-%fp1,USER_FP0(%a6)
	rts
	.global sto_res
|
| sto_res: store the emulation result in fp0 to the destination
| register encoded in CMDREG1B bits {6:3}.  Same scheme as sto_cos:
| fp0-fp3 go to their USER_FPx shadow slots; fp4-fp7 are written into
| the live FPU via fmovem with a dynamic register mask.
|
sto_res:
	bfextu	CMDREG1B(%a6){#6:#3},%d0 |extract destination register
	cmpib	#3,%d0		|check for fp0/fp1 cases
	bles	fp0123
	fmovemx	%fp0-%fp0,-(%a7)
	moveql	#7,%d1
	subl	%d0,%d1		|d1 = 7- (dest. reg. no.)
	clrl	%d0
	bsetl	%d1,%d0		|d0 is dynamic register mask
	fmovemx	(%a7)+,%d0
	rts
fp0123:
	cmpib	#0,%d0
	beqs	is_fp0
	cmpib	#1,%d0
	beqs	is_fp1
	cmpib	#2,%d0
	beqs	is_fp2
is_fp3:
	fmovemx %fp0-%fp0,USER_FP3(%a6)
	rts
is_fp2:
	fmovemx %fp0-%fp0,USER_FP2(%a6)
	rts
is_fp1:
	fmovemx %fp0-%fp0,USER_FP1(%a6)
	rts
is_fp0:
	fmovemx %fp0-%fp0,USER_FP0(%a6)
	rts
|end
|
| AirFortressIlikara/LS2K0300-linux-4.19
| 13,176
|
| arch/m68k/fpsp040/gen_except.S
|
|
| gen_except.sa 3.7 1/16/92
|
| gen_except --- FPSP routine to detect reportable exceptions
|
| This routine compares the exception enable byte of the
| user_fpcr on the stack with the exception status byte
| of the user_fpsr.
|
| Any routine which may report an exceptions must load
| the stack frame in memory with the exceptional operand(s).
|
| Priority for exceptions is:
|
| Highest: bsun
| snan
| operr
| ovfl
| unfl
| dz
| inex2
| Lowest: inex1
|
| Note: The IEEE standard specifies that inex2 is to be
| reported if ovfl occurs and the ovfl enable bit is not
| set but the inex2 enable bit is.
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
GEN_EXCEPT: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref real_trace
|xref fpsp_done
|xref fpsp_fmt_error
|
| exc_tbl: jump table indexed by the first enabled-and-pending
| exception bit found by the bfffo in gen_except (priority order per
| the header: bsun, snan, operr, ovfl, unfl, dz, inex2, inex1).
| Entry 8 (no_match) is reached when no enabled exception is pending.
|
exc_tbl:
	.long	bsun_exc
	.long	commonE1
	.long	commonE1
	.long	ovfl_unfl
	.long	ovfl_unfl
	.long	commonE1
	.long	commonE3
	.long	commonE3
	.long	no_match
	.global gen_except
|
| gen_except: dispatch on the fsave frame type at (a7).  Idle frames
| go to do_check, unimp frames ($28/$30) to unimp_x; anything else
| must be a busy frame, handled inline here: copy ETEMP and CMDREG1B
| from the unimp frame (a6-relative) into the busy frame (a1-relative),
| rewrite the cmd1b bit layout into cmd3b form, and merge the
| emulation FPSR into USER_FPSR and the frame's FPSR shadow.
|
gen_except:
	cmpib	#IDLE_SIZE-4,1(%a7) |test for idle frame
	beq	do_check	|go handle idle frame
	cmpib	#UNIMP_40_SIZE-4,1(%a7) |test for orig unimp frame
	beqs	unimp_x		|go handle unimp frame
	cmpib	#UNIMP_41_SIZE-4,1(%a7) |test for rev unimp frame
	beqs	unimp_x		|go handle unimp frame
	cmpib	#BUSY_SIZE-4,1(%a7) |if size <> $60, fmt error
	bnel	fpsp_fmt_error
	leal	BUSY_SIZE+LOCAL_SIZE(%a7),%a1 |init a1 so fpsp.h
|					;equates will work
| Fix up the new busy frame with entries from the unimp frame
|
	movel	ETEMP_EX(%a6),ETEMP_EX(%a1) |copy etemp from unimp
	movel	ETEMP_HI(%a6),ETEMP_HI(%a1) |frame to busy frame
	movel	ETEMP_LO(%a6),ETEMP_LO(%a1)
	movel	CMDREG1B(%a6),CMDREG1B(%a1) |set inst in frame to unimp
	movel	CMDREG1B(%a6),%d0 |fix cmd1b to make it
	andl	#0x03c30000,%d0	|work for cmd3b
	bfextu	CMDREG1B(%a6){#13:#1},%d1 |extract bit 2
	lsll	#5,%d1
	swap	%d1
	orl	%d1,%d0		|put it in the right place
	bfextu	CMDREG1B(%a6){#10:#3},%d1 |extract bit 3,4,5
	lsll	#2,%d1
	swap	%d1
	orl	%d1,%d0		|put them in the right place
	movel	%d0,CMDREG3B(%a1) |in the busy frame
|
| Or in the FPSR from the emulation with the USER_FPSR on the stack.
|
	fmovel	%FPSR,%d0
	orl	%d0,USER_FPSR(%a6)
	movel	USER_FPSR(%a6),FPSR_SHADOW(%a1) |set exc bits
	orl	#sx_mask,E_BYTE(%a1)
	bra	do_clean
|
| Frame is an unimp frame possible resulting from an fmove <ea>,fp0
| that caused an exception
|
| a1 is modified to point into the new frame allowing fpsp equates
| to be valid.
|
|
| unimp_x: handle an unimp fsave frame (orig $28 or rev $30 size, see
| comments above).  Sets a1 past the frame so the fpsp.h equates are
| valid, copies CMDREG1B into the new frame, and merges the emulation
| FPSR into USER_FPSR before joining do_clean.
|
unimp_x:
	cmpib	#UNIMP_40_SIZE-4,1(%a7) |test for orig unimp frame
	bnes	test_rev
	leal	UNIMP_40_SIZE+LOCAL_SIZE(%a7),%a1
	bras	unimp_con
test_rev:
	cmpib	#UNIMP_41_SIZE-4,1(%a7) |test for rev unimp frame
	bnel	fpsp_fmt_error	|if not $28 or $30
	leal	UNIMP_41_SIZE+LOCAL_SIZE(%a7),%a1
unimp_con:
|
| Fix up the new unimp frame with entries from the old unimp frame
|
	movel	CMDREG1B(%a6),CMDREG1B(%a1) |set inst in frame to unimp
|
| Or in the FPSR from the emulation with the USER_FPSR on the stack.
|
	fmovel	%FPSR,%d0
	orl	%d0,USER_FPSR(%a6)
	bra	do_clean
|
| Frame is idle, so check for exceptions reported through
| USER_FPSR and set the unimp frame accordingly.
| A7 must be incremented to the point before the
| idle fsave vector to the unimp vector.
|
|
| do_check: idle-frame path.  Pops the idle fsave word, merges the
| emulation FPSR into USER_FPSR, clears the nmnexc/nmcexc bits where
| the frame type requires it, then ANDs the FPCR enable byte with the
| FPSR exception byte and dispatches through exc_tbl on the
| highest-priority enabled pending exception (bfffo finds the first
| set bit; index 8 = none set -> no_match).
|
do_check:
	addl	#4,%a7		|point A7 back to unimp frame
|
| Or in the FPSR from the emulation with the USER_FPSR on the stack.
|
	fmovel	%FPSR,%d0
	orl	%d0,USER_FPSR(%a6)
|
| On a busy frame, we must clear the nmnexc bits.
|
	cmpib	#BUSY_SIZE-4,1(%a7) |check frame type
	bnes	check_fr	|if busy, clr nmnexc
	clrw	NMNEXC(%a6)	|clr nmnexc & nmcexc
	btstb	#5,CMDREG1B(%a6) |test for fmove out
	bnes	frame_com
	movel	USER_FPSR(%a6),FPSR_SHADOW(%a6) |set exc bits
	orl	#sx_mask,E_BYTE(%a6)
	bras	frame_com
check_fr:
	cmpb	#UNIMP_40_SIZE-4,1(%a7)
	beqs	frame_com
	clrw	NMNEXC(%a6)
frame_com:
	moveb	FPCR_ENABLE(%a6),%d0 |get fpcr enable byte
	andb	FPSR_EXCEPT(%a6),%d0 |and in the fpsr exc byte
	bfffo	%d0{#24:#8},%d1	|test for first set bit
	leal	exc_tbl,%a0	|load jmp table address
	subib	#24,%d1		|normalize bit offset to 0-8
	movel	(%a0,%d1.w*4),%a0 |load routine address based
|					;based on first enabled exc
	jmp	(%a0)		|jump to routine
|
| Bsun is not possible in unimp or unsupp
|
|
| Exception-report targets of exc_tbl.  bsun cannot occur for
| unimp/unsupp (per comment above), so bsun_exc just cleans up.
|
bsun_exc:
	bra	do_clean
|
| The typical work to be done to the unimp frame to report an
| exception is to set the E1/E3 byte and clr the U flag.
| commonE1 does this for E1 exceptions, which are snan,
| operr, and dz.  commonE3 does this for E3 exceptions, which
| are inex2 and inex1, and also clears the E1 exception bit
| left over from the unimp exception.  In the unsupported-data
| (unsE3) case the cmd1b word is additionally rewritten into
| cmd3b bit layout (same transform as in gen_except above).
|
commonE1:
	bsetb	#E1,E_BYTE(%a6)	|set E1 flag
	bra	commonE		|go clean and exit
commonE3:
	tstb	UFLG_TMP(%a6)	|test flag for unsup/unimp state
	bnes	unsE3
uniE3:
	bsetb	#E3,E_BYTE(%a6)	|set E3 flag
	bclrb	#E1,E_BYTE(%a6)	|clr E1 from unimp
	bra	commonE
unsE3:
	tstb	RES_FLG(%a6)
	bnes	unsE3_0
unsE3_1:
	bsetb	#E3,E_BYTE(%a6)	|set E3 flag
unsE3_0:
	bclrb	#E1,E_BYTE(%a6)	|clr E1 flag
	movel	CMDREG1B(%a6),%d0
	andl	#0x03c30000,%d0	|work for cmd3b
	bfextu	CMDREG1B(%a6){#13:#1},%d1 |extract bit 2
	lsll	#5,%d1
	swap	%d1
	orl	%d1,%d0		|put it in the right place
	bfextu	CMDREG1B(%a6){#10:#3},%d1 |extract bit 3,4,5
	lsll	#2,%d1
	swap	%d1
	orl	%d1,%d0		|put them in the right place
	movel	%d0,CMDREG3B(%a6) |in the busy frame
commonE:
	bclrb	#UFLAG,T_BYTE(%a6) |clr U flag from unimp
	bra	do_clean	|go clean and exit
|
| No bits in the enable byte match existing exceptions. Check for
| the case of the ovfl exc without the ovfl enabled, but with
| inex2 enabled.
|
no_match:
	btstb	#inex2_bit,FPCR_ENABLE(%a6) |check for ovfl/inex2 case
	beqs	no_exc		|if clear, exit
	btstb	#ovfl_bit,FPSR_EXCEPT(%a6) |now check ovfl
	beqs	no_exc		|if clear, exit
	bras	ovfl_unfl	|go to unfl_ovfl to determine if
|					;it is an unsupp or unimp exc
| No exceptions are to be reported.  If the instruction was
| unimplemented, no FPU restore is necessary.  If it was
| unsupported, we must perform the restore.
no_exc:
	tstb	UFLG_TMP(%a6)	|test flag for unsupp/unimp state
	beqs	uni_no_exc
uns_no_exc:
	tstb	RES_FLG(%a6)	|check if frestore is needed
	bne	do_clean	|if clear, no frestore needed
uni_no_exc:
	moveml	USER_DA(%a6),%d0-%d1/%a0-%a1
	fmovemx	USER_FP0(%a6),%fp0-%fp3
	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
	unlk	%a6
	bra	finish_up
|
| Unsupported Data Type Handler:
| Ovfl:
| An fmoveout that results in an overflow is reported this way.
| Unfl:
| An fmoveout that results in an underflow is reported this way.
|
| Unimplemented Instruction Handler:
| Ovfl:
| Only scosh, setox, ssinh, stwotox, and scale can set overflow in
| this manner.
| Unfl:
| Stwotox, setox, and scale can set underflow in this manner.
| Any of the other Library Routines such that f(x)=x in which
| x is an extended denorm can report an underflow exception.
| It is the responsibility of the exception-causing exception
| to make sure that WBTEMP is correct.
|
| The exceptional operand is in FP_SCR1.
|
|
| ovfl_unfl: report an overflow/underflow exceptional operand (see the
| long comments above).  Unsupported-data + CU_ONLY: operand goes to
| FPTEMP with E1 set.  Otherwise (ofuf_con) the frame is grown to a
| busy frame if needed, the exceptional operand from FP_SCR1 is
| written to WBTEMP, E3 set / E1 cleared, and cmd1b rewritten into
| cmd3b layout (same transform as in gen_except above).
|
ovfl_unfl:
	tstb	UFLG_TMP(%a6)	|test flag for unsupp/unimp state
	beqs	ofuf_con
|
| The caller was from an unsupported data type trap.  Test if the
| caller set CU_ONLY.  If so, the exceptional operand is expected in
| FPTEMP, rather than WBTEMP.
|
	tstb	CU_ONLY(%a6)	|test if inst is cu-only
	beq	unsE3
|	move.w	#$fe,CU_SAVEPC(%a6)
	clrb	CU_SAVEPC(%a6)
	bsetb	#E1,E_BYTE(%a6)	|set E1 exception flag
	movew	ETEMP_EX(%a6),FPTEMP_EX(%a6)
	movel	ETEMP_HI(%a6),FPTEMP_HI(%a6)
	movel	ETEMP_LO(%a6),FPTEMP_LO(%a6)
	bsetb	#fptemp15_bit,DTAG(%a6)	|set fpte15
	bclrb	#UFLAG,T_BYTE(%a6) |clr U flag from unimp
	bra	do_clean	|go clean and exit
ofuf_con:
	moveb	(%a7),VER_TMP(%a6) |save version number
	cmpib	#BUSY_SIZE-4,1(%a7) |check for busy frame
	beqs	busy_fr		|already busy: no need to grow
	cmpib	#VER_40,(%a7)	|test for orig unimp frame
	bnes	try_41		|if not, test for rev frame
	moveql	#13,%d0		|need to zero 14 lwords
	bras	ofuf_fin
try_41:
	cmpib	#VER_41,(%a7)	|test for rev unimp frame
	bnel	fpsp_fmt_error	|if neither, exit with error
	moveql	#11,%d0		|need to zero 12 lwords
ofuf_fin:
	clrl	(%a7)
loop1:
	clrl	-(%a7)		|clear and dec a7
	dbra	%d0,loop1
	moveb	VER_TMP(%a6),(%a7)
	moveb	#BUSY_SIZE-4,1(%a7) |write busy fmt word.
busy_fr:
	movel	FP_SCR1(%a6),WBTEMP_EX(%a6) |write
	movel	FP_SCR1+4(%a6),WBTEMP_HI(%a6) |exceptional op to
	movel	FP_SCR1+8(%a6),WBTEMP_LO(%a6) |wbtemp
	bsetb	#E3,E_BYTE(%a6)	|set E3 flag
	bclrb	#E1,E_BYTE(%a6)	|make sure E1 is clear
	bclrb	#UFLAG,T_BYTE(%a6) |clr U flag
	movel	USER_FPSR(%a6),FPSR_SHADOW(%a6)
	orl	#sx_mask,E_BYTE(%a6)
	movel	CMDREG1B(%a6),%d0 |fix cmd1b to make it
	andl	#0x03c30000,%d0	|work for cmd3b
	bfextu	CMDREG1B(%a6){#13:#1},%d1 |extract bit 2
	lsll	#5,%d1
	swap	%d1
	orl	%d1,%d0		|put it in the right place
	bfextu	CMDREG1B(%a6){#10:#3},%d1 |extract bit 3,4,5
	lsll	#2,%d1
	swap	%d1
	orl	%d1,%d0		|put them in the right place
	movel	%d0,CMDREG3B(%a6) |in the busy frame
|
| Check if the frame to be restored is busy or unimp.
|** NOTE *** Bug fix for errata (0d43b #3)
| If the frame is unimp, we must create a busy frame to
| fix the bug with the nmnexc bits in cases in which they
| are set by a previous instruction and not cleared by
| the save. The frame will be unimp only if the final
| instruction in an emulation routine caused the exception
| by doing an fmove <ea>,fp0. The exception operand, in
| internal format, is in fptemp.
|
|
| do_clean: common exit.  If the frame to restore is an unimp frame
| (orig $28 / rev $30), grow it into a zeroed busy frame (errata
| workaround described above), fill WBTEMP from FP_SCR1, set nmcexc
| from the snan/operr/ovfl/unfl bits of USER_FPSR, then restore the
| user's integer/FP registers and frestore the frame.  bug1384 is
| invoked afterwards when RES_FLG indicates a continuation frame.
|
do_clean:
	cmpib	#UNIMP_40_SIZE-4,1(%a7)
	bnes	do_con
	moveql	#13,%d0		|in orig, need to zero 14 lwords
	bras	do_build
do_con:
	cmpib	#UNIMP_41_SIZE-4,1(%a7)
	bnes	do_restore	|frame must be busy
	moveql	#11,%d0		|in rev, need to zero 12 lwords
do_build:
	moveb	(%a7),VER_TMP(%a6)
	clrl	(%a7)
loop2:
	clrl	-(%a7)		|clear and dec a7
	dbra	%d0,loop2
|
| Use a1 as pointer into new frame.  a6 is not correct if an unimp or
| busy frame was created as the result of an exception on the final
| instruction of an emulation routine.
|
| We need to set the nmcexc bits if the exception is E1. Otherwise,
| the exc taken will be inex2.
|
	leal	BUSY_SIZE+LOCAL_SIZE(%a7),%a1 |init a1 for new frame
	moveb	VER_TMP(%a6),(%a7) |write busy fmt word
	moveb	#BUSY_SIZE-4,1(%a7)
	movel	FP_SCR1(%a6),WBTEMP_EX(%a1) |write
	movel	FP_SCR1+4(%a6),WBTEMP_HI(%a1) |exceptional op to
	movel	FP_SCR1+8(%a6),WBTEMP_LO(%a1) |wbtemp
|	btst.b	#E1,E_BYTE(%a1)
|	beq.b	do_restore
	bfextu	USER_FPSR(%a6){#17:#4},%d0 |get snan/operr/ovfl/unfl bits
	bfins	%d0,NMCEXC(%a1){#4:#4} |and insert them in nmcexc
	movel	USER_FPSR(%a6),FPSR_SHADOW(%a1) |set exc bits
	orl	#sx_mask,E_BYTE(%a1)
do_restore:
	moveml	USER_DA(%a6),%d0-%d1/%a0-%a1
	fmovemx	USER_FP0(%a6),%fp0-%fp3
	fmoveml	USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
	frestore (%a7)+
	tstb	RES_FLG(%a6)	|RES_FLG indicates a "continuation" frame
	beq	cont
	bsr	bug1384
cont:
	unlk	%a6
|
| If trace mode enabled, then go to trace handler. This handler
| cannot have any fp instructions. If there are fp inst's and an
| exception has been restored into the machine then the exception
| will occur upon execution of the fp inst. This is not desirable
| in the kernel (supervisor mode). See MC68040 manual Section 9.3.8.
|
|
| finish_up: after unlk, test the T1/T0 trace bits in the stacked SR;
| if either is set, rewrite the integer exception frame to look like a
| trace frame and enter the trace handler, else leave via fpsp_done.
|
finish_up:
	btstb	#7,(%a7)	|test T1 in SR
	bnes	g_trace
	btstb	#6,(%a7)	|test T0 in SR
	bnes	g_trace
	bral	fpsp_done
|
| Change integer stack to look like trace stack
| The address of the instruction that caused the
| exception is already in the integer stack (is
| the same as the saved friar)
|
| If the current frame is already a 6-word stack then all
| that needs to be done is to change the vector# to TRACE.
| If the frame is only a 4-word stack (meaning we got here
| on an Unsupported data type exception), then we need to grow
| the stack an extra 2 words and get the FPIAR from the FPU.
|
g_trace:
	bftst	EXC_VEC-4(%sp){#0:#4}
	bne	g_easy
	subw	#4,%sp		| make room
	movel	4(%sp),(%sp)
	movel	8(%sp),4(%sp)
	subw	#BUSY_SIZE,%sp
	fsave	(%sp)
	fmovel	%fpiar,BUSY_SIZE+EXC_EA-4(%sp)
	frestore (%sp)
	addw	#BUSY_SIZE,%sp
g_easy:
	movew	#TRACE_VEC,EXC_VEC-4(%a7)
	bral	real_trace
|
| This is a work-around for hardware bug 1384.
|
|
| bug1384: hardware-bug 1384 work-around (see comment above).  fsave
| the current state; for $40/$41-version idle frames, replace the
| saved frame with a zeroed unimp frame of the matching format word
| before frestore.  Non-idle frames just get etemp15 cleared in the
| caller's frame (-40(%a5) reaches back through the link).  Later
| mask revisions (> $41) need no fix.  Clobbers only the frame; d1 is
| saved/restored through USER_D1.
|
bug1384:
	link	%a5,#0
	fsave	-(%sp)
	cmpib	#0x41,(%sp)	| check for correct frame
	beq	frame_41
	bgt	nofix		| if more advanced mask, do nada
frame_40:
	tstb	1(%sp)		| check to see if idle
	bne	notidle
idle40:
	clrl	(%sp)		| get rid of old fsave frame
	movel	%d1,USER_D1(%a6) | save d1
	movew	#8,%d1		| place unimp frame instead
loop40:	clrl	-(%sp)
	dbra	%d1,loop40
	movel	USER_D1(%a6),%d1 | restore d1
	movel	#0x40280000,-(%sp)
	frestore (%sp)+
	unlk	%a5
	rts
frame_41:
	tstb	1(%sp)		| check to see if idle
	bne	notidle
idle41:
	clrl	(%sp)		| get rid of old fsave frame
	movel	%d1,USER_D1(%a6) | save d1
	movew	#10,%d1		| place unimp frame instead
loop41:	clrl	-(%sp)
	dbra	%d1,loop41
	movel	USER_D1(%a6),%d1 | restore d1
	movel	#0x41300000,-(%sp)
	frestore (%sp)+
	unlk	%a5
	rts
notidle:
	bclrb	#etemp15_bit,-40(%a5)
	frestore (%sp)+
	unlk	%a5
	rts
nofix:
	frestore (%sp)+
	unlk	%a5
	rts
|end
|
| AirFortressIlikara/LS2K0300-linux-4.19
| 2,014
|
| arch/m68k/fpsp040/x_unimp.S
|
|
| x_unimp.sa 3.3 7/1/91
|
| fpsp_unimp --- FPSP handler for unimplemented instruction
| exception.
|
| Invoked when the user program encounters a floating-point
| op-code that hardware does not support. Trap vector# 11
| (See table 8-1 MC68030 User's Manual).
|
|
| Note: An fsave for an unimplemented inst. will create a short
| fsave stack.
|
| Input: 1. Six word stack frame for unimplemented inst, four word
| for illegal
| (See table 8-7 MC68030 User's Manual).
| 2. Unimp (short) fsave state frame created here by fsave
| instruction.
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
X_UNIMP: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref get_op
|xref do_func
|xref sto_res
|xref gen_except
|xref fpsp_fmt_error
	.global fpsp_unimp
	.global uni_2
|
| fpsp_unimp: unimplemented-instruction exception entry (vector 11).
| Builds the LOCAL_SIZE work area on the stack, fsaves the short unimp
| frame, shadows d0-d1/a0-a1, fp0-fp3 and fpcr/fpsr/fpiar in the work
| area, validates the frame version ($4x), clears the FPSR exception/
| condition bytes (D25B fix below), then emulates via get_op/do_func,
| stores the result with sto_res unless STORE_FLG is set, and exits
| through gen_except.
|
fpsp_unimp:
	link	%a6,#-LOCAL_SIZE
	fsave	-(%a7)
uni_2:
	moveml	%d0-%d1/%a0-%a1,USER_DA(%a6)
	fmovemx	%fp0-%fp3,USER_FP0(%a6)
	fmoveml	%fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
	moveb	(%a7),%d0	|test for valid version num
	andib	#0xf0,%d0	|test for $4x
	cmpib	#VER_4,%d0	|must be $4x or exit
	bnel	fpsp_fmt_error
|
| Temporary D25B Fix
| The following lines are used to ensure that the FPSR
| exception byte and condition codes are clear before proceeding
|
	movel	USER_FPSR(%a6),%d0
	andl	#0xFF00FF,%d0	|clear all but accrued exceptions
	movel	%d0,USER_FPSR(%a6)
	fmovel	#0,%FPSR	|clear all user bits
	fmovel	#0,%FPCR	|clear all user exceptions for FPSP
	clrb	UFLG_TMP(%a6)	|clr flag for unsupp data
	bsrl	get_op		|go get operand(s)
	clrb	STORE_FLG(%a6)
	bsrl	do_func		|do the function
	fsave	-(%a7)		|capture possible exc state
	tstb	STORE_FLG(%a6)
	bnes	no_store	|if STORE_FLG is set, no store
	bsrl	sto_res		|store the result in user space
no_store:
	bral	gen_except	|post any exceptions and return
|end
|
| AirFortressIlikara/LS2K0300-linux-4.19
| 4,302
|
| arch/m68k/fpsp040/binstr.S
|
|
| binstr.sa 3.3 12/19/90
|
|
| Description: Converts a 64-bit binary integer to bcd.
|
| Input: 64-bit binary integer in d2:d3, desired length (LEN) in
| d0, and a pointer to start in memory for bcd characters
| in a0. (This pointer must point to byte 4 of the first
| lword of the packed decimal memory string.)
|
| Output: LEN bcd digits representing the 64-bit integer.
|
| Algorithm:
| The 64-bit binary is assumed to have a decimal point before
| bit 63. The fraction is multiplied by 10 using a mul by 2
| shift and a mul by 8 shift. The bits shifted out of the
| msb form a decimal digit. This process is iterated until
| LEN digits are formed.
|
| A1. Init d7 to 1. D7 is the byte digit counter, and if 1, the
| digit formed will be assumed the least significant. This is
| to force the first byte formed to have a 0 in the upper 4 bits.
|
| A2. Beginning of the loop:
| Copy the fraction in d2:d3 to d4:d5.
|
| A3. Multiply the fraction in d2:d3 by 8 using bit-field
| extracts and shifts. The three msbs from d2 will go into
| d1.
|
| A4. Multiply the fraction in d4:d5 by 2 using shifts. The msb
| will be collected by the carry.
|
| A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5
| into d2:d3. D1 will contain the bcd digit formed.
|
| A6. Test d7. If zero, the digit formed is the ms digit. If non-
| zero, it is the ls digit. Put the digit in its place in the
| upper word of d0. If it is the ls digit, write the word
| from d0 to memory.
|
| A7. Decrement d6 (LEN counter) and repeat the loop until zero.
|
| Implementation Notes:
|
| The registers are used as follows:
|
| d0: LEN counter
| d1: temp used to form the digit
| d2: upper 32-bits of fraction for mul by 8
| d3: lower 32-bits of fraction for mul by 8
| d4: upper 32-bits of fraction for mul by 2
| d5: lower 32-bits of fraction for mul by 2
| d6: temp for bit-field extracts
| d7: byte digit formation word;digit count {0,1}
| a0: pointer into memory for packed bcd string formation
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|BINSTR idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
	.global	binstr
| binstr --- convert the 64-bit binary fraction in d2:d3 into d0(=LEN)
| packed-bcd digits written through (a0)+.  All data registers are
| saved on entry and restored on exit.  (Full algorithm description is
| in the file header above.)
binstr:
	moveml	%d0-%d7,-(%a7)
|
| A1: Init d7 (d7a = LEN/digit-pair state, d7b = digit being formed)
|
	moveql	#1,%d7		|init d7 for second digit
	subql	#1,%d0		|for dbf d0 would have LEN+1 passes
|
| A2. Copy d2:d3 to d4:d5. Start loop.
|
loop:
	movel	%d2,%d4		|copy the fraction before muls
	movel	%d3,%d5		|to d4:d5
|
| A3. Multiply d2:d3 by 8; extract msbs into d1.
|
	bfextu	%d2{#0:#3},%d1	|copy 3 msbs of d2 into d1
	asll	#3,%d2		|shift d2 left by 3 places
	bfextu	%d3{#0:#3},%d6	|copy 3 msbs of d3 into d6
	asll	#3,%d3		|shift d3 left by 3 places
	orl	%d6,%d2		|or in msbs from d3 into d2
|
| A4. Multiply d4:d5 by 2; add carry out to d1.
|
	asll	#1,%d5		|mul d5 by 2
	roxll	#1,%d4		|mul d4 by 2
	swap	%d6		|put 0 in d6 lower word
	addxw	%d6,%d1		|add in extend from mul by 2
|
| A5. Add mul by 8 to mul by 2. D1 contains the digit formed.
|
	addl	%d5,%d3		|add lower 32 bits
	nop			|ERRATA ; FIX #13 (Rev. 1.2 6/6/90)
	addxl	%d4,%d2		|add with extend upper 32 bits
	nop			|ERRATA ; FIX #13 (Rev. 1.2 6/6/90)
	addxw	%d6,%d1		|add in extend from add to d1
	swap	%d6		|with d6 = 0; put 0 in upper word
|
| A6. Test d7 and branch: zero -> this is the first (upper) digit of
| the byte; non-zero -> second digit, so pack the byte and write it.
|
	tstw	%d7		|if zero, store digit & to loop
	beqs	first_d		|if non-zero, form byte & write
sec_d:
	swap	%d7		|bring first digit to word d7b
	aslw	#4,%d7		|first digit in upper 4 bits d7b
	addw	%d1,%d7		|add in ls digit to d7b
	moveb	%d7,(%a0)+	|store d7b byte in memory
	swap	%d7		|put LEN counter in word d7a
	clrw	%d7		|set d7a to signal no digits done
	dbf	%d0,loop	|do loop some more!
	bras	end_bstr	|finished, so exit
first_d:
	swap	%d7		|put digit word in d7b
	movew	%d1,%d7		|put new digit in d7b
	swap	%d7		|put LEN counter in word d7a
	addqw	#1,%d7		|set d7a to signal first digit done
	dbf	%d0,loop	|do loop some more!
	swap	%d7		|put last digit in string
	lslw	#4,%d7		|move it to upper 4 bits
	moveb	%d7,(%a0)+	|store it in memory string
|
| Restore all data registers and return.  (No fp registers are
| touched by this routine.)
|
end_bstr:
	moveml	(%a7)+,%d0-%d7
	rts
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 16,764
|
arch/m68k/fpsp040/round.S
|
|
| round.sa 3.4 7/29/91
|
| handle rounding and normalization tasks
|
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|ROUND idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|
| round --- round result according to precision/mode
|
| a0 points to the input operand in the internal extended format
| d1(high word) contains rounding precision:
| ext = $0000xxxx
| sgl = $0001xxxx
| dbl = $0002xxxx
| d1(low word) contains rounding mode:
| RN = $xxxx0000
| RZ = $xxxx0001
| RM = $xxxx0010
| RP = $xxxx0011
| d0{31:29} contains the g,r,s bits (extended)
|
| On return the value pointed to by a0 is correctly rounded,
| a0 is preserved and the g-r-s bits in d0 are cleared.
| The result is not typed - the tag field is invalid. The
| result is still in the internal extended format.
|
| The INEX bit of USER_FPSR will be set if the rounded result was
| inexact (i.e. if any of the g-r-s bits were set).
|
	.global	round
| round --- round the internal-extended operand at (a0) according to
| the precision (d1 high word) and mode (d1 low word); g,r,s arrive in
| d0{31:29}.  See the file header above for the full contract.
round:
| If g=r=s=0 then result is exact and round is done, else set
| the inex flag in status reg and continue.
|
	bsrs	ext_grs		|this subroutine looks at the
|				;rounding precision and sets
|				;the appropriate g-r-s bits.
	tstl	%d0		|if grs are zero, go force
	bne	rnd_cont	|lower bits to zero for size
	swap	%d1		|set up d1.w for round prec.
	bra	truncate
rnd_cont:
|
| Use rounding mode as an index into a jump table for these modes.
|
	orl	#inx2a_mask,USER_FPSR(%a6) |set inex2/ainex
	lea	mode_tab,%a1
	movel	(%a1,%d1.w*4),%a1
	jmp	(%a1)
|
| Jump table indexed by rounding mode in d1.w. All following assumes
| grs != 0.
|
mode_tab:
	.long	rnd_near
	.long	rnd_zero
	.long	rnd_mnus
	.long	rnd_plus
|
| ROUND PLUS INFINITY
|
| If sign of fp number = 0 (positive), then add 1 to l.
|
rnd_plus:
	swap	%d1		|set up d1 for round prec.
	tstb	LOCAL_SGN(%a0)	|check for sign
	bmi	truncate	|if negative then truncate
	movel	#0xffffffff,%d0	|force g,r,s to be all f's
	lea	add_to_l,%a1
	movel	(%a1,%d1.w*4),%a1
	jmp	(%a1)
|
| ROUND MINUS INFINITY
|
| If sign of fp number = 1 (negative), then add 1 to l.
|
rnd_mnus:
	swap	%d1		|set up d1 for round prec.
	tstb	LOCAL_SGN(%a0)	|check for sign
	bpl	truncate	|if positive then truncate
	movel	#0xffffffff,%d0	|force g,r,s to be all f's
	lea	add_to_l,%a1
	movel	(%a1,%d1.w*4),%a1
	jmp	(%a1)
|
| ROUND ZERO
|
| Always truncate.
rnd_zero:
	swap	%d1		|set up d1 for round prec.
	bra	truncate
|
|
| ROUND NEAREST
|
| If (g=1), then add 1 to l and if (r=s=0), then clear l
| Note that this will round to even in case of a tie.
|
rnd_near:
	swap	%d1		|set up d1 for round prec.
	asll	#1,%d0		|shift g-bit to c-bit
	bcc	truncate	|if g=0 then truncate (no round up)
	lea	add_to_l,%a1
	movel	(%a1,%d1.w*4),%a1
	jmp	(%a1)
|
| ext_grs --- extract guard, round and sticky bits
|
| Input: d1 = PREC:ROUND
| Output: d0{31:29}= guard, round, sticky
|
| The ext_grs extract the guard/round/sticky bits according to the
| selected rounding precision. It is called by the round subroutine
| only. All registers except d0 are kept intact. d0 becomes an
| updated guard,round,sticky in d0{31:29}
|
| Notes: the ext_grs uses the round PREC, and therefore has to swap d1
| prior to usage, and needs to restore d1 to original.
|
| ext_grs --- gather guard/round/sticky for the selected precision
| into d0{31:29}; only d0 is modified (d1 is swapped and restored).
ext_grs:
	swap	%d1		|have d1.w point to round precision
	cmpiw	#0,%d1		|extended precision: grs already in d0
	bnes	sgl_or_dbl
	bras	end_ext_grs
sgl_or_dbl:
	moveml	%d2/%d3,-(%a7)	|make some temp registers
	cmpiw	#1,%d1
	bnes	grs_dbl
grs_sgl:
	bfextu	LOCAL_HI(%a0){#24:#2},%d3	|sgl prec. g-r are 2 bits right
	movel	#30,%d2		|of the sgl prec. limits
	lsll	%d2,%d3		|shift g-r bits to MSB of d3
	movel	LOCAL_HI(%a0),%d2	|get word 2 for s-bit test
	andil	#0x0000003f,%d2	|s bit is the or of all other
	bnes	st_stky		|bits to the right of g-r
	tstl	LOCAL_LO(%a0)	|test lower mantissa
	bnes	st_stky		|if any are set, set sticky
	tstl	%d0		|test original g,r,s
	bnes	st_stky		|if any are set, set sticky
	bras	end_sd		|if words 3 and 4 are clr, exit
grs_dbl:
	bfextu	LOCAL_LO(%a0){#21:#2},%d3	|dbl-prec. g-r are 2 bits right
	movel	#30,%d2		|of the dbl prec. limits
	lsll	%d2,%d3		|shift g-r bits to the MSB of d3
	movel	LOCAL_LO(%a0),%d2	|get lower mantissa for s-bit test
	andil	#0x000001ff,%d2	|s bit is the or-ing of all
	bnes	st_stky		|other bits to the right of g-r
	tstl	%d0		|test word original g,r,s
	bnes	st_stky		|if any are set, set sticky
	bras	end_sd		|if clear, exit
st_stky:
	bset	#rnd_stky_bit,%d3
end_sd:
	movel	%d3,%d0		|return grs to d0
	moveml	(%a7)+,%d2/%d3	|restore scratch registers
end_ext_grs:
	swap	%d1		|restore d1 to original
	rts
|******************* Local Equates
	.set	ad_1_sgl,0x00000100	| constant to add 1 to l-bit in sgl prec
	.set	ad_1_dbl,0x00000800	| constant to add 1 to l-bit in dbl prec
|Jump table for adding 1 to the l-bit indexed by rnd prec
add_to_l:
	.long	add_ext
	.long	add_sgl
	.long	add_dbl
	.long	add_dbl
|
| ADD SINGLE --- add 1 to the sgl l-bit; on mantissa overflow shift
| right and bump the exponent; round-to-even clears l when r=s=0.
|
add_sgl:
	addl	#ad_1_sgl,LOCAL_HI(%a0)
	bccs	scc_clr		|no mantissa overflow
	roxrw	LOCAL_HI(%a0)	|shift v-bit back in
	roxrw	LOCAL_HI+2(%a0)	|shift v-bit back in
	addw	#0x1,LOCAL_EX(%a0)	|and incr exponent
scc_clr:
	tstl	%d0		|test for rs = 0
	bnes	sgl_done
	andiw	#0xfe00,LOCAL_HI+2(%a0)	|clear the l-bit (round to even)
sgl_done:
	andil	#0xffffff00,LOCAL_HI(%a0)	|truncate bits beyond sgl limit
	clrl	LOCAL_LO(%a0)	|clear the lower mantissa lword
	rts
|
| ADD EXTENDED --- add 1 to the ext l-bit with carry propagation
| through both mantissa lwords.
|
add_ext:
	addql	#1,LOCAL_LO(%a0)	|add 1 to l-bit
	bccs	xcc_clr		|test for carry out
	addql	#1,LOCAL_HI(%a0)	|propagate carry
	bccs	xcc_clr
	roxrw	LOCAL_HI(%a0)	|mant is 0 so restore v-bit
	roxrw	LOCAL_HI+2(%a0)	|mant is 0 so restore v-bit
	roxrw	LOCAL_LO(%a0)
	roxrw	LOCAL_LO+2(%a0)
	addw	#0x1,LOCAL_EX(%a0)	|and inc exp
xcc_clr:
	tstl	%d0		|test rs = 0
	bnes	add_ext_done
	andib	#0xfe,LOCAL_LO+3(%a0)	|clear the l bit (round to even)
add_ext_done:
	rts
|
| ADD DOUBLE --- same as above for dbl precision.
|
add_dbl:
	addl	#ad_1_dbl,LOCAL_LO(%a0)
	bccs	dcc_clr
	addql	#1,LOCAL_HI(%a0)	|propagate carry
	bccs	dcc_clr
	roxrw	LOCAL_HI(%a0)	|mant is 0 so restore v-bit
	roxrw	LOCAL_HI+2(%a0)	|mant is 0 so restore v-bit
	roxrw	LOCAL_LO(%a0)
	roxrw	LOCAL_LO+2(%a0)
	addw	#0x1,LOCAL_EX(%a0)	|incr exponent
dcc_clr:
	tstl	%d0		|test for rs = 0
	bnes	dbl_done
	andiw	#0xf000,LOCAL_LO+2(%a0)	|clear the l-bit (round to even)
dbl_done:
	andil	#0xfffff800,LOCAL_LO(%a0)	|truncate bits beyond dbl limit
	rts
error:
	rts
|
| Truncate all other bits: dispatch on rounding precision in d1.w
| (ext needs no masking; sgl/dbl reuse the masking tails above).
|
trunct:
	.long	end_rnd
	.long	sgl_done
	.long	dbl_done
	.long	dbl_done
truncate:
	lea	trunct,%a1
	movel	(%a1,%d1.w*4),%a1
	jmp	(%a1)
end_rnd:
	rts
|
| NORMALIZE
|
| These routines (nrm_zero & nrm_set) normalize the unnorm. This
| is done by shifting the mantissa left while decrementing the
| exponent.
|
| NRM_SET shifts and decrements until there is a 1 set in the integer
| bit of the mantissa (msb in d1).
|
| NRM_ZERO shifts and decrements until there is a 1 set in the integer
| bit of the mantissa (msb in d1) unless this would mean the exponent
| would go less than 0. In that case the number becomes a denorm - the
| exponent (d0) is set to 0 and the mantissa (d1 & d2) is not
| normalized.
|
| Note that both routines have been optimized (for the worst case) and
| therefore do not have the easy to follow decrement/shift loop.
|
| NRM_ZERO
|
| Distance to first 1 bit in mantissa = X
| Distance to 0 from exponent = Y
| If X < Y
| Then
| nrm_set
| Else
| shift mantissa by Y
| set exponent = 0
|
|input:
| FP_SCR1 = exponent, ms mantissa part, ls mantissa part
|output:
| L_SCR1{4} = fpte15 or ete15 bit
|
	.global	nrm_zero
| nrm_zero --- normalize unless that would drive the exponent below 0;
| in that case shift by the exponent instead and produce a denorm with
| exponent 0.  See the header comments above for the X/Y notation.
nrm_zero:
	movew	LOCAL_EX(%a0),%d0
	cmpw	#64,%d0		|see if exp > 64
	bmis	d0_less
	bsr	nrm_set		|exp > 64 so exp won't exceed 0
	rts
d0_less:
	moveml	%d2/%d3/%d5/%d6,-(%a7)
	movel	LOCAL_HI(%a0),%d1
	movel	LOCAL_LO(%a0),%d2
	bfffo	%d1{#0:#32},%d3	|get the distance to the first 1
|				;in ms mant
	beqs	ms_clr		|branch if no bits were set
	cmpw	%d3,%d0		|if X>Y
	bmis	greater		|then exp will go past 0 (neg) if
|				;it is just shifted
	bsr	nrm_set		|else exp won't go past 0
	moveml	(%a7)+,%d2/%d3/%d5/%d6
	rts
greater:
	movel	%d2,%d6		|save ls mant in d6
	lsll	%d0,%d2		|shift ls mant by count
	lsll	%d0,%d1		|shift ms mant by count
	movel	#32,%d5
	subl	%d0,%d5		|make op a denorm by shifting bits
	lsrl	%d5,%d6		|by the number in the exp, then
|				;set exp = 0.
	orl	%d6,%d1		|shift the ls mant bits into the ms mant
	movel	#0,%d0		|same as if decremented exp to 0
|				;while shifting
	movew	%d0,LOCAL_EX(%a0)
	movel	%d1,LOCAL_HI(%a0)
	movel	%d2,LOCAL_LO(%a0)
	moveml	(%a7)+,%d2/%d3/%d5/%d6
	rts
ms_clr:
	bfffo	%d2{#0:#32},%d3	|check if any bits set in ls mant
	beqs	all_clr		|branch if none set
	addw	#32,%d3		|account for the 32 zero ms-mant bits
	cmpw	%d3,%d0		|if X>Y
	bmis	greater		|then branch
	bsr	nrm_set		|else exp won't go past 0
	moveml	(%a7)+,%d2/%d3/%d5/%d6
	rts
all_clr:
	movew	#0,LOCAL_EX(%a0)	|no mantissa bits set. Set exp = 0.
	moveml	(%a7)+,%d2/%d3/%d5/%d6
	rts
|
| NRM_SET
|
	.global	nrm_set
| nrm_set --- shift the mantissa left until the integer (j) bit is 1,
| decrementing the exponent by the shift count.  Clobbers d0/d1;
| d6/d7 are saved and restored.
nrm_set:
	movel	%d7,-(%a7)
	bfffo	LOCAL_HI(%a0){#0:#32},%d7	|find first 1 in ms mant; count in d7
	beqs	lower		|branch if ms mant is all 0's
	movel	%d6,-(%a7)
	subw	%d7,LOCAL_EX(%a0)	|sub exponent by count
	movel	LOCAL_HI(%a0),%d0	|d0 has ms mant
	movel	LOCAL_LO(%a0),%d1	|d1 has ls mant
	lsll	%d7,%d0		|shift first 1 to j bit position
	movel	%d1,%d6		|copy ls mant into d6
	lsll	%d7,%d6		|shift ls mant by count
	movel	%d6,LOCAL_LO(%a0)	|store ls mant into memory
	moveql	#32,%d6
	subl	%d7,%d6		|continue shift
	lsrl	%d6,%d1		|shift off all bits but those that will
|				;be shifted into ms mant
	orl	%d1,%d0		|shift the ls mant bits into the ms mant
	movel	%d0,LOCAL_HI(%a0)	|store ms mant into memory
	moveml	(%a7)+,%d7/%d6	|restore registers
	rts
|
| We get here if ms mant was = 0, and we assume ls mant has bits
| set (otherwise this would have been tagged a zero not a denorm).
|
lower:
	movew	LOCAL_EX(%a0),%d0	|d0 has exponent
	movel	LOCAL_LO(%a0),%d1	|d1 has ls mant
	subw	#32,%d0		|account for ms mant being all zeros
	bfffo	%d1{#0:#32},%d7	|find first 1 in ls mant; count in d7
	subw	%d7,%d0		|subtract shift count from exp
	lsll	%d7,%d1		|shift first 1 to integer bit in ms mant
	movew	%d0,LOCAL_EX(%a0)	|store exp
	movel	%d1,LOCAL_HI(%a0)	|store ms mant
	clrl	LOCAL_LO(%a0)	|clear ls mant
	movel	(%a7)+,%d7
	rts
|
| denorm --- denormalize an intermediate result
|
| Used by underflow.
|
| Input:
| a0 points to the operand to be denormalized
| (in the internal extended format)
|
| d0: rounding precision
| Output:
| a0 points to the denormalized result
| (in the internal extended format)
|
| d0 is guard,round,sticky
|
| d0 comes into this routine with the rounding precision. It
| is then loaded with the denormalized exponent threshold for the
| rounding precision.
|
	.global	denorm
| denorm --- denormalize the intermediate result at (a0) down to the
| threshold for the rounding precision in d0 (0=ext, 1=sgl, 2=dbl).
| Returns g,r,s in d0 and sets inex2/ainex when bits were lost.
denorm:
	btstb	#6,LOCAL_EX(%a0)	|check for exponents between $7fff-$4000
	beqs	no_sgn_ext
	bsetb	#7,LOCAL_EX(%a0)	|sign extend if it is so
no_sgn_ext:
	cmpib	#0,%d0		|if 0 then extended precision
	bnes	not_ext		|else branch
	clrl	%d1		|load d1 with ext threshold (0)
	clrl	%d0		|clear the sticky flag
	bsr	dnrm_lp		|denormalize the number
	tstb	%d1		|check for inex
	beq	no_inex		|if clr, no inex
	bras	dnrm_inex	|if set, set inex
not_ext:
	cmpil	#1,%d0		|if 1 then single precision
	beqs	load_sgl	|else must be 2, double prec
load_dbl:
	movew	#dbl_thresh,%d1	|put copy of threshold in d1
	movel	%d1,%d0		|copy d1 into d0
	subw	LOCAL_EX(%a0),%d0	|diff = threshold - exp
	cmpw	#67,%d0		|if diff > 67 (mant + grs bits)
	bpls	chk_stky	|then branch (all bits would be
|				; shifted off in denorm routine)
	clrl	%d0		|else clear the sticky flag
	bsr	dnrm_lp		|denormalize the number
	tstb	%d1		|check flag
	beqs	no_inex		|if clr, no inex
	bras	dnrm_inex	|if set, set inex
load_sgl:
	movew	#sgl_thresh,%d1	|put copy of threshold in d1
	movel	%d1,%d0		|copy d1 into d0
	subw	LOCAL_EX(%a0),%d0	|diff = threshold - exp
	cmpw	#67,%d0		|if diff > 67 (mant + grs bits)
	bpls	chk_stky	|then branch (all bits would be
|				; shifted off in denorm routine)
	clrl	%d0		|else clear the sticky flag
	bsr	dnrm_lp		|denormalize the number
	tstb	%d1		|check flag
	beqs	no_inex		|if clr, no inex
	bras	dnrm_inex	|if set, set inex
|
| Everything shifts off: result is a zero at the threshold exponent;
| sticky is set if any mantissa bit was lost.
|
chk_stky:
	tstl	LOCAL_HI(%a0)	|check for any bits set
	bnes	set_stky
	tstl	LOCAL_LO(%a0)	|check for any bits set
	bnes	set_stky
	bras	clr_mant
set_stky:
	orl	#inx2a_mask,USER_FPSR(%a6)	|set inex2/ainex
	movel	#0x20000000,%d0	|set sticky bit in return value
clr_mant:
	movew	%d1,LOCAL_EX(%a0)	|load exp with threshold
	movel	#0,LOCAL_HI(%a0)	|zero the ms mantissa
	movel	#0,LOCAL_LO(%a0)	|zero the ls mantissa
	rts
dnrm_inex:
	orl	#inx2a_mask,USER_FPSR(%a6)	|set inex2/ainex
no_inex:
	rts
|
| dnrm_lp --- normalize exponent/mantissa to specified threshold
|
| Input:
| a0 points to the operand to be denormalized
| d0{31:29} initial guard,round,sticky
| d1{15:0} denormalization threshold
| Output:
| a0 points to the denormalized operand
| d0{31:29} final guard,round,sticky
| d1.b inexact flag: all ones means inexact result
|
| The LOCAL_LO and LOCAL_GRS parts of the value are copied to FP_SCR2
| so that bfext can be used to extract the new low part of the mantissa.
| Dnrm_lp can be called with a0 pointing to ETEMP or WBTEMP and there
| is no LOCAL_GRS scratch word following it on the fsave frame.
|
	.global	dnrm_lp
| dnrm_lp --- shift the mantissa at (a0) right until the exponent
| reaches the threshold in d1.w; returns final g,r,s in d0{31:29} and
| the inexact flag in d1.b.  See the header comments above.
dnrm_lp:
	movel	%d2,-(%sp)	|save d2 for temp use
	btstb	#E3,E_BYTE(%a6)	|test for type E3 exception
	beqs	not_E3		|not type E3 exception
	bfextu	WBTEMP_GRS(%a6){#6:#3},%d2	|extract guard,round, sticky bit
	movel	#29,%d0
	lsll	%d0,%d2		|shift g,r,s to their positions
	movel	%d2,%d0
not_E3:
	movel	(%sp)+,%d2	|restore d2
	movel	LOCAL_LO(%a0),FP_SCR2+LOCAL_LO(%a6)
	movel	%d0,FP_SCR2+LOCAL_GRS(%a6)
	movel	%d1,%d0		|copy the denorm threshold
	subw	LOCAL_EX(%a0),%d1	|d1 = threshold - uns exponent
	bles	no_lp		|d1 <= 0: nothing to shift
	cmpw	#32,%d1
	blts	case_1		|0 < d1 < 32
	cmpw	#64,%d1
	blts	case_2		|32 <= d1 < 64
	bra	case_3		|d1 >= 64
|
| No normalization necessary
|
no_lp:
	clrb	%d1		|set no inex2 reported
	movel	FP_SCR2+LOCAL_GRS(%a6),%d0	|restore original g,r,s
	rts
|
| case (0<d1<32): shift right by d1 within the 96-bit window
|
case_1:
	movel	%d2,-(%sp)
	movew	%d0,LOCAL_EX(%a0)	|exponent = denorm threshold
	movel	#32,%d0
	subw	%d1,%d0		|d0 = 32 - d1
	bfextu	LOCAL_EX(%a0){%d0:#32},%d2
	bfextu	%d2{%d1:%d0},%d2	|d2 = new LOCAL_HI
	bfextu	LOCAL_HI(%a0){%d0:#32},%d1	|d1 = new LOCAL_LO
	bfextu	FP_SCR2+LOCAL_LO(%a6){%d0:#32},%d0	|d0 = new G,R,S
	movel	%d2,LOCAL_HI(%a0)	|store new LOCAL_HI
	movel	%d1,LOCAL_LO(%a0)	|store new LOCAL_LO
	clrb	%d1
	bftst	%d0{#2:#30}
	beqs	c1nstky
	bsetl	#rnd_stky_bit,%d0
	st	%d1
c1nstky:
	movel	FP_SCR2+LOCAL_GRS(%a6),%d2	|restore original g,r,s
	andil	#0xe0000000,%d2	|clear all but G,R,S
	tstl	%d2		|test if original G,R,S are clear
	beqs	grs_clear
	orl	#0x20000000,%d0	|set sticky bit in d0
grs_clear:
	andil	#0xe0000000,%d0	|clear all but G,R,S
	movel	(%sp)+,%d2
	rts
|
| case (32<=d1<64): the old ms mantissa supplies the new g,r,s
|
case_2:
	movel	%d2,-(%sp)
	movew	%d0,LOCAL_EX(%a0)	|unsigned exponent = threshold
	subw	#32,%d1		|d1 now between 0 and 32
	movel	#32,%d0
	subw	%d1,%d0		|d0 = 32 - d1
	bfextu	LOCAL_EX(%a0){%d0:#32},%d2
	bfextu	%d2{%d1:%d0},%d2	|d2 = new LOCAL_LO
	bfextu	LOCAL_HI(%a0){%d0:#32},%d1	|d1 = new G,R,S
	bftst	%d1{#2:#30}
	bnes	c2_sstky	|bra if sticky bit to be set
	bftst	FP_SCR2+LOCAL_LO(%a6){%d0:#32}
	bnes	c2_sstky	|bra if sticky bit to be set
	movel	%d1,%d0
	clrb	%d1
	bras	end_c2
c2_sstky:
	movel	%d1,%d0
	bsetl	#rnd_stky_bit,%d0
	st	%d1
end_c2:
	clrl	LOCAL_HI(%a0)	|store LOCAL_HI = 0
	movel	%d2,LOCAL_LO(%a0)	|store LOCAL_LO
	movel	FP_SCR2+LOCAL_GRS(%a6),%d2	|restore original g,r,s
	andil	#0xe0000000,%d2	|clear all but G,R,S
	tstl	%d2		|test if original G,R,S are clear
	beqs	clear_grs
	orl	#0x20000000,%d0	|set sticky bit in d0
clear_grs:
	andil	#0xe0000000,%d0	|get rid of all but G,R,S
	movel	(%sp)+,%d2
	rts
|
| d1 >= 64 Force the exponent to be the denorm threshold with the
| correct sign.
|
case_3:
	movew	%d0,LOCAL_EX(%a0)
	tstw	LOCAL_SGN(%a0)
	bges	c3con
c3neg:
	orl	#0x80000000,LOCAL_EX(%a0)
c3con:
	cmpw	#64,%d1
	beqs	sixty_four
	cmpw	#65,%d1
	beqs	sixty_five
|
| Shift value is out of range. Set d1 for inex2 flag and
| return a zero with the given threshold.
|
	clrl	LOCAL_HI(%a0)
	clrl	LOCAL_LO(%a0)
	movel	#0x20000000,%d0
	st	%d1
	rts
sixty_four:
	movel	LOCAL_HI(%a0),%d0
	bfextu	%d0{#2:#30},%d1
	andil	#0xc0000000,%d0	|keep only the new g,r bits
	bras	c3com
sixty_five:
	movel	LOCAL_HI(%a0),%d0
	bfextu	%d0{#1:#31},%d1
	andil	#0x80000000,%d0
	lsrl	#1,%d0		|shift high bit into R bit
c3com:
	tstl	%d1
	bnes	c3ssticky
	tstl	LOCAL_LO(%a0)
	bnes	c3ssticky
	tstb	FP_SCR2+LOCAL_GRS(%a6)
	bnes	c3ssticky
	clrb	%d1
	bras	c3end
c3ssticky:
	bsetl	#rnd_stky_bit,%d0
	st	%d1
c3end:
	clrl	LOCAL_HI(%a0)
	clrl	LOCAL_LO(%a0)
	rts
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 13,813
|
arch/m68k/fpsp040/do_func.S
|
|
| do_func.sa 3.4 2/18/91
|
| Do_func performs the unimplemented operation. The operation
| to be performed is determined from the lower 7 bits of the
| extension word (except in the case of fmovecr and fsincos).
| The opcode and tag bits form an index into a jump table in
| tbldo.sa. Cases of zero, infinity and NaN are handled in
| do_func by forcing the default result. Normalized and
| denormalized (there are no unnormalized numbers at this
| point) are passed onto the emulation code.
|
| CMDREG1B and STAG are extracted from the fsave frame
| and combined to form the table index. The function called
| will start with a0 pointing to the ETEMP operand. Dyadic
| functions can find FPTEMP at -12(a0).
|
| Called functions return their result in fp0. Sincos returns
| sin(x) in fp0 and cos(x) in fp1.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
DO_FUNC: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref t_dz2
|xref t_operr
|xref t_inx2
|xref t_resdnrm
|xref dst_nan
|xref src_nan
|xref nrm_set
|xref sto_cos
|xref tblpre
|xref slognp1,slogn,slog10,slog2
|xref slognd,slog10d,slog2d
|xref smod,srem
|xref sscale
|xref smovcr
| Default constants in the internal extended format (exponent/sign
| lword, then two mantissa lwords), used by the forced-result paths.
PONE:	.long	0x3fff0000,0x80000000,0x00000000	|+1
MONE:	.long	0xbfff0000,0x80000000,0x00000000	|-1
PZERO:	.long	0x00000000,0x00000000,0x00000000	|+0
MZERO:	.long	0x80000000,0x00000000,0x00000000	|-0
PINF:	.long	0x7fff0000,0x00000000,0x00000000	|+inf
MINF:	.long	0xffff0000,0x00000000,0x00000000	|-inf
QNAN:	.long	0x7fff0000,0xffffffff,0xffffffff	|non-signaling nan
PPIBY2:	.long	0x3FFF0000,0xC90FDAA2,0x2168C235	|+PI/2
MPIBY2:	.long	0xbFFF0000,0xC90FDAA2,0x2168C235	|-PI/2
	.global	do_func
| do_func --- dispatch the unimplemented fp operation through the
| opcode/STAG jump table (tblpre, defined elsewhere); a0 is left
| pointing at the ETEMP source operand for the called routine.
do_func:
	clrb	CU_ONLY(%a6)
|
| Check for fmovecr. It does not follow the format of fp gen
| unimplemented instructions. The test is on the upper 6 bits;
| if they are $17, the inst is fmovecr. Call entry smovcr
| directly.
|
	bfextu	CMDREG1B(%a6){#0:#6},%d0 |get opclass and src fields
	cmpil	#0x17,%d0	|if op class and size fields are $17,
|				;it is FMOVECR; if not, continue
	bnes	not_fmovecr
	jmp	smovcr		|fmovecr; jmp directly to emulation
not_fmovecr:
	movew	CMDREG1B(%a6),%d0
	andl	#0x7F,%d0	|low 7 bits = extension (operation)
	cmpil	#0x38,%d0	|if the extension is >= $38,
	bge	serror		|it is illegal
	bfextu	STAG(%a6){#0:#3},%d1
	lsll	#3,%d0		|make room for STAG
	addl	%d1,%d0		|combine for final index into table
	leal	tblpre,%a1	|start of monster jump table
	movel	(%a1,%d0.w*4),%a1	|real target address
	leal	ETEMP(%a6),%a0	|a0 is pointer to src op
	movel	USER_FPCR(%a6),%d1
	andl	#0xFF,%d1	| discard all but rounding mode/prec
	fmovel	#0,%fpcr	|run emulation with a clean fpcr
	jmp	(%a1)
|
| ERROR
|
	.global	serror
| serror --- illegal extension field: mark the result as "do not
| store" (STORE_FLG set to all ones) and return.
serror:
	st	STORE_FLG(%a6)
	rts
|
| These routines load forced values into fp0. They are called
| by index into tbldo.
|
| Load a signed zero to fp0 and set inex2/ainex
|
	.global	snzrinx
| snzrinx --- load a signed zero to fp0 AND set inex2/ainex.
snzrinx:
	btstb	#sign_bit,LOCAL_EX(%a0)	|get sign of source operand
	bnes	ld_mzinx	|if negative, branch
	bsr	ld_pzero	|bsr so we can return and set inx
	bra	t_inx2		|now, set the inx for the next inst
ld_mzinx:
	bsr	ld_mzero	|if neg, load neg zero, return here
	bra	t_inx2		|now, set the inx for the next inst
|
| Load a signed zero to fp0; do not set inex2/ainex
|
	.global	szero
szero:
	btstb	#sign_bit,LOCAL_EX(%a0)	|get sign of source operand
	bne	ld_mzero	|if neg, load neg zero
	bra	ld_pzero	|load positive zero
|
| Load a signed infinity to fp0; do not set inex2/ainex
|
	.global	sinf
sinf:
	btstb	#sign_bit,LOCAL_EX(%a0)	|get sign of source operand
	bne	ld_minf		|if negative branch
	bra	ld_pinf
|
| Load a signed one to fp0; do not set inex2/ainex
|
	.global	sone
sone:
	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
	bne	ld_mone
	bra	ld_pone
|
| Load a signed pi/2 to fp0; do not set inex2/ainex
|
	.global	spi_2
spi_2:
	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
	bne	ld_mpi2
	bra	ld_ppi2
|
| Load either a +0 or +inf for plus/minus operand
|
	.global	szr_inf
szr_inf:
	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
	bne	ld_pzero	|minus operand -> +0
	bra	ld_pinf		|plus operand -> +inf
|
| Result is either an operr or +inf for plus/minus operand
| [Used by slogn, slognp1, slog10, and slog2]
|
	.global	sopr_inf
sopr_inf:
	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
	bne	t_operr		|minus operand -> operand error
	bra	ld_pinf		|plus operand -> +inf
|
| FLOGNP1
|
	.global	sslognp1
| sslognp1 --- FLOGNP1 front end: reject operands < -1 (operr),
| raise divide-by-zero for exactly -1, else emulate via slognp1.
sslognp1:
	fmovemx	(%a0),%fp0-%fp0
	fcmpb	#-1,%fp0
	fbgt	slognp1
	fbeq	t_dz2		|if = -1, divide by zero exception
	fmovel	#0,%FPSR	|clr N flag
	bra	t_operr		|take care of operands < -1
|
| FETOXM1 --- forced result for an infinite operand:
| e^(-inf)-1 = -1, e^(+inf)-1 = +inf.
|
	.global	setoxm1i
setoxm1i:
	btstb	#sign_bit,LOCAL_EX(%a0)	|check sign of source
	bne	ld_mone
	bra	ld_pinf
|
| FLOGN
|
| Test for 1.0 as an input argument, returning +zero. Also check
| the sign and return operr if negative.
|
	.global	sslogn
| sslogn --- FLOGN front end: operr for negative input; log(1.0) is
| forced to +0; everything else goes to the slogn emulation.
sslogn:
	btstb	#sign_bit,LOCAL_EX(%a0)
	bne	t_operr		|take care of operands < 0
	cmpiw	#0x3fff,LOCAL_EX(%a0)	|test for 1.0 input
	bne	slogn
	cmpil	#0x80000000,LOCAL_HI(%a0)
	bne	slogn
	tstl	LOCAL_LO(%a0)
	bne	slogn
	fmovex	PZERO,%fp0	|log(1.0) = +0 exactly
	rts
	.global	sslognd
| sslognd --- FLOGN of a denorm: operr if negative, else slognd.
sslognd:
	btstb	#sign_bit,LOCAL_EX(%a0)
	beq	slognd
	bra	t_operr		|take care of operands < 0
|
| FLOG10 --- same screening as FLOGN above.
|
	.global	sslog10
sslog10:
	btstb	#sign_bit,LOCAL_EX(%a0)
	bne	t_operr		|take care of operands < 0
	cmpiw	#0x3fff,LOCAL_EX(%a0)	|test for 1.0 input
	bne	slog10
	cmpil	#0x80000000,LOCAL_HI(%a0)
	bne	slog10
	tstl	LOCAL_LO(%a0)
	bne	slog10
	fmovex	PZERO,%fp0	|log10(1.0) = +0 exactly
	rts
	.global	sslog10d
sslog10d:
	btstb	#sign_bit,LOCAL_EX(%a0)
	beq	slog10d
	bra	t_operr		|take care of operands < 0
|
| FLOG2 --- same screening as FLOGN above.
|
	.global	sslog2
sslog2:
	btstb	#sign_bit,LOCAL_EX(%a0)
	bne	t_operr		|take care of operands < 0
	cmpiw	#0x3fff,LOCAL_EX(%a0)	|test for 1.0 input
	bne	slog2
	cmpil	#0x80000000,LOCAL_HI(%a0)
	bne	slog2
	tstl	LOCAL_LO(%a0)
	bne	slog2
	fmovex	PZERO,%fp0	|log2(1.0) = +0 exactly
	rts
	.global	sslog2d
sslog2d:
	btstb	#sign_bit,LOCAL_EX(%a0)
	beq	slog2d
	bra	t_operr		|take care of operands < 0
|
| FMOD
|
| FMOD dispatch table, indexed by (dtag<<2)|stag after denorms are
| aliased to norms.
pmodt:
|				;$21 fmod
|				;dtag,stag
	.long	smod		|  00,00  norm,norm = normal
	.long	smod_oper	|  00,01  norm,zero = nan with operr
	.long	smod_fpn	|  00,10  norm,inf = fpn
	.long	smod_snan	|  00,11  norm,nan = nan
	.long	smod_zro	|  01,00  zero,norm = +-zero
	.long	smod_oper	|  01,01  zero,zero = nan with operr
	.long	smod_zro	|  01,10  zero,inf = +-zero
	.long	smod_snan	|  01,11  zero,nan = nan
	.long	smod_oper	|  10,00  inf,norm = nan with operr
	.long	smod_oper	|  10,01  inf,zero = nan with operr
	.long	smod_oper	|  10,10  inf,inf = nan with operr
	.long	smod_snan	|  10,11  inf,nan = nan
	.long	smod_dnan	|  11,00  nan,norm = nan
	.long	smod_dnan	|  11,01  nan,zero = nan
	.long	smod_dnan	|  11,10  nan,inf = nan
	.long	smod_dnan	|  11,11  nan,nan = nan
	.global	pmod
pmod:
	clrb	FPSR_QBYTE(%a6)	| clear quotient field
	bfextu	STAG(%a6){#0:#3},%d0	|stag = d0
	bfextu	DTAG(%a6){#0:#3},%d1	|dtag = d1
|
| Alias extended denorms to norms for the jump table.
|
	bclrl	#2,%d0
	bclrl	#2,%d1
	lslb	#2,%d1
	orb	%d0,%d1		|d1{3:2} = dtag, d1{1:0} = stag
|				;Tag values:
|				;00 = norm or denorm
|				;01 = zero
|				;10 = inf
|				;11 = nan
	lea	pmodt,%a1
	movel	(%a1,%d1.w*4),%a1
	jmp	(%a1)
smod_snan:
	bra	src_nan
smod_dnan:
	bra	dst_nan
smod_oper:
	bra	t_operr
smod_zro:
	moveb	ETEMP(%a6),%d1	|get sign of src op
	moveb	FPTEMP(%a6),%d0	|get sign of dst op
	eorb	%d0,%d1		|get exor of sign bits
	btstl	#7,%d1		|test for sign
	beqs	smod_zsn	|if clr, do not set sign bit
	bsetb	#q_sn_bit,FPSR_QBYTE(%a6)	|set q-byte sign bit
smod_zsn:
	btstl	#7,%d0		|test if + or -
	beq	ld_pzero	|if pos then load +0
	bra	ld_mzero	|else neg load -0
smod_fpn:
	moveb	ETEMP(%a6),%d1	|get sign of src op
	moveb	FPTEMP(%a6),%d0	|get sign of dst op
	eorb	%d0,%d1		|get exor of sign bits
	btstl	#7,%d1		|test for sign
	beqs	smod_fsn	|if clr, do not set sign bit
	bsetb	#q_sn_bit,FPSR_QBYTE(%a6)	|set q-byte sign bit
smod_fsn:
	tstb	DTAG(%a6)	|filter out denormal destination case
	bpls	smod_nrm	|
	leal	FPTEMP(%a6),%a0	|a0<- addr(FPTEMP)
	bra	t_resdnrm	|force UNFL(but exact) result
smod_nrm:
	fmovel	USER_FPCR(%a6),%fpcr	|use user's rmode and precision
	fmovex	FPTEMP(%a6),%fp0	|return dest to fp0
	rts
|
| FREM
|
| FREM dispatch table, indexed by (dtag<<2)|stag after denorms are
| aliased to norms.
premt:
|				;$25 frem
|				;dtag,stag
	.long	srem		|  00,00  norm,norm = normal
	.long	srem_oper	|  00,01  norm,zero = nan with operr
	.long	srem_fpn	|  00,10  norm,inf = fpn
	.long	srem_snan	|  00,11  norm,nan = nan
	.long	srem_zro	|  01,00  zero,norm = +-zero
	.long	srem_oper	|  01,01  zero,zero = nan with operr
	.long	srem_zro	|  01,10  zero,inf = +-zero
	.long	srem_snan	|  01,11  zero,nan = nan
	.long	srem_oper	|  10,00  inf,norm = nan with operr
	.long	srem_oper	|  10,01  inf,zero = nan with operr
	.long	srem_oper	|  10,10  inf,inf = nan with operr
	.long	srem_snan	|  10,11  inf,nan = nan
	.long	srem_dnan	|  11,00  nan,norm = nan
	.long	srem_dnan	|  11,01  nan,zero = nan
	.long	srem_dnan	|  11,10  nan,inf = nan
	.long	srem_dnan	|  11,11  nan,nan = nan
	.global	prem
prem:
	clrb	FPSR_QBYTE(%a6)	|clear quotient field
	bfextu	STAG(%a6){#0:#3},%d0	|stag = d0
	bfextu	DTAG(%a6){#0:#3},%d1	|dtag = d1
|
| Alias extended denorms to norms for the jump table.
|
	bclr	#2,%d0
	bclr	#2,%d1
	lslb	#2,%d1
	orb	%d0,%d1		|d1{3:2} = dtag, d1{1:0} = stag
|				;Tag values:
|				;00 = norm or denorm
|				;01 = zero
|				;10 = inf
|				;11 = nan
	lea	premt,%a1
	movel	(%a1,%d1.w*4),%a1
	jmp	(%a1)
srem_snan:
	bra	src_nan
srem_dnan:
	bra	dst_nan
srem_oper:
	bra	t_operr
srem_zro:
	moveb	ETEMP(%a6),%d1	|get sign of src op
	moveb	FPTEMP(%a6),%d0	|get sign of dst op
	eorb	%d0,%d1		|get exor of sign bits
	btstl	#7,%d1		|test for sign
	beqs	srem_zsn	|if clr, do not set sign bit
	bsetb	#q_sn_bit,FPSR_QBYTE(%a6)	|set q-byte sign bit
srem_zsn:
	btstl	#7,%d0		|test if + or -
	beq	ld_pzero	|if pos then load +0
	bra	ld_mzero	|else neg load -0
srem_fpn:
	moveb	ETEMP(%a6),%d1	|get sign of src op
	moveb	FPTEMP(%a6),%d0	|get sign of dst op
	eorb	%d0,%d1		|get exor of sign bits
	btstl	#7,%d1		|test for sign
	beqs	srem_fsn	|if clr, do not set sign bit
	bsetb	#q_sn_bit,FPSR_QBYTE(%a6)	|set q-byte sign bit
srem_fsn:
	tstb	DTAG(%a6)	|filter out denormal destination case
	bpls	srem_nrm	|
	leal	FPTEMP(%a6),%a0	|a0<- addr(FPTEMP)
	bra	t_resdnrm	|force UNFL(but exact) result
srem_nrm:
	fmovel	USER_FPCR(%a6),%fpcr	|use user's rmode and precision
	fmovex	FPTEMP(%a6),%fp0	|return dest to fp0
	rts
|
| FSCALE
|
| FSCALE dispatch table, indexed by (dtag<<2)|stag after denorms are
| aliased to norms.
pscalet:
|				;$26 fscale
|				;dtag,stag
	.long	sscale		|  00,00  norm,norm = result
	.long	sscale		|  00,01  norm,zero = fpn
	.long	scl_opr		|  00,10  norm,inf = nan with operr
	.long	scl_snan	|  00,11  norm,nan = nan
	.long	scl_zro		|  01,00  zero,norm = +-zero
	.long	scl_zro		|  01,01  zero,zero = +-zero
	.long	scl_opr		|  01,10  zero,inf = nan with operr
	.long	scl_snan	|  01,11  zero,nan = nan
	.long	scl_inf		|  10,00  inf,norm = +-inf
	.long	scl_inf		|  10,01  inf,zero = +-inf
	.long	scl_opr		|  10,10  inf,inf = nan with operr
	.long	scl_snan	|  10,11  inf,nan = nan
	.long	scl_dnan	|  11,00  nan,norm = nan
	.long	scl_dnan	|  11,01  nan,zero = nan
	.long	scl_dnan	|  11,10  nan,inf = nan
	.long	scl_dnan	|  11,11  nan,nan = nan
	.global	pscale
pscale:
	bfextu	STAG(%a6){#0:#3},%d0	|stag in d0
	bfextu	DTAG(%a6){#0:#3},%d1	|dtag in d1
	bclrl	#2,%d0		|alias denorm into norm
	bclrl	#2,%d1		|alias denorm into norm
	lslb	#2,%d1
	orb	%d0,%d1		|d1{3:2} = dtag, d1{1:0} = stag
|				;tag values (after denorm aliasing):
|				;00 = norm or denorm
|				;01 = zero
|				;10 = inf
|				;11 = nan
|
|
	leal	pscalet,%a1	|load start of jump table
	movel	(%a1,%d1.w*4),%a1	|load a1 with label depending on tag
	jmp	(%a1)		|go to the routine
scl_opr:
	bra	t_operr
scl_dnan:
	bra	dst_nan
scl_zro:
	btstb	#sign_bit,FPTEMP_EX(%a6)	|test if + or -
	beq	ld_pzero	|if pos then load +0
	bra	ld_mzero	|if neg then load -0
scl_inf:
	btstb	#sign_bit,FPTEMP_EX(%a6)	|test if + or -
	beq	ld_pinf		|if pos then load +inf
	bra	ld_minf		|else neg load -inf
scl_snan:
	bra	src_nan
|
| FSINCOS
|
	.global	ssincosz
| ssincosz --- FSINCOS of a signed zero: sin = same-signed zero in
| fp0, cos = +1 stored via sto_cos.
ssincosz:
	btstb	#sign_bit,ETEMP(%a6)	|get sign
	beqs	sincosp
	fmovex	MZERO,%fp0
	bras	sincoscom
sincosp:
	fmovex	PZERO,%fp0
sincoscom:
	fmovemx	PONE,%fp1-%fp1	|do not allow FPSR to be affected
	bra	sto_cos		|store cosine result
	.global	ssincosi
| ssincosi --- FSINCOS of an infinity: NAN for both results, operr.
ssincosi:
	fmovex	QNAN,%fp1	|load NAN
	bsr	sto_cos		|store cosine result
	fmovex	QNAN,%fp0	|load NAN
	bra	t_operr
	.global	ssincosnan
| ssincosnan --- FSINCOS of a NAN: quiet the NAN (set its signalling
| bit) and deliver it as both sin and cos results.
ssincosnan:
	movel	ETEMP_EX(%a6),FP_SCR1(%a6)
	movel	ETEMP_HI(%a6),FP_SCR1+4(%a6)
	movel	ETEMP_LO(%a6),FP_SCR1+8(%a6)
	bsetb	#signan_bit,FP_SCR1+4(%a6)
	fmovemx	FP_SCR1(%a6),%fp1-%fp1
	bsr	sto_cos
	bra	src_nan
|
| This code forces default values for the zero, inf, and nan cases
| in the transcendentals code. The CC bits must be set in the
| stacked FPSR to be correctly reported.
|
| The ld_* routines force default values into fp0 for the zero, inf
| and nan cases, setting the matching CC bits in the stacked FPSR.
|**Returns +PI/2
	.global	ld_ppi2
ld_ppi2:
	fmovex	PPIBY2,%fp0	|load +pi/2
	bra	t_inx2		|set inex2 exc
|**Returns -PI/2
	.global	ld_mpi2
ld_mpi2:
	fmovex	MPIBY2,%fp0	|load -pi/2
	orl	#neg_mask,USER_FPSR(%a6)	|set N bit
	bra	t_inx2		|set inex2 exc
|**Returns +inf
	.global	ld_pinf
ld_pinf:
	fmovex	PINF,%fp0	|load +inf
	orl	#inf_mask,USER_FPSR(%a6)	|set I bit
	rts
|**Returns -inf
	.global	ld_minf
ld_minf:
	fmovex	MINF,%fp0	|load -inf
	orl	#neg_mask+inf_mask,USER_FPSR(%a6)	|set N and I bits
	rts
|**Returns +1
	.global	ld_pone
ld_pone:
	fmovex	PONE,%fp0	|load +1
	rts
|**Returns -1
	.global	ld_mone
ld_mone:
	fmovex	MONE,%fp0	|load -1
	orl	#neg_mask,USER_FPSR(%a6)	|set N bit
	rts
|**Returns +0
	.global	ld_pzero
ld_pzero:
	fmovex	PZERO,%fp0	|load +0
	orl	#z_mask,USER_FPSR(%a6)	|set Z bit
	rts
|**Returns -0
	.global	ld_mzero
ld_mzero:
	fmovex	MZERO,%fp0	|load -0
	orl	#neg_mask+z_mask,USER_FPSR(%a6)	|set N and Z bits
	rts
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,621
|
arch/m68k/fpsp040/x_snan.S
|
|
| x_snan.sa 3.3 7/1/91
|
| fpsp_snan --- FPSP handler for signalling NAN exception
|
| SNAN for float -> integer conversions (integer conversion of
| an SNAN) is a non-maskable run-time exception.
|
| For trap disabled the 040 does the following:
| If the dest data format is s, d, or x, then the SNAN bit in the NAN
| is set to one and the resulting non-signaling NAN (truncated if
| necessary) is transferred to the dest. If the dest format is b, w,
| or l, then garbage is written to the dest (actually the upper 32 bits
| of the mantissa are sent to the integer unit).
|
| For trap enabled the 040 does the following:
| If the inst is move_out, then the results are the same as for trap
| disabled with the exception posted. If the instruction is not move_
| out, the dest. is not modified, and the exception is posted.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
X_SNAN: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref get_fline
|xref mem_write
|xref real_snan
|xref real_inex
|xref fpsp_done
|xref reg_dest
.global fpsp_snan
fpsp_snan:
link %a6,#-LOCAL_SIZE
fsave -(%a7)
moveml %d0-%d1/%a0-%a1,USER_DA(%a6)
fmovemx %fp0-%fp3,USER_FP0(%a6)
fmoveml %fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
|
| Check if trap enabled
|
btstb #snan_bit,FPCR_ENABLE(%a6)
bnes ena |If enabled, then branch
bsrl move_out |else SNAN disabled
|
| It is possible to have an inex1 exception with the
| snan. If the inex enable bit is set in the FPCR, and either
| inex2 or inex1 occurred, we must clean up and branch to the
| real inex handler.
|
ck_inex:
moveb FPCR_ENABLE(%a6),%d0
andb FPSR_EXCEPT(%a6),%d0
andib #0x3,%d0
beq end_snan
|
| Inexact enabled and reported, and we must take an inexact exception.
|
take_inex:
moveb #INEX_VEC,EXC_VEC+1(%a6)
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral real_inex
|
| SNAN is enabled. Check if inst is move_out.
| Make any corrections to the 040 output as necessary.
|
ena:
btstb #5,CMDREG1B(%a6) |if set, inst is move out
beq not_out
bsrl move_out
report_snan:
moveb (%a7),VER_TMP(%a6)
cmpib #VER_40,(%a7) |test for orig unimp frame
bnes ck_rev
moveql #13,%d0 |need to zero 14 lwords
bras rep_con
ck_rev:
moveql #11,%d0 |need to zero 12 lwords
rep_con:
clrl (%a7)
loop1:
clrl -(%a7) |clear and dec a7
dbra %d0,loop1
moveb VER_TMP(%a6),(%a7) |format a busy frame
moveb #BUSY_SIZE-4,1(%a7)
movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
orl #sx_mask,E_BYTE(%a6)
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral real_snan
|
| Exit snan handler by expanding the unimp frame into a busy frame
|
end_snan:
bclrb #E1,E_BYTE(%a6)
moveb (%a7),VER_TMP(%a6)
cmpib #VER_40,(%a7) |test for orig unimp frame
bnes ck_rev2
moveql #13,%d0 |need to zero 14 lwords
bras rep_con2
ck_rev2:
moveql #11,%d0 |need to zero 12 lwords
rep_con2:
clrl (%a7)
loop2:
clrl -(%a7) |clear and dec a7
dbra %d0,loop2
moveb VER_TMP(%a6),(%a7) |format a busy frame
moveb #BUSY_SIZE-4,1(%a7) |write busy size
movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
orl #sx_mask,E_BYTE(%a6)
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral fpsp_done
|
| Move_out
|
move_out:
movel EXC_EA(%a6),%a0 |get <ea> from exc frame
bfextu CMDREG1B(%a6){#3:#3},%d0 |move rx field to d0{2:0}
cmpil #0,%d0 |check for long
beqs sto_long |branch if move_out long
cmpil #4,%d0 |check for word
beqs sto_word |branch if move_out word
cmpil #6,%d0 |check for byte
beqs sto_byte |branch if move_out byte
|
| Not byte, word or long
|
rts
|
| Get the 32 most significant bits of etemp mantissa
|
sto_long:
movel ETEMP_HI(%a6),%d1
movel #4,%d0 |load byte count
|
| Set signalling nan bit
|
bsetl #30,%d1
|
| Store to the users destination address
|
tstl %a0 |check if <ea> is 0
beqs wrt_dn |destination is a data register
movel %d1,-(%a7) |move the snan onto the stack
movel %a0,%a1 |load dest addr into a1
movel %a7,%a0 |load src addr of snan into a0
bsrl mem_write |write snan to user memory
movel (%a7)+,%d1 |clear off stack
rts
|
| Get the 16 most significant bits of etemp mantissa
|
sto_word:
movel ETEMP_HI(%a6),%d1
movel #2,%d0 |load byte count
|
| Set signalling nan bit
|
bsetl #30,%d1
|
| Store to the users destination address
|
tstl %a0 |check if <ea> is 0
beqs wrt_dn |destination is a data register
movel %d1,-(%a7) |move the snan onto the stack
movel %a0,%a1 |load dest addr into a1
movel %a7,%a0 |point to low word
bsrl mem_write |write snan to user memory
movel (%a7)+,%d1 |clear off stack
rts
|
| Get the 8 most significant bits of etemp mantissa
|
sto_byte:
movel ETEMP_HI(%a6),%d1
movel #1,%d0 |load byte count
|
| Set signalling nan bit
|
bsetl #30,%d1
|
| Store to the users destination address
|
tstl %a0 |check if <ea> is 0
beqs wrt_dn |destination is a data register
movel %d1,-(%a7) |move the snan onto the stack
movel %a0,%a1 |load dest addr into a1
movel %a7,%a0 |point to source byte
bsrl mem_write |write snan to user memory
movel (%a7)+,%d1 |clear off stack
rts
|
| wrt_dn --- write to a data register
|
| We get here with D1 containing the data to write and D0 the
| number of bytes to write: 1=byte,2=word,4=long.
|
wrt_dn:
movel %d1,L_SCR1(%a6) |data
movel %d0,-(%a7) |size
bsrl get_fline |returns fline word in d0
movel %d0,%d1
andil #0x7,%d1 |d1 now holds register number
movel (%sp)+,%d0 |get original size
cmpil #4,%d0
beqs wrt_long
cmpil #2,%d0
bnes wrt_byte
wrt_word:
orl #0x8,%d1
bral reg_dest
wrt_long:
orl #0x10,%d1
bral reg_dest
wrt_byte:
bral reg_dest
|
| Check if it is a src nan or dst nan
|
not_out:
movel DTAG(%a6),%d0
bfextu %d0{#0:#3},%d0 |isolate dtag in lsbs
cmpib #3,%d0 |check for nan in destination
bnes issrc |destination nan has priority
dst_nan:
btstb #6,FPTEMP_HI(%a6) |check if dest nan is an snan
bnes issrc |no, so check source for snan
movew FPTEMP_EX(%a6),%d0
bras cont
issrc:
movew ETEMP_EX(%a6),%d0
cont:
btstl #15,%d0 |test for sign of snan
beqs clr_neg
bsetb #neg_bit,FPSR_CC(%a6)
bra report_snan
clr_neg:
bclrb #neg_bit,FPSR_CC(%a6)
bra report_snan
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 13,311
|
arch/m68k/fpsp040/kernel_ex.S
|
|
| kernel_ex.sa 3.3 12/19/90
|
| This file contains routines to force exception status in the
| fpu for exceptional cases detected or reported within the
| transcendental functions. Typically, the t_xx routine will
| set the appropriate bits in the USER_FPSR word on the stack.
| The bits are tested in gen_except.sa to determine if an exceptional
| situation needs to be created on return from the FPSP.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
KERNEL_EX: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
mns_inf: .long 0xffff0000,0x00000000,0x00000000
pls_inf: .long 0x7fff0000,0x00000000,0x00000000
nan: .long 0x7fff0000,0xffffffff,0xffffffff
huge: .long 0x7ffe0000,0xffffffff,0xffffffff
|xref ovf_r_k
|xref unf_sub
|xref nrm_set
.global t_dz
.global t_dz2
.global t_operr
.global t_unfl
.global t_ovfl
.global t_ovfl2
.global t_inx2
.global t_frcinx
.global t_extdnrm
.global t_resdnrm
.global dst_nan
.global src_nan
|
| DZ exception
|
|
| if dz trap disabled
| store properly signed inf (use sign of etemp) into fp0
| set FPSR exception status dz bit, condition code
| inf bit, and accrued dz bit
| return
| frestore the frame into the machine (done by unimp_hd)
|
| else dz trap enabled
| set exception status bit & accrued bits in FPSR
| set flag to disable sto_res from corrupting fp register
| return
| frestore the frame into the machine (done by unimp_hd)
|
| t_dz2 is used by monadic functions such as flogn (from do_func).
| t_dz is used by monadic functions such as satanh (from the
| transcendental function).
|
t_dz2:
bsetb #neg_bit,FPSR_CC(%a6) |set neg bit in FPSR
fmovel #0,%FPSR |clr status bits (Z set)
btstb #dz_bit,FPCR_ENABLE(%a6) |test FPCR for dz exc enabled
bnes dz_ena_end
bras m_inf |flogx always returns -inf
t_dz:
fmovel #0,%FPSR |clr status bits (Z set)
btstb #dz_bit,FPCR_ENABLE(%a6) |test FPCR for dz exc enabled
bnes dz_ena
|
| dz disabled
|
btstb #sign_bit,ETEMP_EX(%a6) |check sign for neg or pos
beqs p_inf |branch if pos sign
m_inf:
fmovemx mns_inf,%fp0-%fp0 |load -inf
bsetb #neg_bit,FPSR_CC(%a6) |set neg bit in FPSR
bras set_fpsr
p_inf:
fmovemx pls_inf,%fp0-%fp0 |load +inf
set_fpsr:
orl #dzinf_mask,USER_FPSR(%a6) |set I,DZ,ADZ
rts
|
| dz enabled
|
dz_ena:
btstb #sign_bit,ETEMP_EX(%a6) |check sign for neg or pos
beqs dz_ena_end
bsetb #neg_bit,FPSR_CC(%a6) |set neg bit in FPSR
dz_ena_end:
orl #dzinf_mask,USER_FPSR(%a6) |set I,DZ,ADZ
st STORE_FLG(%a6)
rts
|
| OPERR exception
|
| if (operr trap disabled)
| set FPSR exception status operr bit, condition code
| nan bit; Store default NAN into fp0
| frestore the frame into the machine (done by unimp_hd)
|
| else (operr trap enabled)
| set FPSR exception status operr bit, accrued operr bit
| set flag to disable sto_res from corrupting fp register
| frestore the frame into the machine (done by unimp_hd)
|
t_operr:
orl #opnan_mask,USER_FPSR(%a6) |set NaN, OPERR, AIOP
btstb #operr_bit,FPCR_ENABLE(%a6) |test FPCR for operr enabled
bnes op_ena
fmovemx nan,%fp0-%fp0 |load default nan
rts
op_ena:
st STORE_FLG(%a6) |do not corrupt destination
rts
|
| t_unfl --- UNFL exception
|
| This entry point is used by all routines requiring unfl, inex2,
| aunfl, and ainex to be set on exit.
|
| On entry, a0 points to the exceptional operand. The final exceptional
| operand is built in FP_SCR1 and only the sign from the original operand
| is used.
|
t_unfl:
clrl FP_SCR1(%a6) |set exceptional operand to zero
clrl FP_SCR1+4(%a6)
clrl FP_SCR1+8(%a6)
tstb (%a0) |extract sign from caller's exop
bpls unfl_signok
bset #sign_bit,FP_SCR1(%a6)
unfl_signok:
leal FP_SCR1(%a6),%a0
orl #unfinx_mask,USER_FPSR(%a6)
| ;set UNFL, INEX2, AUNFL, AINEX
unfl_con:
btstb #unfl_bit,FPCR_ENABLE(%a6)
beqs unfl_dis
unfl_ena:
bfclr STAG(%a6){#5:#3} |clear wbtm66,wbtm1,wbtm0
bsetb #wbtemp15_bit,WB_BYTE(%a6) |set wbtemp15
bsetb #sticky_bit,STICKY(%a6) |set sticky bit
bclrb #E1,E_BYTE(%a6)
unfl_dis:
bfextu FPCR_MODE(%a6){#0:#2},%d0 |get round precision
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0) |convert to internal ext format
bsr unf_sub |returns IEEE result at a0
| ;and sets FPSR_CC accordingly
bfclr LOCAL_SGN(%a0){#0:#8} |convert back to IEEE ext format
beqs unfl_fin
bsetb #sign_bit,LOCAL_EX(%a0)
bsetb #sign_bit,FP_SCR1(%a6) |set sign bit of exc operand
unfl_fin:
fmovemx (%a0),%fp0-%fp0 |store result in fp0
rts
|
| t_ovfl2 --- OVFL exception (without inex2 returned)
|
| This entry is used by scale to force catastrophic overflow. The
| ovfl, aovfl, and ainex bits are set, but not the inex2 bit.
|
t_ovfl2:
orl #ovfl_inx_mask,USER_FPSR(%a6)
movel ETEMP(%a6),FP_SCR1(%a6)
movel ETEMP_HI(%a6),FP_SCR1+4(%a6)
movel ETEMP_LO(%a6),FP_SCR1+8(%a6)
|
| Check for single or double round precision. If single, check if
| the lower 40 bits of ETEMP are zero; if not, set inex2. If double,
| check if the lower 21 bits are zero; if not, set inex2.
|
moveb FPCR_MODE(%a6),%d0
andib #0xc0,%d0
beq t_work |if extended, finish ovfl processing
cmpib #0x40,%d0 |test for single
bnes t_dbl
t_sgl:
tstb ETEMP_LO(%a6)
bnes t_setinx2
movel ETEMP_HI(%a6),%d0
andil #0xff,%d0 |look at only lower 8 bits
bnes t_setinx2
bra t_work
t_dbl:
movel ETEMP_LO(%a6),%d0
andil #0x7ff,%d0 |look at only lower 11 bits
beq t_work
t_setinx2:
orl #inex2_mask,USER_FPSR(%a6)
bras t_work
|
| t_ovfl --- OVFL exception
|
|** Note: the exc operand is returned in ETEMP.
|
t_ovfl:
orl #ovfinx_mask,USER_FPSR(%a6)
t_work:
btstb #ovfl_bit,FPCR_ENABLE(%a6) |test FPCR for ovfl enabled
beqs ovf_dis
ovf_ena:
clrl FP_SCR1(%a6) |set exceptional operand
clrl FP_SCR1+4(%a6)
clrl FP_SCR1+8(%a6)
bfclr STAG(%a6){#5:#3} |clear wbtm66,wbtm1,wbtm0
bclrb #wbtemp15_bit,WB_BYTE(%a6) |clear wbtemp15
bsetb #sticky_bit,STICKY(%a6) |set sticky bit
bclrb #E1,E_BYTE(%a6)
| ;fall through to disabled case
| For disabled overflow call 'ovf_r_k'. This routine loads the
| correct result based on the rounding precision, destination
| format, rounding mode and sign.
|
ovf_dis:
bsr ovf_r_k |returns unsigned ETEMP_EX
| ;and sets FPSR_CC accordingly.
bfclr ETEMP_SGN(%a6){#0:#8} |fix sign
beqs ovf_pos
bsetb #sign_bit,ETEMP_EX(%a6)
bsetb #sign_bit,FP_SCR1(%a6) |set exceptional operand sign
ovf_pos:
fmovemx ETEMP(%a6),%fp0-%fp0 |move the result to fp0
rts
|
| INEX2 exception
|
| The inex2 and ainex bits are set.
|
t_inx2:
orl #inx2a_mask,USER_FPSR(%a6) |set INEX2, AINEX
rts
|
| Force Inex2
|
| This routine is called by the transcendental routines to force
| the inex2 exception bits set in the FPSR. If the underflow bit
| is set, but the underflow trap was not taken, the aunfl bit in
| the FPSR must be set.
|
t_frcinx:
orl #inx2a_mask,USER_FPSR(%a6) |set INEX2, AINEX
btstb #unfl_bit,FPSR_EXCEPT(%a6) |test for unfl bit set
beqs no_uacc1 |if clear, do not set aunfl
bsetb #aunfl_bit,FPSR_AEXCEPT(%a6)
no_uacc1:
rts
|
| DST_NAN
|
| Determine if the destination nan is signalling or non-signalling,
| and set the FPSR bits accordingly. See the MC68040 User's Manual
| section 3.2.2.5 NOT-A-NUMBERS.
|
dst_nan:
btstb #sign_bit,FPTEMP_EX(%a6) |test sign of nan
beqs dst_pos |if clr, it was positive
bsetb #neg_bit,FPSR_CC(%a6) |set N bit
dst_pos:
btstb #signan_bit,FPTEMP_HI(%a6) |check if signalling
beqs dst_snan |branch if signalling
fmovel %d1,%fpcr |restore user's rmode/prec
fmovex FPTEMP(%a6),%fp0 |return the non-signalling nan
|
| Check the source nan. If it is signalling, snan will be reported.
|
moveb STAG(%a6),%d0
andib #0xe0,%d0
cmpib #0x60,%d0
bnes no_snan
btstb #signan_bit,ETEMP_HI(%a6) |check if signalling
bnes no_snan
orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
no_snan:
rts
dst_snan:
btstb #snan_bit,FPCR_ENABLE(%a6) |check if trap enabled
beqs dst_dis |branch if disabled
orb #nan_tag,DTAG(%a6) |set up dtag for nan
st STORE_FLG(%a6) |do not store a result
orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
rts
dst_dis:
bsetb #signan_bit,FPTEMP_HI(%a6) |set SNAN bit in sop
fmovel %d1,%fpcr |restore user's rmode/prec
fmovex FPTEMP(%a6),%fp0 |load non-sign. nan
orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
rts
|
| SRC_NAN
|
| Determine if the source nan is signalling or non-signalling,
| and set the FPSR bits accordingly. See the MC68040 User's Manual
| section 3.2.2.5 NOT-A-NUMBERS.
|
src_nan:
btstb #sign_bit,ETEMP_EX(%a6) |test sign of nan
beqs src_pos |if clr, it was positive
bsetb #neg_bit,FPSR_CC(%a6) |set N bit
src_pos:
btstb #signan_bit,ETEMP_HI(%a6) |check if signalling
beqs src_snan |branch if signalling
fmovel %d1,%fpcr |restore user's rmode/prec
fmovex ETEMP(%a6),%fp0 |return the non-signalling nan
rts
src_snan:
btstb #snan_bit,FPCR_ENABLE(%a6) |check if trap enabled
beqs src_dis |branch if disabled
bsetb #signan_bit,ETEMP_HI(%a6) |set SNAN bit in sop
orb #norm_tag,DTAG(%a6) |set up dtag for norm
orb #nan_tag,STAG(%a6) |set up stag for nan
st STORE_FLG(%a6) |do not store a result
orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
rts
src_dis:
bsetb #signan_bit,ETEMP_HI(%a6) |set SNAN bit in sop
fmovel %d1,%fpcr |restore user's rmode/prec
fmovex ETEMP(%a6),%fp0 |load non-sign. nan
orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP
rts
|
| For all functions that have a denormalized input and that f(x)=x,
| this is the entry point
|
t_extdnrm:
orl #unfinx_mask,USER_FPSR(%a6)
| ;set UNFL, INEX2, AUNFL, AINEX
bras xdnrm_con
|
| Entry point for scale with extended denorm. The function does
| not set inex2, aunfl, or ainex.
|
t_resdnrm:
orl #unfl_mask,USER_FPSR(%a6)
xdnrm_con:
btstb #unfl_bit,FPCR_ENABLE(%a6)
beqs xdnrm_dis
|
| If exceptions are enabled, the additional task of setting up WBTEMP
| is needed so that when the underflow exception handler is entered,
| the user perceives no difference between what the 040 provides vs.
| what the FPSP provides.
|
xdnrm_ena:
movel %a0,-(%a7)
movel LOCAL_EX(%a0),FP_SCR1(%a6)
movel LOCAL_HI(%a0),FP_SCR1+4(%a6)
movel LOCAL_LO(%a0),FP_SCR1+8(%a6)
lea FP_SCR1(%a6),%a0
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0) |convert to internal ext format
tstw LOCAL_EX(%a0) |check if input is denorm
beqs xdnrm_dn |if so, skip nrm_set
bsr nrm_set |normalize the result (exponent
| ;will be negative
xdnrm_dn:
bclrb #sign_bit,LOCAL_EX(%a0) |take off false sign
bfclr LOCAL_SGN(%a0){#0:#8} |change back to IEEE ext format
beqs xdep
bsetb #sign_bit,LOCAL_EX(%a0)
xdep:
bfclr STAG(%a6){#5:#3} |clear wbtm66,wbtm1,wbtm0
bsetb #wbtemp15_bit,WB_BYTE(%a6) |set wbtemp15
bclrb #sticky_bit,STICKY(%a6) |clear sticky bit
bclrb #E1,E_BYTE(%a6)
movel (%a7)+,%a0
xdnrm_dis:
bfextu FPCR_MODE(%a6){#0:#2},%d0 |get round precision
bnes not_ext |if not round extended, store
| ;IEEE defaults
is_ext:
btstb #sign_bit,LOCAL_EX(%a0)
beqs xdnrm_store
bsetb #neg_bit,FPSR_CC(%a6) |set N bit in FPSR_CC
bras xdnrm_store
not_ext:
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0) |convert to internal ext format
bsr unf_sub |returns IEEE result pointed by
| ;a0; sets FPSR_CC accordingly
bfclr LOCAL_SGN(%a0){#0:#8} |convert back to IEEE ext format
beqs xdnrm_store
bsetb #sign_bit,LOCAL_EX(%a0)
xdnrm_store:
fmovemx (%a0),%fp0-%fp0 |store result in fp0
rts
|
| This subroutine is used for dyadic operations that use an extended
| denorm within the kernel. The approach used is to capture the frame,
| fix/restore.
|
.global t_avoid_unsupp
t_avoid_unsupp:
link %a2,#-LOCAL_SIZE |so that a2 fpsp.h negative
| ;offsets may be used
fsave -(%a7)
tstb 1(%a7) |check if idle, exit if so
beq idle_end
btstb #E1,E_BYTE(%a2) |check for an E1 exception if
| ;enabled, there is an unsupp
beq end_avun |else, exit
btstb #7,DTAG(%a2) |check for denorm destination
beqs src_den |else, must be a source denorm
|
| handle destination denorm
|
lea FPTEMP(%a2),%a0
btstb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0) |convert to internal ext format
bclrb #7,DTAG(%a2) |set DTAG to norm
bsr nrm_set |normalize result, exponent
| ;will become negative
bclrb #sign_bit,LOCAL_EX(%a0) |get rid of fake sign
bfclr LOCAL_SGN(%a0){#0:#8} |convert back to IEEE ext format
beqs ck_src_den |check if source is also denorm
bsetb #sign_bit,LOCAL_EX(%a0)
ck_src_den:
btstb #7,STAG(%a2)
beqs end_avun
src_den:
lea ETEMP(%a2),%a0
btstb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0) |convert to internal ext format
bclrb #7,STAG(%a2) |set STAG to norm
bsr nrm_set |normalize result, exponent
| ;will become negative
bclrb #sign_bit,LOCAL_EX(%a0) |get rid of fake sign
bfclr LOCAL_SGN(%a0){#0:#8} |convert back to IEEE ext format
beqs den_com
bsetb #sign_bit,LOCAL_EX(%a0)
den_com:
moveb #0xfe,CU_SAVEPC(%a2) |set continue frame
clrw NMNEXC(%a2) |clear NMNEXC
bclrb #E1,E_BYTE(%a2)
| fmove.l %FPSR,FPSR_SHADOW(%a2)
| bset.b #SFLAG,E_BYTE(%a2)
| bset.b #XFLAG,T_BYTE(%a2)
end_avun:
frestore (%a7)+
unlk %a2
rts
idle_end:
addl #4,%a7
unlk %a2
rts
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 8,926
|
arch/m68k/fpsp040/scale.S
|
|
| scale.sa 3.3 7/30/91
|
| The entry point sSCALE computes the destination operand
| scaled by the source operand. If the absolute value of
| the source operand is (>= 2^14) an overflow or underflow
| is returned.
|
| The entry point sscale is called from do_func to emulate
| the fscale unimplemented instruction.
|
| Input: Double-extended destination operand in FPTEMP,
| double-extended source operand in ETEMP.
|
| Output: The function returns scale(X,Y) to fp0.
|
| Modifies: fp0.
|
| Algorithm:
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SCALE idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref t_ovfl2
|xref t_unfl
|xref round
|xref t_resdnrm
SRC_BNDS: .short 0x3fff,0x400c
|
| This entry point is used by the unimplemented instruction exception
| handler.
|
|
|
| FSCALE
|
.global sscale
sscale:
fmovel #0,%fpcr |clr user enabled exc
clrl %d1
movew FPTEMP(%a6),%d1 |get dest exponent
smi L_SCR1(%a6) |use L_SCR1 to hold sign
andil #0x7fff,%d1 |strip sign
movew ETEMP(%a6),%d0 |check src bounds
andiw #0x7fff,%d0 |clr sign bit
cmp2w SRC_BNDS,%d0
bccs src_in
cmpiw #0x400c,%d0 |test for too large
bge src_out
|
| The source input is below 1, so we check for denormalized numbers
| and set unfl.
|
src_small:
moveb DTAG(%a6),%d0
andib #0xe0,%d0
tstb %d0
beqs no_denorm
st STORE_FLG(%a6) |dest already contains result
orl #unfl_mask,USER_FPSR(%a6) |set UNFL
den_done:
leal FPTEMP(%a6),%a0
bra t_resdnrm
no_denorm:
fmovel USER_FPCR(%a6),%FPCR
fmovex FPTEMP(%a6),%fp0 |simply return dest
rts
|
| Source is within 2^14 range. To perform the int operation,
| move it to d0.
|
src_in:
fmovex ETEMP(%a6),%fp0 |move in src for int
fmovel #rz_mode,%fpcr |force rz for src conversion
fmovel %fp0,%d0 |int src to d0
fmovel #0,%FPSR |clr status from above
tstw ETEMP(%a6) |check src sign
blt src_neg
|
| Source is positive. Add the src to the dest exponent.
| The result can be denormalized, if src = 0, or overflow,
| if the result of the add sets a bit in the upper word.
|
src_pos:
tstw %d1 |check for denorm
beq dst_dnrm
addl %d0,%d1 |add src to dest exp
beqs denorm |if zero, result is denorm
cmpil #0x7fff,%d1 |test for overflow
bges ovfl
tstb L_SCR1(%a6)
beqs spos_pos
orw #0x8000,%d1
spos_pos:
movew %d1,FPTEMP(%a6) |result in FPTEMP
fmovel USER_FPCR(%a6),%FPCR
fmovex FPTEMP(%a6),%fp0 |write result to fp0
rts
ovfl:
tstb L_SCR1(%a6)
beqs sovl_pos
orw #0x8000,%d1
sovl_pos:
movew FPTEMP(%a6),ETEMP(%a6) |result in ETEMP
movel FPTEMP_HI(%a6),ETEMP_HI(%a6)
movel FPTEMP_LO(%a6),ETEMP_LO(%a6)
bra t_ovfl2
denorm:
tstb L_SCR1(%a6)
beqs den_pos
orw #0x8000,%d1
den_pos:
tstl FPTEMP_HI(%a6) |check j bit
blts nden_exit |if set, not denorm
movew %d1,ETEMP(%a6) |input expected in ETEMP
movel FPTEMP_HI(%a6),ETEMP_HI(%a6)
movel FPTEMP_LO(%a6),ETEMP_LO(%a6)
orl #unfl_bit,USER_FPSR(%a6) |set unfl
leal ETEMP(%a6),%a0
bra t_resdnrm
nden_exit:
movew %d1,FPTEMP(%a6) |result in FPTEMP
fmovel USER_FPCR(%a6),%FPCR
fmovex FPTEMP(%a6),%fp0 |write result to fp0
rts
|
| Source is negative. Add the src to the dest exponent.
| (The result exponent will be reduced). The result can be
| denormalized.
|
src_neg:
addl %d0,%d1 |add src to dest
beqs denorm |if zero, result is denorm
blts fix_dnrm |if negative, result is
| ;needing denormalization
tstb L_SCR1(%a6)
beqs sneg_pos
orw #0x8000,%d1
sneg_pos:
movew %d1,FPTEMP(%a6) |result in FPTEMP
fmovel USER_FPCR(%a6),%FPCR
fmovex FPTEMP(%a6),%fp0 |write result to fp0
rts
|
| The result exponent is below denorm value. Test for catastrophic
| underflow and force zero if true. If not, try to shift the
| mantissa right until a zero exponent exists.
|
fix_dnrm:
cmpiw #0xffc0,%d1 |lower bound for normalization
blt fix_unfl |if lower, catastrophic unfl
movew %d1,%d0 |use d0 for exp
movel %d2,-(%a7) |free d2 for norm
movel FPTEMP_HI(%a6),%d1
movel FPTEMP_LO(%a6),%d2
clrl L_SCR2(%a6)
fix_loop:
addw #1,%d0 |drive d0 to 0
lsrl #1,%d1 |while shifting the
roxrl #1,%d2 |mantissa to the right
bccs no_carry
st L_SCR2(%a6) |use L_SCR2 to capture inex
no_carry:
tstw %d0 |it is finished when
blts fix_loop |d0 is zero or the mantissa
tstb L_SCR2(%a6)
beqs tst_zero
orl #unfl_inx_mask,USER_FPSR(%a6)
| ;set unfl, aunfl, ainex
|
| Test for zero. If zero, simply use fmove to return +/- zero
| to the fpu.
|
tst_zero:
clrw FPTEMP_EX(%a6)
tstb L_SCR1(%a6) |test for sign
beqs tst_con
orw #0x8000,FPTEMP_EX(%a6) |set sign bit
tst_con:
movel %d1,FPTEMP_HI(%a6)
movel %d2,FPTEMP_LO(%a6)
movel (%a7)+,%d2
tstl %d1
bnes not_zero
tstl FPTEMP_LO(%a6)
bnes not_zero
|
| Result is zero. Check for rounding mode to set lsb. If the
| mode is rp, and the zero is positive, return smallest denorm.
| If the mode is rm, and the zero is negative, return smallest
| negative denorm.
|
btstb #5,FPCR_MODE(%a6) |test if rm or rp
beqs no_dir
btstb #4,FPCR_MODE(%a6) |check which one
beqs zer_rm
zer_rp:
tstb L_SCR1(%a6) |check sign
bnes no_dir |if set, neg op, no inc
movel #1,FPTEMP_LO(%a6) |set lsb
bras sm_dnrm
zer_rm:
tstb L_SCR1(%a6) |check sign
beqs no_dir |if clr, neg op, no inc
movel #1,FPTEMP_LO(%a6) |set lsb
orl #neg_mask,USER_FPSR(%a6) |set N
bras sm_dnrm
no_dir:
fmovel USER_FPCR(%a6),%FPCR
fmovex FPTEMP(%a6),%fp0 |use fmove to set cc's
rts
|
| The rounding mode changed the zero to a smallest denorm. Call
| t_resdnrm with exceptional operand in ETEMP.
|
sm_dnrm:
movel FPTEMP_EX(%a6),ETEMP_EX(%a6)
movel FPTEMP_HI(%a6),ETEMP_HI(%a6)
movel FPTEMP_LO(%a6),ETEMP_LO(%a6)
leal ETEMP(%a6),%a0
bra t_resdnrm
|
| Result is still denormalized.
|
not_zero:
orl #unfl_mask,USER_FPSR(%a6) |set unfl
tstb L_SCR1(%a6) |check for sign
beqs fix_exit
orl #neg_mask,USER_FPSR(%a6) |set N
fix_exit:
bras sm_dnrm
|
| The result has underflowed to zero. Return zero and set
| unfl, aunfl, and ainex.
|
fix_unfl:
orl #unfl_inx_mask,USER_FPSR(%a6)
btstb #5,FPCR_MODE(%a6) |test if rm or rp
beqs no_dir2
btstb #4,FPCR_MODE(%a6) |check which one
beqs zer_rm2
zer_rp2:
tstb L_SCR1(%a6) |check sign
bnes no_dir2 |if set, neg op, no inc
clrl FPTEMP_EX(%a6)
clrl FPTEMP_HI(%a6)
movel #1,FPTEMP_LO(%a6) |set lsb
bras sm_dnrm |return smallest denorm
zer_rm2:
tstb L_SCR1(%a6) |check sign
beqs no_dir2 |if clr, neg op, no inc
movew #0x8000,FPTEMP_EX(%a6)
clrl FPTEMP_HI(%a6)
movel #1,FPTEMP_LO(%a6) |set lsb
orl #neg_mask,USER_FPSR(%a6) |set N
bra sm_dnrm |return smallest denorm
no_dir2:
tstb L_SCR1(%a6)
bges pos_zero
neg_zero:
clrl FP_SCR1(%a6) |clear the exceptional operand
clrl FP_SCR1+4(%a6) |for gen_except.
clrl FP_SCR1+8(%a6)
fmoves #0x80000000,%fp0
rts
pos_zero:
clrl FP_SCR1(%a6) |clear the exceptional operand
clrl FP_SCR1+4(%a6) |for gen_except.
clrl FP_SCR1+8(%a6)
fmoves #0x00000000,%fp0
rts
|
| The destination is a denormalized number. It must be handled
| by first shifting the bits in the mantissa until it is normalized,
| then adding the remainder of the source to the exponent.
|
dst_dnrm:
moveml %d2/%d3,-(%a7)
movew FPTEMP_EX(%a6),%d1
movel FPTEMP_HI(%a6),%d2
movel FPTEMP_LO(%a6),%d3
dst_loop:
tstl %d2 |test for normalized result
blts dst_norm |exit loop if so
tstl %d0 |otherwise, test shift count
beqs dst_fin |if zero, shifting is done
subil #1,%d0 |dec src
lsll #1,%d3
roxll #1,%d2
bras dst_loop
|
| Destination became normalized. Simply add the remaining
| portion of the src to the exponent.
|
dst_norm:
addw %d0,%d1 |dst is normalized; add src
tstb L_SCR1(%a6)
beqs dnrm_pos
orl #0x8000,%d1
dnrm_pos:
movemw %d1,FPTEMP_EX(%a6)
moveml %d2,FPTEMP_HI(%a6)
moveml %d3,FPTEMP_LO(%a6)
fmovel USER_FPCR(%a6),%FPCR
fmovex FPTEMP(%a6),%fp0
moveml (%a7)+,%d2/%d3
rts
|
| Destination remained denormalized. Call t_excdnrm with
| exceptional operand in ETEMP.
|
dst_fin:
tstb L_SCR1(%a6) |check for sign
beqs dst_exit
orl #neg_mask,USER_FPSR(%a6) |set N
orl #0x8000,%d1
dst_exit:
movemw %d1,ETEMP_EX(%a6)
moveml %d2,ETEMP_HI(%a6)
moveml %d3,ETEMP_LO(%a6)
orl #unfl_mask,USER_FPSR(%a6) |set unfl
moveml (%a7)+,%d2/%d3
leal ETEMP(%a6),%a0
bra t_resdnrm
|
| Source is outside of 2^14 range. Test the sign and branch
| to the appropriate exception handler.
|
src_out:
tstb L_SCR1(%a6)
beqs scro_pos
orl #0x8000,%d1
scro_pos:
movel FPTEMP_HI(%a6),ETEMP_HI(%a6)
movel FPTEMP_LO(%a6),ETEMP_LO(%a6)
tstw ETEMP(%a6)
blts res_neg
res_pos:
movew %d1,ETEMP(%a6) |result in ETEMP
bra t_ovfl2
res_neg:
movew %d1,ETEMP(%a6) |result in ETEMP
leal ETEMP(%a6),%a0
bra t_unfl
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 9,807
|
arch/m68k/fpsp040/x_operr.S
|
|
| x_operr.sa 3.5 7/1/91
|
| fpsp_operr --- FPSP handler for operand error exception
|
| See 68040 User's Manual pp. 9-44f
|
| Note 1: For trap disabled 040 does the following:
| If the dest is a fp reg, then an extended precision non_signaling
| NAN is stored in the dest reg. If the dest format is b, w, or l and
| the source op is a NAN, then garbage is stored as the result (actually
| the upper 32 bits of the mantissa are sent to the integer unit). If
| the dest format is integer (b, w, l) and the operr is caused by
| integer overflow, or the source op is inf, then the result stored is
| garbage.
| There are three cases in which operr is incorrectly signaled on the
| 040. This occurs for move_out of format b, w, or l for the largest
| negative integer (-2^7 for b, -2^15 for w, -2^31 for l).
|
| On opclass = 011 fmove.(b,w,l) that causes a conversion
| overflow -> OPERR, the exponent in wbte (and fpte) is:
| byte 56 - (62 - exp)
| word 48 - (62 - exp)
| long 32 - (62 - exp)
|
| where exp = (true exp) - 1
|
| So, wbtemp and fptemp will contain the following on erroneously
| signalled operr:
| fpts = 1
| fpte = $4000 (15 bit externally)
| byte fptm = $ffffffff ffffff80
| word fptm = $ffffffff ffff8000
| long fptm = $ffffffff 80000000
|
| Note 2: For trap enabled 040 does the following:
| If the inst is move_out, then same as Note 1.
| If the inst is not move_out, the dest is not modified.
| The exceptional operand is not defined for integer overflow
| during a move_out.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
X_OPERR: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref mem_write
|xref real_operr
|xref real_inex
|xref get_fline
|xref fpsp_done
|xref reg_dest
|
| fpsp_operr --- kernel entry point for the 040 operand-error trap.
| Builds a local frame (LOCAL_SIZE), fsaves the FPU state, and spills
| d0-d1/a0-a1, fp0-fp3 and fpcr/fpsr/fpiar into the frame so the
| emulation code below can use them freely.  Opclass-3 (move-out)
| instructions with a byte/word/long destination are emulated here;
| everything else is forwarded to the kernel handler real_operr.
|
	.global fpsp_operr
fpsp_operr:
|
	link %a6,#-LOCAL_SIZE
	fsave -(%a7)
	moveml %d0-%d1/%a0-%a1,USER_DA(%a6)
	fmovemx %fp0-%fp3,USER_FP0(%a6)
	fmoveml %fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
|
| Check if this is an opclass 3 instruction.
| If so, fall through, else branch to operr_end
|
	btstb #TFLAG,T_BYTE(%a6)
	beqs operr_end
|
| If the destination size is B,W,or L, the operr must be
| handled here.
|
	movel CMDREG1B(%a6),%d0
	bfextu %d0{#3:#3},%d0 |0=long, 4=word, 6=byte
	cmpib #0,%d0 |determine size; check long
	beq operr_long
	cmpib #4,%d0 |check word
	beq operr_word
	cmpib #6,%d0 |check byte
	beq operr_byte
|
| The size is not B,W,or L, so the operr is handled by the
| kernel handler.  Set the operr bits and clean up, leaving
| only the integer exception frame on the stack, and the
| fpu in the original exceptional state.
|
operr_end:
	bsetb #operr_bit,FPSR_EXCEPT(%a6)
	bsetb #aiop_bit,FPSR_AEXCEPT(%a6)
	moveml USER_DA(%a6),%d0-%d1/%a0-%a1
	fmovemx USER_FP0(%a6),%fp0-%fp3
	fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
	frestore (%a7)+ |put the FPU back in its original exceptional state
	unlk %a6
	bral real_operr
|
| Per-size operr emulation.  Each entry (long/word/byte) first checks
| for a NAN source, then for the one legitimate special case
| (most-negative integer: -2^31 / -2^15 / -2^7, see Note 1 in the file
| header), and finally for an operr the 040 raised incorrectly, in
| which case the true conversion result is reconstructed and stored.
|
operr_long:
	moveql #4,%d1 |write size to d1
	moveb STAG(%a6),%d0 |test stag for nan
	andib #0xe0,%d0 |clr all but tag
	cmpib #0x60,%d0 |check for nan
	beq operr_nan
	cmpil #0x80000000,FPTEMP_LO(%a6) |test if ls lword is special
	bnes chklerr |if not equal, check for incorrect operr
	bsr check_upper |check if exp and ms mant are special
	tstl %d0
	bnes chklerr |if d0 is true, check for incorrect operr
	movel #0x80000000,%d0 |store special case result (-2^31)
	bsr operr_store
	bra not_enabled |clean and exit
|
| CHECK FOR INCORRECTLY GENERATED OPERR EXCEPTION HERE
|
chklerr:
	movew FPTEMP_EX(%a6),%d0
	andw #0x7FFF,%d0 |ignore sign bit
	cmpw #0x3FFE,%d0 |this is the only possible exponent value
	bnes chklerr2
fixlong:
| Exponent matches the erroneous pattern: the low lword of FPTEMP is
| already the correct long result -- just store it.
	movel FPTEMP_LO(%a6),%d0
	bsr operr_store
	bra not_enabled
chklerr2:
	movew FPTEMP_EX(%a6),%d0
	andw #0x7FFF,%d0 |ignore sign bit
	cmpw #0x4000,%d0
	bcc store_max |exponent out of range
	movel FPTEMP_LO(%a6),%d0
	andl #0x7FFF0000,%d0 |look for all 1's on bits 30-16
	cmpl #0x7FFF0000,%d0
	beqs fixlong
	tstl FPTEMP_LO(%a6)
	bpls chklepos
	cmpl #0xFFFFFFFF,FPTEMP_HI(%a6)
	beqs fixlong
	bra store_max
chklepos:
	tstl FPTEMP_HI(%a6)
	beqs fixlong
	bra store_max
operr_word:
	moveql #2,%d1 |write size to d1
	moveb STAG(%a6),%d0 |test stag for nan
	andib #0xe0,%d0 |clr all but tag
	cmpib #0x60,%d0 |check for nan
	beq operr_nan
	cmpil #0xffff8000,FPTEMP_LO(%a6) |test if ls lword is special
	bnes chkwerr |if not equal, check for incorrect operr
	bsr check_upper |check if exp and ms mant are special
	tstl %d0
	bnes chkwerr |if d0 is true, check for incorrect operr
	movel #0x80000000,%d0 |store special case result (-2^15 in upper word)
	bsr operr_store
	bra not_enabled |clean and exit
|
| CHECK FOR INCORRECTLY GENERATED OPERR EXCEPTION HERE
|
chkwerr:
	movew FPTEMP_EX(%a6),%d0
	andw #0x7FFF,%d0 |ignore sign bit
	cmpw #0x3FFE,%d0 |this is the only possible exponent value
	bnes store_max
	movel FPTEMP_LO(%a6),%d0
	swap %d0 |result word must sit in the upper half for the store
	bsr operr_store
	bra not_enabled
operr_byte:
	moveql #1,%d1 |write size to d1
	moveb STAG(%a6),%d0 |test stag for nan
	andib #0xe0,%d0 |clr all but tag
	cmpib #0x60,%d0 |check for nan
	beqs operr_nan
	cmpil #0xffffff80,FPTEMP_LO(%a6) |test if ls lword is special
	bnes chkberr |if not equal, check for incorrect operr
	bsr check_upper |check if exp and ms mant are special
	tstl %d0
	bnes chkberr |if d0 is true, check for incorrect operr
	movel #0x80000000,%d0 |store special case result (-2^7 in top byte)
	bsr operr_store
	bra not_enabled |clean and exit
|
| CHECK FOR INCORRECTLY GENERATED OPERR EXCEPTION HERE
|
chkberr:
	movew FPTEMP_EX(%a6),%d0
	andw #0x7FFF,%d0 |ignore sign bit
	cmpw #0x3FFE,%d0 |this is the only possible exponent value
	bnes store_max
	movel FPTEMP_LO(%a6),%d0
	asll #8,%d0 |move result byte to the top of the register
	swap %d0
	bsr operr_store
	bra not_enabled
|
| This operr condition is not of the special case.  Set operr
| and aiop and write the portion of the nan to memory for the
| given size.
|
operr_nan:
	orl #opaop_mask,USER_FPSR(%a6) |set operr & aiop
	movel ETEMP_HI(%a6),%d0 |output will be from upper 32 bits
	bsr operr_store
	bra end_operr
|
| Store_max loads the max pos or negative for the size, sets
| the operr and aiop bits, and clears inex and ainex, incorrectly
| set by the 040.
|
store_max:
	orl #opaop_mask,USER_FPSR(%a6) |set operr & aiop
	bclrb #inex2_bit,FPSR_EXCEPT(%a6)
	bclrb #ainex_bit,FPSR_AEXCEPT(%a6)
	fmovel #0,%FPSR
	tstw FPTEMP_EX(%a6) |check sign
	blts load_neg
	movel #0x7fffffff,%d0 |largest positive integer
	bsr operr_store
	bra end_operr
load_neg:
	movel #0x80000000,%d0 |largest-magnitude negative integer
	bsr operr_store
	bra end_operr
|
| This routine stores the data in d0, for the given size in d1,
| to memory or data register as required.  A read of the fline
| is required to determine the destination.
|
| In:  d0 = data to store (left-justified for word/byte),
|      d1 = size in bytes (1, 2 or 4)
| NOTE(review): clobbers d0/d1 and a0/a1 via get_fline/mem_write --
| callers here only rely on the frame, not on those registers.
|
operr_store:
	movel %d0,L_SCR1(%a6) |move write data to L_SCR1
	movel %d1,-(%a7) |save register size
	bsrl get_fline |fline returned in d0
	movel (%a7)+,%d1
	bftst %d0{#26:#3} |if mode is zero, dest is Dn
	bnes dest_mem
|
| Destination is Dn.  Get register number from d0.  Data is on
| the stack at (a7).  D1 has size: 1=byte,2=word,4=long/single
|
	andil #7,%d0 |isolate register number
	cmpil #4,%d1
	beqs op_long |the most frequent case
	cmpil #2,%d1
	bnes op_con
	orl #8,%d0 |encode word size for reg_dest
	bras op_con
op_long:
	orl #0x10,%d0 |encode long size for reg_dest
op_con:
	movel %d0,%d1 |format size:reg for reg_dest
	bral reg_dest |call to reg_dest returns to caller
| ;of operr_store
|
| Destination is memory.  Get <ea> from integer exception frame
| and call mem_write.
|
dest_mem:
	leal L_SCR1(%a6),%a0 |put ptr to write data in a0
	movel EXC_EA(%a6),%a1 |put user destination address in a1
	movel %d1,%d0 |put size in d0
	bsrl mem_write
	rts
|
| Check the exponent for $c000 and the upper 32 bits of the
| mantissa for $ffffffff.  If both are true, return d0 clr
| and store the lower n bits of the least lword of FPTEMP
| to d0 for write out.  If not, it is a real operr, and set d0.
|
| Out: d0 = 1 for a genuine operr, 0 for the erroneously
|      signalled pattern described in Note 1 of the header.
|
check_upper:
	cmpil #0xffffffff,FPTEMP_HI(%a6) |check if first byte is all 1's
	bnes true_operr |if not all 1's then was true operr
	cmpiw #0xc000,FPTEMP_EX(%a6) |check if incorrectly signalled
	beqs not_true_operr |branch if not true operr
	cmpiw #0xbfff,FPTEMP_EX(%a6) |check if incorrectly signalled
	beqs not_true_operr |branch if not true operr
true_operr:
	movel #1,%d0 |signal real operr
	rts
not_true_operr:
	clrl %d0 |signal no real operr
	rts
|
| End_operr tests for operr enabled.  If not, it cleans up the stack
| and does an rte.  If enabled, it cleans up the stack and branches
| to the kernel operr handler with only the integer exception
| frame on the stack and the fpu in the original exceptional state
| with correct data written to the destination.
|
end_operr:
	btstb #operr_bit,FPCR_ENABLE(%a6)
	beqs not_enabled
enabled:
	moveml USER_DA(%a6),%d0-%d1/%a0-%a1
	fmovemx USER_FP0(%a6),%fp0-%fp3
	fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
	frestore (%a7)+
	unlk %a6
	bral real_operr
not_enabled:
|
| It is possible to have either inex2 or inex1 exceptions with the
| operr.  If the inex enable bit is set in the FPCR, and either
| inex2 or inex1 occurred, we must clean up and branch to the
| real inex handler.
|
ck_inex:
	moveb FPCR_ENABLE(%a6),%d0
	andb FPSR_EXCEPT(%a6),%d0
	andib #0x3,%d0 |isolate the two inexact enable&except bits
	beq operr_exit
|
| Inexact enabled and reported, and we must take an inexact exception.
|
take_inex:
	moveb #INEX_VEC,EXC_VEC+1(%a6) |rewrite the stacked vector to inexact
	movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
	orl #sx_mask,E_BYTE(%a6)
	moveml USER_DA(%a6),%d0-%d1/%a0-%a1
	fmovemx USER_FP0(%a6),%fp0-%fp3
	fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
	frestore (%a7)+
	unlk %a6
	bral real_inex
|
| Since operr is only an E1 exception, there is no need to frestore
| any state back to the fpu.
|
operr_exit:
	moveml USER_DA(%a6),%d0-%d1/%a0-%a1
	fmovemx USER_FP0(%a6),%fp0-%fp3
	fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
	unlk %a6
	bral fpsp_done
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,348
|
arch/m68k/fpsp040/sasin.S
|
|
| sasin.sa 3.3 12/19/90
|
| Description: The entry point sAsin computes the inverse sine of
| an input argument; sAsind does the same except for denormalized
| input.
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The value arcsin(X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 3 ulps in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program sASIN takes approximately 310 cycles.
|
| Algorithm:
|
| ASIN
| 1. If |X| >= 1, go to 3.
|
| 2. (|X| < 1) Calculate asin(X) by
| z := sqrt( [1-X][1+X] )
| asin(X) = atan( x / z ).
| Exit.
|
| 3. If |X| > 1, go to 5.
|
| 4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.
|
| 5. (|X| > 1) Generate an invalid operation by 0 * infinity.
| Exit.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SASIN idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
| Pi/2 in extended precision, used for the |X| = 1 result.
PIBY2: .long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
|xref t_operr
|xref t_frcinx
|xref t_extdnrm
|xref satan
|
| sasind --- arcsine for a denormalized input; asin(X) ~= X, so
| hand the operand straight to the denorm-result routine.
|
	.global sasind
sasind:
|--ASIN(X) = X FOR DENORMALIZED X
	bra t_extdnrm
|
| sasin --- arcsine of the extended operand at (a0), result in fp0.
| |X| < 1: asin(X) = atan(X / sqrt((1-X)(1+X))) via satan.
| |X| = 1: +-Pi/2.  |X| > 1: operand error.
|
	.global sasin
sasin:
	fmovex (%a0),%fp0 | ...LOAD INPUT
	movel (%a0),%d0
	movew 4(%a0),%d0 |d0 = compact form: exponent word + ms mantissa word
	andil #0x7FFFFFFF,%d0 |strip the sign for the magnitude test
	cmpil #0x3FFF8000,%d0 |compact form of 1.0
	bges asinbig
|--THIS IS THE USUAL CASE, |X| < 1
|--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
	fmoves #0x3F800000,%fp1
	fsubx %fp0,%fp1 | ...1-X
	fmovemx %fp2-%fp2,-(%a7) |fp2 is callee-preserved here; save it
	fmoves #0x3F800000,%fp2
	faddx %fp0,%fp2 | ...1+X
	fmulx %fp2,%fp1 | ...(1+X)(1-X)
	fmovemx (%a7)+,%fp2-%fp2
	fsqrtx %fp1 | ...SQRT([1-X][1+X])
	fdivx %fp1,%fp0 | ...X/SQRT([1-X][1+X])
	fmovemx %fp0-%fp0,(%a0) |pass the quotient to satan via (a0)
	bsr satan
	bra t_frcinx
asinbig:
	fabsx %fp0 | ...|X|
	fcmps #0x3F800000,%fp0
	fbgt t_operr |cause an operr exception
|--|X| = 1, ASIN(X) = +- PI/2.
	fmovex PIBY2,%fp0
	movel (%a0),%d0
	andil #0x80000000,%d0 | ...SIGN BIT OF X
	oril #0x3F800000,%d0 | ...+-1 IN SGL FORMAT
	movel %d0,-(%sp) | ...push SIGN(X) IN SGL-FMT
	fmovel %d1,%FPCR |restore the user's rounding/enable bits
	fmuls (%sp)+,%fp0 |apply sign; last inst may set exceptions
	bra t_frcinx
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 19,058
|
arch/m68k/fpsp040/ssin.S
|
|
| ssin.sa 3.3 7/29/91
|
| The entry point sSIN computes the sine of an input argument
| sCOS computes the cosine, and sSINCOS computes both. The
| corresponding entry points with a "d" computes the same
| corresponding function values for denormalized inputs.
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The function value sin(X) or cos(X) returned in Fp0 if SIN or
| COS is requested. Otherwise, for SINCOS, sin(X) is returned
| in Fp0, and cos(X) is returned in Fp1.
|
| Modifies: Fp0 for SIN or COS; both Fp0 and Fp1 for SINCOS.
|
| Accuracy and Monotonicity: The returned result is within 1 ulp in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The programs sSIN and sCOS take approximately 150 cycles for
| input argument X such that |X| < 15Pi, which is the usual
| situation. The speed for sSINCOS is approximately 190 cycles.
|
| Algorithm:
|
| SIN and COS:
| 1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1.
|
| 2. If |X| >= 15Pi or |X| < 2**(-40), go to 7.
|
| 3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let
| k = N mod 4, so in particular, k = 0,1,2,or 3. Overwrite
| k by k := k + AdjN.
|
| 4. If k is even, go to 6.
|
| 5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j. Return sgn*cos(r)
| where cos(r) is approximated by an even polynomial in r,
| 1 + r*r*(B1+s*(B2+ ... + s*B8)), s = r*r.
| Exit.
|
| 6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r)
| where sin(r) is approximated by an odd polynomial in r
| r + r*s*(A1+s*(A2+ ... + s*A7)), s = r*r.
| Exit.
|
| 7. If |X| > 1, go to 9.
|
| 8. (|X|<2**(-40)) If SIN is invoked, return X; otherwise return 1.
|
| 9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back to 3.
|
| SINCOS:
| 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.
|
| 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let
| k = N mod 4, so in particular, k = 0,1,2,or 3.
|
| 3. If k is even, go to 5.
|
| 4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), i.e.
| j1 exclusive or with the l.s.b. of k.
| sgn1 := (-1)**j1, sgn2 := (-1)**j2.
| SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where
| sin(r) and cos(r) are computed as odd and even polynomials
| in r, respectively. Exit
|
| 5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1.
| SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where
| sin(r) and cos(r) are computed as odd and even polynomials
| in r, respectively. Exit
|
| 6. If |X| > 1, go to 8.
|
| 7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit.
|
| 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back to 2.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SSIN idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
| Compact-form thresholds: 2**(-40) (small-arg cutoff) and 15*Pi
| (table-reduction limit) -- compared against the packed exp/mantissa word.
BOUNDS1: .long 0x3FD78000,0x4004BC7E
TWOBYPI: .long 0x3FE45F30,0x6DC9C883
| Sine polynomial coefficients A7..A1 (A3-A1 in extended, rest double);
| see the odd-polynomial expansion in the file header.
SINA7: .long 0xBD6AAA77,0xCCC994F5
SINA6: .long 0x3DE61209,0x7AAE8DA1
SINA5: .long 0xBE5AE645,0x2A118AE4
SINA4: .long 0x3EC71DE3,0xA5341531
SINA3: .long 0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
SINA2: .long 0x3FF80000,0x88888888,0x888859AF,0x00000000
SINA1: .long 0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
| Cosine polynomial coefficients B8..B1 (B1 = -1/2 in single precision);
| see the even-polynomial expansion in the file header.
COSB8: .long 0x3D2AC4D0,0xD6011EE3
COSB7: .long 0xBDA9396F,0x9F45AC19
COSB6: .long 0x3E21EED9,0x0612C972
COSB5: .long 0xBE927E4F,0xB79D9FCF
COSB4: .long 0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
COSB3: .long 0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
COSB2: .long 0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
COSB1: .long 0xBF000000
| 2/Pi and the two-piece split of 2*Pi used by the REDUCEX remainder step.
INVTWOPI: .long 0x3FFC0000,0xA2F9836E,0x4E44152A
TWOPI1: .long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
TWOPI2: .long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
|xref PITBL
| Frame-local scratch names.  Note TWOTO63 aliases POSNEG1 and N
| aliases ENDFLAG -- their live ranges do not overlap.
.set INARG,FP_SCR4
.set X,FP_SCR5
.set XDCARE,X+2
.set XFRAC,X+4
.set RPRIME,FP_SCR1
.set SPRIME,FP_SCR2
.set POSNEG1,L_SCR1
.set TWOTO63,L_SCR1
.set ENDFLAG,L_SCR2
.set N,L_SCR2
.set ADJN,L_SCR3
| xref t_frcinx
|xref t_extdnrm
|xref sto_cos
|
| Denormalized-input entry points: sin(X) ~= X and cos(X) ~= 1
| for denormalized X.
|
	.global ssind
ssind:
|--SIN(X) = X FOR DENORMALIZED X
	bra t_extdnrm
	.global scosd
scosd:
|--COS(X) = 1 FOR DENORMALIZED X
	fmoves #0x3F800000,%fp0
|
| 9D25B Fix: Sometimes the previous fmove.s sets fpsr bits
|
	fmovel #0,%fpsr
|
	bra t_frcinx
|
| ssin/scos --- sine/cosine of the extended operand at (a0), result
| in fp0.  ADJN selects the function (0 = sin, 1 = cos); cos(X) is
| computed as sin(X + Pi/2) by biasing the quadrant count.
|
	.global ssin
ssin:
|--SET ADJN TO 0
	movel #0,ADJN(%a6)
	bras SINBGN
	.global scos
scos:
|--SET ADJN TO 1
	movel #1,ADJN(%a6)
SINBGN:
|--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
	fmovex (%a0),%fp0 | ...LOAD INPUT
	movel (%a0),%d0
	movew 4(%a0),%d0 |d0 = compact exp/ms-mantissa form of X
	fmovex %fp0,X(%a6)
	andil #0x7FFFFFFF,%d0 | ...COMPACTIFY X
	cmpil #0x3FD78000,%d0 | ...|X| >= 2**(-40)?
	bges SOK1
	bra SINSM |tiny argument: sin(X)=X, cos(X)=1
SOK1:
	cmpil #0x4004BC7E,%d0 | ...|X| < 15 PI?
	blts SINMAIN
	bra REDUCEX |huge argument: general reduction mod 2*Pi
SINMAIN:
|--THIS IS THE USUAL CASE, |X| <= 15 PI.
|--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
	fmovex %fp0,%fp1
	fmuld TWOBYPI,%fp1 | ...X*2/PI
|--HIDE THE NEXT THREE INSTRUCTIONS
	lea PITBL+0x200,%a1 | ...TABLE OF N*PI/2, N = -32,...,32
|--FP1 IS NOW READY
	fmovel %fp1,N(%a6) | ...CONVERT TO INTEGER
	movel N(%a6),%d0
	asll #4,%d0 |each table entry is 16 bytes
	addal %d0,%a1 | ...A1 IS THE ADDRESS OF N*PIBY2
| ...WHICH IS IN TWO PIECES Y1 & Y2
	fsubx (%a1)+,%fp0 | ...X-Y1
|--HIDE THE NEXT ONE
	fsubs (%a1),%fp0 | ...FP0 IS R = (X-Y1)-Y2
SINCONT:
|--continuation from REDUCEX
|--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
	movel N(%a6),%d0
	addl ADJN(%a6),%d0 | ...SEE IF D0 IS ODD OR EVEN
	rorl #1,%d0 | ...D0 WAS ODD IFF D0 IS NEGATIVE
	cmpil #0,%d0
	blt COSPOLY
SINPOLY:
|--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
|--THEN WE RETURN SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
|--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
|--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
|--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
|--WHERE T=S*S.
|--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
|--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
	fmovex %fp0,X(%a6) | ...X IS R
	fmulx %fp0,%fp0 | ...FP0 IS S
|---HIDE THE NEXT TWO WHILE WAITING FOR FP0
	fmoved SINA7,%fp3
	fmoved SINA6,%fp2
|--FP0 IS NOW READY
	fmovex %fp0,%fp1
	fmulx %fp1,%fp1 | ...FP1 IS T
|--HIDE THE NEXT TWO WHILE WAITING FOR FP1
	rorl #1,%d0
	andil #0x80000000,%d0
| ...LEAST SIG. BIT OF D0 IN SIGN POSITION
	eorl %d0,X(%a6) | ...X IS NOW R'= SGN*R
| Evaluate the two half-polynomials in parallel (fp3: odd terms,
| fp2: even terms) to keep the FPU pipeline busy.
	fmulx %fp1,%fp3 | ...TA7
	fmulx %fp1,%fp2 | ...TA6
	faddd SINA5,%fp3 | ...A5+TA7
	faddd SINA4,%fp2 | ...A4+TA6
	fmulx %fp1,%fp3 | ...T(A5+TA7)
	fmulx %fp1,%fp2 | ...T(A4+TA6)
	faddd SINA3,%fp3 | ...A3+T(A5+TA7)
	faddx SINA2,%fp2 | ...A2+T(A4+TA6)
	fmulx %fp3,%fp1 | ...T(A3+T(A5+TA7))
	fmulx %fp0,%fp2 | ...S(A2+T(A4+TA6))
	faddx SINA1,%fp1 | ...A1+T(A3+T(A5+TA7))
	fmulx X(%a6),%fp0 | ...R'*S
	faddx %fp2,%fp1 | ...[A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
|--FP3 RELEASED, RESTORE NOW AND TAKE SOME ADVANTAGE OF HIDING
|--FP2 RELEASED, RESTORE NOW AND TAKE FULL ADVANTAGE OF HIDING
	fmulx %fp1,%fp0 | ...SIN(R')-R'
|--FP1 RELEASED.
	fmovel %d1,%FPCR |restore users exceptions
	faddx X(%a6),%fp0 |last inst - possible exception set
	bra t_frcinx
COSPOLY:
|--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
|--THEN WE RETURN SGN*COS(R). SGN*COS(R) IS COMPUTED BY
|--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
|--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
|--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
|--WHERE T=S*S.
|--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
|--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
|--AND IS THEREFORE STORED AS SINGLE PRECISION.
	fmulx %fp0,%fp0 | ...FP0 IS S
|---HIDE THE NEXT TWO WHILE WAITING FOR FP0
	fmoved COSB8,%fp2
	fmoved COSB7,%fp3
|--FP0 IS NOW READY
	fmovex %fp0,%fp1
	fmulx %fp1,%fp1 | ...FP1 IS T
|--HIDE THE NEXT TWO WHILE WAITING FOR FP1
	fmovex %fp0,X(%a6) | ...X IS S
	rorl #1,%d0
	andil #0x80000000,%d0
| ...LEAST SIG. BIT OF D0 IN SIGN POSITION
	fmulx %fp1,%fp2 | ...TB8
|--HIDE THE NEXT TWO WHILE WAITING FOR THE XU
	eorl %d0,X(%a6) | ...X IS NOW S'= SGN*S
	andil #0x80000000,%d0
	fmulx %fp1,%fp3 | ...TB7
|--HIDE THE NEXT TWO WHILE WAITING FOR THE XU
	oril #0x3F800000,%d0 | ...D0 IS SGN IN SINGLE
	movel %d0,POSNEG1(%a6)
| Two half-polynomials evaluated in parallel (fp2/fp3) as in SINPOLY.
	faddd COSB6,%fp2 | ...B6+TB8
	faddd COSB5,%fp3 | ...B5+TB7
	fmulx %fp1,%fp2 | ...T(B6+TB8)
	fmulx %fp1,%fp3 | ...T(B5+TB7)
	faddd COSB4,%fp2 | ...B4+T(B6+TB8)
	faddx COSB3,%fp3 | ...B3+T(B5+TB7)
	fmulx %fp1,%fp2 | ...T(B4+T(B6+TB8))
	fmulx %fp3,%fp1 | ...T(B3+T(B5+TB7))
	faddx COSB2,%fp2 | ...B2+T(B4+T(B6+TB8))
	fadds COSB1,%fp1 | ...B1+T(B3+T(B5+TB7))
	fmulx %fp2,%fp0 | ...S(B2+T(B4+T(B6+TB8)))
|--FP3 RELEASED, RESTORE NOW AND TAKE SOME ADVANTAGE OF HIDING
|--FP2 RELEASED.
	faddx %fp1,%fp0
|--FP1 RELEASED
	fmulx X(%a6),%fp0
	fmovel %d1,%FPCR |restore users exceptions
	fadds POSNEG1(%a6),%fp0 |last inst - possible exception set
	bra t_frcinx
SINBORS:
|--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
|--IF |X| < 2**(-40), RETURN X OR 1.
	cmpil #0x3FFF8000,%d0
	bgts REDUCEX
SINSM:
	movel ADJN(%a6),%d0
	cmpil #0,%d0
	bgts COSTINY |ADJN=1 means the caller wanted cosine
SINTINY:
|--|X| < 2**(-40): sin(X) = X to working precision.
	movew #0x0000,XDCARE(%a6) | ...JUST IN CASE
	fmovel %d1,%FPCR |restore users exceptions
	fmovex X(%a6),%fp0 |last inst - possible exception set
	bra t_frcinx
COSTINY:
|--|X| < 2**(-40): cos(X) = 1; the subtract forces inexact as required.
	fmoves #0x3F800000,%fp0
	fmovel %d1,%FPCR |restore users exceptions
	fsubs #0x00800000,%fp0 |last inst - possible exception set
	bra t_frcinx
REDUCEX:
|--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
|--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
|--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
|--Shared by ssin/scos and ssincos: reduces a huge argument mod Pi/2
|--keeping a 66-bit (R,r) remainder pair, then rejoins the fast path
|--at SINCONT or SCCONT depending on ADJN.
	fmovemx %fp2-%fp5,-(%a7) | ...save FP2 through FP5
	movel %d2,-(%a7)
	fmoves #0x00000000,%fp1 |r := 0
|--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
|--there is a danger of unwanted overflow in first LOOP iteration.  In this
|--case, reduce argument by one remainder step to make subsequent reduction
|--safe.
	cmpil #0x7ffeffff,%d0 |is argument dangerously large?
	bnes LOOP
	movel #0x7ffe0000,FP_SCR2(%a6) |yes
| ;create 2**16383*PI/2
	movel #0xc90fdaa2,FP_SCR2+4(%a6)
	clrl FP_SCR2+8(%a6)
	ftstx %fp0 |test sign of argument
	movel #0x7fdc0000,FP_SCR3(%a6) |create low half of 2**16383*
| ;PI/2 at FP_SCR3
	movel #0x85a308d3,FP_SCR3+4(%a6)
	clrl FP_SCR3+8(%a6)
	fblt red_neg
	orw #0x8000,FP_SCR2(%a6) |positive arg: subtract by adding -2**16383*PI/2
	orw #0x8000,FP_SCR3(%a6)
red_neg:
	faddx FP_SCR2(%a6),%fp0 |high part of reduction is exact
	fmovex %fp0,%fp1 |save high result in fp1
	faddx FP_SCR3(%a6),%fp0 |low part of reduction
	fsubx %fp0,%fp1 |determine low component of result
	faddx FP_SCR3(%a6),%fp1 |fp0/fp1 are reduced argument.
|--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
|--integer quotient will be stored in N
|--Intermediate remainder is 66-bit long; (R,r) in (FP0,FP1)
LOOP:
	fmovex %fp0,INARG(%a6) | ...+-2**K * F, 1 <= F < 2
	movew INARG(%a6),%d0
	movel %d0,%a1 | ...save a copy of D0
	andil #0x00007FFF,%d0
	subil #0x00003FFF,%d0 | ...D0 IS K
	cmpil #28,%d0
	bles LASTLOOP
CONTLOOP:
	subil #27,%d0 | ...D0 IS L := K-27
	movel #0,ENDFLAG(%a6)
	bras WORK
LASTLOOP:
	clrl %d0 | ...D0 IS L := 0
	movel #1,ENDFLAG(%a6)
WORK:
|--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
|--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
|--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
|--2**L * (PIby2_1), 2**L * (PIby2_2)
	movel #0x00003FFE,%d2 | ...BIASED EXPO OF 2/PI
	subl %d0,%d2 | ...BIASED EXPO OF 2**(-L)*(2/PI)
	movel #0xA2F9836E,FP_SCR1+4(%a6)
	movel #0x4E44152A,FP_SCR1+8(%a6)
	movew %d2,FP_SCR1(%a6) | ...FP_SCR1 is 2**(-L)*(2/PI)
	fmovex %fp0,%fp2
	fmulx FP_SCR1(%a6),%fp2
|--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
|--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
|--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
|--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
|--US THE DESIRED VALUE IN FLOATING POINT.
|--HIDE SIX CYCLES OF INSTRUCTION
	movel %a1,%d2
	swap %d2
	andil #0x80000000,%d2
	oril #0x5F000000,%d2 | ...D2 IS SIGN(INARG)*2**63 IN SGL
	movel %d2,TWOTO63(%a6)
	movel %d0,%d2
	addil #0x00003FFF,%d2 | ...BIASED EXPO OF 2**L * (PI/2)
|--FP2 IS READY
	fadds TWOTO63(%a6),%fp2 | ...THE FRACTIONAL PART OF FP1 IS ROUNDED
|--HIDE 4 CYCLES OF INSTRUCTION; creating 2**(L)*Piby2_1 and 2**(L)*Piby2_2
	movew %d2,FP_SCR2(%a6)
	clrw FP_SCR2+2(%a6)
	movel #0xC90FDAA2,FP_SCR2+4(%a6)
	clrl FP_SCR2+8(%a6) | ...FP_SCR2 is 2**(L) * Piby2_1
|--FP2 IS READY
	fsubs TWOTO63(%a6),%fp2 | ...FP2 is N
	addil #0x00003FDD,%d0
	movew %d0,FP_SCR3(%a6)
	clrw FP_SCR3+2(%a6)
	movel #0x85A308D3,FP_SCR3+4(%a6)
	clrl FP_SCR3+8(%a6) | ...FP_SCR3 is 2**(L) * Piby2_2
	movel ENDFLAG(%a6),%d0
|--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
|--P2 = 2**(L) * Piby2_2
	fmovex %fp2,%fp4
	fmulx FP_SCR2(%a6),%fp4 | ...W = N*P1
	fmovex %fp2,%fp5
	fmulx FP_SCR3(%a6),%fp5 | ...w = N*P2
	fmovex %fp4,%fp3
|--we want P+p = W+w but |p| <= half ulp of P
|--Then, we need to compute A := R-P and a := r-p
	faddx %fp5,%fp3 | ...FP3 is P
	fsubx %fp3,%fp4 | ...W-P
	fsubx %fp3,%fp0 | ...FP0 is A := R - P
	faddx %fp5,%fp4 | ...FP4 is p = (W-P)+w
	fmovex %fp0,%fp3 | ...FP3 A
	fsubx %fp4,%fp1 | ...FP1 is a := r - p
|--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
|--|r| <= half ulp of R.
	faddx %fp1,%fp0 | ...FP0 is R := A+a
|--No need to calculate r if this is the last loop
	cmpil #0,%d0
	bgt RESTORE
|--Need to calculate r
	fsubx %fp0,%fp3 | ...A-R
	faddx %fp3,%fp1 | ...FP1 is r := (A-R)+a
	bra LOOP
RESTORE:
	fmovel %fp2,N(%a6) |final quotient N for the quadrant logic
	movel (%a7)+,%d2
	fmovemx (%a7)+,%fp2-%fp5
	movel ADJN(%a6),%d0
	cmpil #4,%d0 |ADJN=4 marks the ssincos caller
	blt SINCONT
	bras SCCONT
|
| ssincosd --- sin and cos for a denormalized input:
| cos(X) = 1 (stored via sto_cos into fp1), sin(X) = X.
|
	.global ssincosd
ssincosd:
|--SIN AND COS OF X FOR DENORMALIZED X
	fmoves #0x3F800000,%fp1
	bsr sto_cos |store cosine result
	bra t_extdnrm
|
| ssincos --- simultaneous sine and cosine of the operand at (a0);
| sin(X) returned in fp0, cos(X) stored via sto_cos.  ADJN=4 tells
| REDUCEX/RESTORE to rejoin at SCCONT rather than SINCONT.
|
	.global ssincos
ssincos:
|--SET ADJN TO 4
	movel #4,ADJN(%a6)
	fmovex (%a0),%fp0 | ...LOAD INPUT
	movel (%a0),%d0
	movew 4(%a0),%d0 |d0 = compact exp/ms-mantissa form of X
	fmovex %fp0,X(%a6)
	andil #0x7FFFFFFF,%d0 | ...COMPACTIFY X
	cmpil #0x3FD78000,%d0 | ...|X| >= 2**(-40)?
	bges SCOK1
	bra SCSM |tiny argument: sin(X)=X, cos(X)=1
SCOK1:
	cmpil #0x4004BC7E,%d0 | ...|X| < 15 PI?
	blts SCMAIN
	bra REDUCEX |huge argument: general reduction mod 2*Pi
SCMAIN:
|--THIS IS THE USUAL CASE, |X| <= 15 PI.
|--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
	fmovex %fp0,%fp1
	fmuld TWOBYPI,%fp1 | ...X*2/PI
|--HIDE THE NEXT THREE INSTRUCTIONS
	lea PITBL+0x200,%a1 | ...TABLE OF N*PI/2, N = -32,...,32
|--FP1 IS NOW READY
	fmovel %fp1,N(%a6) | ...CONVERT TO INTEGER
	movel N(%a6),%d0
	asll #4,%d0 |each table entry is 16 bytes
	addal %d0,%a1 | ...ADDRESS OF N*PIBY2, IN Y1, Y2
	fsubx (%a1)+,%fp0 | ...X-Y1
	fsubs (%a1),%fp0 | ...FP0 IS R = (X-Y1)-Y2
SCCONT:
|--continuation point from REDUCEX
|--HIDE THE NEXT TWO
	movel N(%a6),%d0
	rorl #1,%d0
	cmpil #0,%d0 | ...D0 < 0 IFF N IS ODD
	bge NEVEN
NODD:
|--N odd: sin(X) = sgn1*cos(r) (fp1), cos(X) = sgn2*sin(r) (fp0);
|--both polynomials are evaluated interleaved for pipeline overlap.
|--REGISTERS SAVED SO FAR: D0, A0, FP2.
	fmovex %fp0,RPRIME(%a6)
	fmulx %fp0,%fp0 | ...FP0 IS S = R*R
	fmoved SINA7,%fp1 | ...A7
	fmoved COSB8,%fp2 | ...B8
	fmulx %fp0,%fp1 | ...SA7
	movel %d2,-(%a7)
	movel %d0,%d2
	fmulx %fp0,%fp2 | ...SB8
	rorl #1,%d2
	andil #0x80000000,%d2
	faddd SINA6,%fp1 | ...A6+SA7
	eorl %d0,%d2 |sgn2 = j1 EOR lsb(k), per step 4 of the header
	andil #0x80000000,%d2
	faddd COSB7,%fp2 | ...B7+SB8
	fmulx %fp0,%fp1 | ...S(A6+SA7)
	eorl %d2,RPRIME(%a6)
	movel (%a7)+,%d2
	fmulx %fp0,%fp2 | ...S(B7+SB8)
	rorl #1,%d0
	andil #0x80000000,%d0
	faddd SINA5,%fp1 | ...A5+S(A6+SA7)
	movel #0x3F800000,POSNEG1(%a6)
	eorl %d0,POSNEG1(%a6) |POSNEG1 = sgn1 as a single-precision +-1
	faddd COSB6,%fp2 | ...B6+S(B7+SB8)
	fmulx %fp0,%fp1 | ...S(A5+S(A6+SA7))
	fmulx %fp0,%fp2 | ...S(B6+S(B7+SB8))
	fmovex %fp0,SPRIME(%a6)
	faddd SINA4,%fp1 | ...A4+S(A5+S(A6+SA7))
	eorl %d0,SPRIME(%a6)
	faddd COSB5,%fp2 | ...B5+S(B6+S(B7+SB8))
	fmulx %fp0,%fp1 | ...S(A4+...)
	fmulx %fp0,%fp2 | ...S(B5+...)
	faddd SINA3,%fp1 | ...A3+S(A4+...)
	faddd COSB4,%fp2 | ...B4+S(B5+...)
	fmulx %fp0,%fp1 | ...S(A3+...)
	fmulx %fp0,%fp2 | ...S(B4+...)
	faddx SINA2,%fp1 | ...A2+S(A3+...)
	faddx COSB3,%fp2 | ...B3+S(B4+...)
	fmulx %fp0,%fp1 | ...S(A2+...)
	fmulx %fp0,%fp2 | ...S(B3+...)
	faddx SINA1,%fp1 | ...A1+S(A2+...)
	faddx COSB2,%fp2 | ...B2+S(B3+...)
	fmulx %fp0,%fp1 | ...S(A1+...)
	fmulx %fp2,%fp0 | ...S(B2+...)
	fmulx RPRIME(%a6),%fp1 | ...R'S(A1+...)
	fadds COSB1,%fp0 | ...B1+S(B2...)
	fmulx SPRIME(%a6),%fp0 | ...S'(B1+S(B2+...))
	movel %d1,-(%sp) |restore users mode & precision
	andil #0xff,%d1 |mask off all exceptions
	fmovel %d1,%FPCR
	faddx RPRIME(%a6),%fp1 | ...COS(X)
	bsr sto_cos |store cosine result
	fmovel (%sp)+,%FPCR |restore users exceptions
	fadds POSNEG1(%a6),%fp0 | ...SIN(X)
	bra t_frcinx
NEVEN:
|--N even: sin(X) = sgn1*sin(r) (fp0), cos(X) = sgn1*cos(r) (fp1);
|--same interleaved dual-polynomial evaluation as NODD, single sign.
|--REGISTERS SAVED SO FAR: FP2.
	fmovex %fp0,RPRIME(%a6)
	fmulx %fp0,%fp0 | ...FP0 IS S = R*R
	fmoved COSB8,%fp1 | ...B8
	fmoved SINA7,%fp2 | ...A7
	fmulx %fp0,%fp1 | ...SB8
	fmovex %fp0,SPRIME(%a6)
	fmulx %fp0,%fp2 | ...SA7
	rorl #1,%d0
	andil #0x80000000,%d0 |d0 = sgn1 in the sign-bit position
	faddd COSB7,%fp1 | ...B7+SB8
	faddd SINA6,%fp2 | ...A6+SA7
	eorl %d0,RPRIME(%a6)
	eorl %d0,SPRIME(%a6)
	fmulx %fp0,%fp1 | ...S(B7+SB8)
	oril #0x3F800000,%d0 |sgn1 as a single-precision +-1
	movel %d0,POSNEG1(%a6)
	fmulx %fp0,%fp2 | ...S(A6+SA7)
	faddd COSB6,%fp1 | ...B6+S(B7+SB8)
	faddd SINA5,%fp2 | ...A5+S(A6+SA7)
	fmulx %fp0,%fp1 | ...S(B6+S(B7+SB8))
	fmulx %fp0,%fp2 | ...S(A5+S(A6+SA7))
	faddd COSB5,%fp1 | ...B5+S(B6+S(B7+SB8))
	faddd SINA4,%fp2 | ...A4+S(A5+S(A6+SA7))
	fmulx %fp0,%fp1 | ...S(B5+...)
	fmulx %fp0,%fp2 | ...S(A4+...)
	faddd COSB4,%fp1 | ...B4+S(B5+...)
	faddd SINA3,%fp2 | ...A3+S(A4+...)
	fmulx %fp0,%fp1 | ...S(B4+...)
	fmulx %fp0,%fp2 | ...S(A3+...)
	faddx COSB3,%fp1 | ...B3+S(B4+...)
	faddx SINA2,%fp2 | ...A2+S(A3+...)
	fmulx %fp0,%fp1 | ...S(B3+...)
	fmulx %fp0,%fp2 | ...S(A2+...)
	faddx COSB2,%fp1 | ...B2+S(B3+...)
	faddx SINA1,%fp2 | ...A1+S(A2+...)
	fmulx %fp0,%fp1 | ...S(B2+...)
	fmulx %fp2,%fp0 | ...s(a1+...)
	fadds COSB1,%fp1 | ...B1+S(B2...)
	fmulx RPRIME(%a6),%fp0 | ...R'S(A1+...)
	fmulx SPRIME(%a6),%fp1 | ...S'(B1+S(B2+...))
	movel %d1,-(%sp) |save users mode & precision
	andil #0xff,%d1 |mask off all exceptions
	fmovel %d1,%FPCR
	fadds POSNEG1(%a6),%fp1 | ...COS(X)
	bsr sto_cos |store cosine result
	fmovel (%sp)+,%FPCR |restore users exceptions
	faddx RPRIME(%a6),%fp0 | ...SIN(X)
	bra t_frcinx
SCBORS:
	cmpil #0x3FFF8000,%d0
	bgt REDUCEX
SCSM:
|--|X| < 2**(-40): cos(X)=1 (subtract forces inexact), sin(X)=X.
	movew #0x0000,XDCARE(%a6)
	fmoves #0x3F800000,%fp1
	movel %d1,-(%sp) |save users mode & precision
	andil #0xff,%d1 |mask off all exceptions
	fmovel %d1,%FPCR
	fsubs #0x00800000,%fp1
	bsr sto_cos |store cosine result
	fmovel (%sp)+,%FPCR |restore users exceptions
	fmovex X(%a6),%fp0
	bra t_frcinx
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,743
|
arch/m68k/fpsp040/x_ovfl.S
|
|
| x_ovfl.sa 3.5 7/1/91
|
| fpsp_ovfl --- FPSP handler for overflow exception
|
| Overflow occurs when a floating-point intermediate result is
| too large to be represented in a floating-point data register,
| or when storing to memory, the contents of a floating-point
| data register are too large to be represented in the
| destination format.
|
| Trap disabled results
|
| If the instruction is move_out, then garbage is stored in the
| destination. If the instruction is not move_out, then the
| destination is not affected. For 68881 compatibility, the
| following values should be stored at the destination, based
| on the current rounding mode:
|
| RN Infinity with the sign of the intermediate result.
| RZ Largest magnitude number, with the sign of the
| intermediate result.
| RM For pos overflow, the largest pos number. For neg overflow,
| -infinity
| RP For pos overflow, +infinity. For neg overflow, the largest
| neg number
|
| Trap enabled results
| All trap disabled code applies. In addition the exceptional
| operand needs to be made available to the users exception handler
| with a bias of $6000 subtracted from the exponent.
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
X_OVFL: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref ovf_r_x2
|xref ovf_r_x3
|xref store
|xref real_ovfl
|xref real_inex
|xref fpsp_done
|xref g_opcls
|xref b1238_fix
.global fpsp_ovfl
fpsp_ovfl:
link %a6,#-LOCAL_SIZE
fsave -(%a7)
moveml %d0-%d1/%a0-%a1,USER_DA(%a6)
fmovemx %fp0-%fp3,USER_FP0(%a6)
fmoveml %fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
|
| The 040 doesn't set the AINEX bit in the FPSR, the following
| line temporarily rectifies this error.
|
bsetb #ainex_bit,FPSR_AEXCEPT(%a6)
|
bsrl ovf_adj |denormalize, round & store interm op
|
| if overflow traps not enabled check for inexact exception
|
btstb #ovfl_bit,FPCR_ENABLE(%a6)
beqs ck_inex
|
btstb #E3,E_BYTE(%a6)
beqs no_e3_1
bfextu CMDREG3B(%a6){#6:#3},%d0 |get dest reg no
bclrb %d0,FPR_DIRTY_BITS(%a6) |clr dest dirty bit
bsrl b1238_fix
movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
orl #sx_mask,E_BYTE(%a6)
no_e3_1:
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral real_ovfl
|
| It is possible to have either inex2 or inex1 exceptions with the
| ovfl. If the inex enable bit is set in the FPCR, and either
| inex2 or inex1 occurred, we must clean up and branch to the
| real inex handler.
|
ck_inex:
| move.b FPCR_ENABLE(%a6),%d0
| and.b FPSR_EXCEPT(%a6),%d0
| andi.b #$3,%d0
btstb #inex2_bit,FPCR_ENABLE(%a6)
beqs ovfl_exit
|
| Inexact enabled and reported, and we must take an inexact exception.
|
take_inex:
btstb #E3,E_BYTE(%a6)
beqs no_e3_2
bfextu CMDREG3B(%a6){#6:#3},%d0 |get dest reg no
bclrb %d0,FPR_DIRTY_BITS(%a6) |clr dest dirty bit
bsrl b1238_fix
movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
orl #sx_mask,E_BYTE(%a6)
no_e3_2:
moveb #INEX_VEC,EXC_VEC+1(%a6)
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral real_inex
ovfl_exit:
bclrb #E3,E_BYTE(%a6) |test and clear E3 bit
beqs e1_set
|
| Clear dirty bit on dest resister in the frame before branching
| to b1238_fix.
|
bfextu CMDREG3B(%a6){#6:#3},%d0 |get dest reg no
bclrb %d0,FPR_DIRTY_BITS(%a6) |clr dest dirty bit
bsrl b1238_fix |test for bug1238 case
movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
orl #sx_mask,E_BYTE(%a6)
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral fpsp_done
e1_set:
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
unlk %a6
bral fpsp_done
|
| ovf_adj
|
ovf_adj:
|
| Have a0 point to the correct operand.
|
btstb #E3,E_BYTE(%a6) |test E3 bit
beqs ovf_e1
lea WBTEMP(%a6),%a0
bras ovf_com
ovf_e1:
lea ETEMP(%a6),%a0
ovf_com:
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
bsrl g_opcls |returns opclass in d0
cmpiw #3,%d0 |check for opclass3
bnes not_opc011
|
| FPSR_CC is saved and restored because ovf_r_x3 affects it. The
| CCs are defined to be 'not affected' for the opclass3 instruction.
|
moveb FPSR_CC(%a6),L_SCR1(%a6)
bsrl ovf_r_x3 |returns a0 pointing to result
moveb L_SCR1(%a6),FPSR_CC(%a6)
bral store |stores to memory or register
not_opc011:
bsrl ovf_r_x2 |returns a0 pointing to result
bral store |stores to memory or register
|end
|
| AirFortressIlikara/LS2K0300-linux-4.19
| 28,114
|
| arch/m68k/fpsp040/bindec.S
|
|
| bindec.sa 3.4 1/3/91
|
| bindec
|
| Description:
| Converts an input in extended precision format
| to bcd format.
|
| Input:
| a0 points to the input extended precision value
| value in memory; d0 contains the k-factor sign-extended
| to 32-bits. The input may be either normalized,
| unnormalized, or denormalized.
|
| Output: result in the FP_SCR1 space on the stack.
|
| Saves and Modifies: D2-D7,A2,FP2
|
| Algorithm:
|
| A1. Set RM and size ext; Set SIGMA = sign of input.
| The k-factor is saved for use in d7. Clear the
| BINDEC_FLG for separating normalized/denormalized
| input. If input is unnormalized or denormalized,
| normalize it.
|
| A2. Set X = abs(input).
|
| A3. Compute ILOG.
| ILOG is the log base 10 of the input value. It is
| approximated by adding e + 0.f when the original
| value is viewed as 2^^e * 1.f in extended precision.
| This value is stored in d6.
|
| A4. Clr INEX bit.
| The operation in A3 above may have set INEX2.
|
| A5. Set ICTR = 0;
| ICTR is a flag used in A13. It must be set before the
| loop entry A6.
|
| A6. Calculate LEN.
| LEN is the number of digits to be displayed. The
| k-factor can dictate either the total number of digits,
| if it is a positive number, or the number of digits
| after the decimal point which are to be included as
| significant. See the 68882 manual for examples.
| If LEN is computed to be greater than 17, set OPERR in
| USER_FPSR. LEN is stored in d4.
|
| A7. Calculate SCALE.
| SCALE is equal to 10^ISCALE, where ISCALE is the number
| of decimal places needed to insure LEN integer digits
| in the output before conversion to bcd. LAMBDA is the
| sign of ISCALE, used in A9. Fp1 contains
| 10^^(abs(ISCALE)) using a rounding mode which is a
| function of the original rounding mode and the signs
| of ISCALE and X. A table is given in the code.
|
| A8. Clr INEX; Force RZ.
| The operation in A3 above may have set INEX2.
| RZ mode is forced for the scaling operation to insure
| only one rounding error. The grs bits are collected in
| the INEX flag for use in A10.
|
| A9. Scale X -> Y.
| The mantissa is scaled to the desired number of
| significant digits. The excess digits are collected
| in INEX2.
|
| A10. Or in INEX.
| If INEX is set, round error occurred. This is
| compensated for by 'or-ing' in the INEX2 flag to
| the lsb of Y.
|
| A11. Restore original FPCR; set size ext.
| Perform FINT operation in the user's rounding mode.
| Keep the size to extended.
|
| A12. Calculate YINT = FINT(Y) according to user's rounding
| mode. The FPSP routine sintd0 is used. The output
| is in fp0.
|
| A13. Check for LEN digits.
| If the int operation results in more than LEN digits,
| or less than LEN -1 digits, adjust ILOG and repeat from
| A6. This test occurs only on the first pass. If the
| result is exactly 10^LEN, decrement ILOG and divide
| the mantissa by 10.
|
| A14. Convert the mantissa to bcd.
| The binstr routine is used to convert the LEN digit
| mantissa to bcd in memory. The input to binstr is
| to be a fraction; i.e. (mantissa)/10^LEN and adjusted
| such that the decimal point is to the left of bit 63.
| The bcd digits are stored in the correct position in
| the final string area in memory.
|
| A15. Convert the exponent to bcd.
| As in A14 above, the exp is converted to bcd and the
| digits are stored in the final string.
| Test the length of the final exponent string. If the
| length is 4, set operr.
|
| A16. Write sign bits to final string.
|
| Implementation Notes:
|
| The registers are used as follows:
|
| d0: scratch; LEN input to binstr
| d1: scratch
| d2: upper 32-bits of mantissa for binstr
| d3: scratch;lower 32-bits of mantissa for binstr
| d4: LEN
| d5: LAMBDA/ICTR
| d6: ILOG
| d7: k-factor
| a0: ptr for original operand/final result
| a1: scratch pointer
| a2: pointer to FP_X; abs(original value) in ext
| fp0: scratch
| fp1: scratch
| fp2: scratch
| F_SCR1:
| F_SCR2:
| L_SCR1:
| L_SCR2:
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|BINDEC idnt 2,1 | Motorola 040 Floating Point Software Package
#include "fpsp.h"
|section 8
| Constants in extended precision
LOG2: .long 0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
LOG2UP1: .long 0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
| Constants in single precision
FONE: .long 0x3F800000,0x00000000,0x00000000,0x00000000
FTWO: .long 0x40000000,0x00000000,0x00000000,0x00000000
FTEN: .long 0x41200000,0x00000000,0x00000000,0x00000000
F4933: .long 0x459A2800,0x00000000,0x00000000,0x00000000
RBDTBL: .byte 0,0,0,0
.byte 3,3,2,2
.byte 3,2,2,3
.byte 2,3,3,2
|xref binstr
|xref sintdo
|xref ptenrn,ptenrm,ptenrp
.global bindec
.global sc_mul
bindec:
moveml %d2-%d7/%a2,-(%a7)
fmovemx %fp0-%fp2,-(%a7)
| A1. Set RM and size ext. Set SIGMA = sign input;
| The k-factor is saved for use in d7. Clear BINDEC_FLG for
| separating normalized/denormalized input. If the input
| is a denormalized number, set the BINDEC_FLG memory word
| to signal denorm. If the input is unnormalized, normalize
| the input and test for denormalized result.
|
fmovel #rm_mode,%FPCR |set RM and ext
movel (%a0),L_SCR2(%a6) |save exponent for sign check
movel %d0,%d7 |move k-factor to d7
clrb BINDEC_FLG(%a6) |clr norm/denorm flag
movew STAG(%a6),%d0 |get stag
andiw #0xe000,%d0 |isolate stag bits
beq A2_str |if zero, input is norm
|
| Normalize the denorm
|
un_de_norm:
movew (%a0),%d0
andiw #0x7fff,%d0 |strip sign of normalized exp
movel 4(%a0),%d1
movel 8(%a0),%d2
norm_loop:
subw #1,%d0
lsll #1,%d2
roxll #1,%d1
tstl %d1
bges norm_loop
|
| Test if the normalized input is denormalized
|
tstw %d0
bgts pos_exp |if greater than zero, it is a norm
st BINDEC_FLG(%a6) |set flag for denorm
pos_exp:
andiw #0x7fff,%d0 |strip sign of normalized exp
movew %d0,(%a0)
movel %d1,4(%a0)
movel %d2,8(%a0)
| A2. Set X = abs(input).
|
A2_str:
movel (%a0),FP_SCR2(%a6) | move input to work space
movel 4(%a0),FP_SCR2+4(%a6) | move input to work space
movel 8(%a0),FP_SCR2+8(%a6) | move input to work space
andil #0x7fffffff,FP_SCR2(%a6) |create abs(X)
| A3. Compute ILOG.
| ILOG is the log base 10 of the input value. It is approx-
| imated by adding e + 0.f when the original value is viewed
| as 2^^e * 1.f in extended precision. This value is stored
| in d6.
|
| Register usage:
| Input/Output
| d0: k-factor/exponent
| d2: x/x
| d3: x/x
| d4: x/x
| d5: x/x
| d6: x/ILOG
| d7: k-factor/Unchanged
| a0: ptr for original operand/final result
| a1: x/x
| a2: x/x
| fp0: x/float(ILOG)
| fp1: x/x
| fp2: x/x
| F_SCR1:x/x
| F_SCR2:Abs(X)/Abs(X) with $3fff exponent
| L_SCR1:x/x
| L_SCR2:first word of X packed/Unchanged
tstb BINDEC_FLG(%a6) |check for denorm
beqs A3_cont |if clr, continue with norm
movel #-4933,%d6 |force ILOG = -4933
bras A4_str
A3_cont:
movew FP_SCR2(%a6),%d0 |move exp to d0
movew #0x3fff,FP_SCR2(%a6) |replace exponent with 0x3fff
fmovex FP_SCR2(%a6),%fp0 |now fp0 has 1.f
subw #0x3fff,%d0 |strip off bias
faddw %d0,%fp0 |add in exp
fsubs FONE,%fp0 |subtract off 1.0
fbge pos_res |if pos, branch
fmulx LOG2UP1,%fp0 |if neg, mul by LOG2UP1
fmovel %fp0,%d6 |put ILOG in d6 as a lword
bras A4_str |go move out ILOG
pos_res:
fmulx LOG2,%fp0 |if pos, mul by LOG2
fmovel %fp0,%d6 |put ILOG in d6 as a lword
| A4. Clr INEX bit.
| The operation in A3 above may have set INEX2.
A4_str:
fmovel #0,%FPSR |zero all of fpsr - nothing needed
| A5. Set ICTR = 0;
| ICTR is a flag used in A13. It must be set before the
| loop entry A6. The lower word of d5 is used for ICTR.
clrw %d5 |clear ICTR
| A6. Calculate LEN.
| LEN is the number of digits to be displayed. The k-factor
| can dictate either the total number of digits, if it is
| a positive number, or the number of digits after the
| original decimal point which are to be included as
| significant. See the 68882 manual for examples.
| If LEN is computed to be greater than 17, set OPERR in
| USER_FPSR. LEN is stored in d4.
|
| Register usage:
| Input/Output
| d0: exponent/Unchanged
| d2: x/x/scratch
| d3: x/x
| d4: exc picture/LEN
| d5: ICTR/Unchanged
| d6: ILOG/Unchanged
| d7: k-factor/Unchanged
| a0: ptr for original operand/final result
| a1: x/x
| a2: x/x
| fp0: float(ILOG)/Unchanged
| fp1: x/x
| fp2: x/x
| F_SCR1:x/x
| F_SCR2:Abs(X) with $3fff exponent/Unchanged
| L_SCR1:x/x
| L_SCR2:first word of X packed/Unchanged
A6_str:
tstl %d7 |branch on sign of k
bles k_neg |if k <= 0, LEN = ILOG + 1 - k
movel %d7,%d4 |if k > 0, LEN = k
bras len_ck |skip to LEN check
k_neg:
movel %d6,%d4 |first load ILOG to d4
subl %d7,%d4 |subtract off k
addql #1,%d4 |add in the 1
len_ck:
tstl %d4 |LEN check: branch on sign of LEN
bles LEN_ng |if neg, set LEN = 1
cmpl #17,%d4 |test if LEN > 17
bles A7_str |if not, forget it
movel #17,%d4 |set max LEN = 17
tstl %d7 |if negative, never set OPERR
bles A7_str |if positive, continue
orl #opaop_mask,USER_FPSR(%a6) |set OPERR & AIOP in USER_FPSR
bras A7_str |finished here
LEN_ng:
moveql #1,%d4 |min LEN is 1
| A7. Calculate SCALE.
| SCALE is equal to 10^ISCALE, where ISCALE is the number
| of decimal places needed to insure LEN integer digits
| in the output before conversion to bcd. LAMBDA is the sign
| of ISCALE, used in A9. Fp1 contains 10^^(abs(ISCALE)) using
| the rounding mode as given in the following table (see
| Coonen, p. 7.23 as ref.; however, the SCALE variable is
| of opposite sign in bindec.sa from Coonen).
|
| Initial USE
| FPCR[6:5] LAMBDA SIGN(X) FPCR[6:5]
| ----------------------------------------------
| RN 00 0 0 00/0 RN
| RN 00 0 1 00/0 RN
| RN 00 1 0 00/0 RN
| RN 00 1 1 00/0 RN
| RZ 01 0 0 11/3 RP
| RZ 01 0 1 11/3 RP
| RZ 01 1 0 10/2 RM
| RZ 01 1 1 10/2 RM
| RM 10 0 0 11/3 RP
| RM 10 0 1 10/2 RM
| RM 10 1 0 10/2 RM
| RM 10 1 1 11/3 RP
| RP 11 0 0 10/2 RM
| RP 11 0 1 11/3 RP
| RP 11 1 0 11/3 RP
| RP 11 1 1 10/2 RM
|
| Register usage:
| Input/Output
| d0: exponent/scratch - final is 0
| d2: x/0 or 24 for A9
| d3: x/scratch - offset ptr into PTENRM array
| d4: LEN/Unchanged
| d5: 0/ICTR:LAMBDA
| d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
| d7: k-factor/Unchanged
| a0: ptr for original operand/final result
| a1: x/ptr to PTENRM array
| a2: x/x
| fp0: float(ILOG)/Unchanged
| fp1: x/10^ISCALE
| fp2: x/x
| F_SCR1:x/x
| F_SCR2:Abs(X) with $3fff exponent/Unchanged
| L_SCR1:x/x
| L_SCR2:first word of X packed/Unchanged
A7_str:
tstl %d7 |test sign of k
bgts k_pos |if pos and > 0, skip this
cmpl %d6,%d7 |test k - ILOG
blts k_pos |if ILOG >= k, skip this
movel %d7,%d6 |if ((k<0) & (ILOG < k)) ILOG = k
k_pos:
movel %d6,%d0 |calc ILOG + 1 - LEN in d0
addql #1,%d0 |add the 1
subl %d4,%d0 |sub off LEN
swap %d5 |use upper word of d5 for LAMBDA
clrw %d5 |set it zero initially
clrw %d2 |set up d2 for very small case
tstl %d0 |test sign of ISCALE
bges iscale |if pos, skip next inst
addqw #1,%d5 |if neg, set LAMBDA true
cmpl #0xffffecd4,%d0 |test iscale <= -4908
bgts no_inf |if false, skip rest
addil #24,%d0 |add in 24 to iscale
movel #24,%d2 |put 24 in d2 for A9
no_inf:
negl %d0 |and take abs of ISCALE
iscale:
fmoves FONE,%fp1 |init fp1 to 1
bfextu USER_FPCR(%a6){#26:#2},%d1 |get initial rmode bits
lslw #1,%d1 |put them in bits 2:1
addw %d5,%d1 |add in LAMBDA
lslw #1,%d1 |put them in bits 3:1
tstl L_SCR2(%a6) |test sign of original x
bges x_pos |if pos, don't set bit 0
addql #1,%d1 |if neg, set bit 0
x_pos:
leal RBDTBL,%a2 |load rbdtbl base
moveb (%a2,%d1),%d3 |load d3 with new rmode
lsll #4,%d3 |put bits in proper position
fmovel %d3,%fpcr |load bits into fpu
lsrl #4,%d3 |put bits in proper position
tstb %d3 |decode new rmode for pten table
bnes not_rn |if zero, it is RN
leal PTENRN,%a1 |load a1 with RN table base
bras rmode |exit decode
not_rn:
lsrb #1,%d3 |get lsb in carry
bccs not_rp |if carry clear, it is RM
leal PTENRP,%a1 |load a1 with RP table base
bras rmode |exit decode
not_rp:
leal PTENRM,%a1 |load a1 with RM table base
rmode:
clrl %d3 |clr table index
e_loop:
lsrl #1,%d0 |shift next bit into carry
bccs e_next |if zero, skip the mul
fmulx (%a1,%d3),%fp1 |mul by 10**(d3_bit_no)
e_next:
addl #12,%d3 |inc d3 to next pwrten table entry
tstl %d0 |test if ISCALE is zero
bnes e_loop |if not, loop
| A8. Clr INEX; Force RZ.
| The operation in A3 above may have set INEX2.
| RZ mode is forced for the scaling operation to insure
| only one rounding error. The grs bits are collected in
| the INEX flag for use in A10.
|
| Register usage:
| Input/Output
fmovel #0,%FPSR |clr INEX
fmovel #rz_mode,%FPCR |set RZ rounding mode
| A9. Scale X -> Y.
| The mantissa is scaled to the desired number of significant
| digits. The excess digits are collected in INEX2. If mul,
| Check d2 for excess 10 exponential value. If not zero,
| the iscale value would have caused the pwrten calculation
| to overflow. Only a negative iscale can cause this, so
| multiply by 10^(d2), which is now only allowed to be 24,
| with a multiply by 10^8 and 10^16, which is exact since
| 10^24 is exact. If the input was denormalized, we must
| create a busy stack frame with the mul command and the
| two operands, and allow the fpu to complete the multiply.
|
| Register usage:
| Input/Output
| d0: FPCR with RZ mode/Unchanged
| d2: 0 or 24/unchanged
| d3: x/x
| d4: LEN/Unchanged
| d5: ICTR:LAMBDA
| d6: ILOG/Unchanged
| d7: k-factor/Unchanged
| a0: ptr for original operand/final result
| a1: ptr to PTENRM array/Unchanged
| a2: x/x
| fp0: float(ILOG)/X adjusted for SCALE (Y)
| fp1: 10^ISCALE/Unchanged
| fp2: x/x
| F_SCR1:x/x
| F_SCR2:Abs(X) with $3fff exponent/Unchanged
| L_SCR1:x/x
| L_SCR2:first word of X packed/Unchanged
A9_str:
fmovex (%a0),%fp0 |load X from memory
fabsx %fp0 |use abs(X)
tstw %d5 |LAMBDA is in lower word of d5
bne sc_mul |if neg (LAMBDA = 1), scale by mul
fdivx %fp1,%fp0 |calculate X / SCALE -> Y to fp0
bras A10_st |branch to A10
sc_mul:
tstb BINDEC_FLG(%a6) |check for denorm
beqs A9_norm |if norm, continue with mul
fmovemx %fp1-%fp1,-(%a7) |load ETEMP with 10^ISCALE
movel 8(%a0),-(%a7) |load FPTEMP with input arg
movel 4(%a0),-(%a7)
movel (%a0),-(%a7)
movel #18,%d3 |load count for busy stack
A9_loop:
clrl -(%a7) |clear lword on stack
dbf %d3,A9_loop
moveb VER_TMP(%a6),(%a7) |write current version number
moveb #BUSY_SIZE-4,1(%a7) |write current busy size
moveb #0x10,0x44(%a7) |set fcefpte[15] bit
movew #0x0023,0x40(%a7) |load cmdreg1b with mul command
moveb #0xfe,0x8(%a7) |load all 1s to cu savepc
frestore (%a7)+ |restore frame to fpu for completion
fmulx 36(%a1),%fp0 |multiply fp0 by 10^8
fmulx 48(%a1),%fp0 |multiply fp0 by 10^16
bras A10_st
A9_norm:
tstw %d2 |test for small exp case
beqs A9_con |if zero, continue as normal
fmulx 36(%a1),%fp0 |multiply fp0 by 10^8
fmulx 48(%a1),%fp0 |multiply fp0 by 10^16
A9_con:
fmulx %fp1,%fp0 |calculate X * SCALE -> Y to fp0
| A10. Or in INEX.
| If INEX is set, round error occurred. This is compensated
| for by 'or-ing' in the INEX2 flag to the lsb of Y.
|
| Register usage:
| Input/Output
| d0: FPCR with RZ mode/FPSR with INEX2 isolated
| d2: x/x
| d3: x/x
| d4: LEN/Unchanged
| d5: ICTR:LAMBDA
| d6: ILOG/Unchanged
| d7: k-factor/Unchanged
| a0: ptr for original operand/final result
| a1: ptr to PTENxx array/Unchanged
| a2: x/ptr to FP_SCR2(a6)
| fp0: Y/Y with lsb adjusted
| fp1: 10^ISCALE/Unchanged
| fp2: x/x
A10_st:
fmovel %FPSR,%d0 |get FPSR
fmovex %fp0,FP_SCR2(%a6) |move Y to memory
leal FP_SCR2(%a6),%a2 |load a2 with ptr to FP_SCR2
btstl #9,%d0 |check if INEX2 set
beqs A11_st |if clear, skip rest
oril #1,8(%a2) |or in 1 to lsb of mantissa
fmovex FP_SCR2(%a6),%fp0 |write adjusted Y back to fpu
| A11. Restore original FPCR; set size ext.
| Perform FINT operation in the user's rounding mode. Keep
| the size to extended. The sintdo entry point in the sint
| routine expects the FPCR value to be in USER_FPCR for
| mode and precision. The original FPCR is saved in L_SCR1.
A11_st:
movel USER_FPCR(%a6),L_SCR1(%a6) |save it for later
andil #0x00000030,USER_FPCR(%a6) |set size to ext,
| ;block exceptions
| A12. Calculate YINT = FINT(Y) according to user's rounding mode.
| The FPSP routine sintd0 is used. The output is in fp0.
|
| Register usage:
| Input/Output
| d0: FPSR with AINEX cleared/FPCR with size set to ext
| d2: x/x/scratch
| d3: x/x
| d4: LEN/Unchanged
| d5: ICTR:LAMBDA/Unchanged
| d6: ILOG/Unchanged
| d7: k-factor/Unchanged
| a0: ptr for original operand/src ptr for sintdo
| a1: ptr to PTENxx array/Unchanged
| a2: ptr to FP_SCR2(a6)/Unchanged
| a6: temp pointer to FP_SCR2(a6) - orig value saved and restored
| fp0: Y/YINT
| fp1: 10^ISCALE/Unchanged
| fp2: x/x
| F_SCR1:x/x
| F_SCR2:Y adjusted for inex/Y with original exponent
| L_SCR1:x/original USER_FPCR
| L_SCR2:first word of X packed/Unchanged
A12_st:
moveml %d0-%d1/%a0-%a1,-(%a7) |save regs used by sintd0
movel L_SCR1(%a6),-(%a7)
movel L_SCR2(%a6),-(%a7)
leal FP_SCR2(%a6),%a0 |a0 is ptr to F_SCR2(a6)
fmovex %fp0,(%a0) |move Y to memory at FP_SCR2(a6)
tstl L_SCR2(%a6) |test sign of original operand
bges do_fint |if pos, use Y
orl #0x80000000,(%a0) |if neg, use -Y
do_fint:
movel USER_FPSR(%a6),-(%a7)
bsr sintdo |sint routine returns int in fp0
moveb (%a7),USER_FPSR(%a6)
addl #4,%a7
movel (%a7)+,L_SCR2(%a6)
movel (%a7)+,L_SCR1(%a6)
moveml (%a7)+,%d0-%d1/%a0-%a1 |restore regs used by sint
movel L_SCR2(%a6),FP_SCR2(%a6) |restore original exponent
movel L_SCR1(%a6),USER_FPCR(%a6) |restore user's FPCR
| A13. Check for LEN digits.
| If the int operation results in more than LEN digits,
| or less than LEN -1 digits, adjust ILOG and repeat from
| A6. This test occurs only on the first pass. If the
| result is exactly 10^LEN, decrement ILOG and divide
| the mantissa by 10. The calculation of 10^LEN cannot
| be inexact, since all powers of ten up to 10^27 are exact
| in extended precision, so the use of a previous power-of-ten
| table will introduce no error.
|
|
| Register usage:
| Input/Output
| d0: FPCR with size set to ext/scratch final = 0
| d2: x/x
| d3: x/scratch final = x
| d4: LEN/LEN adjusted
| d5: ICTR:LAMBDA/LAMBDA:ICTR
| d6: ILOG/ILOG adjusted
| d7: k-factor/Unchanged
| a0: pointer into memory for packed bcd string formation
| a1: ptr to PTENxx array/Unchanged
| a2: ptr to FP_SCR2(a6)/Unchanged
| fp0: int portion of Y/abs(YINT) adjusted
| fp1: 10^ISCALE/Unchanged
| fp2: x/10^LEN
| F_SCR1:x/x
| F_SCR2:Y with original exponent/Unchanged
| L_SCR1:original USER_FPCR/Unchanged
| L_SCR2:first word of X packed/Unchanged
A13_st:
swap %d5 |put ICTR in lower word of d5
tstw %d5 |check if ICTR = 0
bne not_zr |if non-zero, go to second test
|
| Compute 10^(LEN-1)
|
fmoves FONE,%fp2 |init fp2 to 1.0
movel %d4,%d0 |put LEN in d0
subql #1,%d0 |d0 = LEN -1
clrl %d3 |clr table index
l_loop:
lsrl #1,%d0 |shift next bit into carry
bccs l_next |if zero, skip the mul
fmulx (%a1,%d3),%fp2 |mul by 10**(d3_bit_no)
l_next:
addl #12,%d3 |inc d3 to next pwrten table entry
tstl %d0 |test if LEN is zero
bnes l_loop |if not, loop
|
| 10^LEN-1 is computed for this test and A14. If the input was
| denormalized, check only the case in which YINT > 10^LEN.
|
tstb BINDEC_FLG(%a6) |check if input was norm
beqs A13_con |if norm, continue with checking
fabsx %fp0 |take abs of YINT
bra test_2
|
| Compare abs(YINT) to 10^(LEN-1) and 10^LEN
|
A13_con:
fabsx %fp0 |take abs of YINT
fcmpx %fp2,%fp0 |compare abs(YINT) with 10^(LEN-1)
fbge test_2 |if greater, do next test
subql #1,%d6 |subtract 1 from ILOG
movew #1,%d5 |set ICTR
fmovel #rm_mode,%FPCR |set rmode to RM
fmuls FTEN,%fp2 |compute 10^LEN
bra A6_str |return to A6 and recompute YINT
test_2:
fmuls FTEN,%fp2 |compute 10^LEN
fcmpx %fp2,%fp0 |compare abs(YINT) with 10^LEN
fblt A14_st |if less, all is ok, go to A14
fbgt fix_ex |if greater, fix and redo
fdivs FTEN,%fp0 |if equal, divide by 10
addql #1,%d6 | and inc ILOG
bras A14_st | and continue elsewhere
fix_ex:
addql #1,%d6 |increment ILOG by 1
movew #1,%d5 |set ICTR
fmovel #rm_mode,%FPCR |set rmode to RM
bra A6_str |return to A6 and recompute YINT
|
| Since ICTR <> 0, we have already been through one adjustment,
| and shouldn't have another; this is to check if abs(YINT) = 10^LEN
| 10^LEN is again computed using whatever table is in a1 since the
| value calculated cannot be inexact.
|
not_zr:
fmoves FONE,%fp2 |init fp2 to 1.0
movel %d4,%d0 |put LEN in d0
clrl %d3 |clr table index
z_loop:
lsrl #1,%d0 |shift next bit into carry
bccs z_next |if zero, skip the mul
fmulx (%a1,%d3),%fp2 |mul by 10**(d3_bit_no)
z_next:
addl #12,%d3 |inc d3 to next pwrten table entry
tstl %d0 |test if LEN is zero
bnes z_loop |if not, loop
fabsx %fp0 |get abs(YINT)
fcmpx %fp2,%fp0 |check if abs(YINT) = 10^LEN
fbne A14_st |if not, skip this
fdivs FTEN,%fp0 |divide abs(YINT) by 10
addql #1,%d6 |and inc ILOG by 1
addql #1,%d4 | and inc LEN
fmuls FTEN,%fp2 | if LEN++, the get 10^^LEN
| A14. Convert the mantissa to bcd.
| The binstr routine is used to convert the LEN digit
| mantissa to bcd in memory. The input to binstr is
| to be a fraction; i.e. (mantissa)/10^LEN and adjusted
| such that the decimal point is to the left of bit 63.
| The bcd digits are stored in the correct position in
| the final string area in memory.
|
|
| Register usage:
| Input/Output
| d0: x/LEN call to binstr - final is 0
| d1: x/0
| d2: x/ms 32-bits of mant of abs(YINT)
| d3: x/ls 32-bits of mant of abs(YINT)
| d4: LEN/Unchanged
| d5: ICTR:LAMBDA/LAMBDA:ICTR
| d6: ILOG
| d7: k-factor/Unchanged
| a0: pointer into memory for packed bcd string formation
| /ptr to first mantissa byte in result string
| a1: ptr to PTENxx array/Unchanged
| a2: ptr to FP_SCR2(a6)/Unchanged
| fp0: int portion of Y/abs(YINT) adjusted
| fp1: 10^ISCALE/Unchanged
| fp2: 10^LEN/Unchanged
| F_SCR1:x/Work area for final result
| F_SCR2:Y with original exponent/Unchanged
| L_SCR1:original USER_FPCR/Unchanged
| L_SCR2:first word of X packed/Unchanged
A14_st:
fmovel #rz_mode,%FPCR |force rz for conversion
fdivx %fp2,%fp0 |divide abs(YINT) by 10^LEN
leal FP_SCR1(%a6),%a0
fmovex %fp0,(%a0) |move abs(YINT)/10^LEN to memory
movel 4(%a0),%d2 |move 2nd word of FP_RES to d2
movel 8(%a0),%d3 |move 3rd word of FP_RES to d3
clrl 4(%a0) |zero word 2 of FP_RES
clrl 8(%a0) |zero word 3 of FP_RES
movel (%a0),%d0 |move exponent to d0
swap %d0 |put exponent in lower word
beqs no_sft |if zero, don't shift
subil #0x3ffd,%d0 |sub bias less 2 to make fract
tstl %d0 |check if > 1
bgts no_sft |if so, don't shift
negl %d0 |make exp positive
m_loop:
lsrl #1,%d2 |shift d2:d3 right, add 0s
roxrl #1,%d3 |the number of places
dbf %d0,m_loop |given in d0
no_sft:
tstl %d2 |check for mantissa of zero
bnes no_zr |if not, go on
tstl %d3 |continue zero check
beqs zer_m |if zero, go directly to binstr
no_zr:
clrl %d1 |put zero in d1 for addx
addil #0x00000080,%d3 |inc at bit 7
addxl %d1,%d2 |continue inc
andil #0xffffff80,%d3 |strip off lsb not used by 882
zer_m:
movel %d4,%d0 |put LEN in d0 for binstr call
addql #3,%a0 |a0 points to M16 byte in result
bsr binstr |call binstr to convert mant
| A15. Convert the exponent to bcd.
| As in A14 above, the exp is converted to bcd and the
| digits are stored in the final string.
|
| Digits are stored in L_SCR1(a6) on return from BINDEC as:
|
| 32 16 15 0
| -----------------------------------------
| | 0 | e3 | e2 | e1 | e4 | X | X | X |
| -----------------------------------------
|
| And are moved into their proper places in FP_SCR1. If digit e4
| is non-zero, OPERR is signaled. In all cases, all 4 digits are
| written as specified in the 881/882 manual for packed decimal.
|
| Register usage:
| Input/Output
| d0: x/LEN call to binstr - final is 0
| d1: x/scratch (0);shift count for final exponent packing
| d2: x/ms 32-bits of exp fraction/scratch
| d3: x/ls 32-bits of exp fraction
| d4: LEN/Unchanged
| d5: ICTR:LAMBDA/LAMBDA:ICTR
| d6: ILOG
| d7: k-factor/Unchanged
| a0: ptr to result string/ptr to L_SCR1(a6)
| a1: ptr to PTENxx array/Unchanged
| a2: ptr to FP_SCR2(a6)/Unchanged
| fp0: abs(YINT) adjusted/float(ILOG)
| fp1: 10^ISCALE/Unchanged
| fp2: 10^LEN/Unchanged
| F_SCR1:Work area for final result/BCD result
| F_SCR2:Y with original exponent/ILOG/10^4
| L_SCR1:original USER_FPCR/Exponent digits on return from binstr
| L_SCR2:first word of X packed/Unchanged
A15_st:
tstb BINDEC_FLG(%a6) |check for denorm
beqs not_denorm
ftstx %fp0 |test for zero
fbeq den_zero |if zero, use k-factor or 4933
fmovel %d6,%fp0 |float ILOG
fabsx %fp0 |get abs of ILOG
bras convrt
den_zero:
tstl %d7 |check sign of the k-factor
blts use_ilog |if negative, use ILOG
fmoves F4933,%fp0 |force exponent to 4933
bras convrt |do it
use_ilog:
fmovel %d6,%fp0 |float ILOG
fabsx %fp0 |get abs of ILOG
bras convrt
not_denorm:
ftstx %fp0 |test for zero
fbne not_zero |if zero, force exponent
fmoves FONE,%fp0 |force exponent to 1
bras convrt |do it
not_zero:
fmovel %d6,%fp0 |float ILOG
fabsx %fp0 |get abs of ILOG
convrt:
fdivx 24(%a1),%fp0 |compute ILOG/10^4
fmovex %fp0,FP_SCR2(%a6) |store fp0 in memory
movel 4(%a2),%d2 |move word 2 to d2
movel 8(%a2),%d3 |move word 3 to d3
movew (%a2),%d0 |move exp to d0
beqs x_loop_fin |if zero, skip the shift
subiw #0x3ffd,%d0 |subtract off bias
negw %d0 |make exp positive
x_loop:
lsrl #1,%d2 |shift d2:d3 right
roxrl #1,%d3 |the number of places
dbf %d0,x_loop |given in d0
x_loop_fin:
clrl %d1 |put zero in d1 for addx
addil #0x00000080,%d3 |inc at bit 6
addxl %d1,%d2 |continue inc
andil #0xffffff80,%d3 |strip off lsb not used by 882
movel #4,%d0 |put 4 in d0 for binstr call
leal L_SCR1(%a6),%a0 |a0 is ptr to L_SCR1 for exp digits
bsr binstr |call binstr to convert exp
movel L_SCR1(%a6),%d0 |load L_SCR1 lword to d0
movel #12,%d1 |use d1 for shift count
lsrl %d1,%d0 |shift d0 right by 12
bfins %d0,FP_SCR1(%a6){#4:#12} |put e3:e2:e1 in FP_SCR1
lsrl %d1,%d0 |shift d0 right by 12
bfins %d0,FP_SCR1(%a6){#16:#4} |put e4 in FP_SCR1
tstb %d0 |check if e4 is zero
beqs A16_st |if zero, skip rest
orl #opaop_mask,USER_FPSR(%a6) |set OPERR & AIOP in USER_FPSR
| A16. Write sign bits to final string.
| Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
|
| Register usage:
| Input/Output
| d0: x/scratch - final is x
| d2: x/x
| d3: x/x
| d4: LEN/Unchanged
| d5: ICTR:LAMBDA/LAMBDA:ICTR
| d6: ILOG/ILOG adjusted
| d7: k-factor/Unchanged
| a0: ptr to L_SCR1(a6)/Unchanged
| a1: ptr to PTENxx array/Unchanged
| a2: ptr to FP_SCR2(a6)/Unchanged
| fp0: float(ILOG)/Unchanged
| fp1: 10^ISCALE/Unchanged
| fp2: 10^LEN/Unchanged
| F_SCR1:BCD result with correct signs
| F_SCR2:ILOG/10^4
| L_SCR1:Exponent digits on return from binstr
| L_SCR2:first word of X packed/Unchanged
A16_st:
clrl %d0 |clr d0 for collection of signs
andib #0x0f,FP_SCR1(%a6) |clear first nibble of FP_SCR1
tstl L_SCR2(%a6) |check sign of original mantissa
bges mant_p |if pos, don't set SM
moveql #2,%d0 |move 2 in to d0 for SM
mant_p:
tstl %d6 |check sign of ILOG
bges wr_sgn |if pos, don't set SE
addql #1,%d0 |set bit 0 in d0 for SE
wr_sgn:
bfins %d0,FP_SCR1(%a6){#0:#2} |insert SM and SE into FP_SCR1
| Clean up and restore all registers used.
fmovel #0,%FPSR |clear possible inex2/ainex bits
fmovemx (%a7)+,%fp0-%fp2
moveml (%a7)+,%d2-%d7/%a2
rts
|end
|
| AirFortressIlikara/LS2K0300-linux-4.19
| 2,932
|
| arch/m68k/fpsp040/x_fline.S
|
|
| x_fline.sa 3.3 1/10/91
|
| fpsp_fline --- FPSP handler for fline exception
|
| First determine if the exception is one of the unimplemented
| floating point instructions. If so, let fpsp_unimp handle it.
| Next, determine if the instruction is an fmovecr with a non-zero
| <ea> field. If so, handle here and return. Otherwise, it
| must be a real F-line exception.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
X_FLINE: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref real_fline
|xref fpsp_unimp
|xref uni_2
|xref mem_read
|xref fpsp_fmt_error
	.global	fpsp_fline
|
| fpsp_fline --- FPSP F-line trap dispatcher.
|
| Decides among three cases:
|   1. unimplemented FP instruction vector  -> branch to fpsp_unimp
|   2. FMOVECR with a non-zero <ea> field   -> rebuild the stack so it
|      looks like an "unimplemented" frame and continue at uni_2
|   3. anything else                        -> real F-line exception,
|      hand off to the kernel via real_fline
|
| On entry a7 points at the four-word F-line stack frame; a6 is not
| yet linked.  Registers d0-d1/a0-a1 are preserved across the
| FMOVECR path via USER_DA.
|
fpsp_fline:
|
|	check for unimplemented vector first.  Use EXC_VEC-4 because
|	the equate is valid only after a 'link a6' has pushed one more
|	long onto the stack.
|
	cmpw	#UNIMP_VEC,EXC_VEC-4(%a7)
	beql	fpsp_unimp

|
|	fmovecr with non-zero <ea> handling here
|
	subl	#4,%a7		|4 accounts for 2-word difference
|				;between six word frame (unimp) and
|				;four word frame
	link	%a6,#-LOCAL_SIZE
	fsave	-(%a7)
	moveml	%d0-%d1/%a0-%a1,USER_DA(%a6)
	moveal	EXC_PC+4(%a6),%a0 |get address of fline instruction
	leal	L_SCR1(%a6),%a1	|use L_SCR1 as scratch
	movel	#4,%d0		|read 4 bytes (opword + command word)
	addl	#4,%a6		|to offset the sub.l #4,a7 above so that
|				;a6 can point correctly to the stack frame
|				;before branching to mem_read
	bsrl	mem_read
	subl	#4,%a6		|undo the temporary a6 adjustment
	movel	L_SCR1(%a6),%d0 |d0 contains the fline and command word
	bfextu	%d0{#4:#3},%d1	|extract coprocessor id
	cmpib	#1,%d1		|check if cpid=1 (the FPU)
	bne	not_mvcr	|exit if not
	bfextu	%d0{#16:#6},%d1	|upper 6 bits of the command word
	cmpib	#0x17,%d1	|check if it is an FMOVECR encoding
	bne	not_mvcr
|				;if an FMOVECR instruction, fix stack
|				;and go to FPSP_UNIMP
|
| Convert the fsave frame on the stack into an "unimplemented
| instruction" frame of the matching version ($40 original, $41 rev).
|
fix_stack:
	cmpib	#VER_40,(%a7)	|test for orig unimp frame
	bnes	ck_rev
	subl	#UNIMP_40_SIZE-4,%a7 |emulate an orig fsave
	moveb	#VER_40,(%a7)
	moveb	#UNIMP_40_SIZE-4,1(%a7)
	clrw	2(%a7)
	bras	fix_con
ck_rev:
	cmpib	#VER_41,(%a7)	|test for rev unimp frame
	bnel	fpsp_fmt_error	|if not $40 or $41, exit with error
	subl	#UNIMP_41_SIZE-4,%a7 |emulate a rev fsave
	moveb	#VER_41,(%a7)
	moveb	#UNIMP_41_SIZE-4,1(%a7)
	clrw	2(%a7)
fix_con:
	movew	EXC_SR+4(%a6),EXC_SR(%a6) |move stacked sr to new position
	movel	EXC_PC+4(%a6),EXC_PC(%a6) |move stacked pc to new position
	fmovel	EXC_PC(%a6),%FPIAR |point FPIAR to fline inst
	movel	#4,%d1
	addl	%d1,EXC_PC(%a6)	|increment stacked pc value to next inst
	movew	#0x202c,EXC_VEC(%a6) |reformat vector to unimp
	clrl	EXC_EA(%a6)	|clear the EXC_EA field
	movew	%d0,CMDREG1B(%a6) |move the lower word into CMDREG1B
	clrl	E_BYTE(%a6)
	bsetb	#UFLAG,T_BYTE(%a6) |flag this as an emulated unimp entry
	moveml	USER_DA(%a6),%d0-%d1/%a0-%a1 |restore data registers
	bral	uni_2

|
| Not an FMOVECR: restore state, drop the extra long we pushed, and
| deliver a genuine F-line exception to the kernel.
|
not_mvcr:
	moveml	USER_DA(%a6),%d0-%d1/%a0-%a1 |restore data registers
	frestore (%a7)+
	unlk	%a6
	addl	#4,%a7		|undo the earlier sub.l #4,a7
	bral	real_fline
|end
|
| Source: AirFortressIlikara/LS2K0300-linux-4.19
|	14,062
|
| arch/m68k/fpsp040/bugfix.S
|
|
| bugfix.sa 3.2 1/31/91
|
|
| This file contains workarounds for bugs in the 040
| relating to the Floating-Point Software Package (FPSP)
|
| Fixes for bugs: 1238
|
| Bug: 1238
|
|
| /* The following dirty_bit clear should be left in
| * the handler permanently to improve throughput.
| * The dirty_bits are located at bits [23:16] in
| * longword $08 in the busy frame $4x60. Bit 16
| * corresponds to FP0, bit 17 corresponds to FP1,
| * and so on.
| */
| if (E3_exception_just_serviced) {
| dirty_bit[cmdreg3b[9:7]] = 0;
| }
|
| if (fsave_format_version != $40) {goto NOFIX}
|
| if !(E3_exception_just_serviced) {goto NOFIX}
| if (cupc == 0000000) {goto NOFIX}
| if ((cmdreg1b[15:13] != 000) &&
| (cmdreg1b[15:10] != 010001)) {goto NOFIX}
| if (((cmdreg1b[15:13] != 000) || ((cmdreg1b[12:10] != cmdreg2b[9:7]) &&
| (cmdreg1b[12:10] != cmdreg3b[9:7])) ) &&
| ((cmdreg1b[ 9: 7] != cmdreg2b[9:7]) &&
| (cmdreg1b[ 9: 7] != cmdreg3b[9:7])) ) {goto NOFIX}
|
| /* Note: for 6d43b or 8d43b, you may want to add the following code
| * to get better coverage. (If you do not insert this code, the part
| * won't lock up; it will simply get the wrong answer.)
| * Do NOT insert this code for 10d43b or later parts.
| *
| * if (fpiarcu == integer stack return address) {
| * cupc = 0000000;
| * goto NOFIX;
| * }
| */
|
| if (cmdreg1b[15:13] != 000) {goto FIX_OPCLASS2}
| FIX_OPCLASS0:
| if (((cmdreg1b[12:10] == cmdreg2b[9:7]) ||
| (cmdreg1b[ 9: 7] == cmdreg2b[9:7])) &&
| (cmdreg1b[12:10] != cmdreg3b[9:7]) &&
| (cmdreg1b[ 9: 7] != cmdreg3b[9:7])) { /* xu conflict only */
| /* We execute the following code if there is an
| xu conflict and NOT an nu conflict */
|
| /* first save some values on the fsave frame */
| stag_temp = STAG[fsave_frame];
| cmdreg1b_temp = CMDREG1B[fsave_frame];
| dtag_temp = DTAG[fsave_frame];
| ete15_temp = ETE15[fsave_frame];
|
| CUPC[fsave_frame] = 0000000;
| FRESTORE
| FSAVE
|
| /* If the xu instruction is exceptional, we punt.
| * Otherwise, we would have to include OVFL/UNFL handler
| * code here to get the correct answer.
| */
| if (fsave_frame_format == $4060) {goto KILL_PROCESS}
|
| fsave_frame = /* build a long frame of all zeros */
| fsave_frame_format = $4060; /* label it as long frame */
|
| /* load it with the temps we saved */
| STAG[fsave_frame] = stag_temp;
| CMDREG1B[fsave_frame] = cmdreg1b_temp;
| DTAG[fsave_frame] = dtag_temp;
| ETE15[fsave_frame] = ete15_temp;
|
| /* Make sure that the cmdreg3b dest reg is not going to
| * be destroyed by a FMOVEM at the end of all this code.
| * If it is, you should move the current value of the reg
| * onto the stack so that the reg will loaded with that value.
| */
|
| /* All done. Proceed with the code below */
| }
|
| etemp = FP_reg_[cmdreg1b[12:10]];
| ete15 = ~ete14;
| cmdreg1b[15:10] = 010010;
| clear(bug_flag_procIDxxxx);
| FRESTORE and return;
|
|
| FIX_OPCLASS2:
| if ((cmdreg1b[9:7] == cmdreg2b[9:7]) &&
| (cmdreg1b[9:7] != cmdreg3b[9:7])) { /* xu conflict only */
| /* We execute the following code if there is an
| xu conflict and NOT an nu conflict */
|
| /* first save some values on the fsave frame */
| stag_temp = STAG[fsave_frame];
| cmdreg1b_temp = CMDREG1B[fsave_frame];
| dtag_temp = DTAG[fsave_frame];
| ete15_temp = ETE15[fsave_frame];
| etemp_temp = ETEMP[fsave_frame];
|
| CUPC[fsave_frame] = 0000000;
| FRESTORE
| FSAVE
|
|
| /* If the xu instruction is exceptional, we punt.
| * Otherwise, we would have to include OVFL/UNFL handler
| * code here to get the correct answer.
| */
| if (fsave_frame_format == $4060) {goto KILL_PROCESS}
|
| fsave_frame = /* build a long frame of all zeros */
| fsave_frame_format = $4060; /* label it as long frame */
|
| /* load it with the temps we saved */
| STAG[fsave_frame] = stag_temp;
| CMDREG1B[fsave_frame] = cmdreg1b_temp;
| DTAG[fsave_frame] = dtag_temp;
| ETE15[fsave_frame] = ete15_temp;
| ETEMP[fsave_frame] = etemp_temp;
|
| /* Make sure that the cmdreg3b dest reg is not going to
| * be destroyed by a FMOVEM at the end of all this code.
| * If it is, you should move the current value of the reg
| * onto the stack so that the reg will loaded with that value.
| */
|
| /* All done. Proceed with the code below */
| }
|
| if (etemp_exponent == min_sgl) etemp_exponent = min_dbl;
| if (etemp_exponent == max_sgl) etemp_exponent = max_dbl;
| cmdreg1b[15:10] = 010101;
| clear(bug_flag_procIDxxxx);
| FRESTORE and return;
|
|
| NOFIX:
| clear(bug_flag_procIDxxxx);
| FRESTORE and return;
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|BUGFIX idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref fpsp_fmt_error
	.global	b1238_fix
|
| b1238_fix --- workaround for 68040 erratum #1238.
|
| Entered (via bsr, so the caller's return address sits at (%a7))
| after an nu-generated ovfl/unfl/inex exception has been serviced.
| a6 points at the FPSP exception frame (offsets from fpsp.h).
| Detects the cu/xu/nu destination-register conflict pattern of the
| erratum and, when found, rewrites the pending cu instruction in
| the busy frame so it will re-execute correctly.  Falls through to
| fix_done (rts) in every non-erratum case.
|
b1238_fix:
|
|	This code is entered only on completion of the handling of an
|	nu-generated ovfl, unfl, or inex exception.  If the version
|	number of the fsave is not $40, this handler is not necessary.
|	Simply branch to fix_done and exit normally.
|
	cmpib	#VER_40,4(%a7)	|4(%a7): skip the bsr return address
	bne	fix_done
|
|	Test for cu_savepc equal to zero.  If not, this is not a bug
|	#1238 case.
|
	moveb	CU_SAVEPC(%a6),%d0
	andib	#0xFE,%d0
	beq	fix_done	|if zero, this is not bug #1238
|
|	Test the register conflict aspect.  If opclass0, check for
|	cu src equal to xu dest or equal to nu dest.  If so, go to
|	op0.  Else, or if opclass2, check for cu dest equal to
|	xu dest or equal to nu dest.  If so, go to tst_opcl.  Else,
|	exit, it is not the bug case.
|
|	Check for opclass 0.  If not, go and check for opclass 2 and sgl.
|
	movew	CMDREG1B(%a6),%d0
	andiw	#0xE000,%d0	|strip all but opclass
	bne	op2sgl		|not opclass 0, check op2
|
|	Check for cu and nu register conflict.  If one exists, this takes
|	priority over a cu and xu conflict.
|
	bfextu	CMDREG1B(%a6){#3:#3},%d0 |get 1st src
	bfextu	CMDREG3B(%a6){#6:#3},%d1 |get 3rd dest
	cmpb	%d0,%d1
	beqs	op0		|if equal, continue bugfix
|
|	Check for cu dest equal to nu dest.  If so, go and fix the
|	bug condition.  Otherwise, exit.
|
	bfextu	CMDREG1B(%a6){#6:#3},%d0 |get 1st dest
	cmpb	%d0,%d1		|cmp 1st dest with 3rd dest
	beqs	op0		|if equal, continue bugfix
|
|	Check for cu and xu register conflict.
|
	bfextu	CMDREG2B(%a6){#6:#3},%d1 |get 2nd dest
	cmpb	%d0,%d1		|cmp 1st dest with 2nd dest
	beqs	op0_xu		|if equal, continue bugfix
	bfextu	CMDREG1B(%a6){#3:#3},%d0 |get 1st src
	cmpb	%d0,%d1		|cmp 1st src with 2nd dest
	beq	op0_xu
	bne	fix_done	|if the reg checks fail, exit
|				;(bne is always taken when reached:
|				;the beq above just fell through)
|
|	We have the opclass 0 situation.
|
op0:
	bfextu	CMDREG1B(%a6){#3:#3},%d0 |get source register no
	movel	#7,%d1
	subl	%d0,%d1		|fmovemx mask bit = 7 - regno
	clrl	%d0
	bsetl	%d1,%d0
	fmovemx	%d0,ETEMP(%a6)	|load source to ETEMP
	moveb	#0x12,%d0
	bfins	%d0,CMDREG1B(%a6){#0:#6} |opclass 2, extended
|
|	Set ETEMP exponent bit 15 as the opposite of ete14
|
	btst	#6,ETEMP_EX(%a6) |check etemp exponent bit 14
	beq	setete15
	bclr	#etemp15_bit,STAG(%a6)
	bra	finish
setete15:
	bset	#etemp15_bit,STAG(%a6)
	bra	finish
|
|	We have the case in which a conflict exists between the cu src or
|	dest and the dest of the xu.  We must clear the instruction in
|	the cu and restore the state, allowing the instruction in the
|	xu to complete.  Remember, the instruction in the nu
|	was exceptional, and was completed by the appropriate handler.
|	If the result of the xu instruction is not exceptional, we can
|	restore the instruction from the cu to the frame and continue
|	processing the original exception.  If the result is also
|	exceptional, we choose to kill the process.
|
|	Items saved from the stack:
|
|		$3c stag - L_SCR1
|		$40 cmdreg1b - L_SCR2
|		$44 dtag - L_SCR3
|
|	The cu savepc is set to zero, and the frame is restored to the
|	fpu.
|
op0_xu:
	movel	STAG(%a6),L_SCR1(%a6)
	movel	CMDREG1B(%a6),L_SCR2(%a6)
	movel	DTAG(%a6),L_SCR3(%a6)
	andil	#0xe0000000,L_SCR3(%a6) |keep only the tag bits of dtag
	moveb	#0,CU_SAVEPC(%a6) |cancel the pending cu instruction
	movel	(%a7)+,%d1	|save return address from bsr
	frestore (%a7)+		|let the xu instruction complete
	fsave	-(%a7)
|
|	Check if the instruction which just completed was exceptional.
|
	cmpw	#0x4060,(%a7)	|busy frame => xu was exceptional
	beq	op0_xb
|
|	It is necessary to isolate the result of the instruction in the
|	xu if it is to fp0 - fp3 and write that value to the USER_FPn
|	locations on the stack.  The correct destination register is in
|	cmdreg2b.
|
	bfextu	CMDREG2B(%a6){#6:#3},%d0 |get dest register no
	cmpil	#3,%d0
	bgts	op0_xi		|fp4-fp7: nothing to copy
	beqs	op0_fp3
	cmpil	#1,%d0
	blts	op0_fp0
	beqs	op0_fp1
op0_fp2:
	fmovemx	%fp2-%fp2,USER_FP2(%a6)
	bras	op0_xi
op0_fp1:
	fmovemx	%fp1-%fp1,USER_FP1(%a6)
	bras	op0_xi
op0_fp0:
	fmovemx	%fp0-%fp0,USER_FP0(%a6)
	bras	op0_xi
op0_fp3:
	fmovemx	%fp3-%fp3,USER_FP3(%a6)
|
|	The frame returned is idle.  We must build a busy frame to hold
|	the cu state information and setup etemp.
|
op0_xi:
	movel	#22,%d0		|clear 23 lwords
	clrl	(%a7)
op0_loop:
	clrl	-(%a7)
	dbf	%d0,op0_loop
	movel	#0x40600000,-(%a7) |label it as a $4060 busy frame
	movel	L_SCR1(%a6),STAG(%a6)
	movel	L_SCR2(%a6),CMDREG1B(%a6)
	movel	L_SCR3(%a6),DTAG(%a6)
	moveb	#0x6,CU_SAVEPC(%a6) |re-arm the cu instruction
	movel	%d1,-(%a7)	|return bsr return address
	bfextu	CMDREG1B(%a6){#3:#3},%d0 |get source register no
	movel	#7,%d1
	subl	%d0,%d1
	clrl	%d0
	bsetl	%d1,%d0
	fmovemx	%d0,ETEMP(%a6)	|load source to ETEMP
	moveb	#0x12,%d0
	bfins	%d0,CMDREG1B(%a6){#0:#6} |opclass 2, extended
|
|	Set ETEMP exponent bit 15 as the opposite of ete14
|
	btst	#6,ETEMP_EX(%a6) |check etemp exponent bit 14
	beq	op0_sete15
	bclr	#etemp15_bit,STAG(%a6)
	bra	finish
op0_sete15:
	bset	#etemp15_bit,STAG(%a6)
	bra	finish
|
|	The frame returned is busy.  It is not possible to reconstruct
|	the code sequence to allow completion.  We will jump to
|	fpsp_fmt_error and allow the kernel to kill the process.
|
op0_xb:
	jmp	fpsp_fmt_error
|
|	Check for opclass 2 and single size.  If not both, exit.
|
op2sgl:
	movew	CMDREG1B(%a6),%d0
	andiw	#0xFC00,%d0	|strip all but opclass and size
	cmpiw	#0x4400,%d0	|test for opclass 2 and size=sgl
	bne	fix_done	|if not, it is not bug 1238
|
|	Check for cu dest equal to nu dest or equal to xu dest, with
|	a cu and nu conflict taking priority an nu conflict.  If either,
|	go and fix the bug condition.  Otherwise, exit.
|
	bfextu	CMDREG1B(%a6){#6:#3},%d0 |get 1st dest
	bfextu	CMDREG3B(%a6){#6:#3},%d1 |get 3rd dest
	cmpb	%d0,%d1		|cmp 1st dest with 3rd dest
	beq	op2_com		|if equal, continue bugfix
	bfextu	CMDREG2B(%a6){#6:#3},%d1 |get 2nd dest
	cmpb	%d0,%d1		|cmp 1st dest with 2nd dest
	bne	fix_done	|if the reg checks fail, exit
|				;(cu/xu conflict: fall through to op2_xu)
|
|	We have the case in which a conflict exists between the cu src or
|	dest and the dest of the xu.  We must clear the instruction in
|	the cu and restore the state, allowing the instruction in the
|	xu to complete.  Remember, the instruction in the nu
|	was exceptional, and was completed by the appropriate handler.
|	If the result of the xu instruction is not exceptional, we can
|	restore the instruction from the cu to the frame and continue
|	processing the original exception.  If the result is also
|	exceptional, we choose to kill the process.
|
|	Items saved from the stack:
|
|		$3c stag - L_SCR1
|		$40 cmdreg1b - L_SCR2
|		$44 dtag - L_SCR3
|		etemp - FP_SCR2
|
|	The cu savepc is set to zero, and the frame is restored to the
|	fpu.
|
op2_xu:
	movel	STAG(%a6),L_SCR1(%a6)
	movel	CMDREG1B(%a6),L_SCR2(%a6)
	movel	DTAG(%a6),L_SCR3(%a6)
	andil	#0xe0000000,L_SCR3(%a6) |keep only the tag bits of dtag
	moveb	#0,CU_SAVEPC(%a6) |cancel the pending cu instruction
	movel	ETEMP(%a6),FP_SCR2(%a6)
	movel	ETEMP_HI(%a6),FP_SCR2+4(%a6)
	movel	ETEMP_LO(%a6),FP_SCR2+8(%a6)
	movel	(%a7)+,%d1	|save return address from bsr
	frestore (%a7)+		|let the xu instruction complete
	fsave	-(%a7)
|
|	Check if the instruction which just completed was exceptional.
|
	cmpw	#0x4060,(%a7)	|busy frame => xu was exceptional
	beq	op2_xb
|
|	It is necessary to isolate the result of the instruction in the
|	xu if it is to fp0 - fp3 and write that value to the USER_FPn
|	locations on the stack.  The correct destination register is in
|	cmdreg2b.
|
	bfextu	CMDREG2B(%a6){#6:#3},%d0 |get dest register no
	cmpil	#3,%d0
	bgts	op2_xi		|fp4-fp7: nothing to copy
	beqs	op2_fp3
	cmpil	#1,%d0
	blts	op2_fp0
	beqs	op2_fp1
op2_fp2:
	fmovemx	%fp2-%fp2,USER_FP2(%a6)
	bras	op2_xi
op2_fp1:
	fmovemx	%fp1-%fp1,USER_FP1(%a6)
	bras	op2_xi
op2_fp0:
	fmovemx	%fp0-%fp0,USER_FP0(%a6)
	bras	op2_xi
op2_fp3:
	fmovemx	%fp3-%fp3,USER_FP3(%a6)
|
|	The frame returned is idle.  We must build a busy frame to hold
|	the cu state information and fix up etemp.
|
op2_xi:
	movel	#22,%d0		|clear 23 lwords
	clrl	(%a7)
op2_loop:
	clrl	-(%a7)
	dbf	%d0,op2_loop
	movel	#0x40600000,-(%a7) |label it as a $4060 busy frame
	movel	L_SCR1(%a6),STAG(%a6)
	movel	L_SCR2(%a6),CMDREG1B(%a6)
	movel	L_SCR3(%a6),DTAG(%a6)
	moveb	#0x6,CU_SAVEPC(%a6) |re-arm the cu instruction
	movel	FP_SCR2(%a6),ETEMP(%a6)
	movel	FP_SCR2+4(%a6),ETEMP_HI(%a6)
	movel	FP_SCR2+8(%a6),ETEMP_LO(%a6)
	movel	%d1,-(%a7)	|put back the bsr return address
	bra	op2_com
|
|	We have the opclass 2 single source situation.
|	Promote the single-precision operand to double so the retried
|	instruction cannot hit the same erratum: the single-precision
|	extreme exponents are rewritten as their double equivalents.
|
op2_com:
	moveb	#0x15,%d0
	bfins	%d0,CMDREG1B(%a6){#0:#6} |opclass 2, double
	cmpw	#0x407F,ETEMP_EX(%a6)	|single +max
	bnes	case2
	movew	#0x43FF,ETEMP_EX(%a6)	|to double +max
	bra	finish
case2:
	cmpw	#0xC07F,ETEMP_EX(%a6)	|single -max
	bnes	case3
	movew	#0xC3FF,ETEMP_EX(%a6)	|to double -max
	bra	finish
case3:
	cmpw	#0x3F80,ETEMP_EX(%a6)	|single +min
	bnes	case4
	movew	#0x3C00,ETEMP_EX(%a6)	|to double +min
	bra	finish
case4:
	cmpw	#0xBF80,ETEMP_EX(%a6)	|single -min
	bne	fix_done
	movew	#0xBC00,ETEMP_EX(%a6)	|to double -min
	bra	finish
|
|	The frame returned is busy.  It is not possible to reconstruct
|	the code sequence to allow completion.  fpsp_fmt_error causes
|	an fline illegal instruction to be executed.
|
|	You should replace the jump to fpsp_fmt_error with a jump
|	to the entry point used to kill a process.
|
op2_xb:
	jmp	fpsp_fmt_error
|
|	Enter here if the case is not of the situations affected by
|	bug #1238, or if the fix is completed, and exit.
|
finish:
fix_done:
	rts
|end
|
| Source: AirFortressIlikara/LS2K0300-linux-4.19
|	15,961
|
| arch/m68k/fpsp040/satan.S
|
|
| satan.sa 3.3 12/19/90
|
| The entry point satan computes the arctangent of an
| input value. satand does the same except the input value is a
| denormalized number.
|
| Input: Double-extended value in memory location pointed to by address
| register a0.
|
| Output: Arctan(X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 2 ulps in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program satan takes approximately 160 cycles for input
| argument X such that 1/16 < |X| < 16. For the other arguments,
| the program will run no worse than 10% slower.
|
| Algorithm:
| Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5.
|
| Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x. Note that k = -4, -3,..., or 3.
| Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5 significant bits
| of X with a bit-1 attached at the 6-th bit position. Define u
| to be u = (X-F) / (1 + X*F).
|
| Step 3. Approximate arctan(u) by a polynomial poly.
|
| Step 4. Return arctan(F) + poly, arctan(F) is fetched from a table of values
| calculated beforehand. Exit.
|
| Step 5. If |X| >= 16, go to Step 7.
|
| Step 6. Approximate arctan(X) by an odd polynomial in X. Exit.
|
| Step 7. Define X' = -1/X. Approximate arctan(X') by an odd polynomial in X'.
| Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|satan idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|
| Constant pool for satan/satand.
|
| BOUNDS1  - compact-form bounds [1/16, 16) for the table-driven path
| ONE      - single-precision 1.0 (plus pad long)
| ATANAx   - polynomial coefficients for atan(u), |u| small (main path)
| ATANBx   - odd-polynomial coefficients for |X| <= 1/16
| ATANCx   - odd-polynomial coefficients for the -1/X reduction, |X| >= 16
| PPIBY2/NPIBY2 - +/- pi/2 in extended precision
| PTINY/NTINY   - +/- a tiny extended value (exponent $0001), used to
|                 force inexact in the |X| > 2^100 case
|
BOUNDS1:	.long 0x3FFB8000,0x4002FFFF
ONE:	.long 0x3F800000
	.long 0x00000000
ATANA3:	.long 0xBFF6687E,0x314987D8
ATANA2:	.long 0x4002AC69,0x34A26DB3
ATANA1:	.long 0xBFC2476F,0x4E1DA28E
ATANB6:	.long 0x3FB34444,0x7F876989
ATANB5:	.long 0xBFB744EE,0x7FAF45DB
ATANB4:	.long 0x3FBC71C6,0x46940220
ATANB3:	.long 0xBFC24924,0x921872F9
ATANB2:	.long 0x3FC99999,0x99998FA9
ATANB1:	.long 0xBFD55555,0x55555555
ATANC5:	.long 0xBFB70BF3,0x98539E6A
ATANC4:	.long 0x3FBC7187,0x962D1D7D
ATANC3:	.long 0xBFC24924,0x827107B8
ATANC2:	.long 0x3FC99999,0x9996263E
ATANC1:	.long 0xBFD55555,0x55555536
PPIBY2:	.long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
NPIBY2:	.long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
PTINY:	.long 0x00010000,0x80000000,0x00000000,0x00000000
NTINY:	.long 0x80010000,0x80000000,0x00000000,0x00000000
|
| ATANTBL: atan(|F|) for the 128 anchor points F = 2^k * 1.bbbb1,
| k = -4..3, one extended-precision value (12 bytes) plus a pad long
| per entry => 16 bytes per entry; indexed by 16*((k+4)*16 + bbbb).
|
ATANTBL:
	.long 0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
	.long 0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
	.long 0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
	.long 0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
	.long 0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
	.long 0x3FFB0000,0xAB98E943,0x62765619,0x00000000
	.long 0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
	.long 0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
	.long 0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
	.long 0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
	.long 0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
	.long 0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
	.long 0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
	.long 0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
	.long 0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
	.long 0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
	.long 0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
	.long 0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
	.long 0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
	.long 0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
	.long 0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
	.long 0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
	.long 0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
	.long 0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
	.long 0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
	.long 0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
	.long 0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
	.long 0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
	.long 0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
	.long 0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
	.long 0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
	.long 0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
	.long 0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
	.long 0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
	.long 0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
	.long 0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
	.long 0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
	.long 0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
	.long 0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
	.long 0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
	.long 0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
	.long 0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
	.long 0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
	.long 0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
	.long 0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
	.long 0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
	.long 0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
	.long 0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
	.long 0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
	.long 0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
	.long 0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
	.long 0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
	.long 0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
	.long 0x3FFE0000,0x97731420,0x365E538C,0x00000000
	.long 0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
	.long 0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
	.long 0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
	.long 0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
	.long 0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
	.long 0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
	.long 0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
	.long 0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
	.long 0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
	.long 0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
	.long 0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
	.long 0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
	.long 0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
	.long 0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
	.long 0x3FFE0000,0xE8771129,0xC4353259,0x00000000
	.long 0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
	.long 0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
	.long 0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
	.long 0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
	.long 0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
	.long 0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
	.long 0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
	.long 0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
	.long 0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
	.long 0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
	.long 0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
	.long 0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
	.long 0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
	.long 0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
	.long 0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
	.long 0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
	.long 0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
	.long 0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
	.long 0x3FFF0000,0x9F100575,0x006CC571,0x00000000
	.long 0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
	.long 0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
	.long 0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
	.long 0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
	.long 0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
	.long 0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
	.long 0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
	.long 0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
	.long 0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
	.long 0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
	.long 0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
	.long 0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
	.long 0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
	.long 0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
	.long 0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
	.long 0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
	.long 0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
	.long 0x3FFF0000,0xB525529D,0x562246BD,0x00000000
	.long 0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
	.long 0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
	.long 0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
	.long 0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
	.long 0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
	.long 0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
	.long 0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
	.long 0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
	.long 0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
	.long 0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
	.long 0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
	.long 0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
	.long 0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
	.long 0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
	.long 0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
	.long 0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
	.long 0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
	.long 0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
	.long 0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
	.long 0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
	.long 0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
	.long 0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
|
| Scratch-frame layout: X is a working copy of the (possibly
| modified) argument in FP_SCR1; ATANF holds sign(F)*atan(|F|)
| in FP_SCR2.  Offsets select the extended-format fields.
|
	.set	X,FP_SCR1	|working extended operand
	.set	XDCARE,X+2	|pad word of the exponent long
	.set	XFRAC,X+4	|high fraction long
	.set	XFRACLO,X+8	|low fraction long
	.set	ATANF,FP_SCR2	|table value sign(F)*atan(|F|)
	.set	ATANFHI,ATANF+4
	.set	ATANFLO,ATANF+8
| xref t_frcinx
|xref t_extdnrm
	.global	satand
|
| satand --- atan(X) entry for a denormalized argument.
| For denormalized X, atan(X) is X to working precision, so defer to
| the shared denorm handler t_extdnrm (presumably returns the
| argument with the appropriate underflow/inexact status - defined
| outside this file).
|
satand:
|--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
	bra	t_extdnrm
	.global	satan
|
| satan --- atan(X) for finite, non-zero, non-NaN extended X.
|
| In:   a0 -> extended-precision operand
|       d1 =  caller's FPCR (restored just before the final
|             exception-setting operation)
|       a6 -> FPSP frame providing the FP_SCRn scratch areas
| Out:  fp0 = atan(X); exits through t_frcinx
| Uses: d0, d2 (d2 saved/restored), a1, fp1-fp3
|
satan:
|--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
	fmovex	(%a0),%fp0	| ...LOAD INPUT

	movel	(%a0),%d0	|sign/exponent word -> high word of d0
	movew	4(%a0),%d0	|top fraction word -> low word: "compact form"
	fmovex	%fp0,X(%a6)
	andil	#0x7FFFFFFF,%d0	|drop the sign: compact |X|

	cmpil	#0x3FFB8000,%d0		| ...|X| >= 1/16?
	bges	ATANOK1
	bra	ATANSM

ATANOK1:
	cmpil	#0x4002FFFF,%d0		| ...|X| < 16 ?
	bles	ATANMAIN
	bra	ATANBIG

|--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
|--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
|--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
|--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
|--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
|--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
|--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
|--FETCH F AND SAVING OF REGISTERS CAN BE ALL HIDED UNDER THE
|--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
|--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATE
|--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
|--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
|--WILL INVOLVE A VERY LONG POLYNOMIAL.

|--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
|--WE CHOSE F TO BE +-2^K * 1.BBBB1
|--THAT IS IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, THE
|--SIXTH BITS IS SET TO BE 1. SINCE K = -4, -3, ..., 3, THERE
|--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
|-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).

ATANMAIN:

	movew	#0x0000,XDCARE(%a6)	| ...CLEAN UP X JUST IN CASE
	andil	#0xF8000000,XFRAC(%a6)	| ...FIRST 5 BITS
	oril	#0x04000000,XFRAC(%a6)	| ...SET 6-TH BIT TO 1
	movel	#0x00000000,XFRACLO(%a6) | ...LOCATION OF X IS NOW F

	fmovex	%fp0,%fp1		| ...FP1 IS X
	fmulx	X(%a6),%fp1		| ...FP1 IS X*F, NOTE THAT X*F > 0

	fsubx	X(%a6),%fp0		| ...FP0 IS X-F
	fadds	#0x3F800000,%fp1	| ...FP1 IS 1 + X*F
	fdivx	%fp1,%fp0		| ...FP0 IS U = (X-F)/(1+X*F)

|--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
|--CREATE ATAN(F) AND STORE IT IN ATANF, AND
|--SAVE REGISTERS FP2.

	movel	%d2,-(%a7)	| ...SAVE d2 TEMPORARILY
	movel	%d0,%d2		| ...THE EXPO AND 16 BITS OF X
	andil	#0x00007800,%d0	| ...4 VARYING BITS OF F'S FRACTION
	andil	#0x7FFF0000,%d2	| ...EXPONENT OF F
	subil	#0x3FFB0000,%d2	| ...K+4
	asrl	#1,%d2
	addl	%d2,%d0		| ...THE 7 BITS IDENTIFYING F
	asrl	#7,%d0		| ...INDEX INTO TBL OF ATAN(|F|)
|				;d0 = 16 * table index (16-byte entries)
	lea	ATANTBL,%a1
	addal	%d0,%a1		| ...ADDRESS OF ATAN(|F|)
	movel	(%a1)+,ATANF(%a6)
	movel	(%a1)+,ATANFHI(%a6)
	movel	(%a1)+,ATANFLO(%a6)	| ...ATANF IS NOW ATAN(|F|)
	movel	X(%a6),%d0		| ...LOAD SIGN AND EXPO. AGAIN
	andil	#0x80000000,%d0		| ...SIGN(F)
	orl	%d0,ATANF(%a6)		| ...ATANF IS NOW SIGN(F)*ATAN(|F|)
	movel	(%a7)+,%d2		| ...RESTORE d2

|--THAT'S ALL I HAVE TO DO FOR NOW,
|--BUT ALAS, THE DIVIDE IS STILL CRANKING!

|--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
|--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
|--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
|--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
|--WHAT WE HAVE HERE IS MERELY A1 = A3, A2 = A1/A3, A3 = A2/A3.
|--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
|--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED

	fmovex	%fp0,%fp1
	fmulx	%fp1,%fp1
	fmoved	ATANA3,%fp2
	faddx	%fp1,%fp2	| ...A3+V
	fmulx	%fp1,%fp2	| ...V*(A3+V)
	fmulx	%fp0,%fp1	| ...U*V
	faddd	ATANA2,%fp2	| ...A2+V*(A3+V)
	fmuld	ATANA1,%fp1	| ...A1*U*V
	fmulx	%fp2,%fp1	| ...A1*U*V*(A2+V*(A3+V))

	faddx	%fp1,%fp0	| ...ATAN(U), FP1 RELEASED

	fmovel	%d1,%FPCR	|restore users exceptions
	faddx	ATANF(%a6),%fp0	| ...ATAN(X)
	bra	t_frcinx

ATANBORS:
|--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
|--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
	cmpil	#0x3FFF8000,%d0
	bgt	ATANBIG	| ...I.E. |X| >= 16

ATANSM:
|--|X| <= 1/16
|--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
|--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
|--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6)] )
|--WHERE Y = X*X, AND Z = Y*Y.

	cmpil	#0x3FD78000,%d0
	blt	ATANTINY
|--COMPUTE POLYNOMIAL
	fmulx	%fp0,%fp0	| ...FP0 IS Y = X*X

	movew	#0x0000,XDCARE(%a6)

	fmovex	%fp0,%fp1
	fmulx	%fp1,%fp1	| ...FP1 IS Z = Y*Y

	fmoved	ATANB6,%fp2
	fmoved	ATANB5,%fp3

	fmulx	%fp1,%fp2	| ...Z*B6
	fmulx	%fp1,%fp3	| ...Z*B5

	faddd	ATANB4,%fp2	| ...B4+Z*B6
	faddd	ATANB3,%fp3	| ...B3+Z*B5

	fmulx	%fp1,%fp2	| ...Z*(B4+Z*B6)
	fmulx	%fp3,%fp1	| ...Z*(B3+Z*B5)

	faddd	ATANB2,%fp2	| ...B2+Z*(B4+Z*B6)
	faddd	ATANB1,%fp1	| ...B1+Z*(B3+Z*B5)

	fmulx	%fp0,%fp2	| ...Y*(B2+Z*(B4+Z*B6))
	fmulx	X(%a6),%fp0	| ...X*Y

	faddx	%fp2,%fp1	| ...[B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]

	fmulx	%fp1,%fp0	| ...X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])

	fmovel	%d1,%FPCR	|restore users exceptions
	faddx	X(%a6),%fp0

	bra	t_frcinx

ATANTINY:
|--|X| < 2^(-40), ATAN(X) = X
	movew	#0x0000,XDCARE(%a6)

	fmovel	%d1,%FPCR	|restore users exceptions
	fmovex	X(%a6),%fp0	|last inst - possible exception set

	bra	t_frcinx

ATANBIG:
|--IF |X| > 2^(100), RETURN SIGN(X)*(PI/2 - TINY). OTHERWISE,
|--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
	cmpil	#0x40638000,%d0
	bgt	ATANHUGE

|--APPROXIMATE ATAN(-1/X) BY
|--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
|--THIS CAN BE RE-WRITTEN AS
|--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.

	fmoves	#0xBF800000,%fp1	| ...LOAD -1
	fdivx	%fp0,%fp1		| ...FP1 IS -1/X

|--DIVIDE IS STILL CRANKING

	fmovex	%fp1,%fp0	| ...FP0 IS X'
	fmulx	%fp0,%fp0	| ...FP0 IS Y = X'*X'
	fmovex	%fp1,X(%a6)	| ...X IS REALLY X'

	fmovex	%fp0,%fp1
	fmulx	%fp1,%fp1	| ...FP1 IS Z = Y*Y

	fmoved	ATANC5,%fp3
	fmoved	ATANC4,%fp2

	fmulx	%fp1,%fp3	| ...Z*C5
	fmulx	%fp1,%fp2	| ...Z*B4

	faddd	ATANC3,%fp3	| ...C3+Z*C5
	faddd	ATANC2,%fp2	| ...C2+Z*C4

	fmulx	%fp3,%fp1	| ...Z*(C3+Z*C5), FP3 RELEASED
	fmulx	%fp0,%fp2	| ...Y*(C2+Z*C4)

	faddd	ATANC1,%fp1	| ...C1+Z*(C3+Z*C5)
	fmulx	X(%a6),%fp0	| ...X'*Y

	faddx	%fp2,%fp1	| ...[Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]

	fmulx	%fp1,%fp0	| ...X'*Y*([B1+Z*(B3+Z*B5)]
|				| ...	+[Y*(B2+Z*(B4+Z*B6))])
	faddx	X(%a6),%fp0

	fmovel	%d1,%FPCR	|restore users exceptions

	btstb	#7,(%a0)	|sign bit of the original argument
	beqs	pos_big

neg_big:
	faddx	NPIBY2,%fp0
	bra	t_frcinx

pos_big:
	faddx	PPIBY2,%fp0
	bra	t_frcinx

ATANHUGE:
|--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
	btstb	#7,(%a0)	|sign bit of the original argument
	beqs	pos_huge

neg_huge:
	fmovex	NPIBY2,%fp0
	fmovel	%d1,%fpcr
	fsubx	NTINY,%fp0	|forces inexact under user rounding
	bra	t_frcinx

pos_huge:
	fmovex	PPIBY2,%fp0
	fmovel	%d1,%fpcr
	fsubx	PTINY,%fp0	|forces inexact under user rounding
	bra	t_frcinx
|end
|
| Source: AirFortressIlikara/LS2K0300-linux-4.19
|	5,198
|
| arch/m68k/fpsp040/slog2.S
|
|
| slog2.sa 3.1 12/10/90
|
| The entry point slog10 computes the base-10
| logarithm of an input argument X.
| slog10d does the same except the input value is a
| denormalized number.
| sLog2 and sLog2d are the base-2 analogues.
|
| INPUT: Double-extended value in memory location pointed to
| by address register a0.
|
| OUTPUT: log_10(X) or log_2(X) returned in floating-point
| register fp0.
|
| ACCURACY and MONOTONICITY: The returned result is within 1.7
| ulps in 64 significant bit, i.e. within 0.5003 ulp
| to 53 bits if the result is subsequently rounded
| to double precision. The result is provably monotonic
| in double precision.
|
| SPEED: Two timings are measured, both in the copy-back mode.
| The first one is measured when the function is invoked
| the first time (so the instructions and data are not
| in cache), and the second one is measured when the
| function is reinvoked at the same input argument.
|
| ALGORITHM and IMPLEMENTATION NOTES:
|
| slog10d:
|
| Step 0. If X < 0, create a NaN and raise the invalid operation
| flag. Otherwise, save FPCR in D1; set FpCR to default.
| Notes: Default means round-to-nearest mode, no floating-point
| traps, and precision control = double extended.
|
| Step 1. Call slognd to obtain Y = log(X), the natural log of X.
| Notes: Even if X is denormalized, log(X) is always normalized.
|
| Step 2. Compute log_10(X) = log(X) * (1/log(10)).
| 2.1 Restore the user FPCR
| 2.2 Return ans := Y * INV_L10.
|
|
| slog10:
|
| Step 0. If X < 0, create a NaN and raise the invalid operation
| flag. Otherwise, save FPCR in D1; set FpCR to default.
| Notes: Default means round-to-nearest mode, no floating-point
| traps, and precision control = double extended.
|
| Step 1. Call sLogN to obtain Y = log(X), the natural log of X.
|
| Step 2. Compute log_10(X) = log(X) * (1/log(10)).
| 2.1 Restore the user FPCR
| 2.2 Return ans := Y * INV_L10.
|
|
| sLog2d:
|
| Step 0. If X < 0, create a NaN and raise the invalid operation
| flag. Otherwise, save FPCR in D1; set FpCR to default.
| Notes: Default means round-to-nearest mode, no floating-point
| traps, and precision control = double extended.
|
| Step 1. Call slognd to obtain Y = log(X), the natural log of X.
| Notes: Even if X is denormalized, log(X) is always normalized.
|
| Step 2. Compute log_10(X) = log(X) * (1/log(2)).
| 2.1 Restore the user FPCR
| 2.2 Return ans := Y * INV_L2.
|
|
| sLog2:
|
| Step 0. If X < 0, create a NaN and raise the invalid operation
| flag. Otherwise, save FPCR in D1; set FpCR to default.
| Notes: Default means round-to-nearest mode, no floating-point
| traps, and precision control = double extended.
|
| Step 1. If X is not an integer power of two, i.e., X != 2^k,
| go to Step 3.
|
| Step 2. Return k.
| 2.1 Get integer k, X = 2^k.
| 2.2 Restore the user FPCR.
| 2.3 Return ans := convert-to-double-extended(k).
|
| Step 3. Call sLogN to obtain Y = log(X), the natural log of X.
|
| Step 4. Compute log_2(X) = log(X) * (1/log(2)).
| 4.1 Restore the user FPCR
| 4.2 Return ans := Y * INV_L2.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SLOG2 idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
|xref t_frcinx
|xref t_operr
|xref slogn
|xref slognd
INV_L10: .long 0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
INV_L2: .long 0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
.global slog10d
slog10d:
|--entry point for Log10(X), X is denormalized
movel (%a0),%d0
blt invalid
movel %d1,-(%sp)
clrl %d1
bsr slognd | ...log(X), X denorm.
fmovel (%sp)+,%fpcr
fmulx INV_L10,%fp0
bra t_frcinx
.global slog10
slog10:
|--entry point for Log10(X), X is normalized
movel (%a0),%d0
blt invalid
movel %d1,-(%sp)
clrl %d1
bsr slogn | ...log(X), X normal.
fmovel (%sp)+,%fpcr
fmulx INV_L10,%fp0
bra t_frcinx
.global slog2d
slog2d:
|--entry point for Log2(X), X is denormalized
movel (%a0),%d0
blt invalid
movel %d1,-(%sp)
clrl %d1
bsr slognd | ...log(X), X denorm.
fmovel (%sp)+,%fpcr
fmulx INV_L2,%fp0
bra t_frcinx
.global slog2
slog2:
|--entry point for Log2(X), X is normalized
movel (%a0),%d0
blt invalid
movel 8(%a0),%d0
bnes continue | ...X is not 2^k
movel 4(%a0),%d0
andl #0x7FFFFFFF,%d0
tstl %d0
bnes continue
|--X = 2^k.
movew (%a0),%d0
andl #0x00007FFF,%d0
subl #0x3FFF,%d0
fmovel %d1,%fpcr
fmovel %d0,%fp0
bra t_frcinx
continue:
movel %d1,-(%sp)
clrl %d1
bsr slogn | ...log(X), X normal.
fmovel (%sp)+,%fpcr
fmulx INV_L2,%fp0
bra t_frcinx
invalid:
bra t_operr
|end
|
| Source: AirFortressIlikara/LS2K0300-linux-4.19 (4,288 bytes)
|
| File: arch/m68k/fpsp040/smovecr.S
|
|
| smovecr.sa 3.1 12/10/90
|
| The entry point sMOVECR returns the constant at the
| offset given in the instruction field.
|
| Input: An offset in the instruction word.
|
| Output: The constant rounded to the user's rounding
| mode unchecked for overflow.
|
| Modified: fp0.
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SMOVECR idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref nrm_set
|xref round
|xref PIRN
|xref PIRZRM
|xref PIRP
|xref SMALRN
|xref SMALRZRM
|xref SMALRP
|xref BIGRN
|xref BIGRZRM
|xref BIGRP
FZERO: .long 00000000
|
| FMOVECR
|
.global smovcr
smovcr:
bfextu CMDREG1B(%a6){#9:#7},%d0 |get offset
bfextu USER_FPCR(%a6){#26:#2},%d1 |get rmode
|
| check range of offset
|
tstb %d0 |if zero, offset is to pi
beqs PI_TBL |it is pi
cmpib #0x0a,%d0 |check range $01 - $0a
bles Z_VAL |if in this range, return zero
cmpib #0x0e,%d0 |check range $0b - $0e
bles SM_TBL |valid constants in this range
cmpib #0x2f,%d0 |check range $10 - $2f
bles Z_VAL |if in this range, return zero
cmpib #0x3f,%d0 |check range $30 - $3f
ble BG_TBL |valid constants in this range
Z_VAL:
fmoves FZERO,%fp0
rts
PI_TBL:
tstb %d1 |offset is zero, check for rmode
beqs PI_RN |if zero, rn mode
cmpib #0x3,%d1 |check for rp
beqs PI_RP |if 3, rp mode
PI_RZRM:
leal PIRZRM,%a0 |rmode is rz or rm, load PIRZRM in a0
bra set_finx
PI_RN:
leal PIRN,%a0 |rmode is rn, load PIRN in a0
bra set_finx
PI_RP:
leal PIRP,%a0 |rmode is rp, load PIRP in a0
bra set_finx
SM_TBL:
subil #0xb,%d0 |make offset in 0 - 4 range
tstb %d1 |check for rmode
beqs SM_RN |if zero, rn mode
cmpib #0x3,%d1 |check for rp
beqs SM_RP |if 3, rp mode
SM_RZRM:
leal SMALRZRM,%a0 |rmode is rz or rm, load SMRZRM in a0
cmpib #0x2,%d0 |check if result is inex
ble set_finx |if 0 - 2, it is inexact
bra no_finx |if 3, it is exact
SM_RN:
leal SMALRN,%a0 |rmode is rn, load SMRN in a0
cmpib #0x2,%d0 |check if result is inex
ble set_finx |if 0 - 2, it is inexact
bra no_finx |if 3, it is exact
SM_RP:
leal SMALRP,%a0 |rmode is rp, load SMRP in a0
cmpib #0x2,%d0 |check if result is inex
ble set_finx |if 0 - 2, it is inexact
bra no_finx |if 3, it is exact
BG_TBL:
subil #0x30,%d0 |make offset in 0 - f range
tstb %d1 |check for rmode
beqs BG_RN |if zero, rn mode
cmpib #0x3,%d1 |check for rp
beqs BG_RP |if 3, rp mode
BG_RZRM:
leal BIGRZRM,%a0 |rmode is rz or rm, load BGRZRM in a0
cmpib #0x1,%d0 |check if result is inex
ble set_finx |if 0 - 1, it is inexact
cmpib #0x7,%d0 |second check
ble no_finx |if 0 - 7, it is exact
bra set_finx |if 8 - f, it is inexact
BG_RN:
leal BIGRN,%a0 |rmode is rn, load BGRN in a0
cmpib #0x1,%d0 |check if result is inex
ble set_finx |if 0 - 1, it is inexact
cmpib #0x7,%d0 |second check
ble no_finx |if 0 - 7, it is exact
bra set_finx |if 8 - f, it is inexact
BG_RP:
leal BIGRP,%a0 |rmode is rp, load SMRP in a0
cmpib #0x1,%d0 |check if result is inex
ble set_finx |if 0 - 1, it is inexact
cmpib #0x7,%d0 |second check
ble no_finx |if 0 - 7, it is exact
| bra set_finx ;if 8 - f, it is inexact
set_finx:
orl #inx2a_mask,USER_FPSR(%a6) |set inex2/ainex
no_finx:
mulul #12,%d0 |use offset to point into tables
movel %d1,L_SCR1(%a6) |load mode for round call
bfextu USER_FPCR(%a6){#24:#2},%d1 |get precision
tstl %d1 |check if extended precision
|
| Precision is extended
|
bnes not_ext |if extended, do not call round
fmovemx (%a0,%d0),%fp0-%fp0 |return result in fp0
rts
|
| Precision is single or double
|
not_ext:
swap %d1 |rnd prec in upper word of d1
addl L_SCR1(%a6),%d1 |merge rmode in low word of d1
movel (%a0,%d0),FP_SCR1(%a6) |load first word to temp storage
movel 4(%a0,%d0),FP_SCR1+4(%a6) |load second word
movel 8(%a0,%d0),FP_SCR1+8(%a6) |load third word
clrl %d0 |clear g,r,s
lea FP_SCR1(%a6),%a0
btstb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0) |convert to internal ext. format
bsr round |go round the mantissa
bfclr LOCAL_SGN(%a0){#0:#8} |convert back to IEEE ext format
beqs fin_fcr
bsetb #sign_bit,LOCAL_EX(%a0)
fin_fcr:
fmovemx (%a0),%fp0-%fp0
rts
|end
|
| Source: AirFortressIlikara/LS2K0300-linux-4.19 (17,089 bytes)
|
| File: arch/m68k/fpsp040/util.S
|
|
| util.sa 3.7 7/29/91
|
| This file contains routines used by other programs.
|
| ovf_res: used by overflow to force the correct
| result. ovf_r_k, ovf_r_x2, ovf_r_x3 are
| derivatives of this routine.
| get_fline: get user's opcode word
| g_dfmtou: returns the destination format.
| g_opcls: returns the opclass of the float instruction.
| g_rndpr: returns the rounding precision.
| reg_dest: write byte, word, or long data to Dn
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|UTIL idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref mem_read
.global g_dfmtou
.global g_opcls
.global g_rndpr
.global get_fline
.global reg_dest
|
| Final result table for ovf_res. Note that the negative counterparts
| are unnecessary as ovf_res always returns the sign separately from
| the exponent.
| ;+inf
EXT_PINF: .long 0x7fff0000,0x00000000,0x00000000,0x00000000
| ;largest +ext
EXT_PLRG: .long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000
| ;largest magnitude +sgl in ext
SGL_PLRG: .long 0x407e0000,0xffffff00,0x00000000,0x00000000
| ;largest magnitude +dbl in ext
DBL_PLRG: .long 0x43fe0000,0xffffffff,0xfffff800,0x00000000
| ;largest -ext
tblovfl:
.long EXT_RN
.long EXT_RZ
.long EXT_RM
.long EXT_RP
.long SGL_RN
.long SGL_RZ
.long SGL_RM
.long SGL_RP
.long DBL_RN
.long DBL_RZ
.long DBL_RM
.long DBL_RP
.long error
.long error
.long error
.long error
|
| ovf_r_k --- overflow result calculation
|
| This entry point is used by kernel_ex.
|
| This forces the destination precision to be extended
|
| Input: operand in ETEMP
| Output: a result is in ETEMP (internal extended format)
|
.global ovf_r_k
ovf_r_k:
lea ETEMP(%a6),%a0 |a0 points to source operand
bclrb #sign_bit,ETEMP_EX(%a6)
sne ETEMP_SGN(%a6) |convert to internal IEEE format
|
| ovf_r_x2 --- overflow result calculation
|
| This entry point used by x_ovfl. (opclass 0 and 2)
|
| Input a0 points to an operand in the internal extended format
| Output a0 points to the result in the internal extended format
|
| This sets the round precision according to the user's FPCR unless the
| instruction is fsgldiv or fsglmul or fsadd, fdadd, fsub, fdsub, fsmul,
| fdmul, fsdiv, fddiv, fssqrt, fsmove, fdmove, fsabs, fdabs, fsneg, fdneg.
| If the instruction is fsgldiv of fsglmul, the rounding precision must be
| extended. If the instruction is not fsgldiv or fsglmul but a force-
| precision instruction, the rounding precision is then set to the force
| precision.
.global ovf_r_x2
ovf_r_x2:
btstb #E3,E_BYTE(%a6) |check for nu exception
beql ovf_e1_exc |it is cu exception
ovf_e3_exc:
movew CMDREG3B(%a6),%d0 |get the command word
andiw #0x00000060,%d0 |clear all bits except 6 and 5
cmpil #0x00000040,%d0
beql ovff_sgl |force precision is single
cmpil #0x00000060,%d0
beql ovff_dbl |force precision is double
movew CMDREG3B(%a6),%d0 |get the command word again
andil #0x7f,%d0 |clear all except operation
cmpil #0x33,%d0
beql ovf_fsgl |fsglmul or fsgldiv
cmpil #0x30,%d0
beql ovf_fsgl
bra ovf_fpcr |instruction is none of the above
| ;use FPCR
ovf_e1_exc:
movew CMDREG1B(%a6),%d0 |get command word
andil #0x00000044,%d0 |clear all bits except 6 and 2
cmpil #0x00000040,%d0
beql ovff_sgl |the instruction is force single
cmpil #0x00000044,%d0
beql ovff_dbl |the instruction is force double
movew CMDREG1B(%a6),%d0 |again get the command word
andil #0x0000007f,%d0 |clear all except the op code
cmpil #0x00000027,%d0
beql ovf_fsgl |fsglmul
cmpil #0x00000024,%d0
beql ovf_fsgl |fsgldiv
bra ovf_fpcr |none of the above, use FPCR
|
|
| Inst is either fsgldiv or fsglmul. Force extended precision.
|
ovf_fsgl:
clrl %d0
bra ovf_res
ovff_sgl:
movel #0x00000001,%d0 |set single
bra ovf_res
ovff_dbl:
movel #0x00000002,%d0 |set double
bra ovf_res
|
| The precision is in the fpcr.
|
ovf_fpcr:
bfextu FPCR_MODE(%a6){#0:#2},%d0 |set round precision
bra ovf_res
|
|
| ovf_r_x3 --- overflow result calculation
|
| This entry point used by x_ovfl. (opclass 3 only)
|
| Input a0 points to an operand in the internal extended format
| Output a0 points to the result in the internal extended format
|
| This sets the round precision according to the destination size.
|
.global ovf_r_x3
ovf_r_x3:
bsr g_dfmtou |get dest fmt in d0{1:0}
| ;for fmovout, the destination format
| ;is the rounding precision
|
| ovf_res --- overflow result calculation
|
| Input:
| a0 points to operand in internal extended format
| Output:
| a0 points to result in internal extended format
|
.global ovf_res
ovf_res:
lsll #2,%d0 |move round precision to d0{3:2}
bfextu FPCR_MODE(%a6){#2:#2},%d1 |set round mode
orl %d1,%d0 |index is fmt:mode in d0{3:0}
leal tblovfl,%a1 |load a1 with table address
movel %a1@(%d0:l:4),%a1 |use d0 as index to the table
jmp (%a1) |go to the correct routine
|
|case DEST_FMT = EXT
|
EXT_RN:
leal EXT_PINF,%a1 |answer is +/- infinity
bsetb #inf_bit,FPSR_CC(%a6)
bra set_sign |now go set the sign
EXT_RZ:
leal EXT_PLRG,%a1 |answer is +/- large number
bra set_sign |now go set the sign
EXT_RM:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs e_rm_pos
e_rm_neg:
leal EXT_PINF,%a1 |answer is negative infinity
orl #neginf_mask,USER_FPSR(%a6)
bra end_ovfr
e_rm_pos:
leal EXT_PLRG,%a1 |answer is large positive number
bra end_ovfr
EXT_RP:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs e_rp_pos
e_rp_neg:
leal EXT_PLRG,%a1 |answer is large negative number
bsetb #neg_bit,FPSR_CC(%a6)
bra end_ovfr
e_rp_pos:
leal EXT_PINF,%a1 |answer is positive infinity
bsetb #inf_bit,FPSR_CC(%a6)
bra end_ovfr
|
|case DEST_FMT = DBL
|
DBL_RN:
leal EXT_PINF,%a1 |answer is +/- infinity
bsetb #inf_bit,FPSR_CC(%a6)
bra set_sign
DBL_RZ:
leal DBL_PLRG,%a1 |answer is +/- large number
bra set_sign |now go set the sign
DBL_RM:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs d_rm_pos
d_rm_neg:
leal EXT_PINF,%a1 |answer is negative infinity
orl #neginf_mask,USER_FPSR(%a6)
bra end_ovfr |inf is same for all precisions (ext,dbl,sgl)
d_rm_pos:
leal DBL_PLRG,%a1 |answer is large positive number
bra end_ovfr
DBL_RP:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs d_rp_pos
d_rp_neg:
leal DBL_PLRG,%a1 |answer is large negative number
bsetb #neg_bit,FPSR_CC(%a6)
bra end_ovfr
d_rp_pos:
leal EXT_PINF,%a1 |answer is positive infinity
bsetb #inf_bit,FPSR_CC(%a6)
bra end_ovfr
|
|case DEST_FMT = SGL
|
SGL_RN:
leal EXT_PINF,%a1 |answer is +/- infinity
bsetb #inf_bit,FPSR_CC(%a6)
bras set_sign
SGL_RZ:
leal SGL_PLRG,%a1 |answer is +/- large number
bras set_sign
SGL_RM:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs s_rm_pos
s_rm_neg:
leal EXT_PINF,%a1 |answer is negative infinity
orl #neginf_mask,USER_FPSR(%a6)
bras end_ovfr
s_rm_pos:
leal SGL_PLRG,%a1 |answer is large positive number
bras end_ovfr
SGL_RP:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs s_rp_pos
s_rp_neg:
leal SGL_PLRG,%a1 |answer is large negative number
bsetb #neg_bit,FPSR_CC(%a6)
bras end_ovfr
s_rp_pos:
leal EXT_PINF,%a1 |answer is positive infinity
bsetb #inf_bit,FPSR_CC(%a6)
bras end_ovfr
set_sign:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs end_ovfr
neg_sign:
bsetb #neg_bit,FPSR_CC(%a6)
end_ovfr:
movew LOCAL_EX(%a1),LOCAL_EX(%a0) |do not overwrite sign
movel LOCAL_HI(%a1),LOCAL_HI(%a0)
movel LOCAL_LO(%a1),LOCAL_LO(%a0)
rts
|
| ERROR
|
error:
rts
|
| get_fline --- get f-line opcode of interrupted instruction
|
| Returns opcode in the low word of d0.
|
get_fline:
movel USER_FPIAR(%a6),%a0 |opcode address
movel #0,-(%a7) |reserve a word on the stack
leal 2(%a7),%a1 |point to low word of temporary
movel #2,%d0 |count
bsrl mem_read
movel (%a7)+,%d0
rts
|
| g_rndpr --- put rounding precision in d0{1:0}
|
| valid return codes are:
| 00 - extended
| 01 - single
| 10 - double
|
| begin
| get rounding precision (cmdreg3b{6:5})
| begin
| case opclass = 011 (move out)
| get destination format - this is also the rounding precision
|
| case opclass = 0x0
| if E3
| *case RndPr(from cmdreg3b{6:5} = 11 then RND_PREC = DBL
| *case RndPr(from cmdreg3b{6:5} = 10 then RND_PREC = SGL
| case RndPr(from cmdreg3b{6:5} = 00 | 01
| use precision from FPCR{7:6}
| case 00 then RND_PREC = EXT
| case 01 then RND_PREC = SGL
| case 10 then RND_PREC = DBL
| else E1
| use precision in FPCR{7:6}
| case 00 then RND_PREC = EXT
| case 01 then RND_PREC = SGL
| case 10 then RND_PREC = DBL
| end
|
g_rndpr:
bsr g_opcls |get opclass in d0{2:0}
cmpw #0x0003,%d0 |check for opclass 011
bnes op_0x0
|
| For move out instructions (opclass 011) the destination format
| is the same as the rounding precision. Pass results from g_dfmtou.
|
bsr g_dfmtou
rts
op_0x0:
btstb #E3,E_BYTE(%a6)
beql unf_e1_exc |branch to e1 underflow
unf_e3_exc:
movel CMDREG3B(%a6),%d0 |rounding precision in d0{10:9}
bfextu %d0{#9:#2},%d0 |move the rounding prec bits to d0{1:0}
cmpil #0x2,%d0
beql unff_sgl |force precision is single
cmpil #0x3,%d0 |force precision is double
beql unff_dbl
movew CMDREG3B(%a6),%d0 |get the command word again
andil #0x7f,%d0 |clear all except operation
cmpil #0x33,%d0
beql unf_fsgl |fsglmul or fsgldiv
cmpil #0x30,%d0
beql unf_fsgl |fsgldiv or fsglmul
bra unf_fpcr
unf_e1_exc:
movel CMDREG1B(%a6),%d0 |get 32 bits off the stack, 1st 16 bits
| ;are the command word
andil #0x00440000,%d0 |clear all bits except bits 6 and 2
cmpil #0x00400000,%d0
beql unff_sgl |force single
cmpil #0x00440000,%d0 |force double
beql unff_dbl
movel CMDREG1B(%a6),%d0 |get the command word again
andil #0x007f0000,%d0 |clear all bits except the operation
cmpil #0x00270000,%d0
beql unf_fsgl |fsglmul
cmpil #0x00240000,%d0
beql unf_fsgl |fsgldiv
bra unf_fpcr
|
| Convert to return format. The values from cmdreg3b and the return
| values are:
| cmdreg3b return precision
| -------- ------ ---------
| 00,01 0 ext
| 10 1 sgl
| 11 2 dbl
| Force single
|
unff_sgl:
movel #1,%d0 |return 1
rts
|
| Force double
|
unff_dbl:
movel #2,%d0 |return 2
rts
|
| Force extended
|
unf_fsgl:
movel #0,%d0
rts
|
| Get rounding precision set in FPCR{7:6}.
|
unf_fpcr:
movel USER_FPCR(%a6),%d0 |rounding precision bits in d0{7:6}
bfextu %d0{#24:#2},%d0 |move the rounding prec bits to d0{1:0}
rts
|
| g_opcls --- put opclass in d0{2:0}
|
g_opcls:
btstb #E3,E_BYTE(%a6)
beqs opc_1b |if set, go to cmdreg1b
opc_3b:
clrl %d0 |if E3, only opclass 0x0 is possible
rts
opc_1b:
movel CMDREG1B(%a6),%d0
bfextu %d0{#0:#3},%d0 |shift opclass bits d0{31:29} to d0{2:0}
rts
|
| g_dfmtou --- put destination format in d0{1:0}
|
| If E1, the format is from cmdreg1b{12:10}
| If E3, the format is extended.
|
| Dest. Fmt.
| extended 010 -> 00
| single 001 -> 01
| double 101 -> 10
|
g_dfmtou:
btstb #E3,E_BYTE(%a6)
beqs op011
clrl %d0 |if E1, size is always ext
rts
op011:
movel CMDREG1B(%a6),%d0
bfextu %d0{#3:#3},%d0 |dest fmt from cmdreg1b{12:10}
cmpb #1,%d0 |check for single
bnes not_sgl
movel #1,%d0
rts
not_sgl:
cmpb #5,%d0 |check for double
bnes not_dbl
movel #2,%d0
rts
not_dbl:
clrl %d0 |must be extended
rts
|
|
| Final result table for unf_sub. Note that the negative counterparts
| are unnecessary as unf_sub always returns the sign separately from
| the exponent.
| ;+zero
EXT_PZRO: .long 0x00000000,0x00000000,0x00000000,0x00000000
| ;+zero
SGL_PZRO: .long 0x3f810000,0x00000000,0x00000000,0x00000000
| ;+zero
DBL_PZRO: .long 0x3c010000,0x00000000,0x00000000,0x00000000
| ;smallest +ext denorm
EXT_PSML: .long 0x00000000,0x00000000,0x00000001,0x00000000
| ;smallest +sgl denorm
SGL_PSML: .long 0x3f810000,0x00000100,0x00000000,0x00000000
| ;smallest +dbl denorm
DBL_PSML: .long 0x3c010000,0x00000000,0x00000800,0x00000000
|
| UNF_SUB --- underflow result calculation
|
| Input:
| d0 contains round precision
| a0 points to input operand in the internal extended format
|
| Output:
| a0 points to correct internal extended precision result.
|
tblunf:
.long uEXT_RN
.long uEXT_RZ
.long uEXT_RM
.long uEXT_RP
.long uSGL_RN
.long uSGL_RZ
.long uSGL_RM
.long uSGL_RP
.long uDBL_RN
.long uDBL_RZ
.long uDBL_RM
.long uDBL_RP
.long uDBL_RN
.long uDBL_RZ
.long uDBL_RM
.long uDBL_RP
.global unf_sub
unf_sub:
lsll #2,%d0 |move round precision to d0{3:2}
bfextu FPCR_MODE(%a6){#2:#2},%d1 |set round mode
orl %d1,%d0 |index is fmt:mode in d0{3:0}
leal tblunf,%a1 |load a1 with table address
movel %a1@(%d0:l:4),%a1 |use d0 as index to the table
jmp (%a1) |go to the correct routine
|
|case DEST_FMT = EXT
|
uEXT_RN:
leal EXT_PZRO,%a1 |answer is +/- zero
bsetb #z_bit,FPSR_CC(%a6)
bra uset_sign |now go set the sign
uEXT_RZ:
leal EXT_PZRO,%a1 |answer is +/- zero
bsetb #z_bit,FPSR_CC(%a6)
bra uset_sign |now go set the sign
uEXT_RM:
tstb LOCAL_SGN(%a0) |if negative underflow
beqs ue_rm_pos
ue_rm_neg:
leal EXT_PSML,%a1 |answer is negative smallest denorm
bsetb #neg_bit,FPSR_CC(%a6)
bra end_unfr
ue_rm_pos:
leal EXT_PZRO,%a1 |answer is positive zero
bsetb #z_bit,FPSR_CC(%a6)
bra end_unfr
uEXT_RP:
tstb LOCAL_SGN(%a0) |if negative underflow
beqs ue_rp_pos
ue_rp_neg:
leal EXT_PZRO,%a1 |answer is negative zero
oril #negz_mask,USER_FPSR(%a6)
bra end_unfr
ue_rp_pos:
leal EXT_PSML,%a1 |answer is positive smallest denorm
bra end_unfr
|
|case DEST_FMT = DBL
|
uDBL_RN:
leal DBL_PZRO,%a1 |answer is +/- zero
bsetb #z_bit,FPSR_CC(%a6)
bra uset_sign
uDBL_RZ:
leal DBL_PZRO,%a1 |answer is +/- zero
bsetb #z_bit,FPSR_CC(%a6)
bra uset_sign |now go set the sign
uDBL_RM:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs ud_rm_pos
ud_rm_neg:
leal DBL_PSML,%a1 |answer is smallest denormalized negative
bsetb #neg_bit,FPSR_CC(%a6)
bra end_unfr
ud_rm_pos:
leal DBL_PZRO,%a1 |answer is positive zero
bsetb #z_bit,FPSR_CC(%a6)
bra end_unfr
uDBL_RP:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs ud_rp_pos
ud_rp_neg:
leal DBL_PZRO,%a1 |answer is negative zero
oril #negz_mask,USER_FPSR(%a6)
bra end_unfr
ud_rp_pos:
leal DBL_PSML,%a1 |answer is smallest denormalized negative
bra end_unfr
|
|case DEST_FMT = SGL
|
uSGL_RN:
leal SGL_PZRO,%a1 |answer is +/- zero
bsetb #z_bit,FPSR_CC(%a6)
bras uset_sign
uSGL_RZ:
leal SGL_PZRO,%a1 |answer is +/- zero
bsetb #z_bit,FPSR_CC(%a6)
bras uset_sign
uSGL_RM:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs us_rm_pos
us_rm_neg:
leal SGL_PSML,%a1 |answer is smallest denormalized negative
bsetb #neg_bit,FPSR_CC(%a6)
bras end_unfr
us_rm_pos:
leal SGL_PZRO,%a1 |answer is positive zero
bsetb #z_bit,FPSR_CC(%a6)
bras end_unfr
uSGL_RP:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs us_rp_pos
us_rp_neg:
leal SGL_PZRO,%a1 |answer is negative zero
oril #negz_mask,USER_FPSR(%a6)
bras end_unfr
us_rp_pos:
leal SGL_PSML,%a1 |answer is smallest denormalized positive
bras end_unfr
uset_sign:
tstb LOCAL_SGN(%a0) |if negative overflow
beqs end_unfr
uneg_sign:
bsetb #neg_bit,FPSR_CC(%a6)
end_unfr:
movew LOCAL_EX(%a1),LOCAL_EX(%a0) |be careful not to overwrite sign
movel LOCAL_HI(%a1),LOCAL_HI(%a0)
movel LOCAL_LO(%a1),LOCAL_LO(%a0)
rts
|
| reg_dest --- write byte, word, or long data to Dn
|
|
| Input:
| L_SCR1: Data
| d1: data size and dest register number formatted as:
|
| 32 5 4 3 2 1 0
| -----------------------------------------------
| | 0 | Size | Dest Reg # |
| -----------------------------------------------
|
| Size is:
| 0 - Byte
| 1 - Word
| 2 - Long/Single
|
pregdst:
.long byte_d0
.long byte_d1
.long byte_d2
.long byte_d3
.long byte_d4
.long byte_d5
.long byte_d6
.long byte_d7
.long word_d0
.long word_d1
.long word_d2
.long word_d3
.long word_d4
.long word_d5
.long word_d6
.long word_d7
.long long_d0
.long long_d1
.long long_d2
.long long_d3
.long long_d4
.long long_d5
.long long_d6
.long long_d7
reg_dest:
leal pregdst,%a0
movel %a0@(%d1:l:4),%a0
jmp (%a0)
byte_d0:
moveb L_SCR1(%a6),USER_D0+3(%a6)
rts
byte_d1:
moveb L_SCR1(%a6),USER_D1+3(%a6)
rts
byte_d2:
moveb L_SCR1(%a6),%d2
rts
byte_d3:
moveb L_SCR1(%a6),%d3
rts
byte_d4:
moveb L_SCR1(%a6),%d4
rts
byte_d5:
moveb L_SCR1(%a6),%d5
rts
byte_d6:
moveb L_SCR1(%a6),%d6
rts
byte_d7:
moveb L_SCR1(%a6),%d7
rts
word_d0:
movew L_SCR1(%a6),USER_D0+2(%a6)
rts
word_d1:
movew L_SCR1(%a6),USER_D1+2(%a6)
rts
word_d2:
movew L_SCR1(%a6),%d2
rts
word_d3:
movew L_SCR1(%a6),%d3
rts
word_d4:
movew L_SCR1(%a6),%d4
rts
word_d5:
movew L_SCR1(%a6),%d5
rts
word_d6:
movew L_SCR1(%a6),%d6
rts
word_d7:
movew L_SCR1(%a6),%d7
rts
long_d0:
movel L_SCR1(%a6),USER_D0(%a6)
rts
long_d1:
movel L_SCR1(%a6),USER_D1(%a6)
rts
long_d2:
movel L_SCR1(%a6),%d2
rts
long_d3:
movel L_SCR1(%a6),%d3
rts
long_d4:
movel L_SCR1(%a6),%d4
rts
long_d5:
movel L_SCR1(%a6),%d5
rts
long_d6:
movel L_SCR1(%a6),%d6
rts
long_d7:
movel L_SCR1(%a6),%d7
rts
|end
|
| Source: AirFortressIlikara/LS2K0300-linux-4.19 (3,666 bytes)
|
| File: arch/m68k/fpsp040/sgetem.S
|
|
| sgetem.sa 3.1 12/10/90
|
| The entry point sGETEXP returns the exponent portion
| of the input argument. The exponent bias is removed
| and the exponent value is returned as an extended
| precision number in fp0. sGETEXPD handles denormalized
| numbers.
|
| The entry point sGETMAN extracts the mantissa of the
| input argument. The mantissa is converted to an
| extended precision number and returned in fp0. The
| range of the result is [1.0 - 2.0).
|
|
| Input: Double-extended number X in the ETEMP space in
| the floating-point save stack.
|
| Output: The functions return exp(X) or man(X) in fp0.
|
| Modified: fp0.
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SGETEM idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref nrm_set
|
| This entry point is used by the unimplemented instruction exception
| handler. It points a0 to the input operand.
|
|
|
| SGETEXP
|
	.global sgetexp
|
| sgetexp --- FGETEXP for a normalized operand.
| In:  a0 -> extended-precision operand on the fsave stack.
| Out: fp0 = unbiased exponent of the operand, as an extended value.
|
sgetexp:
	movew LOCAL_EX(%a0),%d0 |get the exponent
	bclrl #15,%d0 |clear the sign bit
	subw #0x3fff,%d0 |subtract off the bias
	fmovew %d0,%fp0 |move the exp to fp0
	rts
	.global sgetexpd
|
| sgetexpd --- FGETEXP for a denormalized operand: normalize first
| (driving the exponent negative), then unbias as in sgetexp.
|
sgetexpd:
	bclrb #sign_bit,LOCAL_EX(%a0)
	bsr nrm_set |normalize (exp will go negative)
	movew LOCAL_EX(%a0),%d0 |load resulting exponent into d0
	subw #0x3fff,%d0 |subtract off the bias
	fmovew %d0,%fp0 |move the exp to fp0
	rts
|
|
| This entry point is used by the unimplemented instruction exception
| handler. It points a0 to the input operand.
|
|
|
| SGETMAN
|
|
| For normalized numbers, leave the mantissa alone, simply load
| with an exponent of +/- $3fff.
|
	.global sgetman
|
| sgetman --- FGETMAN for a normalized operand.
| Leaves the mantissa alone and forces the exponent to +/-$3fff so
| the result lies in [1.0, 2.0).  Result returned in fp0.
|
sgetman:
	movel USER_FPCR(%a6),%d0
	andil #0xffffff00,%d0 |clear rounding precision and mode
	fmovel %d0,%fpcr |this fpcr setting is used by the 882
	movew LOCAL_EX(%a0),%d0 |get the exp (really just want sign bit)
	orw #0x7fff,%d0 |clear old exp
	bclrl #14,%d0 |make it the new exp +-3fff
	movew %d0,LOCAL_EX(%a0) |move the sign & exp back to fsave stack
	fmovex (%a0),%fp0 |put new value back in fp0
	rts
|
| For denormalized numbers, shift the mantissa until the j-bit = 1,
| then load the exponent with +/1 $3fff.
|
	.global sgetmand
|
| sgetmand --- FGETMAN for a denormalized operand: shift the
| mantissa left until the j-bit is set, then fall into the
| normalized-case handling via sgetman.
|
sgetmand:
	movel LOCAL_HI(%a0),%d0 |load ms mant in d0
	movel LOCAL_LO(%a0),%d1 |load ls mant in d1
	bsr shft |shift mantissa bits till msbit is set
	movel %d0,LOCAL_HI(%a0) |put ms mant back on stack
	movel %d1,LOCAL_LO(%a0) |put ls mant back on stack
	bras sgetman
|
| SHFT
|
| Shifts the mantissa bits until msbit is set.
| input:
| ms mantissa part in d0
| ls mantissa part in d1
| output:
| shifted bits in d0 and d1
|
| shft --- left-shift the 64-bit mantissa in d0:d1 until its most
| significant bit is set.  If the whole mantissa is zero it is
| returned unchanged.
| In:  d0 = ms mantissa long, d1 = ls mantissa long
| Out: shifted mantissa in d0 and d1
|
shft:
	tstl %d0 |if any bits set in ms mant
	bnes upper |then branch
| ;else no bits set in ms mant
	tstl %d1 |test if any bits set in ls mant
	bnes cont |if set then continue
	bras shft_end |else return (mantissa is all zero)
cont:
	movel %d3,-(%a7) |save d3
	exg %d0,%d1 |shift ls mant to ms mant
	bfffo %d0{#0:#32},%d3 |find first 1 in (old ls) mant
	lsll %d3,%d0 |shift first 1 to integer bit in ms mant
	movel (%a7)+,%d3 |restore d3
	bras shft_end
upper:
	moveml %d3/%d5/%d6,-(%a7) |save registers
	bfffo %d0{#0:#32},%d3 |find first 1 in ms mant, offset in d3
	lsll %d3,%d0 |shift ms mant until j-bit is set
	movel %d1,%d6 |save ls mant in d6
	lsll %d3,%d1 |shift ls mant by count
	movel #32,%d5
	subl %d3,%d5 |sub 32 from shift for ls mant
	lsrl %d5,%d6 |shift off all bits but those that will
| ;be shifted into ms mant
	orl %d6,%d0 |shift the ls mant bits into the ms mant
	moveml (%a7)+,%d3/%d5/%d6 |restore registers
shft_end:
	rts
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 19,240
|
arch/m68k/fpsp040/tbldo.S
|
|
| tbldo.sa 3.1 12/10/90
|
| Modified:
| 8/16/90 chinds The table was constructed to use only one level
| of indirection in do_func for monadic
| functions. Dyadic functions require two
| levels, and the tables are still contained
| in do_func. The table is arranged for
| index with a 10-bit index, with the first
| 7 bits the opcode, and the remaining 3
| the stag. For dyadic functions, all
| valid addresses are to the generic entry
| point.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|TBLDO idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
|xref ld_pinf,ld_pone,ld_ppi2
|xref t_dz2,t_operr
|xref serror,sone,szero,sinf,snzrinx
|xref sopr_inf,spi_2,src_nan,szr_inf
|xref smovcr
|xref pmod,prem,pscale
|xref satanh,satanhd
|xref sacos,sacosd,sasin,sasind,satan,satand
|xref setox,setoxd,setoxm1,setoxm1d,setoxm1i
|xref sgetexp,sgetexpd,sgetman,sgetmand
|xref sint,sintd,sintrz
|xref ssincos,ssincosd,ssincosi,ssincosnan,ssincosz
|xref scos,scosd,ssin,ssind,stan,stand
|xref scosh,scoshd,ssinh,ssinhd,stanh,stanhd
|xref sslog10,sslog2,sslogn,sslognp1
|xref sslog10d,sslog2d,sslognd,slognp1d
|xref stentox,stentoxd,stwotox,stwotoxd
| instruction ;opcode-stag Notes
	.global tblpre
|
| tblpre --- dispatch table for monadic/dyadic emulation routines.
| Indexed by a 10-bit value: the high 7 bits are the instruction
| opcode ($00-$37), the low 3 bits the source operand tag
| (0=norm, 1=zero, 2=inf, 3=nan, 4=denorm, 5-7=invalid -> serror).
| Dyadic functions (fmod/frem/fscale) use one generic entry for all
| valid tags.  Each entry is a long pointer, 8 entries per opcode.
|
tblpre:
	.long smovcr |$00-0 fmovecr all
	.long smovcr |$00-1 fmovecr all
	.long smovcr |$00-2 fmovecr all
	.long smovcr |$00-3 fmovecr all
	.long smovcr |$00-4 fmovecr all
	.long smovcr |$00-5 fmovecr all
	.long smovcr |$00-6 fmovecr all
	.long smovcr |$00-7 fmovecr all
	.long sint |$01-0 fint norm
	.long szero |$01-1 fint zero
	.long sinf |$01-2 fint inf
	.long src_nan |$01-3 fint nan
	.long sintd |$01-4 fint denorm inx
	.long serror |$01-5 fint ERROR
	.long serror |$01-6 fint ERROR
	.long serror |$01-7 fint ERROR
	.long ssinh |$02-0 fsinh norm
	.long szero |$02-1 fsinh zero
	.long sinf |$02-2 fsinh inf
	.long src_nan |$02-3 fsinh nan
	.long ssinhd |$02-4 fsinh denorm
	.long serror |$02-5 fsinh ERROR
	.long serror |$02-6 fsinh ERROR
	.long serror |$02-7 fsinh ERROR
	.long sintrz |$03-0 fintrz norm
	.long szero |$03-1 fintrz zero
	.long sinf |$03-2 fintrz inf
	.long src_nan |$03-3 fintrz nan
	.long snzrinx |$03-4 fintrz denorm inx
	.long serror |$03-5 fintrz ERROR
	.long serror |$03-6 fintrz ERROR
	.long serror |$03-7 fintrz ERROR
	.long serror |$04-0 ERROR - illegal extension
	.long serror |$04-1 ERROR - illegal extension
	.long serror |$04-2 ERROR - illegal extension
	.long serror |$04-3 ERROR - illegal extension
	.long serror |$04-4 ERROR - illegal extension
	.long serror |$04-5 ERROR - illegal extension
	.long serror |$04-6 ERROR - illegal extension
	.long serror |$04-7 ERROR - illegal extension
	.long serror |$05-0 ERROR - illegal extension
	.long serror |$05-1 ERROR - illegal extension
	.long serror |$05-2 ERROR - illegal extension
	.long serror |$05-3 ERROR - illegal extension
	.long serror |$05-4 ERROR - illegal extension
	.long serror |$05-5 ERROR - illegal extension
	.long serror |$05-6 ERROR - illegal extension
	.long serror |$05-7 ERROR - illegal extension
	.long sslognp1 |$06-0 flognp1 norm
	.long szero |$06-1 flognp1 zero
	.long sopr_inf |$06-2 flognp1 inf
	.long src_nan |$06-3 flognp1 nan
	.long slognp1d |$06-4 flognp1 denorm
	.long serror |$06-5 flognp1 ERROR
	.long serror |$06-6 flognp1 ERROR
	.long serror |$06-7 flognp1 ERROR
	.long serror |$07-0 ERROR - illegal extension
	.long serror |$07-1 ERROR - illegal extension
	.long serror |$07-2 ERROR - illegal extension
	.long serror |$07-3 ERROR - illegal extension
	.long serror |$07-4 ERROR - illegal extension
	.long serror |$07-5 ERROR - illegal extension
	.long serror |$07-6 ERROR - illegal extension
	.long serror |$07-7 ERROR - illegal extension
	.long setoxm1 |$08-0 fetoxm1 norm
	.long szero |$08-1 fetoxm1 zero
	.long setoxm1i |$08-2 fetoxm1 inf
	.long src_nan |$08-3 fetoxm1 nan
	.long setoxm1d |$08-4 fetoxm1 denorm
	.long serror |$08-5 fetoxm1 ERROR
	.long serror |$08-6 fetoxm1 ERROR
	.long serror |$08-7 fetoxm1 ERROR
	.long stanh |$09-0 ftanh norm
	.long szero |$09-1 ftanh zero
	.long sone |$09-2 ftanh inf
	.long src_nan |$09-3 ftanh nan
	.long stanhd |$09-4 ftanh denorm
	.long serror |$09-5 ftanh ERROR
	.long serror |$09-6 ftanh ERROR
	.long serror |$09-7 ftanh ERROR
	.long satan |$0a-0 fatan norm
	.long szero |$0a-1 fatan zero
	.long spi_2 |$0a-2 fatan inf
	.long src_nan |$0a-3 fatan nan
	.long satand |$0a-4 fatan denorm
	.long serror |$0a-5 fatan ERROR
	.long serror |$0a-6 fatan ERROR
	.long serror |$0a-7 fatan ERROR
	.long serror |$0b-0 ERROR - illegal extension
	.long serror |$0b-1 ERROR - illegal extension
	.long serror |$0b-2 ERROR - illegal extension
	.long serror |$0b-3 ERROR - illegal extension
	.long serror |$0b-4 ERROR - illegal extension
	.long serror |$0b-5 ERROR - illegal extension
	.long serror |$0b-6 ERROR - illegal extension
	.long serror |$0b-7 ERROR - illegal extension
	.long sasin |$0c-0 fasin norm
	.long szero |$0c-1 fasin zero
	.long t_operr |$0c-2 fasin inf
	.long src_nan |$0c-3 fasin nan
	.long sasind |$0c-4 fasin denorm
	.long serror |$0c-5 fasin ERROR
	.long serror |$0c-6 fasin ERROR
	.long serror |$0c-7 fasin ERROR
	.long satanh |$0d-0 fatanh norm
	.long szero |$0d-1 fatanh zero
	.long t_operr |$0d-2 fatanh inf
	.long src_nan |$0d-3 fatanh nan
	.long satanhd |$0d-4 fatanh denorm
	.long serror |$0d-5 fatanh ERROR
	.long serror |$0d-6 fatanh ERROR
	.long serror |$0d-7 fatanh ERROR
	.long ssin |$0e-0 fsin norm
	.long szero |$0e-1 fsin zero
	.long t_operr |$0e-2 fsin inf
	.long src_nan |$0e-3 fsin nan
	.long ssind |$0e-4 fsin denorm
	.long serror |$0e-5 fsin ERROR
	.long serror |$0e-6 fsin ERROR
	.long serror |$0e-7 fsin ERROR
	.long stan |$0f-0 ftan norm
	.long szero |$0f-1 ftan zero
	.long t_operr |$0f-2 ftan inf
	.long src_nan |$0f-3 ftan nan
	.long stand |$0f-4 ftan denorm
	.long serror |$0f-5 ftan ERROR
	.long serror |$0f-6 ftan ERROR
	.long serror |$0f-7 ftan ERROR
	.long setox |$10-0 fetox norm
	.long ld_pone |$10-1 fetox zero
	.long szr_inf |$10-2 fetox inf
	.long src_nan |$10-3 fetox nan
	.long setoxd |$10-4 fetox denorm
	.long serror |$10-5 fetox ERROR
	.long serror |$10-6 fetox ERROR
	.long serror |$10-7 fetox ERROR
	.long stwotox |$11-0 ftwotox norm
	.long ld_pone |$11-1 ftwotox zero
	.long szr_inf |$11-2 ftwotox inf
	.long src_nan |$11-3 ftwotox nan
	.long stwotoxd |$11-4 ftwotox denorm
	.long serror |$11-5 ftwotox ERROR
	.long serror |$11-6 ftwotox ERROR
	.long serror |$11-7 ftwotox ERROR
	.long stentox |$12-0 ftentox norm
	.long ld_pone |$12-1 ftentox zero
	.long szr_inf |$12-2 ftentox inf
	.long src_nan |$12-3 ftentox nan
	.long stentoxd |$12-4 ftentox denorm
	.long serror |$12-5 ftentox ERROR
	.long serror |$12-6 ftentox ERROR
	.long serror |$12-7 ftentox ERROR
	.long serror |$13-0 ERROR - illegal extension
	.long serror |$13-1 ERROR - illegal extension
	.long serror |$13-2 ERROR - illegal extension
	.long serror |$13-3 ERROR - illegal extension
	.long serror |$13-4 ERROR - illegal extension
	.long serror |$13-5 ERROR - illegal extension
	.long serror |$13-6 ERROR - illegal extension
	.long serror |$13-7 ERROR - illegal extension
	.long sslogn |$14-0 flogn norm
	.long t_dz2 |$14-1 flogn zero
	.long sopr_inf |$14-2 flogn inf
	.long src_nan |$14-3 flogn nan
	.long sslognd |$14-4 flogn denorm
	.long serror |$14-5 flogn ERROR
	.long serror |$14-6 flogn ERROR
	.long serror |$14-7 flogn ERROR
	.long sslog10 |$15-0 flog10 norm
	.long t_dz2 |$15-1 flog10 zero
	.long sopr_inf |$15-2 flog10 inf
	.long src_nan |$15-3 flog10 nan
	.long sslog10d |$15-4 flog10 denorm
	.long serror |$15-5 flog10 ERROR
	.long serror |$15-6 flog10 ERROR
	.long serror |$15-7 flog10 ERROR
	.long sslog2 |$16-0 flog2 norm
	.long t_dz2 |$16-1 flog2 zero
	.long sopr_inf |$16-2 flog2 inf
	.long src_nan |$16-3 flog2 nan
	.long sslog2d |$16-4 flog2 denorm
	.long serror |$16-5 flog2 ERROR
	.long serror |$16-6 flog2 ERROR
	.long serror |$16-7 flog2 ERROR
	.long serror |$17-0 ERROR - illegal extension
	.long serror |$17-1 ERROR - illegal extension
	.long serror |$17-2 ERROR - illegal extension
	.long serror |$17-3 ERROR - illegal extension
	.long serror |$17-4 ERROR - illegal extension
	.long serror |$17-5 ERROR - illegal extension
	.long serror |$17-6 ERROR - illegal extension
	.long serror |$17-7 ERROR - illegal extension
	.long serror |$18-0 ERROR - illegal extension
	.long serror |$18-1 ERROR - illegal extension
	.long serror |$18-2 ERROR - illegal extension
	.long serror |$18-3 ERROR - illegal extension
	.long serror |$18-4 ERROR - illegal extension
	.long serror |$18-5 ERROR - illegal extension
	.long serror |$18-6 ERROR - illegal extension
	.long serror |$18-7 ERROR - illegal extension
	.long scosh |$19-0 fcosh norm
	.long ld_pone |$19-1 fcosh zero
	.long ld_pinf |$19-2 fcosh inf
	.long src_nan |$19-3 fcosh nan
	.long scoshd |$19-4 fcosh denorm
	.long serror |$19-5 fcosh ERROR
	.long serror |$19-6 fcosh ERROR
	.long serror |$19-7 fcosh ERROR
	.long serror |$1a-0 ERROR - illegal extension
	.long serror |$1a-1 ERROR - illegal extension
	.long serror |$1a-2 ERROR - illegal extension
	.long serror |$1a-3 ERROR - illegal extension
	.long serror |$1a-4 ERROR - illegal extension
	.long serror |$1a-5 ERROR - illegal extension
	.long serror |$1a-6 ERROR - illegal extension
	.long serror |$1a-7 ERROR - illegal extension
	.long serror |$1b-0 ERROR - illegal extension
	.long serror |$1b-1 ERROR - illegal extension
	.long serror |$1b-2 ERROR - illegal extension
	.long serror |$1b-3 ERROR - illegal extension
	.long serror |$1b-4 ERROR - illegal extension
	.long serror |$1b-5 ERROR - illegal extension
	.long serror |$1b-6 ERROR - illegal extension
	.long serror |$1b-7 ERROR - illegal extension
	.long sacos |$1c-0 facos norm
	.long ld_ppi2 |$1c-1 facos zero
	.long t_operr |$1c-2 facos inf
	.long src_nan |$1c-3 facos nan
	.long sacosd |$1c-4 facos denorm
	.long serror |$1c-5 facos ERROR
	.long serror |$1c-6 facos ERROR
	.long serror |$1c-7 facos ERROR
	.long scos |$1d-0 fcos norm
	.long ld_pone |$1d-1 fcos zero
	.long t_operr |$1d-2 fcos inf
	.long src_nan |$1d-3 fcos nan
	.long scosd |$1d-4 fcos denorm
	.long serror |$1d-5 fcos ERROR
	.long serror |$1d-6 fcos ERROR
	.long serror |$1d-7 fcos ERROR
	.long sgetexp |$1e-0 fgetexp norm
	.long szero |$1e-1 fgetexp zero
	.long t_operr |$1e-2 fgetexp inf
	.long src_nan |$1e-3 fgetexp nan
	.long sgetexpd |$1e-4 fgetexp denorm
	.long serror |$1e-5 fgetexp ERROR
	.long serror |$1e-6 fgetexp ERROR
	.long serror |$1e-7 fgetexp ERROR
	.long sgetman |$1f-0 fgetman norm
	.long szero |$1f-1 fgetman zero
	.long t_operr |$1f-2 fgetman inf
	.long src_nan |$1f-3 fgetman nan
	.long sgetmand |$1f-4 fgetman denorm
	.long serror |$1f-5 fgetman ERROR
	.long serror |$1f-6 fgetman ERROR
	.long serror |$1f-7 fgetman ERROR
	.long serror |$20-0 ERROR - illegal extension
	.long serror |$20-1 ERROR - illegal extension
	.long serror |$20-2 ERROR - illegal extension
	.long serror |$20-3 ERROR - illegal extension
	.long serror |$20-4 ERROR - illegal extension
	.long serror |$20-5 ERROR - illegal extension
	.long serror |$20-6 ERROR - illegal extension
	.long serror |$20-7 ERROR - illegal extension
	.long pmod |$21-0 fmod all
	.long pmod |$21-1 fmod all
	.long pmod |$21-2 fmod all
	.long pmod |$21-3 fmod all
	.long pmod |$21-4 fmod all
	.long serror |$21-5 fmod ERROR
	.long serror |$21-6 fmod ERROR
	.long serror |$21-7 fmod ERROR
	.long serror |$22-0 ERROR - illegal extension
	.long serror |$22-1 ERROR - illegal extension
	.long serror |$22-2 ERROR - illegal extension
	.long serror |$22-3 ERROR - illegal extension
	.long serror |$22-4 ERROR - illegal extension
	.long serror |$22-5 ERROR - illegal extension
	.long serror |$22-6 ERROR - illegal extension
	.long serror |$22-7 ERROR - illegal extension
	.long serror |$23-0 ERROR - illegal extension
	.long serror |$23-1 ERROR - illegal extension
	.long serror |$23-2 ERROR - illegal extension
	.long serror |$23-3 ERROR - illegal extension
	.long serror |$23-4 ERROR - illegal extension
	.long serror |$23-5 ERROR - illegal extension
	.long serror |$23-6 ERROR - illegal extension
	.long serror |$23-7 ERROR - illegal extension
	.long serror |$24-0 ERROR - illegal extension
	.long serror |$24-1 ERROR - illegal extension
	.long serror |$24-2 ERROR - illegal extension
	.long serror |$24-3 ERROR - illegal extension
	.long serror |$24-4 ERROR - illegal extension
	.long serror |$24-5 ERROR - illegal extension
	.long serror |$24-6 ERROR - illegal extension
	.long serror |$24-7 ERROR - illegal extension
	.long prem |$25-0 frem all
	.long prem |$25-1 frem all
	.long prem |$25-2 frem all
	.long prem |$25-3 frem all
	.long prem |$25-4 frem all
	.long serror |$25-5 frem ERROR
	.long serror |$25-6 frem ERROR
	.long serror |$25-7 frem ERROR
	.long pscale |$26-0 fscale all
	.long pscale |$26-1 fscale all
	.long pscale |$26-2 fscale all
	.long pscale |$26-3 fscale all
	.long pscale |$26-4 fscale all
	.long serror |$26-5 fscale ERROR
	.long serror |$26-6 fscale ERROR
	.long serror |$26-7 fscale ERROR
	.long serror |$27-0 ERROR - illegal extension
	.long serror |$27-1 ERROR - illegal extension
	.long serror |$27-2 ERROR - illegal extension
	.long serror |$27-3 ERROR - illegal extension
	.long serror |$27-4 ERROR - illegal extension
	.long serror |$27-5 ERROR - illegal extension
	.long serror |$27-6 ERROR - illegal extension
	.long serror |$27-7 ERROR - illegal extension
	.long serror |$28-0 ERROR - illegal extension
	.long serror |$28-1 ERROR - illegal extension
	.long serror |$28-2 ERROR - illegal extension
	.long serror |$28-3 ERROR - illegal extension
	.long serror |$28-4 ERROR - illegal extension
	.long serror |$28-5 ERROR - illegal extension
	.long serror |$28-6 ERROR - illegal extension
	.long serror |$28-7 ERROR - illegal extension
	.long serror |$29-0 ERROR - illegal extension
	.long serror |$29-1 ERROR - illegal extension
	.long serror |$29-2 ERROR - illegal extension
	.long serror |$29-3 ERROR - illegal extension
	.long serror |$29-4 ERROR - illegal extension
	.long serror |$29-5 ERROR - illegal extension
	.long serror |$29-6 ERROR - illegal extension
	.long serror |$29-7 ERROR - illegal extension
	.long serror |$2a-0 ERROR - illegal extension
	.long serror |$2a-1 ERROR - illegal extension
	.long serror |$2a-2 ERROR - illegal extension
	.long serror |$2a-3 ERROR - illegal extension
	.long serror |$2a-4 ERROR - illegal extension
	.long serror |$2a-5 ERROR - illegal extension
	.long serror |$2a-6 ERROR - illegal extension
	.long serror |$2a-7 ERROR - illegal extension
	.long serror |$2b-0 ERROR - illegal extension
	.long serror |$2b-1 ERROR - illegal extension
	.long serror |$2b-2 ERROR - illegal extension
	.long serror |$2b-3 ERROR - illegal extension
	.long serror |$2b-4 ERROR - illegal extension
	.long serror |$2b-5 ERROR - illegal extension
	.long serror |$2b-6 ERROR - illegal extension
	.long serror |$2b-7 ERROR - illegal extension
	.long serror |$2c-0 ERROR - illegal extension
	.long serror |$2c-1 ERROR - illegal extension
	.long serror |$2c-2 ERROR - illegal extension
	.long serror |$2c-3 ERROR - illegal extension
	.long serror |$2c-4 ERROR - illegal extension
	.long serror |$2c-5 ERROR - illegal extension
	.long serror |$2c-6 ERROR - illegal extension
	.long serror |$2c-7 ERROR - illegal extension
	.long serror |$2d-0 ERROR - illegal extension
	.long serror |$2d-1 ERROR - illegal extension
	.long serror |$2d-2 ERROR - illegal extension
	.long serror |$2d-3 ERROR - illegal extension
	.long serror |$2d-4 ERROR - illegal extension
	.long serror |$2d-5 ERROR - illegal extension
	.long serror |$2d-6 ERROR - illegal extension
	.long serror |$2d-7 ERROR - illegal extension
	.long serror |$2e-0 ERROR - illegal extension
	.long serror |$2e-1 ERROR - illegal extension
	.long serror |$2e-2 ERROR - illegal extension
	.long serror |$2e-3 ERROR - illegal extension
	.long serror |$2e-4 ERROR - illegal extension
	.long serror |$2e-5 ERROR - illegal extension
	.long serror |$2e-6 ERROR - illegal extension
	.long serror |$2e-7 ERROR - illegal extension
	.long serror |$2f-0 ERROR - illegal extension
	.long serror |$2f-1 ERROR - illegal extension
	.long serror |$2f-2 ERROR - illegal extension
	.long serror |$2f-3 ERROR - illegal extension
	.long serror |$2f-4 ERROR - illegal extension
	.long serror |$2f-5 ERROR - illegal extension
	.long serror |$2f-6 ERROR - illegal extension
	.long serror |$2f-7 ERROR - illegal extension
	.long ssincos |$30-0 fsincos norm
	.long ssincosz |$30-1 fsincos zero
	.long ssincosi |$30-2 fsincos inf
	.long ssincosnan |$30-3 fsincos nan
	.long ssincosd |$30-4 fsincos denorm
	.long serror |$30-5 fsincos ERROR
	.long serror |$30-6 fsincos ERROR
	.long serror |$30-7 fsincos ERROR
	.long ssincos |$31-0 fsincos norm
	.long ssincosz |$31-1 fsincos zero
	.long ssincosi |$31-2 fsincos inf
	.long ssincosnan |$31-3 fsincos nan
	.long ssincosd |$31-4 fsincos denorm
	.long serror |$31-5 fsincos ERROR
	.long serror |$31-6 fsincos ERROR
	.long serror |$31-7 fsincos ERROR
	.long ssincos |$32-0 fsincos norm
	.long ssincosz |$32-1 fsincos zero
	.long ssincosi |$32-2 fsincos inf
	.long ssincosnan |$32-3 fsincos nan
	.long ssincosd |$32-4 fsincos denorm
	.long serror |$32-5 fsincos ERROR
	.long serror |$32-6 fsincos ERROR
	.long serror |$32-7 fsincos ERROR
	.long ssincos |$33-0 fsincos norm
	.long ssincosz |$33-1 fsincos zero
	.long ssincosi |$33-2 fsincos inf
	.long ssincosnan |$33-3 fsincos nan
	.long ssincosd |$33-4 fsincos denorm
	.long serror |$33-5 fsincos ERROR
	.long serror |$33-6 fsincos ERROR
	.long serror |$33-7 fsincos ERROR
	.long ssincos |$34-0 fsincos norm
	.long ssincosz |$34-1 fsincos zero
	.long ssincosi |$34-2 fsincos inf
	.long ssincosnan |$34-3 fsincos nan
	.long ssincosd |$34-4 fsincos denorm
	.long serror |$34-5 fsincos ERROR
	.long serror |$34-6 fsincos ERROR
	.long serror |$34-7 fsincos ERROR
	.long ssincos |$35-0 fsincos norm
	.long ssincosz |$35-1 fsincos zero
	.long ssincosi |$35-2 fsincos inf
	.long ssincosnan |$35-3 fsincos nan
	.long ssincosd |$35-4 fsincos denorm
	.long serror |$35-5 fsincos ERROR
	.long serror |$35-6 fsincos ERROR
	.long serror |$35-7 fsincos ERROR
	.long ssincos |$36-0 fsincos norm
	.long ssincosz |$36-1 fsincos zero
	.long ssincosi |$36-2 fsincos inf
	.long ssincosnan |$36-3 fsincos nan
	.long ssincosd |$36-4 fsincos denorm
	.long serror |$36-5 fsincos ERROR
	.long serror |$36-6 fsincos ERROR
	.long serror |$36-7 fsincos ERROR
	.long ssincos |$37-0 fsincos norm
	.long ssincosz |$37-1 fsincos zero
	.long ssincosi |$37-2 fsincos inf
	.long ssincosnan |$37-3 fsincos nan
	.long ssincosd |$37-4 fsincos denorm
	.long serror |$37-5 fsincos ERROR
	.long serror |$37-6 fsincos ERROR
	.long serror |$37-7 fsincos ERROR
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,995
|
arch/m68k/fpsp040/scosh.S
|
|
| scosh.sa 3.1 12/10/90
|
| The entry point sCosh computes the hyperbolic cosine of
| an input argument; sCoshd does the same except for denormalized
| input.
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The value cosh(X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 3 ulps in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program sCOSH takes approximately 250 cycles.
|
| Algorithm:
|
| COSH
| 1. If |X| > 16380 log2, go to 3.
|
| 2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae
| y = |X|, z = exp(Y), and
| cosh(X) = (1/2)*( z + 1/z ).
| Exit.
|
| 3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5.
|
| 4. (16380 log2 < |X| <= 16480 log2)
|	cosh(X) = exp(|X|)/2.
|	However, invoking exp(|X|) may cause premature overflow.
|	Thus, we calculate cosh(X) as follows:
| Y := |X|
| Fact := 2**(16380)
| Y' := Y - 16381 log2
| cosh(X) := Fact * exp(Y').
| Exit.
|
|	5. (|X| > 16480 log2) cosh(X) must overflow. Return
| Huge*Huge to generate overflow and an infinity with
| the appropriate sign. Huge is the largest finite number in
| extended format. Exit.
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SCOSH idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
|xref t_ovfl
|xref t_frcinx
|xref setox
| Constants for the large-|X| path of scosh:
T1: .long 0x40C62D38,0xD3D64634 | ... 16381 LOG2 LEAD
T2: .long 0x3D6F90AE,0xB1E75CC7 | ... 16381 LOG2 TRAIL
TWO16380: .long 0x7FFB0000,0x80000000,0x00000000,0x00000000 |2^16380, extended
	.global scoshd
|
| scoshd --- FCOSH of a denormalized operand.
| cosh(X) = 1 for denormalized X; the add of a tiny single forces
| the inexact exception into the result.
|
scoshd:
|--COSH(X) = 1 FOR DENORMALIZED X
	fmoves #0x3F800000,%fp0 |1.0
	fmovel %d1,%FPCR |restore user's rounding mode/precision
	fadds #0x00800000,%fp0 |1.0 + tiny -> sets inexact
	bra t_frcinx
	.global scosh
|
| scosh --- FCOSH of a normalized operand.
| In:  a0 -> extended operand; d1 = user FPCR.
| Out: cosh(X) in fp0.
| Splits on |X| against 16380*log2 (0x400CB167 packed) and
| 16480*log2 (0x400CB2B3 packed); see the algorithm header above.
|
scosh:
	fmovex (%a0),%fp0 | ...LOAD INPUT
	movel (%a0),%d0 |pack sign/exp with upper 16 bits of fraction
	movew 4(%a0),%d0
	andil #0x7FFFFFFF,%d0 |drop sign: compare on |X|
	cmpil #0x400CB167,%d0 |16380 log2, packed form
	bgts COSHBIG
|--THIS IS THE USUAL CASE, |X| < 16380 LOG2
|--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
	fabsx %fp0 | ...|X|
	movel %d1,-(%sp) |save user FPCR across setox
	clrl %d1 |setox runs with default modes
	fmovemx %fp0-%fp0,(%a0) |pass parameter to setox
	bsr setox | ...FP0 IS EXP(|X|)
	fmuls #0x3F000000,%fp0 | ...(1/2)EXP(|X|)
	movel (%sp)+,%d1
	fmoves #0x3E800000,%fp1 | ...(1/4)
	fdivx %fp0,%fp1 | ...1/(2 EXP(|X|))
	fmovel %d1,%FPCR |restore user's modes for the final add
	faddx %fp1,%fp0
	bra t_frcinx
COSHBIG:
	cmpil #0x400CB2B3,%d0 |16480 log2, packed form
	bgts COSHHUGE
|--16380 log2 < |X| <= 16480 log2: scale to avoid premature overflow
	fabsx %fp0
	fsubd T1(%pc),%fp0 | ...(|X|-16381LOG2_LEAD)
	fsubd T2(%pc),%fp0 | ...|X| - 16381 LOG2, ACCURATE
	movel %d1,-(%sp)
	clrl %d1
	fmovemx %fp0-%fp0,(%a0)
	bsr setox
	fmovel (%sp)+,%fpcr
	fmulx TWO16380(%pc),%fp0 |rescale by 2^16380
	bra t_frcinx
COSHHUGE:
|--|X| > 16480 log2: force overflow with the correct (positive) sign
	fmovel #0,%fpsr |clr N bit if set by source
	bclrb #7,(%a0) |always return positive value
	fmovemx (%a0),%fp0-%fp0
	bra t_ovfl
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,074
|
arch/m68k/fpsp040/stanh.S
|
|
| stanh.sa 3.1 12/10/90
|
| The entry point sTanh computes the hyperbolic tangent of
| an input argument; sTanhd does the same except for denormalized
| input.
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The value tanh(X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 3 ulps in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program stanh takes approximately 270 cycles.
|
| Algorithm:
|
| TANH
| 1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3.
|
| 2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by
| sgn := sign(X), y := 2|X|, z := expm1(Y), and
| tanh(X) = sgn*( z/(2+z) ).
| Exit.
|
| 3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1,
| go to 7.
|
| 4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6.
|
| 5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by
| sgn := sign(X), y := 2|X|, z := exp(Y),
| tanh(X) = sgn - [ sgn*2/(1+z) ].
| Exit.
|
| 6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we
| calculate Tanh(X) by
| sgn := sign(X), Tiny := 2**(-126),
| tanh(X) := sgn - sgn*Tiny.
| Exit.
|
| 7. (|X| < 2**(-40)). Tanh(X) = X. Exit.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|STANH idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
	.set X,FP_SCR5 |extended scratch: holds X, then 2|X|
	.set XDCARE,X+2 |don't-care pad word inside X
	.set XFRAC,X+4 |fraction part of X
	.set SGN,L_SCR3 |sign bit of X (bit 31)
	.set V,FP_SCR6 |scratch for sign-adjusted divisor
BOUNDS1: .long 0x3FD78000,0x3FFFDDCE | ... 2^(-40), (5/2)LOG2
|xref t_frcinx
|xref t_extdnrm
|xref setox
|xref setoxm1
	.global stanhd
|
| stanhd --- FTANH of a denormalized operand: tanh(X) = X.
|
stanhd:
|--TANH(X) = X FOR DENORMALIZED X
	bra t_extdnrm
	.global stanh
|
| stanh --- FTANH of a normalized operand.
| In:  a0 -> extended operand; d1 = user FPCR.
| Out: tanh(X) in fp0.
| Case split (see algorithm header above): main range uses
| expm1(2|X|); large |X| uses exp(2|X|); huge |X| returns +-1 with
| inexact; tiny |X| returns X.
|
stanh:
	fmovex (%a0),%fp0 | ...LOAD INPUT
	fmovex %fp0,X(%a6)
	movel (%a0),%d0 |pack sign/exp with upper 16 bits of fraction
	movew 4(%a0),%d0
	movel %d0,X(%a6) |note: overwrites X's first long with packed form
	andl #0x7FFFFFFF,%d0
	cmp2l BOUNDS1(%pc),%d0 | ...2**(-40) < |X| < (5/2)LOG2 ?
	bcss TANHBORS
|--THIS IS THE USUAL CASE
|--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
	movel X(%a6),%d0
	movel %d0,SGN(%a6)
	andl #0x7FFF0000,%d0 |isolate exponent field
	addl #0x00010000,%d0 | ...EXPONENT OF 2|X|
	movel %d0,X(%a6) |X now holds 2|X|
	andl #0x80000000,SGN(%a6) |SGN = sign bit of X
	fmovex X(%a6),%fp0 | ...FP0 IS Y = 2|X|
	movel %d1,-(%a7) |save user FPCR across setoxm1
	clrl %d1
	fmovemx %fp0-%fp0,(%a0)
	bsr setoxm1 | ...FP0 IS Z = EXPM1(Y)
	movel (%a7)+,%d1
	fmovex %fp0,%fp1
	fadds #0x40000000,%fp1 | ...Z+2
	movel SGN(%a6),%d0
	fmovex %fp1,V(%a6)
	eorl %d0,V(%a6) |V = sign(X)*(Z+2), gives signed quotient
	fmovel %d1,%FPCR |restore users exceptions
	fdivx V(%a6),%fp0 |tanh(X) = Z / [sign(X)*(Z+2)]
	bra t_frcinx
TANHBORS:
	cmpl #0x3FFF8000,%d0 ||X| < 1 ?
	blt TANHSM
	cmpl #0x40048AA1,%d0 ||X| >= 50 log2 ?
	bgt TANHHUGE
|-- (5/2) LOG2 < |X| < 50 LOG2,
|--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
|--TANH(X) = SGN - SGN*2/[EXP(Y)+1].
	movel X(%a6),%d0
	movel %d0,SGN(%a6)
	andl #0x7FFF0000,%d0
	addl #0x00010000,%d0 | ...EXPO OF 2|X|
	movel %d0,X(%a6) | ...Y = 2|X|
	andl #0x80000000,SGN(%a6)
	movel SGN(%a6),%d0
	fmovex X(%a6),%fp0 | ...Y = 2|X|
	movel %d1,-(%a7) |save user FPCR across setox
	clrl %d1
	fmovemx %fp0-%fp0,(%a0)
	bsr setox | ...FP0 IS EXP(Y)
	movel (%a7)+,%d1
	movel SGN(%a6),%d0
	fadds #0x3F800000,%fp0 | ...EXP(Y)+1
	eorl #0xC0000000,%d0 | ...-SIGN(X)*2
	fmoves %d0,%fp1 | ...-SIGN(X)*2 IN SGL FMT
	fdivx %fp0,%fp1 | ...-SIGN(X)2 / [EXP(Y)+1 ]
	movel SGN(%a6),%d0
	orl #0x3F800000,%d0 | ...SGN
	fmoves %d0,%fp0 | ...SGN IN SGL FMT
	fmovel %d1,%FPCR |restore users exceptions
	faddx %fp1,%fp0 |SGN - SGN*2/[EXP(Y)+1]
	bra t_frcinx
TANHSM:
|--|X| <= 2^(-40): tanh(X) = X
	movew #0x0000,XDCARE(%a6) |clear don't-care word before reload
	fmovel %d1,%FPCR |restore users exceptions
	fmovex X(%a6),%fp0 |last inst - possible exception set
	bra t_frcinx
TANHHUGE:
|---RETURN SGN(X) - SGN(X)EPS
	movel X(%a6),%d0
	andl #0x80000000,%d0 |sign of X
	orl #0x3F800000,%d0 |sgn * 1.0 (single)
	fmoves %d0,%fp0
	andl #0x80000000,%d0
	eorl #0x80800000,%d0 | ...-SIGN(X)*EPS
	fmovel %d1,%FPCR |restore users exceptions
	fadds %d0,%fp0 |sgn - sgn*eps, sets inexact
	bra t_frcinx
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,235
|
arch/m68k/fpsp040/satanh.S
|
|
| satanh.sa 3.3 12/19/90
|
| The entry point satanh computes the inverse
| hyperbolic tangent of
| an input argument; satanhd does the same except for denormalized
| input.
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The value arctanh(X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 3 ulps in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program satanh takes approximately 270 cycles.
|
| Algorithm:
|
| ATANH
| 1. If |X| >= 1, go to 3.
|
| 2. (|X| < 1) Calculate atanh(X) by
| sgn := sign(X)
| y := |X|
| z := 2y/(1-y)
| atanh(X) := sgn * (1/2) * logp1(z)
| Exit.
|
| 3. If |X| > 1, go to 5.
|
| 4. (|X| = 1) Generate infinity with an appropriate sign and
| divide-by-zero by
| sgn := sign(X)
|	atanh(X) := sgn / (+0).
| Exit.
|
| 5. (|X| > 1) Generate an invalid operation by 0 * infinity.
| Exit.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|satanh idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
|xref t_dz
|xref t_operr
|xref t_frcinx
|xref t_extdnrm
|xref slognp1
	.global satanhd
|
| satanhd --- FATANH of a denormalized operand: atanh(X) = X.
|
satanhd:
|--ATANH(X) = X FOR DENORMALIZED X
	bra t_extdnrm
	.global satanh
|
| satanh --- FATANH of a normalized operand.
| In:  a0 -> extended operand; d1 = user FPCR.
| Out: atanh(X) in fp0; |X| = 1 raises divide-by-zero, |X| > 1
| raises operand error.
|
satanh:
	movel (%a0),%d0 |pack sign/exp with upper 16 bits of fraction
	movew 4(%a0),%d0
	andil #0x7FFFFFFF,%d0 |drop sign: compare on |X|
	cmpil #0x3FFF8000,%d0 ||X| >= 1 ?
	bges ATANHBIG
|--THIS IS THE USUAL CASE, |X| < 1
|--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
	fabsx (%a0),%fp0 | ...Y = |X|
	fmovex %fp0,%fp1
	fnegx %fp1 | ...-Y
	faddx %fp0,%fp0 | ...2Y
	fadds #0x3F800000,%fp1 | ...1-Y
	fdivx %fp1,%fp0 | ...2Y/(1-Y)
	movel (%a0),%d0
	andil #0x80000000,%d0
	oril #0x3F000000,%d0 | ...SIGN(X)*HALF
	movel %d0,-(%sp) |final scale factor, consumed by fmuls below
	fmovemx %fp0-%fp0,(%a0) | ...overwrite input
	movel %d1,-(%sp) |save user FPCR across slognp1
	clrl %d1
	bsr slognp1 | ...LOG1P(Z)
	fmovel (%sp)+,%fpcr |restore user's modes
	fmuls (%sp)+,%fp0 |apply sign(X)*1/2
	bra t_frcinx
ATANHBIG:
	fabsx (%a0),%fp0 | ...|X|
	fcmps #0x3F800000,%fp0
	fbgt t_operr ||X| > 1: invalid operation
	bra t_dz ||X| = 1: signed infinity, divide-by-zero
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,827
|
arch/m68k/fpsp040/sacos.S
|
|
| sacos.sa 3.3 12/19/90
|
| Description: The entry point sAcos computes the inverse cosine of
| an input argument; sAcosd does the same except for denormalized
| input.
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The value arccos(X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 3 ulps in
| 64 significant bit, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
|	Speed: The program sACOS takes approximately 310 cycles.
|
| Algorithm:
|
| ACOS
| 1. If |X| >= 1, go to 3.
|
| 2. (|X| < 1) Calculate acos(X) by
| z := (1-X) / (1+X)
| acos(X) = 2 * atan( sqrt(z) ).
| Exit.
|
| 3. If |X| > 1, go to 5.
|
| 4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit.
|
| 5. (|X| > 1) Generate an invalid operation by 0 * infinity.
| Exit.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SACOS idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
| Extended-precision constants pi and pi/2:
PI: .long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
PIBY2: .long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
|xref t_operr
|xref t_frcinx
|xref satan
	.global sacosd
|
| sacosd --- FACOS of a denormalized operand: acos(X) = pi/2.
|
sacosd:
|--ACOS(X) = PI/2 FOR DENORMALIZED X
	fmovel %d1,%fpcr | ...load user's rounding mode/precision
	fmovex PIBY2,%fp0
	bra t_frcinx
	.global sacos
|
| sacos --- FACOS of a normalized operand.
| In:  a0 -> extended operand; d1 = user FPCR.
| Out: acos(X) in fp0; |X| > 1 raises operand error.
| Main range uses acos(X) = 2*atan(sqrt((1-X)/(1+X))).
|
sacos:
	fmovex (%a0),%fp0 | ...LOAD INPUT
	movel (%a0),%d0 | ...pack exponent with upper 16 fraction
	movew 4(%a0),%d0
	andil #0x7FFFFFFF,%d0 |drop sign: compare on |X|
	cmpil #0x3FFF8000,%d0 ||X| >= 1 ?
	bges ACOSBIG
|--THIS IS THE USUAL CASE, |X| < 1
|--ACOS(X) = 2 * ATAN( SQRT( (1-X)/(1+X) ) )
	fmoves #0x3F800000,%fp1
	faddx %fp0,%fp1 | ...1+X
	fnegx %fp0 | ... -X
	fadds #0x3F800000,%fp0 | ...1-X
	fdivx %fp1,%fp0 | ...(1-X)/(1+X)
	fsqrtx %fp0 | ...SQRT((1-X)/(1+X))
	fmovemx %fp0-%fp0,(%a0) | ...overwrite input
	movel %d1,-(%sp) |save original users fpcr
	clrl %d1
	bsr satan | ...ATAN(SQRT([1-X]/[1+X]))
	fmovel (%sp)+,%fpcr |restore users exceptions
	faddx %fp0,%fp0 | ...2 * ATAN( STUFF )
	bra t_frcinx
ACOSBIG:
	fabsx %fp0
	fcmps #0x3F800000,%fp0
	fbgt t_operr |cause an operr exception
|--|X| = 1, ACOS(X) = 0 OR PI
	movel (%a0),%d0 | ...pack exponent with upper 16 fraction
	movew 4(%a0),%d0
	cmpl #0,%d0 |D0 has original exponent+fraction
	bgts ACOSP1
|--X = -1
|Returns PI and inexact exception
	fmovex PI,%fp0
	fmovel %d1,%FPCR
	fadds #0x00800000,%fp0 |cause an inexact exception to be put
| ;into the 040 - will not trap until next
| ;fp inst.
	bra t_frcinx
ACOSP1:
	fmovel %d1,%FPCR
	fmoves #0x00000000,%fp0
	rts |Facos ; of +1 is exact
|end
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,983
|
arch/m68k/fpsp040/x_store.S
|
|
| x_store.sa 3.2 1/24/91
|
| store --- store operand to memory or register
|
| Used by underflow and overflow handlers.
|
| a6 = points to fp value to be stored.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
X_STORE: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
| fpreg_mask --- maps a register number 0-7 to the fmovem dynamic
| register-mask bit for fp0-fp7 (fp0 = bit 7 ... fp7 = bit 0).
fpreg_mask:
	.byte 0x80,0x40,0x20,0x10,0x08,0x04,0x02,0x01
#include "fpsp.h"
|xref mem_write
|xref get_fline
|xref g_opcls
|xref g_dfmtou
|xref reg_dest
.global dest_ext
.global dest_dbl
.global dest_sgl
.global store
| store --- write the result at (a0) to its destination: an FP
| register (E3, or E1 opclass 0/2), or user memory (E1 opclass 3).
| In:  a0 -> result operand (sign byte in LOCAL_SGN, value at LOCAL_EX)
|      a6 -> exception frame (E_BYTE, CMDREG1B/CMDREG3B, USER_FPn)
| The destination register number comes from cmdreg3b when the E3
| bit is set, otherwise from cmdreg1b (see E1_sto below).
store:
btstb #E3,E_BYTE(%a6) | E3 set -> destination encoded in cmdreg3b
beqs E1_sto
E3_sto:
movel CMDREG3B(%a6),%d0
bfextu %d0{#6:#3},%d0 |isolate dest. reg from cmdreg3b
sto_fp:
lea fpreg_mask,%a1
moveb (%a1,%d0.w),%d0 |convert reg# to dynamic register mask
tstb LOCAL_SGN(%a0) | reinstall the sign bit before storing
beqs is_pos
bsetb #sign_bit,LOCAL_EX(%a0)
is_pos:
fmovemx (%a0),%d0 |move to correct register
|
| if fp0-fp3 is being modified, we must put a copy
| in the USER_FPn variable on the stack because all exception
| handlers restore fp0-fp3 from there.
|
cmpb #0x80,%d0
bnes not_fp0
fmovemx %fp0-%fp0,USER_FP0(%a6)
rts
not_fp0:
cmpb #0x40,%d0
bnes not_fp1
fmovemx %fp1-%fp1,USER_FP1(%a6)
rts
not_fp1:
cmpb #0x20,%d0
bnes not_fp2
fmovemx %fp2-%fp2,USER_FP2(%a6)
rts
not_fp2:
cmpb #0x10,%d0
bnes not_fp3
fmovemx %fp3-%fp3,USER_FP3(%a6)
rts
not_fp3:
rts | fp4-fp7: no shadow copy is kept
| E1 exception: opclass 0/2 results go to an FP register (shared
| tail with the E3 path); opclass 3 results go to user memory.
E1_sto:
bsrl g_opcls |returns opclass in d0
cmpib #3,%d0
beq opc011 |branch if opclass 3
movel CMDREG1B(%a6),%d0
bfextu %d0{#6:#3},%d0 |extract destination register
bras sto_fp | register destination, same path as E3
| Opclass 3: convert/copy to user memory in the format reported by
| g_dfmtou. a1 takes the extended-precision source, a0 the user
| destination address from the exception frame's EA.
opc011:
bsrl g_dfmtou |returns dest format in d0
| ;ext=00, sgl=01, dbl=10
movel %a0,%a1 |save source addr in a1
movel EXC_EA(%a6),%a0 |get the address
cmpil #0,%d0 |if dest format is extended
beq dest_ext |then branch
cmpil #1,%d0 |if dest format is single
beq dest_sgl |then branch
|
| fall through to dest_dbl
|
|
| dest_dbl --- write double precision value to user space
|
|Input
| a0 -> destination address
| a1 -> source in extended precision
|Output
| a0 -> destroyed
| a1 -> destroyed
| d0 -> 0
|
|Changes extended precision to double precision.
| Note: no attempt is made to round the extended value to double.
| dbl_sign = ext_sign
| dbl_exp = ext_exp - $3fff(ext bias) + $7ff(dbl bias)
| get rid of ext integer bit
| dbl_mant = ext_mant{62:12}
|
| --------------- --------------- ---------------
| extended -> |s| exp | |1| ms mant | | ls mant |
| --------------- --------------- ---------------
| 95 64 63 62 32 31 11 0
| | |
| | |
| | |
| v v
| --------------- ---------------
| double -> |s|exp| mant | | mant |
| --------------- ---------------
| 63 51 32 31 0
|
dest_dbl:
clrl %d0 |clear d0
movew LOCAL_EX(%a1),%d0 |get exponent
subw #0x3fff,%d0 |subtract extended precision bias
cmpw #0x4000,%d0 |check if inf
beqs inf |if so, special case
addw #0x3ff,%d0 |add double precision bias
swap %d0 |d0 now in upper word
lsll #4,%d0 |d0 now in proper place for dbl prec exp
tstb LOCAL_SGN(%a1)
beqs get_mant |if positive, go process mantissa
bsetl #31,%d0 |if negative, put in sign information
| ; before continuing
bras get_mant |go process mantissa
inf:
movel #0x7ff00000,%d0 |load dbl inf exponent
clrl LOCAL_HI(%a1) |clear msb
tstb LOCAL_SGN(%a1)
beqs dbl_inf |if positive, go ahead and write it
bsetl #31,%d0 |if negative put in sign information
dbl_inf:
movel %d0,LOCAL_EX(%a1) |put the new exp back on the stack
bras dbl_wrt
| Build the 8-byte double image in place over the extended source:
| upper lword = sign|exp|top 20 mantissa bits; lower lword = the
| remaining 11 ms-mantissa bits followed by 21 ls-mantissa bits.
get_mant:
movel LOCAL_HI(%a1),%d1 |get ms mantissa
bfextu %d1{#1:#20},%d1 |get upper 20 bits of ms (skips integer bit)
orl %d1,%d0 |put these bits in ms word of double
movel %d0,LOCAL_EX(%a1) |put the new exp back on the stack
movel LOCAL_HI(%a1),%d1 |get ms mantissa
movel #21,%d0 |load shift count
lsll %d0,%d1 |put lower 11 bits in upper bits
movel %d1,LOCAL_HI(%a1) |build lower lword in memory
movel LOCAL_LO(%a1),%d1 |get ls mantissa
bfextu %d1{#0:#21},%d0 |get ls 21 bits of double
orl %d0,LOCAL_HI(%a1) |put them in double result
dbl_wrt:
movel #0x8,%d0 |byte count for double precision number
exg %a0,%a1 |a0=supervisor source, a1=user dest
bsrl mem_write |move the number to the user's memory
rts
|
| dest_sgl --- write single precision value to user space
|
|Input
| a0 -> destination address (zero selects a data register, see sgl_Dn)
| a1 -> source in extended precision
|
|Output
| a0 -> destroyed
| a1 -> destroyed
| d0 -> 0
|
|Changes extended precision to single precision.
| sgl_sign = ext_sign
| sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias)
| get rid of ext integer bit
| sgl_mant = ext_mant{62:12}
|
| --------------- --------------- ---------------
| extended -> |s| exp | |1| ms mant | | ls mant |
| --------------- --------------- ---------------
| 95 64 63 62 40 32 31 12 0
| | |
| | |
| | |
| v v
| ---------------
| single -> |s|exp| mant |
| ---------------
| 31 22 0
|
dest_sgl:
clrl %d0
movew LOCAL_EX(%a1),%d0 |get exponent
subw #0x3fff,%d0 |subtract extended precision bias
cmpw #0x4000,%d0 |check if inf
beqs sinf |if so, special case
addw #0x7f,%d0 |add single precision bias
swap %d0 |put exp in upper word of d0
lsll #7,%d0 |shift it into single exp bits
tstb LOCAL_SGN(%a1)
beqs get_sman |if positive, continue
bsetl #31,%d0 |if negative, put in sign first
bras get_sman |get mantissa
sinf:
movel #0x7f800000,%d0 |load single inf exp to d0
tstb LOCAL_SGN(%a1)
beqs sgl_wrt |if positive, continue
bsetl #31,%d0 |if negative, put in sign info
bras sgl_wrt
get_sman:
movel LOCAL_HI(%a1),%d1 |get ms mantissa
bfextu %d1{#1:#23},%d1 |get upper 23 bits of ms (skips integer bit)
orl %d1,%d0 |put these bits in ms word of single
| The 4-byte single image is assembled in L_SCR1 on the frame, then
| either copied to user memory or routed to a data register.
sgl_wrt:
movel %d0,L_SCR1(%a6) |put the new exp back on the stack
movel #0x4,%d0 |byte count for single precision number
tstl %a0 |users destination address
beqs sgl_Dn |destination is a data register
exg %a0,%a1 |a0=supervisor source, a1=user dest
leal L_SCR1(%a6),%a0 |point a0 to data
bsrl mem_write |move the number to the user's memory
rts
| Data-register destination: recover the register number from the
| saved F-line opcode word and tail-call reg_dest to update it.
sgl_Dn:
bsrl get_fline |returns fline word in d0
andw #0x7,%d0 |isolate register number
movel %d0,%d1 |d1 has size:reg formatted for reg_dest
orl #0x10,%d1 |reg_dest wants size added to reg#
bral reg_dest |size is X, rts in reg_dest will
| ;return to caller of dest_sgl
| dest_ext --- write the extended-precision value to user space.
| In:  a0 -> user destination address, a1 -> source (sign in LOCAL_SGN)
| The sign is folded back into the exponent word and the internal
| sign byte cleared before the 12-byte image is copied out.
dest_ext:
tstb LOCAL_SGN(%a1) |put back sign into exponent word
beqs dstx_cont
bsetb #sign_bit,LOCAL_EX(%a1)
dstx_cont:
clrb LOCAL_SGN(%a1) |clear out the sign byte
movel #0x0c,%d0 |byte count for extended number
exg %a0,%a1 |a0=supervisor source, a1=user dest
bsrl mem_write |move the number to the user's memory
rts
|end
|
| --- extraction artifact, neutralized (dataset page footer, not source) ---
| Subsets and Splits
| No community queries yet
| The top public SQL queries from the community will appear here once available.