repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
49bitcat/nileswan
| 5,999
|
software/ipl0/ipl0.s
|
; Copyright (c) 2024, 2025 Adrian Siekierka
;
; Nileswan IPL0 is free software: you can redistribute it and/or modify it under
; the terms of the GNU General Public License as published by the Free
; Software Foundation, either version 3 of the License, or (at your option)
; any later version.
;
; Nileswan IPL0 is distributed in the hope that it will be useful, but WITHOUT
; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
; more details.
;
; You should have received a copy of the GNU General Public License along
; with Nileswan IPL0. If not, see <https://www.gnu.org/licenses/>.
%include "swan.inc"
bits 16
cpu 186
; This allows us to access IRAM at addresses 0xC000~0xFFFF on the CS segment.
NILE_IPL0_SEG equ 0xf400
NILE_IPL0_TMP_RAM equ 0xf800 ; f400:f800 => 0000:3800
NILE_IPL0_STACK equ 0x0000 ; f400:0000 => 0000:4000
NILE_IPL0_SIZE equ 512
NILE_FLASH_ADDR_IPL1_ORIG equ 0x08000
NILE_FLASH_ADDR_IPL1_SAFE equ 0x0c000
NILE_FLASH_ADDR_IPL1 equ 0x40000
; == Initialization / Boot state preservation ==
; 4000:0000 - boot ROM alternate boot location
org 0x0000
jmp NILE_IPL0_SEG:start_entrypoint1
start_entrypoint1:
; Entrypoint ID 1 = alternate boot location (4000:0000).
cs mov byte [NILE_IPL0_TMP_RAM], 1
jmp start_shared
; Pad so the PCv2 entrypoint sits exactly at offset 0x10.
times (16)-($-$$) db 0xFF
; 4000:0010 - boot ROM alternate boot location (PCv2)
jmp NILE_IPL0_SEG:start_entrypoint2
start_entrypoint2:
; Entrypoint ID 2 = PCv2 alternate boot location.
cs mov byte [NILE_IPL0_TMP_RAM], 2
jmp start_shared
start:
; Entrypoint ID 0 = primary boot vector (reached via the jump in the
; cartridge header stub at the end of this file).
cs mov byte [NILE_IPL0_TMP_RAM], 0
start_shared:
; Preserve the boot ROM's register state in temporary IRAM so IPL1 can
; inspect it later. DS and AX are saved with CS segment overrides first,
; because DS has not been pointed at our segment yet.
cs mov [NILE_IPL0_TMP_RAM + 18], ds
cs mov [NILE_IPL0_TMP_RAM + 2], ax
; Initialize DS == CS
mov ax, NILE_IPL0_SEG
mov ds, ax
mov [NILE_IPL0_TMP_RAM + 4], bx
mov [NILE_IPL0_TMP_RAM + 6], cx
mov [NILE_IPL0_TMP_RAM + 8], dx
mov [NILE_IPL0_TMP_RAM + 10], sp
mov [NILE_IPL0_TMP_RAM + 12], bp
mov [NILE_IPL0_TMP_RAM + 14], si
mov [NILE_IPL0_TMP_RAM + 16], di
mov [NILE_IPL0_TMP_RAM + 20], es
mov [NILE_IPL0_TMP_RAM + 22], ss
; Initialize SS/SP (SS = our segment; SP = 0 wraps downward on first push)
mov ss, ax
xor sp, sp
; Copy FLAGS (no direct flags-to-memory move; go via PUSHF/POP)
pushf
pop di
mov [NILE_IPL0_TMP_RAM + 24], di
; Clear interrupts
cli
; Snapshot I/O ports 0x00..0xBE word-wise: INSW reads port DX into ES:DI.
push cs
pop es
xor dx, dx
mov di, NILE_IPL0_TMP_RAM + 26
mov cx, (0xC0 >> 1)
copyIoPortDataLoop:
insw
inc dx
inc dx
loop copyIoPortDataLoop
; == IPL1 loader ==
; Reset cartridge hardware.
; NOTE(review): ports 0xE2/0xE4 are nileswan control registers; the
; values 0xDD / 0xFFFF presumably perform an unlock/reset - confirm
; against the nileswan register documentation.
mov ax, 0xDD
out 0xE2, ax
mov ax, 0xFFFF
out 0xE4, ax
; Choose which IPL1 image to boot:
; - if recovery key combo pressed: load recovery IPL1
; - if on-cartridge button held: load factory IPL1
; - otherwise: load regular IPL1
call keypadScan
and ax, (KEY_X3 | KEY_B)
cmp ax, (KEY_X3 | KEY_B)
je bootIpl1Safe
bootIpl1NonSafe:
; Bit 7 of cartridge register 0xBFF5 reflects the on-cartridge button.
test byte [0xBFF5], 0x80
mov bx, NILE_FLASH_ADDR_IPL1 >> 8
jz bootIpl1End
mov bx, NILE_FLASH_ADDR_IPL1_ORIG >> 8
jmp bootIpl1End
bootIpl1Safe:
mov bx, NILE_FLASH_ADDR_IPL1_SAFE >> 8
bootIpl1End:
; BX = flash byte address >> 8; issue the streaming READ command.
call spiStartRead
; == IPL1 loader / Read loop ==
; Initialize first SPI read (16-byte header) from flash device, flip buffer
mov ax, ((16 - 1) | SPI_MODE_READ | SPI_CNT_DEV_FLASH | SPI_CNT_BUSY)
out NILE_SPI_CNT, ax
; DS = 0x2000, ES = 0x0000 (, CS/SS = NILE_IPL0_SEG)
; DS:0000 is the SPI RX buffer mapped into ROM space; ES is the load target.
mov ax, ROMSeg0
mov ds, ax
xor ax, ax
mov es, ax
; Wait for SPI read to finish
call spiSpinwait
; Initialize second SPI read (first 512-byte sector), flip buffer
in ax, NILE_SPI_CNT
and ax, SPI_CNT_BUFFER
xor ax, ((512 - 1) | SPI_MODE_READ | SPI_CNT_DEV_FLASH | SPI_CNT_BUFFER | SPI_CNT_BUSY)
out NILE_SPI_CNT, ax
; Header word 0 = IPL1 start offset; push 0000:offset for the final RETF.
; DI also becomes the copy destination, so IPL1 is loaded at its entrypoint.
mov di, [0x0000]
push 0x0000
push di
; Header word 1 = sector count
mov cx, [0x0002]
readLoop:
; Wait for the in-flight sector, then immediately start fetching the next
; sector into the other half of the double buffer before copying this one.
call spiSpinwait
in ax, NILE_SPI_CNT
and ax, SPI_CNT_BUFFER
xor ax, ((512 - 1) | SPI_MODE_READ | SPI_CNT_DEV_FLASH | SPI_CNT_BUFFER | SPI_CNT_BUSY)
out NILE_SPI_CNT, ax
; Copy 512 bytes (256 words) from the completed buffer to ES:DI.
push cx
mov cx, 0x100
rep movsw
pop cx
; Read next 512 bytes
loop readLoop
readComplete:
; Finish SPI read
call spiSpinwait
; Jump to IPL1 (far return to the 0000:offset pushed above).
retf
; === Utility functions ===
; Start a streaming read from the SPI flash.
; In: BX = flash byte address >> 8 (the read starts at byte address BX << 8)
; Falls through into spiSpinwait, so it returns only after the 4-byte
; command has been transmitted.
spiStartRead:
; Prepare flash command: 0x03 (READ) followed by the 24-bit address BH,BL,0x00
xor si, si
mov di, si
; ES:DI = SPI TX buffer, banked into the cartridge SRAM window.
push SRAMSeg
pop es
mov ax, NILE_BANK_RAM_TX
out RAM_BANK_2003, ax
mov ax, NILE_BANK_ROM_RX
out ROM_BANK_0_2003, ax
; Write 0x03, BH, BL, 0x00 to SPI TX buffer (as two little-endian words)
mov ax, bx
mov al, 0x03
stosw
mov ax, bx
mov ah, 0x00
stosw
; Initialize 4-byte SPI write to flash device, flip buffer
mov ax, ((4 - 1) | SPI_MODE_WRITE | SPI_CNT_DEV_FLASH | SPI_CNT_BUFFER | SPI_CNT_BUSY)
out NILE_SPI_CNT, ax
; jmp spiSpinwait ; fallthrough
; Wait until SPI is no longer busy (busy = bit 7 of NILE_SPI_CNT's high byte).
; Clobber: AL
spiSpinwait:
in al, NILE_SPI_CNT+1
test al, 0x80
jnz spiSpinwait
ret
; Scan keypad.
; Output: AX = keypad data, assembled from three strobes of port 0xB5:
;   AH = first group's 4 bits; AL = second group (high nibble) | third
;   group (low nibble).
; Clobbers: flags. CX/DX are preserved.
keypadScan:
push cx
push dx
; DX = keypad I/O port
mov dx, 0x00B5
; Strobe group 1 (0x10) and read back its 4 key bits.
mov al, 0x10
out dx, al
; NOTE(review): 'daa' appears to serve only as a short delay between the
; port write and the read-back - confirm against hardware timing docs.
daa
in al, dx
and al, 0x0F
mov ch, al
; Strobe group 2 (0x20); its bits become the high nibble of CL.
mov al, 0x20
out dx, al
daa
in al, dx
shl al, 4
mov cl, al
; Strobe group 3 (0x40); its bits fill the low nibble of CL.
mov al, 0x40
out dx, al
daa
in al, dx
and al, 0x0F
or cl, al
mov ax, cx
pop dx
pop cx
ret
times (NILE_IPL0_SIZE-16)-($-$$) db 0xFF
; 0xFFFF:0x0000 - boot ROM primary boot location + header
jmp NILE_IPL0_SEG:start
db 0x00 ; Maintenance
db 0x42 ; Developer ID
db 0x01 ; Color
db 0x01 ; Cart number
db 0x80 ; Version + Disable IEEPROM write protect
db 0x00 ; ROM size
db 0x05 ; Save type
dw 0x0004 ; Flags
dw 0x0000 ; Checksum
|
49bitcat/nileswan
| 3,623
|
software/userland/src/updater/crc16.s
|
/**
* Copyright (c) 2024 Adrian Siekierka
*
* Nileswan Updater is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation, either version 3 of the License, or (at your option)
* any later version.
*
* Nileswan Updater is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with Nileswan Updater. If not, see <https://www.gnu.org/licenses/>.
*/
#include <wonderful.h>
#include <ws.h>
#define POLYNOMIAL 0x1021
.arch i186
.code16
.intel_syntax noprefix
.section ".text.crc16", "ax"
crc16_table:
.word 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7
.word 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF
.word 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6
.word 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE
.word 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485
.word 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D
.word 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4
.word 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC
.word 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823
.word 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B
.word 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12
.word 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A
.word 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41
.word 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49
.word 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70
.word 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78
.word 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F
.word 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067
.word 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E
.word 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256
.word 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D
.word 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405
.word 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C
.word 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634
.word 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB
.word 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3
.word 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A
.word 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92
.word 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9
.word 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1
.word 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8
.word 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
// uint16_t crc16(const char __far *data, uint16_t len, uint16_t initial);
//
// Table-driven CRC-16/CCITT (polynomial 0x1021), MSB-first.
// In:  DX:AX = far pointer to data; the initial CRC is on the stack.
//      NOTE(review): LOOP consumes CX directly, so len is presumably
//      passed in CX by the Wonderful calling convention - confirm.
// Out: AX = resulting CRC.
// NOTE(review): len == 0 would iterate 65536 times; callers presumably
// guarantee len >= 1.
.global crc16
crc16:
push ds
push si
push bp
mov bp, sp
// DS:SI = data pointer (far pointer arrives in DX:AX).
mov ds, dx
mov si, ax
// DX = running CRC, initialized from the stack argument.
mov dx, [bp + WF_PLATFORM_CALL_STACK_OFFSET(6)]
1:
lodsb
// BX = (crc >> 8) ^ byte (the table index); crc <<= 8.
xor bx, bx
mov bl, dh
mov dh, dl
mov dl, bh
xor bl, al
// Table entries are words: double the index.
shl bx, 1
cs xor dx, [crc16_table + bx]
loop 1b
mov ax, dx
pop bp
pop si
pop ds
WF_PLATFORM_RET
|
49bitcat/nileswan
| 4,926
|
software/ipl1/src/safe/ram_test.s
|
/**
* Copyright (c) 2024 Adrian Siekierka
*
* Nileswan IPL1 is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation, either version 3 of the License, or (at your option)
* any later version.
*
* Nileswan IPL1 is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with Nileswan IPL1. If not, see <https://www.gnu.org/licenses/>.
*/
#include <wonderful.h>
#include <ws.h>
#define INDICATOR_ADDR 0x3FB6
.arch i186
.code16
.intel_syntax noprefix
.section .text, "ax"
// One step of a 16-bit xorshift PRNG; AX = state, updated in place.
// Clobbers CX. Sequence: x ^= x << 7; x ^= x >> 9; x ^= x << 8.
.macro xorshift_ax_cx
// x ^= x << 7
mov cx, ax
shl cx, 7
xor ax, cx
// x ^= x >> 9  (x >> 9 fits in one byte: AH >> 1, folded into AL)
mov cl, ah
shr cl, 1
xor al, cl
// x ^= x << 8  (fold the low byte into the high byte)
xor ah, al
.endm
// Cartridge SRAM fault test (write/verify all banks with a PRNG pattern).
// ax = pointer to the result structure (SS-relative; one status word
//      is written per bank)
// dx = number of banks to test
.global ram_fault_test
ram_fault_test:
push ds
push es
push si
push di
// BX = result pointer; results are written through SS:[BX].
mov bx, ax
// DS = ES = 0x1000: the banked cartridge SRAM window.
mov ax, 0x1000
mov ds, ax
mov es, ax
// STOSW/SCASW must walk upwards through each bank.
cld
call ram_fault_test_perform
pop di
pop si
pop es
pop ds
IA16_RET
// Core of the SRAM test.
// In: BX = result pointer (SS-relative), DX = bank count,
//     DS = ES = SRAM window segment, direction flag clear.
ram_fault_test_perform:
// Save the bank count twice: once for the read pass, once to restore at exit.
push dx
push dx
xor di, di
// Mode 1: skip the write pass, only do the read test.
ss cmp byte ptr [ram_fault_test_mode], 1
je ram_fault_test_read_start
// Seed the PRNG (must match the seed used again by the read pass).
mov ax, 12345
ram_fault_test_write_outer_loop:
// dx = dx - 1; select SRAM bank dx-1 (banks are tested top-down)
xchg ax, dx
dec ax
out WS_CART_EXTBANK_RAM_PORT, ax
xchg ax, dx
ram_fault_test_write_loop:
// store random word to memory
stosw
xorshift_ax_cx
// DI wrapped back to 0 => the full 64 KiB bank has been written.
test di, di
jnz ram_fault_test_write_loop
// Advance the progress indicator (value cycles through 0x140..0x17F).
ss mov cx, word ptr [INDICATOR_ADDR]
inc cx
or cx, 0x140
and cx, 0x17F
ss mov word ptr [INDICATOR_ADDR], cx
// have we finished all pages?
test dx, dx
jnz ram_fault_test_write_outer_loop
ram_fault_test_read_start:
// restore bank counter
pop dx
// Re-seed the PRNG so the expected sequence matches the write pass.
mov ax, 12345
ram_fault_test_read_outer_loop:
// dx = dx - 1; select SRAM bank dx-1
xchg ax, dx
dec ax
out WS_CART_EXTBANK_RAM_PORT, ax
xchg ax, dx
ram_fault_test_read_loop:
// Compare AX (expected PRNG word) against ES:[DI]; DI advances past it.
scasw
// is there a difference?
jnz ram_fault_test_read_found
ram_fault_test_read_next:
// advance PRNG
xorshift_ax_cx
// DI wrapped back to 0 => bank completely verified.
test di, di
jnz ram_fault_test_read_loop
ram_fault_test_read_page_done:
// Write the "no error" result ('.' with attribute 0x01) for this bank...
// ... unless test mode inhibits output (modes 254/255)
ss cmp byte ptr [ram_fault_test_mode], 254
jae ram_fault_test_read_page_done_error
// ... and unless an error ('!') was already recorded for this bank.
cmp word ptr ss:[bx], 0x0121
je ram_fault_test_read_page_done_error
mov word ptr ss:[bx], 0x012E
ram_fault_test_read_page_done_error:
// Advance BX to the next result slot.
call ram_fault_test_incr_bx
// Advance the progress indicator (same 0x140..0x17F cycle as above).
ss mov cx, word ptr [INDICATOR_ADDR]
inc cx
or cx, 0x140
and cx, 0x17F
ss mov word ptr [INDICATOR_ADDR], cx
// have we finished all pages?
test dx, dx
jnz ram_fault_test_read_outer_loop
// Drop the second saved bank count.
pop dx
ret
ram_fault_test_read_found_skip:
// Mode >= 254: just record that a failure happened (mode := 255) and abort.
ss mov byte ptr [ram_fault_test_mode], 255
pop dx
ret
// A mismatch was found at ES:[DI-2] in the currently selected bank.
ram_fault_test_read_found:
// Record the "error" result ('!' with attribute 0x01) for this bank...
// ... unless test mode inhibits output (modes 254/255).
ss cmp byte ptr [ram_fault_test_mode], 254
jae ram_fault_test_read_found_skip
mov word ptr ss:[bx], 0x0121
// Print the error location as "<bank>?<offset>".
pusha
// dx = bank; write a '?' separator tile, then the bank number at 0x3F80.
mov word ptr ss:[0x3F90], 0x013F
mov ax, 0x3F80
call print_hex_number
// SCASW advanced DI past the failing word, so the offset is DI - 2.
mov dx, di
dec dx
dec dx
call print_hex_number
// Wait for a key to be pressed...
ram_fault_test_read_found_keypress:
call ws_keypad_scan
and ax, 0x0DDD
jz ram_fault_test_read_found_keypress
push ax
// ... and released again.
ram_fault_test_read_found_keypress2:
call ws_keypad_scan
and ax, 0x0DDD
jnz ram_fault_test_read_found_keypress2
pop ax
// Erase the '?' separator tile (space).
mov word ptr ss:[0x3F90], 0x0120
// NOTE(review): keys reported in AH's low nibble continue scanning this
// bank word-by-word; any other key skips to the next bank - confirm the
// exact key mapping against ws_keypad_scan.
test ah, 0x0F
jnz ram_fault_test_read_clear_bank_only
popa
// clear pointer, read next page
xor di, di
jmp ram_fault_test_read_page_done_error
ram_fault_test_read_clear_bank_only:
popa
jmp ram_fault_test_read_next
// Advance BX to the next result word slot (2 bytes). Whenever the
// increment toggles bit 5 (crosses a 32-byte boundary), skip a further
// 32 bytes.
// NOTE(review): presumably this steps over interleaved rows of the
// on-screen result layout - confirm against the screen map.
ram_fault_test_incr_bx:
mov cx, bx
add bx, 2
xor cx, bx
and cx, 0x20
jz ram_fault_test_incr_bx_end
add bx, 32
ram_fault_test_incr_bx_end:
ret
.section .data, "a"
// 0 (default) - print tiles, stop on every read
// 1 - only do read test
// 254 - set test mode to 255 on failure
.global ram_fault_test_mode
ram_fault_test_mode:
.byte 0
|
49bitcat/nileswan
| 1,435
|
software/ipl1/src/shared/crt0.s
|
/**
* Copyright (c) 2022, 2023, 2024 Adrian "asie" Siekierka
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*/
.arch i186
.code16
.intel_syntax noprefix
.section .header, "ax"
header:
.word _start
.word __sector_count
.word 0
.word 0
.word 0
.word 0
.word 0
.word 0
.section .start, "ax"
// IPL1 C runtime entry point: set up flat segment 0, a stack, clear
// interrupt enables and BSS, then enter main().
.global _start
_start:
// performed by IPL0
// cli
xor ax, ax
// CS = 0x0000; point DS/ES/SS at segment 0 as well.
mov ds, ax
mov es, ax
mov ss, ax
// configure SP
mov sp, 0x3200
// clear int enable
out 0xB2, al
// clear BSS
// assumption: AX = 0
mov di, offset "__sbss"
// NOTE(review): __lwbss is presumably the BSS length in words (REP
// STOSW stores CX words) - confirm against the linker script.
mov cx, offset "__lwbss"
cld
rep stosw
jmp main
|
49bitcat/nileswan
| 1,555
|
software/ipl1/src/shared/util.s
|
/**
* Copyright (c) 2024 Adrian Siekierka
*
* Nileswan IPL1 is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free
* Software Foundation, either version 3 of the License, or (at your option)
* any later version.
*
* Nileswan IPL1 is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with Nileswan IPL1. If not, see <https://www.gnu.org/licenses/>.
*/
#include <wonderful.h>
#include <ws.h>
.arch i186
.code16
.intel_syntax noprefix
.section .text, "ax"
// Expand CX bytes at DS:DX into CX words at DS:AX.
// Each output word = source byte (low) | high byte of the stack
// argument (high); LODSB overwrites AL each iteration, so only AH of
// the stack argument survives into the output.
// ax = destination
// dx = source
// cx = count (NOTE(review): 0 would iterate 65536 times - confirm
//      callers always pass count >= 1)
// stack = fill value (high byte used)
.global mem_expand_8_16
mem_expand_8_16:
push es
// ES = DS so STOSW writes into the data segment.
push ds
pop es
push si
push di
mov bx, sp
mov di, ax
mov si, dx
// AX = stack argument; AH supplies the high byte of every output word.
ss mov ax, [bx + IA16_CALL_STACK_OFFSET(6)]
cld
mem_expand_8_16_loop:
lodsb
stosw
loop mem_expand_8_16_loop
pop di
pop si
pop es
IA16_RET 0x2
// Print DX as a 4-digit hexadecimal number.
// In:  AX = destination offset (written through segment 0x0000),
//      DX = value to print.
// Out: AX = destination offset advanced past the 4 words written.
// Each output word = hex digit character | 0x0100 (AH stays 0x01).
// NOTE(review): the hexchars table is read from segment 0x0000 -
// presumably IPL1 is loaded at 0000:0000; confirm.
.global print_hex_number
print_hex_number:
push di
push es
mov di, ax
// ES = 0x0000: both hexchars and the output buffer live in segment 0.
xor ax, ax
mov es, ax
mov ah, 0x01
mov cx, 4
print_hex_number_loop:
// AL = character for the top nibble of DX.
mov bx, dx
shr bx, 12
es mov al, [hexchars + bx]
stosw
// Rotate the next nibble into the top position.
shl dx, 4
loop print_hex_number_loop
mov ax, di
pop es
pop di
ret
|
4d61726b/VirtualKD-Redux
| 2,433
|
VirtualKD-Redux/Lib/STLPort/src/sparc_atomic64.s
|
.section ".text",#alloc,#execinstr
.align 8
.skip 16
! int _STLP_atomic_exchange (void *pvalue, int value)
!
! Atomically stores `value` into *pvalue; returns the PREVIOUS value.
.type _STLP_atomic_exchange,#function
.global _STLP_atomic_exchange
.align 8
_STLP_atomic_exchange:
1:
ldx [%o0], %o2 ! %o2 = current value
mov %o1, %o3 ! %o3 = new value
casx [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]<-%o3; %o3 <- old [%o0]
cmp %o2, %o3 ! cas succeeded iff old value == %o2
bne 1b ! Retry upon failure
membar #LoadLoad | #LoadStore ! (delay slot) order the cas before
! returning
retl ! return
mov %o2, %o0 ! (delay slot) return the previous value
.size _STLP_atomic_exchange,(.-_STLP_atomic_exchange)
! int _STLP_atomic_increment (void *pvalue)
!
! Atomically increments the 64-bit value at *pvalue and returns the
! new (incremented) value.
.type _STLP_atomic_increment,#function
.global _STLP_atomic_increment
.align 8
_STLP_atomic_increment:
0:
ldx [%o0], %o2 ! %o2 = current value
add %o2, 0x1, %o3 ! %o3 = current + 1 (plain add: the previous
! addx wrongly folded in the carry flag)
casx [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]<-%o3; %o3 <- old [%o0]
cmp %o3, %o2 ! cas succeeded iff old value == %o2
bne 0b ! Retry upon failure
membar #LoadLoad | #LoadStore ! (delay slot) order the cas before
! returning
retl ! return
add %o2, 0x1, %o0 ! (delay slot) return the incremented value
! (was "mov %o1, %o0": %o1 is never set here)
.size _STLP_atomic_increment,(.-_STLP_atomic_increment)
! /* int _STLP_atomic_decrement (void *pvalue) */
!
! Atomically decrements the 64-bit value at *pvalue and returns the
! new (decremented) value.
.type _STLP_atomic_decrement,#function
.global _STLP_atomic_decrement
.align 8
_STLP_atomic_decrement:
0:
ldx [%o0], %o2 ! %o2 = current value
sub %o2, 0x1, %o3 ! %o3 = current - 1 (plain sub: the previous
! subx wrongly folded in the carry flag)
casx [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]<-%o3; %o3 <- old [%o0]
cmp %o3, %o2 ! cas succeeded iff old value == %o2
bne 0b ! Retry upon failure
membar #LoadLoad | #LoadStore ! (delay slot) order the cas before
! returning
retl ! return
sub %o2, 0x1, %o0 ! (delay slot) return the decremented value
! (was "nop", which returned pvalue itself)
.size _STLP_atomic_decrement,(.-_STLP_atomic_decrement)
|
4d61726b/VirtualKD-Redux
| 2,593
|
VirtualKD-Redux/Lib/STLPort/src/sparc_atomic.s
|
.section ".text",#alloc,#execinstr
.align 8
.skip 16
/*
** int _STLP_atomic_exchange (void *pvalue, int value)
**
** Atomically stores `value` into *pvalue; returns the PREVIOUS value.
*/
.type _STLP_atomic_exchange,#function
.global _STLP_atomic_exchange
.align 8
_STLP_atomic_exchange:
0:
ld [%o0], %o2 ! %o2 = current value
mov %o1, %o3 ! %o3 = new value
! swap [%o0], %o3 ! (earlier non-cas implementation, kept for reference)
cas [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]<-%o3; %o3 <- old [%o0]
cmp %o2, %o3 ! cas succeeded iff old value == %o2
bne 0b ! Retry upon failure
stbar ! (delay slot) order the store before returning
mov %o2, %o0 ! return the previous value
retl ! return
nop
.size _STLP_atomic_exchange,(.-_STLP_atomic_exchange)
/* int _STLP_atomic_increment (void *pvalue)
**
** Atomically increments the 32-bit value at *pvalue and returns the
** new (incremented) value.
*/
.type _STLP_atomic_increment,#function
.global _STLP_atomic_increment
.align 8
_STLP_atomic_increment:
1:
ld [%o0], %o2 ! %o2 = current value
add %o2, 0x1, %o3 ! %o3 = current + 1
! swap [%o0], %o3 ! (earlier non-cas implementation)
cas [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]<-%o3; %o3 <- old [%o0]
cmp %o3, %o2 ! cas succeeded iff old value == %o2
bne 1b ! Retry if we failed.
membar #LoadLoad | #LoadStore ! (delay slot) order the cas before
! returning
retl ! return
add %o2, 0x1, %o0 ! (delay slot) return the incremented value
! (was "nop", which returned pvalue itself)
.size _STLP_atomic_increment,(.-_STLP_atomic_increment)
/* int _STLP_atomic_decrement (void *pvalue)
**
** Atomically decrements the 32-bit value at *pvalue and returns the
** new (decremented) value.
*/
.type _STLP_atomic_decrement,#function
.global _STLP_atomic_decrement
.align 8
_STLP_atomic_decrement:
2:
ld [%o0], %o2 ! %o2 = current value
sub %o2, 0x1, %o3 ! %o3 = current - 1
! swap [%o0], %o3 ! (earlier non-cas implementation)
cas [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]<-%o3; %o3 <- old [%o0]
cmp %o3, %o2 ! cas succeeded iff old value == %o2
bne 2b ! Retry if we failed.
membar #LoadLoad | #LoadStore ! (delay slot) order the cas before
! returning
retl ! return
sub %o2, 0x1, %o0 ! (delay slot) return the decremented value
! (was "nop", which returned pvalue itself)
.size _STLP_atomic_decrement,(.-_STLP_atomic_decrement)
|
4ilo/HD44780-Stm32HAL
| 21,686
|
F4_disco_example/startup/startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Entry point after reset: set SP, copy .data initializers from flash
   to SRAM, zero .bss, run SystemInit and static constructors, then
   call main(). */
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = running byte offset; r0/r3 are reloaded on each loop pass. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = word at _sidata + r1 (flash) */
str r3, [r0, r1] /* store to _sdata + r1 (SRAM) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3 /* done when _sdata + r1 reaches _edata */
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment, word by word from _sbss to _ebss. */
FillZerobss:
movs r3, #0
str r3, [r2], #4
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point (not expected to return). */
bl main
bx lr
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
/* Fallback handler for any unexpected interrupt/exception: spin forever,
   preserving the system state for examination by a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
4ilo/ssd1306-stm32HAL
| 20,446
|
example/startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2017 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/* Assembler configuration: unified ARM/Thumb syntax, Cortex-M4 target,
   software floating point ABI, Thumb instruction set. */
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
 * @brief This is the code that gets called when the processor first
 * starts execution following a reset event. Only the absolutely
 * necessary set is performed, after which the application
 * supplied main() routine is called.
 *
 * Sequence: set SP from _estack, copy .data initializers from flash
 * to SRAM, zero-fill .bss, call SystemInit, run C static constructors
 * via __libc_init_array, then call main().
 *
 * Register roles in the init loops:
 *   r0 = _sdata (RAM destination base), r1 = running byte offset,
 *   r2 = current .bss fill pointer / loop cursor, r3 = scratch/limit.
 *
 * @param None
 * @retval : None
 */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + offset */
str r3, [r0, r1] /* store to _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = next destination address */
cmp r2, r3
bcc CopyDataInit /* loop while destination < _edata (unsigned) */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* *r2++ = 0 (post-increment store) */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* loop while fill pointer < _ebss */
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr /* only reached if main() returns */
.size Reset_Handler, .-Reset_Handler
/**
 * @brief This is the code that gets called when the processor receives an
 * unexpected interrupt. This simply enters an infinite loop, preserving
 * the system state for examination by a debugger.
 *
 * All handlers in the vector table are weak-aliased to this symbol, so any
 * IRQ without a user-supplied handler parks the core here.
 *
 * @param None
 * @retval None
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop /* spin forever; attach a debugger to inspect state */
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
* Entry 0 is the initial MSP value; entry 1 is the reset vector. Slot order
* is fixed by the STM32F411 interrupt map; reserved slots hold 0.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size is evaluated before the table is emitted, so the
   recorded ELF symbol size is 0; moving it after the last .word would record
   the true size. Harmless at run time -- confirm before changing. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
* Each alias below corresponds 1:1 to a slot in g_pfnVectors; .thumb_set
* also marks the alias as a Thumb function so the vector entry gets bit 0.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
4ms/SMR
| 23,461
|
stm32/device/src/startup_stm32f4xx.s
|
/**
******************************************************************************
* @file startup_stm32f4xx.s
* @author MCD Application Team
* @version V1.0.0
* @date 30-September-2011
* @brief STM32F4xx Devices vector table for RIDE7 toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Configure the clock system and the external SRAM mounted on
* STM324xG-EVAL board to be used as data memory (optional,
* to be enabled by user)
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
* WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE
* TIME. AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY
* DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING
* FROM THE CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE
* CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
*
* <h2><center>© COPYRIGHT 2011 STMicroelectronics</center></h2>
******************************************************************************
*/
/* Assembler configuration for the STM32F4xx startup file (V1.0.0). */
.syntax unified
/* NOTE(review): ".cpu cortex-m3" in a Cortex-M4 (STM32F4xx) file. It
   assembles because no M4-only instructions are used here, but
   "cortex-m4" would name the actual core -- confirm before changing. */
.cpu cortex-m3
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
 * @brief This is the code that gets called when the processor first
 * starts execution following a reset event. Only the absolutely
 * necessary set is performed, after which the application
 * supplied main() routine is called.
 *
 * Fix: explicitly load the initial stack pointer from _estack, as the
 * companion startup_stm32f411xe.s Reset_Handler does. On a hardware reset
 * the core loads MSP from vector-table entry 0 anyway, but the explicit
 * load keeps SP valid when Reset_Handler is entered by a bootloader or
 * debugger jump (the pattern adopted by later ST startup templates).
 *
 * Register roles in the init loops:
 *   r0 = _sdata (RAM destination base), r1 = running byte offset,
 *   r2 = current .bss fill pointer / loop cursor, r3 = scratch/limit.
 *
 * @param None
 * @retval : None
 */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer (bootloader/debugger-entry safe) */
/* Copy the data segment initializers from flash to SRAM */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = flash word at _sidata + offset */
str r3, [r0, r1] /* store to _sdata + offset */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = next destination address */
cmp r2, r3
bcc CopyDataInit /* loop while destination < _edata (unsigned) */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* *r2++ = 0 (post-increment store) */
LoopFillZerobss:
ldr r3, =_ebss
cmp r2, r3
bcc FillZerobss /* loop while fill pointer < _ebss */
/* Call the clock system initialization function. */
bl SystemInit
/* Call the application's entry point.*/
bl main
bx lr /* only reached if main() returns */
.size Reset_Handler, .-Reset_Handler
/**
 * @brief This is the code that gets called when the processor receives an
 * unexpected interrupt. This simply enters an infinite loop, preserving
 * the system state for examination by a debugger.
 *
 * All handlers in the vector table are weak-aliased to this symbol, so any
 * IRQ without a user-supplied handler parks the core here.
 *
 * @param None
 * @retval None
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop /* spin forever; attach a debugger to inspect state */
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
* Entry 0 is the initial MSP value; entry 1 is the reset vector. Slot order
* is fixed by the STM32F4xx interrupt map.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size is evaluated before the table is emitted, so the
   recorded ELF symbol size is 0; moving it after the last .word would record
   the true size. Harmless at run time -- confirm before changing. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word CAN1_TX_IRQHandler /* CAN1 TX */
.word CAN1_RX0_IRQHandler /* CAN1 RX0 */
.word CAN1_RX1_IRQHandler /* CAN1 RX1 */
.word CAN1_SCE_IRQHandler /* CAN1 SCE */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FSMC_IRQHandler /* FSMC */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word ETH_IRQHandler /* Ethernet */
.word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */
.word CAN2_TX_IRQHandler /* CAN2 TX */
.word CAN2_RX0_IRQHandler /* CAN2 RX0 */
.word CAN2_RX1_IRQHandler /* CAN2 RX1 */
.word CAN2_SCE_IRQHandler /* CAN2 SCE */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_IRQHandler /* DCMI */
.word CRYP_IRQHandler /* CRYP crypto */
.word HASH_RNG_IRQHandler /* Hash and Rng */
.word FPU_IRQHandler /* FPU */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
* Each alias below corresponds 1:1 to a slot in g_pfnVectors; .thumb_set
* also marks the alias as a Thumb function so the vector entry gets bit 0.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak CAN1_TX_IRQHandler
.thumb_set CAN1_TX_IRQHandler,Default_Handler
.weak CAN1_RX0_IRQHandler
.thumb_set CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FSMC_IRQHandler
.thumb_set FSMC_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak ETH_IRQHandler
.thumb_set ETH_IRQHandler,Default_Handler
.weak ETH_WKUP_IRQHandler
.thumb_set ETH_WKUP_IRQHandler,Default_Handler
.weak CAN2_TX_IRQHandler
.thumb_set CAN2_TX_IRQHandler,Default_Handler
.weak CAN2_RX0_IRQHandler
.thumb_set CAN2_RX0_IRQHandler,Default_Handler
.weak CAN2_RX1_IRQHandler
.thumb_set CAN2_RX1_IRQHandler,Default_Handler
.weak CAN2_SCE_IRQHandler
.thumb_set CAN2_SCE_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_IRQHandler
.thumb_set DCMI_IRQHandler,Default_Handler
.weak CRYP_IRQHandler
.thumb_set CRYP_IRQHandler,Default_Handler
.weak HASH_RNG_IRQHandler
.thumb_set HASH_RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
/******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE****/
|
4ms/metamodule-plugin-sdk
| 1,661
|
plugin-libc/libgcc/config/nios2/crtn.S
|
/* Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Jonah Graham (jgraham@altera.com).
Contributed by Mentor Graphics, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just makes sure that the .fini and .init sections do in
fact return. Users may put any desired instructions in those sections.
This file is the last thing linked into any executable. */
/* .init epilogue (Nios II): restores the call-saved registers and frame
   pointer that crti.S's _init prologue spilled, pops the 48-byte frame,
   and returns. Offsets must stay in lockstep with crti.S. */
.section ".init"
ldw ra, 44(sp)
ldw r23, 40(sp)
ldw r22, 36(sp)
ldw r21, 32(sp)
ldw r20, 28(sp)
ldw r19, 24(sp)
ldw r18, 20(sp)
ldw r17, 16(sp)
ldw r16, 12(sp)
ldw fp, 8(sp)
addi sp, sp, 48
ret
/* .fini epilogue (Nios II): mirror of the .init epilogue above — restores
   the registers saved by crti.S's _fini prologue and returns. */
.section ".fini"
ldw ra, 44(sp)
ldw r23, 40(sp)
ldw r22, 36(sp)
ldw r21, 32(sp)
ldw r20, 28(sp)
ldw r19, 24(sp)
ldw r18, 20(sp)
ldw r17, 16(sp)
ldw r16, 12(sp)
ldw fp, 8(sp)
addi sp, sp, 48
ret
|
4ms/metamodule-plugin-sdk
| 2,373
|
plugin-libc/libgcc/config/nios2/crti.S
|
/* Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Jonah Graham (jgraham@altera.com).
Contributed by Mentor Graphics, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just makes a stack frame for the contents of the .fini and
.init sections. Users may put any desired instructions in those
sections.
While technically any code can be put in the init and fini sections
most stuff will not work other than stuff which obeys the call frame
and ABI. All the call-preserved registers are saved, the call clobbered
registers should have been saved by the code calling init and fini.
See crtstuff.c for an example of code that inserts itself in the init
and fini sections.
See crt0.s for the code that calls init and fini. */
/* _init prologue (Nios II): allocates a 48-byte frame and saves ra, the
   call-preserved registers r16-r23, and fp. The matching restores live in
   crtn.S, which is linked last; user .init fragments run in between. */
.section ".init"
.align 2
.global _init
_init:
addi sp, sp, -48
stw ra, 44(sp)
stw r23, 40(sp)
stw r22, 36(sp)
stw r21, 32(sp)
stw r20, 28(sp)
stw r19, 24(sp)
stw r18, 20(sp)
stw r17, 16(sp)
stw r16, 12(sp)
stw fp, 8(sp)
addi fp, sp, 8
#ifdef linux
/* PIC: compute the GOT pointer in r22 relative to the current PC. */
nextpc r22
1: movhi r2, %hiadj(_gp_got - 1b)
addi r2, r2, %lo(_gp_got - 1b)
add r22, r22, r2
#endif
/* _fini prologue (Nios II): identical frame setup to _init, for the
   destructor section; crtn.S supplies the matching epilogue. */
.section ".fini"
.align 2
.global _fini
_fini:
addi sp, sp, -48
stw ra, 44(sp)
stw r23, 40(sp)
stw r22, 36(sp)
stw r21, 32(sp)
stw r20, 28(sp)
stw r19, 24(sp)
stw r18, 20(sp)
stw r17, 16(sp)
stw r16, 12(sp)
stw fp, 8(sp)
addi fp, sp, 8
#ifdef linux
/* PIC: compute the GOT pointer in r22 relative to the current PC. */
nextpc r22
1: movhi r2, %hiadj(_gp_got - 1b)
addi r2, r2, %lo(_gp_got - 1b)
add r22, r22, r2
#endif
/* ---- extraction artifact: file boundary in the original dataset dump.
   Next file: plugin-libc/libgcc/config/tilepro/softmpy.S
   (repo: 4ms/metamodule-plugin-sdk, size: 2,499 bytes) ---- */
/* 64-bit multiplication support for TILEPro.
Copyright (C) 2011-2022 Free Software Foundation, Inc.
Contributed by Walter Lee (walt@tilera.com)
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* 64-bit multiplication support. */
/* __muldi3: 64-bit x 64-bit -> 64-bit multiply for TILEPro.
   In:   n0 in r1:r0 (hi:lo), n1 in r3:r2 (hi:lo).
   Out:  product in r1:r0 (hi:lo).
   The full product is assembled from 16x16 partial products using the
   mulhl/mulll/mulhh instruction family.  result1_a/result1_b accumulate
   the high-word contributions; the slt_u instructions detect unsigned
   wraparound (carry) out of the low-word accumulations so the carries
   can be folded into the final high word.  Instructions are packed into
   VLIW bundles ({ ... }); do not reorder across bundle boundaries.  */
.file "softmpy.S"
/* Parameters */
#define lo0 r9 /* low 32 bits of n0 */
#define hi0 r1 /* high 32 bits of n0 */
#define lo1 r2 /* low 32 bits of n1 */
#define hi1 r3 /* high 32 bits of n1 */
/* temps */
#define result1_a r4
#define result1_b r5
#define tmp0 r6
#define tmp0_left_16 r7
#define tmp1 r8
.section .text.__muldi3, "ax"
.align 8
.globl __muldi3
.type __muldi3, @function
__muldi3:
{
move lo0, r0 /* so we can write "out r0" while "in r0" alive */
mulhl_uu tmp0, lo1, r0
}
{
mulll_uu result1_a, lo1, hi0
}
{
move tmp1, tmp0 /* keep pre-accumulation value for carry detection */
mulhla_uu tmp0, lo0, lo1
}
{
mulhlsa_uu result1_a, lo1, hi0
}
{
mulll_uu result1_b, lo0, hi1
slt_u tmp1, tmp0, tmp1 /* carry out of tmp0 accumulation (unsigned wrap) */
}
{
mulhlsa_uu result1_a, lo0, hi1
shli r0, tmp0, 16
}
{
move tmp0_left_16, r0 /* keep tmp0<<16 for the low-word carry test below */
mulhha_uu result1_b, lo0, lo1
}
{
mullla_uu r0, lo1, lo0 /* r0 = low 32 bits of the product */
shli tmp1, tmp1, 16
}
{
mulhlsa_uu result1_b, hi0, lo1
inthh tmp1, tmp1, tmp0 /* merge carry with high half of tmp0 */
}
{
mulhlsa_uu result1_a, hi1, lo0
slt_u tmp0, r0, tmp0_left_16 /* carry out of the low-word add */
}
/* NOTE: this will stall for a cycle here. Oh well. */
{
add r1, tmp0, tmp1 /* combine both carries into the high word */
add result1_a, result1_a, result1_b
}
{
add r1, r1, result1_a /* r1 = high 32 bits of the product */
jrp lr
}
.size __muldi3,.-__muldi3
/* ---- extraction artifact: file boundary in the original dataset dump.
   Next file: plugin-libc/libgcc/config/m68k/crtn.S
   (repo: 4ms/metamodule-plugin-sdk, size: 1,392 bytes) ---- */
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 1999-2022 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* This file supplies function epilogues for the .init and .fini sections.
* It is linked in after all other files.
*/
| crtn: closes the frames opened by crti's linkw in _init/_fini.
| Linked last, so these epilogues follow every other .init/.fini body.
.ident "GNU C crtn.o"
.section .init
unlk %fp		| tear down the _init frame opened by crti
rts
.section .fini
unlk %fp		| tear down the _fini frame opened by crti
rts
/* ---- extraction artifact: file boundary in the original dataset dump.
   Next file: plugin-libc/libgcc/config/m68k/crti.S
   (repo: 4ms/metamodule-plugin-sdk, size: 1,486 bytes) ---- */
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 1999-2022 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* This file just supplies function prologues for the .init and .fini
* sections. It is linked in before crtbegin.o.
*/
| crti: supplies the _init/_fini entry labels and opens a zero-size
| frame with linkw; the matching unlk/rts pairs live in crtn.o, which
| is linked after all other .init/.fini contributions.
.ident "GNU C crti.o"
.section .init
.globl _init
.type _init,@function
_init:
linkw %fp,#0		| open frame; body of .init inserted after this
.section .fini
.globl _fini
.type _fini,@function
_fini:
linkw %fp,#0		| open frame; body of .fini inserted after this
/* ---- extraction artifact: file boundary in the original dataset dump.
   Next file: plugin-libc/libgcc/config/m68k/lb1sf68.S
   (repo: 4ms/metamodule-plugin-sdk, size: 101,881 bytes) ---- */
/* libgcc routines for 68000 w/o floating-point hardware.
Copyright (C) 1994-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
/* Portability prelude: label/register/immediate prefix handling and
   PIC call/jump/load macros.  SYM() applies the platform's user-label
   prefix, REG()/IMM() apply register and immediate prefixes, and the
   PICCALL/PICJUMP/PICLEA/PICPEA macros abstract over the absolute,
   uClinux (-mid-shared-library / -msep-data), and Linux GOT-based
   code models.  */
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/* Note that X is a function. */
#ifdef __ELF__
#define FUNC(x) .type SYM(x),function
#else
/* The .proc pseudo-op is accepted, but ignored, by GAS. We could just
define this to the empty string for non-ELF systems, but defining it
to .proc means that the information is available to the assembler if
the need arises. */
#define FUNC(x) .proc
#endif
/* Use the right prefix for registers. */
#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
/* Use the right prefix for immediate values. */
#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
#define pc REG (pc)
/* Provide a few macros to allow for PIC code support.
 * With PIC, data is stored A5 relative so we've got to take a bit of special
 * care to ensure that all loads of global data is via A5. PIC also requires
 * jumps and subroutine calls to be PC relative rather than absolute. We cheat
 * a little on this and in the PIC case, we use short offset branches and
 * hope that the final object code is within range (which it should be).
 */
#ifndef __PIC__
/* Non PIC (absolute/relocatable) versions */
.macro PICCALL addr
jbsr \addr
.endm
.macro PICJUMP addr
jmp \addr
.endm
.macro PICLEA sym, reg
lea \sym, \reg
.endm
.macro PICPEA sym, areg
pea \sym
.endm
#else /* __PIC__ */
# if defined (__uClinux__)
/* Versions for uClinux */
# if defined(__ID_SHARED_LIBRARY__)
/* -mid-shared-library versions: the library's GOT base is found
   indirectly through a slot in the caller's a5 data area. */
.macro PICLEA sym, reg
movel a5@(_current_shared_library_a5_offset_), \reg
movel \sym@GOT(\reg), \reg
.endm
.macro PICPEA sym, areg
movel a5@(_current_shared_library_a5_offset_), \areg
movel \sym@GOT(\areg), sp@-
.endm
.macro PICCALL addr
PICLEA \addr,a0
jsr a0@
.endm
.macro PICJUMP addr
PICLEA \addr,a0
jmp a0@
.endm
# else /* !__ID_SHARED_LIBRARY__ */
/* Versions for -msep-data: a5 points directly at the GOT. */
.macro PICLEA sym, reg
movel \sym@GOT(a5), \reg
.endm
.macro PICPEA sym, areg
movel \sym@GOT(a5), sp@-
.endm
.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
lea \addr-.-8,a0
jsr pc@(a0)
#else
jbsr \addr
#endif
.endm
.macro PICJUMP addr
/* ISA C has no bra.l instruction, and since this assembly file
gets assembled into multiple object files, we avoid the
bra instruction entirely. */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
lea \addr-.-8,a0
jmp pc@(a0)
#else
bra \addr
#endif
.endm
# endif
# else /* !__uClinux__ */
/* Versions for Linux: locate the GOT pc-relatively via
   _GLOBAL_OFFSET_TABLE_@GOTPC. */
.macro PICLEA sym, reg
movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
lea (-6, pc, \reg), \reg
movel \sym@GOT(\reg), \reg
.endm
.macro PICPEA sym, areg
movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
lea (-6, pc, \areg), \areg
movel \sym@GOT(\areg), sp@-
.endm
.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
lea \addr-.-8,a0
jsr pc@(a0)
#else
jbsr \addr
#endif
.endm
.macro PICJUMP addr
/* ISA C has no bra.l instruction, and since this assembly file
gets assembled into multiple object files, we avoid the
bra instruction entirely. */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
lea \addr-.-8,a0
jmp pc@(a0)
#else
bra \addr
#endif
.endm
# endif
#endif /* __PIC__ */
#ifdef L_floatex
| This is an attempt at a decent floating point (single, double and
| extended double) code for the GNU C compiler. It should be easy to
| adapt to other compilers (but beware of the local labels!).
| Starting date: 21 October, 1990
| It is convenient to introduce the notation (s,e,f) for a floating point
| number, where s=sign, e=exponent, f=fraction. We will call a floating
| point number fpn to abbreviate, independently of the precision.
| Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
| for doubles and 16383 for long doubles). We then have the following
| different cases:
| 1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
| (-1)^s x 1.f x 2^(e-bias-1).
| 2. Denormalized fpns have e=0. They correspond to numbers of the form
| (-1)^s x 0.f x 2^(-bias).
| 3. +/-INFINITY have e=MAX_EXP, f=0.
| 4. Quiet NaN (Not a Number) have all bits set.
| 5. Signaling NaN (Not a Number) have s=0, e=MAX_EXP, f=1.
|=============================================================================
| exceptions
|=============================================================================
| This is the floating point condition code register (_fpCCR):
|
| struct {
| short _exception_bits;
| short _trap_enable_bits;
| short _sticky_bits;
| short _rounding_mode;
| short _format;
| short _last_operation;
| union {
| float sf;
| double df;
| } _operand1;
| union {
| float sf;
| double df;
| } _operand2;
| } _fpCCR;
| _fpCCR: the software floating-point condition-code record (layout
| documented in the struct comment above).  The symbolic offsets and
| constants below are used by $_exception_handler and all arithmetic
| routines; keep them in sync with the field order here.
.data
.even
.globl SYM (_fpCCR)
SYM (_fpCCR):
__exception_bits:
.word 0
__trap_enable_bits:
.word 0
__sticky_bits:
.word 0
__rounding_mode:
.word ROUND_TO_NEAREST
__format:
.word NIL
__last_operation:
.word NOOP
__operand1:
.long 0
.long 0
__operand2:
.long 0
.long 0
| Offsets:
EBITS = __exception_bits - SYM (_fpCCR)
TRAPE = __trap_enable_bits - SYM (_fpCCR)
STICK = __sticky_bits - SYM (_fpCCR)
ROUND = __rounding_mode - SYM (_fpCCR)
FORMT = __format - SYM (_fpCCR)
LASTO = __last_operation - SYM (_fpCCR)
OPER1 = __operand1 - SYM (_fpCCR)
OPER2 = __operand2 - SYM (_fpCCR)
| The following exception types are supported (bit flags, OR-able):
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010
| The allowed rounding modes are:
UNKNOWN = -1
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
| The allowed values of format are:
NIL = 0
SINGLE_FLOAT = 1
DOUBLE_FLOAT = 2
LONG_FLOAT = 3
| The allowed values for the last operation are:
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7
|=============================================================================
| __clear_sticky_bits
|=============================================================================
| The sticky bits are normally not cleared (thus the name), whereas the
| exception type and exception value reflect the last computation.
| This routine is provided to clear them (you can also write to _fpCCR,
| since it is globally visible).
.globl SYM (__clear_sticky_bit)
.text
.even
| void __clear_sticky_bit(void);
| NOTE(review): the exported symbol is singular (__clear_sticky_bit),
| not __clear_sticky_bits as the banner above suggests.
SYM (__clear_sticky_bit):
PICLEA SYM (_fpCCR),a0
#ifndef __mcoldfire__
movew IMM (0),a0@(STICK)	| zero the sticky-bits word
#else
clr.w a0@(STICK)
#endif
rts
|=============================================================================
| $_exception_handler
|=============================================================================
.globl $_exception_handler
.text
.even
| This is the common exit point if an exception occurs.
| NOTE: it is NOT callable from C!
| It expects the exception type in d7, the format (SINGLE_FLOAT,
| DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
| It sets the corresponding exception and sticky bits, and the format.
| Depending on the format if fills the corresponding slots for the
| operands which produced the exception (all this information is provided
| so if you write your own exception handlers you have enough information
| to deal with the problem).
| Then checks to see if the corresponding exception is trap-enabled,
| in which case it pushes the address of _fpCCR and traps through
| trap FPTRAP (15 for the moment).
| It also assumes the caller's a6 frame is still live: operands are read
| from a6@(8..) and the frame is torn down here (unlk a6 / rts).
FPTRAP = 15
$_exception_handler:
PICLEA SYM (_fpCCR),a0
movew d7,a0@(EBITS) | set __exception_bits
#ifndef __mcoldfire__
orw d7,a0@(STICK) | and __sticky_bits
#else
movew a0@(STICK),d4
orl d7,d4
movew d4,a0@(STICK)
#endif
movew d6,a0@(FORMT) | and __format
movew d5,a0@(LASTO) | and __last_operation
| Now put the operands in place:
#ifndef __mcoldfire__
cmpw IMM (SINGLE_FLOAT),d6
#else
cmpl IMM (SINGLE_FLOAT),d6
#endif
beq 1f
| double/long format: copy two 8-byte operands from the caller's frame
movel a6@(8),a0@(OPER1)
movel a6@(12),a0@(OPER1+4)
movel a6@(16),a0@(OPER2)
movel a6@(20),a0@(OPER2+4)
bra 2f
1: movel a6@(8),a0@(OPER1)
movel a6@(12),a0@(OPER2)
2:
| And check whether the exception is trap-enabled:
#ifndef __mcoldfire__
andw a0@(TRAPE),d7 | is exception trap-enabled?
#else
clrl d6
movew a0@(TRAPE),d6
andl d6,d7
#endif
beq 1f | no, exit
PICPEA SYM (_fpCCR),a1 | yes, push address of _fpCCR
trap IMM (FPTRAP) | and trap
#ifndef __mcoldfire__
1: moveml sp@+,d2-d7 | restore data registers
#else
1: moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
#endif /* L_floatex */
#ifdef L_mulsi3
| unsigned long __mulsi3(unsigned long x, unsigned long y);
| 32x32 -> 32-bit multiply built from 16-bit mulu.  With x = x0:x1 and
| y = y0:y1 (high:low 16-bit halves, x at sp@(4)/sp@(6), y at
| sp@(8)/sp@(10)), the low 32 bits of the product are
|   ((x0*y1 + x1*y0) << 16) + x1*y1
| (the x0*y0 term only affects bits >= 32 and is dropped).
.text
FUNC(__mulsi3)
.globl SYM (__mulsi3)
.globl SYM (__mulsi3_internal)
.hidden SYM (__mulsi3_internal)
SYM (__mulsi3):
SYM (__mulsi3_internal):
movew sp@(4), d0 /* x0 -> d0 */
muluw sp@(10), d0 /* x0*y1 */
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(8), d1 /* x1*y0 */
#ifndef __mcoldfire__
addw d1, d0 /* cross terms; only low 16 bits matter */
#else
addl d1, d0
#endif
swap d0
clrw d0 /* d0 = (x0*y1 + x1*y0) << 16 */
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(10), d1 /* x1*y1 */
addl d1, d0
rts
#endif /* L_mulsi3 */
#ifdef L_udivsi3
| unsigned long __udivsi3(unsigned long dividend, unsigned long divisor);
| Unsigned 32/32 division.  On classic 68k the hardware divu only takes a
| 16-bit divisor, so: small divisors use two chained divu steps; large
| divisors are handled by shifting both operands right until the divisor
| fits in 16 bits, computing a tentative quotient, then correcting it by
| at most 1.  ColdFire uses a bitwise non-restoring division loop.
.text
FUNC(__udivsi3)
.globl SYM (__udivsi3)
.globl SYM (__udivsi3_internal)
.hidden SYM (__udivsi3_internal)
SYM (__udivsi3):
SYM (__udivsi3_internal):
#ifndef __mcoldfire__
movel d2, sp@-
movel sp@(12), d1 /* d1 = divisor */
movel sp@(8), d0 /* d0 = dividend */
cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */
jcc L3 /* then try next algorithm */
movel d0, d2
clrw d2
swap d2
divu d1, d2 /* high quotient in lower word */
movew d2, d0 /* save high quotient */
swap d0
movew sp@(10), d2 /* get low dividend + high rest */
divu d1, d2 /* low quotient */
movew d2, d0
jra L6
L3: movel d1, d2 /* use d2 as divisor backup */
L4: lsrl IMM (1), d1 /* shift divisor */
lsrl IMM (1), d0 /* shift dividend */
cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
jcc L4
divu d1, d0 /* now we have 16-bit divisor */
andl IMM (0xffff), d0 /* mask out divisor, ignore remainder */
/* Multiply the 16-bit tentative quotient with the 32-bit divisor. Because of
the operand ranges, this might give a 33-bit product. If this product is
greater than the dividend, the tentative quotient was too large. */
movel d2, d1
mulu d0, d1 /* low part, 32 bits */
swap d2
mulu d0, d2 /* high part, at most 17 bits */
swap d2 /* align high part with low part */
tstw d2 /* high part 17 bits? */
jne L5 /* if 17 bits, quotient was too large */
addl d2, d1 /* add parts */
jcs L5 /* if sum is 33 bits, quotient was too large */
cmpl sp@(8), d1 /* compare the sum with the dividend */
jls L6 /* if sum > dividend, quotient was too large */
L5: subql IMM (1), d0 /* adjust quotient */
L6: movel sp@+, d2
rts
#else /* __mcoldfire__ */
/* ColdFire implementation of non-restoring division algorithm from
Hennessy & Patterson, Appendix A. */
link a6,IMM (-12)
moveml d2-d4,sp@
movel a6@(8),d0
movel a6@(12),d1
clrl d2 | clear p
moveq IMM (31),d4
L1: addl d0,d0 | shift reg pair (p,a) one bit left
addxl d2,d2
movl d2,d3 | subtract b from p, store in tmp.
subl d1,d3
jcs L2 | if no carry,
bset IMM (0),d0 | set the low order bit of a to 1,
movl d3,d2 | and store tmp in p.
L2: subql IMM (1),d4
jcc L1
moveml sp@,d2-d4 | restore data registers
unlk a6 | and return
rts
#endif /* __mcoldfire__ */
#endif /* L_udivsi3 */
#ifdef L_divsi3
| long __divsi3(long dividend, long divisor);
| Signed division: record the result sign in d2 (negated once per
| negative operand), divide the absolute values via
| __udivsi3_internal, then negate the quotient if d2 ended up negative.
.text
FUNC(__divsi3)
.globl SYM (__divsi3)
.globl SYM (__divsi3_internal)
.hidden SYM (__divsi3_internal)
SYM (__divsi3):
SYM (__divsi3_internal):
movel d2, sp@-
moveq IMM (1), d2 /* sign of result stored in d2 (=1 or =-1) */
movel sp@(12), d1 /* d1 = divisor */
jpl L1
negl d1
#ifndef __mcoldfire__
negb d2 /* change sign because divisor <0 */
#else
negl d2 /* change sign because divisor <0 */
#endif
L1: movel sp@(8), d0 /* d0 = dividend */
jpl L2
negl d0
#ifndef __mcoldfire__
negb d2
#else
negl d2
#endif
L2: movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__udivsi3_internal) /* divide abs(dividend) by abs(divisor) */
addql IMM (8), sp
tstb d2
jpl L3
negl d0 /* signs differed: negate quotient */
L3: movel sp@+, d2
rts
#endif /* L_divsi3 */
#ifdef L_umodsi3
| unsigned long __umodsi3(unsigned long a, unsigned long b);
| Unsigned remainder computed as a - (a/b)*b, reusing __udivsi3_internal
| and (on classic 68k) __mulsi3_internal; ColdFire has a 32-bit mulsl.
.text
FUNC(__umodsi3)
.globl SYM (__umodsi3)
SYM (__umodsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__udivsi3_internal)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__mulsi3_internal) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#endif
movel sp@(4), d1 /* d1 = dividend */
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
#endif /* L_umodsi3 */
#ifdef L_modsi3
| long __modsi3(long a, long b);
| Signed remainder computed as a - (a/b)*b via __divsi3_internal, so
| the result takes its sign from the dividend (C truncated division).
.text
FUNC(__modsi3)
.globl SYM (__modsi3)
SYM (__modsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__divsi3_internal)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__mulsi3_internal) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#endif
movel sp@(4), d1 /* d1 = dividend */
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
#endif /* L_modsi3 */
#ifdef L_double
| Double-precision section: IEEE-754 double layout constants and the
| common exception-return stubs.  Each Ld$ stub loads the result value
| into d0:d1, sets the exception type (d7) and format (d6), and jumps
| into $_exception_handler, which also unwinds the caller's a6 frame.
| The callers are expected to have the result sign bit in d7 (for the
| or'ed-in sign) and the operation code in d5 on entry to the stubs.
.globl SYM (_fpCCR)
.globl $_exception_handler
QUIET_NaN = 0xffffffff
D_MAX_EXP = 0x07ff
D_BIAS = 1022
DBL_MAX_EXP = D_MAX_EXP - D_BIAS
DBL_MIN_EXP = 1 - D_BIAS
DBL_MANT_DIG = 53
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010
DOUBLE_FLOAT = 2
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7
UNKNOWN = -1
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
| Entry points:
.globl SYM (__adddf3)
.globl SYM (__subdf3)
.globl SYM (__muldf3)
.globl SYM (__divdf3)
.globl SYM (__negdf2)
.globl SYM (__cmpdf2)
.globl SYM (__cmpdf2_internal)
.hidden SYM (__cmpdf2_internal)
.text
.even
| These are common routines to return and signal exceptions.
Ld$den:
| Return and signal a denormalized number
orl d7,d0 | merge sign into the prepared result
movew IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
Ld$infty:
Ld$overflow:
| Return a properly signed INFINITY and set the exception flags
movel IMM (0x7ff00000),d0 | exponent all-ones, fraction zero
movel IMM (0),d1
orl d7,d0
movew IMM (INEXACT_RESULT+OVERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
Ld$underflow:
| Return 0 and set the exception flags
movel IMM (0),d0
movel d0,d1
movew IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
Ld$inop:
| Return a quiet NaN and set the exception flags
movel IMM (QUIET_NaN),d0
movel d0,d1
movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
Ld$div$0:
| Return a properly signed INFINITY and set the exception flags
movel IMM (0x7ff00000),d0
movel IMM (0),d1
orl d7,d0
movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
|=============================================================================
|=============================================================================
| double precision routines
|=============================================================================
|=============================================================================
| A double precision floating point number (double) has the format:
|
| struct _double {
| unsigned int sign : 1; /* sign bit */
| unsigned int exponent : 11; /* exponent, shifted by 126 */
| unsigned int fraction : 52; /* fraction */
| } double;
|
| Thus sizeof(double) = 8 (64 bits).
|
| All the routines are callable from C programs, and return the result
| in the register pair d0-d1. They also preserve all registers except
| d0-d1 and a0-a1.
|=============================================================================
| __subdf3
|=============================================================================
| double __subdf3(double, double);
FUNC(__subdf3)
SYM (__subdf3):
bchg IMM (31),sp@(12) | change sign of second operand
| and fall through, so we always add
|=============================================================================
| __adddf3
|=============================================================================
| double __adddf3(double, double);
FUNC(__adddf3)
SYM (__adddf3):
#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers and a2 (but d0-d1)
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get first operand
movel a6@(12),d1 |
movel a6@(16),d2 | get second operand
movel a6@(20),d3 |
movel d0,d7 | get d0's sign bit in d7 '
addl d1,d1 | check and clear sign bit of a, and gain one
addxl d0,d0 | bit of extra precision
beq Ladddf$b | if zero return second operand
movel d2,d6 | save sign in d6
addl d3,d3 | get rid of sign bit and gain one bit of
addxl d2,d2 | extra precision
beq Ladddf$a | if zero return first operand
andl IMM (0x80000000),d7 | isolate a's sign bit '
swap d6 | and also b's sign bit '
#ifndef __mcoldfire__
andw IMM (0x8000),d6 |
orw d6,d7 | and combine them into d7, so that a's sign '
| bit is in the high word and b's is in the '
| low word, so d6 is free to be used
#else
andl IMM (0x8000),d6
orl d6,d7
#endif
movel d7,a0 | now save d7 into a0, so d7 is free to
| be used also
| Get the exponents and check for denormalized and/or infinity.
movel IMM (0x001fffff),d6 | mask for the fraction
movel IMM (0x00200000),d7 | mask to put hidden bit back
movel d0,d4 |
andl d6,d0 | get fraction in d0
notl d6 | make d6 into mask for the exponent
andl d6,d4 | get exponent in d4
beq Ladddf$a$den | branch if a is denormalized
cmpl d6,d4 | check for INFINITY or NaN
beq Ladddf$nf |
orl d7,d0 | and put hidden bit back
Ladddf$1:
swap d4 | shift right exponent so that it starts
#ifndef __mcoldfire__
lsrw IMM (5),d4 | in bit 0 and not bit 20
#else
lsrl IMM (5),d4 | in bit 0 and not bit 20
#endif
| Now we have a's exponent in d4 and fraction in d0-d1 '
movel d2,d5 | save b to get exponent
andl d6,d5 | get exponent in d5
beq Ladddf$b$den | branch if b is denormalized
cmpl d6,d5 | check for INFINITY or NaN
beq Ladddf$nf
notl d6 | make d6 into mask for the fraction again
andl d6,d2 | and get fraction in d2
orl d7,d2 | and put hidden bit back
Ladddf$2:
swap d5 | shift right exponent so that it starts
#ifndef __mcoldfire__
lsrw IMM (5),d5 | in bit 0 and not bit 20
#else
lsrl IMM (5),d5 | in bit 0 and not bit 20
#endif
| Now we have b's exponent in d5 and fraction in d2-d3. '
| The situation now is as follows: the signs are combined in a0, the
| numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
| and d5 (b). To do the rounding correctly we need to keep all the
| bits until the end, so we need to use d0-d1-d2-d3 for the first number
| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
| exponents in a2-a3.
#ifndef __mcoldfire__
moveml a2-a3,sp@- | save the address registers
#else
movel a2,sp@-
movel a3,sp@-
movel a4,sp@-
#endif
movel d4,a2 | save the exponents
movel d5,a3 |
movel IMM (0),d7 | and move the numbers around
movel d7,d6 |
movel d3,d5 |
movel d2,d4 |
movel d7,d3 |
movel d7,d2 |
| Here we shift the numbers until the exponents are the same, and put
| the largest exponent in a2.
#ifndef __mcoldfire__
exg d4,a2 | get exponents back
exg d5,a3 |
cmpw d4,d5 | compare the exponents
#else
movel d4,a4 | get exponents back
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
cmpl d4,d5 | compare the exponents
#endif
beq Ladddf$3 | if equal don't shift '
bhi 9f | branch if second exponent is higher
| Here we have a's exponent larger than b's, so we have to shift b. We do
| this by using as counter d2:
1: movew d4,d2 | move largest exponent to d2
#ifndef __mcoldfire__
subw d5,d2 | and subtract second exponent
exg d4,a2 | get back the longs we saved
exg d5,a3 |
#else
subl d5,d2 | and subtract second exponent
movel d4,a4 | get back the longs we saved
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d2
#else
cmpl IMM (DBL_MANT_DIG+2),d2
#endif
bge Ladddf$b$small
#ifndef __mcoldfire__
cmpw IMM (32),d2 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d2 | if difference >= 32, shift by longs
#endif
bge 5f
2:
#ifndef __mcoldfire__
cmpw IMM (16),d2 | if difference >= 16, shift by words
#else
cmpl IMM (16),d2 | if difference >= 16, shift by words
#endif
bge 6f
bra 3f | enter dbra loop
4:
#ifndef __mcoldfire__
lsrl IMM (1),d4
roxrl IMM (1),d5
roxrl IMM (1),d6
roxrl IMM (1),d7
#else
lsrl IMM (1),d7
btst IMM (0),d6
beq 10f
bset IMM (31),d7
10: lsrl IMM (1),d6
btst IMM (0),d5
beq 11f
bset IMM (31),d6
11: lsrl IMM (1),d5
btst IMM (0),d4
beq 12f
bset IMM (31),d5
12: lsrl IMM (1),d4
#endif
3:
#ifndef __mcoldfire__
dbra d2,4b
#else
subql IMM (1),d2
bpl 4b
#endif
movel IMM (0),d2
movel d2,d3
bra Ladddf$4
5:
movel d6,d7
movel d5,d6
movel d4,d5
movel IMM (0),d4
#ifndef __mcoldfire__
subw IMM (32),d2
#else
subl IMM (32),d2
#endif
bra 2b
6:
movew d6,d7
swap d7
movew d5,d6
swap d6
movew d4,d5
swap d5
movew IMM (0),d4
swap d4
#ifndef __mcoldfire__
subw IMM (16),d2
#else
subl IMM (16),d2
#endif
bra 3b
9:
#ifndef __mcoldfire__
exg d4,d5
movew d4,d6
subw d5,d6 | keep d5 (largest exponent) in d4
exg d4,a2
exg d5,a3
#else
movel d5,d6
movel d4,d5
movel d6,d4
subl d5,d6
movel d4,a4
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d6
#else
cmpl IMM (DBL_MANT_DIG+2),d6
#endif
bge Ladddf$a$small
#ifndef __mcoldfire__
cmpw IMM (32),d6 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d6 | if difference >= 32, shift by longs
#endif
bge 5f
2:
#ifndef __mcoldfire__
cmpw IMM (16),d6 | if difference >= 16, shift by words
#else
cmpl IMM (16),d6 | if difference >= 16, shift by words
#endif
bge 6f
bra 3f | enter dbra loop
4:
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
#endif
3:
#ifndef __mcoldfire__
dbra d6,4b
#else
subql IMM (1),d6
bpl 4b
#endif
movel IMM (0),d7
movel d7,d6
bra Ladddf$4
5:
movel d2,d3
movel d1,d2
movel d0,d1
movel IMM (0),d0
#ifndef __mcoldfire__
subw IMM (32),d6
#else
subl IMM (32),d6
#endif
bra 2b
6:
movew d2,d3
swap d3
movew d1,d2
swap d2
movew d0,d1
swap d1
movew IMM (0),d0
swap d0
#ifndef __mcoldfire__
subw IMM (16),d6
#else
subl IMM (16),d6
#endif
bra 3b
Ladddf$3:
| Swap (d4,d5) with (a2,a3) so the operand roles match what the code at
| Ladddf$4 expects.
#ifndef __mcoldfire__
exg d4,a2
exg d5,a3
#else
| ColdFire has no exg: emulate both exchanges through scratch a4.
movel d4,a4
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
Ladddf$4:
| Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
| the signs in a4.
| Here we have to decide whether to add or subtract the numbers:
#ifndef __mcoldfire__
exg d7,a0 | get the signs
exg d6,a3 | a3 is free to be used
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
movel d7,d6 |
movew IMM (0),d7 | get a's sign in d7 '
swap d6 |
movew IMM (0),d6 | and b's sign in d6 '
eorl d7,d6 | compare the signs
bmi Lsubdf$0 | if the signs are different we have
| to subtract
#ifndef __mcoldfire__
exg d7,a0 | else we add the numbers
exg d6,a3 |
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
| 128-bit add of the fractions: d4-d7 into d0-d3, carry chained via addx.
addl d7,d3 |
addxl d6,d2 |
addxl d5,d1 |
addxl d4,d0 |
movel a2,d4 | return exponent to d4
movel a0,d7 |
andl IMM (0x80000000),d7 | d7 now has the sign
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
| Fraction overflowed by one bit: shift the 128-bit value right once and
| bump the exponent to compensate.
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
| ColdFire lacks roxr: propagate each shifted-out bit by hand.
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| Dispatch to the rounding routine selected by the mode stored in _fpCCR;
| the rounding code returns through a0 (to Ladddf$5).
lea pc@(Ladddf$5),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Ladddf$5:
| Put back the exponent and check for overflow
#ifndef __mcoldfire__
cmpw IMM (0x7ff),d4 | is the exponent big?
#else
cmpl IMM (0x7ff),d4 | is the exponent big?
#endif
bge 1f
bclr IMM (DBL_MANT_DIG-1),d0 | clear the hidden bit
#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
bra Ladddf$ret
1:
moveq IMM (ADD),d5 | exponent >= 0x7ff: signal overflow
bra Ld$overflow
Lsubdf$0:
| Here we do the subtraction.
#ifndef __mcoldfire__
exg d7,a0 | put sign back in a0
exg d6,a3 |
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
| 128-bit subtract of the fractions: d0-d3 <- (d0-d3) - (d4-d7).
subl d7,d3 |
subxl d6,d2 |
subxl d5,d1 |
subxl d4,d0 |
beq Ladddf$ret$1 | if zero just exit
bpl 1f | if positive skip the following
| Result went negative: flip the saved sign bit and negate the value.
movel a0,d7 |
bchg IMM (31),d7 | change sign bit in d7
movel d7,a0 |
negl d3 |
negxl d2 |
negxl d1 | and negate result
negxl d0 |
1:
movel a2,d4 | return exponent to d4
movel a0,d7
andl IMM (0x80000000),d7 | isolate sign bit
#ifndef __mcoldfire__
moveml sp@+,a2-a3 |
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
| ColdFire lacks roxr: propagate each shifted-out bit by hand.
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| Dispatch to the rounding routine selected by the mode in _fpCCR;
| the rounding code returns through a0 (to Lsubdf$1).
lea pc@(Lsubdf$1),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lsubdf$1:
| Put back the exponent and sign (we don't have overflow). '
bclr IMM (DBL_MANT_DIG-1),d0 | clear the hidden bit
#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
bra Ladddf$ret
| If one of the numbers was too small (difference of exponents >=
| DBL_MANT_DIG+1) we return the other (and now we don't have to '
| check for finiteness or zero).
Ladddf$a$small:
| a is negligible compared to b: restore registers and return b unchanged.
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
movel a6@(16),d0 | reload b from the stack frame
movel a6@(20),d1
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@ | clear the exception flags
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
Ladddf$b$small:
| b is negligible compared to a: restore registers and return a unchanged.
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
movel a6@(8),d0 | reload a from the stack frame
movel a6@(12),d1
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@ | clear the exception flags
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
Ladddf$a$den:
| a is denormalized: substitute the denormal exponent encoding preset
| in d7 (the setup is before this hunk) and rejoin the main path.
movel d7,d4 | d7 contains 0x00200000
bra Ladddf$1
Ladddf$b$den:
movel d7,d5 | d7 contains 0x00200000
notl d6 | presumably re-inverts the mask in d6 for Ladddf$2 — setup not visible here
bra Ladddf$2
Ladddf$b:
| Return b (if a is zero)
movel d2,d0
movel d3,d1
bne 1f | Check if b is -0
cmpl IMM (0x80000000),d0
bne 1f
| b is -0 and a is a zero: the result takes a's sign.
andl IMM (0x80000000),d7 | Use the sign of a
clrl d0
bra Ladddf$ret
Ladddf$a:
| Return a (if b is zero).
movel a6@(8),d0
movel a6@(12),d1
1:
moveq IMM (ADD),d5
| Check for NaN and +/-INFINITY.
movel d0,d7 | save the sign bit
andl IMM (0x80000000),d7 |
bclr IMM (31),d0 |
cmpl IMM (0x7ff00000),d0 |
bge 2f | not finite: decide NaN vs INFINITY below
movel d0,d0 | check for zero, since we don't '
bne Ladddf$ret | want to return -0 by mistake
bclr IMM (31),d7 | result is zero: force the sign to +
bra Ladddf$ret |
2:
andl IMM (0x000fffff),d0 | check for NaN (nonzero fraction)
orl d1,d0 |
bne Ld$inop |
bra Ld$infty |
Ladddf$ret$1:
#ifndef __mcoldfire__
moveml sp@+,a2-a3 | restore regs and exit
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
Ladddf$ret:
| Normal exit.
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@ | clear the exception flags
orl d7,d0 | put sign bit back
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Ladddf$ret$den:
| Return a denormalized number.
#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right once more
roxrl IMM (1),d1 |
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif
bra Ladddf$ret
Ladddf$nf:
| At least one operand is NaN or +/-INFINITY.
moveq IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
movel a6@(8),d0 | get the numbers back (remember that we
movel a6@(12),d1 | did some processing already)
movel a6@(16),d2 |
movel a6@(20),d3 |
movel IMM (0x7ff00000),d4 | useful constant (INFINITY)
movel d0,d7 | save sign bits
movel d2,d6 |
bclr IMM (31),d0 | clear sign bits
bclr IMM (31),d2 |
| We know that one of them is either NaN of +/-INFINITY
| Check for NaN (if either one is NaN return NaN)
cmpl d4,d0 | check first a (d0)
bhi Ld$inop | if d0 > 0x7ff00000 or equal and
bne 2f
tstl d1 | d1 > 0, a is NaN
bne Ld$inop |
2: cmpl d4,d2 | check now b (d2)
bhi Ld$inop |
bne 3f
tstl d3 |
bne Ld$inop |
3:
| Now comes the check for +/-INFINITY. We know that both are (maybe not
| finite) numbers, but we have to check if both are infinite whether we
| are adding or subtracting them.
eorl d7,d6 | to check sign bits
bmi 1f
andl IMM (0x80000000),d7 | get (common) sign bit
bra Ld$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they have to be infinite both, so we
| return NaN).
cmpl d2,d0 | are both infinite?
bne 1f | if d0 <> d2 they are not equal
cmpl d3,d1 | if d0 == d2 test d3 and d1
beq Ld$inop | if equal return NaN
1:
andl IMM (0x80000000),d7 | get a's sign bit '
cmpl d4,d0 | test now for infinity
beq Ld$infty | if a is INFINITY return with this sign
bchg IMM (31),d7 | else we know b is INFINITY and has
bra Ld$infty | the opposite sign
|=============================================================================
| __muldf3
|=============================================================================
| double __muldf3(double, double);
| In:  a at a6@(8)-(12), b at a6@(16)-(20) (IEEE double, high long first)
| Out: product in d0-d1
| Multiplies the two 53-bit fractions with a shift-and-add loop, then
| rounds and exits through the shared Lround$exit path.
FUNC(__muldf3)
SYM (__muldf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save the callee-saved data registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0-d1
movel a6@(12),d1 |
movel a6@(16),d2 | and b into d2-d3
movel a6@(20),d3 |
movel d0,d7 | d7 will hold the sign of the product
eorl d2,d7 | (XOR of the operand signs)
andl IMM (0x80000000),d7 |
movel d7,a0 | save sign bit into a0
movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
movel d7,d6 | another (mask for fraction)
notl d6 |
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d4 |
orl d1,d4 |
beq Lmuldf$a$0 | branch if a is zero
movel d0,d4 |
bclr IMM (31),d2 | get rid of b's sign bit '
movel d2,d5 |
orl d3,d5 |
beq Lmuldf$b$0 | branch if b is zero
movel d2,d5 |
cmpl d7,d0 | is a big?
bhi Lmuldf$inop | if a is NaN return NaN
beq Lmuldf$a$nf | we still have to check d1 and b ...
cmpl d7,d2 | now compare b with INFINITY
bhi Lmuldf$inop | is b NaN?
beq Lmuldf$b$nf | we still have to check d3 ...
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d4 and d5.
andl d7,d4 | isolate exponent in d4
beq Lmuldf$a$den | if exponent zero, have denormalized
andl d6,d0 | isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
#endif
Lmuldf$1:
andl d7,d5 | same treatment for b's exponent '
beq Lmuldf$b$den |
andl d6,d2 |
orl IMM (0x00100000),d2 | and put hidden bit back
swap d5 |
#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Lmuldf$2: |
#ifndef __mcoldfire__
addw d5,d4 | add exponents
subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
#else
addl d5,d4 | add exponents
subl IMM (D_BIAS+1),d4 | and subtract bias (plus one)
#endif
| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
| denormalized to start with!), which means that in the product bit 104
| (which will correspond to bit 8 of the fourth long) is set.
| Here we have to do the product.
| To do it we have to juggle the registers back and forth, as there are not
| enough to keep everything in them. So we use the address registers to keep
| some intermediate data.
#ifndef __mcoldfire__
moveml a2-a3,sp@- | save a2 and a3 for temporary use
#else
movel a2,sp@-
movel a3,sp@-
movel a4,sp@-
#endif
movel IMM (0),a2 | a2 is a null register
movel d4,a3 | and a3 will preserve the exponent
| First, shift d2-d3 so bit 20 becomes bit 31:
#ifndef __mcoldfire__
rorl IMM (5),d2 | rotate d2 5 places right
swap d2 | and swap it
rorl IMM (5),d3 | do the same thing with d3
swap d3 |
movew d3,d6 | get the rightmost 11 bits of d3
andw IMM (0x07ff),d6 |
orw d6,d2 | and put them into d2
andw IMM (0xf800),d3 | clear those bits in d3
#else
moveq IMM (11),d7 | left shift d2 11 bits
lsll d7,d2
movel d3,d6 | get a copy of d3
lsll d7,d3 | left shift d3 11 bits
andl IMM (0xffe00000),d6 | get the top 11 bits of d3
moveq IMM (21),d7 | right shift them 21 bits
lsrl d7,d6
orl d6,d2 | stick them at the end of d2
#endif
movel d2,d6 | move b into d6-d7
movel d3,d7 | move a into d4-d5
movel d0,d4 | and clear d0-d1-d2-d3 (to put result)
movel d1,d5 |
movel IMM (0),d3 |
movel d3,d2 |
movel d3,d1 |
movel d3,d0 |
| We use a1 as counter:
movel IMM (DBL_MANT_DIG-1),a1
#ifndef __mcoldfire__
exg d7,a1
#else
movel d7,a4
movel a1,d7
movel a4,a1
#endif
| Shift-and-add loop: for each bit of b (MSB first) shift the 128-bit
| partial sum left, and add a when the shifted-out bit of b is set.
1:
#ifndef __mcoldfire__
exg d7,a1 | put counter back in a1
#else
movel d7,a4
movel a1,d7
movel a4,a1
#endif
addl d3,d3 | shift sum once left
addxl d2,d2 |
addxl d1,d1 |
addxl d0,d0 |
addl d7,d7 | shift b left; next bit goes to C
addxl d6,d6 |
bcc 2f | if bit clear skip the following
#ifndef __mcoldfire__
exg d7,a2 | d7 <-> 0 (a2 is the null register)
#else
movel d7,a4
movel a2,d7
movel a4,a2
#endif
addl d5,d3 | else add a to the sum
addxl d4,d2 |
addxl d7,d1 | (d7 is zero here: just propagate carry)
addxl d7,d0 |
#ifndef __mcoldfire__
exg d7,a2 | restore d7 (b's shifted low long) '
#else
movel d7,a4
movel a2,d7
movel a4,a2
#endif
2:
#ifndef __mcoldfire__
exg d7,a1 | put counter in d7
dbf d7,1b | decrement and branch
#else
movel d7,a4
movel a1,d7
movel a4,a1
subql IMM (1),d7
bpl 1b
#endif
movel a3,d4 | restore exponent
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
| Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
| first thing to do now is to normalize it so bit 8 becomes bit
| DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
swap d0
swap d1
movew d1,d0
swap d2
movew d2,d1
swap d3
movew d3,d2
movew IMM (0),d3
| The swap/movew sequence above shifted left 16; now shift right 3 so the
| top bit lands on bit DBL_MANT_DIG-32.
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
#else
moveq IMM (29),d6
lsrl IMM (3),d3
movel d2,d7
lsll d6,d7
orl d7,d3
lsrl IMM (3),d2
movel d1,d7
lsll d6,d7
orl d7,d2
lsrl IMM (3),d1
movel d0,d7
lsll d6,d7
orl d7,d1
lsrl IMM (3),d0
#endif
| Now round, check for over- and underflow, and exit.
movel a0,d7 | get sign bit back into d7
moveq IMM (MULTIPLY),d5
btst IMM (DBL_MANT_DIG+1-32),d0
beq Lround$exit
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addl IMM (1),d4
#endif
bra Lround$exit
Lmuldf$inop:
| One of the operands is NaN: return NaN.
moveq IMM (MULTIPLY),d5
bra Ld$inop
Lmuldf$b$nf:
moveq IMM (MULTIPLY),d5
movel a0,d7 | get sign bit back into d7
tstl d3 | we know d2 == 0x7ff00000, so check d3
bne Ld$inop | if d3 <> 0 b is NaN
bra Ld$overflow | else we have overflow (since a is finite)
Lmuldf$a$nf:
moveq IMM (MULTIPLY),d5
movel a0,d7 | get sign bit back into d7
tstl d1 | we know d0 == 0x7ff00000, so check d1
bne Ld$inop | if d1 <> 0 a is NaN
bra Ld$overflow | else signal overflow
| If either number is zero return zero, unless the other is +/-INFINITY or
| NaN, in which case we return NaN.
Lmuldf$b$0:
moveq IMM (MULTIPLY),d5
#ifndef __mcoldfire__
exg d2,d0 | put b (==0) into d0-d1
exg d3,d1 | and a (with sign bit cleared) into d2-d3
movel a0,d0 | set result sign
#else
movel d0,d2 | put a into d2-d3
movel d1,d3
movel a0,d0 | put result zero into d0-d1
movq IMM(0),d1
#endif
bra 1f
Lmuldf$a$0:
movel a0,d0 | set result sign (a == 0, so d1 is already 0)
movel a6@(16),d2 | put b into d2-d3 again
movel a6@(20),d3 |
bclr IMM (31),d2 | clear sign bit
1: cmpl IMM (0x7ff00000),d2 | check for non-finiteness
bge Ld$inop | in case NaN or +/-INFINITY return NaN
PICLEA SYM (_fpCCR),a0 | clear the exception flags and return
movew IMM (0),a0@ | the signed zero already in d0-d1
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
| If a number is denormalized we put an exponent of 1 but do not put the
| hidden bit back into the fraction; instead we shift left until bit 21
| (the hidden bit) is set, adjusting the exponent accordingly. We do this
| to ensure that the product of the fractions is close to 1.
Lmuldf$a$den:
movel IMM (1),d4 | exponent 1 for the denormalized a
andl d6,d0 | keep only the fraction bits
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0 |
#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
#endif
btst IMM (20),d0 |
bne Lmuldf$1 | normalized: rejoin the main path
bra 1b
Lmuldf$b$den:
movel IMM (1),d5 | exponent 1 for the denormalized b
andl d6,d2 | keep only the fraction bits
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2 |
#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
#endif
btst IMM (20),d2 |
bne Lmuldf$2 | normalized: rejoin the main path
bra 1b
|=============================================================================
| __divdf3
|=============================================================================
| double __divdf3(double, double);
| In:  dividend a at a6@(8)-(12), divisor b at a6@(16)-(20)
| Out: quotient in d0-d1
| Performs bitwise restoring division of the fractions, records a sticky
| bit for rounding, then exits through the shared Lround$exit path.
FUNC(__divdf3)
SYM (__divdf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save the callee-saved data registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0-d1
movel a6@(12),d1 |
movel a6@(16),d2 | and b into d2-d3
movel a6@(20),d3 |
movel d0,d7 | d7 will hold the sign of the result
eorl d2,d7 | (XOR of the operand signs)
andl IMM (0x80000000),d7
movel d7,a0 | save sign into a0
movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
movel d7,d6 | another (mask for fraction)
notl d6 |
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d4 |
orl d1,d4 |
beq Ldivdf$a$0 | branch if a is zero
movel d0,d4 |
bclr IMM (31),d2 | get rid of b's sign bit '
movel d2,d5 |
orl d3,d5 |
beq Ldivdf$b$0 | branch if b is zero
movel d2,d5
cmpl d7,d0 | is a big?
bhi Ldivdf$inop | if a is NaN return NaN
beq Ldivdf$a$nf | if d0 == 0x7ff00000 we check d1
cmpl d7,d2 | now compare b with INFINITY
bhi Ldivdf$inop | if b is NaN return NaN
beq Ldivdf$b$nf | if d2 == 0x7ff00000 we check d3
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d4 and d5 and normalize the numbers to
| ensure that the ratio of the fractions is around 1. We do this by
| making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
| set, even if they were denormalized to start with.
| Thus, the result will satisfy: 2 > result > 1/2.
andl d7,d4 | and isolate exponent in d4
beq Ldivdf$a$den | if exponent is zero we have a denormalized
andl d6,d0 | and isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
#endif
Ldivdf$1: |
andl d7,d5 | same treatment for b's exponent '
beq Ldivdf$b$den |
andl d6,d2 |
orl IMM (0x00100000),d2 | and put hidden bit back
swap d5 |
#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Ldivdf$2: |
#ifndef __mcoldfire__
subw d5,d4 | subtract exponents
addw IMM (D_BIAS),d4 | and add bias
#else
subl d5,d4 | subtract exponents
addl IMM (D_BIAS),d4 | and add bias
#endif
| We are now ready to do the division. We have prepared things in such a way
| that the ratio of the fractions will be less than 2 but greater than 1/2.
| At this point the registers in use are:
| d0-d1 hold a (first operand, bit DBL_MANT_DIG-32=0, bit
| DBL_MANT_DIG-1-32=1)
| d2-d3 hold b (second operand, bit DBL_MANT_DIG-32=1)
| d4 holds the difference of the exponents, corrected by the bias
| a0 holds the sign of the ratio
| To do the rounding correctly we need to keep information about the
| nonsignificant bits. One way to do this would be to do the division
| using four registers; another is to use two registers (as originally
| I did), but use a sticky bit to preserve information about the
| fractional part. Note that we can keep that info in a1, which is not
| used.
movel IMM (0),d6 | d6-d7 will hold the result
movel d6,d7 |
movel IMM (0),a1 | and a1 will hold the sticky bit
| NOTE(review): a1 is initialized here but never read again in this
| routine; the sticky bit is actually placed in d2-d3 below — confirm.
| First quotient long: restoring division, one quotient bit per pass.
movel IMM (DBL_MANT_DIG-32+1),d5
1: cmpl d0,d2 | is a < b?
bhi 3f | if b > a skip the following
beq 4f | if d0==d2 check d1 and d3
2: subl d3,d1 |
subxl d2,d0 | a <-- a - b
bset d5,d6 | set the corresponding bit in d6
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
bra 5f
4: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 3b | if d3 > d1 (so b > a) skip the subtraction
bra 2b | else go do it
5:
| Here we have to start setting the bits in the second long.
movel IMM (31),d5 | again d5 is counter
1: cmpl d0,d2 | is a < b?
bhi 3f | if b > a skip the following
beq 4f | if d0==d2 check d1 and d3
2: subl d3,d1 |
subxl d2,d0 | a <-- a - b
bset d5,d7 | set the corresponding bit in d7
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
bra 5f
4: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 3b | if d3 > d1 (so b > a) skip the subtraction
bra 2b | else go do it
5:
| Now go ahead checking until we hit a one, which we store in d2.
movel IMM (DBL_MANT_DIG),d5
1: cmpl d2,d0 | is a < b?
bhi 4f | if b < a, exit
beq 3f | if d0==d2 check d1 and d3
2: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
movel IMM (0),d2 | here no sticky bit was found
movel d2,d3
bra 5f
3: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 2b | if d3 > d1 (so b > a) go back
4:
| Here put the sticky bit in d2-d3 (in the position which actually corresponds
| to it; if you don't do this the algorithm loses in some cases). '
movel IMM (0),d2
movel d2,d3
#ifndef __mcoldfire__
subw IMM (DBL_MANT_DIG),d5
addw IMM (63),d5
cmpw IMM (31),d5
#else
subl IMM (DBL_MANT_DIG),d5
addl IMM (63),d5
cmpl IMM (31),d5
#endif
bhi 2f
1: bset d5,d3
bra 5f
| NOTE(review): the subtraction below is unreachable (it follows an
| unconditional bra and nothing branches to it). It is harmless in
| practice because bset on a data register numbers bits modulo 32,
| so the explicit -32 adjustment is not needed before "2:".
#ifndef __mcoldfire__
subw IMM (32),d5
#else
subl IMM (32),d5
#endif
2: bset d5,d2
5:
| Finally we are finished! Move the longs in the address registers to
| their final destination:
movel d6,d0
movel d7,d1
movel IMM (0),d3 | NOTE(review): this discards any sticky bit just set in d3 at "1:" above (d5 <= 31 case) — verify intent
| Here we have finished the division, with the result in d0-d1-d2-d3, with
| 2^21 <= d0 < 2^23 (the quotient long built in d6 is now in d0).
| Thus bit 23 is not set, but bit 22 could be set.
| If it is not, then definitely bit 21 is set. Normalize so bit 22 is
| not set:
btst IMM (DBL_MANT_DIG-32+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| Now round, check for over- and underflow, and exit.
movel a0,d7 | restore sign bit to d7
moveq IMM (DIVIDE),d5
bra Lround$exit
Ldivdf$inop:
| One of the operands is NaN: return NaN.
moveq IMM (DIVIDE),d5
bra Ld$inop
Ldivdf$a$0:
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero.
moveq IMM (DIVIDE),d5
bclr IMM (31),d2 |
movel d2,d4 |
orl d3,d4 |
beq Ld$inop | if b is also zero return NaN
cmpl IMM (0x7ff00000),d2 | check for NaN
bhi Ld$inop |
blt 1f |
tstl d3 |
bne Ld$inop |
1: movel a0,d0 | else return signed zero
moveq IMM(0),d1 |
PICLEA SYM (_fpCCR),a0 | clear exception flags
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |
Ldivdf$b$0:
moveq IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
movel a0,d7 | put the result sign (a XOR b, from a0) in d7
cmpl IMM (0x7ff00000),d0 | compare d0 with INFINITY
bhi Ld$inop | if larger it is NaN
tstl d1 |
bne Ld$inop |
bra Ld$div$0 | else signal DIVIDE_BY_ZERO
Ldivdf$b$nf:
moveq IMM (DIVIDE),d5
| If d2 == 0x7ff00000 we have to check d3.
tstl d3 |
bne Ld$inop | if d3 <> 0, b is NaN
bra Ld$underflow | else b is +/-INFINITY, so signal underflow
Ldivdf$a$nf:
moveq IMM (DIVIDE),d5
| If d0 == 0x7ff00000 we have to check d1.
tstl d1 |
bne Ld$inop | if d1 <> 0, a is NaN
| If a is INFINITY we have to check b
cmpl d7,d2 | compare b with INFINITY
bge Ld$inop | if b is NaN or INFINITY return NaN
tstl d3 |
bne Ld$inop | NOTE(review): this returns NaN for any finite b with a nonzero low fraction long — looks overly strict; confirm intent
bra Ld$overflow | else return overflow
| If a number is denormalized we put an exponent of 1 but do not put the
| bit back into the fraction.
Ldivdf$a$den:
movel IMM (1),d4 | exponent 1 for the denormalized a
andl d6,d0 | keep only the fraction bits
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0
#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
#endif
btst IMM (DBL_MANT_DIG-32-1),d0
bne Ldivdf$1 | normalized: rejoin the main path
bra 1b
Ldivdf$b$den:
movel IMM (1),d5 | exponent 1 for the denormalized b
andl d6,d2 | keep only the fraction bits
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2
#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
#endif
btst IMM (DBL_MANT_DIG-32-1),d2
bne Ldivdf$2 | normalized: rejoin the main path
bra 1b
Lround$exit:
| This is a common exit point for __muldf3 and __divdf3. When they enter
| this point the sign of the result is in d7, the result in d0-d1, normalized
| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
| First check for underlow in the exponent:
#ifndef __mcoldfire__
cmpw IMM (-DBL_MANT_DIG-1),d4
#else
cmpl IMM (-DBL_MANT_DIG-1),d4
#endif
blt Ld$underflow
| It could happen that the exponent is less than 1, in which case the
| number is denormalized. In this case we shift right and adjust the
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
movel d7,a0 | stash the sign; d6-d7 are needed as scratch
movel IMM (0),d6 | use d6-d7 to collect bits flushed right
movel d6,d7 | use d6-d7 to collect bits flushed right
#ifndef __mcoldfire__
cmpw IMM (1),d4 | if the exponent is less than 1 we
#else
cmpl IMM (1),d4 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
#ifndef __mcoldfire__
addw IMM (1),d4 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
roxrl IMM (1),d2 |
roxrl IMM (1),d3 |
roxrl IMM (1),d6 |
roxrl IMM (1),d7 |
cmpw IMM (1),d4 | is the exponent 1 already?
#else
| ColdFire lacks roxr: propagate each shifted-out bit by hand through
| the whole d0-d1-d2-d3-d6-d7 chain.
addl IMM (1),d4 | adjust the exponent
lsrl IMM (1),d7
btst IMM (0),d6
beq 13f
bset IMM (31),d7
13: lsrl IMM (1),d6
btst IMM (0),d3
beq 14f
bset IMM (31),d6
14: lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
cmpl IMM (1),d4 | is the exponent 1 already?
#endif
beq 2f | exponent reached 1: stop shifting
bra 1b | otherwise keep shifting
bra Ld$underflow | safety check, shouldn't execute '
2: orl d6,d2 | this is a trick so we don't lose '
orl d7,d3 | the bits which were flushed right
movel a0,d7 | get back sign bit into d7
| Now call the rounding routine (which takes care of denormalized numbers):
lea pc@(Lround$0),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lround$0:
| Here we have a correctly rounded result (either normalized or denormalized).
| Here we should have either a normalized number or a denormalized one, and
| the exponent is necessarily larger or equal to 1 (so we don't have to '
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 0x7ff).
#ifndef __mcoldfire__
cmpw IMM (0x07ff),d4
#else
cmpl IMM (0x07ff),d4
#endif
bge Ld$overflow
| Now check for a denormalized number (exponent==0):
movew d4,d4 | move-to-self just sets the condition codes
beq Ld$den
1:
| Put back the exponents and sign and return.
#ifndef __mcoldfire__
lslw IMM (4),d4 | exponent back to fourth byte
#else
lsll IMM (4),d4 | exponent back to fourth byte
#endif
bclr IMM (DBL_MANT_DIG-32-1),d0 | clear the hidden bit
swap d0 | and put back exponent
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
orl d7,d0 | and sign also
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@ | clear the exception flags
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
|=============================================================================
| __negdf2
|=============================================================================
| double __negdf2(double);
| Negates the double at a6@(8)-(12); result in d0-d1.
| NaN input goes to the NaN handler; note that +/-0 returns +0 (the sign
| of a zero input is deliberately not preserved — see the "2:" path).
FUNC(__negdf2)
SYM (__negdf2):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (NEGATE),d5
movel a6@(8),d0 | get number to negate in d0-d1
movel a6@(12),d1 |
bchg IMM (31),d0 | negate
movel d0,d2 | make a positive copy (for the tests)
bclr IMM (31),d2 |
movel d2,d4 | check for zero
orl d1,d4 |
beq 2f | if zero (either sign) return +zero
cmpl IMM (0x7ff00000),d2 | compare to +INFINITY
blt 1f | if finite, return
bhi Ld$inop | if larger (fraction not zero) is NaN
tstl d1 | if d2 == 0x7ff00000 check d1
bne Ld$inop |
movel d0,d7 | else get sign and return INFINITY
andl IMM (0x80000000),d7
bra Ld$infty
1: PICLEA SYM (_fpCCR),a0 | normal exit: clear flags and return
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
2: bclr IMM (31),d0 | zero: force the sign to + and return
bra 1b
|=============================================================================
| __cmpdf2
|=============================================================================
GREATER = 1
LESS = -1
EQUAL = 0
| int __cmpdf2_internal(double a, double b, int nan_result);
| Compares a and b, returning GREATER/LESS/EQUAL in d0. If either operand
| is NaN the caller-supplied third argument is returned through the
| exception-handler path (see Lcmpd$inop).
SYM (__cmpdf2_internal):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (COMPARE),d5
movel a6@(8),d0 | get first operand
movel a6@(12),d1 |
movel a6@(16),d2 | get second operand
movel a6@(20),d3 |
| First check if a and/or b are (+/-) zero and in that case clear
| the sign bit.
movel d0,d6 | copy signs into d6 (a) and d7(b)
bclr IMM (31),d0 | and clear signs in d0 and d2
movel d2,d7 |
bclr IMM (31),d2 |
cmpl IMM (0x7ff00000),d0 | check for a == NaN
bhi Lcmpd$inop | if d0 > 0x7ff00000, a is NaN
beq Lcmpdf$a$nf | if equal can be INFINITY, so check d1
movel d0,d4 | copy into d4 to test for zero
orl d1,d4 |
beq Lcmpdf$a$0 |
Lcmpdf$0:
cmpl IMM (0x7ff00000),d2 | check for b == NaN
bhi Lcmpd$inop | if d2 > 0x7ff00000, b is NaN
beq Lcmpdf$b$nf | if equal can be INFINITY, so check d3
movel d2,d4 |
orl d3,d4 |
beq Lcmpdf$b$0 |
Lcmpdf$1:
| Check the signs
eorl d6,d7 | (d7, b's sign copy, is dead after this)
bpl 1f
| If the signs are not equal check if a >= 0
tstl d6
bpl Lcmpdf$a$gt$b | if (a >= 0 && b < 0) => a > b
bmi Lcmpdf$b$gt$a | if (a < 0 && b >= 0) => a < b
1:
| If the signs are equal check for < 0
tstl d6
bpl 1f
| If both are negative exchange them
#ifndef __mcoldfire__
exg d0,d2
exg d1,d3
#else
movel d0,d7
movel d2,d0
movel d7,d2
movel d1,d7
movel d3,d1
movel d7,d3
#endif
1:
| Now that they are positive we just compare them as longs (does this also
| work for denormalized numbers?).
cmpl d0,d2
bhi Lcmpdf$b$gt$a | |b| > |a|
bne Lcmpdf$a$gt$b | |b| < |a|
| If we got here d0 == d2, so we compare d1 and d3.
cmpl d1,d3
bhi Lcmpdf$b$gt$a | |b| > |a|
bne Lcmpdf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpdf$a$gt$b:
movel IMM (GREATER),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpdf$b$gt$a:
movel IMM (LESS),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpdf$a$0:
| a is +/-0: clear its sign copy so -0 compares equal to +0.
bclr IMM (31),d6
bra Lcmpdf$0
Lcmpdf$b$0:
bclr IMM (31),d7
bra Lcmpdf$1
Lcmpdf$a$nf:
| a's high long equals 0x7ff00000: a is NaN iff the low long is nonzero. '
| NOTE(review): this goes to Ld$inop (returns a NaN bit pattern) rather
| than Lcmpd$inop (returns the caller's NaN result int) — verify intent. '
tstl d1
bne Ld$inop
bra Lcmpdf$0
Lcmpdf$b$nf:
tstl d3
bne Ld$inop
bra Lcmpdf$1
Lcmpd$inop:
| NaN operand: return the caller-supplied value at a6@(24) and signal
| INVALID_OPERATION through the exception handler.
movl a6@(24),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
| int __cmpdf2(double, double);
| Public entry point: forwards to __cmpdf2_internal with 1 as the value
| to return when either operand is NaN.
FUNC(__cmpdf2)
SYM (__cmpdf2):
link a6,IMM (0)
pea 1 | NaN result value (third argument)
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6 | unlk restores sp, discarding the pushed args
rts
|=============================================================================
| rounding routines
|=============================================================================
| The rounding routines expect the number to be normalized in registers
| d0-d1-d2-d3, with the exponent in register d4. They assume that the
| exponent is larger or equal to 1. They return a properly normalized number
| if possible, and a denormalized number otherwise. The exponent is returned
| in d4.
| They are entered with a return address in a0 and exit with "jmp a0@".
Lround$to$nearest:
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
| Here we assume that the exponent is not too small (this should be checked
| before entering the rounding routine), but the number could be denormalized.
| Check for denormalized numbers:
1: btst IMM (DBL_MANT_DIG-32),d0
bne 2f | if set the number is normalized
| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -D_BIAS+1).
#ifndef __mcoldfire__
cmpw IMM (1),d4 | remember that the exponent is at least one
#else
cmpl IMM (1),d4 | remember that the exponent is at least one
#endif
beq 2f | an exponent of one means denormalized
addl d3,d3 | else shift and adjust the exponent
addxl d2,d2 |
addxl d1,d1 |
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d4,1b |
#else
subql IMM (1), d4
bpl 1b
#endif
2:
| Now round: we do it as follows: after the shifting we can write the
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
| If delta < 1, do nothing. If delta > 1, add 1 to f.
| If delta == 1, we make sure the rounded number will be even (odd?)
| (after shifting).
btst IMM (0),d1 | is delta < 1?
beq 2f | if so, do not do anything
orl d2,d3 | is delta == 1?
bne 1f | delta > 1: go round up (add 1)
| delta == 1 exactly: tie — round to even.
movel d1,d3 |
andl IMM (2),d3 | bit 1 is the last significant bit
movel IMM (0),d2 |
addl d3,d1 |
addxl d2,d0 |
bra 2f |
1: movel IMM (1),d3 | else add 1
movel IMM (0),d2 |
addl d3,d1 |
addxl d2,d0
| Shift right once (because we used bit #DBL_MANT_DIG-32!).
2:
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif
| Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
| 'fraction overflow' ...).
btst IMM (DBL_MANT_DIG-32),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
| have to put the exponent to zero and return a denormalized number.
btst IMM (DBL_MANT_DIG-32-1),d0
beq 1f
jmp a0@
1: movel IMM (0),d4
jmp a0@
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
| Directed rounding modes are implemented as plain truncation: return as-is.
jmp a0@
#endif /* L_double */
#ifdef L_float
.globl SYM (_fpCCR)
.globl $_exception_handler
| IEEE single-precision bit patterns and exponent parameters.
| NOTE(review): QUIET_NaN here is the all-ones pattern, not 0x7fffffff.
QUIET_NaN = 0xffffffff
SIGNL_NaN = 0x7f800001
INFINITY = 0x7f800000
F_MAX_EXP = 0xff
F_BIAS = 126
FLT_MAX_EXP = F_MAX_EXP - F_BIAS
FLT_MIN_EXP = 1 - F_BIAS
FLT_MANT_DIG = 24
| Exception flags passed to $_exception_handler in d7 (or-able masks).
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010
| Format code passed in d6.
SINGLE_FLOAT = 1
| Operation codes (kept in d5 so the handler knows which routine failed).
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7
UNKNOWN = -1
| Rounding modes, read from the word at _fpCCR+6 by the arithmetic routines.
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
| Entry points:
.globl SYM (__addsf3)
.globl SYM (__subsf3)
.globl SYM (__mulsf3)
.globl SYM (__divsf3)
.globl SYM (__negsf2)
.globl SYM (__cmpsf2)
.globl SYM (__cmpsf2_internal)
.hidden SYM (__cmpsf2_internal)
Lf$den:
| Return and signal a denormalized number
orl d7,d0
moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
Lf$infty:
Lf$overflow:
| Return a properly signed INFINITY and set the exception flags
movel IMM (INFINITY),d0
orl d7,d0
moveq IMM (INEXACT_RESULT+OVERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
Lf$underflow:
| Return 0 and set the exception flags
moveq IMM (0),d0
moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
Lf$inop:
| Return a quiet NaN and set the exception flags
movel IMM (QUIET_NaN),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
Lf$div$0:
| Return a properly signed INFINITY and set the exception flags
movel IMM (INFINITY),d0
orl d7,d0
moveq IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
|=============================================================================
|=============================================================================
| single precision routines
|=============================================================================
|=============================================================================
| A single precision floating point number (float) has the format:
|
| struct _float {
| unsigned int sign : 1; /* sign bit */
| unsigned int exponent : 8; /* exponent, shifted by 126 */
| unsigned int fraction : 23; /* fraction */
| } float;
|
| Thus sizeof(float) = 4 (32 bits).
|
| All the routines are callable from C programs, and return the result
| in the single register d0. They also preserve all registers except
| d0-d1 and a0-a1.
|=============================================================================
| __subsf3
|=============================================================================
| float __subsf3(float, float);
| float __subsf3(float a, float b): a - b, implemented by flipping the
| sign bit of b in its stack slot and falling straight into __addsf3.
FUNC(__subsf3)
SYM (__subsf3):
bchg IMM (31),sp@(8) | change sign of second operand
| and fall through
|=============================================================================
| __addsf3
|=============================================================================
| float __addsf3(float, float);
| float __addsf3(float a, float b)
| Register use: d0/d1 fraction accumulators (two registers per operand for
| the guard bits), d6/d7 exponents, a0/a1 temporarily hold the sign bits,
| d2-d3 hold b's fraction during alignment.  d2-d7 are saved/restored.
FUNC(__addsf3)
SYM (__addsf3):
#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers but d0-d1
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get first operand
movel a6@(12),d1 | get second operand
movel d0,a0 | get d0's sign bit '
addl d0,d0 | check and clear sign bit of a
beq Laddsf$b | if zero return second operand
movel d1,a1 | save b's sign bit '
addl d1,d1 | get rid of sign bit
beq Laddsf$a | if zero return first operand
| Get the exponents and check for denormalized and/or infinity.
movel IMM (0x00ffffff),d4 | mask to get fraction
movel IMM (0x01000000),d5 | mask to put hidden bit back
movel d0,d6 | save a to get exponent
andl d4,d0 | get fraction in d0
notl d4 | make d4 into a mask for the exponent
andl d4,d6 | get exponent in d6
beq Laddsf$a$den | branch if a is denormalized
cmpl d4,d6 | check for INFINITY or NaN
beq Laddsf$nf
swap d6 | put exponent into first word
orl d5,d0 | and put hidden bit back
Laddsf$1:
| Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
movel d1,d7 | get exponent in d7
andl d4,d7 |
beq Laddsf$b$den | branch if b is denormalized
cmpl d4,d7 | check for INFINITY or NaN
beq Laddsf$nf
swap d7 | put exponent into first word
notl d4 | make d4 into a mask for the fraction
andl d4,d1 | get fraction in d1
orl d5,d1 | and put hidden bit back
Laddsf$2:
| Now we have b's exponent in d7 (second byte) and the mantissa in d1. '
| Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
| shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
| bit).
movel d1,d2 | move b to d2, since we want to use
| two registers to do the sum
movel IMM (0),d1 | and clear the new ones
movel d1,d3 |
| Here we shift the numbers in registers d0 and d1 so the exponents are the
| same, and put the largest exponent in d6. Note that we are using two
| registers for each number (see the discussion by D. Knuth in "Seminumerical
| Algorithms").
#ifndef __mcoldfire__
cmpw d6,d7 | compare exponents
#else
cmpl d6,d7 | compare exponents
#endif
beq Laddsf$3 | if equal don't shift '
bhi 5f | branch if second exponent largest
1:
subl d6,d7 | keep the largest exponent
negl d7
#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$b$small
#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 4f
2:
#ifndef __mcoldfire__
subw IMM (1),d7
#else
subql IMM (1), d7
#endif
3:
#ifndef __mcoldfire__
lsrl IMM (1),d2 | shift right second operand
roxrl IMM (1),d3
dbra d7,3b
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
subql IMM (1), d7
bpl 3b
#endif
bra Laddsf$3
4:
| Difference >= 16: do the shift 16 bits at a time by swapping words.
movew d2,d3
swap d3
movew d3,d2
swap d2
#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
#endif
bne 2b | if still more bits, go back to normal case
bra Laddsf$3
5:
#ifndef __mcoldfire__
exg d6,d7 | exchange the exponents
#else
eorl d6,d7 | three-XOR swap (ColdFire has no exg)
eorl d7,d6
eorl d6,d7
#endif
subl d6,d7 | keep the largest exponent
negl d7 |
#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (and exit!) '
#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$a$small
#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 8f
6:
#ifndef __mcoldfire__
subw IMM (1),d7
#else
subl IMM (1),d7
#endif
7:
#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right first operand
roxrl IMM (1),d1
dbra d7,7b
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
subql IMM (1),d7
bpl 7b
#endif
bra Laddsf$3
8:
movew d0,d1
swap d1
movew d1,d0
swap d0
#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
#endif
bne 6b | if still more bits, go back to normal case
| otherwise we fall through
| Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
| signs are stored in a0 and a1).
Laddsf$3:
| Here we have to decide whether to add or subtract the numbers
#ifndef __mcoldfire__
exg d6,a0 | get signs back
exg d7,a1 | and save the exponents
#else
movel d6,d4
movel a0,d6
movel d4,a0
movel d7,d4
movel a1,d7
movel d4,a1
#endif
eorl d6,d7 | combine sign bits
bmi Lsubsf$0 | if negative a and b have opposite
| sign so we actually subtract the
| numbers
| Here we have both positive or both negative
#ifndef __mcoldfire__
exg d6,a0 | now we have the exponent in d6
#else
movel d6,d4
movel a0,d6
movel d4,a0
#endif
movel a0,d7 | and sign in d7
andl IMM (0x80000000),d7
| Here we do the addition.
addl d3,d1
addxl d2,d0
| Note: now we have d2, d3, d4 and d5 to play with!
| Put the exponent, in the first byte, in d2, to use the "standard" rounding
| routines:
movel d6,d2
#ifndef __mcoldfire__
lsrw IMM (8),d2
#else
lsrl IMM (8),d2
#endif
| Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (FLT_MANT_DIG+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif
addl IMM (1),d2
1:
lea pc@(Laddsf$4),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Laddsf$4:
| Put back the exponent, but check for overflow.
#ifndef __mcoldfire__
cmpw IMM (0xff),d2
#else
cmpl IMM (0xff),d2
#endif
bhi 1f
bclr IMM (FLT_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
#endif
swap d2
orl d2,d0
bra Laddsf$ret
1:
moveq IMM (ADD),d5
bra Lf$overflow
Lsubsf$0:
| We are here if a > 0 and b < 0 (sign bits cleared).
| Here we do the subtraction.
movel d6,d7 | put sign in d7
andl IMM (0x80000000),d7
subl d3,d1 | result in d0-d1
subxl d2,d0 |
beq Laddsf$ret | if zero just exit
bpl 1f | if positive skip the following
bchg IMM (31),d7 | change sign bit in d7
negl d1
negxl d0 | two-register negate (0 - d0:d1)
1:
#ifndef __mcoldfire__
exg d2,a0 | now we have the exponent in d2
lsrw IMM (8),d2 | put it in the first byte
#else
movel d2,d4
movel a0,d2
movel d4,a0
lsrl IMM (8),d2 | put it in the first byte
#endif
| Now d0-d1 is positive and the sign bit is in d7.
| Note that we do not have to normalize, since in the subtraction bit
| #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
| the rounding routines themselves.
lea pc@(Lsubsf$1),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lsubsf$1:
| Put back the exponent (we can't have overflow!). '
bclr IMM (FLT_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
#endif
swap d2
orl d2,d0
bra Laddsf$ret
| If one of the numbers was too small (difference of exponents >=
| FLT_MANT_DIG+2) we return the other (and now we don't have to '
| check for finiteness or zero).
Laddsf$a$small:
movel a6@(12),d0
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
Laddsf$b$small:
movel a6@(8),d0
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
| If the numbers are denormalized remember to put exponent equal to 1.
Laddsf$a$den:
movel d5,d6 | d5 contains 0x01000000
swap d6
bra Laddsf$1
Laddsf$b$den:
movel d5,d7
swap d7
notl d4 | make d4 into a mask for the fraction
| (this was not executed after the jump)
bra Laddsf$2
| The rest is mainly code for the different results which can be
| returned (checking always for +/-INFINITY and NaN).
Laddsf$b:
| Return b (if a is zero).
movel a6@(12),d0
cmpl IMM (0x80000000),d0 | Check if b is -0
bne 1f
movel a0,d7
andl IMM (0x80000000),d7 | Use the sign of a
clrl d0
bra Laddsf$ret
Laddsf$a:
| Return a (if b is zero).
movel a6@(8),d0
1:
moveq IMM (ADD),d5
| We have to check for NaN and +/-infty.
movel d0,d7
andl IMM (0x80000000),d7 | put sign in d7
bclr IMM (31),d0 | clear sign
cmpl IMM (INFINITY),d0 | check for infty or NaN
bge 2f
movel d0,d0 | check for zero (we do this because we don't '
bne Laddsf$ret | want to return -0 by mistake
bclr IMM (31),d7 | if zero be sure to clear sign
bra Laddsf$ret | if everything OK just return
2:
| The value to be returned is either +/-infty or NaN
andl IMM (0x007fffff),d0 | check for NaN
bne Lf$inop | if mantissa not zero is NaN
bra Lf$infty
Laddsf$ret:
| Normal exit (a and b nonzero, result is not NaN nor +/-infty).
| We have to clear the exception flags (just the exception type).
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
Laddsf$ret$den:
| Return a denormalized number (for addition we don't signal underflow) '
lsrl IMM (1),d0 | remember to shift right back once
bra Laddsf$ret | and return
| Note: when adding two floats of the same sign if either one is
| NaN we return NaN without regard to whether the other is finite or
| not. When subtracting them (i.e., when adding two numbers of
| opposite signs) things are more complicated: if both are INFINITY
| we return NaN, if only one is INFINITY and the other is NaN we return
| NaN, but if it is finite we return INFINITY with the corresponding sign.
Laddsf$nf:
moveq IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
movel a6@(8),d0 | get the numbers back (remember that we
movel a6@(12),d1 | did some processing already)
movel IMM (INFINITY),d4 | useful constant (INFINITY)
movel d0,d2 | save sign bits
movel d0,d7 | into d7 as well as we may need the sign
| bit before jumping to LfSinfty
movel d1,d3
bclr IMM (31),d0 | clear sign bits
bclr IMM (31),d1
| We know that one of them is either NaN of +/-INFINITY
| Check for NaN (if either one is NaN return NaN)
cmpl d4,d0 | check first a (d0)
bhi Lf$inop
cmpl d4,d1 | check now b (d1)
bhi Lf$inop
| Now comes the check for +/-INFINITY. We know that both are (maybe not
| finite) numbers, but we have to check if both are infinite whether we
| are adding or subtracting them.
eorl d3,d2 | to check sign bits
bmi 1f
andl IMM (0x80000000),d7 | get (common) sign bit
bra Lf$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they have to be infinite both, so we
| return NaN).
cmpl d1,d0 | are both infinite?
beq Lf$inop | if so return NaN
andl IMM (0x80000000),d7 | get a's sign bit '
cmpl d4,d0 | test now for infinity
beq Lf$infty | if a is INFINITY return with this sign
bchg IMM (31),d7 | else we know b is INFINITY and has
bra Lf$infty | the opposite sign
|=============================================================================
| __mulsf3
|=============================================================================
| float __mulsf3(float, float);
| float __mulsf3(float a, float b)
| Shift-and-add multiply of the 24-bit fractions, FLT_MANT_DIG iterations.
| d7 holds the result sign throughout; d2/d3 hold the exponents; exits
| through Lround$exit which rounds and handles over/underflow.
FUNC(__mulsf3)
SYM (__mulsf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0
movel a6@(12),d1 | and b into d1
movel d0,d7 | d7 will hold the sign of the product
eorl d1,d7 |
andl IMM (0x80000000),d7
movel IMM (INFINITY),d6 | useful constant (+INFINITY)
movel d6,d5 | another (mask for fraction)
notl d5 |
movel IMM (0x00800000),d4 | this is to put hidden bit back
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d2 |
beq Lmulsf$a$0 | branch if a is zero
bclr IMM (31),d1 | get rid of b's sign bit '
movel d1,d3 |
beq Lmulsf$b$0 | branch if b is zero
cmpl d6,d0 | is a big?
bhi Lmulsf$inop | if a is NaN return NaN
beq Lmulsf$inf | if a is INFINITY we have to check b
cmpl d6,d1 | now compare b with INFINITY
bhi Lmulsf$inop | is b NaN?
beq Lmulsf$overflow | is b INFINITY?
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3.
andl d6,d2 | and isolate exponent in d2
beq Lmulsf$a$den | if exponent is zero we have a denormalized
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
#endif
Lmulsf$1: | number
andl d6,d3 |
beq Lmulsf$b$den |
andl d5,d1 |
orl d4,d1 |
swap d3 |
#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Lmulsf$2: |
#ifndef __mcoldfire__
addw d3,d2 | add exponents
subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#else
addl d3,d2 | add exponents
subl IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#endif
| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit FLT_MANT_DIG-1 set (even if they were
| denormalized to start with!), which means that in the product
| bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
| high long) is set.
| To do the multiplication let us move the number a little bit around ...
movel d1,d6 | second operand in d6
movel d0,d5 | first operand in d4-d5
movel IMM (0),d4
movel d4,d1 | the sums will go in d0-d1
movel d4,d0
| now bit FLT_MANT_DIG-1 becomes bit 31:
lsll IMM (31-FLT_MANT_DIG+1),d6
| Start the loop (we loop #FLT_MANT_DIG times):
moveq IMM (FLT_MANT_DIG-1),d3
1: addl d1,d1 | shift sum
addxl d0,d0
lsll IMM (1),d6 | get bit bn
bcc 2f | if not set skip sum
addl d5,d1 | add a
addxl d4,d0
2:
#ifndef __mcoldfire__
dbf d3,1b | loop back
#else
subql IMM (1),d3
bpl 1b
#endif
| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
| FLT_MANT_DIG is set (to do the rounding).
#ifndef __mcoldfire__
rorl IMM (6),d1
swap d1
movew d1,d3
andw IMM (0x03ff),d3
andw IMM (0xfd00),d1
#else
movel d1,d3
lsll IMM (8),d1
addl d1,d1
addl d1,d1
moveq IMM (22),d5
lsrl d5,d3
orl d3,d1
andl IMM (0xfffffd00),d1
#endif
lsll IMM (8),d0
addl d0,d0
addl d0,d0
#ifndef __mcoldfire__
orw d3,d0
#else
orl d3,d0
#endif
moveq IMM (MULTIPLY),d5
btst IMM (FLT_MANT_DIG+1),d0
beq Lround$exit
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d2
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addql IMM (1),d2
#endif
bra Lround$exit
Lmulsf$inop:
moveq IMM (MULTIPLY),d5
bra Lf$inop
Lmulsf$overflow:
moveq IMM (MULTIPLY),d5
bra Lf$overflow
Lmulsf$inf:
moveq IMM (MULTIPLY),d5
| If either is NaN return NaN; else both are (maybe infinite) numbers, so
| return INFINITY with the correct sign (which is in d7).
cmpl d6,d1 | is b NaN?
bhi Lf$inop | if so return NaN
bra Lf$overflow | else return +/-INFINITY
| If either number is zero return zero, unless the other is +/-INFINITY,
| or NaN, in which case we return NaN.
Lmulsf$b$0:
| Here d1 (==b) is zero.
movel a6@(8),d1 | get a again to check for non-finiteness
bra 1f
Lmulsf$a$0:
movel a6@(12),d1 | get b again to check for non-finiteness
1: bclr IMM (31),d1 | clear sign bit
cmpl IMM (INFINITY),d1 | and check for a large exponent
bge Lf$inop | if b is +/-INFINITY or NaN return NaN
movel d7,d0 | else return signed zero
PICLEA SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |
| If a number is denormalized we put an exponent of 1 but do not put the
| hidden bit back into the fraction; instead we shift left until bit 23
| (the hidden bit) is set, adjusting the exponent accordingly. We do this
| to ensure that the product of the fractions is close to 1.
Lmulsf$a$den:
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left (until bit 23 is set)
#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subql IMM (1),d2 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d0
bne Lmulsf$1 |
bra 1b | else loop back
Lmulsf$b$den:
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit 23 is set
#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subql IMM (1),d3 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d1
bne Lmulsf$2 |
bra 1b | else loop back
|=============================================================================
| __divsf3
|=============================================================================
| float __divsf3(float, float);
| float __divsf3(float a, float b)
| Restoring shift-and-subtract division of the normalized fractions,
| FLT_MANT_DIG+2 quotient bits plus a sticky bit.  d7 holds the result
| sign (saved in a0 during the loop); exits through Lround$exit.
FUNC(__divsf3)
SYM (__divsf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0
movel a6@(12),d1 | and b into d1
movel d0,d7 | d7 will hold the sign of the result
eorl d1,d7 |
andl IMM (0x80000000),d7 |
movel IMM (INFINITY),d6 | useful constant (+INFINITY)
movel d6,d5 | another (mask for fraction)
notl d5 |
movel IMM (0x00800000),d4 | this is to put hidden bit back
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d2 |
beq Ldivsf$a$0 | branch if a is zero
bclr IMM (31),d1 | get rid of b's sign bit '
movel d1,d3 |
beq Ldivsf$b$0 | branch if b is zero
cmpl d6,d0 | is a big?
bhi Ldivsf$inop | if a is NaN return NaN
beq Ldivsf$inf | if a is INFINITY we have to check b
cmpl d6,d1 | now compare b with INFINITY
bhi Ldivsf$inop | if b is NaN return NaN
beq Ldivsf$underflow
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3 and normalize the numbers to
| ensure that the ratio of the fractions is close to 1. We do this by
| making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
andl d6,d2 | and isolate exponent in d2
beq Ldivsf$a$den | if exponent is zero we have a denormalized
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
#endif
Ldivsf$1: |
andl d6,d3 |
beq Ldivsf$b$den |
andl d5,d1 |
orl d4,d1 |
swap d3 |
#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Ldivsf$2: |
#ifndef __mcoldfire__
subw d3,d2 | subtract exponents
addw IMM (F_BIAS),d2 | and add bias
#else
subl d3,d2 | subtract exponents
addl IMM (F_BIAS),d2 | and add bias
#endif
| We are now ready to do the division. We have prepared things in such a way
| that the ratio of the fractions will be less than 2 but greater than 1/2.
| At this point the registers in use are:
| d0 holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
| d1 holds b (second operand, bit FLT_MANT_DIG=1)
| d2 holds the difference of the exponents, corrected by the bias
| d7 holds the sign of the ratio
| d4, d5, d6 hold some constants
movel d7,a0 | d6-d7 will hold the ratio of the fractions
movel IMM (0),d6 |
movel d6,d7
moveq IMM (FLT_MANT_DIG+1),d3
1: cmpl d0,d1 | is a < b?
bhi 2f |
bset d3,d6 | set a bit in d6
subl d1,d0 | if a >= b a <-- a-b
beq 3f | if a is zero, exit
2: addl d0,d0 | multiply a by 2
#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM (1),d3
bpl 1b
#endif
| Now we keep going to set the sticky bit ...
moveq IMM (FLT_MANT_DIG),d3
1: cmpl d0,d1
ble 2f
addl d0,d0
#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM(1),d3
bpl 1b
#endif
movel IMM (0),d1
bra 3f
2: movel IMM (0),d1
#ifndef __mcoldfire__
subw IMM (FLT_MANT_DIG),d3
addw IMM (31),d3
#else
subl IMM (FLT_MANT_DIG),d3
addl IMM (31),d3
#endif
bset d3,d1
3:
movel d6,d0 | put the ratio in d0-d1
movel a0,d7 | get sign back
| Because of the normalization we did before we are guaranteed that
| d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
| bit 25 could be set, and if it is not set then bit 24 is necessarily set.
btst IMM (FLT_MANT_DIG+1),d0
beq 1f | if it is not set, then bit 24 is set
lsrl IMM (1),d0 |
#ifndef __mcoldfire__
addw IMM (1),d2 |
#else
addl IMM (1),d2 |
#endif
1:
| Now round, check for over- and underflow, and exit.
moveq IMM (DIVIDE),d5
bra Lround$exit
Ldivsf$inop:
moveq IMM (DIVIDE),d5
bra Lf$inop
Ldivsf$overflow:
moveq IMM (DIVIDE),d5
bra Lf$overflow
Ldivsf$underflow:
moveq IMM (DIVIDE),d5
bra Lf$underflow
Ldivsf$a$0:
moveq IMM (DIVIDE),d5
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero.
andl IMM (0x7fffffff),d1 | clear sign bit and test b
beq Lf$inop | if b is also zero return NaN
cmpl IMM (INFINITY),d1 | check for NaN
bhi Lf$inop |
movel d7,d0 | else return signed zero
PICLEA SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |
Ldivsf$b$0:
moveq IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
cmpl IMM (INFINITY),d0 | compare d0 with INFINITY
bhi Lf$inop | if larger it is NaN
bra Lf$div$0 | else signal DIVIDE_BY_ZERO
Ldivsf$inf:
moveq IMM (DIVIDE),d5
| If a is INFINITY we have to check b
cmpl IMM (INFINITY),d1 | compare b with INFINITY
bge Lf$inop | if b is NaN or INFINITY return NaN
bra Lf$overflow | else return overflow
| If a number is denormalized we put an exponent of 1 but do not put the
| bit back into the fraction.
Ldivsf$a$den:
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subl IMM (1),d2 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d0
bne Ldivsf$1
bra 1b
Ldivsf$b$den:
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subl IMM (1),d3 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d1
bne Ldivsf$2
bra 1b
Lround$exit:
| This is a common exit point for __mulsf3 and __divsf3.
| On entry: fraction in d0-d1, exponent in d2, sign bit in d7,
| operation code in d5.  Denormalizes if needed, dispatches to the
| rounding routine, then packs exponent+fraction+sign and returns.
| First check for underlow in the exponent:
#ifndef __mcoldfire__
cmpw IMM (-FLT_MANT_DIG-1),d2
#else
cmpl IMM (-FLT_MANT_DIG-1),d2
#endif
blt Lf$underflow
| It could happen that the exponent is less than 1, in which case the
| number is denormalized. In this case we shift right and adjust the
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
movel IMM (0),d6 | d6 is used temporarily
#ifndef __mcoldfire__
cmpw IMM (1),d2 | if the exponent is less than 1 we
#else
cmpl IMM (1),d2 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
#ifndef __mcoldfire__
addw IMM (1),d2 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
roxrl IMM (1),d6 | d6 collect bits we would lose otherwise
cmpw IMM (1),d2 | is the exponent 1 already?
#else
addql IMM (1),d2 | adjust the exponent
lsrl IMM (1),d6
btst IMM (0),d1
beq 11f
bset IMM (31),d6
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
cmpl IMM (1),d2 | is the exponent 1 already?
#endif
beq 2f | if not loop back
bra 1b |
bra Lf$underflow | safety check, shouldn't execute '
2: orl d6,d1 | this is a trick so we don't lose '
| the extra bits which were flushed right
| Now call the rounding routine (which takes care of denormalized numbers):
lea pc@(Lround$0),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lround$0:
| Here we have a correctly rounded result (either normalized or denormalized).
| Here we should have either a normalized number or a denormalized one, and
| the exponent is necessarily larger or equal to 1 (so we don't have to '
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 255).
#ifndef __mcoldfire__
cmpw IMM (0x00ff),d2
#else
cmpl IMM (0x00ff),d2
#endif
bge Lf$overflow
| Now check for a denormalized number (exponent==0).
movew d2,d2 | move-to-self just sets the CCR: test exponent for zero
beq Lf$den
1:
| Put back the exponents and sign and return.
#ifndef __mcoldfire__
lslw IMM (7),d2 | exponent back to fourth byte
#else
lsll IMM (7),d2 | exponent back to fourth byte
#endif
bclr IMM (FLT_MANT_DIG-1),d0
swap d0 | and put back exponent
#ifndef __mcoldfire__
orw d2,d0 |
#else
orl d2,d0
#endif
swap d0 |
orl d7,d0 | and sign also
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
|=============================================================================
| __negsf2
|=============================================================================
| This is trivial and could be shorter if we didn't bother checking for NaN '
| and +/-INFINITY.
| float __negsf2(float);
| float __negsf2(float a): flip the sign bit, with special handling so
| that -0 comes back as +0 and NaN/INFINITY go through the exception path.
FUNC(__negsf2)
SYM (__negsf2):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (NEGATE),d5
movel a6@(8),d0 | get number to negate in d0
bchg IMM (31),d0 | negate
movel d0,d1 | make a positive copy
bclr IMM (31),d1 |
tstl d1 | check for zero
beq 2f | if zero (either sign) return +zero
cmpl IMM (INFINITY),d1 | compare to +INFINITY
blt 1f |
bhi Lf$inop | if larger (fraction not zero) is NaN
movel d0,d7 | else get sign and return INFINITY
andl IMM (0x80000000),d7
bra Lf$infty
1: PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
2: bclr IMM (31),d0
bra 1b
|=============================================================================
| __cmpsf2
|=============================================================================
| Comparison result codes returned in d0.
GREATER = 1
LESS = -1
EQUAL = 0
| int __cmpsf2_internal(float a, float b, int nan_result);
| Compares a and b; the third argument is the value returned (via the
| exception path) when either operand is NaN.
SYM (__cmpsf2_internal):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (COMPARE),d5
movel a6@(8),d0 | get first operand
movel a6@(12),d1 | get second operand
| Check if either is NaN, and in that case return garbage and signal
| INVALID_OPERATION. Check also if either is zero, and clear the signs
| if necessary.
movel d0,d6
andl IMM (0x7fffffff),d0
beq Lcmpsf$a$0
cmpl IMM (0x7f800000),d0
bhi Lcmpf$inop
Lcmpsf$1:
movel d1,d7
andl IMM (0x7fffffff),d1
beq Lcmpsf$b$0
cmpl IMM (0x7f800000),d1
bhi Lcmpf$inop
Lcmpsf$2:
| Check the signs
eorl d6,d7
bpl 1f
| If the signs are not equal check if a >= 0
tstl d6
bpl Lcmpsf$a$gt$b | if (a >= 0 && b < 0) => a > b
bmi Lcmpsf$b$gt$a | if (a < 0 && b >= 0) => a < b
1:
| If the signs are equal check for < 0
tstl d6
bpl 1f
| If both are negative exchange them
#ifndef __mcoldfire__
exg d0,d1
#else
movel d0,d7
movel d1,d0
movel d7,d1
#endif
1:
| Now that they are positive we just compare them as longs (does this also
| work for denormalized numbers?).
cmpl d0,d1
bhi Lcmpsf$b$gt$a | |b| > |a|
bne Lcmpsf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
#endif
unlk a6
rts
Lcmpsf$a$gt$b:
movel IMM (GREATER),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpsf$b$gt$a:
movel IMM (LESS),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpsf$a$0:
| a is +/-0: drop its sign so +0 and -0 compare equal.
bclr IMM (31),d6
bra Lcmpsf$1
Lcmpsf$b$0:
bclr IMM (31),d7
bra Lcmpsf$2
Lcmpf$inop:
| NaN operand: return the caller-supplied third argument and signal
| INVALID_OPERATION through the exception handler.
movl a6@(16),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
| int __cmpsf2(float, float);
| Public three-way compare entry point. Pushes 1 as the result to use
| when an operand is NaN, then delegates to __cmpsf2_internal; the
| GREATER/LESS/EQUAL code comes back in d0.
FUNC(__cmpsf2)
SYM (__cmpsf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
|=============================================================================
| rounding routines
|=============================================================================
| The rounding routines expect the number to be normalized in registers
| d0-d1, with the exponent in register d2. They assume that the
| exponent is larger or equal to 1. They return a properly normalized number
| if possible, and a denormalized number otherwise. The exponent is returned
| in d2.
| All of them return through "jmp a0@": the caller passes its
| continuation address in a0 rather than using call/rts.
Lround$to$nearest:
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
| Here we assume that the exponent is not too small (this should be checked
| before entering the rounding routine), but the number could be denormalized.
| Check for denormalized numbers:
1: btst IMM (FLT_MANT_DIG),d0
bne 2f | if set the number is normalized
| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -F_BIAS+1).
#ifndef __mcoldfire__
cmpw IMM (1),d2 | remember that the exponent is at least one
#else
cmpl IMM (1),d2 | remember that the exponent is at least one
#endif
beq 2f | an exponent of one means denormalized
addl d1,d1 | else shift and adjust the exponent
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d2,1b |
#else
subql IMM (1),d2
bpl 1b
#endif
2:
| Now round: we do it as follows: after the shifting we can write the
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
| If delta < 1, do nothing. If delta > 1, add 1 to f.
| If delta == 1, we make sure the rounded number will be even (odd?)
| (after shifting).
btst IMM (0),d0 | is delta < 1?
beq 2f | if so, do not do anything
| d1 == 0 here means delta is exactly 1: fall through and round to even
| by adding the value of fraction bit 1; d1 != 0 means delta > 1: add 1.
tstl d1 | is delta == 1?
bne 1f | if so round to even
movel d0,d1 |
andl IMM (2),d1 | bit 1 is the last significant bit
addl d1,d0 |
bra 2f |
1: movel IMM (1),d1 | else add 1
addl d1,d0 |
| Shift right once (because we used bit #FLT_MANT_DIG!).
2: lsrl IMM (1),d0
| Now check again bit #FLT_MANT_DIG (rounding could have produced a
| 'fraction overflow' ...).
btst IMM (FLT_MANT_DIG),d0
beq 1f
lsrl IMM (1),d0
#ifndef __mcoldfire__
addw IMM (1),d2
#else
addql IMM (1),d2
#endif
1:
| If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
| have to put the exponent to zero and return a denormalized number.
btst IMM (FLT_MANT_DIG-1),d0
beq 1f
jmp a0@
1: movel IMM (0),d2
jmp a0@
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
| Truncating / directed rounding modes need no fraction adjustment here.
jmp a0@
#endif /* L_float */
| gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
| __ledf2, __ltdf2 to all return the same value as a direct call to
| __cmpdf2 would. In this implementation, each of these routines
| simply calls __cmpdf2. It would be more efficient to give the
| __cmpdf2 routine several names, but separating them out will make it
| easier to write efficient versions of these routines someday.
| If the operands compare unordered, __gtdf2 and __gedf2 return -1.
| The other routines return 1.
#ifdef L_eqdf2
.text
| CMPtype __eqdf2(double, double);
| Equality shim: pushes both doubles plus the unordered default 1
| (non-zero => "not equal" for NaN operands) and lets
| __cmpdf2_internal produce the three-way result in d0.
FUNC(__eqdf2)
.globl SYM (__eqdf2)
SYM (__eqdf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_eqdf2 */
#ifdef L_nedf2
.text
| CMPtype __nedf2(double, double);
| Inequality shim: unordered default 1 makes NaN operands read as
| "not equal"; result comes from __cmpdf2_internal in d0.
FUNC(__nedf2)
.globl SYM (__nedf2)
SYM (__nedf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_nedf2 */
#ifdef L_gtdf2
.text
| CMPtype __gtdf2(double, double);
| Greater-than shim: unordered default is -1 so that NaN operands
| make "a > b" false; result from __cmpdf2_internal in d0.
FUNC(__gtdf2)
.globl SYM (__gtdf2)
SYM (__gtdf2):
link a6,IMM (0)
pea -1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_gtdf2 */
#ifdef L_gedf2
.text
| CMPtype __gedf2(double, double);
| Greater-or-equal shim: unordered default is -1 so that NaN operands
| make "a >= b" false; result from __cmpdf2_internal in d0.
FUNC(__gedf2)
.globl SYM (__gedf2)
SYM (__gedf2):
link a6,IMM (0)
pea -1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_gedf2 */
#ifdef L_ltdf2
.text
| CMPtype __ltdf2(double, double);
| Less-than shim: unordered default 1 makes NaN operands read as
| "not less"; result from __cmpdf2_internal in d0.
FUNC(__ltdf2)
.globl SYM (__ltdf2)
SYM (__ltdf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_ltdf2 */
#ifdef L_ledf2
.text
| CMPtype __ledf2(double, double);
| Less-or-equal shim: unordered default 1 makes NaN operands read as
| "not less-or-equal"; result from __cmpdf2_internal in d0.
FUNC(__ledf2)
.globl SYM (__ledf2)
SYM (__ledf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_ledf2 */
| The comments above about __eqdf2, et. al., also apply to __eqsf2,
| et. al., except that the latter call __cmpsf2 rather than __cmpdf2.
#ifdef L_eqsf2
.text
| CMPtype __eqsf2(float, float);
| Single-float equality shim: unordered default 1 (NaN => "not equal");
| delegates to __cmpsf2_internal, result in d0.
FUNC(__eqsf2)
.globl SYM (__eqsf2)
SYM (__eqsf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_eqsf2 */
#ifdef L_nesf2
.text
| CMPtype __nesf2(float, float);
| Single-float inequality shim: unordered default 1 (NaN => "not equal").
FUNC(__nesf2)
.globl SYM (__nesf2)
SYM (__nesf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_nesf2 */
#ifdef L_gtsf2
.text
| CMPtype __gtsf2(float, float);
| Single-float greater-than shim: unordered default -1 so NaN
| operands make "a > b" false.
FUNC(__gtsf2)
.globl SYM (__gtsf2)
SYM (__gtsf2):
link a6,IMM (0)
pea -1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_gtsf2 */
#ifdef L_gesf2
.text
| CMPtype __gesf2(float, float);
| Single-float greater-or-equal shim: unordered default -1 so NaN
| operands make "a >= b" false.
FUNC(__gesf2)
.globl SYM (__gesf2)
SYM (__gesf2):
link a6,IMM (0)
pea -1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_gesf2 */
#ifdef L_ltsf2
.text
| CMPtype __ltsf2(float, float);
| Single-float less-than shim: unordered default 1 (NaN => "not less").
FUNC(__ltsf2)
.globl SYM (__ltsf2)
SYM (__ltsf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_ltsf2 */
#ifdef L_lesf2
.text
| CMPtype __lesf2(float, float);
| Single-float less-or-equal shim: unordered default 1
| (NaN => "not less-or-equal").
FUNC(__lesf2)
.globl SYM (__lesf2)
SYM (__lesf2):
link a6,IMM (0)
pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_lesf2 */
#if defined (__ELF__) && defined (__linux__)
/* Make stack non-executable for ELF linux targets. */
.section .note.GNU-stack,"",@progbits
#endif
|
4ms/metamodule-plugin-sdk
| 1,802
|
plugin-libc/libgcc/config/rl78/subdi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; DI ___subdi3 (DI a, DI b)  -- 64-bit subtract, a - b.
;; a is at [sp+4..11], b at [sp+12..19] (little endian);
;; result returned in r8..r15. Clobbers A, AX, HL.
START_FUNC ___subdi3
movw hl, sp ; use HL-based addressing (allows for direct subw)
movw ax, [hl+4]
subw ax, [hl+12]
movw r8, ax
mov a, [hl+6] ; middle bytes of the result are determined using 8-bit
subc a, [hl+14] ; SUBC insns which both account for and update the carry bit
mov r10, a ; (no SUBWC instruction is available)
mov a, [hl+7]
subc a, [hl+15]
mov r11, a
mov a, [hl+8]
subc a, [hl+16]
mov r12, a
mov a, [hl+9]
subc a, [hl+17]
mov r13, a
;; Top word: MOVW does not touch CY, so the borrow out of the last
;; SUBC is still valid when tested by SKNC below.
movw ax, [hl+10]
sknc ; account for the possible carry from the
decw ax ; latest 8-bit operation
subw ax, [hl+18]
movw r14, ax
ret
END_FUNC ___subdi3
|
4ms/metamodule-plugin-sdk
| 2,044
|
plugin-libc/libgcc/config/rl78/smindi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; DI ___smindi3 (DI a, DI b)  -- signed 64-bit minimum.
;; a is at [sp+4..11], b at [sp+12..19]; result in r8..r15.
;; Returns a unless b compares (signed) less than a.
START_FUNC ___smindi3
; copy first argument/operand to the output registers
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
movw ax, [sp+8]
movw r12, ax
movw ax, [sp+10]
movw r14, ax
; use 16-bit compares from the most significant words down to the least significant ones
movw ax, [sp+18]
cmpw ax, r14
; CMPW orders unsigned; flipping CY by both sign bits converts the
; top-word comparison into a signed ordering.
xor1 CY, a.7 ; first compare accounts for the
xor1 CY, r15.7 ; sign bits of the two operands
bc $.L1
bnz $.L2
movw ax, [sp+16]
cmpw ax, r12
bc $.L1
bnz $.L2
movw ax, [sp+14]
cmpw ax, r10
bc $.L1
bnz $.L2
movw ax, [sp+12]
cmpw ax, r8
bc $.L1
ret
.L1:
; copy second argument/operand to the output registers
movw ax, [sp+12]
movw r8, ax
movw ax, [sp+14]
movw r10, ax
movw ax, [sp+16]
movw r12, ax
movw ax, [sp+18]
movw r14, ax
.L2:
ret
END_FUNC ___smindi3
|
4ms/metamodule-plugin-sdk
| 2,246
|
plugin-libc/libgcc/config/rl78/lshrsi3.S
|
; Copyright (C) 2011-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
;; SI ___lshrsi3 (SI value, int count)  -- 32-bit logical shift right.
;; Counts >= 32 return 0; count 0 returns the value unchanged.
START_FUNC ___lshrsi3
;; input:
;;
;; [zero]
;; [count] <= $sp+8
;; [in MSB]
;; [in]
;; [in]
;; [in LSB] <- $sp+4
;; output:
;;
;; [r8..r11] result
;; registers:
;;
;; AX - temp for shift/rotate
;; B - count
mov a, [sp+8] ; A now contains the count
cmp a, #0x20
bc $.Lcount_is_normal
;; count is out of bounds, just return zero.
movw r8, #0
movw r10, #0
ret
.Lcount_is_normal:
cmp0 a
bnz $.Lcount_is_nonzero
;; count is zero, just copy IN to OUT
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
ret
.Lcount_is_nonzero:
mov b, a ; B now contains the count also
;; bit 4 of the count set means count >= 16.
bf a.4, $.Lcount_lt_16
;; count >= 16, shift 16 at a time.
movw r10, #0
movw ax, [sp+6]
movw r8, ax
mov a, b
and a, #0x0f
sknz
ret
mov b, a ; B now contains the remaining count
inc b ; pre-increment: the loop below decrements before testing
br $.Lloop_top
.Lcount_lt_16:
;; count is nonzero. Do one
;; 1-bit shift: SHRW on the high word, then ripple the carry down
;; through the two low bytes with RORC.
movw ax, [sp+6]
shrw ax,1
movw r10, ax
mov a, [sp+5]
rorc a,1
mov r9, a
mov a, [sp+4]
rorc a,1
mov r8, a
;; we did one shift above; do as many more as we need now.
.Lloop_top:
dec b
sknz
ret
movw ax, r10
shrw ax,1
movw r10, ax
mov a, r9
rorc a,1
mov r9, a
mov a, r8
rorc a,1
mov r8, a
br $.Lloop_top
END_FUNC ___lshrsi3
|
4ms/metamodule-plugin-sdk
| 1,937
|
plugin-libc/libgcc/config/rl78/umindi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; UDI ___umindi3 (UDI a, UDI b)  -- unsigned 64-bit minimum.
;; a is at [sp+4..11], b at [sp+12..19]; result in r8..r15.
;; Returns a unless b compares (unsigned) less than a.
START_FUNC ___umindi3
; copy first argument/operand to the output registers
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
movw ax, [sp+8]
movw r12, ax
movw ax, [sp+10]
movw r14, ax
; use 16-bit compares from the most significant words down to the least significant ones
movw ax, [sp+18]
cmpw ax, r14
bc $.L1
bnz $.L2
movw ax, [sp+16]
cmpw ax, r12
bc $.L1
bnz $.L2
movw ax, [sp+14]
cmpw ax, r10
bc $.L1
bnz $.L2
movw ax, [sp+12]
cmpw ax, r8
bc $.L1
ret
.L1:
; copy second argument/operand to the output registers
movw ax, [sp+12]
movw r8, ax
movw ax, [sp+14]
movw r10, ax
movw ax, [sp+16]
movw r12, ax
movw ax, [sp+18]
movw r14, ax
.L2:
ret
END_FUNC ___umindi3
|
4ms/metamodule-plugin-sdk
| 1,583
|
plugin-libc/libgcc/config/rl78/anddi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; DI ___anddi3 (DI a, DI b)  -- 64-bit bitwise AND, byte by byte.
;; a is at [sp+4..11], b at [sp+12..19]; result in r8..r15.
;; Clobbers A and HL.
START_FUNC ___anddi3
movw hl, sp
mov a, [hl+4]
and a, [hl+12]
mov r8, a
mov a, [hl+5]
and a, [hl+13]
mov r9, a
mov a, [hl+6]
and a, [hl+14]
mov r10, a
mov a, [hl+7]
and a, [hl+15]
mov r11, a
mov a, [hl+8]
and a, [hl+16]
mov r12, a
mov a, [hl+9]
and a, [hl+17]
mov r13, a
mov a, [hl+10]
and a, [hl+18]
mov r14, a
mov a, [hl+11]
and a, [hl+19]
mov r15, a
ret
END_FUNC ___anddi3
|
4ms/metamodule-plugin-sdk
| 5,177
|
plugin-libc/libgcc/config/rl78/divmodqi.S
|
/* QImode div/mod functions for the GCC support library for the Renesas RL78 processors.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "vregs.h"
;; MAKE_GENERIC which,need_result
;; Expands to the shared unsigned 8-bit shift-and-subtract divide core.
;; need_result=1 instantiates __generic_qidiv (returns the quotient in
;; r8); need_result=0 instantiates __generic_qimod (returns the
;; remainder in r8). Both expect HL to point at the stack frame with
;; the numerator at [hl+4] and the denominator at [hl+6].
;; NOTE(review): the #defines below make later uses of 'bit'/'den'
;; expand to registers B and C (BC as 'bitden' for the 16-bit shift);
;; the earlier 'bit = r14' / 'den = r12' symbol assignments appear to
;; be shadowed for the loop body -- confirm against the preprocessor
;; pass before relying on the r12/r14 assignments.
.macro MAKE_GENERIC which,need_result
.if \need_result
quot = r8
num = r10
den = r12
bit = r14
.else
num = r8
quot = r10
den = r12
bit = r14
.endif
#define bit b
#define den c
#define bitden bc
START_FUNC __generic_qidivmod\which
num_lt_den\which:
;; num < den: quotient is 0, remainder is num itself.
.if \need_result
mov r8, #0
.else
mov a, [hl+4]
mov r8, a
.endif
ret
num_eq_den\which:
;; num == den: quotient is 1, remainder is 0.
.if \need_result
mov r8, #1
.else
mov r8, #0
.endif
ret
den_is_zero\which:
;; division by zero: return 0 (no trap on this target).
mov r8, #0x00
ret
;; These routines leave DE alone - the signed functions use DE
;; to store sign information that must remain intact
.if \need_result
.global __generic_qidiv
__generic_qidiv:
.else
.global __generic_qimod
__generic_qimod:
.endif
;; (quot,rem) = 4[hl] /% 6[hl]
mov a, [hl+4] ; num
cmp a, [hl+6] ; den
bz $num_eq_den\which
bnh $num_lt_den\which
;; copy numerator
; mov a, [hl+4] ; already there from above
mov num, a
;; copy denominator
mov a, [hl+6]
mov den, a
cmp0 den
bz $den_is_zero\which
den_not_zero\which:
.if \need_result
;; zero out quot
mov quot, #0
.endif
;; initialize bit to 1
mov bit, #1
; while (den < num && !(den & (1L << BITS_MINUS_1)))
shift_den_bit\which:
;; SDB_ONE: one step of aligning den (and bit) with the numerator;
;; stops when den's top bit is set or den would exceed num.
.macro SDB_ONE\which
mov a, den
mov1 cy,a.7
bc $enter_main_loop\which
cmp a, num
bh $enter_main_loop\which
;; den <<= 1
; mov a, den ; already has it from the cmpw above
shl a, 1
mov den, a
;; bit <<= 1
shl bit, 1
.endm
;; unrolled twice per iteration to halve the loop overhead
SDB_ONE\which
SDB_ONE\which
br $shift_den_bit\which
main_loop\which:
;; if (num >= den) (cmp den > num)
mov a, den
cmp a, num
bh $next_loop\which
;; num -= den
mov a, num
sub a, den
mov num, a
.if \need_result
;; res |= bit
mov a, quot
or a, bit
mov quot, a
.endif
next_loop\which:
;; den, bit >>= 1
;; (bitden = BC shifts den (C) and bit (B) together in one SHRW)
movw ax, bitden
shrw ax, 1
movw bitden, ax
enter_main_loop\which:
cmp0 bit
bnz $main_loop\which
main_loop_done\which:
ret
END_FUNC __generic_qidivmod\which
.endm
;----------------------------------------------------------------------
;; Instantiate the generic core twice: _d keeps the quotient,
;; _m keeps the remainder.
MAKE_GENERIC _d 1
MAKE_GENERIC _m 0
;----------------------------------------------------------------------
;; UQI ___udivqi3 (UQI a, UQI b) -- unsigned 8-bit divide.
;; Sets up HL and tail-jumps into the generic core; result in r8.
START_FUNC ___udivqi3
;; r8 = 4[sp] / 6[sp]
movw hl, sp
br $!__generic_qidiv
END_FUNC ___udivqi3
;; UQI ___umodqi3 (UQI a, UQI b) -- unsigned 8-bit modulus.
START_FUNC ___umodqi3
;; r8 = 4[sp] % 6[sp]
movw hl, sp
br $!__generic_qimod
END_FUNC ___umodqi3
;----------------------------------------------------------------------
;; NEG_AX: two's-complement negate, in place, the byte whose address
;; is in AX. Clobbers HL and A.
.macro NEG_AX
movw hl, ax
mov a, #0
sub a, [hl]
mov [hl], a
.endm
;----------------------------------------------------------------------
;; QI ___divqi3 (QI a, QI b) -- signed 8-bit divide.
;; a is at [sp+4], b at [sp+6]; result in r8.
;; Strategy: record each operand's sign in D/E, negate negative
;; operands in place on the stack, run the unsigned core, then fix the
;; result sign (negative iff exactly one operand was negative) and
;; restore the stack slots to their original values for the caller.
START_FUNC ___divqi3
;; r8 = 4[sp] / 6[sp]
movw hl, sp
movw de, #0
mov a, [sp+4]
mov1 cy, a.7
bc $div_signed_num
mov a, [sp+6]
mov1 cy, a.7
bc $div_signed_den
br $!__generic_qidiv
div_signed_num:
;; neg [sp+4]
mov a, #0
sub a, [hl+4]
mov [hl+4], a
mov d, #1
mov a, [sp+6]
;; Test the denominator's sign bit. (Fixed: this previously tested
;; a.6, which misread denominators 64..127 as negative and
;; 0x80..0xbf as non-negative; bit 7 is the QI sign bit, matching
;; the identical check above and the one in ___modqi3.)
mov1 cy, a.7
bnc $div_unsigned_den
div_signed_den:
;; neg [sp+6]
mov a, #0
sub a, [hl+6]
mov [hl+6], a
mov e, #1
div_unsigned_den:
call $!__generic_qidiv
mov a, d
cmp0 a
bz $div_skip_restore_num
;; We have to restore the numerator [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov a, d
div_skip_restore_num:
;; D ^ E != 0 means exactly one operand was negative: negate quotient.
xor a, e
bz $div_no_neg
movw ax, #r8
NEG_AX
div_no_neg:
mov a, e
cmp0 a
bz $div_skip_restore_den
movw ax, sp
addw ax, #6
NEG_AX
div_skip_restore_den:
ret
END_FUNC ___divqi3
;; QI ___modqi3 (QI a, QI b) -- signed 8-bit modulus.
;; a is at [sp+4], b at [sp+6]; result in r8.
;; Negates negative operands in place (signs recorded in D/E), runs
;; the unsigned core, then gives the remainder the numerator's sign
;; and restores the caller's stack slots.
START_FUNC ___modqi3
;; r8 = 4[sp] % 6[sp]
movw hl, sp
movw de, #0
mov a, [hl+4]
mov1 cy, a.7
bc $mod_signed_num
mov a, [hl+6]
mov1 cy, a.7
bc $mod_signed_den
br $!__generic_qimod
mod_signed_num:
;; neg [sp+4]
mov a, #0
sub a, [hl+4]
mov [hl+4], a
mov d, #1
mov a, [hl+6]
mov1 cy, a.7
bnc $mod_unsigned_den
mod_signed_den:
;; neg [sp+6]
mov a, #0
sub a, [hl+6]
mov [hl+6], a
mov e, #1
mod_unsigned_den:
call $!__generic_qimod
mov a, d
cmp0 a
bz $mod_no_neg
;; numerator was negative: the remainder takes its sign
mov a, #0
sub a, r8
mov r8, a
;; Also restore numerator
movw ax, sp
addw ax, #4
NEG_AX
mod_no_neg:
mov a, e
cmp0 a
bz $mod_skip_restore_den
movw ax, sp
addw ax, #6
NEG_AX
mod_skip_restore_den:
ret
END_FUNC ___modqi3
|
4ms/metamodule-plugin-sdk
| 4,225
|
plugin-libc/libgcc/config/rl78/cmpsi2.S
|
; Copyright (C) 2011-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; int __cmpsi2 (signed long A, signed long B)
;;
;; Performs a signed comparison of A and B.
;; If A is less than B it returns 0. If A is greater
;; than B it returns 2. If they are equal it returns 1.
START_FUNC ___cmpsi2
;; A is at [sp+4]
;; B is at [sp+8]
;; Result put in R8
;; Initialise default return value.
;; BC = 1, the "equal" result, until proven otherwise.
onew bc
;; Compare the high words.
movw ax, [sp + 10]
movw de, ax
movw ax, [sp + 6]
cmpw ax, de
skz
br !!.Lconvert_to_signed
.Lcompare_bottom_words:
;; The top words are equal - compare the bottom words.
;; Note - code from __ucmpsi2 branches into here.
movw ax, [sp + 8]
movw de, ax
movw ax, [sp + 4]
cmpw ax, de
;; Fixed: this must be SKZ (skip the branch when the words are
;; equal), matching the high-word test above. The previous SKNZ
;; inverted the test, sending equal values to the less/greater
;; path and unequal values to the "equal" return.
skz
br !!.Lless_than_or_greater_than
;; The words are equal - return 1.
;; Note - we could branch to the return code at the end of the
;; function but a branch instruction takes 4 bytes, and the
;; return sequence itself is only 4 bytes long...
movw ax, bc
movw r8, ax
ret
.Lconvert_to_signed:
;; The top words are different. Unfortunately the comparison
;; is always unsigned, so to get a signed result we XOR the CY
;; flag with the top bits of AX and DE.
xor1 cy, a.7
mov a, d
xor1 cy, a.7
;; Fall through.
.Lless_than_or_greater_than:
;; We now have a signed less than/greater than result in CY.
;; Return 0 for less than, 2 for greater than.
;; Note - code from __ucmpsi2 branches into here.
;; CY set (A < B): INCW makes BC = 2, then CLRW zeroes it -> 0.
;; CY clear (A > B): CLRW is skipped, BC stays 2.
incw bc
sknc
clrw bc
;; Get the result value, currently in BC, into r8
movw ax, bc
movw r8, ax
ret
END_FUNC ___cmpsi2
;; ------------------------------------------------------
;; int __ucmpsi2 (unsigned long A, unsigned long B)
;;
;; Performs an unsigned comparison of A and B.
;; If A is less than B it returns 0. If A is greater
;; than B it returns 2. If they are equal it returns 1.
START_FUNC ___ucmpsi2
;; A is at [sp+4]
;; B is at [sp+8]
;; Result put in R8..R9
;; Initialise default return value.
;; BC = 1, the "equal" result, until proven otherwise.
onew bc
;; Compare the high words.
movw ax, [sp + 10]
movw de, ax
movw ax, [sp + 6]
cmpw ax, de
skz
;; Note: These branches go into the __cmpsi2 code!
;; (CY from the unsigned CMPW above is used directly - no sign fixup.)
br !!.Lless_than_or_greater_than
br !!.Lcompare_bottom_words
END_FUNC ___ucmpsi2
;; ------------------------------------------------------
;; signed int __gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
;; Result is negative if S1 is less than S2,
;; positive if S1 is greater, 0 if S1 and S2 are equal.
START_FUNC __gcc_bcmp
;; S1 is at [sp+4]
;; S2 is at [sp+6]
;; SIZE is at [sp+8]
;; Result in r8/r9
;; R10 is the running byte offset into both buffers.
movw r10, #0
1:
;; Compare R10 against the SIZE parameter
movw ax, [sp+8]
subw ax, r10
sknz
br !!1f
;; Load S2[r10] into R8
movw ax, [sp+6]
addw ax, r10
movw hl, ax
mov a, [hl]
mov r8, a
;; Load S1[r10] into A
movw ax, [sp+4]
addw ax, r10
movw hl, ax
mov a, [hl]
;; Increment offset
incw r10
;; Compare loaded bytes
cmp a, r8
sknz
br !!1b
;; They differ. Subtract *S2 from *S1 and return as the result.
;; Build AX = zero-extended *S1 and R8W = zero-extended *S2, so the
;; 16-bit SUBW yields the signed byte difference.
mov x, a
mov a, #0
mov r9, #0
subw ax, r8
1:
;; SIZE reached with no difference: AX is 0 here, so the result is 0.
movw r8, ax
ret
END_FUNC __gcc_bcmp
|
4ms/metamodule-plugin-sdk
| 2,044
|
plugin-libc/libgcc/config/rl78/smaxdi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; DI ___smaxdi3 (DI a, DI b)  -- signed 64-bit maximum.
;; a is at [sp+4..11], b at [sp+12..19]; result in r8..r15.
;; Returns a unless b compares (signed) greater than a.
START_FUNC ___smaxdi3
; copy first argument/operand to the output registers
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
movw ax, [sp+8]
movw r12, ax
movw ax, [sp+10]
movw r14, ax
; use 16-bit compares from the most significant words down to the least significant ones
movw ax, [sp+18]
cmpw ax, r14
; CMPW orders unsigned; flipping CY by both sign bits converts the
; top-word comparison into a signed ordering.
xor1 CY, a.7 ; first compare accounts for the
xor1 CY, r15.7 ; sign bits of the two operands
bh $.L1
bnz $.L2
movw ax, [sp+16]
cmpw ax, r12
bh $.L1
bnz $.L2
movw ax, [sp+14]
cmpw ax, r10
bh $.L1
bnz $.L2
movw ax, [sp+12]
cmpw ax, r8
bh $.L1
ret
.L1:
; copy second argument/operand to the output registers
movw ax, [sp+12]
movw r8, ax
movw ax, [sp+14]
movw r10, ax
movw ax, [sp+16]
movw r12, ax
movw ax, [sp+18]
movw r14, ax
.L2:
ret
END_FUNC ___smaxdi3
|
4ms/metamodule-plugin-sdk
| 15,937
|
plugin-libc/libgcc/config/rl78/fpmath-sf.S
|
; SF format is:
;
; [sign] 1.[23bits] E[8bits(n-127)]
;
; SEEEEEEE Emmmmmmm mmmmmmmm mmmmmmmm
;
; [A+0] mmmmmmmm
; [A+1] mmmmmmmm
; [A+2] Emmmmmmm
; [A+3] SEEEEEEE
;
; Special values (xxx != 0):
;
; r11 r10 r9 r8
; [HL+3] [HL+2] [HL+1] [HL+0]
; s1111111 10000000 00000000 00000000 infinity
; s1111111 1xxxxxxx xxxxxxxx xxxxxxxx NaN
; s0000000 00000000 00000000 00000000 zero
; s0000000 0xxxxxxx xxxxxxxx xxxxxxxx denormals
;
; Note that CMPtype is "signed char" for rl78
;
#include "vregs.h"
#define Z PSW.6
; External Functions:
;
; __int_isnan [HL] -> Z if NaN
; __int_iszero [HL] -> Z if zero
;; __int_isinf: [HL] points at a packed SF value; returns with the Z
;; flag set iff the value is +/- infinity (exponent all ones, mantissa
;; zero). Clobbers A and X; HL is preserved.
START_FUNC __int_isinf
;; [HL] points to value, returns Z if it's #Inf
;; Assemble AX = (exponent high 7 bits):(exponent low bit) and test
;; for the all-ones exponent pattern 0x7f80.
mov a, [hl+2]
and a, #0x80
mov x, a
mov a, [hl+3]
and a, #0x7f
cmpw ax, #0x7f80
skz
ret ; exponent not all ones: neither Inf nor NaN, return NZ
;; Exponent is all ones: Z iff the 23 mantissa bits are zero
;; (Inf); any mantissa bit set means NaN, returned as NZ.
mov a, [hl+2]
and a, #0x7f
or a, [hl+1]
or a, [hl]
ret
END_FUNC __int_isinf
#define A_SIGN [hl+0] /* byte */
#define A_EXP [hl+2] /* word */
#define A_FRAC_L [hl+4] /* word */
#define A_FRAC_LH [hl+5] /* byte */
#define A_FRAC_H [hl+6] /* word or byte */
#define A_FRAC_HH [hl+7] /* byte */
#define B_SIGN [hl+8]
#define B_EXP [hl+10]
#define B_FRAC_L [hl+12]
#define B_FRAC_LH [hl+13]
#define B_FRAC_H [hl+14]
#define B_FRAC_HH [hl+15]
;; ----------------------------------------------------------------------
;; _int_unpack_sf: unpack a packed SFmode value into the working struct.
;; In:       DE = pointer to packed 32-bit float
;;           HL = pointer to the unpacked struct (fields A_SIGN, A_EXP,
;;                A_FRAC_* — offsets [hl+0]..[hl+7], see #defines above)
;; Out:      struct at [HL] filled in; the fraction is left-shifted one
;;           bit to hold an extra low "rounding" bit (undone when packing)
;; Clobbers: A, X, BC, flags
;; ----------------------------------------------------------------------
START_FUNC _int_unpack_sf
;; convert 32-bit SFmode [DE] to 6-byte struct [HL] ("A")
mov a, [de+3]
sar a, 7 ; replicate the sign bit: A = 0x00 (positive) or 0xff (negative)
mov A_SIGN, a
movw ax, [de+2]
and a, #0x7f ; strip the sign, leaving the 8 exponent bits in AX bits 7..14
shrw ax, 7 ; AX = biased exponent
movw bc, ax ; remember if the exponent is all zeros
subw ax, #127 ; exponent is now non-biased
movw A_EXP, ax
movw ax, [de]
movw A_FRAC_L, ax ; low 16 mantissa bits
mov a, [de+2]
and a, #0x7f ; top 7 mantissa bits
cmp0 c ; if the exp is all zeros, it's denormal
skz
or a, #0x80 ; normal number: restore the implicit leading 1
mov A_FRAC_H, a
mov a, #0
mov A_FRAC_HH, a
;; rounding-bit-shift
;; Shift the whole 32-bit fraction left one bit so the low bit can act
;; as a guard/rounding bit during the arithmetic that follows.
movw ax, A_FRAC_L
shlw ax, 1
movw A_FRAC_L, ax
mov a, A_FRAC_H
rolc a, 1
mov A_FRAC_H, a
mov a, A_FRAC_HH
rolc a, 1
mov A_FRAC_HH, a
ret
END_FUNC _int_unpack_sf
; func(SF a,SF b)
; [SP+4..7] a
; [SP+8..11] b
;; ----------------------------------------------------------------------
;; ___subsf3: single-precision subtraction, a - b.
;; In:   [sp+4..7]  = a (packed SFmode)
;;       [sp+8..11] = b (packed SFmode)
;; Out:  r8..r11 = a - b
;; Implemented as a + (-b) via ___addsf3; see the comment below for why
;; the caller's copy of b must not be modified in place.
;; ----------------------------------------------------------------------
START_FUNC ___subsf3
;; a - b => a + (-b)
;; Note - we cannot just change the sign of B on the stack and
;; then fall through into __addsf3. The stack'ed value may be
;; used again (it was created by our caller after all). Instead
;; we have to allocate some stack space of our own, copy A and B,
;; change the sign of B, call __addsf3, release the allocated stack
;; and then return.
subw sp, #8 ; the "+8" in the offsets below accounts for this frame
movw ax, [sp+4+8] ; copy a to [sp+0..3]
movw [sp], ax
movw ax, [sp+4+2+8]
movw [sp+2], ax
movw ax, [sp+4+4+8] ; copy b to [sp+4..7] ...
movw [sp+4], ax
mov a, [sp+4+6+8]
mov [sp+6], a
mov a, [sp+4+7+8]
xor a, #0x80 ; ... with its sign bit flipped
mov [sp+7], a
call $!___addsf3
addw sp, #8
ret
END_FUNC ___subsf3
;; ----------------------------------------------------------------------
;; ___addsf3: single-precision addition, a + b.
;; In:   [sp+4..7]  = a (packed SFmode)
;;       [sp+8..11] = b (packed SFmode)
;; Out:  r8..r11 = a + b
;; Flow: special cases (NaN/Inf/zero) are handled on the packed values;
;; for the general case both operands are unpacked onto a 16-byte local
;; frame (unpacked A at [sp+0..7], unpacked B at [sp+8..15]), aligned to
;; a common exponent, added/subtracted, renormalized, and repacked by
;; __rl78_int_pack_a_r8.
;; ----------------------------------------------------------------------
START_FUNC ___addsf3
;; if (isnan(a)) return a
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_isnan
bnz $1f
ret_a:
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
ret
1: ;; if (isnan (b)) return b;
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_isnan
bnz $2f
ret_b:
movw ax, [sp+8]
movw r8, ax
movw ax, [sp+10]
movw r10, ax
ret
2: ;; if (isinf (a))
movw ax, sp
addw ax, #4
movw hl, ax
call $!__int_isinf
bnz $3f
;; if (isinf (b) && a->sign != b->sign) return NaN
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bnz $ret_a
mov a, [sp+7]
mov h, a
mov a, [sp+11]
xor a, h ; bit 7 set iff the signs differ
bf a.7, $ret_a
movw r8, #0x0001 ; Inf + -Inf: return NaN
movw r10, #0x7f80
ret
3: ;; if (isinf (b)) return b;
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bz $ret_b
;; if (iszero (b))
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bnz $4f
;; if (iszero (a))
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bnz $ret_a
;; both zero: result is -0 only when both are -0 (sign = signA & signB)
movw ax, [sp+4]
movw r8, ax
mov a, [sp+7]
mov h, a
movw ax, [sp+10]
and a, h
movw r10, ax
ret
4: ;; if (iszero (a)) return b;
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bz $ret_b
; Normalize the two numbers relative to each other. At this point,
; we need the numbers converted to their "unpacked" format.
subw sp, #16 ; Save room for two unpacked values.
movw ax, sp
movw hl, ax ; HL = unpacked A at [sp+0]
addw ax, #16+4
movw de, ax ; DE = packed a (old [sp+4], now +16)
call $!_int_unpack_sf
movw ax, sp
addw ax, #8
movw hl, ax ; HL = unpacked B at [sp+8]
addw ax, #16+8-8
movw de, ax ; DE = packed b (old [sp+8], now +16)
call $!_int_unpack_sf
movw ax, sp
movw hl, ax ; HL = base of unpacked pair; A_*/B_* offsets now valid
;; diff = a.exponent - b.exponent
movw ax, B_EXP ; sign/exponent word
movw bc, ax
movw ax, A_EXP ; sign/exponent word
subw ax, bc ; a = a.exp - b.exp
movw de, ax ; d = sdiff
;; if (diff < 0) diff = -diff
bf a.7, $1f
xor a, #0xff
xor r_0, #0xff ; x
incw ax ; a = diff
1:
;; if (diff >= 24) the smaller operand cannot affect the result: zero it
cmpw ax, #24
bc $.L661 ; if diff < 24 goto 661 (shift-align instead)
;; zero out the smaller one
movw ax, de
bt a.7, $1f ; if sdiff < 0 (a_exp < b_exp) goto 1f
;; "zero out" b
movw ax, A_EXP
movw B_EXP, ax
movw ax, #0
movw B_FRAC_L, ax
movw B_FRAC_H, ax
br $5f
1:
;; "zero out" a
movw ax, B_EXP
movw A_EXP, ax
movw ax, #0
movw A_FRAC_L, ax
movw A_FRAC_H, ax
br $5f
.L661:
;; shift the smaller one so they have the same exponents
1:
movw ax, de
bt a.7, $1f
cmpw ax, #0 ; sdiff > 0
bnh $1f ; if (sdiff <= 0) goto 1f
decw de
incw B_EXP ; because it's [HL+byte]
movw ax, B_FRAC_H ; 32-bit right shift of B's fraction
shrw ax, 1
movw B_FRAC_H, ax
mov a, B_FRAC_LH
rorc a, 1
mov B_FRAC_LH, a
mov a, B_FRAC_L
rorc a, 1
mov B_FRAC_L, a
br $1b
1:
movw ax, de
bf a.7, $1f
incw de
incw A_EXP ; because it's [HL+byte]
movw ax, A_FRAC_H ; 32-bit right shift of A's fraction
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a
br $1b
1:
5: ;; At this point, A and B have the same exponent.
mov a, A_SIGN
cmp a, B_SIGN
bnz $1f
;; Same sign, just add.
movw ax, A_FRAC_L
addw ax, B_FRAC_L
movw A_FRAC_L, ax
mov a, A_FRAC_H
addc a, B_FRAC_H
mov A_FRAC_H, a
mov a, A_FRAC_HH
addc a, B_FRAC_HH
mov A_FRAC_HH, a
br $.L728
1: ;; Signs differ - A has A_SIGN still.
bf a.7, $.L696
;; A is negative, do B-A
movw ax, B_FRAC_L
subw ax, A_FRAC_L
movw A_FRAC_L, ax
mov a, B_FRAC_H
subc a, A_FRAC_H
mov A_FRAC_H, a
mov a, B_FRAC_HH
subc a, A_FRAC_HH
mov A_FRAC_HH, a
br $.L698
.L696:
;; B is negative, do A-B
movw ax, A_FRAC_L
subw ax, B_FRAC_L
movw A_FRAC_L, ax
mov a, A_FRAC_H
subc a, B_FRAC_H
mov A_FRAC_H, a
mov a, A_FRAC_HH
subc a, B_FRAC_HH
mov A_FRAC_HH, a
.L698:
;; A is still A_FRAC_HH
bt a.7, $.L706
;; subtraction was positive
mov a, #0
mov A_SIGN, a
br $.L712
.L706:
;; subtraction was negative
mov a, #0xff
mov A_SIGN, a
;; This negates A_FRAC (two's complement, byte at a time)
mov a, A_FRAC_L
xor a, #0xff ; XOR doesn't mess with carry
add a, #1 ; INC doesn't set the carry
mov A_FRAC_L, a
mov a, A_FRAC_LH
xor a, #0xff
addc a, #0
mov A_FRAC_LH, a
mov a, A_FRAC_H
xor a, #0xff
addc a, #0
mov A_FRAC_H, a
mov a, A_FRAC_HH
xor a, #0xff
addc a, #0
mov A_FRAC_HH, a
.L712:
;; Renormalize the subtraction
mov a, A_FRAC_L
or a, A_FRAC_LH
or a, A_FRAC_H
or a, A_FRAC_HH
bz $.L728 ; exact cancellation: fraction is zero, skip normalization
;; Mantissa is not zero, left shift until the MSB is in the
;; right place
1:
movw ax, A_FRAC_H
cmpw ax, #0x0200
bnc $.L728
decw A_EXP
movw ax, A_FRAC_L
shlw ax, 1
movw A_FRAC_L, ax
movw ax, A_FRAC_H
rolwc ax, 1
movw A_FRAC_H, ax
br $1b
.L728:
;; normalize A and pack it
movw ax, A_FRAC_H
cmpw ax, #0x01ff
bnh $1f
;; overflow in the mantissa; adjust
movw ax, A_FRAC_H
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a
incw A_EXP
1:
call $!__rl78_int_pack_a_r8
addw sp, #16
ret
END_FUNC ___addsf3
;; ----------------------------------------------------------------------
;; __rl78_int_pack_a_r8: repack the unpacked float "A" into SFmode.
;; In:   HL = pointer to unpacked value (A_SIGN/A_EXP/A_FRAC_*), whose
;;        fraction still carries the extra low rounding bit added by
;;        _int_unpack_sf
;; Out:  r8..r11 = packed 32-bit float
;; Handles denormalization, overflow to +/-Inf, gradual underflow, and
;; rounding up when the guard bit is set, before assembling the final
;; sign/exponent/mantissa bytes.
;;
;; FIX: in the underflow/denormal shift loop below, the two fraction
;; stores were word stores ("movw A_FRAC_LH, ax" / "movw A_FRAC_L, ax"),
;; which write X into [hl+5]/[hl+4] and A into the byte after, scrambling
;; the fraction.  Every other right-shift sequence in this function uses
;; byte stores ("mov ..., a"); made this one consistent.
;; ----------------------------------------------------------------------
START_FUNC __rl78_int_pack_a_r8
;; pack A to R8
movw ax, A_EXP
addw ax, #126 ; not 127, we want the "bt/bf" test to check for denormals
bf a.7, $1f
;; make a denormal: shift the fraction right until the exponent reaches 0
2:
movw bc, ax ; preserve the exponent while shifting
movw ax, A_FRAC_H
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a
movw ax, bc
incw ax
bt a.7, $2b
decw ax
1:
incw ax ; now it's as if we added 127
movw A_EXP, ax
cmpw ax, #0xfe
bnh $1f
;; exponent too large: store #Inf instead
mov a, A_SIGN
or a, #0x7f ; A_SIGN is 0x00/0xff, so this yields s1111111
mov x, #0x80
movw r10, ax
movw r8, #0
ret
1:
bf a.7, $1f ; note AX has EXP at top of loop
;; underflow, denormal?  Shift right until the exponent is non-negative.
movw ax, A_FRAC_H
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a ; was: movw A_FRAC_LH, ax (clobbered [hl+6])
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a ; was: movw A_FRAC_L, ax (clobbered [hl+5])
incw A_EXP
movw ax, A_EXP
br $1b
1:
;; undo the rounding-bit-shift
mov a, A_FRAC_L
bf a.0, $1f ; guard bit clear: just shift it out
;; round up
movw ax, A_FRAC_L
addw ax, #1
movw A_FRAC_L, ax
bnc $1f
incw A_FRAC_H ; propagate the rounding carry
;; If the rounding set the bit beyond the end of the fraction, increment the exponent.
mov a, A_FRAC_HH
bf a.1, $1f
incw A_EXP
1:
movw ax, A_FRAC_H ; drop the guard bit: fraction >>= 1
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a
movw ax, A_FRAC_L
movw r8, ax
or a, x ; Z iff the whole fraction is zero...
or a, A_FRAC_H
or a, A_FRAC_HH
bnz $1f
movw ax, #0 ; ...in which case force the exponent to 0 (true zero)
movw A_EXP, ax
1:
;; assemble byte 2 (low exponent bit + top 7 mantissa bits)
mov a, A_FRAC_H
and a, #0x7f
mov b, a
mov a, A_EXP
shl a, 7
or a, b
mov r10, a
;; assemble byte 3 (sign + exponent bits 1..7)
mov a, A_SIGN
and a, #0x80
mov b, a
mov a, A_EXP
shr a, 1
or a, b
mov r11, a
ret
END_FUNC __rl78_int_pack_a_r8
;; ----------------------------------------------------------------------
;; ___mulsf3: single-precision multiplication, a * b.
;; In:   [sp+4..7]  = a (packed SFmode)
;;       [sp+8..11] = b (packed SFmode)
;; Out:  r8..r11 = a * b
;; Special cases are handled on the packed values (the returned sign is
;; always signA ^ signB); the general case unpacks both operands, does a
;; 64-bit fraction multiply via ___muldi3, renormalizes, and repacks.
;; ----------------------------------------------------------------------
START_FUNC ___mulsf3
;; if (isnan(a)) return a
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_isnan
bnz $1f
mret_a:
movw ax, [sp+4]
movw r8, ax
mov a, [sp+11]
and a, #0x80
mov b, a
movw ax, [sp+6]
xor a, b ; sign is always a ^ b
movw r10, ax
ret
1:
;; if (isnan (b)) return b;
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_isnan
bnz $1f
mret_b:
movw ax, [sp+8]
movw r8, ax
mov a, [sp+7]
and a, #0x80
mov b, a
movw ax, [sp+10]
xor a, b ; sign is always a ^ b
movw r10, ax
ret
1:
;; if (isinf (a)) return (b==0) ? nan : a
movw ax, sp
addw ax, #4
movw hl, ax
call $!__int_isinf
bnz $.L805
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bnz $mret_a
movw r8, #0x0001 ; Inf * 0: return NaN
movw r10, #0x7f80
ret
.L805:
;; if (isinf (b)) return (a==0) ? nan : b
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bnz $.L814
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bnz $mret_b
movw r8, #0x0001 ; 0 * Inf: return NaN
movw r10, #0x7f80
ret
.L814:
;; if (iszero (a)) return signed zero; if (iszero (b)) likewise
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bz $mret_a
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bz $mret_b
;; at this point, we're doing the multiplication.
subw sp, #16 ; save room for two unpacked values
movw ax, sp
movw hl, ax ; unpacked A at [sp+0]
addw ax, #16+4
movw de, ax ; packed a (old [sp+4])
call $!_int_unpack_sf
movw ax, sp
addw ax, #8
movw hl, ax ; unpacked B at [sp+8]
addw ax, #16+8-8
movw de, ax ; packed b (old [sp+8])
call $!_int_unpack_sf
movw ax, sp
movw hl, ax
;; multiply SI a.FRAC * SI b.FRAC to DI r8
subw sp, #16 ; build the two zero-extended DImode operands for ___muldi3
movw ax, A_FRAC_L
movw [sp+0], ax
movw ax, A_FRAC_H
movw [sp+2], ax
movw ax, B_FRAC_L
movw [sp+8], ax
movw ax, B_FRAC_H
movw [sp+10], ax
movw ax, #0
movw [sp+4], ax
movw [sp+6], ax
movw [sp+12], ax
movw [sp+14], ax
call !!___muldi3 ; MTMPa * MTMPb -> R8..R15
addw sp, #16
movw ax, sp
movw hl, ax ; HL back to the unpacked pair
;; add the exponents together
movw ax, A_EXP
addw ax, B_EXP
movw bc, ax ; exponent in BC
;; now, re-normalize the DI value in R8..R15 to have the
;; MSB in the "right" place, adjusting BC as we shift it.
;; The value will normally be in this range:
;; R15 R8
;; 0001_0000_0000_0000
;; 0003_ffff_fc00_0001
;; so to speed it up, we normalize to:
;; 0001_xxxx_xxxx_xxxx
;; then extract the bytes we want (r11-r14)
1:
mov a, r15
cmp0 a
bnz $2f
mov a, r14
and a, #0xfe
bz $1f
2:
;; shift right, inc exponent
movw ax, r14
shrw ax, 1
movw r14, ax
mov a, r13
rorc a, 1
mov r13, a
mov a, r12
rorc a, 1
mov r12, a
mov a, r11
rorc a, 1
mov r11, a
;; we don't care about r8/r9/r10 if we're shifting this way
incw bc
br $1b
1:
mov a, r15
or a, r14
bnz $1f
;; shift left, dec exponent
movw ax, r8
shlw ax, 1
movw r8, ax
movw ax, r10
rolwc ax, 1
movw r10, ax
movw ax, r12
rolwc ax, 1
movw r12, ax
movw ax, r14
rolwc ax, 1
movw r14, ax
decw bc
br $1b
1:
;; at this point, FRAC is in R11..R14 and EXP is in BC
movw ax, bc
movw A_EXP, ax
mov a, r11
mov A_FRAC_L, a
mov a, r12
mov A_FRAC_LH, a
mov a, r13
mov A_FRAC_H, a
mov a, r14
mov A_FRAC_HH, a
mov a, A_SIGN
xor a, B_SIGN ; result sign = signA ^ signB
mov A_SIGN, a
call $!__rl78_int_pack_a_r8
addw sp, #16
ret
END_FUNC ___mulsf3
;; ----------------------------------------------------------------------
;; ___divsf3: single-precision division, a / b.
;; In:   [sp+4..7]  = a (packed SFmode)
;;       [sp+8..11] = b (packed SFmode)
;; Out:  r8..r11 = a / b
;; Special cases are handled on the packed values; the general case
;; unpacks both operands, widens A's fraction by 32 zero bits, divides
;; via ___divdi3, renormalizes, and repacks.
;; ----------------------------------------------------------------------
START_FUNC ___divsf3
;; if (isnan(a)) return a
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_isnan
bnz $1f
dret_a:
movw ax, [sp+4]
movw r8, ax
mov a, [sp+11]
and a, #0x80
mov b, a
movw ax, [sp+6]
xor a, b ; sign is always a ^ b
movw r10, ax
ret
1:
;; if (isnan (b)) return b;
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_isnan
bnz $1f
dret_b:
movw ax, [sp+8]
movw r8, ax
mov a, [sp+7]
and a, #0x80
mov b, a
movw ax, [sp+10]
xor a, b ; sign is always a ^ b
movw r10, ax
ret
1:
;; if (isinf (a)) return isinf(b) ? nan : a
movw ax, sp
addw ax, #4
movw hl, ax
call $!__int_isinf
bnz $1f
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bnz $dret_a
dret_nan:
movw r8, #0x0001 ; return NaN
movw r10, #0x7f80
ret
1:
;; if (iszero (a)) return iszero(b) ? nan : a
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bnz $1f
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bnz $dret_a
br $dret_nan ; 0 / 0: NaN
1:
;; if (isinf (b)) return 0 (with sign = signA ^ signB)
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bnz $1f
mov a, [sp+7]
mov b, a
mov a, [sp+11]
xor a, b
and a, #0x80
mov r11, a
movw r8, #0
mov r10, #0
ret
1:
;; if (iszero (b)) return Inf (with sign = signA ^ signB)
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bnz $1f
mov a, [sp+7]
mov b, a
mov a, [sp+11]
xor a, b
or a, #0x7f ; force the exponent bits to all ones
mov r11, a
movw r8, #0
mov r10, #0x80
ret
1:
;; at this point, we're doing the division. Normalized
;; mantissas look like:
;; 01.xx.xx.xx
;; so we divide:
;; 01.xx.xx.xx.00.00.00.00
;; by 01.xx.xx.xx
;; to get approx 00.80.00.00.00 to 01.ff.ff.ff.00
subw sp, #16 ; save room for two unpacked values
movw ax, sp
movw hl, ax ; unpacked A at [sp+0]
addw ax, #16+4
movw de, ax ; packed a (old [sp+4])
call $!_int_unpack_sf
movw ax, sp
addw ax, #8
movw hl, ax ; unpacked B at [sp+8]
addw ax, #16+8-8
movw de, ax ; packed b (old [sp+8])
call $!_int_unpack_sf
movw ax, sp
movw hl, ax
;; divide DI a.FRAC / SI b.FRAC to DI r8
subw sp, #16 ; A's fraction goes in the HIGH half (i.e. << 32)
movw ax, A_FRAC_L
movw [sp+4], ax
movw ax, A_FRAC_H
movw [sp+6], ax
movw ax, B_FRAC_L
movw [sp+8], ax
movw ax, B_FRAC_H
movw [sp+10], ax
movw ax, #0
movw [sp+0], ax
movw [sp+2], ax
movw [sp+12], ax
movw [sp+14], ax
call !!___divdi3 ; MTMPa / MTMPb -> R8..R15
addw sp, #16
movw ax, sp
movw hl, ax ; HL back to the unpacked pair
;; subtract the exponents A - B
movw ax, A_EXP
subw ax, B_EXP
movw bc, ax ; exponent in BC
;; now, re-normalize the DI value in R8..R15 to have the
;; MSB in the "right" place, adjusting BC as we shift it.
;; The value will normally be in this range:
;; R15 R8
;; 0000_0000_8000_0000
;; 0000_0001_ffff_ff00
;; so to speed it up, we normalize to:
;; 0000_0001_xxxx_xxxx
;; then extract the bytes we want (r9-r12)
1:
movw ax, r14
cmpw ax, #0
bnz $2f
movw ax, r12
cmpw ax, #1
bnh $1f
2:
;; shift right, inc exponent
movw ax, r14
shrw ax, 1
movw r14, ax
mov a, r13
rorc a, 1
mov r13, a
mov a, r12
rorc a, 1
mov r12, a
mov a, r11
rorc a, 1
mov r11, a
mov a, r10
rorc a, 1
mov r10, a
mov a, r9
rorc a, 1
mov r9, a
mov a, r8
rorc a, 1
mov r8, a
incw bc
br $1b
1:
;; the previous loop leaves r15.r13 zero
mov a, r12
cmp0 a
bnz $1f
;; shift left, dec exponent
movw ax, r8
shlw ax, 1
movw r8, ax
movw ax, r10
rolwc ax, 1
movw r10, ax
movw ax, r12
rolwc ax, 1
movw r12, ax
;; don't need to do r14
decw bc
br $1b
1:
;; at this point, FRAC is in R8..R11 and EXP is in BC
movw ax, bc
movw A_EXP, ax
mov a, r9
mov A_FRAC_L, a
mov a, r10
mov A_FRAC_LH, a
mov a, r11
mov A_FRAC_H, a
mov a, r12
mov A_FRAC_HH, a
mov a, A_SIGN
xor a, B_SIGN ; result sign = signA ^ signB
mov A_SIGN, a
call $!__rl78_int_pack_a_r8
addw sp, #16
ret
END_FUNC ___divsf3
|
4ms/metamodule-plugin-sdk
| 20,489
|
plugin-libc/libgcc/config/rl78/divmodsi.S
|
/* SImode div/mod functions for the GCC support library for the Renesas RL78 processors.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "vregs.h"
#if defined __RL78_MUL_G14__
;; ----------------------------------------------------------------------
;; ___divsi3 (G14 variant): 32-bit signed division via the DIVWU insn.
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = quotient
;; Strategy: negate any negative operand so DIVWU (unsigned, BCAX/HLDE)
;; can be used, then negate the quotient when exactly one operand was
;; negative.  Interrupts are disabled around DIVWU (see the note in
;; ___udivsi3 below, Renesas Technical update TN-RL*-A025B/E).
;; ----------------------------------------------------------------------
START_FUNC ___divsi3
;; r8,r10 = 4[sp],6[sp] / 8[sp],10[sp]
;; Load and test for a negative denumerator.
movw ax, [sp+8]
movw de, ax
movw ax, [sp+10]
mov1 cy, a.7 ; CY = sign bit of the denominator
movw hl, ax
bc $__div_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7 ; CY = sign bit of the numerator
movw bc, ax
movw ax, [sp+4]
bc $__div_neg_num
;; Neither are negative - we can use the unsigned divide instruction.
__div_no_convert:
push psw
di
divwu ; bcax = bcax / hlde
pop psw
movw r8, ax
movw ax, bc
movw r10, ax
ret
__div_neg_den:
;; Negate the denumerator (which is in HLDE)
clrw ax
subw ax, de
movw de, ax
clrw ax
sknc
decw ax
subw ax, hl
movw hl, ax
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw bc, ax
movw ax, [sp+4]
;; If it is not negative then we perform the division and then negate the result.
bnc $__div_then_convert
;; Otherwise we negate the numerator and then go with a straightforward unsigned division.
;; The negation is complicated because AX, BC, DE and HL are already in use.
;; ax: numL bc: numH r8: r10:
xchw ax, bc
;; ax: numH bc: numL r8: r10:
movw r8, ax
;; ax: bc: numL r8: numH r10:
clrw ax
;; ax: 0 bc: numL r8: numH r10:
subw ax, bc
;; ax: -numL bc: r8: numH r10:
movw r10, ax
;; ax: bc: r8: numH r10: -numL
movw ax, r8
;; ax: numH bc: r8: r10: -numL
movw bc, ax
;; ax: bc: numH r8: r10: -numL
clrw ax
;; ax: 0 bc: numH r8: r10: -numL
sknc
decw ax
;; ax: -1 bc: numH r8: r10: -numL
subw ax, bc
;; ax: -numH bc: r8: r10: -numL
movw bc, ax
;; ax: bc: -numH r8: r10: -numL
movw ax, r10
;; ax: -numL bc: -numH r8: r10:
br $!__div_no_convert
__div_neg_num:
;; Negate the numerator (which is in BCAX)
;; We know that the denumerator is positive.
;; Note - we temporarily overwrite DE. We know that we can safely load it again off the stack again.
movw de, ax
clrw ax
subw ax, de
movw de, ax
clrw ax
sknc
decw ax
subw ax, bc
movw bc, ax
movw ax, [sp+8]
xchw ax, de ; AX = -numL, DE = reloaded denL
__div_then_convert:
push psw
di
divwu
pop psw
;; Negate result (in BCAX) and transfer into r8,r10
movw de, ax
clrw ax
subw ax, de
movw r8, ax
clrw ax
sknc
decw ax
subw ax, bc
movw r10, ax
ret
END_FUNC ___divsi3
;----------------------------------------------------------------------
;; ----------------------------------------------------------------------
;; ___udivsi3 (G14 variant): 32-bit unsigned division via DIVWU.
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = quotient
;; ----------------------------------------------------------------------
START_FUNC ___udivsi3
;; r8,r10 = 4[sp],6[sp] / 8[sp],10[sp]
;; Used when compiling with -Os specified.
movw ax, [sp+10]
movw hl, ax
movw ax, [sp+8]
movw de, ax ; HLDE = denominator
movw ax, [sp+6]
movw bc, ax
movw ax, [sp+4] ; BCAX = numerator
push psw ; Save the current interrupt status
di ; Disable interrupts. See Renesas Technical update TN-RL*-A025B/E
divwu ; bcax = bcax / hlde
pop psw ; Restore saved interrupt status
movw r8, ax
movw ax, bc
movw r10, ax
ret
END_FUNC ___udivsi3
;----------------------------------------------------------------------
;; ----------------------------------------------------------------------
;; ___modsi3 (G14 variant): 32-bit signed modulo via DIVWU.
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = remainder (DIVWU leaves it in HLDE)
;; The remainder takes the numerator's sign, so only a negative
;; numerator forces a post-division negation; a negative denominator is
;; simply negated before dividing.
;; ----------------------------------------------------------------------
START_FUNC ___modsi3
;; r8,r10 = 4[sp],6[sp] % 8[sp],10[sp]
;; Load and test for a negative denumerator.
movw ax, [sp+8]
movw de, ax
movw ax, [sp+10]
mov1 cy, a.7 ; CY = sign bit of the denominator
movw hl, ax
bc $__mod_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7 ; CY = sign bit of the numerator
movw bc, ax
movw ax, [sp+4]
bc $__mod_neg_num
;; Neither are negative - we can use the unsigned divide instruction.
__mod_no_convert:
push psw
di
divwu ; remainder -> HLDE
pop psw
movw ax, de
movw r8, ax
movw ax, hl
movw r10, ax
ret
__mod_neg_den:
;; Negate the denumerator (which is in HLDE)
clrw ax
subw ax, de
movw de, ax
clrw ax
sknc
decw ax
subw ax, hl
movw hl, ax
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw bc, ax
movw ax, [sp+4]
;; If it is not negative then we perform the modulo operation without conversion
bnc $__mod_no_convert
;; Otherwise we negate the numerator and then go with a modulo followed by negation.
;; The negation is complicated because AX, BC, DE and HL are already in use.
xchw ax, bc
movw r8, ax ; park numH
clrw ax
subw ax, bc
movw r10, ax ; r10 = -numL
movw ax, r8
movw bc, ax
clrw ax
sknc
decw ax
subw ax, bc
movw bc, ax ; BC = -numH
movw ax, r10 ; AX = -numL
br $!__mod_then_convert
__mod_neg_num:
;; Negate the numerator (which is in BCAX)
;; We know that the denumerator is positive.
;; Note - we temporarily overwrite DE. We know that we can safely load it again off the stack again.
movw de, ax
clrw ax
subw ax, de
movw de, ax
clrw ax
sknc
decw ax
subw ax, bc
movw bc, ax
movw ax, [sp+8]
xchw ax, de ; AX = -numL, DE = reloaded denL
__mod_then_convert:
push psw
di
divwu
pop psw
;; Negate result (in HLDE) and transfer into r8,r10
clrw ax
subw ax, de
movw r8, ax
clrw ax
sknc
decw ax
subw ax, hl
movw r10, ax
ret
END_FUNC ___modsi3
;----------------------------------------------------------------------
;; ----------------------------------------------------------------------
;; ___umodsi3 (G14 variant): 32-bit unsigned modulo via DIVWU.
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = remainder
;; ----------------------------------------------------------------------
START_FUNC ___umodsi3
;; r8,r10 = 4[sp],6[sp] % 8[sp],10[sp]
;; Used when compiling with -Os specified.
movw ax, [sp+10]
movw hl, ax
movw ax, [sp+8]
movw de, ax ; HLDE = denominator
movw ax, [sp+6]
movw bc, ax
movw ax, [sp+4] ; BCAX = numerator
push psw ; Save the current interrupt status
di ; Disable interrupts. See Renesas Technical update TN-RL*-A025B/E
divwu ; hlde = bcax % hlde
pop psw ; Restore saved interrupt status
movw ax, de
movw r8, ax
movw ax, hl
movw r10, ax
ret
END_FUNC ___umodsi3
;----------------------------------------------------------------------
#elif defined __RL78_MUL_G13__
;----------------------------------------------------------------------
;; Hardware registers. Note - these values match the silicon, not the documentation.
MDAL = 0xffff0
MDAH = 0xffff2
MDBL = 0xffff6
MDBH = 0xffff4
MDCL = 0xf00e0
MDCH = 0xf00e2
MDUC = 0xf00e8
;; _Negate low, high - two's-complement negate the 32-bit value held in
;; the two hardware words \low (bits 0..15) and \high (bits 16..31),
;; in place.  Clobbers AX, BC and flags.
.macro _Negate low, high
movw ax, \low
movw bc, ax
clrw ax
subw ax, bc ; low = 0 - low (CY set if low != 0)
movw \low, ax
movw ax, \high
movw bc, ax
clrw ax
sknc
decw ax ; borrow from the low word
subw ax, bc ; high = -1/0 - high
movw \high, ax
.endm
;----------------------------------------------------------------------
;; ----------------------------------------------------------------------
;; ___divsi3 (G13 variant): 32-bit signed division using the G13
;; hardware multiplier/divider unit (MDA = dividend, MDB = divisor,
;; controlled through MDUC).
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = quotient
;; Negative operands are negated in the MD registers first; the result
;; is negated afterwards when exactly one operand was negative.
;; ----------------------------------------------------------------------
START_FUNC ___divsi3
;; r8,r10 = 4[sp],6[sp] / 8[sp],10[sp]
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
;; Load and test for a negative denumerator.
movw ax, [sp+8]
movw MDBL, ax
movw ax, [sp+10]
mov1 cy, a.7 ; CY = sign bit of the denominator
movw MDBH, ax
bc $__div_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7 ; CY = sign bit of the numerator
movw MDAH, ax
movw ax, [sp+4]
movw MDAL, ax
bc $__div_neg_num
;; Neither are negative - we can use the unsigned divide hardware.
__div_no_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, MDAL ; Read the result
movw r8, ax
movw ax, MDAH
movw r10, ax
ret
__div_neg_den:
;; Negate the denumerator (which is in MDBL/MDBH)
_Negate MDBL MDBH
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw MDAH, ax
movw ax, [sp+4]
movw MDAL, ax
;; If it is not negative then we perform the division and then negate the result.
bnc $__div_then_convert
;; Otherwise we negate the numerator and then go with a straightforward unsigned division.
_Negate MDAL MDAH
br $!__div_no_convert
__div_neg_num:
;; Negate the numerator (which is in MDAL/MDAH)
;; We know that the denumerator is positive.
_Negate MDAL MDAH
__div_then_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
;; Negate result and transfer into r8,r10
_Negate MDAL MDAH ; FIXME: This could be coded more efficiently.
movw r10, ax ; _Negate leaves the negated high word in AX
movw ax, MDAL
movw r8, ax
ret
END_FUNC ___divsi3
;----------------------------------------------------------------------
;; ----------------------------------------------------------------------
;; ___modsi3 (G13 variant): 32-bit signed modulo using the G13 hardware
;; divider (remainder is read back from MDCL/MDCH).
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = remainder (takes the numerator's sign)
;; ----------------------------------------------------------------------
START_FUNC ___modsi3
;; r8,r10 = 4[sp],6[sp] % 8[sp],10[sp]
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
;; Load and test for a negative denumerator.
movw ax, [sp+8]
movw MDBL, ax
movw ax, [sp+10]
mov1 cy, a.7 ; CY = sign bit of the denominator
movw MDBH, ax
bc $__mod_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7 ; CY = sign bit of the numerator
movw MDAH, ax
movw ax, [sp+4]
movw MDAL, ax
bc $__mod_neg_num
;; Neither are negative - we can use the unsigned divide hardware
__mod_no_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDCL ; Read the remainder
movw r8, ax
movw ax, !MDCH
movw r10, ax
ret
__mod_neg_den:
;; Negate the denumerator (which is in MDBL/MDBH)
;; The remainder follows the numerator's sign, so nothing to undo later.
_Negate MDBL MDBH
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw MDAH, ax
movw ax, [sp+4]
movw MDAL, ax
;; If it is not negative then we perform the modulo operation without conversion
bnc $__mod_no_convert
;; Otherwise we negate the numerator and then go with a modulo followed by negation.
_Negate MDAL MDAH
br $!__mod_then_convert
__mod_neg_num:
;; Negate the numerator (which is in MDAL/MDAH)
;; We know that the denumerator is positive.
_Negate MDAL MDAH
__mod_then_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
;; Negate the remainder (MDCL/MDCH) straight into r8,r10.
movw ax, !MDCL
movw bc, ax
clrw ax
subw ax, bc
movw r8, ax
movw ax, !MDCH
movw bc, ax
clrw ax
sknc
decw ax
subw ax, bc
movw r10, ax
ret
END_FUNC ___modsi3
;----------------------------------------------------------------------
;; ----------------------------------------------------------------------
;; ___udivsi3 (G13 variant): 32-bit unsigned division using the G13
;; hardware divider.  MDA holds the dividend, MDB the divisor.
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = quotient
;; ----------------------------------------------------------------------
START_FUNC ___udivsi3
;; r8,r10 = 4[sp],6[sp] / 8[sp],10[sp]
;; Used when compiling with -Os specified.
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
movw ax, [sp+4] ; Load the dividend (numerator)
movw MDAL, ax
movw ax, [sp+6]
movw MDAH, ax
movw ax, [sp+8] ; Load the divisor (denominator)
movw MDBL, ax
movw ax, [sp+10]
movw MDBH, ax
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDAL ; Read the result
movw r8, ax
movw ax, !MDAH
movw r10, ax
ret
END_FUNC ___udivsi3
;----------------------------------------------------------------------
;; ----------------------------------------------------------------------
;; ___umodsi3 (G13 variant): 32-bit unsigned modulo using the G13
;; hardware divider.  The remainder is read from MDCL/MDCH.
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = remainder
;; ----------------------------------------------------------------------
START_FUNC ___umodsi3
;; r8,r10 = 4[sp],6[sp] % 8[sp],10[sp]
;; Used when compiling with -Os specified.
;; Note - hardware address match the silicon, not the documentation
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
movw ax, [sp+4] ; Load the dividend (numerator)
movw MDAL, ax
movw ax, [sp+6]
movw MDAH, ax
movw ax, [sp+8] ; Load the divisor (denominator)
movw MDBL, ax
movw ax, [sp+10]
movw MDBH, ax
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDCL ; Read the remainder
movw r8, ax
movw ax, !MDCH
movw r10, ax
ret
END_FUNC ___umodsi3
;----------------------------------------------------------------------
#elif defined __RL78_MUL_NONE__
;; ----------------------------------------------------------------------
;; MAKE_GENERIC which,need_result - instantiate the software (no
;; hardware multiplier) 32-bit unsigned divide/modulo routine.
;;   need_result=1 -> emits __generic_sidiv  (quotient in r8,r10)
;;   need_result=0 -> emits __generic_simod  (remainder in r8,r10)
;; Algorithm: classic shift-and-subtract restoring division.  "den" is
;; shifted left until it would exceed "num" or its MSB is set, with
;; "bit" tracking the corresponding quotient bit (a real bit mask for
;; the div variant; just a shift count for the mod variant).  Once the
;; operands fit in 16 bits the loop switches to a cheaper HImode tail.
;; Register blocks r8/r12/r16/r20 (register banks) are used as the
;; working storage; the \need_result branch chooses which block doubles
;; as the return value.  DE is deliberately left untouched so the signed
;; wrappers can keep their sign flags there.
;; ----------------------------------------------------------------------
.macro MAKE_GENERIC which,need_result
.if \need_result
quot = r8
num = r12
den = r16
bit = r20
.else
num = r8
quot = r12
den = r16
bit = r20
.endif
quotH = quot+2
quotL = quot
quotB0 = quot
quotB1 = quot+1
quotB2 = quot+2
quotB3 = quot+3
numH = num+2
numL = num
numB0 = num
numB1 = num+1
numB2 = num+2
numB3 = num+3
#define denH bc
denL = den
denB0 = den
denB1 = den+1
#define denB2 c
#define denB3 b
bitH = bit+2
bitL = bit
bitB0 = bit
bitB1 = bit+1
bitB2 = bit+2
bitB3 = bit+3
;----------------------------------------------------------------------
START_FUNC __generic_sidivmod\which
num_lt_den\which:
;; num < den: quotient is 0, remainder is num itself
.if \need_result
movw r8, #0
movw r10, #0
.else
movw ax, [sp+8]
movw r8, ax
movw ax, [sp+10]
movw r10, ax
.endif
ret
shift_den_bit16\which:
;; fast path: shift den (and bit) left by 16 in one move
movw ax, denL
movw denH, ax
movw denL, #0
.if \need_result
movw ax, bitL
movw bitH, ax
movw bitL, #0
.else
mov a, bit
add a, #16
mov bit, a
.endif
br $shift_den_bit\which
;; These routines leave DE alone - the signed functions use DE
;; to store sign information that must remain intact
.if \need_result
.global __generic_sidiv
__generic_sidiv:
.else
.global __generic_simod
__generic_simod:
.endif
;; (quot,rem) = 8[sp] /% 12[sp]
movw hl, sp
movw ax, [hl+14] ; denH
cmpw ax, [hl+10] ; numH
movw ax, [hl+12] ; denL
sknz
cmpw ax, [hl+8] ; numL
bh $num_lt_den\which
#ifdef __RL78_G10__
movw ax, denL
push ax
movw ax, bitL
push ax
movw ax, bitH
push ax
#else
sel rb2
push ax ; denL
; push bc ; denH
push de ; bitL
push hl ; bitH - stored in BC
sel rb0
#endif
;; (quot,rem) = 16[sp] /% 20[sp]
;; copy numerator
movw ax, [hl+8]
movw numL, ax
movw ax, [hl+10]
movw numH, ax
;; copy denominator
movw ax, [hl+12]
movw denL, ax
movw ax, [hl+14]
movw denH, ax
;; division by zero: return 0
movw ax, denL
or a, denB2
or a, denB3 ; not x
cmpw ax, #0
bnz $den_not_zero\which
.if \need_result
movw quotL, #0
movw quotH, #0
.else
movw numL, #0
movw numH, #0
.endif
br $!main_loop_done_himode\which
den_not_zero\which:
.if \need_result
;; zero out quot
movw quotL, #0
movw quotH, #0
.endif
;; initialize bit to 1
movw bitL, #1
movw bitH, #0
; while (den < num && !(den & (1L << BITS_MINUS_1)))
.if 1
;; see if we can short-circuit a bunch of shifts
movw ax, denH
cmpw ax, #0
bnz $shift_den_bit\which
movw ax, denL
cmpw ax, numH
bnh $shift_den_bit16\which
.endif
shift_den_bit\which:
movw ax, denH
mov1 cy,a.7 ; stop once den's MSB is set (can't shift further)
bc $enter_main_loop\which
cmpw ax, numH
movw ax, denL ; we re-use this below
sknz
cmpw ax, numL
bh $enter_main_loop\which
;; den <<= 1
; movw ax, denL ; already has it from the cmpw above
shlw ax, 1
movw denL, ax
; movw ax, denH
rolwc denH, 1
; movw denH, ax
;; bit <<= 1
.if \need_result
movw ax, bitL
shlw ax, 1
movw bitL, ax
movw ax, bitH
rolwc ax, 1
movw bitH, ax
.else
;; if we don't need to compute the quotient, we don't need an
;; actual bit *mask*, we just need to keep track of which bit
inc bitB0
.endif
br $shift_den_bit\which
;; while (bit)
main_loop\which:
;; if (num >= den) (cmp den > num)
movw ax, numH
cmpw ax, denH
movw ax, numL
sknz
cmpw ax, denL
skz
bnh $next_loop\which
;; num -= den
; movw ax, numL ; already has it from the cmpw above
subw ax, denL
movw numL, ax
movw ax, numH
sknc
decw ax ; propagate the borrow
subw ax, denH
movw numH, ax
.if \need_result
;; res |= bit
mov a, quotB0
or a, bitB0
mov quotB0, a
mov a, quotB1
or a, bitB1
mov quotB1, a
mov a, quotB2
or a, bitB2
mov quotB2, a
mov a, quotB3
or a, bitB3
mov quotB3, a
.endif
next_loop\which:
;; den >>= 1
movw ax, denH
shrw ax, 1
movw denH, ax
mov a, denB1
rorc a, 1
mov denB1, a
mov a, denB0
rorc a, 1
mov denB0, a
;; bit >>= 1
.if \need_result
movw ax, bitH
shrw ax, 1
movw bitH, ax
mov a, bitB1
rorc a, 1
mov bitB1, a
mov a, bitB0
rorc a, 1
mov bitB0, a
.else
dec bitB0
.endif
enter_main_loop\which:
;; keep the full 32-bit loop going while bit still has high bits
.if \need_result
movw ax, bitH
cmpw ax, #0
bnz $main_loop\which
.else
cmp bitB0, #15
bh $main_loop\which
.endif
;; bit is HImode now; check others
movw ax, numH ; numerator
cmpw ax, #0
bnz $bit_high_set\which
movw ax, denH ; denominator
cmpw ax, #0
bz $switch_to_himode\which
bit_high_set\which:
.if \need_result
movw ax, bitL
cmpw ax, #0
.else
cmp0 bitB0
.endif
bnz $main_loop\which
switch_to_himode\which:
.if \need_result
movw ax, bitL
cmpw ax, #0
.else
cmp0 bitB0
.endif
bz $main_loop_done_himode\which
;; From here on in, r22, r14, and r18 are all zero
;; while (bit)
main_loop_himode\which:
;; if (num >= den) (cmp den > num)
movw ax, denL
cmpw ax, numL
bh $next_loop_himode\which
;; num -= den
movw ax, numL
subw ax, denL
movw numL, ax
movw ax, numH
sknc
decw ax
subw ax, denH
movw numH, ax
.if \need_result
;; res |= bit
mov a, quotB0
or a, bitB0
mov quotB0, a
mov a, quotB1
or a, bitB1
mov quotB1, a
.endif
next_loop_himode\which:
;; den >>= 1
movw ax, denL
shrw ax, 1
movw denL, ax
.if \need_result
;; bit >>= 1
movw ax, bitL
shrw ax, 1
movw bitL, ax
.else
dec bitB0
.endif
.if \need_result
movw ax, bitL
cmpw ax, #0
.else
cmp0 bitB0
.endif
bnz $main_loop_himode\which
main_loop_done_himode\which:
;; restore the caller's register bank 2 / saved words
#ifdef __RL78_G10__
pop ax
movw bitH, ax
pop ax
movw bitL, ax
pop ax
movw denL, ax
#else
sel rb2
pop hl ; bitH - stored in BC
pop de ; bitL
; pop bc ; denH
pop ax ; denL
sel rb0
#endif
ret
END_FUNC __generic_sidivmod\which
.endm
;----------------------------------------------------------------------
;; Instantiate both variants: "_d" keeps the quotient (__generic_sidiv),
;; "_m" keeps the remainder (__generic_simod).
MAKE_GENERIC _d 1
MAKE_GENERIC _m 0
;----------------------------------------------------------------------
;; ___udivsi3 (software variant): 32-bit unsigned division.
;; In:  [sp+4..7] / [sp+8..11]; Out: r8,r10 = quotient.
;; (The generic routine sees the operands at +4 because of this extra
;; call's return address.)
START_FUNC ___udivsi3
;; r8 = 4[sp] / 8[sp]
call $!__generic_sidiv
ret
END_FUNC ___udivsi3
;; ___umodsi3 (software variant): 32-bit unsigned modulo.
;; In:  [sp+4..7] % [sp+8..11]; Out: r8,r10 = remainder.
START_FUNC ___umodsi3
;; r8 = 4[sp] % 8[sp]
call $!__generic_simod
ret
END_FUNC ___umodsi3
;----------------------------------------------------------------------
;; NEG_AX - two's-complement negate, in place, the 32-bit value stored
;; at the memory address held in AX.  Clobbers AX, HL and flags.
.macro NEG_AX
movw hl, ax
movw ax, #0
subw ax, [hl] ; low word = 0 - low (CY = borrow)
movw [hl], ax
movw ax, #0
sknc
decw ax ; propagate the borrow into the high word
subw ax, [hl+2]
movw [hl+2], ax
.endm
;----------------------------------------------------------------------
;; ----------------------------------------------------------------------
;; ___divsi3 (software variant): 32-bit signed division.
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = quotient
;; Negative operands are negated IN PLACE on the caller's stack (and
;; restored afterwards, since the caller may reuse them); D and E are
;; used as "numerator was negative" / "denominator was negative" flags,
;; which __generic_sidiv is documented to leave untouched.  The result
;; is negated when exactly one flag is set (d ^ e).
;; ----------------------------------------------------------------------
START_FUNC ___divsi3
;; r8 = 4[sp] / 8[sp]
movw de, #0 ; d = e = 0: no negations yet
mov a, [sp+7]
mov1 cy, a.7 ; CY = numerator sign
bc $div_signed_num
mov a, [sp+11]
mov1 cy, a.7 ; CY = denominator sign
bc $div_signed_den
call $!__generic_sidiv
ret
div_signed_num:
;; neg [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov d, #1 ; remember the numerator was negated
mov a, [sp+11]
mov1 cy, a.7
bnc $div_unsigned_den
div_signed_den:
;; neg [sp+8]
movw ax, sp
addw ax, #8
NEG_AX
mov e, #1 ; remember the denominator was negated
div_unsigned_den:
call $!__generic_sidiv
mov a, d
cmp0 a
bz $div_skip_restore_num
;; We have to restore the numerator [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov a, d ; NEG_AX clobbered A; reload the flag
div_skip_restore_num:
xor a, e ; quotient is negative iff exactly one operand was
bz $div_no_neg
movw ax, #r8
NEG_AX
div_no_neg:
mov a, e
cmp0 a
bz $div_skip_restore_den
;; We have to restore the denominator [sp+8]
movw ax, sp
addw ax, #8
NEG_AX
div_skip_restore_den:
ret
END_FUNC ___divsi3
;; ----------------------------------------------------------------------
;; ___modsi3 (software variant): 32-bit signed modulo.
;; In:   [sp+4..7]  = numerator,  [sp+8..11] = denominator
;; Out:  r8,r10 = remainder (takes the numerator's sign, so only the
;;       "numerator was negated" flag D forces a result negation)
;; As with ___divsi3 above, operands negated in place on the caller's
;; stack are restored before returning; D/E hold the sign flags across
;; the __generic_simod call.
;; ----------------------------------------------------------------------
START_FUNC ___modsi3
;; r8 = 4[sp] % 8[sp]
movw de, #0 ; d = e = 0: no negations yet
mov a, [sp+7]
mov1 cy, a.7 ; CY = numerator sign
bc $mod_signed_num
mov a, [sp+11]
mov1 cy, a.7 ; CY = denominator sign
bc $mod_signed_den
call $!__generic_simod
ret
mod_signed_num:
;; neg [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov d, #1 ; remember the numerator was negated
mov a, [sp+11]
mov1 cy, a.7
bnc $mod_unsigned_den
mod_signed_den:
;; neg [sp+8]
movw ax, sp
addw ax, #8
NEG_AX
mov e, #1 ; remember the denominator was negated
mod_unsigned_den:
call $!__generic_simod
mov a, d
cmp0 a
bz $mod_no_neg
movw ax, #r8 ; negate the remainder (follows the numerator's sign)
NEG_AX
;; We have to restore [sp+4] as well.
movw ax, sp
addw ax, #4
NEG_AX
mod_no_neg:
.if 1
mov a, e
cmp0 a
bz $mod_skip_restore_den
movw ax, sp
addw ax, #8
NEG_AX
mod_skip_restore_den:
.endif
ret
END_FUNC ___modsi3
;----------------------------------------------------------------------
#else
#error "Unknown RL78 hardware multiply/divide support"
#endif
|
4ms/metamodule-plugin-sdk
| 13,334
|
plugin-libc/libgcc/config/rl78/divmodhi.S
|
/* HImode div/mod functions for the GCC support library for the Renesas RL78 processors.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "vregs.h"
#if defined __RL78_MUL_G14__
;; int __divhi3 (int num, int den) -- RL78/G14 variant.
;; Uses the G14 DIVHU instruction (AX = AX / DE unsigned).  Negative
;; operands are negated into positive form first; if exactly one was
;; negative the quotient is negated afterwards.
;; NOTE(review): PSW is saved and interrupts disabled around DIVHU --
;; presumably because the multi-clock divide must not be interrupted;
;; confirm against the RL78/G14 hardware manual.
START_FUNC ___divhi3
;; r8 = 4[sp] / 6[sp]
;; Test for a negative denumerator.
movw ax, [sp+6]
mov1 cy, a.7
movw de, ax                     ; DE = denominator (DIVHU divisor)
bc $__div_neg_den
;; Test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
bc $__div_neg_num
;; Neither are negative - we can use the unsigned divide instruction.
__div_no_convert:
push psw
di
divhu                           ; AX = AX / DE, DE = remainder
pop psw
movw r8, ax
ret
__div_neg_den:
;; Negate the denumerator (which is in DE)
clrw ax
subw ax, de
movw de, ax
;; Test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
;; If it is not negative then we perform the division and then negate the result.
bnc $__div_then_convert
;; Otherwise we negate the numerator and then go with an unsigned division.
movw bc, ax
clrw ax
subw ax, bc
br $__div_no_convert
__div_neg_num:
;; Negate the numerator (which is in AX)
;; We know that the denumerator is positive.
movw bc, ax
clrw ax
subw ax, bc
__div_then_convert:
push psw
di
divhu
pop psw
;; Negate result and transfer into r8
movw bc, ax
clrw ax
subw ax, bc
movw r8, ax
ret
END_FUNC ___divhi3
;----------------------------------------------------------------------
;----------------------------------------------------------------------
;; int __modhi3 (int num, int den) -- RL78/G14 variant.
;; Like __divhi3 but returns the remainder, which DIVHU leaves in DE.
;; The result is negated only when the *numerator* was negative
;; (C remainder takes the sign of the numerator).
START_FUNC ___modhi3
;; r8 = 4[sp] % 6[sp]
;; Test for a negative denumerator.
movw ax, [sp+6]
mov1 cy, a.7
movw de, ax                     ; DE = denominator (DIVHU divisor)
bc $__mod_neg_den
;; Test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
bc $__mod_neg_num
;; Neither are negative - we can use the unsigned divide instruction.
__mod_no_convert:
push psw
di
divhu                           ; AX = quotient, DE = remainder
pop psw
movw ax, de
movw r8, ax
ret
__mod_neg_den:
;; Negate the denumerator (which is in DE)
clrw ax
subw ax, de
movw de, ax
;; Test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
;; If it is not negative then we perform the modulo operation without conversion.
bnc $__mod_no_convert
;; Otherwise we negate the numerator and then go with an unsigned modulo operation.
movw bc, ax
clrw ax
subw ax, bc
br $__mod_then_convert
__mod_neg_num:
;; Negate the numerator (which is in AX)
;; We know that the denumerator is positive.
movw bc, ax
clrw ax
subw ax, bc
__mod_then_convert:
push psw
di
divhu
pop psw
;; Negate result and transfer into r8
clrw ax
subw ax, de
movw r8, ax
ret
END_FUNC ___modhi3
;----------------------------------------------------------------------
#elif defined __RL78_MUL_G13__
;; The G13 S2 core does not have a 16 bit divide peripheral.
;; So instead we perform a 32-bit divide and twiddle the inputs
;; as necessary.
;; Hardware registers. Note - these values match the silicon, not the documentation.
;; MDA = dividend / result, MDB = divisor, MDC = remainder,
;; MDUC = multiplier/divider control register.
MDAL = 0xffff0
MDAH = 0xffff2
MDBL = 0xffff6
MDBH = 0xffff4
MDCL = 0xf00e0
MDCH = 0xf00e2
MDUC = 0xf00e8
;; _Negate - AX/BC-clobbering 16-bit negate: \dest = 0 - [\src],
;; where \src and \dest are absolute (ES-less) addresses.
.macro _Negate src, dest
movw ax, !\src
movw bc, ax
clrw ax
subw ax, bc
movw \dest, ax
.endm
;----------------------------------------------------------------------
START_FUNC ___divhi3
;; r8 = 4[sp] / 6[sp] (signed division)
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
clrw ax ; Clear the top 16-bits of the divisor and dividend
movw MDBH, ax
movw MDAH, ax
;; Load and test for a negative denumerator.
movw ax, [sp+6]
movw MDBL, ax
mov1 cy, a.7
bc $__div_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
movw MDAL, ax
bc $__div_neg_num
;; Neither are negative - we can use the unsigned divide hardware.
__div_no_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, MDAL ; Read the result
movw r8, ax
ret
__div_neg_den:
;; Negate the denumerator (which is in MDBL)
_Negate MDBL MDBL
;; Load and test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
movw MDAL, ax
;; If it is not negative then we perform the division and then negate the result.
bnc $__div_then_convert
;; Otherwise we negate the numerator and then go with a straightforward unsigned division.
_Negate MDAL MDAL
br $!__div_no_convert
__div_neg_num:
;; Negate the numerator (which is in MDAL)
;; We know that the denumerator is positive.
_Negate MDAL MDAL
__div_then_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
;; Negate result and transfer into r8
_Negate MDAL r8
ret
END_FUNC ___divhi3
;----------------------------------------------------------------------
;; int __modhi3 (int num, int den) -- RL78/G13 variant.
;; Same MDU sequence as __divhi3 but the remainder is read from MDC.
;; The remainder is negated only when the numerator was negative.
START_FUNC ___modhi3
;; r8 = 4[sp] % 6[sp] (signed modulus)
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
clrw ax ; Clear the top 16-bits of the divisor and dividend
movw MDBH, ax
movw MDAH, ax
;; Load and test for a negative denumerator.
movw ax, [sp+6]
movw MDBL, ax
mov1 cy, a.7
bc $__mod_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
movw MDAL, ax
bc $__mod_neg_num
;; Neither are negative - we can use the unsigned divide hardware
__mod_no_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDCL ; Read the remainder
movw r8, ax
ret
__mod_neg_den:
;; Negate the denumerator (which is in MDBL)
_Negate MDBL MDBL
;; Load and test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
movw MDAL, ax
;; If it is not negative then we perform the modulo operation without conversion.
bnc $__mod_no_convert
;; Otherwise we negate the numerator and then go with a modulo followed by negation.
_Negate MDAL MDAL
br $!__mod_then_convert
__mod_neg_num:
;; Negate the numerator (which is in MDAL)
;; We know that the denumerator is positive.
_Negate MDAL MDAL
__mod_then_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
_Negate MDCL r8
ret
END_FUNC ___modhi3
;----------------------------------------------------------------------
;; unsigned int __udivhi3 (unsigned num, unsigned den) -- G13 variant.
;; Straight unsigned divide through the MDU; no sign handling needed.
START_FUNC ___udivhi3
;; r8 = 4[sp] / 6[sp] (unsigned division)
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
movw ax, [sp+4] ; Load the dividend (numerator)
movw MDAL, ax
movw ax, [sp+6] ; Load the divisor (denominator)
movw MDBL, ax
clrw ax ; Zero-extend both 16-bit operands to 32 bits
movw MDAH, ax
movw MDBH, ax
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDAL ; Read the quotient
movw r8, ax
ret
END_FUNC ___udivhi3
;----------------------------------------------------------------------
;----------------------------------------------------------------------
;; unsigned int __umodhi3 (unsigned num, unsigned den) -- G13 variant.
;; Identical MDU setup to __udivhi3; the remainder is read from MDC.
START_FUNC ___umodhi3
;; r8 = 4[sp] % 6[sp] (unsigned modulus)
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
movw ax, [sp+4] ; Load the dividend (numerator)
movw MDAL, ax
movw ax, [sp+6] ; Load the divisor (denominator)
movw MDBL, ax
clrw ax ; Zero-extend both 16-bit operands to 32 bits
movw MDAH, ax
movw MDBH, ax
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDCL ; Read the remainder
movw r8, ax
ret
END_FUNC ___umodhi3
;----------------------------------------------------------------------
#elif defined __RL78_MUL_NONE__
;; MAKE_GENERIC which,need_result
;; Expands to a 16-bit shift/subtract divide (need_result=1) or
;; remainder (need_result=0) routine for cores with no divide hardware.
;; The vreg allocation differs so that the wanted value ends up in r8:
;;   need_result=1: quot=r8, num=r10   need_result=0: num=r8, quot=r10
;; The #defines below override the symbolic "bit" assignments so the
;; shifting bit mask lives in the real BC register pair (faster than a
;; memory vreg); the "#ifdef bit" test further down picks that form.
.macro MAKE_GENERIC which,need_result
.if \need_result
quot = r8
num = r10
den = r12
bit = r14
.else
num = r8
quot = r10
den = r12
bit = r14
.endif
quotB0 = quot
quotB1 = quot+1
numB0 = num
numB1 = num+1
denB0 = den
denB1 = den+1
bitB0 = bit
bitB1 = bit+1
#define bit bc
#define bitB0 c
#define bitB1 b
START_FUNC __generic_hidivmod\which
num_lt_den\which:
;; num < den: quotient is 0, remainder is the numerator itself.
.if \need_result
movw r8, #0
.else
movw ax, [sp+8]
movw r8, ax
.endif
ret
;; These routines leave DE alone - the signed functions use DE
;; to store sign information that must remain intact
.if \need_result
.global __generic_hidiv
__generic_hidiv:
.else
.global __generic_himod
__generic_himod:
.endif
;; (quot,rem) = 8[sp] /% 10[sp]
;; (offsets are +4 relative to the wrappers because of their CALL).
movw hl, sp
movw ax, [hl+10] ; denH
cmpw ax, [hl+8] ; numH
bh $num_lt_den\which
;; copy numerator
movw ax, [hl+8]
movw num, ax
;; copy denominator
movw ax, [hl+10]
movw den, ax
movw ax, den
cmpw ax, #0
bnz $den_not_zero\which
;; Division by zero: return 0 (no trap on RL78).
.if \need_result
movw quot, #0
.else
movw num, #0
.endif
ret
den_not_zero\which:
.if \need_result
;; zero out quot
movw quot, #0
.endif
;; initialize bit to 1
movw bit, #1
; while (den < num && !(den & (1L << BITS_MINUS_1)))
shift_den_bit\which:
movw ax, den
mov1 cy,a.7
bc $enter_main_loop\which
cmpw ax, num
bh $enter_main_loop\which
;; den <<= 1
; movw ax, den ; already has it from the cmpw above
shlw ax, 1
movw den, ax
;; bit <<= 1
.if \need_result
#ifdef bit
shlw bit, 1
#else
movw ax, bit
shlw ax, 1
movw bit, ax
#endif
.else
;; if we don't need to compute the quotient, we don't need an
;; actual bit *mask*, we just need to keep track of which bit
inc bitB0
.endif
br $shift_den_bit\which
main_loop\which:
;; if (num >= den) (cmp den > num)
movw ax, den
cmpw ax, num
bh $next_loop\which
;; num -= den
movw ax, num
subw ax, den
movw num, ax
.if \need_result
;; res |= bit
mov a, quotB0
or a, bitB0
mov quotB0, a
mov a, quotB1
or a, bitB1
mov quotB1, a
.endif
next_loop\which:
;; den >>= 1
movw ax, den
shrw ax, 1
movw den, ax
.if \need_result
;; bit >>= 1
movw ax, bit
shrw ax, 1
movw bit, ax
.else
dec bitB0
.endif
enter_main_loop\which:
;; Loop until the bit mask (or bit counter) reaches zero.
.if \need_result
movw ax, bit
cmpw ax, #0
.else
cmp0 bitB0
.endif
bnz $main_loop\which
main_loop_done\which:
ret
END_FUNC __generic_hidivmod\which
.endm
;----------------------------------------------------------------------
;; Instantiate the divide (_d: keeps the quotient) and modulo
;; (_m: keeps the remainder) flavours of the generic routine.
MAKE_GENERIC _d 1
MAKE_GENERIC _m 0
;----------------------------------------------------------------------
;; unsigned int __udivhi3 (unsigned num, unsigned den)
;; Thin wrapper over the generic shift/subtract divide.
START_FUNC ___udivhi3
;; r8 = 4[sp] / 6[sp]
call $!__generic_hidiv
ret
END_FUNC ___udivhi3
;; unsigned int __umodhi3 (unsigned num, unsigned den)
;; Thin wrapper over the generic shift/subtract remainder.
START_FUNC ___umodhi3
;; r8 = 4[sp] % 6[sp]
call $!__generic_himod
ret
END_FUNC ___umodhi3
;----------------------------------------------------------------------
;; NEG_AX - negate, in place, the 16-bit value whose address is in AX.
;; Clobbers: AX, HL, flags.
.macro NEG_AX
movw hl, ax
movw ax, #0
subw ax, [hl]
movw [hl], ax
.endm
;----------------------------------------------------------------------
;----------------------------------------------------------------------
;; int __divhi3 (int num, int den) -- no-hardware variant.
;; Negates negative operands in place on the caller's stack, divides
;; unsigned via __generic_hidiv (which preserves DE), then restores
;; the operands and fixes up the sign of the quotient.
;; D = 1 if the numerator was negated, E = 1 if the denominator was.
START_FUNC ___divhi3
;; r8 = 4[sp] / 6[sp]
movw de, #0
mov a, [sp+5]                   ; sign byte of the numerator
mov1 cy, a.7
bc $div_signed_num
mov a, [sp+7]                   ; sign byte of the denominator
mov1 cy, a.7
bc $div_signed_den
call $!__generic_hidiv
ret
div_signed_num:
;; neg [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov d, #1
mov a, [sp+7]
mov1 cy, a.7
bnc $div_unsigned_den
div_signed_den:
;; neg [sp+6]
movw ax, sp
addw ax, #6
NEG_AX
mov e, #1
div_unsigned_den:
call $!__generic_hidiv
mov a, d
cmp0 a
bz $div_skip_restore_num
;; We have to restore the numerator [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov a, d
div_skip_restore_num:
xor a, e                        ; signs differ => quotient is negative
bz $div_no_neg
movw ax, #r8
NEG_AX
div_no_neg:
mov a, e
cmp0 a
bz $div_skip_restore_den
;; Restore the denominator [sp+6] if we negated it.
movw ax, sp
addw ax, #6
NEG_AX
div_skip_restore_den:
ret
END_FUNC ___divhi3
;; int __modhi3 (int num, int den) -- no-hardware variant.
;; Same scheme as __divhi3 but the remainder only takes the sign of
;; the numerator (tracked in D).
START_FUNC ___modhi3
;; r8 = 4[sp] % 6[sp]
movw de, #0
mov a, [sp+5]                   ; sign byte of the numerator
mov1 cy, a.7
bc $mod_signed_num
mov a, [sp+7]                   ; sign byte of the denominator
mov1 cy, a.7
bc $mod_signed_den
call $!__generic_himod
ret
mod_signed_num:
;; neg [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov d, #1
mov a, [sp+7]
mov1 cy, a.7
bnc $mod_unsigned_den
mod_signed_den:
;; neg [sp+6]
movw ax, sp
addw ax, #6
NEG_AX
mod_unsigned_den:
call $!__generic_himod
mov a, d
cmp0 a
bz $mod_no_neg
movw ax, #r8
NEG_AX
;; Also restore numerator
movw ax, sp
addw ax, #4
NEG_AX
mod_no_neg:
mov a, e
cmp0 a
bz $mod_skip_restore_den
movw ax, sp
addw ax, #6
NEG_AX
mod_skip_restore_den:
ret
END_FUNC ___modhi3
;----------------------------------------------------------------------
#else
#error "Unknown RL78 hardware multiply/divide support"
#endif
|
4ms/metamodule-plugin-sdk
| 2,733
|
plugin-libc/libgcc/config/rl78/trampoline.S
|
/* libgcc routines for RL78
Copyright (C) 2011-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* RL78 Trampoline support
Since the RL78's RAM is not in the first 64k, we cannot "just" use a
function pointer to point to a trampoline on the stack. So, we
create N fixed trampolines that read from an array, and allocate
them as needed.
*/
#include "vregs.h"
;; Fixed pool of trampoline slots.  Each "stub n" emits:
;;  - a tiny .text stub that loads its chain value into r14 (the
;;    static-chain vreg) and jumps to its recorded target address;
;;  - a 4-short .data record: owning frame SP, stub address, static
;;    chain, target address (offsets TO_FRAME..TO_ADDR, TO_SIZE=8).
;; A slot is free when its TO_ADDR field is zero.
.data
.p2align 1
trampoline_array:
.macro stub n
.text
trampoline_\n:
.type trampoline_\n, @function
movw ax, !trampoline_chain_\n
movw r14, ax
movw ax, !trampoline_addr_\n
br ax
.size trampoline_\n, .-trampoline_\n
.data
trampoline_frame_\n:
.short 0
trampoline_stub_\n:
.short trampoline_\n
trampoline_chain_\n:
.short 0
trampoline_addr_\n:
.short 0
#define TO_FRAME 0
#define TO_STUB 2
#define TO_CHAIN 4
#define TO_ADDR 6
#define TO_SIZE 8
.endm
stub 0
stub 1
stub 2
stub 3
stub 4
stub 5
trampoline_array_end:
/* Given the function pointer in R8 and the static chain
pointer in R10, allocate a trampoline and return its address in
R8. */
;; __trampoline_init: allocate a trampoline slot.
;; In:  r8 = target function, r10 = static chain.
;; Out: r8 = address of the stub to use as the function pointer.
;; Scans the array for a slot with TO_ADDR == 0; executes BRK (trap)
;; if all six slots are in use.  Records the current SP so that
;; __trampoline_uninit can later free slots belonging to dead frames.
START_FUNC ___trampoline_init
movw hl, #trampoline_array
1: movw ax, [hl + TO_ADDR]
cmpw ax, #0
bz $2f
movw ax, hl
addw ax, #TO_SIZE
movw hl, ax
cmpw ax, #trampoline_array_end
bnz $1b
brk ; no more slots?
2: movw ax, r8
movw [hl + TO_ADDR], ax
movw ax, r10
movw [hl + TO_CHAIN], ax
movw ax, sp
movw [hl + TO_FRAME], ax
movw ax, [hl + TO_STUB]
movw r8, ax
ret
END_FUNC ___trampoline_init
;; __trampoline_uninit: free every trampoline slot whose recorded
;; frame SP is below the current SP (i.e. whose owning stack frame
;; has been popped), by zeroing its TO_ADDR field.
START_FUNC ___trampoline_uninit
movw hl, #trampoline_array
movw ax, sp
movw bc, ax                     ; BC = current SP threshold
1: movw ax, [hl + TO_FRAME]
cmpw ax, bc
bc $2f                          ; slot's frame still live: skip
clrw ax
movw [hl + TO_ADDR], ax         ; mark slot free
2: movw ax, hl
addw ax, #TO_SIZE
movw hl, ax
cmpw ax, #trampoline_array_end
bnz $1b
ret
END_FUNC ___trampoline_uninit
|
4ms/metamodule-plugin-sdk
| 12,811
|
plugin-libc/libgcc/config/rl78/fpbit-sf.S
|
; SF format is:
;
; [sign] 1.[23bits] E[8bits(n-127)]
;
; SEEEEEEE Emmmmmmm mmmmmmmm mmmmmmmm
;
; [A+0] mmmmmmmm
; [A+1] mmmmmmmm
; [A+2] Emmmmmmm
; [A+3] SEEEEEEE
;
; Special values (xxx != 0):
;
; s1111111 10000000 00000000 00000000 infinity
; s1111111 1xxxxxxx xxxxxxxx xxxxxxxx NaN
; s0000000 00000000 00000000 00000000 zero
; s0000000 0xxxxxxx xxxxxxxx xxxxxxxx denormals
;
; Note that CMPtype is "signed char" for rl78
;
#include "vregs.h"
#define Z PSW.6
START_FUNC ___negsf2
;; Negate the floating point value (flip the IEEE sign bit only;
;; works for zeros, NaNs and infinities alike).
;; Input at [SP+4]..[SP+7].
;; Output to R8..R11.
movw ax, [SP+4]
movw r8, ax
movw ax, [SP+6]                 ; A = [SP+7] = sign/exponent byte
xor a, #0x80                    ; flip the sign bit
movw r10, ax
ret
END_FUNC ___negsf2
;; ------------------internal functions used by later code --------------
START_FUNC __int_isnan
;; [HL] points to value, returns Z if it's a NaN
;; NaN = exponent all ones AND mantissa non-zero.
mov a, [hl+2]
and a, #0x80                    ; top mantissa byte's exponent bit
mov x, a
mov a, [hl+3]
and a, #0x7f                    ; sign stripped
cmpw ax, #0x7f80                ; exponent field == 0xFF ?
skz
ret ; return NZ if not NaN
mov a, [hl+2]
and a, #0x7f
or a, [hl+1]
or a, [hl]                      ; OR together all mantissa bits
bnz $1f
clr1 Z ; exponent all ones, mantissa zero: infinity, not NaN -> NZ
ret
1:
set1 Z ; nan
ret
END_FUNC __int_isnan
START_FUNC __int_eithernan
;; call from toplevel functions, returns Z if either number is a NaN,
;; or NZ if both are OK.
;; Args are at [sp+8]/[sp+12] here: the caller's [sp+4]/[sp+8]
;; plus this function's own return address.
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isnan
bz $1f                          ; first arg is NaN: done
movw ax, sp
addw ax, #12
movw hl, ax
call $!__int_isnan
1:
ret
END_FUNC __int_eithernan
START_FUNC __int_iszero
;; [HL] points to value, returns Z if it's zero
;; The sign bit is masked off, so both +0.0 and -0.0 count as zero.
mov a, [hl+3]
and a, #0x7f
or a, [hl+2]
or a, [hl+1]
or a, [hl]
ret
END_FUNC __int_iszero
START_FUNC __int_cmpsf
;; This is always called from some other function here,
;; so the stack offsets are adjusted accordingly.
;; X [SP+8] <=> Y [SP+12] : <a> <=> 0
;; Returns A = -1 / 0 / +1 for X < Y / X == Y / X > Y.
;; NaNs must have been screened out by the caller (__int_eithernan).
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_iszero
bnz $1f
movw ax, sp
addw ax, #12
movw hl, ax
call $!__int_iszero
bnz $2f
;; At this point, both args are zero (handles +0.0 == -0.0).
mov a, #0
ret
2:
movw ax, sp
addw ax, #8
movw hl, ax
1:
;; At least one arg is non-zero so we can just compare magnitudes.
;; Args are [HL] and [HL+4].
mov a, [HL+3]
xor a, [HL+7]
mov1 cy, a.7
bnc $1f
;; Signs differ: result follows the sign of X.
mov a, [HL+3]
sar a, 7                        ; A = 0x00 (pos) or 0xFF (neg)
or a, #1                        ; => +1 or -1
ret
1: ;; Signs the same, compare magnitude. It's safe to lump
;; the sign bits, exponent, and mantissa together here, since they're
;; stored in the right sequence.
movw ax, [HL+2]
cmpw ax, [HL+6]
bc $ybig_cmpsf ; branch if X < Y
bnz $xbig_cmpsf ; branch if X > Y
movw ax, [HL]
cmpw ax, [HL+4]
bc $ybig_cmpsf ; branch if X < Y
bnz $xbig_cmpsf ; branch if X > Y
mov a, #0
ret
xbig_cmpsf: ; |X| > |Y| so return A = 1 if pos, 0xff if neg
mov a, [HL+3]
sar a, 7
or a, #1
ret
ybig_cmpsf: ; |X| < |Y| so return A = 0xff if pos, 1 if neg
mov a, [HL+3]
xor a, #0x80
sar a, 7
or a, #1
ret
END_FUNC __int_cmpsf
;; ----------------------------------------------------------
;; ----------------------------------------------------------
START_FUNC ___cmpsf2
;; This functions calculates "A <=> B". That is, if A is less than B
;; they return -1, if A is greater than B, they return 1, and if A
;; and B are equal they return 0. If either argument is NaN the
;; behaviour is undefined (this implementation returns 1).
;; Input at [SP+4]..[SP+7].
;; Output to R8..R9.
call $!__int_eithernan
bnz $1f
movw r8, #1                     ; NaN involved: arbitrary non-zero
ret
1:
call $!__int_cmpsf
mov r8, a
sar a, 7                        ; sign-extend the char result into R9
mov r9, a
ret
END_FUNC ___cmpsf2
;; ----------------------------------------------------------
;; These functions are all basically the same as ___cmpsf2
;; except that they define how they handle NaNs.
START_FUNC ___eqsf2
;; Returns zero iff neither argument is NaN
;; and both arguments are equal.
START_ANOTHER_FUNC ___nesf2
;; Returns non-zero iff either argument is NaN or the arguments are
;; unequal. Effectively __nesf2 is the same as __eqsf2
START_ANOTHER_FUNC ___lesf2
;; Returns a value less than or equal to zero if neither
;; argument is NaN, and the first is less than or equal to the second.
START_ANOTHER_FUNC ___ltsf2
;; Returns a value less than zero if neither argument is
;; NaN, and the first is strictly less than the second.
;; Input at [SP+4]..[SP+7].
;; Output to R8.
;; Pre-load R8 = +1, the NaN answer that makes all four predicates
;; above evaluate false; __int_cmp_common returns it if a NaN is seen.
mov r8, #1
;;; Fall through
START_ANOTHER_FUNC __int_cmp_common
call $!__int_eithernan
sknz
;; return value (pre-filled-in below) for "either is nan"
ret
call $!__int_cmpsf
mov r8, a
ret
END_ANOTHER_FUNC __int_cmp_common
END_ANOTHER_FUNC ___ltsf2
END_ANOTHER_FUNC ___lesf2
END_ANOTHER_FUNC ___nesf2
END_FUNC ___eqsf2
START_FUNC ___gesf2
;; Returns a value greater than or equal to zero if neither argument
;; is a NaN and the first is greater than or equal to the second.
START_ANOTHER_FUNC ___gtsf2
;; Returns a value greater than zero if neither argument
;; is NaN, and the first is strictly greater than the second.
;; Pre-load R8 = -1, the NaN answer that makes both predicates false.
mov r8, #0xffff
br $__int_cmp_common
END_ANOTHER_FUNC ___gtsf2
END_FUNC ___gesf2
;; ----------------------------------------------------------
;; ----------------------------------------------------------
START_FUNC ___unordsf2
;; Returns a nonzero value if either argument is NaN, otherwise 0.
call $!__int_eithernan
movw r8, #0                     ; MOVW does not touch the flags
sknz ; this is from the call, not the movw
movw r8, #1
ret
END_FUNC ___unordsf2
;; ----------------------------------------------------------
START_FUNC ___fixsfsi
;; Converts its floating point argument into a signed long,
;; rounding toward zero.
;; The behaviour with NaNs and Infinities is not well defined.
;; We choose to return 0 for NaNs, -INTMAX for -inf and INTMAX for +inf.
;; This matches the behaviour of the C function in libgcc2.c.
;; Input at [SP+4]..[SP+7], result is in (lsb) R8..R11 (msb).
;; Special case handling for infinities as __fixunssfsi
;; will not give us the values that we want.
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_isinf
bnz $1f
mov a, [SP+7]
bt a.7, $2f
;; +inf
movw r8, #-1
movw r10, #0x7fff
ret
;; -inf
2: mov r8, #0
mov r10, #0x8000
ret
;; Load the value into r10:r11:X:A
1: movw ax, [SP+4]
movw r10, ax
movw ax, [SP+6]
;; If the value is positive we can just use __fixunssfsi
bf a.7, $__int_fixunssfsi
;; Otherwise we negate the value, call __fixunssfsi and
;; then negate its result.
clr1 a.7
call $!__int_fixunssfsi
movw ax, #0
subw ax, r8
movw r8, ax
movw ax, #0
sknc
decw ax
subw ax, r10
movw r10, ax
;; Check for a positive result (which should only happen when
;; __fixunssfsi returns UINTMAX or 0). In such cases just return 0.
mov a, r11
bt a.7, $1f
movw r10,#0x0
movw r8, #0x0
1: ret
END_FUNC ___fixsfsi
START_FUNC ___fixunssfsi
;; Converts its floating point argument into an unsigned long
;; rounding towards zero. Negative arguments all become zero.
;; We choose to return 0 for NaNs and -inf, but UINTMAX for +inf.
;; This matches the behaviour of the C function in libgcc2.c.
;; Input at [SP+4]..[SP+7], result is in (lsb) R8..R11 (msb)
;; Get the input value.
movw ax, [SP+4]
movw r10, ax
movw ax, [SP+6]
;; Fall through into the internal function.
.global __int_fixunssfsi
__int_fixunssfsi:
;; Input in (lsb) r10.r11.x.a (msb).
;; Test for a negative input. We shift the other bits at the
;; same time so that A ends up holding the whole exponent:
;;
;; before:
;; SEEEEEEE EMMMMMMM MMMMMMMM MMMMMMMM
;; A X R11 R10
;;
;; after:
;; EEEEEEEE MMMMMMM0 MMMMMMMM MMMMMMMM
;; A X R11 R10
shlw ax, 1
bnc $1f                         ; carry = old sign bit
;; Return zero.
2: movw r8, #0
movw r10, #0
ret
;; An exponent of -1 is either a NaN or infinity.
1: cmp a, #-1
bnz $3f
;; For NaN we return 0. For infinity we return UINTMAX.
mov a, x
or a, r10
or a, r11                       ; any mantissa bit set => NaN
cmp0 a
bnz $2b
6: movw r8, #-1 ; -1 => UINT_MAX
movw r10, #-1
ret
;; If the exponent is negative the value is < 1 and so the
;; converted value is 0. Note we must allow for the bias
;; applied to the exponent. Thus a value of 127 in the
;; EEEEEEEE bits actually represents an exponent of 0, whilst
;; a value less than 127 actually represents a negative exponent.
;; Also if the EEEEEEEE bits are all zero then this represents
;; either a denormal value or 0.0. Either way for these values
;; we return 0.
3: sub a, #127
bc $2b
;; A now holds the bias adjusted exponent, which is known to be >= 0.
;; If the exponent is > 31 then the conversion will overflow.
cmp a, #32
bnc $6b
4:
;; Save the exponent in H. We increment it by one because we want
;; to be sure that the loop below will always execute at least once.
inc a
mov h, a
;; Get the top 24 bits of the mantissa into A:X:R10
;; Include the implicit 1-bit that is inherent in the IEEE fp format.
;;
;; before:
;; EEEEEEEE MMMMMMM0 MMMMMMMM MMMMMMMM
;; H X R11 R10
;; after:
;; EEEEEEEE 1MMMMMMM MMMMMMMM MMMMMMMM
;; H A X R10
mov a, r11
xch a, x
shr a, 1
set1 a.7                        ; insert the implicit leading 1 bit
;; Clear B:C:R12:R13
movw bc, #0
movw r12, #0
;; Shift bits from the mantissa (A:X:R10) into (B:C:R12:R13),
;; decrementing the exponent as we go.
;; before:
;; MMMMMMMM MMMMMMMM MMMMMMMM xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
;; A X R10 B C R12 R13
;; first iter:
;; MMMMMMMM MMMMMMMM MMMMMMM0 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxM
;; A X R10 B C R12 R13
;; second iter:
;; MMMMMMMM MMMMMMMM MMMMMM00 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxMM
;; A X R10 B C R12 R13
;; etc.
5:
xch a, r10
shl a, 1
xch a, r10
rolwc ax, 1
xch a, r13
rolc a, 1
xch a, r13
xch a, r12
rolc a, 1
xch a, r12
rolwc bc, 1
dec h
bnz $5b
;; Result is currently in (lsb) r13.r12. c. b. (msb),
;; Move it into (lsb) r8. r9. r10. r11 (msb).
mov a, r13
mov r8, a
mov a, r12
mov r9, a
mov a, c
mov r10, a
mov a, b
mov r11, a
ret
END_FUNC ___fixunssfsi
;; ------------------------------------------------------------------------
START_FUNC ___floatsisf
;; Converts its signed long argument into a floating point.
;; Argument in [SP+4]..[SP+7]. Result in R8..R11.
;; Get the argument.
movw ax, [SP+4]
movw bc, ax
movw ax, [SP+6]
;; Test the sign bit. If the value is positive then drop into
;; the unsigned conversion routine.
bf a.7, $2f
;; If negative convert to positive ...
movw hl, ax
movw ax, #0
subw ax, bc
movw bc, ax
movw ax, #0
sknc
decw ax
subw ax, hl
;; If the result is still negative then the input was 0x80000000
;; (INT_MIN, which negates to itself) and we return the constant
;; -2147483648.0 directly, which will not happen if we call
;; __int_floatunsisf.
bt a.7, $1f
;; Call the unsigned conversion routine.
call $!__int_floatunsisf
;; Negate the result.
set1 r11.7
;; Done.
ret
1: ;; Return -2147483648.0 aka 0xcf000000 (for input INT_MIN)
clrb a
mov r8, a
mov r9, a
mov r10, a
mov a, #0xcf
mov r11, a
ret
START_ANOTHER_FUNC ___floatunsisf
;; Converts its unsigned long argument into a floating point.
;; Argument in [SP+4]..[SP+7]. Result in R8..R11.
;; Get the argument.
movw ax, [SP+4]
movw bc, ax
movw ax, [SP+6]
2: ;; Internal entry point from __floatsisf
;; Input in AX (high) and BC (low)
.global __int_floatunsisf
__int_floatunsisf:
;; Special case handling for zero.
cmpw ax, #0
bnz $1f
movw ax, bc
cmpw ax, #0
movw ax, #0
bnz $1f
;; Return 0.0
movw r8, ax
movw r10, ax
ret
1: ;; Pre-load the loop count/exponent.
;; Exponents are biased by 0x80 and we start the loop knowing that
;; we are going to skip the highest set bit. Hence the highest value
;; that we can get for the exponent is 0x1e (bits from input) + 0x80 = 0x9e.
mov h, #0x9e
;; Move bits off the top of AX:BC until we hit a 1 bit.
;; Decrement the count of remaining bits as we go.
2: shlw bc, 1
rolwc ax, 1
bc $3f
dec h
br $2b
;; Ignore the first one bit - it is implicit in the IEEE format.
;; The count of remaining bits is the exponent.
;; Assemble the final floating point value. We have...
;; before:
;; EEEEEEEE MMMMMMMM MMMMMMMM MMMMMMMM xxxxxxxx
;; H A X B C
;; after:
;; 0EEEEEEE EMMMMMMM MMMMMMMM MMMMMMMM
;; R11 R10 R9 R8
3: shrw ax, 1
mov r10, a
mov a, x
mov r9, a
mov a, b
rorc a, 1
;; If the bottom bit of B was set before we shifted it out then we
;; need to round the result up. Unless none of the bits in C are set.
;; In this case we are exactly half-way between two values, and we
;; round towards an even value. We round up by increasing the
;; mantissa by 1. If this results in a zero mantissa we have to
;; increment the exponent. We round down by ignoring the dropped bits.
bnc $4f
cmp0 c
sknz
bf a.0, $4f                     ; halfway and even: round down
5: ;; Round the mantissa up by 1.
add a, #1
addc r9, #0
addc r10, #0
bf r10.7, $4f
inc h                           ; mantissa overflowed: bump exponent
clr1 r10.7
4: mov r8, a
mov a, h
shr a, 1
mov r11, a
sknc
set1 r10.7                      ; move exponent's low bit into r10.7
ret
END_ANOTHER_FUNC ___floatunsisf
END_FUNC ___floatsisf
|
4ms/metamodule-plugin-sdk
| 1,937
|
plugin-libc/libgcc/config/rl78/umaxdi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; unsigned long long __umaxdi3 (unsigned long long a, unsigned long long b)
;; Returns the larger of two unsigned 64-bit values.
;; a is at [sp+4..11], b at [sp+12..19]; result in R8..R15 (lsw first).
START_FUNC ___umaxdi3
; copy first argument/operand to the output registers
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
movw ax, [sp+8]
movw r12, ax
movw ax, [sp+10]
movw r14, ax
; use 16-bit compares from the most significant words downto the least significant ones
movw ax, [sp+18]
cmpw ax, r14
bh $.L1                         ; b > a: return b
bnz $.L2                        ; b < a: keep a
movw ax, [sp+16]
cmpw ax, r12
bh $.L1
bnz $.L2
movw ax, [sp+14]
cmpw ax, r10
bh $.L1
bnz $.L2
movw ax, [sp+12]
cmpw ax, r8
bh $.L1
ret                             ; equal or a larger: keep a
.L1:
; copy second argument/operand to the output registers
movw ax, [sp+12]
movw r8, ax
movw ax, [sp+14]
movw r10, ax
movw ax, [sp+16]
movw r12, ax
movw ax, [sp+18]
movw r14, ax
.L2:
ret
END_FUNC ___umaxdi3
|
4ms/metamodule-plugin-sdk
| 1,802
|
plugin-libc/libgcc/config/rl78/adddi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; unsigned long long __adddi3 (unsigned long long a, unsigned long long b)
;; 64-bit add: a at [sp+4..11], b at [sp+12..19]; result in R8..R15.
;; Mixes ADDW (first word), byte ADDC (middle) and a SKNC/INCW carry
;; fix-up plus ADDW (last word), since there is no 16-bit add-with-carry.
START_FUNC ___adddi3
movw hl, sp ; use HL-based addressing (allows for direct addw)
movw ax, [hl+4]
addw ax, [hl+12]
movw r8, ax
mov a, [hl+6] ; middle bytes of the result are determined using 8-bit
addc a, [hl+14] ; ADDC insns which both account for and update the carry bit
mov r10, a ; (no ADDWC instruction is available)
mov a, [hl+7]
addc a, [hl+15]
mov r11, a
mov a, [hl+8]
addc a, [hl+16]
mov r12, a
mov a, [hl+9]
addc a, [hl+17]
mov r13, a
movw ax, [hl+10]
sknc ; account for the possible carry from the
incw ax ; latest 8-bit operation
addw ax, [hl+18]
movw r14, ax
ret
END_FUNC ___adddi3
|
4ms/metamodule-plugin-sdk
| 1,864
|
plugin-libc/libgcc/config/rl78/signbit.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
;; int signbitf (float X)
;; int signbit (double X)
;; int signbitl (long double X)
;;
;; `signbit' returns a nonzero value if the value of X has its sign
;; bit set.
;;
;; This is not the same as `x < 0.0', because IEEE 754 floating point
;; allows zero to be signed. The comparison `-0.0 < 0.0' is false,
;; but `signbit (-0.0)' will return a nonzero value.
;----------------------------------------------------------------------
.text
;; _signbit / _signbitf share one body: on RL78, double is 32 bits wide,
;; so float and double have the same layout and the same sign-bit byte.
START_FUNC _signbit
START_ANOTHER_FUNC _signbitf
;; X is at [sp+4]..[SP+7]
;; result is in R8..R9
movw r8, #0 ; default: sign bit clear -> return 0
mov a, [sp+7] ; most-significant byte of X holds the IEEE sign bit
mov1 cy, a.7 ; copy the sign bit into the carry flag
sknc ; skip the next insn if the sign bit was clear
movw r8, #1
ret
END_ANOTHER_FUNC _signbitf
END_FUNC _signbit
;; _signbitl: long double variant.  long double is 64 bits wide here, so
;; the sign bit lives in the eighth byte of X.
START_FUNC _signbitl
;; X is at [sp+4]..[SP+11] (64-bit operand; the MSB is [sp+11])
;; result is in R8..R9
movw r8, #0 ; default: sign bit clear -> return 0
mov a, [sp+11] ; most-significant byte of X holds the sign bit
mov1 cy, a.7 ; copy the sign bit into the carry flag
sknc ; skip the next insn if the sign bit was clear
movw r8, #1
ret
END_FUNC _signbitl
|
4ms/metamodule-plugin-sdk
| 3,752
|
plugin-libc/libgcc/config/rl78/bit-count.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
;; ___clzhi2: count leading zeros of a 16-bit value.
;; Returns 16 for an input of zero (GCC's CLZ is undefined at zero;
;; this implementation chooses the bit width).
START_FUNC ___clzhi2
;; Argument is in [SP+4], return in R8.
movw ax, [SP+4]
.global __clzhi2_internal
__clzhi2_internal: ; entry with the argument already in AX (used by ___clzsi2)
movw r8, #16 ; preload the all-zero answer
cmpw ax, #0
bz $clzhi2_is_zero
mov e, #0xff ; loop counter; the first INC brings it to 0
1:
inc e
shlw ax, 1 ; shift left until the topmost set bit falls into carry
bnc $1b
mov a, e ; E = number of shifts before the first 1 bit
mov r8, a
clzhi2_is_zero:
ret
END_FUNC ___clzhi2
;; ___clzsi2: count leading zeros of a 32-bit value.
;; A non-zero high word decides the answer by itself; otherwise count
;; the low word's leading zeros and add the 16 zeros of the high word.
START_FUNC ___clzsi2
;; Argument is in [SP+6]:[SP+4], return in R8.
movw ax, [SP+6] ; high word
cmpw ax, #0
bnz $__clzhi2_internal ; non-zero: tail-branch, its CLZ is the result
movw ax, [SP+4] ; high word is zero: count zeros of the low word
call !__clzhi2_internal
movw ax, r8
addw ax, #16 ; ...plus 16 for the all-zero high word
movw r8, ax
ret
END_FUNC ___clzsi2
;; ___ctzhi2: count trailing zeros of a 16-bit value.
;; Mirror image of ___clzhi2: shifts right instead of left.
;; Returns 16 for an input of zero (CTZ is undefined at zero in GCC).
START_FUNC ___ctzhi2
;; Argument is in [SP+4], return in R8.
movw ax, [SP+4]
.global __ctzhi2_internal
__ctzhi2_internal: ; entry with the argument already in AX (used by ___ctzsi2)
movw r8, #16 ; preload the all-zero answer
cmpw ax, #0
bz $ctzhi2_is_zero
mov e, #0xff ; loop counter; the first INC brings it to 0
1:
inc e
shrw ax, 1 ; shift right until the lowest set bit falls into carry
bnc $1b
mov a, e ; E = number of shifts before the first 1 bit
mov r8, a
ctzhi2_is_zero:
ret
END_FUNC ___ctzhi2
;; ___ctzsi2: count trailing zeros of a 32-bit value.
;; A non-zero low word decides the answer by itself; otherwise count
;; the high word's trailing zeros and add the 16 zeros of the low word.
START_FUNC ___ctzsi2
;; Argument is in [SP+6]:[SP+4], return in R8.
movw ax, [SP+4] ; low word
cmpw ax, #0
bnz $__ctzhi2_internal ; non-zero: tail-branch, its CTZ is the result
movw ax, [SP+6] ; low word is zero: count zeros of the high word
call !__ctzhi2_internal
movw ax, r8
addw ax, #16 ; ...plus 16 for the all-zero low word
movw r8, ax
ret
END_FUNC ___ctzsi2
;; ___ffshi2: find-first-set of a 16-bit value.
;; Returns the 1-based index of the least significant set bit, or 0 if
;; the input is zero (standard ffs() semantics).
START_FUNC ___ffshi2
;; Argument is in [SP+4], return in R8.
movw ax, [SP+4]
.global __ffshi2_internal
__ffshi2_internal: ; entry with the argument already in AX (used by ___ffssi2)
movw r8, #0 ; ffs(0) == 0
cmpw ax, #0
bz $ffshi2_is_zero
mov e, #0 ; counter starts at 0 so the result is 1-based
1:
inc e
shrw ax, 1 ; shift right until the lowest set bit falls into carry
bnc $1b
mov a, e
mov r8, a
ffshi2_is_zero:
ret
END_FUNC ___ffshi2
;; ___ffssi2: find-first-set of a 32-bit value (1-based index, 0 if none).
;; A non-zero low word decides the answer; otherwise, if the high word is
;; also zero the result is 0 (AX already holds 0 at label 1), else the
;; high word's ffs plus 16.
START_FUNC ___ffssi2
;; Argument is in [SP+6]:[SP+4], return in R8.
movw ax, [SP+4] ; low word
cmpw ax, #0
bnz $__ffshi2_internal ; non-zero: tail-branch, its ffs is the result
movw ax, [SP+6] ; low word is zero: inspect the high word
cmpw ax, #0
bz $1f ; both words zero: fall through with AX == 0
call !__ffshi2_internal
movw ax, r8
addw ax, #16 ; bias by the 16 zero bits of the low word
1:
movw r8, ax
ret
END_FUNC ___ffssi2
;; ___parityqi_internal: parity of the byte in A.
;; XORs all eight bits of A together in the carry flag, then returns
;; that bit in R8 (0 = even number of set bits, 1 = odd).
START_FUNC ___parityqi_internal
mov1 cy, a.0 ; seed carry with bit 0...
xor1 cy, a.1 ; ...then fold in each remaining bit
xor1 cy, a.2
xor1 cy, a.3
xor1 cy, a.4
xor1 cy, a.5
xor1 cy, a.6
xor1 cy, a.7
movw ax, #0
bnc $1f ; carry clear: even parity, return 0
incw ax ; carry set: odd parity, return 1
1:
movw r8, ax
ret
END_FUNC ___parityqi_internal
;; ___parityhi2: parity of a 16-bit value.
;; XOR preserves parity, so folding the two bytes together reduces the
;; problem to the single-byte routine.
START_FUNC ___parityhi2
;; Argument is in [SP+4], return in R8.
movw ax, [SP+4]
xor a, x ; fold high byte into low byte
br $___parityqi_internal ; tail-branch; result materialised there
END_FUNC ___parityhi2
;; ___paritysi2: parity of a 32-bit value.
;; Folds all four bytes into one with XOR (parity-preserving), then
;; defers to the single-byte routine.
START_FUNC ___paritysi2
;; Argument is in [SP+6]:[SP+4], return in R8.
movw ax, [SP+4]
xor a, x ; fold the two low bytes together
mov b, a ; stash partial fold in B
movw ax, [SP+6]
xor a, x ; fold the two high bytes together
xor a, b ; combine both halves
br $___parityqi_internal ; tail-branch; result materialised there
END_FUNC ___paritysi2
;; ___popcounthi2: population count of a 16-bit value.
;; Sets the byte count for the generic byte-wise counter and branches.
START_FUNC ___popcounthi2
;; Argument is in [SP+4], return in R8.
mov d, #2 ; two argument bytes to scan
br $___popcountqi_internal
END_FUNC ___popcounthi2
;; ___popcountsi2: population count of a 32-bit value.
;; Sets the byte count for the generic byte-wise counter and branches.
START_FUNC ___popcountsi2
;; Argument is in [SP+6]:[SP+4], return in R8.
mov d, #4 ; four argument bytes to scan
br $___popcountqi_internal
END_FUNC ___popcountsi2
;; ___popcountqi_internal: count the set bits in D consecutive bytes of
;; the caller's stacked argument.  Despite the "qi" name this is the
;; shared worker: D is 2 for ___popcounthi2 and 4 for ___popcountsi2.
;; It is branched to (not called), so [sp+4] is still the argument.
START_FUNC ___popcountqi_internal
;; There are D bytes starting at [HL]
;; store count in R8.
movw ax, sp
addw ax, #4
movw hl, ax ; HL -> first argument byte on the stack
mov a, #0 ; A accumulates the running bit count
1:
xch a, b ; park the running count in B while loading...
mov a, [hl] ; ...the next argument byte
xch a, b ; now A = count again, B = byte to scan
mov e, #8 ; 8 bits per byte
2:
shl b,1 ; shift each bit of B into carry...
addc a, #0 ; ...and add it to the running count
dec e
bnz $2b
incw hl ; next byte
dec d
bnz $1b
mov x, a ; widen: result = count in low byte,
mov a, #0 ; zero in high byte
movw r8, ax
ret
END_FUNC ___popcountqi_internal
|
4ms/metamodule-plugin-sdk
| 4,963
|
plugin-libc/libgcc/config/rl78/mulsi3.S
|
; Copyright (C) 2011-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
;; 32x32=32 multiply
#include "vregs.h"
;----------------------------------------------------------------------
; Register use:
; RB0 RB1 RB2
; AX op2L res32L res32H
; BC op2H (resH) op1
; DE count (resL-tmp)
; HL [sp+4]
; Register use (G10):
;
; AX op2L
; BC op2H
; DE count
; HL [sp+4]
; r8/r9 res32L
; r10/r11 (resH)
; r12/r13 (resL-tmp)
; r16/r17 res32H
; r18/r19 op1
;; ___mulsi3: 32x32 -> 32-bit multiply by shift-and-add.
;; Result = AhiBlo*2^16 + AloBhi*2^16 + Alo*Blo, with special-casing of
;; 0 and 0xffff high words (0xffff*x is computed as a subtraction).
;; On G10 (single register bank) the RB1/RB2 bank tricks are replaced by
;; explicit virtual registers r8..r19; see the register map above.
;; NOTE(review): register-bank selection (SEL RBn) makes the non-G10
;; path read/write aliased registers (r_0, r_2) across banks - the
;; exact instruction order is load-bearing.
START_FUNC ___mulsi3
;; A is at [sp+4]
;; B is at [sp+8]
;; result is in R8..R11
#ifdef __RL78_G10__
movw ax, r16 ; preserve the virtual registers we use as scratch
push ax
movw ax, r18
push ax
#else
sel rb2 ; preserve bank-2 AX/BC (used as 32-bit accumulator)
push ax
push bc
sel rb0
#endif
clrw ax
movw r8, ax ; zero the partial-product accumulator
movw r16, ax
;; --- high word of B times low word of A -------------------------------
movw ax, [sp+14] ; B.hi
cmpw ax, #0
bz $1f ; zero: contributes nothing
cmpw ax, #0xffff
bnz $2f
movw ax, [sp+8] ; B.hi == 0xffff: 0xffff*A.lo == (A.lo<<16)-A.lo,
#ifdef __RL78_G10__
push bc ; so just subtract A.lo from the (shifted) sum
movw bc, r8
xchw ax, bc
subw ax, bc
movw r8, ax
movw ax, bc
pop bc
#else
sel rb1
subw ax, r_0
sel rb0
#endif
br $1f
2:
movw bc, ax ; generic case: R8 += A.lo * B.hi
movw ax, [sp+8]
cmpw ax, #0
skz ; skip the call when A.lo == 0
call !.Lmul_hi
1:
;; --- high word of A times low word of B (same pattern) ----------------
movw ax, [sp+10] ; A.hi
cmpw ax, #0
bz $1f
cmpw ax, #0xffff
bnz $2f
movw ax, [sp+12] ; A.hi == 0xffff: subtract B.lo instead of multiplying
#ifdef __RL78_G10__
push bc
movw bc, r8
xchw ax, bc
subw ax, bc
movw r8, ax
movw ax, bc
pop bc
#else
sel rb1
subw ax, r_0
sel rb0
#endif
br $1f
2:
movw bc, ax ; generic case: R8 += B.lo * A.hi
movw ax, [sp+12]
cmpw ax, #0
skz
call !.Lmul_hi
1:
;; Cross products only affect the top 16 bits: shift them up by moving
;; the accumulated sum into the high half and clearing the low half.
movw ax, r8
movw r16, ax
clrw ax
movw r8, ax
;; now do R16:R8 += op1L * op2L
;; op1 is in AX.0 (needs to shrw)
;; op2 is in BC.2 and BC.1 (bc can shlw/rolcw)
;; res is in AX.2 and AX.1 (needs to addw)
movw ax, [sp+8]
movw r10, ax ; BC.1
movw ax, [sp+12]
cmpw ax, r10 ; keep the smaller operand in AX so the
bc $.Lmul_hisi_top ; shift loop terminates sooner
movw bc, r10
movw r10, ax
movw ax, bc
.Lmul_hisi_top:
movw bc, #0 ; BC.2:BC.1 = 32-bit multiplicand, shifted left each round
.Lmul_hisi_loop:
;; Unrolled x2: each half shifts the multiplier right, conditionally
;; adds the multiplicand into the 32-bit accumulator, then doubles
;; the multiplicand (shlw + rolwc across the word pair).
shrw ax, 1
#ifdef __RL78_G10__
push ax
bnc $.Lmul_hisi_no_add_g10
movw ax, r8
addw ax, r10
movw r8, ax
sknc ; propagate the low-half carry into the high half
incw r16
movw ax, r16
addw ax, r_2
movw r16, ax
.Lmul_hisi_no_add_g10:
movw ax, r10
shlw ax, 1
movw r10, ax
pop ax
#else
bnc $.Lmul_hisi_no_add
sel rb1
addw ax, bc
sel rb2
sknc ; propagate the low-half carry into the high half
incw ax
addw ax, r_2
.Lmul_hisi_no_add:
sel rb1
shlw bc, 1
sel rb0
#endif
rolwc bc, 1 ; shift the carry out of the low word into the high word
cmpw ax, #0
bz $.Lmul_hisi_done ; multiplier exhausted: stop early
shrw ax, 1
#ifdef __RL78_G10__
push ax
bnc $.Lmul_hisi_no_add2_g10
movw ax, r8
addw ax, r10
movw r8, ax
movw ax, r16
sknc
incw ax
addw ax, r_2
movw r16, ax
.Lmul_hisi_no_add2_g10:
movw ax, r10
shlw ax, 1
movw r10, ax
pop ax
#else
bnc $.Lmul_hisi_no_add2
sel rb1
addw ax, bc
sel rb2
sknc
incw ax
addw ax, r_2
.Lmul_hisi_no_add2:
sel rb1
shlw bc, 1
sel rb0
#endif
rolwc bc, 1
cmpw ax, #0
bnz $.Lmul_hisi_loop
.Lmul_hisi_done:
movw ax, r16 ; move the high half of the result into R10:R11
movw r10, ax
#ifdef __RL78_G10__
pop ax ; restore the preserved virtual registers
movw r18, ax
pop ax
movw r16, ax
#else
sel rb2 ; restore preserved bank-2 registers
pop bc
pop ax
sel rb0
#endif
ret
END_FUNC ___mulsi3
;----------------------------------------------------------------------
;; ___mulhi3: 16x16 -> 16-bit multiply by shift-and-add.
;; The label .Lmul_hi is also entered from ___mulsi3 with AX and BC
;; preloaded; it accumulates AX*BC into R8 (modulo 2^16).
;; The loop body is unrolled x2 and laid out so that the "add" path
;; branches backwards to .Lmul_hi_top and falls through the shifts.
START_FUNC ___mulhi3
movw r8, #0 ; accumulator
movw ax, [sp+6]
movw bc, ax
movw ax, [sp+4]
;; R8 += AX * BC
.Lmul_hi:
cmpw ax, bc ; keep the smaller operand in AX (fewer iterations);
skc ; skip the swap when AX < BC already
xchw ax, bc
br $.Lmul_hi_loop
.Lmul_hi_top:
;; Carry was set by the SHRW below: add the multiplicand (bank-1 BC,
;; aka r_2) into the accumulator R8.
#ifdef __RL78_G10__
push ax
movw ax, r8
addw ax, r_2
movw r8, ax
pop ax
#else
sel rb1
addw ax, r_2
sel rb0
#endif
.Lmul_hi_no_add:
shlw bc, 1 ; double the multiplicand
.Lmul_hi_loop:
shrw ax, 1 ; shift the next multiplier bit into carry
bc $.Lmul_hi_top
cmpw ax, #0
bz $.Lmul_hi_done ; multiplier exhausted
shlw bc, 1 ; second unrolled half
shrw ax, 1
bc $.Lmul_hi_top
cmpw ax, #0
bnz $.Lmul_hi_no_add
.Lmul_hi_done:
ret
END_FUNC ___mulhi3
;;; --------------------------------------
#ifdef __RL78_G10__
;; ___mulqi3 (G10 only): 8x8 -> 8-bit multiply by classic shift-and-add.
;; r9 = multiplicand (doubled each round), r10 = multiplier (halved each
;; round), r8 = accumulator, r11 = iteration bound (9 = 8 bits + exit).
;; Result (modulo 2^8) is returned in R8.
START_FUNC ___mulqi3
mov a, [sp+4]
mov r9, a ; r9 = op1
mov a, [sp+6]
mov r10, a ; r10 = op2
mov a, #9
mov r11, a ; at most 8 add/shift rounds
clrb a
mov r8, a ; accumulator = 0
.L2:
cmp0 r10 ; exit early once the multiplier is exhausted,
skz ; otherwise burn one iteration
dec r11
sknz
ret
mov a, r10 ; test the low bit of the multiplier
and a, #1
mov r12, a
cmp0 r12
sknz
br !!.L3 ; bit clear: skip the add
mov a, r9 ; accumulator += multiplicand
mov l, a
mov a, r8
add a, l
mov r8, a
.L3:
mov a, r9 ; multiplicand <<= 1
add a, a
mov r9, a
mov a, r10 ; multiplier >>= 1
shr a, 1
mov r10, a
br !!.L2
END_FUNC ___mulqi3
#endif
|
4ms/metamodule-plugin-sdk
| 3,011
|
plugin-libc/libgcc/config/msp430/srai.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
.text
;; Fixed-count shifts: each _srai N entry emits a single RRA and then
;; FALLS THROUGH into _srai N-1, so entering at __mspabi_srai_N executes
;; exactly N arithmetic right shifts before reaching the shared RET.
.section .text.__mspabi_srai_n
.macro _srai n
.global __mspabi_srai_\n
__mspabi_srai_\n:
RRA.W R12
.endm
/* Arithmetic Right Shift - R12 -> R12. */
_srai 15
_srai 14
_srai 13
_srai 12
_srai 11
_srai 10
_srai 9
_srai 8
_srai 7
_srai 6
_srai 5
_srai 4
_srai 3
_srai 2
_srai 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
;; Variable-count shift: R12 >>= R13 (arithmetic).  The loop body sits
;; BEFORE the entry label so that the entry's CMP/JNZ falls through to
;; RET when the count is zero and jumps backwards otherwise.
.section .text.__mspabi_srai
1: ADD.W #-1,R13
RRA.W R12,R12
.global __mspabi_srai
__mspabi_srai:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
#ifdef __MSP430X__
;; 20-bit pointer-width variant (430X address-space shifts).
.section .text.__gnu_mspabi_srap
1: ADDA #-1,R13
RRAX.A R12,R12
.global __gnu_mspabi_srap
__gnu_mspabi_srap:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif /* __MSP430X_LARGE__ */
#endif /* __MSP430X__ */
/* Arithmetic Right Shift - R12:R13 -> R12:R13. */
;; 32-bit fixed-count shifts: same fall-through chain, two insns per
;; step (RRA on the high word, RRC to rotate the carry into the low).
.section .text.__mspabi_sral_n
.macro _sral n
.global __mspabi_sral_\n
__mspabi_sral_\n:
RRA.W R13
RRC.W R12
.endm
_sral 15
_sral 14
_sral 13
_sral 12
_sral 11
_sral 10
_sral 9
_sral 8
_sral 7
_sral 6
_sral 5
_sral 4
_sral 3
_sral 2
_sral 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
;; Variable-count 32-bit shift: R12:R13 >>= R14 (arithmetic).
.section .text.__mspabi_sral
1: ADD.W #-1,R14
RRA.W R13
RRC.W R12
.global __mspabi_sral
__mspabi_sral:
CMP #0,R14
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
/* Arithmetic Right Shift - R8:R11 -> R12:R15
A 64-bit argument would normally be passed in R12:R15, but __mspabi_srall has
special conventions, so the 64-bit value to shift is passed in R8:R11.
According to the MSPABI, the shift amount is a 64-bit value in R12:R15, but
we only use the low word in R12. */
.section .text.__mspabi_srall
.global __mspabi_srall
__mspabi_srall:
MOV R11, R15 ; Free up R11 first
MOV R12, R11 ; Save the shift amount in R11
MOV R10, R14
MOV R9, R13
MOV R8, R12
CMP #0, R11 ; zero shift: value already in place, return
JNZ 1f
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
1:
RRA R15 ; arithmetic shift of the top word, then ripple
RRC R14 ; the carry down through the lower three words
RRC R13
RRC R12
ADD #-1,R11
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
|
4ms/metamodule-plugin-sdk
| 1,510
|
plugin-libc/libgcc/config/msp430/epilogue.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
.text
;; Shared function epilogues (MSPABI code-size helpers).  The prologue
;; pushes callee-saved registers in the order R10, R9, ..., down to R4;
;; jumping to __mspabi_func_epilog_N pops the last N of those (falling
;; through each label) and then returns on the caller's behalf.
.global __mspabi_func_epilog_7
.global __mspabi_func_epilog_6
.global __mspabi_func_epilog_5
.global __mspabi_func_epilog_4
.global __mspabi_func_epilog_3
.global __mspabi_func_epilog_2
.global __mspabi_func_epilog_1
__mspabi_func_epilog_7:
POP R4
__mspabi_func_epilog_6:
POP R5
__mspabi_func_epilog_5:
POP R6
__mspabi_func_epilog_4:
POP R7
__mspabi_func_epilog_3:
POP R8
__mspabi_func_epilog_2:
POP R9
__mspabi_func_epilog_1:
POP R10
#ifdef __MSP430X_LARGE__
RETA ; large model: 20-bit return address
#else
RET
#endif
|
4ms/metamodule-plugin-sdk
| 3,019
|
plugin-libc/libgcc/config/msp430/slli.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
.text
/* Logical Left Shift - R12 -> R12. */
;; Fixed-count shifts: each _slli N entry emits one ADD R12,R12 (i.e.
;; R12 <<= 1) and FALLS THROUGH into _slli N-1, so entering at
;; __mspabi_slli_N performs exactly N shifts before the shared RET.
.section .text.__mspabi_slli_n
.macro _slli n
.global __mspabi_slli_\n
__mspabi_slli_\n:
ADD.W R12,R12
.endm
_slli 15
_slli 14
_slli 13
_slli 12
_slli 11
_slli 10
_slli 9
_slli 8
_slli 7
_slli 6
_slli 5
_slli 4
_slli 3
_slli 2
_slli 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
;; Variable-count shift: R12 <<= R13.  Loop body sits before the entry
;; label; the entry CMP/JNZ falls through to RET when the count is 0.
.section .text.__mspabi_slli
1: ADD.W #-1,R13
ADD.W R12,R12
.global __mspabi_slli
__mspabi_slli:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
#ifdef __MSP430X__
;; 20-bit pointer-width variant (ADDA doubles the full 20-bit value).
.section .text.__gnu_mspabi_sllp
1: ADDA #-1,R13
ADDA R12,R12
.global __gnu_mspabi_sllp
__gnu_mspabi_sllp:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif /* __MSP430X_LARGE__ */
#endif /* __MSP430X__ */
/* Logical Left Shift - R12:R13 -> R12:R13. */
;; 32-bit fixed-count shifts: ADD doubles the low word, ADDC doubles the
;; high word and pulls in the carry; same fall-through chain as above.
.section .text.__mspabi_slll_n
.macro _slll n
.global __mspabi_slll_\n
__mspabi_slll_\n:
ADD.W R12,R12
ADDC.W R13,R13
.endm
_slll 15
_slll 14
_slll 13
_slll 12
_slll 11
_slll 10
_slll 9
_slll 8
_slll 7
_slll 6
_slll 5
_slll 4
_slll 3
_slll 2
_slll 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
;; Variable-count 32-bit shift: R12:R13 <<= R14.
.section .text.__mspabi_slll
1: ADD.W #-1,R14
ADD.W R12,R12
ADDC.W R13,R13
.global __mspabi_slll
__mspabi_slll:
CMP #0,R14
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
/* Logical Left Shift - R8:R11 -> R12:R15
A 64-bit argument would normally be passed in R12:R15, but __mspabi_sllll has
special conventions, so the 64-bit value to shift is passed in R8:R11.
According to the MSPABI, the shift amount is a 64-bit value in R12:R15, but
we only use the low word in R12. */
.section .text.__mspabi_sllll
.global __mspabi_sllll
__mspabi_sllll:
MOV R11, R15 ; Free up R11 first
MOV R12, R11 ; Save the shift amount in R11
MOV R10, R14
MOV R9, R13
MOV R8, R12
CMP #0,R11 ; zero shift: value already in place, return
JNZ 1f
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
1:
RLA R12 ; double the low word, then ripple the carry
RLC R13 ; up through the higher words
RLC R14
RLC R15
ADD #-1,R11
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
|
4ms/metamodule-plugin-sdk
| 3,037
|
plugin-libc/libgcc/config/msp430/srli.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
.text
;; Fixed-count shifts: CLRC + RRC implements a LOGICAL right shift (RRA
;; would replicate the sign bit).  Each _srli N entry falls through into
;; _srli N-1, so entering at __mspabi_srli_N performs exactly N shifts.
.section .text.__mspabi_srli_n
.macro _srli n
.global __mspabi_srli_\n
__mspabi_srli_\n:
CLRC
RRC.W R12
.endm
/* Logical Right Shift - R12 -> R12. */
_srli 15
_srli 14
_srli 13
_srli 12
_srli 11
_srli 10
_srli 9
_srli 8
_srli 7
_srli 6
_srli 5
_srli 4
_srli 3
_srli 2
_srli 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
;; Variable-count shift: R12 >>= R13 (logical).  Loop body sits before
;; the entry label; the entry CMP/JNZ falls through to RET at count 0.
.section .text.__mspabi_srli
1: ADD.W #-1,R13
CLRC
RRC.W R12,R12
.global __mspabi_srli
__mspabi_srli:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
#ifdef __MSP430X__
;; 20-bit pointer-width variant.
.section .text.__gnu_mspabi_srlp
1: ADDA #-1,R13
CLRC
RRCX.A R12,R12
.global __gnu_mspabi_srlp
__gnu_mspabi_srlp:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif /* __MSP430X_LARGE__ */
#endif /* __MSP430X__ */
/* Logical Right Shift - R12:R13 -> R12:R13. */
;; 32-bit fixed-count shifts: clear carry, shift the high word, then
;; rotate the carry into the low word; same fall-through chain.
.section .text.__mspabi_srll_n
.macro _srll n
.global __mspabi_srll_\n
__mspabi_srll_\n:
CLRC
RRC.W R13
RRC.W R12
.endm
_srll 15
_srll 14
_srll 13
_srll 12
_srll 11
_srll 10
_srll 9
_srll 8
_srll 7
_srll 6
_srll 5
_srll 4
_srll 3
_srll 2
_srll 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
;; Variable-count 32-bit shift: R12:R13 >>= R14 (logical).
.section .text.__mspabi_srll
1: ADD.W #-1,R14
CLRC
RRC.W R13
RRC.W R12
.global __mspabi_srll
__mspabi_srll:
CMP #0,R14
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
/* Logical Right Shift - R8:R11 -> R12:R15
A 64-bit argument would normally be passed in R12:R15, but __mspabi_srlll has
special conventions, so the 64-bit value to shift is passed in R8:R11.
According to the MSPABI, the shift amount is a 64-bit value in R12:R15, but
we only use the low word in R12. */
.section .text.__mspabi_srlll
.global __mspabi_srlll
__mspabi_srlll:
MOV R11, R15 ; Free up R11 first
MOV R12, R11 ; Save the shift amount in R11
MOV R10, R14
MOV R9, R13
MOV R8, R12
CMP #0,R11 ; zero shift: value already in place, return
JNZ 1f
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
1:
CLRC ; logical shift: force a zero into the top bit
RRC R15
RRC R14
RRC R13
RRC R12
ADD #-1,R11
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
|
4ms/metamodule-plugin-sdk
| 16,846
|
plugin-libc/libgcc/config/msp430/lib2hw_mul.S
|
; Copyright (C) 2014-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
;; Macro to start a multiply function. Each function has three
;; names, and hence three entry points - although they all go
;; through the same code. The first name is the version generated
;; by GCC. The second is the MSP430 EABI mandated name for the
;; *software* version of the function. The third is the EABI
;; mandated name for the *hardware* version of the function.
;;
;; Since we are using the hardware and software names to point
;; to the same code this effectively means that we are mapping
;; the software function onto the hardware function. Thus if
;; the library containing this code is linked into an application
;; (before the libgcc.a library) *all* multiply functions will
;; be mapped onto the hardware versions.
;;
;; We construct each function in its own section so that linker
;; garbage collection can be used to delete any unused functions
;; from this file.
;; start_func: open a hardware-multiply helper.  Defines all three entry
;; names (GCC, EABI software, EABI hardware) at the same address, then
;; saves SR and disables interrupts so the shared multiplier registers
;; cannot be clobbered by an ISR mid-operation.  Interrupt state is
;; restored by the matching end_func (POP.W sr / RETI).
.macro start_func gcc_name eabi_soft_name eabi_hard_name
.pushsection .text.\gcc_name,"ax",@progbits
.p2align 1
.global \eabi_hard_name
.type \eabi_hard_name , @function
\eabi_hard_name:
.global \eabi_soft_name
.type \eabi_soft_name , @function
\eabi_soft_name:
.global \gcc_name
.type \gcc_name , @function
\gcc_name:
PUSH.W sr ; Save current interrupt state
DINT ; Disable interrupts
NOP ; Account for latency
.endm
;; End a function started with the start_func macro.
;; Restores the interrupt state saved by start_func's PUSH.W sr and
;; returns: explicitly via POP.W sr + RETA on the large model, or in a
;; single RETI (pops SR, then PC) otherwise.
.macro end_func name
#ifdef __MSP430X_LARGE__
POP.W sr
RETA
#else
RETI
#endif
.size \name , . - \name
.popsection
.endm
;; Like the start_func macro except that it is used to
;; create a false entry point that just jumps to the
;; software function (implemented elsewhere).
;; Note: only the GCC and hardware names are defined here; the software
;; name is the jump target and is expected to be defined in libgcc.
.macro fake_func gcc_name eabi_soft_name eabi_hard_name
.pushsection .text.\gcc_name,"ax",@progbits
.p2align 1
.global \eabi_hard_name
.type \eabi_hard_name , @function
\eabi_hard_name:
.global \gcc_name
.type \gcc_name , @function
\gcc_name:
#ifdef __MSP430X_LARGE__
BRA #\eabi_soft_name ; 20-bit branch on the large memory model
#else
BR #\eabi_soft_name
#endif
.size \gcc_name , . - \gcc_name
.popsection
.endm
.macro mult16 OP1, OP2, RESULT
;* * 16-bit hardware multiply: int16 = int16 * int16
;*
;* - Operand 1 is in R12
;* - Operand 2 is in R13
;* - Result is in R12
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1 ; Load operand 1 into multiplier
MOV.W r13, &\OP2 ; Load operand 2 which triggers MPY
MOV.W &\RESULT, r12 ; Move result into return register
.endm
.macro mult1632 OP1, OP2, RESLO, RESHI
;* * 16-bit hardware multiply with a 32-bit result:
;* int32 = int16 * int16
;* uint32 = uint16 * uint16
;*
;* - Operand 1 is in R12
;* - Operand 2 is in R13
;* - Result is in R12, R13
;*
;* Signedness is selected by the OP1 register the caller passes in
;* (MPY for unsigned, MPYS for signed); the code is otherwise identical.
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1 ; Load operand 1 into multiplier
MOV.W r13, &\OP2 ; Load operand 2 which triggers MPY
MOV.W &\RESLO, r12 ; Move low result into return register
MOV.W &\RESHI, r13 ; Move high result into return register
.endm
.macro mult32 OP1, OP2, MAC_OP1, MAC_OP2, RESLO, RESHI
;* * 32-bit hardware multiply with a 32-bit result using 16 multiply and accumulate:
;* int32 = int32 * int32
;*
;* - Operand 1 is in R12, R13
;* - Operand 2 is in R14, R15
;* - Result is in R12, R13
;*
;* Computes lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 16) via the
;* 16-bit MAC unit; terms beyond bit 31 are discarded (mod 2^32).
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13, R14, R15
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1 ; Load operand 1 Low into multiplier
MOV.W r14, &\OP2 ; Load operand 2 Low which triggers MPY
MOV.W r12, &\MAC_OP1 ; Load operand 1 Low into mac
MOV.W &\RESLO, r12 ; Low 16-bits of result ready for return
MOV.W &\RESHI, &\RESLO ; MOV intermediate mpy high into low
MOV.W r15, &\MAC_OP2 ; Load operand 2 High, trigger MAC
MOV.W r13, &\MAC_OP1 ; Load operand 1 High
MOV.W r14, &\MAC_OP2 ; Load operand 2 Lo, trigger MAC
MOV.W &\RESLO, r13 ; Upper 16-bits result ready for return
.endm
.macro mult32_hw OP1_LO OP1_HI OP2_LO OP2_HI RESLO RESHI
;* * 32-bit hardware multiply with a 32-bit result
;* int32 = int32 * int32
;*
;* - Operand 1 is in R12, R13
;* - Operand 2 is in R14, R15
;* - Result is in R12, R13
;*
;* Uses the MPY32 peripheral directly: writing the high word of operand
;* 2 triggers the multiply; only the low 32 bits of the result are read.
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13, R14, R15
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1_LO ; Load operand 1 Low into multiplier
MOV.W r13, &\OP1_HI ; Load operand 1 High into multiplier
MOV.W r14, &\OP2_LO ; Load operand 2 Low into multiplier
MOV.W r15, &\OP2_HI ; Load operand 2 High, trigger MPY
MOV.W &\RESLO, r12 ; Ready low 16-bits for return
MOV.W &\RESHI, r13 ; Ready high 16-bits for return
.endm
.macro mult3264_hw OP1_LO OP1_HI OP2_LO OP2_HI RES0 RES1 RES2 RES3
;* * 32-bit hardware multiply with a 64-bit result
;* int64 = int32 * int32
;* uint64 = uint32 * uint32
;*
;* - Operand 1 is in R12, R13
;* - Operand 2 is in R14, R15
;* - Result is in R12, R13, R14, R15
;*
;* Signedness is selected by the OP1 registers the caller passes in
;* (MPY32L/H for unsigned, MPYS32L/H for signed).
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13, R14, R15
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1_LO ; Load operand 1 Low into multiplier
MOV.W r13, &\OP1_HI ; Load operand 1 High into multiplier
MOV.W r14, &\OP2_LO ; Load operand 2 Low into multiplier
MOV.W r15, &\OP2_HI ; Load operand 2 High, trigger MPY
MOV.W &\RES0, R12 ; Ready low 16-bits for return
MOV.W &\RES1, R13 ;
MOV.W &\RES2, R14 ;
MOV.W &\RES3, R15 ; Ready high 16-bits for return
.endm
.macro mult64_hw MPY32_LO MPY32_HI OP2_LO OP2_HI RES0 RES1 RES2 RES3
;* * 64-bit hardware multiply with a 64-bit result
;* int64 = int64 * int64
;*
;* - Operand 1 is in R8, R9, R10, R11
;* - Operand 2 is in R12, R13, R14, R15
;* - Result is in R12, R13, R14, R15
;*
;* 64-bit multiplication is achieved using the 32-bit hardware multiplier with
;* the following equation:
;* R12:R15 = (R8:R9 * R12:R13) + ((R8:R9 * R14:R15) << 32) + ((R10:R11 * R12:R13) << 32)
;*
;* (Valid modulo 2^64 for signed and unsigned alike, so the unsigned
;* MPY32 registers suffice.)
;*
;* The left shift by 32 is handled with minimal cost by saving the two low
;* words and discarding the two high words.
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R6, R7, R8, R9, R10, R11, R12, R13, R14, R15
;*
;* Macro arguments are the memory locations of the hardware registers.
;*
#if defined(__MSP430X_LARGE__)
PUSHM.A #5, R10
#elif defined(__MSP430X__)
PUSHM.W #5, R10
#else
PUSH R10 { PUSH R9 { PUSH R8 { PUSH R7 { PUSH R6
#endif
; Multiply the low 32-bits of op0 and the high 32-bits of op1.
MOV.W R8, &\MPY32_LO
MOV.W R9, &\MPY32_HI
MOV.W R14, &\OP2_LO
MOV.W R15, &\OP2_HI ; writing OP2 high word triggers the multiply
; Save the low 32-bits of the result.
MOV.W &\RES0, R6
MOV.W &\RES1, R7
; Multiply the high 32-bits of op0 and the low 32-bits of op1.
MOV.W R10, &\MPY32_LO
MOV.W R11, &\MPY32_HI
MOV.W R12, &\OP2_LO
MOV.W R13, &\OP2_HI
; Add the low 32-bits of the result to the previously saved result.
ADD.W &\RES0, R6
ADDC.W &\RES1, R7
; Multiply the low 32-bits of op0 and op1.
MOV.W R8, &\MPY32_LO
MOV.W R9, &\MPY32_HI
MOV.W R12, &\OP2_LO
MOV.W R13, &\OP2_HI
; Write the return values
MOV.W &\RES0, R12
MOV.W &\RES1, R13
MOV.W &\RES2, R14
MOV.W &\RES3, R15
; Add the saved low 32-bit results from earlier to the high 32-bits of
; this result, effectively shifting those two results left by 32 bits.
ADD.W R6, R14
ADDC.W R7, R15
#if defined(__MSP430X_LARGE__)
POPM.A #5, R10
#elif defined(__MSP430X__)
POPM.W #5, R10
#else
POP R6 { POP R7 { POP R8 { POP R9 { POP R10
#endif
.endm
;; EABI mandated names:
;;
;; int16 __mspabi_mpyi (int16 x, int16 y)
;; Multiply int by int.
;; int16 __mspabi_mpyi_hw (int16 x, int16 y)
;; Multiply int by int. Uses hardware MPY16 or MPY32.
;; int16 __mspabi_mpyi_f5hw (int16 x, int16 y)
;; Multiply int by int. Uses hardware MPY32 (F5xx devices and up).
;;
;; int32 __mspabi_mpyl (int32 x, int32 y);
;; Multiply long by long.
;; int32 __mspabi_mpyl_hw (int32 x, int32 y)
;; Multiply long by long. Uses hardware MPY16.
;; int32 __mspabi_mpyl_hw32 (int32 x, int32 y)
;; Multiply long by long. Uses hardware MPY32 (F4xx devices).
;; int32 __mspabi_mpyl_f5hw (int32 x, int32 y)
;; Multiply long by long. Uses hardware MPY32 (F5xx devices and up).
;;
;; int64 __mspabi_mpyll (int64 x, int64 y)
;; Multiply long long by long long.
;; int64 __mspabi_mpyll_hw (int64 x, int64 y)
;; Multiply long long by long long. Uses hardware MPY16.
;; int64 __mspabi_mpyll_hw32 (int64 x, int64 y)
;; Multiply long long by long long. Uses hardware MPY32 (F4xx devices).
;; int64 __mspabi_mpyll_f5hw (int64 x, int64 y)
;; Multiply long long by long long. Uses hardware MPY32 (F5xx devices and up).
;;
;; int32 __mspabi_mpysl (int16 x, int16 y)
;; Multiply int by int; result is long.
;; int32 __mspabi_mpysl_hw(int16 x, int16 y)
;; Multiply int by int; result is long. Uses hardware MPY16 or MPY32
;; int32 __mspabi_mpysl_f5hw(int16 x, int16 y)
;; Multiply int by int; result is long. Uses hardware MPY32 (F5xx devices and up).
;;
;; int64 __mspabi_mpysll(int32 x, int32 y)
;; Multiply long by long; result is long long.
;; int64 __mspabi_mpysll_hw(int32 x, int32 y)
;; Multiply long by long; result is long long. Uses hardware MPY16.
;; int64 __mspabi_mpysll_hw32(int32 x, int32 y)
;; Multiply long by long; result is long long. Uses hardware MPY32 (F4xx devices).
;; int64 __mspabi_mpysll_f5hw(int32 x, int32 y)
;; Multiply long by long; result is long long. Uses hardware MPY32 (F5xx devices and up).
;;
;; uint32 __mspabi_mpyul(uint16 x, uint16 y)
;; Multiply unsigned int by unsigned int; result is unsigned long.
;; uint32 __mspabi_mpyul_hw(uint16 x, uint16 y)
;; Multiply unsigned int by unsigned int; result is unsigned long. Uses hardware MPY16 or MPY32
;; uint32 __mspabi_mpyul_f5hw(uint16 x, uint16 y)
;; Multiply unsigned int by unsigned int; result is unsigned long. Uses hardware MPY32 (F5xx devices and up).
;;
;; uint64 __mspabi_mpyull(uint32 x, uint32 y)
;; Multiply unsigned long by unsigned long; result is unsigned long long.
;; uint64 __mspabi_mpyull_hw(uint32 x, uint32 y)
;; Multiply unsigned long by unsigned long; result is unsigned long long. Uses hardware MPY16
;; uint64 __mspabi_mpyull_hw32(uint32 x, uint32 y)
;; Multiply unsigned long by unsigned long; result is unsigned long long. Uses hardware MPY32 (F4xx devices).
;; uint64 __mspabi_mpyull_f5hw(uint32 x, uint32 y)
;; Multiply unsigned long by unsigned long; result is unsigned long long. Uses hardware MPY32 (F5xx devices and up)
;;;; The register names below are the standardised versions used across TI
;;;; literature.
;; Hardware multiply register addresses for devices with 16-bit hardware
;; multiply.
;; Writing an operand to MPY/MPYS/MAC selects unsigned-multiply,
;; signed-multiply or multiply-accumulate mode; writing OP2 starts the
;; operation and the result appears in RESLO/RESHI.
.set MPY, 0x0130
.set MPYS, 0x0132
.set MAC, 0x0134
.set OP2, 0x0138
.set RESLO, 0x013A
.set RESHI, 0x013C
;; Hardware multiply register addresses for devices with 32-bit (non-f5)
;; hardware multiply.
;; 32-bit operands are written low word first; writing OP2H triggers the
;; multiply and the 64-bit result appears in RES0..RES3.
.set MPY32L, 0x0140
.set MPY32H, 0x0142
.set MPYS32L, 0x0144
.set MPYS32H, 0x0146
.set OP2L, 0x0150
.set OP2H, 0x0152
.set RES0, 0x0154
.set RES1, 0x0156
.set RES2, 0x0158
.set RES3, 0x015A
;; Hardware multiply register addresses for devices with f5series hardware
;; multiply.
;; The F5xxx series of MCUs support the same 16-bit and 32-bit multiply
;; as the second generation hardware, but they are accessed from different
;; memory registers.
;; These names AREN'T standard. We've appended _F5 to the standard names.
.set MPY_F5, 0x04C0
.set MPYS_F5, 0x04C2
.set MAC_F5, 0x04C4
.set OP2_F5, 0x04C8
.set RESLO_F5, 0x04CA
.set RESHI_F5, 0x04CC
.set MPY32L_F5, 0x04D0
.set MPY32H_F5, 0x04D2
.set MPYS32L_F5, 0x04D4
.set MPYS32H_F5, 0x04D6
.set OP2L_F5, 0x04E0
.set OP2H_F5, 0x04E2
.set RES0_F5, 0x04E4
.set RES1_F5, 0x04E6
.set RES2_F5, 0x04E8
.set RES3_F5, 0x04EA
#if defined MUL_16
;; First generation MSP430 hardware multiplies ...
;; NOTE(review): start_func/end_func/fake_func and the mult16/mult1632/
;; mult32/mult32_hw/mult3264_hw/mult64_hw macros are defined earlier in
;; this file (outside this excerpt). Each start_func line appears to name
;; the GCC libcall entry point followed by its MSPABI aliases — confirm
;; against the macro definitions.
start_func __mulhi2 __mspabi_mpyi __mspabi_mpyi_hw
mult16 MPY, OP2, RESLO
end_func __mulhi2
start_func __mulhisi2 __mspabi_mpysl __mspabi_mpysl_hw
mult1632 MPYS, OP2, RESLO, RESHI
end_func __mulhisi2
start_func __umulhisi2 __mspabi_mpyul __mspabi_mpyul_hw
mult1632 MPY, OP2, RESLO, RESHI
end_func __umulhisi2
start_func __mulsi2 __mspabi_mpyl __mspabi_mpyl_hw
mult32 MPY, OP2, MAC, OP2, RESLO, RESHI
end_func __mulsi2
;; FIXME: We do not have hardware implementations of these
;; routines, so just jump to the software versions instead.
fake_func __mulsidi2 __mspabi_mpysll __mspabi_mpysll_hw
fake_func __umulsidi2 __mspabi_mpyull __mspabi_mpyull_hw
fake_func __muldi3 __mspabi_mpyll __mspabi_mpyll_hw
#elif defined MUL_32
;; Second generation MSP430 hardware multiplies ...
;; The 16-bit operations still use the first-generation register block;
;; only the 32-bit operations use the MPY32 registers.
start_func __mulhi2 __mspabi_mpyi __mspabi_mpyi_hw
mult16 MPY, OP2, RESLO
end_func __mulhi2
start_func __mulhisi2 __mspabi_mpysl __mspabi_mpysl_hw
mult1632 MPYS, OP2, RESLO, RESHI
end_func __mulhisi2
start_func __umulhisi2 __mspabi_mpyul __mspabi_mpyul_hw
mult1632 MPY, OP2, RESLO, RESHI
end_func __umulhisi2
start_func __mulsi2 __mspabi_mpyl __mspabi_mpyl_hw32
mult32_hw MPY32L, MPY32H, OP2L, OP2H, RES0, RES1
end_func __mulsi2
start_func __mulsidi2 __mspabi_mpysll __mspabi_mpysll_hw32
mult3264_hw MPYS32L, MPYS32H, OP2L, OP2H, RES0, RES1, RES2, RES3
end_func __mulsidi2
start_func __umulsidi2 __mspabi_mpyull __mspabi_mpyull_hw32
mult3264_hw MPY32L, MPY32H, OP2L, OP2H, RES0, RES1, RES2, RES3
end_func __umulsidi2
start_func __muldi3 __mspabi_mpyll __mspabi_mpyll_hw32
mult64_hw MPY32L, MPY32H, OP2L, OP2H, RES0, RES1, RES2, RES3
end_func __muldi3
#elif defined MUL_F5
/* The F5xxx series of MCUs support the same 16-bit and 32-bit multiply
as the second generation hardware, but they are accessed from different
memory registers. */
start_func __mulhi2 __mspabi_mpyi __mspabi_mpyi_f5hw
mult16 MPY_F5, OP2_F5, RESLO_F5
end_func __mulhi2
start_func __mulhisi2 __mspabi_mpysl __mspabi_mpysl_f5hw
mult1632 MPYS_F5, OP2_F5, RESLO_F5, RESHI_F5
end_func __mulhisi2
start_func __umulhisi2 __mspabi_mpyul __mspabi_mpyul_f5hw
mult1632 MPY_F5, OP2_F5, RESLO_F5, RESHI_F5
end_func __umulhisi2
start_func __mulsi2 __mspabi_mpyl __mspabi_mpyl_f5hw
mult32_hw MPY32L_F5, MPY32H_F5, OP2L_F5, OP2H_F5, RES0_F5, RES1_F5
end_func __mulsi2
start_func __mulsidi2 __mspabi_mpysll __mspabi_mpysll_f5hw
mult3264_hw MPYS32L_F5, MPYS32H_F5, OP2L_F5, OP2H_F5, RES0_F5, RES1_F5, RES2_F5, RES3_F5
end_func __mulsidi2
start_func __umulsidi2 __mspabi_mpyull __mspabi_mpyull_f5hw
mult3264_hw MPY32L_F5, MPY32H_F5, OP2L_F5, OP2H_F5, RES0_F5, RES1_F5, RES2_F5, RES3_F5
end_func __umulsidi2
start_func __muldi3 __mspabi_mpyll __mspabi_mpyll_f5hw
mult64_hw MPY32L_F5, MPY32H_F5, OP2L_F5, OP2H_F5, RES0_F5, RES1_F5, RES2_F5, RES3_F5
end_func __muldi3
#else
#error MUL type not defined
#endif
|
4ms/metamodule-plugin-sdk
| 2,776
|
plugin-libc/libgcc/config/msp430/cmpsi2.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
/* On the MSP430X "large" memory model, code addresses are 20 bits wide,
so returns must use RETA; otherwise the plain 16-bit RET is used.
ret_ abstracts the difference for the routines below. */
#ifdef __MSP430X_LARGE__
#define ret_ RETA
#else
#define ret_ RET
#endif
.text
;; int __cmpsi2 (signed long A, signed long B)
;;
;; Performs a signed comparison of A and B.
;; If A is less than B it returns 0. If A is greater
;; than B it returns 2. If they are equal it returns 1.
;; Note - this code is also used by the __ucmpsi2 routine below.
.global __cmpsi2
.type __cmpsi2, @function
__cmpsi2:
;; A is in r12 (low), r13 (high)
;; B is in r14 (low), r15 (high)
;; Result put in r12; no other registers are written.
cmp.w r13, r15 ; Compare the high words (computes B.hi - A.hi).
jeq .L_compare_low ; High words equal - decide on the low words.
jge .L_less_than ; B.hi > A.hi (signed) => A < B.
.L_greater_than:
mov.w #2, r12
ret_
.L_less_than:
mov.w #0, r12
ret_
.L_compare_low:
;; The high words are equal, so the ordering is decided by the low
;; words, which must be compared as UNSIGNED quantities: for two longs
;; with equal high words, the larger unsigned low word wins. The
;; previous "jl" here was a signed compare, which gave the wrong answer
;; whenever exactly one low word had bit 15 set; "jlo" (jump if lower,
;; unsigned) is the correct condition.
cmp.w r12, r14 ; Computes B.lo - A.lo.
jlo .L_greater_than ; B.lo < A.lo (unsigned) => A > B.
jne .L_less_than ; B.lo > A.lo (unsigned) => A < B.
mov.w #1, r12 ; Low words equal too => A == B.
ret_
.size __cmpsi2, . - __cmpsi2
;; int __ucmpsi2 (unsigned long A, unsigned long B)
;;
;; Performs an unsigned comparison of A and B.
;; If A is less than B it returns 0. If A is greater
;; than B it returns 2. If they are equal it returns 1.
;;; Note - this function branches into the __cmpsi2 code above.
;;; The trick: when A and B have the SAME top bit, __cmpsi2's signed
;;; comparison yields the correct unsigned ordering; only when the top
;;; bits differ is the answer decided immediately here.
.global __ucmpsi2
.type __ucmpsi2, @function
__ucmpsi2:
;; A is in r12 (low), r13 (high)
;; B is in r14 (low), r15 (high)
;; Result put in r12
tst r13 ; Sets N if the top bit of A is set.
jn .L_top_bit_set_in_A
tst r15
;;; If the top bit of B is set, but A's is clear we know that A < B.
jn .L_less_than
;;; Neither A nor B has their top bit set so we can use the __cmpsi2 routine.
;;; Note we use Jc rather than BR as that saves two bytes. The TST insn always
;;; sets the C bit, so this jump is effectively unconditional.
jc __cmpsi2
.L_top_bit_set_in_A:
tst r15
;;; If both A and B have their top bit set we can use the __cmpsi2 routine.
jn __cmpsi2
;;; Otherwise A has its top bit set and B does not so A > B.
;;; (Again jc is effectively unconditional after TST; it jumps into
;;; __cmpsi2's .L_greater_than path, which returns 2.)
jc .L_greater_than
.size __ucmpsi2, . - __ucmpsi2
|
4ms/metamodule-plugin-sdk
| 1,197
|
plugin-libc/libgcc/config/i386/crtn.S
|
/* crtn.S for x86.
Copyright (C) 1993-2022 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just supplies returns for the .init and .fini sections. It is
linked in after all other files, so these "ret" instructions terminate
the code fragments that every preceding object contributed to each
section (the matching crti.S supplies the _init/_fini labels). */
.ident "GNU C crtn.o"
.section .init
ret $0x0
.section .fini
ret $0x0
|
4ms/metamodule-plugin-sdk
| 23,864
|
plugin-libc/libgcc/config/i386/morestack.S
|
# x86/x86_64 support for -fsplit-stack.
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
# Contributed by Ian Lance Taylor <iant@google.com>.
# This file is part of GCC.
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#include "auto-host.h"
# Support for allocating more stack space when using -fsplit-stack.
# When a function discovers that it needs more stack space, it will
# call __morestack with the size of the stack frame and the size of
# the parameters to copy from the old stack frame to the new one.
# The __morestack function preserves the parameter registers and
# calls __generic_morestack to actually allocate the stack space.
# When this is called stack space is very low, but we ensure that
# there is enough space to push the parameter registers and to call
# __generic_morestack.
# When calling __generic_morestack, FRAME_SIZE points to the size of
# the desired frame when the function is called, and the function
# sets it to the size of the allocated stack. OLD_STACK points to
# the parameters on the old stack and PARAM_SIZE is the number of
# bytes of parameters to copy to the new stack. These are the
# parameters of the function that called __morestack. The
# __generic_morestack function returns the new stack pointer,
# pointing to the address of the first copied parameter. The return
# value minus the returned *FRAME_SIZE will be the first address on
# the stack which we should not use.
# void *__generic_morestack (size_t *frame_size, void *old_stack,
# size_t param_size);
# The __morestack routine has to arrange for the caller to return to a
# stub on the new stack. The stub is responsible for restoring the
# old stack pointer and returning to the caller's caller. This calls
# __generic_releasestack to retrieve the old stack pointer and release
# the newly allocated stack.
# void *__generic_releasestack (size_t *available);
# We do a little dance so that the processor's call/return return
# address prediction works out. The compiler arranges for the caller
# to look like this:
# call __generic_morestack
# ret
# L:
# // carry on with function
# After we allocate more stack, we call L, which is in our caller.
# When that returns (to the predicted instruction), we release the
# stack segment and reset the stack pointer. We then return to the
# predicted instruction, namely the ret instruction immediately after
# the call to __generic_morestack. That then returns to the caller of
# the original caller.
# The amount of extra space we ask for. In general this has to be
# enough for the dynamic loader to find a symbol and for a signal
# handler to run.
#ifndef __x86_64__
#define BACKOFF (1024)
#else
#define BACKOFF (3584)
#endif
# The amount of space we ask for when calling non-split-stack code.
# (1 MiB; NB the constant has grown past the "16K" the comment below
# originally referred to.)
#define NON_SPLIT_STACK 0x100000
# This entry point is for split-stack code which calls non-split-stack
# code. When the linker sees this case, it converts the call to
# __morestack to call __morestack_non_split instead. We just bump the
# requested stack space by NON_SPLIT_STACK (plus a page plus BACKOFF).
#include <cet.h>
# __morestack_non_split:
# Entry point substituted by the linker when split-stack code calls
# non-split-stack code. If the current stack already has frame size +
# NON_SPLIT_STACK bytes available it returns directly (patching the
# return address to skip the morestack call sequence); otherwise it
# inflates the request and falls through into __morestack below.
.global __morestack_non_split
.hidden __morestack_non_split
#ifdef __ELF__
.type __morestack_non_split,@function
#endif
__morestack_non_split:
.cfi_startproc
#ifndef __x86_64__
# See below for an extended explanation of this.
.cfi_def_cfa %esp,16
pushl %eax # Save %eax in case it is a parameter.
.cfi_adjust_cfa_offset 4 # Account for pushed register.
movl %esp,%eax # Current stack,
subl 8(%esp),%eax # less required stack frame size,
subl $NON_SPLIT_STACK,%eax # less space for non-split code.
cmpl %gs:0x30,%eax # See if we have enough space.
jb 2f # Get more space if we need it.
# Here the stack is
# %esp + 20: stack pointer after two returns
# %esp + 16: return address of morestack caller's caller
# %esp + 12: size of parameters
# %esp + 8: new stack frame size
# %esp + 4: return address of this function
# %esp: saved %eax
#
# Since we aren't doing a full split stack, we don't need to
# do anything when our caller returns. So we return to our
# caller rather than calling it, and let it return as usual.
# To make that work we adjust the return address.
# This breaks call/return address prediction for the call to
# this function. I can't figure out a way to make it work
# short of copying the parameters down the stack, which will
# probably take more clock cycles than we will lose breaking
# call/return address prediction. We will only break
# prediction for this call, not for our caller.
movl 4(%esp),%eax # Increment the return address
cmpb $0xc3,(%eax) # to skip the ret instruction;
je 1f # see above.
addl $2,%eax
1: inc %eax
# If the instruction that we return to is
# leal 20(%ebp),{%eax,%ecx,%edx}
# then we have been called by a varargs function that expects
# %ebp to hold a real value. That can only work if we do the
# full stack split routine. FIXME: This is fragile.
# (The cmpb sequence below matches the lea opcode 0x8d, modrm
# byte 0x45/0x4d/0x55 and displacement byte 0x14.)
cmpb $0x8d,(%eax)
jne 3f
cmpb $0x14,2(%eax)
jne 3f
cmpb $0x45,1(%eax)
je 2f
cmpb $0x4d,1(%eax)
je 2f
cmpb $0x55,1(%eax)
je 2f
3:
movl %eax,4(%esp) # Update return address.
popl %eax # Restore %eax and stack.
.cfi_adjust_cfa_offset -4 # Account for popped register.
ret $8 # Return to caller, popping args.
2:
.cfi_adjust_cfa_offset 4 # Back to where we were.
popl %eax # Restore %eax and stack.
.cfi_adjust_cfa_offset -4 # Account for popped register.
# Increment space we request.
addl $NON_SPLIT_STACK+0x1000+BACKOFF,4(%esp)
# Fall through into morestack.
#else
# See below for an extended explanation of this.
.cfi_def_cfa %rsp,16
pushq %rax # Save %rax in case caller is using
# it to preserve original %r10.
.cfi_adjust_cfa_offset 8 # Adjust for pushed register.
movq %rsp,%rax # Current stack,
subq %r10,%rax # less required stack frame size,
subq $NON_SPLIT_STACK,%rax # less space for non-split code.
#ifdef __LP64__
cmpq %fs:0x70,%rax # See if we have enough space.
#else
cmpl %fs:0x40,%eax
#endif
jb 2f # Get more space if we need it.
# If the instruction that we return to is
# leaq 24(%rbp), %r11
# then we have been called by a varargs function that expects
# %ebp to hold a real value. That can only work if we do the
# full stack split routine. FIXME: This is fragile.
movq 8(%rsp),%rax
incq %rax # Skip ret instruction in caller.
cmpl $0x185d8d4c,(%rax) # Encoding of leaq 24(%rbp),%r11.
je 2f
# This breaks call/return prediction, as described above.
incq 8(%rsp) # Increment the return address.
popq %rax # Restore register.
.cfi_adjust_cfa_offset -8 # Adjust for popped register.
ret # Return to caller.
2:
popq %rax # Restore register.
.cfi_adjust_cfa_offset -8 # Adjust for popped register.
# Increment space we request.
addq $NON_SPLIT_STACK+0x1000+BACKOFF,%r10
# Fall through into morestack.
#endif
.cfi_endproc
#ifdef __ELF__
.size __morestack_non_split, . - __morestack_non_split
#endif
# __morestack_non_split falls through into __morestack.
# The __morestack function.
# __morestack:
# Allocates a new stack segment via __generic_morestack, switches to
# it, calls back into the caller's body ("call *..."), and on return
# releases the segment via __generic_releasestack and restores the old
# stack pointer. 32-bit: frame size and parameter size are on the
# stack; 64-bit: %r10 = frame size, %r11 = parameter size. The unusual
# CFI and return choreography is explained inline.
.global __morestack
.hidden __morestack
#ifdef __ELF__
.type __morestack,@function
#endif
__morestack:
.LFB1:
.cfi_startproc
#ifndef __x86_64__
# The 32-bit __morestack function.
# We use a cleanup to restore the stack guard if an exception
# is thrown through this code.
#ifndef __PIC__
.cfi_personality 0,__gcc_personality_v0
.cfi_lsda 0,.LLSDA1
#else
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#endif
# We return below with a ret $8. We will return to a single
# return instruction, which will return to the caller of our
# caller. We let the unwinder skip that single return
# instruction, and just return to the real caller.
# Here CFA points just past the return address on the stack,
# e.g., on function entry it is %esp + 4. The stack looks
# like this:
# CFA + 12: stack pointer after two returns
# CFA + 8: return address of morestack caller's caller
# CFA + 4: size of parameters
# CFA: new stack frame size
# CFA - 4: return address of this function
# CFA - 8: previous value of %ebp; %ebp points here
# Setting the new CFA to be the current CFA + 12 (i.e., %esp +
# 16) will make the unwinder pick up the right return address.
.cfi_def_cfa %esp,16
pushl %ebp
.cfi_adjust_cfa_offset 4
.cfi_offset %ebp, -20
movl %esp,%ebp
.cfi_def_cfa_register %ebp
# In 32-bit mode the parameters are pushed on the stack. The
# argument size is pushed then the new stack frame size is
# pushed.
# In the body of a non-leaf function, the stack pointer will
# be aligned to a 16-byte boundary. That is CFA + 12 in the
# stack picture above: (CFA + 12) % 16 == 0. At this point we
# have %esp == CFA - 8, so %esp % 16 == 12. We need some
# space for saving registers and passing parameters, and we
# need to wind up with %esp % 16 == 0.
subl $44,%esp
# Because our cleanup code may need to clobber %ebx, we need
# to save it here so the unwinder can restore the value used
# by the caller. Note that we don't have to restore the
# register, since we don't change it, we just have to save it
# for the unwinder.
movl %ebx,-4(%ebp)
.cfi_offset %ebx, -24
# In 32-bit mode the registers %eax, %edx, and %ecx may be
# used for parameters, depending on the regparm and fastcall
# attributes.
movl %eax,-8(%ebp)
movl %edx,-12(%ebp)
movl %ecx,-16(%ebp)
call __morestack_block_signals
movl 12(%ebp),%eax # The size of the parameters.
movl %eax,8(%esp)
leal 20(%ebp),%eax # Address of caller's parameters.
movl %eax,4(%esp)
addl $BACKOFF,8(%ebp) # Ask for backoff bytes.
leal 8(%ebp),%eax # The address of the new frame size.
movl %eax,(%esp)
call __generic_morestack
movl %eax,%esp # Switch to the new stack.
subl 8(%ebp),%eax # The end of the stack space.
addl $BACKOFF,%eax # Back off BACKOFF bytes.
.LEHB0:
# FIXME: The offset must match
# TARGET_THREAD_SPLIT_STACK_OFFSET in
# gcc/config/i386/linux.h.
movl %eax,%gs:0x30 # Save the new stack boundary.
call __morestack_unblock_signals
movl -12(%ebp),%edx # Restore registers.
movl -16(%ebp),%ecx
movl 4(%ebp),%eax # Increment the return address
cmpb $0xc3,(%eax) # to skip the ret instruction;
je 1f # see above.
addl $2,%eax
1: inc %eax
movl %eax,-12(%ebp) # Store return address in an
# unused slot.
movl -8(%ebp),%eax # Restore the last register.
call *-12(%ebp) # Call our caller!
# The caller will return here, as predicted.
# Save the registers which may hold a return value. We
# assume that __generic_releasestack does not touch any
# floating point or vector registers.
pushl %eax
pushl %edx
# Push the arguments to __generic_releasestack now so that the
# stack is at a 16-byte boundary for
# __morestack_block_signals.
pushl $0 # Where the available space is returned.
leal 0(%esp),%eax # Push its address.
push %eax
call __morestack_block_signals
call __generic_releasestack
subl 4(%esp),%eax # Subtract available space.
addl $BACKOFF,%eax # Back off BACKOFF bytes.
.LEHE0:
movl %eax,%gs:0x30 # Save the new stack boundary.
addl $8,%esp # Remove values from stack.
# We need to restore the old stack pointer, which is in %rbp,
# before we unblock signals. We also need to restore %eax and
# %edx after we unblock signals but before we return. Do this
# by moving %eax and %edx from the current stack to the old
# stack.
popl %edx # Pop return value from current stack.
popl %eax
movl %ebp,%esp # Restore stack pointer.
# As before, we now have %esp % 16 == 12.
pushl %eax # Push return value on old stack.
pushl %edx
subl $4,%esp # Align stack to 16-byte boundary.
call __morestack_unblock_signals
addl $4,%esp
popl %edx # Restore return value.
popl %eax
.cfi_remember_state
# We never changed %ebx, so we don't have to actually restore it.
.cfi_restore %ebx
popl %ebp
.cfi_restore %ebp
.cfi_def_cfa %esp, 16
ret $8 # Return to caller, which will
# immediately return. Pop
# arguments as we go.
# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.
.L1:
.cfi_restore_state
subl $16,%esp # Maintain 16 byte alignment.
movl %eax,4(%esp) # Save exception header.
movl %ebp,(%esp) # Stack pointer after resume.
call __generic_findstack
movl %ebp,%ecx # Get the stack pointer.
subl %eax,%ecx # Subtract available space.
addl $BACKOFF,%ecx # Back off BACKOFF bytes.
movl %ecx,%gs:0x30 # Save new stack boundary.
movl 4(%esp),%eax # Function argument.
movl %eax,(%esp)
#ifdef __PIC__
call __x86.get_pc_thunk.bx # %ebx may not be set up for us.
addl $_GLOBAL_OFFSET_TABLE_, %ebx
call _Unwind_Resume@PLT # Resume unwinding.
#else
call _Unwind_Resume
#endif
#else /* defined(__x86_64__) */
# The 64-bit __morestack function.
# We use a cleanup to restore the stack guard if an exception
# is thrown through this code.
#ifndef __PIC__
.cfi_personality 0x3,__gcc_personality_v0
.cfi_lsda 0x3,.LLSDA1
#else
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#endif
# We will return a single return instruction, which will
# return to the caller of our caller. Let the unwinder skip
# that single return instruction, and just return to the real
# caller.
.cfi_def_cfa %rsp,16
# Set up a normal backtrace.
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp, -24
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
# In 64-bit mode the new stack frame size is passed in r10
# and the argument size is passed in r11.
addq $BACKOFF,%r10 # Ask for backoff bytes.
pushq %r10 # Save new frame size.
# In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8,
# and %r9 may be used for parameters. We also preserve %rax
# which the caller may use to hold %r10.
pushq %rax
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %r11
# We entered morestack with the stack pointer aligned to a
# 16-byte boundary (the call to morestack's caller used 8
# bytes, and the call to morestack used 8 bytes). We have now
# pushed 10 registers, so we are still aligned to a 16-byte
# boundary.
call __morestack_block_signals
leaq -8(%rbp),%rdi # Address of new frame size.
leaq 24(%rbp),%rsi # The caller's parameters.
popq %rdx # The size of the parameters.
subq $8,%rsp # Align stack.
call __generic_morestack
movq -8(%rbp),%r10 # Reload modified frame size
movq %rax,%rsp # Switch to the new stack.
subq %r10,%rax # The end of the stack space.
addq $BACKOFF,%rax # Back off BACKOFF bytes.
.LEHB0:
# FIXME: The offset must match
# TARGET_THREAD_SPLIT_STACK_OFFSET in
# gcc/config/i386/linux64.h.
# Macro to save the new stack boundary.
#ifdef __LP64__
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movq %r##reg,%fs:0x70
#else
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movl %e##reg,%fs:0x40
#endif
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
call __morestack_unblock_signals
movq -24(%rbp),%rdi # Restore registers.
movq -32(%rbp),%rsi
movq -40(%rbp),%rdx
movq -48(%rbp),%rcx
movq -56(%rbp),%r8
movq -64(%rbp),%r9
movq 8(%rbp),%r10 # Increment the return address
incq %r10 # to skip the ret instruction;
# see above.
movq -16(%rbp),%rax # Restore caller's %rax.
call *%r10 # Call our caller!
# The caller will return here, as predicted.
# Save the registers which may hold a return value. We
# assume that __generic_releasestack does not touch any
# floating point or vector registers.
pushq %rax
pushq %rdx
call __morestack_block_signals
pushq $0 # For alignment.
pushq $0 # Where the available space is returned.
leaq 0(%rsp),%rdi # Pass its address.
call __generic_releasestack
subq 0(%rsp),%rax # Subtract available space.
addq $BACKOFF,%rax # Back off BACKOFF bytes.
.LEHE0:
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
addq $16,%rsp # Remove values from stack.
# We need to restore the old stack pointer, which is in %rbp,
# before we unblock signals. We also need to restore %rax and
# %rdx after we unblock signals but before we return. Do this
# by moving %rax and %rdx from the current stack to the old
# stack.
popq %rdx # Pop return value from current stack.
popq %rax
movq %rbp,%rsp # Restore stack pointer.
# Now (%rsp & 16) == 8.
subq $8,%rsp # For alignment.
pushq %rax # Push return value on old stack.
pushq %rdx
call __morestack_unblock_signals
popq %rdx # Restore return value.
popq %rax
addq $8,%rsp
.cfi_remember_state
popq %rbp
.cfi_restore %rbp
.cfi_def_cfa %rsp, 16
ret # Return to caller, which will
# immediately return.
# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.
.L1:
.cfi_restore_state
subq $16,%rsp # Maintain 16 byte alignment.
movq %rax,(%rsp) # Save exception header.
movq %rbp,%rdi # Stack pointer after resume.
call __generic_findstack
movq %rbp,%rcx # Get the stack pointer.
subq %rax,%rcx # Subtract available space.
addq $BACKOFF,%rcx # Back off BACKOFF bytes.
X86_64_SAVE_NEW_STACK_BOUNDARY (cx)
movq (%rsp),%rdi # Restore exception data for call.
#ifdef __PIC__
call _Unwind_Resume@PLT # Resume unwinding.
#else
call _Unwind_Resume # Resume unwinding.
#endif
#endif /* defined(__x86_64__) */
.cfi_endproc
#ifdef __ELF__
.size __morestack, . - __morestack
#endif
#if !defined(__x86_64__) && defined(__PIC__)
# Output the thunk to get PC into bx, since we use it above.
# (Standard 32-bit PIC idiom: the caller's "call" pushes the return
# address, which this thunk loads into %ebx and returns.)
.section .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
.globl __x86.get_pc_thunk.bx
.hidden __x86.get_pc_thunk.bx
#ifdef __ELF__
.type __x86.get_pc_thunk.bx, @function
#endif
__x86.get_pc_thunk.bx:
.cfi_startproc
movl (%esp), %ebx # %ebx = our own return address.
ret
.cfi_endproc
#ifdef __ELF__
.size __x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
#endif
#endif
# The exception table. This tells the personality routine to execute
# the exception handler. It describes a single call-site region,
# .LEHB0..LEHE0, whose landing pad is the .L1 cleanup above.
.section .gcc_except_table,"a",@progbits
.align 4
.LLSDA1:
.byte 0xff # @LPStart format (omit)
.byte 0xff # @TType format (omit)
.byte 0x1 # call-site format (uleb128)
.uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length
.LLSDACSB1:
.uleb128 .LEHB0-.LFB1 # region 0 start
.uleb128 .LEHE0-.LEHB0 # length
.uleb128 .L1-.LFB1 # landing pad
.uleb128 0 # action
.LLSDACSE1:
.global __gcc_personality_v0
#ifdef __PIC__
# Build a position independent reference to the basic
# personality function.
.hidden DW.ref.__gcc_personality_v0
.weak DW.ref.__gcc_personality_v0
.section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
.type DW.ref.__gcc_personality_v0, @object
DW.ref.__gcc_personality_v0:
#ifndef __LP64__
.align 4
.size DW.ref.__gcc_personality_v0, 4
.long __gcc_personality_v0
#else
.align 8
.size DW.ref.__gcc_personality_v0, 8
.quad __gcc_personality_v0
#endif
#endif
#if defined __x86_64__ && defined __LP64__
# This entry point is used for the large model. With this entry point
# the upper 32 bits of %r10 hold the argument size and the lower 32
# bits hold the new stack frame size. There doesn't seem to be a way
# to know in the assembler code that we are assembling for the large
# model, and there doesn't seem to be a large model multilib anyhow.
# If one is developed, then the non-PIC code is probably OK since we
# will probably be close to the morestack code, but the PIC code
# almost certainly needs to be changed. FIXME.
.text
.global __morestack_large_model
.hidden __morestack_large_model
#ifdef __ELF__
.type __morestack_large_model,@function
#endif
__morestack_large_model:
.cfi_startproc
_CET_ENDBR
# Unpack the two packed fields into __morestack's expected registers:
# %r10 = frame size (low 32 bits), %r11 = argument size (high 32 bits,
# arithmetic shift).
movq %r10, %r11
andl $0xffffffff, %r10d
sarq $32, %r11
jmp __morestack
.cfi_endproc
#ifdef __ELF__
.size __morestack_large_model, . - __morestack_large_model
#endif
#endif /* __x86_64__ && __LP64__ */
# Initialize the stack test value when the program starts or when a
# new thread starts. We don't know how large the main stack is, so we
# guess conservatively. We might be able to use getrlimit here.
# Registered as a high-priority constructor below; calls
# __generic_morestack_set_initial_sp(current_sp, 16000).
.text
.global __stack_split_initialize
.hidden __stack_split_initialize
#ifdef __ELF__
.type __stack_split_initialize, @function
#endif
__stack_split_initialize:
_CET_ENDBR
#ifndef __x86_64__
leal -16000(%esp),%eax # We should have at least 16K.
movl %eax,%gs:0x30
subl $4,%esp # Align stack.
pushl $16000
pushl %esp
#ifdef __PIC__
call __generic_morestack_set_initial_sp@PLT
#else
call __generic_morestack_set_initial_sp
#endif
addl $12,%esp
ret
#else /* defined(__x86_64__) */
leaq -16000(%rsp),%rax # We should have at least 16K.
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
subq $8,%rsp # Align stack.
movq %rsp,%rdi
movq $16000,%rsi
#ifdef __PIC__
call __generic_morestack_set_initial_sp@PLT
#else
call __generic_morestack_set_initial_sp
#endif
addq $8,%rsp
ret
#endif /* defined(__x86_64__) */
#ifdef __ELF__
.size __stack_split_initialize, . - __stack_split_initialize
#endif
# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.
# void *__morestack_get_guard (void) returns the current stack guard,
# read from the TLS slot (%gs:0x30 on 32-bit, %fs:0x70/%fs:0x40 on
# 64-bit LP64/x32).
.text
.global __morestack_get_guard
.hidden __morestack_get_guard
#ifdef __ELF__
.type __morestack_get_guard,@function
#endif
__morestack_get_guard:
#ifndef __x86_64__
movl %gs:0x30,%eax
#else
#ifdef __LP64__
movq %fs:0x70,%rax
#else
movl %fs:0x40,%eax
#endif
#endif
ret
#ifdef __ELF__
.size __morestack_get_guard, . - __morestack_get_guard
#endif
# void __morestack_set_guard (void *) sets the stack guard.
# 32-bit: argument on the stack; 64-bit: argument in %rdi, stored via
# the X86_64_SAVE_NEW_STACK_BOUNDARY macro defined earlier.
.global __morestack_set_guard
.hidden __morestack_set_guard
#ifdef __ELF__
.type __morestack_set_guard,@function
#endif
__morestack_set_guard:
#ifndef __x86_64__
movl 4(%esp),%eax
movl %eax,%gs:0x30
#else
X86_64_SAVE_NEW_STACK_BOUNDARY (di)
#endif
ret
#ifdef __ELF__
.size __morestack_set_guard, . - __morestack_set_guard
#endif
# void *__morestack_make_guard (void *stack, size_t size) returns the
# stack guard value for a stack: stack - size + BACKOFF. It only
# computes the value; it does not store it anywhere.
.global __morestack_make_guard
.hidden __morestack_make_guard
#ifdef __ELF__
.type __morestack_make_guard,@function
#endif
__morestack_make_guard:
#ifndef __x86_64__
movl 4(%esp),%eax
subl 8(%esp),%eax
addl $BACKOFF,%eax
#else
subq %rsi,%rdi
addq $BACKOFF,%rdi
movq %rdi,%rax
#endif
ret
#ifdef __ELF__
.size __morestack_make_guard, . - __morestack_make_guard
#endif
# Make __stack_split_initialize a high priority constructor. FIXME:
# This is ELF specific.
# NOTE(review): __morestack_load_mmap is not defined in this file —
# presumably it lives in the companion C source; confirm before
# changing these entries.
#if HAVE_INITFINI_ARRAY_SUPPORT
.section .init_array.00000,"aw",@progbits
#else
.section .ctors.65535,"aw",@progbits
#endif
#ifndef __LP64__
.align 4
.long __stack_split_initialize
.long __morestack_load_mmap
#else
.align 8
.quad __stack_split_initialize
.quad __morestack_load_mmap
#endif
#ifdef __ELF__
# Mark the stack as non-executable, and flag split-stack support.
.section .note.GNU-stack,"",@progbits
.section .note.GNU-split-stack,"",@progbits
.section .note.GNU-no-split-stack,"",@progbits
#endif
|
4ms/metamodule-plugin-sdk
| 1,313
|
plugin-libc/libgcc/config/i386/crti.S
|
/* crti.S for x86.
Copyright (C) 1993-2022 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just supplies labeled starting points for the .init and .fini
sections. It is linked in before the values-Xx.o files and also before
crtbegin.o. The bodies of _init/_fini accumulate from every object in
link order; the matching crtn.S supplies the terminating returns. */
.ident "GNU C crti.s"
.section .init
.globl _init
.type _init,@function
_init:
.section .fini
.globl _fini
.type _fini,@function
_fini:
|
4ms/metamodule-plugin-sdk
| 4,899
|
plugin-libc/libgcc/config/i386/cygwin.S
|
/* stuff needed for libgcc on win32.
*
* Copyright (C) 1996-2022 Free Software Foundation, Inc.
* Written By Steve Chamberlain
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include "i386-asm.h"
#ifdef HAVE_AS_CFI_SECTIONS
.cfi_sections .debug_frame
#endif
#ifdef L_chkstk
/* Function prologue calls __chkstk to probe the stack when allocating more
than CHECK_STACK_LIMIT bytes in one go. Touching the stack at 4K
increments is necessary to ensure that the guard pages used
by the OS virtual memory manger are allocated in correct sequence. */
.global ___chkstk
.global __alloca
#ifdef __x86_64__
/* __alloca is a normal function call, which uses %rcx as the argument. */
cfi_startproc()
__alloca:
movq %rcx, %rax
/* FALLTHRU */
/* ___chkstk is a *special* function call, which uses %rax as the argument.
We avoid clobbering the 4 integer argument registers, %rcx, %rdx,
%r8 and %r9, which leaves us with %rax, %r10, and %r11 to use. */
.align 4
___chkstk:
popq %r11 /* pop return address */
cfi_adjust_cfa_offset(-8) /* indicate return address in r11 */
cfi_register(%rip, %r11)
movq %rsp, %r10
cmpq $0x1000, %rax /* > 4k ?*/
jb 2f
1: subq $0x1000, %r10 /* yes, move pointer down 4k*/
orl $0x0, (%r10) /* probe there */
subq $0x1000, %rax /* decrement count */
cmpq $0x1000, %rax
ja 1b /* and do it again */
2: subq %rax, %r10
movq %rsp, %rax /* hold CFA until return */
cfi_def_cfa_register(%rax)
orl $0x0, (%r10) /* less than 4k, just peek here */
movq %r10, %rsp /* decrement stack */
/* Push the return value back. Doing this instead of just
jumping to %r11 preserves the cached call-return stack
used by most modern processors. */
pushq %r11
ret
cfi_endproc()
#else
cfi_startproc()
___chkstk:
__alloca:
pushl %ecx /* save temp */
cfi_push(%eax)
leal 8(%esp), %ecx /* point past return addr */
cmpl $0x1000, %eax /* > 4k ?*/
jb 2f
1: subl $0x1000, %ecx /* yes, move pointer down 4k*/
orl $0x0, (%ecx) /* probe there */
subl $0x1000, %eax /* decrement count */
cmpl $0x1000, %eax
ja 1b /* and do it again */
2: subl %eax, %ecx
orl $0x0, (%ecx) /* less than 4k, just peek here */
movl %esp, %eax /* save current stack pointer */
cfi_def_cfa_register(%eax)
movl %ecx, %esp /* decrement stack */
movl (%eax), %ecx /* recover saved temp */
/* Copy the return register. Doing this instead of just jumping to
the address preserves the cached call-return stack used by most
modern processors. */
pushl 4(%eax)
ret
cfi_endproc()
#endif /* __x86_64__ */
#endif /* L_chkstk */
#ifdef L_chkstk_ms
/* ___chkstk_ms is a *special* function call, which uses %rax as the argument.
We avoid clobbering any registers. Unlike ___chkstk, it just probes the
stack and does no stack allocation. */
.global ___chkstk_ms
#ifdef __x86_64__
cfi_startproc()
___chkstk_ms:
pushq %rcx /* save temps */
cfi_push(%rcx)
pushq %rax
cfi_push(%rax)
cmpq $0x1000, %rax /* > 4k ?*/
leaq 24(%rsp), %rcx /* point past return addr */
jb 2f
1: subq $0x1000, %rcx /* yes, move pointer down 4k */
orq $0x0, (%rcx) /* probe there */
subq $0x1000, %rax /* decrement count */
cmpq $0x1000, %rax
ja 1b /* and do it again */
2: subq %rax, %rcx
orq $0x0, (%rcx) /* less than 4k, just peek here */
popq %rax
cfi_pop(%rax)
popq %rcx
cfi_pop(%rcx)
ret
cfi_endproc()
#else
cfi_startproc()
___chkstk_ms:
pushl %ecx /* save temp */
cfi_push(%ecx)
pushl %eax
cfi_push(%eax)
cmpl $0x1000, %eax /* > 4k ?*/
leal 12(%esp), %ecx /* point past return addr */
jb 2f
1: subl $0x1000, %ecx /* yes, move pointer down 4k*/
orl $0x0, (%ecx) /* probe there */
subl $0x1000, %eax /* decrement count */
cmpl $0x1000, %eax
ja 1b /* and do it again */
2: subl %eax, %ecx
orl $0x0, (%ecx) /* less than 4k, just peek here */
popl %eax
cfi_pop(%eax)
popl %ecx
cfi_pop(%ecx)
ret
cfi_endproc()
#endif /* __x86_64__ */
#endif /* L_chkstk_ms */
|
4ms/metamodule-plugin-sdk
| 5,263
|
plugin-libc/libgcc/config/i386/sol2-c1.S
|
/* crt1.s for Solaris 2, x86
Copyright (C) 1993-2022 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file takes control of the process from the kernel, as specified
in section 3 of the System V Application Binary Interface, Intel386
Processor Supplement. It has been constructed from information obtained
from the ABI, information obtained from single stepping existing
Solaris executables through their startup code with gdb, and from
information obtained by single stepping executables on other i386 SVR4
implementations. This file is the first thing linked into any
executable. */
#ifndef GCRT1
.ident "GNU C crt1.s"
#define CLEANUP _cleanup
#else
/* This is a modified crt1.s by J.W.Hawtin <oolon@ankh.org> 15/8/96,
to allow program profiling, by calling monstartup on entry and _mcleanup
on exit. */
.ident "GNU C gcrt1.s"
#define CLEANUP _mcleanup
#endif
.weak _cleanup
.weak _DYNAMIC
.text
/* Start creating the initial frame by pushing a NULL value for the return
address of the initial frame, and mark the end of the stack frame chain
(the innermost stack frame) with a NULL value, per page 3-32 of the ABI.
Initialize the first stack frame pointer in %ebp (the contents of which
are unspecified at process initialization). */
.globl _start
_start:
pushl $0x0
pushl $0x0
movl %esp,%ebp
/* As specified per page 3-32 of the ABI, %edx contains a function
pointer that should be registered with atexit(), for proper
shared object termination. Just push it onto the stack for now
to preserve it. We want to register _cleanup() first. */
pushl %edx
/* Check to see if there is an _cleanup() function linked in, and if
so, register it with atexit() as the last thing to be run by
atexit(). */
movl $CLEANUP,%eax
testl %eax,%eax
je .L1
pushl $CLEANUP
call atexit
addl $0x4,%esp
.L1:
/* Now check to see if we have an _DYNAMIC table, and if so then
we need to register the function pointer previously in %edx, but
now conveniently saved on the stack as the argument to pass to
atexit(). */
movl $_DYNAMIC,%eax
testl %eax,%eax
je .L2
call atexit
.L2:
/* Register _fini() with atexit(). We will take care of calling _init()
directly. */
pushl $_fini
call atexit
#ifdef GCRT1
/* Start profiling. */
pushl %ebp
movl %esp,%ebp
pushl $_etext
pushl $_start
call monstartup
addl $8,%esp
popl %ebp
#endif
/* Compute the address of the environment vector on the stack and load
it into the global variable _environ. Currently argc is at 8 off
the frame pointer. Fetch the argument count into %eax, scale by the
size of each arg (4 bytes) and compute the address of the environment
vector which is 16 bytes (the two zero words we pushed, plus argc,
plus the null word terminating the arg vector) further up the stack,
off the frame pointer (whew!). */
movl 8(%ebp),%eax
leal 16(%ebp,%eax,4),%edx
movl %edx,_environ
/* Push the environment vector pointer, the argument vector pointer,
and the argument count on to the stack to set up the arguments
for _init(), _fpstart(), and main(). Note that the environment
vector pointer and the arg count were previously loaded into
%edx and %eax respectively. The only new value we need to compute
is the argument vector pointer, which is at a fixed address off
the initial frame pointer. */
/* Make sure the stack is properly aligned. */
andl $0xfffffff0,%esp
subl $4,%esp
pushl %edx
leal 12(%ebp),%edx
pushl %edx
pushl %eax
/* Call _init(argc, argv, environ), _fpstart(argc, argv, environ), and
main(argc, argv, environ). */
call _init
call __fpstart
call main
/* Pop the argc, argv, and environ arguments off the stack, push the
value returned from main(), and call exit(). */
addl $12,%esp
pushl %eax
call exit
/* An inline equivalent of _exit, as specified in Figure 3-26 of the ABI. */
pushl $0x0
movl $0x1,%eax
lcall $7,$0
/* If all else fails, just try a halt! */
hlt
.type _start,@function
.size _start,.-_start
#ifndef GCRT1
/* A dummy profiling support routine for non-profiling executables,
in case we link in some objects that have been compiled for profiling. */
.weak _mcount
_mcount:
ret
.type _mcount,@function
.size _mcount,.-_mcount
#endif
|
4ms/metamodule-plugin-sdk
| 2,078
|
plugin-libc/libgcc/config/aarch64/crtn.S
|
# Machine description for AArch64 architecture.
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
# Contributed by ARM Ltd.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
# Note - this macro is complemented by the FUNC_START macro
# in crti.S. If you change this macro you must also change
# that macro match.
#
# Note - we do not try any fancy optimizations of the return
# sequences here, it is just not worth it. Instead keep things
# simple. Restore all the save resgisters, including the link
# register and then perform the correct function return instruction.
.macro FUNC_END
ldp x19, x20, [sp], #16
ldp x21, x22, [sp], #16
ldp x23, x24, [sp], #16
ldp x25, x26, [sp], #16
ldp x27, x28, [sp], #16
ldp x29, x30, [sp], #16
ret
.endm
.section ".init"
;;
FUNC_END
.section ".fini"
;;
FUNC_END
# end of crtn.S
|
4ms/metamodule-plugin-sdk
| 8,030
|
plugin-libc/libgcc/config/aarch64/lse.S
|
/* Out-of-line LSE atomics for AArch64 architecture.
Copyright (C) 2019-2022 Free Software Foundation, Inc.
Contributed by Linaro Ltd.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* The problem that we are trying to solve is operating system deployment
* of ARMv8.1-Atomics, also known as Large System Exensions (LSE).
*
* There are a number of potential solutions for this problem which have
* been proposed and rejected for various reasons. To recap:
*
* (1) Multiple builds. The dynamic linker will examine /lib64/atomics/
* if HWCAP_ATOMICS is set, allowing entire libraries to be overwritten.
* However, not all Linux distributions are happy with multiple builds,
* and anyway it has no effect on main applications.
*
* (2) IFUNC. We could put these functions into libgcc_s.so, and have
* a single copy of each function for all DSOs. However, ARM is concerned
* that the branch-to-indirect-branch that is implied by using a PLT,
* as required by IFUNC, is too much overhead for smaller cpus.
*
* (3) Statically predicted direct branches. This is the approach that
* is taken here. These functions are linked into every DSO that uses them.
* All of the symbols are hidden, so that the functions are called via a
* direct branch. The choice of LSE vs non-LSE is done via one byte load
* followed by a well-predicted direct branch. The functions are compiled
* separately to minimize code size.
*/
#include "auto-target.h"
/* Tell the assembler to accept LSE instructions. */
#ifdef HAVE_AS_LSE
.arch armv8-a+lse
#else
.arch armv8-a
#endif
/* Declare the symbol gating the LSE implementations. */
.hidden __aarch64_have_lse_atomics
/* Turn size and memory model defines into mnemonic fragments. */
#if SIZE == 1
# define S b
# define UXT uxtb
# define B 0x00000000
#elif SIZE == 2
# define S h
# define UXT uxth
# define B 0x40000000
#elif SIZE == 4 || SIZE == 8 || SIZE == 16
# define S
# define UXT mov
# if SIZE == 4
# define B 0x80000000
# elif SIZE == 8
# define B 0xc0000000
# endif
#else
# error
#endif
#if MODEL == 1
# define SUFF _relax
# define A
# define L
# define M 0x000000
# define N 0x000000
# define BARRIER
#elif MODEL == 2
# define SUFF _acq
# define A a
# define L
# define M 0x400000
# define N 0x800000
# define BARRIER
#elif MODEL == 3
# define SUFF _rel
# define A
# define L l
# define M 0x008000
# define N 0x400000
# define BARRIER
#elif MODEL == 4
# define SUFF _acq_rel
# define A a
# define L l
# define M 0x408000
# define N 0xc00000
# define BARRIER
#elif MODEL == 5
# define SUFF _sync
#ifdef L_swp
/* swp has _acq semantics. */
# define A a
# define L
# define M 0x400000
# define N 0x800000
#else
/* All other _sync functions have _seq semantics. */
# define A a
# define L l
# define M 0x408000
# define N 0xc00000
#endif
# define BARRIER dmb ish
#else
# error
#endif
/* Concatenate symbols. */
#define glue2_(A, B) A ## B
#define glue2(A, B) glue2_(A, B)
#define glue3_(A, B, C) A ## B ## C
#define glue3(A, B, C) glue3_(A, B, C)
#define glue4_(A, B, C, D) A ## B ## C ## D
#define glue4(A, B, C, D) glue4_(A, B, C, D)
/* Select the size of a register, given a regno. */
#define x(N) glue2(x, N)
#define w(N) glue2(w, N)
#if SIZE < 8
# define s(N) w(N)
#else
# define s(N) x(N)
#endif
#define NAME(BASE) glue4(__aarch64_, BASE, SIZE, SUFF)
#if MODEL == 5
/* Drop A for _sync functions. */
# define LDXR glue3(ld, xr, S)
#else
# define LDXR glue4(ld, A, xr, S)
#endif
#define STXR glue4(st, L, xr, S)
/* Temporary registers used. Other than these, only the return value
register (x0) and the flags are modified. */
#define tmp0 16
#define tmp1 17
#define tmp2 15
#define BTI_C hint 34
/* Start and end a function. */
.macro STARTFN name
.text
.balign 16
.globl \name
.hidden \name
.type \name, %function
.cfi_startproc
\name:
BTI_C
.endm
.macro ENDFN name
.cfi_endproc
.size \name, . - \name
.endm
/* Branch to LABEL if LSE is disabled. */
.macro JUMP_IF_NOT_LSE label
adrp x(tmp0), __aarch64_have_lse_atomics
ldrb w(tmp0), [x(tmp0), :lo12:__aarch64_have_lse_atomics]
cbz w(tmp0), \label
.endm
#ifdef L_cas
STARTFN NAME(cas)
JUMP_IF_NOT_LSE 8f
#if SIZE < 16
#ifdef HAVE_AS_LSE
# define CAS glue4(cas, A, L, S) s(0), s(1), [x2]
#else
# define CAS .inst 0x08a07c41 + B + M
#endif
CAS /* s(0), s(1), [x2] */
ret
8: UXT s(tmp0), s(0)
0: LDXR s(0), [x2]
cmp s(0), s(tmp0)
bne 1f
STXR w(tmp1), s(1), [x2]
cbnz w(tmp1), 0b
1: BARRIER
ret
#else
#if MODEL == 5
/* Drop A for _sync functions. */
# define LDXP glue2(ld, xp)
#else
# define LDXP glue3(ld, A, xp)
#endif
#define STXP glue3(st, L, xp)
#ifdef HAVE_AS_LSE
# define CASP glue3(casp, A, L) x0, x1, x2, x3, [x4]
#else
# define CASP .inst 0x48207c82 + M
#endif
CASP /* x0, x1, x2, x3, [x4] */
ret
8: mov x(tmp0), x0
mov x(tmp1), x1
0: LDXP x0, x1, [x4]
cmp x0, x(tmp0)
ccmp x1, x(tmp1), #0, eq
bne 1f
STXP w(tmp2), x2, x3, [x4]
cbnz w(tmp2), 0b
1: BARRIER
ret
#endif
ENDFN NAME(cas)
#endif
#ifdef L_swp
#ifdef HAVE_AS_LSE
# define SWP glue4(swp, A, L, S) s(0), s(0), [x1]
#else
# define SWP .inst 0x38208020 + B + N
#endif
STARTFN NAME(swp)
JUMP_IF_NOT_LSE 8f
SWP /* s(0), s(0), [x1] */
ret
8: mov s(tmp0), s(0)
0: LDXR s(0), [x1]
STXR w(tmp1), s(tmp0), [x1]
cbnz w(tmp1), 0b
BARRIER
ret
ENDFN NAME(swp)
#endif
#if defined(L_ldadd) || defined(L_ldclr) \
|| defined(L_ldeor) || defined(L_ldset)
#ifdef L_ldadd
#define LDNM ldadd
#define OP add
#define OPN 0x0000
#elif defined(L_ldclr)
#define LDNM ldclr
#define OP bic
#define OPN 0x1000
#elif defined(L_ldeor)
#define LDNM ldeor
#define OP eor
#define OPN 0x2000
#elif defined(L_ldset)
#define LDNM ldset
#define OP orr
#define OPN 0x3000
#else
#error
#endif
#ifdef HAVE_AS_LSE
# define LDOP glue4(LDNM, A, L, S) s(0), s(0), [x1]
#else
# define LDOP .inst 0x38200020 + OPN + B + N
#endif
STARTFN NAME(LDNM)
JUMP_IF_NOT_LSE 8f
LDOP /* s(0), s(0), [x1] */
ret
8: mov s(tmp0), s(0)
0: LDXR s(0), [x1]
OP s(tmp1), s(0), s(tmp0)
STXR w(tmp2), s(tmp1), [x1]
cbnz w(tmp2), 0b
BARRIER
ret
ENDFN NAME(LDNM)
#endif
/* GNU_PROPERTY_AARCH64_* macros from elf.h for use in asm code. */
#define FEATURE_1_AND 0xc0000000
#define FEATURE_1_BTI 1
#define FEATURE_1_PAC 2
/* Supported features based on the code generation options. */
#if defined(__ARM_FEATURE_BTI_DEFAULT)
# define BTI_FLAG FEATURE_1_BTI
#else
# define BTI_FLAG 0
#endif
#if __ARM_FEATURE_PAC_DEFAULT & 3
# define PAC_FLAG FEATURE_1_PAC
#else
# define PAC_FLAG 0
#endif
/* Add a NT_GNU_PROPERTY_TYPE_0 note. */
#define GNU_PROPERTY(type, value) \
.section .note.gnu.property, "a"; \
.p2align 3; \
.word 4; \
.word 16; \
.word 5; \
.asciz "GNU"; \
.word type; \
.word 4; \
.word value; \
.word 0;
#if defined(__linux__) || defined(__FreeBSD__)
.section .note.GNU-stack, "", %progbits
/* Add GNU property note if built with branch protection. */
# if (BTI_FLAG|PAC_FLAG) != 0
GNU_PROPERTY (FEATURE_1_AND, BTI_FLAG|PAC_FLAG)
# endif
#endif
|
4ms/metamodule-plugin-sdk
| 1,995
|
plugin-libc/libgcc/config/aarch64/crti.S
|
# Machine description for AArch64 architecture.
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
# Contributed by ARM Ltd.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
# This file creates a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
#ifdef __ELF__
#define TYPE(x) .type x,function
#else
#define TYPE(x)
#endif
# Note - this macro is complemented by the FUNC_END macro
# in crtn.S. If you change this macro you must also change
# that macro match.
.macro FUNC_START
# Create a stack frame and save any call-preserved registers
stp x29, x30, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x19, x20, [sp, #-16]!
.endm
.section ".init"
.align 2
.global _init
TYPE(_init)
_init:
FUNC_START
.section ".fini"
.align 2
.global _fini
TYPE(_fini)
_fini:
FUNC_START
# end of crti.S
|
4ms/metamodule-plugin-sdk
| 5,886
|
plugin-libc/libgcc/config/frv/lib1funcs.S
|
/* Library functions.
Copyright (C) 2000-2022 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
This file is part of GCC.
GCC is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include <frv-asm.h>
#ifdef L_cmpll
/* icc0 = __cmpll (long long a, long long b) */
.globl EXT(__cmpll)
.type EXT(__cmpll),@function
.text
.p2align 4
EXT(__cmpll):
cmp gr8, gr10, icc0
ckeq icc0, cc4
P(ccmp) gr9, gr11, cc4, 1
ret
.Lend:
.size EXT(__cmpll),.Lend-EXT(__cmpll)
#endif /* L_cmpll */
#ifdef L_cmpf
/* icc0 = __cmpf (float a, float b) */
/* Note, because this function returns the result in ICC0, it means it can't
handle NaNs. */
.globl EXT(__cmpf)
.type EXT(__cmpf),@function
.text
.p2align 4
EXT(__cmpf):
#ifdef __FRV_HARD_FLOAT__ /* floating point instructions available */
movgf gr8, fr0
P(movgf) gr9, fr1
setlos #1, gr8
fcmps fr0, fr1, fcc0
P(fcklt) fcc0, cc0
fckeq fcc0, cc1
csub gr0, gr8, gr8, cc0, 1
cmov gr0, gr8, cc1, 1
cmpi gr8, 0, icc0
ret
#else /* no floating point instructions available */
movsg lr, gr4
addi sp, #-16, sp
sti gr4, @(sp, 8)
st fp, @(sp, gr0)
mov sp, fp
call EXT(__cmpsf2)
cmpi gr8, #0, icc0
ldi @(sp, 8), gr4
movgs gr4, lr
ld @(sp,gr0), fp
addi sp, #16, sp
ret
#endif
.Lend:
.size EXT(__cmpf),.Lend-EXT(__cmpf)
#endif
#ifdef L_cmpd
/* icc0 = __cmpd (double a, double b) */
/* Note, because this function returns the result in ICC0, it means it can't
handle NaNs. */
.globl EXT(__cmpd)
.type EXT(__cmpd),@function
.text
.p2align 4
EXT(__cmpd):
movsg lr, gr4
addi sp, #-16, sp
sti gr4, @(sp, 8)
st fp, @(sp, gr0)
mov sp, fp
call EXT(__cmpdf2)
cmpi gr8, #0, icc0
ldi @(sp, 8), gr4
movgs gr4, lr
ld @(sp,gr0), fp
addi sp, #16, sp
ret
.Lend:
.size EXT(__cmpd),.Lend-EXT(__cmpd)
#endif
#ifdef L_addll
/* gr8,gr9 = __addll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__addll)
.type EXT(__addll),@function
.text
.p2align
EXT(__addll):
addcc gr9, gr11, gr9, icc0
addx gr8, gr10, gr8, icc0
ret
.Lend:
.size EXT(__addll),.Lend-EXT(__addll)
#endif
#ifdef L_subll
/* gr8,gr9 = __subll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__subll)
.type EXT(__subll),@function
.text
.p2align 4
EXT(__subll):
subcc gr9, gr11, gr9, icc0
subx gr8, gr10, gr8, icc0
ret
.Lend:
.size EXT(__subll),.Lend-EXT(__subll)
#endif
#ifdef L_andll
/* gr8,gr9 = __andll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__andll)
.type EXT(__andll),@function
.text
.p2align 4
EXT(__andll):
P(and) gr9, gr11, gr9
P2(and) gr8, gr10, gr8
ret
.Lend:
.size EXT(__andll),.Lend-EXT(__andll)
#endif
#ifdef L_orll
/* gr8,gr9 = __orll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__orll)
.type EXT(__orll),@function
.text
.p2align 4
EXT(__orll):
P(or) gr9, gr11, gr9
P2(or) gr8, gr10, gr8
ret
.Lend:
.size EXT(__orll),.Lend-EXT(__orll)
#endif
#ifdef L_xorll
/* gr8,gr9 = __xorll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__xorll)
.type EXT(__xorll),@function
.text
.p2align 4
EXT(__xorll):
P(xor) gr9, gr11, gr9
P2(xor) gr8, gr10, gr8
ret
.Lend:
.size EXT(__xorll),.Lend-EXT(__xorll)
#endif
#ifdef L_notll
/* gr8,gr9 = __notll (long long a) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__notll)
.type EXT(__notll),@function
.text
.p2align 4
EXT(__notll):
P(not) gr9, gr9
P2(not) gr8, gr8
ret
.Lend:
.size EXT(__notll),.Lend-EXT(__notll)
#endif
#ifdef L_cmov
/* (void) __cmov (char *dest, const char *src, size_t len) */
/*
* void __cmov (char *dest, const char *src, size_t len)
* {
* size_t i;
*
* if (dest < src || dest > src+len)
* {
* for (i = 0; i < len; i++)
* dest[i] = src[i];
* }
* else
* {
* while (len-- > 0)
* dest[len] = src[len];
* }
* }
*/
.globl EXT(__cmov)
.type EXT(__cmov),@function
.text
.p2align 4
EXT(__cmov):
P(cmp) gr8, gr9, icc0
add gr9, gr10, gr4
P(cmp) gr8, gr4, icc1
bc icc0, 0, .Lfwd
bls icc1, 0, .Lback
.Lfwd:
/* move bytes in a forward direction */
P(setlos) #0, gr5
cmp gr0, gr10, icc0
P(subi) gr9, #1, gr9
P2(subi) gr8, #1, gr8
bnc icc0, 0, .Lret
.Lfloop:
/* forward byte move loop */
addi gr5, #1, gr5
P(ldsb) @(gr9, gr5), gr4
cmp gr5, gr10, icc0
P(stb) gr4, @(gr8, gr5)
bc icc0, 0, .Lfloop
ret
.Lbloop:
/* backward byte move loop body */
ldsb @(gr9,gr10),gr4
stb gr4,@(gr8,gr10)
.Lback:
P(cmpi) gr10, #0, icc0
addi gr10, #-1, gr10
bne icc0, 0, .Lbloop
.Lret:
ret
.Lend:
.size EXT(__cmov),.Lend-EXT(__cmov)
#endif
|
4ms/metamodule-plugin-sdk
| 1,155
|
plugin-libc/libgcc/config/epiphany/crtn.S
|
# End .init and .fini sections.
# Copyright (C) 2010-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
.section .init
ldr lr,[sp,4]
add sp,sp,16
jr lr
.section .fini
ldr lr,[sp,4]
add sp,sp,16
jr lr
|
4ms/metamodule-plugin-sdk
| 2,155
|
plugin-libc/libgcc/config/epiphany/umodsi3.S
|
/* Unsigned 32 bit modulo optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
FSTAB (__umodsi3,T_UINT)
.global SYM(__umodsi3)
.balign 4
HIDDEN_FUNC(__umodsi3)
SYM(__umodsi3):
mov r2,5
lsl r2,r2,29 ; 0xa0000000
orr r3,r2,r0
lsr r15,r0,16
movt r15,0xa800
movne r3,r15
lsr r16,r2,2 ; 0x28000000
and r15,r3,r16
fadd r12,r3,r15
orr r3,r2,r1
lsr r2,r1,16
movt r2,0xa800
movne r3,r2
and r2,r16,r3
fadd r3,r3,r2
sub r2,r0,r1
bltu .Lret_a
lsr r12,r12,23
mov r2,%low(.L0step)
movt r2,%high(.L0step)
lsr r3,r3,23
sub r3,r12,r3 ; calculate bit number difference.
lsl r3,r3,3
sub r2,r2,r3
jr r2
/* lsl_l r2,r1,n` sub r2,r0,r2` movgteu r0,r2 */
#define STEP(n) .long 0x0006441f | (n) << 5` sub r2,r0,r2` movgteu r0,r2
.balign 8,,2
STEP(31)` STEP(30)` STEP(29)` STEP(28)`
STEP(27)` STEP(26)` STEP(25)` STEP(24)`
STEP(23)` STEP(22)` STEP(21)` STEP(20)`
STEP(19)` STEP(18)` STEP(17)` STEP(16)`
STEP(15)` STEP(14)` STEP(13)` STEP(12)`
STEP(11)` STEP(10)` STEP(9)` STEP(8)`
STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
.L0step:STEP(0)
.Lret_a:rts
ENDFUNC(__umodsi3)
|
4ms/metamodule-plugin-sdk
| 2,167
|
plugin-libc/libgcc/config/epiphany/udivsi3.S
|
/* Unsigned 32 bit division optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
FSTAB (__udivsi3,T_UINT)
.global SYM(__udivsi3)
.balign 4
HIDDEN_FUNC(__udivsi3)
SYM(__udivsi3):
sub r3,r0,r1
bltu .Lret0
mov r3,0x95
lsl r12,r3,23 ; 0x4a800000
lsl r3,r3,30 ; 0x40000000
orr r16,r0,r3
orr r2,r1,r3
fsub r16,r16,r3
fsub r2,r2,r3
lsr r3,r1,21
lsr r17,r0,21
movt r17,0x4a80
fsub r17,r17,r12
movt r3,0x4a80
fsub r3,r3,r12
mov r12,%low(.L0step)
movt r12,%high(.L0step)
mov r21,1
movne r16,r17
lsr r17,r1,21
movne r2,r3
lsr r3,r16,23 ; must mask lower bits of r2 in case op0 was ..
lsr r2,r2,23 ; .. shifted and op1 was not.
sub r3,r3,r2 ; calculate bit number difference.
lsl r1,r1,r3
lsr r16,r1,1
lsl r2,r21,r3
lsl r3,r3,3
sub r12,r12,r3
sub r3,r0,r1
movltu r3,r0
mov r0,0
movgteu r0,r2
lsr r2,r2,1
add r17,r2,r0
sub r1,r3,r16
movgteu r3,r1
movgteu r0,r17
sub r16,r16,1
jr r12
.rep 30
lsl r3,r3,1
sub r1,r3,r16
movgteu r3,r1
.endr
sub r2,r2,1 ; mask result bits from steps ...
and r3,r3,r2
orr r0,r0,r3 ; ... and combine with first bits.
nop
.L0step:rts
.Lret0: mov r0,0
rts
ENDFUNC(__udivsi3)
|
4ms/metamodule-plugin-sdk
| 2,217
|
plugin-libc/libgcc/config/epiphany/divsi3.S
|
/* Signed 32 bit division optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
; ---------------------------------------------------------------
; int __divsi3 (int a, int b) -- signed 32-bit divide (Epiphany)
; In:   r0 = dividend, r1 = divisor
; Out:  r0 = truncated quotient (0 when |a| < |b|)
; Plan: work on |a| and |b|; use FPU bit-pattern tricks (orr/fsub
;       against exponent constants) to estimate each magnitude's MSB
;       index, then jump into an unrolled restoring-division sequence
;       sized by the MSB-index difference.  Sign restored at the end.
; NOTE(review): divide-by-zero is not special-cased here -- confirm
; intended behavior against upstream libgcc.
; ---------------------------------------------------------------
FSTAB (__divsi3,T_INT)
.global SYM(__divsi3)
.balign 4
HIDDEN_FUNC(__divsi3)
SYM(__divsi3):
mov r12,0
sub r2,r12,r0 ; r2 = -a (sets flags)
movlt r2,r0 ; r2 = |a|
sub r3,r12,r1
movlt r3,r1 ; r3 = |b|
sub r19,r2,r3
bltu .Lret0 ; |a| < |b| -> quotient is 0
; Build two candidate "float-encoded" magnitudes per operand: one via
; bias 0x40000000 for small values, one via the >>23 form with bias
; 0x4b800000 for large ones; fsub leaves the MSB index in the exponent
; field (same trick as the other epiphany division variants).
movt r12,0x4000 ; r12 = 0x40000000
orr r16,r2,r12
orr r18,r3,r12
fsub r16,r16,r12
fsub r18,r18,r12
movt r12,0x4b80 ; r12 = 0x4b800000
lsr r19,r3,23
lsr r17,r2,23
movt r17,0x4b80
fsub r17,r17,r12
movt r19,0x4b80
fsub r19,r19,r12
mov r12,%low(.L0step)
movt r12,%high(.L0step) ; r12 = address of the final unrolled step
mov r20,0 ; r20 = quotient accumulator
mov r21,1
movne r16,r17 ; large |a|: use the wide encoding (flags from lsr above)
lsr r17,r3,23
movne r18,r19 ; likewise for |b|
eor r1,r1,r0 ; save sign
asr r19,r1,31 ; r19 = 0 or -1: sign mask for the result
lsr r1,r16,23 ; exponent field ~ MSB index of |a|
lsr r0,r18,23 ; exponent field ~ MSB index of |b|
sub r1,r1,r0 ; calculate bit number difference.
lsl r3,r3,r1 ; align divisor under dividend's MSB
lsr r16,r3,1
lsl r0,r21,r1 ; r0 = 1 << diff: value of the first quotient bit
lsl r1,r1,3 ; 8 bytes of code per unrolled step
sub r12,r12,r1 ; entry point inside the unrolled sequence
; Two restoring steps done inline before entering the unrolled run.
sub r3,r2,r3
movgteu r2,r3
movgteu r20,r0
lsr r0,r0,1
add r17,r0,r20
sub r3,r2,r16
movgteu r2,r3
movgteu r20,r17
sub r16,r16,1 ; divisor-1: shifted-in low bit rides along as a quotient bit
jr r12 ; enter the unrolled sequence
.rep 30
lsl r2,r2,1 ; restoring division: shift remainder, conditional subtract
sub r3,r2,r16
movgteu r2,r3
.endr
sub r0,r0,1 ; mask result bits from steps ...
and r0,r0,r2
orr r20,r0,r20 ; ... and combine with first bit.
.L0step:eor r0,r20,r19 ; restore sign
sub r0,r0,r19 ; (q ^ m) - m negates when m == -1
rts
.Lret0: mov r0,0 ; |a| < |b|
rts
ENDFUNC(__divsi3)
|
4ms/metamodule-plugin-sdk
| 2,137
|
plugin-libc/libgcc/config/epiphany/umodsi3-float.S
|
/* Unsigned 32 bit division optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
/* Because we handle a divident with bit 31 set with truncating integer
arithmetic, there is no rounding-related overflow. */
; ---------------------------------------------------------------
; unsigned __umodsi3 (unsigned a, unsigned b) -- a % b (Epiphany FPU)
; In:  r0 = a, r1 = b   Out: r0 = remainder
; float-converts both operands; the bit-pattern difference's exponent
; field estimates how far b must be shifted up under a, then an
; unrolled run of conditional subtracts reduces a.  Redundant steps
; are harmless for a remainder, so the estimate need not be exact.
; ---------------------------------------------------------------
FSTAB (__umodsi3,T_UINT)
.global SYM(__umodsi3)
.balign 4
HIDDEN_FUNC(__umodsi3)
SYM(__umodsi3):
float r2,r0 ; r2 = (float) a
mov TMP1,%low(0xb0800000) ; ??? this would be faster with small data
float TMP2,r1
movt TMP1,%high(0xb0800000)
asr TMP0,r0,8
sub TMP0,TMP0,TMP1 ; substitute pattern when a has bit 31 set (see file header)
mov TMP1,%low(.L0step)
movgteu r2,TMP0 ; keep whichever pattern is larger
sub r2,r2,TMP2 ; pattern difference: exponent delta in bits 30..23
blteu .L0step ; a's MSB not above b's: a single conditional subtract suffices
asr r2,r2,23 ; r2 = shift-count estimate
movt TMP1,%high(.L0step)
lsl TMP2,r2,3 ; 8 code bytes per STEP
lsl r2,r1,r2` sub r2,r0,r2` movgteu r0,r2 ; STEP(r2)
sub r2,TMP1,TMP2 ; entry address = .L0step - 8*count
jr r2
; STEP(n): if r0 >= (b << n) then r0 -= (b << n).  lsl.l forces the
; 32-bit encoding so every STEP occupies exactly 8 bytes.
#define STEP(n) lsl.l r2,r1,n` sub r2,r0,r2` movgteu r0,r2
.balign 8,,2
STEP(31)` STEP(30)` STEP(29)` STEP(28)`
STEP(27)` STEP(26)` STEP(25)` STEP(24)`
STEP(23)` STEP(22)` STEP(21)` STEP(20)`
STEP(19)` STEP(18)` STEP(17)` STEP(16)`
STEP(15)` STEP(14)` STEP(13)` STEP(12)`
STEP(11)` STEP(10)` STEP(9)` STEP(8)`
STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
.L0step:STEP(0)
.Lret_r0:
rts
ENDFUNC(__umodsi3)
|
4ms/metamodule-plugin-sdk
| 2,048
|
plugin-libc/libgcc/config/epiphany/divsi3-float.S
|
/* Signed 32 bit division optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
; ---------------------------------------------------------------
; int __divsi3 (int a, int b) -- signed divide (Epiphany, FPU-assisted)
; In:  r0 = a, r1 = b   Out: r0 = truncated quotient
; NOTE(review): FSTAB type is T_UINT although the routine is signed;
; compare divsi3.S (T_INT) -- confirm against upstream libgcc.
; ---------------------------------------------------------------
FSTAB (__divsi3,T_UINT)
.global SYM(__divsi3)
.balign 4
HIDDEN_FUNC(__divsi3)
SYM(__divsi3):
float TMP2,r0 ; TMP2 = (float) a (signed, pre-abs)
mov TMP4,0
float TMP1,r1
sub TMP0,TMP4,r0 ; TMP0 = -a (sets flags)
beq .Lret_r0 ; a == 0 -> return r0 (0) unchanged
movgt r0,TMP0 ; r0 = |a|
sub TMP0,TMP4,r1
movgt r1,TMP0 ; r1 = |b|
mov TMP0,1 ; TMP0 = top quotient-bit value (shifted up below)
sub TMP2,TMP2,TMP1 ; float-pattern difference
asr TMP3,TMP2,31 ; save sign
lsl TMP2,TMP2,1 ; drop the sign bit; N flag <- |a| vs |b| ordering
blt .Lret0 ; |a| < |b| -> quotient 0
sub TMP1,TMP2,1 ; rounding compensation, avoid overflow
movgte TMP2,TMP1
lsr TMP2,TMP2,24 ; exponent delta = shift count
lsl r1,r1,TMP2 ; align divisor under dividend
lsl TMP0,TMP0,TMP2
; Two restoring steps done inline develop the leading quotient bits.
sub TMP1,r0,r1
movgteu r0,TMP1
movgteu TMP4,TMP0
lsl TMP5,TMP0,1
sub TMP1,r0,r1
movgteu r0,TMP1
movgteu TMP4,TMP5
sub TMP1,r1,1 ; divisor-1: shifted-in low bit rides along as a quotient bit
mov r1,%low(.L0step)
movt r1,%high(.L0step)
lsl TMP2,TMP2,3 ; 8 bytes of code per unrolled step
sub r1,r1,TMP2
jr r1 ; enter the unrolled sequence
.rep 30
lsl r0,r0,1 ; shift remainder; quotient bits gather at the low end
sub.l r1,r0,TMP1
movgteu r0,r1
.endr
.L0step:sub r1,TMP0,1 ; mask result bits from steps ...
and r0,r0,r1
orr r0,r0,TMP4 ; ... and combine with first bit.
eor r0,r0,TMP3 ; restore sign
sub r0,r0,TMP3 ; (q ^ s) - s: negate when s == -1
.Lret_r0:rts
.Lret0: mov r0,0
rts
ENDFUNC(__divsi3)
|
4ms/metamodule-plugin-sdk
| 1,975
|
plugin-libc/libgcc/config/epiphany/modsi3-float.S
|
/* Unsigned 32 bit division optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
; ---------------------------------------------------------------
; int __modsi3 (int a, int b) -- signed a % b (Epiphany, FPU-assisted)
; In:  r0 = a, r1 = b   Out: r0 = remainder (sign follows dividend)
; float-converts both operands; the pattern difference's exponent
; field bounds the shift needed, then unrolled conditional-subtract
; STEPs reduce |a| modulo |b|.
; ---------------------------------------------------------------
FSTAB (__modsi3,T_UINT)
.global SYM(__modsi3)
.balign 4
HIDDEN_FUNC(__modsi3)
SYM(__modsi3):
asr TMP3,r0,31 ; save sign
float TMP0,r0 ; TMP0 = (float) a (before negation)
float TMP1,r1
mov r2,0
sub TMP4,r2,r0 ; TMP4 = -a (sets flags)
beq .Lret_r0 ; a == 0 -> remainder 0
movgt r0,TMP4 ; r0 = |a|
sub TMP2,r2,r1
movlte TMP2,r1 ; TMP2 = |b|
sub r2,TMP0,TMP1 ; float-pattern difference
lsl r2,r2,1 ; drop sign bit; flags <- |a| vs |b| ordering
blte .L0step ; |a| not above |b|: one conditional subtract suffices
asr TMP4,r2,24 ; exponent delta (after the <<1) = shift count
lsl r2,TMP4,3 ; 8 code bytes per STEP
mov TMP4,%low(.L0step)
movt TMP4,%high(.L0step)
sub r2,TMP4,r2 ; entry address = .L0step - 8*count
jr r2
; STEP(n): if r0 >= (|b| << n) then r0 -= (|b| << n).  lsl.l forces
; the 32-bit encoding so every STEP occupies exactly 8 bytes.
#define STEP(n) lsl.l r2,TMP2,n` sub r2,r0,r2` movgteu r0,r2
.balign 8,,2
STEP(31)` STEP(30)` STEP(29)` STEP(28)`
STEP(27)` STEP(26)` STEP(25)` STEP(24)`
STEP(23)` STEP(22)` STEP(21)` STEP(20)`
STEP(19)` STEP(18)` STEP(17)` STEP(16)`
STEP(15)` STEP(14)` STEP(13)` STEP(12)`
STEP(11)` STEP(10)` STEP(9)` STEP(8)`
STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
.L0step:STEP(0)
eor r0,r0,TMP3 ; restore sign
sub r0,r0,TMP3 ; negate when the dividend was negative
.Lret_r0:
rts
ENDFUNC(__modsi3)
|
4ms/metamodule-plugin-sdk
| 1,094
|
plugin-libc/libgcc/config/epiphany/crtm1reg-r43.S
|
# initialize config for -m1reg-r43
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# Runtime setup for -m1reg-r43: the compiler dedicates r43 as a
# "constant -1" register; this .init fragment initializes it once at
# startup.  r0 is clobbered as scratch; -1 is built by subtraction,
# presumably because a mov immediate cannot encode it directly --
# confirm against the Epiphany ISA.
.section .init
mov r0, 0
sub r43,r0,1 ; r43 = 0 - 1 = -1
|
4ms/metamodule-plugin-sdk
| 1,179
|
plugin-libc/libgcc/config/epiphany/crti.S
|
# Start .init and .fini sections.
# Copyright (C) 2010-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
; Open the init/fini bodies assembled across the crt objects: push the
; link register with a post-modify store.  The matching reload and rts
; are supplied by the corresponding crtn object (not in this file).
.section .init
.global init
.balign 2
init:
str lr,[sp],-4 ; push lr (sp post-decremented by the store)
.section .fini
.global fini
.balign 2
fini:
str lr,[sp],-4 ; same prologue for the fini chain
|
4ms/metamodule-plugin-sdk
| 1,105
|
plugin-libc/libgcc/config/epiphany/crtrunc.S
|
# initialize config for -mfp-mode=truncate
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# Runtime setup for -mfp-mode=truncate: write 1 into the CONFIG
# special register at startup.  Per this file's stated purpose that
# selects truncating FP rounding; confirm the exact bit assignment
# against the Epiphany architecture reference.
.section .init
mov r0, 1
movts config,r0 ; CONFIG <- 1
|
4ms/metamodule-plugin-sdk
| 2,250
|
plugin-libc/libgcc/config/epiphany/udivsi3-float.S
|
/* Unsigned 32 bit division optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
; ---------------------------------------------------------------
; unsigned __udivsi3 (unsigned a, unsigned b) (Epiphany, FPU-assisted)
; In:  r0 = a, r1 = b   Out: r0 = a / b (0 when a < b)
; float-converts both operands to bound the quotient's bit length,
; aligns the divisor, develops the top quotient bits with a short
; subtract loop, then finishes in an unrolled shift/subtract sequence.
; ---------------------------------------------------------------
FSTAB (__udivsi3,T_UINT)
.global SYM(__udivsi3)
.balign 4
HIDDEN_FUNC(__udivsi3)
SYM(__udivsi3):
sub TMP0,r0,r1
bltu .Lret0 ; a < b -> quotient 0
float TMP2,r0 ; TMP2 = (float) a
mov TMP1,%low(0xb0800000) ; ??? this would be faster with small data
float TMP3,r1
movt TMP1,%high(0xb0800000)
asr TMP0,r0,8
sub TMP0,TMP0,TMP1 ; substitute pattern for huge a (float conversion rounds)
movt TMP1,%high(0x00810000) ; TMP1 becomes 0x00810000 (low half was 0)
movgteu TMP2,TMP0 ; keep the larger pattern for a
bblt .Lret1 ; NOTE(review): 'bblt' is not an obvious Epiphany mnemonic -- confirm against upstream
sub TMP2,TMP2,TMP1
sub TMP2,TMP2,TMP3 ; pattern difference ~ (bits(a) - bits(b)) << 23
mov TMP3,0 ; TMP3 = accumulator for the top quotient bits
movltu TMP2,TMP3 ; clamp a negative difference to 0
lsr TMP2,TMP2,23 ; TMP2 = shift count
lsl r1,r1,TMP2 ; align divisor under dividend
mov TMP0,1
lsl TMP0,TMP0,TMP2 ; TMP0 = value of the top quotient bit
sub r0,r0,r1
bltu .Ladd_back
add TMP3,TMP3,TMP0
sub r0,r0,r1
bltu .Ladd_back
.Lsub_loop:; More than two iterations are rare, so it makes sense to leave
; this label here to reduce average branch penalties.
add TMP3,TMP3,TMP0
sub r0,r0,r1
bgteu .Lsub_loop
.Ladd_back:
add r0,r0,r1 ; undo the overshooting subtract
sub TMP1,r1,1 ; divisor-1: shifted-in low bit rides along as a quotient bit
mov r1,%low(.L0step)
movt r1,%high(.L0step)
lsl TMP2,TMP2,3 ; 8 bytes of code per unrolled step
sub r1,r1,TMP2
jr r1 ; enter the unrolled sequence
.rep 30
lsl r0,r0,1 ; shift remainder; quotient bits gather at the low end
sub.l r1,r0,TMP1
movgteu r0,r1
.endr
.L0step:sub r1,TMP0,1 ; mask result bits from steps ...
and r0,r0,r1
orr r0,r0,TMP3 ; ... and combine with first bits.
rts
.Lret0: mov r0,0
rts
.Lret1: mov r0,1 ; special-cased quotient of 1 (branch above)
rts
ENDFUNC(__udivsi3)
|
4ms/metamodule-plugin-sdk
| 1,137
|
plugin-libc/libgcc/config/epiphany/crtint.S
|
# initialize config for -mfp-mode=int
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# Runtime setup for -mfp-mode=int: write 524288 (0x80000, bit 19)
# into the CONFIG special register at startup, switching the FPU data
# path to integer mode per this file's stated purpose; confirm the
# bit assignment against the Epiphany architecture reference.
.section .init
mov r0, %low(#524288)
movt r0, %high(#524288)
movts config,r0
|
4ms/metamodule-plugin-sdk
| 1,094
|
plugin-libc/libgcc/config/epiphany/crtm1reg-r63.S
|
# initialize config for -m1reg-r63
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# Runtime setup for -m1reg-r63: the compiler dedicates r63 as a
# "constant -1" register; this .init fragment initializes it once at
# startup.  r0 is clobbered as scratch; -1 is built by subtraction,
# presumably because a mov immediate cannot encode it directly --
# confirm against the Epiphany ISA.
.section .init
mov r0, 0
sub r63,r0,1 ; r63 = 0 - 1 = -1
|
4ms/metamodule-plugin-sdk
| 2,275
|
plugin-libc/libgcc/config/epiphany/modsi3.S
|
/* Signed 32 bit modulo optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
; ---------------------------------------------------------------
; int __modsi3 (int a, int b) -- signed a % b (Epiphany, int variant)
; In:  r0 = a, r1 = b   Out: r0 = remainder (sign follows dividend)
; Uses fadd on specially biased bit patterns to derive an MSB-index
; estimate for each magnitude without an int->float conversion, then
; jumps into unrolled conditional-subtract STEPs.
; ---------------------------------------------------------------
FSTAB (__modsi3,T_INT)
.global SYM(__modsi3)
.balign 4
HIDDEN_FUNC(__modsi3)
SYM(__modsi3):
asr r17,r0,31 ; save sign
mov r2,0
sub r3,r2,r0
movgt r0,r3 ; r0 = |a|
sub r3,r2,r1
movgt r1,r3 ; r1 = |b|
movt r2,0xa000 ; 0xa0000000
orr r3,r2,r0 ; encoding used when |a| < 2^16
lsr r15,r0,16
movt r15,0xa800
movne r3,r15 ; wide |a|: switch to the >>16 encoding
lsr r16,r2,2 ; 0x28000000
and r15,r3,r16
fadd r12,r3,r15 ; exponent field of r12 ~ MSB index of |a| (estimate)
orr r3,r2,r1 ; same construction for |b|
lsr r2,r1,16
movt r2,0xa800
movne r3,r2
and r2,r16,r3
fadd r3,r3,r2 ; exponent field of r3 ~ MSB index of |b| (estimate)
sub r2,r0,r1
bltu .Lret_a ; |a| < |b| -> remainder is a (sign restored below)
lsr r12,r12,23
mov r2,%low(.L0step)
movt r2,%high(.L0step)
lsr r3,r3,23
sub r3,r12,r3 ; calculate bit number difference.
lsl r3,r3,3 ; 8 code bytes per STEP
sub r2,r2,r3
jr r2 ; enter at STEP(diff)
; STEP(n): if r0 >= (|b| << n) then r0 -= (|b| << n).  The shift is
; hand-encoded (.long) to force the 32-bit 'lsl' form; the comment
; below shows the intended mnemonic.
/* lsl_l r2,r1,n` sub r2,r0,r2` movgteu r0,r2 */
#define STEP(n) .long 0x0006441f | (n) << 5` sub r2,r0,r2` movgteu r0,r2
.balign 8,,2
STEP(31)` STEP(30)` STEP(29)` STEP(28)`
STEP(27)` STEP(26)` STEP(25)` STEP(24)`
STEP(23)` STEP(22)` STEP(21)` STEP(20)`
STEP(19)` STEP(18)` STEP(17)` STEP(16)`
STEP(15)` STEP(14)` STEP(13)` STEP(12)`
STEP(11)` STEP(10)` STEP(9)` STEP(8)`
STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
.L0step:STEP(0)
.Lret_a:eor r0,r0,r17 ; restore sign
sub r0,r0,r17 ; negate when the dividend was negative
rts
ENDFUNC(__modsi3)
|
4ms/metamodule-plugin-sdk
| 2,934
|
plugin-libc/libgcc/config/lm32/_ashrsi3.S
|
# _ashrsi3.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com> and Richard Henderson.
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
/* Arithmetic right shift: __ashrsi3(value r1, count r2) -> r1.
   LM32 builds without a barrel shifter land here; the routine
   branches backwards into a fully unrolled run of single-bit shifts,
   executing exactly (count & 31) of them.
   Entry target = __ashrsi3_0 - 4 * count.  */
.global __ashrsi3
.type __ashrsi3,@function
__ashrsi3:
        andi    r2, r2, 0x1f            /* hardware honours 5 bits of count */
        /* r3 = address of the landing pad past the unrolled shifts.  */
#ifdef __PIC__
        lw      r3, (gp+got(__ashrsi3_0))
#else
        mvhi    r3, hi(__ashrsi3_0)
        ori     r3, r3, lo(__ashrsi3_0)
#endif
        add     r2, r2, r2              /* scale count by 4: one 4-byte  */
        add     r2, r2, r2              /* instruction per shifted bit   */
        sub     r3, r3, r2
        b       r3                      /* land `count` insns before the pad */
        .rept   31                      /* 31 identical 1-bit arithmetic */
        sri     r1, r1, 1               /* shifts; entering k insns early */
        .endr                           /* shifts by exactly k           */
__ashrsi3_0:
        ret
|
4ms/metamodule-plugin-sdk
| 1,274
|
plugin-libc/libgcc/config/lm32/crtn.S
|
# crtn.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com>
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# Epilogues closing the _init/_fini bodies opened in crti.S:
# reload the saved return address, release the stack slot, and return.
.section .init
lw ra, (sp+4)
addi sp, sp, 4
ret
.section .fini
lw ra, (sp+4)
addi sp, sp, 4
ret
|
4ms/metamodule-plugin-sdk
| 2,979
|
plugin-libc/libgcc/config/lm32/_ashlsi3.S
|
# _ashlsi3.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com> and Richard Henderson.
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
/* Arithmetic left shift: __ashlsi3(value r1, count r2) -> r1.
   No barrel shifter: branch into a fully unrolled run of single-bit
   shifts (add r1,r1,r1 == r1 << 1), executing (count & 31) of them.
   Entry target = __ashlsi3_0 - 4 * count.  */
.text
.global __ashlsi3
.type __ashlsi3,@function
.align 4
__ashlsi3:
        andi    r2, r2, 0x1f            /* only 5 bits of count matter */
        /* r3 = address of the landing pad past the unrolled shifts.  */
#ifdef __PIC__
        lw      r3, (gp+got(__ashlsi3_0))
#else
        mvhi    r3, hi(__ashlsi3_0)
        ori     r3, r3, lo(__ashlsi3_0)
#endif
        add     r2, r2, r2              /* scale count by 4: one 4-byte */
        add     r2, r2, r2              /* instruction per shifted bit  */
        sub     r3, r3, r2
        b       r3                      /* land `count` insns before the pad */
        .rept   31
        add     r1, r1, r1              /* r1 <<= 1 */
        .endr
__ashlsi3_0:
        ret
|
4ms/metamodule-plugin-sdk
| 3,137
|
plugin-libc/libgcc/config/lm32/_lshrsi3.S
|
# _lshrsi3.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com> and Richard Henderson.
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
/* Logical right shift: __lshrsi3(value r1, count r2) -> r1.
   No barrel shifter: branch into a fully unrolled run of single-bit
   unsigned shifts, executing (count & 31) of them.
   Entry target = __lshrsi3_0 - 4 * count.  */
.global __lshrsi3
.type __lshrsi3,@function
__lshrsi3:
        andi    r2, r2, 0x1f            /* hardware honours 5 bits of count */
        /* r3 = address of the landing pad past the unrolled shifts.  */
#ifdef __PIC__
        lw      r3, (gp+got(__lshrsi3_0))
#else
        mvhi    r3, hi(__lshrsi3_0)
        ori     r3, r3, lo(__lshrsi3_0)
#endif
        add     r2, r2, r2              /* scale count by 4: one 4-byte */
        add     r2, r2, r2              /* instruction per shifted bit  */
        sub     r3, r3, r2
        b       r3                      /* land `count` insns before the pad */
        .rept   31
        srui    r1, r1, 1               /* r1 >>= 1 (zero fill) */
        .endr
__lshrsi3_0:
        ret
|
4ms/metamodule-plugin-sdk
| 1,385
|
plugin-libc/libgcc/config/lm32/crti.S
|
# crti.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com>
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# Prologues opening _init/_fini: allocate one 4-byte stack slot and
# save ra into it; crtn.S supplies the matching reload/ret epilogue
# using the same (sp+4) addressing.
.section .init
.global _init
.type _init,@function
.align 4
_init:
addi sp, sp, -4
sw (sp+4), ra
.section .fini
.global _fini
.type _fini,@function
.align 4
_fini:
addi sp, sp, -4
sw (sp+4), ra
|
4ms/metamodule-plugin-sdk
| 1,309
|
plugin-libc/libgcc/config/c6x/crtn.S
|
/* Copyright (C) 2010-2022 Free Software Foundation, Inc.
Contributed by Bernd Schmidt <bernds@codesourcery.com>.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* This file supplies function epilogues for the .init and .fini sections.
* It is linked in after all other files.
*/
/* Epilogues for the .init/.fini bodies: reload the return address
   saved by crti.S (B15 is the stack pointer), pop the 8-byte frame,
   wait out the load delay slots (nop 3), and return through B3 with
   its branch delay slots (nop 5).  */
.section .init
ldw .d2t2 *+B15(4), B3
add .d2 B15, 8, B15
nop 3
ret .s2 B3
nop 5
.section .fini
ldw .d2t2 *+B15(4), B3
add .d2 B15, 8, B15
nop 3
ret .s2 B3
nop 5
|
4ms/metamodule-plugin-sdk
| 1,343
|
plugin-libc/libgcc/config/c6x/crti.S
|
/* Copyright (C) 2010-2022 Free Software Foundation, Inc.
Contributed by Bernd Schmidt <bernds@codesourcery.com>.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* This file just supplies function prologues for the .init and .fini
* sections. It is linked in before crtbegin.o.
*/
/* Prologues opening _init/_fini: allocate an 8-byte frame (B15 is
   the stack pointer) and save the return address B3 into it; crtn.S
   supplies the matching epilogue.  */
.section .init
.globl _init
.type _init,@function
_init:
add .l2 -8, B15, B15
stw .d2t2 B3,*+B15(4)
.section .fini
.globl _fini
.type _fini,@function
_fini:
add .l2 -8, B15, B15
stw .d2t2 B3,*+B15(4)
|
4ms/metamodule-plugin-sdk
| 3,341
|
plugin-libc/libgcc/config/c6x/libunwind.S
|
.text
/* do_call fn -- call FN with the return address materialized in B3.
   C64x+: callp sets up the return address in one instruction.
   C64x:  addkpc forms the PC-relative return address (label 9f).
   C62x:  build the 32-bit address of 9f with the mvkl/mvkh constant-
          load pair.  (Fixed: the previous text read "mhkl"/"mhkh",
          which are not C6x mnemonics.)  */
.macro do_call fn
#ifdef _TMS320C6400_PLUS
callp .s2 (\fn), B3
#elif defined(_TMS320C6400)
call .s2 (\fn)
addkpc .s2 9f, B3, 0
nop 4
9f:
#else
call .s2 (\fn)
mvkl .s2 9f, B3
mvkh .s2 9f, B3
nop 3
9f:
#endif
.endm
.align 2
.global restore_core_regs
.type restore_core_regs, STT_FUNC
restore_core_regs:
mv .s2x A4, B4
ldw .d1t1 *+A4[0], A0
|| ldw .d2t2 *++B4[16], B0
ldw .d1t1 *+A4[1], A1
|| ldw .d2t2 *+B4[1], B1
ldw .d1t1 *+A4[2], A2
|| ldw .d2t2 *+B4[2], B2
ldw .d1t1 *+A4[3], A3
|| ldw .d2t2 *+B4[3], B3
;; Base registers are loaded later
ldw .d1t1 *+A4[5], A5
|| ldw .d2t2 *+B4[5], B5
ldw .d1t1 *+A4[6], A6
|| ldw .d2t2 *+B4[6], B6
ldw .d1t1 *+A4[7], A7
|| ldw .d2t2 *+B4[7], B7
ldw .d1t1 *+A4[8], A8
|| ldw .d2t2 *+B4[8], B8
ldw .d1t1 *+A4[9], A9
|| ldw .d2t2 *+B4[9], B9
;; load PC into B10 so that it is ready for the branch
ldw .d2t2 *+B4[16], B10
ldw .d1t1 *+A4[11], A11
|| ldw .d2t2 *+B4[11], B11
ldw .d1t1 *+A4[12], A12
|| ldw .d2t2 *+B4[12], B12
ldw .d1t1 *+A4[13], A13
|| ldw .d2t2 *+B4[13], B13
ldw .d1t1 *+A4[14], A14
|| ldw .d2t2 *+B4[14], B14
;; Loads have 4 delay slots. Take advantage of this to restore the
;; scratch registers and stack pointer before the base registers
;; disappear. We also need to make sure no interrupts occur,
;; so put the whole thing in the delay slots of a dummy branch
;; We cannot move the ret earlier as that would cause it to occur
;; before the last load completes
b .s1 (1f)
ldw .d1t1 *+A4[4], A4
|| ldw .d2t2 *+B4[4], B4
ldw .d1t1 *+A4[15], A15
|| ldw .d2t2 *+B4[15], B15
ret .s2 B10
ldw .d1t1 *+A4[10], A10
|| ldw .d2t2 *+B4[10], B10
nop 1
1:
nop 3
.size restore_core_regs, . - restore_core_regs
.macro UNWIND_WRAPPER name argreg argside
.global \name
.type \name, STT_FUNC
\name:
# Create saved register state: flags,A0-A15,B0-B15,PC = 136 bytes.
# Plus 4 (rounded to 8) for saving return.
addk .s2 -144, B15
stw .d2t1 A0, *+B15[2]
stw .d2t1 A1, *+B15[3]
stw .d2t1 A2, *+B15[4]
stw .d2t1 A3, *+B15[5]
stw .d2t1 A4, *+B15[6]
stw .d2t1 A5, *+B15[7]
stw .d2t1 A6, *+B15[8]
stw .d2t1 A7, *+B15[9]
stw .d2t1 A8, *+B15[10]
stw .d2t1 A9, *+B15[11]
stw .d2t1 A10, *+B15[12]
stw .d2t1 A11, *+B15[13]
stw .d2t1 A12, *+B15[14]
stw .d2t1 A13, *+B15[15]
stw .d2t1 A14, *+B15[16]
stw .d2t1 A15, *+B15[17]
mv .s1x B15, A0
addk .s1 144, A0
stw .d2t2 B0, *+B15[18]
stw .d2t2 B1, *+B15[19]
stw .d2t2 B2, *+B15[20]
stw .d2t2 B3, *+B15[21]
stw .d2t2 B4, *+B15[22]
stw .d2t2 B5, *+B15[23]
stw .d2t2 B6, *+B15[24]
stw .d2t2 B7, *+B15[25]
stw .d2t2 B8, *+B15[26]
stw .d2t2 B9, *+B15[27]
stw .d2t2 B10, *+B15[28]
stw .d2t2 B11, *+B15[29]
stw .d2t2 B12, *+B15[30]
stw .d2t2 B13, *+B15[31]
stw .d2t2 B14, *+B15[32]
stw .d2t1 A0, *+B15[33]
stw .d2t1 A0, *+B15[34]
# Zero demand saved flags
mvk .s1 0, A0
stw .d2t1 A0, *+B15[1]
# Save return address, setup additional argument and call function
stw .d2t2 B3, *+B15[35]
add .d\argside B15, 4, \argreg
do_call __gnu\name
# Restore stack and return
ldw .d2t2 *+B15[35], B3
addk .s2 144, B15
nop 3
ret .s2 B3
nop 5
.size \name, . - \name
.endm
UNWIND_WRAPPER _Unwind_RaiseException B4 2
UNWIND_WRAPPER _Unwind_Resume B4 2
UNWIND_WRAPPER _Unwind_Resume_or_Rethrow B4 2
UNWIND_WRAPPER _Unwind_ForcedUnwind B6 2
UNWIND_WRAPPER _Unwind_Backtrace A6 1x
|
4ms/metamodule-plugin-sdk
| 9,657
|
plugin-libc/libgcc/config/c6x/lib1funcs.S
|
/* Copyright (C) 2010-2022 Free Software Foundation, Inc.
Contributed by Bernd Schmidt <bernds@codesourcery.com>.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
;; ABI considerations for the divide functions
;; The following registers are call-used:
;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
;;
;; In our implementation, divu and remu are leaf functions,
;; while both divi and remi call into divu.
;; A0 is not clobbered by any of the functions.
;; divu does not clobber B2 either, which is taken advantage of
;; in remi.
;; divi uses B5 to hold the original return address during
;; the call to divu.
;; remi uses B2 and A5 to hold the input values during the
;; call to divu. It stores B3 in on the stack.
#ifdef L_divsi3
.text
.align 2
.global __c6xabi_divi
.hidden __c6xabi_divi
.type __c6xabi_divi, STT_FUNC
__c6xabi_divi:
call .s2 __c6xabi_divu
|| mv .d2 B3, B5
|| cmpgt .l1 0, A4, A1
|| cmpgt .l2 0, B4, B1
[A1] neg .l1 A4, A4
|| [B1] neg .l2 B4, B4
|| xor .s1x A1, B1, A1
#ifdef _TMS320C6400
[A1] addkpc .s2 1f, B3, 4
#else
[A1] mvkl .s2 1f, B3
[A1] mvkh .s2 1f, B3
nop 2
#endif
1:
neg .l1 A4, A4
|| mv .l2 B3,B5
|| ret .s2 B5
nop 5
#endif
#if defined L_modsi3 || defined L_divmodsi4
.align 2
#ifdef L_modsi3
#define MOD_OUTPUT_REG A4
.global __c6xabi_remi
.hidden __c6xabi_remi
.type __c6xabi_remi, STT_FUNC
#else
#define MOD_OUTPUT_REG A5
.global __c6xabi_divremi
.hidden __c6xabi_divremi
.type __c6xabi_divremi, STT_FUNC
__c6xabi_divremi:
#endif
__c6xabi_remi:
stw .d2t2 B3, *B15--[2]
|| cmpgt .l1 0, A4, A1
|| cmpgt .l2 0, B4, B2
|| mv .s1 A4, A5
|| call .s2 __c6xabi_divu
[A1] neg .l1 A4, A4
|| [B2] neg .l2 B4, B4
|| xor .s2x B2, A1, B0
|| mv .d2 B4, B2
#ifdef _TMS320C6400
[B0] addkpc .s2 1f, B3, 1
[!B0] addkpc .s2 2f, B3, 1
nop 2
#else
[B0] mvkl .s2 1f,B3
[!B0] mvkl .s2 2f,B3
[B0] mvkh .s2 1f,B3
[!B0] mvkh .s2 2f,B3
#endif
1:
neg .l1 A4, A4
2:
ldw .d2t2 *++B15[2], B3
#ifdef _TMS320C6400_PLUS
mpy32 .m1x A4, B2, A6
nop 3
ret .s2 B3
sub .l1 A5, A6, MOD_OUTPUT_REG
nop 4
#else
mpyu .m1x A4, B2, A1
nop 1
mpylhu .m1x A4, B2, A6
|| mpylhu .m2x B2, A4, B2
nop 1
add .l1x A6, B2, A6
|| ret .s2 B3
shl .s1 A6, 16, A6
add .d1 A6, A1, A6
sub .l1 A5, A6, MOD_OUTPUT_REG
nop 2
#endif
#endif
#if defined L_udivsi3 || defined L_udivmodsi4
.align 2
#ifdef L_udivsi3
.global __c6xabi_divu
.hidden __c6xabi_divu
.type __c6xabi_divu, STT_FUNC
__c6xabi_divu:
#else
.global __c6xabi_divremu
.hidden __c6xabi_divremu
.type __c6xabi_divremu, STT_FUNC
__c6xabi_divremu:
#endif
;; We use a series of up to 31 subc instructions. First, we find
;; out how many leading zero bits there are in the divisor. This
;; gives us both a shift count for aligning (shifting) the divisor
;; to the, and the number of times we have to execute subc.
;; At the end, we have both the remainder and most of the quotient
;; in A4. The top bit of the quotient is computed first and is
;; placed in A2.
;; Return immediately if the dividend is zero. Setting B4 to 1
;; is a trick to allow us to leave the following insns in the jump
;; delay slot without affecting the result.
mv .s2x A4, B1
#ifndef _TMS320C6400
[!b1] mvk .s2 1, B4
#endif
[b1] lmbd .l2 1, B4, B1
||[!b1] b .s2 B3 ; RETURN A
#ifdef _TMS320C6400
||[!b1] mvk .d2 1, B4
#endif
#ifdef L_udivmodsi4
||[!b1] zero .s1 A5
#endif
mv .l1x B1, A6
|| shl .s2 B4, B1, B4
;; The loop performs a maximum of 28 steps, so we do the
;; first 3 here.
cmpltu .l1x A4, B4, A2
[!A2] sub .l1x A4, B4, A4
|| shru .s2 B4, 1, B4
|| xor .s1 1, A2, A2
shl .s1 A2, 31, A2
|| [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
;; RETURN A may happen here (note: must happen before the next branch)
0:
cmpgt .l2 B1, 7, B0
|| [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
|| [b0] b .s1 0b
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
;; loop backwards branch happens here
ret .s2 B3
|| mvk .s1 32, A1
sub .l1 A1, A6, A6
#ifdef L_udivmodsi4
|| extu .s1 A4, A6, A5
#endif
shl .s1 A4, A6, A4
shru .s1 A4, 1, A4
|| sub .l1 A6, 1, A6
or .l1 A2, A4, A4
shru .s1 A4, A6, A4
nop
#endif
#ifdef L_umodsi3
.align 2
.global __c6xabi_remu
.hidden __c6xabi_remu
.type __c6xabi_remu, STT_FUNC
__c6xabi_remu:
;; The ABI seems designed to prevent these functions calling each other,
;; so we duplicate most of the divsi3 code here.
mv .s2x A4, B1
#ifndef _TMS320C6400
[!b1] mvk .s2 1, B4
#endif
lmbd .l2 1, B4, B1
||[!b1] b .s2 B3 ; RETURN A
#ifdef _TMS320C6400
||[!b1] mvk .d2 1, B4
#endif
mv .l1x B1, A7
|| shl .s2 B4, B1, B4
cmpltu .l1x A4, B4, A1
[!a1] sub .l1x A4, B4, A4
shru .s2 B4, 1, B4
0:
cmpgt .l2 B1, 7, B0
|| [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
;; RETURN A may happen here (note: must happen before the next branch)
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
|| [b0] b .s1 0b
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
;; loop backwards branch happens here
ret .s2 B3
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
extu .s1 A4, A7, A4
nop 2
#endif
#if defined L_strasgi_64plus && defined _TMS320C6400_PLUS
.align 2
.global __c6xabi_strasgi_64plus
.hidden __c6xabi_strasgi_64plus
.type __c6xabi_strasgi_64plus, STT_FUNC
__c6xabi_strasgi_64plus:
shru .s2x a6, 2, b31
|| mv .s1 a4, a30
|| mv .d2 b4, b30
add .s2 -4, b31, b31
sploopd 1
|| mvc .s2 b31, ilc
ldw .d2t2 *b30++, b31
nop 4
mv .s1x b31,a31
spkernel 6, 0
|| stw .d1t1 a31, *a30++
ret .s2 b3
nop 5
#endif
#ifdef L_strasgi
.global __c6xabi_strasgi
.type __c6xabi_strasgi, STT_FUNC
__c6xabi_strasgi:
;; This is essentially memcpy, with alignment known to be at least
;; 4, and the size a multiple of 4 greater than or equal to 28.
ldw .d2t1 *B4++, A0
|| mvk .s2 16, B1
ldw .d2t1 *B4++, A1
|| mvk .s2 20, B2
|| sub .d1 A6, 24, A6
ldw .d2t1 *B4++, A5
ldw .d2t1 *B4++, A7
|| mv .l2x A6, B7
ldw .d2t1 *B4++, A8
ldw .d2t1 *B4++, A9
|| mv .s2x A0, B5
|| cmpltu .l2 B2, B7, B0
0:
stw .d1t2 B5, *A4++
||[b0] ldw .d2t1 *B4++, A0
|| mv .s2x A1, B5
|| mv .l2 B7, B6
[b0] sub .d2 B6, 24, B7
||[b0] b .s2 0b
|| cmpltu .l2 B1, B6, B0
[b0] ldw .d2t1 *B4++, A1
|| stw .d1t2 B5, *A4++
|| mv .s2x A5, B5
|| cmpltu .l2 12, B6, B0
[b0] ldw .d2t1 *B4++, A5
|| stw .d1t2 B5, *A4++
|| mv .s2x A7, B5
|| cmpltu .l2 8, B6, B0
[b0] ldw .d2t1 *B4++, A7
|| stw .d1t2 B5, *A4++
|| mv .s2x A8, B5
|| cmpltu .l2 4, B6, B0
[b0] ldw .d2t1 *B4++, A8
|| stw .d1t2 B5, *A4++
|| mv .s2x A9, B5
|| cmpltu .l2 0, B6, B0
[b0] ldw .d2t1 *B4++, A9
|| stw .d1t2 B5, *A4++
|| mv .s2x A0, B5
|| cmpltu .l2 B2, B7, B0
;; loop back branch happens here
cmpltu .l2 B1, B6, B0
|| ret .s2 b3
[b0] stw .d1t1 A1, *A4++
|| cmpltu .l2 12, B6, B0
[b0] stw .d1t1 A5, *A4++
|| cmpltu .l2 8, B6, B0
[b0] stw .d1t1 A7, *A4++
|| cmpltu .l2 4, B6, B0
[b0] stw .d1t1 A8, *A4++
|| cmpltu .l2 0, B6, B0
[b0] stw .d1t1 A9, *A4++
;; return happens here
#endif
#ifdef _TMS320C6400_PLUS
#ifdef L_push_rts
.align 2
.global __c6xabi_push_rts
.hidden __c6xabi_push_rts
.type __c6xabi_push_rts, STT_FUNC
__c6xabi_push_rts:
stw .d2t2 B14, *B15--[2]
stdw .d2t1 A15:A14, *B15--
|| b .s2x A3
stdw .d2t2 B13:B12, *B15--
stdw .d2t1 A13:A12, *B15--
stdw .d2t2 B11:B10, *B15--
stdw .d2t1 A11:A10, *B15--
stdw .d2t2 B3:B2, *B15--
#endif
#ifdef L_pop_rts
.align 2
.global __c6xabi_pop_rts
.hidden __c6xabi_pop_rts
.type __c6xabi_pop_rts, STT_FUNC
__c6xabi_pop_rts:
lddw .d2t2 *++B15, B3:B2
lddw .d2t1 *++B15, A11:A10
lddw .d2t2 *++B15, B11:B10
lddw .d2t1 *++B15, A13:A12
lddw .d2t2 *++B15, B13:B12
lddw .d2t1 *++B15, A15:A14
|| b .s2 B3
ldw .d2t2 *++B15[2], B14
nop 4
#endif
#ifdef L_call_stub
.align 2
.global __c6xabi_call_stub
.type __c6xabi_call_stub, STT_FUNC
__c6xabi_call_stub:
stw .d2t1 A2, *B15--[2]
stdw .d2t1 A7:A6, *B15--
|| call .s2 B31
stdw .d2t1 A1:A0, *B15--
stdw .d2t2 B7:B6, *B15--
stdw .d2t2 B5:B4, *B15--
stdw .d2t2 B1:B0, *B15--
stdw .d2t2 B3:B2, *B15--
|| addkpc .s2 1f, B3, 0
1:
lddw .d2t2 *++B15, B3:B2
lddw .d2t2 *++B15, B1:B0
lddw .d2t2 *++B15, B5:B4
lddw .d2t2 *++B15, B7:B6
lddw .d2t1 *++B15, A1:A0
lddw .d2t1 *++B15, A7:A6
|| b .s2 B3
ldw .d2t1 *++B15[2], A2
nop 4
#endif
#endif
|
4ms/metamodule-plugin-sdk
| 1,264
|
plugin-libc/libgcc/config/moxie/crtn.S
|
# crtn.S for moxie
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
.file "crtn.S"
.section ".init"
ret
.section ".fini"
ret
|
4ms/metamodule-plugin-sdk
| 1,317
|
plugin-libc/libgcc/config/moxie/crti.S
|
# crti.S for moxie
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just make a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
.file "crti.S"
.section ".init"
.global _init
.type _init, @function
.p2align 1
_init:
.section ".fini"
.global _fini
.type _fini,@function
.p2align 1
_fini:
|
4ms/metamodule-plugin-sdk
| 1,459
|
plugin-libc/libgcc/config/microblaze/crtn.S
|
/* crtn.s for __init, __fini
This file supplies the epilogue for __init and __fini routines
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Michael Eager <eager@eagercon.com>.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
.section .init, "ax"
lw r15, r0, r1
rtsd r15, 8
addik r1, r1, 8
.section .fini, "ax"
lw r15, r0, r1
rtsd r15, 8
addik r1, r1, 8
|
4ms/metamodule-plugin-sdk
| 3,212
|
plugin-libc/libgcc/config/microblaze/umodsi3.S
|
###################################
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# umodsi3.S
#
# Unsigned modulo operation for 32 bit integers.
# Input : op1 in Reg r5
# op2 in Reg r6
# Output: op1 mod op2 in Reg r3
#
#######################################
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
.globl __umodsi3
.ent __umodsi3
.type __umodsi3,@function
__umodsi3:
.frame r1,0,r15
addik r1,r1,-12
swi r29,r1,0
swi r30,r1,4
swi r31,r1,8
BEQI r6,$LaDiv_By_Zero # Div_by_Zero # Division Error
BEQId r5,$LaResult_Is_Zero # Result is Zero
ADDIK r3,r0,0 # Clear div
ADDIK r30,r0,0 # clear mod
ADDIK r29,r0,32 # Initialize the loop count
# Check if r6 and r5 are equal # if yes, return 0
rsub r18,r5,r6
beqi r18,$LaRETURN_HERE
# Check if (uns)r6 is greater than (uns)r5. In that case, just return r5
xor r18,r5,r6
bgeid r18,16
addik r3,r5,0
blti r6,$LaRETURN_HERE
bri $LCheckr6
rsub r18,r5,r6 # MICROBLAZEcmp
bgti r18,$LaRETURN_HERE
# If r6 [bit 31] is set, then return result as r5-r6
$LCheckr6:
bgtid r6,$LaDIV0
addik r3,r0,0
addik r18,r0,0x7fffffff
and r5,r5,r18
and r6,r6,r18
brid $LaRETURN_HERE
rsub r3,r6,r5
# First part: try to find the first '1' in the r5
$LaDIV0:
BLTI r5,$LaDIV2
$LaDIV1:
ADD r5,r5,r5 # left shift logical r5
BGEID r5,$LaDIV1 #
ADDIK r29,r29,-1
$LaDIV2:
ADD r5,r5,r5 # left shift logical r5 get the '1' into the Carry
ADDC r3,r3,r3 # Move that bit into the Mod register
rSUB r31,r6,r3 # Try to subtract (r3 a r6)
BLTi r31,$LaMOD_TOO_SMALL
OR r3,r0,r31 # Move the r31 to mod since the result was positive
ADDIK r30,r30,1
$LaMOD_TOO_SMALL:
ADDIK r29,r29,-1
BEQi r29,$LaLOOP_END
ADD r30,r30,r30 # Shift in the '1' into div
BRI $LaDIV2 # Div2
$LaLOOP_END:
BRI $LaRETURN_HERE
$LaDiv_By_Zero:
$LaResult_Is_Zero:
or r3,r0,r0 # set result to 0
$LaRETURN_HERE:
# Restore values of CSRs and that of r3 and the divisor and the dividend
lwi r29,r1,0
lwi r30,r1,4
lwi r31,r1,8
rtsd r15,8
addik r1,r1,12
.end __umodsi3
.size __umodsi3, . - __umodsi3
|
4ms/metamodule-plugin-sdk
| 3,428
|
plugin-libc/libgcc/config/microblaze/udivsi3.S
|
###################################-
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# udivsi3.S
#
# Unsigned divide operation.
# Input : Divisor in Reg r5
# Dividend in Reg r6
# Output: Result in Reg r3
#
#######################################
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
.globl __udivsi3
.ent __udivsi3
.type __udivsi3,@function
__udivsi3:
.frame r1,0,r15
ADDIK r1,r1,-12
SWI r29,r1,0
SWI r30,r1,4
SWI r31,r1,8
BEQI r6,$LaDiv_By_Zero # Div_by_Zero # Division Error
BEQID r5,$LaResult_Is_Zero # Result is Zero
ADDIK r30,r0,0 # Clear mod
ADDIK r29,r0,32 # Initialize the loop count
# Check if r6 and r5 are equal # if yes, return 1
RSUB r18,r5,r6
BEQID r18,$LaRETURN_HERE
ADDIK r3,r0,1
# Check if (uns)r6 is greater than (uns)r5. In that case, just return 0
XOR r18,r5,r6
BGEID r18,16
ADD r3,r0,r0 # We would anyways clear r3
BLTI r6,$LaRETURN_HERE # r6[bit 31 = 1] hence is greater
BRI $LCheckr6
RSUB r18,r6,r5 # MICROBLAZEcmp
BLTI r18,$LaRETURN_HERE
# If r6 [bit 31] is set, then return result as 1
$LCheckr6:
BGTI r6,$LaDIV0
BRID $LaRETURN_HERE
ADDIK r3,r0,1
# First part try to find the first '1' in the r5
$LaDIV0:
BLTI r5,$LaDIV2
$LaDIV1:
ADD r5,r5,r5 # left shift logical r5
BGTID r5,$LaDIV1
ADDIK r29,r29,-1
$LaDIV2:
ADD r5,r5,r5 # left shift logical r5 get the '1' into the Carry
ADDC r30,r30,r30 # Move that bit into the Mod register
RSUB r31,r6,r30 # Try to subtract (r30 a r6)
BLTI r31,$LaMOD_TOO_SMALL
OR r30,r0,r31 # Move the r31 to mod since the result was positive
ADDIK r3,r3,1
$LaMOD_TOO_SMALL:
ADDIK r29,r29,-1
BEQi r29,$LaLOOP_END
ADD r3,r3,r3 # Shift in the '1' into div
BRI $LaDIV2 # Div2
$LaLOOP_END:
BRI $LaRETURN_HERE
$LaDiv_By_Zero:
$LaResult_Is_Zero:
OR r3,r0,r0 # set result to 0
$LaRETURN_HERE:
# Restore values of CSRs and that of r3 and the divisor and the dividend
LWI r29,r1,0
LWI r30,r1,4
LWI r31,r1,8
RTSD r15,8
ADDIK r1,r1,12
.end __udivsi3
.size __udivsi3, . - __udivsi3
|
4ms/metamodule-plugin-sdk
| 3,730
|
plugin-libc/libgcc/config/microblaze/muldi3_hard.S
|
###################################-
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# muldi3_hard.S
#
# Multiply operation for 64 bit integers, for devices with hard multiply
# Input : Operand1[H] in Reg r5
# Operand1[L] in Reg r6
# Operand2[H] in Reg r7
# Operand2[L] in Reg r8
# Output: Result[H] in Reg r3
# Result[L] in Reg r4
#
# Explaination:
#
# Both the input numbers are divided into 16 bit number as follows
# op1 = A B C D
# op2 = E F G H
# result = D * H
# + (C * H + D * G) << 16
# + (B * H + C * G + D * F) << 32
# + (A * H + B * G + C * F + D * E) << 48
#
# Only 64 bits of the output are considered
#
#######################################
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
.globl muldi3_hardproc
.ent muldi3_hardproc
muldi3_hardproc:
addi r1,r1,-40
# Save the input operands on the caller's stack
swi r5,r1,44
swi r6,r1,48
swi r7,r1,52
swi r8,r1,56
# Store all the callee saved registers
sw r20,r1,r0
swi r21,r1,4
swi r22,r1,8
swi r23,r1,12
swi r24,r1,16
swi r25,r1,20
swi r26,r1,24
swi r27,r1,28
# Load all the 16 bit values for A through H
lhui r20,r1,44 # A
lhui r21,r1,46 # B
lhui r22,r1,48 # C
lhui r23,r1,50 # D
lhui r24,r1,52 # E
lhui r25,r1,54 # F
lhui r26,r1,56 # G
lhui r27,r1,58 # H
# D * H ==> LSB of the result on stack ==> Store1
mul r9,r23,r27
swi r9,r1,36 # Pos2 and Pos3
# Hi (Store1) + C * H + D * G ==> Store2 ==> Pos1 and Pos2
# Store the carry generated in position 2 for Pos 3
lhui r11,r1,36 # Pos2
mul r9,r22,r27 # C * H
mul r10,r23,r26 # D * G
add r9,r9,r10
addc r12,r0,r0
add r9,r9,r11
addc r12,r12,r0 # Store the Carry
shi r9,r1,36 # Store Pos2
swi r9,r1,32
lhui r11,r1,32
shi r11,r1,34 # Store Pos1
# Hi (Store2) + B * H + C * G + D * F ==> Store3 ==> Pos0 and Pos1
mul r9,r21,r27 # B * H
mul r10,r22,r26 # C * G
mul r7,r23,r25 # D * F
add r9,r9,r11
add r9,r9,r10
add r9,r9,r7
swi r9,r1,32 # Pos0 and Pos1
# Hi (Store3) + A * H + B * G + C * F + D * E ==> Store3 ==> Pos0
lhui r11,r1,32 # Pos0
mul r9,r20,r27 # A * H
mul r10,r21,r26 # B * G
mul r7,r22,r25 # C * F
mul r8,r23,r24 # D * E
add r9,r9,r11
add r9,r9,r10
add r9,r9,r7
add r9,r9,r8
sext16 r9,r9 # Sign extend the MSB
shi r9,r1,32
# Move results to r3 and r4
lhui r3,r1,32
add r3,r3,r12
shi r3,r1,32
lwi r3,r1,32 # Hi Part
lwi r4,r1,36 # Lo Part
# Restore Callee saved registers
lw r20,r1,r0
lwi r21,r1,4
lwi r22,r1,8
lwi r23,r1,12
lwi r24,r1,16
lwi r25,r1,20
lwi r26,r1,24
lwi r27,r1,28
# Restore Frame and return
rtsd r15,8
addi r1,r1,40
.end muldi3_hardproc
|
4ms/metamodule-plugin-sdk
| 3,280
|
plugin-libc/libgcc/config/microblaze/divsi3.S
|
###################################-
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# divsi3.S
#
# Divide operation for 32 bit integers.
# Input : Dividend in Reg r5
# Divisor in Reg r6
# Output: Result in Reg r3
#
#######################################
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
.globl __divsi3
.ent __divsi3
.type __divsi3,@function
__divsi3:
.frame r1,0,r15
ADDIK r1,r1,-16
SWI r28,r1,0
SWI r29,r1,4
SWI r30,r1,8
SWI r31,r1,12
BEQI r6,$LaDiv_By_Zero # Div_by_Zero # Division Error
BEQI r5,$LaResult_Is_Zero # Result is Zero
BGEID r5,$LaR5_Pos
XOR r28,r5,r6 # Get the sign of the result
RSUBI r5,r5,0 # Make r5 positive
$LaR5_Pos:
BGEI r6,$LaR6_Pos
RSUBI r6,r6,0 # Make r6 positive
$LaR6_Pos:
ADDIK r30,r0,0 # Clear mod
ADDIK r3,r0,0 # clear div
ADDIK r29,r0,32 # Initialize the loop count
# First part try to find the first '1' in the r5
$LaDIV0:
BLTI r5,$LaDIV2 # This traps r5 == 0x80000000
$LaDIV1:
ADD r5,r5,r5 # left shift logical r5
BGTID r5,$LaDIV1
ADDIK r29,r29,-1
$LaDIV2:
ADD r5,r5,r5 # left shift logical r5 get the '1' into the Carry
ADDC r30,r30,r30 # Move that bit into the Mod register
RSUB r31,r6,r30 # Try to subtract (r30 a r6)
BLTI r31,$LaMOD_TOO_SMALL
OR r30,r0,r31 # Move the r31 to mod since the result was positive
ADDIK r3,r3,1
$LaMOD_TOO_SMALL:
ADDIK r29,r29,-1
BEQi r29,$LaLOOP_END
ADD r3,r3,r3 # Shift in the '1' into div
BRI $LaDIV2 # Div2
$LaLOOP_END:
BGEI r28,$LaRETURN_HERE
BRID $LaRETURN_HERE
RSUBI r3,r3,0 # Negate the result
$LaDiv_By_Zero:
$LaResult_Is_Zero:
OR r3,r0,r0 # set result to 0
$LaRETURN_HERE:
# Restore values of CSRs and that of r3 and the divisor and the dividend
LWI r28,r1,0
LWI r29,r1,4
LWI r30,r1,8
LWI r31,r1,12
RTSD r15,8
ADDIK r1,r1,16
.end __divsi3
.size __divsi3, . - __divsi3
|
4ms/metamodule-plugin-sdk
| 2,005
|
plugin-libc/libgcc/config/microblaze/stack_overflow_exit.S
|
###################################-*-asm*-
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# stack_overflow_exit.S
#
# Checks for stack overflows and sets the global variable
# stack_overflow_error with the value of current stack pointer
#
# This routine exits from the program
#
#######################################
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
.globl _stack_overflow_error
.data
.align 2
.type _stack_overflow_error,@object
.size _stack_overflow_error,4
_stack_overflow_error:
.data32 0
.text
.globl _stack_overflow_exit
.ent _stack_overflow_exit
.type _stack_overflow_exit,@function
_stack_overflow_exit:
#ifdef __PIC__
mfs r20,rpc
addik r20,r20,_GLOBAL_OFFSET_TABLE_+8
swi r1,r20,_stack_overflow_error@GOTOFF
bri exit@PLT
#else
swi r1,r0,_stack_overflow_error
bri exit
#endif
.end _stack_overflow_exit
.size _stack_overflow_exit,. - _stack_overflow_exit
|
4ms/metamodule-plugin-sdk
| 1,696
|
plugin-libc/libgcc/config/microblaze/crti.S
|
/* crti.s for __init, __fini
This file supplies the prologue for __init and __fini routines
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Michael Eager <eager@eagercon.com>.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
# crti.S supplies only the *prologues* of __init and __fini; the
# matching epilogues come from crtn.S, with the function bodies
# contributed by the .init/.fini fragments of other objects and
# concatenated by the linker.
.section .init, "ax"
.global __init
# _stack/_stack_end are weak defaults spanning the whole address
# space; a real link may override them with actual stack bounds
# for the hardware stack-protection registers programmed below.
.weak _stack
.set _stack, 0xffffffff
.weak _stack_end
.set _stack_end, 0
.align 2
__init:
addik r1, r1, -8 # Open a small stack frame
sw r15, r0, r1 # Save the link register at *r1
la r11, r0, _stack # Program the stack-high limit register (rshr)
mts rshr, r11
la r11, r0, _stack_end # Program the stack-low limit register (rslr)
mts rslr, r11
.section .fini, "ax"
.global __fini
.align 2
__fini:
addik r1, r1, -8 # Open a small stack frame
sw r15, r0, r1 # Save the link register at *r1
|
4ms/metamodule-plugin-sdk
| 3,737
|
plugin-libc/libgcc/config/microblaze/moddi3.S
|
###################################
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# modsi3.S
#
# modulo operation for 64 bit integers.
#
#######################################
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
#######################################
# long long __moddi3 (long long a, long long b)
#
# 64-bit signed modulo via shift/subtract restoring division.
# In:  r5:r6 = dividend (high:low), r7:r8 = divisor (high:low).
# Remainder accumulates in r3:r4; a quotient is built in r29:r30.
# r25-r30 are callee-saved scratch, preserved on the stack below.
# NOTE(review): several oddities are flagged inline - compare this
# copy against upstream libgcc moddi3.S before trusting them.
#######################################
.globl __moddi3
.ent __moddi3
__moddi3:
.frame r1,0,r15
#Change the stack pointer value and Save callee saved regs
addik r1,r1,-24
swi r25,r1,0
swi r26,r1,4
swi r27,r1,8 # used for sign
swi r28,r1,12 # used for loop count
swi r29,r1,16 # Used for div value High
swi r30,r1,20 # Used for div value Low
#Check for Zero Value in the divisor/dividend
OR r9,r5,r6 # Dividend == 0 -> result is zero
BEQID r9,$LaResult_Is_Zero # (branch has a delay slot: next OR always runs)
OR r9,r7,r8 # Divisor == 0 -> division error
BEQI r9,$LaDiv_By_Zero # Div_by_Zero # Division Error
BGEId r5,$La1_Pos # Skip negation if dividend >= 0
XOR r27,r5,r7 # Delay slot (always runs): sign of the result
RSUBI r6,r6,0 # Negate dividend low (sets borrow)
RSUBIC r5,r5,0 # Negate dividend high with borrow
$La1_Pos:
BGEI r7,$La2_Pos # Skip negation if divisor >= 0
RSUBI r8,r8,0 # Negate divisor low
RSUBIC r9,r9,0 # NOTE(review): negates scratch r9, not divisor high r7 - looks wrong, verify upstream
$La2_Pos:
ADDIK r4,r0,0 # Clear mod low
ADDIK r3,r0,0 # Clear mod high
ADDIK r29,r0,0 # clear div high
ADDIK r30,r0,0 # clear div low
ADDIK r28,r0,64 # Initialize the loop count
# Normalize: shift the dividend left until its top bit is set,
# decrementing the iteration count for each skipped bit.
$LaDIV1:
ADD r6,r6,r6
ADDC r5,r5,r5 # left shift logical r5
BGEID r5,$LaDIV1
ADDIK r28,r28,-1 # Delay slot: executed on every pass
# Restoring-division loop: shift a dividend bit into mod (r3:r4),
# subtract the divisor whenever it fits, build quotient in r29:r30.
$LaDIV2:
ADD r6,r6,r6
ADDC r5,r5,r5 # left shift logical r5/r6 get the '1' into the Carry
ADDC r4,r4,r4 # Move that bit into the Mod register
ADDC r3,r3,r3 # Move carry into high mod register
rsub r18,r7,r3 # Compare the High Parts of Mod and Divisor
bnei r18,$L_High_EQ
rsub r18,r6,r4 # NOTE(review): r18 is never used and r6 is dividend low, not divisor low - dead code?
$L_High_EQ:
rSUB r26,r8,r4 # Mod[L] - divisor[L]
rsubc r25,r7,r3 # Mod[H] - divisor[H] with borrow
BLTi r25,$LaMOD_TOO_SMALL # Divisor does not fit: keep the old mod
OR r3,r0,r25 # move r25 to mod [h]
OR r4,r0,r26 # move r26 to mod [l]
ADDI r30,r30,1 # Set the quotient bit...
ADDC r29,r29,r0 # ...propagating any carry into the high word
$LaMOD_TOO_SMALL:
ADDIK r28,r28,-1
BEQi r28,$LaLOOP_END
ADD r30,r30,r30 # Shift in the '1' into div [low]
ADDC r29,r29,r29 # Move the carry generated into high
BRI $LaDIV2 # Div2
$LaLOOP_END:
BGEI r27,$LaRETURN_HERE # Signs matched: no negation needed
rsubi r30,r30,0 # NOTE(review): negates the quotient r29:r30, not the remainder r3:r4 - verify upstream
rsubc r29,r29,r0
BRI $LaRETURN_HERE
$LaDiv_By_Zero:
$LaResult_Is_Zero:
or r29,r0,r0 # set result to 0 [High]  NOTE(review): r29/r30 are restored below, so this store is lost
or r30,r0,r0 # set result to 0 [Low]
$LaRETURN_HERE:
# Restore callee-saved registers from the frame
lwi r25,r1,0
lwi r26,r1,4
lwi r27,r1,8
lwi r28,r1,12
lwi r29,r1,16
lwi r30,r1,20
rtsd r15,8 # Return (delay slot below pops the frame)
addik r1,r1,24
.end __moddi3
|
4ms/metamodule-plugin-sdk
| 3,142
|
plugin-libc/libgcc/config/microblaze/modsi3.S
|
###################################
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# modsi3.S
#
# modulo operation for 32 bit integers.
# Input : op1 in Reg r5
# op2 in Reg r6
# Output: op1 mod op2 in Reg r3
#
#######################################
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
#######################################
# int __modsi3 (int a, int b)
#
# 32-bit signed modulo by shift/subtract restoring division.
# In:  r5 = dividend, r6 = divisor.  Out: r3 = a mod b.
# The sign of the result follows the dividend only (r28 keeps a
# copy of the original r5).  r28-r31 are callee-saved scratch,
# preserved on the stack below.
#######################################
.globl __modsi3
.ent __modsi3
.type __modsi3,@function
__modsi3:
.frame r1,0,r15
addik r1,r1,-16 # Open the frame and save callee-saved scratch
swi r28,r1,0
swi r29,r1,4
swi r30,r1,8
swi r31,r1,12
BEQI r6,$LaDiv_By_Zero # Div_by_Zero # Division Error
BEQI r5,$LaResult_Is_Zero # Result is Zero
BGEId r5,$LaR5_Pos
ADD r28,r5,r0 # Delay slot (always runs): sign depends only on the first arg
RSUBI r5,r5,0 # Make r5 positive
$LaR5_Pos:
BGEI r6,$LaR6_Pos
RSUBI r6,r6,0 # Make r6 positive
$LaR6_Pos:
ADDIK r3,r0,0 # Clear mod
ADDIK r30,r0,0 # clear div
BLTId r5,$LaDIV2 # If r5 is still negative (0x80000000), skip
# the first bit search.
ADDIK r29,r0,32 # Delay slot (always runs): initialize the loop count
# Normalize: left-shift r5 until its MSB is set, decrementing
# the loop count for each skipped bit.
$LaDIV1:
ADD r5,r5,r5 # left shift logical r5
BGEID r5,$LaDIV1 #
ADDIK r29,r29,-1 # Delay slot: count the shift
# Restoring-division loop: shift a dividend bit into mod (r3),
# subtract r6 whenever it fits, build the quotient in r30.
$LaDIV2:
ADD r5,r5,r5 # left shift logical r5 get the '1' into the Carry
ADDC r3,r3,r3 # Move that bit into the Mod register
rSUB r31,r6,r3 # r31 = mod - divisor (trial subtraction)
BLTi r31,$LaMOD_TOO_SMALL # Divisor does not fit: keep the old mod
OR r3,r0,r31 # Move the r31 to mod since the result was positive
ADDIK r30,r30,1 # Set the quotient bit
$LaMOD_TOO_SMALL:
ADDIK r29,r29,-1
BEQi r29,$LaLOOP_END
ADD r30,r30,r30 # Shift in the '1' into div
BRI $LaDIV2 # Div2
$LaLOOP_END:
BGEI r28,$LaRETURN_HERE # Dividend was non-negative: result stays positive
BRId $LaRETURN_HERE
rsubi r3,r3,0 # Delay slot: negate the result
$LaDiv_By_Zero:
$LaResult_Is_Zero:
or r3,r0,r0 # set result to 0 [Both mod as well as div are 0]
$LaRETURN_HERE:
# Restore callee-saved registers; r3 already holds the result
lwi r28,r1,0
lwi r29,r1,4
lwi r30,r1,8
lwi r31,r1,12
rtsd r15,8 # Return (delay slot below pops the frame)
addik r1,r1,16
.end __modsi3
.size __modsi3, . - __modsi3
|
4ms/metamodule-plugin-sdk
| 2,128
|
plugin-libc/libgcc/config/microblaze/mulsi3.S
|
###################################-*-asm*-
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#
# mulsi3.S
#
# Multiply operation for 32 bit integers.
# Input : Operand1 in Reg r5
# Operand2 in Reg r6
# Output: Result [op1 * op2] in Reg r3
#
#######################################
/* An executable stack is *not* required for these functions. */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif
#######################################
# int __mulsi3 (int a, int b)
#
# 32-bit multiply by shift-and-add, for cores without a hardware
# multiplier.  In: r5 = op1, r6 = op2.  Out: r3 = op1 * op2.
# r4 holds the XOR of the operand signs; both operands are made
# positive before the loop and the product negated at the end.
# NOTE(review): an operand of 0x80000000 cannot be made positive
# by RSUBI - behaviour for that input should be verified.
#######################################
.globl __mulsi3
.ent __mulsi3
.type __mulsi3,@function
__mulsi3:
.frame r1,0,r15
add r3,r0,r0 # Clear the accumulator
BEQI r5,$L_Result_Is_Zero # Multiply by Zero
BEQI r6,$L_Result_Is_Zero # Multiply by Zero
BGEId r5,$L_R5_Pos
XOR r4,r5,r6 # Delay slot (always runs): sign of the result
RSUBI r5,r5,0 # Make r5 positive
$L_R5_Pos:
BGEI r6,$L_R6_Pos
RSUBI r6,r6,0 # Make r6 positive
$L_R6_Pos:
bri $L1
# Shift-and-add loop: consume r6 LSB-first; for every set bit add
# the correspondingly shifted r5 into r3.  Terminates when the last
# (highest) set bit of r6 has been consumed.
$L2:
add r5,r5,r5 # Next bit position: r5 <<= 1
$L1:
srl r6,r6 # Shift the next bit of r6 into Carry
addc r7,r0,r0 # r7 = that bit (0 or 1)
beqi r7,$L2 # Bit clear: just advance to the next bit
bneid r6,$L2 # Bit set: loop again if bits remain...
add r3,r3,r5 # Delay slot: ...accumulating r5 either way
blti r4,$L_NegateResult # Signs differed: negate the product
rtsd r15,8
nop
$L_NegateResult:
rtsd r15,8
rsub r3,r3,r0 # Delay slot: r3 = -r3
$L_Result_Is_Zero:
rtsd r15,8
addi r3,r0,0 # Delay slot: return 0
.end __mulsi3
.size __mulsi3, . - __mulsi3
|
4ms/metamodule-plugin-sdk
| 41,922
|
plugin-libc/libgcc/config/avr/lib1funcs-fixed.S
|
/* -*- Mode: Asm -*- */
;; Copyright (C) 2012-2022 Free Software Foundation, Inc.
;; Contributed by Sean D'Epagnier (sean@depagnier.com)
;; Georg-Johann Lay (avr@gjlay.de)
;; This file is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published by the
;; Free Software Foundation; either version 3, or (at your option) any
;; later version.
;; In addition to the permissions in the GNU General Public License, the
;; Free Software Foundation gives you unlimited permission to link the
;; compiled version of this file into combinations with other programs,
;; and to distribute those combinations without any restriction coming
;; from the use of this file. (The General Public License restrictions
;; do apply in other respects; for example, they cover modification of
;; the file, and distribution when not linked into a combine
;; executable.)
;; This file is distributed in the hope that it will be useful, but
;; WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;; General Public License for more details.
;; You should have received a copy of the GNU General Public License
;; along with this program; see the file COPYING. If not, write to
;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
;; Boston, MA 02110-1301, USA.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Fixed point library routines for AVR
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#if defined __AVR_TINY__
#define __zero_reg__ r17
#define __tmp_reg__ r16
#else
#define __zero_reg__ r1
#define __tmp_reg__ r0
#endif
.section .text.libgcc.fixed, "ax", @progbits
#ifndef __AVR_TINY__
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Conversions to float
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#if defined (L_fractqqsf)
DEFUN __fractqqsf
;; Move in place for SA -> SF conversion
clr r22
mov r23, r24
;; Sign-extend
lsl r24
sbc r24, r24
mov r25, r24
XJMP __fractsasf
ENDF __fractqqsf
#endif /* L_fractqqsf */
#if defined (L_fractuqqsf)
DEFUN __fractuqqsf
;; Move in place for USA -> SF conversion
clr r22
mov r23, r24
;; Zero-extend
clr r24
clr r25
XJMP __fractusasf
ENDF __fractuqqsf
#endif /* L_fractuqqsf */
#if defined (L_fracthqsf)
DEFUN __fracthqsf
;; Move in place for SA -> SF conversion
wmov 22, 24
;; Sign-extend
lsl r25
sbc r24, r24
mov r25, r24
XJMP __fractsasf
ENDF __fracthqsf
#endif /* L_fracthqsf */
#if defined (L_fractuhqsf)
DEFUN __fractuhqsf
;; Move in place for USA -> SF conversion
wmov 22, 24
;; Zero-extend
clr r24
clr r25
XJMP __fractusasf
ENDF __fractuhqsf
#endif /* L_fractuhqsf */
#if defined (L_fracthasf)
DEFUN __fracthasf
;; Move in place for SA -> SF conversion
clr r22
mov r23, r24
mov r24, r25
;; Sign-extend
lsl r25
sbc r25, r25
XJMP __fractsasf
ENDF __fracthasf
#endif /* L_fracthasf */
#if defined (L_fractuhasf)
DEFUN __fractuhasf
;; Move in place for USA -> SF conversion
clr r22
mov r23, r24
mov r24, r25
;; Zero-extend
clr r25
XJMP __fractusasf
ENDF __fractuhasf
#endif /* L_fractuhasf */
#if defined (L_fractsqsf)
DEFUN __fractsqsf
XCALL __floatsisf
;; Divide non-zero results by 2^31 to move the
;; decimal point into place
tst r25
breq 0f
subi r24, exp_lo (31)
sbci r25, exp_hi (31)
0: ret
ENDF __fractsqsf
#endif /* L_fractsqsf */
#if defined (L_fractusqsf)
DEFUN __fractusqsf
XCALL __floatunsisf
;; Divide non-zero results by 2^32 to move the
;; decimal point into place
cpse r25, __zero_reg__
subi r25, exp_hi (32)
ret
ENDF __fractusqsf
#endif /* L_fractusqsf */
#if defined (L_fractsasf)
DEFUN __fractsasf
XCALL __floatsisf
;; Divide non-zero results by 2^15 to move the
;; decimal point into place
tst r25
breq 0f
subi r24, exp_lo (15)
sbci r25, exp_hi (15)
0: ret
ENDF __fractsasf
#endif /* L_fractsasf */
#if defined (L_fractusasf)
DEFUN __fractusasf
XCALL __floatunsisf
;; Divide non-zero results by 2^16 to move the
;; decimal point into place
cpse r25, __zero_reg__
subi r25, exp_hi (16)
ret
ENDF __fractusasf
#endif /* L_fractusasf */
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Conversions from float
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#if defined (L_fractsfqq)
DEFUN __fractsfqq
;; Multiply with 2^{24+7} to get a QQ result in r25
subi r24, exp_lo (-31)
sbci r25, exp_hi (-31)
XCALL __fixsfsi
mov r24, r25
ret
ENDF __fractsfqq
#endif /* L_fractsfqq */
#if defined (L_fractsfuqq)
DEFUN __fractsfuqq
;; Multiply with 2^{24+8} to get a UQQ result in r25
subi r25, exp_hi (-32)
XCALL __fixunssfsi
mov r24, r25
ret
ENDF __fractsfuqq
#endif /* L_fractsfuqq */
#if defined (L_fractsfha)
DEFUN __fractsfha
;; Multiply with 2^{16+7} to get a HA result in r25:r24
subi r24, exp_lo (-23)
sbci r25, exp_hi (-23)
XJMP __fixsfsi
ENDF __fractsfha
#endif /* L_fractsfha */
#if defined (L_fractsfuha)
DEFUN __fractsfuha
;; Multiply with 2^24 to get a UHA result in r25:r24
subi r25, exp_hi (-24)
XJMP __fixunssfsi
ENDF __fractsfuha
#endif /* L_fractsfuha */
#if defined (L_fractsfhq)
FALIAS __fractsfsq
DEFUN __fractsfhq
;; Multiply with 2^{16+15} to get a HQ result in r25:r24
;; resp. with 2^31 to get a SQ result in r25:r22
subi r24, exp_lo (-31)
sbci r25, exp_hi (-31)
XJMP __fixsfsi
ENDF __fractsfhq
#endif /* L_fractsfhq */
#if defined (L_fractsfuhq)
FALIAS __fractsfusq
DEFUN __fractsfuhq
;; Multiply with 2^{16+16} to get a UHQ result in r25:r24
;; resp. with 2^32 to get a USQ result in r25:r22
subi r25, exp_hi (-32)
XJMP __fixunssfsi
ENDF __fractsfuhq
#endif /* L_fractsfuhq */
#if defined (L_fractsfsa)
DEFUN __fractsfsa
;; Multiply with 2^15 to get a SA result in r25:r22
subi r24, exp_lo (-15)
sbci r25, exp_hi (-15)
XJMP __fixsfsi
ENDF __fractsfsa
#endif /* L_fractsfsa */
#if defined (L_fractsfusa)
DEFUN __fractsfusa
;; Multiply with 2^16 to get a USA result in r25:r22
subi r25, exp_hi (-16)
XJMP __fixunssfsi
ENDF __fractsfusa
#endif /* L_fractsfusa */
;; For multiplication the functions here are called directly from
;; avr-fixed.md instead of using the standard libcall mechanisms.
;; This can make better code because GCC knows exactly which
;; of the call-used registers (not all of them) are clobbered. */
/*******************************************************
Fractional Multiplication 8 x 8 without MUL
*******************************************************/
#if defined (L_mulqq3) && !defined (__AVR_HAVE_MUL__)
;;; R23 = R24 * R25
;;; Clobbers: __tmp_reg__, R22, R24, R25
;;; Rounding: ???
DEFUN __mulqq3
XCALL __fmuls
;; TR 18037 requires that (-1) * (-1) does not overflow
;; The only input that can produce -1 is (-1)^2.
dec r23
brvs 0f
inc r23
0: ret
ENDF __mulqq3
#endif /* L_mulqq3 && ! HAVE_MUL */
/*******************************************************
Fractional Multiply .16 x .16 with and without MUL
*******************************************************/
#if defined (L_mulhq3)
;;; Same code with and without MUL, but the interfaces differ:
;;; no MUL: (R25:R24) = (R22:R23) * (R24:R25)
;;; Clobbers: ABI, called by optabs
;;; MUL: (R25:R24) = (R19:R18) * (R27:R26)
;;; Clobbers: __tmp_reg__, R22, R23
;;; Rounding: -0.5 LSB <= error <= 0.5 LSB
DEFUN __mulhq3
XCALL __mulhisi3
;; Shift result into place
lsl r23
rol r24
rol r25
brvs 1f
;; Round
sbrc r23, 7
adiw r24, 1
ret
1: ;; Overflow. TR 18037 requires (-1)^2 not to overflow
ldi r24, lo8 (0x7fff)
ldi r25, hi8 (0x7fff)
ret
ENDF __mulhq3
#endif /* defined (L_mulhq3) */
#if defined (L_muluhq3)
;;; Same code with and without MUL, but the interfaces differ:
;;; no MUL: (R25:R24) *= (R23:R22)
;;; Clobbers: ABI, called by optabs
;;; MUL: (R25:R24) = (R19:R18) * (R27:R26)
;;; Clobbers: __tmp_reg__, R22, R23
;;; Rounding: -0.5 LSB < error <= 0.5 LSB
DEFUN __muluhq3
XCALL __umulhisi3
;; Round
sbrc r23, 7
adiw r24, 1
ret
ENDF __muluhq3
#endif /* L_muluhq3 */
/*******************************************************
Fixed Multiply 8.8 x 8.8 with and without MUL
*******************************************************/
#if defined (L_mulha3)
;;; Same code with and without MUL, but the interfaces differ:
;;; no MUL: (R25:R24) = (R22:R23) * (R24:R25)
;;; Clobbers: ABI, called by optabs
;;; MUL: (R25:R24) = (R19:R18) * (R27:R26)
;;; Clobbers: __tmp_reg__, R22, R23
;;; Rounding: -0.5 LSB <= error <= 0.5 LSB
DEFUN __mulha3
XCALL __mulhisi3
lsl r22
rol r23
rol r24
XJMP __muluha3_round
ENDF __mulha3
#endif /* L_mulha3 */
#if defined (L_muluha3)
;;; Same code with and without MUL, but the interfaces differ:
;;; no MUL: (R25:R24) *= (R23:R22)
;;; Clobbers: ABI, called by optabs
;;; MUL: (R25:R24) = (R19:R18) * (R27:R26)
;;; Clobbers: __tmp_reg__, R22, R23
;;; Rounding: -0.5 LSB < error <= 0.5 LSB
DEFUN __muluha3
XCALL __umulhisi3
XJMP __muluha3_round
ENDF __muluha3
#endif /* L_muluha3 */
#if defined (L_muluha3_round)
DEFUN __muluha3_round
;; Shift result into place
mov r25, r24
mov r24, r23
;; Round
sbrc r22, 7
adiw r24, 1
ret
ENDF __muluha3_round
#endif /* L_muluha3_round */
/*******************************************************
Fixed Multiplication 16.16 x 16.16
*******************************************************/
;; Bits outside the result (below LSB), used in the signed version
#define GUARD __tmp_reg__
#if defined (__AVR_HAVE_MUL__)
;; Multiplier
#define A0 16
#define A1 A0+1
#define A2 A1+1
#define A3 A2+1
;; Multiplicand
#define B0 20
#define B1 B0+1
#define B2 B1+1
#define B3 B2+1
;; Result
#define C0 24
#define C1 C0+1
#define C2 C1+1
#define C3 C2+1
#if defined (L_mulusa3)
;;; (C3:C0) = (A3:A0) * (B3:B0)
DEFUN __mulusa3
set
;; Fallthru
ENDF __mulusa3
;;; Round for last digit iff T = 1
;;; Return guard bits in GUARD (__tmp_reg__).
;;; Rounding, T = 0: -1.0 LSB < error <= 0 LSB
;;; Rounding, T = 1: -0.5 LSB < error <= 0.5 LSB
DEFUN __mulusa3_round
;; Some of the MUL instructions have LSBs outside the result.
;; Don't ignore these LSBs in order to tame rounding error.
;; Use C2/C3 for these LSBs.
clr C0
clr C1
mul A0, B0 $ movw C2, r0
mul A1, B0 $ add C3, r0 $ adc C0, r1
mul A0, B1 $ add C3, r0 $ adc C0, r1 $ rol C1
;; Round if T = 1. Store guarding bits outside the result for rounding
;; and left-shift by the signed version (function below).
brtc 0f
sbrc C3, 7
adiw C0, 1
0: push C3
;; The following MULs don't have LSBs outside the result.
;; C2/C3 is the high part.
mul A0, B2 $ add C0, r0 $ adc C1, r1 $ sbc C2, C2
mul A1, B1 $ add C0, r0 $ adc C1, r1 $ sbci C2, 0
mul A2, B0 $ add C0, r0 $ adc C1, r1 $ sbci C2, 0
neg C2
mul A0, B3 $ add C1, r0 $ adc C2, r1 $ sbc C3, C3
mul A1, B2 $ add C1, r0 $ adc C2, r1 $ sbci C3, 0
mul A2, B1 $ add C1, r0 $ adc C2, r1 $ sbci C3, 0
mul A3, B0 $ add C1, r0 $ adc C2, r1 $ sbci C3, 0
neg C3
mul A1, B3 $ add C2, r0 $ adc C3, r1
mul A2, B2 $ add C2, r0 $ adc C3, r1
mul A3, B1 $ add C2, r0 $ adc C3, r1
mul A2, B3 $ add C3, r0
mul A3, B2 $ add C3, r0
;; Guard bits used in the signed version below.
pop GUARD
clr __zero_reg__
ret
ENDF __mulusa3_round
#endif /* L_mulusa3 */
#if defined (L_mulsa3)
;;; (C3:C0) = (A3:A0) * (B3:B0)
;;; Clobbers: __tmp_reg__, T
;;; Rounding: -0.5 LSB <= error <= 0.5 LSB
DEFUN __mulsa3
clt
XCALL __mulusa3_round
;; A posteriori sign extension of the operands
tst B3
brpl 1f
sub C2, A0
sbc C3, A1
1: sbrs A3, 7
rjmp 2f
sub C2, B0
sbc C3, B1
2:
;; Shift 1 bit left to adjust for 15 fractional bits
lsl GUARD
rol C0
rol C1
rol C2
rol C3
;; Round last digit
lsl GUARD
adc C0, __zero_reg__
adc C1, __zero_reg__
adc C2, __zero_reg__
adc C3, __zero_reg__
ret
ENDF __mulsa3
#endif /* L_mulsa3 */
#undef A0
#undef A1
#undef A2
#undef A3
#undef B0
#undef B1
#undef B2
#undef B3
#undef C0
#undef C1
#undef C2
#undef C3
#else /* __AVR_HAVE_MUL__ */
#define A0 18
#define A1 A0+1
#define A2 A0+2
#define A3 A0+3
#define B0 22
#define B1 B0+1
#define B2 B0+2
#define B3 B0+3
#define C0 22
#define C1 C0+1
#define C2 C0+2
#define C3 C0+3
;; __tmp_reg__
#define CC0 0
;; __zero_reg__
#define CC1 1
#define CC2 16
#define CC3 17
#define AA0 26
#define AA1 AA0+1
#define AA2 30
#define AA3 AA2+1
#if defined (L_mulsa3)
;;; (R25:R22) *= (R21:R18)
;;; Clobbers: ABI, called by optabs
;;; Rounding: -1 LSB <= error <= 1 LSB
DEFUN __mulsa3
push B0
push B1
push B3
clt
XCALL __mulusa3_round
pop r30
;; sign-extend B
bst r30, 7
brtc 1f
;; A1, A0 survived in R27:R26
sub C2, AA0
sbc C3, AA1
1:
pop AA1 ;; B1
pop AA0 ;; B0
;; sign-extend A. A3 survived in R31
bst AA3, 7
brtc 2f
sub C2, AA0
sbc C3, AA1
2:
;; Shift 1 bit left to adjust for 15 fractional bits
lsl GUARD
rol C0
rol C1
rol C2
rol C3
;; Round last digit
lsl GUARD
adc C0, __zero_reg__
adc C1, __zero_reg__
adc C2, __zero_reg__
adc C3, __zero_reg__
ret
ENDF __mulsa3
#endif /* L_mulsa3 */
#if defined (L_mulusa3)
;;; (R25:R22) *= (R21:R18)
;;; Clobbers: ABI, called by optabs
;;; Rounding: -1 LSB <= error <= 1 LSB
DEFUN __mulusa3
set
;; Fallthru
ENDF __mulusa3
;;; A[] survives in 26, 27, 30, 31
;;; Also used by __mulsa3 with T = 0
;;; Round if T = 1
;;; Return Guard bits in GUARD (__tmp_reg__), used by signed version.
DEFUN __mulusa3_round
push CC2
push CC3
; clear result
clr __tmp_reg__
wmov CC2, CC0
; save multiplicand
wmov AA0, A0
wmov AA2, A2
rjmp 3f
;; Loop the integral part
1: ;; CC += A * 2^n; n >= 0
add CC0,A0 $ adc CC1,A1 $ adc CC2,A2 $ adc CC3,A3
2: ;; A <<= 1
lsl A0 $ rol A1 $ rol A2 $ rol A3
3: ;; IBIT(B) >>= 1
;; Carry = n-th bit of B; n >= 0
lsr B3
ror B2
brcs 1b
sbci B3, 0
brne 2b
;; Loop the fractional part
;; B2/B3 is 0 now, use as guard bits for rounding
;; Restore multiplicand
wmov A0, AA0
wmov A2, AA2
rjmp 5f
4: ;; CC += A:Guard * 2^n; n < 0
add B3,B2 $ adc CC0,A0 $ adc CC1,A1 $ adc CC2,A2 $ adc CC3,A3
5:
;; A:Guard >>= 1
lsr A3 $ ror A2 $ ror A1 $ ror A0 $ ror B2
;; FBIT(B) <<= 1
;; Carry = n-th bit of B; n < 0
lsl B0
rol B1
brcs 4b
sbci B0, 0
brne 5b
;; Save guard bits and set carry for rounding
push B3
lsl B3
;; Move result into place
wmov C2, CC2
wmov C0, CC0
clr __zero_reg__
brtc 6f
;; Round iff T = 1
adc C0, __zero_reg__
adc C1, __zero_reg__
adc C2, __zero_reg__
adc C3, __zero_reg__
6:
pop GUARD
;; Epilogue
pop CC3
pop CC2
ret
ENDF __mulusa3_round
#endif /* L_mulusa3 */
#undef A0
#undef A1
#undef A2
#undef A3
#undef B0
#undef B1
#undef B2
#undef B3
#undef C0
#undef C1
#undef C2
#undef C3
#undef AA0
#undef AA1
#undef AA2
#undef AA3
#undef CC0
#undef CC1
#undef CC2
#undef CC3
#endif /* __AVR_HAVE_MUL__ */
#undef GUARD
/***********************************************************
Fixed unsigned saturated Multiplication 8.8 x 8.8
***********************************************************/
#define C0 22
#define C1 C0+1
#define C2 C0+2
#define C3 C0+3
#define SS __tmp_reg__
#if defined (L_usmuluha3)
DEFUN __usmuluha3
;; Widening multiply
#ifdef __AVR_HAVE_MUL__
;; Adjust interface
movw R26, R22
movw R18, R24
#endif /* HAVE MUL */
XCALL __umulhisi3
tst C3
brne .Lmax
;; Round, target is in C1..C2
lsl C0
adc C1, __zero_reg__
adc C2, __zero_reg__
brcs .Lmax
;; Move result into place
mov C3, C2
mov C2, C1
ret
.Lmax:
;; Saturate
ldi C2, 0xff
ldi C3, 0xff
ret
ENDF __usmuluha3
#endif /* L_usmuluha3 */
/***********************************************************
Fixed signed saturated Multiplication s8.7 x s8.7
***********************************************************/
#if defined (L_ssmulha3)
DEFUN __ssmulha3
;; Widening multiply
#ifdef __AVR_HAVE_MUL__
;; Adjust interface
movw R26, R22
movw R18, R24
#endif /* HAVE MUL */
XCALL __mulhisi3
;; Adjust decimal point
lsl C0
rol C1
rol C2
brvs .LsatC3.3
;; The 9 MSBs must be the same
rol C3
sbc SS, SS
cp C3, SS
brne .LsatSS
;; Round
lsl C0
adc C1, __zero_reg__
adc C2, __zero_reg__
brvs .Lmax
;; Move result into place
mov C3, C2
mov C2, C1
ret
.Lmax:
;; Load 0x7fff
clr C3
.LsatC3.3:
;; C3 < 0 --> 0x8000
;; C3 >= 0 --> 0x7fff
mov SS, C3
.LsatSS:
;; Load min / max value:
;; SS = -1 --> 0x8000
;; SS = 0 --> 0x7fff
ldi C3, 0x7f
ldi C2, 0xff
sbrc SS, 7
adiw C2, 1
ret
ENDF __ssmulha3
#endif /* L_ssmulha3 */
#undef C0
#undef C1
#undef C2
#undef C3
#undef SS
/***********************************************************
Fixed unsigned saturated Multiplication 16.16 x 16.16
***********************************************************/
#define C0 18
#define C1 C0+1
#define C2 C0+2
#define C3 C0+3
#define C4 C0+4
#define C5 C0+5
#define C6 C0+6
#define C7 C0+7
#define SS __tmp_reg__
#if defined (L_usmulusa3)
;; R22[4] = R22[4] *{ssat} R18[4]
;; Ordinary ABI function
DEFUN __usmulusa3
;; Widening multiply
XCALL __umulsidi3
or C7, C6
brne .Lmax
;; Round, target is in C2..C5
lsl C1
adc C2, __zero_reg__
adc C3, __zero_reg__
adc C4, __zero_reg__
adc C5, __zero_reg__
brcs .Lmax
;; Move result into place
wmov C6, C4
wmov C4, C2
ret
.Lmax:
;; Saturate
ldi C7, 0xff
ldi C6, 0xff
wmov C4, C6
ret
ENDF __usmulusa3
#endif /* L_usmulusa3 */
/***********************************************************
Fixed signed saturated Multiplication s16.15 x s16.15
***********************************************************/
#if defined (L_ssmulsa3)
;; R22[4] = R22[4] *{ssat} R18[4]
;; Ordinary ABI function
DEFUN __ssmulsa3
;; Widening multiply
XCALL __mulsidi3
;; Adjust decimal point
lsl C1
rol C2
rol C3
rol C4
rol C5
brvs .LsatC7.7
;; The 17 MSBs must be the same
rol C6
rol C7
sbc SS, SS
cp C6, SS
cpc C7, SS
brne .LsatSS
;; Round
lsl C1
adc C2, __zero_reg__
adc C3, __zero_reg__
adc C4, __zero_reg__
adc C5, __zero_reg__
brvs .Lmax
;; Move result into place
wmov C6, C4
wmov C4, C2
ret
.Lmax:
;; Load 0x7fffffff
clr C7
.LsatC7.7:
;; C7 < 0 --> 0x80000000
;; C7 >= 0 --> 0x7fffffff
lsl C7
sbc SS, SS
.LsatSS:
;; Load min / max value:
;; SS = -1 --> 0x80000000
;; SS = 0 --> 0x7fffffff
com SS
mov C4, SS
mov C5, C4
wmov C6, C4
subi C7, 0x80
ret
ENDF __ssmulsa3
#endif /* L_ssmulsa3 */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef SS
/*******************************************************
Fractional Division 8 / 8
*******************************************************/
#define r_divd r25 /* dividend */
#define r_quo r24 /* quotient */
#define r_div r22 /* divisor */
#define r_sign __tmp_reg__
#if defined (L_divqq3)
DEFUN __divqq3
mov r_sign, r_divd
eor r_sign, r_div
sbrc r_div, 7
neg r_div
sbrc r_divd, 7
neg r_divd
XCALL __divqq_helper
lsr r_quo
sbrc r_sign, 7 ; negate result if needed
neg r_quo
ret
ENDF __divqq3
#endif /* L_divqq3 */
#if defined (L_udivuqq3)
DEFUN __udivuqq3
cp r_divd, r_div
brsh 0f
XJMP __divqq_helper
;; Result is out of [0, 1) ==> Return 1 - eps.
0: ldi r_quo, 0xff
ret
ENDF __udivuqq3
#endif /* L_udivuqq3 */
#if defined (L_divqq_helper)
DEFUN __divqq_helper
clr r_quo ; clear quotient
inc __zero_reg__ ; init loop counter, used per shift
__udivuqq3_loop:
lsl r_divd ; shift dividend
brcs 0f ; dividend overflow
cp r_divd,r_div ; compare dividend & divisor
brcc 0f ; dividend >= divisor
rol r_quo ; shift quotient (with CARRY)
rjmp __udivuqq3_cont
0:
sub r_divd,r_div ; restore dividend
lsl r_quo ; shift quotient (without CARRY)
__udivuqq3_cont:
lsl __zero_reg__ ; shift loop-counter bit
brne __udivuqq3_loop
com r_quo ; complement result
; because C flag was complemented in loop
ret
ENDF __divqq_helper
#endif /* L_divqq_helper */
#undef r_divd
#undef r_quo
#undef r_div
#undef r_sign
/*******************************************************
Fractional Division 16 / 16
*******************************************************/
#define r_divdL 26 /* dividend Low */
#define r_divdH 27 /* dividend Hig */
#define r_quoL 24 /* quotient Low */
#define r_quoH 25 /* quotient High */
#define r_divL 22 /* divisor */
#define r_divH 23 /* divisor */
#define r_cnt 21
#if defined (L_divhq3)
DEFUN __divhq3
mov r0, r_divdH
eor r0, r_divH
sbrs r_divH, 7
rjmp 1f
NEG2 r_divL
1:
sbrs r_divdH, 7
rjmp 2f
NEG2 r_divdL
2:
cp r_divdL, r_divL
cpc r_divdH, r_divH
breq __divhq3_minus1 ; if equal return -1
XCALL __udivuhq3
lsr r_quoH
ror r_quoL
brpl 9f
;; negate result if needed
NEG2 r_quoL
9:
ret
__divhq3_minus1:
ldi r_quoH, 0x80
clr r_quoL
ret
ENDF __divhq3
#endif /* defined (L_divhq3) */
#if defined (L_udivuhq3)
DEFUN __udivuhq3
sub r_quoH,r_quoH ; clear quotient and carry
;; FALLTHRU
ENDF __udivuhq3
DEFUN __udivuha3_common
clr r_quoL ; clear quotient
ldi r_cnt,16 ; init loop counter
__udivuhq3_loop:
rol r_divdL ; shift dividend (with CARRY)
rol r_divdH
brcs __udivuhq3_ep ; dividend overflow
cp r_divdL,r_divL ; compare dividend & divisor
cpc r_divdH,r_divH
brcc __udivuhq3_ep ; dividend >= divisor
rol r_quoL ; shift quotient (with CARRY)
rjmp __udivuhq3_cont
__udivuhq3_ep:
sub r_divdL,r_divL ; restore dividend
sbc r_divdH,r_divH
lsl r_quoL ; shift quotient (without CARRY)
__udivuhq3_cont:
rol r_quoH ; shift quotient
dec r_cnt ; decrement loop counter
brne __udivuhq3_loop
com r_quoL ; complement result
com r_quoH ; because C flag was complemented in loop
ret
ENDF __udivuha3_common
#endif /* defined (L_udivuhq3) */
/*******************************************************
Fixed Division 8.8 / 8.8
*******************************************************/
#if defined (L_divha3)
;; Signed 8.8 fixed-point division.
;; The result sign is computed up front into r0.7, both operands are
;; made non-negative, divided unsigned, then the quotient is negated
;; if the signs differed.
DEFUN __divha3
mov r0, r_divdH
eor r0, r_divH ; r0.7 = sign (dividend) ^ sign (divisor)
sbrs r_divH, 7
rjmp 1f
NEG2 r_divL ; divisor < 0: negate it
1:
sbrs r_divdH, 7
rjmp 2f
NEG2 r_divdL ; dividend < 0: negate it
2:
XCALL __udivuha3
lsr r_quoH ; adjust to 7 fractional bits
ror r_quoL
sbrs r0, 7 ; negate result if needed
ret
NEG2 r_quoL
ret
ENDF __divha3
#endif /* defined (L_divha3) */
#if defined (L_udivuha3)
;; Unsigned 8.8 fixed-point division.
;; Rearranges the dividend bytes (integer part down, fraction into the
;; future quotient register) so the shared fractional division loop
;; __udivuha3_common can be reused unchanged.
DEFUN __udivuha3
mov r_quoH, r_divdL
mov r_divdL, r_divdH
clr r_divdH ; vacated high byte is zero
lsl r_quoH ; shift quotient into carry
XJMP __udivuha3_common ; same as fractional after rearrange
ENDF __udivuha3
#endif /* defined (L_udivuha3) */
#undef r_divdL
#undef r_divdH
#undef r_quoL
#undef r_quoH
#undef r_divL
#undef r_divH
#undef r_cnt
/*******************************************************
Fixed Division 16.16 / 16.16
*******************************************************/
#define r_arg1L 24 /* arg1 gets passed already in place */
#define r_arg1H 25
#define r_arg1HL 26
#define r_arg1HH 27
#define r_divdL 26 /* dividend Low */
#define r_divdH 27
#define r_divdHL 30
#define r_divdHH 31 /* dividend High */
#define r_quoL 22 /* quotient Low */
#define r_quoH 23
#define r_quoHL 24
#define r_quoHH 25 /* quotient High */
#define r_divL 18 /* divisor Low */
#define r_divH 19
#define r_divHL 20
#define r_divHH 21 /* divisor High */
#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
#if defined (L_divsa3)
;; Signed 16.16 fixed-point division.
;; Same sign-handling scheme as __divha3, but on 4-byte operands.
DEFUN __divsa3
mov r0, r_arg1HH
eor r0, r_divHH ; r0.7 = sign (dividend) ^ sign (divisor)
sbrs r_divHH, 7
rjmp 1f
NEG4 r_divL ; divisor < 0: negate it
1:
sbrs r_arg1HH, 7
rjmp 2f
NEG4 r_arg1L ; dividend < 0: negate it
2:
XCALL __udivusa3
lsr r_quoHH ; adjust to 15 fractional bits
ror r_quoHL
ror r_quoH
ror r_quoL
sbrs r0, 7 ; negate result if needed
ret
;; negate r_quoL
XJMP __negsi2
ENDF __divsa3
#endif /* defined (L_divsa3) */
#if defined (L_udivusa3)
;; Unsigned 16.16 fixed-point division: a 32-step restoring division.
;; Each round shifts the INVERTED carry into the quotient, hence the
;; final COM of all four quotient bytes.
DEFUN __udivusa3
ldi r_divdHL, 32 ; init loop counter
mov r_cnt, r_divdHL
clr r_divdHL
clr r_divdHH
wmov r_quoL, r_divdHL ; clear low quotient word as well
lsl r_quoHL ; shift quotient into carry
__udivusa3_loop:
rol r_divdL ; shift dividend (with CARRY)
rol r_divdH
rol r_divdHL
rol r_divdHH
brcs __udivusa3_ep ; dividend overflow
cp r_divdL,r_divL ; compare dividend & divisor
cpc r_divdH,r_divH
cpc r_divdHL,r_divHL
cpc r_divdHH,r_divHH
brcc __udivusa3_ep ; dividend >= divisor
rol r_quoL ; shift quotient (with CARRY)
rjmp __udivusa3_cont
__udivusa3_ep:
sub r_divdL,r_divL ; restore dividend
sbc r_divdH,r_divH
sbc r_divdHL,r_divHL
sbc r_divdHH,r_divHH
lsl r_quoL ; shift quotient (without CARRY)
__udivusa3_cont:
rol r_quoH ; shift quotient
rol r_quoHL
rol r_quoHH
dec r_cnt ; decrement loop counter
brne __udivusa3_loop
com r_quoL ; complement result
com r_quoH ; because C flag was complemented in loop
com r_quoHL
com r_quoHH
ret
ENDF __udivusa3
#endif /* defined (L_udivusa3) */
#undef r_arg1L
#undef r_arg1H
#undef r_arg1HL
#undef r_arg1HH
#undef r_divdL
#undef r_divdH
#undef r_divdHL
#undef r_divdHH
#undef r_quoL
#undef r_quoH
#undef r_quoHL
#undef r_quoHH
#undef r_divL
#undef r_divH
#undef r_divHL
#undef r_divHH
#undef r_cnt
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Saturation, 1 Byte
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; First Argument and Return Register
#define A0 24
#if defined (L_ssabs_1)
;; Signed saturating absolute value, 1 byte: A0 = |A0|,
;; with the lone overflow case -128 saturating to +127.
DEFUN __ssabs_1
sbrs A0, 7
ret ; already non-negative
neg A0
sbrc A0,7 ; still negative? then A0 was -128 ...
dec A0 ; ... saturate: 0x80 - 1 = 0x7f
ret
ENDF __ssabs_1
#endif /* L_ssabs_1 */
#undef A0
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Saturation, 2 Bytes
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; First Argument and Return Register
#define A0 24
#define A1 A0+1
#if defined (L_ssneg_2)
;; Signed saturating negation, 2 bytes: A = -A,
;; with INT16_MIN mapping to INT16_MAX.
DEFUN __ssneg_2
NEG2 A0
brvc 0f
sbiw A0, 1 ; overflow: 0x8000 - 1 = 0x7fff
0: ret
ENDF __ssneg_2
#endif /* L_ssneg_2 */
#if defined (L_ssabs_2)
;; Signed saturating absolute value, 2 bytes.
DEFUN __ssabs_2
sbrs A1, 7
ret ; already non-negative
XJMP __ssneg_2
ENDF __ssabs_2
#endif /* L_ssabs_2 */
#undef A0
#undef A1
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Saturation, 4 Bytes
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; First Argument and Return Register
#define A0 22
#define A1 A0+1
#define A2 A0+2
#define A3 A0+3
#if defined (L_ssneg_4)
;; Signed saturating negation, 4 bytes: A = -A,
;; with INT32_MIN mapping to INT32_MAX.
DEFUN __ssneg_4
XCALL __negsi2
brvc 0f
ldi A3, 0x7f ; overflow: A[] = 0x7fffffff
ldi A2, 0xff
ldi A1, 0xff
ldi A0, 0xff
0: ret
ENDF __ssneg_4
#endif /* L_ssneg_4 */
#if defined (L_ssabs_4)
;; Signed saturating absolute value, 4 bytes.
DEFUN __ssabs_4
sbrs A3, 7
ret ; already non-negative
XJMP __ssneg_4
ENDF __ssabs_4
#endif /* L_ssabs_4 */
#undef A0
#undef A1
#undef A2
#undef A3
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Saturation, 8 Bytes
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; First Argument and Return Register
#define A0 18
#define A1 A0+1
#define A2 A0+2
#define A3 A0+3
#define A4 A0+4
#define A5 A0+5
#define A6 A0+6
#define A7 A0+7
#if defined (L_clr_8)
FALIAS __usneguta2
FALIAS __usneguda2
FALIAS __usnegudq2
;; Clear Carry and all Bytes
DEFUN __clr_8
;; Clear Carry and set Z
sub A7, A7
;; FALLTHRU
ENDF __clr_8
;; Propagate Carry to all Bytes, Carry unaltered
DEFUN __sbc_8
sbc A7, A7 ; A7 = C ? 0xff : 0x00
sbc A6, A6
wmov A4, A6 ; replicate into the remaining bytes
wmov A2, A6
wmov A0, A6
ret
ENDF __sbc_8
#endif /* L_clr_8 */
#if defined (L_ssneg_8)
FALIAS __ssnegta2
FALIAS __ssnegda2
FALIAS __ssnegdq2
;; Signed saturating negation, 8 bytes: A = -A,
;; with INT64_MIN mapping to INT64_MAX.
DEFUN __ssneg_8
XCALL __negdi2
brvc 0f
;; A[] = 0x7fffffffffffffff
sec
XCALL __sbc_8 ; C set: all bytes = 0xff
ldi A7, 0x7f
0: ret
ENDF __ssneg_8
#endif /* L_ssneg_8 */
#if defined (L_ssabs_8)
FALIAS __ssabsta2
FALIAS __ssabsda2
FALIAS __ssabsdq2
;; Signed saturating absolute value, 8 bytes.
DEFUN __ssabs_8
sbrs A7, 7
ret ; already non-negative
XJMP __ssneg_8
ENDF __ssabs_8
#endif /* L_ssabs_8 */
;; Second Argument
#define B0 10
#define B1 B0+1
#define B2 B0+2
#define B3 B0+3
#define B4 B0+4
#define B5 B0+5
#define B6 B0+6
#define B7 B0+7
#if defined (L_usadd_8)
FALIAS __usadduta3
FALIAS __usadduda3
FALIAS __usaddudq3
;; Unsigned saturating addition, 8 bytes: A = A + B,
;; clamped to UINT64_MAX on carry-out.
DEFUN __usadd_8
XCALL __adddi3
brcs 0f
ret
0: ;; A[] = 0xffffffffffffffff
XJMP __sbc_8 ; C is set here: all bytes become 0xff
ENDF __usadd_8
#endif /* L_usadd_8 */
#if defined (L_ussub_8)
FALIAS __ussubuta3
FALIAS __ussubuda3
FALIAS __ussubudq3
;; Unsigned saturating subtraction, 8 bytes: A = A - B,
;; clamped to 0 on borrow.
DEFUN __ussub_8
XCALL __subdi3
brcs 0f
ret
0: ;; A[] = 0
XJMP __clr_8
ENDF __ussub_8
#endif /* L_ussub_8 */
#if defined (L_ssadd_8)
FALIAS __ssaddta3
FALIAS __ssaddda3
FALIAS __ssadddq3
;; Signed saturating addition, 8 bytes: A = A + B,
;; clamped to INT64_MAX / INT64_MIN on signed overflow.
DEFUN __ssadd_8
XCALL __adddi3
brvc 0f
;; A = (B >= 0) ? INT64_MAX : INT64_MIN
cpi B7, 0x80 ; C = (B7 < 0x80), i.e. set iff B >= 0
XCALL __sbc_8
subi A7, 0x80 ; flip MSB to get 0x7f... resp. 0x80...
0: ret
ENDF __ssadd_8
#endif /* L_ssadd_8 */
#if defined (L_sssub_8)
FALIAS __sssubta3
FALIAS __sssubda3
FALIAS __sssubdq3
;; Signed saturating subtraction, 8 bytes: A = A - B,
;; clamped to INT64_MAX / INT64_MIN on signed overflow.
DEFUN __sssub_8
XCALL __subdi3
brvc 0f
;; A = (B < 0) ? INT64_MAX : INT64_MIN
ldi A7, 0x7f
cp A7, B7 ; C = (0x7f < B7), i.e. set iff B < 0
XCALL __sbc_8
subi A7, 0x80
0: ret
ENDF __sssub_8
#endif /* L_sssub_8 */
#undef A0
#undef A1
#undef A2
#undef A3
#undef A4
#undef A5
#undef A6
#undef A7
#undef B0
#undef B1
#undef B2
#undef B3
#undef B4
#undef B5
#undef B6
#undef B7
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Rounding Helpers
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#ifdef L_mask1
#define AA 24
#define CC 25
;; R25 = 1 << (R24 & 7)
;; CC = 1 << (AA & 7)
;; Clobbers: None
;; Builds the power of two bit by bit: seed from AA.1, then double
;; for AA.0 (LSL) and multiply by 16 for AA.2 (SWAP).
DEFUN __mask1
;; CC = 2 ^ AA.1
ldi CC, 1 << 2
sbrs AA, 1
ldi CC, 1 << 0
;; CC *= 2 ^ AA.0
sbrc AA, 0
lsl CC
;; CC *= 2 ^ AA.2
sbrc AA, 2
swap CC
ret
ENDF __mask1
#undef AA
#undef CC
#endif /* L_mask1 */
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; The rounding point. Any bits smaller than
;; 2^{-RP} will be cleared.
#define RP R24
#define A0 22
#define A1 A0 + 1
#define C0 24
#define C1 C0 + 1
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Rounding, 1 Byte
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#ifdef L_roundqq3
;; R24 = round (R22, R24)
;; Clobbers: R22, __tmp_reg__
;; Round-half-up: add 2^{-RP-1} with signed saturation, then clear
;; all bits below the rounding point.
DEFUN __roundqq3
mov __tmp_reg__, C1 ; preserve caller's R25
subi RP, __QQ_FBIT__ - 1
neg RP
;; R25 = 1 << RP (Total offset is FBIT-1 - RP)
XCALL __mask1
mov C0, C1
;; Add-Saturate 2^{-RP-1}
add A0, C0
brvc 0f
ldi C0, 0x7f ; signed overflow: saturate to QQ max
rjmp 9f
0: ;; Mask out bits beyond RP
lsl C0 ; C0 = 2^{-RP}
neg C0 ; C0 = mask of bits at/above the rounding point
and C0, A0
9: mov C1, __tmp_reg__
ret
ENDF __roundqq3
#endif /* L_roundqq3 */
#ifdef L_rounduqq3
;; R24 = round (R22, R24)
;; Clobbers: R22, __tmp_reg__
;; Unsigned variant of __roundqq3: saturates on carry instead of V.
DEFUN __rounduqq3
mov __tmp_reg__, C1 ; preserve caller's R25
subi RP, __UQQ_FBIT__ - 1
neg RP
;; R25 = 1 << RP (Total offset is FBIT-1 - RP)
XCALL __mask1
mov C0, C1
;; Add-Saturate 2^{-RP-1}
add A0, C0
brcc 0f
ldi C0, 0xff ; carry-out: saturate to UQQ max
rjmp 9f
0: ;; Mask out bits beyond RP
lsl C0 ; C0 = 2^{-RP}
neg C0 ; C0 = mask of bits at/above the rounding point
and C0, A0
9: mov C1, __tmp_reg__
ret
ENDF __rounduqq3
#endif /* L_rounduqq3 */
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Rounding, 2 Bytes
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#ifdef L_addmask_2
;; [ R25:R24 = 1 << (R24 & 15)
;; R23:R22 += 1 << (R24 & 15) ]
;; SREG is set according to the addition
DEFUN __addmask_2
;; R25 = 1 << (R24 & 7)
XCALL __mask1
cpi RP, 1 << 3 ; C = (RP < 8)
sbc C0, C0 ; C0 = RP.3 ? 0x00 : 0xff ... wait, C set iff RP < 8
;; Swap C0 and C1 if RP.3 was set
and C0, C1
eor C1, C0
;; Finally, add the power-of-two: A[] += C[]
add A0, C0
adc A1, C1
ret
ENDF __addmask_2
#endif /* L_addmask_2 */
#ifdef L_round_s2
;; R25:R24 = round (R23:R22, R24)
;; Clobbers: R23, R22
;; __roundhq3 only rebases RP for the HQ format, then falls through.
DEFUN __roundhq3
subi RP, __HQ_FBIT__ - __HA_FBIT__
ENDF __roundhq3
DEFUN __roundha3
subi RP, __HA_FBIT__ - 1
neg RP
;; [ R25:R24 = 1 << (FBIT-1 - RP)
;; R23:R22 += 1 << (FBIT-1 - RP) ]
XCALL __addmask_2
XJMP __round_s2_const
ENDF __roundha3
#endif /* L_round_s2 */
#ifdef L_round_u2
;; R25:R24 = round (R23:R22, R24)
;; Clobbers: R23, R22
;; __rounduhq3 only rebases RP for the UHQ format, then falls through.
DEFUN __rounduhq3
subi RP, __UHQ_FBIT__ - __UHA_FBIT__
ENDF __rounduhq3
DEFUN __rounduha3
subi RP, __UHA_FBIT__ - 1
neg RP
;; [ R25:R24 = 1 << (FBIT-1 - RP)
;; R23:R22 += 1 << (FBIT-1 - RP) ]
XCALL __addmask_2
XJMP __round_u2_const
ENDF __rounduha3
#endif /* L_round_u2 */
#ifdef L_round_2_const
;; Helpers for 2 byte wide rounding
;; Entered with SREG still holding the flags of the rounding addition:
;; V-overflow (signed) resp. carry-out (unsigned) means saturate.
DEFUN __round_s2_const
brvc 2f
ldi C1, 0x7f ; signed saturation: 0x7fff
rjmp 1f
;; FALLTHRU (Barrier)
ENDF __round_s2_const
DEFUN __round_u2_const
brcc 2f
ldi C1, 0xff ; unsigned saturation: 0xffff
1:
ldi C0, 0xff
rjmp 9f
2:
;; Saturation is performed now.
;; Currently, we have C[] = 2^{-RP-1}
;; C[] = 2^{-RP}
lsl C0
rol C1
;; C[] = -C[]: mask of the bits at/above the rounding point
NEG2 C0
;; Clear the bits beyond the rounding point.
and C0, A0
and C1, A1
9: ret
ENDF __round_u2_const
#endif /* L_round_2_const */
#undef A0
#undef A1
#undef C0
#undef C1
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Rounding, 4 Bytes
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#define A0 18
#define A1 A0 + 1
#define A2 A0 + 2
#define A3 A0 + 3
#define C0 22
#define C1 C0 + 1
#define C2 C0 + 2
#define C3 C0 + 3
#ifdef L_addmask_4
;; [ R25:R22 = 1 << (R24 & 31)
;; R21:R18 += 1 << (R24 & 31) ]
;; SREG is set according to the addition
DEFUN __addmask_4
;; R25 = 1 << (R24 & 7)
XCALL __mask1
cpi RP, 1 << 4
sbc C0, C0
sbc C1, C1
;; Swap C2 with C3 if RP.3 is not set
cpi RP, 1 << 3
sbc C2, C2
and C2, C3
eor C3, C2
;; Swap C3:C2 with C1:C0 if RP.4 is not set
and C0, C2 $ eor C2, C0
and C1, C3 $ eor C3, C1
;; Finally, add the power-of-two: A[] += C[]
add A0, C0
adc A1, C1
adc A2, C2
adc A3, C3
ret
ENDF __addmask_4
#endif /* L_addmask_4 */
#ifdef L_round_s4
;; R25:R22 = round (R21:R18, R24)
;; Clobbers: R18...R21
;; __roundsq3 only rebases RP for the SQ format, then falls through.
DEFUN __roundsq3
subi RP, __SQ_FBIT__ - __SA_FBIT__
ENDF __roundsq3
DEFUN __roundsa3
subi RP, __SA_FBIT__ - 1
neg RP
;; [ R25:R22 = 1 << (FBIT-1 - RP)
;; R21:R18 += 1 << (FBIT-1 - RP) ]
XCALL __addmask_4
XJMP __round_s4_const
ENDF __roundsa3
#endif /* L_round_s4 */
#ifdef L_round_u4
;; R25:R22 = round (R21:R18, R24)
;; Clobbers: R18...R21
;; __roundusq3 only rebases RP for the USQ format, then falls through.
DEFUN __roundusq3
subi RP, __USQ_FBIT__ - __USA_FBIT__
ENDF __roundusq3
DEFUN __roundusa3
subi RP, __USA_FBIT__ - 1
neg RP
;; [ R25:R22 = 1 << (FBIT-1 - RP)
;; R21:R18 += 1 << (FBIT-1 - RP) ]
XCALL __addmask_4
XJMP __round_u4_const
ENDF __roundusa3
#endif /* L_round_u4 */
#ifdef L_round_4_const
;; Helpers for 4 byte wide rounding
;; Entered with SREG still holding the flags of the rounding addition:
;; V-overflow (signed) resp. carry-out (unsigned) means saturate.
DEFUN __round_s4_const
brvc 2f
ldi C3, 0x7f ; signed saturation: 0x7fffffff
rjmp 1f
;; FALLTHRU (Barrier)
ENDF __round_s4_const
DEFUN __round_u4_const
brcc 2f
ldi C3, 0xff ; unsigned saturation: 0xffffffff
1:
ldi C2, 0xff
ldi C1, 0xff
ldi C0, 0xff
rjmp 9f
2:
;; Saturation is performed now.
;; Currently, we have C[] = 2^{-RP-1}
;; C[] = 2^{-RP}
lsl C0
rol C1
rol C2
rol C3
XCALL __negsi2 ; C[] = -C[]: mask of bits at/above the rounding point
;; Clear the bits beyond the rounding point.
and C0, A0
and C1, A1
and C2, A2
and C3, A3
9: ret
ENDF __round_u4_const
#endif /* L_round_4_const */
#undef A0
#undef A1
#undef A2
#undef A3
#undef C0
#undef C1
#undef C2
#undef C3
#undef RP
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Rounding, 8 Bytes
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#define RP 16
#define FBITm1 31
#define C0 18
#define C1 C0 + 1
#define C2 C0 + 2
#define C3 C0 + 3
#define C4 C0 + 4
#define C5 C0 + 5
#define C6 C0 + 6
#define C7 C0 + 7
#define A0 16
#define A1 17
#define A2 26
#define A3 27
#define A4 28
#define A5 29
#define A6 30
#define A7 31
#ifdef L_rounddq3
;; R25:R18 = round (R25:R18, R16)
;; Clobbers: ABI
DEFUN __rounddq3
ldi FBITm1, __DQ_FBIT__ - 1
clt ; T = 0: signed flavor
XJMP __round_x8
ENDF __rounddq3
#endif /* L_rounddq3 */
#ifdef L_roundudq3
;; R25:R18 = round (R25:R18, R16)
;; Clobbers: ABI
DEFUN __roundudq3
ldi FBITm1, __UDQ_FBIT__ - 1
set ; T = 1: unsigned flavor
XJMP __round_x8
ENDF __roundudq3
#endif /* L_roundudq3 */
#ifdef L_roundda3
;; R25:R18 = round (R25:R18, R16)
;; Clobbers: ABI
DEFUN __roundda3
ldi FBITm1, __DA_FBIT__ - 1
clt ; T = 0: signed flavor
XJMP __round_x8
ENDF __roundda3
#endif /* L_roundda3 */
#ifdef L_rounduda3
;; R25:R18 = round (R25:R18, R16)
;; Clobbers: ABI
DEFUN __rounduda3
ldi FBITm1, __UDA_FBIT__ - 1
set ; T = 1: unsigned flavor
XJMP __round_x8
ENDF __rounduda3
#endif /* L_rounduda3 */
#ifdef L_roundta3
;; R25:R18 = round (R25:R18, R16)
;; Clobbers: ABI
DEFUN __roundta3
ldi FBITm1, __TA_FBIT__ - 1
clt ; T = 0: signed flavor
XJMP __round_x8
ENDF __roundta3
#endif /* L_roundta3 */
#ifdef L_rounduta3
;; R25:R18 = round (R25:R18, R16)
;; Clobbers: ABI
DEFUN __rounduta3
ldi FBITm1, __UTA_FBIT__ - 1
set ; T = 1: unsigned flavor
XJMP __round_x8
ENDF __rounduta3
#endif /* L_rounduta3 */
#ifdef L_round_x8
;; Common 8-byte rounding worker.
;; In: C[] = value, RP = rounding point, FBITm1 = FBIT - 1,
;; T = 1 for the unsigned flavor, 0 for the signed one.
;; Adds 2^{-RP-1} with saturation, then masks bits below 2^{-RP}.
DEFUN __round_x8
push r16
push r17
push r28
push r29
;; Compute log2 of addend from rounding point
sub RP, FBITm1
neg RP ; RP = FBIT-1 - RP
;; Move input to work register A[]
push C0 ; A0 aliases RP (r16), still live across __ashldi3 -- go via stack
mov A1, C1
wmov A2, C2
wmov A4, C4
wmov A6, C6
;; C[] = 1 << (FBIT-1 - RP)
XCALL __clr_8
inc C0
XCALL __ashldi3
pop A0 ; now r16 may hold the input low byte
;; A[] += C[]
add A0, C0
adc A1, C1
adc A2, C2
adc A3, C3
adc A4, C4
adc A5, C5
adc A6, C6
adc A7, C7
brts 1f
;; Signed
brvc 3f
;; Signed overflow: A[] = 0x7f...
brvs 2f
1: ;; Unsigned
brcc 3f
;; Unsigned overflow: A[] = 0xff...
2: ldi C7, 0xff
ldi C6, 0xff
wmov C0, C6
wmov C2, C6
wmov C4, C6
bld C7, 7 ; T=0 (signed) clears the MSB -> 0x7f..., T=1 keeps 0xff...
rjmp 9f
3:
;; C[] = -C[] - C[]
push A0
ldi r16, 1
XCALL __ashldi3 ; C[] = 2^{-RP}
pop A0
XCALL __negdi2 ; C[] = -2^{-RP}: mask of bits at/above the rounding point
;; Clear the bits beyond the rounding point.
and C0, A0
and C1, A1
and C2, A2
and C3, A3
and C4, A4
and C5, A5
and C6, A6
and C7, A7
9: ;; Epilogue
pop r29
pop r28
pop r17
pop r16
ret
ENDF __round_x8
#endif /* L_round_x8 */
#undef A0
#undef A1
#undef A2
#undef A3
#undef A4
#undef A5
#undef A6
#undef A7
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef RP
#undef FBITm1
;; Supply implementations / symbols for the bit-banging functions
;; __builtin_avr_bitsfx and __builtin_avr_fxbits
#ifdef L_ret
;; A lone RET instruction usable as a no-op function symbol.
DEFUN __ret
ret
ENDF __ret
#endif /* L_ret */
#endif /* if not __AVR_TINY__ */
|
4ms/metamodule-plugin-sdk
| 71,021
|
plugin-libc/libgcc/config/avr/lib1funcs.S
|
/* -*- Mode: Asm -*- */
/* Copyright (C) 1998-2022 Free Software Foundation, Inc.
Contributed by Denis Chertykov <chertykov@gmail.com>
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#if defined (__AVR_TINY__)
#define __zero_reg__ r17
#define __tmp_reg__ r16
#else
#define __zero_reg__ r1
#define __tmp_reg__ r0
#endif
#define __SREG__ 0x3f
#if defined (__AVR_HAVE_SPH__)
#define __SP_H__ 0x3e
#endif
#define __SP_L__ 0x3d
#define __RAMPZ__ 0x3B
#define __EIND__ 0x3C
/* Most of the functions here are called directly from avr.md
patterns, instead of using the standard libcall mechanisms.
This can make better code because GCC knows exactly which
of the call-used registers (not all of them) are clobbered. */
/* FIXME: At present, there is no SORT directive in the linker
script so that we must not assume that different modules
in the same input section like .libgcc.text.mul will be
located close together. Therefore, we cannot use
RCALL/RJMP to call a function like __udivmodhi4 from
__divmodhi4 and have to use lengthy XCALL/XJMP even
though they are in the same input section and all same
input sections together are small enough to reach every
location with a RCALL/RJMP instruction. */
#if defined (__AVR_HAVE_EIJMP_EICALL__) && !defined (__AVR_HAVE_ELPMX__)
#error device not supported
#endif
;; Copy the low byte(s); on MOVW devices this already copies the whole
;; 16-bit pair, and the matching mov_h below expands to nothing.
.macro mov_l r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
movw \r_dest, \r_src
#else
mov \r_dest, \r_src
#endif
.endm
.macro mov_h r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
; empty
#else
mov \r_dest, \r_src
#endif
.endm
;; 16-bit register-pair move: one MOVW where available, two MOVs else.
.macro wmov r_dest, r_src
#if defined (__AVR_HAVE_MOVW__)
movw \r_dest, \r_src
#else
mov \r_dest, \r_src
mov \r_dest+1, \r_src+1
#endif
.endm
#if defined (__AVR_HAVE_JMP_CALL__)
#define XCALL call
#define XJMP jmp
#else
#define XCALL rcall
#define XJMP rjmp
#endif
#if defined (__AVR_HAVE_EIJMP_EICALL__)
#define XICALL eicall
#define XIJMP eijmp
#else
#define XICALL icall
#define XIJMP ijmp
#endif
;; Prologue stuff
.macro do_prologue_saves n_pushed n_frame=0
ldi r26, lo8(\n_frame)
ldi r27, hi8(\n_frame)
ldi r30, lo8(gs(.L_prologue_saves.\@))
ldi r31, hi8(gs(.L_prologue_saves.\@))
XJMP __prologue_saves__ + ((18 - (\n_pushed)) * 2)
.L_prologue_saves.\@:
.endm
;; Epilogue stuff
.macro do_epilogue_restores n_pushed n_frame=0
in r28, __SP_L__
#ifdef __AVR_HAVE_SPH__
in r29, __SP_H__
.if \n_frame > 63
subi r28, lo8(-\n_frame)
sbci r29, hi8(-\n_frame)
.elseif \n_frame > 0
adiw r28, \n_frame
.endif
#else
clr r29
.if \n_frame > 0
subi r28, lo8(-\n_frame)
.endif
#endif /* HAVE SPH */
ldi r30, \n_pushed
XJMP __epilogue_restores__ + ((18 - (\n_pushed)) * 2)
.endm
;; Support function entry and exit for convenience
;; 16-bit subtract/add of an immediate; AVR_TINY lacks SBIW/ADIW.
.macro wsubi r_arg1, i_arg2
#if defined (__AVR_TINY__)
subi \r_arg1, lo8(\i_arg2)
sbci \r_arg1+1, hi8(\i_arg2)
#else
sbiw \r_arg1, \i_arg2
#endif
.endm
.macro waddi r_arg1, i_arg2
#if defined (__AVR_TINY__)
subi \r_arg1, lo8(-\i_arg2)
sbci \r_arg1+1, hi8(-\i_arg2)
#else
adiw \r_arg1, \i_arg2
#endif
.endm
;; Open a global function definition.
.macro DEFUN name
.global \name
.func \name
\name:
.endm
;; Close a function opened with DEFUN.
.macro ENDF name
.size \name, .-\name
.endfunc
.endm
;; Zero-size function alias: the symbol falls through into the
;; DEFUN that follows it.
.macro FALIAS name
.global \name
.func \name
\name:
.size \name, .-\name
.endfunc
.endm
;; Skip next instruction, typically a jump target
#define skip cpse 16,16
;; Negate a 2-byte value held in consecutive registers
.macro NEG2 reg
com \reg+1
neg \reg
sbci \reg+1, -1
.endm
;; Negate a 4-byte value held in consecutive registers
;; Sets the V flag for signed overflow tests if REG >= 16
.macro NEG4 reg
com \reg+3
com \reg+2
com \reg+1
.if \reg >= 16
neg \reg
sbci \reg+1, -1
sbci \reg+2, -1
sbci \reg+3, -1
.else
com \reg
adc \reg, __zero_reg__
adc \reg+1, __zero_reg__
adc \reg+2, __zero_reg__
adc \reg+3, __zero_reg__
.endif
.endm
;; Extract bytes 2 resp. 3 of an IEEE float's bit pattern (exponent).
#define exp_lo(N) hlo8 ((N) << 23)
#define exp_hi(N) hhi8 ((N) << 23)
.section .text.libgcc.mul, "ax", @progbits
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
/* Note: mulqi3, mulhi3 are open-coded on the enhanced core. */
#if !defined (__AVR_HAVE_MUL__)
/*******************************************************
Multiplication 8 x 8 without MUL
*******************************************************/
#if defined (L_mulqi3)
#define r_arg2 r22 /* multiplicand */
#define r_arg1 r24 /* multiplier */
#define r_res __tmp_reg__ /* result */
;; R24 = R24 * R22 (low 8 bits of the product), shift-and-add.
;; The loop stops as soon as either operand becomes zero.
DEFUN __mulqi3
clr r_res ; clear result
__mulqi3_loop:
sbrc r_arg1,0
add r_res,r_arg2 ; bit 0 of multiplier set: accumulate
add r_arg2,r_arg2 ; shift multiplicand
breq __mulqi3_exit ; while multiplicand != 0
lsr r_arg1 ;
brne __mulqi3_loop ; exit if multiplier = 0
__mulqi3_exit:
mov r_arg1,r_res ; result to return register
ret
ENDF __mulqi3
#undef r_arg2
#undef r_arg1
#undef r_res
#endif /* defined (L_mulqi3) */
/*******************************************************
Widening Multiplication 16 = 8 x 8 without MUL
Multiplication 16 x 16 without MUL
*******************************************************/
#define A0 22
#define A1 23
#define B0 24
#define BB0 20
#define B1 25
;; Output overlaps input, thus expand result in CC0/1
#define C0 24
#define C1 25
#define CC0 __tmp_reg__
#define CC1 21
#if defined (L_umulqihi3)
;;; R25:R24 = (unsigned int) R22 * (unsigned int) R24
;;; (C1:C0) = (unsigned int) A0 * (unsigned int) B0
;;; Clobbers: __tmp_reg__, R21..R23
;; Zero-extend both operands and defer to the 16x16 multiply.
DEFUN __umulqihi3
clr A1
clr B1
XJMP __mulhi3
ENDF __umulqihi3
#endif /* L_umulqihi3 */
#if defined (L_mulqihi3)
;;; R25:R24 = (signed int) R22 * (signed int) R24
;;; (C1:C0) = (signed int) A0 * (signed int) B0
;;; Clobbers: __tmp_reg__, R20..R23
DEFUN __mulqihi3
;; Sign-extend B0
clr B1
sbrc B0, 7
com B1
;; The multiplication runs twice as fast if A1 is zero, thus:
;; Zero-extend A0
clr A1
#ifdef __AVR_HAVE_JMP_CALL__
;; Store B0 * sign of A
clr BB0
sbrc A0, 7
mov BB0, B0
call __mulhi3
#else /* have no CALL */
;; Skip sign-extension of A if A >= 0
;; Same size as with the first alternative but avoids errata skip
;; and is faster if A >= 0
sbrs A0, 7
rjmp __mulhi3 ; tail-call: no sign correction needed
;; If A < 0 store B
mov BB0, B0
rcall __mulhi3
#endif /* HAVE_JMP_CALL */
;; 1-extend A after the multiplication
sub C1, BB0
ret
ENDF __mulqihi3
#endif /* L_mulqihi3 */
#if defined (L_mulhi3)
;;; R25:R24 = R23:R22 * R25:R24
;;; (C1:C0) = (A1:A0) * (B1:B0)
;;; Clobbers: __tmp_reg__, R21..R23
;; Shift-and-add: in each round the n-th bit of A selects whether
;; B << n is accumulated; terminates early once A or B is exhausted.
DEFUN __mulhi3
;; Clear result
clr CC0
clr CC1
rjmp 3f
1:
;; Bit n of A is 1 --> C += B << n
add CC0, B0
adc CC1, B1
2:
lsl B0
rol B1
3:
;; If B == 0 we are ready
wsubi B0, 0
breq 9f
;; Carry = n-th bit of A
lsr A1
ror A0
;; If bit n of A is set, then go add B * 2^n to C
brcs 1b
;; Carry = 0 --> The ROR above acts like CP A0, 0
;; Thus, it is sufficient to CPC the high part to test A against 0
cpc A1, __zero_reg__
;; Only proceed if A != 0
brne 2b
9:
;; Move Result into place
mov C0, CC0
mov C1, CC1
ret
ENDF __mulhi3
#endif /* L_mulhi3 */
#undef A0
#undef A1
#undef B0
#undef BB0
#undef B1
#undef C0
#undef C1
#undef CC0
#undef CC1
#define A0 22
#define A1 A0+1
#define A2 A0+2
#define A3 A0+3
#define B0 18
#define B1 B0+1
#define B2 B0+2
#define B3 B0+3
#define CC0 26
#define CC1 CC0+1
#define CC2 30
#define CC3 CC2+1
#define C0 22
#define C1 C0+1
#define C2 C0+2
#define C3 C0+3
/*******************************************************
Widening Multiplication 32 = 16 x 16 without MUL
*******************************************************/
#if defined (L_umulhisi3)
;; Widening unsigned 16x16: zero-extend both operands and
;; defer to the 32x32 multiply.
DEFUN __umulhisi3
wmov B0, 24
;; Zero-extend B
clr B2
clr B3
;; Zero-extend A
wmov A2, B2
XJMP __mulsi3
ENDF __umulhisi3
#endif /* L_umulhisi3 */
#if defined (L_mulhisi3)
;; Widening signed 16x16 without MUL.
DEFUN __mulhisi3
wmov B0, 24
;; Sign-extend B
lsl r25 ; carry = sign of B
sbc B2, B2 ; B2 = 0x00 or 0xff accordingly
mov B3, B2
#ifdef __AVR_ERRATA_SKIP_JMP_CALL__
;; Sign-extend A
clr A2
sbrc A1, 7
com A2
mov A3, A2
XJMP __mulsi3
#else /* no __AVR_ERRATA_SKIP_JMP_CALL__ */
;; Zero-extend A and __mulsi3 will run at least twice as fast
;; compared to a sign-extended A.
clr A2
clr A3
sbrs A1, 7
XJMP __mulsi3
;; If A < 0 then perform the B * 0xffff.... before the
;; very multiplication by initializing the high part of the
;; result CC with -B.
wmov CC2, A2
sub CC2, B0
sbc CC3, B1
XJMP __mulsi3_helper
#endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */
ENDF __mulhisi3
#endif /* L_mulhisi3 */
/*******************************************************
Multiplication 32 x 32 without MUL
*******************************************************/
#if defined (L_mulsi3)
;; R25:R22 *= R21:R18, shift-and-add in CC[].
DEFUN __mulsi3
#if defined (__AVR_TINY__)
in r26, __SP_L__ ; safe to use X, as it is CC0/CC1
in r27, __SP_H__
subi r26, lo8(-3) ; Add 3 to point past return address
sbci r27, hi8(-3)
push B0 ; save callee saved regs
push B1
ld B0, X+ ; load from caller stack
ld B1, X+
ld B2, X+
ld B3, X
#endif
;; Clear result
clr CC2
clr CC3
;; FALLTHRU
ENDF __mulsi3
DEFUN __mulsi3_helper
clr CC0
clr CC1
rjmp 3f
1: ;; If bit n of A is set, then add B * 2^n to the result in CC
;; CC += B
add CC0,B0 $ adc CC1,B1 $ adc CC2,B2 $ adc CC3,B3
2: ;; B <<= 1
lsl B0 $ rol B1 $ rol B2 $ rol B3
3: ;; A >>= 1: Carry = n-th bit of A
lsr A3 $ ror A2 $ ror A1 $ ror A0
brcs 1b
;; Only continue if A != 0
sbci A1, 0 ; C is 0 here; Z accumulates over A1 (ROR set Z for A0)
brne 2b
wsubi A2, 0
brne 2b
;; All bits of A are consumed: Copy result to return register C
wmov C0, CC0
wmov C2, CC2
#if defined (__AVR_TINY__)
pop B1 ; restore callee saved regs
pop B0
#endif /* defined (__AVR_TINY__) */
ret
ENDF __mulsi3_helper
#endif /* L_mulsi3 */
#undef A0
#undef A1
#undef A2
#undef A3
#undef B0
#undef B1
#undef B2
#undef B3
#undef C0
#undef C1
#undef C2
#undef C3
#undef CC0
#undef CC1
#undef CC2
#undef CC3
#endif /* !defined (__AVR_HAVE_MUL__) */
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#if defined (__AVR_HAVE_MUL__)
#define A0 26
#define B0 18
#define C0 22
#define A1 A0+1
#define B1 B0+1
#define B2 B0+2
#define B3 B0+3
#define C1 C0+1
#define C2 C0+2
#define C3 C0+3
/*******************************************************
Widening Multiplication 32 = 16 x 16 with MUL
*******************************************************/
#if defined (L_mulhisi3)
;;; R25:R22 = (signed long) R27:R26 * (signed long) R19:R18
;;; C3:C0 = (signed long) A1:A0 * (signed long) B1:B0
;;; Clobbers: __tmp_reg__
;; Unsigned product first, then sign-correct the high word for each
;; negative operand (subtract the other operand from the high part).
DEFUN __mulhisi3
XCALL __umulhisi3
;; Sign-extend B
tst B1
brpl 1f
sub C2, A0
sbc C3, A1
1: ;; Sign-extend A
XJMP __usmulhisi3_tail
ENDF __mulhisi3
#endif /* L_mulhisi3 */
#if defined (L_usmulhisi3)
;;; R25:R22 = (signed long) R27:R26 * (unsigned long) R19:R18
;;; C3:C0 = (signed long) A1:A0 * (unsigned long) B1:B0
;;; Clobbers: __tmp_reg__
DEFUN __usmulhisi3
XCALL __umulhisi3
;; FALLTHRU
ENDF __usmulhisi3
DEFUN __usmulhisi3_tail
;; Sign-extend A
sbrs A1, 7
ret
sub C2, B0
sbc C3, B1
ret
ENDF __usmulhisi3_tail
#endif /* L_usmulhisi3 */
#if defined (L_umulhisi3)
;;; R25:R22 = (unsigned long) R27:R26 * (unsigned long) R19:R18
;;; C3:C0 = (unsigned long) A1:A0 * (unsigned long) B1:B0
;;; Clobbers: __tmp_reg__
;; Schoolbook: A0*B0 + ((A0*B1 + A1*B0) << 8) + (A1*B1 << 16).
DEFUN __umulhisi3
mul A0, B0
movw C0, r0
mul A1, B1
movw C2, r0
mul A0, B1
#ifdef __AVR_HAVE_JMP_CALL__
;; This function is used by many other routines, often multiple times.
;; Therefore, if the flash size is not too limited, avoid the RCALL
;; and invest 6 bytes to speed things up.
add C1, r0
adc C2, r1
clr __zero_reg__
adc C3, __zero_reg__
#else
rcall 1f ; reuse the accumulate sequence below for A0*B1
#endif
mul A1, B0
1: add C1, r0
adc C2, r1
clr __zero_reg__
adc C3, __zero_reg__
ret
ENDF __umulhisi3
#endif /* L_umulhisi3 */
/*******************************************************
Widening Multiplication 32 = 16 x 32 with MUL
*******************************************************/
#if defined (L_mulshisi3)
;;; R25:R22 = (signed long) R27:R26 * R21:R18
;;; (C3:C0) = (signed long) A1:A0 * B3:B0
;;; Clobbers: __tmp_reg__
DEFUN __mulshisi3
#ifdef __AVR_ERRATA_SKIP_JMP_CALL__
;; Some cores have problem skipping 2-word instruction
tst A1
brmi __mulohisi3
#else
sbrs A1, 7
#endif /* __AVR_HAVE_JMP_CALL__ */
XJMP __muluhisi3
;; FALLTHRU
ENDF __mulshisi3
;;; R25:R22 = (one-extended long) R27:R26 * R21:R18
;;; (C3:C0) = (one-extended long) A1:A0 * B3:B0
;;; Clobbers: __tmp_reg__
DEFUN __mulohisi3
XCALL __muluhisi3
;; One-extend R27:R26 (A1:A0)
sub C2, B0
sbc C3, B1
ret
ENDF __mulohisi3
#endif /* L_mulshisi3 */
#if defined (L_muluhisi3)
;;; R25:R22 = (unsigned long) R27:R26 * R21:R18
;;; (C3:C0) = (unsigned long) A1:A0 * B3:B0
;;; Clobbers: __tmp_reg__
;; Widening 16x16 product plus the cross terms that only affect the
;; upper result bytes (A*B2, A*B3; high bytes of products discarded).
DEFUN __muluhisi3
XCALL __umulhisi3
mul A0, B3
add C3, r0
mul A1, B2
add C3, r0
mul A0, B2
add C2, r0
adc C3, r1
clr __zero_reg__ ; MUL clobbered r1; restore the zero register
ret
ENDF __muluhisi3
#endif /* L_muluhisi3 */
/*******************************************************
Multiplication 32 x 32 with MUL
*******************************************************/
#if defined (L_mulsi3)
;;; R25:R22 = R25:R22 * R21:R18
;;; (C3:C0) = C3:C0 * B3:B0
;;; Clobbers: R26, R27, __tmp_reg__
;; Low half of A times full B via __muluhisi3, then add the cross
;; terms of A's high half (which can only affect C3:C2).
DEFUN __mulsi3
movw A0, C0
push C2 ; preserve A's high half across the call
push C3
XCALL __muluhisi3
pop A1
pop A0
;; A1:A0 now contains the high word of A
mul A0, B0
add C2, r0
adc C3, r1
mul A0, B1
add C3, r0
mul A1, B0
add C3, r0
clr __zero_reg__ ; MUL clobbered r1; restore the zero register
ret
ENDF __mulsi3
#endif /* L_mulsi3 */
#undef A0
#undef A1
#undef B0
#undef B1
#undef B2
#undef B3
#undef C0
#undef C1
#undef C2
#undef C3
#endif /* __AVR_HAVE_MUL__ */
/*******************************************************
Multiplication 24 x 24 with MUL
*******************************************************/
#if defined (L_mulpsi3)
;; A[0..2]: In: Multiplicand; Out: Product
#define A0 22
#define A1 A0+1
#define A2 A0+2
;; B[0..2]: In: Multiplier
#define B0 18
#define B1 B0+1
#define B2 B0+2
#if defined (__AVR_HAVE_MUL__)
;; C[0..2]: Expand Result
#define C0 22
#define C1 C0+1
#define C2 C0+2
;; R24:R22 *= R20:R18
;; Clobbers: r21, r25, r26, r27, __tmp_reg__
#define AA0 26
#define AA2 21
;; 24-bit product: 16x16 widening multiply for the low part, then the
;; two cross terms that reach only the top byte.
DEFUN __mulpsi3
wmov AA0, A0 ; save A: __umulhisi3 output overlays A[]
mov AA2, A2
XCALL __umulhisi3
mul AA2, B0 $ add C2, r0
mul AA0, B2 $ add C2, r0
clr __zero_reg__ ; MUL clobbered r1; restore the zero register
ret
ENDF __mulpsi3
#undef AA2
#undef AA0
#undef C2
#undef C1
#undef C0
#else /* !HAVE_MUL */
;; C[0..2]: Expand Result
#if defined (__AVR_TINY__)
#define C0 16
#else
#define C0 0
#endif /* defined (__AVR_TINY__) */
#define C1 C0+1
#define C2 21
;; R24:R22 *= R20:R18
;; Clobbers: __tmp_reg__, R18, R19, R20, R21
;; Shift-and-add version for cores without the MUL instruction.
DEFUN __mulpsi3
#if defined (__AVR_TINY__)
in r26,__SP_L__
in r27,__SP_H__
subi r26, lo8(-3) ; Add 3 to point past return address
sbci r27, hi8(-3)
push B0 ; save callee saved regs
push B1
ld B0,X+ ; load from caller stack
ld B1,X+
ld B2,X+
#endif /* defined (__AVR_TINY__) */
;; C[] = 0
clr __tmp_reg__
clr C2
0: ;; Shift N-th Bit of B[] into Carry. N = 24 - Loop
LSR B2 $ ror B1 $ ror B0
;; If the N-th Bit of B[] was set...
brcc 1f
;; ...then add A[] * 2^N to the Result C[]
ADD C0,A0 $ adc C1,A1 $ adc C2,A2
1: ;; Multiply A[] by 2
LSL A0 $ rol A1 $ rol A2
;; Loop until B[] is 0
subi B0,0 $ sbci B1,0 $ sbci B2,0
brne 0b
;; Copy C[] to the return Register A[]
wmov A0, C0
mov A2, C2
clr __zero_reg__ ; C0 aliased the zero register; re-clear it
#if defined (__AVR_TINY__)
pop B1
pop B0
#endif /* (__AVR_TINY__) */
ret
ENDF __mulpsi3
#undef C2
#undef C1
#undef C0
#endif /* HAVE_MUL */
#undef B2
#undef B1
#undef B0
#undef A2
#undef A1
#undef A0
#endif /* L_mulpsi3 */
#if defined (L_mulsqipsi3) && defined (__AVR_HAVE_MUL__)
;; A[0..2]: In: Multiplicand
#define A0 22
#define A1 A0+1
#define A2 A0+2
;; BB: In: Multiplier
#define BB 25
;; C[0..2]: Result
#define C0 18
#define C1 C0+1
#define C2 C0+2
;; C[] = A[] * sign_extend (BB)
;; 24-bit x signed-8-bit product: three MULs, then a one's-correction
;; of the upper bytes if BB was negative.
DEFUN __mulsqipsi3
mul A0, BB
movw C0, r0
mul A2, BB
mov C2, r0
mul A1, BB
add C1, r0
adc C2, r1
clr __zero_reg__ ; MUL clobbered r1; restore the zero register
sbrs BB, 7
ret
;; One-extend BB
sub C1, A0
sbc C2, A1
ret
ENDF __mulsqipsi3
#undef C2
#undef C1
#undef C0
#undef BB
#undef A2
#undef A1
#undef A0
#endif /* L_mulsqipsi3 && HAVE_MUL */
/*******************************************************
Multiplication 64 x 64
*******************************************************/
;; A[] = A[] * B[]
;; A[0..7]: In: Multiplicand
;; Out: Product
#define A0 18
#define A1 A0+1
#define A2 A0+2
#define A3 A0+3
#define A4 A0+4
#define A5 A0+5
#define A6 A0+6
#define A7 A0+7
;; B[0..7]: In: Multiplier
#define B0 10
#define B1 B0+1
#define B2 B0+2
#define B3 B0+3
#define B4 B0+4
#define B5 B0+5
#define B6 B0+6
#define B7 B0+7
#ifndef __AVR_TINY__
#if defined (__AVR_HAVE_MUL__)
;; Define C[] for convenience
;; Notice that parts of C[] overlap A[] respective B[]
#define C0 16
#define C1 C0+1
#define C2 20
#define C3 C2+1
#define C4 28
#define C5 C4+1
#define C6 C4+2
#define C7 C4+3
#if defined (L_muldi3)
;; A[] *= B[]
;; R25:R18 *= R17:R10
;; Ordinary ABI-Function
;; Word-wise schoolbook product. The 4x4 word products are grouped by
;; how far they reach into the 64-bit result; word products are done
;; via __umulhisi3 (in: R27:R26 x R19:R18, out: R25:R22) resp.
;; __muldi3_6 for the ones accumulated into C5:C2.
DEFUN __muldi3
push r29
push r28
push r17
push r16
;; Counting in Words, we have to perform a 4 * 4 Multiplication
;; 3 * 0 + 0 * 3
mul A7,B0 $ $ mov C7,r0
mul A0,B7 $ $ add C7,r0
mul A6,B1 $ $ add C7,r0
mul A6,B0 $ mov C6,r0 $ add C7,r1
mul B6,A1 $ $ add C7,r0
mul B6,A0 $ add C6,r0 $ adc C7,r1
;; 1 * 2
mul A2,B4 $ add C6,r0 $ adc C7,r1
mul A3,B4 $ $ add C7,r0
mul A2,B5 $ $ add C7,r0
push A5
push A4
push B1
push B0
push A3
push A2
;; 0 * 0
wmov 26, B0
XCALL __umulhisi3
wmov C0, 22
wmov C2, 24
;; 0 * 2
wmov 26, B4
XCALL __umulhisi3 $ wmov C4,22 $ add C6,24 $ adc C7,25
wmov 26, B2
;; 0 * 1
XCALL __muldi3_6
pop A0
pop A1
;; 1 * 1
wmov 26, B2
XCALL __umulhisi3 $ add C4,22 $ adc C5,23 $ adc C6,24 $ adc C7,25
pop r26
pop r27
;; 1 * 0
XCALL __muldi3_6
pop A0
pop A1
;; 2 * 0
XCALL __umulhisi3 $ add C4,22 $ adc C5,23 $ adc C6,24 $ adc C7,25
;; 2 * 1
wmov 26, B2
XCALL __umulhisi3 $ $ $ add C6,22 $ adc C7,23
;; A[] = C[]
wmov A0, C0
;; A2 = C2 already
wmov A4, C4
wmov A6, C6
pop r16
pop r17
pop r28
pop r29
ret
ENDF __muldi3
#endif /* L_muldi3 */
#if defined (L_muldi3_6)
;; A helper for some 64-bit multiplications with MUL available
;; Accumulates the __umulhisi3 product (R25:R22) into C5:C2 and
;; propagates a final carry into C7:C6.
DEFUN __muldi3_6
__muldi3_6:
XCALL __umulhisi3
add C2, 22
adc C3, 23
adc C4, 24
adc C5, 25
brcc 0f
adiw C6, 1
0: ret
ENDF __muldi3_6
#endif /* L_muldi3_6 */
#undef C7
#undef C6
#undef C5
#undef C4
#undef C3
#undef C2
#undef C1
#undef C0
#else /* !HAVE_MUL */
#if defined (L_muldi3)
#define C0 26
#define C1 C0+1
#define C2 C0+2
#define C3 C0+3
#define C4 C0+4
#define C5 C0+5
#define C6 0
#define C7 C6+1
#define Loop 9
;; A[] *= B[]
;; R25:R18 *= R17:R10
;; Ordinary ABI-Function
;; 64-round shift-and-add; B[] is rotated in place so it holds its
;; original value again after the loop.
DEFUN __muldi3
push r29
push r28
push Loop
ldi C0, 64
mov Loop, C0
;; C[] = 0
clr __tmp_reg__
wmov C0, 0
wmov C2, 0
wmov C4, 0
0: ;; Rotate B[] right by 1 and set Carry to the N-th Bit of B[]
;; where N = 64 - Loop.
;; Notice that B[] = B[] >>> 64 so after this Routine has finished,
;; B[] will have its initial Value again.
LSR B7 $ ror B6 $ ror B5 $ ror B4
ror B3 $ ror B2 $ ror B1 $ ror B0
;; If the N-th Bit of B[] was set then...
brcc 1f
;; ...finish Rotation...
ori B7, 1 << 7
;; ...and add A[] * 2^N to the Result C[]
ADD C0,A0 $ adc C1,A1 $ adc C2,A2 $ adc C3,A3
adc C4,A4 $ adc C5,A5 $ adc C6,A6 $ adc C7,A7
1: ;; Multiply A[] by 2
LSL A0 $ rol A1 $ rol A2 $ rol A3
rol A4 $ rol A5 $ rol A6 $ rol A7
dec Loop
brne 0b
;; We expanded the Result in C[]
;; Copy Result to the Return Register A[]
wmov A0, C0
wmov A2, C2
wmov A4, C4
wmov A6, C6
clr __zero_reg__ ; C6 aliased the zero register; re-clear it
pop Loop
pop r28
pop r29
ret
ENDF __muldi3
#undef Loop
#undef C7
#undef C6
#undef C5
#undef C4
#undef C3
#undef C2
#undef C1
#undef C0
#endif /* L_muldi3 */
#endif /* HAVE_MUL */
#endif /* if not __AVR_TINY__ */
#undef B7
#undef B6
#undef B5
#undef B4
#undef B3
#undef B2
#undef B1
#undef B0
#undef A7
#undef A6
#undef A5
#undef A4
#undef A3
#undef A2
#undef A1
#undef A0
/*******************************************************
Widening Multiplication 64 = 32 x 32 with MUL
*******************************************************/
#if defined (__AVR_HAVE_MUL__)
#define A0 r22
#define A1 r23
#define A2 r24
#define A3 r25
#define B0 r18
#define B1 r19
#define B2 r20
#define B3 r21
#define C0 18
#define C1 C0+1
#define C2 20
#define C3 C2+1
#define C4 28
#define C5 C4+1
#define C6 C4+2
#define C7 C4+3
#if defined (L_umulsidi3)
;; Unsigned widening 64 = 32 * 32 Multiplication with MUL
;; R18[8] = R22[4] * R18[4]
;;
;; Ordinary ABI Function, but additionally sets
;; X = R20[2] = B2[2]
;; Z = R22[2] = A0[2]
DEFUN __umulsidi3
;; Unsigned entry: clear T (no sign correction) and fall through
;; into the common helper below.
clt
;; FALLTHRU
ENDF __umulsidi3
;; T = sign (A)
;; Common worker for the widening 32 x 32 -> 64 multiplication.
;; Builds the 64-bit result from four 16 x 16 partial products
;; (word pairs 0*0, 1*1, 1*0, 0*1), using __umulhisi3 and the
;; __muldi3_6 accumulate helper.  The block comments below track
;; which logical value lives in which register pair at each point.
DEFUN __umulsidi3_helper
push 29 $ push 28 ; Y
wmov 30, A2
;; Counting in Words, we have to perform 4 Multiplications
;; 0 * 0
wmov 26, A0
XCALL __umulhisi3
push 23 $ push 22 ; C0
wmov 28, B0
wmov 18, B2
wmov C2, 24
push 27 $ push 26 ; A0
push 19 $ push 18 ; B2
;;
;; 18 20 22 24 26 28 30 | B2, B3, A0, A1, C0, C1, Y
;; B2 C2 -- -- -- B0 A2
;; 1 * 1
wmov 26, 30 ; A2
XCALL __umulhisi3
;; Sign-extend A. T holds the sign of A
brtc 0f
;; Subtract B from the high part of the result
sub 22, 28
sbc 23, 29
sbc 24, 18
sbc 25, 19
0: wmov 18, 28 ;; B0
wmov C4, 22
wmov C6, 24
;;
;; 18 20 22 24 26 28 30 | B2, B3, A0, A1, C0, C1, Y
;; B0 C2 -- -- A2 C4 C6
;;
;; 1 * 0
XCALL __muldi3_6
;; 0 * 1
pop 26 $ pop 27 ;; B2
pop 18 $ pop 19 ;; A0
XCALL __muldi3_6
;; Move result C into place and save A0 in Z
wmov 22, C4
wmov 24, C6
wmov 30, 18 ; A0
pop C0 $ pop C1
;; Epilogue
pop 28 $ pop 29 ;; Y
ret
ENDF __umulsidi3_helper
#endif /* L_umulsidi3 */
#if defined (L_mulsidi3)
;; Signed widening 64 = 32 * 32 Multiplication
;;
;; R18[8] = R22[4] * R18[4]
;; Ordinary ABI Function
DEFUN __mulsidi3
;; Signed widening multiply: record A's sign in T, then reuse the
;; unsigned helper.  If B is non-negative we can tail-jump directly;
;; otherwise B needs a sign correction: subtract A * 2^32 from the
;; result afterwards (A0 is returned in Z by the helper; A's high
;; word is preserved across the call on the stack).
bst A3, 7
sbrs B3, 7 ; Enhanced core has no skip bug
XJMP __umulsidi3_helper
;; B needs sign-extension
push A3
push A2
XCALL __umulsidi3_helper
;; A0 survived in Z
sub r22, r30
sbc r23, r31
pop r26
pop r27
sbc r24, r26
sbc r25, r27
ret
ENDF __mulsidi3
#endif /* L_mulsidi3 */
#undef A0
#undef A1
#undef A2
#undef A3
#undef B0
#undef B1
#undef B2
#undef B3
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#endif /* HAVE_MUL */
/**********************************************************
Widening Multiplication 64 = 32 x 32 without MUL
**********************************************************/
#ifndef __AVR_TINY__ /* if not __AVR_TINY__ */
#if defined (L_mulsidi3) && !defined (__AVR_HAVE_MUL__)
#define A0 18
#define A1 A0+1
#define A2 A0+2
#define A3 A0+3
#define A4 A0+4
#define A5 A0+5
#define A6 A0+6
#define A7 A0+7
#define B0 10
#define B1 B0+1
#define B2 B0+2
#define B3 B0+3
#define B4 B0+4
#define B5 B0+5
#define B6 B0+6
#define B7 B0+7
#define AA0 22
#define AA1 AA0+1
#define AA2 AA0+2
#define AA3 AA0+3
#define BB0 18
#define BB1 BB0+1
#define BB2 BB0+2
#define BB3 BB0+3
#define Mask r30
;; Signed / Unsigned widening 64 = 32 * 32 Multiplication without MUL
;;
;; R18[8] = R22[4] * R18[4]
;; Ordinary ABI Function
DEFUN __mulsidi3
;; Signed entry: set T and skip the clt of the unsigned entry below.
set
skip
;; FALLTHRU
ENDF __mulsidi3
DEFUN __umulsidi3
clt ; skipped
;; Save 10 Registers: R10..R17, R28, R29
do_prologue_saves 10
;; Mask = 0x7f (unsigned, T=0) or 0xff (signed, T=1): selects whether
;; the top bit of each operand takes part in the sign extension below.
ldi Mask, 0xff
bld Mask, 7
;; Move B into place...
wmov B0, BB0
wmov B2, BB2
;; ...and extend it (sbc Bn,Bn yields 0x00 or 0xff from the carry)
and BB3, Mask
lsl BB3
sbc B4, B4
mov B5, B4
wmov B6, B4
;; Move A into place...
wmov A0, AA0
wmov A2, AA2
;; ...and extend it
and AA3, Mask
lsl AA3
sbc A4, A4
mov A5, A4
wmov A6, A4
;; Full 64 x 64 multiply of the (sign- or zero-) extended operands
XCALL __muldi3
do_epilogue_restores 10
ENDF __umulsidi3
#undef A0
#undef A1
#undef A2
#undef A3
#undef A4
#undef A5
#undef A6
#undef A7
#undef B0
#undef B1
#undef B2
#undef B3
#undef B4
#undef B5
#undef B6
#undef B7
#undef AA0
#undef AA1
#undef AA2
#undef AA3
#undef BB0
#undef BB1
#undef BB2
#undef BB3
#undef Mask
#endif /* L_mulsidi3 && !HAVE_MUL */
#endif /* if not __AVR_TINY__ */
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
.section .text.libgcc.div, "ax", @progbits
/*******************************************************
Division 8 / 8 => (result + remainder)
*******************************************************/
#define r_rem r25 /* remainder */
#define r_arg1 r24 /* dividend, quotient */
#define r_arg2 r22 /* divisor */
#define r_cnt r23 /* loop count */
#if defined (L_udivmodqi4)
;; Unsigned 8 / 8 division and modulo (restoring, bit-serial).
;; In:  r24 = dividend, r22 = divisor
;; Out: r24 = quotient, r25 = remainder
;; Clobbers: r23
;; Quotient bits are accumulated inverted (carry from cp/sub), hence
;; the final COM.
DEFUN __udivmodqi4
sub r_rem,r_rem ; clear remainder and carry
ldi r_cnt,9 ; init loop counter
rjmp __udivmodqi4_ep ; jump to entry point
__udivmodqi4_loop:
rol r_rem ; shift dividend into remainder
cp r_rem,r_arg2 ; compare remainder & divisor
brcs __udivmodqi4_ep ; remainder < divisor
sub r_rem,r_arg2 ; remainder -= divisor
__udivmodqi4_ep:
rol r_arg1 ; shift dividend (with CARRY)
dec r_cnt ; decrement loop counter
brne __udivmodqi4_loop
com r_arg1 ; complement result
; because C flag was complemented in loop
ret
ENDF __udivmodqi4
#endif /* defined (L_udivmodqi4) */
#if defined (L_divmodqi4)
;; Signed 8 / 8 division and modulo on top of __udivmodqi4.
;; In:  r24 = dividend, r22 = divisor
;; Out: r24 = quotient (sign = sign(a) ^ sign(b)),
;;      r25 = remainder (sign follows the dividend, kept in T)
DEFUN __divmodqi4
bst r_arg1,7 ; store sign of dividend
mov __tmp_reg__,r_arg1
eor __tmp_reg__,r_arg2; r0.7 is sign of result
sbrc r_arg1,7
neg r_arg1 ; dividend negative : negate
sbrc r_arg2,7
neg r_arg2 ; divisor negative : negate
XCALL __udivmodqi4 ; do the unsigned div/mod
brtc __divmodqi4_1
neg r_rem ; correct remainder sign
__divmodqi4_1:
sbrc __tmp_reg__,7
neg r_arg1 ; correct result sign
__divmodqi4_exit:
ret
ENDF __divmodqi4
#endif /* defined (L_divmodqi4) */
#undef r_rem
#undef r_arg1
#undef r_arg2
#undef r_cnt
/*******************************************************
Division 16 / 16 => (result + remainder)
*******************************************************/
#define r_remL r26 /* remainder Low */
#define r_remH r27 /* remainder High */
/* return: remainder */
#define r_arg1L r24 /* dividend Low */
#define r_arg1H r25 /* dividend High */
/* return: quotient */
#define r_arg2L r22 /* divisor Low */
#define r_arg2H r23 /* divisor High */
#define r_cnt r21 /* loop count */
#if defined (L_udivmodhi4)
;; Unsigned 16 / 16 division and modulo (restoring, bit-serial).
;; In:  r25:r24 = dividend, r23:r22 = divisor
;; Out: r23:r22 = quotient, r25:r24 = remainder
;; Clobbers: r21, r26, r27
DEFUN __udivmodhi4
sub r_remL,r_remL
sub r_remH,r_remH ; clear remainder and carry
ldi r_cnt,17 ; init loop counter
rjmp __udivmodhi4_ep ; jump to entry point
__udivmodhi4_loop:
rol r_remL ; shift dividend into remainder
rol r_remH
cp r_remL,r_arg2L ; compare remainder & divisor
cpc r_remH,r_arg2H
brcs __udivmodhi4_ep ; remainder < divisor
sub r_remL,r_arg2L ; remainder -= divisor
sbc r_remH,r_arg2H
__udivmodhi4_ep:
rol r_arg1L ; shift dividend (with CARRY)
rol r_arg1H
dec r_cnt ; decrement loop counter
brne __udivmodhi4_loop
com r_arg1L ; complement quotient (C was inverted in the loop)
com r_arg1H
; div/mod results to return registers, as for the div() function
mov_l r_arg2L, r_arg1L ; quotient
mov_h r_arg2H, r_arg1H
mov_l r_arg1L, r_remL ; remainder
mov_h r_arg1H, r_remH
ret
ENDF __udivmodhi4
#endif /* defined (L_udivmodhi4) */
#if defined (L_divmodhi4)
;; Signed 16 / 16 division and modulo on top of __udivmodhi4.
;; Also exported as _div for the C library's div().
;; T = sign of dividend (decides remainder sign); __tmp_reg__.7 =
;; sign(a) ^ sign(b) (decides quotient sign).
DEFUN __divmodhi4
.global _div
_div:
bst r_arg1H,7 ; store sign of dividend
mov __tmp_reg__,r_arg2H
brtc 0f
com __tmp_reg__ ; r0.7 is sign of result
rcall __divmodhi4_neg1 ; dividend negative: negate
0:
sbrc r_arg2H,7
rcall __divmodhi4_neg2 ; divisor negative: negate
XCALL __udivmodhi4 ; do the unsigned div/mod
sbrc __tmp_reg__,7
rcall __divmodhi4_neg2 ; correct quotient sign (quotient is in r_arg2)
brtc __divmodhi4_exit ; T set: fall through to fix remainder sign
__divmodhi4_neg1:
;; correct dividend/remainder sign
com r_arg1H
neg r_arg1L
sbci r_arg1H,0xff
ret
__divmodhi4_neg2:
;; correct divisor/quotient sign
com r_arg2H
neg r_arg2L
sbci r_arg2H,0xff
__divmodhi4_exit:
ret
ENDF __divmodhi4
#endif /* defined (L_divmodhi4) */
#undef r_remH
#undef r_remL
#undef r_arg1H
#undef r_arg1L
#undef r_arg2H
#undef r_arg2L
#undef r_cnt
/*******************************************************
Division 24 / 24 => (result + remainder)
*******************************************************/
;; A[0..2]: In: Dividend; Out: Quotient
#define A0 22
#define A1 A0+1
#define A2 A0+2
;; B[0..2]: In: Divisor; Out: Remainder
#define B0 18
#define B1 B0+1
#define B2 B0+2
;; C[0..2]: Expand remainder
#define C0 __zero_reg__
#define C1 26
#define C2 25
;; Loop counter
#define r_cnt 21
#if defined (L_udivmodpsi4)
;; R24:R22 = R24:R22 udiv R20:R18
;; R20:R18 = R24:R22 umod R20:R18
;; Clobbers: R21, R25, R26
;; Unsigned 24 / 24 restoring division; __zero_reg__ doubles as the low
;; remainder byte (C0) and is cleared again before returning.
DEFUN __udivmodpsi4
; init loop counter
ldi r_cnt, 24+1
; Clear remainder and carry. C0 is already 0
clr C1
sub C2, C2
; jump to entry point
rjmp __udivmodpsi4_start
__udivmodpsi4_loop:
; shift dividend into remainder
rol C0
rol C1
rol C2
; compare remainder & divisor
cp C0, B0
cpc C1, B1
cpc C2, B2
brcs __udivmodpsi4_start ; remainder < divisor
sub C0, B0 ; remainder -= divisor
sbc C1, B1
sbc C2, B2
__udivmodpsi4_start:
; shift dividend (with CARRY)
rol A0
rol A1
rol A2
; decrement loop counter
dec r_cnt
brne __udivmodpsi4_loop
; complement quotient: C flag was inverted in the loop
com A0
com A1
com A2
; div/mod results to return registers
; remainder
mov B0, C0
mov B1, C1
mov B2, C2
clr __zero_reg__ ; C0
ret
ENDF __udivmodpsi4
#endif /* defined (L_udivmodpsi4) */
#if defined (L_divmodpsi4)
;; R24:R22 = R24:R22 div R20:R18
;; R20:R18 = R24:R22 mod R20:R18
;; Clobbers: T, __tmp_reg__, R21, R25, R26
;; Signed 24 / 24 division and modulo on top of __udivmodpsi4.
;; Quotient sign = sign(a) ^ sign(b) (kept in __tmp_reg__.7);
;; remainder sign follows the dividend (kept in T).
DEFUN __divmodpsi4
; R0.7 will contain the sign of the result:
; R0.7 = A.sign ^ B.sign
mov __tmp_reg__, B2
; T-flag = sign of dividend
bst A2, 7
brtc 0f
com __tmp_reg__
; Adjust dividend's sign
rcall __divmodpsi4_negA
0:
; Adjust divisor's sign
sbrc B2, 7
rcall __divmodpsi4_negB
; Do the unsigned div/mod
XCALL __udivmodpsi4
; Adjust quotient's sign
sbrc __tmp_reg__, 7
rcall __divmodpsi4_negA
; Adjust remainder's sign (fall through into negB when T is set)
brtc __divmodpsi4_end
__divmodpsi4_negB:
; Correct divisor/remainder sign
com B2
com B1
neg B0
sbci B1, -1
sbci B2, -1
ret
; Correct dividend/quotient sign
__divmodpsi4_negA:
com A2
com A1
neg A0
sbci A1, -1
sbci A2, -1
__divmodpsi4_end:
ret
ENDF __divmodpsi4
#endif /* defined (L_divmodpsi4) */
#undef A0
#undef A1
#undef A2
#undef B0
#undef B1
#undef B2
#undef C0
#undef C1
#undef C2
#undef r_cnt
/*******************************************************
Division 32 / 32 => (result + remainder)
*******************************************************/
#define r_remHH r31 /* remainder High */
#define r_remHL r30
#define r_remH r27
#define r_remL r26 /* remainder Low */
/* return: remainder */
#define r_arg1HH r25 /* dividend High */
#define r_arg1HL r24
#define r_arg1H r23
#define r_arg1L r22 /* dividend Low */
/* return: quotient */
#define r_arg2HH r21 /* divisor High */
#define r_arg2HL r20
#define r_arg2H r19
#define r_arg2L r18 /* divisor Low */
#define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
#if defined (L_udivmodsi4)
;; Unsigned 32 / 32 division and modulo (restoring, bit-serial).
;; In:  r25:r22 = dividend, r21:r18 = divisor
;; Out: r21:r18 = quotient, r25:r22 = remainder
;; Clobbers: r26, r27, r30, r31; __zero_reg__ is the loop counter and
;; is 0 again on exit.
DEFUN __udivmodsi4
ldi r_remL, 33 ; init loop counter
mov r_cnt, r_remL
sub r_remL,r_remL
sub r_remH,r_remH ; clear remainder and carry
mov_l r_remHL, r_remL
mov_h r_remHH, r_remH
rjmp __udivmodsi4_ep ; jump to entry point
__udivmodsi4_loop:
rol r_remL ; shift dividend into remainder
rol r_remH
rol r_remHL
rol r_remHH
cp r_remL,r_arg2L ; compare remainder & divisor
cpc r_remH,r_arg2H
cpc r_remHL,r_arg2HL
cpc r_remHH,r_arg2HH
brcs __udivmodsi4_ep ; remainder < divisor
sub r_remL,r_arg2L ; remainder -= divisor
sbc r_remH,r_arg2H
sbc r_remHL,r_arg2HL
sbc r_remHH,r_arg2HH
__udivmodsi4_ep:
rol r_arg1L ; shift dividend (with CARRY)
rol r_arg1H
rol r_arg1HL
rol r_arg1HH
dec r_cnt ; decrement loop counter
brne __udivmodsi4_loop
; __zero_reg__ now restored (r_cnt == 0)
com r_arg1L ; complement quotient (C was inverted in the loop)
com r_arg1H
com r_arg1HL
com r_arg1HH
; div/mod results to return registers, as for the ldiv() function
mov_l r_arg2L, r_arg1L ; quotient
mov_h r_arg2H, r_arg1H
mov_l r_arg2HL, r_arg1HL
mov_h r_arg2HH, r_arg1HH
mov_l r_arg1L, r_remL ; remainder
mov_h r_arg1H, r_remH
mov_l r_arg1HL, r_remHL
mov_h r_arg1HH, r_remHH
ret
ENDF __udivmodsi4
#endif /* defined (L_udivmodsi4) */
#if defined (L_divmodsi4)
;; Signed 32 / 32 division and modulo on top of __udivmodsi4.
;; __tmp_reg__.7 = sign(a) ^ sign(b) decides the quotient sign;
;; T = sign of dividend decides the remainder sign (tail call to
;; __negsi2 negates the remainder in r25:r22).
DEFUN __divmodsi4
mov __tmp_reg__,r_arg2HH
bst r_arg1HH,7 ; store sign of dividend
brtc 0f
com __tmp_reg__ ; r0.7 is sign of result
XCALL __negsi2 ; dividend negative: negate
0:
sbrc r_arg2HH,7
rcall __divmodsi4_neg2 ; divisor negative: negate
XCALL __udivmodsi4 ; do the unsigned div/mod
sbrc __tmp_reg__, 7 ; correct quotient sign
rcall __divmodsi4_neg2
brtc __divmodsi4_exit ; correct remainder sign
XJMP __negsi2
__divmodsi4_neg2:
;; correct divisor/quotient sign
com r_arg2HH
com r_arg2HL
com r_arg2H
neg r_arg2L
sbci r_arg2H,0xff
sbci r_arg2HL,0xff
sbci r_arg2HH,0xff
__divmodsi4_exit:
ret
ENDF __divmodsi4
#endif /* defined (L_divmodsi4) */
#if defined (L_negsi2)
;; (set (reg:SI 22)
;; (neg:SI (reg:SI 22)))
;; Sets the V flag for signed overflow tests
;; In-place two's-complement negation of the 32-bit value in R25:R22,
;; performed by the NEG4 helper macro (defined earlier in this file).
DEFUN __negsi2
NEG4 22
ret
ENDF __negsi2
#endif /* L_negsi2 */
#undef r_remHH
#undef r_remHL
#undef r_remH
#undef r_remL
#undef r_arg1HH
#undef r_arg1HL
#undef r_arg1H
#undef r_arg1L
#undef r_arg2HH
#undef r_arg2HL
#undef r_arg2H
#undef r_arg2L
#undef r_cnt
/* *di routines use registers below R19 and won't work with tiny arch
right now. */
#if !defined (__AVR_TINY__)
/*******************************************************
Division 64 / 64
Modulo 64 % 64
*******************************************************/
;; Use Speed-optimized Version on "big" Devices, i.e. Devices with
;; at least 16k of Program Memory. For smaller Devices, depend
;; on MOVW and SP Size. There is a Connexion between SP Size and
;; Flash Size so that SP Size can be used to test for Flash Size.
#if defined (__AVR_HAVE_JMP_CALL__)
# define SPEED_DIV 8
#elif defined (__AVR_HAVE_MOVW__) && defined (__AVR_HAVE_SPH__)
# define SPEED_DIV 16
#else
# define SPEED_DIV 0
#endif
;; A[0..7]: In: Dividend;
;; Out: Quotient (T = 0)
;; Out: Remainder (T = 1)
#define A0 18
#define A1 A0+1
#define A2 A0+2
#define A3 A0+3
#define A4 A0+4
#define A5 A0+5
#define A6 A0+6
#define A7 A0+7
;; B[0..7]: In: Divisor; Out: Clobber
#define B0 10
#define B1 B0+1
#define B2 B0+2
#define B3 B0+3
#define B4 B0+4
#define B5 B0+5
#define B6 B0+6
#define B7 B0+7
;; C[0..7]: Expand remainder; Out: Remainder (unused)
#define C0 8
#define C1 C0+1
#define C2 30
#define C3 C2+1
#define C4 28
#define C5 C4+1
#define C6 26
#define C7 C6+1
;; Holds Signs during Division Routine
#define SS __tmp_reg__
;; Bit-Counter in Division Routine
#define R_cnt __zero_reg__
;; Scratch Register for Negation
#define NN r31
#if defined (L_udivdi3)
;; R25:R18 = R24:R18 umod R17:R10
;; Ordinary ABI-Function
DEFUN __umoddi3
;; Modulo entry: T = 1 selects "remainder" from the common worker.
set
rjmp __udivdi3_umoddi3
ENDF __umoddi3
;; R25:R18 = R24:R18 udiv R17:R10
;; Ordinary ABI-Function
DEFUN __udivdi3
;; Division entry: T = 0 selects "quotient"; falls through.
clt
ENDF __udivdi3
;; Common tail: save the call-saved registers __udivmod64 uses
;; (C0/C1 = R8/R9, C4/C5 = R28/R29), run the worker, restore.
DEFUN __udivdi3_umoddi3
push C0
push C1
push C4
push C5
XCALL __udivmod64
pop C5
pop C4
pop C1
pop C0
ret
ENDF __udivdi3_umoddi3
#endif /* L_udivdi3 */
#if defined (L_udivmod64)
;; Worker Routine for 64-Bit unsigned Quotient and Remainder Computation
;; No Registers saved/restored; the Callers will take Care.
;; Preserves B[] and T-flag
;; T = 0: Compute Quotient in A[]
;; T = 1: Compute Remainder in A[] and shift SS one Bit left
DEFUN __udivmod64
;; Restoring bit-serial 64-bit division.  With SPEED_DIV == 8 (or 16)
;; the leading zero bytes (words) of the quotient are skipped first by
;; pre-shifting the dividend in byte (32-bit) chunks, shortening the
;; main loop accordingly.
;; Clear Remainder (C6, C7 will follow)
clr C0
clr C1
wmov C2, C0
wmov C4, C0
ldi C7, 64
#if SPEED_DIV == 0 || SPEED_DIV == 16
;; Initialize Loop-Counter
mov R_cnt, C7
wmov C6, C0
#endif /* SPEED_DIV */
#if SPEED_DIV == 8
push A7
clr C6
1: ;; Compare shifted Dividend against Divisor
;; If -- even after Shifting -- it is smaller...
CP A7,B0 $ cpc C0,B1 $ cpc C1,B2 $ cpc C2,B3
cpc C3,B4 $ cpc C4,B5 $ cpc C5,B6 $ cpc C6,B7
brcc 2f
;; ...then we can subtract it. Thus, it is legal to shift left
$ mov C6,C5 $ mov C5,C4 $ mov C4,C3
mov C3,C2 $ mov C2,C1 $ mov C1,C0 $ mov C0,A7
mov A7,A6 $ mov A6,A5 $ mov A5,A4 $ mov A4,A3
mov A3,A2 $ mov A2,A1 $ mov A1,A0 $ clr A0
;; 8 Bits are done
subi C7, 8
brne 1b
;; Shifted 64 Bits: A7 has traveled to C7
pop C7
;; Divisor is greater than Dividend. We have:
;; A[] % B[] = A[]
;; A[] / B[] = 0
;; Thus, we can return immediately
rjmp 5f
2: ;; Initialize Bit-Counter with Number of Bits still to be performed
mov R_cnt, C7
;; Push of A7 is not needed because C7 is still 0
pop C7
clr C7
#elif SPEED_DIV == 16
;; Compare shifted Dividend against Divisor
cp A7, B3
cpc C0, B4
cpc C1, B5
cpc C2, B6
cpc C3, B7
brcc 2f
;; Divisor is greater than shifted Dividend: We can shift the Dividend
;; and it is still smaller than the Divisor --> Shift one 32-Bit Chunk
wmov C2,A6 $ wmov C0,A4
wmov A6,A2 $ wmov A4,A0
wmov A2,C6 $ wmov A0,C4
;; Set Bit Counter to 32
lsr R_cnt
2:
#elif SPEED_DIV
#error SPEED_DIV = ?
#endif /* SPEED_DIV */
;; The very Division + Remainder Routine
3: ;; Left-shift Dividend...
lsl A0 $ rol A1 $ rol A2 $ rol A3
rol A4 $ rol A5 $ rol A6 $ rol A7
;; ...into Remainder
rol C0 $ rol C1 $ rol C2 $ rol C3
rol C4 $ rol C5 $ rol C6 $ rol C7
;; Compare Remainder and Divisor
CP C0,B0 $ cpc C1,B1 $ cpc C2,B2 $ cpc C3,B3
cpc C4,B4 $ cpc C5,B5 $ cpc C6,B6 $ cpc C7,B7
brcs 4f
;; Divisor fits into Remainder: Subtract it from Remainder...
SUB C0,B0 $ sbc C1,B1 $ sbc C2,B2 $ sbc C3,B3
sbc C4,B4 $ sbc C5,B5 $ sbc C6,B6 $ sbc C7,B7
;; ...and set according Bit in the upcoming Quotient
;; The Bit will travel to its final Position
ori A0, 1
4: ;; This Bit is done
dec R_cnt
brne 3b
;; __zero_reg__ is 0 again
;; T = 0: We are fine with the Quotient in A[]
;; T = 1: Copy Remainder to A[]
5: brtc 6f
wmov A0, C0
wmov A2, C2
wmov A4, C4
wmov A6, C6
;; Move the Sign of the Result to SS.7
lsl SS
6: ret
ENDF __udivmod64
#endif /* L_udivmod64 */
#if defined (L_divdi3)
;; R25:R18 = R24:R18 mod R17:R10
;; Ordinary ABI-Function
DEFUN __moddi3
;; Signed modulo entry: T = 1 selects "remainder".
set
rjmp __divdi3_moddi3
ENDF __moddi3
;; R25:R18 = R24:R18 div R17:R10
;; Ordinary ABI-Function
DEFUN __divdi3
;; Signed division entry: T = 0 selects "quotient"; falls through.
clt
ENDF __divdi3
DEFUN __divdi3_moddi3
#if SPEED_DIV
mov r31, A7
or r31, B7
brmi 0f
;; Both Signs are 0: the following Complexity is not needed
XJMP __udivdi3_umoddi3
#endif /* SPEED_DIV */
0: ;; The Prologue
;; Save 12 Registers: Y, 17...8
;; No Frame needed
do_prologue_saves 12
;; SS.7 will contain the Sign of the Quotient (A.sign * B.sign)
;; SS.6 will contain the Sign of the Remainder (A.sign)
mov SS, A7
asr SS
;; Adjust Dividend's Sign as needed
#if SPEED_DIV
;; Compiling for Speed we know that at least one Sign must be < 0
;; Thus, if A[] >= 0 then we know B[] < 0
brpl 22f
#else
brpl 21f
#endif /* SPEED_DIV */
XCALL __negdi2
;; Adjust Divisor's Sign and SS.7 as needed
21: tst B7
brpl 3f
22: ldi NN, 1 << 7
eor SS, NN
ldi NN, -1
;; Negate B[] inline: complement all bytes, then add 1 via NEG/SBC
com B4 $ com B5 $ com B6 $ com B7
$ com B1 $ com B2 $ com B3
NEG B0
$ sbc B1,NN $ sbc B2,NN $ sbc B3,NN
sbc B4,NN $ sbc B5,NN $ sbc B6,NN $ sbc B7,NN
3: ;; Do the unsigned 64-Bit Division/Modulo (depending on T-flag)
XCALL __udivmod64
;; Adjust Result's Sign
#ifdef __AVR_ERRATA_SKIP_JMP_CALL__
tst SS
brpl 4f
#else
sbrc SS, 7
#endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */
XCALL __negdi2
4: ;; Epilogue: Restore 12 Registers and return
do_epilogue_restores 12
ENDF __divdi3_moddi3
#endif /* L_divdi3 */
#undef R_cnt
#undef SS
#undef NN
.section .text.libgcc, "ax", @progbits
#define TT __tmp_reg__
#if defined (L_adddi3)
;; (set (reg:DI 18)
;; (plus:DI (reg:DI 18)
;; (reg:DI 10)))
;; Sets the V flag for signed overflow tests
;; Sets the C flag for unsigned overflow tests
;; 64-bit addition: A[] (R25:R18) += B[] (R17:R10).
DEFUN __adddi3
ADD A0,B0 $ adc A1,B1 $ adc A2,B2 $ adc A3,B3
adc A4,B4 $ adc A5,B5 $ adc A6,B6 $ adc A7,B7
ret
ENDF __adddi3
#endif /* L_adddi3 */
#if defined (L_adddi3_s8)
;; (set (reg:DI 18)
;; (plus:DI (reg:DI 18)
;; (sign_extend:SI (reg:QI 26))))
;; Sets the V flag for signed overflow tests
;; Sets the C flag for unsigned overflow tests provided 0 <= R26 < 128
;; A[] += sign-extended R26.  TT is the extension byte: 0x00 or 0xFF.
DEFUN __adddi3_s8
clr TT
sbrc r26, 7
com TT
ADD A0,r26 $ adc A1,TT $ adc A2,TT $ adc A3,TT
adc A4,TT $ adc A5,TT $ adc A6,TT $ adc A7,TT
ret
ENDF __adddi3_s8
#endif /* L_adddi3_s8 */
#if defined (L_subdi3)
;; (set (reg:DI 18)
;; (minus:DI (reg:DI 18)
;; (reg:DI 10)))
;; Sets the V flag for signed overflow tests
;; Sets the C flag for unsigned overflow tests
;; 64-bit subtraction: A[] -= B[].
DEFUN __subdi3
SUB A0,B0 $ sbc A1,B1 $ sbc A2,B2 $ sbc A3,B3
sbc A4,B4 $ sbc A5,B5 $ sbc A6,B6 $ sbc A7,B7
ret
ENDF __subdi3
#endif /* L_subdi3 */
#if defined (L_cmpdi2)
;; (set (cc0)
;; (compare (reg:DI 18)
;; (reg:DI 10)))
;; 64-bit compare: sets flags for A[] - B[] without modifying A[].
DEFUN __cmpdi2
CP A0,B0 $ cpc A1,B1 $ cpc A2,B2 $ cpc A3,B3
cpc A4,B4 $ cpc A5,B5 $ cpc A6,B6 $ cpc A7,B7
ret
ENDF __cmpdi2
#endif /* L_cmpdi2 */
#if defined (L_cmpdi2_s8)
;; (set (cc0)
;; (compare (reg:DI 18)
;; (sign_extend:SI (reg:QI 26))))
;; Compare A[] against sign-extended R26; TT is the extension byte.
DEFUN __cmpdi2_s8
clr TT
sbrc r26, 7
com TT
CP A0,r26 $ cpc A1,TT $ cpc A2,TT $ cpc A3,TT
cpc A4,TT $ cpc A5,TT $ cpc A6,TT $ cpc A7,TT
ret
ENDF __cmpdi2_s8
#endif /* L_cmpdi2_s8 */
#if defined (L_negdi2)
;; (set (reg:DI 18)
;; (neg:DI (reg:DI 18)))
;; Sets the V flag for signed overflow tests
;; In-place 64-bit negation: complement all bytes, then add 1 via
;; NEG of the low byte and SBCI -1 up the chain.
DEFUN __negdi2
com A4 $ com A5 $ com A6 $ com A7
$ com A1 $ com A2 $ com A3
NEG A0
$ sbci A1,-1 $ sbci A2,-1 $ sbci A3,-1
sbci A4,-1 $ sbci A5,-1 $ sbci A6,-1 $ sbci A7,-1
ret
ENDF __negdi2
#endif /* L_negdi2 */
#undef TT
#undef C7
#undef C6
#undef C5
#undef C4
#undef C3
#undef C2
#undef C1
#undef C0
#undef B7
#undef B6
#undef B5
#undef B4
#undef B3
#undef B2
#undef B1
#undef B0
#undef A7
#undef A6
#undef A5
#undef A4
#undef A3
#undef A2
#undef A1
#undef A0
#endif /* !defined (__AVR_TINY__) */
.section .text.libgcc.prologue, "ax", @progbits
/**********************************
* This is a prologue subroutine
**********************************/
#if !defined (__AVR_TINY__)
#if defined (L_prologue)
;; This function does not clobber T-flag; 64-bit division relies on it
;; Common prologue helper: pushes the 18 call-saved registers
;; R2..R17, R28, R29, then allocates a frame of X (R27:R26) bytes by
;; lowering SP, and jumps back to the caller through Z (XIJMP).
;; On devices with a 16-bit SP (non-XMEGA) the two SP halves are
;; written with interrupts disabled so the update is atomic.
DEFUN __prologue_saves__
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push r16
push r17
push r28
push r29
#if !defined (__AVR_HAVE_SPH__)
in r28,__SP_L__
sub r28,r26
out __SP_L__,r28
clr r29
#elif defined (__AVR_XMEGA__)
;; XMEGA updates SP halves atomically in hardware; no cli needed
in r28,__SP_L__
in r29,__SP_H__
sub r28,r26
sbc r29,r27
out __SP_L__,r28
out __SP_H__,r29
#else
in r28,__SP_L__
in r29,__SP_H__
sub r28,r26
sbc r29,r27
in __tmp_reg__,__SREG__
cli
out __SP_H__,r29
out __SREG__,__tmp_reg__ ; restores I-flag; SPL write below is still safe
out __SP_L__,r28
#endif /* #SP = 8/16 */
XIJMP
ENDF __prologue_saves__
#endif /* defined (L_prologue) */
/*
* This is an epilogue subroutine
*/
#if defined (L_epilogue)
;; Common epilogue helper: restores R2..R17 and the saved Y from the
;; frame addressed by Y, releases the frame (r30 holds the amount to
;; add back -- set up by compiler-generated code at the call site;
;; NOTE(review): confirm against the compiler's epilogue emission),
;; then restores SP (atomically where a 16-bit SP exists) and returns.
DEFUN __epilogue_restores__
ldd r2,Y+18
ldd r3,Y+17
ldd r4,Y+16
ldd r5,Y+15
ldd r6,Y+14
ldd r7,Y+13
ldd r8,Y+12
ldd r9,Y+11
ldd r10,Y+10
ldd r11,Y+9
ldd r12,Y+8
ldd r13,Y+7
ldd r14,Y+6
ldd r15,Y+5
ldd r16,Y+4
ldd r17,Y+3
ldd r26,Y+2
#if !defined (__AVR_HAVE_SPH__)
ldd r29,Y+1
add r28,r30
out __SP_L__,r28
mov r28, r26
#elif defined (__AVR_XMEGA__)
ldd r27,Y+1
add r28,r30
adc r29,__zero_reg__
out __SP_L__,r28
out __SP_H__,r29
wmov 28, 26 ; restore caller's Y from the saved copy
#else
ldd r27,Y+1
add r28,r30
adc r29,__zero_reg__
in __tmp_reg__,__SREG__
cli
out __SP_H__,r29
out __SREG__,__tmp_reg__
out __SP_L__,r28
mov_l r28, r26
mov_h r29, r27
#endif /* #SP = 8/16 */
ret
ENDF __epilogue_restores__
#endif /* defined (L_epilogue) */
#endif /* !defined (__AVR_TINY__) */
#ifdef L_exit
.section .fini9,"ax",@progbits
;; _exit / weak exit: placed in .fini9 so that the linker script lays
;; the .fini8 ... .fini1 destructor code between it and the final
;; .fini0 spin loop below.
DEFUN _exit
.weak exit
exit:
ENDF _exit
/* Code from .fini8 ... .fini1 sections inserted by ld script. */
.section .fini0,"ax",@progbits
cli ; disable interrupts, then hang forever
__stop_program:
rjmp __stop_program
#endif /* defined (L_exit) */
#ifdef L_cleanup
;; Weak no-op _cleanup stub; a real implementation (e.g. from a stdio
;; library) can override it thanks to the .weak binding.
.weak _cleanup
.func _cleanup
_cleanup:
ret
.endfunc
#endif /* defined (L_cleanup) */
.section .text.libgcc, "ax", @progbits
#ifdef L_tablejump2
;; Indirect jump through a flash-resident jump table.
;; In: Z (plus R24 on EIJMP devices) = word index of the table entry.
;; Converts the word index to a byte address, loads the target word
;; address from flash and jumps to it.
DEFUN __tablejump2__
lsl r30
rol r31
#if defined (__AVR_HAVE_EIJMP_EICALL__)
;; Word address of gs() jumptable entry in R24:Z
rol r24
out __RAMPZ__, r24
#elif defined (__AVR_HAVE_ELPM__)
;; Word address of jumptable entry in Z
clr __tmp_reg__
rol __tmp_reg__
out __RAMPZ__, __tmp_reg__
#endif
;; Read word address from jumptable and jump
#if defined (__AVR_HAVE_ELPMX__)
elpm __tmp_reg__, Z+
elpm r31, Z
mov r30, __tmp_reg__
#ifdef __AVR_HAVE_RAMPD__
;; Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM
out __RAMPZ__, __zero_reg__
#endif /* RAMPD */
XIJMP
#elif defined (__AVR_HAVE_ELPM__)
;; No ELPM Z+: push the target address and "return" into it
elpm
push r0
adiw r30, 1
elpm
push r0
ret
#elif defined (__AVR_HAVE_LPMX__)
lpm __tmp_reg__, Z+
lpm r31, Z
mov r30, __tmp_reg__
ijmp
#elif defined (__AVR_TINY__)
wsubi 30, -(__AVR_TINY_PM_BASE_ADDRESS__) ; Add PM offset to Z
ld __tmp_reg__, Z+
ld r31, Z ; Use ld instead of lpm to load Z
mov r30, __tmp_reg__
ijmp
#else
;; Plain LPM: push the target address and "return" into it
lpm
push r0
adiw r30, 1
lpm
push r0
ret
#endif
ENDF __tablejump2__
#endif /* L_tablejump2 */
#if defined(__AVR_TINY__)
#ifdef L_copy_data
.section .init4,"ax",@progbits
;; AVR_TINY startup: copy .data initializers from program memory
;; (memory-mapped at __AVR_TINY_PM_BASE_ADDRESS__, so plain LD works)
;; into RAM at __data_start..__data_end.
.global __do_copy_data
__do_copy_data:
ldi r18, hi8(__data_end)
ldi r26, lo8(__data_start)
ldi r27, hi8(__data_start)
ldi r30, lo8(__data_load_start + __AVR_TINY_PM_BASE_ADDRESS__)
ldi r31, hi8(__data_load_start + __AVR_TINY_PM_BASE_ADDRESS__)
rjmp .L__do_copy_data_start
.L__do_copy_data_loop:
ld r19, z+
st X+, r19
.L__do_copy_data_start:
cpi r26, lo8(__data_end)
cpc r27, r18
brne .L__do_copy_data_loop
#endif
#else
#ifdef L_copy_data
.section .init4,"ax",@progbits
;; Startup: copy the .data initializer image from flash
;; (__data_load_start) to RAM (__data_start..__data_end).
;; Three variants depending on flash-access capabilities:
;; ELPM+ELPMX (>64 KiB with post-increment), bare ELPM (manual RAMPZ
;; carry handling), or plain LPM (<= 64 KiB flash).
DEFUN __do_copy_data
#if defined(__AVR_HAVE_ELPMX__)
ldi r17, hi8(__data_end)
ldi r26, lo8(__data_start)
ldi r27, hi8(__data_start)
ldi r30, lo8(__data_load_start)
ldi r31, hi8(__data_load_start)
ldi r16, hh8(__data_load_start)
out __RAMPZ__, r16
rjmp .L__do_copy_data_start
.L__do_copy_data_loop:
elpm r0, Z+
st X+, r0
.L__do_copy_data_start:
cpi r26, lo8(__data_end)
cpc r27, r17
brne .L__do_copy_data_loop
#elif !defined(__AVR_HAVE_ELPMX__) && defined(__AVR_HAVE_ELPM__)
ldi r17, hi8(__data_end)
ldi r26, lo8(__data_start)
ldi r27, hi8(__data_start)
ldi r30, lo8(__data_load_start)
ldi r31, hi8(__data_load_start)
ldi r16, hh8(__data_load_start - 0x10000)
.L__do_copy_data_carry:
inc r16 ; bump RAMPZ when Z wraps a 64 KiB boundary
out __RAMPZ__, r16
rjmp .L__do_copy_data_start
.L__do_copy_data_loop:
elpm
st X+, r0
adiw r30, 1
brcs .L__do_copy_data_carry
.L__do_copy_data_start:
cpi r26, lo8(__data_end)
cpc r27, r17
brne .L__do_copy_data_loop
#elif !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__)
ldi r17, hi8(__data_end)
ldi r26, lo8(__data_start)
ldi r27, hi8(__data_start)
ldi r30, lo8(__data_load_start)
ldi r31, hi8(__data_load_start)
rjmp .L__do_copy_data_start
.L__do_copy_data_loop:
#if defined (__AVR_HAVE_LPMX__)
lpm r0, Z+
#else
lpm
adiw r30, 1
#endif
st X+, r0
.L__do_copy_data_start:
cpi r26, lo8(__data_end)
cpc r27, r17
brne .L__do_copy_data_loop
#endif /* !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__) */
#if defined (__AVR_HAVE_ELPM__) && defined (__AVR_HAVE_RAMPD__)
;; Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM
out __RAMPZ__, __zero_reg__
#endif /* ELPM && RAMPD */
ENDF __do_copy_data
#endif /* L_copy_data */
#endif /* !defined (__AVR_TINY__) */
/* __do_clear_bss is only necessary if there is anything in .bss section. */
#ifdef L_clear_bss
.section .init4,"ax",@progbits
;; Startup: zero-fill .bss (__bss_start..__bss_end) with __zero_reg__.
DEFUN __do_clear_bss
ldi r18, hi8(__bss_end)
ldi r26, lo8(__bss_start)
ldi r27, hi8(__bss_start)
rjmp .do_clear_bss_start
.do_clear_bss_loop:
st X+, __zero_reg__
.do_clear_bss_start:
cpi r26, lo8(__bss_end)
cpc r27, r18
brne .do_clear_bss_loop
ENDF __do_clear_bss
#endif /* L_clear_bss */
/* __do_global_ctors and __do_global_dtors are only necessary
if there are any constructors/destructors. */
#if defined(__AVR_TINY__)
#define cdtors_tst_reg r18
#else
#define cdtors_tst_reg r17
#endif
#ifdef L_ctors
.section .init6,"ax",@progbits
;; Startup: walk the constructor table BACKWARDS from __ctors_end down
;; to __ctors_start, dispatching each entry via __tablejump2__.
;; Y (and r16 for the high byte on EIJMP devices) is the word-index
;; cursor into the table.
DEFUN __do_global_ctors
ldi cdtors_tst_reg, pm_hi8(__ctors_start)
ldi r28, pm_lo8(__ctors_end)
ldi r29, pm_hi8(__ctors_end)
#ifdef __AVR_HAVE_EIJMP_EICALL__
ldi r16, pm_hh8(__ctors_end)
#endif /* HAVE_EIJMP */
rjmp .L__do_global_ctors_start
.L__do_global_ctors_loop:
wsubi 28, 1
#ifdef __AVR_HAVE_EIJMP_EICALL__
sbc r16, __zero_reg__
mov r24, r16
#endif /* HAVE_EIJMP */
mov_h r31, r29
mov_l r30, r28
XCALL __tablejump2__
.L__do_global_ctors_start:
cpi r28, pm_lo8(__ctors_start)
cpc r29, cdtors_tst_reg
#ifdef __AVR_HAVE_EIJMP_EICALL__
ldi r24, pm_hh8(__ctors_start)
cpc r16, r24
#endif /* HAVE_EIJMP */
brne .L__do_global_ctors_loop
ENDF __do_global_ctors
#endif /* L_ctors */
#ifdef L_dtors
.section .fini6,"ax",@progbits
;; Shutdown: walk the destructor table FORWARDS from __dtors_start up
;; to __dtors_end (opposite order to the constructors), dispatching
;; each entry via __tablejump2__.
DEFUN __do_global_dtors
ldi cdtors_tst_reg, pm_hi8(__dtors_end)
ldi r28, pm_lo8(__dtors_start)
ldi r29, pm_hi8(__dtors_start)
#ifdef __AVR_HAVE_EIJMP_EICALL__
ldi r16, pm_hh8(__dtors_start)
#endif /* HAVE_EIJMP */
rjmp .L__do_global_dtors_start
.L__do_global_dtors_loop:
#ifdef __AVR_HAVE_EIJMP_EICALL__
mov r24, r16
#endif /* HAVE_EIJMP */
mov_h r31, r29
mov_l r30, r28
XCALL __tablejump2__
waddi 28, 1
#ifdef __AVR_HAVE_EIJMP_EICALL__
adc r16, __zero_reg__
#endif /* HAVE_EIJMP */
.L__do_global_dtors_start:
cpi r28, pm_lo8(__dtors_end)
cpc r29, cdtors_tst_reg
#ifdef __AVR_HAVE_EIJMP_EICALL__
ldi r24, pm_hh8(__dtors_end)
cpc r16, r24
#endif /* HAVE_EIJMP */
brne .L__do_global_dtors_loop
ENDF __do_global_dtors
#endif /* L_dtors */
#undef cdtors_tst_reg
.section .text.libgcc, "ax", @progbits
#if !defined (__AVR_TINY__)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Loading n bytes from Flash; n = 3,4
;; R22... = Flash[Z]
;; Clobbers: __tmp_reg__
#if (defined (L_load_3) \
|| defined (L_load_4)) \
&& !defined (__AVR_HAVE_LPMX__)
;; Destination
#define D0 22
#define D1 D0+1
#define D2 D0+2
#define D3 D0+3
;; Load one byte of an n-byte value from flash at Z into \dest using
;; plain LPM (no post-increment available).  Advances Z between bytes;
;; after the last byte, rewinds Z by n-1 so Z is net unchanged.
.macro .load dest, n
lpm
mov \dest, r0
.if \dest != D0+\n-1
adiw r30, 1
.else
sbiw r30, \n-1
.endif
.endm
#if defined (L_load_3)
;; R24:R22 = Flash[Z], 3 bytes: reuse the 4-byte loader and discard the
;; top byte by saving/restoring D3 around the call.
DEFUN __load_3
push D3
XCALL __load_4
pop D3
ret
ENDF __load_3
#endif /* L_load_3 */
#if defined (L_load_4)
;; R25:R22 = Flash[Z], 4 bytes via the .load macro; Z is preserved.
DEFUN __load_4
.load D0, 4
.load D1, 4
.load D2, 4
.load D3, 4
ret
ENDF __load_4
#endif /* L_load_4 */
#endif /* L_load_3 || L_load_4 */
#endif /* !defined (__AVR_TINY__) */
#if !defined (__AVR_TINY__)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Loading n bytes from Flash or RAM; n = 1,2,3,4
;; R22... = Flash[R21:Z] or RAM[Z] depending on R21.7
;; Clobbers: __tmp_reg__, R21, R30, R31
#if (defined (L_xload_1) \
|| defined (L_xload_2) \
|| defined (L_xload_3) \
|| defined (L_xload_4))
;; Destination
#define D0 22
#define D1 D0+1
#define D2 D0+2
#define D3 D0+3
;; Register containing bits 16+ of the address
#define HHI8 21
;; Load one byte of an n-byte value from flash at (RAMPZ:)Z into \dest,
;; choosing the best instruction available (ELPM Z+ / ELPM / LPM Z+ /
;; LPM).  On bare-ELPM devices the 24-bit address carry into HHI8 and
;; RAMPZ is handled manually; on RAMPD devices RAMPZ is reset to 0
;; after the final byte.
.macro .xload dest, n
#if defined (__AVR_HAVE_ELPMX__)
elpm \dest, Z+
#elif defined (__AVR_HAVE_ELPM__)
elpm
mov \dest, r0
.if \dest != D0+\n-1
adiw r30, 1
adc HHI8, __zero_reg__
out __RAMPZ__, HHI8
.endif
#elif defined (__AVR_HAVE_LPMX__)
lpm \dest, Z+
#else
lpm
mov \dest, r0
.if \dest != D0+\n-1
adiw r30, 1
.endif
#endif
#if defined (__AVR_HAVE_ELPM__) && defined (__AVR_HAVE_RAMPD__)
.if \dest == D0+\n-1
;; Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM
out __RAMPZ__, __zero_reg__
.endif
#endif
.endm ; .xload
#if defined (L_xload_1)
;; __xload_n: load n bytes into R22... from flash (HHI8.7 clear,
;; 24-bit address HHI8:Z) or from RAM (HHI8.7 set, address Z).
DEFUN __xload_1
#if defined (__AVR_HAVE_LPMX__) && !defined (__AVR_HAVE_ELPM__)
;; Single byte, small flash: select LD vs LPM with skip instructions
sbrc HHI8, 7
ld D0, Z
sbrs HHI8, 7
lpm D0, Z
ret
#else
sbrc HHI8, 7
rjmp 1f
#if defined (__AVR_HAVE_ELPM__)
out __RAMPZ__, HHI8
#endif /* __AVR_HAVE_ELPM__ */
.xload D0, 1
ret
1: ld D0, Z
ret
#endif /* LPMx && ! ELPM */
ENDF __xload_1
#endif /* L_xload_1 */
#if defined (L_xload_2)
DEFUN __xload_2
sbrc HHI8, 7
rjmp 1f
#if defined (__AVR_HAVE_ELPM__)
out __RAMPZ__, HHI8
#endif /* __AVR_HAVE_ELPM__ */
.xload D0, 2
.xload D1, 2
ret
1: ld D0, Z+
ld D1, Z+
ret
ENDF __xload_2
#endif /* L_xload_2 */
#if defined (L_xload_3)
DEFUN __xload_3
sbrc HHI8, 7
rjmp 1f
#if defined (__AVR_HAVE_ELPM__)
out __RAMPZ__, HHI8
#endif /* __AVR_HAVE_ELPM__ */
.xload D0, 3
.xload D1, 3
.xload D2, 3
ret
1: ld D0, Z+
ld D1, Z+
ld D2, Z+
ret
ENDF __xload_3
#endif /* L_xload_3 */
#if defined (L_xload_4)
DEFUN __xload_4
sbrc HHI8, 7
rjmp 1f
#if defined (__AVR_HAVE_ELPM__)
out __RAMPZ__, HHI8
#endif /* __AVR_HAVE_ELPM__ */
.xload D0, 4
.xload D1, 4
.xload D2, 4
.xload D3, 4
ret
1: ld D0, Z+
ld D1, Z+
ld D2, Z+
ld D3, Z+
ret
ENDF __xload_4
#endif /* L_xload_4 */
#endif /* L_xload_{1|2|3|4} */
#endif /* if !defined (__AVR_TINY__) */
#if !defined (__AVR_TINY__)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; memcopy from Address Space __pgmx to RAM
;; R23:Z = Source Address
;; X = Destination Address
;; Clobbers: __tmp_reg__, R23, R24, R25, X, Z
#if defined (L_movmemx)
#define HHI8 23
#define LOOP 24
DEFUN __movmemx_qi
;; #Bytes to copy fit in 8 Bits (1..255)
;; Zero-extend Loop Counter
clr LOOP+1
;; FALLTHRU
ENDF __movmemx_qi
DEFUN __movmemx_hi
;; Read from where?  HHI8 bit 7 set => source is RAM, clear => Flash.
sbrc HHI8, 7
rjmp 1f
;; Read from Flash
#if defined (__AVR_HAVE_ELPM__)
;; ELPM takes address bits 16..23 from RAMPZ.
out __RAMPZ__, HHI8
#endif
0: ;; Load 1 Byte from Flash...
#if defined (__AVR_HAVE_ELPMX__)
elpm r0, Z+
#elif defined (__AVR_HAVE_ELPM__)
elpm
;; No post-increment form: bump Z by hand and propagate the carry
;; into the high address byte, keeping RAMPZ in sync.
adiw r30, 1
adc HHI8, __zero_reg__
out __RAMPZ__, HHI8
#elif defined (__AVR_HAVE_LPMX__)
lpm r0, Z+
#else
lpm
adiw r30, 1
#endif
;; ...and store that Byte to RAM Destination
st X+, r0
sbiw LOOP, 1
brne 0b
#if defined (__AVR_HAVE_ELPM__) && defined (__AVR_HAVE_RAMPD__)
;; Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM
out __RAMPZ__, __zero_reg__
#endif /* ELPM && RAMPD */
ret
;; Read from RAM
1: ;; Read 1 Byte from RAM...
ld r0, Z+
;; and store that Byte to RAM Destination
st X+, r0
sbiw LOOP, 1
brne 1b
ret
ENDF __movmemx_hi
#undef HHI8
#undef LOOP
#endif /* L_movmemx */
#endif /* !defined (__AVR_TINY__) */
.section .text.libgcc.builtins, "ax", @progbits
/**********************************
* Find first set Bit (ffs)
**********************************/
#if defined (L_ffssi2)
;; find first set bit
;; r25:r24 = ffs32 (r25:r22)
;; clobbers: r22, r26
;; Returns 1 + index of the lowest set bit, or 0 if no bit is set.
DEFUN __ffssi2
;; r26 accumulates the bit offset (0/8/16/24) of the first non-zero byte.
clr r26
tst r22
brne 1f
subi r26, -8
or r22, r23
brne 1f
subi r26, -8
or r22, r24
brne 1f
subi r26, -8
or r22, r25
brne 1f
;; All four bytes were zero: r25:r24 are both still 0 => ffs32(0) = 0.
ret
;; r22 now equals the first non-zero byte (all lower bytes OR'ed in were 0);
;; the helper adds the 1-based bit position within it to r26.
1: mov r24, r22
XJMP __loop_ffsqi2
ENDF __ffssi2
#endif /* defined (L_ffssi2) */
#if defined (L_ffshi2)
;; find first set bit
;; r25:r24 = ffs16 (r25:r24)
;; clobbers: r26
DEFUN __ffshi2
clr r26
#ifdef __AVR_ERRATA_SKIP_JMP_CALL__
;; Some cores have a problem skipping a 2-word instruction, and XJMP
;; below may expand to a 2-word JMP: use a branch instead of CPSE.
tst r24
breq 2f
#else
cpse r24, __zero_reg__
#endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */
1: XJMP __loop_ffsqi2
;; Low byte was zero: bias the result by 8 and retry with the high byte.
2: ldi r26, 8
or r24, r25
brne 1b
;; Both bytes zero: r25:r24 is already 0 => ffs16(0) = 0.
ret
ENDF __ffshi2
#endif /* defined (L_ffshi2) */
#if defined (L_loop_ffsqi2)
;; Helper for ffshi2, ffssi2
;; r25:r24 = r26 + zero_extend16 (ffs8(r24))
;; r24 must be != 0
;; clobbers: r26
DEFUN __loop_ffsqi2
;; Shift r24 right until the set bit drops into carry; r26 gains one per
;; shift, ending at its entry value plus the 1-based bit number.
inc r26
lsr r24
brcc __loop_ffsqi2
mov r24, r26
clr r25
ret
ENDF __loop_ffsqi2
#endif /* defined (L_loop_ffsqi2) */
/**********************************
* Count trailing Zeros (ctz)
**********************************/
#if defined (L_ctzsi2)
;; count trailing zeros
;; r25:r24 = ctz32 (r25:r22)
;; clobbers: r26, r22
;; ctz(0) = 255
;; Note that ctz(0) is undefined for GCC
DEFUN __ctzsi2
;; ctz(x) = ffs(x) - 1; for x == 0, ffs returns 0 and r24 wraps to 255.
XCALL __ffssi2
dec r24
ret
ENDF __ctzsi2
#endif /* defined (L_ctzsi2) */
#if defined (L_ctzhi2)
;; count trailing zeros
;; r25:r24 = ctz16 (r25:r24)
;; clobbers: r26
;; ctz(0) = 255
;; Note that ctz(0) is undefined for GCC
DEFUN __ctzhi2
;; ctz(x) = ffs(x) - 1; for x == 0, ffs returns 0 and r24 wraps to 255.
XCALL __ffshi2
dec r24
ret
ENDF __ctzhi2
#endif /* defined (L_ctzhi2) */
/**********************************
* Count leading Zeros (clz)
**********************************/
#if defined (L_clzdi2)
;; count leading zeros
;; r25:r24 = clz64 (r25:r18)
;; clobbers: r22, r23, r26
DEFUN __clzdi2
;; First scan the high 32 bits (r25:r22).
XCALL __clzsi2
;; A result of 32 (bit 5 set) means the high word was all zero:
;; skip the ret, scan the low word and add 32.
sbrs r24, 5
ret
mov_l r22, r18
mov_h r23, r19
mov_l r24, r20
mov_h r25, r21
XCALL __clzsi2
subi r24, -32
ret
ENDF __clzdi2
#endif /* defined (L_clzdi2) */
#if defined (L_clzsi2)
;; count leading zeros
;; r25:r24 = clz32 (r25:r22)
;; clobbers: r26
DEFUN __clzsi2
;; First scan the high 16 bits (r25:r24).
XCALL __clzhi2
;; A result of 16 (bit 4 set) means the high half was all zero:
;; skip the ret, scan the low half and add 16.
sbrs r24, 4
ret
mov_l r24, r22
mov_h r25, r23
XCALL __clzhi2
subi r24, -16
ret
ENDF __clzsi2
#endif /* defined (L_clzsi2) */
#if defined (L_clzhi2)
;; count leading zeros
;; r25:r24 = clz16 (r25:r24)
;; clobbers: r26
DEFUN __clzhi2
clr r26
tst r25
brne 1f
;; High byte zero: pre-count 8 and continue with the low byte in r25.
subi r26, -8
or r25, r24
brne 1f
;; Whole word zero: clz16(0) = 16.
ldi r24, 16
ret
;; If the top nibble is clear (r25 < 16), pre-count 3 and swap the low
;; nibble up so the shift loop handles at most 5 iterations.
1: cpi r25, 16
brsh 3f
subi r26, -3
swap r25
;; Shift left until the set bit falls out the top; r26 counts the zeros.
2: inc r26
3: lsl r25
brcc 2b
mov r24, r26
clr r25
ret
ENDF __clzhi2
#endif /* defined (L_clzhi2) */
/**********************************
* Parity
**********************************/
#if defined (L_paritydi2)
;; r25:r24 = parity64 (r25:r18)
;; clobbers: __tmp_reg__
;; XOR-fold the low four bytes into r24, then defer to parity32.
DEFUN __paritydi2
eor r24, r18
eor r24, r19
eor r24, r20
eor r24, r21
XJMP __paritysi2
ENDF __paritydi2
#endif /* defined (L_paritydi2) */
#if defined (L_paritysi2)
;; r25:r24 = parity32 (r25:r22)
;; clobbers: __tmp_reg__
;; XOR-fold the low two bytes into r24, then defer to parity16.
DEFUN __paritysi2
eor r24, r22
eor r24, r23
XJMP __parityhi2
ENDF __paritysi2
#endif /* defined (L_paritysi2) */
#if defined (L_parityhi2)
;; r25:r24 = parity16 (r25:r24)
;; clobbers: __tmp_reg__
DEFUN __parityhi2
eor r24, r25
;; FALLTHRU
ENDF __parityhi2
;; r25:r24 = parity8 (r24)
;; clobbers: __tmp_reg__
DEFUN __parityqi2
;; parity is in r24[0..7]
mov __tmp_reg__, r24
swap __tmp_reg__
eor r24, __tmp_reg__
;; parity is in r24[0..3]
;; Carry-propagation trick that folds the nibble's parity down; the
;; state markers below track where the parity bit(s) live after each step.
subi r24, -4
andi r24, -5
subi r24, -6
;; parity is in r24[0,3]
sbrc r24, 3
inc r24
;; parity is in r24[0]
andi r24, 1
clr r25
ret
ENDF __parityqi2
#endif /* defined (L_parityhi2) */
/**********************************
* Population Count
**********************************/
#if defined (L_popcounthi2)
;; population count
;; r25:r24 = popcount16 (r25:r24)
;; clobbers: __tmp_reg__
DEFUN __popcounthi2
;; Count the low byte, stash it, count the high byte, then add the two
;; in the shared tail below.  clr r25 zero-extends the 8-bit result.
XCALL __popcountqi2
push r24
mov r24, r25
XCALL __popcountqi2
clr r25
;; FALLTHRU
ENDF __popcounthi2
;; Shared tail: r24 += saved partial count (also tail-jumped to by
;; __popcountsi2 / __popcountdi2).
DEFUN __popcounthi2_tail
pop __tmp_reg__
add r24, __tmp_reg__
ret
ENDF __popcounthi2_tail
#endif /* defined (L_popcounthi2) */
#if defined (L_popcountsi2)
;; population count
;; r25:r24 = popcount32 (r25:r22)
;; clobbers: __tmp_reg__
DEFUN __popcountsi2
XCALL __popcounthi2
push r24
mov_l r24, r22
mov_h r25, r23
XCALL __popcounthi2
XJMP __popcounthi2_tail
ENDF __popcountsi2
#endif /* defined (L_popcountsi2) */
#if defined (L_popcountdi2)
;; population count
;; r25:r24 = popcount64 (r25:r18)
;; clobbers: r22, r23, __tmp_reg__
DEFUN __popcountdi2
XCALL __popcountsi2
push r24
mov_l r22, r18
mov_h r23, r19
mov_l r24, r20
mov_h r25, r21
XCALL __popcountsi2
XJMP __popcounthi2_tail
ENDF __popcountdi2
#endif /* defined (L_popcountdi2) */
#if defined (L_popcountqi2)
;; population count
;; r24 = popcount8 (r24)
;; clobbers: __tmp_reg__
DEFUN __popcountqi2
;; Keep bit 0 in r24 (andi), then add bits 1..6 one at a time via the
;; carry from each lsr; the final adc adds both the carry (bit 6) and
;; what remains in __tmp_reg__ (bit 7).
mov __tmp_reg__, r24
andi r24, 1
lsr __tmp_reg__
lsr __tmp_reg__
adc r24, __zero_reg__
lsr __tmp_reg__
adc r24, __zero_reg__
lsr __tmp_reg__
adc r24, __zero_reg__
lsr __tmp_reg__
adc r24, __zero_reg__
lsr __tmp_reg__
adc r24, __zero_reg__
lsr __tmp_reg__
adc r24, __tmp_reg__
ret
ENDF __popcountqi2
#endif /* defined (L_popcountqi2) */
/**********************************
* Swap bytes
**********************************/
;; swap two registers with different register number
.macro bswap a, b
eor \a, \b
eor \b, \a
eor \a, \b
.endm
#if defined (L_bswapsi2)
;; swap bytes
;; r25:r22 = bswap32 (r25:r22)
DEFUN __bswapsi2
bswap r22, r25
bswap r23, r24
ret
ENDF __bswapsi2
#endif /* defined (L_bswapsi2) */
#if defined (L_bswapdi2)
;; swap bytes
;; r25:r18 = bswap64 (r25:r18)
DEFUN __bswapdi2
bswap r18, r25
bswap r19, r24
bswap r20, r23
bswap r21, r22
ret
ENDF __bswapdi2
#endif /* defined (L_bswapdi2) */
/**********************************
* 64-bit shifts
**********************************/
#if defined (L_ashrdi3)
#define SS __zero_reg__
;; Arithmetic shift right
;; r25:r18 = ashr64 (r25:r18, r17:r16)
DEFUN __ashrdi3
;; For a negative value turn SS (the zero reg, currently 0) into 0xFF so
;; the shared loop below shifts in sign bits instead of zeros.
sbrc r25, 7
com SS
;; FALLTHRU
ENDF __ashrdi3
;; Logic shift right
;; r25:r18 = lshr64 (r25:r18, r17:r16)
DEFUN __lshrdi3
;; Signs are in SS (zero_reg)
mov __tmp_reg__, r16
;; Move whole bytes while the count is >= 8 ...
0: cpi r16, 8
brlo 2f
subi r16, 8
mov r18, r19
mov r19, r20
mov r20, r21
mov r21, r22
mov r22, r23
mov r23, r24
mov r24, r25
mov r25, SS
rjmp 0b
;; ... then shift bit-by-bit for the remaining 0..7 counts.
1: asr SS
ror r25
ror r24
ror r23
ror r22
ror r21
ror r20
ror r19
ror r18
2: dec r16
brpl 1b
;; SS aliases __zero_reg__: restore the zero-register invariant.
clr __zero_reg__
mov r16, __tmp_reg__
ret
ENDF __lshrdi3
#undef SS
#endif /* defined (L_ashrdi3) */
#if defined (L_ashldi3)
;; Shift left
;; r25:r18 = ashl64 (r25:r18, r17:r16)
;; This function does not clobber T.
DEFUN __ashldi3
;; r16 (shift count) is saved in __tmp_reg__ and restored before return.
mov __tmp_reg__, r16
;; Move whole bytes while the count is >= 8, zero-filling from below ...
0: cpi r16, 8
brlo 2f
mov r25, r24
mov r24, r23
mov r23, r22
mov r22, r21
mov r21, r20
mov r20, r19
mov r19, r18
clr r18
subi r16, 8
rjmp 0b
;; ... then shift bit-by-bit for the remaining 0..7 counts.
1: lsl r18
rol r19
rol r20
rol r21
rol r22
rol r23
rol r24
rol r25
2: dec r16
brpl 1b
mov r16, __tmp_reg__
ret
ENDF __ashldi3
#endif /* defined (L_ashldi3) */
#if defined (L_rotldi3)
;; Rotate left
;; r25:r18 = rotl64 (r25:r18, r17:r16)
DEFUN __rotldi3
;; r16 (rotate count) is consumed by the loop; preserve it on the stack.
push r16
;; Rotate whole bytes while the count is >= 8 ...
0: cpi r16, 8
brlo 2f
subi r16, 8
mov __tmp_reg__, r25
mov r25, r24
mov r24, r23
mov r23, r22
mov r22, r21
mov r21, r20
mov r20, r19
mov r19, r18
mov r18, __tmp_reg__
rjmp 0b
;; ... then rotate bit-by-bit: the adc feeds the bit shifted out of
;; r25 (now in carry) back into bit 0 of r18.
1: lsl r18
rol r19
rol r20
rol r21
rol r22
rol r23
rol r24
rol r25
adc r18, __zero_reg__
2: dec r16
brpl 1b
pop r16
ret
ENDF __rotldi3
#endif /* defined (L_rotldi3) */
.section .text.libgcc.fmul, "ax", @progbits
/***********************************************************/
;;; Softmul versions of FMUL, FMULS and FMULSU to implement
;;; __builtin_avr_fmul* if !AVR_HAVE_MUL
/***********************************************************/
#define A1 24
#define B1 25
#define C0 22
#define C1 23
#define A0 __tmp_reg__
#ifdef L_fmuls
;;; r23:r22 = fmuls (r24, r25) like in FMULS instruction
;;; Clobbers: r24, r25, __tmp_reg__
DEFUN __fmuls
;; A0.7 = negate result?  (signs differ => product is negative)
mov A0, A1
eor A0, B1
;; B1 = |B1|
sbrc B1, 7
neg B1
XJMP __fmulsu_exit
ENDF __fmuls
#endif /* L_fmuls */
#ifdef L_fmulsu
;;; r23:r22 = fmulsu (r24, r25) like in FMULSU instruction
;;; Clobbers: r24, r25, __tmp_reg__
DEFUN __fmulsu
;; A0.7 = negate result?  (only A is signed here)
mov A0, A1
;; FALLTHRU
ENDF __fmulsu
;; Helper for __fmuls and __fmulsu
DEFUN __fmulsu_exit
;; A1 = |A1|
sbrc A1, 7
neg A1
#ifdef __AVR_ERRATA_SKIP_JMP_CALL__
;; Some cores have a problem skipping a 2-word instruction
;; (XJMP may expand to a 2-word JMP): branch instead of skipping.
tst A0
brmi 1f
#else
sbrs A0, 7
#endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */
XJMP __fmul
1: XCALL __fmul
;; C = -C iff A0.7 = 1
NEG2 C0
ret
ENDF __fmulsu_exit
#endif /* L_fmulsu */
#ifdef L_fmul
;;; r22:r23 = fmul (r24, r25) like in FMUL instruction
;;; Clobbers: r24, r25, __tmp_reg__
;;; Shift-and-add multiply of 1.7 fixed-point operands; A1:A0 is shifted
;;; right as B1 is shifted left, so the loop ends when B1 runs out of bits.
DEFUN __fmul
; clear result
clr C0
clr C1
clr A0
1: tst B1
;; 1.0 = 0x80, so test for bit 7 of B to see if A must be added to C.
2: brpl 3f
;; C += A
add C0, A0
adc C1, A1
3: ;; A >>= 1
lsr A1
ror A0
;; B <<= 1
lsl B1
brne 2b
ret
ENDF __fmul
#endif /* L_fmul */
#undef A0
#undef A1
#undef B1
#undef C0
#undef C1
#include "lib1funcs-fixed.S"
|
4ms/metamodule-plugin-sdk
| 2,524
|
plugin-libc/libgcc/config/mmix/crtn.S
|
/* Copyright (C) 2001-2022 Free Software Foundation, Inc.
Contributed by Hans-Peter Nilsson <hp@bitrange.com>
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
% This must be the last file on the link-line, allocating global registers
% from the top.
% Register $254 is the stack-pointer.
sp GREG
% Register $253 is frame-pointer. It's not supposed to be used in most
% functions.
fp GREG
% $252 is the static chain register; nested functions receive the
% context of the surrounding function through a pointer passed in this
% register.
static_chain GREG
% $251 carries the address used for aggregates returned in memory
% (presumably the struct-value register; see the MMIX backend).
struct_value_reg GREG
% These registers are used to pass state at an exceptional return (C++).
eh_state_3 GREG
eh_state_2 GREG
eh_state_1 GREG
eh_state_0 GREG
#ifdef __MMIX_ABI_GNU__
% Allocate global registers used by the GNU ABI.
gnu_parm_reg_16 GREG
gnu_parm_reg_15 GREG
gnu_parm_reg_14 GREG
gnu_parm_reg_13 GREG
gnu_parm_reg_12 GREG
gnu_parm_reg_11 GREG
gnu_parm_reg_10 GREG
gnu_parm_reg_9 GREG
gnu_parm_reg_8 GREG
gnu_parm_reg_7 GREG
gnu_parm_reg_6 GREG
gnu_parm_reg_5 GREG
gnu_parm_reg_4 GREG
gnu_parm_reg_3 GREG
gnu_parm_reg_2 GREG
gnu_parm_reg_1 GREG
#endif /* __MMIX_ABI_GNU__ */
% Provide last part of _init and _fini.
% The return address is stored in the topmost stored register in the
% register-stack. We ignore the current value in rJ. It is probably
% garbage because each fragment of _init and _fini may have their own idea
% of the current stack frame, if they're cut out from a "real" function
% like in gcc/crtstuff.c.
% Mechanics: point rJ at 0H, POP there, then restore the caller's rJ
% (saved into $0 by the _init/_fini head in crti.S) and POP for real.
.section .init,"ax",@progbits
GETA $255,0F
PUT rJ,$255
POP 0,0
0H PUT rJ,$0
POP 0,0
.section .fini,"ax",@progbits
GETA $255,0F
PUT rJ,$255
POP 0,0
0H PUT rJ,$0
POP 0,0
|
4ms/metamodule-plugin-sdk
| 5,121
|
plugin-libc/libgcc/config/mmix/crti.S
|
/* Copyright (C) 2001-2022 Free Software Foundation, Inc.
Contributed by Hans-Peter Nilsson <hp@bitrange.com>
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
% This is the crt0 equivalent for mmix-knuth-mmixware, for setting up
% things for compiler-generated assembly-code and for setting up things
% between where the simulator calls and main, and shutting things down on
% the way back. There's an actual crt0.o elsewhere, but that's a dummy.
% This file and the GCC output are supposed to be *reasonably*
% mmixal-compatible to enable people to re-use output with Knuth's mmixal.
% However, forward references are used more freely: we are using the
% binutils tools. Users of mmixal beware; you will sometimes have to
% re-order things or use temporary variables.
% Users of mmixal will want to set up 8H and 9H to be .text and .data
% respectively, so the compiler can switch between them pretending they're
% segments.
% This little treasure (some contents) is required so the 32 lowest
% address bits of user data will not be zero. Because of truncation,
% that would cause testcase gcc.c-torture/execute/980701-1.c to
% incorrectly fail.
.data ! mmixal:= 8H LOC Data_Segment
.p2align 3
dstart OCTA 2009
.text ! mmixal:= 9H LOC 8B; LOC #100
.global Main
% The __Stack_start symbol is provided by the link script.
stackpp OCTA __Stack_start
crtstxt OCTA _init % Assumed to be the lowest executed address.
OCTA __etext % Assumed to be beyond the highest executed address.
crtsdat OCTA dstart % Assumed to be the lowest accessed address.
OCTA _end % Assumed to be beyond the highest accessed address.
% "Main" is the magic symbol the simulator jumps to. We want to go
% on to "main".
% We need to set rG explicitly to avoid hard-to-debug situations.
% rG is the global-register threshold; 32 leaves $0..$31 available as
% locals for compiler output (presumed — confirm against the MMIX ABI).
Main SETL $255,32
PUT rG,$255
% Make sure we have valid memory for addresses in .text and .data (and
% .bss, but we include this in .data), for the benefit of mmo-using
% simulators that require validation of addresses for which contents
% is not present. Due to its implicit-zero nature, zeros in contents
% may be left out in the mmo format, but we don't know the boundaries
% of those zero-chunks; for mmo files from binutils, they correspond
% to the beginning and end of sections in objects before linking. We
% validate the contents by executing PRELD (0; one byte) on each
% 2048-byte-boundary of our .text .data, and we assume this size
% matches the magic lowest-denominator chunk-size for all
% validation-requiring simulators. The effect of the PRELD (any size)
% is assumed to be the same as initial loading of the contents, as
% long as the PRELD happens before the first PUSHJ/PUSHGO. If it
% happens after that, we'll need to distinguish between
% access-for-execution and read/write access.
% Touch every 2048-byte chunk of [crtstxt[0], crtstxt[1]) ...
GETA $255,crtstxt
LDOU $2,$255,0
ANDNL $2,#7ff % Align the start at a 2048-boundary.
LDOU $3,$255,8
SETL $4,2048
0H PRELD 0,$2,0
ADDU $2,$2,$4
CMP $255,$2,$3
BN $255,0B
% ... and likewise for the data range [crtsdat[0], crtsdat[1]).
GETA $255,crtsdat
LDOU $2,$255,0
ANDNL $2,#7ff
LDOU $3,$255,8
0H PRELD 0,$2,0
ADDU $2,$2,$4
CMP $255,$2,$3
BN $255,0B
% Initialize the stack pointer. It is supposedly made a global
% zero-initialized (allowed to change) register in crtn.S; we use the
% explicit number.
GETA $255,stackpp
LDOU $254,$255,0
% Run constructors; the PUSHJ return value is ignored.
PUSHJ $2,_init
#ifdef __MMIX_ABI_GNU__
% Copy argc and argv from their initial position to argument registers
% where necessary.
SET $231,$0
SET $232,$1
#else
% For the mmixware ABI, we need to move arguments. The return value will
% appear in $0.
SET $2,$1
SET $1,$0
#endif
PUSHJ $0,main
% Tail-jump: exit() receives main's return value and never returns.
JMP exit
% Provide the first part of _init and _fini. Save the return address on the
% register stack. We eventually ignore the return address of these
% PUSHJ:s, so it doesn't matter that whether .init and .fini code calls
% functions or where they store rJ. We shouldn't get there, so die
% (TRAP Halt) if that happens.
.section .init,"ax",@progbits
.global _init
_init:
% Stash the caller's rJ in $0; the tail in crtn.S restores it before the
% final POP. Fall into the body at 0H via PUSHJ.
GET $0,:rJ
PUSHJ $1,0F
% Only reached if the PUSHJ somehow returns: halt.
SETL $255,255
TRAP 0,0,0
0H IS @
% Register _fini to be executed as the last atexit function.
#ifdef __MMIX_ABI_GNU__
GETA $231,_fini
#else
GETA $1,_fini
#endif
PUSHJ $0,atexit
.section .fini,"ax",@progbits
.global _fini
_fini:
% Same save-rJ / PUSHJ-into-body pattern as _init above.
GET $0,:rJ
PUSHJ $1,0F
SETL $255,255
TRAP 0,0,0
0H IS @
|
4ms/metamodule-plugin-sdk
| 48,950
|
plugin-libc/libgcc/config/v850/lib1funcs.S
|
/* libgcc routines for NEC V850.
Copyright (C) 1996-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifdef L_mulsi3
.text
.globl ___mulsi3
.type ___mulsi3,@function
___mulsi3:
#ifdef __v850__
/*
   Reference algorithm (note: this sketch uses SHIFT = 12, while the
   code below actually splits the operands into 15-bit chunks, i.e.
   SHIFT = 15 with mask 0x7fff):

#define SHIFT 12
#define MASK ((1 << SHIFT) - 1)
#define STEP(i, j) \
({ \
short a_part = (a >> (i)) & MASK; \
short b_part = (b >> (j)) & MASK; \
int res = (((int) a_part) * ((int) b_part)); \
res; \
})
int
__mulsi3 (unsigned a, unsigned b)
{
return STEP (0, 0) +
((STEP (SHIFT, 0) + STEP (0, SHIFT)) << SHIFT) +
((STEP (0, 2 * SHIFT) + STEP (SHIFT, SHIFT) + STEP (2 * SHIFT, 0))
<< (2 * SHIFT));
}
*/
/* Split a (r6) and b (r7) into 15-bit chunks so each partial product
   fits the 16-bit mulh:
     r14 = a & 0x7fff, r13 = (a >> 15) & 0x7fff, r6 = a >> 30
     r15 = b & 0x7fff, r12 = (b >> 15) & 0x7fff, r7 = b >> 30  */
mov r6, r14
movea lo(32767), r0, r10
and r10, r14
mov r7, r15
and r10, r15
shr 15, r6
mov r6, r13
and r10, r13
shr 15, r7
mov r7, r12
and r10, r12
shr 15, r6
shr 15, r7
/* Accumulate partial products: r10 = low term, r11 = the two <<15
   cross terms, r7 = the three terms shifted by 30 (higher bits of
   those products fall outside 32 bits and are discarded).  */
mov r14, r10
mulh r15, r10
mov r14, r11
mulh r12, r11
mov r13, r16
mulh r15, r16
mulh r14, r7
mulh r15, r6
add r16, r11
mulh r13, r12
shl 15, r11
add r11, r10
add r12, r7
add r6, r7
shl 30, r7
add r7, r10
jmp [r31]
#endif /* __v850__ */
#if defined(__v850e__) || defined(__v850ea__) || defined(__v850e2__) || defined(__v850e2v3__) || defined(__v850e3v5__)
/* This routine is almost unnecessary because gcc
   generates the MUL instruction for the RTX mulsi3.
   But if someone wants to link his application with
   previously compiled v850 objects then they will
   need this function. */
/* It isn't good to put the inst sequence as below;
mul r7, r6,
mov r6, r10, r0
In this case, there is a RAW hazard between them.
MUL inst takes 2 cycle in EX stage, then MOV inst
must wait 1cycle. */
mov r7, r10
mul r6, r10, r0
jmp [r31]
#endif /* __v850e__ */
.size ___mulsi3,.-___mulsi3
#endif /* L_mulsi3 */
#ifdef L_udivsi3
.text
.global ___udivsi3
.type ___udivsi3,@function
/* Unsigned 32-bit divide: r10 = r6 / r7 (r6 is left holding the
   remainder).  Classic shift-subtract: align the divisor (r7) and a
   quotient-bit mask (r12) upward, then subtract back down.  No
   divide-by-zero check.  Clobbers r12, r13, r19.  */
___udivsi3:
#ifdef __v850__
mov 1,r12
mov 0,r10
cmp r6,r7
bnl .L12
movhi hi(-2147483648),r0,r13
cmp r0,r7
blt .L12
/* Shift divisor and mask left while divisor <= dividend and the
   divisor's top bit (tested via r13 = 0x80000000) is still clear.  */
.L4:
shl 1,r7
shl 1,r12
cmp r6,r7
bnl .L12
cmp r0,r12
be .L8
mov r7,r19
and r13,r19
be .L4
br .L12
/* Subtract phase: whenever the shifted divisor fits, subtract it and
   set the corresponding quotient bit; shift both right each step.  */
.L9:
cmp r7,r6
bl .L10
sub r7,r6
or r12,r10
.L10:
shr 1,r12
shr 1,r7
.L12:
cmp r0,r12
bne .L9
.L8:
jmp [r31]
#else /* defined(__v850e__) */
/* See comments at end of __mulsi3. */
mov r6, r10
divu r7, r10, r0
jmp [r31]
#endif /* __v850e__ */
.size ___udivsi3,.-___udivsi3
#endif
#ifdef L_divsi3
.text
.globl ___divsi3
.type ___divsi3,@function
/* Signed 32-bit divide: r10 = r6 / r7.  Strip the signs, divide
   unsigned, then negate the result iff exactly one operand was
   negative.  r22 starts at 1 and is negated once per negative operand,
   so it ends negative exactly when the signs differed.  */
___divsi3:
#ifdef __v850__
add -8,sp
st.w r31,4[sp]
st.w r22,0[sp]
mov 1,r22
tst r7,r7
bp .L3
subr r0,r7
subr r0,r22
.L3:
tst r6,r6
bp .L4
subr r0,r6
subr r0,r22
.L4:
jarl ___udivsi3,r31
cmp r0,r22
bp .L7
/* Signs differed: negate the quotient.  */
subr r0,r10
.L7:
ld.w 0[sp],r22
ld.w 4[sp],r31
add 8,sp
jmp [r31]
#else /* defined(__v850e__) */
/* See comments at end of __mulsi3. */
mov r6, r10
div r7, r10, r0
jmp [r31]
#endif /* __v850e__ */
.size ___divsi3,.-___divsi3
#endif
#ifdef L_umodsi3
.text
.globl ___umodsi3
.type ___umodsi3,@function
/* Unsigned 32-bit remainder: r10 = r6 % r7,
   computed as r6 - (r6 / r7) * r7 via the helper calls below.  */
___umodsi3:
#ifdef __v850__
add -12,sp
st.w r31,8[sp]
st.w r7,4[sp]
st.w r6,0[sp]
jarl ___udivsi3,r31
ld.w 4[sp],r7
mov r10,r6
jarl ___mulsi3,r31
ld.w 0[sp],r6
subr r6,r10
ld.w 8[sp],r31
add 12,sp
jmp [r31]
#else /* defined(__v850e__) */
/* See comments at end of __mulsi3. */
divu r7, r6, r10
jmp [r31]
#endif /* __v850e__ */
.size ___umodsi3,.-___umodsi3
#endif /* L_umodsi3 */
#ifdef L_modsi3
.text
.globl ___modsi3
.type ___modsi3,@function
/* Signed 32-bit remainder: r10 = r6 % r7,
   computed as r6 - (r6 / r7) * r7 via the helper calls below.  */
___modsi3:
#ifdef __v850__
add -12,sp
st.w r31,8[sp]
st.w r7,4[sp]
st.w r6,0[sp]
jarl ___divsi3,r31
ld.w 4[sp],r7
mov r10,r6
jarl ___mulsi3,r31
ld.w 0[sp],r6
subr r6,r10
ld.w 8[sp],r31
add 12,sp
jmp [r31]
#else /* defined(__v850e__) */
/* See comments at end of __mulsi3. */
div r7, r6, r10
jmp [r31]
#endif /* __v850e__ */
.size ___modsi3,.-___modsi3
#endif /* L_modsi3 */
#ifdef L_save_2
.text
.align 2
.globl __save_r2_r29
.type __save_r2_r29,@function
/* Allocate space and save registers 2, 20 .. 29 on the stack. */
/* Called via: jalr __save_r2_r29,r10. */
__save_r2_r29:
#ifdef __EP__
mov ep,r1
addi -44,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
sst.w r20,36[ep]
sst.w r2,40[ep]
mov r1,ep
#else
addi -44,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
st.w r20,36[sp]
st.w r2,40[sp]
#endif
jmp [r10]
.size __save_r2_r29,.-__save_r2_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r2_r29. */
.align 2
.globl __return_r2_r29
.type __return_r2_r29,@function
__return_r2_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
sld.w 36[ep],r20
sld.w 40[ep],r2
addi 44,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r21
ld.w 36[sp],r20
ld.w 40[sp],r2
addi 44,sp,sp
#endif
jmp [r31]
.size __return_r2_r29,.-__return_r2_r29
#endif /* L_save_2 */
#ifdef L_save_20
.text
.align 2
.globl __save_r20_r29
.type __save_r20_r29,@function
/* Allocate space and save registers 20 .. 29 on the stack. */
/* Called via: jalr __save_r20_r29,r10. */
__save_r20_r29:
#ifdef __EP__
mov ep,r1
addi -40,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
sst.w r20,36[ep]
mov r1,ep
#else
addi -40,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
st.w r20,36[sp]
#endif
jmp [r10]
.size __save_r20_r29,.-__save_r20_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r20_r29. */
.align 2
.globl __return_r20_r29
.type __return_r20_r29,@function
__return_r20_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
sld.w 36[ep],r20
addi 40,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r21
ld.w 36[sp],r20
addi 40,sp,sp
#endif
jmp [r31]
.size __return_r20_r29,.-__return_r20_r29
#endif /* L_save_20 */
#ifdef L_save_21
.text
.align 2
.globl __save_r21_r29
.type __save_r21_r29,@function
/* Allocate space and save registers 21 .. 29 on the stack. */
/* Called via: jalr __save_r21_r29,r10. */
__save_r21_r29:
#ifdef __EP__
mov ep,r1
addi -36,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
mov r1,ep
#else
addi -36,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
#endif
jmp [r10]
.size __save_r21_r29,.-__save_r21_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r21_r29. */
.align 2
.globl __return_r21_r29
.type __return_r21_r29,@function
__return_r21_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
addi 36,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r21
addi 36,sp,sp
#endif
jmp [r31]
.size __return_r21_r29,.-__return_r21_r29
#endif /* L_save_21 */
#ifdef L_save_22
.text
.align 2
.globl __save_r22_r29
.type __save_r22_r29,@function
/* Allocate space and save registers 22 .. 29 on the stack. */
/* Called via: jalr __save_r22_r29,r10. */
__save_r22_r29:
#ifdef __EP__
mov ep,r1
addi -32,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
mov r1,ep
#else
addi -32,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
#endif
jmp [r10]
.size __save_r22_r29,.-__save_r22_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r22_r29. */
.align 2
.globl __return_r22_r29
.type __return_r22_r29,@function
__return_r22_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
addi 32,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
addi 32,sp,sp
#endif
jmp [r31]
.size __return_r22_r29,.-__return_r22_r29
#endif /* L_save_22 */
#ifdef L_save_23
.text
.align 2
.globl __save_r23_r29
.type __save_r23_r29,@function
/* Allocate space and save registers 23 .. 29 on the stack. */
/* Called via: jalr __save_r23_r29,r10. */
__save_r23_r29:
#ifdef __EP__
mov ep,r1
addi -28,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
mov r1,ep
#else
addi -28,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
#endif
jmp [r10]
.size __save_r23_r29,.-__save_r23_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r23_r29. */
.align 2
.globl __return_r23_r29
.type __return_r23_r29,@function
__return_r23_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
addi 28,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
addi 28,sp,sp
#endif
jmp [r31]
.size __return_r23_r29,.-__return_r23_r29
#endif /* L_save_23 */
#ifdef L_save_24
.text
.align 2
.globl __save_r24_r29
.type __save_r24_r29,@function
/* Allocate space and save registers 24 .. 29 on the stack. */
/* Called via: jalr __save_r24_r29,r10. */
__save_r24_r29:
#ifdef __EP__
mov ep,r1
addi -24,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
mov r1,ep
#else
addi -24,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
#endif
jmp [r10]
.size __save_r24_r29,.-__save_r24_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r24_r29. */
.align 2
.globl __return_r24_r29
.type __return_r24_r29,@function
__return_r24_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
addi 24,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
addi 24,sp,sp
#endif
jmp [r31]
.size __return_r24_r29,.-__return_r24_r29
#endif /* L_save_24 */
#ifdef L_save_25
.text
.align 2
.globl __save_r25_r29
.type __save_r25_r29,@function
/* Allocate space and save registers 25 .. 29 on the stack. */
/* Called via: jalr __save_r25_r29,r10. */
__save_r25_r29:
#ifdef __EP__
mov ep,r1
addi -20,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
mov r1,ep
#else
addi -20,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
#endif
jmp [r10]
.size __save_r25_r29,.-__save_r25_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r25_r29. */
.align 2
.globl __return_r25_r29
.type __return_r25_r29,@function
__return_r25_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
addi 20,sp,sp
mov r1,ep
#else
/* BUG FIX: these loads previously used N[ep], but in the non-__EP__
   build ep is never pointed at the stack here, so r25..r29 were
   restored from whatever ep happened to address.  Every sibling
   __return_rNN_r29 routine uses N[sp] in this branch; do the same.  */
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
addi 20,sp,sp
#endif
jmp [r31]
.size __return_r25_r29,.-__return_r25_r29
#endif /* L_save_25 */
#ifdef L_save_26
.text
.align 2
.globl __save_r26_r29
.type __save_r26_r29,@function
/* Allocate space and save registers 26 .. 29 on the stack. */
/* Called via: jalr __save_r26_r29,r10. */
__save_r26_r29:
#ifdef __EP__
mov ep,r1
add -16,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
mov r1,ep
#else
add -16,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
#endif
jmp [r10]
.size __save_r26_r29,.-__save_r26_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r26_r29. */
.align 2
.globl __return_r26_r29
.type __return_r26_r29,@function
__return_r26_r29:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
addi 16,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
addi 16,sp,sp
#endif
jmp [r31]
.size __return_r26_r29,.-__return_r26_r29
#endif /* L_save_26 */
#ifdef L_save_27
.text
.align 2
.globl __save_r27_r29
.type __save_r27_r29,@function
/* Allocate space and save registers 27 .. 29 on the stack. */
/* Called via: jalr __save_r27_r29,r10. */
__save_r27_r29:
add -12,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
jmp [r10]
.size __save_r27_r29,.-__save_r27_r29
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r27_r29. */
.align 2
.globl __return_r27_r29
.type __return_r27_r29,@function
__return_r27_r29:
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
add 12,sp
jmp [r31]
.size __return_r27_r29,.-__return_r27_r29
#endif /* L_save_27 */
#ifdef L_save_28
	.text
	.align	2
	.globl	__save_r28_r29
	.type	__save_r28_r29,@function
/* Allocate space and save registers 28,29 on the stack.  */
/* Called via:	jalr __save_r28_r29,r10.  */
/* Prologue helper: r10 holds the caller's resume address.  */
__save_r28_r29:
	add	-8,sp			/* 2 words of save area */
	st.w	r29,0[sp]
	st.w	r28,4[sp]
	jmp	[r10]
	.size	__save_r28_r29,.-__save_r28_r29
/* Restore saved registers, deallocate stack and return to the user.  */
/* Called via:	jr __return_r28_r29.  */
/* Epilogue helper: reloads r28,r29, pops the save area, returns via r31.  */
	.align	2
	.globl	__return_r28_r29
	.type	__return_r28_r29,@function
__return_r28_r29:
	ld.w	0[sp],r29
	ld.w	4[sp],r28
	add	8,sp			/* deallocate save area */
	jmp	[r31]
	.size	__return_r28_r29,.-__return_r28_r29
#endif /* L_save_28 */
#ifdef L_save_29
	.text
	.align	2
	.globl	__save_r29
	.type	__save_r29,@function
/* Allocate space and save register 29 on the stack.  */
/* Called via:	jalr __save_r29,r10.  */
/* Prologue helper: r10 holds the caller's resume address.  */
__save_r29:
	add	-4,sp			/* 1 word of save area */
	st.w	r29,0[sp]
	jmp	[r10]
	.size	__save_r29,.-__save_r29
/* Restore saved register 29, deallocate stack and return to the user.  */
/* Called via:	jr __return_r29.  */
/* Epilogue helper: reloads r29, pops the save area, returns via r31.  */
	.align	2
	.globl	__return_r29
	.type	__return_r29,@function
__return_r29:
	ld.w	0[sp],r29
	add	4,sp			/* deallocate save area */
	jmp	[r31]
	.size	__return_r29,.-__return_r29
#endif /* L_save_29 */
#ifdef L_save_2c
.text
.align 2
.globl __save_r2_r31
.type __save_r2_r31,@function
/* Allocate space and save registers 20 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r2_r31,r10. */
__save_r2_r31:
#ifdef __EP__
mov ep,r1
addi -48,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
sst.w r20,36[ep]
sst.w r2,40[ep]
sst.w r31,44[ep]
mov r1,ep
#else
addi -48,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
st.w r20,36[sp]
st.w r2,40[sp]
st.w r31,44[sp]
#endif
jmp [r10]
.size __save_r2_r31,.-__save_r2_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via:	jr __return_r2_r31.  */
.align 2
.globl __return_r2_r31
.type __return_r2_r31,@function
__return_r2_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
sld.w 36[ep],r20
sld.w 40[ep],r2
sld.w 44[ep],r31
addi 48,sp,sp
mov r1,ep
#else
ld.w 44[sp],r29
ld.w 40[sp],r28
ld.w 36[sp],r27
ld.w 32[sp],r26
ld.w 28[sp],r25
ld.w 24[sp],r24
ld.w 20[sp],r23
ld.w 16[sp],r22
ld.w 12[sp],r21
ld.w 8[sp],r20
ld.w 4[sp],r2
ld.w 0[sp],r31
addi 48,sp,sp
#endif
jmp [r31]
.size __return_r2_r31,.-__return_r2_r31
#endif /* L_save_2c */
#ifdef L_save_20c
.text
.align 2
.globl __save_r20_r31
.type __save_r20_r31,@function
/* Allocate space and save registers 20 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r20_r31,r10. */
__save_r20_r31:
#ifdef __EP__
mov ep,r1
addi -44,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
sst.w r20,36[ep]
sst.w r31,40[ep]
mov r1,ep
#else
addi -44,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
st.w r20,36[sp]
st.w r31,40[sp]
#endif
jmp [r10]
.size __save_r20_r31,.-__save_r20_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r20_r31. */
.align 2
.globl __return_r20_r31
.type __return_r20_r31,@function
__return_r20_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
sld.w 36[ep],r20
sld.w 40[ep],r31
addi 44,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r21
ld.w 36[sp],r20
ld.w 40[sp],r31
addi 44,sp,sp
#endif
jmp [r31]
.size __return_r20_r31,.-__return_r20_r31
#endif /* L_save_20c */
#ifdef L_save_21c
.text
.align 2
.globl __save_r21_r31
.type __save_r21_r31,@function
/* Allocate space and save registers 21 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r21_r31,r10. */
__save_r21_r31:
#ifdef __EP__
mov ep,r1
addi -40,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r21,32[ep]
sst.w r31,36[ep]
mov r1,ep
jmp [r10]
#else
addi -40,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r21,32[sp]
st.w r31,36[sp]
jmp [r10]
#endif
.size __save_r21_r31,.-__save_r21_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r21_r31. */
.align 2
.globl __return_r21_r31
.type __return_r21_r31,@function
__return_r21_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r21
sld.w 36[ep],r31
addi 40,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r21
ld.w 36[sp],r31
addi 40,sp,sp
#endif
jmp [r31]
.size __return_r21_r31,.-__return_r21_r31
#endif /* L_save_21c */
#ifdef L_save_22c
.text
.align 2
.globl __save_r22_r31
.type __save_r22_r31,@function
/* Allocate space and save registers 22 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r22_r31,r10. */
__save_r22_r31:
#ifdef __EP__
mov ep,r1
addi -36,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r22,28[ep]
sst.w r31,32[ep]
mov r1,ep
#else
addi -36,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r22,28[sp]
st.w r31,32[sp]
#endif
jmp [r10]
.size __save_r22_r31,.-__save_r22_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r22_r31. */
.align 2
.globl __return_r22_r31
.type __return_r22_r31,@function
__return_r22_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r22
sld.w 32[ep],r31
addi 36,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r22
ld.w 32[sp],r31
addi 36,sp,sp
#endif
jmp [r31]
.size __return_r22_r31,.-__return_r22_r31
#endif /* L_save_22c */
#ifdef L_save_23c
.text
.align 2
.globl __save_r23_r31
.type __save_r23_r31,@function
/* Allocate space and save registers 23 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r23_r31,r10. */
__save_r23_r31:
#ifdef __EP__
mov ep,r1
addi -32,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r23,24[ep]
sst.w r31,28[ep]
mov r1,ep
#else
addi -32,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r23,24[sp]
st.w r31,28[sp]
#endif
jmp [r10]
.size __save_r23_r31,.-__save_r23_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r23_r31. */
.align 2
.globl __return_r23_r31
.type __return_r23_r31,@function
__return_r23_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r23
sld.w 28[ep],r31
addi 32,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r23
ld.w 28[sp],r31
addi 32,sp,sp
#endif
jmp [r31]
.size __return_r23_r31,.-__return_r23_r31
#endif /* L_save_23c */
#ifdef L_save_24c
.text
.align 2
.globl __save_r24_r31
.type __save_r24_r31,@function
/* Allocate space and save registers 24 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r24_r31,r10. */
__save_r24_r31:
#ifdef __EP__
mov ep,r1
addi -28,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r24,20[ep]
sst.w r31,24[ep]
mov r1,ep
#else
addi -28,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r24,20[sp]
st.w r31,24[sp]
#endif
jmp [r10]
.size __save_r24_r31,.-__save_r24_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r24_r31. */
.align 2
.globl __return_r24_r31
.type __return_r24_r31,@function
__return_r24_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r24
sld.w 24[ep],r31
addi 28,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r24
ld.w 24[sp],r31
addi 28,sp,sp
#endif
jmp [r31]
.size __return_r24_r31,.-__return_r24_r31
#endif /* L_save_24c */
#ifdef L_save_25c
.text
.align 2
.globl __save_r25_r31
.type __save_r25_r31,@function
/* Allocate space and save registers 25 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r25_r31,r10. */
__save_r25_r31:
#ifdef __EP__
mov ep,r1
addi -24,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r25,16[ep]
sst.w r31,20[ep]
mov r1,ep
#else
addi -24,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r25,16[sp]
st.w r31,20[sp]
#endif
jmp [r10]
.size __save_r25_r31,.-__save_r25_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r25_r31. */
.align 2
.globl __return_r25_r31
.type __return_r25_r31,@function
__return_r25_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r25
sld.w 20[ep],r31
addi 24,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r25
ld.w 20[sp],r31
addi 24,sp,sp
#endif
jmp [r31]
.size __return_r25_r31,.-__return_r25_r31
#endif /* L_save_25c */
#ifdef L_save_26c
.text
.align 2
.globl __save_r26_r31
.type __save_r26_r31,@function
/* Allocate space and save registers 26 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r26_r31,r10. */
__save_r26_r31:
#ifdef __EP__
mov ep,r1
addi -20,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r26,12[ep]
sst.w r31,16[ep]
mov r1,ep
#else
addi -20,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r26,12[sp]
st.w r31,16[sp]
#endif
jmp [r10]
.size __save_r26_r31,.-__save_r26_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r26_r31. */
.align 2
.globl __return_r26_r31
.type __return_r26_r31,@function
__return_r26_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r26
sld.w 16[ep],r31
addi 20,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r26
ld.w 16[sp],r31
addi 20,sp,sp
#endif
jmp [r31]
.size __return_r26_r31,.-__return_r26_r31
#endif /* L_save_26c */
#ifdef L_save_27c
.text
.align 2
.globl __save_r27_r31
.type __save_r27_r31,@function
/* Allocate space and save registers 27 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: jalr __save_r27_r31,r10. */
__save_r27_r31:
#ifdef __EP__
mov ep,r1
addi -16,sp,sp
mov sp,ep
sst.w r29,0[ep]
sst.w r28,4[ep]
sst.w r27,8[ep]
sst.w r31,12[ep]
mov r1,ep
#else
addi -16,sp,sp
st.w r29,0[sp]
st.w r28,4[sp]
st.w r27,8[sp]
st.w r31,12[sp]
#endif
jmp [r10]
.size __save_r27_r31,.-__save_r27_r31
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: jr __return_r27_r31. */
.align 2
.globl __return_r27_r31
.type __return_r27_r31,@function
__return_r27_r31:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 0[ep],r29
sld.w 4[ep],r28
sld.w 8[ep],r27
sld.w 12[ep],r31
addi 16,sp,sp
mov r1,ep
#else
ld.w 0[sp],r29
ld.w 4[sp],r28
ld.w 8[sp],r27
ld.w 12[sp],r31
addi 16,sp,sp
#endif
jmp [r31]
.size __return_r27_r31,.-__return_r27_r31
#endif /* L_save_27c */
#ifdef L_save_28c
	.text
	.align	2
	.globl	__save_r28_r31
	.type	__save_r28_r31,@function
/* Allocate space and save registers 28 .. 29, 31 on the stack.  */
/* Also allocate space for the argument save area.  */
/* Called via:	jalr __save_r28_r31,r10.  */
/* Prologue helper: r10 holds the caller's resume address.  */
__save_r28_r31:
	addi	-12,sp,sp		/* 3 words of save area */
	st.w	r29,0[sp]
	st.w	r28,4[sp]
	st.w	r31,8[sp]
	jmp	[r10]
	.size	__save_r28_r31,.-__save_r28_r31
/* Restore saved registers, deallocate stack and return to the user.  */
/* Called via:	jr __return_r28_r31.  */
/* Epilogue helper: r31 (lp) is reloaded first, then jumped through.  */
	.align	2
	.globl	__return_r28_r31
	.type	__return_r28_r31,@function
__return_r28_r31:
	ld.w	0[sp],r29
	ld.w	4[sp],r28
	ld.w	8[sp],r31
	addi	12,sp,sp		/* deallocate save area */
	jmp	[r31]
	.size	__return_r28_r31,.-__return_r28_r31
#endif /* L_save_28c */
#ifdef L_save_29c
	.text
	.align	2
	.globl	__save_r29_r31
	.type	__save_r29_r31,@function
/* Allocate space and save registers 29 & 31 on the stack.  */
/* Also allocate space for the argument save area.  */
/* Called via:	jalr __save_r29_r31,r10.  */
/* Prologue helper: r10 holds the caller's resume address.  */
__save_r29_r31:
	addi	-8,sp,sp		/* 2 words of save area */
	st.w	r29,0[sp]
	st.w	r31,4[sp]
	jmp	[r10]
	.size	__save_r29_r31,.-__save_r29_r31
/* Restore saved registers, deallocate stack and return to the user.  */
/* Called via:	jr __return_r29_r31.  */
/* Epilogue helper: r31 (lp) is reloaded first, then jumped through.  */
	.align	2
	.globl	__return_r29_r31
	.type	__return_r29_r31,@function
__return_r29_r31:
	ld.w	0[sp],r29
	ld.w	4[sp],r31
	addi	8,sp,sp			/* deallocate save area */
	jmp	[r31]
	.size	__return_r29_r31,.-__return_r29_r31
#endif /* L_save_29c */
#ifdef L_save_31c
	.text
	.align	2
	.globl	__save_r31
	.type	__save_r31,@function
/* Allocate space and save register 31 on the stack.  */
/* Also allocate space for the argument save area.  */
/* Called via:	jalr __save_r31,r10.  */
/* Prologue helper: r10 holds the caller's resume address.  */
__save_r31:
	addi	-4,sp,sp		/* 1 word of save area */
	st.w	r31,0[sp]
	jmp	[r10]
	.size	__save_r31,.-__save_r31
/* Restore saved registers, deallocate stack and return to the user.  */
/* Called via:	jr __return_r31.  */
/* Epilogue helper: r31 (lp) is reloaded first, then jumped through.  */
	.align	2
	.globl	__return_r31
	.type	__return_r31,@function
__return_r31:
	ld.w	0[sp],r31
	addi	4,sp,sp			/* deallocate save area */
	jmp	[r31]
	.size	__return_r31,.-__return_r31
#endif /* L_save_31c */
#ifdef L_save_interrupt
	.text
	.align	2
	.globl	__save_interrupt
	.type	__save_interrupt,@function
/* Save registers r1, r4 on stack and load up with expected values.  */
/* Note, 20 bytes of stack have already been allocated.  */
/* Called via:	jalr __save_interrupt,r10.  */
/* Interrupt prologue helper: the compiler-emitted stub has already
   allocated 20 bytes and stored r10 at 12[sp] and r11 at 16[sp] (see the
   commented-out sequence below); this routine fills slots 0..8 with
   ep, gp and r1, then points ep/gp at the program's __ep/__gp anchors
   so compiled handler code can use short-form addressing.  */
__save_interrupt:
	/* add -20,sp ; st.w r11,16[sp] ; st.w r10,12[sp] ; */
	st.w	ep,0[sp]
	st.w	gp,4[sp]
	st.w	r1,8[sp]
	movhi	hi(__ep),r0,ep		/* ep = &__ep (element pointer anchor) */
	movea	lo(__ep),ep,ep
	movhi	hi(__gp),r0,gp		/* gp = &__gp (global pointer anchor) */
	movea	lo(__gp),gp,gp
	jmp	[r10]
	.size	__save_interrupt,.-__save_interrupt
/* Restore saved registers, deallocate stack and return from the interrupt.  */
/* Called via:	jr __return_interrupt.  */
/* Reloads everything __save_interrupt and its caller stub stored
   (ep, gp, r1, r10, r11), pops the 20-byte frame and executes reti.  */
	.align	2
	.globl	__return_interrupt
	.type	__return_interrupt,@function
__return_interrupt:
	ld.w	0[sp],ep
	ld.w	4[sp],gp
	ld.w	8[sp],r1
	ld.w	12[sp],r10
	ld.w	16[sp],r11
	addi	20,sp,sp		/* deallocate the interrupt frame */
	reti
	.size	__return_interrupt,.-__return_interrupt
#endif /* L_save_interrupt */
#ifdef L_save_all_interrupt
.text
.align 2
.globl __save_all_interrupt
.type __save_all_interrupt,@function
/* Save all registers except for those saved in __save_interrupt. */
/* Allocate enough stack for all of the registers & 16 bytes of space. */
/* Called via: jalr __save_all_interrupt,r10. */
__save_all_interrupt:
addi -104,sp,sp
#ifdef __EP__
mov ep,r1
mov sp,ep
sst.w r31,100[ep]
sst.w r2,96[ep]
sst.w gp,92[ep]
sst.w r6,88[ep]
sst.w r7,84[ep]
sst.w r8,80[ep]
sst.w r9,76[ep]
sst.w r11,72[ep]
sst.w r12,68[ep]
sst.w r13,64[ep]
sst.w r14,60[ep]
sst.w r15,56[ep]
sst.w r16,52[ep]
sst.w r17,48[ep]
sst.w r18,44[ep]
sst.w r19,40[ep]
sst.w r20,36[ep]
sst.w r21,32[ep]
sst.w r22,28[ep]
sst.w r23,24[ep]
sst.w r24,20[ep]
sst.w r25,16[ep]
sst.w r26,12[ep]
sst.w r27,8[ep]
sst.w r28,4[ep]
sst.w r29,0[ep]
mov r1,ep
#else
st.w r31,100[sp]
st.w r2,96[sp]
st.w gp,92[sp]
st.w r6,88[sp]
st.w r7,84[sp]
st.w r8,80[sp]
st.w r9,76[sp]
st.w r11,72[sp]
st.w r12,68[sp]
st.w r13,64[sp]
st.w r14,60[sp]
st.w r15,56[sp]
st.w r16,52[sp]
st.w r17,48[sp]
st.w r18,44[sp]
st.w r19,40[sp]
st.w r20,36[sp]
st.w r21,32[sp]
st.w r22,28[sp]
st.w r23,24[sp]
st.w r24,20[sp]
st.w r25,16[sp]
st.w r26,12[sp]
st.w r27,8[sp]
st.w r28,4[sp]
st.w r29,0[sp]
#endif
jmp [r10]
.size __save_all_interrupt,.-__save_all_interrupt
.globl __restore_all_interrupt
.type __restore_all_interrupt,@function
/* Restore all registers saved in __save_all_interrupt and
deallocate the stack space. */
/* Called via: jalr __restore_all_interrupt,r10. */
__restore_all_interrupt:
#ifdef __EP__
mov ep,r1
mov sp,ep
sld.w 100[ep],r31
sld.w 96[ep],r2
sld.w 92[ep],gp
sld.w 88[ep],r6
sld.w 84[ep],r7
sld.w 80[ep],r8
sld.w 76[ep],r9
sld.w 72[ep],r11
sld.w 68[ep],r12
sld.w 64[ep],r13
sld.w 60[ep],r14
sld.w 56[ep],r15
sld.w 52[ep],r16
sld.w 48[ep],r17
sld.w 44[ep],r18
sld.w 40[ep],r19
sld.w 36[ep],r20
sld.w 32[ep],r21
sld.w 28[ep],r22
sld.w 24[ep],r23
sld.w 20[ep],r24
sld.w 16[ep],r25
sld.w 12[ep],r26
sld.w 8[ep],r27
sld.w 4[ep],r28
sld.w 0[ep],r29
mov r1,ep
#else
ld.w 100[sp],r31
ld.w 96[sp],r2
ld.w 92[sp],gp
ld.w 88[sp],r6
ld.w 84[sp],r7
ld.w 80[sp],r8
ld.w 76[sp],r9
ld.w 72[sp],r11
ld.w 68[sp],r12
ld.w 64[sp],r13
ld.w 60[sp],r14
ld.w 56[sp],r15
ld.w 52[sp],r16
ld.w 48[sp],r17
ld.w 44[sp],r18
ld.w 40[sp],r19
ld.w 36[sp],r20
ld.w 32[sp],r21
ld.w 28[sp],r22
ld.w 24[sp],r23
ld.w 20[sp],r24
ld.w 16[sp],r25
ld.w 12[sp],r26
ld.w 8[sp],r27
ld.w 4[sp],r28
ld.w 0[sp],r29
#endif
addi 104,sp,sp
jmp [r10]
.size __restore_all_interrupt,.-__restore_all_interrupt
#endif /* L_save_all_interrupt */
#if defined __V850_CALLT__
#if defined(__v850e__) || defined(__v850e1__) || defined(__v850e2__) || defined(__v850e2v3__) || defined(__v850e3v5__)
#ifdef L_callt_save_r2_r29
/* Put these functions into the call table area. */
.call_table_text
/* Allocate space and save registers 2, 20 .. 29 on the stack. */
/* Called via: callt ctoff(__callt_save_r2_r29). */
.align 2
.L_save_r2_r29:
add -4, sp
st.w r2, 0[sp]
prepare {r20 - r29}, 0
ctret
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: callt ctoff(__callt_return_r2_r29). */
.align 2
.L_return_r2_r29:
dispose 0, {r20-r29}
ld.w 0[sp], r2
add 4, sp
jmp [r31]
/* Place the offsets of the start of these routines into the call table. */
.call_table_data
.global __callt_save_r2_r29
.type __callt_save_r2_r29,@function
__callt_save_r2_r29: .short ctoff(.L_save_r2_r29)
.global __callt_return_r2_r29
.type __callt_return_r2_r29,@function
__callt_return_r2_r29: .short ctoff(.L_return_r2_r29)
#endif /* L_callt_save_r2_r29. */
#ifdef L_callt_save_r2_r31
/* Put these functions into the call table area. */
.call_table_text
/* Allocate space and save registers 2 and 20 .. 29, 31 on the stack. */
/* Also allocate space for the argument save area. */
/* Called via: callt ctoff(__callt_save_r2_r31). */
.align 2
.L_save_r2_r31:
add -4, sp
st.w r2, 0[sp]
prepare {r20 - r29, r31}, 0
ctret
/* Restore saved registers, deallocate stack and return to the user. */
/* Called via: callt ctoff(__callt_return_r2_r31). */
.align 2
.L_return_r2_r31:
dispose 0, {r20 - r29, r31}
ld.w 0[sp], r2
addi 4, sp, sp
jmp [r31]
/* Place the offsets of the start of these routines into the call table. */
.call_table_data
.global __callt_save_r2_r31
.type __callt_save_r2_r31,@function
__callt_save_r2_r31: .short ctoff(.L_save_r2_r31)
.global __callt_return_r2_r31
.type __callt_return_r2_r31,@function
__callt_return_r2_r31: .short ctoff(.L_return_r2_r31)
#endif /* L_callt_save_r2_r31 */
#ifdef L_callt_save_interrupt
/* Put these functions into the call table area. */
.call_table_text
/* Save registers r1, ep, gp, r10 on stack and load up with expected values. */
/* Called via: callt ctoff(__callt_save_interrupt). */
.align 2
.L_save_interrupt:
/* SP has already been moved before callt ctoff(_save_interrupt). */
	/* R1,R10,R11,ctpc,ctpsw have already been saved before callt ctoff(_save_interrupt).  */
/* addi -28, sp, sp */
/* st.w r1, 24[sp] */
/* st.w r10, 12[sp] */
/* st.w r11, 16[sp] */
/* stsr ctpc, r10 */
/* st.w r10, 20[sp] */
/* stsr ctpsw, r10 */
/* st.w r10, 24[sp] */
st.w ep, 0[sp]
st.w gp, 4[sp]
st.w r1, 8[sp]
mov hilo(__ep),ep
mov hilo(__gp),gp
ctret
.call_table_text
/* Restore saved registers, deallocate stack and return from the interrupt. */
/* Called via: callt ctoff(__callt_restore_interrupt). */
.align 2
.globl __return_interrupt
.type __return_interrupt,@function
.L_return_interrupt:
ld.w 24[sp], r1
ldsr r1, ctpsw
ld.w 20[sp], r1
ldsr r1, ctpc
ld.w 16[sp], r11
ld.w 12[sp], r10
ld.w 8[sp], r1
ld.w 4[sp], gp
ld.w 0[sp], ep
addi 28, sp, sp
reti
/* Place the offsets of the start of these routines into the call table. */
.call_table_data
.global __callt_save_interrupt
.type __callt_save_interrupt,@function
__callt_save_interrupt: .short ctoff(.L_save_interrupt)
.global __callt_return_interrupt
.type __callt_return_interrupt,@function
__callt_return_interrupt: .short ctoff(.L_return_interrupt)
#endif /* L_callt_save_interrupt */
#ifdef L_callt_save_all_interrupt
/* Put these functions into the call table area. */
.call_table_text
/* Save all registers except for those saved in __save_interrupt. */
/* Allocate enough stack for all of the registers & 16 bytes of space. */
/* Called via: callt ctoff(__callt_save_all_interrupt). */
.align 2
.L_save_all_interrupt:
addi -60, sp, sp
#ifdef __EP__
mov ep, r1
mov sp, ep
sst.w r2, 56[ep]
sst.w r5, 52[ep]
sst.w r6, 48[ep]
sst.w r7, 44[ep]
sst.w r8, 40[ep]
sst.w r9, 36[ep]
sst.w r11, 32[ep]
sst.w r12, 28[ep]
sst.w r13, 24[ep]
sst.w r14, 20[ep]
sst.w r15, 16[ep]
sst.w r16, 12[ep]
sst.w r17, 8[ep]
sst.w r18, 4[ep]
sst.w r19, 0[ep]
mov r1, ep
#else
st.w r2, 56[sp]
st.w r5, 52[sp]
st.w r6, 48[sp]
st.w r7, 44[sp]
st.w r8, 40[sp]
st.w r9, 36[sp]
st.w r11, 32[sp]
st.w r12, 28[sp]
st.w r13, 24[sp]
st.w r14, 20[sp]
st.w r15, 16[sp]
st.w r16, 12[sp]
st.w r17, 8[sp]
st.w r18, 4[sp]
st.w r19, 0[sp]
#endif
prepare {r20 - r29, r31}, 0
ctret
/* Restore all registers saved in __save_all_interrupt
deallocate the stack space. */
/* Called via: callt ctoff(__callt_restore_all_interrupt). */
.align 2
.L_restore_all_interrupt:
dispose 0, {r20 - r29, r31}
#ifdef __EP__
mov ep, r1
mov sp, ep
sld.w 0 [ep], r19
sld.w 4 [ep], r18
sld.w 8 [ep], r17
sld.w 12[ep], r16
sld.w 16[ep], r15
sld.w 20[ep], r14
sld.w 24[ep], r13
sld.w 28[ep], r12
sld.w 32[ep], r11
sld.w 36[ep], r9
sld.w 40[ep], r8
sld.w 44[ep], r7
sld.w 48[ep], r6
sld.w 52[ep], r5
sld.w 56[ep], r2
mov r1, ep
#else
ld.w 0 [sp], r19
ld.w 4 [sp], r18
ld.w 8 [sp], r17
ld.w 12[sp], r16
ld.w 16[sp], r15
ld.w 20[sp], r14
ld.w 24[sp], r13
ld.w 28[sp], r12
ld.w 32[sp], r11
ld.w 36[sp], r9
ld.w 40[sp], r8
ld.w 44[sp], r7
ld.w 48[sp], r6
ld.w 52[sp], r5
ld.w 56[sp], r2
#endif
addi 60, sp, sp
ctret
/* Place the offsets of the start of these routines into the call table. */
.call_table_data
.global __callt_save_all_interrupt
.type __callt_save_all_interrupt,@function
__callt_save_all_interrupt: .short ctoff(.L_save_all_interrupt)
.global __callt_restore_all_interrupt
.type __callt_restore_all_interrupt,@function
__callt_restore_all_interrupt: .short ctoff(.L_restore_all_interrupt)
#endif /* L_callt_save_all_interrupt */
#define MAKE_CALLT_FUNCS( START ) \
.call_table_text ;\
.align 2 ;\
/* Allocate space and save registers START .. r29 on the stack. */ ;\
/* Called via: callt ctoff(__callt_save_START_r29). */ ;\
.L_save_##START##_r29: ;\
prepare { START - r29 }, 0 ;\
ctret ;\
;\
/* Restore saved registers, deallocate stack and return. */ ;\
/* Called via: callt ctoff(__return_START_r29). */ ;\
.align 2 ;\
.L_return_##START##_r29: ;\
dispose 0, { START - r29 }, r31 ;\
;\
/* Place the offsets of the start of these funcs into the call table. */;\
.call_table_data ;\
;\
.global __callt_save_##START##_r29 ;\
.type __callt_save_##START##_r29,@function ;\
__callt_save_##START##_r29: .short ctoff(.L_save_##START##_r29 ) ;\
;\
.global __callt_return_##START##_r29 ;\
.type __callt_return_##START##_r29,@function ;\
__callt_return_##START##_r29: .short ctoff(.L_return_##START##_r29 )
#define MAKE_CALLT_CFUNCS( START ) \
.call_table_text ;\
.align 2 ;\
/* Allocate space and save registers START .. r31 on the stack. */ ;\
/* Called via: callt ctoff(__callt_save_START_r31c). */ ;\
.L_save_##START##_r31c: ;\
prepare { START - r29, r31}, 0 ;\
ctret ;\
;\
/* Restore saved registers, deallocate stack and return. */ ;\
/* Called via: callt ctoff(__return_START_r31c). */ ;\
.align 2 ;\
.L_return_##START##_r31c: ;\
dispose 0, { START - r29, r31}, r31 ;\
;\
/* Place the offsets of the start of these funcs into the call table. */;\
.call_table_data ;\
;\
.global __callt_save_##START##_r31c ;\
.type __callt_save_##START##_r31c,@function ;\
__callt_save_##START##_r31c: .short ctoff(.L_save_##START##_r31c ) ;\
;\
.global __callt_return_##START##_r31c ;\
.type __callt_return_##START##_r31c,@function ;\
__callt_return_##START##_r31c: .short ctoff(.L_return_##START##_r31c )
#ifdef L_callt_save_20
MAKE_CALLT_FUNCS (r20)
#endif
#ifdef L_callt_save_21
MAKE_CALLT_FUNCS (r21)
#endif
#ifdef L_callt_save_22
MAKE_CALLT_FUNCS (r22)
#endif
#ifdef L_callt_save_23
MAKE_CALLT_FUNCS (r23)
#endif
#ifdef L_callt_save_24
MAKE_CALLT_FUNCS (r24)
#endif
#ifdef L_callt_save_25
MAKE_CALLT_FUNCS (r25)
#endif
#ifdef L_callt_save_26
MAKE_CALLT_FUNCS (r26)
#endif
#ifdef L_callt_save_27
MAKE_CALLT_FUNCS (r27)
#endif
#ifdef L_callt_save_28
MAKE_CALLT_FUNCS (r28)
#endif
#ifdef L_callt_save_29
MAKE_CALLT_FUNCS (r29)
#endif
#ifdef L_callt_save_20c
MAKE_CALLT_CFUNCS (r20)
#endif
#ifdef L_callt_save_21c
MAKE_CALLT_CFUNCS (r21)
#endif
#ifdef L_callt_save_22c
MAKE_CALLT_CFUNCS (r22)
#endif
#ifdef L_callt_save_23c
MAKE_CALLT_CFUNCS (r23)
#endif
#ifdef L_callt_save_24c
MAKE_CALLT_CFUNCS (r24)
#endif
#ifdef L_callt_save_25c
MAKE_CALLT_CFUNCS (r25)
#endif
#ifdef L_callt_save_26c
MAKE_CALLT_CFUNCS (r26)
#endif
#ifdef L_callt_save_27c
MAKE_CALLT_CFUNCS (r27)
#endif
#ifdef L_callt_save_28c
MAKE_CALLT_CFUNCS (r28)
#endif
#ifdef L_callt_save_29c
MAKE_CALLT_CFUNCS (r29)
#endif
#ifdef L_callt_save_31c
.call_table_text
.align 2
/* Allocate space and save register r31 on the stack. */
/* Called via: callt ctoff(__callt_save_r31c). */
.L_callt_save_r31c:
prepare {r31}, 0
ctret
/* Restore saved registers, deallocate stack and return. */
/* Called via: callt ctoff(__return_r31c). */
.align 2
.L_callt_return_r31c:
dispose 0, {r31}, r31
/* Place the offsets of the start of these funcs into the call table. */
.call_table_data
.global __callt_save_r31c
.type __callt_save_r31c,@function
__callt_save_r31c: .short ctoff(.L_callt_save_r31c)
.global __callt_return_r31c
.type __callt_return_r31c,@function
__callt_return_r31c: .short ctoff(.L_callt_return_r31c)
#endif
#endif /* __v850e__ + */
#endif /* __V850_CALLT__ */
/* libgcc2 routines for NEC V850. */
/* Double Integer Arithmetical Operation. */
#ifdef L_negdi2
	.text
	.global ___negdi2
	.type   ___negdi2, @function
/* 64-bit two's-complement negation: r11:r10 = -(r7:r6).  */
/* In:  r6 = low word, r7 = high word.  Out: r10 = low, r11 = high.  */
___negdi2:
	not	r6, r10			/* r10 = ~low */
	add	1, r10			/* r10 = -low; CY set iff low was 0 */
	setf	l, r6			/* r6 = carry out of the low-word add */
	not	r7, r11			/* r11 = ~high */
	add	r6, r11			/* r11 = ~high + carry -> negated high word */
	jmp	[lp]
	.size ___negdi2,.-___negdi2
#endif
#ifdef L_cmpdi2
	.text
	.global ___cmpdi2
	.type	___cmpdi2,@function
/* Signed 64-bit compare of a (r7:r6) against b (r9:r8).		*/
/* Returns r10 = 0 if a < b, 1 if a == b, 2 if a > b			*/
/* (computed as (a >= b) + (a > b) via the setf/add pairs below).	*/
___cmpdi2:
	# Signed comparison between each high word.
	cmp	r9, r7			# flags from aHigh - bHigh
	be	.L_cmpdi_cmp_low	# equal high words: decide on low words
	setf	ge, r10			# r10 = (aHigh >= bHigh), signed
	setf	gt, r6			# r6  = (aHigh >  bHigh), signed
	add	r6, r10			# r10 = 0 / 1 / 2
	jmp	[lp]
.L_cmpdi_cmp_low:
	# Unsigned comparison between each low word.
	cmp	r8, r6			# flags from aLow - bLow
	setf	nl, r10			# r10 = (aLow >= bLow), unsigned ("not lower")
	setf	h, r6			# r6  = (aLow >  bLow), unsigned ("higher")
	add	r6, r10			# r10 = 0 / 1 / 2
	jmp	[lp]
	.size ___cmpdi2, . - ___cmpdi2
#endif
#ifdef L_ucmpdi2
	.text
	.global ___ucmpdi2
	.type	___ucmpdi2,@function
/* Unsigned 64-bit compare of a (r7:r6) against b (r9:r8).	*/
/* Returns r10 = 0 if a < b, 1 if a == b, 2 if a > b.		*/
___ucmpdi2:
	cmp	r9, r7	# Check if the high words are the same.
	bne	.L_ucmpdi_check_psw
	cmp	r8, r6	# High words equal: flags come from the low words.
.L_ucmpdi_check_psw:
	setf	nl, r10	# r10 = (a >= b), unsigned ("not lower")
	setf	h, r6	# r6  = (a >  b), unsigned ("higher")
	add	r6, r10	# Add the result of comparison NL and comparison H.
	jmp	[lp]
	.size ___ucmpdi2, . - ___ucmpdi2
#endif
#ifdef L_muldi3
.text
.global ___muldi3
.type ___muldi3,@function
___muldi3:
#ifdef __v850__
jarl __save_r26_r31, r10
addi 16, sp, sp
mov r6, r28
shr 15, r28
movea lo(32767), r0, r14
and r14, r28
mov r8, r10
shr 15, r10
and r14, r10
mov r6, r19
shr 30, r19
mov r7, r12
shl 2, r12
or r12, r19
and r14, r19
mov r8, r13
shr 30, r13
mov r9, r12
shl 2, r12
or r12, r13
and r14, r13
mov r7, r11
shr 13, r11
and r14, r11
mov r9, r31
shr 13, r31
and r14, r31
mov r7, r29
shr 28, r29
and r14, r29
mov r9, r12
shr 28, r12
and r14, r12
and r14, r6
and r14, r8
mov r6, r14
mulh r8, r14
mov r6, r16
mulh r10, r16
mov r6, r18
mulh r13, r18
mov r6, r15
mulh r31, r15
mulh r12, r6
mov r28, r17
mulh r10, r17
add -16, sp
mov r28, r12
mulh r8, r12
add r17, r18
mov r28, r17
mulh r31, r17
add r12, r16
mov r28, r12
mulh r13, r12
add r17, r6
mov r19, r17
add r12, r15
mov r19, r12
mulh r8, r12
mulh r10, r17
add r12, r18
mov r19, r12
mulh r13, r12
add r17, r15
mov r11, r13
mulh r8, r13
add r12, r6
mov r11, r12
mulh r10, r12
add r13, r15
mulh r29, r8
add r12, r6
mov r16, r13
shl 15, r13
add r14, r13
mov r18, r12
shl 30, r12
mov r13, r26
add r12, r26
shr 15, r14
movhi hi(131071), r0, r12
movea lo(131071), r12, r13
and r13, r14
mov r16, r12
and r13, r12
add r12, r14
mov r18, r12
shl 15, r12
and r13, r12
add r12, r14
shr 17, r14
shr 17, r16
add r14, r16
shl 13, r15
shr 2, r18
add r18, r15
add r15, r16
mov r16, r27
add r8, r6
shl 28, r6
add r6, r27
mov r26, r10
mov r27, r11
jr __return_r26_r31
#else /* defined(__v850e__) */
/* (Ahi << 32 + Alo) * (Bhi << 32 + Blo) */
/* r7 r6 r9 r8 */
mov r8, r10
mulu r7, r8, r0 /* Ahi * Blo */
mulu r6, r9, r0 /* Alo * Bhi */
mulu r6, r10, r11 /* Alo * Blo */
add r8, r11
add r9, r11
jmp [r31]
#endif /* defined(__v850e__) */
.size ___muldi3, . - ___muldi3
#endif
|
4ms/metamodule-plugin-sdk
| 4,412
|
plugin-libc/libgcc/config/m32c/lib1funcs.S
|
/* libgcc routines for R8C/M16C/M32C
Copyright (C) 2005-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#if defined(__r8c_cpu__) || defined(__m16c_cpu__)
#define A16
#define A(n,w) n
#define W w
#else
#define A24
#define A(n,w) w
#define W l
#endif
#ifdef L__m32c_memregs
/* Warning: these memory locations are used as a register bank. They
*must* end up consecutive in any final executable, so you may *not*
use the otherwise obvious ".comm" directive to allocate space for
them. */
.bss
.global mem0
mem0: .space 1
.global mem1
mem1: .space 1
.global mem2
mem2: .space 1
.global mem3
mem3: .space 1
.global mem4
mem4: .space 1
.global mem5
mem5: .space 1
.global mem6
mem6: .space 1
.global mem7
mem7: .space 1
.global mem8
mem8: .space 1
.global mem9
mem9: .space 1
.global mem10
mem10: .space 1
.global mem11
mem11: .space 1
.global mem12
mem12: .space 1
.global mem13
mem13: .space 1
.global mem14
mem14: .space 1
.global mem15
mem15: .space 1
#endif
#ifdef L__m32c_eh_return
.text
.global __m32c_eh_return
__m32c_eh_return:
/* At this point, r0 has the stack adjustment, r1r3 has the
address to return to. The stack looks like this:
old_ra
old_fp
<- unwound sp
...
fb
through
r0
<- sp
What we need to do is restore all the registers, update the
stack, and return to the right place.
*/
stc sp,a0
add.W A(#16,#24),a0
/* a0 points to the current stack, just above the register
save areas */
mov.w a0,a1
exts.w r0
sub.W A(r0,r2r0),a1
sub.W A(#3,#4),a1
/* a1 points to the new stack. */
/* This is for the "rts" below. */
mov.w r1,[a1]
#ifdef A16
mov.w r2,r1
mov.b r1l,2[a1]
#else
mov.w r2,2[a1]
#endif
/* This is for the "popc sp" below. */
mov.W a1,[a0]
popm r0,r1,r2,r3,a0,a1,sb,fb
popc sp
rts
#endif
/* SImode arguments for SI foo(SI,SI) functions. */
#ifdef A16
#define SAL 5[fb]
#define SAH 7[fb]
#define SBL 9[fb]
#define SBH 11[fb]
#else
#define SAL 8[fb]
#define SAH 10[fb]
#define SBL 12[fb]
#define SBH 14[fb]
#endif
#ifdef L__m32c_mulsi3
.text
.global ___mulsi3
___mulsi3:
enter #0
push.w r2
mov.w SAL,r0
mulu.w SBL,r0 /* writes to r2r0 */
mov.w r0,mem0
mov.w r2,mem2
mov.w SAL,r0
mulu.w SBH,r0 /* writes to r2r0 */
add.w r0,mem2
mov.w SAH,r0
mulu.w SBL,r0 /* writes to r2r0 */
add.w r0,mem2
pop.w r2
exitd
#endif
#ifdef L__m32c_cmpsi2
.text
.global ___cmpsi2
___cmpsi2:
enter #0
cmp.w SBH,SAH
jgt cmpsi_gt
jlt cmpsi_lt
cmp.w SBL,SAL
jgt cmpsi_gt
jlt cmpsi_lt
mov.w #1,r0
exitd
cmpsi_gt:
mov.w #2,r0
exitd
cmpsi_lt:
mov.w #0,r0
exitd
#endif
#ifdef L__m32c_ucmpsi2
.text
.global ___ucmpsi2
___ucmpsi2:
enter #0
cmp.w SBH,SAH
jgtu cmpsi_gt
jltu cmpsi_lt
cmp.w SBL,SAL
jgtu cmpsi_gt
jltu cmpsi_lt
mov.w #1,r0
exitd
cmpsi_gt:
mov.w #2,r0
exitd
cmpsi_lt:
mov.w #0,r0
exitd
#endif
#ifdef L__m32c_jsri16
.text
#ifdef A16
.global m32c_jsri16
m32c_jsri16:
add.w #-1, sp
/* Read the address (16 bits) and return address (24 bits) off
the stack. */
mov.w 4[sp], r0
mov.w 1[sp], r3
mov.b 3[sp], a0 /* This zero-extends, so the high byte has
zero in it. */
/* Write the return address, then new address, to the stack. */
mov.w a0, 1[sp] /* Just to get the zero in 2[sp]. */
mov.w r0, 0[sp]
mov.w r3, 3[sp]
mov.b a0, 5[sp]
/* This "returns" to the target address, leaving the pending
return address on the stack. */
rts
#endif
#endif
|
4ms/metamodule-plugin-sdk
| 1,706
|
plugin-libc/libgcc/config/sparc/crtn.S
|
! crtn.S for SPARC
! Copyright (C) 1992-2022 Free Software Foundation, Inc.
! Written By David Vinayak Henkel-Wallace, June 1992
!
! This file is free software; you can redistribute it and/or modify it
! under the terms of the GNU General Public License as published by the
! Free Software Foundation; either version 3, or (at your option) any
! later version.
!
! This file is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
! General Public License for more details.
!
! Under Section 7 of GPL version 3, you are granted additional
! permissions described in the GCC Runtime Library Exception, version
! 3.1, as published by the Free Software Foundation.
!
! You should have received a copy of the GNU General Public License and
! a copy of the GCC Runtime Library Exception along with this program;
! see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
! <http://www.gnu.org/licenses/>.
! This file just makes sure that the .fini and .init sections do in
! fact return. Users may put any desired instructions in those sections.
! This file is the last thing linked into any executable.
.section ".init"
.align 4
#ifdef _FLAT
mov %i7, %o7
#ifdef __sparcv9
ldx [%sp+2343], %i7
sub %sp, -176, %sp
#else
ld [%sp+156], %i7
sub %sp, -96, %sp
#endif
#else
restore
#endif
jmp %o7+8
nop
.section ".fini"
.align 4
#ifdef _FLAT
mov %i7, %o7
#ifdef __sparcv9
ldx [%sp+2343], %i7
sub %sp, -176, %sp
#else
ld [%sp+156], %i7
sub %sp, -96, %sp
#endif
#else
restore
#endif
jmp %o7+8
nop
! Th-th-th-that is all folks!
|
4ms/metamodule-plugin-sdk
| 16,708
|
plugin-libc/libgcc/config/sparc/lb1spc.S
|
/* This is an assembly language implementation of mulsi3, divsi3, and modsi3
for the sparc processor.
These routines are derived from the SPARC Architecture Manual, version 8,
slightly edited to match the desired calling convention, and also to
optimize them for our purposes. */
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
#ifdef L_mulsi3
.text
.align 4
.global .umul
.proc 4
.umul:
or %o0, %o1, %o4 ! logical or of multiplier and multiplicand
mov %o0, %y ! multiplier to Y register
andncc %o4, 0xfff, %o5 ! mask out lower 12 bits
be mul_shortway ! can do it the short way
andcc %g0, %g0, %o4 ! zero the partial product and clear NV cc
!
! long multiply
!
mulscc %o4, %o1, %o4 ! first iteration of 33
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4 ! 32nd iteration
mulscc %o4, %g0, %o4 ! last iteration only shifts
! the upper 32 bits of product are wrong, but we do not care
retl
rd %y, %o0
!
! short multiply
!
mul_shortway:
mulscc %o4, %o1, %o4 ! first iteration of 13
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4
mulscc %o4, %o1, %o4 ! 12th iteration
mulscc %o4, %g0, %o4 ! last iteration only shifts
rd %y, %o5
sll %o4, 12, %o4 ! left shift partial product by 12 bits
srl %o5, 20, %o5 ! right shift partial product by 20 bits
retl
or %o5, %o4, %o0 ! merge for true product
#endif
#ifdef L_divsi3
/*
* Division and remainder, from Appendix E of the SPARC Version 8
* Architecture Manual, with fixes from Gordon Irlam.
*/
/*
* Input: dividend and divisor in %o0 and %o1 respectively.
*
* m4 parameters:
* .div name of function to generate
* div div=div => %o0 / %o1; div=rem => %o0 % %o1
* true true=true => signed; true=false => unsigned
*
* Algorithm parameters:
* N how many bits per iteration we try to get (4)
* WORDSIZE total number of bits (32)
*
* Derived constants:
* TOPBITS number of bits in the top decade of a number
*
* Important variables:
* Q the partial quotient under development (initially 0)
* R the remainder so far, initially the dividend
* ITER number of main division loop iterations required;
* equal to ceil(log2(quotient) / N). Note that this
* is the log base (2^N) of the quotient.
* V the current comparand, initially divisor*2^(ITER*N-1)
*
* Cost:
* Current estimate for non-large dividend is
* ceil(log2(quotient) / N) * (10 + 7N/2) + C
* A large dividend is one greater than 2^(31-TOPBITS) and takes a
* different path, as the upper bits of the quotient must be developed
* one bit at a time.
*/
.global .udiv
.align 4
.proc 4
.text
.udiv:
b ready_to_divide
mov 0, %g3 ! result is always positive
.global .div
.align 4
.proc 4
.text
.div:
! compute sign of result; if neither is negative, no problem
orcc %o1, %o0, %g0 ! either negative?
bge ready_to_divide ! no, go do the divide
xor %o1, %o0, %g3 ! compute sign in any case
tst %o1
bge 1f
tst %o0
! %o1 is definitely negative; %o0 might also be negative
bge ready_to_divide ! if %o0 not negative...
sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
1: ! %o0 is negative, %o1 is nonnegative
sub %g0, %o0, %o0 ! make %o0 nonnegative
ready_to_divide:
! Ready to divide. Compute size of quotient; scale comparand.
orcc %o1, %g0, %o5
bne 1f
mov %o0, %o3
! Divide by zero trap. If it returns, return 0 (about as
! wrong as possible, but that is what SunOS does...).
ta 0x2 ! ST_DIV0
retl
clr %o0
1:
cmp %o3, %o5 ! if %o1 exceeds %o0, done
blu got_result ! (and algorithm fails otherwise)
clr %o2
sethi %hi(1 << (32 - 4 - 1)), %g1
cmp %o3, %g1
blu not_really_big
clr %o4
! Here the dividend is >= 2**(31-N) or so. We must be careful here,
! as our usual N-at-a-shot divide step will cause overflow and havoc.
! The number of bits in the result here is N*ITER+SC, where SC <= N.
! Compute ITER in an unorthodox manner: know we need to shift V into
! the top decade: so do not even bother to compare to R.
1:
cmp %o5, %g1
bgeu 3f
mov 1, %g2
sll %o5, 4, %o5
b 1b
add %o4, 1, %o4
! Now compute %g2.
2: addcc %o5, %o5, %o5
bcc not_too_big
add %g2, 1, %g2
! We get here if the %o1 overflowed while shifting.
! This means that %o3 has the high-order bit set.
! Restore %o5 and subtract from %o3.
sll %g1, 4, %g1 ! high order bit
srl %o5, 1, %o5 ! rest of %o5
add %o5, %g1, %o5
b do_single_div
sub %g2, 1, %g2
not_too_big:
3: cmp %o5, %o3
blu 2b
nop
be do_single_div
nop
/* NB: these are commented out in the V8-SPARC manual as well */
/* (I do not understand this) */
! %o5 > %o3: went too far: back up 1 step
! srl %o5, 1, %o5
! dec %g2
! do single-bit divide steps
!
! We have to be careful here. We know that %o3 >= %o5, so we can do the
! first divide step without thinking. BUT, the others are conditional,
! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
! order bit set in the first step, just falling into the regular
! division loop will mess up the first time around.
! So we unroll slightly...
do_single_div:
subcc %g2, 1, %g2
bl end_regular_divide
nop
sub %o3, %o5, %o3
mov 1, %o2
b end_single_divloop
nop
single_divloop:
sll %o2, 1, %o2
bl 1f
srl %o5, 1, %o5
! %o3 >= 0
sub %o3, %o5, %o3
b 2f
add %o2, 1, %o2
1: ! %o3 < 0
add %o3, %o5, %o3
sub %o2, 1, %o2
2:
end_single_divloop:
subcc %g2, 1, %g2
bge single_divloop
tst %o3
b,a end_regular_divide
not_really_big:
1:
sll %o5, 4, %o5
cmp %o5, %o3
bleu 1b
addcc %o4, 1, %o4
be got_result
sub %o4, 1, %o4
tst %o3 ! set up for initial iteration
divloop:
sll %o2, 4, %o2
! depth 1, accumulated bits 0
bl L1.16
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 2, accumulated bits 1
bl L2.17
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits 3
bl L3.19
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 7
bl L4.23
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (7*2+1), %o2
L4.23:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (7*2-1), %o2
L3.19:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 5
bl L4.21
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (5*2+1), %o2
L4.21:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (5*2-1), %o2
L2.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits 1
bl L3.17
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 3
bl L4.19
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (3*2+1), %o2
L4.19:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (3*2-1), %o2
L3.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 1
bl L4.17
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (1*2+1), %o2
L4.17:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (1*2-1), %o2
L1.16:
! remainder is negative
addcc %o3,%o5,%o3
! depth 2, accumulated bits -1
bl L2.15
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits -1
bl L3.15
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -1
bl L4.15
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (-1*2+1), %o2
L4.15:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (-1*2-1), %o2
L3.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -3
bl L4.13
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (-3*2+1), %o2
L4.13:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (-3*2-1), %o2
L2.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits -3
bl L3.13
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -5
bl L4.11
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (-5*2+1), %o2
L4.11:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (-5*2-1), %o2
L3.13:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -7
bl L4.9
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (-7*2+1), %o2
L4.9:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (-7*2-1), %o2
9:
end_regular_divide:
subcc %o4, 1, %o4
bge divloop
tst %o3
bl,a got_result
! non-restoring fixup here (one instruction only!)
sub %o2, 1, %o2
got_result:
! check to see if answer should be < 0
tst %g3
bl,a 1f
sub %g0, %o2, %o2
1:
retl
mov %o2, %o0
#endif
#ifdef L_modsi3
/* This implementation was taken from glibc:
*
* Input: dividend and divisor in %o0 and %o1 respectively.
*
* Algorithm parameters:
* N how many bits per iteration we try to get (4)
* WORDSIZE total number of bits (32)
*
* Derived constants:
* TOPBITS number of bits in the top decade of a number
*
* Important variables:
* Q the partial quotient under development (initially 0)
* R the remainder so far, initially the dividend
* ITER number of main division loop iterations required;
* equal to ceil(log2(quotient) / N). Note that this
* is the log base (2^N) of the quotient.
* V the current comparand, initially divisor*2^(ITER*N-1)
*
* Cost:
* Current estimate for non-large dividend is
* ceil(log2(quotient) / N) * (10 + 7N/2) + C
* A large dividend is one greater than 2^(31-TOPBITS) and takes a
* different path, as the upper bits of the quotient must be developed
* one bit at a time.
*/
.text
.align 4
.global .urem
.proc 4
.urem:
b divide
mov 0, %g3 ! result always positive
.align 4
.global .rem
.proc 4
.rem:
! compute sign of result; if neither is negative, no problem
orcc %o1, %o0, %g0 ! either negative?
bge 2f ! no, go do the divide
mov %o0, %g3 ! sign of remainder matches %o0
tst %o1
bge 1f
tst %o0
! %o1 is definitely negative; %o0 might also be negative
bge 2f ! if %o0 not negative...
sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
1: ! %o0 is negative, %o1 is nonnegative
sub %g0, %o0, %o0 ! make %o0 nonnegative
2:
! Ready to divide. Compute size of quotient; scale comparand.
divide:
orcc %o1, %g0, %o5
bne 1f
mov %o0, %o3
! Divide by zero trap. If it returns, return 0 (about as
! wrong as possible, but that is what SunOS does...).
ta 0x2 !ST_DIV0
retl
clr %o0
1:
cmp %o3, %o5 ! if %o1 exceeds %o0, done
blu got_result ! (and algorithm fails otherwise)
clr %o2
sethi %hi(1 << (32 - 4 - 1)), %g1
cmp %o3, %g1
blu not_really_big
clr %o4
! Here the dividend is >= 2**(31-N) or so. We must be careful here,
! as our usual N-at-a-shot divide step will cause overflow and havoc.
! The number of bits in the result here is N*ITER+SC, where SC <= N.
! Compute ITER in an unorthodox manner: know we need to shift V into
! the top decade: so do not even bother to compare to R.
1:
cmp %o5, %g1
bgeu 3f
mov 1, %g2
sll %o5, 4, %o5
b 1b
add %o4, 1, %o4
! Now compute %g2.
2: addcc %o5, %o5, %o5
bcc not_too_big
add %g2, 1, %g2
! We get here if the %o1 overflowed while shifting.
! This means that %o3 has the high-order bit set.
! Restore %o5 and subtract from %o3.
sll %g1, 4, %g1 ! high order bit
srl %o5, 1, %o5 ! rest of %o5
add %o5, %g1, %o5
b do_single_div
sub %g2, 1, %g2
not_too_big:
3: cmp %o5, %o3
blu 2b
nop
be do_single_div
nop
/* NB: these are commented out in the V8-SPARC manual as well */
/* (I do not understand this) */
! %o5 > %o3: went too far: back up 1 step
! srl %o5, 1, %o5
! dec %g2
! do single-bit divide steps
!
! We have to be careful here. We know that %o3 >= %o5, so we can do the
! first divide step without thinking. BUT, the others are conditional,
! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
! order bit set in the first step, just falling into the regular
! division loop will mess up the first time around.
! So we unroll slightly...
do_single_div:
subcc %g2, 1, %g2
bl end_regular_divide
nop
sub %o3, %o5, %o3
mov 1, %o2
b end_single_divloop
nop
single_divloop:
sll %o2, 1, %o2
bl 1f
srl %o5, 1, %o5
! %o3 >= 0
sub %o3, %o5, %o3
b 2f
add %o2, 1, %o2
1: ! %o3 < 0
add %o3, %o5, %o3
sub %o2, 1, %o2
2:
end_single_divloop:
subcc %g2, 1, %g2
bge single_divloop
tst %o3
b,a end_regular_divide
not_really_big:
1:
sll %o5, 4, %o5
cmp %o5, %o3
bleu 1b
addcc %o4, 1, %o4
be got_result
sub %o4, 1, %o4
tst %o3 ! set up for initial iteration
divloop:
sll %o2, 4, %o2
! depth 1, accumulated bits 0
bl L1.16
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 2, accumulated bits 1
bl L2.17
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits 3
bl L3.19
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 7
bl L4.23
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (7*2+1), %o2
L4.23:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (7*2-1), %o2
L3.19:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 5
bl L4.21
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (5*2+1), %o2
L4.21:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (5*2-1), %o2
L2.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits 1
bl L3.17
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 3
bl L4.19
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (3*2+1), %o2
L4.19:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (3*2-1), %o2
L3.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 1
bl L4.17
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (1*2+1), %o2
L4.17:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (1*2-1), %o2
L1.16:
! remainder is negative
addcc %o3,%o5,%o3
! depth 2, accumulated bits -1
bl L2.15
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits -1
bl L3.15
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -1
bl L4.15
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (-1*2+1), %o2
L4.15:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (-1*2-1), %o2
L3.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -3
bl L4.13
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (-3*2+1), %o2
L4.13:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (-3*2-1), %o2
L2.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits -3
bl L3.13
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -5
bl L4.11
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (-5*2+1), %o2
L4.11:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (-5*2-1), %o2
L3.13:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -7
bl L4.9
srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
b 9f
add %o2, (-7*2+1), %o2
L4.9:
! remainder is negative
addcc %o3,%o5,%o3
b 9f
add %o2, (-7*2-1), %o2
9:
end_regular_divide:
subcc %o4, 1, %o4
bge divloop
tst %o3
bl,a got_result
! non-restoring fixup here (one instruction only!)
add %o3, %o1, %o3
got_result:
! check to see if answer should be < 0
tst %g3
bl,a 1f
sub %g0, %o3, %o3
1:
retl
mov %o3, %o0
#endif
|
4ms/metamodule-plugin-sdk
| 1,945
|
plugin-libc/libgcc/config/sparc/crti.S
|
! crti.S for SPARC
! Copyright (C) 1992-2022 Free Software Foundation, Inc.
! Written By David Vinayak Henkel-Wallace, June 1992
!
! This file is free software; you can redistribute it and/or modify it
! under the terms of the GNU General Public License as published by the
! Free Software Foundation; either version 3, or (at your option) any
! later version.
!
! This file is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
! General Public License for more details.
!
! Under Section 7 of GPL version 3, you are granted additional
! permissions described in the GCC Runtime Library Exception, version
! 3.1, as published by the Free Software Foundation.
!
! You should have received a copy of the GNU General Public License and
! a copy of the GCC Runtime Library Exception along with this program;
! see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
! <http://www.gnu.org/licenses/>.
! This file just make a stack frame for the contents of the .fini and
! .init sections. Users may put any desired instructions in those
! sections.
! This file is linked in before the Values-Xx.o files and also before
! crtbegin, with which perhaps it should be merged.
.section ".init"
.proc 022
.global _init
.type _init,#function
.align 4
_init:
#ifdef _FLAT
#ifdef __sparcv9
stx %i7, [%sp+2167]
add %sp, -176, %sp
#else
st %i7, [%sp+60]
add %sp, -96, %sp
#endif
mov %o7, %i7
#else
#ifdef __sparcv9
save %sp, -176, %sp
#else
save %sp, -96, %sp
#endif
#endif
.section ".fini"
.proc 022
.global _fini
.type _fini,#function
.align 4
_fini:
#ifdef _FLAT
#ifdef __sparcv9
stx %i7, [%sp+2167]
add %sp, -176, %sp
#else
st %i7, [%sp+60]
add %sp, -96, %sp
#endif
mov %o7, %i7
#else
#ifdef __sparcv9
save %sp, -176, %sp
#else
save %sp, -96, %sp
#endif
#endif
|
4ms/metamodule-plugin-sdk
| 2,759
|
plugin-libc/libgcc/config/sparc/sol2-c1.S
|
! crt1.s for sparc & sparcv9 (SunOS 5)
! Copyright (C) 1992-2022 Free Software Foundation, Inc.
! Written By David Vinayak Henkel-Wallace, June 1992
!
! This file is free software; you can redistribute it and/or modify it
! under the terms of the GNU General Public License as published by the
! Free Software Foundation; either version 3, or (at your option) any
! later version.
!
! This file is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
! General Public License for more details.
!
! Under Section 7 of GPL version 3, you are granted additional
! permissions described in the GCC Runtime Library Exception, version
! 3.1, as published by the Free Software Foundation.
!
! You should have received a copy of the GNU General Public License and
! a copy of the GCC Runtime Library Exception along with this program;
! see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
! <http://www.gnu.org/licenses/>.
! This file takes control of the process from the kernel, as specified
! in section 3 of the SVr4 ABI.
! This file is the first thing linked into any executable.
#ifdef __sparcv9
#define CPTRSIZE 8
#define CPTRSHIFT 3
#define STACK_BIAS 2047
#define ldn ldx
#define stn stx
#define setn(s, scratch, dst) setx s, scratch, dst
#else
#define CPTRSIZE 4
#define CPTRSHIFT 2
#define STACK_BIAS 0
#define ldn ld
#define stn st
#define setn(s, scratch, dst) set s, dst
#endif
.section ".text"
.proc 022
.global _start
_start:
mov 0, %fp ! Mark bottom frame pointer
ldn [%sp + (16 * CPTRSIZE) + STACK_BIAS], %l0 ! argc
add %sp, (17 * CPTRSIZE) + STACK_BIAS, %l1 ! argv
! Leave some room for a call. Sun leaves 32 octets (to sit on
! a cache line?) so we do too.
#ifdef __sparcv9
sub %sp, 48, %sp
#else
sub %sp, 32, %sp
#endif
! %g1 may contain a function to be registered w/atexit
orcc %g0, %g1, %g0
#ifdef __sparcv9
be %xcc, .nope
#else
be .nope
#endif
mov %g1, %o0
call atexit
nop
.nope:
! Now make sure constructors and destructors are handled.
setn(_fini, %o1, %o0)
call atexit, 1
nop
call _init, 0
nop
! We ignore the auxiliary vector; there is no defined way to
! access those data anyway. Instead, go straight to main:
mov %l0, %o0 ! argc
mov %l1, %o1 ! argv
#ifdef GCRT1
setn(___Argv, %o4, %o3)
stn %o1, [%o3] ! *___Argv
#endif
! Skip argc words past argv, to env:
sll %l0, CPTRSHIFT, %o2
add %o2, CPTRSIZE, %o2
add %l1, %o2, %o2 ! env
setn(_environ, %o4, %o3)
stn %o2, [%o3] ! *_environ
call main, 4
nop
call exit, 0
nop
call _exit, 0
nop
! We should never get here.
.type _start,#function
.size _start,.-_start
|
4ms/metamodule-plugin-sdk
| 1,394
|
plugin-libc/libgcc/config/fr30/crtn.S
|
# crtn.S for ELF
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# Written By David Vinayak Henkel-Wallace, June 1992
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
.section ".init"
.align 4
leave
ld @r15+,rp
ret
.section ".fini"
.align 4
leave
ld @r15+,rp
ret
# Th-th-th-that is all folks!
|
4ms/metamodule-plugin-sdk
| 1,622
|
plugin-libc/libgcc/config/fr30/crti.S
|
# crti.s for ELF
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# Written By David Vinayak Henkel-Wallace, June 1992
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
# This file just make a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
.section ".init"
.global _init
.type _init,#function
.align 4
_init:
st rp, @-r15
enter #4
# These nops are here to align the end of this code with a 16 byte
# boundary. The linker will start inserting code into the .init
# section at such a boundary.
nop
nop
nop
nop
nop
nop
.section ".fini"
.global _fini
.type _fini,#function
.align 4
_fini:
st rp, @-r15
enter #4
nop
nop
nop
nop
nop
nop
|
4ms/metamodule-plugin-sdk
| 2,506
|
plugin-libc/libgcc/config/fr30/lib1funcs.S
|
/* libgcc routines for the FR30.
Copyright (C) 1998-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
.macro FUNC_START name
.text
.globl __\name
.type __\name, @function
__\name:
.endm
.macro FUNC_END name
.size __\name, . - __\name
.endm
.macro DIV_BODY reg number
.if \number
DIV_BODY \reg, "\number - 1"
div1 \reg
.endif
.endm
#ifdef L_udivsi3
FUNC_START udivsi3
;; Perform an unsiged division of r4 / r5 and place the result in r4.
;; Does not handle overflow yet...
mov r4, mdl
div0u r5
DIV_BODY r5 32
mov mdl, r4
ret
FUNC_END udivsi3
#endif /* L_udivsi3 */
#ifdef L_divsi3
FUNC_START divsi3
;; Perform a siged division of r4 / r5 and place the result in r4.
;; Does not handle overflow yet...
mov r4, mdl
div0s r5
DIV_BODY r5 32
div2 r5
div3
div4s
mov mdl, r4
ret
FUNC_END divsi3
#endif /* L_divsi3 */
#ifdef L_umodsi3
FUNC_START umodsi3
;; Perform an unsiged division of r4 / r5 and places the remainder in r4.
;; Does not handle overflow yet...
mov r4, mdl
div0u r5
DIV_BODY r5 32
mov mdh, r4
ret
FUNC_END umodsi3
#endif /* L_umodsi3 */
#ifdef L_modsi3
FUNC_START modsi3
;; Perform a siged division of r4 / r5 and place the remainder in r4.
;; Does not handle overflow yet...
mov r4, mdl
div0s r5
DIV_BODY r5 32
div2 r5
div3
div4s
mov mdh, r4
ret
FUNC_END modsi3
#endif /* L_modsi3 */
#ifdef L_negsi2
FUNC_START negsi2
ldi:8 #0, r0
sub r4, r0
mov r0, r4
ret
FUNC_END negsi2
#endif /* L_negsi2 */
#ifdef L_one_cmplsi2
FUNC_START one_cmplsi2
ldi:8 #0xff, r0
extsb r0
eor r0, r4
ret
FUNC_END one_cmplsi2
#endif /* L_one_cmplsi2 */
|
4ms/metamodule-plugin-sdk
| 1,382
|
plugin-libc/libgcc/config/h8300/crtn.S
|
/* Copyright (C) 2001-2022 Free Software Foundation, Inc.
This file was adapted from glibc sources.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* See an explanation about .init and .fini in crti.S. */
#ifdef __H8300H__
#ifdef __NORMAL_MODE__
.h8300hn
#else
.h8300h
#endif
#endif
#ifdef __H8300S__
#ifdef __NORMAL_MODE__
.h8300sn
#else
.h8300s
#endif
#endif
#ifdef __H8300SX__
#ifdef __NORMAL_MODE__
.h8300sxn
#else
.h8300sx
#endif
#endif
.section .init, "ax", @progbits
rts
.section .fini, "ax", @progbits
rts
|
4ms/metamodule-plugin-sdk
| 1,846
|
plugin-libc/libgcc/config/h8300/crti.S
|
/* Copyright (C) 2001-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* The code in sections .init and .fini is supposed to be a single
regular function. The function in .init is called directly from
start in crt0.S. The function in .fini is atexit()ed in crt0.S
too.
crti.S contributes the prologue of a function to these sections,
and crtn.S comes up the epilogue. STARTFILE_SPEC should list
crti.o before any other object files that might add code to .init
or .fini sections, and ENDFILE_SPEC should list crtn.o after any
such object files. */
/* CPU / addressing-mode selection: must match the mode every other
   object in the link was built for.  Exactly one directive (or none,
   for the plain H8/300) is emitted, per the target macros. */
#ifdef __H8300H__
#ifdef __NORMAL_MODE__
.h8300hn
#else
.h8300h
#endif
#endif
#ifdef __H8300S__
#ifdef __NORMAL_MODE__
.h8300sn
#else
.h8300s
#endif
#endif
#ifdef __H8300SX__
#ifdef __NORMAL_MODE__
.h8300sxn
#else
.h8300sx
#endif
#endif
/* Open the .init function: only the entry label is emitted here.  The
   body is contributed by other objects' .init fragments, and crtn.S
   supplies the closing rts (see the comment block above). */
.section .init, "ax", @progbits
.global __init
__init:
/* Same arrangement for the .fini function. */
.section .fini, "ax", @progbits
.global __fini
__fini:
/* ==== end of crti.S ====
   Next file: plugin-libc/libgcc/config/h8300/lib1funcs.S
   (repo: 4ms/metamodule-plugin-sdk, 14,936 bytes) */
;; libgcc routines for the Renesas H8/300 CPU.
;; Contributed by Steve Chamberlain <sac@cygnus.com>
;; Optimizations by Toshiyasu Morita <toshiyasu.morita@renesas.com>
/* Copyright (C) 1994-2022 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Assembler register definitions. */
;; A0-A3 alias the argument/scratch registers r0-r3 and S0-S2 alias
;; r4-r6.  The H/L suffixed names are the high and low byte halves of
;; the corresponding 16-bit register.
#define A0 r0
#define A0L r0l
#define A0H r0h
#define A1 r1
#define A1L r1l
#define A1H r1h
#define A2 r2
#define A2L r2l
#define A2H r2h
#define A3 r3
#define A3L r3l
#define A3H r3h
#define S0 r4
#define S0L r4l
#define S0H r4h
#define S1 r5
#define S1L r5l
#define S1H r5h
#define S2 r6
#define S2L r6l
#define S2H r6h
;; Pointer-sized push/pop and pointer-register aliases.  On the plain
;; H8/300 pointers are 16-bit (rN); on the H8/300H/S/SX they are the
;; 32-bit erN registers, and AnE names the upper-16-bit "extended" half.
#ifdef __H8300__
#define PUSHP push
#define POPP pop
#define A0P r0
#define A1P r1
#define A2P r2
#define A3P r3
#define S0P r4
#define S1P r5
#define S2P r6
#endif
#if defined (__H8300H__) || defined (__H8300S__) || defined (__H8300SX__)
#define PUSHP push.l
#define POPP pop.l
#define A0P er0
#define A1P er1
#define A2P er2
#define A3P er3
#define S0P er4
#define S1P er5
#define S2P er6
#define A0E e0
#define A1E e1
#define A2E e2
#define A3E e3
#endif
;; LABEL/LABEL_DEF paste __USER_LABEL_PREFIX__ and a double underscore
;; in front of a routine name, producing the public libgcc symbol
;; (e.g. LABEL(divhi3) -> ___divhi3 with a "_" prefix, __divhi3 without).
#define CONCAT(A,B) A##B
#define LABEL0(U,X) CONCAT(U,__##X)
#define LABEL0_DEF(U,X) CONCAT(U,__##X##:)
#define LABEL_DEF(X) LABEL0_DEF(__USER_LABEL_PREFIX__,X)
#define LABEL(X) LABEL0(__USER_LABEL_PREFIX__,X)
;; CPU / addressing-mode selection, as in crti.S/crtn.S: exactly one
;; directive (or none, for the plain H8/300) is emitted.
#ifdef __H8300H__
#ifdef __NORMAL_MODE__
.h8300hn
#else
.h8300h
#endif
#endif
#ifdef __H8300S__
#ifdef __NORMAL_MODE__
.h8300sn
#else
.h8300s
#endif
#endif
#ifdef __H8300SX__
#ifdef __NORMAL_MODE__
.h8300sxn
#else
.h8300sx
#endif
#endif
#ifdef L_cmpsi2
;; __cmpsi2: signed 32-bit compare for the plain H8/300.
;; In:  a in A0(high)/A1(low), b in A2(high)/A3(low).
;; Out: A0 = 0 if a < b, 1 if a == b, 2 if a > b
;;      (the libgcc __cmpsi2 return convention).
#ifdef __H8300__
.section .text
.align 2
.global LABEL(cmpsi2)
LABEL_DEF(cmpsi2)
; compare high words first; flags reflect b.high - a.high
cmp.w A0,A2
bne .L2
; high words equal: compare low words
cmp.w A1,A3
bne .L4
; fully equal
mov.w #1,A0
rts
.L2:
; high words differ - a signed compare of the highs decides
bgt .L5 ; b.high > a.high => a < b
.L3:
mov.w #2,A0 ; a > b
rts
.L4:
; highs equal, lows differ - unsigned compare of the low words decides
bls .L3 ; b.low < a.low => a > b
.L5:
sub.w A0,A0 ; a < b => 0
rts
.end
#endif
#endif /* L_cmpsi2 */
#ifdef L_ucmpsi2
;; __ucmpsi2: unsigned 32-bit compare for the plain H8/300.
;; Identical structure to __cmpsi2 above, but the high-word decision
;; uses an unsigned branch (bhi instead of bgt).
;; In:  a in A0/A1, b in A2/A3.
;; Out: A0 = 0 if a < b, 1 if a == b, 2 if a > b.
#ifdef __H8300__
.section .text
.align 2
.global LABEL(ucmpsi2)
LABEL_DEF(ucmpsi2)
; flags = b.high - a.high
cmp.w A0,A2
bne .L2
cmp.w A1,A3
bne .L4
; fully equal
mov.w #1,A0
rts
.L2:
; unsigned compare of high words
bhi .L5 ; b.high > a.high => a < b
.L3:
mov.w #2,A0 ; a > b
rts
.L4:
; highs equal, lows differ
bls .L3 ; b.low < a.low => a > b
.L5:
sub.w A0,A0 ; a < b => 0
rts
.end
#endif
#endif /* L_ucmpsi2 */
#ifdef L_divhi3
;; HImode divides for the H8/300.
;; We bunch all of this into one object file since there are several
;; "supporting routines".
; general purpose normalize routine
;
; dividend in A0, divisor in A1
; (the routines below compute A0 = A0/A1, so A0 is the dividend;
; the original comment had the two swapped)
; turns both into +ve numbers, and leaves what the answer sign
; should be in A2L
#ifdef __H8300__
.section .text
.align 2
divnorm:
or A0H,A0H ; is dividend > 0
; snapshot CCR: the N flag of the test above lands in bit 3 of A2L,
; which from here on encodes the sign the result should get
stc ccr,A2L
bge _lab1
not A0H ; no - then make it +ve
not A0L
adds #1,A0
_lab1: or A1H,A1H ; look at divisor
bge _lab2
not A1H ; it is -ve, make it positive
not A1L
adds #1,A1
xor #0x8,A2L; and toggle sign of result
_lab2: rts
;; Basically the same, except that the sign of the dividend alone
;; determines the sign (C remainder semantics): only the first
;; operand's sign is captured, and negating the divisor does NOT
;; toggle bit 3.
modnorm:
or A0H,A0H ; is dividend > 0
stc ccr,A2L
bge _lab7
not A0H ; no - then make it +ve
not A0L
adds #1,A0
_lab7: or A1H,A1H ; look at divisor
bge _lab8
not A1H ; it is -ve, make it positive
not A1L
adds #1,A1
_lab8: rts
; A0=A0/A1 signed
; __divhi3: normalize operands to positive, do the unsigned divide,
; then negate the quotient if divnorm recorded a negative sign.
.global LABEL(divhi3)
LABEL_DEF(divhi3)
bsr divnorm
bsr LABEL(udivhi3)
; shared sign-fixup tail, also branched to by __modhi3 below
negans: btst #3,A2L ; should answer be negative ?
beq _lab4
not A0H ; yes, so make it so
not A0L
adds #1,A0
_lab4: rts
; A0=A0%A1 signed
; __modhi3: udivhi3 leaves the remainder in A3; move it to A0 and
; reuse negans (modnorm put the dividend's sign in A2L bit 3).
.global LABEL(modhi3)
LABEL_DEF(modhi3)
bsr modnorm
bsr LABEL(udivhi3)
mov A3,A0
bra negans
; A0=A0%A1 unsigned
; __umodhi3: no normalization needed; just fetch the remainder.
.global LABEL(umodhi3)
LABEL_DEF(umodhi3)
bsr LABEL(udivhi3)
mov A3,A0
rts
; A0=A0/A1 unsigned
; A3=A0%A1 unsigned
; A2H trashed
; Register-legend shorthand used in the per-line comments below:
; D high 8 bits of denom
; d low 8 bits of denom
; N high 8 bits of num
; n low 8 bits of num
; M high 8 bits of mod
; m low 8 bits of mod
; Q high 8 bits of quot
; q low 8 bits of quot
; P preserve
; The H8/300 only has a 16/8 bit divide, so we look at the incoming and
; see how to partition up the expression.
.global LABEL(udivhi3)
LABEL_DEF(udivhi3)
; A0 A1 A2 A3
; Nn Dd P
sub.w A3,A3 ; Nn Dd xP 00
; fast path is possible only when the divisor fits in 8 bits
or A1H,A1H
bne divlongway
or A0H,A0H
beq _lab6
; we know that D == 0 and N is != 0
; divide the high numerator byte first (16/8 divxu: quotient in the
; low byte of the destination, remainder in the high byte)
mov.b A0H,A3L ; Nn Dd xP 0N
divxu A1L,A3 ; MQ
mov.b A3L,A0H ; Q
; dealt with N, do n
_lab6: mov.b A0L,A3L ; n
divxu A1L,A3 ; mq
mov.b A3L,A0L ; Qq
mov.b A3H,A3L ; m
mov.b #0x0,A3H ; Qq 0m
rts
; D != 0 - which means the denominator is >= 256, so fall back to an
; 8-iteration restoring-division loop on the low byte; the quotient
; bits are shifted into A0L and the remainder accumulates in A3.
divlongway:
mov.b A0H,A3L ; Nn Dd xP 0N
mov.b #0x0,A0H ; high byte of answer has to be zero
mov.b #0x8,A2H ; 8
div8: add.b A0L,A0L ; n*=2
rotxl A3L ; Make remainder bigger
rotxl A3H
sub.w A1,A3 ; remainder -= divisor (trial subtract)
bhs setbit ; set a bit ?
add.w A1,A3 ; no : too far , restore remainder
dec A2H
bne div8 ; next bit
rts
setbit: inc A0L ; do insert bit
dec A2H
bne div8 ; next bit
rts
#endif /* __H8300__ */
#endif /* L_divhi3 */
#ifdef L_divsi3
;; 4 byte integer divides for the H8/300.
;;
;; We have one routine which does all the work and lots of
;; little ones which prepare the args and massage the sign.
;; We bunch all of this into one object file since there are several
;; "supporting routines".
.section .text
.align 2
; Put abs SIs into r0/r1 and r2/r3, and leave a 1 in r6l with sign of rest.
; This function is here to keep branch displacements small.
#ifdef __H8300__
; Plain H8/300: a 32-bit value lives in a register pair and must be
; negated by hand with a not/add/addx ripple-carry chain.
divnorm:
mov.b A0H,A0H ; is the numerator -ve
stc ccr,S2L ; keep the sign in bit 3 of S2L
bge postive
; negate arg
not A0H
not A1H
not A0L
not A1L
add #1,A1L
addx #0,A1H
addx #0,A0L
addx #0,A0H
; (label spelling "postive" is historical; kept as-is)
postive:
mov.b A2H,A2H ; is the denominator -ve
bge postive2
not A2L
not A2H
not A3L
not A3H
add.b #1,A3L
addx #0,A3H
addx #0,A2L
addx #0,A2H
xor.b #0x08,S2L ; toggle the result sign
postive2:
rts
;; Basically the same, except that the sign of the divisor determines
;; the sign.
;; (modnorm records only the numerator's sign - negating the
;; denominator does not toggle S2L bit 3 - giving C remainder
;; semantics: the remainder takes the dividend's sign.)
modnorm:
mov.b A0H,A0H ; is the numerator -ve
stc ccr,S2L ; keep the sign in bit 3 of S2L
bge mpostive
; negate arg
not A0H
not A1H
not A0L
not A1L
add #1,A1L
addx #0,A1H
addx #0,A0L
addx #0,A0H
mpostive:
mov.b A2H,A2H ; is the denominator -ve
bge mpostive2
not A2L
not A2H
not A3L
not A3H
add.b #1,A3L
addx #0,A3H
addx #0,A2L
addx #0,A2H
mpostive2:
rts
#else /* __H8300H__ */
; H8/300H and up: 32-bit registers make the normalization one neg.l.
divnorm:
mov.l A0P,A0P ; is the numerator -ve
stc ccr,S2L ; keep the sign in bit 3 of S2L
bge postive
neg.l A0P ; negate arg
postive:
mov.l A1P,A1P ; is the denominator -ve
bge postive2
neg.l A1P ; negate arg
xor.b #0x08,S2L ; toggle the result sign
postive2:
rts
;; Basically the same, except that the sign of the divisor determines
;; the sign.
modnorm:
mov.l A0P,A0P ; is the numerator -ve
stc ccr,S2L ; keep the sign in bit 3 of S2L
bge mpostive
neg.l A0P ; negate arg
mpostive:
mov.l A1P,A1P ; is the denominator -ve
bge mpostive2
neg.l A1P ; negate arg
mpostive2:
rts
#endif
; numerator in A0/A1
; denominator in A2/A3
; __modsi3: signed 32-bit remainder.  Result takes the dividend's sign
; (modnorm records only that sign in S2L bit 3).
.global LABEL(modsi3)
LABEL_DEF(modsi3)
#ifdef __H8300__
PUSHP S2P
PUSHP S0P
PUSHP S1P
bsr modnorm
bsr divmodsi4
; divmodsi4 left the remainder in S0/S1; move it to the return regs
mov S0,A0
mov S1,A1
bra exitdiv
#else
PUSHP S2P
bsr modnorm
bsr LABEL(divsi3)
; the H8/300H divide leaves the remainder in er3
mov.l er3,er0
bra exitdiv
#endif
;; H8/300H and H8S version of ___udivsi3 is defined later in
;; the file.
; __udivsi3 (plain H8/300 only): unsigned divide, quotient returned.
#ifdef __H8300__
.global LABEL(udivsi3)
LABEL_DEF(udivsi3)
PUSHP S2P
PUSHP S0P
PUSHP S1P
bsr divmodsi4
bra reti
#endif
; __umodsi3: unsigned remainder.
.global LABEL(umodsi3)
LABEL_DEF(umodsi3)
#ifdef __H8300__
PUSHP S2P
PUSHP S0P
PUSHP S1P
bsr divmodsi4
mov S0,A0
mov S1,A1
bra reti
#else
bsr LABEL(udivsi3)
mov.l er3,er0
rts
#endif
; __divsi3: signed divide; normalize, divide unsigned, fix up sign.
.global LABEL(divsi3)
LABEL_DEF(divsi3)
#ifdef __H8300__
PUSHP S2P
PUSHP S0P
PUSHP S1P
jsr divnorm
jsr divmodsi4
#else
PUSHP S2P
jsr divnorm
bsr LABEL(udivsi3)
#endif
; examine what the sign should be
; (shared tail: also reached from __modsi3 above)
exitdiv:
btst #3,S2L
beq reti
; should be -ve - negate the 32-bit result in A0/A1 (or A0P)
#ifdef __H8300__
not A0H
not A1H
not A0L
not A1L
add #1,A1L
addx #0,A1H
addx #0,A0L
addx #0,A0H
#else /* __H8300H__ */
neg.l A0P
#endif
; restore the callee-saved registers pushed by the entry points
reti:
#ifdef __H8300__
POPP S1P
POPP S0P
#endif
POPP S2P
rts
; takes A0/A1 numerator (A0P for H8/300H)
; A2/A3 denominator (A1P for H8/300H)
; returns A0/A1 quotient (A0P for H8/300H)
; S0/S1 remainder (S0P for H8/300H)
; trashes S2H
#ifdef __H8300__
; Core 32/32 unsigned divide for the plain H8/300.
divmodsi4:
sub.w S0,S0 ; zero play area
mov.w S0,S1
; if the top three bytes of the denominator are all zero it fits in
; 8 bits and we can chain the hardware 16/8 divxu byte by byte
mov.b A2H,S2H
or A2L,S2H
or A3H,S2H
bne DenHighNonZero
; skip leading zero bytes of the numerator; the NumByteKZero labels
; are entered when byte K is the first NON-zero byte (historical names)
mov.b A0H,A0H
bne NumByte0Zero
mov.b A0L,A0L
bne NumByte1Zero
mov.b A1H,A1H
bne NumByte2Zero
bra NumByte3Zero
; each step: remainder-so-far (S1H) : next byte -> divxu by A3L,
; quotient byte back into place, remainder carried in S1H
NumByte0Zero:
mov.b A0H,S1L
divxu A3L,S1
mov.b S1L,A0H
NumByte1Zero:
mov.b A0L,S1L
divxu A3L,S1
mov.b S1L,A0L
NumByte2Zero:
mov.b A1H,S1L
divxu A3L,S1
mov.b S1L,A1H
NumByte3Zero:
mov.b A1L,S1L
divxu A3L,S1
mov.b S1L,A1L
; final remainder byte -> S1 (zero-extended); S0 is already zero
mov.b S1H,S1L
mov.b #0x0,S1H
rts
; have to do the divide by shift and test
; Pre-shift the numerator left one byte (quotient high byte must be 0
; since denom >= 256), so only 24 restoring-division iterations remain.
; Shifted numerator/quotient: S1L:A0:A1H, remainder builds in S0:S1.
DenHighNonZero:
mov.b A0H,S1L
mov.b A0L,A0H
mov.b A1H,A0L
mov.b A1L,A1H
mov.b #0,A1L
mov.b #24,S2H ; only do 24 iterations
nextbit:
add.w A1,A1 ; double the answer guess
rotxl A0L
rotxl A0H
rotxl S1L ; double remainder
rotxl S1H
rotxl S0L
rotxl S0H
; 32-bit trial subtract of the denominator from the remainder
sub.w A3,S1 ; does it all fit
subx A2L,S0L
subx A2H,S0H
bhs setone
; it did not fit: restore the remainder (restoring division)
add.w A3,S1 ; no, restore mistake
addx A2L,S0L
addx A2H,S0H
dec S2H
bne nextbit
rts
setone:
inc A1L
dec S2H
bne nextbit
rts
#else /* __H8300H__ */
;; This function also computes the remainder and stores it in er3.
;; __udivsi3 for H8/300H/S/SX: uses the hardware 32/16 divxu.w, with
;; an approximate-quotient correction when the divisor exceeds 16 bits.
.global LABEL(udivsi3)
LABEL_DEF(udivsi3)
mov.w A1E,A1E ; denominator top word 0?
bne DenHighNonZero
; do it the easy way, see page 107 in manual
; (two chained 32/16 divides: high part first, remainder flows into
; the low divide; quotient assembled in er0, remainder in er3)
mov.w A0E,A2
extu.l A2P
divxu.w A1,A2P
mov.w A2E,A0E
divxu.w A1,A0P
mov.w A0E,A3
mov.w A2,A0E
extu.l A3P
rts
; er0 = er0 / er1
; er3 = er0 % er1
; trashes er1 er2
; expects er1 >= 2^16
DenHighNonZero:
mov.l er0,er3
mov.l er1,er2
; Shift dividend and divisor right together until the shifted divisor
; fits in 16 bits.  Plain H8/300H shifts one bit at a time; H8S/SX
; have shlr.l #2 and shift two bits per step, with a fix-up when the
; divisor's leading bit lands on an odd position.
#ifdef __H8300H__
divmod_L21:
shlr.l er0
shlr.l er2 ; make divisor < 2^16
mov.w e2,e2
bne divmod_L21
#else
shlr.l #2,er2 ; make divisor < 2^16
mov.w e2,e2
beq divmod_L22A
divmod_L21:
shlr.l #2,er0
divmod_L22:
shlr.l #2,er2 ; make divisor < 2^16
mov.w e2,e2
bne divmod_L21
divmod_L22A:
; odd/even shift-count fix-up: peek at the bit shifted out last
rotxl.w r2
bcs divmod_L23
shlr.l er0
bra divmod_L24
divmod_L23:
rotxr.w r2
shlr.l #2,er0
divmod_L24:
#endif
;; At this point,
;; er0 contains shifted dividend
;; er1 contains divisor
;; er2 contains shifted divisor
;; er3 contains dividend, later remainder
divxu.w r2,er0 ; r0 now contains the approximate quotient (AQ)
extu.l er0
beq divmod_L25
; AQ can overestimate by at most one; compute with AQ-1, then check
subs #1,er0 ; er0 = AQ - 1
mov.w e1,r2
mulxu.w r0,er2 ; er2 = upper (AQ - 1) * divisor
sub.w r2,e3 ; dividend - 65536 * er2
mov.w r1,r2
mulxu.w r0,er2 ; compute er3 = remainder (tentative)
sub.l er2,er3 ; er3 = dividend - (AQ - 1) * divisor
divmod_L25:
cmp.l er1,er3 ; is divisor < remainder?
blo divmod_L26
; remainder still >= divisor: quotient was AQ after all
adds #1,er0
sub.l er1,er3 ; correct the remainder
divmod_L26:
rts
#endif
#endif /* L_divsi3 */
#ifdef L_mulhi3
;; HImode multiply.
; The H8/300 only has an 8*8->16 multiply.
; The answer is the same as:
;
; product = (srca.l * srcb.l) + ((srca.h * srcb.l) + (srcb.h * srca.l)) * 256
; (we can ignore A1.h * A0.h cause that will all off the top)
; A0 in
; A1 in
; A0 answer
;; __mulhi3: A0 = low 16 bits of A0 * A1, built from three 8x8
;; partial products via mulxu.  A2/A3 are trashed.
#ifdef __H8300__
.section .text
.align 2
.global LABEL(mulhi3)
LABEL_DEF(mulhi3)
mov.b A1L,A2L ; A2l gets srcb.l
mulxu A0L,A2 ; A2 gets first sub product
mov.b A0H,A3L ; prepare for
mulxu A1L,A3 ; second sub product
; only the low byte of each cross product survives in the result's
; high byte - the rest would overflow past bit 15
add.b A3L,A2H ; sum first two terms
mov.b A1H,A3L ; third sub product
mulxu A0L,A3
add.b A3L,A2H ; almost there
mov.w A2,A0 ; that is
rts
#endif
#endif /* L_mulhi3 */
#ifdef L_mulsi3
;; SImode multiply.
;;
;; I think that shift and add may be sufficient for this. Using the
;; supplied 8x8->16 would need 10 ops of 14 cycles each + overhead. This way
;; the inner loop uses maybe 20 cycles + overhead, but terminates
;; quickly on small args.
;;
;; A0/A1 src_a
;; A2/A3 src_b
;;
;; while (a)
;; {
;; if (a & 1)
;; r += b;
;; a >>= 1;
;; b <<= 1;
;; }
.section .text
.align 2
#ifdef __H8300__
;; __mulsi3 (plain H8/300): classic shift-and-add; returns the low
;; 32 bits of the product in A0/A1.  Accumulator r lives in S0/S1,
;; which are saved/restored around the loop.
.global LABEL(mulsi3)
LABEL_DEF(mulsi3)
PUSHP S0P
PUSHP S1P
sub.w S0,S0
sub.w S1,S1
; while (a)
_top: mov.w A0,A0
bne _more
mov.w A1,A1
beq _done
_more: ; if (a & 1)
bld #0,A1L
bcc _nobit
; r += b
add.w A3,S1
addx A2L,S0L
addx A2H,S0H
_nobit:
; a >>= 1
shlr A0H
rotxr A0L
rotxr A1H
rotxr A1L
; b <<= 1
add.w A3,A3
addx A2L,A2L
addx A2H,A2H
bra _top
_done:
mov.w S0,A0
mov.w S1,A1
POPP S1P
POPP S0P
rts
#else /* __H8300H__ */
;
; mulsi3 for H8/300H - based on Renesas SH implementation
;
; by Toshiyasu Morita
;
; Old code:
;
; 16b * 16b = 372 states (worst case)
; 32b * 32b = 724 states (worst case)
;
; New code:
;
; 16b * 16b = 48 states
; 16b * 32b = 72 states
; 32b * 32b = 92 states
;
;; er0 = er0 * er1 (low 32 bits): one 16x16 mulxu for the low parts,
;; plus the two cross products added into the high word; the
;; high*high term falls entirely above bit 31 and is skipped.
.global LABEL(mulsi3)
LABEL_DEF(mulsi3)
mov.w r1,r2 ; ( 2 states) b * d
mulxu r0,er2 ; (22 states)
mov.w e0,r3 ; ( 2 states) a * d
beq L_skip1 ; ( 4 states)
mulxu r1,er3 ; (22 states)
add.w r3,e2 ; ( 2 states)
L_skip1:
mov.w e1,r3 ; ( 2 states) c * b
beq L_skip2 ; ( 4 states)
mulxu r0,er3 ; (22 states)
add.w r3,e2 ; ( 2 states)
L_skip2:
mov.l er2,er0 ; ( 2 states)
rts ; (10 states)
#endif
#endif /* L_mulsi3 */
#ifdef L_fixunssfsi_asm
/* For the h8300 we use asm to save some bytes, to
allow more programs to fit into the tiny address
space. For the H8/300H and H8S, the C version is good enough. */
#ifdef __H8300__
/* We still treat NANs different than libgcc2.c, but then, the
behavior is undefined anyways. */
;; __fixunssfsi: float (in A0/A1, A0H = sign + high exponent bits)
;; -> unsigned 32-bit int.  Values below the 2^31 threshold are
;; delegated to the signed conversion __fixsfsi.
;; NOTE(review): the 0x4f threshold byte appears to mark the smallest
;; float >= 2^31 in the IEEE-754 top-byte encoding - confirm against
;; the format layout before relying on this comment.
.global LABEL(fixunssfsi)
LABEL_DEF(fixunssfsi)
cmp.b #0x4f,r0h
bge Large_num
; small enough for the signed conversion: tail-call it
jmp @LABEL(fixsfsi)
Large_num:
; top byte > 0x4f: too big (or negative/NaN region) -> saturate
bhi L_huge_num
; top byte == 0x4f: flip bit 7 of A0L, then test the sign of the
; adjusted byte to pick the byte-shift path below
xor.b #0x80,A0L
bmi L_shift8
L_huge_num:
; saturate to 0xFFFFFFFF
mov.w #65535,A0
mov.w A0,A1
rts
L_shift8:
; shift the 32-bit value in A0/A1 left by one whole byte
mov.b A0L,A0H
mov.b A1H,A0L
mov.b A1L,A1H
mov.b #0,A1L
rts
#endif
#endif /* L_fixunssfsi_asm */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.