repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
aappleby/metron
| 2,345
|
tests/risc-v/instructions/addi.S
|
# See LICENSE for license details.
#*****************************************************************************
# addi.S
#-----------------------------------------------------------------------------
#
# Test addi instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, addi, 0x00000000, 0x00000000, 0x000 );
TEST_IMM_OP( 3, addi, 0x00000002, 0x00000001, 0x001 );
TEST_IMM_OP( 4, addi, 0x0000000a, 0x00000003, 0x007 );
TEST_IMM_OP( 5, addi, 0xfffffffffffff800, 0x0000000000000000, 0x800 );
TEST_IMM_OP( 6, addi, 0xffffffff80000000, 0xffffffff80000000, 0x000 );
TEST_IMM_OP( 7, addi, 0xffffffff7ffff800, 0xffffffff80000000, 0x800 );
TEST_IMM_OP( 8, addi, 0x00000000000007ff, 0x00000000, 0x7ff );
TEST_IMM_OP( 9, addi, 0x000000007fffffff, 0x7fffffff, 0x000 );
TEST_IMM_OP( 10, addi, 0x00000000800007fe, 0x7fffffff, 0x7ff );
TEST_IMM_OP( 11, addi, 0xffffffff800007ff, 0xffffffff80000000, 0x7ff );
TEST_IMM_OP( 12, addi, 0x000000007ffff7ff, 0x000000007fffffff, 0x800 );
TEST_IMM_OP( 13, addi, 0xffffffffffffffff, 0x0000000000000000, 0xfff );
TEST_IMM_OP( 14, addi, 0x0000000000000000, 0xffffffffffffffff, 0x001 );
TEST_IMM_OP( 15, addi, 0xfffffffffffffffe, 0xffffffffffffffff, 0xfff );
TEST_IMM_OP( 16, addi, 0x0000000080000000, 0x7fffffff, 0x001 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, addi, 24, 13, 11 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, addi, 24, 13, 11 );
TEST_IMM_DEST_BYPASS( 19, 1, addi, 23, 13, 10 );
TEST_IMM_DEST_BYPASS( 20, 2, addi, 22, 13, 9 );
TEST_IMM_SRC1_BYPASS( 21, 0, addi, 24, 13, 11 );
TEST_IMM_SRC1_BYPASS( 22, 1, addi, 23, 13, 10 );
TEST_IMM_SRC1_BYPASS( 23, 2, addi, 22, 13, 9 );
TEST_IMM_ZEROSRC1( 24, addi, 32, 32 );
TEST_IMM_ZERODEST( 25, addi, 33, 50 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
aappleby/metron
| 2,282
|
tests/risc-v/instructions/lb.S
|
# See LICENSE for license details.
#*****************************************************************************
# lb.S
#-----------------------------------------------------------------------------
#
# Test lb instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_LD_OP( 2, lb, 0xffffffffffffffff, 0, tdat );
TEST_LD_OP( 3, lb, 0x0000000000000000, 1, tdat );
TEST_LD_OP( 4, lb, 0xfffffffffffffff0, 2, tdat );
TEST_LD_OP( 5, lb, 0x000000000000000f, 3, tdat );
# Test with negative offset
TEST_LD_OP( 6, lb, 0xffffffffffffffff, -3, tdat4 );
TEST_LD_OP( 7, lb, 0x0000000000000000, -2, tdat4 );
TEST_LD_OP( 8, lb, 0xfffffffffffffff0, -1, tdat4 );
TEST_LD_OP( 9, lb, 0x000000000000000f, 0, tdat4 );
# Test with a negative base
TEST_CASE( 10, x5, 0xffffffffffffffff, \
la x1, tdat; \
addi x1, x1, -32; \
lb x5, 32(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0x0000000000000000, \
la x1, tdat; \
addi x1, x1, -6; \
lb x5, 7(x1); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_LD_DEST_BYPASS( 12, 0, lb, 0xfffffffffffffff0, 1, tdat2 );
TEST_LD_DEST_BYPASS( 13, 1, lb, 0x000000000000000f, 1, tdat3 );
TEST_LD_DEST_BYPASS( 14, 2, lb, 0x0000000000000000, 1, tdat1 );
TEST_LD_SRC1_BYPASS( 15, 0, lb, 0xfffffffffffffff0, 1, tdat2 );
TEST_LD_SRC1_BYPASS( 16, 1, lb, 0x000000000000000f, 1, tdat3 );
TEST_LD_SRC1_BYPASS( 17, 2, lb, 0x0000000000000000, 1, tdat1 );
#-------------------------------------------------------------
# Test write-after-write hazard
#-------------------------------------------------------------
TEST_CASE( 18, x2, 2, \
la x5, tdat; \
lb x2, 0(x5); \
li x2, 2; \
)
TEST_CASE( 19, x2, 2, \
la x5, tdat; \
lb x2, 0(x5); \
nop; \
li x2, 2; \
)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .byte 0xff
tdat2: .byte 0x00
tdat3: .byte 0xf0
tdat4: .byte 0x0f
RVTEST_DATA_END
|
aappleby/metron
| 2,651
|
tests/risc-v/instructions/xor.S
|
# See LICENSE for license details.
#*****************************************************************************
# xor.S
#-----------------------------------------------------------------------------
#
# Test xor instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Logical tests
#-------------------------------------------------------------
TEST_RR_OP( 2, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_OP( 3, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_OP( 4, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_OP( 5, xor, 0x00ff00ff, 0xf00ff00f, 0xf0f0f0f0 );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 6, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC2_EQ_DEST( 7, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_EQ_DEST( 8, xor, 0x00000000, 0xff00ff00 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 9, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_DEST_BYPASS( 10, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_DEST_BYPASS( 11, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 12, 0, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 13, 0, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 14, 0, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 15, 1, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC12_BYPASS( 16, 1, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC12_BYPASS( 17, 2, 0, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 18, 0, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 19, 0, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 20, 0, 2, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 21, 1, 0, xor, 0xf00ff00f, 0xff00ff00, 0x0f0f0f0f );
TEST_RR_SRC21_BYPASS( 22, 1, 1, xor, 0xff00ff00, 0x0ff00ff0, 0xf0f0f0f0 );
TEST_RR_SRC21_BYPASS( 23, 2, 0, xor, 0x0ff00ff0, 0x00ff00ff, 0x0f0f0f0f );
TEST_RR_ZEROSRC1( 24, xor, 0xff00ff00, 0xff00ff00 );
TEST_RR_ZEROSRC2( 25, xor, 0x00ff00ff, 0x00ff00ff );
TEST_RR_ZEROSRC12( 26, xor, 0 );
TEST_RR_ZERODEST( 27, xor, 0x11111111, 0x22222222 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
aappleby/metron
| 3,122
|
tests/risc-v/instructions/sub.S
|
# See LICENSE for license details.
#*****************************************************************************
# sub.S
#-----------------------------------------------------------------------------
#
# Test sub instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_RR_OP( 2, sub, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 );
TEST_RR_OP( 3, sub, 0x0000000000000000, 0x0000000000000001, 0x0000000000000001 );
TEST_RR_OP( 4, sub, 0xfffffffffffffffc, 0x0000000000000003, 0x0000000000000007 );
TEST_RR_OP( 5, sub, 0x0000000000008000, 0x0000000000000000, 0xffffffffffff8000 );
TEST_RR_OP( 6, sub, 0xffffffff80000000, 0xffffffff80000000, 0x0000000000000000 );
TEST_RR_OP( 7, sub, 0xffffffff80008000, 0xffffffff80000000, 0xffffffffffff8000 );
TEST_RR_OP( 8, sub, 0xffffffffffff8001, 0x0000000000000000, 0x0000000000007fff );
TEST_RR_OP( 9, sub, 0x000000007fffffff, 0x000000007fffffff, 0x0000000000000000 );
TEST_RR_OP( 10, sub, 0x000000007fff8000, 0x000000007fffffff, 0x0000000000007fff );
TEST_RR_OP( 11, sub, 0xffffffff7fff8001, 0xffffffff80000000, 0x0000000000007fff );
TEST_RR_OP( 12, sub, 0x0000000080007fff, 0x000000007fffffff, 0xffffffffffff8000 );
TEST_RR_OP( 13, sub, 0x0000000000000001, 0x0000000000000000, 0xffffffffffffffff );
TEST_RR_OP( 14, sub, 0xfffffffffffffffe, 0xffffffffffffffff, 0x0000000000000001 );
TEST_RR_OP( 15, sub, 0x0000000000000000, 0xffffffffffffffff, 0xffffffffffffffff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_RR_SRC1_EQ_DEST( 16, sub, 2, 13, 11 );
TEST_RR_SRC2_EQ_DEST( 17, sub, 3, 14, 11 );
TEST_RR_SRC12_EQ_DEST( 18, sub, 0, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_RR_DEST_BYPASS( 19, 0, sub, 2, 13, 11 );
TEST_RR_DEST_BYPASS( 20, 1, sub, 3, 14, 11 );
TEST_RR_DEST_BYPASS( 21, 2, sub, 4, 15, 11 );
TEST_RR_SRC12_BYPASS( 22, 0, 0, sub, 2, 13, 11 );
TEST_RR_SRC12_BYPASS( 23, 0, 1, sub, 3, 14, 11 );
TEST_RR_SRC12_BYPASS( 24, 0, 2, sub, 4, 15, 11 );
TEST_RR_SRC12_BYPASS( 25, 1, 0, sub, 2, 13, 11 );
TEST_RR_SRC12_BYPASS( 26, 1, 1, sub, 3, 14, 11 );
TEST_RR_SRC12_BYPASS( 27, 2, 0, sub, 4, 15, 11 );
TEST_RR_SRC21_BYPASS( 28, 0, 0, sub, 2, 13, 11 );
TEST_RR_SRC21_BYPASS( 29, 0, 1, sub, 3, 14, 11 );
TEST_RR_SRC21_BYPASS( 30, 0, 2, sub, 4, 15, 11 );
TEST_RR_SRC21_BYPASS( 31, 1, 0, sub, 2, 13, 11 );
TEST_RR_SRC21_BYPASS( 32, 1, 1, sub, 3, 14, 11 );
TEST_RR_SRC21_BYPASS( 33, 2, 0, sub, 4, 15, 11 );
TEST_RR_ZEROSRC1( 34, sub, 15, -15 );
TEST_RR_ZEROSRC2( 35, sub, 32, 32 );
TEST_RR_ZEROSRC12( 36, sub, 0 );
TEST_RR_ZERODEST( 37, sub, 16, 30 );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
aappleby/metron
| 2,201
|
tests/risc-v/instructions/sltiu.S
|
# See LICENSE for license details.
#*****************************************************************************
# sltiu.S
#-----------------------------------------------------------------------------
#
# Test sltiu instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Arithmetic tests
#-------------------------------------------------------------
TEST_IMM_OP( 2, sltiu, 0, 0x0000000000000000, 0x000 );
TEST_IMM_OP( 3, sltiu, 0, 0x0000000000000001, 0x001 );
TEST_IMM_OP( 4, sltiu, 1, 0x0000000000000003, 0x007 );
TEST_IMM_OP( 5, sltiu, 0, 0x0000000000000007, 0x003 );
TEST_IMM_OP( 6, sltiu, 1, 0x0000000000000000, 0x800 );
TEST_IMM_OP( 7, sltiu, 0, 0xffffffff80000000, 0x000 );
TEST_IMM_OP( 8, sltiu, 1, 0xffffffff80000000, 0x800 );
TEST_IMM_OP( 9, sltiu, 1, 0x0000000000000000, 0x7ff );
TEST_IMM_OP( 10, sltiu, 0, 0x000000007fffffff, 0x000 );
TEST_IMM_OP( 11, sltiu, 0, 0x000000007fffffff, 0x7ff );
TEST_IMM_OP( 12, sltiu, 0, 0xffffffff80000000, 0x7ff );
TEST_IMM_OP( 13, sltiu, 1, 0x000000007fffffff, 0x800 );
TEST_IMM_OP( 14, sltiu, 1, 0x0000000000000000, 0xfff );
TEST_IMM_OP( 15, sltiu, 0, 0xffffffffffffffff, 0x001 );
TEST_IMM_OP( 16, sltiu, 0, 0xffffffffffffffff, 0xfff );
#-------------------------------------------------------------
# Source/Destination tests
#-------------------------------------------------------------
TEST_IMM_SRC1_EQ_DEST( 17, sltiu, 1, 11, 13 );
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_IMM_DEST_BYPASS( 18, 0, sltiu, 0, 15, 10 );
TEST_IMM_DEST_BYPASS( 19, 1, sltiu, 1, 10, 16 );
TEST_IMM_DEST_BYPASS( 20, 2, sltiu, 0, 16, 9 );
TEST_IMM_SRC1_BYPASS( 21, 0, sltiu, 1, 11, 15 );
TEST_IMM_SRC1_BYPASS( 22, 1, sltiu, 0, 17, 8 );
TEST_IMM_SRC1_BYPASS( 23, 2, sltiu, 1, 12, 14 );
TEST_IMM_ZEROSRC1( 24, sltiu, 1, 0xfff );
TEST_IMM_ZERODEST( 25, sltiu, 0x00ff00ff, 0xfff );
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
RVTEST_DATA_END
|
aappleby/metron
| 2,610
|
tests/risc-v/instructions/sb.S
|
# See LICENSE for license details.
#*****************************************************************************
# sb.S
#-----------------------------------------------------------------------------
#
# Test sb instruction.
#
#include "riscv_test.h"
#include "test_macros.h"
RVTEST_RV64U
RVTEST_CODE_BEGIN
#-------------------------------------------------------------
# Basic tests
#-------------------------------------------------------------
TEST_ST_OP( 2, lb, sb, 0xffffffffffffffaa, 0, tdat );
TEST_ST_OP( 3, lb, sb, 0x0000000000000000, 1, tdat );
TEST_ST_OP( 4, lh, sb, 0xffffffffffffefa0, 2, tdat );
TEST_ST_OP( 5, lb, sb, 0x000000000000000a, 3, tdat );
# Test with negative offset
TEST_ST_OP( 6, lb, sb, 0xffffffffffffffaa, -3, tdat8 );
TEST_ST_OP( 7, lb, sb, 0x0000000000000000, -2, tdat8 );
TEST_ST_OP( 8, lb, sb, 0xffffffffffffffa0, -1, tdat8 );
TEST_ST_OP( 9, lb, sb, 0x000000000000000a, 0, tdat8 );
# Test with a negative base
TEST_CASE( 10, x5, 0x78, \
la x1, tdat9; \
li x2, 0x12345678; \
addi x4, x1, -32; \
sb x2, 32(x4); \
lb x5, 0(x1); \
)
# Test with unaligned base
TEST_CASE( 11, x5, 0xffffffffffffff98, \
la x1, tdat9; \
li x2, 0x00003098; \
addi x1, x1, -6; \
sb x2, 7(x1); \
la x4, tdat10; \
lb x5, 0(x4); \
)
#-------------------------------------------------------------
# Bypassing tests
#-------------------------------------------------------------
TEST_ST_SRC12_BYPASS( 12, 0, 0, lb, sb, 0xffffffffffffffdd, 0, tdat );
TEST_ST_SRC12_BYPASS( 13, 0, 1, lb, sb, 0xffffffffffffffcd, 1, tdat );
TEST_ST_SRC12_BYPASS( 14, 0, 2, lb, sb, 0xffffffffffffffcc, 2, tdat );
TEST_ST_SRC12_BYPASS( 15, 1, 0, lb, sb, 0xffffffffffffffbc, 3, tdat );
TEST_ST_SRC12_BYPASS( 16, 1, 1, lb, sb, 0xffffffffffffffbb, 4, tdat );
TEST_ST_SRC12_BYPASS( 17, 2, 0, lb, sb, 0xffffffffffffffab, 5, tdat );
TEST_ST_SRC21_BYPASS( 18, 0, 0, lb, sb, 0x33, 0, tdat );
TEST_ST_SRC21_BYPASS( 19, 0, 1, lb, sb, 0x23, 1, tdat );
TEST_ST_SRC21_BYPASS( 20, 0, 2, lb, sb, 0x22, 2, tdat );
TEST_ST_SRC21_BYPASS( 21, 1, 0, lb, sb, 0x12, 3, tdat );
TEST_ST_SRC21_BYPASS( 22, 1, 1, lb, sb, 0x11, 4, tdat );
TEST_ST_SRC21_BYPASS( 23, 2, 0, lb, sb, 0x01, 5, tdat );
li a0, 0xef
la a1, tdat
sb a0, 3(a1)
TEST_PASSFAIL
RVTEST_CODE_END
.data
RVTEST_DATA_BEGIN
TEST_DATA
tdat:
tdat1: .byte 0xef
tdat2: .byte 0xef
tdat3: .byte 0xef
tdat4: .byte 0xef
tdat5: .byte 0xef
tdat6: .byte 0xef
tdat7: .byte 0xef
tdat8: .byte 0xef
tdat9: .byte 0xef
tdat10: .byte 0xef
RVTEST_DATA_END
|
aardappel/lobster
| 12,846
|
dev/external/SDL/src/video/arm/pixman-arm-neon-asm.S
|
/*
* Copyright © 2009 Nokia Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
*/
/*
* Copyright (c) 2018 RISC OS Open Ltd
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.fpu neon
.arch armv7a
.object_arch armv4
.eabi_attribute 10, 0 /* suppress Tag_FP_arch */
.eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */
.arm
.altmacro
.p2align 2
#include "pixman-arm-asm.h"
#include "pixman-arm-neon-asm.h"
/* Global configuration options and preferences */
/*
* The code can optionally make use of unaligned memory accesses to improve
* performance of handling leading/trailing pixels for each scanline.
* Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for
* example in linux if unaligned memory accesses are not configured to
* generate.exceptions.
*/
.set RESPECT_STRICT_ALIGNMENT, 1
/*
* Set default prefetch type. There is a choice between the following options:
*
* PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work
* as NOP to workaround some HW bugs or for whatever other reason)
*
* PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
* advanced prefetch intruduces heavy overhead)
*
* PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
* which can run ARM and NEON instructions simultaneously so that extra ARM
* instructions do not add (many) extra cycles, but improve prefetch efficiency)
*
* Note: some types of function can't support advanced prefetch and fallback
* to simple one (those which handle 24bpp pixels)
*/
.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
/* Prefetch distance in pixels for simple prefetch */
.set PREFETCH_DISTANCE_SIMPLE, 64
/******************************************************************************/
/* We can actually do significantly better than the Pixman macros, at least for
* the case of fills, by using a carefully scheduled inner loop. Cortex-A53
* shows an improvement of up to 78% in ideal cases (large fills to L1 cache).
*/
.macro generate_fillrect_function name, bpp, log2Bpp
/*
* void name(int32_t w, int32_t h, uint8_t *dst, int32_t dst_stride, uint8_t src);
* On entry:
* a1 = width, pixels
* a2 = height, rows
* a3 = pointer to top-left destination pixel
* a4 = stride, pixels
* [sp] = pixel value to fill with
* Within the function:
* v1 = width remaining
* v2 = vst offset
* v3 = alternate pointer
* ip = data ARM register
*/
pixman_asm_function name
vld1.\bpp {d0[],d1[]}, [sp]
sub a4, a1
vld1.\bpp {d2[],d3[]}, [sp]
cmp a1, #(15+64) >> \log2Bpp
push {v1-v3,lr}
vmov ip, s0
blo 51f
/* Long-row case */
mov v2, #64
1: mov v1, a1
ands v3, a3, #15
beq 2f
/* Leading pixels */
rsb v3, v3, #16 /* number of leading bytes until 16-byte aligned */
sub v1, v1, v3, lsr #\log2Bpp
rbit v3, v3
.if bpp <= 16
.if bpp == 8
tst a3, #1 /* bit 0 unaffected by rsb so can avoid register interlock */
strneb ip, [a3], #1
tst v3, #1<<30
.else
tst a3, #2 /* bit 1 unaffected by rsb (assuming halfword alignment) so can avoid register interlock */
.endif
strneh ip, [a3], #2
.endif
movs v3, v3, lsl #3
vstmcs a3!, {s0}
vstmmi a3!, {d0}
2: sub v1, v1, #64 >> \log2Bpp /* simplifies inner loop termination */
add v3, a3, #32
/* Inner loop */
3: vst1.\bpp {q0-q1}, [a3 :128], v2
subs v1, v1, #64 >> \log2Bpp
vst1.\bpp {q0-q1}, [v3 :128], v2
bhs 3b
/* Trailing pixels */
4: movs v1, v1, lsl #27 + \log2Bpp
bcc 5f
vst1.\bpp {q0-q1}, [a3 :128]!
5: bpl 6f
vst1.\bpp {q0}, [a3 :128]!
6: movs v1, v1, lsl #2
vstmcs a3!, {d0}
vstmmi a3!, {s0}
.if bpp <= 16
movs v1, v1, lsl #2
strcsh ip, [a3], #2
.if bpp == 8
strmib ip, [a3], #1
.endif
.endif
subs a2, a2, #1
add a3, a3, a4, lsl #\log2Bpp
bhi 1b
pop {v1-v3,pc}
/* Short-row case */
51: movs v1, a1
.if bpp == 8
tst a3, #3
beq 53f
52: subs v1, v1, #1
blo 57f
strb ip, [a3], #1
tst a3, #3
bne 52b
.elseif bpp == 16
tstne a3, #2
subne v1, v1, #1
strneh ip, [a3], #2
.endif
53: cmp v1, #32 >> \log2Bpp
bcc 54f
vst1.\bpp {q0-q1}, [a3]!
sub v1, v1, #32 >> \log2Bpp
/* Trailing pixels */
54: movs v1, v1, lsl #27 + \log2Bpp
bcc 55f
vst1.\bpp {q0-q1}, [a3]!
55: bpl 56f
vst1.\bpp {q0}, [a3]!
56: movs v1, v1, lsl #2
vstmcs a3!, {d0}
vstmmi a3!, {s0}
.if bpp <= 16
movs v1, v1, lsl #2
strcsh ip, [a3], #2
.if bpp == 8
strmib ip, [a3], #1
.endif
.endif
subs a2, a2, #1
add a3, a3, a4, lsl #\log2Bpp
bhi 51b
57: pop {v1-v3,pc}
.endfunc
.endm
generate_fillrect_function FillRect32ARMNEONAsm, 32, 2
generate_fillrect_function FillRect16ARMNEONAsm, 16, 1
generate_fillrect_function FillRect8ARMNEONAsm, 8, 0
/******************************************************************************/
.macro RGBtoRGBPixelAlpha_process_pixblock_head
vmvn d30, d3 /* get inverted source alpha */
vmov d31, d7 /* dest alpha is always unchanged */
vmull.u8 q14, d0, d3
vmlal.u8 q14, d4, d30
vmull.u8 q0, d1, d3
vmlal.u8 q0, d5, d30
vmull.u8 q1, d2, d3
vmlal.u8 q1, d6, d30
vrshr.u16 q2, q14, #8
vrshr.u16 q3, q0, #8
vraddhn.u16 d28, q14, q2
vrshr.u16 q2, q1, #8
vraddhn.u16 d29, q0, q3
vraddhn.u16 d30, q1, q2
.endm
.macro RGBtoRGBPixelAlpha_process_pixblock_tail
/* nothing */
.endm
.macro RGBtoRGBPixelAlpha_process_pixblock_tail_head
vld4.8 {d0-d3}, [SRC]!
PF add PF_X, PF_X, #8
vst4.8 {d28-d31}, [DST_W :128]!
PF tst PF_CTL, #0xF
vld4.8 {d4-d7}, [DST_R :128]!
PF addne PF_X, PF_X, #8
vmvn d30, d3 /* get inverted source alpha */
vmov d31, d7 /* dest alpha is always unchanged */
vmull.u8 q14, d0, d3
PF subne PF_CTL, PF_CTL, #1
vmlal.u8 q14, d4, d30
PF cmp PF_X, ORIG_W
vmull.u8 q0, d1, d3
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
vmlal.u8 q0, d5, d30
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vmull.u8 q1, d2, d3
PF subge PF_X, PF_X, ORIG_W
vmlal.u8 q1, d6, d30
PF subges PF_CTL, PF_CTL, #0x10
vrshr.u16 q2, q14, #8
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
vrshr.u16 q3, q0, #8
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vraddhn.u16 d28, q14, q2
vrshr.u16 q2, q1, #8
vraddhn.u16 d29, q0, q3
vraddhn.u16 d30, q1, q2
.endm
generate_composite_function \
BlitRGBtoRGBPixelAlphaARMNEONAsm, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
RGBtoRGBPixelAlpha_process_pixblock_head, \
RGBtoRGBPixelAlpha_process_pixblock_tail, \
RGBtoRGBPixelAlpha_process_pixblock_tail_head
/******************************************************************************/
.macro ARGBto565PixelAlpha_process_pixblock_head
vmvn d6, d3
vshr.u8 d1, #2
vshr.u8 d3, #3
vshr.u8 d0, #3
vshrn.u16 d7, q2, #3
vshrn.u16 d25, q2, #8
vbic.i16 q2, #0xe0
vshr.u8 d6, #3
vshr.u8 d7, #2
vshr.u8 d2, #3
vmovn.u16 d24, q2
vshr.u8 d25, #3
vmull.u8 q13, d1, d3
vmlal.u8 q13, d7, d6
vmull.u8 q14, d0, d3
vmlal.u8 q14, d24, d6
vmull.u8 q15, d2, d3
vmlal.u8 q15, d25, d6
.endm
.macro ARGBto565PixelAlpha_process_pixblock_tail
vsra.u16 q13, #5
vsra.u16 q14, #5
vsra.u16 q15, #5
vrshr.u16 q13, #5
vrshr.u16 q14, #5
vrshr.u16 q15, #5
vsli.u16 q14, q13, #5
vsli.u16 q14, q15, #11
.endm
.macro ARGBto565PixelAlpha_process_pixblock_tail_head
vld4.8 {d0-d3}, [SRC]!
PF add PF_X, PF_X, #8
vsra.u16 q13, #5
PF tst PF_CTL, #0xF
vsra.u16 q14, #5
PF addne PF_X, PF_X, #8
vsra.u16 q15, #5
PF subne PF_CTL, PF_CTL, #1
vrshr.u16 q13, #5
PF cmp PF_X, ORIG_W
vrshr.u16 q14, #5
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
vrshr.u16 q15, #5
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vld1.8 {d4-d5}, [DST_R]!
PF subge PF_X, PF_X, ORIG_W
vsli.u16 q14, q13, #5
PF subges PF_CTL, PF_CTL, #0x10
vsli.u16 q14, q15, #11
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
vst1.8 {q14}, [DST_W :128]!
vmvn d6, d3
vshr.u8 d1, #2
vshr.u8 d3, #3
vshr.u8 d0, #3
vshrn.u16 d7, q2, #3
vshrn.u16 d25, q2, #8
vbic.i16 q2, #0xe0
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vshr.u8 d6, #3
vshr.u8 d7, #2
vshr.u8 d2, #3
vmovn.u16 d24, q2
vshr.u8 d25, #3
vmull.u8 q13, d1, d3
vmlal.u8 q13, d7, d6
vmull.u8 q14, d0, d3
vmlal.u8 q14, d24, d6
vmull.u8 q15, d2, d3
vmlal.u8 q15, d25, d6
.endm
generate_composite_function \
BlitARGBto565PixelAlphaARMNEONAsm, 32, 0, 16, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
6, /* prefetch distance */ \
default_init, \
default_cleanup, \
ARGBto565PixelAlpha_process_pixblock_head, \
ARGBto565PixelAlpha_process_pixblock_tail, \
ARGBto565PixelAlpha_process_pixblock_tail_head
|
aardappel/lobster
| 19,392
|
dev/external/SDL/src/video/arm/pixman-arm-simd-asm.S
|
/*
* Copyright (c) 2016 RISC OS Open Ltd
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*/
/* Prevent the stack from becoming executable */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.arch armv6
.object_arch armv4
.arm
.altmacro
.p2align 2
#include "pixman-arm-asm.h"
#include "pixman-arm-simd-asm.h"
/* A head macro should do all processing which results in an output of up to
* 16 bytes, as far as the final load instruction. The corresponding tail macro
* should complete the processing of the up-to-16 bytes. The calling macro will
* sometimes choose to insert a preload or a decrement of X between them.
* cond ARM condition code for code block
* numbytes Number of output bytes that should be generated this time
* firstreg First WK register in which to place output
* unaligned_src Whether to use non-wordaligned loads of source image
* unaligned_mask Whether to use non-wordaligned loads of mask image
* preload If outputting 16 bytes causes 64 bytes to be read, whether an extra preload should be output
*/
/******************************************************************************/
/* Solid-fill initialisers: fetch the fill colour from the argument stack
 * and replicate it across SRC, MASK and both stride registers, so that
 * four registers hold the pattern and the store phase can write up to
 * 16 bytes at a time. */
.macro FillRect32_init
        ldr     SRC, [sp, #ARGS_STACK_OFFSET]   @ 32bpp colour: used as-is
        mov     STRIDE_S, SRC
        mov     MASK, SRC
        mov     STRIDE_M, SRC
.endm
.macro FillRect16_init
        ldrh    SRC, [sp, #ARGS_STACK_OFFSET]   @ 16bpp colour
        orr     SRC, SRC, lsl #16               @ duplicate into both halfwords
        mov     STRIDE_S, SRC
        mov     MASK, SRC
        mov     STRIDE_M, SRC
.endm
.macro FillRect8_init
        ldrb    SRC, [sp, #ARGS_STACK_OFFSET]   @ 8bpp colour
        orr     SRC, SRC, lsl #8                @ duplicate into all 4 bytes...
        orr     SRC, SRC, lsl #16               @ ...of the word
        mov     STRIDE_S, SRC
        mov     MASK, SRC
        mov     STRIDE_M, SRC
.endm
/* Store phase: temporarily alias WK4-WK7 onto the four registers holding
 * the replicated colour so the generic pixst macro can store 'numbytes'
 * bytes to DST, then drop the aliases again. */
.macro FillRect_process_tail cond, numbytes, firstreg
    WK4     .req    SRC
    WK5     .req    STRIDE_S
    WK6     .req    MASK
    WK7     .req    STRIDE_M
        pixst   cond, numbytes, 4, DST
    .unreq  WK4
    .unreq  WK5
    .unreq  WK6
    .unreq  WK7
.endm
/* Instantiate the three solid-fill blitters.  Every positional argument
 * of generate_composite_function must be comma-separated; the commas
 * after the flags word, the init macro, and the cleanup/process-head
 * placeholders were lost in this copy and are restored here. */
generate_composite_function \
    FillRect32ARMSIMDAsm, 0, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH, \
    0, /* prefetch distance doesn't apply */ \
    FillRect32_init, \
    nop_macro, /* newline */ \
    nop_macro, /* cleanup */ \
    nop_macro, /* process head */ \
    FillRect_process_tail
generate_composite_function \
    FillRect16ARMSIMDAsm, 0, 0, 16, \
    FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH, \
    0, /* prefetch distance doesn't apply */ \
    FillRect16_init, \
    nop_macro, /* newline */ \
    nop_macro, /* cleanup */ \
    nop_macro, /* process head */ \
    FillRect_process_tail
generate_composite_function \
    FillRect8ARMSIMDAsm, 0, 0, 8, \
    FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_PSR | FLAG_PROCESS_DOES_STORE | FLAG_PROCESS_PRESERVES_SCRATCH, \
    0, /* prefetch distance doesn't apply */ \
    FillRect8_init, \
    nop_macro, /* newline */ \
    nop_macro, /* cleanup */ \
    nop_macro, /* process head */ \
    FillRect_process_tail
/******************************************************************************/
/* This differs from the over_8888_8888 routine in Pixman in that the destination
* alpha component is always left unchanged, and RGB components are not
* premultiplied by alpha. It differs from BlitRGBtoRGBPixelAlpha in that
* renormalisation is done by multiplying by 257/256 (with rounding) rather than
* simply shifting right by 8 bits - removing the need to special-case alpha=0xff.
*/
.macro RGBtoRGBPixelAlpha_init
        /* STRIDE_S and ORIG_W are reused as per-pixel scratch, so ask the
         * framework to save/restore them around each scanline. */
        line_saved_regs STRIDE_S, ORIG_W
        mov     MASK, #0x80                     @ rounding constant for the *alpha multiplies
.endm
/* Blend one translucent pixel: for each of the three colour channels,
 * d += (s - d) * alpha, where the product is renormalised by 257/256
 * via the 'add tmp, tmp, asr #8' steps (see the comment block above:
 * this avoids special-casing alpha == 0xff).  'half' holds 0x80.
 * Channel layout follows ARGB8888: byte 0 = blue, byte 1 = green,
 * byte 2 = red, byte 3 = alpha. */
.macro RGBtoRGBPixelAlpha_1pixel_translucent s, d, tmp0, tmp1, tmp2, tmp3, half
        uxtb    tmp3, s                         @ source blue
        uxtb    tmp0, d                         @ dest blue
        sub     tmp0, tmp3, tmp0                @ blue delta (signed)
        uxtb    tmp3, s, ror #16                @ source red
        uxtb    tmp1, d, ror #16                @ dest red
        sub     tmp1, tmp3, tmp1                @ red delta
        uxtb    tmp3, s, ror #8                 @ source green
        mov     s, s, lsr #24                   @ s = source alpha (s no longer needed whole)
        uxtb    tmp2, d, ror #8                 @ dest green
        sub     tmp2, tmp3, tmp2                @ green delta
        smlabb  tmp0, tmp0, s, half             @ delta * alpha + 0x80
        smlabb  tmp1, tmp1, s, half
        smlabb  tmp2, tmp2, s, half
        add     tmp0, tmp0, asr #8              @ *257/256 renormalisation
        add     tmp1, tmp1, asr #8
        add     tmp2, tmp2, asr #8
        pkhbt   tmp0, tmp0, tmp1, lsl #16       @ pack blue (low) and red (high) halfwords
        and     tmp2, tmp2, #0xff00             @ isolate green byte
        uxtb16  tmp0, tmp0, ror #8              @ take the integer byte of each halfword
        orr     tmp0, tmp0, tmp2                @ merge green into the delta word
        uadd8   d, d, tmp0                      @ per-byte add of deltas into dest
.endm
/* Opaque source pixel: copy source RGB but keep the destination's alpha
 * byte unchanged (see the routine comment above). */
.macro RGBtoRGBPixelAlpha_1pixel_opaque s, d
        and     d, d, #0xff000000               @ keep dest alpha only
        bic     s, s, #0xff000000               @ drop source alpha
        orr     d, d, s
.endm
.macro RGBtoRGBPixelAlpha_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
        /* Load 1/2/4 source+dest pixels and pre-classify the whole group:
         * SCRATCH accumulates the OR of all source words (Z flag from the
         * final tst => every alpha is 0, i.e. fully transparent) while
         * ORIG_W accumulates the AND (used by the tail's all-opaque test). */
 .if numbytes == 16
        ldm     SRC!, {WK0, WK1}
        ldm     SRC!, {STRIDE_S, STRIDE_M}      @ pixels 3+4 parked in stride regs
        ldrd    WK2, WK3, [DST], #16
        orr     SCRATCH, WK0, WK1
        and     ORIG_W, WK0, WK1
        orr     SCRATCH, SCRATCH, STRIDE_S
        and     ORIG_W, ORIG_W, STRIDE_S
        orr     SCRATCH, SCRATCH, STRIDE_M
        and     ORIG_W, ORIG_W, STRIDE_M
        tst     SCRATCH, #0xff000000            @ Z set => all four alphas are 0
 .elseif numbytes == 8
        ldm     SRC!, {WK0, WK1}
        ldm     DST!, {WK2, WK3}
        orr     SCRATCH, WK0, WK1
        and     ORIG_W, WK0, WK1
        tst     SCRATCH, #0xff000000
 .else // numbytes == 4
        ldr     WK0, [SRC], #4
        ldr     WK2, [DST], #4
        tst     WK0, #0xff000000                @ Z set => single pixel transparent
 .endif
.endm
.macro RGBtoRGBPixelAlpha_process_tail cond, numbytes, firstreg
        /* Flags/values left by process_head: Z set => whole group is
         * transparent (nothing to do); ORIG_W >= 0xff000000 => every
         * source alpha is 0xff (take the cheap opaque path). */
        beq     20f     @ all transparent
 .if numbytes == 16
        cmp     ORIG_W, #0xff000000
        bhs     10f     @ all opaque
        RGBtoRGBPixelAlpha_1pixel_translucent WK0, WK2, STRIDE_S, STRIDE_M, SCRATCH, ORIG_W, MASK
        RGBtoRGBPixelAlpha_1pixel_translucent WK1, WK3, STRIDE_S, STRIDE_M, SCRATCH, ORIG_W, MASK
        strd    WK2, WK3, [DST, #-16]
        ldrd    WK0, WK1, [SRC, #-8]            @ reload second pixel pair (regs were scratch)
        ldrd    WK2, WK3, [DST, #-8]
        RGBtoRGBPixelAlpha_1pixel_translucent WK0, WK2, STRIDE_S, STRIDE_M, SCRATCH, ORIG_W, MASK
        RGBtoRGBPixelAlpha_1pixel_translucent WK1, WK3, STRIDE_S, STRIDE_M, SCRATCH, ORIG_W, MASK
        b       19f
10:     RGBtoRGBPixelAlpha_1pixel_opaque WK0, WK2
        RGBtoRGBPixelAlpha_1pixel_opaque WK1, WK3
        strd    WK2, WK3, [DST, #-16]
        ldrd    WK0, WK1, [SRC, #-8]
        ldrd    WK2, WK3, [DST, #-8]
        RGBtoRGBPixelAlpha_1pixel_opaque WK0, WK2
        RGBtoRGBPixelAlpha_1pixel_opaque WK1, WK3
19:     strd    WK2, WK3, [DST, #-8]
 .elseif numbytes == 8
        cmp     ORIG_W, #0xff000000
        bhs     10f     @ all opaque
        RGBtoRGBPixelAlpha_1pixel_translucent WK0, WK2, STRIDE_S, STRIDE_M, SCRATCH, ORIG_W, MASK
        RGBtoRGBPixelAlpha_1pixel_translucent WK1, WK3, STRIDE_S, STRIDE_M, SCRATCH, ORIG_W, MASK
        b       19f
10:     RGBtoRGBPixelAlpha_1pixel_opaque WK0, WK2
        RGBtoRGBPixelAlpha_1pixel_opaque WK1, WK3
19:     strd    WK2, WK3, [DST, #-8]
 .else // numbytes == 4
        cmp     WK0, #0xff000000
        bhs     10f     @ opaque
        RGBtoRGBPixelAlpha_1pixel_translucent WK0, WK2, STRIDE_S, STRIDE_M, SCRATCH, ORIG_W, MASK
        b       19f
10:     RGBtoRGBPixelAlpha_1pixel_opaque WK0, WK2
19:     str     WK2, [DST, #-4]
 .endif
20:
.endm
/* Instantiate the ARGB8888-over-ARGB8888 per-pixel-alpha blitter. */
generate_composite_function \
    BlitRGBtoRGBPixelAlphaARMSIMDAsm, 32, 0, 32, \
    FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_WK0, \
    2, /* prefetch distance */ \
    RGBtoRGBPixelAlpha_init, \
    nop_macro, /* newline */ \
    nop_macro, /* cleanup */ \
    RGBtoRGBPixelAlpha_process_head, \
    RGBtoRGBPixelAlpha_process_tail
/******************************************************************************/
.macro ARGBto565PixelAlpha_init
        /* STRIDE_D/STRIDE_S/ORIG_W double as per-pixel scratch; spill them. */
        line_saved_regs STRIDE_D, STRIDE_S, ORIG_W
        mov     MASK, #0x001f                   @ 5-bit red/blue mask...
        mov     STRIDE_M, #0x0010               @ red/blue rounding half...
        orr     MASK, MASK, MASK, lsl #16       @ ...duplicated -> 0x001f001f (rbmask)
        orr     STRIDE_M, STRIDE_M, STRIDE_M, lsl #16  @ ...duplicated -> 0x00100010 (rbhalf)
.endm
/* STRIDE_S (holding ghalf = 0x200, the green rounding constant) is
 * clobbered during pixel processing, so it must be re-materialised at
 * the start of every scanline. */
.macro ARGBto565PixelAlpha_newline
        mov     STRIDE_S, #0x0200
.endm
/* On entry:
* s1 holds 1 32bpp source pixel
* d holds 1 16bpp destination pixel
* rbmask, rbhalf, ghalf hold 0x001f001f, 0x00100010, 0x00000200 respectively
* other registers are temporaries
* On exit:
* Constant registers preserved
*/
.macro ARGBto565PixelAlpha_1pixel_translucent s, d, rbmask, rbhalf, ghalf, alpha, rb, g, misc
        mov     alpha, s, lsr #27               @ 5-bit alpha (top 5 of the 8-bit channel)
        and     misc, s, #0xfc00                @ source green, 6 bits at <<10
        and     g, d, #0x07e0                   @ dest green field
        pkhbt   rb, d, d, lsl #5                @ align dest red with blue in separate halfwords
        rsb     misc, g, misc, lsr #5           @ green delta = src - dst
        and     s, rbmask, s, lsr #3            @ source r/b as 5-bit fields
        and     rb, rbmask, rb                  @ dest r/b as 5-bit fields
        sub     s, s, rb                        @ r/b deltas
        smlabb  misc, misc, alpha, ghalf        @ green: delta*alpha + rounding half
        mla     s, s, alpha, rbhalf             @ r/b: delta*alpha + rounding half
        add     misc, misc, misc, lsl #5        @ *33; with >>10 below this ~= /31 per alpha unit
        add     g, g, misc, asr #10             @ blend green
        add     s, s, s, lsl #5                 @ same *33 renormalisation for r/b
        and     g, g, #0x07e0
        add     rb, rb, s, asr #10              @ blend r/b
        and     rb, rb, rbmask
        pkhbt   rb, rb, rb, lsl #11             @ move fields back to 565 positions
        orr     d, rb, g
        orr     d, d, rb, lsr #16               @ fold in the field from the high halfword
.endm
/* On entry:
 * s1 holds 1 32bpp source pixel
 * d holds 1 16bpp destination pixel
 * rbmask holds 0x001f001f
 * On exit:
 * Constant registers preserved
 */
.macro ARGBto565PixelAlpha_1pixel_opaque s, d, rbmask
        and     d, rbmask, s, lsr #3            @ 8-bit r/b -> 5-bit fields
        and     s, s, #0xfc00                   @ 8-bit green -> 6 bits at <<10
        orr     d, d, d, lsr #5                 @ shift red into its 565 slot
        orr     d, d, s, lsr #5                 @ insert green
.endm
/* On entry:
* s1, s2 hold 2 32bpp source pixels
* d holds 2 16bpp destination pixels
* rbmask, rbhalf, ghalf hold 0x001f001f, 0x00100010, 0x00000200 respectively
* other registers are temporaries
* On exit:
* Constant registers preserved
* Blended results have been written through destination pointer
*/
/* Two-pixel translucent blend: the same per-channel algorithm as the
 * 1-pixel macro above, software-pipelined so the arithmetic for the
 * second pixel (s2) overlaps the tail of the first (s1).  Results are
 * stored directly through DST (at #-4 and #-2, i.e. the two halfwords
 * just consumed by the head macro) rather than returned in 'd'. */
.macro ARGBto565PixelAlpha_2pixels_translucent s1, s2, d, rbmask, rbhalf, ghalf, alpha, rb, g, misc
        mov     alpha, s1, lsr #27              @ pixel 1: 5-bit alpha
        and     misc, s1, #0xfc00
        and     g, d, #0x07e0
        pkhbt   rb, d, d, lsl #5
        rsb     misc, g, misc, lsr #5
        and     s1, rbmask, s1, lsr #3
        and     rb, rbmask, rb
        sub     s1, s1, rb
        smlabb  misc, misc, alpha, ghalf
        mla     s1, s1, alpha, rbhalf
        uxth    d, d, ror #16                   @ d = second dest pixel (pixel 2 begins)
        add     misc, misc, misc, lsl #5
        mov     alpha, s2, lsr #27              @ pixel 2: 5-bit alpha
        add     g, g, misc, asr #10
        add     s1, s1, s1, lsl #5
        and     g, g, #0x07e0
        add     rb, rb, s1, asr #10
        and     rb, rb, rbmask
        and     misc, s2, #0xfc00
        pkhbt   rb, rb, rb, lsl #11
        and     s1, d, #0x07e0                  @ s1 now reused as pixel-2 green
        pkhbt   d, d, d, lsl #5
        rsb     misc, s1, misc, lsr #5
        and     s2, rbmask, s2, lsr #3
        and     d, rbmask, d
        sub     s2, s2, d
        smlabb  misc, misc, alpha, ghalf
        mla     s2, s2, alpha, rbhalf
        orr     alpha, rb, g                    @ assemble pixel-1 result
        add     misc, misc, misc, lsl #5
        orr     alpha, alpha, rb, lsr #16
        add     s1, s1, misc, asr #10
        add     s2, s2, s2, lsl #5
        and     s1, s1, #0x07e0
        add     d, d, s2, asr #10
        and     d, d, rbmask
        strh    alpha, [DST, #-4]               @ write pixel 1
        pkhbt   d, d, d, lsl #11
        orr     alpha, d, s1                    @ assemble pixel-2 result
        orr     alpha, alpha, d, lsr #16
        strh    alpha, [DST, #-2]               @ write pixel 2
.endm
/* On entry:
* s1, s2 hold 2 32bpp source pixels
* rbmask holds 0x001f001f
* other registers are temporaries
* On exit:
* Constant registers preserved
* Blended results have been written through destination pointer
*/
.macro ARGBto565PixelAlpha_2pixels_opaque s1, s2, d, rbmask, g
        /* Opaque 2-pixel path: pure 8888 -> 565 conversion of both source
         * pixels, stored straight through DST. */
        and     g, s1, #0xfc00
        and     d, rbmask, s1, lsr #3
        and     s1, rbmask, s2, lsr #3
        orr     d, d, d, lsr #5
        orr     d, d, g, lsr #5
        and     g, s2, #0xfc00
        strh    d, [DST, #-4]                   @ write pixel 1
        orr     s1, s1, s1, lsr #5
        orr     s1, s1, g, lsr #5
        strh    s1, [DST, #-2]                  @ write pixel 2
.endm
/* Load 2 source pixels + 2 dest pixels and classify the pair:
 * Z flag (from tst) => both transparent; ORIG_W (AND of sources)
 * is checked by the tail for the all-opaque fast path. */
.macro ARGBto565PixelAlpha_2pixels_head
        ldrd    WK0, WK1, [SRC], #8
        ldr     WK2, [DST], #4
        orr     SCRATCH, WK0, WK1
        and     ORIG_W, WK0, WK1
        tst     SCRATCH, #0xff000000
.endm
.macro ARGBto565PixelAlpha_2pixels_tail
        beq     20f     @ all transparent
        cmp     ORIG_W, #0xff000000
        bhs     10f     @ all opaque
        ARGBto565PixelAlpha_2pixels_translucent WK0, WK1, WK2, MASK, STRIDE_M, STRIDE_S, STRIDE_D, WK3, SCRATCH, ORIG_W
        b       20f
10:     ARGBto565PixelAlpha_2pixels_opaque WK0, WK1, WK2, MASK, SCRATCH
20:
.endm
.macro ARGBto565PixelAlpha_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
        /* Unlike the other routines here, most of the work happens in the
         * "head" phase: complete 2-pixel head+tail pairs are expanded
         * inline, leaving only the final pair (or single pixel) for the
         * process_tail macro below. */
 .if numbytes == 16
        ARGBto565PixelAlpha_2pixels_head
        ARGBto565PixelAlpha_2pixels_tail
        ARGBto565PixelAlpha_2pixels_head
        ARGBto565PixelAlpha_2pixels_tail
 .endif
 .if numbytes >= 8
        ARGBto565PixelAlpha_2pixels_head
        ARGBto565PixelAlpha_2pixels_tail
 .endif
 .if numbytes >= 4
        ARGBto565PixelAlpha_2pixels_head
 .else // numbytes == 2
        ldr     WK0, [SRC], #4                  @ one 32bpp source pixel
        ldrh    WK2, [DST], #2                  @ one 16bpp dest pixel
        tst     WK0, #0xff000000
 .endif
.endm
.macro ARGBto565PixelAlpha_process_tail cond, numbytes, firstreg
 .if numbytes >= 4
        ARGBto565PixelAlpha_2pixels_tail
 .else // numbytes == 2
        beq     20f     @ all transparent
        cmp     WK0, #0xff000000
        bhs     10f     @ opaque
        ARGBto565PixelAlpha_1pixel_translucent WK0, WK2, MASK, STRIDE_M, STRIDE_S, STRIDE_D, WK3, SCRATCH, ORIG_W
        b       19f
10:     ARGBto565PixelAlpha_1pixel_opaque WK0, WK2, MASK
19:     strh    WK2, [DST, #-2]
20:
 .endif
.endm
/* Instantiate the ARMv6 ARGB8888-over-RGB565 per-pixel-alpha blitter. */
generate_composite_function \
    BlitARGBto565PixelAlphaARMSIMDAsm, 32, 0, 16, \
    FLAG_DST_READWRITE | FLAG_BRANCH_OVER | FLAG_PROCESS_CORRUPTS_PSR | FLAG_PROCESS_DOES_STORE | FLAG_SPILL_LINE_VARS | FLAG_PROCESS_CORRUPTS_WK0, \
    2, /* prefetch distance */ \
    ARGBto565PixelAlpha_init, \
    ARGBto565PixelAlpha_newline, \
    nop_macro, /* cleanup */ \
    ARGBto565PixelAlpha_process_head, \
    ARGBto565PixelAlpha_process_tail
/******************************************************************************/
/* Swap the red and blue channels of 32bpp pixels held in WK registers.
 * The 'reg' arguments are WK register numbers; altmacro '&' concatenation
 * forms the register alias (this copy had "WK&reg" mangled into "WK(R)"
 * by an HTML-entity pass, which broke assembly - restored here). */
.macro BGR888toRGB888_1pixel cond, reg, tmp
        uxtb16&cond  tmp, WK&reg, ror #8       @ tmp = 00aa00gg (odd bytes)
        uxtb16&cond  WK&reg, WK&reg, ror #16   @ WK  = 00bb00rr (even bytes, swapped)
        orr&cond     WK&reg, WK&reg, tmp, lsl #8  @ recombine as aabbggrr -> R/B swapped
.endm
/* Two-pixel version of the above, interleaved for dual-issue. */
.macro BGR888toRGB888_2pixels cond, reg1, reg2, tmp1, tmp2
        uxtb16&cond  tmp1, WK&reg1, ror #8
        uxtb16&cond  WK&reg1, WK&reg1, ror #16
        uxtb16&cond  tmp2, WK&reg2, ror #8
        uxtb16&cond  WK&reg2, WK&reg2, ror #16
        orr&cond     WK&reg1, WK&reg1, tmp1, lsl #8
        orr&cond     WK&reg2, WK&reg2, tmp2, lsl #8
.endm
.macro BGR888toRGB888_process_head cond, numbytes, firstreg
        /* Head just loads pixels; all conversion happens in the tail. */
        pixld   cond, numbytes, firstreg, SRC, unaligned_src
.endm
.macro BGR888toRGB888_process_tail cond, numbytes, firstreg
 .if numbytes >= 8
        BGR888toRGB888_2pixels cond, %(firstreg+0), %(firstreg+1), MASK, STRIDE_M
 .if numbytes == 16
        BGR888toRGB888_2pixels cond, %(firstreg+2), %(firstreg+3), MASK, STRIDE_M
 .endif
 .else @ numbytes == 4
        BGR888toRGB888_1pixel cond, %(firstreg+0), MASK
 .endif
.endm
/* Instantiate the BGR888 -> RGB888 channel-swap blitter. */
generate_composite_function \
    Blit_BGR888_RGB888ARMSIMDAsm, 32, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_COND_EXEC | FLAG_PROCESS_PRESERVES_SCRATCH, \
    2, /* prefetch distance */ \
    nop_macro, /* init */ \
    nop_macro, /* newline */ \
    nop_macro, /* cleanup */ \
    BGR888toRGB888_process_head, \
    BGR888toRGB888_process_tail
/******************************************************************************/
.macro RGB444toRGB888_init
        ldr     MASK, =0x0f0f0f0f
        /* Set GE[3:0] to 0101 so SEL instructions do what we want */
        msr     CPSR_s, #0x50000
.endm
/* Expand one 4444 pixel to 8888 by nibble duplication.  'reg' is a WK
 * register number combined via altmacro '&' concatenation (this copy
 * had "WK&reg" mangled into "WK(R)" by an HTML-entity pass - restored). */
.macro RGB444toRGB888_1pixel reg, mask, tmp
        pkhbt   WK&reg, WK&reg, WK&reg, lsl #12  @ 0000aaaarrrrggggaaaarrrrggggbbbb
        and     WK&reg, mask, WK&reg             @ 0000aaaa0000gggg0000rrrr0000bbbb
        orr     WK&reg, WK&reg, WK&reg, lsl #4   @ aaaaaaaaggggggggrrrrrrrrbbbbbbbb
        pkhtb   tmp, WK&reg, WK&reg, asr #8      @ aaaaaaaaggggggggggggggggrrrrrrrr
        pkhbt   WK&reg, WK&reg, WK&reg, lsl #8   @ ggggggggrrrrrrrrrrrrrrrrbbbbbbbb
        sel     WK&reg, WK&reg, tmp              @ aaaaaaaarrrrrrrrggggggggbbbbbbbb
.endm
/* Expand two packed 4444 pixels (both halfwords of WK&in) into two 8888
 * pixels (WK&out1 low pixel, WK&out2 high pixel) by nibble duplication.
 * Relies on GE flags = 0101 set up by RGB444toRGB888_init for SEL. */
.macro RGB444toRGB888_2pixels in, out1, out2, mask, tmp1, tmp2
        and     tmp1, mask, WK&in               @ 0000RRRR0000BBBB0000rrrr0000bbbb
        and     tmp2, mask, WK&in, lsr #4       @ 0000AAAA0000GGGG0000aaaa0000gggg
        orr     tmp1, tmp1, tmp1, lsl #4        @ RRRRRRRRBBBBBBBBrrrrrrrrbbbbbbbb
        orr     tmp2, tmp2, tmp2, lsl #4        @ AAAAAAAAGGGGGGGGaaaaaaaagggggggg
        pkhtb   WK&out2, tmp2, tmp1, asr #16    @ AAAAAAAAGGGGGGGGRRRRRRRRBBBBBBBB
        pkhbt   WK&out1, tmp1, tmp2, lsl #16    @ aaaaaaaaggggggggrrrrrrrrbbbbbbbb
        pkhtb   tmp2, WK&out2, WK&out2, asr #8  @ AAAAAAAAGGGGGGGGGGGGGGGGRRRRRRRR
        pkhtb   tmp1, WK&out1, WK&out1, asr #8  @ aaaaaaaaggggggggggggggggrrrrrrrr
        pkhbt   WK&out1, WK&out1, WK&out1, lsl #8  @ ggggggggrrrrrrrrrrrrrrrrbbbbbbbb
        pkhbt   WK&out2, WK&out2, WK&out2, lsl #8  @ GGGGGGGGRRRRRRRRRRRRRRRRBBBBBBBB
        sel     WK&out1, WK&out1, tmp1          @ aaaaaaaarrrrrrrrggggggggbbbbbbbb
        sel     WK&out2, WK&out2, tmp2          @ AAAAAAAARRRRRRRRGGGGGGGGBBBBBBBB
.endm
.macro RGB444toRGB888_process_head cond, numbytes, firstreg, unaligned_src, unaligned_mask, preload
        /* Source is 16bpp, dest 32bpp: load half as many bytes as we output. */
        pixld   cond, numbytes/2, firstreg, SRC, unaligned_src
.endm
.macro RGB444toRGB888_process_tail cond, numbytes, firstreg
 .if numbytes >= 8
 .if numbytes == 16
        /* Expand the second input register first: each expansion writes two
         * registers in place, so going high-to-low avoids clobbering input
         * that has not been converted yet. */
        RGB444toRGB888_2pixels %(firstreg+1), %(firstreg+2), %(firstreg+3), MASK, STRIDE_M, SCRATCH
 .endif
        RGB444toRGB888_2pixels %(firstreg+0), %(firstreg+0), %(firstreg+1), MASK, STRIDE_M, SCRATCH
 .else @ numbytes == 4
        RGB444toRGB888_1pixel %(firstreg+0), MASK, SCRATCH
 .endif
.endm
/* Instantiate the RGB444 -> RGB888 expansion blitter. */
generate_composite_function \
    Blit_RGB444_RGB888ARMSIMDAsm, 16, 0, 32, \
    FLAG_DST_WRITEONLY | FLAG_BRANCH_OVER, \
    2, /* prefetch distance */ \
    RGB444toRGB888_init, \
    nop_macro, /* newline */ \
    nop_macro, /* cleanup */ \
    RGB444toRGB888_process_head, \
    RGB444toRGB888_process_tail
|
aardappel/lobster
| 19,181
|
dev/include/Box2D/Particle/b2ParticleAssembly.neon.s
|
@
@ Copyright (c) 2014 Google, Inc.
@
@ This software is provided 'as-is', without any express or implied
@ warranty. In no event will the authors be held liable for any damages
@ arising from the use of this software.
@ Permission is granted to anyone to use this software for any purpose,
@ including commercial applications, and to alter it and redistribute it
@ freely, subject to the following restrictions:
@ 1. The origin of this software must not be misrepresented; you must not
@ claim that you wrote the original software. If you use this software
@ in a product, an acknowledgment in the product documentation would be
@ appreciated but is not required.
@ 2. Altered source versions must be plainly marked as such, and must not be
@ misrepresented as being the original software.
@ 3. This notice may not be removed or altered from any source distribution.
@
.text
.syntax unified
.balign 4
.global CalculateTags_Simd
.thumb_func
CalculateTags_Simd:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ int CalculateTags_Simd(const b2Vec2* positions,
@                        int count,
@                        const float& inverseDiameter,
@                        uint32* outTags)
@
@ r0: *positions
@ r1: count
@ r2: &inverseDiameter
@ r3: *outTags
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ q0 == x
@ q1 == y
@ q2 ==
@ q3 ==
@ q4 ==
@ q5 ==
@ q6 ==
@ q7 ==
@ q8 ==
@ q9 ==
@ q10 ==
@ q11 ==
@ q12 == inverseDiameter
@ q13 == xScale
@ q14 == xOffset
@ q15 == yOffset
@ Load constants. Literals are > 32, so must load as integers first.
        vld1.f32        {d24[],d25[]}, [r2]     @ q12 = inverseDiameter
        vmov.i32        q13, #0x100             @ q13 = xScale = 1 << 8
        vmov.i32        q14, #0x80000           @ q14 = xOffset = (1 << 8) * (1 << 11)
                                                @     = (1 << 19) = 524288
        vmov.i32        q15, #0x800             @ q15 = yOffset = 1 << 11 = 2048
        vcvt.f32.u32    q13, q13                @ convert to float
        vcvt.f32.u32    q14, q14
        vcvt.f32.u32    q15, q15
@ Calculate tags four at a time, from positions.
@ NOTE(review): the loop consumes exactly 4 positions per iteration with
@ no scalar remainder path -- presumably the caller guarantees count is a
@ positive multiple of 4 (or that over-processing the tail is safe); confirm.
.L_CalculateTags_MainLoop:
        @ We consume 32-bytes per iteration, so prefetch 4 iterations ahead.
        @ TODO: experiment with different prefetch lengths on different
        @ architectures.
        pld             [r0, #128]              @ Prefetch position data
        @ {q0, q1} == xPosition and yPosition
        @ Four values in each. q0 = (x0, x1, x2, x3)
        vld2.f32        {q0, q1}, [r0]!         @ Read in positions; increment ptr
        @ Calculate tags four at a time.
        vmul.f32        q0, q0, q12             @ q0 = x = xPosition * inverseDiameter
        vmul.f32        q1, q1, q12             @ q1 = y = yPosition * inverseDiameter
        vmul.f32        q0, q0, q13             @ q0 = x * xScale
        vadd.f32        q1, q1, q15             @ q1 = y + yOffset
        vadd.f32        q0, q0, q14             @ q0 = x * xScale + xOffset
        vcvt.u32.f32    q1, q1                  @ q1 = (uint32)(y + yOffset)
        vcvt.u32.f32    q0, q0                  @ q0 = (uint32)(x * xScale + xOffset)
        vsli.u32        q0, q1, #20             @ q0 = tag
                                                @    = ((uint32)(y + yOffset) <<yShift)
                                                @    + (uint32)(xScale * x + xOffset)
        @ Decrement loop counter; sets the 'gt' flag used in 'bgt' below.
        @ Pipelining is best if there are instructions between the 'subs' and
        @ 'bgt' instructions, since it takes a few cycles for the result of
        @ 'subs' to propagate to the flags register.
        subs            r1, r1, #4
        @ Write out, ignoring index.
        pld             [r3, #64]               @ Prefetch output tag array
        vst1.f32        {q0}, [r3]!             @ write out tags; increment ptr
        bgt             .L_CalculateTags_MainLoop
.L_CalculateTags_Return:
        bx              lr
.balign 4
.thumb_func
@
@ Once four contacts have been found, calculate their weights and
@ normals (using SIMD, so all at once).
@
@ Also, grab their flags from the flags buffer, and OR them together.
@ This flag grabbing is slow because we access the flag buffer in a
@ random order. We use prefetch instructions 'pld' to minimize the
@ cost of cache misses.
@
FindContacts_PostProcess:
        @ Contract (from the call sites below):
        @ In:  r4 = first of 4 packed contacts awaiting post-processing
        @      r7 = flags array base (constant)
        @      q12 = 1.0f, q14 = 1/diameter (constants, read-only)
        @ Out: r3 += 16, r5 += 4; r4 advanced past the 4 expanded contacts
        @ Clobbers: r8-r12, q0-q3, q8-q11, and flag words in the contact area.
        @ Preload first four flag addresses into cache.
        @ Note: hardware only has four preload slots.
        ldrh    r9, [r4]
        ldrh    r10, [r4, #2]
        ldrh    r11, [r4, #16]
        ldrh    r12, [r4, #18]
        pld     [r7, r9, lsl #2]
        pld     [r7, r10, lsl #2]
        pld     [r7, r11, lsl #2]
        pld     [r7, r12, lsl #2]
        @ q0 = packedIndices -- indices output to b2ParticleContact
        @ q1 = distBtParticlesSq -- will be used to calculate weight
        @ q2 = diffX -- will be used to calculate normal
        @ q3 = diffY -- will be used to calculate normal
        add     r8, r4, #32
        vld4.f32 {d0, d2, d4, d6}, [r4]
        vld4.f32 {d1, d3, d5, d7}, [r8]
        @ Use distSq to estimate 1 / dist (one Newton-Raphson step).
        vrsqrte.f32 q8, q1              @ q8 = 1 / dist -- (rough estimate)
        vmul.f32 q9, q8, q1             @ q9 = 1 / dist * distSq -- (appr 'dist')
        vrsqrts.f32 q9, q9, q8          @ q9 = (3 - 1/dist * dist) / 2 -- (error)
        vmul.f32 q8, q8, q9             @ q8 = (error) / dist -- (estimate)
        vcgt.f32 q9, q8, #0             @ q9 = (1 / dist > 0) (true if not NaN)
        vand    q8, q8, q9              @ q8 = 1 / dist if valid, or 0 if NaN
        @ Since we expand the output to include 'weight', we need to preserve
        @ subsequent contacts. Note that there may be up to 7 contacts waiting
        @ to be post-processed, since we output contacts in up-to groups of 4.
        add     r8, r4, #64
        vldmia  r8, {q9, q10, q11}      @ save the 3 quadwords we are about to overwrite
        @ Load first four flags, 'or' them in pairs, then write to destination.
        ldr     r9, [r7, r9, lsl #2]
        ldr     r10, [r7, r10, lsl #2]
        ldr     r11, [r7, r11, lsl #2]
        ldr     r12, [r7, r12, lsl #2]
        orr     r9, r9, r10
        orr     r11, r11, r12
        str     r9, [r4, #16]
        str     r11, [r4, #36]
        @ Preload the next four flags into cache.
        ldrh    r9, [r4, #32]
        ldrh    r10, [r4, #34]
        ldrh    r11, [r4, #48]
        ldrh    r12, [r4, #50]
        pld     [r7, r9, lsl #2]
        pld     [r7, r10, lsl #2]
        pld     [r7, r11, lsl #2]
        pld     [r7, r12, lsl #2]
        @ Calculate normal and weight.
        vmul.f32 q1, q1, q8             @ q1 = distSq / dist = dist
        vmul.f32 q2, q2, q8             @ q2 = normX = diffX / dist
        vmul.f32 q1, q1, q14            @ q1 = dist / diameter
        vmul.f32 q3, q3, q8             @ q3 = normY = diffY / dist
        vsub.f32 q1, q12, q1            @ q1 = weight = 1 - dist / diameter
        @ Store again, making room for 'weight' member variable this time.
        @ TODO OPT: Interleave with 'or' instructions below.
        mov     r8, #20                 @ r8 = 20 = sizeof(b2ParticleContact)
        vst4.f32 {d0[0], d2[0], d4[0], d6[0]}, [r4], r8
        vst4.f32 {d0[1], d2[1], d4[1], d6[1]}, [r4], r8
        vst4.f32 {d1[0], d3[0], d5[0], d7[0]}, [r4], r8
        vst4.f32 {d1[1], d3[1], d5[1], d7[1]}, [r4], r8
        mov     r8, #12                 @ r8 = 12 = sizeof(FindContactInput)
        @ Load next four flags, 'or' them in pairs, then write to destination.
        ldr     r9, [r7, r9, lsl #2]
        ldr     r10, [r7, r10, lsl #2]
        ldr     r11, [r7, r11, lsl #2]
        ldr     r12, [r7, r12, lsl #2]
        orr     r9, r9, r10
        orr     r11, r11, r12
        str     r9, [r4, #-24]
        str     r11, [r4, #-4]
        @ Update output pointers. Since we output 4 contacts, and added 4 bytes
        @ for 'weight' on each contact, the output pointer must be advanced by
        @ 16 bytes.
        add     r3, r3, #16
        add     r5, r5, #4              @ numContacts += 4
        @ Restore subsequent contacts. That is, contacts that have yet to be
        @ post-processed.
        vstmia  r4, {q9, q10, q11}
        bx      lr
@ When used with the 'vtbl' instruction, grabs the first byte of every
@ word, and places it in the first word. Fills the second word with 0s.
@ For example, (0xFFFFFFFF, 0x00000000, 0x00000000, 0xFFFFFFFF)
@ ==> (0xFF0000FF, 0x00000000)
CONST_IS_CLOSE_TABLE_INDICES:
        .byte   0                       @ low byte of lane 0
        .byte   4                       @ low byte of lane 1
        .byte   8                       @ low byte of lane 2
        .byte   12                      @ low byte of lane 3
        .byte   0xFF                    @ out-of-range index: vtbl yields 0
        .byte   0xFF
        .byte   0xFF
        .byte   0xFF
        .balign 4
.global FindContactsFromChecks_Simd
.thumb_func
FindContactsFromChecks_Simd:
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ void FindContactsFromChecks_Simd(
@             const FindContactInput* reordered,
@             const FindContactCheck* checks,
@             int numChecks,
@             const float& particleDiameterSq,
@             const float& particleDiameterInv,
@             const uint32* flags,
@             b2GrowableBuffer<b2ParticleContact>& contacts)
@
@ Parameters
@ r0: *reordered
@ r1: *checks
@ r2: numChecks
@ r3: particleDiameterSq
@ [sp]: particleDiameterInv
@ [sp+4]: *flags
@ [sp+8]: contacts
@
@ Persistent Variables
@ r0: *reordered (constant)
@ r1: *checks (advance once per iteration)
@ r2: numChecks (decrement once per iteration)
@ r3: *out <-- next free entry of outContacts array
@ r4: *postProcess <-- entry on-deck to be post-processed
@ r5: numContacts
@ r6: maxSafeContacts
@ r7: *flags (constant)
@ r8: 20 = sizeof(b2ParticleContact), or
@     12 = sizeof(FindContactInput) (constants)
@
@ Scratch Variables
@ r9:
@ r10: address of current particle position
@ r11: address of comparator particle positions
@ r12: isClose (compacted)
@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@
@ Scratch
@ q0 == index ------> packedIndices
@ q1 == positionX ---_ distBtParticlesSq
@ q2 == positionY --_ --> normX
@ q3 == ---> normY
@
@ Unused (note: these are callee-saved)
@ q4 ==
@ q5 ==
@ q6 ==
@ q7 ==
@
@ Scratch
@ q8 == comparatorIndices
@ q9 == comparatorPositionX
@ q10 == comparatorPositionY
@ q11 ==
@
@ Constants
@ q12 == 1.0f
@ q13 == isClose table indices
@ q14 == 1 / particleDiameter
@ q15 == particleDiameterSq
        push    {r4-r11, lr}
        @ Load constants from registers and stack.
        @ (push {r4-r11, lr} is 9 words = 36 bytes, so the incoming stack
        @ arguments now live at [sp, #36..#44].)
        vld1.f32 {d30[],d31[]}, [r3]    @ q15 = particleDiameterSq
        ldr     r12, [sp, #36]          @ r12 = particleDiameterInv
        vld1.f32 {d28[],d29[]}, [r12]   @ q14 = particleDiameterInv
        ldr     r9, [sp, #44]           @ r9 = contacts
        ldr     r7, [sp, #40]           @ r7 = flags
        ldr     r3, [r9, #0]            @ r3 = out = contacts.data
        ldr     r6, [r9, #8]            @ r6 = contacts.capacity
        mov     r4, r3                  @ r4 = postProcess = outContacts
        mov     r5, #0                  @ r5 = numContacts
        sub     r6, r6, #8              @ r6 = maxSafeContacts = capacity - 8
        mov     r8, #12                 @ r8 = 12 = sizeof(FindContactInput)
        @ Perform zero iterations if 'numChecks' is empty.
        @ Must happen after initializing r5 = numContacts = 0.
        cmp     r2, #0
        ble     .L_FindContacts_Return
        @ Load and calculate remaining constants.
        vmov.f32 q12, #1.0              @ q12 = 1.0f splatted
        adr     r12, CONST_IS_CLOSE_TABLE_INDICES
        vld1.8  {d26}, [r12]            @ q13 = *CONST_IS_CLOSE_TABLE_INDICES
.L_FindContacts_MainLoop:
        pld     [r1, #8]                @ prefetch two loops ahead
        @ r10 <== Address of 'position', the current particle position
        @ r11 <== Address of '&comparator[0]', the first particle position we
        @ compare against.
        ldr     r10, [r1], #4           @ r10 = positionIndex|comparatorIndex
        smlatb  r11, r10, r8, r0        @ r11 = address of first comparator
        smlabb  r10, r10, r8, r0        @ r10 = address of current input
        add     r12, r11, #24           @ r12 = address of third comparator
        @ Exit if not enough space in output array (part 1)
        cmp     r5, r6
        @ {q0, q1, q2} == index, positionX, positionY, splatted across vector
        vld3.f32 {d0[], d2[], d4[]}, [r10]
        vld3.f32 {d1[], d3[], d5[]}, [r10]  @ same address again: fills the odd
                                            @ d-regs so the whole q is splatted
        @ {q8, q9, q10} == comparatorIndices, comparatorPosX and comparatorPosY
        @ positions we compare against (positionX, positionY)
        vld3.f32 {d16, d18, d20}, [r11]
        vld3.f32 {d17, d19, d21}, [r12]
        @ q0 = packedIndices -- indices output to b2ParticleContact
        @ q1 = distBtParticlesSq -- will be used to calculate weight
        @ q2 = diffX -- will be used to calculate normal
        @ q3 = diffY -- will be used to calculate normal
        vsub.f32 q3, q10, q2            @ q3 = diffY = comparatorPosY - positionY
        vsub.f32 q2, q9, q1             @ q2 = diffX = comparatorPosX - positionX
        vsli.32 q0, q8, #16             @ q0 = comparatorIndex[i] << 16 | index
        vmul.f32 q1, q3, q3             @ q1 = diffX * diffX
        vmla.f32 q1, q2, q2             @ q1 = diffX * diffX + diffY * diffY
        @ Determine if each particle is close enough to output.
        @ Pack the isClose bitmap (four T or F) into a 32-bit bitmap.
        @ Move 32-bit bitmap to CPU register, for conditional operations.
        @ Note: NEON to CPU register moves are slow (20 cycles) on some
        @ implementations of NEON.
        @ isClose = distBtParticlesSq < particleDiameterSq
        vclt.f32 q8, q1, q15            @ q8 == isClose
        vtbl.8  d16, {d16,d17}, d26     @ q8[0] == isClose(packed)
        vmov.32 r12, d16[0]             @ q8[0] ==> r12.
        @ If not enough space in output array, grow it.
        @ This is a heavy operation, but should happen rarely.
        @ (Flags still come from the 'cmp r5, r6' above: ble => out of space.)
        ble     .L_FindContacts_Output
        ldr     r9, [sp, #44]           @ r9 = contacts
        str     r5, [r9, #4]            @ contacts.count = numContacts
        ldr     r10, [r9, #0]           @ r10 = contacts.data (pre-grow, for relocation)
        push    {r0-r3, r9, r10, r12}
        vpush   {q0, q1, q2, q3}
        vpush   {q12, q13, q14, q15}
        mov     r0, r9                  @ r0 = contacts
        bl      GrowParticleContactBuffer
        vpop    {q12, q13, q14, q15}
        vpop    {q0, q1, q2, q3}
        pop     {r0-r3, r9, r10, r12}
        @ The output array was reallocated, so update 'out', 'postProcess' and
        @ 'maxSafeContacts' pointers.
        ldr     r6, [r9, #8]            @ r6 = contacts.capacity
        ldr     r9, [r9, #0]            @ r9 = contacts.data
        sub     r9, r9, r10             @ r9 = data buffer offset
        sub     r6, r6, #8              @ r6 = maxSafeContacts
        add     r3, r3, r9              @ r3 += data buffer offset
        add     r4, r4, r9              @ r4 += data buffer offset
.L_FindContacts_Output:
        @ Store results to memory, but only results that are close.
        @ (Bare immediates without '#' are accepted under .syntax unified.)
        @ NOTE(review): conditional Advanced-SIMD stores inside IT blocks are
        @ deprecated/UNPREDICTABLE on some ARMv7 cores -- confirm the target
        @ assembler/CPU accepts vst4ne here.
        tst     r12, 0xFF
        it      ne
        vst4ne.32 {d0[0],d2[0],d4[0],d6[0]}, [r3]! @ Store 1st contact
        tst     r12, 0xFF00
        it      ne
        vst4ne.32 {d0[1],d2[1],d4[1],d6[1]}, [r3]! @ Store 2nd contact
        tst     r12, 0xFF0000
        it      ne
        vst4ne.32 {d1[0],d3[0],d5[0],d7[0]}, [r3]! @ Store 3rd contact
        tst     r12, 0xFF000000
        it      ne
        vst4ne.32 {d1[1],d3[1],d5[1],d7[1]}, [r3]! @ Store 4th contact
        @ post-process the last four elements that have been output
        @ r12 = 5th element to not be post-processed yet
        add     r12, r4, #64            @ r12 = nextPostProcess
        cmp     r3, r12
        it      ge
        blge    FindContacts_PostProcess
        @ decrement loop counter; sets the 'gt' flag used in 'bgt' below
        subs    r2, r2, #1
        bgt     .L_FindContacts_MainLoop
.L_FindContacts_PostProcessRemainingItems:
        @ If at least one output item needs post-processing, do it.
        subs    r12, r3, r4
        ble     .L_FindContacts_Return
        @ r12/16 = num extra contacts to process
        add     r5, r5, r12, lsr #4     @ numContacts += num extra
        push    {r5}                    @ Save numContacts, since stomped
        @ Ensure indices past end of array are zeroed out.
        @ We process 4 contacts in FindContacts_PostProcess, even if we only
        @ have one left to process.
        mov     r12, #0
        str     r12, [r3]
        str     r12, [r3, #16]
        str     r12, [r3, #32]
        bl      FindContacts_PostProcess
        pop     {r5}                    @ Restore numContacts
.L_FindContacts_Return:
        @ Set the final number of contacts in the output buffer.
        ldr     r9, [sp, #44]           @ r9 = contacts
        str     r5, [r9, #4]            @ contacts.count = numContacts
        @ Return by popping the original lr into pc.
        pop     {r4-r11, pc}
|
aaronbloomfield/pdr
| 3,424
|
slides/code/08-assembly-32bit/test_abs.s
|
.file "test_abs.cpp"
.intel_syntax noprefix
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.text
.globl absolute_value
.type absolute_value, @function
# int absolute_value(int x) — cdecl; arg at [ebp+8], result in eax.
# Negates the argument in place on the stack when it is negative.
absolute_value:
.LFB1021:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
cmp DWORD PTR [ebp+8], 0          # if (x >= 0) skip negation
jns .L2
neg DWORD PTR [ebp+8]             # x = -x
.L2:
mov eax, DWORD PTR [ebp+8]        # return |x|
pop ebp
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1021:
.size absolute_value, .-absolute_value
.section .rodata
.LC0:
.string "Enter a value: "
.LC1:
.string "The result is: "
.text
.globl main
.type main, @function
# int main() — prompts for an int, prints absolute_value(value).
# Locals: [ebp-20] = value, [ebp-16] = result, [ebp-12] = stack canary.
main:
.LFB1022:
.cfi_startproc
# GCC main prologue: realign stack to 16 bytes, save caller frame.
lea ecx, [esp+4]
.cfi_def_cfa 1, 0
and esp, -16
push DWORD PTR [ecx-4]
push ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
mov ebp, esp
push ecx
.cfi_escape 0xf,0x3,0x75,0x7c,0x6
sub esp, 20
# Install stack-protector canary from gs:20.
mov eax, DWORD PTR gs:20
mov DWORD PTR [ebp-12], eax
xor eax, eax
mov DWORD PTR [ebp-20], 0         # int value = 0
# std::cout << "Enter a value: " << std::endl;
sub esp, 8
push OFFSET FLAT:.LC0
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
# std::cin >> value;
sub esp, 8
lea eax, [ebp-20]
push eax
push OFFSET FLAT:_ZSt3cin
call _ZNSirsERi
add esp, 16
# result = absolute_value(value);
mov eax, DWORD PTR [ebp-20]
sub esp, 12
push eax
call absolute_value
add esp, 16
mov DWORD PTR [ebp-16], eax
# std::cout << "The result is: " << result << std::endl;
sub esp, 8
push OFFSET FLAT:.LC1
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push DWORD PTR [ebp-16]
push eax
call _ZNSolsEi
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
mov eax, 0                        # return 0
# Verify the canary; on mismatch the process aborts.
mov edx, DWORD PTR [ebp-12]
xor edx, DWORD PTR gs:20
je .L6
call __stack_chk_fail
.L6:
mov ecx, DWORD PTR [ebp-4]
.cfi_def_cfa 1, 0
leave
.cfi_restore 5
lea esp, [ecx-4]                  # undo the prologue's stack realignment
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1022:
.size main, .-main
.type _Z41__static_initialization_and_destruction_0ii, @function
# static void __static_initialization_and_destruction_0(int __initialize_p,
#                                                       int __priority)
# Constructs the std::ios_base::Init guard object and registers its
# destructor with __cxa_atexit. Only acts on the (1, 65535) pass.
_Z41__static_initialization_and_destruction_0ii:
.LFB1031:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
cmp DWORD PTR [ebp+8], 1          # skip unless __initialize_p == 1
jne .L9
cmp DWORD PTR [ebp+12], 65535     # ...and __priority == default (65535)
jne .L9
# _ZStL8__ioinit.std::ios_base::Init::Init()
sub esp, 12
push OFFSET FLAT:_ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
add esp, 16
# __cxa_atexit(&Init::~Init, &__ioinit, &__dso_handle)
sub esp, 4
push OFFSET FLAT:__dso_handle
push OFFSET FLAT:_ZStL8__ioinit
push OFFSET FLAT:_ZNSt8ios_base4InitD1Ev
call __cxa_atexit
add esp, 16
.L9:
nop
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1031:
.size _Z41__static_initialization_and_destruction_0ii, .-_Z41__static_initialization_and_destruction_0ii
.type _GLOBAL__sub_I_absolute_value, @function
# Translation-unit constructor stub, run via .init_array before main().
# Calls the static-init helper with (__initialize_p=1, __priority=65535).
_GLOBAL__sub_I_absolute_value:
.LFB1032:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
sub esp, 8
push 65535
push 1
call _Z41__static_initialization_and_destruction_0ii
add esp, 16
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1032:
.size _GLOBAL__sub_I_absolute_value, .-_GLOBAL__sub_I_absolute_value
.section .init_array,"aw"
.align 4
.long _GLOBAL__sub_I_absolute_value
.hidden __dso_handle
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
aaronbloomfield/pdr
| 3,921
|
slides/code/08-assembly-32bit/test_max.s
|
.file "test_max.cpp"
.intel_syntax noprefix
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.text
.globl max
.type max, @function
# int max(int a, int b) — cdecl; a at [ebp+8], b at [ebp+12], result in eax.
# Unoptimized: stores the larger value into local [ebp-4], then returns it.
max:
.LFB1021:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 16
mov eax, DWORD PTR [ebp+8]
cmp eax, DWORD PTR [ebp+12]       # if (a <= b) take .L2 branch
jle .L2
mov eax, DWORD PTR [ebp+8]        # local = a
mov DWORD PTR [ebp-4], eax
jmp .L3
.L2:
mov eax, DWORD PTR [ebp+12]       # local = b
mov DWORD PTR [ebp-4], eax
.L3:
mov eax, DWORD PTR [ebp-4]        # return local
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1021:
.size max, .-max
.section .rodata
.LC0:
.string "Enter value 1: "
.LC1:
.string "Enter value 2: "
.LC2:
.string "The result is: "
.text
.globl main
.type main, @function
# int main() — reads two ints, prints max(value1, value2).
# Locals: [ebp-24] = value1, [ebp-20] = value2, [ebp-16] = result,
#         [ebp-12] = stack canary.
main:
.LFB1022:
.cfi_startproc
# GCC main prologue: realign stack to 16 bytes, save caller frame.
lea ecx, [esp+4]
.cfi_def_cfa 1, 0
and esp, -16
push DWORD PTR [ecx-4]
push ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
mov ebp, esp
push ecx
.cfi_escape 0xf,0x3,0x75,0x7c,0x6
sub esp, 20
# Install stack-protector canary.
mov eax, DWORD PTR gs:20
mov DWORD PTR [ebp-12], eax
xor eax, eax
mov DWORD PTR [ebp-24], 0         # int value1 = 0
mov DWORD PTR [ebp-20], 0         # int value2 = 0
# std::cout << "Enter value 1: " << std::endl;
sub esp, 8
push OFFSET FLAT:.LC0
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
# std::cin >> value1;
sub esp, 8
lea eax, [ebp-24]
push eax
push OFFSET FLAT:_ZSt3cin
call _ZNSirsERi
add esp, 16
# std::cout << "Enter value 2: " << std::endl;
sub esp, 8
push OFFSET FLAT:.LC1
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
# std::cin >> value2;
sub esp, 8
lea eax, [ebp-20]
push eax
push OFFSET FLAT:_ZSt3cin
call _ZNSirsERi
add esp, 16
# result = max(value1, value2);  args pushed right-to-left
mov edx, DWORD PTR [ebp-20]
mov eax, DWORD PTR [ebp-24]
sub esp, 8
push edx
push eax
call max
add esp, 16
mov DWORD PTR [ebp-16], eax
# std::cout << "The result is: " << result << std::endl;
sub esp, 8
push OFFSET FLAT:.LC2
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push DWORD PTR [ebp-16]
push eax
call _ZNSolsEi
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
mov eax, 0                        # return 0
# Verify the canary; abort on corruption.
mov ecx, DWORD PTR [ebp-12]
xor ecx, DWORD PTR gs:20
je .L7
call __stack_chk_fail
.L7:
mov ecx, DWORD PTR [ebp-4]
.cfi_def_cfa 1, 0
leave
.cfi_restore 5
lea esp, [ecx-4]                  # undo prologue realignment
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1022:
.size main, .-main
.type _Z41__static_initialization_and_destruction_0ii, @function
# static void __static_initialization_and_destruction_0(int __initialize_p,
#                                                       int __priority)
# Constructs std::ios_base::Init and registers its destructor via
# __cxa_atexit. Only acts on the (1, 65535) pass.
_Z41__static_initialization_and_destruction_0ii:
.LFB1031:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
cmp DWORD PTR [ebp+8], 1          # skip unless __initialize_p == 1
jne .L10
cmp DWORD PTR [ebp+12], 65535     # ...and __priority == 65535
jne .L10
sub esp, 12
push OFFSET FLAT:_ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
add esp, 16
sub esp, 4
push OFFSET FLAT:__dso_handle
push OFFSET FLAT:_ZStL8__ioinit
push OFFSET FLAT:_ZNSt8ios_base4InitD1Ev
call __cxa_atexit
add esp, 16
.L10:
nop
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1031:
.size _Z41__static_initialization_and_destruction_0ii, .-_Z41__static_initialization_and_destruction_0ii
.type _GLOBAL__sub_I_max, @function
# Translation-unit constructor stub, run via .init_array before main().
# Calls the static-init helper with (__initialize_p=1, __priority=65535).
_GLOBAL__sub_I_max:
.LFB1032:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
sub esp, 8
push 65535
push 1
call _Z41__static_initialization_and_destruction_0ii
add esp, 16
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1032:
.size _GLOBAL__sub_I_max, .-_GLOBAL__sub_I_max
.section .init_array,"aw"
.align 4
.long _GLOBAL__sub_I_max
.hidden __dso_handle
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
aaronbloomfield/pdr
| 3,612
|
slides/code/08-assembly-32bit/test_fib.s
|
.file "test_fib.cpp"
.intel_syntax noprefix
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.text
.globl fib
.type fib, @function
# int fib(int n) — naive doubly-recursive Fibonacci; n at [ebp+8].
# fib(0) == fib(1) == 1; otherwise fib(n-1) + fib(n-2).
# ebx (callee-saved) holds the fib(n-1) partial result across the
# second recursive call.
fib:
.LFB1021:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
push ebx                          # save callee-saved ebx
sub esp, 4
.cfi_offset 3, -12
# Base cases: n == 0 or n == 1 return 1.
cmp DWORD PTR [ebp+8], 0
je .L2
cmp DWORD PTR [ebp+8], 1
jne .L3
.L2:
mov eax, 1
jmp .L4
.L3:
# ebx = fib(n - 1)
mov eax, DWORD PTR [ebp+8]
sub eax, 1
sub esp, 12
push eax
call fib
add esp, 16
mov ebx, eax
# eax = fib(n - 2); then eax += ebx
mov eax, DWORD PTR [ebp+8]
sub eax, 2
sub esp, 12
push eax
call fib
add esp, 16
add eax, ebx
.L4:
mov ebx, DWORD PTR [ebp-4]        # restore ebx from its stack slot
leave
.cfi_restore 5
.cfi_restore 3
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1021:
.size fib, .-fib
.section .rodata
.LC0:
.string "Enter value for fib(): "
.LC1:
.string "The result is: "
.text
.globl main
.type main, @function
# int main() — reads n, prints fib(n).
# Locals: [ebp-20] = n, [ebp-16] = result, [ebp-12] = stack canary.
main:
.LFB1022:
.cfi_startproc
# GCC main prologue: realign stack to 16 bytes, save caller frame.
lea ecx, [esp+4]
.cfi_def_cfa 1, 0
and esp, -16
push DWORD PTR [ecx-4]
push ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
mov ebp, esp
push ecx
.cfi_escape 0xf,0x3,0x75,0x7c,0x6
sub esp, 20
# Install stack-protector canary.
mov eax, DWORD PTR gs:20
mov DWORD PTR [ebp-12], eax
xor eax, eax
mov DWORD PTR [ebp-20], 0         # int n = 0
# std::cout << "Enter value for fib(): " << std::endl;
sub esp, 8
push OFFSET FLAT:.LC0
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
# std::cin >> n;
sub esp, 8
lea eax, [ebp-20]
push eax
push OFFSET FLAT:_ZSt3cin
call _ZNSirsERi
add esp, 16
# result = fib(n);
mov eax, DWORD PTR [ebp-20]
sub esp, 12
push eax
call fib
add esp, 16
mov DWORD PTR [ebp-16], eax
# std::cout << "The result is: " << result << std::endl;
sub esp, 8
push OFFSET FLAT:.LC1
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push DWORD PTR [ebp-16]
push eax
call _ZNSolsEi
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
mov eax, 0                        # return 0
# Verify the canary; abort on corruption.
mov edx, DWORD PTR [ebp-12]
xor edx, DWORD PTR gs:20
je .L7
call __stack_chk_fail
.L7:
mov ecx, DWORD PTR [ebp-4]
.cfi_def_cfa 1, 0
leave
.cfi_restore 5
lea esp, [ecx-4]                  # undo prologue realignment
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1022:
.size main, .-main
.type _Z41__static_initialization_and_destruction_0ii, @function
# static void __static_initialization_and_destruction_0(int __initialize_p,
#                                                       int __priority)
# Constructs std::ios_base::Init and registers its destructor via
# __cxa_atexit. Only acts on the (1, 65535) pass.
_Z41__static_initialization_and_destruction_0ii:
.LFB1031:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
cmp DWORD PTR [ebp+8], 1          # skip unless __initialize_p == 1
jne .L10
cmp DWORD PTR [ebp+12], 65535     # ...and __priority == 65535
jne .L10
sub esp, 12
push OFFSET FLAT:_ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
add esp, 16
sub esp, 4
push OFFSET FLAT:__dso_handle
push OFFSET FLAT:_ZStL8__ioinit
push OFFSET FLAT:_ZNSt8ios_base4InitD1Ev
call __cxa_atexit
add esp, 16
.L10:
nop
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1031:
.size _Z41__static_initialization_and_destruction_0ii, .-_Z41__static_initialization_and_destruction_0ii
.type _GLOBAL__sub_I_fib, @function
# Translation-unit constructor stub, run via .init_array before main().
# Calls the static-init helper with (__initialize_p=1, __priority=65535).
_GLOBAL__sub_I_fib:
.LFB1032:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
sub esp, 8
push 65535
push 1
call _Z41__static_initialization_and_destruction_0ii
add esp, 16
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1032:
.size _GLOBAL__sub_I_fib, .-_GLOBAL__sub_I_fib
.section .init_array,"aw"
.align 4
.long _GLOBAL__sub_I_fib
.hidden __dso_handle
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
aaronbloomfield/pdr
| 6,370
|
slides/code/08-assembly-32bit/test_string_compare.s
|
.file "test_string_compare.cpp"
.intel_syntax noprefix
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.text
.globl compare_string
.type compare_string, @function
# bool compare_string(const char *s1, const char *s2) — cdecl.
# s1 at [ebp+8], s2 at [ebp+12]; returns 1 in al iff the strings are equal.
# Advances both pointers (their stack copies) while *s1 != '\0' and
# *s1 == *s2, then reports whether the stopping characters match.
compare_string:
.LFB1021:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
.L3:
# Loop: stop at end of s1 or at first mismatch.
mov eax, DWORD PTR [ebp+8]
movzx eax, BYTE PTR [eax]
test al, al                       # *s1 == '\0' ?
je .L2
mov eax, DWORD PTR [ebp+8]
movzx edx, BYTE PTR [eax]         # dl = *s1
mov eax, DWORD PTR [ebp+12]
movzx eax, BYTE PTR [eax]         # al = *s2
cmp dl, al
jne .L2
add DWORD PTR [ebp+8], 1          # ++s1 (stack copy)
add DWORD PTR [ebp+12], 1         # ++s2 (stack copy)
jmp .L3
.L2:
# Equal iff the characters at the stopping position match
# (both '\0' on a full match).
mov eax, DWORD PTR [ebp+8]
movzx edx, BYTE PTR [eax]
mov eax, DWORD PTR [ebp+12]
movzx eax, BYTE PTR [eax]
cmp dl, al
sete al                           # al = (*s1 == *s2)
pop ebp
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1021:
.size compare_string, .-compare_string
.section .rodata
.LC0:
.string "Enter string 1: "
.LC1:
.string "Enter string 2: "
.LC2:
.string "The result is: "
.text
.globl main
.type main, @function
# int main() — reads two std::strings, prints compare_string(s1.c_str(),
# s2.c_str()). Exception-aware: .LEHBn/.LEHEn bracket call ranges and the
# LSDA table at .LLSDA1022 maps them to cleanup landing pads (.L10/.L11)
# that destroy the strings before resuming unwinding.
# Locals: [ebp-60] = string s1, [ebp-36] = string s2,
#         [ebp-61] = bool result, [ebp-12] = stack canary.
main:
.LFB1022:
.cfi_startproc
.cfi_personality 0,__gxx_personality_v0
.cfi_lsda 0,.LLSDA1022
# GCC main prologue: realign stack, save caller frame and ebx.
lea ecx, [esp+4]
.cfi_def_cfa 1, 0
and esp, -16
push DWORD PTR [ecx-4]
push ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
mov ebp, esp
push ebx
push ecx
.cfi_escape 0xf,0x3,0x75,0x78,0x6
.cfi_escape 0x10,0x3,0x2,0x75,0x7c
sub esp, 64
# Install stack-protector canary.
mov eax, DWORD PTR gs:20
mov DWORD PTR [ebp-12], eax
xor eax, eax
# Default-construct s1 ([ebp-60]); may throw (range .LEHB0).
sub esp, 12
lea eax, [ebp-60]
push eax
.LEHB0:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1Ev
.LEHE0:
add esp, 16
# Default-construct s2 ([ebp-36]); on throw, land at .L10 to destroy s1.
sub esp, 12
lea eax, [ebp-36]
push eax
.LEHB1:
.cfi_escape 0x2e,0x10
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1Ev
.LEHE1:
add esp, 16
# std::cout << "Enter string 1: " << std::endl; std::cin >> s1;
sub esp, 8
push OFFSET FLAT:.LC0
push OFFSET FLAT:_ZSt4cout
.LEHB2:
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
sub esp, 8
lea eax, [ebp-60]
push eax
push OFFSET FLAT:_ZSt3cin
call _ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE
add esp, 16
# std::cout << "Enter string 2: " << std::endl; std::cin >> s2;
sub esp, 8
push OFFSET FLAT:.LC1
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
sub esp, 8
lea eax, [ebp-36]
push eax
push OFFSET FLAT:_ZSt3cin
call _ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE
add esp, 16
# result = compare_string(s1.c_str(), s2.c_str());
# ebx temporarily holds s2.c_str().
sub esp, 12
lea eax, [ebp-36]
push eax
call _ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5c_strEv
add esp, 16
mov ebx, eax
sub esp, 12
lea eax, [ebp-60]
push eax
call _ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5c_strEv
add esp, 16
sub esp, 8
push ebx
push eax
call compare_string
add esp, 16
mov BYTE PTR [ebp-61], al
movzx ebx, BYTE PTR [ebp-61]
# std::cout << "The result is: " << result << std::endl;
sub esp, 8
push OFFSET FLAT:.LC2
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push ebx
push eax
call _ZNSolsEb
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
.LEHE2:
add esp, 16
mov ebx, 0                        # return value (0) kept in ebx
# Normal path: destroy s2 then s1.
sub esp, 12
lea eax, [ebp-36]
push eax
.LEHB3:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
.LEHE3:
add esp, 16
sub esp, 12
lea eax, [ebp-60]
push eax
.LEHB4:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
.LEHE4:
add esp, 16
mov eax, ebx
# Verify the canary; .L12 aborts on corruption.
mov edx, DWORD PTR [ebp-12]
xor edx, DWORD PTR gs:20
je .L9
jmp .L12
.L11:
# Landing pad for throws in .LEHB2: destroy s2, fall through to s1 cleanup.
mov ebx, eax                      # ebx = exception object
sub esp, 12
lea eax, [ebp-36]
push eax
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
add esp, 16
jmp .L8
.L10:
# Landing pad for throws in .LEHB1/.LEHB3: only s1 needs destroying.
mov ebx, eax
.L8:
sub esp, 12
lea eax, [ebp-60]
push eax
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev
add esp, 16
mov eax, ebx
sub esp, 12
push eax
.LEHB5:
call _Unwind_Resume              # continue unwinding after cleanup
.LEHE5:
.L12:
call __stack_chk_fail
.L9:
# Normal epilogue: restore ecx/ebx/ebp and undo realignment.
lea esp, [ebp-8]
pop ecx
.cfi_restore 1
.cfi_def_cfa 1, 0
pop ebx
.cfi_restore 3
pop ebp
.cfi_restore 5
lea esp, [ecx-4]
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1022:
.globl __gxx_personality_v0
# LSDA call-site table: (start, length, landing pad, action) tuples,
# offsets relative to .LFB1022. Zero landing pad = no cleanup needed.
.section .gcc_except_table,"a",@progbits
.LLSDA1022:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE1022-.LLSDACSB1022
.LLSDACSB1022:
.uleb128 .LEHB0-.LFB1022
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB1022
.uleb128 .LEHE1-.LEHB1
.uleb128 .L10-.LFB1022
.uleb128 0
.uleb128 .LEHB2-.LFB1022
.uleb128 .LEHE2-.LEHB2
.uleb128 .L11-.LFB1022
.uleb128 0
.uleb128 .LEHB3-.LFB1022
.uleb128 .LEHE3-.LEHB3
.uleb128 .L10-.LFB1022
.uleb128 0
.uleb128 .LEHB4-.LFB1022
.uleb128 .LEHE4-.LEHB4
.uleb128 0
.uleb128 0
.uleb128 .LEHB5-.LFB1022
.uleb128 .LEHE5-.LEHB5
.uleb128 0
.uleb128 0
.LLSDACSE1022:
.text
.size main, .-main
.type _Z41__static_initialization_and_destruction_0ii, @function
# static void __static_initialization_and_destruction_0(int __initialize_p,
#                                                       int __priority)
# Constructs std::ios_base::Init and registers its destructor via
# __cxa_atexit. Only acts on the (1, 65535) pass.
_Z41__static_initialization_and_destruction_0ii:
.LFB1074:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
cmp DWORD PTR [ebp+8], 1          # skip unless __initialize_p == 1
jne .L15
cmp DWORD PTR [ebp+12], 65535     # ...and __priority == 65535
jne .L15
sub esp, 12
push OFFSET FLAT:_ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
add esp, 16
sub esp, 4
push OFFSET FLAT:__dso_handle
push OFFSET FLAT:_ZStL8__ioinit
push OFFSET FLAT:_ZNSt8ios_base4InitD1Ev
call __cxa_atexit
add esp, 16
.L15:
nop
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1074:
.size _Z41__static_initialization_and_destruction_0ii, .-_Z41__static_initialization_and_destruction_0ii
.type _GLOBAL__sub_I_compare_string, @function
# Translation-unit constructor stub, run via .init_array before main().
# Calls the static-init helper with (__initialize_p=1, __priority=65535).
_GLOBAL__sub_I_compare_string:
.LFB1075:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
sub esp, 8
push 65535
push 1
call _Z41__static_initialization_and_destruction_0ii
add esp, 16
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1075:
.size _GLOBAL__sub_I_compare_string, .-_GLOBAL__sub_I_compare_string
.section .init_array,"aw"
.align 4
.long _GLOBAL__sub_I_compare_string
.hidden __dso_handle
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
aaronbloomfield/pdr
| 3,301
|
slides/code/08-assembly-32bit/test_abs-non-intel.s
|
.file "test_abs.cpp"
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.text
.globl absolute_value
.type absolute_value, @function
# int absolute_value(int x) — AT&T-syntax twin of the Intel version.
# cdecl; arg at 8(%ebp), result in %eax.
absolute_value:
.LFB1021:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
cmpl $0, 8(%ebp)                  # if (x >= 0) skip negation
jns .L2
negl 8(%ebp)                      # x = -x
.L2:
movl 8(%ebp), %eax                # return |x|
popl %ebp
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1021:
.size absolute_value, .-absolute_value
.section .rodata
.LC0:
.string "Enter a value: "
.LC1:
.string "The result is: "
.text
.globl main
.type main, @function
# int main() — AT&T syntax. Prompts for an int, prints absolute_value(value).
# Locals: -20(%ebp) = value, -16(%ebp) = result, -12(%ebp) = stack canary.
main:
.LFB1022:
.cfi_startproc
# GCC main prologue: realign stack to 16 bytes, save caller frame.
leal 4(%esp), %ecx
.cfi_def_cfa 1, 0
andl $-16, %esp
pushl -4(%ecx)
pushl %ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
movl %esp, %ebp
pushl %ecx
.cfi_escape 0xf,0x3,0x75,0x7c,0x6
subl $20, %esp
# Install stack-protector canary.
movl %gs:20, %eax
movl %eax, -12(%ebp)
xorl %eax, %eax
movl $0, -20(%ebp)                # int value = 0
# std::cout << "Enter a value: " << std::endl;
subl $8, %esp
pushl $.LC0
pushl $_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
addl $16, %esp
subl $8, %esp
pushl $_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
pushl %eax
call _ZNSolsEPFRSoS_E
addl $16, %esp
# std::cin >> value;
subl $8, %esp
leal -20(%ebp), %eax
pushl %eax
pushl $_ZSt3cin
call _ZNSirsERi
addl $16, %esp
# result = absolute_value(value);
movl -20(%ebp), %eax
subl $12, %esp
pushl %eax
call absolute_value
addl $16, %esp
movl %eax, -16(%ebp)
# std::cout << "The result is: " << result << std::endl;
subl $8, %esp
pushl $.LC1
pushl $_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
addl $16, %esp
subl $8, %esp
pushl -16(%ebp)
pushl %eax
call _ZNSolsEi
addl $16, %esp
subl $8, %esp
pushl $_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
pushl %eax
call _ZNSolsEPFRSoS_E
addl $16, %esp
movl $0, %eax                     # return 0
# Verify the canary; abort on corruption.
movl -12(%ebp), %edx
xorl %gs:20, %edx
je .L6
call __stack_chk_fail
.L6:
movl -4(%ebp), %ecx
.cfi_def_cfa 1, 0
leave
.cfi_restore 5
leal -4(%ecx), %esp               # undo prologue realignment
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1022:
.size main, .-main
.type _Z41__static_initialization_and_destruction_0ii, @function
# static void __static_initialization_and_destruction_0(int __initialize_p,
#                                                       int __priority)
# AT&T syntax. Constructs std::ios_base::Init and registers its destructor
# via __cxa_atexit. Only acts on the (1, 65535) pass.
_Z41__static_initialization_and_destruction_0ii:
.LFB1031:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
subl $8, %esp
cmpl $1, 8(%ebp)                  # skip unless __initialize_p == 1
jne .L9
cmpl $65535, 12(%ebp)             # ...and __priority == 65535
jne .L9
subl $12, %esp
pushl $_ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
addl $16, %esp
subl $4, %esp
pushl $__dso_handle
pushl $_ZStL8__ioinit
pushl $_ZNSt8ios_base4InitD1Ev
call __cxa_atexit
addl $16, %esp
.L9:
nop
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1031:
.size _Z41__static_initialization_and_destruction_0ii, .-_Z41__static_initialization_and_destruction_0ii
.type _GLOBAL__sub_I_absolute_value, @function
# Translation-unit constructor stub (AT&T syntax), run via .init_array.
# Calls the static-init helper with (__initialize_p=1, __priority=65535).
_GLOBAL__sub_I_absolute_value:
.LFB1032:
.cfi_startproc
pushl %ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
movl %esp, %ebp
.cfi_def_cfa_register 5
subl $8, %esp
subl $8, %esp
pushl $65535
pushl $1
call _Z41__static_initialization_and_destruction_0ii
addl $16, %esp
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1032:
.size _GLOBAL__sub_I_absolute_value, .-_GLOBAL__sub_I_absolute_value
.section .init_array,"aw"
.align 4
.long _GLOBAL__sub_I_absolute_value
.hidden __dso_handle
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
aaronbloomfield/pdr
| 3,971
|
slides/code/08-assembly-32bit/test_max-noextern.s
|
.file "foo.cpp"
.intel_syntax noprefix
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.text
.globl _Z3maxii
.type _Z3maxii, @function
# int max(int a, int b) — C++-mangled name _Z3maxii (no extern "C").
# cdecl; a at [ebp+8], b at [ebp+12], result in eax.
_Z3maxii:
.LFB1021:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 16
mov eax, DWORD PTR [ebp+8]
cmp eax, DWORD PTR [ebp+12]       # if (a <= b) take .L2 branch
jle .L2
mov eax, DWORD PTR [ebp+8]        # local = a
mov DWORD PTR [ebp-4], eax
jmp .L3
.L2:
mov eax, DWORD PTR [ebp+12]       # local = b
mov DWORD PTR [ebp-4], eax
.L3:
mov eax, DWORD PTR [ebp-4]        # return local
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1021:
.size _Z3maxii, .-_Z3maxii
.section .rodata
.LC0:
.string "Enter value 1: "
.LC1:
.string "Enter value 2: "
.LC2:
.string "The result is: "
.text
.globl main
.type main, @function
# int main() — reads two ints, prints _Z3maxii(value1, value2).
# Locals: [ebp-24] = value1, [ebp-20] = value2, [ebp-16] = result,
#         [ebp-12] = stack canary.
main:
.LFB1022:
.cfi_startproc
# GCC main prologue: realign stack to 16 bytes, save caller frame.
lea ecx, [esp+4]
.cfi_def_cfa 1, 0
and esp, -16
push DWORD PTR [ecx-4]
push ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
mov ebp, esp
push ecx
.cfi_escape 0xf,0x3,0x75,0x7c,0x6
sub esp, 20
# Install stack-protector canary.
mov eax, DWORD PTR gs:20
mov DWORD PTR [ebp-12], eax
xor eax, eax
mov DWORD PTR [ebp-24], 0         # int value1 = 0
mov DWORD PTR [ebp-20], 0         # int value2 = 0
# std::cout << "Enter value 1: " << std::endl;
sub esp, 8
push OFFSET FLAT:.LC0
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
# std::cin >> value1;
sub esp, 8
lea eax, [ebp-24]
push eax
push OFFSET FLAT:_ZSt3cin
call _ZNSirsERi
add esp, 16
# std::cout << "Enter value 2: " << std::endl;
sub esp, 8
push OFFSET FLAT:.LC1
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
# std::cin >> value2;
sub esp, 8
lea eax, [ebp-20]
push eax
push OFFSET FLAT:_ZSt3cin
call _ZNSirsERi
add esp, 16
# result = max(value1, value2);  args pushed right-to-left
mov edx, DWORD PTR [ebp-20]
mov eax, DWORD PTR [ebp-24]
sub esp, 8
push edx
push eax
call _Z3maxii
add esp, 16
mov DWORD PTR [ebp-16], eax
# std::cout << "The result is: " << result << std::endl;
sub esp, 8
push OFFSET FLAT:.LC2
push OFFSET FLAT:_ZSt4cout
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
add esp, 16
sub esp, 8
push DWORD PTR [ebp-16]
push eax
call _ZNSolsEi
add esp, 16
sub esp, 8
push OFFSET FLAT:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
push eax
call _ZNSolsEPFRSoS_E
add esp, 16
mov eax, 0                        # return 0
# Verify the canary; abort on corruption.
mov ecx, DWORD PTR [ebp-12]
xor ecx, DWORD PTR gs:20
je .L7
call __stack_chk_fail
.L7:
mov ecx, DWORD PTR [ebp-4]
.cfi_def_cfa 1, 0
leave
.cfi_restore 5
lea esp, [ecx-4]                  # undo prologue realignment
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1022:
.size main, .-main
.type _Z41__static_initialization_and_destruction_0ii, @function
# static void __static_initialization_and_destruction_0(int __initialize_p,
#                                                       int __priority)
# Constructs std::ios_base::Init and registers its destructor via
# __cxa_atexit. Only acts on the (1, 65535) pass.
_Z41__static_initialization_and_destruction_0ii:
.LFB1031:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
cmp DWORD PTR [ebp+8], 1          # skip unless __initialize_p == 1
jne .L10
cmp DWORD PTR [ebp+12], 65535     # ...and __priority == 65535
jne .L10
sub esp, 12
push OFFSET FLAT:_ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
add esp, 16
sub esp, 4
push OFFSET FLAT:__dso_handle
push OFFSET FLAT:_ZStL8__ioinit
push OFFSET FLAT:_ZNSt8ios_base4InitD1Ev
call __cxa_atexit
add esp, 16
.L10:
nop
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1031:
.size _Z41__static_initialization_and_destruction_0ii, .-_Z41__static_initialization_and_destruction_0ii
.type _GLOBAL__sub_I__Z3maxii, @function
# Translation-unit constructor stub, run via .init_array before main().
# Calls the static-init helper with (__initialize_p=1, __priority=65535).
_GLOBAL__sub_I__Z3maxii:
.LFB1032:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
sub esp, 8
sub esp, 8
push 65535
push 1
call _Z41__static_initialization_and_destruction_0ii
add esp, 16
leave
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1032:
.size _GLOBAL__sub_I__Z3maxii, .-_GLOBAL__sub_I__Z3maxii
.section .init_array,"aw"
.align 4
.long _GLOBAL__sub_I__Z3maxii
.hidden __dso_handle
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
aaronbloomfield/pdr
| 1,622
|
slides/code/08-assembly-32bit/test_abs_c.s
|
.file "test_abs_c.c"
.intel_syntax noprefix
.text
.globl absolute_value
.type absolute_value, @function
# int absolute_value(int x) — C version; cdecl, arg at [ebp+8].
absolute_value:
.LFB0:
.cfi_startproc
push ebp
.cfi_def_cfa_offset 8
.cfi_offset 5, -8
mov ebp, esp
.cfi_def_cfa_register 5
cmp DWORD PTR [ebp+8], 0          # if (x >= 0) skip negation
jns .L2
neg DWORD PTR [ebp+8]             # x = -x
.L2:
mov eax, DWORD PTR [ebp+8]        # return |x|
pop ebp
.cfi_restore 5
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE0:
.size absolute_value, .-absolute_value
.section .rodata
.LC0:
.string "Enter a value: "
.LC1:
.string "%d"
.LC2:
.string "The result is: %d\n"
.text
.globl main
.type main, @function
# int main() — C stdio version: puts/scanf/printf instead of iostreams.
# Locals: [ebp-20] = value, [ebp-16] = result, [ebp-12] = stack canary.
main:
.LFB1:
.cfi_startproc
# GCC main prologue: realign stack to 16 bytes, save caller frame.
lea ecx, [esp+4]
.cfi_def_cfa 1, 0
and esp, -16
push DWORD PTR [ecx-4]
push ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
mov ebp, esp
push ecx
.cfi_escape 0xf,0x3,0x75,0x7c,0x6
sub esp, 20
# Install stack-protector canary.
mov eax, DWORD PTR gs:20
mov DWORD PTR [ebp-12], eax
xor eax, eax
mov DWORD PTR [ebp-20], 0         # int value = 0
# puts("Enter a value: ");
sub esp, 12
push OFFSET FLAT:.LC0
call puts
add esp, 16
# scanf("%d", &value);  (C99-compliant glibc entry point)
sub esp, 8
lea eax, [ebp-20]
push eax
push OFFSET FLAT:.LC1
call __isoc99_scanf
add esp, 16
# result = absolute_value(value);
mov eax, DWORD PTR [ebp-20]
sub esp, 12
push eax
call absolute_value
add esp, 16
mov DWORD PTR [ebp-16], eax
# printf("The result is: %d\n", result);
sub esp, 8
push DWORD PTR [ebp-16]
push OFFSET FLAT:.LC2
call printf
add esp, 16
mov eax, 0                        # return 0
# Verify the canary; abort on corruption.
mov edx, DWORD PTR [ebp-12]
xor edx, DWORD PTR gs:20
je .L6
call __stack_chk_fail
.L6:
mov ecx, DWORD PTR [ebp-4]
.cfi_def_cfa 1, 0
leave
.cfi_restore 5
lea esp, [ecx-4]                  # undo prologue realignment
.cfi_def_cfa 4, 4
ret
.cfi_endproc
.LFE1:
.size main, .-main
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
aaronbloomfield/pdr
| 3,522
|
slides/code/08-assembly-32bit/test_max-O2.s
|
.file "test_max.cpp"
.intel_syntax noprefix
.section .text.unlikely,"ax",@progbits
.LCOLDB0:
.text
.LHOTB0:
.p2align 4,,15
.globl max
.type max, @function
# int max(int a, int b) — -O2 build: frameless leaf, branchless via cmovl.
# Args read directly from the stack: a at [esp+4], b at [esp+8].
max:
.LFB1048:
.cfi_startproc
mov eax, DWORD PTR [esp+4]        # eax = a
mov edx, DWORD PTR [esp+8]        # edx = b
cmp eax, edx
cmovl eax, edx                    # eax = (a < b) ? b : a
ret
.cfi_endproc
.LFE1048:
.size max, .-max
.section .text.unlikely
.LCOLDE0:
.text
.LHOTE0:
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Enter value 1: "
.LC2:
.string "Enter value 2: "
.LC3:
.string "The result is: "
.section .text.unlikely
.LCOLDB4:
.section .text.startup,"ax",@progbits
.LHOTB4:
.p2align 4,,15
.globl main
.type main, @function
# int main() — -O2 build. max() is inlined (cmp/cmovge below); prompts via
# __ostream_insert instead of operator<<(const char*).
# Locals: [ebp-20] = value1, [ebp-16] = value2, [ebp-12] = stack canary.
main:
.LFB1049:
.cfi_startproc
# Prologue: realign stack, save caller frame, ebx and ecx.
lea ecx, [esp+4]
.cfi_def_cfa 1, 0
and esp, -16
push DWORD PTR [ecx-4]
push ebp
.cfi_escape 0x10,0x5,0x2,0x75,0
mov ebp, esp
push ebx
push ecx
.cfi_escape 0xf,0x3,0x75,0x78,0x6
.cfi_escape 0x10,0x3,0x2,0x75,0x7c
sub esp, 20
mov DWORD PTR [ebp-20], 0         # int value1 = 0
mov DWORD PTR [ebp-16], 0         # int value2 = 0
# __ostream_insert(cout, "Enter value 1: ", 15) << endl;
push 15
push OFFSET FLAT:.LC1
push OFFSET FLAT:_ZSt4cout
# Canary install is scheduled between the pushes and the call.
mov eax, DWORD PTR gs:20
mov DWORD PTR [ebp-12], eax
xor eax, eax
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
mov DWORD PTR [esp], OFFSET FLAT:_ZSt4cout
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
# cin >> value1;  (pops rebalance the arg area)
pop eax
lea eax, [ebp-20]
pop edx
push eax
push OFFSET FLAT:_ZSt3cin
call _ZNSirsERi
add esp, 12
# __ostream_insert(cout, "Enter value 2: ", 15) << endl;
push 15
push OFFSET FLAT:.LC2
push OFFSET FLAT:_ZSt4cout
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
mov DWORD PTR [esp], OFFSET FLAT:_ZSt4cout
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
# cin >> value2;
pop ecx
lea eax, [ebp-16]
pop ebx
push eax
push OFFSET FLAT:_ZSt3cin
call _ZNSirsERi
# Inlined max: ebx = max(value1, value2).
mov eax, DWORD PTR [ebp-16]
mov ebx, DWORD PTR [ebp-20]
add esp, 12
push 15
push OFFSET FLAT:.LC3
push OFFSET FLAT:_ZSt4cout
cmp eax, ebx
cmovge ebx, eax                   # branchless select of the larger value
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_i
# cout << result << endl;
pop eax
pop edx
push ebx
push OFFSET FLAT:_ZSt4cout
call _ZNSolsEi
mov DWORD PTR [esp], eax
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
add esp, 16
# Verify the canary; jump out-of-line to abort on corruption.
mov edx, DWORD PTR [ebp-12]
xor edx, DWORD PTR gs:20
jne .L6
lea esp, [ebp-8]
xor eax, eax                      # return 0
pop ecx
.cfi_remember_state
.cfi_restore 1
.cfi_def_cfa 1, 0
pop ebx
.cfi_restore 3
pop ebp
.cfi_restore 5
lea esp, [ecx-4]                  # undo prologue realignment
.cfi_def_cfa 4, 4
ret
.L6:
.cfi_restore_state
call __stack_chk_fail
.cfi_endproc
.LFE1049:
.size main, .-main
.section .text.unlikely
.LCOLDE4:
.section .text.startup
.LHOTE4:
.section .text.unlikely
.LCOLDB5:
.section .text.startup
.LHOTB5:
.p2align 4,,15
.type _GLOBAL__sub_I_max, @function
# Translation-unit constructor (-O2): the static-init helper is inlined,
# so this directly constructs std::ios_base::Init and registers its
# destructor with __cxa_atexit. Frameless; .cfi offsets track each push.
_GLOBAL__sub_I_max:
.LFB1059:
.cfi_startproc
sub esp, 24
.cfi_def_cfa_offset 28
push OFFSET FLAT:_ZStL8__ioinit
.cfi_def_cfa_offset 32
call _ZNSt8ios_base4InitC1Ev
add esp, 12
.cfi_def_cfa_offset 20
# __cxa_atexit(&Init::~Init, &__ioinit, &__dso_handle)
push OFFSET FLAT:__dso_handle
.cfi_def_cfa_offset 24
push OFFSET FLAT:_ZStL8__ioinit
.cfi_def_cfa_offset 28
push OFFSET FLAT:_ZNSt8ios_base4InitD1Ev
.cfi_def_cfa_offset 32
call __cxa_atexit
add esp, 28
.cfi_def_cfa_offset 4
ret
.cfi_endproc
.LFE1059:
.size _GLOBAL__sub_I_max, .-_GLOBAL__sub_I_max
.section .text.unlikely
.LCOLDE5:
.section .text.startup
.LHOTE5:
.section .init_array,"aw"
.align 4
.long _GLOBAL__sub_I_max
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.hidden __dso_handle
.ident "GCC: (Ubuntu 5.4.0-6ubuntu1~16.04.2) 5.4.0 20160609"
.section .note.GNU-stack,"",@progbits
|
aaronbloomfield/pdr
| 3,781
|
slides/code/08-assembly-64bit/test_fact.s
|
.text
.intel_syntax noprefix
.file "test_fact.cpp"
.section .text.startup,"ax",@progbits
.p2align 4, 0x90 # -- Begin function __cxx_global_var_init
.type __cxx_global_var_init,@function
# clang-emitted global initializer (x86-64 SysV): constructs the
# std::ios_base::Init guard and registers its destructor via __cxa_atexit.
# `push rax` exists only to keep rsp 16-byte aligned at the calls.
__cxx_global_var_init: # @__cxx_global_var_init
.cfi_startproc
# %bb.0:
push rax
.cfi_def_cfa_offset 16
movabs rdi, offset _ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
# __cxa_atexit(&Init::~Init, &__ioinit, &__dso_handle)
movabs rdi, offset _ZNSt8ios_base4InitD1Ev
movabs rsi, offset _ZStL8__ioinit
movabs rdx, offset __dso_handle
call __cxa_atexit
mov dword ptr [rsp + 4], eax # 4-byte Spill
pop rax
ret
.Lfunc_end0:
.size __cxx_global_var_init, .Lfunc_end0-__cxx_global_var_init
.cfi_endproc
# -- End function
.text
.globl fact # -- Begin function fact
.p2align 4, 0x90
.type fact,@function
# unsigned long fact(unsigned n) — recursive factorial (clang -O0).
# SysV AMD64: n in edi, 64-bit result in rax.
# Locals: [rsp+12] = n, [rsp+16] = result, [rsp] = spill slot for the
# widened n held across the recursive call.
fact: # @fact
.cfi_startproc
# %bb.0:
sub rsp, 24
.cfi_def_cfa_offset 32
mov dword ptr [rsp + 12], edi
cmp dword ptr [rsp + 12], 0       # base case: fact(0) == 1
jne .LBB1_2
# %bb.1:
mov qword ptr [rsp + 16], 1
jmp .LBB1_3
.LBB1_2:
# rcx = (unsigned long)n, spilled across the recursive call.
mov eax, dword ptr [rsp + 12]
mov ecx, eax
mov eax, dword ptr [rsp + 12]
sub eax, 1
mov edi, eax                      # arg = n - 1
mov qword ptr [rsp], rcx # 8-byte Spill
call fact
mov rcx, qword ptr [rsp] # 8-byte Reload
imul rcx, rax                     # result = n * fact(n - 1)
mov qword ptr [rsp + 16], rcx
.LBB1_3:
mov rax, qword ptr [rsp + 16]     # return result
add rsp, 24
ret
.Lfunc_end1:
.size fact, .Lfunc_end1-fact
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
# int main() — prompts for n, prints fact(n) (clang -O0, SysV AMD64).
# Locals: [rsp+32] = unsigned n, [rsp+24] = fact result; other slots are
# dead spills of ignored call return values.
main: # @main
.cfi_startproc
# %bb.0:
sub rsp, 40
.cfi_def_cfa_offset 48
# std::cout << "Enter value for fact(): " << std::endl;
movabs rdi, offset _ZSt4cout
movabs rsi, offset .L.str
mov dword ptr [rsp + 36], 0
mov dword ptr [rsp + 32], 0       # unsigned n = 0
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movabs rsi, offset _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
# std::cin >> n;  (operator>>(unsigned&))
movabs rdi, offset _ZSt3cin
lea rsi, [rsp + 32]
mov qword ptr [rsp + 16], rax # 8-byte Spill
call _ZNSirsERj
# result = fact(n);
mov edi, dword ptr [rsp + 32]
mov qword ptr [rsp + 8], rax # 8-byte Spill
call fact
# std::cout << "The result is: " << result << std::endl;
movabs rdi, offset _ZSt4cout
movabs rsi, offset .L.str.1
mov qword ptr [rsp + 24], rax
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
mov rsi, qword ptr [rsp + 24]
mov rdi, rax
call _ZNSolsEl
movabs rsi, offset _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
xor ecx, ecx                      # return 0
mov qword ptr [rsp], rax # 8-byte Spill
mov eax, ecx
add rsp, 40
ret
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.section .text.startup,"ax",@progbits
.p2align 4, 0x90 # -- Begin function _GLOBAL__sub_I_test_fact.cpp
.type _GLOBAL__sub_I_test_fact.cpp,@function
# Translation-unit constructor, run via .init_array before main().
# `push rax` only maintains 16-byte stack alignment at the call.
_GLOBAL__sub_I_test_fact.cpp: # @_GLOBAL__sub_I_test_fact.cpp
.cfi_startproc
# %bb.0:
push rax
.cfi_def_cfa_offset 16
call __cxx_global_var_init
pop rax
ret
.Lfunc_end3:
.size _GLOBAL__sub_I_test_fact.cpp, .Lfunc_end3-_GLOBAL__sub_I_test_fact.cpp
.cfi_endproc
# -- End function
.type _ZStL8__ioinit,@object # @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.hidden __dso_handle
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter value for fact(): "
.size .L.str, 25
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "The result is: "
.size .L.str.1, 16
.section .init_array,"aw",@init_array
.p2align 3
.quad _GLOBAL__sub_I_test_fact.cpp
.ident "clang version 6.0.0-1ubuntu2 (tags/RELEASE_600/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 3,055
|
slides/code/08-assembly-64bit/test_abs.s
|
.text
.intel_syntax noprefix
.file "test_abs.cpp"
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Constructs the std::ios_base::Init object at _ZStL8__ioinit and
# registers its destructor via __cxa_atexit(dtor, obj, __dso_handle) so
# iostreams are torn down at program exit.  "push rax" only realigns
# rsp to 16 for the calls; the pushed value is irrelevant.
#-----------------------------------------------------------------------
.align 16, 0x90
.type __cxx_global_var_init,@function
__cxx_global_var_init: # @__cxx_global_var_init
.cfi_startproc
# BB#0:
push rax
.Ltmp0:
.cfi_def_cfa_offset 16
movabs rdi, _ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
movabs rdi, _ZNSt8ios_base4InitD1Ev
movabs rsi, _ZStL8__ioinit
movabs rdx, __dso_handle
call __cxa_atexit
# __cxa_atexit's return value is spilled and never reloaded (dead store).
mov dword ptr [rsp + 4], eax # 4-byte Spill
pop rax
ret
.Lfunc_end0:
.size __cxx_global_var_init, .Lfunc_end0-__cxx_global_var_init
.cfi_endproc
.text
#-----------------------------------------------------------------------
# long absolute_value(long x)
# In:  rdi = x.  Out: rax = |x|.  Leaf function: the argument lives in
# the SysV red zone at [rsp-8]; no frame is built.
# If x < 0 it computes 0 - x.  Note the inherent two's-complement edge
# case: the most negative long has no positive counterpart, so
# 0 - LONG_MIN wraps back to LONG_MIN.
#-----------------------------------------------------------------------
.globl absolute_value
.align 16, 0x90
.type absolute_value,@function
absolute_value: # @absolute_value
.cfi_startproc
# BB#0:
mov qword ptr [rsp - 8], rdi
cmp qword ptr [rsp - 8], 0
jge .LBB1_2
# BB#1:
# Negative path: rcx = 0 - x, stored back into the red-zone slot.
xor eax, eax
mov ecx, eax
sub rcx, qword ptr [rsp - 8]
mov qword ptr [rsp - 8], rcx
.LBB1_2:
mov rax, qword ptr [rsp - 8]
ret
.Lfunc_end1:
.size absolute_value, .Lfunc_end1-absolute_value
.cfi_endproc
#-----------------------------------------------------------------------
# int main() — unoptimized clang output.
# Prompts on std::cout, reads a long via cin's operator>>(long&)
# (_ZNSirsERl), calls absolute_value(), prints the result with
# operator<<(long) (_ZNSolsEl) and std::endl.  Returns 0.
# Frame: 56 bytes; rsp stays 16-byte aligned at every call site.
#-----------------------------------------------------------------------
.globl main
.align 16, 0x90
.type main,@function
main: # @main
.cfi_startproc
# BB#0:
sub rsp, 56
.Ltmp1:
.cfi_def_cfa_offset 64
movabs rdi, _ZSt4cout
movabs rsi, .L.str
# [rsp+52] presumably main's zero-initialized return slot (TODO confirm);
# [rsp+40] holds the long read from cin below.
mov dword ptr [rsp + 52], 0
mov qword ptr [rsp + 40], 0
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
movabs rdi, _ZSt3cin
lea rsi, [rsp + 40]
# Spills at [rsp+24], [rsp+16] and [rsp+8] are never reloaded (dead
# stores typical of -O0 codegen).
mov qword ptr [rsp + 24], rax # 8-byte Spill
call _ZNSirsERl
mov rdi, qword ptr [rsp + 40]
mov qword ptr [rsp + 16], rax # 8-byte Spill
# rdi = value read from cin; the result (rax) is kept at [rsp+32].
call absolute_value
movabs rdi, _ZSt4cout
movabs rsi, .L.str.1
mov qword ptr [rsp + 32], rax
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
mov rsi, qword ptr [rsp + 32]
mov rdi, rax
call _ZNSolsEl
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
# return 0
xor ecx, ecx
mov qword ptr [rsp + 8], rax # 8-byte Spill
mov eax, ecx
add rsp, 56
ret
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Static-initializer driver, invoked at startup via the .init_array
# entry at the bottom of the file.  "push rax" only realigns rsp to 16.
#-----------------------------------------------------------------------
.align 16, 0x90
.type _GLOBAL__sub_I_test_abs.cpp,@function
_GLOBAL__sub_I_test_abs.cpp: # @_GLOBAL__sub_I_test_abs.cpp
.cfi_startproc
# BB#0:
push rax
.Ltmp2:
.cfi_def_cfa_offset 16
call __cxx_global_var_init
pop rax
ret
.Lfunc_end3:
.size _GLOBAL__sub_I_test_abs.cpp, .Lfunc_end3-_GLOBAL__sub_I_test_abs.cpp
.cfi_endproc
.type _ZStL8__ioinit,@object # @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter a value: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "The result is: "
.size .L.str.1, 16
.section .init_array,"aw",@init_array
.align 8
.quad _GLOBAL__sub_I_test_abs.cpp
.ident "clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 3,063
|
slides/code/08-assembly-64bit/test_abs_int.s
|
.text
.intel_syntax noprefix
.file "test_abs_int.cpp"
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Constructs the std::ios_base::Init object at _ZStL8__ioinit and
# registers its destructor via __cxa_atexit(dtor, obj, __dso_handle).
# "push rax" only realigns rsp to 16 for the calls.
#-----------------------------------------------------------------------
.align 16, 0x90
.type __cxx_global_var_init,@function
__cxx_global_var_init: # @__cxx_global_var_init
.cfi_startproc
# BB#0:
push rax
.Ltmp0:
.cfi_def_cfa_offset 16
movabs rdi, _ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
movabs rdi, _ZNSt8ios_base4InitD1Ev
movabs rsi, _ZStL8__ioinit
movabs rdx, __dso_handle
call __cxa_atexit
# __cxa_atexit's return value is spilled and never reloaded (dead store).
mov dword ptr [rsp + 4], eax # 4-byte Spill
pop rax
ret
.Lfunc_end0:
.size __cxx_global_var_init, .Lfunc_end0-__cxx_global_var_init
.cfi_endproc
.text
#-----------------------------------------------------------------------
# int absolute_value(int x)
# In:  edi = x.  Out: eax = |x|.  Leaf function: the argument lives in
# the SysV red zone at [rsp-4]; no frame is built.
# If x < 0 it computes 0 - x (INT_MIN wraps back to INT_MIN, the usual
# two's-complement edge case).
#-----------------------------------------------------------------------
.globl absolute_value
.align 16, 0x90
.type absolute_value,@function
absolute_value: # @absolute_value
.cfi_startproc
# BB#0:
mov dword ptr [rsp - 4], edi
cmp dword ptr [rsp - 4], 0
jge .LBB1_2
# BB#1:
# Negative path: eax = 0 - x, stored back into the red-zone slot.
xor eax, eax
sub eax, dword ptr [rsp - 4]
mov dword ptr [rsp - 4], eax
.LBB1_2:
mov eax, dword ptr [rsp - 4]
ret
.Lfunc_end1:
.size absolute_value, .Lfunc_end1-absolute_value
.cfi_endproc
#-----------------------------------------------------------------------
# int main() — unoptimized clang output.
# Prompts on std::cout, reads an int via cin's operator>>(int&)
# (_ZNSirsERi), calls absolute_value(), prints the result with
# operator<<(int) (_ZNSolsEi) and std::endl.  Returns 0.
#-----------------------------------------------------------------------
.globl main
.align 16, 0x90
.type main,@function
main: # @main
.cfi_startproc
# BB#0:
sub rsp, 40
.Ltmp1:
.cfi_def_cfa_offset 48
movabs rdi, _ZSt4cout
movabs rsi, .L.str
# [rsp+36] presumably main's zero-initialized return slot (TODO confirm);
# [rsp+32] holds the int read from cin below.
mov dword ptr [rsp + 36], 0
mov dword ptr [rsp + 32], 0
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
movabs rdi, _ZSt3cin
lea rsi, [rsp + 32]
# Spills at [rsp+16], [rsp+8] and [rsp] are never reloaded (dead stores).
mov qword ptr [rsp + 16], rax # 8-byte Spill
call _ZNSirsERi
mov edi, dword ptr [rsp + 32]
mov qword ptr [rsp + 8], rax # 8-byte Spill
# edi = value read from cin; the result (eax) is kept at [rsp+28].
call absolute_value
movabs rdi, _ZSt4cout
movabs rsi, .L.str.1
mov dword ptr [rsp + 28], eax
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
mov esi, dword ptr [rsp + 28]
mov rdi, rax
call _ZNSolsEi
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
# return 0
xor ecx, ecx
mov qword ptr [rsp], rax # 8-byte Spill
mov eax, ecx
add rsp, 40
ret
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Static-initializer driver, invoked at startup via the .init_array
# entry at the bottom of the file.  "push rax" only realigns rsp to 16.
#-----------------------------------------------------------------------
.align 16, 0x90
.type _GLOBAL__sub_I_test_abs_int.cpp,@function
_GLOBAL__sub_I_test_abs_int.cpp: # @_GLOBAL__sub_I_test_abs_int.cpp
.cfi_startproc
# BB#0:
push rax
.Ltmp2:
.cfi_def_cfa_offset 16
call __cxx_global_var_init
pop rax
ret
.Lfunc_end3:
.size _GLOBAL__sub_I_test_abs_int.cpp, .Lfunc_end3-_GLOBAL__sub_I_test_abs_int.cpp
.cfi_endproc
.type _ZStL8__ioinit,@object # @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter a value: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "The result is: "
.size .L.str.1, 16
.section .init_array,"aw",@init_array
.align 8
.quad _GLOBAL__sub_I_test_abs_int.cpp
.ident "clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 3,632
|
slides/code/08-assembly-64bit/test_max.s
|
.text
.intel_syntax noprefix
.file "test_max.cpp"
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Constructs the std::ios_base::Init object at _ZStL8__ioinit and
# registers its destructor via __cxa_atexit(dtor, obj, __dso_handle).
# "push rax" only realigns rsp to 16 for the calls.
#-----------------------------------------------------------------------
.align 16, 0x90
.type __cxx_global_var_init,@function
__cxx_global_var_init: # @__cxx_global_var_init
.cfi_startproc
# BB#0:
push rax
.Ltmp0:
.cfi_def_cfa_offset 16
movabs rdi, _ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
movabs rdi, _ZNSt8ios_base4InitD1Ev
movabs rsi, _ZStL8__ioinit
movabs rdx, __dso_handle
call __cxa_atexit
# __cxa_atexit's return value is spilled and never reloaded (dead store).
mov dword ptr [rsp + 4], eax # 4-byte Spill
pop rax
ret
.Lfunc_end0:
.size __cxx_global_var_init, .Lfunc_end0-__cxx_global_var_init
.cfi_endproc
.text
#-----------------------------------------------------------------------
# int max(int a, int b)
# In:  edi = a, esi = b.  Out: eax = the larger value (signed compare,
# jle).  Leaf function using SysV red-zone slots: [rsp-4] = a,
# [rsp-8] = b, [rsp-12] = result.
#-----------------------------------------------------------------------
.globl max
.align 16, 0x90
.type max,@function
max: # @max
.cfi_startproc
# BB#0:
mov dword ptr [rsp - 4], edi
mov dword ptr [rsp - 8], esi
mov esi, dword ptr [rsp - 4]
cmp esi, dword ptr [rsp - 8]
jle .LBB1_2
# BB#1:
# a > b: result = a
mov eax, dword ptr [rsp - 4]
mov dword ptr [rsp - 12], eax
jmp .LBB1_3
.LBB1_2:
# a <= b: result = b
mov eax, dword ptr [rsp - 8]
mov dword ptr [rsp - 12], eax
.LBB1_3:
mov eax, dword ptr [rsp - 12]
ret
.Lfunc_end1:
.size max, .Lfunc_end1-max
.cfi_endproc
#-----------------------------------------------------------------------
# int main() — unoptimized clang output.
# Prompts twice on std::cout, reads two ints via cin's operator>>(int&)
# (_ZNSirsERi) into [rsp+48] and [rsp+44], calls max(), and prints the
# result with operator<<(int) (_ZNSolsEi) and std::endl.  Returns 0.
#-----------------------------------------------------------------------
.globl main
.align 16, 0x90
.type main,@function
main: # @main
.cfi_startproc
# BB#0:
sub rsp, 56
.Ltmp1:
.cfi_def_cfa_offset 64
movabs rdi, _ZSt4cout
movabs rsi, .L.str
# [rsp+52] presumably main's zero-initialized return slot (TODO confirm);
# [rsp+48] and [rsp+44] hold the two ints read from cin below.
mov dword ptr [rsp + 52], 0
mov dword ptr [rsp + 48], 0
mov dword ptr [rsp + 44], 0
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
movabs rdi, _ZSt3cin
lea rsi, [rsp + 48]
# Spill slots [rsp+32]..[rsp] hold stream return values that are never
# reloaded (dead stores typical of -O0 codegen).
mov qword ptr [rsp + 32], rax # 8-byte Spill
call _ZNSirsERi
movabs rdi, _ZSt4cout
movabs rsi, .L.str.1
mov qword ptr [rsp + 24], rax # 8-byte Spill
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
movabs rdi, _ZSt3cin
lea rsi, [rsp + 44]
mov qword ptr [rsp + 16], rax # 8-byte Spill
call _ZNSirsERi
# edi/esi = the two values read; the result (eax) is kept at [rsp+40].
mov edi, dword ptr [rsp + 48]
mov esi, dword ptr [rsp + 44]
mov qword ptr [rsp + 8], rax # 8-byte Spill
call max
movabs rdi, _ZSt4cout
movabs rsi, .L.str.2
mov dword ptr [rsp + 40], eax
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
mov esi, dword ptr [rsp + 40]
mov rdi, rax
call _ZNSolsEi
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
# return 0
xor ecx, ecx
mov qword ptr [rsp], rax # 8-byte Spill
mov eax, ecx
add rsp, 56
ret
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Static-initializer driver, invoked at startup via the .init_array
# entry at the bottom of the file.  "push rax" only realigns rsp to 16.
#-----------------------------------------------------------------------
.align 16, 0x90
.type _GLOBAL__sub_I_test_max.cpp,@function
_GLOBAL__sub_I_test_max.cpp: # @_GLOBAL__sub_I_test_max.cpp
.cfi_startproc
# BB#0:
push rax
.Ltmp2:
.cfi_def_cfa_offset 16
call __cxx_global_var_init
pop rax
ret
.Lfunc_end3:
.size _GLOBAL__sub_I_test_max.cpp, .Lfunc_end3-_GLOBAL__sub_I_test_max.cpp
.cfi_endproc
.type _ZStL8__ioinit,@object # @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter value 1: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Enter value 2: "
.size .L.str.1, 16
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "The result is: "
.size .L.str.2, 16
.section .init_array,"aw",@init_array
.align 8
.quad _GLOBAL__sub_I_test_max.cpp
.ident "clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 9,352
|
slides/code/08-assembly-64bit/test_string_compare.s
|
.text
.intel_syntax noprefix
.file "test_string_compare.cpp"
#-----------------------------------------------------------------------
# bool compare_string(const char *s1, const char *s2)
# In:  rdi = s1, rsi = s2 (NUL-terminated).  Out: al = 1 if the strings
# are byte-for-byte equal, else 0.  Leaf function; clobbers rcx, rdx.
# al tracks the current s1 byte; the loop advances both pointers while
# the bytes match and s1 is not exhausted.  The epilogue compares the
# byte that stopped the loop (mismatch or s1's NUL) against s2's
# current byte, so equality also requires s2 to end at the same spot.
#-----------------------------------------------------------------------
.globl compare_string
.align 16, 0x90
.type compare_string,@function
compare_string: # @compare_string
.cfi_startproc
# BB#0:
mov al, byte ptr [rdi]
test al, al
je .LBB0_4
# BB#1: # %.lr.ph.preheader
inc rdi
.align 16, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movzx ecx, byte ptr [rsi]
movzx edx, al
cmp edx, ecx
jne .LBB0_5
# BB#3: # in Loop: Header=BB0_2 Depth=1
inc rsi
mov al, byte ptr [rdi]
inc rdi
test al, al
jne .LBB0_2
.LBB0_4:
# s1 exhausted: al = 0 (s1's terminator) for the final compare below.
xor eax, eax
.LBB0_5: # %.critedge
# al == current s2 byte?  sete yields the boolean return value.
movzx ecx, byte ptr [rsi]
movzx eax, al
cmp eax, ecx
sete al
ret
.Lfunc_end0:
.size compare_string, .Lfunc_end0-compare_string
.cfi_endproc
#-----------------------------------------------------------------------
# int main() — optimized clang output with C++ exception tables
# (.cfi_personality/.cfi_lsda refer to .Lexception0 below).
# Reads two std::string values from std::cin, compares them with an
# inlined copy of compare_string, and prints the boolean result.
# Two SSO strings live on the stack: one at [rsp+32] (inline buffer at
# [rsp+48], r15 caches that buffer address) and one at [rsp] (inline
# buffer at [rsp+16], cached in r12).  Each is initialized to length 0
# pointing at its own buffer.  The "cout << endl" calls are inlined as
# widen('\n') + put + flush sequences; .LBB1_40 is the EH landing pad
# that destroys both strings before resuming unwinding.
#-----------------------------------------------------------------------
.globl main
.align 16, 0x90
.type main,@function
main: # @main
.Lfunc_begin0:
.cfi_startproc
.cfi_personality 3, __gxx_personality_v0
.cfi_lsda 3, .Lexception0
# BB#0:
push rbp
.Ltmp43:
.cfi_def_cfa_offset 16
push r15
.Ltmp44:
.cfi_def_cfa_offset 24
push r14
.Ltmp45:
.cfi_def_cfa_offset 32
push r12
.Ltmp46:
.cfi_def_cfa_offset 40
push rbx
.Ltmp47:
.cfi_def_cfa_offset 48
sub rsp, 64
.Ltmp48:
.cfi_def_cfa_offset 112
.Ltmp49:
.cfi_offset rbx, -48
.Ltmp50:
.cfi_offset r12, -40
.Ltmp51:
.cfi_offset r14, -32
.Ltmp52:
.cfi_offset r15, -24
.Ltmp53:
.cfi_offset rbp, -16
# Build the two empty SSO strings in place.
lea r15, [rsp + 48]
mov qword ptr [rsp + 32], r15
mov qword ptr [rsp + 40], 0
mov byte ptr [rsp + 48], 0
lea r12, [rsp + 16]
mov qword ptr [rsp], r12
mov qword ptr [rsp + 8], 0
mov byte ptr [rsp + 16], 0
.Ltmp0:
# cout << "Enter string 1: " (16 bytes, direct __ostream_insert)
mov edi, _ZSt4cout
mov esi, .L.str
mov edx, 16
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp1:
# BB#1: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
# Inlined endl: fetch cout's ctype<char> facet, widen('\n'), put, flush.
mov rax, qword ptr [rip + _ZSt4cout]
mov rax, qword ptr [rax - 24]
mov rbx, qword ptr [rax + _ZSt4cout+240]
test rbx, rbx
je .LBB1_2
.LBB1_3: # %.noexc34
cmp byte ptr [rbx + 56], 0
je .LBB1_5
# BB#4:
mov al, byte ptr [rbx + 67]
jmp .LBB1_7
.LBB1_5:
.Ltmp2:
mov rdi, rbx
call _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp3:
# BB#6: # %.noexc38
mov rax, qword ptr [rbx]
mov rax, qword ptr [rax + 48]
.Ltmp4:
mov esi, 10
mov rdi, rbx
call rax
.Ltmp5:
.LBB1_7: # %.noexc
.Ltmp6:
movsx esi, al
mov edi, _ZSt4cout
call _ZNSo3putEc
.Ltmp7:
# BB#8: # %.noexc4
.Ltmp8:
mov rdi, rax
call _ZNSo5flushEv
.Ltmp9:
# BB#9: # %_ZNSolsEPFRSoS_E.exit
.Ltmp10:
# cin >> first string (at [rsp+32])
lea rsi, [rsp + 32]
mov edi, _ZSt3cin
call _ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE
.Ltmp11:
# BB#10:
.Ltmp12:
# cout << "Enter string 2: " then another inlined endl.
mov edi, _ZSt4cout
mov esi, .L.str.1
mov edx, 16
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp13:
# BB#11: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit8
mov rax, qword ptr [rip + _ZSt4cout]
mov rax, qword ptr [rax - 24]
mov rbx, qword ptr [rax + _ZSt4cout+240]
test rbx, rbx
je .LBB1_12
.LBB1_13: # %.noexc17
cmp byte ptr [rbx + 56], 0
je .LBB1_15
# BB#14:
mov al, byte ptr [rbx + 67]
jmp .LBB1_17
.LBB1_15:
.Ltmp14:
mov rdi, rbx
call _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp15:
# BB#16: # %.noexc19
mov rax, qword ptr [rbx]
mov rax, qword ptr [rax + 48]
.Ltmp16:
mov esi, 10
mov rdi, rbx
call rax
.Ltmp17:
.LBB1_17: # %.noexc11
.Ltmp18:
movsx esi, al
mov edi, _ZSt4cout
call _ZNSo3putEc
.Ltmp19:
# BB#18: # %.noexc12
.Ltmp20:
mov rdi, rax
call _ZNSo5flushEv
.Ltmp21:
# BB#19: # %_ZNSolsEPFRSoS_E.exit10
.Ltmp22:
# cin >> second string (at [rsp])
lea rsi, [rsp]
mov edi, _ZSt3cin
call _ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE
.Ltmp23:
# BB#20:
# Inlined compare_string(s1.c_str(), s2.c_str()):
# rcx walks s1's data, rax walks s2's data, bl = current s1 byte.
mov rcx, qword ptr [rsp + 32]
mov rax, qword ptr [rsp]
mov bl, byte ptr [rcx]
test bl, bl
je .LBB1_24
# BB#21: # %.lr.ph.i.preheader
inc rcx
.align 16, 0x90
.LBB1_22: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movzx edx, byte ptr [rax]
movzx esi, bl
cmp esi, edx
jne .LBB1_25
# BB#23: # in Loop: Header=BB1_22 Depth=1
inc rax
mov bl, byte ptr [rcx]
inc rcx
test bl, bl
jne .LBB1_22
.LBB1_24:
xor ebx, ebx
.LBB1_25: # %compare_string.exit
# bpl = s2's current byte; compared against bl after the prompt below.
mov bpl, byte ptr [rax]
.Ltmp24:
mov edi, _ZSt4cout
mov esi, .L.str.2
mov edx, 15
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
.Ltmp25:
# BB#26: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit22
# esi = (bl == bpl): the boolean comparison result, printed via
# ostream::operator<<(bool)'s _M_insert<bool>.
movzx eax, bpl
movzx ecx, bl
cmp ecx, eax
sete al
movzx esi, al
.Ltmp26:
mov edi, _ZSt4cout
call _ZNSo9_M_insertIbEERSoT_
mov r14, rax
.Ltmp27:
# BB#27: # %_ZNSolsEb.exit
# Third inlined endl, this time on the stream returned in r14.
mov rax, qword ptr [r14]
mov rax, qword ptr [rax - 24]
mov rbx, qword ptr [r14 + rax + 240]
test rbx, rbx
je .LBB1_28
.LBB1_29: # %.noexc42
cmp byte ptr [rbx + 56], 0
je .LBB1_31
# BB#30:
mov al, byte ptr [rbx + 67]
jmp .LBB1_33
.LBB1_31:
.Ltmp28:
mov rdi, rbx
call _ZNKSt5ctypeIcE13_M_widen_initEv
.Ltmp29:
# BB#32: # %.noexc46
mov rax, qword ptr [rbx]
mov rax, qword ptr [rax + 48]
.Ltmp30:
mov esi, 10
mov rdi, rbx
call rax
.Ltmp31:
.LBB1_33: # %.noexc26
.Ltmp32:
movsx esi, al
mov rdi, r14
call _ZNSo3putEc
.Ltmp33:
# BB#34: # %.noexc27
.Ltmp34:
mov rdi, rax
call _ZNSo5flushEv
.Ltmp35:
# BB#35: # %_ZNSolsEPFRSoS_E.exit25
# Inlined string destructors: operator delete only if the data pointer
# no longer points at the inline SSO buffer (r12 / r15).
mov rdi, qword ptr [rsp]
cmp rdi, r12
je .LBB1_37
# BB#36:
call _ZdlPv
.LBB1_37: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit32
mov rdi, qword ptr [rsp + 32]
cmp rdi, r15
je .LBB1_39
# BB#38:
call _ZdlPv
.LBB1_39: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit3
xor eax, eax
add rsp, 64
pop rbx
pop r12
pop r14
pop r15
pop rbp
ret
# Out-of-line slow paths: throw std::bad_cast when the ctype facet
# pointer was null, then resume the corresponding fast path.
.LBB1_2:
.Ltmp40:
call _ZSt16__throw_bad_castv
.Ltmp41:
jmp .LBB1_3
.LBB1_12:
.Ltmp38:
call _ZSt16__throw_bad_castv
.Ltmp39:
jmp .LBB1_13
.LBB1_28:
.Ltmp36:
call _ZSt16__throw_bad_castv
.Ltmp37:
jmp .LBB1_29
# EH landing pad: destroy both strings, then rethrow via _Unwind_Resume.
.LBB1_40:
.Ltmp42:
mov rbx, rax
mov rdi, qword ptr [rsp]
cmp rdi, r12
je .LBB1_42
# BB#41:
call _ZdlPv
.LBB1_42: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit33
mov rdi, qword ptr [rsp + 32]
cmp rdi, r15
je .LBB1_44
# BB#43:
call _ZdlPv
.LBB1_44: # %_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev.exit
mov rdi, rbx
call _Unwind_Resume
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
.section .gcc_except_table,"a",@progbits
.align 4
GCC_except_table1:
.Lexception0:
.byte 255 # @LPStart Encoding = omit
.byte 3 # @TType Encoding = udata4
.asciz "\234" # @TType base offset
.byte 3 # Call site Encoding = udata4
.byte 26 # Call site table length
.long .Ltmp0-.Lfunc_begin0 # >> Call Site 1 <<
.long .Ltmp37-.Ltmp0 # Call between .Ltmp0 and .Ltmp37
.long .Ltmp42-.Lfunc_begin0 # jumps to .Ltmp42
.byte 0 # On action: cleanup
.long .Ltmp37-.Lfunc_begin0 # >> Call Site 2 <<
.long .Lfunc_end1-.Ltmp37 # Call between .Ltmp37 and .Lfunc_end1
.long 0 # has no landing pad
.byte 0 # On action: cleanup
.align 4
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Static-initializer driver (optimized build): __cxx_global_var_init is
# inlined here — construct the ios_base::Init object, then tail-call
# __cxa_atexit to register its destructor.  Note the 32-bit absolute
# "mov edi, sym" address forms, which only link in non-PIE code.
#-----------------------------------------------------------------------
.align 16, 0x90
.type _GLOBAL__sub_I_test_string_compare.cpp,@function
_GLOBAL__sub_I_test_string_compare.cpp: # @_GLOBAL__sub_I_test_string_compare.cpp
.cfi_startproc
# BB#0:
push rax
.Ltmp54:
.cfi_def_cfa_offset 16
mov edi, _ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
mov edi, _ZNSt8ios_base4InitD1Ev
mov esi, _ZStL8__ioinit
mov edx, __dso_handle
pop rax
jmp __cxa_atexit # TAILCALL
.Lfunc_end2:
.size _GLOBAL__sub_I_test_string_compare.cpp, .Lfunc_end2-_GLOBAL__sub_I_test_string_compare.cpp
.cfi_endproc
.type _ZStL8__ioinit,@object # @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter string 1: "
.size .L.str, 17
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Enter string 2: "
.size .L.str.1, 17
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "The result is: "
.size .L.str.2, 16
.section .init_array,"aw",@init_array
.align 8
.quad _GLOBAL__sub_I_test_string_compare.cpp
.ident "clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 3,016
|
slides/code/08-assembly-64bit/test_abs-non-intel.s
|
.text
.file "test_abs.cpp"
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# AT&T-syntax build of the same source (test_abs.cpp).
# Constructs the std::ios_base::Init object at _ZStL8__ioinit and
# registers its destructor via __cxa_atexit(dtor, obj, __dso_handle).
# "pushq %rax" only realigns rsp to 16 for the calls.
#-----------------------------------------------------------------------
.align 16, 0x90
.type __cxx_global_var_init,@function
__cxx_global_var_init: # @__cxx_global_var_init
.cfi_startproc
# BB#0:
pushq %rax
.Ltmp0:
.cfi_def_cfa_offset 16
movabsq $_ZStL8__ioinit, %rdi
callq _ZNSt8ios_base4InitC1Ev
movabsq $_ZNSt8ios_base4InitD1Ev, %rdi
movabsq $_ZStL8__ioinit, %rsi
movabsq $__dso_handle, %rdx
callq __cxa_atexit
# __cxa_atexit's return value is spilled and never reloaded (dead store).
movl %eax, 4(%rsp) # 4-byte Spill
popq %rax
retq
.Lfunc_end0:
.size __cxx_global_var_init, .Lfunc_end0-__cxx_global_var_init
.cfi_endproc
.text
#-----------------------------------------------------------------------
# long absolute_value(long x) — AT&T syntax.
# In:  %rdi = x.  Out: %rax = |x|.  Leaf function; the argument lives
# in the SysV red zone at -8(%rsp).  If x < 0 it computes 0 - x
# (LONG_MIN wraps back to LONG_MIN, the two's-complement edge case).
#-----------------------------------------------------------------------
.globl absolute_value
.align 16, 0x90
.type absolute_value,@function
absolute_value: # @absolute_value
.cfi_startproc
# BB#0:
movq %rdi, -8(%rsp)
cmpq $0, -8(%rsp)
jge .LBB1_2
# BB#1:
# Negative path: %rcx = 0 - x, stored back into the red-zone slot.
xorl %eax, %eax
movl %eax, %ecx
subq -8(%rsp), %rcx
movq %rcx, -8(%rsp)
.LBB1_2:
movq -8(%rsp), %rax
retq
.Lfunc_end1:
.size absolute_value, .Lfunc_end1-absolute_value
.cfi_endproc
#-----------------------------------------------------------------------
# int main() — AT&T-syntax, unoptimized clang output.
# Prompts on std::cout, reads a long via cin's operator>>(long&)
# (_ZNSirsERl) into 40(%rsp), calls absolute_value(), prints the result
# with operator<<(long) (_ZNSolsEl) and std::endl.  Returns 0.
#-----------------------------------------------------------------------
.globl main
.align 16, 0x90
.type main,@function
main: # @main
.cfi_startproc
# BB#0:
subq $56, %rsp
.Ltmp1:
.cfi_def_cfa_offset 64
movabsq $_ZSt4cout, %rdi
movabsq $.L.str, %rsi
# 52(%rsp) presumably main's zero-initialized return slot (TODO confirm);
# 40(%rsp) holds the long read from cin below.
movl $0, 52(%rsp)
movq $0, 40(%rsp)
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movabsq $_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_, %rsi
movq %rax, %rdi
callq _ZNSolsEPFRSoS_E
movabsq $_ZSt3cin, %rdi
leaq 40(%rsp), %rsi
# Spills at 24/16/8(%rsp) are never reloaded (dead stores at -O0).
movq %rax, 24(%rsp) # 8-byte Spill
callq _ZNSirsERl
movq 40(%rsp), %rdi
movq %rax, 16(%rsp) # 8-byte Spill
callq absolute_value
movabsq $_ZSt4cout, %rdi
movabsq $.L.str.1, %rsi
movq %rax, 32(%rsp)
callq _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movq 32(%rsp), %rsi
movq %rax, %rdi
callq _ZNSolsEl
movabsq $_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_, %rsi
movq %rax, %rdi
callq _ZNSolsEPFRSoS_E
# return 0
xorl %ecx, %ecx
movq %rax, 8(%rsp) # 8-byte Spill
movl %ecx, %eax
addq $56, %rsp
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Static-initializer driver (AT&T syntax), invoked at startup via the
# .init_array entry below.  "pushq %rax" only realigns rsp to 16.
#-----------------------------------------------------------------------
.align 16, 0x90
.type _GLOBAL__sub_I_test_abs.cpp,@function
_GLOBAL__sub_I_test_abs.cpp: # @_GLOBAL__sub_I_test_abs.cpp
.cfi_startproc
# BB#0:
pushq %rax
.Ltmp2:
.cfi_def_cfa_offset 16
callq __cxx_global_var_init
popq %rax
retq
.Lfunc_end3:
.size _GLOBAL__sub_I_test_abs.cpp, .Lfunc_end3-_GLOBAL__sub_I_test_abs.cpp
.cfi_endproc
.type _ZStL8__ioinit,@object # @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter a value: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "The result is: "
.size .L.str.1, 16
.section .init_array,"aw",@init_array
.align 8
.quad _GLOBAL__sub_I_test_abs.cpp
.ident "clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 3,632
|
slides/code/08-assembly-64bit/test_max-noextern.s
|
.text
.intel_syntax noprefix
.file "foo.cpp"
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Constructs the std::ios_base::Init object at _ZStL8__ioinit and
# registers its destructor via __cxa_atexit(dtor, obj, __dso_handle).
# "push rax" only realigns rsp to 16 for the calls.
#-----------------------------------------------------------------------
.align 16, 0x90
.type __cxx_global_var_init,@function
__cxx_global_var_init: # @__cxx_global_var_init
.cfi_startproc
# BB#0:
push rax
.Ltmp0:
.cfi_def_cfa_offset 16
movabs rdi, _ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
movabs rdi, _ZNSt8ios_base4InitD1Ev
movabs rsi, _ZStL8__ioinit
movabs rdx, __dso_handle
call __cxa_atexit
# __cxa_atexit's return value is spilled and never reloaded (dead store).
mov dword ptr [rsp + 4], eax # 4-byte Spill
pop rax
ret
.Lfunc_end0:
.size __cxx_global_var_init, .Lfunc_end0-__cxx_global_var_init
.cfi_endproc
.text
#-----------------------------------------------------------------------
# int max(int a, int b) — C++-mangled name _Z3maxii (no extern "C").
# In:  edi = a, esi = b.  Out: eax = the larger value (signed compare,
# jle).  Leaf function using SysV red-zone slots: [rsp-4] = a,
# [rsp-8] = b, [rsp-12] = result.
#-----------------------------------------------------------------------
.globl _Z3maxii
.align 16, 0x90
.type _Z3maxii,@function
_Z3maxii: # @_Z3maxii
.cfi_startproc
# BB#0:
mov dword ptr [rsp - 4], edi
mov dword ptr [rsp - 8], esi
mov esi, dword ptr [rsp - 4]
cmp esi, dword ptr [rsp - 8]
jle .LBB1_2
# BB#1:
# a > b: result = a
mov eax, dword ptr [rsp - 4]
mov dword ptr [rsp - 12], eax
jmp .LBB1_3
.LBB1_2:
# a <= b: result = b
mov eax, dword ptr [rsp - 8]
mov dword ptr [rsp - 12], eax
.LBB1_3:
mov eax, dword ptr [rsp - 12]
ret
.Lfunc_end1:
.size _Z3maxii, .Lfunc_end1-_Z3maxii
.cfi_endproc
#-----------------------------------------------------------------------
# int main() — unoptimized clang output.
# Prompts twice on std::cout, reads two ints via cin's operator>>(int&)
# (_ZNSirsERi) into [rsp+48] and [rsp+44], calls _Z3maxii, and prints
# the result with operator<<(int) (_ZNSolsEi) and std::endl.  Returns 0.
#-----------------------------------------------------------------------
.globl main
.align 16, 0x90
.type main,@function
main: # @main
.cfi_startproc
# BB#0:
sub rsp, 56
.Ltmp1:
.cfi_def_cfa_offset 64
movabs rdi, _ZSt4cout
movabs rsi, .L.str
# [rsp+52] presumably main's zero-initialized return slot (TODO confirm);
# [rsp+48] and [rsp+44] hold the two ints read from cin below.
mov dword ptr [rsp + 52], 0
mov dword ptr [rsp + 48], 0
mov dword ptr [rsp + 44], 0
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
movabs rdi, _ZSt3cin
lea rsi, [rsp + 48]
# Spill slots [rsp+32]..[rsp] hold stream return values that are never
# reloaded (dead stores typical of -O0 codegen).
mov qword ptr [rsp + 32], rax # 8-byte Spill
call _ZNSirsERi
movabs rdi, _ZSt4cout
movabs rsi, .L.str.1
mov qword ptr [rsp + 24], rax # 8-byte Spill
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
movabs rdi, _ZSt3cin
lea rsi, [rsp + 44]
mov qword ptr [rsp + 16], rax # 8-byte Spill
call _ZNSirsERi
# edi/esi = the two values read; the result (eax) is kept at [rsp+40].
mov edi, dword ptr [rsp + 48]
mov esi, dword ptr [rsp + 44]
mov qword ptr [rsp + 8], rax # 8-byte Spill
call _Z3maxii
movabs rdi, _ZSt4cout
movabs rsi, .L.str.2
mov dword ptr [rsp + 40], eax
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc
mov esi, dword ptr [rsp + 40]
mov rdi, rax
call _ZNSolsEi
movabs rsi, _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_
mov rdi, rax
call _ZNSolsEPFRSoS_E
# return 0
xor ecx, ecx
mov qword ptr [rsp], rax # 8-byte Spill
mov eax, ecx
add rsp, 56
ret
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Static-initializer driver, invoked at startup via the .init_array
# entry at the bottom of the file.  "push rax" only realigns rsp to 16.
#-----------------------------------------------------------------------
.align 16, 0x90
.type _GLOBAL__sub_I_foo.cpp,@function
_GLOBAL__sub_I_foo.cpp: # @_GLOBAL__sub_I_foo.cpp
.cfi_startproc
# BB#0:
push rax
.Ltmp2:
.cfi_def_cfa_offset 16
call __cxx_global_var_init
pop rax
ret
.Lfunc_end3:
.size _GLOBAL__sub_I_foo.cpp, .Lfunc_end3-_GLOBAL__sub_I_foo.cpp
.cfi_endproc
.type _ZStL8__ioinit,@object # @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter value 1: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Enter value 2: "
.size .L.str.1, 16
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "The result is: "
.size .L.str.2, 16
.section .init_array,"aw",@init_array
.align 8
.quad _GLOBAL__sub_I_foo.cpp
.ident "clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 1,673
|
slides/code/08-assembly-64bit/test_abs_c.s
|
.text
.intel_syntax noprefix
.file "test_abs_c.c"
#-----------------------------------------------------------------------
# long absolute_value(long x) — C version (test_abs_c.c).
# In:  rdi = x.  Out: rax = |x|.  Leaf function; the argument lives in
# the SysV red zone at [rsp-8].  If x < 0 it computes 0 - x (LONG_MIN
# wraps back to LONG_MIN, the two's-complement edge case).
#-----------------------------------------------------------------------
.globl absolute_value
.align 16, 0x90
.type absolute_value,@function
absolute_value: # @absolute_value
.cfi_startproc
# BB#0:
mov qword ptr [rsp - 8], rdi
cmp qword ptr [rsp - 8], 0
jge .LBB0_2
# BB#1:
# Negative path: rcx = 0 - x, stored back into the red-zone slot.
xor eax, eax
mov ecx, eax
sub rcx, qword ptr [rsp - 8]
mov qword ptr [rsp - 8], rcx
.LBB0_2:
mov rax, qword ptr [rsp - 8]
ret
.Lfunc_end0:
.size absolute_value, .Lfunc_end0-absolute_value
.cfi_endproc
#-----------------------------------------------------------------------
# int main() — C stdio version.
# printf(prompt); scanf("%ld", &value at [rsp+24]); absolute_value();
# printf("The result is: %ld\n", result).  Returns 0.
# "mov al, 0" before each variadic call is the SysV requirement that
# al hold the number of vector registers used (none here).
#-----------------------------------------------------------------------
.globl main
.align 16, 0x90
.type main,@function
main: # @main
.cfi_startproc
# BB#0:
sub rsp, 40
.Ltmp0:
.cfi_def_cfa_offset 48
movabs rdi, .L.str
# [rsp+36] presumably main's zero-initialized return slot (TODO confirm);
# [rsp+24] holds the long read by scanf below.
mov dword ptr [rsp + 36], 0
mov qword ptr [rsp + 24], 0
mov al, 0
call printf
movabs rdi, .L.str.1
lea rsi, [rsp + 24]
# printf/scanf return values spilled at [rsp+12]/[rsp+8]/[rsp+4] are
# never reloaded (dead stores typical of -O0 codegen).
mov dword ptr [rsp + 12], eax # 4-byte Spill
mov al, 0
call __isoc99_scanf
mov rdi, qword ptr [rsp + 24]
mov dword ptr [rsp + 8], eax # 4-byte Spill
call absolute_value
movabs rdi, .L.str.2
mov qword ptr [rsp + 16], rax
mov rsi, qword ptr [rsp + 16]
mov al, 0
call printf
# return 0
xor ecx, ecx
mov dword ptr [rsp + 4], eax # 4-byte Spill
mov eax, ecx
add rsp, 40
ret
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter a value: \n"
.size .L.str, 17
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%ld"
.size .L.str.1, 4
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "The result is: %ld\n"
.size .L.str.2, 20
.ident "clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 4,444
|
slides/code/08-assembly-64bit/test_max-O2.s
|
.text
.intel_syntax noprefix
.file "test_max.cpp"
#-----------------------------------------------------------------------
# int max(int a, int b) — optimized (-O2) build: branchless via cmovge.
# In:  edi = a, esi = b.  Out: eax = larger value (signed comparison).
#-----------------------------------------------------------------------
.globl max
.align 16, 0x90
.type max,@function
max: # @max
.cfi_startproc
# BB#0:
cmp edi, esi
cmovge esi, edi # esi = (a >= b) ? a : b
mov eax, esi
ret
.Lfunc_end0:
.size max, .Lfunc_end0-max
.cfi_endproc
#-----------------------------------------------------------------------
# int main() — optimized (-O2) clang output.
# Reads two ints from std::cin into [rsp+4] and [rsp], computes the max
# inline with cmovge (max() itself is not called), and prints it.  Each
# "cout << endl" is inlined as: fetch cout's ctype<char> facet, lazily
# widen('\n') via _M_widen_init if needed, then put + flush.  A null
# facet pointer jumps to .LBB1_13, which throws std::bad_cast.
# Callee-saved r14/rbx are preserved; "push rax" is alignment padding.
#-----------------------------------------------------------------------
.globl main
.align 16, 0x90
.type main,@function
main: # @main
.cfi_startproc
# BB#0:
push r14
.Ltmp0:
.cfi_def_cfa_offset 16
push rbx
.Ltmp1:
.cfi_def_cfa_offset 24
push rax
.Ltmp2:
.cfi_def_cfa_offset 32
.Ltmp3:
.cfi_offset rbx, -24
.Ltmp4:
.cfi_offset r14, -16
# Locals: [rsp+4] = first value, [rsp] = second value.
mov dword ptr [rsp + 4], 0
mov dword ptr [rsp], 0
mov edi, _ZSt4cout
mov esi, .L.str
mov edx, 15
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
# Inlined endl #1: rbx = cout's ctype<char> facet.
mov rax, qword ptr [rip + _ZSt4cout]
mov rax, qword ptr [rax - 24]
mov rbx, qword ptr [rax + _ZSt4cout+240]
test rbx, rbx
je .LBB1_13
# BB#1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit
cmp byte ptr [rbx + 56], 0
je .LBB1_3
# BB#2:
mov al, byte ptr [rbx + 67]
jmp .LBB1_4
.LBB1_3:
mov rdi, rbx
call _ZNKSt5ctypeIcE13_M_widen_initEv
mov rax, qword ptr [rbx]
mov esi, 10
mov rdi, rbx
call qword ptr [rax + 48]
.LBB1_4: # %_ZNKSt5ctypeIcE5widenEc.exit
movsx esi, al
mov edi, _ZSt4cout
call _ZNSo3putEc
mov rdi, rax
call _ZNSo5flushEv
# cin >> first value
lea rsi, [rsp + 4]
mov edi, _ZSt3cin
call _ZNSirsERi
mov edi, _ZSt4cout
mov esi, .L.str.1
mov edx, 15
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
# Inlined endl #2.
mov rax, qword ptr [rip + _ZSt4cout]
mov rax, qword ptr [rax - 24]
mov rbx, qword ptr [rax + _ZSt4cout+240]
test rbx, rbx
je .LBB1_13
# BB#5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit5
cmp byte ptr [rbx + 56], 0
je .LBB1_7
# BB#6:
mov al, byte ptr [rbx + 67]
jmp .LBB1_8
.LBB1_7:
mov rdi, rbx
call _ZNKSt5ctypeIcE13_M_widen_initEv
mov rax, qword ptr [rbx]
mov esi, 10
mov rdi, rbx
call qword ptr [rax + 48]
.LBB1_8: # %_ZNKSt5ctypeIcE5widenEc.exit2
movsx esi, al
mov edi, _ZSt4cout
call _ZNSo3putEc
mov rdi, rax
call _ZNSo5flushEv
# cin >> second value
lea rsi, [rsp]
mov edi, _ZSt3cin
call _ZNSirsERi
# Inlined max(): ebx = larger of the two values.
mov eax, dword ptr [rsp + 4]
mov ebx, dword ptr [rsp]
cmp eax, ebx
cmovge ebx, eax
mov edi, _ZSt4cout
mov esi, .L.str.2
mov edx, 15
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
mov edi, _ZSt4cout
mov esi, ebx
call _ZNSolsEi
# Inlined endl #3, on the stream returned in r14.
mov r14, rax
mov rax, qword ptr [r14]
mov rax, qword ptr [rax - 24]
mov rbx, qword ptr [r14 + rax + 240]
test rbx, rbx
je .LBB1_13
# BB#9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit6
cmp byte ptr [rbx + 56], 0
je .LBB1_11
# BB#10:
mov al, byte ptr [rbx + 67]
jmp .LBB1_12
.LBB1_11:
mov rdi, rbx
call _ZNKSt5ctypeIcE13_M_widen_initEv
mov rax, qword ptr [rbx]
mov esi, 10
mov rdi, rbx
call qword ptr [rax + 48]
.LBB1_12: # %_ZNKSt5ctypeIcE5widenEc.exit4
movsx esi, al
mov rdi, r14
call _ZNSo3putEc
mov rdi, rax
call _ZNSo5flushEv
# return 0
xor eax, eax
add rsp, 8
pop rbx
pop r14
ret
# Null ctype facet: throw std::bad_cast (noreturn).
.LBB1_13:
call _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
.section .text.startup,"ax",@progbits
#-----------------------------------------------------------------------
# Static-initializer driver (optimized build): __cxx_global_var_init is
# inlined here — construct the ios_base::Init object, then tail-call
# __cxa_atexit to register its destructor.  Note the 32-bit absolute
# "mov edi, sym" address forms, which only link in non-PIE code.
#-----------------------------------------------------------------------
.align 16, 0x90
.type _GLOBAL__sub_I_test_max.cpp,@function
_GLOBAL__sub_I_test_max.cpp: # @_GLOBAL__sub_I_test_max.cpp
.cfi_startproc
# BB#0:
push rax
.Ltmp5:
.cfi_def_cfa_offset 16
mov edi, _ZStL8__ioinit
call _ZNSt8ios_base4InitC1Ev
mov edi, _ZNSt8ios_base4InitD1Ev
mov esi, _ZStL8__ioinit
mov edx, __dso_handle
pop rax
jmp __cxa_atexit # TAILCALL
.Lfunc_end2:
.size _GLOBAL__sub_I_test_max.cpp, .Lfunc_end2-_GLOBAL__sub_I_test_max.cpp
.cfi_endproc
.type _ZStL8__ioinit,@object # @_ZStL8__ioinit
.local _ZStL8__ioinit
.comm _ZStL8__ioinit,1,1
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter value 1: "
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Enter value 2: "
.size .L.str.1, 16
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "The result is: "
.size .L.str.2, 16
.section .init_array,"aw",@init_array
.align 8
.quad _GLOBAL__sub_I_test_max.cpp
.ident "clang version 3.8.0-2ubuntu4 (tags/RELEASE_380/final)"
.section ".note.GNU-stack","",@progbits
|
aaronbloomfield/pdr
| 2,658
|
labs/lab08-64bit/mergeSort.s
|
; University of Virginia
; CS 2150 In-Lab 8
; Spring 2018
; mergeSort.s
global mergeSort
global merge
section .text
; Parameter 1 is a pointer to the int array
; Parameter 2 is the left index in the array
; Parameter 3 is the right index in the array
; Return type is void
mergeSort:
; Implement mergeSort here
; NOTE(review): mergeSort is an unimplemented lab stub.  As written it
; has no instructions and no ret, so a call to mergeSort falls straight
; through into merge below — intentional only as a skeleton.
; Parameter 1 is a pointer to the int array
; Parameter 2 is the left index in the array
; Parameter 3 is the middle index in the array
; Parameter 4 is the right index in the array
; Return type is void
;
; void merge(int *arr /*rdi*/, long left /*rsi*/, long mid /*rdx*/,
;            long right /*rcx*/)
; Merges the two sorted runs arr[left..mid] and arr[mid+1..right].
; Register roles: rbx = i, r12 = j, r13 = k, r14 = n1, r15 = n2,
; r10 = scratch, r11 = byte size of the temp arrays (must survive to
; the epilogue; safe because this routine makes no calls).
; Temp layout on the stack: L[] at [rsp], R[] at [rsp + 4*n1].
merge:
; Save callee-save registers
; Store local variables
push rbx ; int i
push r12 ; int j
push r13 ; int k
push r14 ; int n1
push r15 ; int n2
mov r14, rdx ; n1 = mid - left + 1
sub r14, rsi
inc r14
mov r15, rcx ; n2 = right - mid
sub r15, rdx
lea r11, [r14+r15] ; r11 = rsp offset = 4(n1 + n2)
lea r11, [4*r11]
sub rsp, r11 ; allocate space for temp arrays
mov rbx, 0 ; i = 0
mov r12, 0 ; j = 0
; Copy elements of arr[] into L[]
copyLloop:
cmp rbx, r14 ; is i >= n1?
jge copyRloop
; L[i] = arr[left + i]
lea r10, [rsi+rbx]
mov r10d, DWORD [rdi+4*r10] ; r10 = arr[left + i]
mov DWORD [rsp+4*rbx], r10d ; L[i] = r10
inc rbx ; i++
jmp copyLloop
; Copy elements of arr[] into R[]
copyRloop:
cmp r12, r15 ; is j >= n2?
jge endcopyR
; R[j] = arr[mid + 1 + j]
lea r10, [rdx+r12+1]
mov r10d, DWORD [rdi+4*r10] ; r10 = arr[mid + 1 + j]
lea rax, [r12+r14]
mov DWORD [rsp+4*rax], r10d ; R[j] = r10
inc r12 ; j++
jmp copyRloop
endcopyR:
mov rbx, 0 ; i = 0
mov r12, 0 ; j = 0
mov r13, rsi ; k = left
; Merge L[] and R[] into arr[]
mergeLoop:
cmp rbx, r14 ; is i >= n1 or j >= n2?
jge loopL
cmp r12, r15
jge loopL
lea r10, [r12+r14]
mov r10d, DWORD [rsp+4*r10] ; r10d = R[j]
cmp DWORD [rsp+4*rbx], r10d ; is L[i] <= R[j]?
jle if
mov DWORD [rdi+4*r13], r10d ; arr[k] = R[j]
inc r12 ; j++
jmp endif
if:
; L[i] <= R[j]: taking from L keeps the merge stable for equal keys.
mov r10d, DWORD [rsp+4*rbx]
mov DWORD [rdi+4*r13], r10d ; arr[k] = L[i]
inc rbx ; i++
endif:
inc r13 ; k++
jmp mergeLoop
; Copy elements of L[] into arr[]
; (exactly one of the two drain loops below does any work)
loopL:
cmp rbx, r14 ; is i >= n1?
jge loopR
mov r10d, DWORD [rsp+4*rbx]
mov DWORD [rdi+4*r13], r10d ; arr[k] = L[i]
inc rbx ; i++
inc r13 ; k++
jmp loopL
; Copy elements of R[] into arr[]
loopR:
cmp r12, r15 ; is j >= n2?
jge endR
lea r10, [r12+r14]
mov r10d, DWORD [rsp+4*r10]
mov DWORD [rdi+4*r13], r10d ; arr[k] = R[j]
inc r12 ; j++
inc r13 ; k++
jmp loopR
endR:
; deallocate temp arrays
; restore callee-save registers
add rsp, r11
pop r15
pop r14
pop r13
pop r12
pop rbx
ret
|
aaronbloomfield/pdr
| 1,088
|
labs/lab08-64bit/vecsum.s
|
; vecsum.s
;
; Purpose : Implements vecsum, which adds up a vector of longs.
;           C equivalent: long vecsum(const long *a, long n)
;
; Parameter 1 (in rdi) is the starting address of a sequence of 64-bit longs
; Parameter 2 (in rsi) is the number of integers in the sequence
; Return value (in rax) is the sum of the integers in the sequence
;
global vecsum
section .text
vecsum:
; Leaf routine: no stack frame, no callee-saved registers needed.
; Walks a pointer through the array rather than indexing by a counter.
xor rax, rax ; running total starts at zero
test rsi, rsi ; empty sequence?
jz .done ; then the sum is simply zero
lea rcx, [rdi+8*rsi] ; rcx = one past the final element
.accumulate:
add rax, [rdi] ; total += *p
add rdi, 8 ; p++
cmp rdi, rcx ; reached the end of the vector?
jb .accumulate ; not yet -- keep summing
.done:
; The return value is already in rax; nothing to restore or free.
ret
|
aaronbloomfield/pdr
| 1,869
|
labs/lab08-32bit/vecsum.s
|
; vecsum.s
;
; Author : Adam Ferrari
; Date : Jan 29, 1998
; Purpose : Implements vecsum, which adds up a vector of integers.
; Modified for NASM by Aaron Bloomfield on 9 Nov 2007
global vecsum
section .text
;
; vecsum -- cdecl, 32-bit x86
; Parameter 1 - the starting address of a sequence of 32-bit integers.
; Parameter 2 - the number of integers in the sequence.
; Return value - the sum of the integers in the sequence.
;
vecsum:
; Prologue: establish the frame and preserve ESI (callee-saved).
push ebp
mov ebp, esp
push esi
xor eax, eax ; eax holds the running sum (the return value)
mov esi, [ebp+8] ; esi = pointer to the current element
mov ecx, [ebp+12] ; ecx = elements remaining to be added
test ecx, ecx ; a non-positive count contributes nothing,
jle .done ; so return the zero already in eax
.loop:
add eax, [esi] ; sum += *p
add esi, 4 ; p++ (each element is 4 bytes)
dec ecx ; one fewer element left
jnz .loop ; continue until the count reaches zero
.done:
; Epilogue: restore the saved register and the caller's frame.
pop esi
pop ebp
ret
|
aaronbloomfield/pdr
| 1,264
|
uva/lectures/bloomfield-fall-2020/lec25/assembly.s
|
; assembly.s, which contains the addOrMult() function
;
; Parameter 1 (in rdi) is the boolean (whether to add or mult)
; Parameter 2 (in rsi) is the starting address of a sequence of 64-bit longs
; Parameter 3 (in rdx) is the number of integers in the sequence
; Return value is a long: the sum (boolean true) or the product
; (boolean false) of the integers in the sequence
;
global addOrMult
section .text
addOrMult:
xor r10, r10 ; i = 0 (shared by both loops below)
test dil, dil ; the boolean lives in the low byte of rdi
jz .product ; false -> multiply; true -> fall through and add
xor rax, rax ; additive identity (n == 0 yields 0)
.sumloop:
cmp r10, rdx ; processed all n elements?
je .finished
add rax, [rsi+8*r10] ; total += a[i]
inc r10
jmp .sumloop
.product:
mov rax, 1 ; multiplicative identity (n == 0 yields 1)
.prodloop:
cmp r10, rdx ; processed all n elements?
je .finished
imul rax, [rsi+8*r10] ; total *= a[i]
inc r10
jmp .prodloop
.finished:
; Leaf routine: nothing was saved and no locals were allocated,
; and the result is already in rax.
ret
|
aaronbloomfield/pdr
| 1,029
|
uva/lectures/bloomfield-fall-2020/lec27/fib.s
|
; CS 2150, exam 2, fall 2019, question 11
; NOTE: this is a fill-in-the-blank exam listing (the answers appear in the
; comments); the '________' placeholders and the pseudo-mnemonic 'sum' mean
; it is study material and is NOT meant to assemble as-is.
fib: ; 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, ...
; fib(n) = fib(n-1) + fib(n-2); n in rdi, result in rax
xor rax, rax ; set rax = 0
cmp rdi, 0 ; compare 1st param to zero
je done ; jump to done if param is zero
cmp rdi, 2 ; compare 1st param to 2
jle base ; jump to base if 1st param is <=2
push rax ; no reason for this line
dec rdi ; change parameter to n-1 (from n)
________ ; push rdi: save rdi with the n-1 value
call fib ; call fib(n-1)
________ ; pop rdi: restore rdi with the n-1 value
________ ; must be a pop -- but to where?; pop to anywhere (pop r10; pop r11; pop rsi)
mov r10, rax ; copy rax into r10; no reason for this line
push rax ; push fib(n-1) result onto stack
dec rdi ; change parameter to n-2 (from n-1)
call fib ; call fib(n-2) -> rax
pop r10 ; restore fib(n-1) result into r10
sum rax, r10 ; sum fib(n-1) and fib(n-2) into rax (sum == add)
ret ; return result
base:
mov rax, 1 ; base case: fib(1) = fib(2) = 1
done:
ret ; return value in rax (fib(0) = 0 falls through here)
|
aaronbloomfield/pdr
| 1,376
|
uva/lectures/bloomfield-fall-2020/lec26/assembly.s
|
; factArray() and factorial()
;
; for factArray():
; Parameter 1 (in rdi) is the starting address of a sequence of 64-bit longs
; Parameter 2 (in rsi) is the number of integers in the sequence
;
; for factorial():
; Parameter 1 (in rdi) is the value to return the factorial of
global factArray
global factorial
section .text
; factArray(): given an array and a size, compute the factorial of each
; element of that array, in place: a[i] = factorial(a[i]) for i in [0, n)
; Parameter 1 (rdi) - array of 64-bit longs; Parameter 2 (rsi) - count
factArray:
xor r10, r10 ; r10 will store i to loop through a
_factArrayLoop:
cmp r10, rsi ; is i == n?
je _factArrayDone
; put parameters in proper place
; save values that could get overwritten
; (rdi/rsi/r10 are all caller-saved, so the call may clobber them)
push rdi
push rsi
push r10
; put the parameter into rdi and call factorial()
mov rdi, [rdi + 8*r10]
call factorial
; restore the pushed values
pop r10
pop rsi
pop rdi
; store the return value in the right place
mov [rdi + 8*r10], rax
; finish up the loop
inc r10
jmp _factArrayLoop
_factArrayDone:
ret
; factorial(): takes one param (in rdi) and computes the factorial of that
; value recursively, returning it in rax; any n <= 1 yields 1.
factorial:
cmp rdi, 1 ; base case reached?
jg .recurse ; n > 1: do the recursive step
mov rax, 1 ; n <= 1: factorial is 1
ret
.recurse:
push rdi ; keep n live across the recursive call
dec rdi ; argument becomes n-1
call factorial ; rax = factorial(n-1)
pop rdi ; recover n
imul rax, rdi ; rax = n * factorial(n-1)
ret
|
aaronbloomfield/pdr
| 1,264
|
uva/lectures/bloomfield-spring-2021/lec25/assembly.s
|
; assembly.s, which contains the addOrMult() function
;
; Parameter 1 (in rdi) is the boolean (whether to add or mult)
; Parameter 2 (in rsi) is the starting address of a sequence of 64-bit longs
; Parameter 3 (in rdx) is the number of integers in the sequence
; Return value is a long: the sum (boolean true) or the product
; (boolean false) of the integers in the sequence
;
global addOrMult
section .text
addOrMult:
; subroutine body:
xor rax, rax ; zero out the return register
xor r10, r10 ; zero out the counter i
; see if boolean is true or false
; boolean is in lowest 1 byte of rdi (dil)
cmp dil, 0
je _mult ; false -> multiply; true -> fall through and add
_add:
cmp r10, rdx ; does i == n?
je _done ; if so, we are done with the loop
add rax, [rsi+8*r10] ; add a[i] to rax
inc r10 ; increment our counter i
jmp _add ; back to the top of the addition loop
_mult:
mov rax, 1 ; multiplicative identity (n == 0 returns 1)
_multloop:
cmp r10, rdx ; does i == n?
je _done ; if so, we are done with the loop
imul rax, [rsi+8*r10] ; multiply a[i] into rax
inc r10 ; increment our counter i
jmp _multloop ; back to the top of the multiplication loop
_done:
; Standard epilogue: the return value is already in rax, we
; do not have any callee-saved registers to restore, and we do not
; have any local variables to deallocate, so all we do is return
ret
|
aaronbloomfield/pdr
| 1,447
|
uva/lectures/bloomfield-spring-2021/lec26/assembly.s
|
; factArray() and factorial()
;
; for factArray():
; Parameter 1 (in rdi) is the starting address of a sequence of 64-bit longs
; Parameter 2 (in rsi) is the number of integers in the sequence
;
; for factorial():
; Parameter 1 (in rdi) is the value to return the factorial of
global factArray
global factorial
section .text
; factArray(): given an array and a size, compute the factorial of each
; element of that array, in place: a[i] = factorial(a[i]) for i in [0, n)
factArray:
xor r10, r10 ; r10 will store i to loop through a
_factArrayLoop:
cmp r10, rsi ; is i == n?
je _factArrayDone
; put parameters in proper place
; save values that could get overwritten
; (rdi/rsi/r10 are all caller-saved, so the call may clobber them)
push rdi
push rsi
push r10
; put the parameter into rdi and call factorial()
mov rdi, [rdi + 8*r10]
call factorial
; restore the pushed values
pop r10
pop rsi
pop rdi
; store the return value in the right place
mov [rdi + 8*r10], rax
; finish up the loop
inc r10
jmp _factArrayLoop
_factArrayDone:
ret
; factorial(): takes one param (in rdi) and computes the factorial of that
; value recursively; returns n! in rax (n <= 1 yields 1); clobbers rsi
factorial:
cmp rdi, 1 ; is the parameter 1?
jle _factbase ; (or anything smaller: n <= 1 returns 1)
; recursively compute the factorial
mov rsi, rdi ; mov param over
push rsi ; save n across the recursive call
;; the previous two lines could have been simplified with 'push rdi'
dec rdi ; setup n-1 for recursive call
call factorial ; call factorial; rax = (n-1)!
pop rsi ; rsi = n again
imul rax, rsi ; multiply n into recursive answer
ret ; return
_factbase:
mov rax, 1
ret
|
aaron-ev/CLI-STM32-FreeRTOS
| 20,143
|
workspace/cliFreeRTOS/Core/Startup/startup_stm32f401ccux.s
|
/**
******************************************************************************
* @file startup_stm32f401xc.s
* @author MCD Application Team
* @brief STM32F401xCxx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* Copyright (c) 2017 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
/* Reset_Handler: first code executed after any reset.
 * Copies the .data initializers from flash to SRAM, zero-fills .bss,
 * runs SystemInit and the C static constructors, then enters main().
 * Register roles in the init loops:
 *   r0 = .data dest base, r1 = .data dest end, r2 = flash source (then the
 *   .bss cursor), r3 = byte offset (then the zero constant), r4 = scratch. */
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM */
ldr r0, =_sdata
ldr r1, =_edata
ldr r2, =_sidata
movs r3, #0
b LoopCopyDataInit /* bottom-tested loop: an empty .data copies nothing */
CopyDataInit:
ldr r4, [r2, r3] /* word from flash at _sidata + offset ... */
str r4, [r0, r3] /* ... stored to SRAM at _sdata + offset */
adds r3, r3, #4
LoopCopyDataInit:
adds r4, r0, r3 /* r4 = current destination address */
cmp r4, r1
bcc CopyDataInit /* keep copying while below _edata */
/* Zero fill the bss segment. */
ldr r2, =_sbss
ldr r4, =_ebss
movs r3, #0
b LoopFillZerobss /* bottom-tested: an empty .bss writes nothing */
FillZerobss:
str r3, [r2]
adds r2, r2, #4
LoopFillZerobss:
cmp r2, r4
bcc FillZerobss /* keep zeroing while below _ebss */
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr /* only reached if main() ever returns */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
/* Default_Handler: catch-all for any unexpected interrupt. It spins
 * forever so the system state is preserved for a debugger to examine.
 * All handlers below are weakly aliased to this routine. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M4. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
* Layout: initial SP, then the 15 Cortex-M system exception entries,
* then the STM32F401xC external interrupt entries (zero words are
* positions the reference manual marks as reserved).
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): '.size' is evaluated here, before the table is emitted, so
 * it records 0 -- harmless, and vendor startup files ship it this way, but
 * TODO confirm it was not meant to follow the table. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
.word _estack /* initial stack pointer */
.word Reset_Handler /* reset vector */
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition. ('.thumb_set' behaves like '.set' but additionally marks
* the alias as a Thumb function entry point.)
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
|
AaronChuzb/TD3
| 19,929
|
components/lvgl/rlottie/src/vector/pixman/pixman-arm-neon-asm.S
|
/*
* Copyright © 2009 Nokia Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Siarhei Siamashka (siarhei.siamashka@nokia.com)
*/
/*
* This file contains implementations of NEON optimized pixel processing
* functions. There is no full and detailed tutorial, but some functions
* (those which are exposing some new or interesting features) are
* extensively commented and can be used as examples.
*
* You may want to have a look at the comments for following functions:
* - pixman_composite_over_8888_0565_asm_neon
* - pixman_composite_over_n_8_0565_asm_neon
*/
/* Prevent the stack from becoming executable for no reason... */
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.text
.fpu neon
.arch armv7a
.object_arch armv4
.eabi_attribute 10, 0 /* suppress Tag_FP_arch */
.eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */
.arm
.altmacro
.p2align 2
//#include "pixman-arm-asm.h"
/* Supplementary macro for setting function attributes.
 * Declares and opens an exported function: marks fname global (and, when
 * assembling for ELF, hidden with %function type) and emits its label.
 * NOTE(review): '.func' is opened here but no '.endfunc' appears in this
 * macro -- presumably a companion macro in pixman-arm-asm.h closes it;
 * TODO confirm against that header. */
.macro pixman_asm_function fname
.func fname
.global fname
#ifdef __ELF__
.hidden fname
.type fname, %function
#endif
fname:
.endm
//#include "pixman-private.h"
/*
* The defines which are shared between C and assembly code
*/
/* bilinear interpolation precision (must be < 8) */
#define BILINEAR_INTERPOLATION_BITS 7
#define BILINEAR_INTERPOLATION_RANGE (1 << BILINEAR_INTERPOLATION_BITS)
#include "pixman-arm-neon-asm.h"
/* Global configuration options and preferences */
/*
 * The code can optionally make use of unaligned memory accesses to improve
 * performance of handling leading/trailing pixels for each scanline.
 * Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for
 * example in linux if unaligned memory accesses are not configured to
 * generate exceptions.
 */
.set RESPECT_STRICT_ALIGNMENT, 1
/*
 * Set default prefetch type. There is a choice between the following options:
 *
 * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work
 * as NOP to workaround some HW bugs or for whatever other reason)
 *
 * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
 * advanced prefetch introduces heavy overhead)
 *
 * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
 * which can run ARM and NEON instructions simultaneously so that extra ARM
 * instructions do not add (many) extra cycles, but improve prefetch efficiency)
 *
 * Note: some types of function can't support advanced prefetch and fallback
 * to simple one (those which handle 24bpp pixels)
 */
.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
/* Prefetch distance in pixels for simple prefetch */
.set PREFETCH_DISTANCE_SIMPLE, 64
/*
* Implementation of pixman_composite_over_8888_0565_asm_neon
*
* This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and
* performs OVER compositing operation. Function fast_composite_over_8888_0565
* from pixman-fast-path.c does the same in C and can be used as a reference.
*
* First we need to have some NEON assembly code which can do the actual
* operation on the pixels and provide it to the template macro.
*
* Template macro quite conveniently takes care of emitting all the necessary
* code for memory reading and writing (including quite tricky cases of
* handling unaligned leading/trailing pixels), so we only need to deal with
* the data in NEON registers.
*
* NEON registers allocation in general is recommented to be the following:
* d0, d1, d2, d3 - contain loaded source pixel data
* d4, d5, d6, d7 - contain loaded destination pixels (if they are needed)
* d24, d25, d26, d27 - contain loading mask pixel data (if mask is used)
* d28, d29, d30, d31 - place for storing the result (destination pixels)
*
* As can be seen above, four 64-bit NEON registers are used for keeping
* intermediate pixel data and up to 8 pixels can be processed in one step
* for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
*
* This particular function uses the following registers allocation:
* d0, d1, d2, d3 - contain loaded source pixel data
* d4, d5 - contain loaded destination pixels (they are needed)
* d28, d29 - place for storing the result (destination pixels)
*/
/*
* Step one. We need to have some code to do some arithmetics on pixel data.
* This is implemented as a pair of macros: '*_head' and '*_tail'. When used
* back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
* perform all the needed calculations and write the result to {d28, d29}.
* The rationale for having two macros and not just one will be explained
* later. In practice, any single monolitic function which does the work can
* be split into two parts in any arbitrary way without affecting correctness.
*
* There is one special trick here too. Common template macro can optionally
* make our life a bit easier by doing R, G, B, A color components
* deinterleaving for 32bpp pixel formats (and this feature is used in
* 'pixman_composite_over_8888_0565_asm_neon' function). So it means that
* instead of having 8 packed pixels in {d0, d1, d2, d3} registers, we
* actually use d0 register for blue channel (a vector of eight 8-bit
* values), d1 register for green, d2 for red and d3 for alpha. This
* simple conversion can be also done with a few NEON instructions:
*
* Packed to planar conversion:
* vuzp.8 d0, d1
* vuzp.8 d2, d3
* vuzp.8 d1, d3
* vuzp.8 d0, d2
*
* Planar to packed conversion:
* vzip.8 d0, d2
* vzip.8 d1, d3
* vzip.8 d2, d3
* vzip.8 d0, d1
*
* But pixel can be loaded directly in planar format using VLD4.8 NEON
* instruction. It is 1 cycle slower than VLD1.32, so this is not always
* desirable, that's why deinterleaving is optional.
*
* But anyway, here is the code:
*/
/*
* OK, now we got almost everything that we need. Using the above two
* macros, the work can be done right. But now we want to optimize
* it a bit. ARM Cortex-A8 is an in-order core, and benefits really
* a lot from good code scheduling and software pipelining.
*
* Let's construct some code, which will run in the core main loop.
* Some pseudo-code of the main loop will look like this:
* head
* while (...) {
* tail
* head
* }
* tail
*
* It may look a bit weird, but this setup allows to hide instruction
* latencies better and also utilize dual-issue capability more
* efficiently (make pairs of load-store and ALU instructions).
*
* So what we need now is a '*_tail_head' macro, which will be used
* in the core main loop. A trivial straightforward implementation
* of this macro would look like this:
*
* pixman_composite_over_8888_0565_process_pixblock_tail
* vst1.16 {d28, d29}, [DST_W, :128]!
* vld1.16 {d4, d5}, [DST_R, :128]!
* vld4.32 {d0, d1, d2, d3}, [SRC]!
* pixman_composite_over_8888_0565_process_pixblock_head
* cache_preload 8, 8
*
* Now it also got some VLD/VST instructions. We simply can't move from
* processing one block of pixels to the other one with just arithmetics.
* The previously processed data needs to be written to memory and new
* data needs to be fetched. Fortunately, this main loop does not deal
* with partial leading/trailing pixels and can load/store a full block
* of pixels in a bulk. Additionally, destination buffer is already
* 16 bytes aligned here (which is good for performance).
*
* New things here are DST_R, DST_W, SRC and MASK identifiers. These
* are the aliases for ARM registers which are used as pointers for
* accessing data. We maintain separate pointers for reading and writing
* destination buffer (DST_R and DST_W).
*
* Another new thing is 'cache_preload' macro. It is used for prefetching
* data into CPU L2 cache and improve performance when dealing with large
* images which are far larger than cache size. It uses one argument
* (actually two, but they need to be the same here) - number of pixels
* in a block. Looking into 'pixman-arm-neon-asm.h' can provide some
* details about this macro. Moreover, if good performance is needed
* the code from this macro needs to be copied into '*_tail_head' macro
* and mixed with the rest of code for optimal instructions scheduling.
* We are actually doing it below.
*
* Now after all the explanations, here is the optimized code.
* Different instruction streams (originaling from '*_head', '*_tail'
* and 'cache_preload' macro) use different indentation levels for
* better readability. Actually taking the code from one of these
* indentation levels and ignoring a few VLD/VST instructions would
* result in exactly the code from '*_head', '*_tail' or 'cache_preload'
* macro!
*/
/*
* And now the final part. We are using 'generate_composite_function' macro
* to put all the stuff together. We are specifying the name of the function
* which we want to get, number of bits per pixel for the source, mask and
* destination (0 if unused, like mask in this case). Next come some bit
* flags:
* FLAG_DST_READWRITE - tells that the destination buffer is both read
* and written, for write-only buffer we would use
* FLAG_DST_WRITEONLY flag instead
* FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
* and separate color channels for 32bpp format.
* The next things are:
* - the number of pixels processed per iteration (8 in this case, because
* that's the maximum what can fit into four 64-bit NEON registers).
* - prefetch distance, measured in pixel blocks. In this case it is 5 times
* by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal
* prefetch distance can be selected by running some benchmarks.
*
* After that we specify some macros, these are 'default_init',
* 'default_cleanup' here which are empty (but it is possible to have custom
* init/cleanup macros to be able to save/restore some extra NEON registers
* like d8-d15 or do anything else) followed by
* 'pixman_composite_over_8888_0565_process_pixblock_head',
* 'pixman_composite_over_8888_0565_process_pixblock_tail' and
* 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
* which we got implemented above.
*
* The last part is the NEON registers allocation scheme.
*/
/******************************************************************************/
/******************************************************************************/
/* OUT_REVERSE head: dest *= (255 - src.alpha).
 * Per the register scheme above: deinterleaved source channels are in
 * d0-d3 (d3 = alpha), destination channels in d4-d7. This produces the
 * widened 16-bit products in q8-q11; the matching *_tail macro narrows
 * them back to 8 bits. */
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_head
vmvn.8 d24, d3 /* get inverted alpha */
/* do alpha blending */
vmull.u8 q8, d24, d4
vmull.u8 q9, d24, d5
vmull.u8 q10, d24, d6
vmull.u8 q11, d24, d7
.endm
/* OUT_REVERSE tail: narrow the q8-q11 products back to 8 bits.
 * vrshr (rounding shift) + vraddhn (add, take rounded high half) is the
 * standard pixman idiom approximating an accurate division by 255.
 * Results land in d28-d31 (the designated output registers). */
.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
.endm
/******************************************************************************/
/* OVER head is identical to OUT_REVERSE head: multiply the destination
 * channels by the inverted source alpha. */
.macro pixman_composite_over_8888_8888_process_pixblock_head
pixman_composite_out_reverse_8888_8888_process_pixblock_head
.endm
/* OVER tail: finish the OUT_REVERSE narrowing, then add the source with
 * unsigned saturation: result = src + dest * (255 - src.alpha) / 255. */
.macro pixman_composite_over_8888_8888_process_pixblock_tail
pixman_composite_out_reverse_8888_8888_process_pixblock_tail
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
/* OVER tail_head: the software-pipelined main-loop body. It interleaves
 * (a) the tail of the previous pixel block, (b) loads/stores for the
 * current block, (c) the head of the next block, and (d) the advanced
 * prefetch instructions (PF-prefixed, conditionally assembled) — see the
 * long explanation earlier in this file. Instruction order is deliberate
 * Cortex-A8 scheduling; do not reorder. Note d22 (not d24) is used as
 * the inverted-alpha scratch here. */
.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vrshr.u16 q14, q8, #8
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0xF
vrshr.u16 q15, q9, #8
vrshr.u16 q12, q10, #8
vrshr.u16 q13, q11, #8
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
PF cmp PF_X, ORIG_W
vraddhn.u16 d30, q12, q10
vraddhn.u16 d31, q13, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
fetch_src_pixblock
PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
vmvn.8 d22, d3
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q8, d22, d4
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q9, d22, d5
PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
vmull.u8 q10, d22, d6
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vmull.u8 q11, d22, d7
.endm
/* Instantiate the full OVER a8r8g8b8 -> a8r8g8b8 compositing function
 * from the head/tail/tail_head macros above (see the commented walkthrough
 * of generate_composite_function earlier in this file). */
generate_composite_function \
pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_process_pixblock_tail_head
/* Single-scanline variant (no prefetch-distance argument) reusing the
 * same pixel-processing macros. */
generate_composite_function_single_scanline \
pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
default_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_8888_8888_process_pixblock_tail_head
/******************************************************************************/
/* OVER with a solid (n = constant) source: the init macro below has
 * already splatted the color into d0-d3 and its inverted alpha into d24,
 * so the head only multiplies the freshly loaded destination by it. */
.macro pixman_composite_over_n_8888_process_pixblock_head
/* deinterleaved source pixels in {d0, d1, d2, d3} */
/* inverted alpha in {d24} */
/* destination pixels in {d4, d5, d6, d7} */
vmull.u8 q8, d24, d4
vmull.u8 q9, d24, d5
vmull.u8 q10, d24, d6
vmull.u8 q11, d24, d7
.endm
/* Tail for the solid-source OVER: narrow q8-q11 back to 8 bits
 * (rounding /255 idiom, using q2/q3 as scratch since the source is
 * constant) and saturating-add the source color. Result in d28-d31. */
.macro pixman_composite_over_n_8888_process_pixblock_tail
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q2, q10, #8
vrshr.u16 q3, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q2, q10
vraddhn.u16 d31, q3, q11
vqadd.u8 q14, q0, q14
vqadd.u8 q15, q1, q15
.endm
/* Pipelined main-loop body for solid-source OVER. No source fetch is
 * needed (the color is constant in d0-d3/d24), so only destination
 * load/store and destination-side prefetch (PF lines) are interleaved
 * with the arithmetic. Instruction order is deliberate scheduling. */
.macro pixman_composite_over_n_8888_process_pixblock_tail_head
vrshr.u16 q14, q8, #8
vrshr.u16 q15, q9, #8
vrshr.u16 q2, q10, #8
vrshr.u16 q3, q11, #8
vraddhn.u16 d28, q14, q8
vraddhn.u16 d29, q15, q9
vraddhn.u16 d30, q2, q10
vraddhn.u16 d31, q3, q11
vld4.8 {d4, d5, d6, d7}, [DST_R, :128]!
vqadd.u8 q14, q0, q14
PF add PF_X, PF_X, #8
PF tst PF_CTL, #0x0F
PF addne PF_X, PF_X, #8
PF subne PF_CTL, PF_CTL, #1
vqadd.u8 q15, q1, q15
PF cmp PF_X, ORIG_W
vmull.u8 q8, d24, d4
PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
vmull.u8 q9, d24, d5
PF subge PF_X, PF_X, ORIG_W
vmull.u8 q10, d24, d6
PF subges PF_CTL, PF_CTL, #0x10
vmull.u8 q11, d24, d7
PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
.endm
/* Init for solid-source OVER: load the packed 32-bit source color from
 * the stack arguments and splat each byte into its own channel vector
 * (byte 0 -> d0, byte 1 -> d1, byte 2 -> d2, byte 3 = alpha -> d3),
 * then precompute the inverted alpha once in d24. */
.macro pixman_composite_over_n_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d3[0]}, [DUMMY]
vdup.8 d0, d3[0]
vdup.8 d1, d3[1]
vdup.8 d2, d3[2]
vdup.8 d3, d3[3]
vmvn.8 d24, d3 /* get inverted alpha */
.endm
/* Solid-source OVER (src_bpp = 0 means "no source image"): reuses the
 * generic over_8888_8888 head/tail with the n-specific init and
 * tail_head defined above. */
generate_composite_function \
pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
8, /* number of pixels, processed in a single block */ \
5, /* prefetch distance */ \
pixman_composite_over_n_8888_init, \
default_cleanup, \
pixman_composite_over_8888_8888_process_pixblock_head, \
pixman_composite_over_8888_8888_process_pixblock_tail, \
pixman_composite_over_n_8888_process_pixblock_tail_head
/******************************************************************************/
/* SRC with a solid color: no per-pixel arithmetic at all. Head and tail
 * are empty; the loop body just stores the constant color (replicated
 * into d0-d3 by the init macro below) to the destination. */
.macro pixman_composite_src_n_8888_process_pixblock_head
.endm
.macro pixman_composite_src_n_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_n_8888_process_pixblock_tail_head
vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
.endm
/* Init for solid-color SRC: load the 32-bit color from the stack args
 * and replicate it across all of q0/q1 (d0-d3), i.e. 8 pixel copies:
 * vsli duplicates it into both halves of d0, then d1/q1 are copied. */
.macro pixman_composite_src_n_8888_init
add DUMMY, sp, #ARGS_STACK_OFFSET
vld1.32 {d0[0]}, [DUMMY]
vsli.u64 d0, d0, #32
vorr d1, d0, d0
vorr q1, q0, q0
.endm
/* Nothing to restore. */
.macro pixman_composite_src_n_8888_cleanup
.endm
/* Solid fill. Write-only destination, no prefetch (0). The basereg
 * arguments are 0 because the data to store already lives in d0-d3. */
generate_composite_function \
pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
0, /* prefetch distance */ \
pixman_composite_src_n_8888_init, \
pixman_composite_src_n_8888_cleanup, \
pixman_composite_src_n_8888_process_pixblock_head, \
pixman_composite_src_n_8888_process_pixblock_tail, \
pixman_composite_src_n_8888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
/* Plain 32bpp copy (SRC): no arithmetic; the loop body stores the
 * previous block, fetches the next one, and issues cache preload. */
.macro pixman_composite_src_8888_8888_process_pixblock_head
.endm
.macro pixman_composite_src_8888_8888_process_pixblock_tail
.endm
.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
fetch_src_pixblock
cache_preload 8, 8
.endm
/* 32bpp -> 32bpp copy; memory-bound, so a longer prefetch distance (10
 * blocks) is used than for the compositing functions. */
generate_composite_function \
pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
FLAG_DST_WRITEONLY, \
8, /* number of pixels, processed in a single block */ \
10, /* prefetch distance */ \
default_init, \
default_cleanup, \
pixman_composite_src_8888_8888_process_pixblock_head, \
pixman_composite_src_8888_8888_process_pixblock_tail, \
pixman_composite_src_8888_8888_process_pixblock_tail_head, \
0, /* dst_w_basereg */ \
0, /* dst_r_basereg */ \
0, /* src_basereg */ \
0 /* mask_basereg */
/******************************************************************************/
|
aarzilli/nucular
| 1,742
|
drawfillover_avx.s
|
//+build amd64,go1.10
#include "textflag.h"
GLOBL drawFillOver_SIMD_shufflemap<>(SB), (NOPTR+RODATA), $4
DATA drawFillOver_SIMD_shufflemap<>+0x00(SB)/4, $0x0d090501
// drawFillOver_SIMD_internal: for each 32bpp pixel at byte offsets
// [i0, i1) within a row (both advanced by stride per row, n rows total)
// computes, per 8-bit component:
//     dst = ((dst * adivm) >> 8) + src_component
// where the source components sr,sg,sb,sa are four consecutive uint32
// arguments loaded as one 128-bit vector.
// NOTE(review): fixed the FP symbol name 'advim' -> 'adivm' so it matches
// the argument name documented below (go vet's asmdecl check requires the
// symbolic name to match the Go declaration); the +40 offset was already
// correct, so the generated code is unchanged.
TEXT ·drawFillOver_SIMD_internal(SB),0,$0-60
// base+0(FP)
// i0+8(FP)
// i1+16(FP)
// stride+24(FP)
// n+32(FP)
// adivm+40(FP)
// sr+44(FP)
// sg+48(FP)
// sb+52(FP)
// sa+56(FP)
// DX row index
// CX column index
// AX pointer to current pixel
// R14 i0
// R15 i1
// X0 zeroed register
// X1 current pixel
// X3 source pixel
// X4 is the shuffle map to do the >> 8 and pack everything back into a single 32bit value
MOVSS drawFillOver_SIMD_shufflemap<>(SB), X4
PXOR X0, X0
MOVQ i0+8(FP), R14
MOVQ i1+16(FP), R15
// load adivm to X2, fill all uint32s with it
MOVSS adivm+40(FP), X2
VBROADCASTSS X2, X2
// load source pixel (sr,sg,sb,sa: four consecutive uint32s) to X3
VMOVDQU sr+44(FP), X3
MOVQ $0, DX
row_loop:
CMPQ DX, n+32(FP)
JGE row_loop_end
MOVQ R14, CX
MOVQ base+0(FP), AX
LEAQ (AX)(CX*1), AX
column_loop:
CMPQ CX, R15
JGE column_loop_end
// load current pixel to X1, unpack twice to get uint32s
MOVSS (AX), X1
PUNPCKLBW X0, X1
VPUNPCKLWD X0, X1, X1
VPMULLD X2, X1, X1 // component * a/m
VPADDD X3, X1, X1 // (component * a/m) + source_component
VPSHUFB X4, X1, X1 // get the second byte of every 32bit word and pack it into the lowest word of X1
MOVSS X1, (AX) // write back to memory
ADDQ $4, CX
ADDQ $4, AX
JMP column_loop
column_loop_end:
ADDQ stride+24(FP), R14
ADDQ stride+24(FP), R15
INCQ DX
JMP row_loop
row_loop_end:
RET
// getCPUID1 executes CPUID with EAX=1 and returns the two feature-flag
// words as two uint32 results: first = EDX, second = ECX.
// NOTE(review): added the argument-size suffix ($0-8) so the TEXT
// directive declares the 8 bytes of results accessed via ret+0(FP) and
// ret+4(FP); go vet's asmdecl check flags a bare $0 here.
TEXT ·getCPUID1(SB),$0-8
MOVQ $1, AX
CPUID
MOVD DX, ret+0(FP)
MOVD CX, ret+4(FP)
RET
// getCPUID70 executes CPUID with EAX=7, ECX=0 (extended features leaf,
// sub-leaf 0) and returns two uint32 results: first = EBX, second = ECX.
// NOTE(review): added the argument-size suffix ($0-8) to match the 8
// bytes of results accessed via FP (go vet asmdecl), as in getCPUID1.
TEXT ·getCPUID70(SB),$0-8
MOVQ $7, AX
MOVQ $0, CX
CPUID
MOVD BX, ret+0(FP)
MOVD CX, ret+4(FP)
RET
|
Aatch/ramp
| 1,488
|
src/ll/asm/addmul_1.S
|
.text
.file "addmul_1.S"
# ramp_addmul_1(wp, xp, n, v):
#   wp[0..n) += xp[0..n) * v  (64-bit limbs); returns the final carry
#   limb in %rax. SysV AMD64: wp=%rdi, xp=%rsi, n=%edx, v=%rcx.
# n is moved to %r11d immediately because 'mul' clobbers %rdx.
#define wp %rdi
#define xp %rsi
#define n_param %edx
#define n %r11d
#define v %rcx
.section .text.ramp_addmul_1,"ax",@progbits
.globl ramp_addmul_1
.align 16, 0x90
.type ramp_addmul_1,@function
ramp_addmul_1:
.cfi_startproc
#define L(lbl) .LADDMUL_ ## lbl
mov n_param, n # Move n away from %rdx
mov (xp), %rax
mul v # %rdx:%rax = xp[0] * v
add %rax, (wp) # wp[0] += low half of product
adc $0, %rdx # fold the add's carry into the high half
mov %rdx, %r8 # %r8 = running carry limb
dec n
jz L(ret)
add $8, wp
add $8, xp
.align 16
L(top):
mov (xp), %rax
mul v
add %r8, %rax # add carry limb from the previous iteration
adc $0, %rdx
mov %rdx, %r8
add %rax, (wp)
adc $0, %r8 # absorb carry-out of the memory add
add $8, wp
add $8, xp
dec n
jnz L(top)
L(ret):
mov %r8, %rax # return the final carry limb
ret
L(tmp):
.size ramp_addmul_1, L(tmp) - ramp_addmul_1
.cfi_endproc
# ramp_submul_1(wp, xp, n, v):
#   wp[0..n) -= xp[0..n) * v; returns the final borrow limb in %rax.
# Identical structure to ramp_addmul_1 above with the memory 'add'
# replaced by 'sub' (the borrow is folded into %rdx/%r8 the same way).
.section .text.ramp_submul_1,"ax",@progbits
.globl ramp_submul_1
.align 16, 0x90
.type ramp_submul_1,@function
ramp_submul_1:
.cfi_startproc
#undef L
#define L(lbl) .LSUBMUL_ ## lbl
mov n_param, n # Move n away from %rdx
mov (xp), %rax
mul v # %rdx:%rax = xp[0] * v
sub %rax, (wp) # wp[0] -= low half of product
adc $0, %rdx # fold the borrow into the high half
mov %rdx, %r8 # %r8 = running borrow limb
dec n
jz L(ret)
add $8, wp
add $8, xp
.align 16
L(top):
mov (xp), %rax
mul v
add %r8, %rax # add borrow limb from the previous iteration
adc $0, %rdx
mov %rdx, %r8
sub %rax, (wp)
adc $0, %r8 # absorb borrow-out of the memory sub
add $8, wp
add $8, xp
dec n
jnz L(top)
L(ret):
mov %r8, %rax # return the final borrow limb
ret
L(tmp):
.size ramp_submul_1, L(tmp) - ramp_submul_1
.cfi_endproc
|
Aatch/ramp
| 3,020
|
src/ll/asm/addsub_n.S
|
.text
.file "addsub_n.S"
# ramp_add_n(wp, xp, yp, n):
#   wp[0..n) = xp[0..n) + yp[0..n) (64-bit limbs); returns the final
#   carry (0/1) in %rax. SysV AMD64: wp=%rdi, xp=%rsi, yp=%rdx, n=%rcx.
# The loop is 4-way unrolled: n/4 full blocks plus an n%4 remainder
# handled at L(lt4). The carry flag is chained across the whole run —
# only CF-preserving instructions (lea, mov, dec, inc) separate the adcs.
#define wp %rdi
#define xp %rsi
#define yp %rdx
#define n %rcx
.section .text.ramp_add_n,"ax",@progbits
.globl ramp_add_n
.align 16, 0x90
.type ramp_add_n,@function
ramp_add_n:
.cfi_startproc
#define L(lbl) .LADD_ ## lbl
mov %ecx, %eax # %eax = n (low 32 bits)
shr $2, n # n = number of 4-limb blocks
and $3, %eax # %eax = remainder (n % 4); also clears CF
jrcxz L(lt4) # no full block: handle 1-3 limbs directly
mov (xp), %r8
mov 8(xp), %r9
dec n
jmp L(mid)
L(lt4):
dec %eax
mov (xp), %r8
jnz L(2)
adc (yp), %r8 # exactly 1 limb left (CF may be live)
mov %r8, (wp)
adc %eax, %eax # %eax is 0 here: materialize CF as 0/1
ret
L(2):
dec %eax
mov 8(xp), %r9
jnz L(3)
adc (yp), %r8 # exactly 2 limbs left
adc 8(yp), %r9
mov %r8, (wp)
mov %r9, 8(wp)
adc %eax, %eax
ret
L(3):
mov 16(xp), %r10 # exactly 3 limbs left
adc (yp), %r8
adc 8(yp), %r9
adc 16(yp), %r10
mov %r8, (wp)
mov %r9, 8(wp)
mov %r10, 16(wp)
setc %al
ret
.align 16
L(top):
adc (yp), %r8 # add a 4-limb block, carry chained throughout
adc 8(yp), %r9
adc 16(yp), %r10
adc 24(yp), %r11
mov %r8, (wp)
lea 32(xp), xp # lea advances pointers without touching CF
mov %r9, 8(wp)
mov %r10, 16(wp)
dec n
mov %r11, 24(wp)
lea 32(yp), yp
mov (xp), %r8
mov 8(xp), %r9
lea 32(wp), wp # was 'lea 32(wp), %rdi': same register, now uses the alias consistently
L(mid):
mov 16(xp), %r10
mov 24(xp), %r11
jnz L(top)
L(end):
lea 32(xp), xp
adc (yp), %r8
adc 8(yp), %r9
adc 16(yp), %r10
adc 24(yp), %r11
lea 32(yp), yp
mov %r8, (wp)
mov %r9, 8(wp)
mov %r10, 16(wp)
mov %r11, 24(wp)
lea 32(wp), wp
inc %eax # inc/dec pair tests %eax for zero while
dec %eax # preserving CF for the remainder limbs
jnz L(lt4) # 1-3 leftover limbs: finish with carry intact
adc %eax, %eax # remainder is 0: return the carry flag
ret
L(tmp):
.size ramp_add_n, L(tmp) - ramp_add_n
.cfi_endproc
# ramp_sub_n(wp, xp, yp, n):
#   wp[0..n) = xp[0..n) - yp[0..n); returns the final borrow (0/1) in
#   %rax. Mirror image of ramp_add_n above with 'adc' replaced by 'sbb'
#   in the limb arithmetic; the borrow flag is chained identically.
.section .text.ramp_sub_n,"ax",@progbits
.globl ramp_sub_n
.align 16, 0x90
.type ramp_sub_n,@function
ramp_sub_n:
.cfi_startproc
#undef L
#define L(lbl) .LSUB_ ## lbl
mov %ecx, %eax # %eax = n (low 32 bits)
shr $2, n # n = number of 4-limb blocks
and $3, %eax # %eax = remainder (n % 4); also clears CF
jrcxz L(lt4) # no full block: handle 1-3 limbs directly
mov (xp), %r8
mov 8(xp), %r9
dec n
jmp L(mid)
L(lt4):
dec %eax
mov (xp), %r8
jnz L(2)
sbb (yp), %r8 # exactly 1 limb left (borrow may be live)
mov %r8, (wp)
adc %eax, %eax # %eax is 0 here: materialize CF as 0/1
ret
L(2):
dec %eax
mov 8(xp), %r9
jnz L(3)
sbb (yp), %r8 # exactly 2 limbs left
sbb 8(yp), %r9
mov %r8, (wp)
mov %r9, 8(wp)
adc %eax, %eax
ret
L(3):
mov 16(xp), %r10 # exactly 3 limbs left
sbb (yp), %r8
sbb 8(yp), %r9
sbb 16(yp), %r10
mov %r8, (wp)
mov %r9, 8(wp)
mov %r10, 16(wp)
setc %al
ret
.align 16
L(top):
sbb (yp), %r8 # subtract a 4-limb block, borrow chained
sbb 8(yp), %r9
sbb 16(yp), %r10
sbb 24(yp), %r11
mov %r8, (wp)
lea 32(xp), xp # lea advances pointers without touching CF
mov %r9, 8(wp)
mov %r10, 16(wp)
dec n
mov %r11, 24(wp)
lea 32(yp), yp
mov (xp), %r8
mov 8(xp), %r9
lea 32(wp), wp # was 'lea 32(wp), %rdi': same register, now uses the alias consistently
L(mid):
mov 16(xp), %r10
mov 24(xp), %r11
jnz L(top)
L(end):
lea 32(xp), xp
sbb (yp), %r8
sbb 8(yp), %r9
sbb 16(yp), %r10
sbb 24(yp), %r11
lea 32(yp), yp
mov %r8, (wp)
mov %r9, 8(wp)
mov %r10, 16(wp)
mov %r11, 24(wp)
lea 32(wp), wp
inc %eax # inc/dec pair tests %eax for zero while
dec %eax # preserving CF for the remainder limbs
jnz L(lt4) # 1-3 leftover limbs: finish with borrow intact
adc %eax, %eax # remainder is 0: return the borrow flag
ret
L(tmp):
.size ramp_sub_n, L(tmp) - ramp_sub_n
.cfi_endproc
|
ab25cq/clover2
| 5,898
|
code/cstruct_test.s
|
.text
.file "cstruct_test.ocl"
// clover2_main: LLVM-generated AArch64 entry stub for the compiled
// Clover2 module "code/cstruct_test.cl". It registers the module's
// bytecode/constant data (sizes 0x70 and 0x7c, matching gCodeData /
// gConstData below), runs the top-level statements (puts("HELLO WORLD")),
// checks the gSigInt flag around each step (raising a "Signal Interrupt"
// Exception object when set), and invokes the CStructTest class.
// Compiler output — kept byte-identical below; comments only added.
.globl clover2_main // -- Begin function clover2_main
.p2align 2
.type clover2_main,@function
clover2_main: // @clover2_main
.cfi_startproc
// %bb.0: // %entry
str x24, [sp, #-64]! // 8-byte Folded Spill
stp x23, x22, [sp, #16] // 8-byte Folded Spill
stp x21, x20, [sp, #32] // 8-byte Folded Spill
stp x19, x30, [sp, #48] // 8-byte Folded Spill
.cfi_def_cfa_offset 64
.cfi_offset w30, -8
.cfi_offset w19, -16
.cfi_offset w20, -24
.cfi_offset w21, -32
.cfi_offset w22, -40
.cfi_offset w23, -48
.cfi_offset w24, -64
mov x20, x4
mov x19, x2
adrp x2, :got:gCodeData
adrp x4, :got:gConstData
ldr x2, [x2, :got_lo12:gCodeData]
ldr x4, [x4, :got_lo12:gConstData]
mov x0, x7
mov x1, x6
mov w21, w5
mov x22, x3
orr w3, wzr, #0x70
orr w5, wzr, #0x7c
bl initialize_code_and_constant
// x24 = &gSigInt; cleared before running user code
adrp x24, :got:gSigInt
ldr x24, [x24, :got_lo12:gSigInt]
adrp x1, .Lglobal_string
add x1, x1, :lo12:.Lglobal_string
orr w2, wzr, #0x1
mov x0, x19
str wzr, [x24]
bl mark_source_position
ldr x8, [x20]
adrp x23, .Lglobal_string.2
add x23, x23, :lo12:.Lglobal_string.2
adrp x1, .Lglobal_string.3
str x23, [x8]
ldr x8, [x20]
add x1, x1, :lo12:.Lglobal_string.3
orr w2, wzr, #0x1
mov x0, x19
add x8, x8, #8 // =8
str x8, [x20]
bl mark_source_position2
// print "HELLO WORLD" and push puts' result onto the VM stack (*x20)
mov x0, x23
bl puts
ldr x8, [x20]
mov w9, w0
sub x10, x8, #8 // =8
str x10, [x20]
stur x9, [x8, #-8]
ldr x8, [x20]
add x8, x8, #8 // =8
str x8, [x20]
ldr w8, [x24]
tbz w8, #0, .LBB0_3
// %bb.1: // %sigint_then_block
adrp x4, .Lglobal_string.4
adrp x5, .Lglobal_string.5
add x4, x4, :lo12:.Lglobal_string.4
add x5, x5, :lo12:.Lglobal_string.5
str wzr, [x24]
.LBB0_2: // %sigint_then_block
mov x0, x20
mov x1, x22
mov w2, w21
mov x3, x19
bl entry_exception_object_with_class_name2
mov w0, wzr
b .LBB0_9
.LBB0_3: // %entry_after_sigint
ldr x8, [x20]
adrp x1, .Lglobal_string.6
add x1, x1, :lo12:.Lglobal_string.6
orr w2, wzr, #0x3
sub x8, x8, #8 // =8
mov x0, x19
str x8, [x20]
str wzr, [x24]
bl mark_source_position
adrp x1, .Lglobal_string.7
add x1, x1, :lo12:.Lglobal_string.7
orr w2, wzr, #0x3
mov x0, x19
bl mark_source_position2
// load/initialize class "CStructTest" and invoke its method
adrp x0, .Lglobal_string.8
add x0, x0, :lo12:.Lglobal_string.8
mov w1, wzr
bl get_class_with_load_and_initialize
cbz x0, .LBB0_9
// %bb.4: // %entry_ifend
mov w1, wzr
mov x2, x22
mov w3, w21
mov x4, x20
mov x5, x19
bl call_invoke_method
cmp w0, #1 // =1
b.eq .LBB0_6
// %bb.5: // %then_block10
mov x0, x19
bl get_try_catch_label_name
mov w0, wzr
b .LBB0_9
.LBB0_6: // %entry_ifend11
ldr w8, [x24]
tbz w8, #0, .LBB0_8
// %bb.7: // %sigint_then_block15
adrp x4, .Lglobal_string.9
adrp x5, .Lglobal_string.10
str wzr, [x24]
add x4, x4, :lo12:.Lglobal_string.9
add x5, x5, :lo12:.Lglobal_string.10
b .LBB0_2
.LBB0_8: // %entry_after_sigint16
ldr x8, [x20]
orr w0, wzr, #0x1
sub x8, x8, #8 // =8
str x8, [x20]
.LBB0_9: // %then_block
ldp x19, x30, [sp, #48] // 8-byte Folded Reload
ldp x21, x20, [sp, #32] // 8-byte Folded Reload
ldp x23, x22, [sp, #16] // 8-byte Folded Reload
ldr x24, [sp], #64 // 8-byte Folded Reload
ret
.Lfunc_end0:
.size clover2_main, .Lfunc_end0-clover2_main
.cfi_endproc
// -- End function
// Module data emitted by the Clover2 compiler. gCodeData is the raw
// bytecode stream (112 bytes = the 0x70 passed to
// initialize_code_and_constant above); gConstData is the constant pool
// (124 bytes = 0x7c). Byte contents must not be edited by hand.
.type gCodeData,@object // @gCodeData
.data
.globl gCodeData
gCodeData:
.asciz "\017\000\000\000\020\000\000\000\000\000\000\000\001\000\000\000+#\000\000\030\000\000\000\021\000\000\000$\000\000\000\001\000\000\000\270\013\000\000<\000\000\000\000\000\000\000 \000\000\000\022\000\000\000\001\000\000\000\017\000\000\000\020\000\000\000@\000\000\000\003\000\000\000\021\000\000\000X\000\000\000\003\000\000\000\270\013\000\000p\000\000\000\000\000\000\000 \000\000\000\022\000\000\000\001\000\000"
.size gCodeData, 112
.type gConstData,@object // @gConstData
.globl gConstData
gConstData:
.asciz "code/cstruct_test.cl\000\000\000\000HELLO WORLD\000code/cstruct_test.cl\000\000\000\000C\000\000\000code/cstruct_test.cl\000\000\000\000code/cstruct_test.cl\000\000\000\000CStructTest"
.size gConstData, 124
// Read-only string literals referenced by clover2_main via adrp/:lo12:.
.type .Lglobal_string,@object // @global_string
.section .rodata,"a",@progbits
.Lglobal_string:
.asciz "code/cstruct_test.cl"
.size .Lglobal_string, 21
.type .Lglobal_string.2,@object // @global_string.2
.Lglobal_string.2:
.asciz "HELLO WORLD"
.size .Lglobal_string.2, 12
.type .Lglobal_string.3,@object // @global_string.3
.Lglobal_string.3:
.asciz "code/cstruct_test.cl"
.size .Lglobal_string.3, 21
.type .Lglobal_string.4,@object // @global_string.4
.Lglobal_string.4:
.asciz "Exception"
.size .Lglobal_string.4, 10
.type .Lglobal_string.5,@object // @global_string.5
.Lglobal_string.5:
.asciz "Signal Interrupt"
.size .Lglobal_string.5, 17
.type .Lglobal_string.6,@object // @global_string.6
.Lglobal_string.6:
.asciz "code/cstruct_test.cl"
.size .Lglobal_string.6, 21
.type .Lglobal_string.7,@object // @global_string.7
.Lglobal_string.7:
.asciz "code/cstruct_test.cl"
.size .Lglobal_string.7, 21
.type .Lglobal_string.8,@object // @global_string.8
.Lglobal_string.8:
.asciz "CStructTest"
.size .Lglobal_string.8, 12
.type .Lglobal_string.9,@object // @global_string.9
.Lglobal_string.9:
.asciz "Exception"
.size .Lglobal_string.9, 10
.type .Lglobal_string.10,@object // @global_string.10
.Lglobal_string.10:
.asciz "Signal Interrupt"
.size .Lglobal_string.10, 17
// Mark the stack non-executable.
.section ".note.GNU-stack","",@progbits
|
ab25cq/clover2
| 3,688
|
code/CFFI.s
|
.text
.file "CFFI.ocl"
// clover2_main: LLVM-generated AArch64 entry stub for the compiled
// Clover2 module "code/CFFI.cl" (same shape as the cstruct_test stub:
// register code/constant data — sizes 52 and 41, matching gCodeData /
// gConstData below — check the gSigInt flag, invoke the CFFITest class).
// Compiler output — kept byte-identical below; comments only added.
.globl clover2_main // -- Begin function clover2_main
.p2align 2
.type clover2_main,@function
clover2_main: // @clover2_main
.cfi_startproc
// %bb.0: // %entry
stp x23, x22, [sp, #-48]! // 8-byte Folded Spill
stp x21, x20, [sp, #16] // 8-byte Folded Spill
stp x19, x30, [sp, #32] // 8-byte Folded Spill
.cfi_def_cfa_offset 48
.cfi_offset w30, -8
.cfi_offset w19, -16
.cfi_offset w20, -24
.cfi_offset w21, -32
.cfi_offset w22, -40
.cfi_offset w23, -48
mov x19, x4
mov x20, x2
adrp x2, :got:gCodeData
adrp x4, :got:gConstData
ldr x2, [x2, :got_lo12:gCodeData]
ldr x4, [x4, :got_lo12:gConstData]
mov x0, x7
mov x1, x6
mov w21, w5
mov x22, x3
mov w3, #52
mov w5, #41
bl initialize_code_and_constant
// x23 = &gSigInt; cleared before running user code
adrp x23, :got:gSigInt
ldr x23, [x23, :got_lo12:gSigInt]
adrp x1, .Lglobal_string
add x1, x1, :lo12:.Lglobal_string
orr w2, wzr, #0x1
mov x0, x20
str wzr, [x23]
bl mark_source_position
adrp x1, .Lglobal_string.2
add x1, x1, :lo12:.Lglobal_string.2
orr w2, wzr, #0x1
mov x0, x20
bl mark_source_position2
// load/initialize class "CFFITest" and invoke its method
adrp x0, .Lglobal_string.3
add x0, x0, :lo12:.Lglobal_string.3
mov w1, wzr
bl get_class_with_load_and_initialize
cbz x0, .LBB0_6
// %bb.1: // %entry_ifend
orr w1, wzr, #0x1
mov x2, x22
mov w3, w21
mov x4, x19
mov x5, x20
bl call_invoke_method
cmp w0, #1 // =1
b.eq .LBB0_3
// %bb.2: // %then_block2
mov x0, x20
bl get_try_catch_label_name
mov w0, wzr
b .LBB0_6
.LBB0_3: // %entry_ifend3
ldr w8, [x23]
tbz w8, #0, .LBB0_5
// %bb.4: // %sigint_then_block
adrp x4, .Lglobal_string.4
adrp x5, .Lglobal_string.5
add x4, x4, :lo12:.Lglobal_string.4
add x5, x5, :lo12:.Lglobal_string.5
mov x0, x19
mov x1, x22
mov w2, w21
mov x3, x20
str wzr, [x23]
bl entry_exception_object_with_class_name2
mov w0, wzr
b .LBB0_6
.LBB0_5: // %entry_after_sigint
ldr x8, [x19]
orr w0, wzr, #0x1
sub x8, x8, #8 // =8
str x8, [x19]
.LBB0_6: // %then_block
ldp x19, x30, [sp, #32] // 8-byte Folded Reload
ldp x21, x20, [sp, #16] // 8-byte Folded Reload
ldp x23, x22, [sp], #48 // 8-byte Folded Reload
ret
.Lfunc_end0:
.size clover2_main, .Lfunc_end0-clover2_main
.cfi_endproc
// -- End function
// Module data emitted by the Clover2 compiler. gCodeData is the raw
// bytecode stream (52 bytes, matching the #52 passed to
// initialize_code_and_constant above); gConstData is the constant pool
// (41 bytes, matching #41). Byte contents must not be edited by hand.
.type gCodeData,@object // @gCodeData
.data
.globl gCodeData
gCodeData:
.asciz "\017\000\000\000\020\000\000\000\000\000\000\000\001\000\000\000\021\000\000\000\020\000\000\000\001\000\000\000\270\013\000\000 \000\000\000\001\000\000\000 \000\000\000\022\000\000\000\001\000\000"
.size gCodeData, 52
.type gConstData,@object // @gConstData
.globl gConstData
gConstData:
.asciz "code/CFFI.cl\000\000\000\000code/CFFI.cl\000\000\000\000CFFITest"
.size gConstData, 41
// Read-only string literals referenced by clover2_main via adrp/:lo12:.
.type .Lglobal_string,@object // @global_string
.section .rodata,"a",@progbits
.Lglobal_string:
.asciz "code/CFFI.cl"
.size .Lglobal_string, 13
.type .Lglobal_string.2,@object // @global_string.2
.Lglobal_string.2:
.asciz "code/CFFI.cl"
.size .Lglobal_string.2, 13
.type .Lglobal_string.3,@object // @global_string.3
.Lglobal_string.3:
.asciz "CFFITest"
.size .Lglobal_string.3, 9
.type .Lglobal_string.4,@object // @global_string.4
.Lglobal_string.4:
.asciz "Exception"
.size .Lglobal_string.4, 10
.type .Lglobal_string.5,@object // @global_string.5
.Lglobal_string.5:
.asciz "Signal Interrupt"
.size .Lglobal_string.5, 17
// Mark the stack non-executable.
.section ".note.GNU-stack","",@progbits
|
ab25cq/comelang
| 51,617
|
minux9/msh.S
|
.file "msh.c"
.option nopic
.attribute arch, "rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0"
.attribute unaligned_access, 0
.attribute stack_align, 16
.text
.Ltext0:
.cfi_sections .debug_frame
.file 0 "/Users/ab25cq/comelang/minux9" "msh.c"
# strncpy(dst=a0, src=a1, n=a2): GCC -O0 RV64 output of the classic C
# strncpy from msh.c — copies up to n bytes from src to dst, NUL-padding
# the remainder, and returns the original dst pointer (saved at -24(s0)).
# All locals live on the stack (unoptimized); .loc lines carry the
# source-line debug mapping. Compiler output — kept byte-identical.
.align 1
.globl strncpy
.type strncpy, @function
strncpy:
.LFB0:
.file 1 "msh.c"
.loc 1 5 46
.cfi_startproc
addi sp,sp,-64
.cfi_def_cfa_offset 64
sd ra,56(sp)
sd s0,48(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,64
.cfi_def_cfa 8, 0
sd a0,-40(s0)
sd a1,-48(s0)
mv a5,a2
sw a5,-52(s0)
.loc 1 8 6
ld a5,-40(s0)
sd a5,-24(s0)
.loc 1 9 8
nop
# copy loop: while (n-- > 0 && (*dst++ = *src++) != 0)
.L3:
.loc 1 9 10 discriminator 2
lw a5,-52(s0)
addiw a4,a5,-1
sw a4,-52(s0)
.loc 1 9 17 discriminator 2
ble a5,zero,.L4
.loc 1 9 30 discriminator 1
ld a4,-48(s0)
addi a5,a4,1
sd a5,-48(s0)
.loc 1 9 23 discriminator 1
ld a5,-40(s0)
addi a3,a5,1
sd a3,-40(s0)
.loc 1 9 28 discriminator 1
lbu a4,0(a4)
.loc 1 9 26 discriminator 1
sb a4,0(a5)
.loc 1 9 21 discriminator 1
lbu a5,0(a5)
.loc 1 9 17 discriminator 1
bne a5,zero,.L3
.loc 1 11 8
j .L4
# padding loop: while (n-- > 0) *dst++ = 0
.L5:
.loc 1 12 7
ld a5,-40(s0)
addi a4,a5,1
sd a4,-40(s0)
.loc 1 12 10
sb zero,0(a5)
.L4:
.loc 1 11 10
lw a5,-52(s0)
addiw a4,a5,-1
sw a4,-52(s0)
.loc 1 11 13
bgt a5,zero,.L5
.loc 1 13 10
ld a5,-24(s0)
.loc 1 14 1
mv a0,a5
ld ra,56(sp)
.cfi_restore 1
ld s0,48(sp)
.cfi_restore 8
.cfi_def_cfa 2, 64
addi sp,sp,64
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE0:
.size strncpy, .-strncpy
# putchar(c=a0): GCC -O0 RV64 output. Builds a 2-byte buffer {c, 0} on
# the stack and issues the write syscall via inline asm (a0=1 fd, a1=buf,
# a2=1 byte, a7=64, ecall — the #APP/#NO_APP markers delimit the inline
# asm). The syscall result is spilled to -24(s0) and not otherwise used.
# Compiler output — kept byte-identical.
.align 1
.globl putchar
.type putchar, @function
putchar:
.LFB1:
.loc 1 17 1
.cfi_startproc
addi sp,sp,-48
.cfi_def_cfa_offset 48
sd ra,40(sp)
sd s0,32(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,48
.cfi_def_cfa 8, 0
mv a5,a0
sb a5,-33(s0)
.loc 1 19 12
lbu a5,-33(s0)
sb a5,-32(s0)
.loc 1 20 12
sb zero,-31(s0)
.LBB2:
.loc 1 21 5
li a0,1
addi a5,s0,-32
mv a1,a5
li a2,1
li a7,64
#APP
# 21 "msh.c" 1
ecall
# 0 "" 2
#NO_APP
sd a0,-24(s0)
.LBE2:
.loc 1 22 1
nop
ld ra,40(sp)
.cfi_restore 1
ld s0,32(sp)
.cfi_restore 8
.cfi_def_cfa 2, 48
addi sp,sp,48
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE1:
.size putchar, .-putchar
# -----------------------------------------------------------------------
# printint(int x, int base, int sign)
# In:  a0 = x, a1 = base, a2 = sign (nonzero => treat x as signed)
# Converts x to digits of the given base (0-9 then 'a'..; digit>9 gets
# +87 i.e. 'a'-10) into a stack buffer, prints "0" for x==0, a leading
# '-' for negative signed values, then emits the digits in reverse via
# putchar.
# Locals: -20(s0)=digit count i, -24(s0)=negative flag, -28(s0)=|x|,
#         -32(s0)=current digit; digit buffer at -56(s0) indexed by i-16.
# -----------------------------------------------------------------------
	.align	1
	.globl	printint
	.type	printint, @function
printint:
.LFB2:
	.loc 1 24 45
	.cfi_startproc
	addi	sp,sp,-96
	.cfi_def_cfa_offset 96
	sd	ra,88(sp)
	sd	s0,80(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,96
	.cfi_def_cfa 8, 0
	mv	a5,a0
	mv	a3,a1
	mv	a4,a2
	sw	a5,-84(s0)              # x
	mv	a5,a3
	sw	a5,-88(s0)              # base
	mv	a5,a4
	sw	a5,-92(s0)              # sign flag
	.loc 1 26 9
	sw	zero,-20(s0)            # i = 0
	.loc 1 27 9
	sw	zero,-24(s0)            # neg = 0
	.loc 1 30 8
	lw	a5,-92(s0)
	sext.w	a5,a5
	beq	a5,zero,.L9
	.loc 1 30 14 discriminator 1
	lw	a5,-84(s0)
	sext.w	a5,a5
	bge	a5,zero,.L9
	.loc 1 31 18
	li	a5,1
	sw	a5,-24(s0)              # neg = 1
	.loc 1 32 16
	lw	a5,-84(s0)
	negw	a5,a5                   # magnitude = -x
	sext.w	a5,a5
	.loc 1 32 14
	sw	a5,-28(s0)
	j	.L10
.L9:
	.loc 1 34 14
	lw	a5,-84(s0)
	sw	a5,-28(s0)              # magnitude = x
.L10:
	.loc 1 37 8
	lw	a5,-28(s0)
	sext.w	a5,a5
	bne	a5,zero,.L13
	.loc 1 38 9
	li	a0,48                   # special-case 0: print '0'
	call	putchar
	j	.L8
# Digit-extraction loop: do { d = v % base; buf[i++] = digit(d); v /= base; }
.L16:
.LBB3:
	.loc 1 43 26
	lw	a5,-88(s0)
	lw	a4,-28(s0)
	remuw	a5,a4,a5                # d = v % base (unsigned)
	sext.w	a5,a5
	.loc 1 43 13
	sw	a5,-32(s0)
	.loc 1 44 18
	lw	a5,-32(s0)
	sext.w	a4,a5
	li	a5,9
	bgt	a4,a5,.L14
	.loc 1 44 37 discriminator 1
	lw	a5,-32(s0)
	andi	a5,a5,0xff
	.loc 1 44 18 discriminator 1
	addiw	a5,a5,48                # '0' + d
	andi	a5,a5,0xff
	j	.L15
.L14:
	.loc 1 44 51 discriminator 2
	lw	a5,-32(s0)
	andi	a5,a5,0xff
	.loc 1 44 18 discriminator 2
	addiw	a5,a5,87                # 'a' - 10 + d
	andi	a5,a5,0xff
.L15:
	.loc 1 44 14 discriminator 4
	lw	a4,-20(s0)
	addiw	a3,a4,1
	sw	a3,-20(s0)
	.loc 1 44 18 discriminator 4
	addi	a4,a4,-16
	add	a4,a4,s0
	sb	a5,-56(a4)              # buf[i] = digit char
	.loc 1 45 14
	lw	a5,-88(s0)
	lw	a4,-28(s0)
	divuw	a5,a4,a5                # v /= base (unsigned)
	sw	a5,-28(s0)
.L13:
.LBE3:
	.loc 1 42 17
	lw	a5,-28(s0)
	sext.w	a5,a5
	bne	a5,zero,.L16
	.loc 1 48 8
	lw	a5,-24(s0)
	sext.w	a5,a5
	beq	a5,zero,.L18
	.loc 1 49 9
	li	a0,45                   # print '-'
	call	putchar
	.loc 1 52 11
	j	.L18
# Output loop: emit buf[i] for i = count-1 .. 0
.L19:
	.loc 1 53 9
	lw	a5,-20(s0)
	addi	a5,a5,-16
	add	a5,a5,s0
	lbu	a5,-56(a5)
	mv	a0,a5
	call	putchar
.L18:
	.loc 1 52 16
	lw	a5,-20(s0)
	addiw	a5,a5,-1
	sw	a5,-20(s0)
	lw	a5,-20(s0)
	sext.w	a5,a5
	bge	a5,zero,.L19
.L8:
	.loc 1 55 1
	ld	ra,88(sp)
	.cfi_restore 1
	ld	s0,80(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 96
	addi	sp,sp,96
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE2:
	.size	printint, .-printint
# -----------------------------------------------------------------------
# printlong(long x, int base, int sign)
# 64-bit variant of printint: a0 = x (64-bit), a1 = base, a2 = sign.
# Same algorithm, using remu/divu on the full 64-bit value; digit
# buffer at -80(s0), locals -20(s0)=i, -24(s0)=neg, -28(s0)=digit.
# -----------------------------------------------------------------------
	.align	1
	.globl	printlong
	.type	printlong, @function
printlong:
.LFB3:
	.loc 1 57 57
	.cfi_startproc
	addi	sp,sp,-112
	.cfi_def_cfa_offset 112
	sd	ra,104(sp)
	sd	s0,96(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,112
	.cfi_def_cfa 8, 0
	sd	a0,-104(s0)             # x (64-bit)
	mv	a5,a1
	mv	a4,a2
	sw	a5,-108(s0)             # base
	mv	a5,a4
	sw	a5,-112(s0)             # sign flag
	.loc 1 59 9
	sw	zero,-20(s0)            # i = 0
	.loc 1 60 9
	sw	zero,-24(s0)            # neg = 0
	.loc 1 62 8
	lw	a5,-112(s0)
	sext.w	a5,a5
	beq	a5,zero,.L21
	.loc 1 62 17 discriminator 1
	ld	a5,-104(s0)
	.loc 1 62 14 discriminator 1
	bge	a5,zero,.L21
	.loc 1 63 18
	li	a5,1
	sw	a5,-24(s0)
	.loc 1 64 17
	ld	a5,-104(s0)
	.loc 1 64 16
	neg	a5,a5                   # x = -x (in place)
	.loc 1 64 14
	sd	a5,-104(s0)
.L21:
	.loc 1 67 8
	ld	a5,-104(s0)
	bne	a5,zero,.L24
	.loc 1 68 9
	li	a0,48                   # special-case 0
	call	putchar
	j	.L20
# Digit-extraction loop (64-bit unsigned rem/div)
.L27:
.LBB4:
	.loc 1 73 26
	lw	a5,-108(s0)
	ld	a4,-104(s0)
	remu	a5,a4,a5
	.loc 1 73 13
	sw	a5,-28(s0)
	.loc 1 74 18
	lw	a5,-28(s0)
	sext.w	a4,a5
	li	a5,9
	bgt	a4,a5,.L25
	.loc 1 74 37 discriminator 1
	lw	a5,-28(s0)
	andi	a5,a5,0xff
	.loc 1 74 18 discriminator 1
	addiw	a5,a5,48                # '0' + d
	andi	a5,a5,0xff
	j	.L26
.L25:
	.loc 1 74 51 discriminator 2
	lw	a5,-28(s0)
	andi	a5,a5,0xff
	.loc 1 74 18 discriminator 2
	addiw	a5,a5,87                # 'a' - 10 + d
	andi	a5,a5,0xff
.L26:
	.loc 1 74 14 discriminator 4
	lw	a4,-20(s0)
	addiw	a3,a4,1
	sw	a3,-20(s0)
	.loc 1 74 18 discriminator 4
	addi	a4,a4,-16
	add	a4,a4,s0
	sb	a5,-80(a4)              # buf[i] = digit char
	.loc 1 75 14
	lw	a5,-108(s0)
	ld	a4,-104(s0)
	divu	a5,a4,a5
	sd	a5,-104(s0)
.L24:
.LBE4:
	.loc 1 72 17
	ld	a5,-104(s0)
	bne	a5,zero,.L27
	.loc 1 78 8
	lw	a5,-24(s0)
	sext.w	a5,a5
	beq	a5,zero,.L29
	.loc 1 79 9
	li	a0,45                   # print '-'
	call	putchar
	.loc 1 82 11
	j	.L29
# Output loop: emit digits in reverse
.L30:
	.loc 1 83 9
	lw	a5,-20(s0)
	addi	a5,a5,-16
	add	a5,a5,s0
	lbu	a5,-80(a5)
	mv	a0,a5
	call	putchar
.L29:
	.loc 1 82 16
	lw	a5,-20(s0)
	addiw	a5,a5,-1
	sw	a5,-20(s0)
	lw	a5,-20(s0)
	sext.w	a5,a5
	bge	a5,zero,.L30
.L20:
	.loc 1 85 1
	ld	ra,104(sp)
	.cfi_restore 1
	ld	s0,96(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 112
	addi	sp,sp,112
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE3:
	.size	printlong, .-printlong
# -----------------------------------------------------------------------
# printlonglong(long long x, int base, int sign)
# Instruction-for-instruction identical to printlong (on RV64 long and
# long long are both 64-bit); kept as a separate symbol for the printf
# %ll paths.  a0 = x, a1 = base, a2 = sign.
# -----------------------------------------------------------------------
	.align	1
	.globl	printlonglong
	.type	printlonglong, @function
printlonglong:
.LFB4:
	.loc 1 87 66
	.cfi_startproc
	addi	sp,sp,-112
	.cfi_def_cfa_offset 112
	sd	ra,104(sp)
	sd	s0,96(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,112
	.cfi_def_cfa 8, 0
	sd	a0,-104(s0)             # x (64-bit)
	mv	a5,a1
	mv	a4,a2
	sw	a5,-108(s0)             # base
	mv	a5,a4
	sw	a5,-112(s0)             # sign flag
	.loc 1 89 9
	sw	zero,-20(s0)            # i = 0
	.loc 1 90 9
	sw	zero,-24(s0)            # neg = 0
	.loc 1 92 8
	lw	a5,-112(s0)
	sext.w	a5,a5
	beq	a5,zero,.L32
	.loc 1 92 17 discriminator 1
	ld	a5,-104(s0)
	.loc 1 92 14 discriminator 1
	bge	a5,zero,.L32
	.loc 1 93 18
	li	a5,1
	sw	a5,-24(s0)
	.loc 1 94 17
	ld	a5,-104(s0)
	.loc 1 94 16
	neg	a5,a5
	.loc 1 94 14
	sd	a5,-104(s0)
.L32:
	.loc 1 97 8
	ld	a5,-104(s0)
	bne	a5,zero,.L35
	.loc 1 98 9
	li	a0,48                   # special-case 0
	call	putchar
	j	.L31
# Digit-extraction loop (64-bit unsigned rem/div)
.L38:
.LBB5:
	.loc 1 103 26
	lw	a5,-108(s0)
	ld	a4,-104(s0)
	remu	a5,a4,a5
	.loc 1 103 13
	sw	a5,-28(s0)
	.loc 1 104 18
	lw	a5,-28(s0)
	sext.w	a4,a5
	li	a5,9
	bgt	a4,a5,.L36
	.loc 1 104 37 discriminator 1
	lw	a5,-28(s0)
	andi	a5,a5,0xff
	.loc 1 104 18 discriminator 1
	addiw	a5,a5,48                # '0' + d
	andi	a5,a5,0xff
	j	.L37
.L36:
	.loc 1 104 51 discriminator 2
	lw	a5,-28(s0)
	andi	a5,a5,0xff
	.loc 1 104 18 discriminator 2
	addiw	a5,a5,87                # 'a' - 10 + d
	andi	a5,a5,0xff
.L37:
	.loc 1 104 14 discriminator 4
	lw	a4,-20(s0)
	addiw	a3,a4,1
	sw	a3,-20(s0)
	.loc 1 104 18 discriminator 4
	addi	a4,a4,-16
	add	a4,a4,s0
	sb	a5,-80(a4)              # buf[i] = digit char
	.loc 1 105 14
	lw	a5,-108(s0)
	ld	a4,-104(s0)
	divu	a5,a4,a5
	sd	a5,-104(s0)
.L35:
.LBE5:
	.loc 1 102 17
	ld	a5,-104(s0)
	bne	a5,zero,.L38
	.loc 1 108 8
	lw	a5,-24(s0)
	sext.w	a5,a5
	beq	a5,zero,.L40
	.loc 1 109 9
	li	a0,45                   # print '-'
	call	putchar
	.loc 1 112 11
	j	.L40
# Output loop: emit digits in reverse
.L41:
	.loc 1 113 9
	lw	a5,-20(s0)
	addi	a5,a5,-16
	add	a5,a5,s0
	lbu	a5,-80(a5)
	mv	a0,a5
	call	putchar
.L40:
	.loc 1 112 16
	lw	a5,-20(s0)
	addiw	a5,a5,-1
	sw	a5,-20(s0)
	lw	a5,-20(s0)
	sext.w	a5,a5
	bge	a5,zero,.L41
.L31:
	.loc 1 115 1
	ld	ra,104(sp)
	.cfi_restore 1
	ld	s0,96(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 112
	addi	sp,sp,112
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE4:
	.size	printlonglong, .-printlonglong
	.section	.rodata
	.align	3
# Fallback string printed for a NULL %s argument.
.LC0:
	.string	"(null)"
	.text
# -----------------------------------------------------------------------
# int printf(const char *fmt, ...)
# Minimal varargs printf.  Register args a1-a7 are spilled to the
# stack to form the va_arg area (cursor at -104(s0)); each %-conversion
# pulls the next 8-byte slot.  Handles %d %x %c %s %p and the l/ll
# length modifiers (-28(s0) = modifier count); anything else falls
# through to .L58 and is echoed literally.  Always returns 0.
# -----------------------------------------------------------------------
	.align	1
	.globl	printf
	.type	printf, @function
printf:
.LFB5:
	.loc 1 117 34
	.cfi_startproc
	addi	sp,sp,-192
	.cfi_def_cfa_offset 192
	sd	ra,120(sp)
	sd	s0,112(sp)
	.cfi_offset 1, -72
	.cfi_offset 8, -80
	addi	s0,sp,128
	.cfi_def_cfa 8, 64
	sd	a0,-120(s0)             # fmt
	sd	a1,8(s0)                # spill register varargs a1..a7
	sd	a2,16(s0)
	sd	a3,24(s0)
	sd	a4,32(s0)
	sd	a5,40(s0)
	sd	a6,48(s0)
	sd	a7,56(s0)
	.loc 1 119 5
	addi	a5,s0,64                # va_start: build arg-area cursor
	sd	a5,-128(s0)
	ld	a5,-128(s0)
	addi	a5,a5,-56
	sd	a5,-104(s0)             # -104(s0) = va_arg cursor
	.loc 1 122 12
	ld	a5,-120(s0)
	sd	a5,-24(s0)              # -24(s0) = p (format scan pointer)
	.loc 1 122 5
	j	.L43
# Main scan loop: one iteration per format character
.L68:
	.loc 1 123 13
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	.loc 1 123 12
	mv	a4,a5
	li	a5,37
	beq	a4,a5,.L44              # '%' starts a conversion
	.loc 1 124 13
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	mv	a0,a5
	call	putchar                 # ordinary char: echo
	.loc 1 125 13
	j	.L45
.L44:
	.loc 1 128 10
	ld	a5,-24(s0)
	addi	a5,a5,1
	sd	a5,-24(s0)
	.loc 1 130 13
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	.loc 1 130 12
	mv	a4,a5
	li	a5,108
	bne	a4,a5,.L46              # 'l' length modifier?
.LBB6:
	.loc 1 131 17
	li	a5,1
	sw	a5,-28(s0)              # longflag = 1
	.loc 1 132 20
	ld	a5,-24(s0)
	addi	a5,a5,1
	.loc 1 132 17
	lbu	a5,0(a5)
	.loc 1 132 16
	mv	a4,a5
	li	a5,108
	bne	a4,a5,.L47              # second 'l' => ll
	.loc 1 133 24
	li	a5,2
	sw	a5,-28(s0)              # longflag = 2
	.loc 1 134 18
	ld	a5,-24(s0)
	addi	a5,a5,1
	sd	a5,-24(s0)
.L47:
	.loc 1 136 14
	ld	a5,-24(s0)
	addi	a5,a5,1
	sd	a5,-24(s0)
	.loc 1 138 21
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	sext.w	a5,a5
	.loc 1 138 13
	li	a4,100
	beq	a5,a4,.L48              # %ld / %lld
	li	a4,120
	bne	a5,a4,.L49              # not %lx/%llx -> unknown
	.loc 1 140 24
	lw	a5,-28(s0)
	sext.w	a4,a5
	li	a5,1
	bne	a4,a5,.L50
.LBB7:
	.loc 1 141 39
	ld	a5,-104(s0)             # va_arg(long)
	addi	a4,a5,8
	sd	a4,-104(s0)
	ld	a5,0(a5)
	sd	a5,-80(s0)
	.loc 1 142 25
	li	a2,0
	li	a1,16
	ld	a0,-80(s0)
	call	printlong               # %lx: unsigned hex
.LBE7:
	.loc 1 147 21
	j	.L45
.L50:
.LBB8:
	.loc 1 144 44
	ld	a5,-104(s0)             # va_arg(long long)
	addi	a4,a5,8
	sd	a4,-104(s0)
	ld	a5,0(a5)
	sd	a5,-72(s0)
	.loc 1 145 25
	li	a2,0
	li	a1,16
	ld	a0,-72(s0)
	call	printlonglong           # %llx
.LBE8:
	.loc 1 147 21
	j	.L45
.L48:
	.loc 1 150 24
	lw	a5,-28(s0)
	sext.w	a4,a5
	li	a5,1
	bne	a4,a5,.L53
.LBB9:
	.loc 1 151 30
	ld	a5,-104(s0)             # va_arg(long)
	addi	a4,a5,8
	sd	a4,-104(s0)
	ld	a5,0(a5)
	sd	a5,-96(s0)
	.loc 1 152 25
	ld	a5,-96(s0)
	li	a2,1
	li	a1,10
	mv	a0,a5
	call	printlong               # %ld: signed decimal
.LBE9:
	.loc 1 157 21
	j	.L45
.L53:
.LBB10:
	.loc 1 154 35
	ld	a5,-104(s0)             # va_arg(long long)
	addi	a4,a5,8
	sd	a4,-104(s0)
	ld	a5,0(a5)
	sd	a5,-88(s0)
	.loc 1 155 25
	ld	a5,-88(s0)
	li	a2,1
	li	a1,10
	mv	a0,a5
	call	printlonglong           # %lld
.LBE10:
	.loc 1 157 21
	j	.L45
.L49:
	.loc 1 160 21
	li	a0,37                   # unknown %l?: echo "%" + "l"*n + char
	call	putchar
.LBB11:
	.loc 1 161 30
	sw	zero,-32(s0)
	.loc 1 161 21
	j	.L55
.L56:
	.loc 1 161 50 discriminator 3
	li	a0,108
	call	putchar
	.loc 1 161 46 discriminator 3
	lw	a5,-32(s0)
	addiw	a5,a5,1
	sw	a5,-32(s0)
.L55:
	.loc 1 161 36 discriminator 2
	lw	a5,-32(s0)
	mv	a4,a5
	lw	a5,-28(s0)
	sext.w	a4,a4
	sext.w	a5,a5
	blt	a4,a5,.L56
.LBE11:
	.loc 1 162 21
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	mv	a0,a5
	call	putchar
	.loc 1 163 21
	j	.L45
.L46:
.LBE6:
	.loc 1 167 21
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	sext.w	a5,a5
	.loc 1 167 13
	li	a4,37
	beq	a5,a4,.L57              # "%%"
	li	a4,37
	blt	a5,a4,.L58
	li	a4,120
	bgt	a5,a4,.L58
	li	a4,99
	blt	a5,a4,.L58
	addiw	a5,a5,-99               # switch on conversion char - 'c'
	mv	a3,a5
	sext.w	a4,a3
	li	a5,21
	bgtu	a4,a5,.L58
	slli	a5,a3,32
	srli	a5,a5,32
	slli	a4,a5,2
	lla	a5,.L60
	add	a5,a4,a5
	lw	a5,0(a5)
	sext.w	a4,a5
	lla	a5,.L60
	add	a5,a4,a5
	jr	a5                      # indirect jump via table
	.section	.rodata
	.align	2
	.align	2
# Jump table for 'c'..'x' (offsets relative to .L60):
# 'c'->.L64, 'd'->.L63, 'p'->.L62, 's'->.L61, 'x'->.L59; rest ->.L58
.L60:
	.word	.L64-.L60
	.word	.L63-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L62-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L61-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L58-.L60
	.word	.L59-.L60
	.text
# %d: signed decimal int
.L63:
.LBB12:
	.loc 1 169 25
	ld	a5,-104(s0)
	addi	a4,a5,8
	sd	a4,-104(s0)
	lw	a5,0(a5)
	sw	a5,-60(s0)
	.loc 1 170 21
	lw	a5,-60(s0)
	li	a2,1
	li	a1,10
	mv	a0,a5
	call	printint
	.loc 1 171 21
	j	.L45
# %x: unsigned hex int
.L59:
.LBE12:
.LBB13:
	.loc 1 174 34
	ld	a5,-104(s0)
	addi	a4,a5,8
	sd	a4,-104(s0)
	lw	a5,0(a5)
	sw	a5,-44(s0)
	.loc 1 175 21
	lw	a5,-44(s0)
	li	a2,0
	li	a1,16
	mv	a0,a5
	call	printint
	.loc 1 176 21
	j	.L45
# %p: "0x" + hex of pointer
.L62:
.LBE13:
.LBB14:
	.loc 1 179 57
	ld	a5,-104(s0)
	addi	a4,a5,8
	sd	a4,-104(s0)
	ld	a5,0(a5)
	.loc 1 179 35 discriminator 1
	sd	a5,-56(s0)
	.loc 1 180 21
	li	a0,48
	call	putchar                 # '0'
	.loc 1 180 35 discriminator 1
	li	a0,120
	call	putchar                 # 'x'
	.loc 1 181 21
	li	a2,0
	li	a1,16
	ld	a0,-56(s0)
	call	printlong
	.loc 1 182 21
	j	.L45
# %s: string, NULL mapped to "(null)"
.L61:
.LBE14:
.LBB15:
	.loc 1 185 33
	ld	a5,-104(s0)
	addi	a4,a5,8
	sd	a4,-104(s0)
	ld	a5,0(a5)
	sd	a5,-40(s0)
	.loc 1 186 24
	ld	a5,-40(s0)
	bne	a5,zero,.L66
	.loc 1 186 31 discriminator 1
	lla	a5,.LC0
	sd	a5,-40(s0)
	.loc 1 187 27
	j	.L66
.L67:
	.loc 1 187 42 discriminator 2
	ld	a5,-40(s0)
	addi	a4,a5,1
	sd	a4,-40(s0)
	.loc 1 187 32 discriminator 2
	lbu	a5,0(a5)
	mv	a0,a5
	call	putchar
.L66:
	.loc 1 187 28 discriminator 1
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	bne	a5,zero,.L67
	.loc 1 188 21
	j	.L45
# %c: single character
.L64:
.LBE15:
.LBB16:
	.loc 1 191 36
	ld	a5,-104(s0)
	addi	a4,a5,8
	sd	a4,-104(s0)
	lw	a5,0(a5)
	.loc 1 191 26 discriminator 1
	sb	a5,-61(s0)
	.loc 1 192 21
	lbu	a5,-61(s0)
	mv	a0,a5
	call	putchar
	.loc 1 193 21
	j	.L45
# %%: literal percent
.L57:
.LBE16:
	.loc 1 196 21
	li	a0,37
	call	putchar
	.loc 1 197 21
	j	.L45
# Unknown conversion: echo '%' and the character itself
.L58:
	.loc 1 200 21
	li	a0,37
	call	putchar
	.loc 1 201 21
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	mv	a0,a5
	call	putchar
	.loc 1 202 21
	nop
.L45:
	.loc 1 122 24 discriminator 2
	ld	a5,-24(s0)
	addi	a5,a5,1
	sd	a5,-24(s0)              # p++
.L43:
	.loc 1 122 19 discriminator 1
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	bne	a5,zero,.L68            # loop until *p == 0
	.loc 1 209 12
	li	a5,0
	.loc 1 210 1
	mv	a0,a5                   # return 0
	ld	ra,120(sp)
	.cfi_restore 1
	ld	s0,112(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 192
	addi	sp,sp,192
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE5:
	.size	printf, .-printf
# -----------------------------------------------------------------------
# puts(const char *s)
# Prints s one character at a time via putchar.  NOTE: unlike C's
# puts, this emits no trailing newline (no such call appears here).
# -----------------------------------------------------------------------
	.align	1
	.globl	puts
	.type	puts, @function
puts:
.LFB6:
	.loc 1 214 26
	.cfi_startproc
	addi	sp,sp,-32
	.cfi_def_cfa_offset 32
	sd	ra,24(sp)
	sd	s0,16(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,32
	.cfi_def_cfa 8, 0
	sd	a0,-24(s0)              # -24(s0) = scan pointer
	.loc 1 215 11
	j	.L71
.L72:
	.loc 1 216 19
	ld	a5,-24(s0)
	addi	a4,a5,1
	sd	a4,-24(s0)
	.loc 1 216 9
	lbu	a5,0(a5)
	mv	a0,a5
	call	putchar
.L71:
	.loc 1 215 12
	ld	a5,-24(s0)
	lbu	a5,0(a5)
	bne	a5,zero,.L72            # loop until NUL
	.loc 1 218 1
	nop
	nop
	ld	ra,24(sp)
	.cfi_restore 1
	ld	s0,16(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 32
	addi	sp,sp,32
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE6:
	.size	puts, .-puts
# -----------------------------------------------------------------------
# int run_command(int n, cmd *cmds, int ncmds)
# Executes the pipeline stage n of ncmds parsed commands (each cmd is
# 52 bytes: up to 4 argv slots of 16 bytes at offset 0, arg count at
# offset 48).  Last stage: build argv and exec in place.  Otherwise:
# create a pipe, spawn via syscall a7=68 (presumably fork — returns 0
# in the child; confirm against the minux syscall table), wire the
# pipe ends with a7=72 (presumably dup/redirect) and a7=67 (presumably
# close), recurse for the next stage in one branch and exec in the
# other.  Exec is a7=69; a7=70 with a0=127 looks like exit(127) on
# exec failure — confirm.  Returns 1 (only reachable on the parent
# path after the recursive call).
# -----------------------------------------------------------------------
	.align	1
	.globl	run_command
	.type	run_command, @function
run_command:
.LFB7:
	.loc 1 280 1
	.cfi_startproc
	addi	sp,sp,-112
	.cfi_def_cfa_offset 112
	sd	ra,104(sp)
	sd	s0,96(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,112
	.cfi_def_cfa 8, 0
	mv	a5,a0
	sd	a1,-112(s0)             # cmds
	mv	a4,a2
	sw	a5,-100(s0)             # n
	mv	a5,a4
	sw	a5,-104(s0)             # ncmds
	.loc 1 281 9
	sw	zero,-48(s0)            # pipe fd pair at -48/-44(s0)
	sw	zero,-44(s0)
	.loc 1 283 41
	lw	a4,-100(s0)
	li	a5,52                   # sizeof(cmd) == 52
	mul	a5,a4,a5
	.loc 1 283 22
	ld	a4,-112(s0)
	add	a5,a4,a5
	sd	a5,-32(s0)              # -32(s0) = &cmds[n]
	.loc 1 285 25
	lw	a5,-104(s0)
	addiw	a5,a5,-1
	sext.w	a5,a5
	.loc 1 285 7
	lw	a4,-100(s0)
	sext.w	a4,a4
	bne	a4,a5,.L74              # not the last stage -> pipe path
.LBB17:
# Last stage: build argv[] on the stack and exec
	.loc 1 288 14
	sw	zero,-20(s0)
	.loc 1 288 9
	j	.L75
.L76:
	.loc 1 289 22
	lw	a5,-20(s0)
	slli	a5,a5,4                 # arg slots are 16 bytes apart
	ld	a4,-32(s0)
	add	a4,a4,a5
	.loc 1 289 20
	lw	a3,-20(s0)
	addi	a5,s0,-72               # argv array at -72(s0)
	slli	a3,a3,3
	add	a5,a3,a5
	sd	a4,0(a5)
	.loc 1 288 39 discriminator 3
	lw	a5,-20(s0)
	addiw	a5,a5,1
	sw	a5,-20(s0)
.L75:
	.loc 1 288 27 discriminator 1
	ld	a5,-32(s0)
	lw	a5,48(a5)               # cmd->argc at offset 48
	.loc 1 288 19 discriminator 1
	lw	a4,-20(s0)
	sext.w	a4,a4
	blt	a4,a5,.L76
	.loc 1 291 17
	lw	a4,-20(s0)
	addi	a5,s0,-72
	slli	a4,a4,3
	add	a5,a4,a5
	sd	zero,0(a5)              # argv[argc] = NULL
.LBB18:
	.loc 1 293 9
	ld	a5,-72(s0)
	mv	a0,a5                   # a0 = argv[0]
	addi	a5,s0,-72
	mv	a1,a5                   # a1 = argv
	li	a7,69                   # syscall 69: presumably exec
#APP
# 293 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE18:
.LBB19:
	.loc 1 294 9
	li	a0,127
	li	a7,70                   # syscall 70: presumably exit(127)
#APP
# 294 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.L77:
	j	.L77                    # should not return; spin if it does
.L74:
.LBE19:
.LBE17:
.LBB20:
.LBB21:
# Intermediate stage: pipe, fork, wire fds, recurse/exec
	.loc 1 297 9
	addi	a5,s0,-48
	mv	a0,a5
	li	a7,73                   # syscall 73: presumably pipe(&fds)
#APP
# 297 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE21:
.LBB22:
	.loc 1 299 19
	li	a7,68                   # syscall 68: presumably fork
#APP
# 299 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
	mv	a5,a0
.LBE22:
	.loc 1 299 13
	sw	a5,-36(s0)              # pid
	.loc 1 301 11
	lw	a5,-36(s0)
	sext.w	a5,a5
	bne	a5,zero,.L78            # pid != 0 -> other branch
.LBB23:
# pid == 0 branch: read end -> fd 0, close both, recurse to next stage
	.loc 1 302 13
	lw	a5,-48(s0)
	mv	a0,a5
	li	a1,0
	li	a7,72                   # syscall 72: presumably dup2-like
#APP
# 302 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE23:
.LBB24:
	.loc 1 303 13
	lw	a5,-48(s0)
	mv	a0,a5
	li	a7,67                   # syscall 67: presumably close
#APP
# 303 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE24:
.LBB25:
	.loc 1 304 13
	lw	a5,-44(s0)
	mv	a0,a5
	li	a7,67
#APP
# 304 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE25:
	.loc 1 305 13
	lw	a5,-100(s0)
	addiw	a5,a5,1
	sext.w	a5,a5
	lw	a4,-104(s0)
	mv	a2,a4
	ld	a1,-112(s0)
	mv	a0,a5
	call	run_command             # run_command(n+1, cmds, ncmds)
	j	.L84
.L78:
.LBB26:
.LBB27:
# pid != 0 branch: write end -> fd 1, then exec this stage
	.loc 1 308 13
	lw	a5,-48(s0)
	mv	a0,a5
	li	a7,67                   # close read end
#APP
# 308 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE27:
.LBB28:
	.loc 1 309 13
	lw	a5,-44(s0)
	mv	a0,a5
	li	a1,1
	li	a7,72                   # write end -> fd 1
#APP
# 309 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE28:
.LBB29:
	.loc 1 310 13
	lw	a5,-44(s0)
	mv	a0,a5
	li	a7,67                   # close original write end
#APP
# 310 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE29:
	.loc 1 314 18
	sw	zero,-24(s0)
	.loc 1 314 13
	j	.L80
.L81:
	.loc 1 315 26
	lw	a5,-24(s0)
	slli	a5,a5,4
	ld	a4,-32(s0)
	add	a4,a4,a5
	.loc 1 315 24
	lw	a3,-24(s0)
	addi	a5,s0,-96               # argv array at -96(s0)
	slli	a3,a3,3
	add	a5,a3,a5
	sd	a4,0(a5)
	.loc 1 314 43 discriminator 3
	lw	a5,-24(s0)
	addiw	a5,a5,1
	sw	a5,-24(s0)
.L80:
	.loc 1 314 31 discriminator 1
	ld	a5,-32(s0)
	lw	a5,48(a5)
	.loc 1 314 23 discriminator 1
	lw	a4,-24(s0)
	sext.w	a4,a4
	blt	a4,a5,.L81
	.loc 1 317 21
	lw	a4,-24(s0)
	addi	a5,s0,-96
	slli	a4,a4,3
	add	a5,a4,a5
	sd	zero,0(a5)              # argv[argc] = NULL
.LBB30:
	.loc 1 319 13
	ld	a5,-96(s0)
	mv	a0,a5
	addi	a5,s0,-96
	mv	a1,a5
	li	a7,69                   # exec
#APP
# 319 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE30:
.LBB31:
	.loc 1 320 13
	li	a0,127
	li	a7,70                   # exit(127) on exec failure
#APP
# 320 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.L82:
	j	.L82                    # should not return; spin if it does
.L84:
.LBE31:
.LBE26:
.LBE20:
	.loc 1 324 12
	li	a5,1
	.loc 1 325 1
	mv	a0,a5                   # return 1
	ld	ra,104(sp)
	.cfi_restore 1
	ld	s0,96(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 112
	addi	sp,sp,112
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE7:
	.size	run_command, .-run_command
	.section	.rodata
	.align	3
.LC1:
	.string	"$ "                    # shell prompt
	.align	3
.LC2:
	.string	"\b \b"                 # terminal backspace-erase sequence
	.align	3
.LC3:
	.string	"\r\n"
	.align	3
.LC4:
	.string	"ERR MAX COMMAND"       # more than 5 pipeline stages
	.align	3
.LC5:
	.string	"ARG NUM ERROR"         # more than 3 args in one command
	.align	3
.LC6:
	.string	"ERR ARG NUM"           # token longer than 16 bytes
	.text
# -----------------------------------------------------------------------
# int main(void)
# Shell REPL (never returns; every path jumps back to .L110):
#  1. Print "$ " (syscall a7=64, fd 1).
#  2. Read/echo one char at a time (a7=65 read fd 0 / a7=64 write fd 1);
#     BS (8) or DEL (127) erases via "\b \b"; CR (13) or LF (10) ends
#     the line.  Line buffer at -160(s0), length at -24(s0).
#  3. Parse the line into cmd structs at -432(s0) (52 bytes each:
#     4 x 16-byte argv slots, arg count at +48): '|' starts a new
#     command, space/tab starts a new arg.  Limits: >5 commands ->
#     .LC4, >3 args -> .LC5, token >16 bytes -> .LC6.
#  4. Syscall a7=68 (presumably fork — returns 0 in child; confirm
#     against the minux syscall table): child calls run_command(0,...),
#     parent waits (a7=71, presumably wait) once per parsed command.
# -----------------------------------------------------------------------
	.align	1
	.globl	main
	.type	main, @function
main:
.LFB8:
	.loc 1 327 16
	.cfi_startproc
	addi	sp,sp,-448
	.cfi_def_cfa_offset 448
	sd	ra,440(sp)
	sd	s0,432(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,448
	.cfi_def_cfa 8, 0
# Top of REPL: print prompt, reset line buffer
.L110:
.LBB32:
	.loc 1 343 9
	li	a0,1
	lla	a5,.LC1
	mv	a1,a5
	li	a2,2
	li	a7,64                   # write(1, "$ ", 2)
#APP
# 343 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
	sd	a0,-56(s0)
.LBE32:
	.loc 1 346 11
	sd	zero,-24(s0)            # n = 0 (line length)
# Per-character read loop
.L92:
.LBB33:
	.loc 1 348 13
	li	a0,0
	addi	a5,s0,-168
	mv	a1,a5
	li	a2,1
	li	a7,65                   # read(0, &c, 1)
#APP
# 348 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
	sd	a0,-64(s0)
.LBE33:
.LBB34:
	.loc 1 349 13
	li	a0,1
	addi	a5,s0,-168
	mv	a1,a5
	li	a2,1
	li	a7,64                   # echo the char back
#APP
# 349 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
	sd	a0,-72(s0)
.LBE34:
	.loc 1 352 22
	lbu	a5,-168(s0)
	.loc 1 352 16
	mv	a4,a5
	li	a5,8                    # BS?
	beq	a4,a5,.L86
	.loc 1 352 41 discriminator 2
	lbu	a5,-168(s0)
	.loc 1 352 34 discriminator 2
	mv	a4,a5
	li	a5,127                  # DEL?
	bne	a4,a5,.L87
.L86:
	.loc 1 352 53 discriminator 3
	ld	a5,-24(s0)
	ble	a5,zero,.L87            # nothing to erase
	.loc 1 354 18
	ld	a5,-24(s0)
	addi	a5,a5,-1
	sd	a5,-24(s0)              # n--
	.loc 1 355 24
	ld	a5,-24(s0)
	addi	a5,a5,-16
	add	a5,a5,s0
	sb	zero,-144(a5)           # buf[n] = 0
.LBB35:
	.loc 1 357 17
	li	a0,1
	lla	a5,.LC2
	mv	a1,a5
	li	a2,3
	li	a7,64                   # write(1, "\b \b", 3)
#APP
# 357 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
	sd	a0,-80(s0)
.LBE35:
	j	.L88
.L87:
	.loc 1 359 25
	lbu	a5,-168(s0)
	.loc 1 359 20
	mv	a4,a5
	li	a5,13                   # CR ends the line
	beq	a4,a5,.L111
	.loc 1 362 25
	lbu	a5,-168(s0)
	.loc 1 362 20
	mv	a4,a5
	li	a5,10                   # LF ends the line
	beq	a4,a5,.L112
	.loc 1 366 30
	lbu	a4,-168(s0)
	.loc 1 366 24
	ld	a5,-24(s0)
	addi	a5,a5,-16
	add	a5,a5,s0
	sb	a4,-144(a5)             # buf[n] = c
	.loc 1 367 18
	ld	a5,-24(s0)
	addi	a5,a5,1
	sd	a5,-24(s0)              # n++
.L88:
	.loc 1 348 13
	j	.L92
.L111:
	.loc 1 360 17
	nop
	j	.L90
.L112:
	.loc 1 363 17
	nop
.L90:
# Line complete: terminate buffer, echo CRLF, then parse
	.loc 1 370 16
	ld	a5,-24(s0)
	addi	a5,a5,-16
	add	a5,a5,s0
	sb	zero,-144(a5)           # buf[n] = 0
.LBB36:
	.loc 1 374 9
	li	a0,1
	lla	a5,.LC3
	mv	a1,a5
	li	a2,2
	li	a7,64                   # write(1, "\r\n", 2)
#APP
# 374 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
	sd	a0,-88(s0)
.LBE36:
	.loc 1 376 15
	lbu	a5,-160(s0)
	.loc 1 376 11
	beq	a5,zero,.L113           # empty line -> back to prompt
	.loc 1 380 22
	sw	zero,-28(s0)            # ncmds = 0
	.loc 1 381 17
	sw	zero,-32(s0)            # nargs = 0
	.loc 1 382 11
	addi	a5,s0,-160
	sd	a5,-40(s0)              # p = buf
	.loc 1 383 11
	sd	zero,-24(s0)            # token length = 0
# Tokenizer loop
.L106:
	.loc 1 386 16
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	.loc 1 386 15
	mv	a4,a5
	li	a5,124                  # '|': finish current command
	bne	a4,a5,.L95
	.loc 1 387 18
	ld	a5,-40(s0)
	addi	a5,a5,1
	sd	a5,-40(s0)
	.loc 1 388 22
	j	.L96
.L97:
	.loc 1 389 22
	ld	a5,-40(s0)
	addi	a5,a5,1
	sd	a5,-40(s0)
.L96:
	.loc 1 388 23
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	.loc 1 388 33
	mv	a4,a5
	li	a5,32                   # skip spaces after '|'
	beq	a4,a5,.L97
	.loc 1 388 36 discriminator 1
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	.loc 1 388 33 discriminator 1
	mv	a4,a5
	li	a5,9                    # and tabs
	beq	a4,a5,.L97
	.loc 1 392 57
	lw	a5,-32(s0)
	lw	a3,-28(s0)
	slli	a4,a5,4
	li	a5,52                   # &cmds[ncmds].arg[nargs][len]
	mul	a5,a3,a5
	add	a5,a4,a5
	addi	a5,a5,-16
	add	a4,a5,s0
	ld	a5,-24(s0)
	add	a5,a4,a5
	sb	zero,-416(a5)           # NUL-terminate current token
	.loc 1 393 48
	lw	a4,-28(s0)
	li	a5,52
	mul	a5,a4,a5
	addi	a5,a5,-16
	add	a5,a5,s0
	lw	a4,-32(s0)
	sw	a4,-368(a5)             # cmds[ncmds].argc = nargs
	.loc 1 395 29
	lw	a5,-28(s0)
	addiw	a5,a5,1
	sw	a5,-28(s0)              # ncmds++
	.loc 1 396 19
	sd	zero,-24(s0)
	.loc 1 397 25
	sw	zero,-32(s0)
	.loc 1 399 19
	lw	a5,-28(s0)
	sext.w	a4,a5
	li	a5,4
	ble	a4,a5,.L106             # more than 5 commands -> error
	.loc 1 400 21
	lla	a0,.LC4
	call	puts
	.loc 1 401 21
	j	.L99
.L95:
	.loc 1 404 21
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	.loc 1 404 20
	mv	a4,a5
	li	a5,32                   # space/tab: finish current arg
	beq	a4,a5,.L102
	.loc 1 404 34 discriminator 1
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	.loc 1 404 31 discriminator 1
	mv	a4,a5
	li	a5,9
	bne	a4,a5,.L101
	.loc 1 405 22
	j	.L102
.L103:
	.loc 1 406 22
	ld	a5,-40(s0)
	addi	a5,a5,1
	sd	a5,-40(s0)
.L102:
	.loc 1 405 23
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	.loc 1 405 33
	mv	a4,a5
	li	a5,32                   # skip runs of whitespace
	beq	a4,a5,.L103
	.loc 1 405 36 discriminator 1
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	.loc 1 405 33 discriminator 1
	mv	a4,a5
	li	a5,9
	beq	a4,a5,.L103
	.loc 1 408 57
	lw	a5,-32(s0)
	lw	a3,-28(s0)
	slli	a4,a5,4
	li	a5,52
	mul	a5,a3,a5
	add	a5,a4,a5
	addi	a5,a5,-16
	add	a4,a5,s0
	ld	a5,-24(s0)
	add	a5,a4,a5
	sb	zero,-416(a5)           # NUL-terminate current token
	.loc 1 409 24
	lw	a5,-32(s0)
	addiw	a5,a5,1
	sw	a5,-32(s0)              # nargs++
	.loc 1 410 19
	sd	zero,-24(s0)
	.loc 1 412 19
	lw	a5,-32(s0)
	sext.w	a4,a5
	li	a5,2
	ble	a4,a5,.L98              # more than 3 args -> error
	.loc 1 413 21
	lla	a0,.LC5
	call	puts
	.loc 1 414 21
	j	.L99
.L101:
	.loc 1 417 21
	ld	a5,-40(s0)
	lbu	a5,0(a5)
	.loc 1 417 20
	bne	a5,zero,.L105
# End of line: close out the final token and command
	.loc 1 418 57
	lw	a5,-32(s0)
	lw	a3,-28(s0)
	slli	a4,a5,4
	li	a5,52
	mul	a5,a3,a5
	add	a5,a4,a5
	addi	a5,a5,-16
	add	a4,a5,s0
	ld	a5,-24(s0)
	add	a5,a4,a5
	sb	zero,-416(a5)
	.loc 1 420 24
	lw	a5,-32(s0)
	addiw	a5,a5,1
	sw	a5,-32(s0)
	.loc 1 421 48
	lw	a4,-28(s0)
	li	a5,52
	mul	a5,a4,a5
	addi	a5,a5,-16
	add	a5,a5,s0
	lw	a4,-32(s0)
	sw	a4,-368(a5)             # final cmds[ncmds].argc
	.loc 1 422 29
	lw	a5,-28(s0)
	addiw	a5,a5,1
	sw	a5,-28(s0)              # ncmds++
	.loc 1 423 17
	j	.L99                    # parsing done -> execute
.L105:
# Ordinary character: append to current token
	.loc 1 426 59
	ld	a5,-40(s0)
	lbu	a4,0(a5)
	.loc 1 426 57
	lw	a5,-32(s0)
	lw	a2,-28(s0)
	slli	a3,a5,4
	li	a5,52
	mul	a5,a2,a5
	add	a5,a3,a5
	addi	a5,a5,-16
	add	a3,a5,s0
	ld	a5,-24(s0)
	add	a5,a3,a5
	sb	a4,-416(a5)             # cmds[ncmds].arg[nargs][len] = c
	.loc 1 427 18
	ld	a5,-40(s0)
	addi	a5,a5,1
	sd	a5,-40(s0)              # p++
	.loc 1 428 18
	ld	a5,-24(s0)
	addi	a5,a5,1
	sd	a5,-24(s0)              # len++
	.loc 1 430 19
	ld	a4,-24(s0)
	li	a5,15
	ble	a4,a5,.L106             # token longer than 16 -> error
	.loc 1 431 21
	lla	a0,.LC6
	call	puts
	.loc 1 432 21
	j	.L99
.L98:
	.loc 1 386 15
	j	.L106
# Execution phase: fork-like syscall, child runs the pipeline
.L99:
.LBB37:
	.loc 1 437 15
	li	a7,68                   # syscall 68: presumably fork
#APP
# 437 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
	mv	a5,a0
.LBE37:
	.loc 1 437 13
	sw	a5,-92(s0)              # pid
	.loc 1 439 11
	lw	a5,-92(s0)
	sext.w	a5,a5
	bne	a5,zero,.L107
	.loc 1 440 13
	lw	a4,-28(s0)
	addi	a5,s0,-432
	mv	a2,a4
	mv	a1,a5
	li	a0,0
	call	run_command             # child: run_command(0, cmds, ncmds)
	j	.L110
.L107:
.LBB38:
# Parent: wait once per parsed command
	.loc 1 443 21
	sw	zero,-44(s0)
	.loc 1 443 13
	j	.L108
.L109:
.LBB39:
.LBB40:
	.loc 1 445 17
	addi	a5,s0,-436
	mv	a0,a5
	li	a7,71                   # syscall 71: presumably wait(&status)
#APP
# 445 "msh.c" 1
	ecall
# 0 "" 2
#NO_APP
.LBE40:
.LBE39:
	.loc 1 443 43 discriminator 3
	lw	a5,-44(s0)
	addiw	a5,a5,1
	sw	a5,-44(s0)
.L108:
	.loc 1 443 27 discriminator 1
	lw	a5,-44(s0)
	mv	a4,a5
	lw	a5,-28(s0)
	sext.w	a4,a4
	sext.w	a5,a5
	blt	a4,a5,.L109
	j	.L110                   # back to prompt
.L113:
.LBE38:
	.loc 1 377 13
	nop
	.loc 1 343 9
	j	.L110                   # empty line: back to prompt
	.cfi_endproc
.LFE8:
	.size	main, .-main
.Letext0:
.file 2 "/opt/homebrew/Cellar/riscv-gnu-toolchain/main/lib/gcc/riscv64-unknown-elf/14.2.0/include/stdarg.h"
.file 3 "minux.h"
.section .debug_info,"",@progbits
.Ldebug_info0:
.4byte 0xca4
.2byte 0x5
.byte 0x1
.byte 0x8
.4byte .Ldebug_abbrev0
.uleb128 0x13
.4byte .LASF42
.byte 0x1d
.4byte .LASF0
.4byte .LASF1
.8byte .Ltext0
.8byte .Letext0-.Ltext0
.4byte .Ldebug_line0
.uleb128 0x7
.byte 0x1
.byte 0x6
.4byte .LASF2
.uleb128 0x7
.byte 0x2
.byte 0x5
.4byte .LASF3
.uleb128 0x14
.byte 0x4
.byte 0x5
.string "int"
.uleb128 0x7
.byte 0x8
.byte 0x5
.4byte .LASF4
.uleb128 0x7
.byte 0x1
.byte 0x8
.4byte .LASF5
.uleb128 0x7
.byte 0x2
.byte 0x7
.4byte .LASF6
.uleb128 0x7
.byte 0x4
.byte 0x7
.4byte .LASF7
.uleb128 0x7
.byte 0x8
.byte 0x7
.4byte .LASF8
.uleb128 0x7
.byte 0x8
.byte 0x5
.4byte .LASF9
.uleb128 0x7
.byte 0x8
.byte 0x7
.4byte .LASF10
.uleb128 0xd
.4byte .LASF11
.byte 0x2
.byte 0x28
.byte 0x1b
.4byte 0x80
.uleb128 0x15
.byte 0x8
.4byte .LASF43
.uleb128 0xd
.4byte .LASF12
.byte 0x2
.byte 0x67
.byte 0x18
.4byte 0x74
.uleb128 0xd
.4byte .LASF13
.byte 0x3
.byte 0x4
.byte 0xd
.4byte 0x3c
.uleb128 0x16
.4byte .LASF44
.byte 0x34
.byte 0x1
.2byte 0x111
.byte 0x8
.4byte 0xc7
.uleb128 0xf
.4byte .LASF14
.2byte 0x113
.byte 0xa
.4byte 0xc7
.byte 0
.uleb128 0xf
.4byte .LASF15
.2byte 0x114
.byte 0x9
.4byte 0x3c
.byte 0x30
.byte 0
.uleb128 0xa
.4byte 0xdd
.4byte 0xdd
.uleb128 0x8
.4byte 0x6d
.byte 0x2
.uleb128 0x8
.4byte 0x6d
.byte 0xf
.byte 0
.uleb128 0x7
.byte 0x1
.byte 0x8
.4byte .LASF16
.uleb128 0x17
.4byte 0xdd
.uleb128 0x10
.4byte .LASF24
.2byte 0x147
.4byte 0x3c
.8byte .LFB8
.8byte .LFE8-.LFB8
.uleb128 0x1
.byte 0x9c
.4byte 0x40e
.uleb128 0x1
.string "buf"
.2byte 0x148
.byte 0xa
.4byte 0x40e
.uleb128 0x3
.byte 0x91
.sleb128 -160
.uleb128 0x1
.string "n"
.2byte 0x149
.byte 0xa
.4byte 0x66
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x3
.4byte .LASF17
.2byte 0x14a
.byte 0xa
.4byte 0x41e
.uleb128 0x3
.byte 0x91
.sleb128 -168
.uleb128 0x1
.string "pid"
.2byte 0x14b
.byte 0xb
.4byte 0x92
.uleb128 0x3
.byte 0x91
.sleb128 -92
.uleb128 0x11
.4byte .LASF18
.2byte 0x14c
.4byte 0x3c
.uleb128 0x11
.4byte .LASF19
.2byte 0x14d
.4byte 0x3c
.uleb128 0x3
.4byte .LASF20
.2byte 0x14f
.byte 0x15
.4byte 0x42e
.uleb128 0x3
.byte 0x91
.sleb128 -432
.uleb128 0x3
.4byte .LASF21
.2byte 0x150
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x3
.4byte .LASF15
.2byte 0x151
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x1
.string "p"
.2byte 0x152
.byte 0xb
.4byte 0x43e
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x2
.8byte .LBB32
.8byte .LBE32-.LBB32
.4byte 0x1f5
.uleb128 0x3
.4byte .LASF22
.2byte 0x157
.byte 0x9
.4byte 0x66
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0x1
.string "_a0"
.2byte 0x157
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x157
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a2"
.2byte 0x157
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5c
.uleb128 0x1
.string "_a7"
.2byte 0x157
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB33
.8byte .LBE33-.LBB33
.4byte 0x252
.uleb128 0x3
.4byte .LASF22
.2byte 0x15c
.byte 0xd
.4byte 0x66
.uleb128 0x2
.byte 0x91
.sleb128 -64
.uleb128 0x1
.string "_a0"
.2byte 0x15c
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x15c
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a2"
.2byte 0x15c
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5c
.uleb128 0x1
.string "_a7"
.2byte 0x15c
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB34
.8byte .LBE34-.LBB34
.4byte 0x2b0
.uleb128 0x3
.4byte .LASF22
.2byte 0x15d
.byte 0xd
.4byte 0x66
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x1
.string "_a0"
.2byte 0x15d
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x15d
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a2"
.2byte 0x15d
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5c
.uleb128 0x1
.string "_a7"
.2byte 0x15d
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB35
.8byte .LBE35-.LBB35
.4byte 0x30e
.uleb128 0x3
.4byte .LASF22
.2byte 0x165
.byte 0x11
.4byte 0x66
.uleb128 0x3
.byte 0x91
.sleb128 -80
.uleb128 0x1
.string "_a0"
.2byte 0x165
.byte 0x11
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x165
.byte 0x11
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a2"
.2byte 0x165
.byte 0x11
.4byte 0x66
.uleb128 0x1
.byte 0x5c
.uleb128 0x1
.string "_a7"
.2byte 0x165
.byte 0x11
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB36
.8byte .LBE36-.LBB36
.4byte 0x36c
.uleb128 0x3
.4byte .LASF22
.2byte 0x176
.byte 0x9
.4byte 0x66
.uleb128 0x3
.byte 0x91
.sleb128 -88
.uleb128 0x1
.string "_a0"
.2byte 0x176
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x176
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a2"
.2byte 0x176
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5c
.uleb128 0x1
.string "_a7"
.2byte 0x176
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB37
.8byte .LBE37-.LBB37
.4byte 0x39e
.uleb128 0x3
.4byte .LASF23
.2byte 0x1b5
.byte 0xf
.4byte 0x66
.uleb128 0x1
.byte 0x61
.uleb128 0x3
.4byte .LASF22
.2byte 0x1b5
.byte 0xf
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.byte 0
.uleb128 0x6
.8byte .LBB38
.8byte .LBE38-.LBB38
.uleb128 0x1
.string "k"
.2byte 0x1bb
.byte 0x15
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -44
.uleb128 0x6
.8byte .LBB39
.8byte .LBE39-.LBB39
.uleb128 0x3
.4byte .LASF19
.2byte 0x1bc
.byte 0x15
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -436
.uleb128 0x6
.8byte .LBB40
.8byte .LBE40-.LBB40
.uleb128 0x1
.string "_a0"
.2byte 0x1bd
.byte 0x11
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.2byte 0x1bd
.byte 0x11
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.byte 0
.byte 0
.byte 0
.uleb128 0xa
.4byte 0xdd
.4byte 0x41e
.uleb128 0x8
.4byte 0x6d
.byte 0x3f
.byte 0
.uleb128 0xa
.4byte 0xdd
.4byte 0x42e
.uleb128 0x8
.4byte 0x6d
.byte 0x1
.byte 0
.uleb128 0xa
.4byte 0x9e
.4byte 0x43e
.uleb128 0x8
.4byte 0x6d
.byte 0x4
.byte 0
.uleb128 0xe
.4byte 0xdd
.uleb128 0x10
.4byte .LASF25
.2byte 0x117
.4byte 0x3c
.8byte .LFB7
.8byte .LFE7-.LFB7
.uleb128 0x1
.byte 0x9c
.4byte 0x7b9
.uleb128 0x18
.string "n"
.byte 0x1
.2byte 0x117
.byte 0x15
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -100
.uleb128 0x12
.4byte .LASF20
.byte 0x29
.4byte 0x7b9
.uleb128 0x3
.byte 0x91
.sleb128 -112
.uleb128 0x12
.4byte .LASF21
.byte 0x37
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -104
.uleb128 0x3
.4byte .LASF26
.2byte 0x119
.byte 0x9
.4byte 0x7be
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0x3
.4byte .LASF27
.2byte 0x11b
.byte 0x16
.4byte 0x7b9
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x2
.8byte .LBB17
.8byte .LBE17-.LBB17
.4byte 0x54e
.uleb128 0x3
.4byte .LASF14
.2byte 0x11e
.byte 0xf
.4byte 0x7ce
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x1
.string "j"
.2byte 0x11f
.byte 0xd
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x2
.8byte .LBB18
.8byte .LBE18-.LBB18
.4byte 0x51f
.uleb128 0x1
.string "_a0"
.2byte 0x125
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x125
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a7"
.2byte 0x125
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x6
.8byte .LBB19
.8byte .LBE19-.LBB19
.uleb128 0x1
.string "_a0"
.2byte 0x126
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.2byte 0x126
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.byte 0
.uleb128 0x6
.8byte .LBB20
.8byte .LBE20-.LBB20
.uleb128 0x1
.string "pid"
.2byte 0x12b
.byte 0xd
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0x2
.8byte .LBB21
.8byte .LBE21-.LBB21
.4byte 0x5a0
.uleb128 0x1
.string "_a0"
.2byte 0x129
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.2byte 0x129
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB22
.8byte .LBE22-.LBB22
.4byte 0x5d2
.uleb128 0x3
.4byte .LASF23
.2byte 0x12b
.byte 0x13
.4byte 0x66
.uleb128 0x1
.byte 0x61
.uleb128 0x3
.4byte .LASF22
.2byte 0x12b
.byte 0x13
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.byte 0
.uleb128 0x2
.8byte .LBB23
.8byte .LBE23-.LBB23
.4byte 0x612
.uleb128 0x1
.string "_a0"
.2byte 0x12e
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x12e
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a7"
.2byte 0x12e
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB24
.8byte .LBE24-.LBB24
.4byte 0x644
.uleb128 0x1
.string "_a0"
.2byte 0x12f
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.2byte 0x12f
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB25
.8byte .LBE25-.LBB25
.4byte 0x676
.uleb128 0x1
.string "_a0"
.2byte 0x130
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.2byte 0x130
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x6
.8byte .LBB26
.8byte .LBE26-.LBB26
.uleb128 0x3
.4byte .LASF14
.2byte 0x138
.byte 0x13
.4byte 0x7ce
.uleb128 0x3
.byte 0x91
.sleb128 -96
.uleb128 0x1
.string "j"
.2byte 0x139
.byte 0x11
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x2
.8byte .LBB27
.8byte .LBE27-.LBB27
.4byte 0x6d6
.uleb128 0x1
.string "_a0"
.2byte 0x134
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.2byte 0x134
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB28
.8byte .LBE28-.LBB28
.4byte 0x716
.uleb128 0x1
.string "_a0"
.2byte 0x135
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x135
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a7"
.2byte 0x135
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB29
.8byte .LBE29-.LBB29
.4byte 0x748
.uleb128 0x1
.string "_a0"
.2byte 0x136
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.2byte 0x136
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x2
.8byte .LBB30
.8byte .LBE30-.LBB30
.4byte 0x788
.uleb128 0x1
.string "_a0"
.2byte 0x13f
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.2byte 0x13f
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a7"
.2byte 0x13f
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x6
.8byte .LBB31
.8byte .LBE31-.LBB31
.uleb128 0x1
.string "_a0"
.2byte 0x140
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.2byte 0x140
.byte 0xd
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.byte 0
.byte 0
.byte 0
.uleb128 0xe
.4byte 0x9e
.uleb128 0xa
.4byte 0x3c
.4byte 0x7ce
.uleb128 0x8
.4byte 0x6d
.byte 0x1
.byte 0
.uleb128 0xa
.4byte 0x43e
.4byte 0x7de
.uleb128 0x8
.4byte 0x6d
.byte 0x2
.byte 0
.uleb128 0xc
.4byte .LASF31
.byte 0xd6
.8byte .LFB6
.8byte .LFE6-.LFB6
.uleb128 0x1
.byte 0x9c
.4byte 0x807
.uleb128 0xb
.string "s"
.byte 0xd6
.byte 0x17
.4byte 0x807
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0xe
.4byte 0xe4
.uleb128 0x19
.4byte .LASF28
.byte 0x1
.byte 0x75
.byte 0x5
.4byte 0x3c
.8byte .LFB5
.8byte .LFE5-.LFB5
.uleb128 0x1
.byte 0x9c
.4byte 0x9e3
.uleb128 0xb
.string "fmt"
.byte 0x75
.byte 0x18
.4byte 0x807
.uleb128 0x3
.byte 0x91
.sleb128 -184
.uleb128 0x1a
.uleb128 0x4
.string "ap"
.byte 0x76
.byte 0xd
.4byte 0x86
.uleb128 0x3
.byte 0x91
.sleb128 -168
.uleb128 0x4
.string "p"
.byte 0x79
.byte 0x11
.4byte 0x807
.uleb128 0x3
.byte 0x91
.sleb128 -88
.uleb128 0x2
.8byte .LBB6
.8byte .LBE6-.LBB6
.4byte 0x931
.uleb128 0x5
.4byte .LASF29
.byte 0x83
.byte 0x11
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -92
.uleb128 0x2
.8byte .LBB7
.8byte .LBE7-.LBB7
.4byte 0x8a2
.uleb128 0x5
.4byte .LASF30
.byte 0x8d
.byte 0x27
.4byte 0x6d
.uleb128 0x3
.byte 0x91
.sleb128 -144
.byte 0
.uleb128 0x2
.8byte .LBB8
.8byte .LBE8-.LBB8
.4byte 0x8c7
.uleb128 0x5
.4byte .LASF30
.byte 0x90
.byte 0x2c
.4byte 0x5f
.uleb128 0x3
.byte 0x91
.sleb128 -136
.byte 0
.uleb128 0x2
.8byte .LBB9
.8byte .LBE9-.LBB9
.4byte 0x8ec
.uleb128 0x5
.4byte .LASF30
.byte 0x97
.byte 0x1e
.4byte 0x66
.uleb128 0x3
.byte 0x91
.sleb128 -160
.byte 0
.uleb128 0x2
.8byte .LBB10
.8byte .LBE10-.LBB10
.4byte 0x911
.uleb128 0x5
.4byte .LASF30
.byte 0x9a
.byte 0x23
.4byte 0x43
.uleb128 0x3
.byte 0x91
.sleb128 -152
.byte 0
.uleb128 0x6
.8byte .LBB11
.8byte .LBE11-.LBB11
.uleb128 0x4
.string "i"
.byte 0xa1
.byte 0x1e
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -96
.byte 0
.byte 0
.uleb128 0x2
.8byte .LBB12
.8byte .LBE12-.LBB12
.4byte 0x956
.uleb128 0x5
.4byte .LASF30
.byte 0xa9
.byte 0x19
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -124
.byte 0
.uleb128 0x2
.8byte .LBB13
.8byte .LBE13-.LBB13
.4byte 0x97b
.uleb128 0x5
.4byte .LASF30
.byte 0xae
.byte 0x22
.4byte 0x58
.uleb128 0x3
.byte 0x91
.sleb128 -108
.byte 0
.uleb128 0x2
.8byte .LBB14
.8byte .LBE14-.LBB14
.4byte 0x9a0
.uleb128 0x5
.4byte .LASF30
.byte 0xb3
.byte 0x23
.4byte 0x6d
.uleb128 0x3
.byte 0x91
.sleb128 -120
.byte 0
.uleb128 0x2
.8byte .LBB15
.8byte .LBE15-.LBB15
.4byte 0x9c3
.uleb128 0x4
.string "s"
.byte 0xb9
.byte 0x21
.4byte 0x807
.uleb128 0x3
.byte 0x91
.sleb128 -104
.byte 0
.uleb128 0x6
.8byte .LBB16
.8byte .LBE16-.LBB16
.uleb128 0x4
.string "c"
.byte 0xbf
.byte 0x1a
.4byte 0xdd
.uleb128 0x3
.byte 0x91
.sleb128 -125
.byte 0
.byte 0
.uleb128 0xc
.4byte .LASF32
.byte 0x57
.8byte .LFB4
.8byte .LFE4-.LFB4
.uleb128 0x1
.byte 0x9c
.4byte 0xa76
.uleb128 0x9
.4byte .LASF30
.byte 0x57
.byte 0x27
.4byte 0x5f
.uleb128 0x3
.byte 0x91
.sleb128 -104
.uleb128 0x9
.4byte .LASF33
.byte 0x57
.byte 0x31
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -108
.uleb128 0x9
.4byte .LASF34
.byte 0x57
.byte 0x3b
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -112
.uleb128 0x4
.string "buf"
.byte 0x58
.byte 0xa
.4byte 0xa76
.uleb128 0x3
.byte 0x91
.sleb128 -96
.uleb128 0x4
.string "i"
.byte 0x59
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x5
.4byte .LASF35
.byte 0x5a
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x6
.8byte .LBB5
.8byte .LBE5-.LBB5
.uleb128 0x5
.4byte .LASF36
.byte 0x67
.byte 0xd
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -28
.byte 0
.byte 0
.uleb128 0xa
.4byte 0xdd
.4byte 0xa86
.uleb128 0x8
.4byte 0x6d
.byte 0x40
.byte 0
.uleb128 0xc
.4byte .LASF37
.byte 0x39
.8byte .LFB3
.8byte .LFE3-.LFB3
.uleb128 0x1
.byte 0x9c
.4byte 0xb19
.uleb128 0x9
.4byte .LASF30
.byte 0x39
.byte 0x1e
.4byte 0x6d
.uleb128 0x3
.byte 0x91
.sleb128 -104
.uleb128 0x9
.4byte .LASF33
.byte 0x39
.byte 0x28
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -108
.uleb128 0x9
.4byte .LASF34
.byte 0x39
.byte 0x32
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -112
.uleb128 0x4
.string "buf"
.byte 0x3a
.byte 0xa
.4byte 0xa76
.uleb128 0x3
.byte 0x91
.sleb128 -96
.uleb128 0x4
.string "i"
.byte 0x3b
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x5
.4byte .LASF35
.byte 0x3c
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x6
.8byte .LBB4
.8byte .LBE4-.LBB4
.uleb128 0x5
.4byte .LASF36
.byte 0x49
.byte 0xd
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -28
.byte 0
.byte 0
.uleb128 0xc
.4byte .LASF38
.byte 0x18
.8byte .LFB2
.8byte .LFE2-.LFB2
.uleb128 0x1
.byte 0x9c
.4byte 0xbba
.uleb128 0x9
.4byte .LASF30
.byte 0x18
.byte 0x13
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -84
.uleb128 0x9
.4byte .LASF33
.byte 0x18
.byte 0x1d
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -88
.uleb128 0x9
.4byte .LASF34
.byte 0x18
.byte 0x27
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -92
.uleb128 0x4
.string "buf"
.byte 0x19
.byte 0xa
.4byte 0xbba
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x4
.string "i"
.byte 0x1a
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x5
.4byte .LASF35
.byte 0x1b
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x5
.4byte .LASF39
.byte 0x1c
.byte 0x12
.4byte 0x58
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x6
.8byte .LBB3
.8byte .LBE3-.LBB3
.uleb128 0x5
.4byte .LASF36
.byte 0x2b
.byte 0xd
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -32
.byte 0
.byte 0
.uleb128 0xa
.4byte 0xdd
.4byte 0xbca
.uleb128 0x8
.4byte 0x6d
.byte 0x20
.byte 0
.uleb128 0x1b
.4byte .LASF40
.byte 0x1
.byte 0x10
.byte 0x6
.8byte .LFB1
.8byte .LFE1-.LFB1
.uleb128 0x1
.byte 0x9c
.4byte 0xc57
.uleb128 0xb
.string "c"
.byte 0x10
.byte 0x13
.4byte 0xdd
.uleb128 0x2
.byte 0x91
.sleb128 -33
.uleb128 0x4
.string "buf"
.byte 0x12
.byte 0xa
.4byte 0x41e
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x6
.8byte .LBB2
.8byte .LBE2-.LBB2
.uleb128 0x5
.4byte .LASF22
.byte 0x15
.byte 0x5
.4byte 0x66
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x4
.string "_a0"
.byte 0x15
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x4
.string "_a1"
.byte 0x15
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x4
.string "_a2"
.byte 0x15
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5c
.uleb128 0x4
.string "_a7"
.byte 0x15
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.byte 0
.uleb128 0x1c
.4byte .LASF41
.byte 0x1
.byte 0x5
.byte 0x7
.4byte 0x43e
.8byte .LFB0
.8byte .LFE0-.LFB0
.uleb128 0x1
.byte 0x9c
.uleb128 0xb
.string "s"
.byte 0x5
.byte 0x15
.4byte 0x43e
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0xb
.string "t"
.byte 0x5
.byte 0x24
.4byte 0x807
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0xb
.string "n"
.byte 0x5
.byte 0x2b
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -52
.uleb128 0x4
.string "os"
.byte 0x6
.byte 0x9
.4byte 0x43e
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.byte 0
.section .debug_abbrev,"",@progbits
.Ldebug_abbrev0:
.uleb128 0x1
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x2
.uleb128 0xb
.byte 0x1
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x3
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x4
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x5
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x6
.uleb128 0xb
.byte 0x1
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.byte 0
.byte 0
.uleb128 0x7
.uleb128 0x24
.byte 0
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3e
.uleb128 0xb
.uleb128 0x3
.uleb128 0xe
.byte 0
.byte 0
.uleb128 0x8
.uleb128 0x21
.byte 0
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2f
.uleb128 0xb
.byte 0
.byte 0
.uleb128 0x9
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0xa
.uleb128 0x1
.byte 0x1
.uleb128 0x49
.uleb128 0x13
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xb
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0xc
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0x21
.sleb128 6
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xd
.uleb128 0x16
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xe
.uleb128 0xf
.byte 0
.uleb128 0xb
.uleb128 0x21
.sleb128 8
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xf
.uleb128 0xd
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x38
.uleb128 0xb
.byte 0
.byte 0
.uleb128 0x10
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0x21
.sleb128 5
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x11
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0x21
.sleb128 9
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x12
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x21
.sleb128 279
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x13
.uleb128 0x11
.byte 0x1
.uleb128 0x25
.uleb128 0xe
.uleb128 0x13
.uleb128 0xb
.uleb128 0x3
.uleb128 0x1f
.uleb128 0x1b
.uleb128 0x1f
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x10
.uleb128 0x17
.byte 0
.byte 0
.uleb128 0x14
.uleb128 0x24
.byte 0
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3e
.uleb128 0xb
.uleb128 0x3
.uleb128 0x8
.byte 0
.byte 0
.uleb128 0x15
.uleb128 0xf
.byte 0
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3
.uleb128 0xe
.byte 0
.byte 0
.uleb128 0x16
.uleb128 0x13
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x17
.uleb128 0x26
.byte 0
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x18
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x19
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x1a
.uleb128 0x18
.byte 0
.byte 0
.byte 0
.uleb128 0x1b
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x1c
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.byte 0
.byte 0
.byte 0
.section .debug_aranges,"",@progbits
.4byte 0x2c
.2byte 0x2
.4byte .Ldebug_info0
.byte 0x8
.byte 0
.2byte 0
.2byte 0
.8byte .Ltext0
.8byte .Letext0-.Ltext0
.8byte 0
.8byte 0
.section .debug_line,"",@progbits
.Ldebug_line0:
.section .debug_str,"MS",@progbits,1
.LASF21:
.string "num_commands"
.LASF17:
.string "buf2"
.LASF44:
.string "sCommand"
.LASF43:
.string "__builtin_va_list"
.LASF23:
.string "_num"
.LASF15:
.string "num_arg"
.LASF27:
.string "command"
.LASF37:
.string "printlong"
.LASF13:
.string "pid_t"
.LASF11:
.string "__gnuc_va_list"
.LASF5:
.string "unsigned char"
.LASF42:
.string "GNU C17 14.2.0 -mcmodel=medany -mtune=rocket -mabi=lp64d -misa-spec=20191213 -march=rv64imafdc_zicsr -g -O0 -fno-omit-frame-pointer"
.LASF22:
.string "_ret"
.LASF10:
.string "long unsigned int"
.LASF6:
.string "short unsigned int"
.LASF40:
.string "putchar"
.LASF12:
.string "va_list"
.LASF41:
.string "strncpy"
.LASF29:
.string "lcount"
.LASF24:
.string "main"
.LASF25:
.string "run_command"
.LASF26:
.string "pipes"
.LASF38:
.string "printint"
.LASF33:
.string "base"
.LASF7:
.string "unsigned int"
.LASF36:
.string "digit"
.LASF8:
.string "long long unsigned int"
.LASF39:
.string "uval"
.LASF18:
.string "argc"
.LASF4:
.string "long long int"
.LASF16:
.string "char"
.LASF28:
.string "printf"
.LASF3:
.string "short int"
.LASF32:
.string "printlonglong"
.LASF14:
.string "argv"
.LASF30:
.string "val_"
.LASF9:
.string "long int"
.LASF34:
.string "sign"
.LASF19:
.string "status"
.LASF31:
.string "puts"
.LASF2:
.string "signed char"
.LASF20:
.string "commands"
.LASF35:
.string "negative"
.section .debug_line_str,"MS",@progbits,1
.LASF1:
.string "/Users/ab25cq/comelang/minux9"
.LASF0:
.string "msh.c"
.ident "GCC: (g04696df09) 14.2.0"
.section .note.GNU-stack,"",@progbits
|
ab25cq/comelang
| 130,915
|
minux9/fs.S
|
.file "fs.c"
.option nopic
.attribute arch, "rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0_zifencei2p0"
.attribute unaligned_access, 0
.attribute stack_align, 16
.text
.Ltext0:
.cfi_sections .debug_frame
.file 0 "/Users/ab25cq/comelang/minux9" "fs.c"
.local vbase
.comm vbase,8,8
.local Q
.comm Q,4,4
.local desc
.comm desc,8,8
.local avail
.comm avail,8,8
.local used
.comm used,8,8
.local vq_area
.comm vq_area,8,8
.local aidx
.comm aidx,2,2
.local status_byte
.comm status_byte,1,4
.local dma_buf
.comm dma_buf,512,512
.align 1
.type mw32, @function
mw32:
.LFB0:
.file 1 "fs.c"
.loc 1 105 59
.cfi_startproc
addi sp,sp,-32
.cfi_def_cfa_offset 32
sd ra,24(sp)
sd s0,16(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,32
.cfi_def_cfa 8, 0
sd a0,-24(s0)
mv a5,a1
mv a4,a2
sw a5,-28(s0)
mv a5,a4
sw a5,-32(s0)
.loc 1 105 84
lwu a4,-28(s0)
ld a5,-24(s0)
add a5,a4,a5
.loc 1 105 62
mv a4,a5
.loc 1 105 87
lw a5,-32(s0)
sw a5,0(a4)
.loc 1 105 91
nop
ld ra,24(sp)
.cfi_restore 1
ld s0,16(sp)
.cfi_restore 8
.cfi_def_cfa 2, 32
addi sp,sp,32
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE0:
.size mw32, .-mw32
.align 1
.type mr32, @function
mr32:
.LFB1:
.loc 1 106 60
.cfi_startproc
addi sp,sp,-32
.cfi_def_cfa_offset 32
sd ra,24(sp)
sd s0,16(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,32
.cfi_def_cfa 8, 0
sd a0,-24(s0)
mv a5,a1
sw a5,-28(s0)
.loc 1 106 92
lwu a4,-28(s0)
ld a5,-24(s0)
add a5,a4,a5
.loc 1 106 69
lw a5,0(a5)
sext.w a5,a5
.loc 1 106 97
mv a0,a5
ld ra,24(sp)
.cfi_restore 1
ld s0,16(sp)
.cfi_restore 8
.cfi_def_cfa 2, 32
addi sp,sp,32
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE1:
.size mr32, .-mr32
.section .rodata
.align 3
.LC0:
.string "virtio-blk not found\n"
.align 3
.LC1:
.string "not legacy mmio\n"
.align 3
.LC2:
.string "FEATURES_OK rejected\n"
.align 3
.LC3:
.string "unsupported queue size\n"
.align 3
.LC4:
.string "kalloc_pages failed\n"
.text
.align 1
.globl virtio_blk_init
.type virtio_blk_init, @function
virtio_blk_init:
.LFB2:
.loc 1 109 1
.cfi_startproc
addi sp,sp,-96
.cfi_def_cfa_offset 96
sd ra,88(sp)
sd s0,80(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,96
.cfi_def_cfa 8, 0
.LBB2:
.loc 1 110 13
sw zero,-20(s0)
.loc 1 110 5
j .L5
.L9:
.LBB3:
.loc 1 111 43
lw a4,-20(s0)
li a5,65536
addi a5,a5,1
add a5,a4,a5
.loc 1 111 18
slli a5,a5,12
sd a5,-32(s0)
.loc 1 112 12
li a1,0
ld a0,-32(s0)
call mr32
mv a5,a0
mv a4,a5
.loc 1 112 11 discriminator 1
li a5,1953656832
addi a5,a5,-1674
bne a4,a5,.L22
.loc 1 113 12
li a1,8
ld a0,-32(s0)
call mr32
mv a5,a0
mv a4,a5
.loc 1 113 11 discriminator 1
li a5,2
bne a4,a5,.L7
.loc 1 113 50 discriminator 1
lla a5,vbase
ld a4,-32(s0)
sd a4,0(a5)
.loc 1 113 58
j .L8
.L22:
.loc 1 112 47
nop
.L7:
.LBE3:
.loc 1 110 39 discriminator 2
lw a5,-20(s0)
addiw a5,a5,1
sw a5,-20(s0)
.L5:
.loc 1 110 19 discriminator 1
lw a5,-20(s0)
sext.w a4,a5
li a5,7
ble a4,a5,.L9
.L8:
.LBE2:
.loc 1 115 8
lla a5,vbase
ld a5,0(a5)
.loc 1 115 7
bne a5,zero,.L10
.loc 1 115 17 discriminator 1
lla a0,.LC0
call puts
.L11:
.loc 1 115 54
j .L11
.L10:
.loc 1 116 8
lla a5,vbase
ld a5,0(a5)
li a1,4
mv a0,a5
call mr32
mv a5,a0
mv a4,a5
.loc 1 116 7 discriminator 1
li a5,1
beq a4,a5,.L12
.loc 1 116 38 discriminator 1
lla a0,.LC1
call puts
.L13:
.loc 1 116 70
j .L13
.L12:
.loc 1 118 5
lla a5,vbase
ld a5,0(a5)
li a2,0
li a1,112
mv a0,a5
call mw32
.loc 1 119 5
lla a5,vbase
ld a5,0(a5)
li a2,3
li a1,112
mv a0,a5
call mw32
.loc 1 121 24
lla a5,vbase
ld a5,0(a5)
li a1,16
mv a0,a5
call mr32
mv a5,a0
sw a5,-36(s0)
.loc 1 123 5
lla a5,vbase
ld a5,0(a5)
li a2,0
li a1,32
mv a0,a5
call mw32
.loc 1 125 19
lla a5,vbase
ld a5,0(a5)
li a1,112
mv a0,a5
call mr32
mv a5,a0
.loc 1 125 14 discriminator 1
ori a5,a5,8
sw a5,-40(s0)
.loc 1 126 5
lla a5,vbase
ld a5,0(a5)
lw a4,-40(s0)
mv a2,a4
li a1,112
mv a0,a5
call mw32
.loc 1 127 10
lla a5,vbase
ld a5,0(a5)
li a1,112
mv a0,a5
call mr32
mv a5,a0
.loc 1 127 32 discriminator 1
andi a5,a5,8
sext.w a5,a5
.loc 1 127 7 discriminator 1
bne a5,zero,.L14
.loc 1 128 9
lla a0,.LC2
call puts
.L15:
.loc 1 128 46
j .L15
.L14:
.loc 1 131 5
lla a5,vbase
ld a5,0(a5)
li a2,4096
li a1,40
mv a0,a5
call mw32
.loc 1 132 5
lla a5,vbase
ld a5,0(a5)
li a2,0
li a1,48
mv a0,a5
call mw32
.loc 1 133 21
lla a5,vbase
ld a5,0(a5)
li a1,52
mv a0,a5
call mr32
mv a5,a0
sw a5,-44(s0)
.loc 1 134 14
lw a5,-44(s0)
sext.w a3,a5
li a4,128
bleu a3,a4,.L16
li a5,128
.L16:
sw a5,-48(s0)
.loc 1 135 5
lla a5,vbase
ld a5,0(a5)
lw a4,-48(s0)
mv a2,a4
li a1,56
mv a0,a5
call mw32
.loc 1 136 7
lla a5,Q
lw a4,-48(s0)
sw a4,0(a5)
.loc 1 137 8
lla a5,Q
lw a5,0(a5)
.loc 1 137 7
beq a5,zero,.L17
.loc 1 137 16 discriminator 2
lla a5,Q
lw a4,0(a5)
.loc 1 137 11 discriminator 2
li a5,1024
bleu a4,a5,.L18
.L17:
.loc 1 137 26 discriminator 3
lla a0,.LC3
call puts
.L19:
.loc 1 137 65
j .L19
.L18:
.loc 1 139 29
lla a5,Q
lw a5,0(a5)
slli a5,a5,32
srli a5,a5,32
.loc 1 139 12
slli a5,a5,4
sd a5,-56(s0)
.loc 1 140 29
lla a5,Q
lw a5,0(a5)
slli a5,a5,32
srli a5,a5,32
addi a5,a5,2
.loc 1 140 12
slli a5,a5,1
sd a5,-64(s0)
.loc 1 141 36
lla a5,Q
lw a5,0(a5)
slli a5,a5,32
srli a5,a5,32
slli a5,a5,3
.loc 1 141 12
addi a5,a5,4
sd a5,-72(s0)
.loc 1 142 31
ld a4,-56(s0)
ld a5,-64(s0)
add a5,a4,a5
.loc 1 142 12
ld a4,-72(s0)
add a5,a4,a5
sd a5,-80(s0)
.loc 1 143 33
ld a4,-80(s0)
li a5,4096
addi a5,a5,-1
add a5,a4,a5
.loc 1 143 12
srli a5,a5,12
sd a5,-88(s0)
.loc 1 145 15
ld a0,-88(s0)
call kalloc_pages
mv a4,a0
.loc 1 145 13 discriminator 1
lla a5,vq_area
sd a4,0(a5)
.loc 1 146 9
lla a5,vq_area
ld a5,0(a5)
.loc 1 146 8
bne a5,zero,.L20
.loc 1 146 21 discriminator 1
lla a0,.LC4
call puts
.L21:
.loc 1 146 57
j .L21
.L20:
.loc 1 147 5
lla a5,vq_area
ld a4,0(a5)
ld a5,-88(s0)
sext.w a5,a5
slliw a5,a5,12
sext.w a5,a5
mv a2,a5
li a1,0
mv a0,a4
call memset
.loc 1 149 11
lla a5,vq_area
ld a4,0(a5)
lla a5,desc
sd a4,0(a5)
.loc 1 150 44
lla a5,vq_area
ld a4,0(a5)
ld a5,-56(s0)
add a4,a4,a5
.loc 1 150 11
lla a5,avail
sd a4,0(a5)
.loc 1 151 54
lla a5,vq_area
ld a4,0(a5)
ld a3,-56(s0)
ld a5,-64(s0)
add a5,a3,a5
add a4,a4,a5
.loc 1 151 11
lla a5,used
sd a4,0(a5)
.loc 1 153 24
lla a5,vq_area
ld a5,0(a5)
.loc 1 153 14
sd a5,-96(s0)
.loc 1 154 5
lla a5,vbase
ld a5,0(a5)
li a2,4096
li a1,56
mv a0,a5
call mw32
.loc 1 155 5
lla a5,vbase
ld a4,0(a5)
.loc 1 155 47
ld a5,-96(s0)
srli a5,a5,12
.loc 1 155 5
sext.w a5,a5
mv a2,a5
li a1,64
mv a0,a4
call mw32
.loc 1 157 5
lla a5,vbase
ld a5,0(a5)
lw a4,-40(s0)
ori a4,a4,4
sext.w a4,a4
mv a2,a4
li a1,112
mv a0,a5
call mw32
.loc 1 158 1
nop
ld ra,88(sp)
.cfi_restore 1
ld s0,80(sp)
.cfi_restore 8
.cfi_def_cfa 2, 96
addi sp,sp,96
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE2:
.size virtio_blk_init, .-virtio_blk_init
.align 1
.type set_flags, @function
set_flags:
.LFB3:
.loc 1 160 66
.cfi_startproc
addi sp,sp,-32
.cfi_def_cfa_offset 32
sd ra,24(sp)
sd s0,16(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,32
.cfi_def_cfa 8, 0
mv a5,a0
mv a3,a1
mv a4,a2
sh a5,-18(s0)
mv a5,a3
sh a5,-20(s0)
mv a5,a4
sh a5,-22(s0)
.loc 1 161 9
lla a5,desc
ld a5,0(a5)
.loc 1 161 19
lhu a4,-18(s0)
andi a4,a4,255
lbu a3,12(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,12(a5)
lhu a4,-18(s0)
srliw a4,a4,8
slli a4,a4,48
srli a4,a4,48
lbu a3,13(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,13(a5)
.loc 1 162 9
lla a5,desc
ld a5,0(a5)
addi a5,a5,16
.loc 1 162 19
lhu a4,-20(s0)
andi a4,a4,255
lbu a3,12(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,12(a5)
lhu a4,-20(s0)
srliw a4,a4,8
slli a4,a4,48
srli a4,a4,48
lbu a3,13(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,13(a5)
.loc 1 163 9
lla a5,desc
ld a5,0(a5)
addi a5,a5,32
.loc 1 163 19
lhu a4,-22(s0)
andi a4,a4,255
lbu a3,12(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,12(a5)
lhu a4,-22(s0)
srliw a4,a4,8
slli a4,a4,48
srli a4,a4,48
lbu a3,13(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,13(a5)
.loc 1 164 1
nop
ld ra,24(sp)
.cfi_restore 1
ld s0,16(sp)
.cfi_restore 8
.cfi_def_cfa 2, 32
addi sp,sp,32
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE3:
.size set_flags, .-set_flags
.local req
.comm req,16,8
.align 1
.type virtio_blk_read, @function
virtio_blk_read:
.LFB4:
.loc 1 169 1
.cfi_startproc
addi sp,sp,-32
.cfi_def_cfa_offset 32
sd ra,24(sp)
sd s0,16(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,32
.cfi_def_cfa 8, 0
sd a0,-24(s0)
sd a1,-32(s0)
.loc 1 170 14
lla a5,req
sw zero,0(a5)
.loc 1 171 16
lla a5,req
ld a4,-24(s0)
sd a4,8(a5)
.loc 1 172 18
lla a5,req
sw zero,4(a5)
.loc 1 173 17
lla a5,status_byte
li a4,-1
sb a4,0(a5)
.loc 1 175 9
lla a5,desc
ld a5,0(a5)
.loc 1 175 20
lla a4,req
.loc 1 175 18
andi a3,a4,255
lbu a2,0(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,0(a5)
srli a3,a4,8
andi a3,a3,255
lbu a2,1(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,1(a5)
srli a3,a4,16
andi a3,a3,255
lbu a2,2(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,2(a5)
srli a3,a4,24
andi a3,a3,255
lbu a2,3(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,3(a5)
srli a3,a4,32
andi a3,a3,255
lbu a2,4(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,4(a5)
srli a3,a4,40
andi a3,a3,255
lbu a2,5(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,5(a5)
srli a3,a4,48
andi a3,a3,255
lbu a2,6(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,6(a5)
srli a4,a4,56
lbu a3,7(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,7(a5)
.loc 1 175 43
lla a5,desc
ld a5,0(a5)
.loc 1 175 51
lbu a4,8(a5)
andi a4,a4,0
ori a4,a4,16
sb a4,8(a5)
lbu a4,9(a5)
andi a4,a4,0
sb a4,9(a5)
lbu a4,10(a5)
andi a4,a4,0
sb a4,10(a5)
lbu a4,11(a5)
andi a4,a4,0
sb a4,11(a5)
.loc 1 175 73
lla a5,desc
ld a5,0(a5)
.loc 1 175 82
lbu a4,14(a5)
andi a4,a4,0
ori a4,a4,1
sb a4,14(a5)
lbu a4,15(a5)
andi a4,a4,0
sb a4,15(a5)
.loc 1 176 9
lla a5,desc
ld a5,0(a5)
addi a5,a5,16
.loc 1 176 20
lla a4,dma_buf
.loc 1 176 18
andi a3,a4,255
lbu a2,0(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,0(a5)
srli a3,a4,8
andi a3,a3,255
lbu a2,1(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,1(a5)
srli a3,a4,16
andi a3,a3,255
lbu a2,2(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,2(a5)
srli a3,a4,24
andi a3,a3,255
lbu a2,3(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,3(a5)
srli a3,a4,32
andi a3,a3,255
lbu a2,4(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,4(a5)
srli a3,a4,40
andi a3,a3,255
lbu a2,5(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,5(a5)
srli a3,a4,48
andi a3,a3,255
lbu a2,6(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,6(a5)
srli a4,a4,56
lbu a3,7(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,7(a5)
.loc 1 176 43
lla a5,desc
ld a5,0(a5)
addi a5,a5,16
.loc 1 176 51
lbu a4,8(a5)
andi a4,a4,0
sb a4,8(a5)
lbu a4,9(a5)
andi a4,a4,0
ori a4,a4,2
sb a4,9(a5)
lbu a4,10(a5)
andi a4,a4,0
sb a4,10(a5)
lbu a4,11(a5)
andi a4,a4,0
sb a4,11(a5)
.loc 1 176 73
lla a5,desc
ld a5,0(a5)
addi a5,a5,16
.loc 1 176 82
lbu a4,14(a5)
andi a4,a4,0
ori a4,a4,2
sb a4,14(a5)
lbu a4,15(a5)
andi a4,a4,0
sb a4,15(a5)
.loc 1 177 9
lla a5,desc
ld a5,0(a5)
addi a5,a5,32
.loc 1 177 20
lla a4,status_byte
.loc 1 177 18
andi a3,a4,255
lbu a2,0(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,0(a5)
srli a3,a4,8
andi a3,a3,255
lbu a2,1(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,1(a5)
srli a3,a4,16
andi a3,a3,255
lbu a2,2(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,2(a5)
srli a3,a4,24
andi a3,a3,255
lbu a2,3(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,3(a5)
srli a3,a4,32
andi a3,a3,255
lbu a2,4(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,4(a5)
srli a3,a4,40
andi a3,a3,255
lbu a2,5(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,5(a5)
srli a3,a4,48
andi a3,a3,255
lbu a2,6(a5)
andi a2,a2,0
or a3,a2,a3
sb a3,6(a5)
srli a4,a4,56
lbu a3,7(a5)
andi a3,a3,0
or a4,a3,a4
sb a4,7(a5)
.loc 1 177 48
lla a5,desc
ld a5,0(a5)
addi a5,a5,32
.loc 1 177 57
lbu a4,8(a5)
andi a4,a4,0
ori a4,a4,1
sb a4,8(a5)
lbu a4,9(a5)
andi a4,a4,0
sb a4,9(a5)
lbu a4,10(a5)
andi a4,a4,0
sb a4,10(a5)
lbu a4,11(a5)
andi a4,a4,0
sb a4,11(a5)
.loc 1 179 5
li a2,2
li a1,3
li a0,1
call set_flags
.loc 1 181 10
lla a5,avail
ld a4,0(a5)
.loc 1 181 22
lla a5,aidx
lhu a5,0(a5)
sext.w a3,a5
lla a5,Q
lw a5,0(a5)
remuw a5,a3,a5
sext.w a5,a5
.loc 1 181 27
slli a5,a5,32
srli a5,a5,32
slli a5,a5,1
add a5,a4,a5
sh zero,4(a5)
.loc 1 182 5
fence rw,rw
.loc 1 183 18
lla a5,aidx
lhu a5,0(a5)
addiw a5,a5,1
slli a4,a5,48
srli a4,a4,48
.loc 1 183 16
lla a5,aidx
sh a4,0(a5)
.loc 1 183 10
lla a5,avail
ld a5,0(a5)
.loc 1 183 16
lla a4,aidx
lhu a4,0(a4)
sh a4,2(a5)
.loc 1 185 5
lla a5,vbase
ld a5,0(a5)
li a2,0
li a1,80
mv a0,a5
call mw32
.loc 1 187 10
j .L25
.L26:
.loc 1 188 9
fence rw,rw
.L25:
.loc 1 187 23
lla a5,status_byte
lbu a5,0(a5)
andi a5,a5,0xff
mv a4,a5
li a5,255
beq a4,a5,.L26
.loc 1 190 5
li a2,512
lla a1,dma_buf
ld a0,-32(s0)
call memcpy
.loc 1 191 1
nop
ld ra,24(sp)
.cfi_restore 1
ld s0,16(sp)
.cfi_restore 8
.cfi_def_cfa 2, 32
addi sp,sp,32
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE4:
.size virtio_blk_read, .-virtio_blk_read
.align 1
.globl read_block
.type read_block, @function
read_block:
.LFB5:
.loc 1 194 1
.cfi_startproc
addi sp,sp,-32
.cfi_def_cfa_offset 32
sd ra,24(sp)
sd s0,16(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,32
.cfi_def_cfa 8, 0
mv a5,a0
sd a1,-32(s0)
sw a5,-20(s0)
.loc 1 195 5
lwu a5,-20(s0)
ld a1,-32(s0)
mv a0,a5
call virtio_blk_read
.loc 1 196 1
nop
ld ra,24(sp)
.cfi_restore 1
ld s0,16(sp)
.cfi_restore 8
.cfi_def_cfa 2, 32
addi sp,sp,32
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE5:
.size read_block, .-read_block
.local sb
.comm sb,28,8
.local block_buf
.comm block_buf,512,8
.align 1
.globl read_superblock
.type read_superblock, @function
read_superblock:
.LFB6:
.loc 1 205 28
.cfi_startproc
addi sp,sp,-16
.cfi_def_cfa_offset 16
sd ra,8(sp)
sd s0,0(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,16
.cfi_def_cfa 8, 0
.loc 1 206 5
lla a1,block_buf
li a0,1
call read_block
.loc 1 207 5
li a2,28
lla a1,block_buf
lla a0,sb
call memcpy
.loc 1 208 1
nop
ld ra,8(sp)
.cfi_restore 1
ld s0,0(sp)
.cfi_restore 8
.cfi_def_cfa 2, 16
addi sp,sp,16
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE6:
.size read_superblock, .-read_superblock
.align 1
.globl read_inode
.type read_inode, @function
read_inode:
.LFB7:
.loc 1 210 53
.cfi_startproc
addi sp,sp,-64
.cfi_def_cfa_offset 64
sd ra,56(sp)
sd s0,48(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,64
.cfi_def_cfa 8, 0
mv a5,a0
sd a1,-64(s0)
sw a5,-52(s0)
.loc 1 211 8
lw a5,-52(s0)
sext.w a5,a5
beq a5,zero,.L30
.loc 1 211 30 discriminator 1
lla a5,sb
lw a5,8(a5)
.loc 1 211 18 discriminator 1
lw a4,-52(s0)
sext.w a4,a4
bleu a4,a5,.L31
.L30:
.loc 1 212 9
li a2,68
li a1,0
ld a0,-64(s0)
call memset
.loc 1 213 9
j .L29
.L31:
.loc 1 217 14
li a5,7
sw a5,-20(s0)
.loc 1 219 24
lla a5,sb
lw a4,20(a5)
.loc 1 219 46
lw a5,-52(s0)
addiw a5,a5,-1
sext.w a5,a5
.loc 1 219 51
lw a3,-20(s0)
divuw a5,a5,a3
sext.w a5,a5
.loc 1 219 14
addw a5,a4,a5
sw a5,-24(s0)
.loc 1 221 30
lw a5,-52(s0)
addiw a5,a5,-1
sext.w a5,a5
.loc 1 221 35
mv a4,a5
lw a5,-20(s0)
remuw a5,a4,a5
sext.w a5,a5
.loc 1 221 14
mv a4,a5
mv a5,a4
slliw a5,a5,4
addw a5,a5,a4
slliw a5,a5,2
sw a5,-28(s0)
.loc 1 223 5
lw a5,-24(s0)
lla a1,block_buf
mv a0,a5
call read_block
.loc 1 224 26
lwu a4,-28(s0)
.loc 1 224 20
lla a5,block_buf
add a5,a4,a5
sd a5,-40(s0)
.loc 1 225 11
ld a4,-64(s0)
ld a5,-40(s0)
mv a3,a5
li a5,68
mv a2,a5
mv a1,a3
mv a0,a4
call memcpy
.L29:
.loc 1 226 1
ld ra,56(sp)
.cfi_restore 1
ld s0,48(sp)
.cfi_restore 8
.cfi_def_cfa 2, 64
addi sp,sp,64
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE7:
.size read_inode, .-read_inode
.align 1
.type inode_blockno, @function
inode_blockno:
.LFB8:
.loc 1 228 64
.cfi_startproc
addi sp,sp,-96
.cfi_def_cfa_offset 96
sd ra,88(sp)
sd s0,80(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,96
.cfi_def_cfa 8, 0
sd a0,-88(s0)
mv a5,a1
sw a5,-92(s0)
.loc 1 229 8
lw a5,-92(s0)
sext.w a4,a5
li a5,11
bgtu a4,a5,.L34
.loc 1 231 25
ld a4,-88(s0)
lwu a5,-92(s0)
slli a5,a5,2
add a5,a4,a5
lw a5,12(a5)
j .L35
.L34:
.loc 1 232 15
lw a5,-92(s0)
sext.w a4,a5
li a5,139
bgtu a4,a5,.L36
.LBB4:
.loc 1 234 18
ld a5,-88(s0)
lw a5,60(a5)
sw a5,-60(s0)
.loc 1 235 12
lw a5,-60(s0)
sext.w a5,a5
bne a5,zero,.L37
.loc 1 235 39 discriminator 1
li a5,0
.loc 1 235 39 is_stmt 0
j .L35
.L37:
.loc 1 236 9 is_stmt 1
lw a5,-60(s0)
lla a1,block_buf
mv a0,a5
call read_block
.loc 1 237 19
lla a5,block_buf
sd a5,-72(s0)
.loc 1 238 28
lw a5,-92(s0)
addiw a5,a5,-12
sext.w a5,a5
slli a5,a5,32
srli a5,a5,32
.loc 1 238 23
slli a5,a5,2
ld a4,-72(s0)
add a5,a4,a5
lw a5,0(a5)
j .L35
.L36:
.LBE4:
.LBB5:
.loc 1 241 13
lw a5,-92(s0)
addiw a5,a5,-140
sw a5,-92(s0)
.loc 1 242 18
lw a5,-92(s0)
srliw a5,a5,7
sw a5,-20(s0)
.loc 1 243 18
lw a5,-92(s0)
andi a5,a5,127
sw a5,-24(s0)
.loc 1 245 18
ld a5,-88(s0)
lw a5,64(a5)
sw a5,-28(s0)
.loc 1 246 12
lw a5,-28(s0)
sext.w a5,a5
bne a5,zero,.L38
.loc 1 246 40 discriminator 1
li a5,0
.loc 1 246 40 is_stmt 0
j .L35
.L38:
.loc 1 248 9 is_stmt 1
lw a5,-28(s0)
lla a1,block_buf
mv a0,a5
call read_block
.loc 1 249 19
lla a5,block_buf
sd a5,-40(s0)
.loc 1 250 43
lwu a5,-20(s0)
slli a5,a5,2
ld a4,-40(s0)
add a5,a4,a5
.loc 1 250 18
lw a5,0(a5)
sw a5,-44(s0)
.loc 1 251 12
lw a5,-44(s0)
sext.w a5,a5
bne a5,zero,.L39
.loc 1 251 36 discriminator 1
li a5,0
.loc 1 251 36 is_stmt 0
j .L35
.L39:
.loc 1 254 9 is_stmt 1
lw a5,-44(s0)
lla a1,block_buf
mv a0,a5
call read_block
.loc 1 255 19
lla a5,block_buf
sd a5,-56(s0)
.loc 1 256 30
lwu a5,-24(s0)
slli a5,a5,2
ld a4,-56(s0)
add a5,a4,a5
lw a5,0(a5)
.L35:
.LBE5:
.loc 1 258 1
mv a0,a5
ld ra,88(sp)
.cfi_restore 1
ld s0,80(sp)
.cfi_restore 8
.cfi_def_cfa 2, 96
addi sp,sp,96
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE8:
.size inode_blockno, .-inode_blockno
	.align	1
	.globl	read_data
	.type	read_data, @function
# read_data -- copy a byte range of an inode's file data into a buffer.
# GCC -O0 RV64 output: every local lives on the stack frame (s0 = frame ptr).
# In:  a0 = inode*         -> -56(s0)
#      a1 = file offset    -> -60(s0)   (u32)
#      a2 = dst buffer     -> -72(s0)
#      a3 = byte count     -> -64(s0)   (u32)
# Locals: -20 = bytes remaining, -24 = current file offset, -32 = dst cursor,
#         -36 = chunk size this iteration, -40 = file block index,
#         -44 = offset within the 512-byte block, -48 = disk block number.
# Blocks are 512 bytes; a zero block number from inode_blockno is treated as
# a hole and the destination is zero-filled instead of read.
# Clobbers: a0-a5 and whatever memset/memcpy/read_block/inode_blockno clobber.
read_data:
.LFB9:
	.loc 1 260 81
	.cfi_startproc
	addi	sp,sp,-80
	.cfi_def_cfa_offset 80
	sd	ra,72(sp)
	sd	s0,64(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,80
	.cfi_def_cfa 8, 0
	sd	a0,-56(s0)
	mv	a5,a1
	sd	a2,-72(s0)
	mv	a4,a3
	sw	a5,-60(s0)
	mv	a5,a4
	sw	a5,-64(s0)
	.loc 1 261 14
	lw	a5,-64(s0)
	sw	a5,-20(s0)
	.loc 1 262 14
	lw	a5,-60(s0)
	sw	a5,-24(s0)
	.loc 1 263 14
	ld	a5,-72(s0)
	sd	a5,-32(s0)
	.loc 1 265 11
	j	.L41
.L45:
.LBB6:
	.loc 1 266 18
	# block index = offset / 512
	lw	a5,-24(s0)
	srliw	a5,a5,9
	sw	a5,-40(s0)
	.loc 1 267 18
	# byte offset within that block = offset % 512
	lw	a5,-24(s0)
	andi	a5,a5,511
	sw	a5,-44(s0)
	.loc 1 268 18
	# chunk = bytes left in this block ...
	li	a5,512
	lw	a4,-44(s0)
	subw	a5,a5,a4
	sw	a5,-36(s0)
	.loc 1 269 12
	lw	a5,-36(s0)
	mv	a4,a5
	lw	a5,-20(s0)
	sext.w	a4,a4
	sext.w	a5,a5
	bleu	a4,a5,.L42
	.loc 1 269 37 discriminator 1
	# ... clamped to the bytes still remaining (unsigned compare)
	lw	a5,-20(s0)
	sw	a5,-36(s0)
.L42:
	.loc 1 271 29
	lw	a5,-40(s0)
	mv	a1,a5
	ld	a0,-56(s0)
	call	inode_blockno
	mv	a5,a0
	sw	a5,-48(s0)
	.loc 1 272 12
	lw	a5,-48(s0)
	sext.w	a5,a5
	bne	a5,zero,.L43
	.loc 1 273 13
	# hole (blockno == 0): zero-fill the destination instead of reading disk
	lw	a5,-36(s0)
	mv	a2,a5
	li	a1,0
	ld	a0,-32(s0)
	call	memset
	j	.L44
.L43:
	.loc 1 275 13
	lw	a5,-48(s0)
	lla	a1,block_buf
	mv	a0,a5
	call	read_block
	.loc 1 276 13
	# memcpy(dst, block_buf + in-block offset, chunk)
	lwu	a4,-44(s0)
	lla	a5,block_buf
	add	a5,a4,a5
	lw	a4,-36(s0)
	mv	a2,a4
	mv	a1,a5
	ld	a0,-32(s0)
	call	memcpy
.L44:
	.loc 1 278 14
	# remaining -= chunk
	lw	a5,-20(s0)
	mv	a4,a5
	lw	a5,-36(s0)
	subw	a5,a4,a5
	sw	a5,-20(s0)
	.loc 1 279 14
	# offset += chunk
	lw	a5,-24(s0)
	mv	a4,a5
	lw	a5,-36(s0)
	addw	a5,a4,a5
	sw	a5,-24(s0)
	.loc 1 280 14
	# dst += chunk
	lwu	a5,-36(s0)
	ld	a4,-32(s0)
	add	a5,a4,a5
	sd	a5,-32(s0)
.L41:
.LBE6:
	.loc 1 265 17
	# loop while bytes remain
	lw	a5,-20(s0)
	sext.w	a5,a5
	bne	a5,zero,.L45
	.loc 1 282 1
	nop
	nop
	ld	ra,72(sp)
	.cfi_restore 1
	ld	s0,64(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 80
	addi	sp,sp,80
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE9:
	.size	read_data, .-read_data
	.align	1
	.globl	dir_lookup
	.type	dir_lookup, @function
# dir_lookup -- search a directory inode for an entry matching `name`.
# In:  a0 = directory inode* -> -72(s0)
#      a1 = name (string)    -> -80(s0)
# Out: a0 = entry's inode number, or 0 if not found.
# Directory entries appear to be 16 bytes: u16 inum followed by a 14-byte
# name (TODO confirm against the C struct); 32 entries fit per 512-byte
# block.  Scans block by block via read_data until offset >= inode size
# (size read from inode+8, unsigned compare).  An entry with inum == 0
# terminates the scan of the current block (compiler-emitted `break`).
# Locals: -20 = byte offset into dir, -24 = entry index, -32 = block_buf
#         pointer, -36 = entries-per-block (32), -56.. = 14-byte name copy
#         that is NUL-terminated at -42 before strncmp.
dir_lookup:
.LFB10:
	.loc 1 284 62
	.cfi_startproc
	addi	sp,sp,-80
	.cfi_def_cfa_offset 80
	sd	ra,72(sp)
	sd	s0,64(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,80
	.cfi_def_cfa 8, 0
	sd	a0,-72(s0)
	sd	a1,-80(s0)
	.loc 1 285 14
	sw	zero,-20(s0)
	.loc 1 288 11
	j	.L47
.L53:
.LBB7:
	.loc 1 289 9
	# read_data(inode, offset, block_buf, 512) -- fetch next dir block
	lw	a5,-20(s0)
	li	a3,512
	lla	a2,block_buf
	mv	a1,a5
	ld	a0,-72(s0)
	call	read_data
	.loc 1 290 24
	lla	a5,block_buf
	sd	a5,-32(s0)
	.loc 1 291 13
	li	a5,32
	sw	a5,-36(s0)
.LBB8:
	.loc 1 292 18
	sw	zero,-24(s0)
	.loc 1 292 9
	j	.L48
.L52:
	.loc 1 293 19
	# entry = block + i*16; first halfword is the inode number
	lw	a5,-24(s0)
	slli	a5,a5,4
	ld	a4,-32(s0)
	add	a5,a4,a5
	.loc 1 293 22
	lhu	a5,0(a5)
	.loc 1 293 16
	beq	a5,zero,.L55
	.loc 1 294 31
	lw	a5,-24(s0)
	slli	a5,a5,4
	ld	a4,-32(s0)
	add	a5,a4,a5
	.loc 1 294 34
	addi	a4,a5,2
	.loc 1 294 13
	# copy the 14-byte entry name to the stack and NUL-terminate it
	addi	a5,s0,-56
	li	a2,14
	mv	a1,a4
	mv	a0,a5
	call	memcpy
	.loc 1 295 29
	sb	zero,-42(s0)
	.loc 1 296 17
	addi	a5,s0,-56
	li	a2,14
	ld	a1,-80(s0)
	mv	a0,a5
	call	strncmp
	mv	a5,a0
	.loc 1 296 16 discriminator 1
	bne	a5,zero,.L50
	.loc 1 297 26
	# match: return this entry's inum
	lw	a5,-24(s0)
	slli	a5,a5,4
	ld	a4,-32(s0)
	add	a5,a4,a5
	.loc 1 297 29
	lhu	a5,0(a5)
	sext.w	a5,a5
	j	.L54
.L55:
	.loc 1 293 34
	nop
.L50:
	.loc 1 292 39 discriminator 2
	lw	a5,-24(s0)
	addiw	a5,a5,1
	sw	a5,-24(s0)
.L48:
	.loc 1 292 27 discriminator 1
	lw	a5,-24(s0)
	mv	a4,a5
	lw	a5,-36(s0)
	sext.w	a4,a4
	sext.w	a5,a5
	blt	a4,a5,.L52
.LBE8:
	.loc 1 300 13
	# advance to the next 512-byte directory block
	lw	a5,-20(s0)
	addiw	a5,a5,512
	sw	a5,-20(s0)
.L47:
.LBE7:
	.loc 1 288 24
	# loop while offset < inode->size (field at +8, unsigned)
	ld	a5,-72(s0)
	lw	a5,8(a5)
	.loc 1 288 16
	lw	a4,-20(s0)
	sext.w	a4,a4
	bltu	a4,a5,.L53
	.loc 1 302 12
	li	a5,0
.L54:
	.loc 1 303 1
	mv	a0,a5
	ld	ra,72(sp)
	.cfi_restore 1
	ld	s0,64(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 80
	addi	sp,sp,80
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE10:
	.size	dir_lookup, .-dir_lookup
	.section	.rodata
	.align	3
.LC5:
	.string	"/"
	.text
	.align	1
	.globl	path_lookup
	.type	path_lookup, @function
# path_lookup -- resolve an absolute path to an inode number.
# In:  a0 = path string -> -216(s0)
# Out: a0 = inode number, 0 on failure, 1 for the root path "/".
# Flow: reject paths not starting with '/'; special-case "/" -> 1; read
# inode 1 (root) into a 56-byte-ish stack inode at -200(s0); duplicate the
# path into a stack VLA (the s2..s9 arithmetic below is GCC's variable-
# length-array size computation plus 16-byte stack alignment); then walk
# components with strtok("/"), calling dir_lookup + read_inode per step.
# Requires each intermediate inode to have type == 1 (directory,
# presumably -- confirm against the C enum).
# NOTE(review): the path is walked with strtok, which is stateful and not
# reentrant; fine only if this kernel is single-threaded here.
# s1 saves the incoming sp so the VLA can be released at .L58.
path_lookup:
.LFB11:
	.loc 1 305 40
	.cfi_startproc
	addi	sp,sp,-224
	.cfi_def_cfa_offset 224
	sd	ra,216(sp)
	sd	s0,208(sp)
	sd	s1,200(sp)
	sd	s2,192(sp)
	sd	s3,184(sp)
	sd	s4,176(sp)
	sd	s5,168(sp)
	sd	s6,160(sp)
	sd	s7,152(sp)
	sd	s8,144(sp)
	sd	s9,136(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	.cfi_offset 9, -24
	.cfi_offset 18, -32
	.cfi_offset 19, -40
	.cfi_offset 20, -48
	.cfi_offset 21, -56
	.cfi_offset 22, -64
	.cfi_offset 23, -72
	.cfi_offset 24, -80
	.cfi_offset 25, -88
	addi	s0,sp,224
	.cfi_def_cfa 8, 0
	sd	a0,-216(s0)
	.loc 1 305 40
	mv	a5,sp
	mv	s1,a5
	.loc 1 306 13
	# must be an absolute path: path[0] == '/'
	ld	a5,-216(s0)
	lbu	a5,0(a5)
	.loc 1 306 8
	mv	a4,a5
	li	a5,47
	beq	a4,a5,.L57
	.loc 1 306 32 discriminator 1
	li	a5,0
	.loc 1 306 32 is_stmt 0
	j	.L58
.L57:
	.loc 1 307 9 is_stmt 1
	# the root path itself resolves to inode 1
	lla	a1,.LC5
	ld	a0,-216(s0)
	call	strcmp
	mv	a5,a0
	.loc 1 307 8 discriminator 1
	bne	a5,zero,.L59
	.loc 1 308 16
	li	a5,1
	j	.L58
.L59:
	.loc 1 312 5
	# read the root inode (inum 1) into the stack inode at -200(s0)
	addi	a5,s0,-200
	mv	a1,a5
	li	a0,1
	call	read_inode
	.loc 1 313 12
	lhu	a5,-200(s0)
	.loc 1 313 8
	sext.w	a4,a5
	li	a5,1
	beq	a4,a5,.L60
	.loc 1 313 35 discriminator 1
	li	a5,0
	.loc 1 313 35 is_stmt 0
	j	.L58
.L60:
	.loc 1 315 15 is_stmt 1
	# allocate a VLA of strlen(path)+1 bytes on the stack for a writable copy
	ld	a0,-216(s0)
	call	strlen
	mv	a5,a0
	.loc 1 315 27 discriminator 1
	addiw	a5,a5,1
	sext.w	a5,a5
	mv	a4,a5
	.loc 1 315 10 discriminator 1
	addi	a4,a4,-1
	sd	a4,-120(s0)
	mv	a4,a5
	mv	s8,a4
	li	s9,0
	srli	a4,s8,61
	slli	s5,s9,3
	or	s5,a4,s5
	slli	s4,s8,3
	mv	a4,a5
	mv	s6,a4
	li	s7,0
	srli	a4,s6,61
	slli	s3,s7,3
	or	s3,a4,s3
	slli	s2,s6,3
	addi	a5,a5,15
	srli	a5,a5,4
	slli	a5,a5,4
	sub	sp,sp,a5
	mv	a5,sp
	sd	a5,-128(s0)
	.loc 1 316 25
	ld	a0,-216(s0)
	call	strlen
	mv	a5,a0
	.loc 1 316 5 discriminator 1
	addiw	a5,a5,1
	sext.w	a5,a5
	mv	a2,a5
	ld	a1,-216(s0)
	ld	a0,-128(s0)
	call	strncpy
	.loc 1 317 19
	# first path component
	lla	a1,.LC5
	ld	a0,-128(s0)
	call	strtok
	sd	a0,-104(s0)
	.loc 1 318 14
	# current inum starts at the root (1); kept at -108(s0)
	li	a5,1
	sw	a5,-108(s0)
	.loc 1 320 11
	j	.L61
.L64:
.LBB9:
	.loc 1 321 16
	# current inode must still be a directory (type == 1)
	lhu	a5,-200(s0)
	.loc 1 321 12
	sext.w	a4,a5
	li	a5,1
	beq	a4,a5,.L62
	.loc 1 321 39 discriminator 1
	li	a5,0
	.loc 1 321 39 is_stmt 0
	j	.L58
.L62:
	.loc 1 322 30 is_stmt 1
	addi	a5,s0,-200
	ld	a1,-104(s0)
	mv	a0,a5
	call	dir_lookup
	mv	a5,a0
	sw	a5,-132(s0)
	.loc 1 323 12
	lw	a5,-132(s0)
	sext.w	a5,a5
	bne	a5,zero,.L63
	.loc 1 323 36 discriminator 1
	li	a5,0
	.loc 1 323 36 is_stmt 0
	j	.L58
.L63:
	.loc 1 324 9 is_stmt 1
	# descend: load the found inode and remember its number
	addi	a4,s0,-200
	lw	a5,-132(s0)
	mv	a1,a4
	mv	a0,a5
	call	read_inode
	.loc 1 325 22
	lw	a5,-132(s0)
	sw	a5,-108(s0)
	.loc 1 326 17
	lla	a1,.LC5
	li	a0,0
	call	strtok
	sd	a0,-104(s0)
.L61:
.LBE9:
	.loc 1 320 18
	ld	a5,-104(s0)
	bne	a5,zero,.L64
	.loc 1 328 12
	lw	a5,-108(s0)
.L58:
	# release the VLA before the epilogue
	mv	sp,s1
	.loc 1 329 1
	mv	a0,a5
	addi	sp,s0,-224
	.cfi_def_cfa 2, 224
	ld	ra,216(sp)
	.cfi_restore 1
	ld	s0,208(sp)
	.cfi_restore 8
	ld	s1,200(sp)
	.cfi_restore 9
	ld	s2,192(sp)
	.cfi_restore 18
	ld	s3,184(sp)
	.cfi_restore 19
	ld	s4,176(sp)
	.cfi_restore 20
	ld	s5,168(sp)
	.cfi_restore 21
	ld	s6,160(sp)
	.cfi_restore 22
	ld	s7,152(sp)
	.cfi_restore 23
	ld	s8,144(sp)
	.cfi_restore 24
	ld	s9,136(sp)
	.cfi_restore 25
	addi	sp,sp,224
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE11:
	.size	path_lookup, .-path_lookup
	.section	.rodata
	.align	3
.LC6:
	.string	"== i-node %u ==\n"
	.align	3
.LC7:
	.string	"  type = %u\n"
	.align	3
.LC8:
	.string	"  size = %u\n"
	.align	3
.LC9:
	.string	"  addrs ="
	.align	3
.LC10:
	.string	" %u"
	.align	3
.LC11:
	.string	" [indirect %u]"
	.align	3
.LC12:
	.string	" [double-indirect %u]"
	.align	3
.LC13:
	.string	"\n"
	.text
	.align	1
	.globl	dump_inode
	.type	dump_inode, @function
# dump_inode -- debug print of one on-disk inode.
# In:  a0 = inode number -> -100(s0)
# Reads the inode into a stack copy at -88(s0) via read_inode, then prints:
# type (u16 at +0), size (u32 at +8), the 12 direct block numbers (array at
# +12, i.e. -76(s0)), and -- if non-zero -- the indirect (+60 -> -28(s0))
# and double-indirect (+64 -> -24(s0)) block numbers.  No return value.
dump_inode:
.LFB12:
	.loc 1 331 32
	.cfi_startproc
	addi	sp,sp,-112
	.cfi_def_cfa_offset 112
	sd	ra,104(sp)
	sd	s0,96(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,112
	.cfi_def_cfa 8, 0
	mv	a5,a0
	sw	a5,-100(s0)
	.loc 1 333 5
	addi	a4,s0,-88
	lw	a5,-100(s0)
	mv	a1,a4
	mv	a0,a5
	call	read_inode
	.loc 1 334 5
	lw	a5,-100(s0)
	mv	a1,a5
	lla	a0,.LC6
	call	printf
	.loc 1 335 32
	lhu	a5,-88(s0)
	.loc 1 335 5
	sext.w	a5,a5
	mv	a1,a5
	lla	a0,.LC7
	call	printf
	.loc 1 336 5
	lw	a5,-80(s0)
	mv	a1,a5
	lla	a0,.LC8
	call	printf
	.loc 1 337 5
	lla	a0,.LC9
	call	printf
.LBB10:
	.loc 1 338 14
	# print the 12 direct block numbers, skipping zeros
	sw	zero,-20(s0)
	.loc 1 338 5
	j	.L67
.L69:
	.loc 1 339 21
	lw	a4,-20(s0)
	addi	a5,s0,-76
	slli	a4,a4,2
	add	a5,a4,a5
	lw	a5,0(a5)
	.loc 1 339 12
	beq	a5,zero,.L68
	.loc 1 339 31 discriminator 1
	lw	a4,-20(s0)
	addi	a5,s0,-76
	slli	a4,a4,2
	add	a5,a4,a5
	lw	a5,0(a5)
	mv	a1,a5
	lla	a0,.LC10
	call	printf
.L68:
	.loc 1 338 35 discriminator 2
	lw	a5,-20(s0)
	addiw	a5,a5,1
	sw	a5,-20(s0)
.L67:
	.loc 1 338 23 discriminator 1
	lw	a5,-20(s0)
	sext.w	a4,a5
	li	a5,11
	ble	a4,a5,.L69
.LBE10:
	.loc 1 341 17
	# indirect block number, if any
	lw	a5,-28(s0)
	.loc 1 341 8
	beq	a5,zero,.L70
	.loc 1 342 9
	lw	a5,-28(s0)
	mv	a1,a5
	lla	a0,.LC11
	call	printf
.L70:
	.loc 1 344 17
	# double-indirect block number, if any
	lw	a5,-24(s0)
	.loc 1 344 8
	beq	a5,zero,.L71
	.loc 1 345 9
	lw	a5,-24(s0)
	mv	a1,a5
	lla	a0,.LC12
	call	printf
.L71:
	.loc 1 347 5
	lla	a0,.LC13
	call	printf
	.loc 1 348 1
	nop
	ld	ra,104(sp)
	.cfi_restore 1
	ld	s0,96(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 112
	addi	sp,sp,112
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE12:
	.size	dump_inode, .-dump_inode
	.align	1
	.type	owner_cap, @function
# owner_cap -- capacity of a file table's owner array.
# In:  a0 = file table* (saved but otherwise unused)
# Out: a0 = 128, a compile-time constant.
# Local (non-exported) helper for owner_add/owner_remove below.
owner_cap:
.LFB13:
	.loc 1 355 38
	.cfi_startproc
	addi	sp,sp,-32
	.cfi_def_cfa_offset 32
	sd	ra,24(sp)
	sd	s0,16(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,32
	.cfi_def_cfa 8, 0
	sd	a0,-24(s0)
	.loc 1 356 12
	li	a5,128
	.loc 1 357 1
	mv	a0,a5
	ld	ra,24(sp)
	.cfi_restore 1
	ld	s0,16(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 32
	addi	sp,sp,32
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE13:
	.size	owner_cap, .-owner_cap
	.align	1
	.type	owner_add, @function
# owner_add -- register `owner` in a file table's owner slot array.
# In:  a0 = file table* -> -40(s0)
#      a1 = owner (pointer-sized, presumably a proc* -- confirm) -> -48(s0)
# The owner array is 8-byte slots at file+(i+14)*8+8, i in [0, owner_cap);
# an owner count lives at file+1144.  First pass: if `owner` is already
# present, return (jump to .L83).  Second pass: store `owner` in the first
# zero slot, and bump the count at +1144 only while cap > count.
# Locals: -20 = first-pass index, -24 = second-pass index, -28 = cap.
owner_add:
.LFB14:
	.loc 1 358 55
	.cfi_startproc
	addi	sp,sp,-48
	.cfi_def_cfa_offset 48
	sd	ra,40(sp)
	sd	s0,32(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,48
	.cfi_def_cfa 8, 0
	sd	a0,-40(s0)
	sd	a1,-48(s0)
	.loc 1 359 15
	ld	a0,-40(s0)
	call	owner_cap
	mv	a5,a0
	sw	a5,-28(s0)
.LBB11:
	.loc 1 361 14
	# pass 1: bail out if this owner is already recorded
	sw	zero,-20(s0)
	.loc 1 361 5
	j	.L75
.L78:
	.loc 1 361 57 discriminator 5
	ld	a4,-40(s0)
	lw	a5,-20(s0)
	addi	a5,a5,14
	slli	a5,a5,3
	add	a5,a4,a5
	ld	a5,8(a5)
	.loc 1 361 38 discriminator 5
	ld	a4,-48(s0)
	beq	a4,a5,.L83
	.loc 1 361 31 discriminator 3
	lw	a5,-20(s0)
	addiw	a5,a5,1
	sw	a5,-20(s0)
.L75:
	.loc 1 361 23 discriminator 4
	lw	a5,-20(s0)
	mv	a4,a5
	lw	a5,-28(s0)
	sext.w	a4,a4
	sext.w	a5,a5
	blt	a4,a5,.L78
.LBE11:
.LBB12:
	.loc 1 363 14
	# pass 2: claim the first empty (zero) slot
	sw	zero,-24(s0)
	.loc 1 363 5
	j	.L79
.L82:
	.loc 1 364 31
	ld	a4,-40(s0)
	lw	a5,-24(s0)
	addi	a5,a5,14
	slli	a5,a5,3
	add	a5,a4,a5
	ld	a5,8(a5)
	.loc 1 364 12
	bne	a5,zero,.L80
	.loc 1 365 35
	ld	a4,-40(s0)
	lw	a5,-24(s0)
	addi	a5,a5,14
	slli	a5,a5,3
	add	a5,a4,a5
	ld	a4,-48(s0)
	sd	a4,8(a5)
	.loc 1 366 18
	# bump the owner count (at +1144) while it is still below cap
	ld	a5,-40(s0)
	lw	a5,1144(a5)
	.loc 1 366 16
	lw	a4,-28(s0)
	sext.w	a4,a4
	ble	a4,a5,.L84
	.loc 1 366 46 discriminator 1
	ld	a5,-40(s0)
	lw	a5,1144(a5)
	.loc 1 366 65 discriminator 1
	addiw	a5,a5,1
	sext.w	a4,a5
	ld	a5,-40(s0)
	sw	a4,1144(a5)
	.loc 1 367 13
	j	.L84
.L80:
	.loc 1 363 31 discriminator 2
	lw	a5,-24(s0)
	addiw	a5,a5,1
	sw	a5,-24(s0)
.L79:
	.loc 1 363 23 discriminator 1
	lw	a5,-24(s0)
	mv	a4,a5
	lw	a5,-28(s0)
	sext.w	a4,a4
	sext.w	a5,a5
	blt	a4,a5,.L82
	j	.L74
.L83:
.LBE12:
.LBB13:
	.loc 1 361 67
	nop
	j	.L74
.L84:
.LBE13:
.LBB14:
	.loc 1 367 13
	nop
.L74:
.LBE14:
	.loc 1 371 1
	ld	ra,40(sp)
	.cfi_restore 1
	ld	s0,32(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 48
	addi	sp,sp,48
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE14:
	.size	owner_add, .-owner_add
	.align	1
	.type	owner_remove, @function
# owner_remove -- drop `owner` from a file table's owner array and recount.
# In:  a0 = file table* -> -40(s0)
#      a1 = owner       -> -48(s0)
# Single pass over the same slot array as owner_add (file+(i+14)*8+8):
# any slot equal to `owner` is zeroed; every slot still non-zero after
# that is counted, and the total is written back to the count at +1144.
# Locals: -20 = surviving-owner count, -24 = index, -28 = cap.
owner_remove:
.LFB15:
	.loc 1 372 58
	.cfi_startproc
	addi	sp,sp,-48
	.cfi_def_cfa_offset 48
	sd	ra,40(sp)
	sd	s0,32(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,48
	.cfi_def_cfa 8, 0
	sd	a0,-40(s0)
	sd	a1,-48(s0)
	.loc 1 373 15
	ld	a0,-40(s0)
	call	owner_cap
	mv	a5,a0
	sw	a5,-28(s0)
	.loc 1 373 29 discriminator 1
	sw	zero,-20(s0)
.LBB15:
	.loc 1 374 14
	sw	zero,-24(s0)
	.loc 1 374 5
	j	.L86
.L89:
	.loc 1 375 31
	ld	a4,-40(s0)
	lw	a5,-24(s0)
	addi	a5,a5,14
	slli	a5,a5,3
	add	a5,a4,a5
	ld	a5,8(a5)
	.loc 1 375 12
	ld	a4,-48(s0)
	bne	a4,a5,.L87
	.loc 1 375 63 discriminator 1
	# matching slot: clear it
	ld	a4,-40(s0)
	lw	a5,-24(s0)
	addi	a5,a5,14
	slli	a5,a5,3
	add	a5,a4,a5
	sd	zero,8(a5)
.L87:
	.loc 1 376 31
	ld	a4,-40(s0)
	lw	a5,-24(s0)
	addi	a5,a5,14
	slli	a5,a5,3
	add	a5,a4,a5
	ld	a5,8(a5)
	.loc 1 376 12
	beq	a5,zero,.L88
	.loc 1 376 37 discriminator 1
	# slot still occupied: count it
	lw	a5,-20(s0)
	addiw	a5,a5,1
	sw	a5,-20(s0)
.L88:
	.loc 1 374 31 discriminator 2
	lw	a5,-24(s0)
	addiw	a5,a5,1
	sw	a5,-24(s0)
.L86:
	.loc 1 374 23 discriminator 1
	lw	a5,-24(s0)
	mv	a4,a5
	lw	a5,-28(s0)
	sext.w	a4,a4
	sext.w	a5,a5
	blt	a4,a5,.L89
.LBE15:
	.loc 1 378 26
	# write the fresh count back to file+1144
	ld	a5,-40(s0)
	lw	a4,-20(s0)
	sw	a4,1144(a5)
	.loc 1 379 1
	nop
	ld	ra,40(sp)
	.cfi_restore 1
	ld	s0,32(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 48
	addi	sp,sp,48
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE15:
	.size	owner_remove, .-owner_remove
	.globl	gPipes
	.bss
	.align	3
	.type	gPipes, @object
	.size	gPipes, 13056
# Static pool of 16 pipe objects, 816 bytes each (16 * 816 = 13056).
# Linked through the pointer at offset 808 into the gFreePipes free list
# by file_system_init.
gPipes:
	.zero	13056
	.globl	gFiles
	.align	3
	.type	gFiles, @object
	.size	gFiles, 18432
# Static pool of 16 file-table objects, 1152 bytes each (16 * 1152 = 18432).
# Linked through the pointer at offset 112 into the gFreeFiles free list.
gFiles:
	.zero	18432
	.globl	gFreePipes
	.section	.sbss,"aw",@nobits
	.align	3
	.type	gFreePipes, @object
	.size	gFreePipes, 8
# Head pointer of the pipe free list (see alloc_pipe/free_pipe).
gFreePipes:
	.zero	8
	.globl	gFreeFiles
	.align	3
	.type	gFreeFiles, @object
	.size	gFreeFiles, 8
# Head pointer of the file-table free list (see alloc_file/free_file).
gFreeFiles:
	.zero	8
	.text
	.align	1
	.globl	file_system_init
	.type	file_system_init, @function
# file_system_init -- zero the static pools and build both free lists.
# No arguments, no return value.
# memset(gPipes, 0, 13056) and memset(gFiles, 0, 18432) (both sizes are
# materialized as li+addi because they exceed the 12-bit immediate range),
# then push all 16 pipes (stride 816, next-link at +808) onto gFreePipes
# and all 16 file tables (stride 1152 = i*8*128 + i*128, next-link at
# +112) onto gFreeFiles, newest first.
file_system_init:
.LFB16:
	.loc 1 393 1
	.cfi_startproc
	addi	sp,sp,-48
	.cfi_def_cfa_offset 48
	sd	ra,40(sp)
	sd	s0,32(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,48
	.cfi_def_cfa 8, 0
	.loc 1 394 5
	# 12288 + 768 = 13056 = sizeof(gPipes)
	li	a5,12288
	addi	a2,a5,768
	li	a1,0
	lla	a0,gPipes
	call	memset
	.loc 1 395 5
	# 20480 - 2048 = 18432 = sizeof(gFiles)
	li	a5,20480
	addi	a2,a5,-2048
	li	a1,0
	lla	a0,gFiles
	call	memset
.LBB16:
	.loc 1 397 13
	sw	zero,-20(s0)
	.loc 1 397 5
	j	.L91
.L92:
.LBB17:
	.loc 1 398 23
	# pipe = &gPipes[i]  (816 bytes each)
	lw	a4,-20(s0)
	li	a5,816
	mul	a4,a4,a5
	lla	a5,gPipes
	add	a5,a4,a5
	sd	a5,-40(s0)
	.loc 1 400 22
	# pipe->next (at +808) = gFreePipes; gFreePipes = pipe
	lla	a5,gFreePipes
	ld	a4,0(a5)
	ld	a5,-40(s0)
	sd	a4,808(a5)
	.loc 1 401 20
	lla	a5,gFreePipes
	ld	a4,-40(s0)
	sd	a4,0(a5)
.LBE17:
	.loc 1 397 37 discriminator 3
	lw	a5,-20(s0)
	addiw	a5,a5,1
	sw	a5,-20(s0)
.L91:
	.loc 1 397 19 discriminator 1
	lw	a5,-20(s0)
	sext.w	a4,a5
	li	a5,15
	ble	a4,a5,.L92
.LBE16:
.LBB18:
	.loc 1 404 13
	sw	zero,-24(s0)
	.loc 1 404 5
	j	.L93
.L94:
.LBB19:
	.loc 1 405 22
	# file = &gFiles[i]  ((i*8 + i) * 128 = i * 1152 bytes each)
	lw	a4,-24(s0)
	mv	a5,a4
	slli	a5,a5,3
	add	a5,a5,a4
	slli	a5,a5,7
	lla	a4,gFiles
	add	a5,a5,a4
	sd	a5,-32(s0)
	.loc 1 407 22
	# file->next (at +112) = gFreeFiles; gFreeFiles = file
	lla	a5,gFreeFiles
	ld	a4,0(a5)
	ld	a5,-32(s0)
	sd	a4,112(a5)
	.loc 1 408 20
	lla	a5,gFreeFiles
	ld	a4,-32(s0)
	sd	a4,0(a5)
.LBE19:
	.loc 1 404 37 discriminator 3
	lw	a5,-24(s0)
	addiw	a5,a5,1
	sw	a5,-24(s0)
.L93:
	.loc 1 404 19 discriminator 1
	lw	a5,-24(s0)
	sext.w	a4,a5
	li	a5,15
	ble	a4,a5,.L94
.LBE18:
	.loc 1 410 1
	nop
	nop
	ld	ra,40(sp)
	.cfi_restore 1
	ld	s0,32(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 48
	addi	sp,sp,48
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE16:
	.size	file_system_init, .-file_system_init
	.align	1
	.globl	alloc_pipe
	.type	alloc_pipe, @function
# alloc_pipe -- pop one pipe off the gFreePipes list and zero it.
# Out: a0 = pipe*.
# NOTE(review): unlike alloc_file, there is NO empty-list check here --
# if gFreePipes is NULL this dereferences a null pointer (ld a4,808(a5)).
# Confirm against the C source whether exhaustion is considered impossible.
alloc_pipe:
.LFB17:
	.loc 1 413 1
	.cfi_startproc
	addi	sp,sp,-32
	.cfi_def_cfa_offset 32
	sd	ra,24(sp)
	sd	s0,16(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,32
	.cfi_def_cfa 8, 0
	.loc 1 414 19
	lla	a5,gFreePipes
	ld	a5,0(a5)
	sd	a5,-24(s0)
	.loc 1 416 28
	# gFreePipes = gFreePipes->next (link at +808)
	lla	a5,gFreePipes
	ld	a5,0(a5)
	ld	a4,808(a5)
	.loc 1 416 16
	lla	a5,gFreePipes
	sd	a4,0(a5)
	.loc 1 418 5
	li	a2,816
	li	a1,0
	ld	a0,-24(s0)
	call	memset
	.loc 1 420 12
	ld	a5,-24(s0)
	.loc 1 421 1
	mv	a0,a5
	ld	ra,24(sp)
	.cfi_restore 1
	ld	s0,16(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 32
	addi	sp,sp,32
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE17:
	.size	alloc_pipe, .-alloc_pipe
	.align	1
	.globl	free_pipe
	.type	free_pipe, @function
# free_pipe -- push a pipe back on the gFreePipes list.
# In:  a0 = pipe* -> -24(s0).
# pipe->next (at +808) = gFreePipes; gFreePipes = pipe.
# The pipe's contents are NOT cleared here; alloc_pipe zeroes on reuse.
free_pipe:
.LFB18:
	.loc 1 424 1
	.cfi_startproc
	addi	sp,sp,-32
	.cfi_def_cfa_offset 32
	sd	ra,24(sp)
	sd	s0,16(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,32
	.cfi_def_cfa 8, 0
	sd	a0,-24(s0)
	.loc 1 425 18
	lla	a5,gFreePipes
	ld	a4,0(a5)
	ld	a5,-24(s0)
	sd	a4,808(a5)
	.loc 1 426 16
	lla	a5,gFreePipes
	ld	a4,-24(s0)
	sd	a4,0(a5)
	.loc 1 427 1
	nop
	ld	ra,24(sp)
	.cfi_restore 1
	ld	s0,16(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 32
	addi	sp,sp,32
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE18:
	.size	free_pipe, .-free_pipe
	.align	1
	.globl	alloc_file
	.type	alloc_file, @function
# alloc_file -- pop one file table off the gFreeFiles list and zero it.
# Out: a0 = file table*, or 0 (NULL) if the free list is empty.
# Unlike alloc_pipe, this one DOES check for list exhaustion.
alloc_file:
.LFB19:
	.loc 1 429 27
	.cfi_startproc
	addi	sp,sp,-32
	.cfi_def_cfa_offset 32
	sd	ra,24(sp)
	sd	s0,16(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,32
	.cfi_def_cfa 8, 0
	.loc 1 430 9
	lla	a5,gFreeFiles
	ld	a5,0(a5)
	.loc 1 430 8
	bne	a5,zero,.L99
	.loc 1 430 29 discriminator 1
	li	a5,0
	.loc 1 430 29 is_stmt 0
	j	.L100
.L99:
	.loc 1 431 18 is_stmt 1
	lla	a5,gFreeFiles
	ld	a5,0(a5)
	sd	a5,-24(s0)
	.loc 1 432 28
	# gFreeFiles = gFreeFiles->next (link at +112)
	lla	a5,gFreeFiles
	ld	a5,0(a5)
	ld	a4,112(a5)
	.loc 1 432 16
	lla	a5,gFreeFiles
	sd	a4,0(a5)
	.loc 1 433 5
	li	a2,1152
	li	a1,0
	ld	a0,-24(s0)
	call	memset
	.loc 1 434 12
	ld	a5,-24(s0)
.L100:
	.loc 1 435 1
	mv	a0,a5
	ld	ra,24(sp)
	.cfi_restore 1
	ld	s0,16(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 32
	addi	sp,sp,32
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE19:
	.size	alloc_file, .-alloc_file
	.align	1
	.globl	free_file
	.type	free_file, @function
# free_file -- push a file table back on the gFreeFiles list.
# In:  a0 = file table* -> -24(s0).
# file->next (at +112) = gFreeFiles; gFreeFiles = file.
free_file:
.LFB20:
	.loc 1 439 1
	.cfi_startproc
	addi	sp,sp,-32
	.cfi_def_cfa_offset 32
	sd	ra,24(sp)
	sd	s0,16(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,32
	.cfi_def_cfa 8, 0
	sd	a0,-24(s0)
	.loc 1 440 18
	lla	a5,gFreeFiles
	ld	a4,0(a5)
	ld	a5,-24(s0)
	sd	a4,112(a5)
	.loc 1 441 16
	lla	a5,gFreeFiles
	ld	a4,-24(s0)
	sd	a4,0(a5)
	.loc 1 442 1
	nop
	ld	ra,24(sp)
	.cfi_restore 1
	ld	s0,16(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 32
	addi	sp,sp,32
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE20:
	.size	free_file, .-free_file
	.align	1
	.globl	pipealloc
	.type	pipealloc, @function
# pipealloc -- allocate and initialize a pipe object.
# Out: a0 = pipe*, or 0 if alloc_pipe returned NULL.
# Initializes: read pos (+512) = 0, write pos (+516) = 0,
# read-open flag (+520) = 1, write-open flag (+524) = 1, the counter at
# +536 = 0, and the counter at +800 = 0 (field names inferred from
# piperead/pipewrite/pipe_open usage -- confirm against the C struct).
# NOTE(review): memset(p, 0, 816) runs BEFORE the NULL check below, so a
# NULL from alloc_pipe would already have faulted inside memset.  This
# mirrors the C source's statement order; flagging, not fixing, here.
pipealloc:
.LFB21:
	.loc 1 448 1
	.cfi_startproc
	addi	sp,sp,-48
	.cfi_def_cfa_offset 48
	sd	ra,40(sp)
	sd	s0,32(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,48
	.cfi_def_cfa 8, 0
	.loc 1 449 23
	call	alloc_pipe
	sd	a0,-24(s0)
	.loc 1 451 5
	li	a2,816
	li	a1,0
	ld	a0,-24(s0)
	call	memset
	.loc 1 452 8
	ld	a5,-24(s0)
	bne	a5,zero,.L103
	.loc 1 453 16
	li	a5,0
	j	.L104
.L103:
	.loc 1 454 18
	ld	a5,-24(s0)
	sw	zero,512(a5)
	.loc 1 455 18
	ld	a5,-24(s0)
	sw	zero,516(a5)
	.loc 1 456 19
	ld	a5,-24(s0)
	li	a4,1
	sw	a4,520(a5)
	.loc 1 457 19
	ld	a5,-24(s0)
	li	a4,1
	sw	a4,524(a5)
	.loc 1 458 13
	ld	a5,-24(s0)
	sw	zero,536(a5)
	.loc 1 459 24
	ld	a5,-24(s0)
	sw	zero,800(a5)
	.loc 1 461 11
	# dead stores of the result into two unused locals (-O0 artifact)
	ld	a5,-24(s0)
	sd	a5,-32(s0)
	.loc 1 462 11
	ld	a5,-24(s0)
	sd	a5,-40(s0)
	.loc 1 464 12
	ld	a5,-24(s0)
.L104:
	.loc 1 465 1
	mv	a0,a5
	ld	ra,40(sp)
	.cfi_restore 1
	ld	s0,32(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 48
	addi	sp,sp,48
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE21:
	.size	pipealloc, .-pipealloc
	.align	1
	.globl	new_file_table
	.type	new_file_table, @function
# new_file_table -- allocate a zeroed file table owned by the current proc.
# Out: a0 = file table*.
# Calls alloc_file, zeroes the 1152-byte object again (redundant with the
# memset inside alloc_file, but that is what the C source does), then
# registers gProc[gActiveProc] as an owner via owner_add.
# NOTE(review): no NULL check on the alloc_file result before memset.
new_file_table:
.LFB22:
	.loc 1 470 1
	.cfi_startproc
	addi	sp,sp,-32
	.cfi_def_cfa_offset 32
	sd	ra,24(sp)
	sd	s0,16(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,32
	.cfi_def_cfa 8, 0
	.loc 1 471 27
	call	alloc_file
	sd	a0,-24(s0)
	.loc 1 473 5
	li	a2,1152
	li	a1,0
	ld	a0,-24(s0)
	call	memset
	.loc 1 475 5
	# owner_add(file, gProc[gActiveProc])
	lla	a5,gActiveProc
	lw	a5,0(a5)
	lla	a4,gProc
	slli	a5,a5,3
	add	a5,a4,a5
	ld	a5,0(a5)
	mv	a1,a5
	ld	a0,-24(s0)
	call	owner_add
	.loc 1 477 12
	ld	a5,-24(s0)
	.loc 1 478 1
	mv	a0,a5
	ld	ra,24(sp)
	.cfi_restore 1
	ld	s0,16(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 32
	addi	sp,sp,32
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE22:
	.size	new_file_table, .-new_file_table
	.align	1
	.globl	fs_init
	.type	fs_init, @function
# fs_init -- populate a per-process fd array with stdin/stdout/stderr.
# In:  a0 = fd array (128 bytes, 16 x 8-byte file-table pointers) -> -40(s0)
# Zeroes the array, then creates three file tables:
#   slot 0: field at +0 set to 0, field at +80 set to 1
#   slot 1: field at +0 set to 1, field at +80 set to 1
#   slot 2: field at +0 set to 2, field at +80 set to 1
# Given is_stdin/is_stdout below, +0 looks like a type/stream id
# (0=stdin, 1=stdout, presumably 2=stderr) and +80 an in-use flag --
# confirm against the C struct.  s1 holds the slot address across the
# new_file_table calls.
fs_init:
.LFB23:
	.loc 1 481 1
	.cfi_startproc
	addi	sp,sp,-48
	.cfi_def_cfa_offset 48
	sd	ra,40(sp)
	sd	s0,32(sp)
	sd	s1,24(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	.cfi_offset 9, -24
	addi	s0,sp,48
	.cfi_def_cfa 8, 0
	sd	a0,-40(s0)
	.loc 1 482 5
	li	a2,128
	li	a1,0
	ld	a0,-40(s0)
	call	memset
	.loc 1 484 21
	call	new_file_table
	mv	a4,a0
	.loc 1 484 19 discriminator 1
	ld	a5,-40(s0)
	sd	a4,0(a5)
	.loc 1 486 15
	ld	a5,-40(s0)
	ld	a5,0(a5)
	.loc 1 486 25
	sw	zero,0(a5)
	.loc 1 487 15
	ld	a5,-40(s0)
	ld	a5,0(a5)
	.loc 1 487 25
	li	a4,1
	sw	a4,80(a5)
	.loc 1 489 15
	ld	a5,-40(s0)
	addi	s1,a5,8
	.loc 1 489 21
	call	new_file_table
	mv	a5,a0
	.loc 1 489 19 discriminator 1
	sd	a5,0(s1)
	.loc 1 491 15
	ld	a5,-40(s0)
	addi	a5,a5,8
	ld	a5,0(a5)
	.loc 1 491 25
	li	a4,1
	sw	a4,0(a5)
	.loc 1 492 15
	ld	a5,-40(s0)
	addi	a5,a5,8
	ld	a5,0(a5)
	.loc 1 492 25
	li	a4,1
	sw	a4,80(a5)
	.loc 1 494 15
	ld	a5,-40(s0)
	addi	s1,a5,16
	.loc 1 494 21
	call	new_file_table
	mv	a5,a0
	.loc 1 494 19 discriminator 1
	sd	a5,0(s1)
	.loc 1 496 15
	ld	a5,-40(s0)
	addi	a5,a5,16
	ld	a5,0(a5)
	.loc 1 496 25
	li	a4,2
	sw	a4,0(a5)
	.loc 1 497 15
	ld	a5,-40(s0)
	addi	a5,a5,16
	ld	a5,0(a5)
	.loc 1 497 25
	li	a4,1
	sw	a4,80(a5)
	.loc 1 498 1
	nop
	ld	ra,40(sp)
	.cfi_restore 1
	ld	s0,32(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 48
	ld	s1,24(sp)
	.cfi_restore 9
	addi	sp,sp,48
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE23:
	.size	fs_init, .-fs_init
	.align	1
	.globl	is_pipe
	.type	is_pipe, @function
# is_pipe -- does fd refer to an open pipe in the current process?
# In:  a0 = fd (int) -> -36(s0)
# Out: a0 = 1 if table[fd] != NULL && table[fd]->flag(+80) != 0
#           && table[fd]->pipe(+88) != NULL; else 0.
# No bounds check on fd here (callers like piperead do their own).
is_pipe:
.LFB24:
	.loc 1 501 1
	.cfi_startproc
	addi	sp,sp,-48
	.cfi_def_cfa_offset 48
	sd	ra,40(sp)
	sd	s0,32(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,48
	.cfi_def_cfa 8, 0
	mv	a5,a0
	sw	a5,-36(s0)
	.loc 1 502 32
	call	get_current_file_table
	sd	a0,-24(s0)
	.loc 1 504 19
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 504 8
	beq	a5,zero,.L109
	.loc 1 504 37 discriminator 1
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 504 41 discriminator 1
	lw	a5,80(a5)
	.loc 1 504 24 discriminator 1
	beq	a5,zero,.L109
	.loc 1 505 22
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 505 26
	# the pipe pointer lives at +88
	ld	a5,88(a5)
	.loc 1 505 11
	beq	a5,zero,.L109
	.loc 1 506 20
	li	a5,1
	j	.L110
.L109:
	.loc 1 510 12
	li	a5,0
.L110:
	.loc 1 511 1
	mv	a0,a5
	ld	ra,40(sp)
	.cfi_restore 1
	ld	s0,32(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 48
	addi	sp,sp,48
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE24:
	.size	is_pipe, .-is_pipe
	.align	1
	.globl	is_stdin
	.type	is_stdin, @function
# is_stdin -- does fd refer to the stdin stream in the current process?
# In:  a0 = fd (int) -> -36(s0)
# Out: a0 = 1 if table[fd] != NULL && table[fd]->flag(+80) != 0
#           && table[fd]->field(+0) == 0; else 0.
# Same entry checks as is_pipe; differs only in testing +0 == 0.
is_stdin:
.LFB25:
	.loc 1 514 1
	.cfi_startproc
	addi	sp,sp,-48
	.cfi_def_cfa_offset 48
	sd	ra,40(sp)
	sd	s0,32(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,48
	.cfi_def_cfa 8, 0
	mv	a5,a0
	sw	a5,-36(s0)
	.loc 1 515 32
	call	get_current_file_table
	sd	a0,-24(s0)
	.loc 1 516 19
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 516 8
	beq	a5,zero,.L112
	.loc 1 516 37 discriminator 1
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 516 41 discriminator 1
	lw	a5,80(a5)
	.loc 1 516 24 discriminator 1
	beq	a5,zero,.L112
	.loc 1 517 22
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 517 26
	lw	a5,0(a5)
	.loc 1 517 11
	bne	a5,zero,.L112
	.loc 1 518 20
	li	a5,1
	j	.L113
.L112:
	.loc 1 522 12
	li	a5,0
.L113:
	.loc 1 523 1
	mv	a0,a5
	ld	ra,40(sp)
	.cfi_restore 1
	ld	s0,32(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 48
	addi	sp,sp,48
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE25:
	.size	is_stdin, .-is_stdin
	.align	1
	.globl	is_stdout
	.type	is_stdout, @function
# is_stdout -- does fd refer to the stdout stream in the current process?
# In:  a0 = fd (int) -> -36(s0)
# Out: a0 = 1 if table[fd] != NULL && table[fd]->flag(+80) != 0
#           && table[fd]->field(+0) == 1; else 0.
# Same shape as is_stdin but tests +0 == 1.
is_stdout:
.LFB26:
	.loc 1 526 1
	.cfi_startproc
	addi	sp,sp,-48
	.cfi_def_cfa_offset 48
	sd	ra,40(sp)
	sd	s0,32(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,48
	.cfi_def_cfa 8, 0
	mv	a5,a0
	sw	a5,-36(s0)
	.loc 1 527 32
	call	get_current_file_table
	sd	a0,-24(s0)
	.loc 1 529 19
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 529 8
	beq	a5,zero,.L115
	.loc 1 529 37 discriminator 1
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 529 41 discriminator 1
	lw	a5,80(a5)
	.loc 1 529 24 discriminator 1
	beq	a5,zero,.L115
	.loc 1 530 22
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 530 26
	lw	a4,0(a5)
	.loc 1 530 11
	li	a5,1
	bne	a4,a5,.L115
	.loc 1 531 20
	li	a5,1
	j	.L116
.L115:
	.loc 1 535 12
	li	a5,0
.L116:
	.loc 1 536 1
	mv	a0,a5
	ld	ra,40(sp)
	.cfi_restore 1
	ld	s0,32(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 48
	addi	sp,sp,48
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE26:
	.size	is_stdout, .-is_stdout
	.section	.rodata
	.align	3
.LC14:
	.string	"PIPE LINKED MAX"
	.text
	.align	1
	.globl	pipe_open
	.type	pipe_open, @function
# pipe_open -- create a pipe and install a read end and a write end in the
# current process's fd table.
# In:  a0 = int* out param for the read fd  -> -72(s0)
#      a1 = int* out param for the write fd -> -80(s0)
# Allocates a pipe via pipealloc, then runs two nearly identical scans
# over fds 3..15 looking for an empty slot:
#   read end:  file +0 = 4 (pipe type, presumably), +80 = 1 (in use),
#              +4 = -1, +76 = 0, +88 = pipe, +100 = 1, +104 = 0
#   write end: same, but +100 = 0, +104 = 1, and pipe counter +532 is
#              bumped instead of +528
# Each installed end bumps the pipe's counter at +536, records the file
# table pointer in the pipe's slot array at pipe+(count+68)*8 (count read
# from pipe+800 and incremented), and -- if that count exceeds 31 --
# prints "PIPE LINKED MAX" and spins forever (.L121 / .L127 are
# deliberate infinite loops: a hard stop on table overflow).
# Locals: -36 = read-end fd scan index, -40 = write-end fd scan index,
#         -48 = fd table, -56 = pipe.  s1 holds the fd slot address
#         across the new_file_table calls.
pipe_open:
.LFB27:
	.loc 1 538 36
	.cfi_startproc
	addi	sp,sp,-80
	.cfi_def_cfa_offset 80
	sd	ra,72(sp)
	sd	s0,64(sp)
	sd	s1,56(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	.cfi_offset 9, -24
	addi	s0,sp,80
	.cfi_def_cfa 8, 0
	sd	a0,-72(s0)
	sd	a1,-80(s0)
	.loc 1 539 32
	call	get_current_file_table
	sd	a0,-48(s0)
	.loc 1 541 25
	call	pipealloc
	sd	a0,-56(s0)
.LBB20:
	.loc 1 543 14
	# ---- read end: scan fds 3..15 for a free slot ----
	li	a5,3
	sw	a5,-36(s0)
	.loc 1 543 5
	j	.L118
.L123:
	.loc 1 544 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 544 12
	bne	a5,zero,.L119
	.loc 1 545 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	s1,a4,a5
	.loc 1 545 29
	call	new_file_table
	mv	a5,a0
	.loc 1 545 27 discriminator 1
	sd	a5,0(s1)
	.loc 1 547 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 547 33
	li	a4,4
	sw	a4,0(a5)
	.loc 1 548 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 548 34
	li	a4,1
	sw	a4,80(a5)
	.loc 1 549 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 549 34
	li	a4,-1
	sw	a4,4(a5)
	.loc 1 550 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 550 34
	sw	zero,76(a5)
	.loc 1 551 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 551 33
	ld	a4,-56(s0)
	sd	a4,88(a5)
	.loc 1 552 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 552 38
	li	a4,1
	sw	a4,100(a5)
	.loc 1 553 23
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 553 41
	sw	zero,104(a5)
	.loc 1 554 18
	# report the read fd through the first out parameter
	ld	a5,-72(s0)
	lw	a4,-36(s0)
	sw	a4,0(a5)
	.loc 1 556 16
	ld	a5,-56(s0)
	lw	a5,536(a5)
	.loc 1 556 22
	addiw	a5,a5,1
	sext.w	a4,a5
	ld	a5,-56(s0)
	sw	a4,536(a5)
	.loc 1 557 66
	# pipe->slots[count++] = table[fd]  (slot array at pipe+(count+68)*8)
	lw	a5,-36(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a4,a4,a5
	.loc 1 557 33
	ld	a5,-56(s0)
	lw	a5,800(a5)
	.loc 1 557 50
	addiw	a3,a5,1
	sext.w	a2,a3
	ld	a3,-56(s0)
	sw	a2,800(a3)
	.loc 1 557 66
	ld	a4,0(a4)
	.loc 1 557 54
	ld	a3,-56(s0)
	addi	a5,a5,68
	slli	a5,a5,3
	add	a5,a3,a5
	sd	a4,0(a5)
	.loc 1 558 16
	ld	a5,-56(s0)
	lw	a5,528(a5)
	.loc 1 558 25
	addiw	a5,a5,1
	sext.w	a4,a5
	ld	a5,-56(s0)
	sw	a4,528(a5)
	.loc 1 560 19
	ld	a5,-56(s0)
	lw	a4,800(a5)
	.loc 1 560 15
	li	a5,31
	ble	a4,a5,.L130
	.loc 1 561 17
	lla	a0,.LC14
	call	puts
.L121:
	.loc 1 562 22
	# deliberate halt: spin forever once the link table overflows
	j	.L121
.L119:
	.loc 1 543 42 discriminator 2
	lw	a5,-36(s0)
	addiw	a5,a5,1
	sw	a5,-36(s0)
.L118:
	.loc 1 543 23 discriminator 1
	lw	a5,-36(s0)
	sext.w	a4,a5
	li	a5,15
	ble	a4,a5,.L123
	j	.L122
.L130:
	.loc 1 566 13
	nop
.L122:
.LBE20:
.LBB21:
	.loc 1 569 14
	# ---- write end: second scan over fds 3..15 ----
	li	a5,3
	sw	a5,-40(s0)
	.loc 1 569 5
	j	.L124
.L129:
	.loc 1 570 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 570 12
	bne	a5,zero,.L125
	.loc 1 571 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	s1,a4,a5
	.loc 1 571 29
	call	new_file_table
	mv	a5,a0
	.loc 1 571 27 discriminator 1
	sd	a5,0(s1)
	.loc 1 573 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 573 33
	li	a4,4
	sw	a4,0(a5)
	.loc 1 574 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 574 34
	li	a4,1
	sw	a4,80(a5)
	.loc 1 575 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 575 34
	li	a4,-1
	sw	a4,4(a5)
	.loc 1 577 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 577 34
	sw	zero,76(a5)
	.loc 1 578 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 578 33
	ld	a4,-56(s0)
	sd	a4,88(a5)
	.loc 1 579 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 579 39
	# write end: +104 = 1, +100 = 0 (mirror image of the read end)
	li	a4,1
	sw	a4,104(a5)
	.loc 1 580 23
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 580 38
	sw	zero,100(a5)
	.loc 1 581 18
	# report the write fd through the second out parameter
	ld	a5,-80(s0)
	lw	a4,-40(s0)
	sw	a4,0(a5)
	.loc 1 583 16
	ld	a5,-56(s0)
	lw	a5,536(a5)
	.loc 1 583 22
	addiw	a5,a5,1
	sext.w	a4,a5
	ld	a5,-56(s0)
	sw	a4,536(a5)
	.loc 1 584 66
	lw	a5,-40(s0)
	slli	a5,a5,3
	ld	a4,-48(s0)
	add	a4,a4,a5
	.loc 1 584 33
	ld	a5,-56(s0)
	lw	a5,800(a5)
	.loc 1 584 50
	addiw	a3,a5,1
	sext.w	a2,a3
	ld	a3,-56(s0)
	sw	a2,800(a3)
	.loc 1 584 66
	ld	a4,0(a4)
	.loc 1 584 54
	ld	a3,-56(s0)
	addi	a5,a5,68
	slli	a5,a5,3
	add	a5,a3,a5
	sd	a4,0(a5)
	.loc 1 585 16
	ld	a5,-56(s0)
	lw	a5,532(a5)
	.loc 1 585 25
	addiw	a5,a5,1
	sext.w	a4,a5
	ld	a5,-56(s0)
	sw	a4,532(a5)
	.loc 1 587 19
	ld	a5,-56(s0)
	lw	a4,800(a5)
	.loc 1 587 15
	li	a5,31
	ble	a4,a5,.L131
	.loc 1 588 17
	lla	a0,.LC14
	call	puts
.L127:
	.loc 1 589 22
	# deliberate halt on overflow, as above
	j	.L127
.L125:
	.loc 1 569 42 discriminator 2
	lw	a5,-40(s0)
	addiw	a5,a5,1
	sw	a5,-40(s0)
.L124:
	.loc 1 569 23 discriminator 1
	lw	a5,-40(s0)
	sext.w	a4,a5
	li	a5,15
	ble	a4,a5,.L129
.LBE21:
	.loc 1 596 1
	j	.L132
.L131:
.LBB22:
	.loc 1 593 13
	nop
.L132:
.LBE22:
	.loc 1 596 1
	nop
	ld	ra,72(sp)
	.cfi_restore 1
	ld	s0,64(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 80
	ld	s1,56(sp)
	.cfi_restore 9
	addi	sp,sp,80
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE27:
	.size	pipe_open, .-pipe_open
	.section	.rodata
	.align	3
.LC15:
	.string	"piperead"
	.text
	.align	1
	.globl	piperead
	.type	piperead, @function
# piperead -- read up to n bytes from the pipe behind fd (blocking).
# In:  a0 = fd -> -52(s0), a1 = dst buffer -> -64(s0), a2 = n -> -56(s0)
# Out: a0 = bytes read; 0 on EOF (empty pipe, write end closed).
# panic()s if table[fd] is NULL, fd is outside [0,15], or the entry has
# no pipe pointer (+88).  Pipe fields: read pos +512, write pos +516,
# write-open flag +524, and a 512-byte ring buffer at the start of the
# pipe object indexed by pos & 511.  Blocks by calling yield() while the
# pipe is empty and the write end is still open (cooperative scheduler).
# Locals: -32 = pipe*, -36 = bytes read so far, -40 = loop index.
piperead:
.LFB28:
	.loc 1 606 1
	.cfi_startproc
	addi	sp,sp,-64
	.cfi_def_cfa_offset 64
	sd	ra,56(sp)
	sd	s0,48(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,64
	.cfi_def_cfa 8, 0
	mv	a5,a0
	sd	a1,-64(s0)
	mv	a4,a2
	sw	a5,-52(s0)
	mv	a5,a4
	sw	a5,-56(s0)
	.loc 1 607 32
	call	get_current_file_table
	sd	a0,-24(s0)
	.loc 1 609 19
	lw	a5,-52(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 609 8
	bne	a5,zero,.L134
	.loc 1 610 9
	lla	a0,.LC15
	call	panic
.L134:
	.loc 1 613 7
	# bounds check: 0 <= fd <= 15
	lw	a5,-52(s0)
	sext.w	a5,a5
	blt	a5,zero,.L135
	.loc 1 613 15 discriminator 1
	lw	a5,-52(s0)
	sext.w	a4,a5
	li	a5,15
	ble	a4,a5,.L136
.L135:
	.loc 1 614 9
	lla	a0,.LC15
	call	panic
.L136:
	.loc 1 616 33
	lw	a5,-52(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 616 19
	ld	a5,88(a5)
	sd	a5,-32(s0)
	.loc 1 618 7
	ld	a5,-32(s0)
	bne	a5,zero,.L138
	.loc 1 619 9
	lla	a0,.LC15
	call	panic
	.loc 1 622 11
	j	.L138
.L140:
	.loc 1 623 8
	call	yield
.L138:
	.loc 1 622 13
	# block while the pipe is empty and the writer is still open
	ld	a5,-32(s0)
	lw	a4,512(a5)
	.loc 1 622 25
	ld	a5,-32(s0)
	lw	a5,516(a5)
	.loc 1 622 34
	bne	a4,a5,.L139
	.loc 1 622 38 discriminator 1
	ld	a5,-32(s0)
	lw	a5,524(a5)
	.loc 1 622 34 discriminator 1
	bne	a5,zero,.L140
.L139:
	.loc 1 626 10
	# empty and writer closed -> EOF, return 0
	ld	a5,-32(s0)
	lw	a4,512(a5)
	.loc 1 626 22
	ld	a5,-32(s0)
	lw	a5,516(a5)
	.loc 1 626 8
	bne	a4,a5,.L141
	.loc 1 626 36 discriminator 1
	ld	a5,-32(s0)
	lw	a5,524(a5)
	.loc 1 626 31 discriminator 1
	bne	a5,zero,.L141
	.loc 1 627 16
	li	a5,0
	j	.L142
.L141:
	.loc 1 631 18
	sw	zero,-36(s0)
.LBB23:
	.loc 1 632 23
	sw	zero,-40(s0)
	.loc 1 632 5
	j	.L143
.L146:
	.loc 1 634 12
	call	yield
.L144:
	.loc 1 633 17
	ld	a5,-32(s0)
	lw	a4,512(a5)
	.loc 1 633 29
	ld	a5,-32(s0)
	lw	a5,516(a5)
	.loc 1 633 38
	bne	a4,a5,.L145
	.loc 1 633 42 discriminator 1
	ld	a5,-32(s0)
	lw	a5,524(a5)
	.loc 1 633 38 discriminator 1
	bne	a5,zero,.L146
.L145:
	.loc 1 638 14
	ld	a5,-32(s0)
	lw	a4,512(a5)
	.loc 1 638 26
	ld	a5,-32(s0)
	lw	a5,516(a5)
	.loc 1 638 12
	bne	a4,a5,.L147
	.loc 1 638 40 discriminator 1
	ld	a5,-32(s0)
	lw	a5,524(a5)
	.loc 1 638 35 discriminator 1
	beq	a5,zero,.L150
.L147:
	.loc 1 642 28
	# dst[i] = ring[readpos & 511]
	ld	a5,-32(s0)
	lw	a5,512(a5)
	.loc 1 642 36
	andi	a5,a5,511
	sext.w	a4,a5
	.loc 1 642 13
	lw	a5,-40(s0)
	sext.w	a5,a5
	mv	a3,a5
	ld	a5,-64(s0)
	add	a5,a5,a3
	.loc 1 642 26
	ld	a3,-32(s0)
	slli	a4,a4,32
	srli	a4,a4,32
	add	a4,a3,a4
	lbu	a4,0(a4)
	.loc 1 642 17
	sb	a4,0(a5)
	.loc 1 643 10
	# readpos++
	ld	a5,-32(s0)
	lw	a5,512(a5)
	.loc 1 643 17
	addiw	a5,a5,1
	sext.w	a4,a5
	ld	a5,-32(s0)
	sw	a4,512(a5)
	.loc 1 646 10
	lw	a5,-36(s0)
	sext.w	a5,a5
	addiw	a5,a5,1
	sext.w	a5,a5
	sw	a5,-36(s0)
	.loc 1 632 62 discriminator 2
	lw	a5,-40(s0)
	sext.w	a5,a5
	addiw	a5,a5,1
	sext.w	a5,a5
	sw	a5,-40(s0)
.L143:
	.loc 1 632 32 discriminator 1
	# continue while i < n and data is available (unsigned pos compare)
	lw	a5,-40(s0)
	sext.w	a5,a5
	.loc 1 632 36 discriminator 1
	lw	a4,-56(s0)
	sext.w	a4,a4
	ble	a4,a5,.L148
	.loc 1 632 40 discriminator 3
	ld	a5,-32(s0)
	lw	a4,512(a5)
	.loc 1 632 51 discriminator 3
	ld	a5,-32(s0)
	lw	a5,516(a5)
	.loc 1 632 36 discriminator 3
	bltu	a4,a5,.L144
	j	.L148
.L150:
	.loc 1 639 13
	nop
.L148:
.LBE23:
	.loc 1 650 12
	lw	a5,-36(s0)
	sext.w	a5,a5
.L142:
	.loc 1 651 1
	mv	a0,a5
	ld	ra,56(sp)
	.cfi_restore 1
	ld	s0,48(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 64
	addi	sp,sp,64
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE28:
	.size	piperead, .-piperead
	.section	.rodata
	.align	3
.LC16:
	.string	"pipewrite"
	.text
	.align	1
	.globl	pipewrite
	.type	pipewrite, @function
# pipewrite -- write n bytes into the pipe behind fd (blocking).
# In:  a0 = fd -> -52(s0), a1 = src buffer -> -64(s0), a2 = n -> -56(s0)
# Out: a0 = n on success; if the read end closes mid-write, the bytes
#      written so far (if > 0) or -1.
# panic()s on the same invalid-fd / missing-pipe conditions as piperead.
# Pipe fields: read pos +512, write pos +516, read-open flag +520,
# 512-byte ring buffer at the start of the object indexed by pos & 511.
# Blocks via yield() while the ring is full (writepos - readpos == 512)
# and the read end is still open.
# Locals: -32 = pipe*, -36 = loop index / bytes written.
pipewrite:
.LFB29:
	.loc 1 656 1
	.cfi_startproc
	addi	sp,sp,-64
	.cfi_def_cfa_offset 64
	sd	ra,56(sp)
	sd	s0,48(sp)
	.cfi_offset 1, -8
	.cfi_offset 8, -16
	addi	s0,sp,64
	.cfi_def_cfa 8, 0
	mv	a5,a0
	sd	a1,-64(s0)
	mv	a4,a2
	sw	a5,-52(s0)
	mv	a5,a4
	sw	a5,-56(s0)
	.loc 1 657 32
	call	get_current_file_table
	sd	a0,-24(s0)
	.loc 1 659 19
	lw	a5,-52(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 659 8
	bne	a5,zero,.L152
	.loc 1 660 9
	lla	a0,.LC16
	call	panic
.L152:
	.loc 1 663 7
	# bounds check: 0 <= fd <= 15
	lw	a5,-52(s0)
	sext.w	a5,a5
	blt	a5,zero,.L153
	.loc 1 663 15 discriminator 1
	lw	a5,-52(s0)
	sext.w	a4,a5
	li	a5,15
	ble	a4,a5,.L154
.L153:
	.loc 1 664 9
	lla	a0,.LC16
	call	panic
.L154:
	.loc 1 666 33
	lw	a5,-52(s0)
	slli	a5,a5,3
	ld	a4,-24(s0)
	add	a5,a4,a5
	ld	a5,0(a5)
	.loc 1 666 19
	ld	a5,88(a5)
	sd	a5,-32(s0)
	.loc 1 668 7
	ld	a5,-32(s0)
	bne	a5,zero,.L155
	.loc 1 669 9
	lla	a0,.LC16
	call	panic
.L155:
.LBB24:
	.loc 1 672 23
	sw	zero,-36(s0)
	.loc 1 672 5
	j	.L156
.L159:
	.loc 1 675 11
	call	yield
.L157:
	.loc 1 674 15
	# block while the ring is full and the reader is still open
	ld	a5,-32(s0)
	lw	a4,516(a5)
	.loc 1 674 27
	ld	a5,-32(s0)
	lw	a5,512(a5)
	.loc 1 674 24
	subw	a5,a4,a5
	sext.w	a4,a5
	.loc 1 674 48
	li	a5,512
	bne	a4,a5,.L158
	.loc 1 674 52 discriminator 1
	ld	a5,-32(s0)
	lw	a5,520(a5)
	.loc 1 674 48 discriminator 1
	bne	a5,zero,.L159
.L158:
	.loc 1 678 13
	# reader gone: return bytes written so far, or -1 if none
	ld	a5,-32(s0)
	lw	a5,520(a5)
	.loc 1 678 10
	bne	a5,zero,.L160
	.loc 1 680 21
	lw	a5,-36(s0)
	sext.w	a5,a5
	.loc 1 680 30
	ble	a5,zero,.L161
	.loc 1 680 30 is_stmt 0 discriminator 1
	lw	a5,-36(s0)
	sext.w	a5,a5
	.loc 1 680 30
	j	.L163
.L161:
	.loc 1 680 30 discriminator 2
	li	a5,-1
	.loc 1 680 30
	j	.L163
.L160:
	.loc 1 682 44 is_stmt 1
	# ring[writepos & 511] = src[i]
	lw	a5,-36(s0)
	sext.w	a5,a5
	mv	a4,a5
	ld	a5,-64(s0)
	add	a5,a5,a4
	.loc 1 682 16
	ld	a4,-32(s0)
	lw	a4,516(a4)
	.loc 1 682 25
	andi	a4,a4,511
	sext.w	a2,a4
	.loc 1 682 44
	lbu	a4,0(a5)
	.loc 1 682 38
	ld	a3,-32(s0)
	slli	a5,a2,32
	srli	a5,a5,32
	add	a5,a3,a5
	sb	a4,0(a5)
	.loc 1 683 8
	# writepos++
	ld	a5,-32(s0)
	lw	a5,516(a5)
	.loc 1 683 16
	addiw	a5,a5,1
	sext.w	a4,a5
	ld	a5,-32(s0)
	sw	a4,516(a5)
	.loc 1 672 38 discriminator 2
	lw	a5,-36(s0)
	sext.w	a5,a5
	addiw	a5,a5,1
	sext.w	a5,a5
	sw	a5,-36(s0)
.L156:
	.loc 1 672 32 discriminator 1
	lw	a5,-36(s0)
	sext.w	a5,a5
	lw	a4,-56(s0)
	sext.w	a4,a4
	bgt	a4,a5,.L157
.LBE24:
	.loc 1 690 12
	lw	a5,-56(s0)
.L163:
	.loc 1 691 1
	mv	a0,a5
	ld	ra,56(sp)
	.cfi_restore 1
	ld	s0,48(sp)
	.cfi_restore 8
	.cfi_def_cfa 2, 64
	addi	sp,sp,64
	.cfi_def_cfa_offset 0
	jr	ra
	.cfi_endproc
.LFE29:
	.size	pipewrite, .-pipewrite
# ----------------------------------------------------------------------
# fs_open(path) -- compiler-generated.
# Locals (relative to s0): path ptr at -264, ft at -48, inode number at
#   -52, path buffer (128 bytes) at -184, inode copy at -256, slot index
#   at -36; s1 is saved and used to hold a table-slot address.
# Normalizes path into the local buffer (copied directly when it starts
# with '/', otherwise prefixed with "/"), looks it up, reads the inode,
# accepts only type values 1 or 2 (halfword at inode offset 0), then
# claims the first free file-table slot in [3,15], initializes it via
# new_file_table, copies 72 bytes of inode data into it, and returns the
# slot index.  Returns -1 on lookup failure, bad type, or a full table.
# NOTE(review): stored fields -- type=3 at file offset 0, refcount=1 at
# 80, inode number at 4, read offset zeroed at 76, flags zeroed at
# 100/104 -- names inferred from usage elsewhere in this file; confirm.
# ----------------------------------------------------------------------
.align 1
.globl fs_open
.type fs_open, @function
fs_open:
.LFB30:
.loc 1 698 31
.cfi_startproc
addi sp,sp,-272
.cfi_def_cfa_offset 272
sd ra,264(sp)
sd s0,256(sp)
sd s1,248(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
.cfi_offset 9, -24
addi s0,sp,272
.cfi_def_cfa 8, 0
sd a0,-264(s0)
.loc 1 699 32
call get_current_file_table
sd a0,-48(s0)
# absolute path? (path[0] == '/')
.loc 1 703 13
ld a5,-264(s0)
lbu a5,0(a5)
.loc 1 703 8
mv a4,a5
li a5,47
bne a4,a5,.L166
# strncpy(local_buf, path, 127); terminator byte cleared at -57(s0)
.loc 1 705 9
addi a5,s0,-184
li a2,127
ld a1,-264(s0)
mv a0,a5
call strncpy
.loc 1 706 34
sb zero,-57(s0)
j .L167
.L166:
# relative path: buf = "/" then strncat(buf, path, 127 - strlen(buf))
.loc 1 709 18
li a5,47
sb a5,-184(s0)
.loc 1 710 18
sb zero,-183(s0)
.loc 1 712 46
addi a5,s0,-184
mv a0,a5
call strlen
mv a5,a0
.loc 1 712 46 is_stmt 0 discriminator 1
mv a4,a5
.loc 1 712 9 is_stmt 1 discriminator 1
li a5,127
sub a4,a5,a4
addi a5,s0,-184
mv a2,a4
ld a1,-264(s0)
mv a0,a5
call strncat
.L167:
# inum = path_lookup(buf); 0 means not found
.loc 1 715 21
addi a5,s0,-184
mv a0,a5
call path_lookup
mv a5,a0
sw a5,-52(s0)
.loc 1 716 8
lw a5,-52(s0)
sext.w a5,a5
bne a5,zero,.L168
.loc 1 717 16
li a5,-1
j .L174
.L168:
# read_inode(inum, &local_inode)
.loc 1 721 5
addi a4,s0,-256
lw a5,-52(s0)
mv a1,a4
mv a0,a5
call read_inode
# accept only inode type 2 or 1 (halfword at offset 0)
.loc 1 722 11
lhu a5,-256(s0)
.loc 1 722 8
sext.w a4,a5
li a5,2
beq a4,a5,.L170
.loc 1 722 32 discriminator 1
lhu a5,-256(s0)
.loc 1 722 27 discriminator 1
sext.w a4,a5
li a5,1
beq a4,a5,.L170
.loc 1 722 55 discriminator 2
li a5,-1
.loc 1 722 55 is_stmt 0
j .L174
.L170:
.LBB25:
# scan slots 3..15 for a free file-table entry
.loc 1 724 14 is_stmt 1
li a5,3
sw a5,-36(s0)
.loc 1 724 5
j .L171
.L173:
.loc 1 725 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 725 12
bne a5,zero,.L172
# free slot found: ft[i] = new_file_table()
.loc 1 726 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add s1,a4,a5
.loc 1 726 29
call new_file_table
mv a5,a0
.loc 1 726 27 discriminator 1
sd a5,0(s1)
.loc 1 728 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 728 33
li a4,3
sw a4,0(a5)
.loc 1 729 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 729 34
li a4,1
sw a4,80(a5)
.loc 1 730 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 730 34
lw a4,-52(s0)
sw a4,4(a5)
.loc 1 731 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
# struct-copy 72 bytes of the inode (8 x 8-byte loads + 1 word)
# into the file entry starting at offset 8
.loc 1 731 34
ld t1,-256(s0)
ld a7,-248(s0)
ld a6,-240(s0)
ld a0,-232(s0)
ld a1,-224(s0)
ld a2,-216(s0)
ld a3,-208(s0)
ld a4,-200(s0)
sd t1,8(a5)
sd a7,16(a5)
sd a6,24(a5)
sd a0,32(a5)
sd a1,40(a5)
sd a2,48(a5)
sd a3,56(a5)
sd a4,64(a5)
lw a4,-192(s0)
sw a4,72(a5)
.loc 1 732 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 732 34
sw zero,76(a5)
.loc 1 733 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 733 40
sw zero,100(a5)
.loc 1 734 23
lw a5,-36(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 734 41
sw zero,104(a5)
# success: return the slot index (the new fd)
.loc 1 735 20
lw a5,-36(s0)
j .L174
.L172:
.loc 1 724 42 discriminator 2
lw a5,-36(s0)
addiw a5,a5,1
sw a5,-36(s0)
.L171:
.loc 1 724 23 discriminator 1
lw a5,-36(s0)
sext.w a4,a5
li a5,15
ble a4,a5,.L173
.LBE25:
# table full
.loc 1 738 12
li a5,-1
.L174:
# common epilogue: result in a5
.loc 1 739 1
mv a0,a5
ld ra,264(sp)
.cfi_restore 1
ld s0,256(sp)
.cfi_restore 8
.cfi_def_cfa 2, 272
ld s1,248(sp)
.cfi_restore 9
addi sp,sp,272
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE30:
.size fs_open, .-fs_open
# ----------------------------------------------------------------------
# fs_read(fd, buf, count) -- compiler-generated.
# Locals (relative to s0): fd at -52, buf at -64, count at -72, ft at
#   -24, idx copy at -28, file ptr at -40, remaining at -44, len at -48.
# Returns -1 for an empty table slot or fd outside [0,15]; returns 0 at
# end of file.  Otherwise len = min(remaining, count) where
# remaining = size(file+16) - offset(file+76); calls
# read_data(file+8, offset, buf, len), advances the offset by len, and
# returns len.
# NOTE(review): file+8 is presumably the embedded inode copy set up by
# fs_open above -- confirm against the C struct.
# ----------------------------------------------------------------------
.align 1
.globl fs_read
.type fs_read, @function
fs_read:
.LFB31:
.loc 1 743 50
.cfi_startproc
addi sp,sp,-80
.cfi_def_cfa_offset 80
sd ra,72(sp)
sd s0,64(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,80
.cfi_def_cfa 8, 0
mv a5,a0
sd a1,-64(s0)
sd a2,-72(s0)
sw a5,-52(s0)
.loc 1 744 32
call get_current_file_table
sd a0,-24(s0)
# ft[fd] must be non-null
.loc 1 746 18
lw a5,-52(s0)
slli a5,a5,3
ld a4,-24(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 746 7
bne a5,zero,.L176
.loc 1 747 16
li a5,-1
j .L177
.L176:
# range-check fd against [0,15]
.loc 1 750 9
lw a5,-52(s0)
sw a5,-28(s0)
.loc 1 751 8
lw a5,-28(s0)
sext.w a5,a5
blt a5,zero,.L178
.loc 1 751 17 discriminator 1
lw a5,-28(s0)
sext.w a4,a5
li a5,15
ble a4,a5,.L179
.L178:
.loc 1 752 16
li a5,-1
j .L177
.L179:
# f = ft[idx]
.loc 1 754 32
lw a5,-28(s0)
slli a5,a5,3
ld a4,-24(s0)
add a5,a4,a5
.loc 1 754 18
ld a5,0(a5)
sd a5,-40(s0)
# remaining = f->size (offset 16) - f->offset (offset 76)
.loc 1 755 32
ld a5,-40(s0)
lw a4,16(a5)
.loc 1 755 41
ld a5,-40(s0)
lw a5,76(a5)
.loc 1 755 14
subw a5,a4,a5
sw a5,-44(s0)
# EOF: nothing left to read
.loc 1 756 8
lw a5,-44(s0)
sext.w a5,a5
bne a5,zero,.L180
.loc 1 756 32 discriminator 1
li a5,0
.loc 1 756 32 is_stmt 0
j .L177
.L180:
# len = min(remaining, count)  (unsigned compare)
.loc 1 758 52 is_stmt 1
lwu a4,-44(s0)
ld a5,-72(s0)
bleu a5,a4,.L181
mv a5,a4
.L181:
.loc 1 758 14
sw a5,-48(s0)
# read_data(f + 8, f->offset, buf, len)
.loc 1 759 5
ld a5,-40(s0)
addi a4,a5,8
ld a5,-40(s0)
lw a5,76(a5)
lw a3,-48(s0)
ld a2,-64(s0)
mv a1,a5
mv a0,a4
call read_data
# f->offset += len
.loc 1 760 6
ld a5,-40(s0)
lw a5,76(a5)
.loc 1 760 12
lw a4,-48(s0)
addw a5,a4,a5
sext.w a4,a5
ld a5,-40(s0)
sw a4,76(a5)
# return len
.loc 1 761 12
lw a5,-48(s0)
.L177:
.loc 1 762 1
mv a0,a5
ld ra,72(sp)
.cfi_restore 1
ld s0,64(sp)
.cfi_restore 8
.cfi_def_cfa 2, 80
addi sp,sp,80
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE31:
.size fs_read, .-fs_read
# ----------------------------------------------------------------------
# fs_size(fd) -- compiler-generated.
# Locals (relative to s0): fd at -52, ft at -24, idx copy at -28,
#   file ptr at -40.
# Returns the 32-bit size field at file offset 16 for slot fd, or -1
# when the slot is empty, fd is outside [0,15], or the word at file
# offset 80 (refcount, per fs_open above) is zero.
# ----------------------------------------------------------------------
.align 1
.globl fs_size
.type fs_size, @function
fs_size:
.LFB32:
.loc 1 764 25
.cfi_startproc
addi sp,sp,-64
.cfi_def_cfa_offset 64
sd ra,56(sp)
sd s0,48(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,64
.cfi_def_cfa 8, 0
mv a5,a0
sw a5,-52(s0)
.loc 1 765 32
call get_current_file_table
sd a0,-24(s0)
# ft[fd] must be non-null
.loc 1 767 18
lw a5,-52(s0)
slli a5,a5,3
ld a4,-24(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 767 7
bne a5,zero,.L183
.loc 1 768 16
li a5,-1
j .L184
.L183:
# range-check fd and require refcount (offset 80) != 0
.loc 1 771 9
lw a5,-52(s0)
sw a5,-28(s0)
.loc 1 772 8
lw a5,-28(s0)
sext.w a5,a5
blt a5,zero,.L185
.loc 1 772 17 discriminator 1
lw a5,-28(s0)
sext.w a4,a5
li a5,15
bgt a4,a5,.L185
.loc 1 772 56 discriminator 2
lw a5,-28(s0)
slli a5,a5,3
ld a4,-24(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 772 61 discriminator 2
lw a5,80(a5)
.loc 1 772 42 discriminator 2
bne a5,zero,.L186
.L185:
.loc 1 773 16
li a5,-1
j .L184
.L186:
# return ft[idx]->size (offset 16)
.loc 1 775 32
lw a5,-28(s0)
slli a5,a5,3
ld a4,-24(s0)
add a5,a4,a5
.loc 1 775 18
ld a5,0(a5)
sd a5,-40(s0)
.loc 1 776 18
ld a5,-40(s0)
lw a5,16(a5)
.L184:
.loc 1 777 1
mv a0,a5
ld ra,56(sp)
.cfi_restore 1
ld s0,48(sp)
.cfi_restore 8
.cfi_def_cfa 2, 64
addi sp,sp,64
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE32:
.size fs_size, .-fs_size
# ----------------------------------------------------------------------
# fs_close(fd, remove_owner) -- compiler-generated.
# Locals (relative to s0): fd at -56 (stored/compared as 64-bit), flag
#   at -60, ft at -32, file ptr at -40, pipe ptr at -48, loop i at -20.
# Returns -1 for fd outside [0,15] or an empty slot.  Clears the table
# slot, optionally removes the current process from the file's owner
# list (gProc[gActiveProc]), and decrements the file refcount (offset
# 80).  While the refcount stays positive it returns 0 without freeing.
# On the last reference, if a pipe is attached (file+88) it decrements
# the pipe's reader/writer counters (528/532, gated by the file's flags
# at 100/104, zeroing the open flags at 520/524 when a side hits 0) and
# the pipe refcount at 536; when that reaches 0 it detaches the pipe
# from every owner file (pointer slots starting at pipe offset 544,
# count at pipe offset 800) and calls free_pipe.  Finally frees the
# file and returns 0.
# NOTE(review): all pipe field names above are inferred from offsets --
# confirm against the pipe struct definition.
# ----------------------------------------------------------------------
.align 1
.globl fs_close
.type fs_close, @function
fs_close:
.LFB33:
.loc 1 782 36
.cfi_startproc
addi sp,sp,-64
.cfi_def_cfa_offset 64
sd ra,56(sp)
sd s0,48(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,64
.cfi_def_cfa 8, 0
sd a0,-56(s0)
mv a5,a1
sw a5,-60(s0)
.loc 1 783 24
call get_current_file_table
sd a0,-32(s0)
# range-check fd (64-bit compare) against [0,15]
.loc 1 784 8
ld a5,-56(s0)
blt a5,zero,.L188
.loc 1 784 16 discriminator 2
ld a4,-56(s0)
li a5,15
ble a4,a5,.L189
.L188:
.loc 1 784 48 discriminator 3
li a5,-1
.loc 1 784 48 is_stmt 0
j .L190
.L189:
# f = ft[fd]; must be non-null
.loc 1 785 24 is_stmt 1
ld a5,-56(s0)
slli a5,a5,3
ld a4,-32(s0)
add a5,a4,a5
.loc 1 785 18
ld a5,0(a5)
sd a5,-40(s0)
.loc 1 786 8
ld a5,-40(s0)
bne a5,zero,.L191
.loc 1 786 20 discriminator 1
li a5,-1
.loc 1 786 20 is_stmt 0
j .L190
.L191:
# clear the table slot
.loc 1 789 7 is_stmt 1
ld a5,-56(s0)
slli a5,a5,3
ld a4,-32(s0)
add a5,a4,a5
.loc 1 789 12
sd zero,0(a5)
# optionally: owner_remove(f, gProc[gActiveProc])
.loc 1 792 8
lw a5,-60(s0)
sext.w a5,a5
beq a5,zero,.L192
.loc 1 792 18 discriminator 1
lla a5,gActiveProc
lw a5,0(a5)
lla a4,gProc
slli a5,a5,3
add a5,a4,a5
ld a5,0(a5)
mv a1,a5
ld a0,-40(s0)
call owner_remove
.L192:
# if (f->refcount > 0) f->refcount--   (offset 80)
.loc 1 795 10
ld a5,-40(s0)
lw a5,80(a5)
.loc 1 795 8
ble a5,zero,.L193
.loc 1 795 23 discriminator 1
ld a5,-40(s0)
lw a5,80(a5)
.loc 1 795 29 discriminator 1
addiw a5,a5,-1
sext.w a4,a5
ld a5,-40(s0)
sw a4,80(a5)
.L193:
# still referenced elsewhere: done, return 0
.loc 1 798 10
ld a5,-40(s0)
lw a5,80(a5)
.loc 1 798 8
ble a5,zero,.L194
.loc 1 798 29 discriminator 1
li a5,0
.loc 1 798 29 is_stmt 0
j .L190
.L194:
# last reference: tear down any attached pipe (f + 88)
.loc 1 801 19 is_stmt 1
ld a5,-40(s0)
ld a5,88(a5)
sd a5,-48(s0)
.loc 1 802 8
ld a5,-48(s0)
beq a5,zero,.L195
# read side: if f flag@100 and pipe counter@528 > 0, decrement it;
# when it reaches 0, clear pipe flag@520
.loc 1 803 14
ld a5,-40(s0)
lw a5,100(a5)
.loc 1 803 12
beq a5,zero,.L196
.loc 1 803 31 discriminator 1
ld a5,-48(s0)
lw a5,528(a5)
.loc 1 803 27 discriminator 1
ble a5,zero,.L196
.loc 1 803 55 discriminator 2
ld a5,-48(s0)
lw a5,528(a5)
.loc 1 803 52 discriminator 2
addiw a5,a5,-1
sext.w a4,a5
.loc 1 803 51 discriminator 2
ld a5,-48(s0)
sw a4,528(a5)
.loc 1 803 55 discriminator 2
ld a5,-48(s0)
lw a5,528(a5)
.loc 1 803 51 discriminator 2
bne a5,zero,.L196
.loc 1 803 85 discriminator 3
ld a5,-48(s0)
sw zero,520(a5)
.L196:
# write side: mirror of the above for flag@104 / counter@532 / flag@524
.loc 1 804 14
ld a5,-40(s0)
lw a5,104(a5)
.loc 1 804 12
beq a5,zero,.L197
.loc 1 804 31 discriminator 1
ld a5,-48(s0)
lw a5,532(a5)
.loc 1 804 27 discriminator 1
ble a5,zero,.L197
.loc 1 804 55 discriminator 2
ld a5,-48(s0)
lw a5,532(a5)
.loc 1 804 52 discriminator 2
addiw a5,a5,-1
sext.w a4,a5
.loc 1 804 51 discriminator 2
ld a5,-48(s0)
sw a4,532(a5)
.loc 1 804 55 discriminator 2
ld a5,-48(s0)
lw a5,532(a5)
.loc 1 804 51 discriminator 2
bne a5,zero,.L197
.loc 1 804 85 discriminator 3
ld a5,-48(s0)
sw zero,524(a5)
.L197:
# if (pipe counter@536 > 0) counter@536--
.loc 1 805 14
ld a5,-48(s0)
lw a5,536(a5)
.loc 1 805 12
ble a5,zero,.L198
.loc 1 805 27 discriminator 1
ld a5,-48(s0)
lw a5,536(a5)
.loc 1 805 33 discriminator 1
addiw a5,a5,-1
sext.w a4,a5
ld a5,-48(s0)
sw a4,536(a5)
.L198:
# pipe unreferenced: null out each owner's back-pointer (file+88),
# then free the pipe
.loc 1 806 14
ld a5,-48(s0)
lw a5,536(a5)
.loc 1 806 12
bne a5,zero,.L195
.LBB26:
.loc 1 808 22
sw zero,-20(s0)
.loc 1 808 13
j .L199
.L201:
# owner slots live at pipe offset (i + 68) * 8, i.e. from 544
.loc 1 809 35
ld a4,-48(s0)
lw a5,-20(s0)
addi a5,a5,68
slli a5,a5,3
add a5,a4,a5
ld a5,0(a5)
.loc 1 809 20
beq a5,zero,.L200
.loc 1 809 54 discriminator 1
ld a4,-48(s0)
lw a5,-20(s0)
addi a5,a5,68
slli a5,a5,3
add a5,a4,a5
ld a5,0(a5)
.loc 1 809 64 discriminator 1
sd zero,88(a5)
.L200:
.loc 1 808 54 discriminator 2
lw a5,-20(s0)
addiw a5,a5,1
sw a5,-20(s0)
.L199:
# loop bound: owner count at pipe offset 800
.loc 1 808 34 discriminator 1
ld a5,-48(s0)
lw a5,800(a5)
.loc 1 808 31 discriminator 1
lw a4,-20(s0)
sext.w a4,a4
blt a4,a5,.L201
.LBE26:
.loc 1 811 13
ld a0,-48(s0)
call free_pipe
.L195:
# release the file entry; return 0
.loc 1 816 5
ld a0,-40(s0)
call free_file
.loc 1 817 12
li a5,0
.L190:
.loc 1 818 1
mv a0,a5
ld ra,56(sp)
.cfi_restore 1
ld s0,48(sp)
.cfi_restore 8
.cfi_def_cfa 2, 64
addi sp,sp,64
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE33:
.size fs_close, .-fs_close
# ----------------------------------------------------------------------
# fs_exit(ft) -- compiler-generated.
# Stores its pointer argument at -40(s0) (otherwise unused here) and
# calls fs_close(i, 1) for every descriptor i in [0,15], i.e. closes
# the whole table with owner removal enabled.  No return value.
# ----------------------------------------------------------------------
.align 1
.globl fs_exit
.type fs_exit, @function
fs_exit:
.LFB34:
.loc 1 821 1
.cfi_startproc
addi sp,sp,-48
.cfi_def_cfa_offset 48
sd ra,40(sp)
sd s0,32(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,48
.cfi_def_cfa 8, 0
sd a0,-40(s0)
.LBB27:
# for (i = 0; i <= 15; i++) fs_close(i, 1);
.loc 1 822 13
sw zero,-20(s0)
.loc 1 822 5
j .L203
.L204:
.loc 1 823 9
lw a5,-20(s0)
li a1,1
mv a0,a5
call fs_close
.loc 1 822 37 discriminator 3
lw a5,-20(s0)
addiw a5,a5,1
sw a5,-20(s0)
.L203:
.loc 1 822 19 discriminator 1
lw a5,-20(s0)
sext.w a4,a5
li a5,15
ble a4,a5,.L204
.LBE27:
.loc 1 825 1
nop
nop
ld ra,40(sp)
.cfi_restore 1
ld s0,32(sp)
.cfi_restore 8
.cfi_def_cfa 2, 48
addi sp,sp,48
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE34:
.size fs_exit, .-fs_exit
# ----------------------------------------------------------------------
# fs_dup_table(proc, dst, src) -- compiler-generated.
# Locals (relative to s0): proc at -40, dst table at -48, src table at
#   -56, loop index i at -20.
# For each slot i in [0,15] whose src entry is non-null: copies the
# file pointer into dst, increments the file's refcount (offset 80),
# and calls owner_add(file, proc).  No return value.
# ----------------------------------------------------------------------
.align 1
.globl fs_dup_table
.type fs_dup_table, @function
fs_dup_table:
.LFB35:
.loc 1 828 1
.cfi_startproc
addi sp,sp,-64
.cfi_def_cfa_offset 64
sd ra,56(sp)
sd s0,48(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,64
.cfi_def_cfa 8, 0
sd a0,-40(s0)
sd a1,-48(s0)
sd a2,-56(s0)
.LBB28:
# for (i = 0; i <= 15; i++) { ... }
.loc 1 830 13
sw zero,-20(s0)
.loc 1 830 5
j .L206
.L208:
# skip empty src slots
.loc 1 831 16
lw a5,-20(s0)
slli a5,a5,3
ld a4,-56(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 831 11
beq a5,zero,.L207
# dst[i] = src[i]
.loc 1 832 29
lw a5,-20(s0)
slli a5,a5,3
ld a4,-56(s0)
add a4,a4,a5
.loc 1 832 19
lw a5,-20(s0)
slli a5,a5,3
ld a3,-48(s0)
add a5,a3,a5
.loc 1 832 29
ld a4,0(a4)
.loc 1 832 23
sd a4,0(a5)
# dst[i]->refcount++ (offset 80)
.loc 1 833 19
lw a5,-20(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 833 22
lw a4,80(a5)
.loc 1 833 28
addiw a4,a4,1
sext.w a4,a4
sw a4,80(a5)
# owner_add(dst[i], proc)
.loc 1 836 29
lw a5,-20(s0)
slli a5,a5,3
ld a4,-48(s0)
add a5,a4,a5
.loc 1 836 13
ld a5,0(a5)
ld a1,-40(s0)
mv a0,a5
call owner_add
.L207:
.loc 1 830 37 discriminator 2
lw a5,-20(s0)
addiw a5,a5,1
sw a5,-20(s0)
.L206:
.loc 1 830 19 discriminator 1
lw a5,-20(s0)
sext.w a4,a5
li a5,15
ble a4,a5,.L208
.LBE28:
.loc 1 862 1
nop
nop
ld ra,56(sp)
.cfi_restore 1
ld s0,48(sp)
.cfi_restore 8
.cfi_def_cfa 2, 64
addi sp,sp,64
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE35:
.size fs_dup_table, .-fs_dup_table
# ----------------------------------------------------------------------
# free_fs_table(ft) -- compiler-generated.
# Spills its argument to -24(s0) and immediately returns: the C body
# is empty here (the .loc jumps from line 865 straight to the closing
# line 882, suggesting the real cleanup is compiled out or elsewhere).
# ----------------------------------------------------------------------
.align 1
.globl free_fs_table
.type free_fs_table, @function
free_fs_table:
.LFB36:
.loc 1 865 1
.cfi_startproc
addi sp,sp,-32
.cfi_def_cfa_offset 32
sd ra,24(sp)
sd s0,16(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,32
.cfi_def_cfa 8, 0
sd a0,-24(s0)
.loc 1 882 1
nop
ld ra,24(sp)
.cfi_restore 1
ld s0,16(sp)
.cfi_restore 8
.cfi_def_cfa 2, 32
addi sp,sp,32
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE36:
.size free_fs_table, .-free_fs_table
# ----------------------------------------------------------------------
# fs_dup2(oldfd, newfd) -- compiler-generated, void result (a0 is not
#   set on the return path).
# Locals (relative to s0): oldfd at -36, newfd at -40, ft at -24.
# Silently returns when either fd is outside [0,15], when ft[oldfd] is
# empty, or when oldfd == newfd.  Otherwise closes any existing
# ft[newfd] via fs_close(newfd, 0), copies ft[oldfd] into ft[newfd],
# and increments the shared file's refcount (offset 80).
# ----------------------------------------------------------------------
.align 1
.globl fs_dup2
.type fs_dup2, @function
fs_dup2:
.LFB37:
.loc 1 884 36
.cfi_startproc
addi sp,sp,-48
.cfi_def_cfa_offset 48
sd ra,40(sp)
sd s0,32(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,48
.cfi_def_cfa 8, 0
mv a5,a0
mv a4,a1
sw a5,-36(s0)
mv a5,a4
sw a5,-40(s0)
.loc 1 885 24
call get_current_file_table
sd a0,-24(s0)
# guard: oldfd in [0,15]
.loc 1 886 8
lw a5,-36(s0)
sext.w a5,a5
blt a5,zero,.L219
.loc 1 886 19 discriminator 2
lw a5,-36(s0)
sext.w a4,a5
li a5,15
bgt a4,a5,.L219
# guard: newfd in [0,15]
.loc 1 887 8
lw a5,-40(s0)
sext.w a5,a5
blt a5,zero,.L220
.loc 1 887 19 discriminator 2
lw a5,-40(s0)
sext.w a4,a5
li a5,15
bgt a4,a5,.L220
# guard: ft[oldfd] must exist
.loc 1 888 11
lw a5,-36(s0)
slli a5,a5,3
ld a4,-24(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 888 8
beq a5,zero,.L221
# guard: oldfd != newfd
.loc 1 889 8
lw a5,-36(s0)
mv a4,a5
lw a5,-40(s0)
sext.w a4,a4
sext.w a5,a5
beq a4,a5,.L222
# close the target slot first (no owner removal)
.loc 1 891 11
lw a5,-40(s0)
slli a5,a5,3
ld a4,-24(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 891 8
beq a5,zero,.L218
.loc 1 891 28 discriminator 1
lw a5,-40(s0)
li a1,0
mv a0,a5
call fs_close
.L218:
# ft[newfd] = ft[oldfd]
.loc 1 892 19
lw a5,-36(s0)
slli a5,a5,3
ld a4,-24(s0)
add a4,a4,a5
.loc 1 892 7
lw a5,-40(s0)
slli a5,a5,3
ld a3,-24(s0)
add a5,a3,a5
.loc 1 892 19
ld a4,0(a4)
.loc 1 892 15
sd a4,0(a5)
# ft[oldfd]->refcount++ (offset 80)
.loc 1 893 7
lw a5,-36(s0)
slli a5,a5,3
ld a4,-24(s0)
add a5,a4,a5
ld a5,0(a5)
.loc 1 893 14
lw a4,80(a5)
.loc 1 893 20
addiw a4,a4,1
sext.w a4,a4
sw a4,80(a5)
j .L210
.L219:
.loc 1 886 47
nop
j .L210
.L220:
.loc 1 887 47
nop
j .L210
.L221:
.loc 1 888 28
nop
j .L210
.L222:
.loc 1 889 25
nop
.L210:
.loc 1 895 1
ld ra,40(sp)
.cfi_restore 1
ld s0,32(sp)
.cfi_restore 8
.cfi_def_cfa 2, 48
addi sp,sp,48
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE37:
.size fs_dup2, .-fs_dup2
.Letext0:
.file 2 "/opt/homebrew/Cellar/riscv-gnu-toolchain/main/lib/gcc/riscv64-unknown-elf/14.2.0/include/stdint-gcc.h"
.file 3 "/opt/homebrew/Cellar/riscv-gnu-toolchain/main/lib/gcc/riscv64-unknown-elf/14.2.0/include/stddef.h"
.file 4 "fs.h"
.file 5 "common.h"
.section .debug_info,"",@progbits
.Ldebug_info0:
.4byte 0x1997
.2byte 0x5
.byte 0x1
.byte 0x8
.4byte .Ldebug_abbrev0
.uleb128 0x26
.4byte .LASF197
.byte 0x1d
.4byte .LASF0
.4byte .LASF1
.8byte .Ltext0
.8byte .Letext0-.Ltext0
.4byte .Ldebug_line0
.uleb128 0xf
.byte 0x1
.byte 0x6
.4byte .LASF2
.uleb128 0xf
.byte 0x2
.byte 0x5
.4byte .LASF3
.uleb128 0x10
.4byte .LASF5
.byte 0x2
.byte 0x28
.byte 0x18
.4byte 0x48
.uleb128 0x27
.byte 0x4
.byte 0x5
.string "int"
.uleb128 0x19
.4byte 0x48
.uleb128 0xf
.byte 0x8
.byte 0x5
.4byte .LASF4
.uleb128 0x10
.4byte .LASF6
.byte 0x2
.byte 0x2e
.byte 0x18
.4byte 0x6c
.uleb128 0x19
.4byte 0x5b
.uleb128 0xf
.byte 0x1
.byte 0x8
.4byte .LASF7
.uleb128 0x10
.4byte .LASF8
.byte 0x2
.byte 0x31
.byte 0x19
.4byte 0x7f
.uleb128 0xf
.byte 0x2
.byte 0x7
.4byte .LASF9
.uleb128 0x10
.4byte .LASF10
.byte 0x2
.byte 0x34
.byte 0x19
.4byte 0x92
.uleb128 0xf
.byte 0x4
.byte 0x7
.4byte .LASF11
.uleb128 0x10
.4byte .LASF12
.byte 0x2
.byte 0x37
.byte 0x19
.4byte 0xa5
.uleb128 0xf
.byte 0x8
.byte 0x7
.4byte .LASF13
.uleb128 0x10
.4byte .LASF14
.byte 0x2
.byte 0x56
.byte 0x1a
.4byte 0xa5
.uleb128 0x10
.4byte .LASF15
.byte 0x3
.byte 0xd6
.byte 0x17
.4byte 0xa5
.uleb128 0xf
.byte 0x8
.byte 0x5
.4byte .LASF16
.uleb128 0xf
.byte 0x10
.byte 0x4
.4byte .LASF17
.uleb128 0x14
.4byte .LASF25
.byte 0x1c
.byte 0x4
.byte 0x19
.4byte 0x13a
.uleb128 0x3
.4byte .LASF18
.byte 0x4
.byte 0x1a
.byte 0xe
.4byte 0x86
.byte 0
.uleb128 0x3
.4byte .LASF19
.byte 0x4
.byte 0x1b
.byte 0xe
.4byte 0x86
.byte 0x4
.uleb128 0x3
.4byte .LASF20
.byte 0x4
.byte 0x1c
.byte 0xe
.4byte 0x86
.byte 0x8
.uleb128 0x3
.4byte .LASF21
.byte 0x4
.byte 0x1d
.byte 0xe
.4byte 0x86
.byte 0xc
.uleb128 0x3
.4byte .LASF22
.byte 0x4
.byte 0x1e
.byte 0xe
.4byte 0x86
.byte 0x10
.uleb128 0x3
.4byte .LASF23
.byte 0x4
.byte 0x1f
.byte 0xe
.4byte 0x86
.byte 0x14
.uleb128 0x3
.4byte .LASF24
.byte 0x4
.byte 0x20
.byte 0xe
.4byte 0x86
.byte 0x18
.byte 0
.uleb128 0x14
.4byte .LASF26
.byte 0x44
.byte 0x4
.byte 0x24
.4byte 0x195
.uleb128 0x3
.4byte .LASF27
.byte 0x4
.byte 0x25
.byte 0xe
.4byte 0x73
.byte 0
.uleb128 0x3
.4byte .LASF28
.byte 0x4
.byte 0x26
.byte 0xe
.4byte 0x73
.byte 0x2
.uleb128 0x3
.4byte .LASF29
.byte 0x4
.byte 0x27
.byte 0xe
.4byte 0x73
.byte 0x4
.uleb128 0x3
.4byte .LASF30
.byte 0x4
.byte 0x28
.byte 0xe
.4byte 0x73
.byte 0x6
.uleb128 0x3
.4byte .LASF18
.byte 0x4
.byte 0x29
.byte 0xe
.4byte 0x86
.byte 0x8
.uleb128 0x3
.4byte .LASF31
.byte 0x4
.byte 0x2b
.byte 0xe
.4byte 0x195
.byte 0xc
.byte 0
.uleb128 0xb
.4byte 0x86
.4byte 0x1a5
.uleb128 0x11
.4byte 0xa5
.byte 0xd
.byte 0
.uleb128 0x14
.4byte .LASF32
.byte 0x10
.byte 0x4
.byte 0x2f
.4byte 0x1cc
.uleb128 0x3
.4byte .LASF33
.byte 0x4
.byte 0x30
.byte 0xe
.4byte 0x73
.byte 0
.uleb128 0x3
.4byte .LASF34
.byte 0x4
.byte 0x31
.byte 0xe
.4byte 0x1cc
.byte 0x2
.byte 0
.uleb128 0xb
.4byte 0x1dc
.4byte 0x1dc
.uleb128 0x11
.4byte 0xa5
.byte 0xd
.byte 0
.uleb128 0xf
.byte 0x1
.byte 0x8
.4byte .LASF35
.uleb128 0x28
.4byte 0x1dc
.uleb128 0x10
.4byte .LASF36
.byte 0x4
.byte 0x5c
.byte 0x11
.4byte 0x3c
.uleb128 0x10
.4byte .LASF37
.byte 0x5
.byte 0x6
.byte 0x12
.4byte 0x99
.uleb128 0x10
.4byte .LASF38
.byte 0x5
.byte 0x8
.byte 0x10
.4byte 0x20c
.uleb128 0xc
.4byte 0x1f4
.uleb128 0x17
.4byte .LASF39
.2byte 0x100
.byte 0xc
.4byte 0x3a1
.uleb128 0x2
.string "ra"
.byte 0x5
.byte 0xd
.byte 0xe
.4byte 0x99
.byte 0
.uleb128 0x2
.string "sp"
.byte 0x5
.byte 0xe
.byte 0xe
.4byte 0x99
.byte 0x8
.uleb128 0x2
.string "gp"
.byte 0x5
.byte 0xf
.byte 0xe
.4byte 0x99
.byte 0x10
.uleb128 0x2
.string "tp"
.byte 0x5
.byte 0x10
.byte 0xe
.4byte 0x99
.byte 0x18
.uleb128 0x2
.string "t0"
.byte 0x5
.byte 0x11
.byte 0xe
.4byte 0x99
.byte 0x20
.uleb128 0x2
.string "t1"
.byte 0x5
.byte 0x12
.byte 0xe
.4byte 0x99
.byte 0x28
.uleb128 0x2
.string "t2"
.byte 0x5
.byte 0x13
.byte 0xe
.4byte 0x99
.byte 0x30
.uleb128 0x2
.string "t3"
.byte 0x5
.byte 0x14
.byte 0xe
.4byte 0x99
.byte 0x38
.uleb128 0x2
.string "t4"
.byte 0x5
.byte 0x15
.byte 0xe
.4byte 0x99
.byte 0x40
.uleb128 0x2
.string "t5"
.byte 0x5
.byte 0x16
.byte 0xe
.4byte 0x99
.byte 0x48
.uleb128 0x2
.string "t6"
.byte 0x5
.byte 0x17
.byte 0xe
.4byte 0x99
.byte 0x50
.uleb128 0x2
.string "a0"
.byte 0x5
.byte 0x18
.byte 0xe
.4byte 0x99
.byte 0x58
.uleb128 0x2
.string "a1"
.byte 0x5
.byte 0x19
.byte 0xe
.4byte 0x99
.byte 0x60
.uleb128 0x2
.string "a2"
.byte 0x5
.byte 0x1a
.byte 0xe
.4byte 0x99
.byte 0x68
.uleb128 0x2
.string "a3"
.byte 0x5
.byte 0x1b
.byte 0xe
.4byte 0x99
.byte 0x70
.uleb128 0x2
.string "a4"
.byte 0x5
.byte 0x1c
.byte 0xe
.4byte 0x99
.byte 0x78
.uleb128 0x2
.string "a5"
.byte 0x5
.byte 0x1d
.byte 0xe
.4byte 0x99
.byte 0x80
.uleb128 0x2
.string "a6"
.byte 0x5
.byte 0x1e
.byte 0xe
.4byte 0x99
.byte 0x88
.uleb128 0x2
.string "a7"
.byte 0x5
.byte 0x1f
.byte 0xe
.4byte 0x99
.byte 0x90
.uleb128 0x2
.string "s0"
.byte 0x5
.byte 0x20
.byte 0xe
.4byte 0x99
.byte 0x98
.uleb128 0x2
.string "s1"
.byte 0x5
.byte 0x21
.byte 0xe
.4byte 0x99
.byte 0xa0
.uleb128 0x2
.string "s2"
.byte 0x5
.byte 0x22
.byte 0xe
.4byte 0x99
.byte 0xa8
.uleb128 0x2
.string "s3"
.byte 0x5
.byte 0x23
.byte 0xe
.4byte 0x99
.byte 0xb0
.uleb128 0x2
.string "s4"
.byte 0x5
.byte 0x24
.byte 0xe
.4byte 0x99
.byte 0xb8
.uleb128 0x2
.string "s5"
.byte 0x5
.byte 0x25
.byte 0xe
.4byte 0x99
.byte 0xc0
.uleb128 0x2
.string "s6"
.byte 0x5
.byte 0x26
.byte 0xe
.4byte 0x99
.byte 0xc8
.uleb128 0x2
.string "s7"
.byte 0x5
.byte 0x27
.byte 0xe
.4byte 0x99
.byte 0xd0
.uleb128 0x2
.string "s8"
.byte 0x5
.byte 0x28
.byte 0xe
.4byte 0x99
.byte 0xd8
.uleb128 0x2
.string "s9"
.byte 0x5
.byte 0x29
.byte 0xe
.4byte 0x99
.byte 0xe0
.uleb128 0x2
.string "s10"
.byte 0x5
.byte 0x2a
.byte 0xe
.4byte 0x99
.byte 0xe8
.uleb128 0x2
.string "s11"
.byte 0x5
.byte 0x2b
.byte 0xe
.4byte 0x99
.byte 0xf0
.uleb128 0x3
.4byte .LASF40
.byte 0x5
.byte 0x2c
.byte 0xe
.4byte 0x99
.byte 0xf8
.byte 0
.uleb128 0x17
.4byte .LASF41
.2byte 0x4d8
.byte 0x33
.4byte 0x47e
.uleb128 0x3
.4byte .LASF42
.byte 0x5
.byte 0x34
.byte 0x16
.4byte 0x211
.byte 0
.uleb128 0x6
.4byte .LASF43
.byte 0x36
.byte 0x16
.4byte 0x211
.2byte 0x100
.uleb128 0x6
.4byte .LASF44
.byte 0x37
.byte 0x12
.4byte 0x47e
.2byte 0x200
.uleb128 0x6
.4byte .LASF45
.byte 0x39
.byte 0xb
.4byte 0x483
.2byte 0x208
.uleb128 0x6
.4byte .LASF46
.byte 0x3a
.byte 0xe
.4byte 0x99
.2byte 0x210
.uleb128 0x6
.4byte .LASF47
.byte 0x3b
.byte 0xe
.4byte 0x99
.2byte 0x218
.uleb128 0x29
.string "sz"
.byte 0x5
.byte 0x3c
.byte 0xe
.4byte 0x99
.2byte 0x220
.uleb128 0x6
.4byte .LASF48
.byte 0x3e
.byte 0x9
.4byte 0x48
.2byte 0x228
.uleb128 0x6
.4byte .LASF49
.byte 0x40
.byte 0x11
.4byte 0x200
.2byte 0x230
.uleb128 0x6
.4byte .LASF50
.byte 0x42
.byte 0xb
.4byte 0x483
.2byte 0x238
.uleb128 0x6
.4byte .LASF51
.byte 0x43
.byte 0x9
.4byte 0x48
.2byte 0x240
.uleb128 0x6
.4byte .LASF52
.byte 0x45
.byte 0x12
.4byte 0x488
.2byte 0x248
.uleb128 0x6
.4byte .LASF53
.byte 0x47
.byte 0xb
.4byte 0x546
.2byte 0x2c8
.uleb128 0x6
.4byte .LASF54
.byte 0x48
.byte 0x9
.4byte 0x48
.2byte 0x4c8
.uleb128 0x6
.4byte .LASF55
.byte 0x4a
.byte 0x9
.4byte 0x48
.2byte 0x4cc
.uleb128 0x6
.4byte .LASF56
.byte 0x4c
.byte 0x12
.4byte 0x47e
.2byte 0x4d0
.byte 0
.uleb128 0xc
.4byte 0x3a1
.uleb128 0xc
.4byte 0x1dc
.uleb128 0xb
.4byte 0x498
.4byte 0x498
.uleb128 0x11
.4byte 0xa5
.byte 0xf
.byte 0
.uleb128 0xc
.4byte 0x49d
.uleb128 0x17
.4byte .LASF57
.2byte 0x480
.byte 0x6b
.4byte 0x546
.uleb128 0x3
.4byte .LASF58
.byte 0x5
.byte 0x6c
.byte 0x3f
.4byte 0x616
.byte 0
.uleb128 0x3
.4byte .LASF33
.byte 0x5
.byte 0x6d
.byte 0xe
.4byte 0x86
.byte 0x4
.uleb128 0x2
.string "din"
.byte 0x5
.byte 0x6e
.byte 0x13
.4byte 0x13a
.byte 0x8
.uleb128 0x2
.string "off"
.byte 0x5
.byte 0x6f
.byte 0xe
.4byte 0x86
.byte 0x4c
.uleb128 0x3
.4byte .LASF59
.byte 0x5
.byte 0x70
.byte 0x9
.4byte 0x48
.byte 0x50
.uleb128 0x3
.4byte .LASF60
.byte 0x5
.byte 0x71
.byte 0x13
.4byte 0x611
.byte 0x58
.uleb128 0x3
.4byte .LASF61
.byte 0x5
.byte 0x72
.byte 0x9
.4byte 0x48
.byte 0x60
.uleb128 0x3
.4byte .LASF62
.byte 0x5
.byte 0x73
.byte 0x9
.4byte 0x48
.byte 0x64
.uleb128 0x3
.4byte .LASF63
.byte 0x5
.byte 0x74
.byte 0x9
.4byte 0x48
.byte 0x68
.uleb128 0x3
.4byte .LASF64
.byte 0x5
.byte 0x76
.byte 0x12
.4byte 0x498
.byte 0x70
.uleb128 0x3
.4byte .LASF65
.byte 0x5
.byte 0x78
.byte 0x12
.4byte 0x643
.byte 0x78
.uleb128 0x6
.4byte .LASF66
.byte 0x79
.byte 0x9
.4byte 0x48
.2byte 0x478
.byte 0
.uleb128 0xb
.4byte 0x483
.4byte 0x556
.uleb128 0x11
.4byte 0xa5
.byte 0x3f
.byte 0
.uleb128 0x17
.4byte .LASF67
.2byte 0x330
.byte 0x56
.4byte 0x5f2
.uleb128 0x3
.4byte .LASF68
.byte 0x5
.byte 0x57
.byte 0xa
.4byte 0x5f2
.byte 0
.uleb128 0x6
.4byte .LASF69
.byte 0x58
.byte 0xe
.4byte 0x86
.2byte 0x200
.uleb128 0x6
.4byte .LASF70
.byte 0x59
.byte 0xe
.4byte 0x86
.2byte 0x204
.uleb128 0x6
.4byte .LASF71
.byte 0x5a
.byte 0x9
.4byte 0x48
.2byte 0x208
.uleb128 0x6
.4byte .LASF72
.byte 0x5b
.byte 0x9
.4byte 0x48
.2byte 0x20c
.uleb128 0x6
.4byte .LASF73
.byte 0x5c
.byte 0x9
.4byte 0x48
.2byte 0x210
.uleb128 0x6
.4byte .LASF74
.byte 0x5d
.byte 0x9
.4byte 0x48
.2byte 0x214
.uleb128 0x6
.4byte .LASF59
.byte 0x5e
.byte 0x9
.4byte 0x48
.2byte 0x218
.uleb128 0x6
.4byte .LASF75
.byte 0x60
.byte 0x12
.4byte 0x601
.2byte 0x220
.uleb128 0x6
.4byte .LASF76
.byte 0x61
.byte 0x9
.4byte 0x48
.2byte 0x320
.uleb128 0x6
.4byte .LASF64
.byte 0x63
.byte 0x13
.4byte 0x611
.2byte 0x328
.byte 0
.uleb128 0xb
.4byte 0x1dc
.4byte 0x601
.uleb128 0x1a
.4byte 0xa5
.byte 0
.uleb128 0xb
.4byte 0x498
.4byte 0x611
.uleb128 0x11
.4byte 0xa5
.byte 0x1f
.byte 0
.uleb128 0xc
.4byte 0x556
.uleb128 0x2a
.byte 0x7
.byte 0x4
.4byte 0x92
.byte 0x5
.byte 0x6c
.byte 0xa
.4byte 0x643
.uleb128 0x16
.4byte .LASF77
.byte 0
.uleb128 0x16
.4byte .LASF78
.byte 0x1
.uleb128 0x16
.4byte .LASF79
.byte 0x2
.uleb128 0x16
.4byte .LASF80
.byte 0x3
.uleb128 0x16
.4byte .LASF81
.byte 0x4
.byte 0
.uleb128 0xb
.4byte 0x47e
.4byte 0x653
.uleb128 0x11
.4byte 0xa5
.byte 0x7f
.byte 0
.uleb128 0x21
.4byte .LASF82
.byte 0x93
.byte 0x15
.4byte 0x643
.uleb128 0x21
.4byte .LASF83
.byte 0x95
.byte 0xc
.4byte 0x48
.uleb128 0x14
.4byte .LASF84
.byte 0x10
.byte 0x1
.byte 0x3f
.4byte 0x6aa
.uleb128 0x3
.4byte .LASF85
.byte 0x1
.byte 0x40
.byte 0xe
.4byte 0x99
.byte 0
.uleb128 0x2
.string "len"
.byte 0x1
.byte 0x41
.byte 0xe
.4byte 0x86
.byte 0x8
.uleb128 0x3
.4byte .LASF86
.byte 0x1
.byte 0x42
.byte 0xe
.4byte 0x73
.byte 0xc
.uleb128 0x3
.4byte .LASF56
.byte 0x1
.byte 0x43
.byte 0xe
.4byte 0x73
.byte 0xe
.byte 0
.uleb128 0x22
.4byte .LASF89
.byte 0x2
.byte 0x46
.4byte 0x6dd
.uleb128 0x3
.4byte .LASF86
.byte 0x1
.byte 0x47
.byte 0xe
.4byte 0x73
.byte 0
.uleb128 0x2
.string "idx"
.byte 0x1
.byte 0x48
.byte 0xe
.4byte 0x73
.byte 0x2
.uleb128 0x3
.4byte .LASF87
.byte 0x1
.byte 0x49
.byte 0xe
.4byte 0x6dd
.byte 0x4
.byte 0
.uleb128 0xb
.4byte 0x73
.4byte 0x6ec
.uleb128 0x23
.4byte 0xa5
.byte 0
.uleb128 0x14
.4byte .LASF88
.byte 0x8
.byte 0x1
.byte 0x4c
.4byte 0x712
.uleb128 0x2
.string "id"
.byte 0x1
.byte 0x4c
.byte 0x23
.4byte 0x86
.byte 0
.uleb128 0x2
.string "len"
.byte 0x1
.byte 0x4c
.byte 0x26
.4byte 0x86
.byte 0x4
.byte 0
.uleb128 0x22
.4byte .LASF90
.byte 0x4
.byte 0x4d
.4byte 0x745
.uleb128 0x3
.4byte .LASF86
.byte 0x1
.byte 0x4e
.byte 0xe
.4byte 0x73
.byte 0
.uleb128 0x2
.string "idx"
.byte 0x1
.byte 0x4f
.byte 0xe
.4byte 0x73
.byte 0x2
.uleb128 0x3
.4byte .LASF87
.byte 0x1
.byte 0x50
.byte 0x1c
.4byte 0x745
.byte 0x4
.byte 0
.uleb128 0xb
.4byte 0x6ec
.4byte 0x754
.uleb128 0x23
.4byte 0xa5
.byte 0
.uleb128 0x14
.4byte .LASF91
.byte 0x10
.byte 0x1
.byte 0x56
.4byte 0x788
.uleb128 0x3
.4byte .LASF27
.byte 0x1
.byte 0x57
.byte 0xe
.4byte 0x86
.byte 0
.uleb128 0x3
.4byte .LASF92
.byte 0x1
.byte 0x58
.byte 0xe
.4byte 0x86
.byte 0x4
.uleb128 0x3
.4byte .LASF93
.byte 0x1
.byte 0x59
.byte 0xe
.4byte 0x99
.byte 0x8
.byte 0
.uleb128 0x4
.4byte .LASF94
.byte 0x5d
.byte 0x11
.4byte 0x99
.uleb128 0x9
.byte 0x3
.8byte vbase
.uleb128 0x15
.string "Q"
.byte 0x5e
.byte 0x11
.4byte 0x86
.uleb128 0x9
.byte 0x3
.8byte Q
.uleb128 0x4
.4byte .LASF95
.byte 0x60
.byte 0x1c
.4byte 0x7c5
.uleb128 0x9
.byte 0x3
.8byte desc
.uleb128 0xc
.4byte 0x669
.uleb128 0x4
.4byte .LASF96
.byte 0x61
.byte 0x1c
.4byte 0x7df
.uleb128 0x9
.byte 0x3
.8byte avail
.uleb128 0xc
.4byte 0x6aa
.uleb128 0x4
.4byte .LASF59
.byte 0x62
.byte 0x1c
.4byte 0x7f9
.uleb128 0x9
.byte 0x3
.8byte used
.uleb128 0xc
.4byte 0x712
.uleb128 0x4
.4byte .LASF97
.byte 0x63
.byte 0x1c
.4byte 0x813
.uleb128 0x9
.byte 0x3
.8byte vq_area
.uleb128 0xc
.4byte 0x5b
.uleb128 0x4
.4byte .LASF98
.byte 0x65
.byte 0x11
.4byte 0x73
.uleb128 0x9
.byte 0x3
.8byte aidx
.uleb128 0x2b
.4byte .LASF99
.byte 0x1
.byte 0x66
.byte 0x1a
.4byte 0x67
.byte 0x4
.uleb128 0x9
.byte 0x3
.8byte status_byte
.uleb128 0xb
.4byte 0x67
.4byte 0x853
.uleb128 0x1a
.4byte 0xa5
.byte 0
.uleb128 0x19
.4byte 0x844
.uleb128 0x2c
.4byte .LASF100
.byte 0x1
.byte 0x67
.byte 0x1a
.4byte 0x853
.2byte 0x200
.uleb128 0x9
.byte 0x3
.8byte dma_buf
.uleb128 0x15
.string "req"
.byte 0xa6
.byte 0x1e
.4byte 0x754
.uleb128 0x9
.byte 0x3
.8byte req
.uleb128 0x15
.string "sb"
.byte 0xca
.byte 0x1a
.4byte 0xd2
.uleb128 0x9
.byte 0x3
.8byte sb
.uleb128 0xb
.4byte 0x5b
.4byte 0x8a8
.uleb128 0x1a
.4byte 0xa5
.byte 0
.uleb128 0x4
.4byte .LASF101
.byte 0xcb
.byte 0x17
.4byte 0x899
.uleb128 0x9
.byte 0x3
.8byte block_buf
.uleb128 0xb
.4byte 0x556
.4byte 0x8cd
.uleb128 0x11
.4byte 0xa5
.byte 0xf
.byte 0
.uleb128 0x18
.4byte .LASF102
.2byte 0x182
.byte 0xe
.4byte 0x8bd
.uleb128 0x9
.byte 0x3
.8byte gPipes
.uleb128 0xb
.4byte 0x49d
.4byte 0x8f3
.uleb128 0x11
.4byte 0xa5
.byte 0xf
.byte 0
.uleb128 0x18
.4byte .LASF103
.2byte 0x183
.byte 0xd
.4byte 0x8e3
.uleb128 0x9
.byte 0x3
.8byte gFiles
.uleb128 0x18
.4byte .LASF104
.2byte 0x185
.byte 0xf
.4byte 0x611
.uleb128 0x9
.byte 0x3
.8byte gFreePipes
.uleb128 0x18
.4byte .LASF105
.2byte 0x186
.byte 0xe
.4byte 0x498
.uleb128 0x9
.byte 0x3
.8byte gFreeFiles
.uleb128 0x12
.4byte .LASF108
.byte 0x10
.byte 0x7
.4byte 0x483
.4byte 0x954
.uleb128 0x7
.4byte 0x483
.uleb128 0x7
.4byte 0x954
.uleb128 0x7
.4byte 0xb8
.byte 0
.uleb128 0xc
.4byte 0x1e3
.uleb128 0x2d
.4byte .LASF198
.byte 0x1
.2byte 0x259
.byte 0x6
.4byte 0x968
.uleb128 0x1b
.byte 0
.uleb128 0x2e
.4byte .LASF106
.byte 0x1
.2byte 0x258
.byte 0x6
.4byte 0x97b
.uleb128 0x7
.4byte 0x483
.byte 0
.uleb128 0x2f
.4byte .LASF107
.byte 0x5
.byte 0x8b
.byte 0xf
.4byte 0x98d
.4byte 0x98d
.uleb128 0x1b
.byte 0
.uleb128 0xc
.4byte 0x498
.uleb128 0x12
.4byte .LASF109
.byte 0xf
.byte 0x5
.4byte 0x48
.4byte 0x9a8
.uleb128 0x7
.4byte 0x954
.uleb128 0x1b
.byte 0
.uleb128 0x12
.4byte .LASF110
.byte 0x12
.byte 0x7
.4byte 0x483
.4byte 0x9c2
.uleb128 0x7
.4byte 0x483
.uleb128 0x7
.4byte 0x954
.byte 0
.uleb128 0x12
.4byte .LASF111
.byte 0x11
.byte 0x7
.4byte 0x483
.4byte 0x9e1
.uleb128 0x7
.4byte 0x483
.uleb128 0x7
.4byte 0x954
.uleb128 0x7
.4byte 0x48
.byte 0
.uleb128 0x12
.4byte .LASF112
.byte 0xe
.byte 0x5
.4byte 0x48
.4byte 0x9f6
.uleb128 0x7
.4byte 0x954
.byte 0
.uleb128 0x12
.4byte .LASF113
.byte 0x14
.byte 0x5
.4byte 0x48
.4byte 0xa10
.uleb128 0x7
.4byte 0x954
.uleb128 0x7
.4byte 0x954
.byte 0
.uleb128 0x12
.4byte .LASF114
.byte 0x15
.byte 0x5
.4byte 0x48
.4byte 0xa2f
.uleb128 0x7
.4byte 0x954
.uleb128 0x7
.4byte 0x954
.uleb128 0x7
.4byte 0x92
.byte 0
.uleb128 0x12
.4byte .LASF115
.byte 0xd
.byte 0x7
.4byte 0xa4e
.4byte 0xa4e
.uleb128 0x7
.4byte 0xa4e
.uleb128 0x7
.4byte 0xa50
.uleb128 0x7
.4byte 0x92
.byte 0
.uleb128 0x30
.byte 0x8
.uleb128 0xc
.4byte 0xa55
.uleb128 0x31
.uleb128 0x12
.4byte .LASF116
.byte 0xc
.byte 0x7
.4byte 0xa4e
.4byte 0xa75
.uleb128 0x7
.4byte 0xa4e
.uleb128 0x7
.4byte 0x48
.uleb128 0x7
.4byte 0x92
.byte 0
.uleb128 0x12
.4byte .LASF117
.byte 0xb
.byte 0x7
.4byte 0xa4e
.4byte 0xa8a
.uleb128 0x7
.4byte 0xb8
.byte 0
.uleb128 0x32
.4byte .LASF118
.byte 0x1
.byte 0x13
.byte 0xd
.4byte 0xa9c
.uleb128 0x7
.4byte 0x954
.byte 0
.uleb128 0x13
.4byte .LASF121
.2byte 0x374
.8byte .LFB37
.8byte .LFE37-.LFB37
.uleb128 0x1
.byte 0x9c
.4byte 0xae6
.uleb128 0x9
.4byte .LASF119
.2byte 0x374
.byte 0x12
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0x9
.4byte .LASF120
.2byte 0x374
.byte 0x1d
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x1
.string "ft"
.2byte 0x375
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0x1c
.4byte .LASF122
.2byte 0x360
.8byte .LFB36
.8byte .LFE36-.LFB36
.uleb128 0x1
.byte 0x9c
.4byte 0xb13
.uleb128 0x9
.4byte .LASF52
.2byte 0x360
.byte 0x22
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0x13
.4byte .LASF123
.2byte 0x33b
.8byte .LFB35
.8byte .LFE35-.LFB35
.uleb128 0x1
.byte 0x9c
.4byte 0xb7d
.uleb128 0x9
.4byte .LASF124
.2byte 0x33b
.byte 0x20
.4byte 0x47e
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x9
.4byte .LASF125
.2byte 0x33b
.byte 0x35
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0x9
.4byte .LASF126
.2byte 0x33b
.byte 0x4b
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0xa
.8byte .LBB28
.8byte .LBE28-.LBB28
.uleb128 0x1
.string "i"
.2byte 0x33e
.byte 0xd
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -20
.byte 0
.byte 0
.uleb128 0x13
.4byte .LASF127
.2byte 0x334
.8byte .LFB34
.8byte .LFE34-.LFB34
.uleb128 0x1
.byte 0x9c
.4byte 0xbc9
.uleb128 0x9
.4byte .LASF52
.2byte 0x334
.byte 0x1c
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0xa
.8byte .LBB27
.8byte .LBE27-.LBB27
.uleb128 0x1
.string "i"
.2byte 0x336
.byte 0xd
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -20
.byte 0
.byte 0
.uleb128 0xe
.4byte .LASF129
.2byte 0x30e
.byte 0x5
.4byte 0x48
.8byte .LFB33
.8byte .LFE33-.LFB33
.uleb128 0x1
.byte 0x9c
.4byte 0xc50
.uleb128 0x8
.string "fd"
.2byte 0x30e
.byte 0x13
.4byte 0x54
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0x9
.4byte .LASF128
.2byte 0x30e
.byte 0x1b
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -60
.uleb128 0x1
.string "ft"
.2byte 0x30f
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x1
.string "f"
.2byte 0x311
.byte 0x12
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x1
.string "p"
.2byte 0x321
.byte 0x13
.4byte 0x611
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0xa
.8byte .LBB26
.8byte .LBE26-.LBB26
.uleb128 0x1
.string "i"
.2byte 0x328
.byte 0x16
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -20
.byte 0
.byte 0
.uleb128 0xe
.4byte .LASF130
.2byte 0x2fc
.byte 0x9
.4byte 0x1e8
.8byte .LFB32
.8byte .LFE32-.LFB32
.uleb128 0x1
.byte 0x9c
.4byte 0xcac
.uleb128 0x8
.string "fd"
.2byte 0x2fc
.byte 0x15
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -52
.uleb128 0x5
.4byte .LASF52
.2byte 0x2fd
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x1
.string "idx"
.2byte 0x303
.byte 0x9
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x1
.string "f"
.2byte 0x307
.byte 0x12
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -40
.byte 0
.uleb128 0xe
.4byte .LASF131
.2byte 0x2e7
.byte 0x9
.4byte 0x1e8
.8byte .LFB31
.8byte .LFE31-.LFB31
.uleb128 0x1
.byte 0x9c
.4byte 0xd45
.uleb128 0x8
.string "fd"
.2byte 0x2e7
.byte 0x15
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -52
.uleb128 0x8
.string "buf"
.2byte 0x2e7
.byte 0x1f
.4byte 0xa4e
.uleb128 0x2
.byte 0x91
.sleb128 -64
.uleb128 0x9
.4byte .LASF132
.2byte 0x2e7
.byte 0x2b
.4byte 0xb8
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x5
.4byte .LASF52
.2byte 0x2e8
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x1
.string "idx"
.2byte 0x2ee
.byte 0x9
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x1
.string "f"
.2byte 0x2f2
.byte 0x12
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x5
.4byte .LASF133
.2byte 0x2f3
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -44
.uleb128 0x5
.4byte .LASF134
.2byte 0x2f6
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -48
.byte 0
.uleb128 0xe
.4byte .LASF135
.2byte 0x2ba
.byte 0x5
.4byte 0x48
.8byte .LFB30
.8byte .LFE30-.LFB30
.uleb128 0x1
.byte 0x9c
.4byte 0xdd4
.uleb128 0x9
.4byte .LASF136
.2byte 0x2ba
.byte 0x19
.4byte 0x954
.uleb128 0x3
.byte 0x91
.sleb128 -264
.uleb128 0x5
.4byte .LASF52
.2byte 0x2bb
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0x5
.4byte .LASF137
.2byte 0x2bd
.byte 0xa
.4byte 0xdd4
.uleb128 0x3
.byte 0x91
.sleb128 -184
.uleb128 0x5
.4byte .LASF33
.2byte 0x2cb
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -52
.uleb128 0x1
.string "di"
.2byte 0x2d0
.byte 0x13
.4byte 0x13a
.uleb128 0x3
.byte 0x91
.sleb128 -256
.uleb128 0xa
.8byte .LBB25
.8byte .LBE25-.LBB25
.uleb128 0x1
.string "i"
.2byte 0x2d4
.byte 0xe
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -36
.byte 0
.byte 0
.uleb128 0xb
.4byte 0x1dc
.4byte 0xde4
.uleb128 0x11
.4byte 0xa5
.byte 0x7f
.byte 0
.uleb128 0xe
.4byte .LASF138
.2byte 0x28f
.byte 0x5
.4byte 0x48
.8byte .LFB29
.8byte .LFE29-.LFB29
.uleb128 0x1
.byte 0x9c
.4byte 0xe6c
.uleb128 0x8
.string "fd"
.2byte 0x28f
.byte 0x13
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -52
.uleb128 0x9
.4byte .LASF85
.2byte 0x28f
.byte 0x1d
.4byte 0x483
.uleb128 0x2
.byte 0x91
.sleb128 -64
.uleb128 0x8
.string "n"
.2byte 0x28f
.byte 0x27
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0x5
.4byte .LASF52
.2byte 0x291
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x1
.string "p"
.2byte 0x29a
.byte 0x13
.4byte 0x611
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0xa
.8byte .LBB24
.8byte .LBE24-.LBB24
.uleb128 0x1
.string "i"
.2byte 0x2a0
.byte 0x17
.4byte 0x4f
.uleb128 0x2
.byte 0x91
.sleb128 -36
.byte 0
.byte 0
.uleb128 0xe
.4byte .LASF139
.2byte 0x25d
.byte 0x5
.4byte 0x48
.8byte .LFB28
.8byte .LFE28-.LFB28
.uleb128 0x1
.byte 0x9c
.4byte 0xf01
.uleb128 0x8
.string "fd"
.2byte 0x25d
.byte 0x12
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -52
.uleb128 0x9
.4byte .LASF85
.2byte 0x25d
.byte 0x1c
.4byte 0x483
.uleb128 0x2
.byte 0x91
.sleb128 -64
.uleb128 0x8
.string "n"
.2byte 0x25d
.byte 0x26
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0x5
.4byte .LASF52
.2byte 0x25f
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x1
.string "p"
.2byte 0x268
.byte 0x13
.4byte 0x611
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x1
.string "j"
.2byte 0x277
.byte 0x12
.4byte 0x4f
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0xa
.8byte .LBB23
.8byte .LBE23-.LBB23
.uleb128 0x1
.string "i"
.2byte 0x278
.byte 0x17
.4byte 0x4f
.uleb128 0x2
.byte 0x91
.sleb128 -40
.byte 0
.byte 0
.uleb128 0x13
.4byte .LASF140
.2byte 0x21a
.8byte .LFB27
.8byte .LFE27-.LFB27
.uleb128 0x1
.byte 0x9c
.4byte 0xf93
.uleb128 0x8
.string "fd1"
.2byte 0x21a
.byte 0x15
.4byte 0xf93
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x8
.string "fd2"
.2byte 0x21a
.byte 0x1f
.4byte 0xf93
.uleb128 0x3
.byte 0x91
.sleb128 -80
.uleb128 0x5
.4byte .LASF52
.2byte 0x21b
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0x1
.string "pip"
.2byte 0x21d
.byte 0x13
.4byte 0x611
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0x1d
.8byte .LBB20
.8byte .LBE20-.LBB20
.4byte 0xf7f
.uleb128 0x1
.string "i"
.2byte 0x21f
.byte 0xe
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -36
.byte 0
.uleb128 0x24
.4byte .LLRL2
.uleb128 0x1
.string "i"
.2byte 0x239
.byte 0xe
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -40
.byte 0
.byte 0
.uleb128 0xc
.4byte 0x48
.uleb128 0xe
.4byte .LASF141
.2byte 0x20d
.byte 0x5
.4byte 0x48
.8byte .LFB26
.8byte .LFE26-.LFB26
.uleb128 0x1
.byte 0x9c
.4byte 0xfd8
.uleb128 0x8
.string "fd"
.2byte 0x20d
.byte 0x13
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0x5
.4byte .LASF52
.2byte 0x20f
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0xe
.4byte .LASF142
.2byte 0x201
.byte 0x5
.4byte 0x48
.8byte .LFB25
.8byte .LFE25-.LFB25
.uleb128 0x1
.byte 0x9c
.4byte 0x1018
.uleb128 0x8
.string "fd"
.2byte 0x201
.byte 0x12
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0x5
.4byte .LASF52
.2byte 0x203
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0xe
.4byte .LASF143
.2byte 0x1f4
.byte 0x5
.4byte 0x48
.8byte .LFB24
.8byte .LFE24-.LFB24
.uleb128 0x1
.byte 0x9c
.4byte 0x1058
.uleb128 0x8
.string "fd"
.2byte 0x1f4
.byte 0x11
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0x5
.4byte .LASF52
.2byte 0x1f6
.byte 0x13
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0x13
.4byte .LASF144
.2byte 0x1e0
.8byte .LFB23
.8byte .LFE23-.LFB23
.uleb128 0x1
.byte 0x9c
.4byte 0x1085
.uleb128 0x9
.4byte .LASF52
.2byte 0x1e0
.byte 0x1c
.4byte 0x98d
.uleb128 0x2
.byte 0x91
.sleb128 -40
.byte 0
.uleb128 0x1e
.4byte .LASF145
.2byte 0x1d5
.byte 0xe
.4byte 0x498
.8byte .LFB22
.8byte .LFE22-.LFB22
.uleb128 0x1
.byte 0x9c
.4byte 0x10b7
.uleb128 0x5
.4byte .LASF125
.2byte 0x1d7
.byte 0x12
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0xe
.4byte .LASF146
.2byte 0x1bf
.byte 0xf
.4byte 0x611
.8byte .LFB21
.8byte .LFE21-.LFB21
.uleb128 0x1
.byte 0x9c
.4byte 0x1104
.uleb128 0x1
.string "p"
.2byte 0x1c1
.byte 0x13
.4byte 0x611
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x5
.4byte .LASF147
.2byte 0x1cd
.byte 0xb
.4byte 0xa4e
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x1
.string "pa"
.2byte 0x1ce
.byte 0xb
.4byte 0xa4e
.uleb128 0x2
.byte 0x91
.sleb128 -40
.byte 0
.uleb128 0x1c
.4byte .LASF148
.2byte 0x1b6
.8byte .LFB20
.8byte .LFE20-.LFB20
.uleb128 0x1
.byte 0x9c
.4byte 0x112f
.uleb128 0x8
.string "p"
.2byte 0x1b6
.byte 0x1d
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0x1e
.4byte .LASF149
.2byte 0x1ad
.byte 0xe
.4byte 0x498
.8byte .LFB19
.8byte .LFE19-.LFB19
.uleb128 0x1
.byte 0x9c
.4byte 0x115f
.uleb128 0x1
.string "p"
.2byte 0x1af
.byte 0x12
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0x1c
.4byte .LASF150
.2byte 0x1a7
.8byte .LFB18
.8byte .LFE18-.LFB18
.uleb128 0x1
.byte 0x9c
.4byte 0x118a
.uleb128 0x8
.string "p"
.2byte 0x1a7
.byte 0x1e
.4byte 0x611
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0x1e
.4byte .LASF151
.2byte 0x19c
.byte 0xf
.4byte 0x611
.8byte .LFB17
.8byte .LFE17-.LFB17
.uleb128 0x1
.byte 0x9c
.4byte 0x11ba
.uleb128 0x1
.string "p"
.2byte 0x19e
.byte 0x13
.4byte 0x611
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0x33
.4byte .LASF199
.byte 0x1
.2byte 0x188
.byte 0x6
.8byte .LFB16
.8byte .LFE16-.LFB16
.uleb128 0x1
.byte 0x9c
.4byte 0x125a
.uleb128 0x1d
.8byte .LBB16
.8byte .LBE16-.LBB16
.4byte 0x121b
.uleb128 0x1
.string "i"
.2byte 0x18d
.byte 0xd
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0xa
.8byte .LBB17
.8byte .LBE17-.LBB17
.uleb128 0x1
.string "p"
.2byte 0x18e
.byte 0x17
.4byte 0x611
.uleb128 0x2
.byte 0x91
.sleb128 -40
.byte 0
.byte 0
.uleb128 0xa
.8byte .LBB18
.8byte .LBE18-.LBB18
.uleb128 0x1
.string "i"
.2byte 0x194
.byte 0xd
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0xa
.8byte .LBB19
.8byte .LBE19-.LBB19
.uleb128 0x1
.string "p"
.2byte 0x195
.byte 0x16
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -32
.byte 0
.byte 0
.byte 0
.uleb128 0x25
.4byte .LASF152
.2byte 0x174
.8byte .LFB15
.8byte .LFE15-.LFB15
.uleb128 0x1
.byte 0x9c
.4byte 0x12cd
.uleb128 0x8
.string "f"
.2byte 0x174
.byte 0x27
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x8
.string "o"
.2byte 0x174
.byte 0x37
.4byte 0x47e
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0x1
.string "cap"
.2byte 0x175
.byte 0x9
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x1
.string "c"
.2byte 0x175
.byte 0x1d
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0xa
.8byte .LBB15
.8byte .LBE15-.LBB15
.uleb128 0x1
.string "i"
.2byte 0x176
.byte 0xe
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.byte 0
.uleb128 0x25
.4byte .LASF153
.2byte 0x166
.8byte .LFB14
.8byte .LFE14-.LFB14
.uleb128 0x1
.byte 0x9c
.4byte 0x133e
.uleb128 0x8
.string "f"
.2byte 0x166
.byte 0x24
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x8
.string "o"
.2byte 0x166
.byte 0x34
.4byte 0x47e
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0x1
.string "cap"
.2byte 0x167
.byte 0x9
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x34
.4byte .LLRL0
.4byte 0x132a
.uleb128 0x1
.string "i"
.2byte 0x169
.byte 0xe
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -20
.byte 0
.uleb128 0x24
.4byte .LLRL1
.uleb128 0x1
.string "i"
.2byte 0x16b
.byte 0xe
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.byte 0
.uleb128 0x35
.4byte .LASF169
.byte 0x1
.2byte 0x163
.byte 0xc
.4byte 0x48
.8byte .LFB13
.8byte .LFE13-.LFB13
.uleb128 0x1
.byte 0x9c
.4byte 0x136f
.uleb128 0x8
.string "f"
.2byte 0x163
.byte 0x23
.4byte 0x498
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0x13
.4byte .LASF154
.2byte 0x14b
.8byte .LFB12
.8byte .LFE12-.LFB12
.uleb128 0x1
.byte 0x9c
.4byte 0x13cb
.uleb128 0x9
.4byte .LASF33
.2byte 0x14b
.byte 0x1a
.4byte 0x86
.uleb128 0x3
.byte 0x91
.sleb128 -100
.uleb128 0x1
.string "ip"
.2byte 0x14c
.byte 0x13
.4byte 0x13a
.uleb128 0x3
.byte 0x91
.sleb128 -88
.uleb128 0xa
.8byte .LBB10
.8byte .LBE10-.LBB10
.uleb128 0x1
.string "i"
.2byte 0x152
.byte 0xe
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -20
.byte 0
.byte 0
.uleb128 0xe
.4byte .LASF155
.2byte 0x131
.byte 0xa
.4byte 0x86
.8byte .LFB11
.8byte .LFE11-.LFB11
.uleb128 0x1
.byte 0x9c
.4byte 0x1461
.uleb128 0x9
.4byte .LASF136
.2byte 0x131
.byte 0x22
.4byte 0x954
.uleb128 0x3
.byte 0x91
.sleb128 -216
.uleb128 0x1
.string "ipr"
.2byte 0x137
.byte 0x13
.4byte 0x13a
.uleb128 0x3
.byte 0x91
.sleb128 -200
.uleb128 0x5
.4byte .LASF156
.2byte 0x13b
.byte 0xa
.4byte 0x1461
.uleb128 0x4
.byte 0x91
.sleb128 -128
.byte 0x6
.uleb128 0x5
.4byte .LASF157
.2byte 0x13d
.byte 0xb
.4byte 0x483
.uleb128 0x3
.byte 0x91
.sleb128 -104
.uleb128 0x5
.4byte .LASF158
.2byte 0x13e
.byte 0xe
.4byte 0x86
.uleb128 0x3
.byte 0x91
.sleb128 -108
.uleb128 0xa
.8byte .LBB9
.8byte .LBE9-.LBB9
.uleb128 0x5
.4byte .LASF159
.2byte 0x142
.byte 0x12
.4byte 0x86
.uleb128 0x3
.byte 0x91
.sleb128 -132
.byte 0
.byte 0
.uleb128 0xb
.4byte 0x1dc
.4byte 0x1475
.uleb128 0x36
.4byte 0xa5
.uleb128 0x4
.byte 0x91
.sleb128 -120
.byte 0x6
.byte 0
.uleb128 0xe
.4byte .LASF160
.2byte 0x11c
.byte 0xa
.4byte 0x86
.8byte .LFB10
.8byte .LFE10-.LFB10
.uleb128 0x1
.byte 0x9c
.4byte 0x1524
.uleb128 0x9
.4byte .LASF44
.2byte 0x11c
.byte 0x24
.4byte 0x1524
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x9
.4byte .LASF34
.2byte 0x11c
.byte 0x38
.4byte 0x954
.uleb128 0x3
.byte 0x91
.sleb128 -80
.uleb128 0x1
.string "off"
.2byte 0x11d
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x5
.4byte .LASF161
.2byte 0x11e
.byte 0xa
.4byte 0x1529
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0xa
.8byte .LBB7
.8byte .LBE7-.LBB7
.uleb128 0x1
.string "de"
.2byte 0x122
.byte 0x18
.4byte 0x1539
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x5
.4byte .LASF162
.2byte 0x123
.byte 0xd
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0xa
.8byte .LBB8
.8byte .LBE8-.LBB8
.uleb128 0x1
.string "i"
.2byte 0x124
.byte 0x12
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.byte 0
.byte 0
.uleb128 0xc
.4byte 0x13a
.uleb128 0xb
.4byte 0x1dc
.4byte 0x1539
.uleb128 0x11
.4byte 0xa5
.byte 0xe
.byte 0
.uleb128 0xc
.4byte 0x1a5
.uleb128 0x13
.4byte .LASF163
.2byte 0x104
.8byte .LFB9
.8byte .LFE9-.LFB9
.uleb128 0x1
.byte 0x9c
.4byte 0x1613
.uleb128 0x8
.string "ip"
.2byte 0x104
.byte 0x1f
.4byte 0x1524
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0x9
.4byte .LASF164
.2byte 0x104
.byte 0x2c
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -60
.uleb128 0x8
.string "buf"
.2byte 0x104
.byte 0x3d
.4byte 0x813
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x9
.4byte .LASF18
.2byte 0x104
.byte 0x4b
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -64
.uleb128 0x5
.4byte .LASF165
.2byte 0x105
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x1
.string "off"
.2byte 0x106
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x1
.string "dst"
.2byte 0x107
.byte 0xe
.4byte 0x813
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0xa
.8byte .LBB6
.8byte .LBE6-.LBB6
.uleb128 0x5
.4byte .LASF166
.2byte 0x10a
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x5
.4byte .LASF167
.2byte 0x10b
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -44
.uleb128 0x5
.4byte .LASF134
.2byte 0x10c
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0x5
.4byte .LASF168
.2byte 0x10f
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -48
.byte 0
.byte 0
.uleb128 0x37
.4byte .LASF170
.byte 0x1
.byte 0xe4
.byte 0x11
.4byte 0x86
.8byte .LFB8
.8byte .LFE8-.LFB8
.uleb128 0x1
.byte 0x9c
.4byte 0x16ec
.uleb128 0xd
.string "ip"
.byte 0xe4
.byte 0x2e
.4byte 0x1524
.uleb128 0x3
.byte 0x91
.sleb128 -88
.uleb128 0xd
.string "idx"
.byte 0xe4
.byte 0x3b
.4byte 0x86
.uleb128 0x3
.byte 0x91
.sleb128 -92
.uleb128 0x1d
.8byte .LBB4
.8byte .LBE4-.LBB4
.4byte 0x1685
.uleb128 0x4
.4byte .LASF171
.byte 0xea
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -60
.uleb128 0x4
.4byte .LASF162
.byte 0xed
.byte 0x13
.4byte 0x16ec
.uleb128 0x3
.byte 0x91
.sleb128 -72
.byte 0
.uleb128 0xa
.8byte .LBB5
.8byte .LBE5-.LBB5
.uleb128 0x4
.4byte .LASF172
.byte 0xf2
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x4
.4byte .LASF173
.byte 0xf3
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x4
.4byte .LASF174
.byte 0xf5
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x4
.4byte .LASF175
.byte 0xf9
.byte 0x13
.4byte 0x16ec
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x4
.4byte .LASF176
.byte 0xfa
.byte 0x12
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -44
.uleb128 0x4
.4byte .LASF177
.byte 0xff
.byte 0x13
.4byte 0x16ec
.uleb128 0x2
.byte 0x91
.sleb128 -56
.byte 0
.byte 0
.uleb128 0xc
.4byte 0x86
.uleb128 0x1f
.4byte .LASF178
.byte 0xd2
.8byte .LFB7
.8byte .LFE7-.LFB7
.uleb128 0x1
.byte 0x9c
.4byte 0x1762
.uleb128 0x20
.4byte .LASF33
.byte 0xd2
.byte 0x1a
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -52
.uleb128 0x20
.4byte .LASF179
.byte 0xd2
.byte 0x2f
.4byte 0x1524
.uleb128 0x2
.byte 0x91
.sleb128 -64
.uleb128 0x4
.4byte .LASF180
.byte 0xd9
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x4
.4byte .LASF181
.byte 0xdb
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x4
.4byte .LASF164
.byte 0xdd
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x15
.string "dip"
.byte 0xe0
.byte 0x14
.4byte 0x1524
.uleb128 0x2
.byte 0x91
.sleb128 -40
.byte 0
.uleb128 0x38
.4byte .LASF200
.byte 0x1
.byte 0xcd
.byte 0x6
.8byte .LFB6
.8byte .LFE6-.LFB6
.uleb128 0x1
.byte 0x9c
.uleb128 0x1f
.4byte .LASF182
.byte 0xc1
.8byte .LFB5
.8byte .LFE5-.LFB5
.uleb128 0x1
.byte 0x9c
.4byte 0x17b5
.uleb128 0xd
.string "blk"
.byte 0xc1
.byte 0x1a
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0xd
.string "dst"
.byte 0xc1
.byte 0x25
.4byte 0xa4e
.uleb128 0x2
.byte 0x91
.sleb128 -32
.byte 0
.uleb128 0x39
.4byte .LASF183
.byte 0x1
.byte 0xa8
.byte 0xd
.8byte .LFB4
.8byte .LFE4-.LFB4
.uleb128 0x1
.byte 0x9c
.4byte 0x17f0
.uleb128 0x20
.4byte .LASF93
.byte 0xa8
.byte 0x26
.4byte 0x99
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0xd
.string "dst"
.byte 0xa8
.byte 0x34
.4byte 0xa4e
.uleb128 0x2
.byte 0x91
.sleb128 -32
.byte 0
.uleb128 0x3a
.4byte .LASF184
.byte 0x1
.byte 0xa0
.byte 0x14
.8byte .LFB3
.8byte .LFE3-.LFB3
.uleb128 0x1
.byte 0x9c
.4byte 0x1836
.uleb128 0xd
.string "f0"
.byte 0xa0
.byte 0x27
.4byte 0x73
.uleb128 0x2
.byte 0x91
.sleb128 -18
.uleb128 0xd
.string "f1"
.byte 0xa0
.byte 0x33
.4byte 0x73
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0xd
.string "f2"
.byte 0xa0
.byte 0x3f
.4byte 0x73
.uleb128 0x2
.byte 0x91
.sleb128 -22
.byte 0
.uleb128 0x1f
.4byte .LASF185
.byte 0x6c
.8byte .LFB2
.8byte .LFE2-.LFB2
.uleb128 0x1
.byte 0x9c
.4byte 0x1920
.uleb128 0x4
.4byte .LASF186
.byte 0x79
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -36
.uleb128 0x15
.string "st"
.byte 0x7d
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x4
.4byte .LASF187
.byte 0x85
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -44
.uleb128 0x4
.4byte .LASF188
.byte 0x86
.byte 0xe
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -48
.uleb128 0x4
.4byte .LASF189
.byte 0x8b
.byte 0xc
.4byte 0xb8
.uleb128 0x2
.byte 0x91
.sleb128 -56
.uleb128 0x4
.4byte .LASF190
.byte 0x8c
.byte 0xc
.4byte 0xb8
.uleb128 0x2
.byte 0x91
.sleb128 -64
.uleb128 0x4
.4byte .LASF191
.byte 0x8d
.byte 0xc
.4byte 0xb8
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x4
.4byte .LASF192
.byte 0x8e
.byte 0xc
.4byte 0xb8
.uleb128 0x3
.byte 0x91
.sleb128 -80
.uleb128 0x4
.4byte .LASF193
.byte 0x8f
.byte 0xc
.4byte 0xb8
.uleb128 0x3
.byte 0x91
.sleb128 -88
.uleb128 0x4
.4byte .LASF194
.byte 0x99
.byte 0xe
.4byte 0x99
.uleb128 0x3
.byte 0x91
.sleb128 -96
.uleb128 0xa
.8byte .LBB2
.8byte .LBE2-.LBB2
.uleb128 0x15
.string "s"
.byte 0x6e
.byte 0xd
.4byte 0x48
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0xa
.8byte .LBB3
.8byte .LBE3-.LBB3
.uleb128 0x4
.4byte .LASF195
.byte 0x6f
.byte 0x12
.4byte 0x99
.uleb128 0x2
.byte 0x91
.sleb128 -32
.byte 0
.byte 0
.byte 0
.uleb128 0x3b
.4byte .LASF196
.byte 0x1
.byte 0x6a
.byte 0x18
.4byte 0x86
.8byte .LFB1
.8byte .LFE1-.LFB1
.uleb128 0x1
.byte 0x9c
.4byte 0x195b
.uleb128 0xd
.string "b"
.byte 0x6a
.byte 0x26
.4byte 0x99
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0xd
.string "o"
.byte 0x6a
.byte 0x31
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -28
.byte 0
.uleb128 0x3c
.4byte .LASF201
.byte 0x1
.byte 0x69
.byte 0x14
.8byte .LFB0
.8byte .LFE0-.LFB0
.uleb128 0x1
.byte 0x9c
.uleb128 0xd
.string "b"
.byte 0x69
.byte 0x22
.4byte 0x99
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0xd
.string "o"
.byte 0x69
.byte 0x2d
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0xd
.string "v"
.byte 0x69
.byte 0x38
.4byte 0x86
.uleb128 0x2
.byte 0x91
.sleb128 -32
.byte 0
.byte 0
.section .debug_abbrev,"",@progbits
.Ldebug_abbrev0:
.uleb128 0x1
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x2
.uleb128 0xd
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x38
.uleb128 0xb
.byte 0
.byte 0
.uleb128 0x3
.uleb128 0xd
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x38
.uleb128 0xb
.byte 0
.byte 0
.uleb128 0x4
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x5
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x6
.uleb128 0xd
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 5
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x38
.uleb128 0x5
.byte 0
.byte 0
.uleb128 0x7
.uleb128 0x5
.byte 0
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x8
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x9
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0xa
.uleb128 0xb
.byte 0x1
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.byte 0
.byte 0
.uleb128 0xb
.uleb128 0x1
.byte 0x1
.uleb128 0x49
.uleb128 0x13
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xc
.uleb128 0xf
.byte 0
.uleb128 0xb
.uleb128 0x21
.sleb128 8
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xd
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0xe
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xf
.uleb128 0x24
.byte 0
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3e
.uleb128 0xb
.uleb128 0x3
.uleb128 0xe
.byte 0
.byte 0
.uleb128 0x10
.uleb128 0x16
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x11
.uleb128 0x21
.byte 0
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2f
.uleb128 0xb
.byte 0
.byte 0
.uleb128 0x12
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x3c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x13
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0x21
.sleb128 6
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x14
.uleb128 0x13
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0x21
.sleb128 8
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x15
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x16
.uleb128 0x28
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x1c
.uleb128 0xb
.byte 0
.byte 0
.uleb128 0x17
.uleb128 0x13
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0xb
.uleb128 0x5
.uleb128 0x3a
.uleb128 0x21
.sleb128 5
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0x21
.sleb128 8
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x18
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x19
.uleb128 0x35
.byte 0
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x1a
.uleb128 0x21
.byte 0
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2f
.uleb128 0x21
.sleb128 511
.byte 0
.byte 0
.uleb128 0x1b
.uleb128 0x18
.byte 0
.byte 0
.byte 0
.uleb128 0x1c
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0x21
.sleb128 6
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x1d
.uleb128 0xb
.byte 0x1
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x1e
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x1f
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0x21
.sleb128 6
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x20
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x21
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 5
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3c
.uleb128 0x19
.byte 0
.byte 0
.uleb128 0x22
.uleb128 0x13
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0xb
.uleb128 0x21
.sleb128 4
.uleb128 0x88
.uleb128 0xb
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0x21
.sleb128 8
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x23
.uleb128 0x21
.byte 0
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x24
.uleb128 0xb
.byte 0x1
.uleb128 0x55
.uleb128 0x17
.byte 0
.byte 0
.uleb128 0x25
.uleb128 0x2e
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 1
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0x21
.sleb128 13
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x26
.uleb128 0x11
.byte 0x1
.uleb128 0x25
.uleb128 0xe
.uleb128 0x13
.uleb128 0xb
.uleb128 0x3
.uleb128 0x1f
.uleb128 0x1b
.uleb128 0x1f
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x10
.uleb128 0x17
.byte 0
.byte 0
.uleb128 0x27
.uleb128 0x24
.byte 0
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3e
.uleb128 0xb
.uleb128 0x3
.uleb128 0x8
.byte 0
.byte 0
.uleb128 0x28
.uleb128 0x26
.byte 0
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x29
.uleb128 0xd
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x38
.uleb128 0x5
.byte 0
.byte 0
.uleb128 0x2a
.uleb128 0x4
.byte 0x1
.uleb128 0x3e
.uleb128 0xb
.uleb128 0xb
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x2b
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x88
.uleb128 0xb
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x2c
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x88
.uleb128 0x5
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x2d
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x3c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x2e
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x3c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x2f
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x3c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x30
.uleb128 0xf
.byte 0
.uleb128 0xb
.uleb128 0xb
.byte 0
.byte 0
.uleb128 0x31
.uleb128 0x26
.byte 0
.byte 0
.byte 0
.uleb128 0x32
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x3c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x33
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x34
.uleb128 0xb
.byte 0x1
.uleb128 0x55
.uleb128 0x17
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x35
.uleb128 0x2e
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0x5
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x36
.uleb128 0x21
.byte 0
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2f
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x37
.uleb128 0x2e
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x38
.uleb128 0x2e
.byte 0
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.byte 0
.byte 0
.uleb128 0x39
.uleb128 0x2e
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x3a
.uleb128 0x2e
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x3b
.uleb128 0x2e
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x3c
.uleb128 0x2e
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.byte 0
.byte 0
.byte 0
.section .debug_aranges,"",@progbits
.4byte 0x2c
.2byte 0x2
.4byte .Ldebug_info0
.byte 0x8
.byte 0
.2byte 0
.2byte 0
.8byte .Ltext0
.8byte .Letext0-.Ltext0
.8byte 0
.8byte 0
.section .debug_rnglists,"",@progbits
.Ldebug_ranges0:
.4byte .Ldebug_ranges3-.Ldebug_ranges2
.Ldebug_ranges2:
.2byte 0x5
.byte 0x8
.byte 0
.4byte 0
.LLRL0:
.byte 0x4
.uleb128 .LBB11-.Ltext0
.uleb128 .LBE11-.Ltext0
.byte 0x4
.uleb128 .LBB13-.Ltext0
.uleb128 .LBE13-.Ltext0
.byte 0
.LLRL1:
.byte 0x4
.uleb128 .LBB12-.Ltext0
.uleb128 .LBE12-.Ltext0
.byte 0x4
.uleb128 .LBB14-.Ltext0
.uleb128 .LBE14-.Ltext0
.byte 0
.LLRL2:
.byte 0x4
.uleb128 .LBB21-.Ltext0
.uleb128 .LBE21-.Ltext0
.byte 0x4
.uleb128 .LBB22-.Ltext0
.uleb128 .LBE22-.Ltext0
.byte 0
.Ldebug_ranges3:
.section .debug_line,"",@progbits
.Ldebug_line0:
.section .debug_str,"MS",@progbits,1
.LASF109:
.string "printf"
.LASF132:
.string "count"
.LASF170:
.string "inode_blockno"
.LASF191:
.string "sz_used"
.LASF22:
.string "logstart"
.LASF64:
.string "free_next"
.LASF113:
.string "strcmp"
.LASF15:
.string "size_t"
.LASF182:
.string "read_block"
.LASF14:
.string "uintptr_t"
.LASF45:
.string "stack_top"
.LASF90:
.string "virtq_used"
.LASF12:
.string "uint64_t"
.LASF83:
.string "gActiveProc"
.LASF173:
.string "dbl_index1"
.LASF100:
.string "dma_buf"
.LASF145:
.string "new_file_table"
.LASF36:
.string "ssize_t"
.LASF200:
.string "read_superblock"
.LASF144:
.string "fs_init"
.LASF27:
.string "type"
.LASF111:
.string "strncpy"
.LASF117:
.string "kalloc_pages"
.LASF136:
.string "path"
.LASF53:
.string "process_kalloc_address"
.LASF171:
.string "indirect_blk"
.LASF52:
.string "file_table"
.LASF162:
.string "entries"
.LASF60:
.string "pipe"
.LASF138:
.string "pipewrite"
.LASF39:
.string "context_t"
.LASF56:
.string "next"
.LASF104:
.string "gFreePipes"
.LASF16:
.string "long long int"
.LASF2:
.string "signed char"
.LASF50:
.string "program"
.LASF77:
.string "FK_STDIN"
.LASF193:
.string "npages"
.LASF25:
.string "superblock"
.LASF159:
.string "next_inum"
.LASF44:
.string "parent"
.LASF194:
.string "vq_phys"
.LASF4:
.string "long int"
.LASF110:
.string "strtok"
.LASF76:
.string "num_linked_file"
.LASF98:
.string "aidx"
.LASF143:
.string "is_pipe"
.LASF124:
.string "owner"
.LASF196:
.string "mr32"
.LASF154:
.string "dump_inode"
.LASF115:
.string "memcpy"
.LASF97:
.string "vq_area"
.LASF8:
.string "uint16_t"
.LASF65:
.string "owner_processes"
.LASF80:
.string "FK_FILE"
.LASF140:
.string "pipe_open"
.LASF189:
.string "sz_desc"
.LASF84:
.string "virtq_desc"
.LASF164:
.string "offset"
.LASF106:
.string "panic"
.LASF128:
.string "massive"
.LASF92:
.string "reserved"
.LASF102:
.string "gPipes"
.LASF123:
.string "fs_dup_table"
.LASF11:
.string "unsigned int"
.LASF38:
.string "pagetable_t"
.LASF41:
.string "proc"
.LASF112:
.string "strlen"
.LASF114:
.string "strncmp"
.LASF71:
.string "read_open"
.LASF29:
.string "minor"
.LASF130:
.string "fs_size"
.LASF13:
.string "long unsigned int"
.LASF142:
.string "is_stdin"
.LASF72:
.string "write_open"
.LASF55:
.string "deleted"
.LASF34:
.string "name"
.LASF178:
.string "read_inode"
.LASF184:
.string "set_flags"
.LASF58:
.string "kind"
.LASF18:
.string "size"
.LASF9:
.string "short unsigned int"
.LASF198:
.string "yield"
.LASF129:
.string "fs_close"
.LASF30:
.string "nlink"
.LASF54:
.string "num_process_kalloc_address"
.LASF121:
.string "fs_dup2"
.LASF23:
.string "inodestart"
.LASF190:
.string "sz_avail"
.LASF197:
.string "GNU C17 14.2.0 -mabi=lp64 -mcmodel=medany -mtune=rocket -misa-spec=20191213 -march=rv64imafdc_zicsr_zifencei -g -ffreestanding"
.LASF153:
.string "owner_add"
.LASF93:
.string "sector"
.LASF78:
.string "FK_STDOUT"
.LASF99:
.string "status_byte"
.LASF150:
.string "free_pipe"
.LASF155:
.string "path_lookup"
.LASF107:
.string "get_current_file_table"
.LASF156:
.string "copy"
.LASF195:
.string "base"
.LASF160:
.string "dir_lookup"
.LASF118:
.string "puts"
.LASF180:
.string "inodes_per_blk"
.LASF177:
.string "second_entries"
.LASF122:
.string "free_fs_table"
.LASF42:
.string "trapframe"
.LASF181:
.string "blkno"
.LASF47:
.string "memsz"
.LASF17:
.string "long double"
.LASF48:
.string "zombie"
.LASF147:
.string "user_va"
.LASF95:
.string "desc"
.LASF88:
.string "virtq_used_elem"
.LASF32:
.string "dirent"
.LASF31:
.string "addrs"
.LASF163:
.string "read_data"
.LASF148:
.string "free_file"
.LASF105:
.string "gFreeFiles"
.LASF73:
.string "nreader"
.LASF82:
.string "gProc"
.LASF174:
.string "dindirect_blk"
.LASF120:
.string "newfd"
.LASF139:
.string "piperead"
.LASF172:
.string "dbl_index0"
.LASF158:
.string "current_inum"
.LASF59:
.string "used"
.LASF157:
.string "token"
.LASF179:
.string "dest"
.LASF134:
.string "to_read"
.LASF108:
.string "strncat"
.LASF85:
.string "addr"
.LASF89:
.string "virtq_avail"
.LASF33:
.string "inum"
.LASF49:
.string "pagetable"
.LASF5:
.string "int32_t"
.LASF7:
.string "unsigned char"
.LASF21:
.string "nlog"
.LASF67:
.string "spipe"
.LASF126:
.string "orig"
.LASF3:
.string "short int"
.LASF57:
.string "file"
.LASF37:
.string "pte_t"
.LASF20:
.string "ninodes"
.LASF169:
.string "owner_cap"
.LASF168:
.string "disk_blk"
.LASF161:
.string "namebuf"
.LASF166:
.string "blk_idx"
.LASF10:
.string "uint32_t"
.LASF69:
.string "nread"
.LASF199:
.string "file_system_init"
.LASF40:
.string "mepc"
.LASF103:
.string "gFiles"
.LASF186:
.string "devfeat"
.LASF35:
.string "char"
.LASF201:
.string "mw32"
.LASF192:
.string "sz_total"
.LASF91:
.string "virtio_blk_req"
.LASF135:
.string "fs_open"
.LASF51:
.string "xstatus"
.LASF176:
.string "first_blk"
.LASF66:
.string "num_owner_process"
.LASF183:
.string "virtio_blk_read"
.LASF70:
.string "nwrite"
.LASF81:
.string "FK_PIPE"
.LASF68:
.string "data"
.LASF43:
.string "context"
.LASF26:
.string "dinode"
.LASF151:
.string "alloc_pipe"
.LASF46:
.string "vaddr"
.LASF79:
.string "FK_STDERR"
.LASF167:
.string "blk_off"
.LASF119:
.string "oldfd"
.LASF116:
.string "memset"
.LASF149:
.string "alloc_file"
.LASF75:
.string "linked_file"
.LASF74:
.string "nwriter"
.LASF146:
.string "pipealloc"
.LASF131:
.string "fs_read"
.LASF187:
.string "devQ"
.LASF185:
.string "virtio_blk_init"
.LASF6:
.string "uint8_t"
.LASF152:
.string "owner_remove"
.LASF61:
.string "pipe_used"
.LASF86:
.string "flags"
.LASF96:
.string "avail"
.LASF87:
.string "ring"
.LASF101:
.string "block_buf"
.LASF94:
.string "vbase"
.LASF62:
.string "read_pipe"
.LASF141:
.string "is_stdout"
.LASF19:
.string "nblocks"
.LASF63:
.string "write_pipe"
.LASF28:
.string "major"
.LASF188:
.string "drvQ"
.LASF24:
.string "bmapstart"
.LASF127:
.string "fs_exit"
.LASF165:
.string "left"
.LASF137:
.string "path2"
.LASF125:
.string "result"
.LASF133:
.string "remaining"
.LASF175:
.string "first_entries"
.section .debug_line_str,"MS",@progbits,1
.LASF0:
.string "fs.c"
.LASF1:
.string "/Users/ab25cq/comelang/minux9"
.ident "GCC: (g04696df09) 14.2.0"
.section .note.GNU-stack,"",@progbits
|
ab25cq/comelang
| 11,405
|
minux9/trap.S
|
.extern kernel_satp # kernel page-table SATP value (defined in C)
.extern user_satp # saved user SATP value (defined in C)
.extern kernel_sp # kernel stack pointer (defined in C)
.extern user_sp # saved user stack pointer (defined in C)
.extern gCPU # current-CPU structure (defined in C)
.section ".trampoline", "ax"
.globl trapvec
#.section .text
.align 2
#-----------------------------------------------------------------------
# trapvec: S-mode trap entry point (stvec target).
# 1) Saves every general register except t0 (slot 32 is deliberately
#    unused - t0 is the frame pointer here) plus sepc into TRAPFRAME.
# 2) Saves the interrupted satp and sp into user_satp / user_sp.
#    NOTE(review): this runs on every trap, so a trap taken while
#    already on the kernel page table would overwrite the saved user
#    values - confirm nested traps cannot reach here.
# 3) Switches to the kernel page table and kernel stack.
# 4) Dispatches on scause: timer IRQ (5), external IRQ (9), or a
#    synchronous exception (not_interrupt).
#-----------------------------------------------------------------------
trapvec:
/*
la t0, gCPU
ld t0, 0(t0) # t0 = &gCPU
ld t0, 0(t0) # t0 = gCPU->proc (current process)
*/
la t0, TRAPFRAME;
sd ra, 0(t0)
sd sp, 8(t0)
sd gp, 16(t0)
sd tp, 24(t0)
/* sd t0, 32(t0) */
sd t1, 40(t0)
sd t2, 48(t0)
sd t3, 56(t0)
sd t4, 64(t0)
sd t5, 72(t0)
sd t6, 80(t0)
sd a0, 88(t0)
sd a1, 96(t0)
sd a2, 104(t0)
sd a3, 112(t0)
sd a4, 120(t0)
sd a5, 128(t0)
sd a6, 136(t0)
sd a7, 144(t0)
sd s0, 152(t0)
sd s1, 160(t0)
sd s2, 168(t0)
sd s3, 176(t0)
sd s4, 184(t0)
sd s5, 192(t0)
sd s6, 200(t0)
sd s7, 208(t0)
sd s8, 216(t0)
sd s9, 224(t0)
sd s10, 232(t0)
sd s11, 240(t0)
csrr t1, sepc
sd t1, 248(t0) # save the interrupted PC
csrr t1, satp
la t0, user_satp
sd t1, 0(t0) # remember the interrupted address space
la t0, user_sp
mv t1, sp
sd t1, 0(t0) # remember the interrupted stack pointer
# --- switch SATP to the kernel page table first ---
la t0, kernel_satp # t0 = &kernel_satp
ld t0, 0(t0) # t0 = kernel_satp
csrw satp, t0
sfence.vma zero, zero
la t0, kernel_sp
ld sp, 0(t0)
# From here on we run on the kernel page table, so puts() etc. inside
# syscall_handler can correctly dereference pointers through the
# kernel virtual -> physical mapping.
# --- dispatch on the trap cause ---
csrr t0, scause # read the S-mode cause register
srli t1, t0, 63 # t1 = interrupt flag (scause[63])
beqz t1, not_interrupt # flag clear: synchronous exception
li t2, 0xfff
and t0, t0, t2 # t0 = interrupt number (scause[11:0])
li t2, 5 # supervisor timer interrupt = 5
beq t0, t2, handle_timer
li t2, 9 # supervisor external interrupt = 9
beq t0, t2, handle_external
# any other interrupt: ignore
j trap_return
.section ".trampoline", "ax"
#-----------------------------------------------------------------------
# not_interrupt: dispatch for synchronous exceptions.
# In: t0 = full scause value (interrupt bit clear on this path).
# An ecall from U-mode sets scause = 8; anything else falls through
# to trap_return.
# FIX vs original: the old code tested (scause & 8) == 8, which also
# matched instruction/load/store page faults (12/13/15) and
# ecall-from-S (9), because they all share bit 3 - those faults were
# silently serviced as syscalls.  Compare the whole cause value.
#-----------------------------------------------------------------------
not_interrupt:
li t2, 8 # scause 8 = environment call from U-mode
beq t0, t2, handle_ecall
# any other synchronous exception
j trap_return
.section ".trampoline", "ax"
# handle_timer: supervisor timer interrupt (scause = 5).
# NOTE(review): the registers saved into TRAPFRAME by trapvec are not
# restored on this path before trap_return - confirm timer_handler /
# the scheduler resumes the interrupted context itself.
handle_timer:
call timer_handler
# csrr t0, time
# addi t0, t0, 1000
# csrw 0x14d, t0 # stimecmp CSR
j trap_return
.section ".trampoline", "ax"
# handle_external: supervisor external interrupt (scause = 9).
# NOTE(review): as with handle_timer, TRAPFRAME is not reloaded on
# this path before trap_return - confirm this is intended.
handle_external:
call external_handler
j trap_return
.equ PLIC_BASE, 0x0C000000
.equ PLIC_CLAIM, PLIC_BASE + 0x201004 # hart 0 S-mode claim/complete register
.equ PLIC_COMPLETE, PLIC_BASE + 0x201004
.equ UART_PLIC_IRQ, 10 # UART IRQ number on this platform
.section ".trampoline", "ax"
#-----------------------------------------------------------------------
# external_handler: claim the pending IRQ from the PLIC, dispatch it,
# and write the completion notification back.
# Clobbers: t0, t1, t2.  Uses 16 bytes of kernel stack.
# FIXES vs original:
#  * PLIC claim/complete are 32-bit registers: use lw/sw, not ld/sd
#    (a 64-bit access spans two register slots).
#  * ra and the claimed IRQ number in t1 were clobbered by the nested
#    `call uart_rx_handler` (t1 is caller-saved in the C ABI), so the
#    final `ret` jumped back into the handler and the wrong value was
#    written to the complete register.  Both are now preserved on the
#    stack across the call.
#-----------------------------------------------------------------------
external_handler:
addi sp, sp, -16
sd ra, 8(sp)
# 1) claim the pending IRQ
li t0, PLIC_CLAIM
lw t1, 0(t0) # t1 = IRQ number (32-bit register)
sd t1, 0(sp) # keep the IRQ number safe across the C call
# 2) dispatch on the IRQ number
li t2, UART_PLIC_IRQ
beq t1, t2, .Lhandle_uart
# unsupported IRQ: just complete it
j .Lcomplete
.Lhandle_uart:
call uart_rx_handler
.Lcomplete:
# 3) write the IRQ number back to signal completion
ld t1, 0(sp)
li t0, PLIC_COMPLETE
sw t1, 0(t0)
ld ra, 8(sp)
addi sp, sp, 16
ret
.section ".trampoline", "ax"
#-----------------------------------------------------------------------
# handle_ecall: service an `ecall` from U-mode.
# Calls the C syscall dispatcher, restores the full register file from
# TRAPFRAME (slot 32 / t0 is deliberately unused), advances sepc past
# the 4-byte ecall instruction, and exits through trap_return.
# NOTE(review): a0 is reloaded from TRAPFRAME after the call, so the C
# return value of syscall_handler is discarded here - presumably the
# handler stores its result into TRAPFRAME itself; confirm.
#-----------------------------------------------------------------------
handle_ecall:
call syscall_handler
# save the just-restored sp into user_sp for trap_return
# la t1, TRAPFRAME
# ld t2, 8(t1) # t2 = trapframe.sp
# la t3, user_sp
# sd t2, 0(t3)
la t0, TRAPFRAME
/*
la t0, gCPU
ld t0, 0(t0) # t0 = &gCPU
*/
ld ra, 0(t0)
ld sp, 8(t0)
ld gp, 16(t0)
ld tp, 24(t0)
/* sd t0, 32(t0) */
ld t1, 40(t0)
ld t2, 48(t0)
ld t3, 56(t0)
ld t4, 64(t0)
ld t5, 72(t0)
ld t6, 80(t0)
ld a0, 88(t0)
ld a1, 96(t0)
ld a2, 104(t0)
ld a3, 112(t0)
ld a4, 120(t0)
ld a5, 128(t0)
ld a6, 136(t0)
ld a7, 144(t0)
ld s0, 152(t0)
ld s1, 160(t0)
ld s2, 168(t0)
ld s3, 176(t0)
ld s4, 184(t0)
ld s5, 192(t0)
ld s6, 200(t0)
ld s7, 208(t0)
ld s8, 216(t0)
ld s9, 224(t0)
ld s10, 232(t0)
ld s11, 240(t0)
ld t0, 248(t0) # t0 = saved sepc (frame pointer consumed last)
csrw sepc, t0
# advance sepc past the ecall instruction
# NOTE(review): the csrw/csrr round trip is redundant; a single
# `addi t0, t0, 4` before one csrw would suffice.
csrr t0, sepc
addi t0, t0, 4
csrw sepc, t0
j trap_return
.equ STIE_BIT, (1<<5)
.equ SPIE_UPIE, ((1<<4)|(1<<5))
.equ SPP_BIT, (1<<8)
.section ".trampoline", "ax"
#-----------------------------------------------------------------------
# trap_return: exit the trap and return to U-mode.
# Reloads the user page table and user stack pointer, re-enables the
# supervisor timer interrupt, programs sstatus for a U-mode return
# with interrupts enabled, then sret's.
# FIX vs original: a second `sret` followed the first one; it was
# unreachable dead code and has been removed.
#-----------------------------------------------------------------------
trap_return:
# --- reload the user SATP and flush the TLB ---
la t0, user_satp
ld t0, 0(t0)
csrw satp, t0
sfence.vma zero, zero
la t0, user_sp
ld sp, 0(t0)
# 1) enable the supervisor timer interrupt (STIE = bit 5)
li t0, STIE_BIT
csrs sie, t0
# 2) sstatus for sret:
#    - set UPIE/SPIE so interrupts are re-enabled after sret
#    - clear SPP so sret returns to U-mode
li t0, SPIE_UPIE
csrs sstatus, t0
li t0, SPP_BIT
csrc sstatus, t0
# 3) return to user mode
sret
.extern user_satp # set from C
.global swtch
#-----------------------------------------------------------------------
# swtch(context *a0): resume a saved context in user mode.
# Restores the register file from the context block at a0 (slot 32,
# the t0 slot, is deliberately unused - see trapvec), loads sepc from
# slot 248, switches to the user page table and user stack, enables
# interrupts for the U-mode return, and sret's.
# FIX vs original: the restore sequence contained `sd t0, 32(a0)`,
# which *stored* the caller's (meaningless) t0 into the context block
# instead of loading it.  No path ever reads slot 32 (every other
# save/restore skips it), so the store is removed to match them.
# NOTE(review): t0/t1 are clobbered again below after being restored;
# they are caller-saved in the C ABI so this is presumably fine -
# confirm nothing depends on their restored values.
#-----------------------------------------------------------------------
swtch:
ld ra, 0(a0)
ld sp, 8(a0)
ld gp, 16(a0)
ld tp, 24(a0)
/* ld t0, 32(a0) */
ld t1, 40(a0)
ld t2, 48(a0)
ld t3, 56(a0)
ld t4, 64(a0)
ld t5, 72(a0)
ld t6, 80(a0)
/* ld a0, 88(a0) */
ld a1, 96(a0)
ld a2, 104(a0)
ld a3, 112(a0)
ld a4, 120(a0)
ld a5, 128(a0)
ld a6, 136(a0)
ld a7, 144(a0)
ld s0, 152(a0)
ld s1, 160(a0)
ld s2, 168(a0)
ld s3, 176(a0)
ld s4, 184(a0)
ld s5, 192(a0)
ld s6, 200(a0)
ld s7, 208(a0)
ld s8, 216(a0)
ld s9, 224(a0)
ld s10, 232(a0)
ld s11, 240(a0)
ld a0, 248(a0) # a0 = saved pc (context pointer consumed last)
csrw sepc, a0
csrr a0, sscratch
# 1) switch SATP to the user page table
ld t0, user_satp
csrw satp, t0
sfence.vma zero, zero
# 2) switch to the user stack
la t0, user_sp
ld sp, 0(t0)
# 3) sstatus: SPP=0 (return to U-mode), SPIE=1
csrr t0, sstatus
li t1, (1<<5) # SPIE = bit5
or t0, t0, t1
li t1, ~(1<<8) # clear SPP = bit8
and t0, t0, t1
csrw sstatus, t0
# 4) sepc already holds the resume point
# csrw sepc, a0
# 5) enable supervisor timer interrupts (STIE=1, SIE=1)
li t0, (1<<5) # bit5 = STIE
csrs sie, t0
csrr t0, sstatus
li t1, (1<<1) # bit1 = SIE
or t0, t0, t1
csrw sstatus, t0
# 6) CLINT timer programming (handled elsewhere)
# li t0, CLINT_MTIME
# ld t1, 0(t0) # t1 = *CLINT_MTIME
# add t1, t1, a3 # t1 += interval
# li t0, CLINT_MTIMECMP
# sd t1, 0(t0)
# 7) return to user mode
sret
.equ CLINT_MTIME, 0x0200BFF8
.equ CLINT_MTIMECMP, 0x02004000
.equ STIE_BIT, (1<<5)
.equ SPIE_UPIE, ((1<<4)|(1<<5))
.equ SPP_BIT, (1<<8)
.section ".trampoline", "ax"
.globl enter_user
#-----------------------------------------------------------------------
# enter_user: first entry into user mode.
#   a0 = user entry point       a1 = user stack pointer
#   a2 = user satp value        a3 = timer interval (unused here)
#   a4 = user gp
# Installs the user page table, stack, and gp, arms sepc, enables the
# supervisor timer interrupt, configures sstatus for a U-mode return
# with interrupts enabled, then sret's to `entry`.
#-----------------------------------------------------------------------
enter_user:
# install the user address space and flush stale translations
csrw satp, a2
sfence.vma zero, zero
# user stack and global pointer
mv sp, a1
mv gp, a4
# sret will jump to the user entry point
csrw sepc, a0
# allow supervisor timer interrupts
li t1, STIE_BIT
csrs sie, t1
# sstatus: set SPIE/UPIE (interrupts on after sret), clear SPP (U-mode)
li t1, SPIE_UPIE
csrs sstatus, t1
li t1, SPP_BIT
csrc sstatus, t1
# drop into U-mode
sret
.align 2
.globl mtvec
.equ CLINT_MTIME, 0x0200BFF8
.equ CLINT_MTIMECMP, 0x02004000
.equ INTERVAL, 0xFFFFFFFFFFFFFF
#-----------------------------------------------------------------------
# mtvec: M-mode trap handler.  Re-arms the machine timer by writing
# time + INTERVAL to CLINT mtimecmp (which clears MTIP) and mret's.
# NOTE(review): t0-t2 are clobbered without being saved, so the
# interrupted context loses them - confirm that is acceptable here.
# NOTE(review): INTERVAL = 0xFFFFFFFFFFFFFF pushes the next tick into
# the far future (effectively one-shot) - confirm this is intended.
# NOTE(review): reading the `time` CSR from M-mode relies on it being
# implemented by the platform; the mtime MMIO register at CLINT_MTIME
# is the portable source - verify on this target.
#-----------------------------------------------------------------------
mtvec:
# --- (save scratch registers here if that ever becomes necessary) ---
# 1) read the current time
csrr t0, time # t0 = CSR time
# 2) compute the next timer deadline
li t1, INTERVAL # t1 = tick interval
add t0, t0, t1 # t0 = now + INTERVAL
# 3) write it to CLINT mtimecmp - this clears the MTIP bit
la t2, CLINT_MTIMECMP # t2 = address 0x02004000
sd t0, 0(t2) # *mtimecmp = next_time
# 4) --- (restore scratch registers here if saved) ---
mret # return from the trap
.globl asm_memmove
#-----------------------------------------------------------------------
# void *asm_memmove(void *dst, const void *src, size_t len)
#   a0 = dst, a1 = src, a2 = len.  Returns the original dst in a0.
# Overlap-safe byte-at-a-time copy: copies backwards when dst sits
# above src so unread source bytes are never overwritten.
# Clobbers: t0, t1.  Uses no stack (yield_return depends on that).
# FIX vs original: the copy-direction test used the signed `blt` on
# pointers; addresses are unsigned, so `bltu` is used instead.
#-----------------------------------------------------------------------
asm_memmove:
# keep the original dst for the return value
mv t0, a0
# nothing to do for len == 0 or dst == src
beqz a2, .exit
beq a0, a1, .exit
# dst < src (unsigned): a forward copy is always safe
bltu a0, a1, .forward
# -- backward copy (dst > src; regions may overlap) --
add a0, a0, a2 # dst_end = dst + len
add a1, a1, a2 # src_end = src + len
.backward_loop:
addi a0, a0, -1 # dst_end--
addi a1, a1, -1 # src_end--
lbu t1, 0(a1) # t1 = *src_end
sb t1, 0(a0) # *dst_end = t1
addi a2, a2, -1 # len--
bnez a2, .backward_loop
j .exit
.forward:
# -- forward copy --
.forward_loop:
lbu t1, 0(a1) # t1 = *src
sb t1, 0(a0) # *dst = t1
addi a1, a1, 1 # src++
addi a0, a0, 1 # dst++
addi a2, a2, -1 # len--
bnez a2, .forward_loop
.exit:
# return the original dst
mv a0, t0
ret
.align 2
.global yield
.equ STACK_PAGES, 16
.equ STACK_MAX, 4096 * STACK_PAGES
.extern stack_top
.extern yield_stack
#-----------------------------------------------------------------------
# yield: cooperative yield from kernel code.
# Saves the register file (t0 slot 32 deliberately unused) and sepc
# into TRAPFRAME2, copies the live kernel stack region
# [stack_top - STACK_MAX, stack_top) into the yield_stack buffer, then
# enters the C scheduler.
# NOTE(review): kernel_yield is expected not to return here - control
# comes back through yield_return instead; confirm.
#-----------------------------------------------------------------------
yield:
la t0, TRAPFRAME2;
sd ra, 0(t0)
sd sp, 8(t0)
sd gp, 16(t0)
sd tp, 24(t0)
/* sd t0, 32(t0) */
sd t1, 40(t0)
sd t2, 48(t0)
sd t3, 56(t0)
sd t4, 64(t0)
sd t5, 72(t0)
sd t6, 80(t0)
sd a0, 88(t0)
sd a1, 96(t0)
sd a2, 104(t0)
sd a3, 112(t0)
sd a4, 120(t0)
sd a5, 128(t0)
sd a6, 136(t0)
sd a7, 144(t0)
sd s0, 152(t0)
sd s1, 160(t0)
sd s2, 168(t0)
sd s3, 176(t0)
sd s4, 184(t0)
sd s5, 192(t0)
sd s6, 200(t0)
sd s7, 208(t0)
sd s8, 216(t0)
sd s9, 224(t0)
sd s10, 232(t0)
sd s11, 240(t0)
csrr t1, sepc
sd t1, 248(t0)
la a0, yield_stack # a0 = dst (the save buffer)
li t0, STACK_MAX # t0 = stack size in bytes
la a1, stack_top # a1 = &stack_top
sub a1, a1, t0 # a1 = stack_top - STACK_MAX (stack base = src)
li a2, STACK_MAX # a2 = length
jal asm_memmove # memmove(dst, src, len); ra already saved above
call kernel_yield
.align 2
.equ STACK_PAGES, 16
.equ STACK_MAX, 4096 * STACK_PAGES
.global yield_return
.extern stack_top
.extern yield_stack
#-----------------------------------------------------------------------
# yield_return: resume a context saved by yield.
# Copies the yield_stack buffer back over the kernel stack region
# [stack_top - STACK_MAX, stack_top), restores the register file from
# TRAPFRAME2 (t0 slot 32 deliberately unused), re-arms sepc, and
# returns through the restored ra.
# NOTE(review): the copy overwrites the live kernel stack; this is
# only safe because asm_memmove itself uses no stack - keep it so.
#-----------------------------------------------------------------------
yield_return:
li t0, STACK_MAX # t0 = stack size in bytes
la a0, stack_top # a0 = &stack_top
sub a0, a0, t0 # a0 = stack_top - STACK_MAX (dst = stack base)
la a1, yield_stack # a1 = src (the save buffer)
li a2, STACK_MAX # a2 = length
jal asm_memmove # memmove(dst, src, len); ra restored below
la t0, TRAPFRAME2;
ld ra, 0(t0)
ld sp, 8(t0)
ld gp, 16(t0)
ld tp, 24(t0)
/* sd t0, 32(t0) */
ld t1, 40(t0)
ld t2, 48(t0)
ld t3, 56(t0)
ld t4, 64(t0)
ld t5, 72(t0)
ld t6, 80(t0)
ld a0, 88(t0)
ld a1, 96(t0)
ld a2, 104(t0)
ld a3, 112(t0)
ld a4, 120(t0)
ld a5, 128(t0)
ld a6, 136(t0)
ld a7, 144(t0)
ld s0, 152(t0)
ld s1, 160(t0)
ld s2, 168(t0)
ld s3, 176(t0)
ld s4, 184(t0)
ld s5, 192(t0)
ld s6, 200(t0)
ld s7, 208(t0)
ld s8, 216(t0)
ld s9, 224(t0)
ld s10, 232(t0)
ld s11, 240(t0)
ld t0, 248(t0) # t0 = saved sepc (frame pointer consumed last)
csrw sepc, t0
ret
|
ab25cq/comelang
| 36,520
|
minux9/child.S
|
.file "child.c"
.option nopic
.attribute arch, "rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0"
.attribute unaligned_access, 0
.attribute stack_align, 16
.text
.Ltext0:
.cfi_sections .debug_frame
.file 0 "/Users/ab25cq/minux9" "child.c"
.align 1
.type exit, @function
exit:
.LFB0:
.file 1 "minux.h"
.loc 1 122 38
.cfi_startproc
addi sp,sp,-32
.cfi_def_cfa_offset 32
sd ra,24(sp)
sd s0,16(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,32
.cfi_def_cfa 8, 0
sd a0,-24(s0)
.loc 1 123 5
ld a5,-24(s0)
#APP
# 123 "minux.h" 1
mv a0, a5
li a7, 70
ecall
# 0 "" 2
#NO_APP
.L2:
.loc 1 132 10
j .L2
.cfi_endproc
.LFE0:
.size exit, .-exit
.align 1
.globl putchar
.type putchar, @function
putchar:
.LFB1:
.file 2 "child.c"
.loc 2 6 1
.cfi_startproc
addi sp,sp,-48
.cfi_def_cfa_offset 48
sd ra,40(sp)
sd s0,32(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,48
.cfi_def_cfa 8, 0
mv a5,a0
sb a5,-33(s0)
.loc 2 8 12
lbu a5,-33(s0)
sb a5,-32(s0)
.loc 2 9 12
sb zero,-31(s0)
.LBB2:
.loc 2 10 5
li a0,1
addi a5,s0,-32
mv a1,a5
li a2,1
li a7,64
#APP
# 10 "child.c" 1
ecall
# 0 "" 2
#NO_APP
sd a0,-24(s0)
.LBE2:
.loc 2 11 1
nop
ld ra,40(sp)
.cfi_restore 1
ld s0,32(sp)
.cfi_restore 8
.cfi_def_cfa 2, 48
addi sp,sp,48
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE1:
.size putchar, .-putchar
.align 1
.globl printint
.type printint, @function
printint:
.LFB2:
.loc 2 13 45
.cfi_startproc
addi sp,sp,-96
.cfi_def_cfa_offset 96
sd ra,88(sp)
sd s0,80(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,96
.cfi_def_cfa 8, 0
mv a5,a0
mv a3,a1
mv a4,a2
sw a5,-84(s0)
mv a5,a3
sw a5,-88(s0)
mv a5,a4
sw a5,-92(s0)
.loc 2 15 9
sw zero,-20(s0)
.loc 2 16 9
sw zero,-24(s0)
.loc 2 19 8
lw a5,-92(s0)
sext.w a5,a5
beq a5,zero,.L5
.loc 2 19 14 discriminator 1
lw a5,-84(s0)
sext.w a5,a5
bge a5,zero,.L5
.loc 2 20 18
li a5,1
sw a5,-24(s0)
.loc 2 21 16
lw a5,-84(s0)
negw a5,a5
sext.w a5,a5
.loc 2 21 14
sw a5,-28(s0)
j .L6
.L5:
.loc 2 23 14
lw a5,-84(s0)
sw a5,-28(s0)
.L6:
.loc 2 26 8
lw a5,-28(s0)
sext.w a5,a5
bne a5,zero,.L9
.loc 2 27 9
li a0,48
call putchar
j .L4
.L12:
.LBB3:
.loc 2 32 26
lw a5,-88(s0)
lw a4,-28(s0)
remuw a5,a4,a5
sext.w a5,a5
.loc 2 32 13
sw a5,-32(s0)
.loc 2 33 18
lw a5,-32(s0)
sext.w a4,a5
li a5,9
bgt a4,a5,.L10
.loc 2 33 37 discriminator 1
lw a5,-32(s0)
andi a5,a5,0xff
.loc 2 33 18 discriminator 1
addiw a5,a5,48
andi a5,a5,0xff
j .L11
.L10:
.loc 2 33 51 discriminator 2
lw a5,-32(s0)
andi a5,a5,0xff
.loc 2 33 18 discriminator 2
addiw a5,a5,87
andi a5,a5,0xff
.L11:
.loc 2 33 14 discriminator 4
lw a4,-20(s0)
addiw a3,a4,1
sw a3,-20(s0)
.loc 2 33 18 discriminator 4
addi a4,a4,-16
add a4,a4,s0
sb a5,-56(a4)
.loc 2 34 14
lw a5,-88(s0)
lw a4,-28(s0)
divuw a5,a4,a5
sw a5,-28(s0)
.L9:
.LBE3:
.loc 2 31 17
lw a5,-28(s0)
sext.w a5,a5
bne a5,zero,.L12
.loc 2 37 8
lw a5,-24(s0)
sext.w a5,a5
beq a5,zero,.L14
.loc 2 38 9
li a0,45
call putchar
.loc 2 41 11
j .L14
.L15:
.loc 2 42 9
lw a5,-20(s0)
addi a5,a5,-16
add a5,a5,s0
lbu a5,-56(a5)
mv a0,a5
call putchar
.L14:
.loc 2 41 16
lw a5,-20(s0)
addiw a5,a5,-1
sw a5,-20(s0)
lw a5,-20(s0)
sext.w a5,a5
bge a5,zero,.L15
.L4:
.loc 2 44 1
ld ra,88(sp)
.cfi_restore 1
ld s0,80(sp)
.cfi_restore 8
.cfi_def_cfa 2, 96
addi sp,sp,96
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE2:
.size printint, .-printint
.align 1
.globl printlong
.type printlong, @function
printlong:
.LFB3:
.loc 2 46 57
.cfi_startproc
addi sp,sp,-112
.cfi_def_cfa_offset 112
sd ra,104(sp)
sd s0,96(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,112
.cfi_def_cfa 8, 0
sd a0,-104(s0)
mv a5,a1
mv a4,a2
sw a5,-108(s0)
mv a5,a4
sw a5,-112(s0)
.loc 2 48 9
sw zero,-20(s0)
.loc 2 49 9
sw zero,-24(s0)
.loc 2 51 8
lw a5,-112(s0)
sext.w a5,a5
beq a5,zero,.L17
.loc 2 51 17 discriminator 1
ld a5,-104(s0)
.loc 2 51 14 discriminator 1
bge a5,zero,.L17
.loc 2 52 18
li a5,1
sw a5,-24(s0)
.loc 2 53 17
ld a5,-104(s0)
.loc 2 53 16
neg a5,a5
.loc 2 53 14
sd a5,-104(s0)
.L17:
.loc 2 56 8
ld a5,-104(s0)
bne a5,zero,.L20
.loc 2 57 9
li a0,48
call putchar
j .L16
.L23:
.LBB4:
.loc 2 62 26
lw a5,-108(s0)
ld a4,-104(s0)
remu a5,a4,a5
.loc 2 62 13
sw a5,-28(s0)
.loc 2 63 18
lw a5,-28(s0)
sext.w a4,a5
li a5,9
bgt a4,a5,.L21
.loc 2 63 37 discriminator 1
lw a5,-28(s0)
andi a5,a5,0xff
.loc 2 63 18 discriminator 1
addiw a5,a5,48
andi a5,a5,0xff
j .L22
.L21:
.loc 2 63 51 discriminator 2
lw a5,-28(s0)
andi a5,a5,0xff
.loc 2 63 18 discriminator 2
addiw a5,a5,87
andi a5,a5,0xff
.L22:
.loc 2 63 14 discriminator 4
lw a4,-20(s0)
addiw a3,a4,1
sw a3,-20(s0)
.loc 2 63 18 discriminator 4
addi a4,a4,-16
add a4,a4,s0
sb a5,-80(a4)
.loc 2 64 14
lw a5,-108(s0)
ld a4,-104(s0)
divu a5,a4,a5
sd a5,-104(s0)
.L20:
.LBE4:
.loc 2 61 17
ld a5,-104(s0)
bne a5,zero,.L23
.loc 2 67 8
lw a5,-24(s0)
sext.w a5,a5
beq a5,zero,.L25
.loc 2 68 9
li a0,45
call putchar
.loc 2 71 11
j .L25
.L26:
.loc 2 72 9
lw a5,-20(s0)
addi a5,a5,-16
add a5,a5,s0
lbu a5,-80(a5)
mv a0,a5
call putchar
.L25:
.loc 2 71 16
lw a5,-20(s0)
addiw a5,a5,-1
sw a5,-20(s0)
lw a5,-20(s0)
sext.w a5,a5
bge a5,zero,.L26
.L16:
.loc 2 74 1
ld ra,104(sp)
.cfi_restore 1
ld s0,96(sp)
.cfi_restore 8
.cfi_def_cfa 2, 112
addi sp,sp,112
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE3:
.size printlong, .-printlong
.align 1
.globl printlonglong
.type printlonglong, @function
printlonglong:
.LFB4:
.loc 2 76 66
.cfi_startproc
addi sp,sp,-112
.cfi_def_cfa_offset 112
sd ra,104(sp)
sd s0,96(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,112
.cfi_def_cfa 8, 0
sd a0,-104(s0)
mv a5,a1
mv a4,a2
sw a5,-108(s0)
mv a5,a4
sw a5,-112(s0)
.loc 2 78 9
sw zero,-20(s0)
.loc 2 79 9
sw zero,-24(s0)
.loc 2 81 8
lw a5,-112(s0)
sext.w a5,a5
beq a5,zero,.L28
.loc 2 81 17 discriminator 1
ld a5,-104(s0)
.loc 2 81 14 discriminator 1
bge a5,zero,.L28
.loc 2 82 18
li a5,1
sw a5,-24(s0)
.loc 2 83 17
ld a5,-104(s0)
.loc 2 83 16
neg a5,a5
.loc 2 83 14
sd a5,-104(s0)
.L28:
.loc 2 86 8
ld a5,-104(s0)
bne a5,zero,.L31
.loc 2 87 9
li a0,48
call putchar
j .L27
.L34:
.LBB5:
.loc 2 92 26
lw a5,-108(s0)
ld a4,-104(s0)
remu a5,a4,a5
.loc 2 92 13
sw a5,-28(s0)
.loc 2 93 18
lw a5,-28(s0)
sext.w a4,a5
li a5,9
bgt a4,a5,.L32
.loc 2 93 37 discriminator 1
lw a5,-28(s0)
andi a5,a5,0xff
.loc 2 93 18 discriminator 1
addiw a5,a5,48
andi a5,a5,0xff
j .L33
.L32:
.loc 2 93 51 discriminator 2
lw a5,-28(s0)
andi a5,a5,0xff
.loc 2 93 18 discriminator 2
addiw a5,a5,87
andi a5,a5,0xff
.L33:
.loc 2 93 14 discriminator 4
lw a4,-20(s0)
addiw a3,a4,1
sw a3,-20(s0)
.loc 2 93 18 discriminator 4
addi a4,a4,-16
add a4,a4,s0
sb a5,-80(a4)
.loc 2 94 14
lw a5,-108(s0)
ld a4,-104(s0)
divu a5,a4,a5
sd a5,-104(s0)
.L31:
.LBE5:
.loc 2 91 17
ld a5,-104(s0)
bne a5,zero,.L34
.loc 2 97 8
lw a5,-24(s0)
sext.w a5,a5
beq a5,zero,.L36
.loc 2 98 9
li a0,45
call putchar
.loc 2 101 11
j .L36
.L37:
.loc 2 102 9
lw a5,-20(s0)
addi a5,a5,-16
add a5,a5,s0
lbu a5,-80(a5)
mv a0,a5
call putchar
.L36:
.loc 2 101 16
lw a5,-20(s0)
addiw a5,a5,-1
sw a5,-20(s0)
lw a5,-20(s0)
sext.w a5,a5
bge a5,zero,.L37
.L27:
.loc 2 104 1
ld ra,104(sp)
.cfi_restore 1
ld s0,96(sp)
.cfi_restore 8
.cfi_def_cfa 2, 112
addi sp,sp,112
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE4:
.size printlonglong, .-printlonglong
.section .rodata
.align 3
.LC6:
.string "(null)"
.text
.align 1
.globl printf
.type printf, @function
printf:
.LFB5:
.loc 2 106 34
.cfi_startproc
addi sp,sp,-192
.cfi_def_cfa_offset 192
sd ra,120(sp)
sd s0,112(sp)
.cfi_offset 1, -72
.cfi_offset 8, -80
addi s0,sp,128
.cfi_def_cfa 8, 64
sd a0,-120(s0)
sd a1,8(s0)
sd a2,16(s0)
sd a3,24(s0)
sd a4,32(s0)
sd a5,40(s0)
sd a6,48(s0)
sd a7,56(s0)
.loc 2 108 5
addi a5,s0,64
sd a5,-128(s0)
ld a5,-128(s0)
addi a5,a5,-56
sd a5,-104(s0)
.loc 2 111 12
ld a5,-120(s0)
sd a5,-24(s0)
.loc 2 111 5
j .L39
.L64:
.loc 2 112 13
ld a5,-24(s0)
lbu a5,0(a5)
.loc 2 112 12
mv a4,a5
li a5,37
beq a4,a5,.L40
.loc 2 113 13
ld a5,-24(s0)
lbu a5,0(a5)
mv a0,a5
call putchar
.loc 2 114 13
j .L41
.L40:
.loc 2 117 10
ld a5,-24(s0)
addi a5,a5,1
sd a5,-24(s0)
.loc 2 119 13
ld a5,-24(s0)
lbu a5,0(a5)
.loc 2 119 12
mv a4,a5
li a5,108
bne a4,a5,.L42
.LBB6:
.loc 2 120 17
li a5,1
sw a5,-28(s0)
.loc 2 121 20
ld a5,-24(s0)
addi a5,a5,1
.loc 2 121 17
lbu a5,0(a5)
.loc 2 121 16
mv a4,a5
li a5,108
bne a4,a5,.L43
.loc 2 122 24
li a5,2
sw a5,-28(s0)
.loc 2 123 18
ld a5,-24(s0)
addi a5,a5,1
sd a5,-24(s0)
.L43:
.loc 2 125 14
ld a5,-24(s0)
addi a5,a5,1
sd a5,-24(s0)
.loc 2 127 21
ld a5,-24(s0)
lbu a5,0(a5)
sext.w a5,a5
.loc 2 127 13
li a4,100
beq a5,a4,.L44
li a4,120
bne a5,a4,.L45
.loc 2 129 24
lw a5,-28(s0)
sext.w a4,a5
li a5,1
bne a4,a5,.L46
.LBB7:
.loc 2 130 39
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
ld a5,0(a5)
sd a5,-80(s0)
.loc 2 131 25
li a2,0
li a1,16
ld a0,-80(s0)
call printlong
.LBE7:
.loc 2 136 21
j .L41
.L46:
.LBB8:
.loc 2 133 44
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
ld a5,0(a5)
sd a5,-72(s0)
.loc 2 134 25
li a2,0
li a1,16
ld a0,-72(s0)
call printlonglong
.LBE8:
.loc 2 136 21
j .L41
.L44:
.loc 2 139 24
lw a5,-28(s0)
sext.w a4,a5
li a5,1
bne a4,a5,.L49
.LBB9:
.loc 2 140 30
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
ld a5,0(a5)
sd a5,-96(s0)
.loc 2 141 25
ld a5,-96(s0)
li a2,1
li a1,10
mv a0,a5
call printlong
.LBE9:
.loc 2 146 21
j .L41
.L49:
.LBB10:
.loc 2 143 35
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
ld a5,0(a5)
sd a5,-88(s0)
.loc 2 144 25
ld a5,-88(s0)
li a2,1
li a1,10
mv a0,a5
call printlonglong
.LBE10:
.loc 2 146 21
j .L41
.L45:
.loc 2 149 21
li a0,37
call putchar
.LBB11:
.loc 2 150 30
sw zero,-32(s0)
.loc 2 150 21
j .L51
.L52:
.loc 2 150 50 discriminator 3
li a0,108
call putchar
.loc 2 150 46 discriminator 3
lw a5,-32(s0)
addiw a5,a5,1
sw a5,-32(s0)
.L51:
.loc 2 150 36 discriminator 2
lw a5,-32(s0)
mv a4,a5
lw a5,-28(s0)
sext.w a4,a4
sext.w a5,a5
blt a4,a5,.L52
.LBE11:
.loc 2 151 21
ld a5,-24(s0)
lbu a5,0(a5)
mv a0,a5
call putchar
.loc 2 152 21
j .L41
.L42:
.LBE6:
.loc 2 156 21
ld a5,-24(s0)
lbu a5,0(a5)
sext.w a5,a5
.loc 2 156 13
li a4,37
beq a5,a4,.L53
li a4,37
blt a5,a4,.L54
li a4,120
bgt a5,a4,.L54
li a4,99
blt a5,a4,.L54
addiw a5,a5,-99
mv a3,a5
sext.w a4,a3
li a5,21
bgtu a4,a5,.L54
slli a5,a3,32
srli a5,a5,32
slli a4,a5,2
lla a5,.L56
add a5,a4,a5
lw a5,0(a5)
sext.w a4,a5
lla a5,.L56
add a5,a4,a5
jr a5
.section .rodata
.align 2
.align 2
.L56:
.word .L60-.L56
.word .L59-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L58-.L56
.word .L54-.L56
.word .L54-.L56
.word .L57-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L54-.L56
.word .L55-.L56
.text
.L59:
.LBB12:
.loc 2 158 25
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
lw a5,0(a5)
sw a5,-60(s0)
.loc 2 159 21
lw a5,-60(s0)
li a2,1
li a1,10
mv a0,a5
call printint
.loc 2 160 21
j .L41
.L55:
.LBE12:
.LBB13:
.loc 2 163 34
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
lw a5,0(a5)
sw a5,-44(s0)
.loc 2 164 21
lw a5,-44(s0)
li a2,0
li a1,16
mv a0,a5
call printint
.loc 2 165 21
j .L41
.L58:
.LBE13:
.LBB14:
.loc 2 168 57
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
ld a5,0(a5)
.loc 2 168 35 discriminator 1
sd a5,-56(s0)
.loc 2 169 21
li a0,48
call putchar
.loc 2 169 35 discriminator 1
li a0,120
call putchar
.loc 2 170 21
li a2,0
li a1,16
ld a0,-56(s0)
call printlong
.loc 2 171 21
j .L41
.L57:
.LBE14:
.LBB15:
.loc 2 174 33
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
ld a5,0(a5)
sd a5,-40(s0)
.loc 2 175 24
ld a5,-40(s0)
bne a5,zero,.L62
.loc 2 175 31 discriminator 1
lla a5,.LC6
sd a5,-40(s0)
.loc 2 176 27
j .L62
.L63:
.loc 2 176 42 discriminator 2
ld a5,-40(s0)
addi a4,a5,1
sd a4,-40(s0)
.loc 2 176 32 discriminator 2
lbu a5,0(a5)
mv a0,a5
call putchar
.L62:
.loc 2 176 28 discriminator 1
ld a5,-40(s0)
lbu a5,0(a5)
bne a5,zero,.L63
.loc 2 177 21
j .L41
.L60:
.LBE15:
.LBB16:
.loc 2 180 36
ld a5,-104(s0)
addi a4,a5,8
sd a4,-104(s0)
lw a5,0(a5)
.loc 2 180 26 discriminator 1
sb a5,-61(s0)
.loc 2 181 21
lbu a5,-61(s0)
mv a0,a5
call putchar
.loc 2 182 21
j .L41
.L53:
.LBE16:
.loc 2 185 21
li a0,37
call putchar
.loc 2 186 21
j .L41
.L54:
.loc 2 189 21
li a0,37
call putchar
.loc 2 190 21
ld a5,-24(s0)
lbu a5,0(a5)
mv a0,a5
call putchar
.loc 2 191 21
nop
.L41:
.loc 2 111 24 discriminator 2
ld a5,-24(s0)
addi a5,a5,1
sd a5,-24(s0)
.L39:
.loc 2 111 19 discriminator 1
ld a5,-24(s0)
lbu a5,0(a5)
bne a5,zero,.L64
.loc 2 198 12
li a5,0
.loc 2 199 1
mv a0,a5
ld ra,120(sp)
.cfi_restore 1
ld s0,112(sp)
.cfi_restore 8
.cfi_def_cfa 2, 192
addi sp,sp,192
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE5:
.size printf, .-printf
.align 1
.globl puts
.type puts, @function
puts:
.LFB6:
.loc 2 203 26
.cfi_startproc
addi sp,sp,-32
.cfi_def_cfa_offset 32
sd ra,24(sp)
sd s0,16(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,32
.cfi_def_cfa 8, 0
sd a0,-24(s0)
.loc 2 204 11
j .L67
.L68:
.loc 2 205 19
ld a5,-24(s0)
addi a4,a5,1
sd a4,-24(s0)
.loc 2 205 9
lbu a5,0(a5)
mv a0,a5
call putchar
.L67:
.loc 2 204 12
ld a5,-24(s0)
lbu a5,0(a5)
bne a5,zero,.L68
.loc 2 207 1
nop
nop
ld ra,24(sp)
.cfi_restore 1
ld s0,16(sp)
.cfi_restore 8
.cfi_def_cfa 2, 32
addi sp,sp,32
.cfi_def_cfa_offset 0
jr ra
.cfi_endproc
.LFE6:
.size puts, .-puts
.section .rodata
.align 3
.LC0:
.string "/hello.elf"
.align 3
.LC4:
.string "/hello2.elf"
.align 3
.LC9:
.string "END"
.align 3
.LC1:
.string "aaa"
.align 3
.LC2:
.string "bbb"
.align 3
.LC7:
.dword .LC0
.dword .LC1
.dword .LC2
.dword 0
.align 3
.LC8:
.dword .LC4
.dword .LC1
.dword .LC2
.dword 0
.text
.align 1
.globl main
.type main, @function
# int main(void) — test driver (from child.c) for the minux9 kernel.
# Compiler-generated RV64 code (GCC 14.2, -O0, frame pointer kept in s0).
# The #APP/#NO_APP regions are inline-assembly `ecall` system calls; the
# call number is loaded into a7 (73, 68, 67, 72, 69, 71).  The semantics
# of each number are defined by the minux9 kernel — not visible here.
# Per the DWARF info below: fd pair at -32(s0), pid1 at -20(s0),
# pid2 at -24(s0), buf at -40(s0), status at -44(s0), argv at -80(s0).
main:
.LFB7:
.loc 2 210 16
.cfi_startproc
addi sp,sp,-80
.cfi_def_cfa_offset 80
sd ra,72(sp)
sd s0,64(sp)
.cfi_offset 1, -8
.cfi_offset 8, -16
addi s0,sp,80
.cfi_def_cfa 8, 0
.LBB17:
.loc 2 216 5
# a0 = &fd (two ints at s0-32); syscall 73 — presumably pipe-like; confirm in kernel
addi a5,s0,-32
mv a0,a5
li a7,73
#APP
# 216 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE17:
.LBB18:
.loc 2 218 12
# syscall 68, result -> pid1; checked against 0 below (fork-like; confirm)
li a7,68
#APP
# 218 "child.c" 1
ecall
# 0 "" 2
#NO_APP
mv a5,a0
.LBE18:
.loc 2 218 10
sw a5,-20(s0)
.loc 2 219 8
lw a5,-20(s0)
sext.w a5,a5
bne a5,zero,.L70
.LBB19:
.LBB20:
# pid1 == 0 branch: syscall 67 on fd[0], 72 on fd[1] with a1=1, 67 on fd[1]
.loc 2 220 9
lw a5,-32(s0)
mv a0,a5
li a7,67
#APP
# 220 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE20:
.LBB21:
.loc 2 221 9
lw a5,-28(s0)
mv a0,a5
li a1,1
li a7,72
#APP
# 221 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE21:
.LBB22:
.loc 2 222 9
lw a5,-28(s0)
mv a0,a5
li a7,67
#APP
# 222 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE22:
.loc 2 224 15
# copy the 4-pointer initializer .LC7 {"/hello.elf","aaa","bbb",NULL} into argv
lla a5,.LC7
ld a2,0(a5)
ld a3,8(a5)
ld a4,16(a5)
ld a5,24(a5)
sd a2,-80(s0)
sd a3,-72(s0)
sd a4,-64(s0)
sd a5,-56(s0)
.LBB23:
.loc 2 226 9
# syscall 69 with a0=path ("/hello.elf"), a1=argv — exec-like; confirm
lla a5,.LC0
mv a0,a5
addi a5,s0,-80
mv a1,a5
li a7,69
#APP
# 226 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE23:
.loc 2 227 9
# only reached if the syscall above returns: exit(6)
li a0,6
call exit
.L70:
.LBE19:
.LBB24:
.loc 2 231 12
# second fork-like syscall 68, result -> pid2
li a7,68
#APP
# 231 "child.c" 1
ecall
# 0 "" 2
#NO_APP
mv a5,a0
.LBE24:
.loc 2 231 10
sw a5,-24(s0)
.loc 2 232 8
lw a5,-24(s0)
sext.w a5,a5
bne a5,zero,.L71
.LBB25:
.LBB26:
# pid2 == 0 branch: mirror of the first child, but exec's "/hello2.elf"
.loc 2 233 9
lw a5,-32(s0)
mv a0,a5
li a1,0
li a7,72
#APP
# 233 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE26:
.LBB27:
.loc 2 234 9
lw a5,-28(s0)
mv a0,a5
li a7,67
#APP
# 234 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE27:
.LBB28:
.loc 2 235 9
lw a5,-32(s0)
mv a0,a5
li a7,67
#APP
# 235 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE28:
.loc 2 237 15
lla a5,.LC8
ld a2,0(a5)
ld a3,8(a5)
ld a4,16(a5)
ld a5,24(a5)
sd a2,-80(s0)
sd a3,-72(s0)
sd a4,-64(s0)
sd a5,-56(s0)
.LBB29:
.loc 2 239 9
lla a5,.LC4
mv a0,a5
addi a5,s0,-80
mv a1,a5
li a7,69
#APP
# 239 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE29:
.loc 2 240 9
li a0,6
call exit
.L71:
.LBE25:
.loc 2 243 12
# parent: status = 0; syscall 71 with a0=&status, twice (wait-like; confirm)
sw zero,-44(s0)
.LBB30:
.loc 2 244 5
addi a5,s0,-44
mv a0,a5
li a7,71
#APP
# 244 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE30:
.loc 2 245 12
sw zero,-44(s0)
.LBB31:
.loc 2 246 5
addi a5,s0,-44
mv a0,a5
li a7,71
#APP
# 246 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE31:
.LBB32:
.loc 2 249 5
# syscall 67 on both fd words (close-like; confirm)
lw a5,-32(s0)
mv a0,a5
li a7,67
#APP
# 249 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE32:
.LBB33:
.loc 2 250 5
lw a5,-28(s0)
mv a0,a5
li a7,67
#APP
# 250 "child.c" 1
ecall
# 0 "" 2
#NO_APP
.LBE33:
.loc 2 251 1
# puts("END"), then spin forever — main never returns (epilogue omitted by GCC)
lla a0,.LC9
call puts
.L72:
.loc 2 252 10
j .L72
.cfi_endproc
.LFE7:
.size main, .-main
.Letext0:
.file 3 "/opt/homebrew/Cellar/riscv-gnu-toolchain/main/lib/gcc/riscv64-unknown-elf/14.2.0/include/stdarg.h"
.section .debug_info,"",@progbits
.Ldebug_info0:
.4byte 0x93e
.2byte 0x5
.byte 0x1
.byte 0x8
.4byte .Ldebug_abbrev0
.uleb128 0xe
.4byte .LASF35
.byte 0x1d
.4byte .LASF0
.4byte .LASF1
.8byte .Ltext0
.8byte .Letext0-.Ltext0
.4byte .Ldebug_line0
.uleb128 0x4
.byte 0x1
.byte 0x6
.4byte .LASF2
.uleb128 0x4
.byte 0x2
.byte 0x5
.4byte .LASF3
.uleb128 0xf
.byte 0x4
.byte 0x5
.string "int"
.uleb128 0x4
.byte 0x8
.byte 0x5
.4byte .LASF4
.uleb128 0x4
.byte 0x1
.byte 0x8
.4byte .LASF5
.uleb128 0x4
.byte 0x2
.byte 0x7
.4byte .LASF6
.uleb128 0x4
.byte 0x4
.byte 0x7
.4byte .LASF7
.uleb128 0x4
.byte 0x8
.byte 0x7
.4byte .LASF8
.uleb128 0x4
.byte 0x8
.byte 0x5
.4byte .LASF9
.uleb128 0x4
.byte 0x8
.byte 0x7
.4byte .LASF10
.uleb128 0xa
.4byte .LASF11
.byte 0x3
.byte 0x28
.byte 0x1b
.4byte 0x80
.uleb128 0x10
.byte 0x8
.4byte .LASF36
.uleb128 0xa
.4byte .LASF12
.byte 0x3
.byte 0x67
.byte 0x18
.4byte 0x74
.uleb128 0xa
.4byte .LASF13
.byte 0x1
.byte 0x4
.byte 0xd
.4byte 0x3c
.uleb128 0xc
.4byte .LASF21
.byte 0xd2
.4byte 0x3c
.8byte .LFB7
.8byte .LFE7-.LFB7
.uleb128 0x1
.byte 0x9c
.4byte 0x446
.uleb128 0x1
.string "fd"
.byte 0xd3
.byte 0x9
.4byte 0x446
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x2
.4byte .LASF14
.byte 0xd4
.byte 0xb
.4byte 0x92
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x2
.4byte .LASF15
.byte 0xd4
.byte 0x11
.4byte 0x92
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x1
.string "buf"
.byte 0xd5
.byte 0xa
.4byte 0x456
.uleb128 0x2
.byte 0x91
.sleb128 -40
.uleb128 0x2
.4byte .LASF16
.byte 0xd6
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -44
.uleb128 0x3
.8byte .LBB17
.8byte .LBE17-.LBB17
.4byte 0x133
.uleb128 0x1
.string "_a0"
.byte 0xd8
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xd8
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x3
.8byte .LBB18
.8byte .LBE18-.LBB18
.4byte 0x163
.uleb128 0x2
.4byte .LASF17
.byte 0xda
.byte 0xc
.4byte 0x66
.uleb128 0x1
.byte 0x61
.uleb128 0x2
.4byte .LASF18
.byte 0xda
.byte 0xc
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.byte 0
.uleb128 0x3
.8byte .LBB19
.8byte .LBE19-.LBB19
.4byte 0x25e
.uleb128 0x2
.4byte .LASF19
.byte 0xe0
.byte 0xf
.4byte 0x472
.uleb128 0x3
.byte 0x91
.sleb128 -80
.uleb128 0x3
.8byte .LBB20
.8byte .LBE20-.LBB20
.4byte 0x1b7
.uleb128 0x1
.string "_a0"
.byte 0xdc
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xdc
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x3
.8byte .LBB21
.8byte .LBE21-.LBB21
.4byte 0x1f4
.uleb128 0x1
.string "_a0"
.byte 0xdd
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.byte 0xdd
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a7"
.byte 0xdd
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x3
.8byte .LBB22
.8byte .LBE22-.LBB22
.4byte 0x224
.uleb128 0x1
.string "_a0"
.byte 0xde
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xde
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x6
.8byte .LBB23
.8byte .LBE23-.LBB23
.uleb128 0x1
.string "_a0"
.byte 0xe2
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.byte 0xe2
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a7"
.byte 0xe2
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.byte 0
.uleb128 0x3
.8byte .LBB24
.8byte .LBE24-.LBB24
.4byte 0x28e
.uleb128 0x2
.4byte .LASF17
.byte 0xe7
.byte 0xc
.4byte 0x66
.uleb128 0x1
.byte 0x61
.uleb128 0x2
.4byte .LASF18
.byte 0xe7
.byte 0xc
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.byte 0
.uleb128 0x3
.8byte .LBB25
.8byte .LBE25-.LBB25
.4byte 0x389
.uleb128 0x2
.4byte .LASF19
.byte 0xed
.byte 0xf
.4byte 0x472
.uleb128 0x3
.byte 0x91
.sleb128 -80
.uleb128 0x3
.8byte .LBB26
.8byte .LBE26-.LBB26
.4byte 0x2ef
.uleb128 0x1
.string "_a0"
.byte 0xe9
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.byte 0xe9
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a7"
.byte 0xe9
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x3
.8byte .LBB27
.8byte .LBE27-.LBB27
.4byte 0x31f
.uleb128 0x1
.string "_a0"
.byte 0xea
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xea
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x3
.8byte .LBB28
.8byte .LBE28-.LBB28
.4byte 0x34f
.uleb128 0x1
.string "_a0"
.byte 0xeb
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xeb
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x6
.8byte .LBB29
.8byte .LBE29-.LBB29
.uleb128 0x1
.string "_a0"
.byte 0xef
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.byte 0xef
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a7"
.byte 0xef
.byte 0x9
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.byte 0
.uleb128 0x3
.8byte .LBB30
.8byte .LBE30-.LBB30
.4byte 0x3b9
.uleb128 0x1
.string "_a0"
.byte 0xf4
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xf4
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x3
.8byte .LBB31
.8byte .LBE31-.LBB31
.4byte 0x3e9
.uleb128 0x1
.string "_a0"
.byte 0xf6
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xf6
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x3
.8byte .LBB32
.8byte .LBE32-.LBB32
.4byte 0x419
.uleb128 0x1
.string "_a0"
.byte 0xf9
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xf9
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.uleb128 0x6
.8byte .LBB33
.8byte .LBE33-.LBB33
.uleb128 0x1
.string "_a0"
.byte 0xfa
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a7"
.byte 0xfa
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.byte 0
.uleb128 0x7
.4byte 0x3c
.4byte 0x456
.uleb128 0x8
.4byte 0x6d
.byte 0x1
.byte 0
.uleb128 0x7
.4byte 0x466
.4byte 0x466
.uleb128 0x8
.4byte 0x6d
.byte 0x3
.byte 0
.uleb128 0x4
.byte 0x1
.byte 0x8
.4byte .LASF20
.uleb128 0x11
.4byte 0x466
.uleb128 0x7
.4byte 0x482
.4byte 0x482
.uleb128 0x8
.4byte 0x6d
.byte 0x3
.byte 0
.uleb128 0xd
.4byte 0x466
.uleb128 0x9
.4byte .LASF25
.byte 0xcb
.8byte .LFB6
.8byte .LFE6-.LFB6
.uleb128 0x1
.byte 0x9c
.4byte 0x4b0
.uleb128 0xb
.string "s"
.byte 0xcb
.byte 0x17
.4byte 0x4b0
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.uleb128 0xd
.4byte 0x46d
.uleb128 0xc
.4byte .LASF22
.byte 0x6a
.4byte 0x3c
.8byte .LFB5
.8byte .LFE5-.LFB5
.uleb128 0x1
.byte 0x9c
.4byte 0x68a
.uleb128 0xb
.string "fmt"
.byte 0x6a
.byte 0x18
.4byte 0x4b0
.uleb128 0x3
.byte 0x91
.sleb128 -184
.uleb128 0x12
.uleb128 0x1
.string "ap"
.byte 0x6b
.byte 0xd
.4byte 0x86
.uleb128 0x3
.byte 0x91
.sleb128 -168
.uleb128 0x1
.string "p"
.byte 0x6e
.byte 0x11
.4byte 0x4b0
.uleb128 0x3
.byte 0x91
.sleb128 -88
.uleb128 0x3
.8byte .LBB6
.8byte .LBE6-.LBB6
.4byte 0x5d8
.uleb128 0x2
.4byte .LASF23
.byte 0x78
.byte 0x11
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -92
.uleb128 0x3
.8byte .LBB7
.8byte .LBE7-.LBB7
.4byte 0x549
.uleb128 0x2
.4byte .LASF24
.byte 0x82
.byte 0x27
.4byte 0x6d
.uleb128 0x3
.byte 0x91
.sleb128 -144
.byte 0
.uleb128 0x3
.8byte .LBB8
.8byte .LBE8-.LBB8
.4byte 0x56e
.uleb128 0x2
.4byte .LASF24
.byte 0x85
.byte 0x2c
.4byte 0x5f
.uleb128 0x3
.byte 0x91
.sleb128 -136
.byte 0
.uleb128 0x3
.8byte .LBB9
.8byte .LBE9-.LBB9
.4byte 0x593
.uleb128 0x2
.4byte .LASF24
.byte 0x8c
.byte 0x1e
.4byte 0x66
.uleb128 0x3
.byte 0x91
.sleb128 -160
.byte 0
.uleb128 0x3
.8byte .LBB10
.8byte .LBE10-.LBB10
.4byte 0x5b8
.uleb128 0x2
.4byte .LASF24
.byte 0x8f
.byte 0x23
.4byte 0x43
.uleb128 0x3
.byte 0x91
.sleb128 -152
.byte 0
.uleb128 0x6
.8byte .LBB11
.8byte .LBE11-.LBB11
.uleb128 0x1
.string "i"
.byte 0x96
.byte 0x1e
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -96
.byte 0
.byte 0
.uleb128 0x3
.8byte .LBB12
.8byte .LBE12-.LBB12
.4byte 0x5fd
.uleb128 0x2
.4byte .LASF24
.byte 0x9e
.byte 0x19
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -124
.byte 0
.uleb128 0x3
.8byte .LBB13
.8byte .LBE13-.LBB13
.4byte 0x622
.uleb128 0x2
.4byte .LASF24
.byte 0xa3
.byte 0x22
.4byte 0x58
.uleb128 0x3
.byte 0x91
.sleb128 -108
.byte 0
.uleb128 0x3
.8byte .LBB14
.8byte .LBE14-.LBB14
.4byte 0x647
.uleb128 0x2
.4byte .LASF24
.byte 0xa8
.byte 0x23
.4byte 0x6d
.uleb128 0x3
.byte 0x91
.sleb128 -120
.byte 0
.uleb128 0x3
.8byte .LBB15
.8byte .LBE15-.LBB15
.4byte 0x66a
.uleb128 0x1
.string "s"
.byte 0xae
.byte 0x21
.4byte 0x4b0
.uleb128 0x3
.byte 0x91
.sleb128 -104
.byte 0
.uleb128 0x6
.8byte .LBB16
.8byte .LBE16-.LBB16
.uleb128 0x1
.string "c"
.byte 0xb4
.byte 0x1a
.4byte 0x466
.uleb128 0x3
.byte 0x91
.sleb128 -125
.byte 0
.byte 0
.uleb128 0x9
.4byte .LASF26
.byte 0x4c
.8byte .LFB4
.8byte .LFE4-.LFB4
.uleb128 0x1
.byte 0x9c
.4byte 0x720
.uleb128 0x5
.4byte .LASF24
.byte 0x2
.byte 0x4c
.byte 0x27
.4byte 0x5f
.uleb128 0x3
.byte 0x91
.sleb128 -104
.uleb128 0x5
.4byte .LASF27
.byte 0x2
.byte 0x4c
.byte 0x31
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -108
.uleb128 0x5
.4byte .LASF28
.byte 0x2
.byte 0x4c
.byte 0x3b
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -112
.uleb128 0x1
.string "buf"
.byte 0x4d
.byte 0xa
.4byte 0x720
.uleb128 0x3
.byte 0x91
.sleb128 -96
.uleb128 0x1
.string "i"
.byte 0x4e
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x2
.4byte .LASF29
.byte 0x4f
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x6
.8byte .LBB5
.8byte .LBE5-.LBB5
.uleb128 0x2
.4byte .LASF30
.byte 0x5c
.byte 0xd
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -28
.byte 0
.byte 0
.uleb128 0x7
.4byte 0x466
.4byte 0x730
.uleb128 0x8
.4byte 0x6d
.byte 0x40
.byte 0
.uleb128 0x9
.4byte .LASF31
.byte 0x2e
.8byte .LFB3
.8byte .LFE3-.LFB3
.uleb128 0x1
.byte 0x9c
.4byte 0x7c6
.uleb128 0x5
.4byte .LASF24
.byte 0x2
.byte 0x2e
.byte 0x1e
.4byte 0x6d
.uleb128 0x3
.byte 0x91
.sleb128 -104
.uleb128 0x5
.4byte .LASF27
.byte 0x2
.byte 0x2e
.byte 0x28
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -108
.uleb128 0x5
.4byte .LASF28
.byte 0x2
.byte 0x2e
.byte 0x32
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -112
.uleb128 0x1
.string "buf"
.byte 0x2f
.byte 0xa
.4byte 0x720
.uleb128 0x3
.byte 0x91
.sleb128 -96
.uleb128 0x1
.string "i"
.byte 0x30
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x2
.4byte .LASF29
.byte 0x31
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x6
.8byte .LBB4
.8byte .LBE4-.LBB4
.uleb128 0x2
.4byte .LASF30
.byte 0x3e
.byte 0xd
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -28
.byte 0
.byte 0
.uleb128 0x9
.4byte .LASF32
.byte 0xd
.8byte .LFB2
.8byte .LFE2-.LFB2
.uleb128 0x1
.byte 0x9c
.4byte 0x86a
.uleb128 0x5
.4byte .LASF24
.byte 0x2
.byte 0xd
.byte 0x13
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -84
.uleb128 0x5
.4byte .LASF27
.byte 0x2
.byte 0xd
.byte 0x1d
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -88
.uleb128 0x5
.4byte .LASF28
.byte 0x2
.byte 0xd
.byte 0x27
.4byte 0x3c
.uleb128 0x3
.byte 0x91
.sleb128 -92
.uleb128 0x1
.string "buf"
.byte 0xe
.byte 0xa
.4byte 0x86a
.uleb128 0x3
.byte 0x91
.sleb128 -72
.uleb128 0x1
.string "i"
.byte 0xf
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -20
.uleb128 0x2
.4byte .LASF29
.byte 0x10
.byte 0x9
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x2
.4byte .LASF33
.byte 0x11
.byte 0x12
.4byte 0x58
.uleb128 0x2
.byte 0x91
.sleb128 -28
.uleb128 0x6
.8byte .LBB3
.8byte .LBE3-.LBB3
.uleb128 0x2
.4byte .LASF30
.byte 0x20
.byte 0xd
.4byte 0x3c
.uleb128 0x2
.byte 0x91
.sleb128 -32
.byte 0
.byte 0
.uleb128 0x7
.4byte 0x466
.4byte 0x87a
.uleb128 0x8
.4byte 0x6d
.byte 0x20
.byte 0
.uleb128 0x13
.4byte .LASF34
.byte 0x2
.byte 0x5
.byte 0x6
.8byte .LFB1
.8byte .LFE1-.LFB1
.uleb128 0x1
.byte 0x9c
.4byte 0x907
.uleb128 0xb
.string "c"
.byte 0x5
.byte 0x13
.4byte 0x466
.uleb128 0x2
.byte 0x91
.sleb128 -33
.uleb128 0x1
.string "buf"
.byte 0x7
.byte 0xa
.4byte 0x907
.uleb128 0x2
.byte 0x91
.sleb128 -32
.uleb128 0x6
.8byte .LBB2
.8byte .LBE2-.LBB2
.uleb128 0x2
.4byte .LASF18
.byte 0xa
.byte 0x5
.4byte 0x66
.uleb128 0x2
.byte 0x91
.sleb128 -24
.uleb128 0x1
.string "_a0"
.byte 0xa
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5a
.uleb128 0x1
.string "_a1"
.byte 0xa
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5b
.uleb128 0x1
.string "_a2"
.byte 0xa
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x5c
.uleb128 0x1
.string "_a7"
.byte 0xa
.byte 0x5
.4byte 0x66
.uleb128 0x1
.byte 0x61
.byte 0
.byte 0
.uleb128 0x7
.4byte 0x466
.4byte 0x917
.uleb128 0x8
.4byte 0x6d
.byte 0x1
.byte 0
.uleb128 0x14
.4byte .LASF37
.byte 0x1
.byte 0x7a
.byte 0x14
.8byte .LFB0
.8byte .LFE0-.LFB0
.uleb128 0x1
.byte 0x9c
.uleb128 0x5
.4byte .LASF16
.byte 0x1
.byte 0x7a
.byte 0x1e
.4byte 0x66
.uleb128 0x2
.byte 0x91
.sleb128 -24
.byte 0
.byte 0
.section .debug_abbrev,"",@progbits
.Ldebug_abbrev0:
.uleb128 0x1
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 2
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x2
.uleb128 0x34
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 2
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x3
.uleb128 0xb
.byte 0x1
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x4
.uleb128 0x24
.byte 0
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3e
.uleb128 0xb
.uleb128 0x3
.uleb128 0xe
.byte 0
.byte 0
.uleb128 0x5
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0x6
.uleb128 0xb
.byte 0x1
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.byte 0
.byte 0
.uleb128 0x7
.uleb128 0x1
.byte 0x1
.uleb128 0x49
.uleb128 0x13
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x8
.uleb128 0x21
.byte 0
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2f
.uleb128 0xb
.byte 0
.byte 0
.uleb128 0x9
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 2
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0x21
.sleb128 6
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xa
.uleb128 0x16
.byte 0
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xb
.uleb128 0x5
.byte 0
.uleb128 0x3
.uleb128 0x8
.uleb128 0x3a
.uleb128 0x21
.sleb128 2
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x49
.uleb128 0x13
.uleb128 0x2
.uleb128 0x18
.byte 0
.byte 0
.uleb128 0xc
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0x21
.sleb128 2
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0x21
.sleb128 5
.uleb128 0x27
.uleb128 0x19
.uleb128 0x49
.uleb128 0x13
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7c
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xd
.uleb128 0xf
.byte 0
.uleb128 0xb
.uleb128 0x21
.sleb128 8
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0xe
.uleb128 0x11
.byte 0x1
.uleb128 0x25
.uleb128 0xe
.uleb128 0x13
.uleb128 0xb
.uleb128 0x3
.uleb128 0x1f
.uleb128 0x1b
.uleb128 0x1f
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x10
.uleb128 0x17
.byte 0
.byte 0
.uleb128 0xf
.uleb128 0x24
.byte 0
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3e
.uleb128 0xb
.uleb128 0x3
.uleb128 0x8
.byte 0
.byte 0
.uleb128 0x10
.uleb128 0xf
.byte 0
.uleb128 0xb
.uleb128 0xb
.uleb128 0x3
.uleb128 0xe
.byte 0
.byte 0
.uleb128 0x11
.uleb128 0x26
.byte 0
.uleb128 0x49
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x12
.uleb128 0x18
.byte 0
.byte 0
.byte 0
.uleb128 0x13
.uleb128 0x2e
.byte 0x1
.uleb128 0x3f
.uleb128 0x19
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.uleb128 0x1
.uleb128 0x13
.byte 0
.byte 0
.uleb128 0x14
.uleb128 0x2e
.byte 0x1
.uleb128 0x3
.uleb128 0xe
.uleb128 0x3a
.uleb128 0xb
.uleb128 0x3b
.uleb128 0xb
.uleb128 0x39
.uleb128 0xb
.uleb128 0x27
.uleb128 0x19
.uleb128 0x11
.uleb128 0x1
.uleb128 0x12
.uleb128 0x7
.uleb128 0x40
.uleb128 0x18
.uleb128 0x7a
.uleb128 0x19
.byte 0
.byte 0
.byte 0
.section .debug_aranges,"",@progbits
.4byte 0x2c
.2byte 0x2
.4byte .Ldebug_info0
.byte 0x8
.byte 0
.2byte 0
.2byte 0
.8byte .Ltext0
.8byte .Letext0-.Ltext0
.8byte 0
.8byte 0
.section .debug_line,"",@progbits
.Ldebug_line0:
.section .debug_str,"MS",@progbits,1
.LASF16:
.string "status"
.LASF36:
.string "__builtin_va_list"
.LASF33:
.string "uval"
.LASF31:
.string "printlong"
.LASF13:
.string "pid_t"
.LASF11:
.string "__gnuc_va_list"
.LASF37:
.string "exit"
.LASF5:
.string "unsigned char"
.LASF35:
.string "GNU C17 14.2.0 -mcmodel=medany -mtune=rocket -mabi=lp64d -misa-spec=20191213 -march=rv64imafdc_zicsr -g -O0 -fno-omit-frame-pointer"
.LASF17:
.string "_num"
.LASF18:
.string "_ret"
.LASF10:
.string "long unsigned int"
.LASF6:
.string "short unsigned int"
.LASF32:
.string "printint"
.LASF34:
.string "putchar"
.LASF12:
.string "va_list"
.LASF23:
.string "lcount"
.LASF21:
.string "main"
.LASF27:
.string "base"
.LASF7:
.string "unsigned int"
.LASF30:
.string "digit"
.LASF8:
.string "long long unsigned int"
.LASF4:
.string "long long int"
.LASF20:
.string "char"
.LASF22:
.string "printf"
.LASF14:
.string "pid1"
.LASF15:
.string "pid2"
.LASF3:
.string "short int"
.LASF26:
.string "printlonglong"
.LASF19:
.string "argv"
.LASF24:
.string "val_"
.LASF9:
.string "long int"
.LASF28:
.string "sign"
.LASF25:
.string "puts"
.LASF2:
.string "signed char"
.LASF29:
.string "negative"
.section .debug_line_str,"MS",@progbits,1
.LASF1:
.string "/Users/ab25cq/minux9"
.LASF0:
.string "child.c"
.ident "GCC: (g04696df09) 14.2.0"
.section .note.GNU-stack,"",@progbits
|
abcdabcd987/compiler2016
| 9,194
|
lib/builtin_functions.s
|
# Built by Ficos 16/5/2
# All rights reserved.
#
#
# All test passed.
#
# Attention:
# 1. to use the built-in functions, you need to call "_buffer_init" function without any args before entering the source main function
# (jal _buffer_init)
# 2. just paste all of this in front of your MIPS code
#
# All supported functions:
# FunctionName args
# 1. print $a0: the string
# 2. println $a0: the string
# 3. getString ---
# 4. getInt ---
# 5. toString $a0: the integer
# 6. string.length $a0: the string
# 7. string.substring $a0: the string, $a1: left pos(int), $a2: right pos(int)
# 8. string.parseInt $a0: the string
# 9. string.ord $a0: the string, $a1: pos(int)
# 10. _array.size $a0: the array
# 11. stringConcatenate $a0: left string, $a1: right string
# 12. stringIsEqual $a0: left string, $a1: right string
# 13. stringLess $a0: left string, $a1: right string
#
# Calling Conventions:
# 1. args placed in $a0, $a1, $a2
# 2. return in $v0
# 3. follow the MIPS calling convention, be careful on regs when calling these functions
# 4. all used regs are presented in the front of the function
#
# Conventions in using string:
# 1. string object is simply a register contains the initial address of the string
# 2. immediately in front of each string's initial address is a word containing the length of the string
# e.g.
# .data
# .word 6
# str: .asciiz "hello\n"
# .align 2
# 3. every string ends with '\0', which is not counted in the length
#
# Conventions in using array:
# 1. immediately in front of each array's initial address is a word containing the size of the array
.data
# newline appended by println
_end: .asciiz "\n"
.align 2
# scratch buffer used by getString (syscall 8 reads at most 255 bytes here)
_buffer: .space 256
# .word 6
# str: .asciiz "hello\n"
# .align 2
# .word 6
# str2: .asciiz "hello\n"
# .align 2
.text
# main:
# subu $sp, $sp, 4
# sw $ra, 0($sp)
# jal _buffer_init
# Test print/println
# la $a0, str
# jal println
# la $a0, str2
# jal print
# Test getString, string_copy
# jal getString
# move $s0, $v0
# move $a0, $s0
# jal print
# move $a0, $s0
# jal string.length
# Test string.length
# la $a0, str2
# jal string.length
# move $a0, $v0
# li $v0, 1
# syscall
# Test getInt
# jal getInt
# move $a0, $v0
# li $v0, 1
# syscall
# Test toString
# li $a0, 232312312
# jal toString
# move $a0, $v0
# jal println
# Test subString
# la $a0 str
# li $a1 1
# li $a2 9
# jal string.substring
# move $a0, $v0
# li $v0, 4
# syscall
# Test parseInt
# la $a0 str
# jal string.parseInt
# move $a0, $v0
# li $v0, 1
# syscall
# Test string.ord
# la $a0 str
# li $a1, 5
# jal string.ord
# move $a0, $v0
# li $v0, 1
# syscall
# Test stringConcatenate
# la $a0 str
# la $a1 str2
# jal stringConcatenate
# move $a0, $v0
# jal print
# Test StringIsEqual
# la $a0 str
# la $a1 str2
# jal stringIsEqual
# move $a0, $v0
# li $v0, 1
# syscall
# Test StringLess
# la $a0 str
# la $a1 str2
# jal stringLess
# move $a0, $v0
# li $v0, 1
# syscall
# lw $ra, 0($sp)
# addu $sp, $sp, 4
#_buffer_init:
# li $a0, 256
# li $v0, 9
# syscall
# sw $v0, _buffer
# jr $ra
# copy the string in $a0 to buffer in $a1, with putting '\0' in the end of the buffer
###### Checked ######
# used $v0, $a0, $a1
# _string_copy: copy the NUL-terminated string at $a0 into the buffer at
# $a1, then write the terminating '\0'.  Internal helper (leaf routine).
# In:    $a0 = source, $a1 = destination (must be large enough)
# Out:   destination NUL-terminated copy
# Clobb: $v0, $a0, $a1
_string_copy:
_begin_string_copy:
lb $v0, 0($a0)                  # fetch next source byte
beqz $v0, _exit_string_copy     # stop at '\0'
sb $v0, 0($a1)
add $a0, $a0, 1
add $a1, $a1, 1
j _begin_string_copy
_exit_string_copy:
sb $zero, 0($a1)                # NUL-terminate the destination
jr $ra
# string arg in $a0
###### Checked ###### modified
# print: write the string object in $a0 to stdout (no trailing newline).
# $a0 points at the length word; the character data starts 4 bytes later.
# Clobbers $a0, $v0.
print:
add $a0,$a0,4 #added: skip the length word to reach the character data
li $v0, 4     # SPIM print_string service
syscall
jr $ra
# string arg in $a0
###### Checked ###### modified
# println: write the string object in $a0 to stdout followed by "\n".
# Clobbers $a0, $v0.
# NOTE(review): the second syscall relies on $v0 still holding 4 after the
# first print_string syscall — holds in SPIM; confirm for other simulators.
println:
add $a0,$a0,4 #added: skip the length word to reach the character data
li $v0, 4     # SPIM print_string service
syscall
la $a0, _end  # print the shared "\n" literal with the same service
syscall
jr $ra
# count the length of given string in $a0
###### Checked ######
# used $v0, $v1, $a0
# _count_string_length: length of the raw C string at $a0 ('\0' excluded).
# In:    $a0 = start of character data (NOT a length-prefixed object)
# Out:   $v0 = length
# Clobb: $v0, $v1, $a0 (left pointing at the terminator)
_count_string_length:
move $v0, $a0                       # remember the start address
_begin_count_string_length:
lb $v1, 0($a0)
beqz $v1, _exit_count_string_length
add $a0, $a0, 1
j _begin_count_string_length
_exit_count_string_length:
sub $v0, $a0, $v0                   # length = end - start
jr $ra
# non arg, string in $v0
###### Checked ###### modified
# used $a0, $a1, $v0, $t0
# getString: read a line from stdin and return it as a fresh heap string
# object (pointer to the length word) in $v0.
# NOTE(review): SPIM's read_string (service 8) keeps the trailing '\n' when
# the input fits, so the stored length includes it — confirm this is intended.
# Clobbers $a0, $a1, $v0, $t0; uses 4 bytes of stack for $ra.
getString:
subu $sp, $sp, 4
sw $ra, 0($sp)
la $a0, _buffer
li $a1, 255   # read at most 255 bytes (buffer is 256, incl. '\0')
li $v0, 8     # SPIM read_string service
syscall
jal _count_string_length
move $a1, $v0 # now $a1 contains the length of the string
add $a0, $v0, 5 # total required space = length + 1('\0') + 1 word(record the length of the string)
li $v0, 9     # SPIM sbrk service: allocate, address returned in $v0
syscall
sw $a1, 0($v0)  # store the length word in front of the characters
add $v0, $v0, 4
la $a0, _buffer
move $a1, $v0
move $t0, $v0   # keep the data pointer; _string_copy clobbers $a1
jal _string_copy
move $v0, $t0
lw $ra, 0($sp)
addu $sp, $sp, 4
sub $v0 $v0 4 #added: return the object pointer (at the length word)
jr $ra
# non arg, int in $v0
###### Checked ######
# getInt: read an integer from stdin; result in $v0.  Clobbers $v0 only.
getInt:
li $v0, 5     # SPIM read_int service; the value read replaces $v0
syscall
jr $ra
# int arg in $a0
###### Checked ###### modified
# Bug fixed(5/2): when the arg is a neg number
# used $a0, $t0, $t1, $t2, $t3, $t5, $v0, $v1
# toString: convert the signed integer in $a0 to a fresh heap string object;
# returns the object pointer (at the length word) in $v0.
# Fix: the value-zero path previously stored '0' but never wrote the
# terminating '\0', silently relying on sbrk-allocated memory being
# zero-filled; it now NUL-terminates explicitly like the general path.
# NOTE(review): `neg` on INT_MIN (-2147483648) overflows — out of scope here.
# Clobbers $a0, $t0, $t1, $t2, $t3, $t5, $v0, $v1.  Leaf routine.
toString:
# subu $sp, $sp, 4
# sw $ra, 0($sp)
# first count the #digits
li $t0, 0 # $t0 = 1 iff the number is negative; starts as 0
bgez $a0, _skip_set_less_than_zero
li $t0, 1 # negative: remember the sign and work on the magnitude
neg $a0, $a0
_skip_set_less_than_zero:
beqz $a0, _set_zero
li $t1, 0 # the #digits is in $t1
move $t2, $a0 # $t2: working copy for digit counting
move $t3, $a0 # $t3: untouched magnitude for the emission loop
li $t5, 10
_begin_count_digit:
div $t2, $t5
mflo $v0 # get the quotient
mfhi $v1 # get the remainder
bgtz $v0 _not_yet
bgtz $v1 _not_yet
j _yet
_not_yet:
add $t1, $t1, 1
move $t2, $v0
j _begin_count_digit
_yet:
beqz $t0, _skip_reserve_neg
add $t1, $t1, 1 # reserve one slot for the '-' sign
_skip_reserve_neg:
add $a0, $t1, 5 # length word + digits + '\0'
li $v0, 9 # sbrk: allocate, base address in $v0
syscall
sw $t1, 0($v0) # store the length (sign included) in front of the data
add $v0, $v0, 4
add $t1, $t1, $v0 # $t1 = one past the last character
sb $zero, 0($t1) # NUL-terminate
sub $t1, $t1, 1
_continue_toString:
# emit digits right-to-left: $t3 / 10, remainder becomes the next char
div $t3, $t5
mfhi $v1
add $v1, $v1, 48 # in ascii 48 = '0'
sb $v1, 0($t1)
sub $t1, $t1, 1
mflo $t3
# bge $t1, $v0, _continue_toString
bnez $t3, _continue_toString
beqz $t0, _skip_place_neg
li $v1, 45 # '-'
sb $v1, 0($t1)
_skip_place_neg:
# lw $ra, 0($sp)
# addu $sp, $sp, 4
sub $v0, $v0, 4 #added: return the object pointer (at the length word)
jr $ra
_set_zero:
# special case: value 0 -> the one-character string "0"
li $a0, 6
li $v0, 9
syscall
li $a0, 1
sw $a0, 0($v0) # length = 1
add $v0, $v0, 4
li $a0, 48 # '0'
sb $a0, 0($v0)
sb $zero, 1($v0) # fix: explicitly NUL-terminate instead of relying on zero-filled heap
sub $v0, $v0, 4 #added: return the object pointer (at the length word)
jr $ra
# string arg in $a0
# the zero in the end of the string will not be counted
###### Checked ###### modified
# string.length: return the stored length of the string object in $a0.
# The object pointer addresses the length word directly, so no +4 offset.
# Out: $v0 = length (terminating '\0' not counted).  Clobbers $v0 only.
string.length:
lw $v0, 0($a0)
jr $ra
# string arg in $a0, left in $a1, right in $a2
###### Checked ###### modified
# used $a0, $a1, $t0, $t1, $t2, $t3, $t4, $v0,
# string.substring: build a new string object from the inclusive character
# range [left, right] of the string object in $a0.
# In:    $a0 = string object, $a1 = left index, $a2 = right index
# Out:   $v0 = new string object (pointer to its length word)
# Trick: the byte just past `right` is saved, temporarily set to '\0' so
# _string_copy stops there, then restored afterwards.
# Clobbers $a0, $a1, $t0-$t4, $v0; uses 4 bytes of stack for $ra.
string.substring:
add $a0,$a0,4 #added: skip the length word to reach the character data
subu $sp, $sp, 4
sw $ra, 0($sp)
move $t0, $a0                 # $t0 = start of source characters
sub $t1, $a2, $a1
add $t1, $t1, 1 # $t1 is the length of the substring
add $a0, $t1, 5               # length word + chars + '\0'
li $v0, 9                     # sbrk
syscall
sw $t1, 0($v0)
add $v0, $v0, 4
add $a0, $t0, $a1             # copy source = start + left
add $t2, $t0, $a2             # $t2 = address of the last kept char
lb $t3, 1($t2) # store the ori_begin + right + 1 char in $t3
sb $zero, 1($t2) # change it to 0 for the convenience of copying
move $a1, $v0
move $t4, $v0                 # keep the data pointer across _string_copy
jal _string_copy
move $v0, $t4
sub $v0 $v0 4 #added: return the object pointer (at the length word)
sb $t3, 1($t2)                # restore the saved source byte
lw $ra, 0($sp)
addu $sp, $sp, 4
jr $ra
# string arg in $a0
###### Checked ###### modified
# used $t0, $t1, $t2, $v0
# string.parseInt: parse the leading run of decimal digits of the string
# object in $a0 into an integer; result in $v0 (0 if no leading digit).
# No sign handling — a leading '-' stops the scan immediately.
# Pass 1 scans forward to the first non-digit; pass 2 walks backwards
# accumulating digit * place-value ($t2 = 1, 10, 100, ...).
# Clobbers $a0, $t0, $t1, $t2, $v0.  Leaf routine.
string.parseInt:
add $a0 $a0 4                        # skip the length word
li $v0, 0
move $t0, $a0
li $t2, 1                            # current place value
_count_number_pos:
lb $t1, 0($t0)
bgt $t1, 57, _begin_parse_int        # > '9' -> stop scan ('\0' also stops below)
blt $t1, 48, _begin_parse_int        # < '0' -> stop scan
add $t0, $t0, 1
j _count_number_pos
_begin_parse_int:
sub $t0, $t0, 1                      # back onto the last digit
_parsing_int:
blt $t0, $a0, _finish_parse_int      # walked past the first digit -> done
lb $t1, 0($t0)
sub $t1, $t1, 48                     # ASCII -> numeric digit
mul $t1, $t1, $t2
add $v0, $v0, $t1
mul $t2, $t2, 10
sub $t0, $t0, 1
j _parsing_int
_finish_parse_int:
jr $ra
# string arg in $a0, pos in $a1
###### Checked ###### modified
# used $a0, $v0
# string.ord: return the character code at index $a1 of the string object
# in $a0.  Out: $v0 = (sign-extended) byte value.  Clobbers $a0, $v0.
# No bounds checking — $a1 is trusted to be a valid index.
string.ord:
add $a0 $a0 4        # skip the length word
add $a0, $a0, $a1    # address of the requested character
lb $v0, 0($a0)
jr $ra
# array arg in $a0 modified
# used $v0
# _array.size: return the element count stored in the word at $a0.
# NOTE(review): unlike the string helpers (which add 4 first), this reads
# the size word directly at $a0 — presumably array handles point at the
# size word itself; confirm against the compiler's array layout.
# Out: $v0 = size.  Clobbers $v0 only.
_array.size:
lw $v0, 0($a0)
jr $ra
# string1 in $a0, string2 in $a1
###### Checked ###### modified
# used $a0, $a1, $t0, $t1, $t2, $t3, $t4, $t5, $v0
# stringConcatenate: build a new string object holding lhs ($a0) followed
# by rhs ($a1); result object pointer in $v0.
# The second _string_copy starts at base+len(lhs), overwriting the '\0'
# the first copy wrote there.
# Clobbers $a0, $a1, $t0-$t5, $v0; uses 4 bytes of stack for $ra.
stringConcatenate:
add $a0, $a0, 4 #added: skip the length word of lhs
add $a1, $a1, 4 #added: skip the length word of rhs
subu $sp, $sp, 4
sw $ra, 0($sp)
move $t2, $a0                 # keep character pointers; $a0/$a1 get reused
move $t3, $a1
lw $t0, -4($a0) # $t0 is the length of lhs
lw $t1, -4($a1) # $t1 is the length of rhs
add $t5, $t0, $t1             # combined length
add $a0, $t5, 5               # length word + chars + '\0'
li $v0, 9                     # sbrk
syscall
sw $t5, 0($v0)
add $v0, $v0, 4
move $t4, $v0                 # $t4 = destination character base
move $a0, $t2
move $a1, $t4
jal _string_copy              # copy lhs
move $a0, $t3
add $a1, $t4, $t0             # append rhs right after lhs
# add $a1, $a1, 1
jal _string_copy
move $v0, $t4
sub $v0, $v0, 4 #added: return the object pointer (at the length word)
lw $ra, 0($sp)
addu $sp, $sp, 4
jr $ra
# string1 in $a0, string2 in $a1
###### Checked ###### modified
# used $a0, $a1, $t0, $t1, $v0
# stringIsEqual: compare the string objects in $a0 and $a1 for equality.
# Out: $v0 = 1 if equal, 0 otherwise.
# Fast path: unequal stored lengths short-circuit to "not equal";
# otherwise bytes are compared until the shared terminator.
# Clobbers $a0, $a1, $t0, $t1, $v0.  Leaf routine.
stringIsEqual:
add $a0, $a0, 4 #added: skip the length word of lhs
add $a1, $a1, 4 #added: skip the length word of rhs
lw $t0, -4($a0)
lw $t1, -4($a1)
bne $t0, $t1, _not_equal        # different lengths can never be equal
_continue_compare_equal:
lb $t0, 0($a0)
lb $t1, 0($a1)
beqz $t0, _equal                # hit '\0' with all bytes matched
bne $t0, $t1, _not_equal
add $a0, $a0, 1
add $a1, $a1, 1
j _continue_compare_equal
_not_equal:
li $v0, 0
j _compare_final
_equal:
li $v0, 1
_compare_final:
jr $ra
# string1 in $a0, string2 in $a1
###### Checked ###### modified
# used $a0, $a1, $t0, $t1, $v0
# stringLess: lexicographic strict comparison of the string objects in
# $a0 and $a1.  Out: $v0 = 1 if lhs < rhs, 0 otherwise (equal -> 0;
# a proper prefix compares less because its '\0' is below any character).
# Clobbers $a0, $a1, $t0, $t1, $v0.  Leaf routine.
stringLess:
add $a0, $a0, 4 #added: skip the length word of lhs
add $a1, $a1, 4 #added: skip the length word of rhs
_begin_compare_less:
lb $t0, 0($a0)
lb $t1, 0($a1)
blt $t0, $t1, _less_correct     # first differing byte decides
bgt $t0, $t1, _less_false
beqz $t0, _less_false           # both ended simultaneously -> equal -> 0
add $a0, $a0, 1
add $a1, $a1, 1
j _begin_compare_less
_less_correct:
li $v0, 1
j _less_compare_final
_less_false:
li $v0, 0
_less_compare_final:
jr $ra
###################################################
|
abcminiuser/lufa
| 2,999
|
Bootloaders/CDC/BootloaderAPITable.S
|
/*
LUFA Library
Copyright (C) Dean Camera, 2021.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2021 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
; Trampolines to actual API implementations if the target address is outside the
; range of a rjmp instruction (can happen with large bootloader sections)
; The jump table below is a public ABI: application code calls bootloader
; services by indexing into it, so each slot must remain exactly one rjmp
; (2 bytes) at a fixed offset.  rjmp has a limited range (+/-2K words), so
; each slot bounces through a jmp trampoline that can reach the real
; implementation anywhere in flash.
.section .apitable_trampolines, "ax"
.global BootloaderAPI_Trampolines
BootloaderAPI_Trampolines:
BootloaderAPI_ErasePage_Trampoline:
jmp BootloaderAPI_ErasePage
BootloaderAPI_WritePage_Trampoline:
jmp BootloaderAPI_WritePage
BootloaderAPI_FillWord_Trampoline:
jmp BootloaderAPI_FillWord
BootloaderAPI_ReadSignature_Trampoline:
jmp BootloaderAPI_ReadSignature
BootloaderAPI_ReadFuse_Trampoline:
jmp BootloaderAPI_ReadFuse
BootloaderAPI_ReadLock_Trampoline:
jmp BootloaderAPI_ReadLock
BootloaderAPI_WriteLock_Trampoline:
jmp BootloaderAPI_WriteLock
; Reserved slots: each returns immediately so a call to an unused entry
; is harmless and the table layout stays forward-compatible.
BootloaderAPI_UNUSED1:
ret
BootloaderAPI_UNUSED2:
ret
BootloaderAPI_UNUSED3:
ret
BootloaderAPI_UNUSED4:
ret
BootloaderAPI_UNUSED5:
ret
; API function jump table (fixed slot order -- do not reorder)
.section .apitable_jumptable, "ax"
.global BootloaderAPI_JumpTable
BootloaderAPI_JumpTable:
rjmp BootloaderAPI_ErasePage_Trampoline
rjmp BootloaderAPI_WritePage_Trampoline
rjmp BootloaderAPI_FillWord_Trampoline
rjmp BootloaderAPI_ReadSignature_Trampoline
rjmp BootloaderAPI_ReadFuse_Trampoline
rjmp BootloaderAPI_ReadLock_Trampoline
rjmp BootloaderAPI_WriteLock_Trampoline
rjmp BootloaderAPI_UNUSED1 ; UNUSED ENTRY 1
rjmp BootloaderAPI_UNUSED2 ; UNUSED ENTRY 2
rjmp BootloaderAPI_UNUSED3 ; UNUSED ENTRY 3
rjmp BootloaderAPI_UNUSED4 ; UNUSED ENTRY 4
rjmp BootloaderAPI_UNUSED5 ; UNUSED ENTRY 5
; Bootloader table signatures and information, read by applications to
; detect the bootloader's presence, class and entry address
.section .apitable_signatures, "ax"
.global BootloaderAPI_Signatures
BootloaderAPI_Signatures:
.long BOOT_START_ADDR ; Start address of the bootloader
.word 0xDF00 ; Signature for the CDC class bootloader
.word 0xDCFB ; Signature for a LUFA class bootloader
|
abcminiuser/lufa
| 3,391
|
Bootloaders/MassStorage/BootloaderAPITable.S
|
/*
LUFA Library
Copyright (C) Dean Camera, 2021.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2021 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
#if AUX_BOOT_SECTION_SIZE > 0
#warning Using a AUX bootloader section in addition to the defined bootloader space (see documentation).
; Trampoline to jump over the AUX bootloader section to the start of the bootloader,
; on devices where an AUX bootloader section is used.
.section .boot_aux_trampoline, "ax"
.global Boot_AUX_Trampoline
Boot_AUX_Trampoline:
jmp BOOT_START_ADDR
#endif
; Trampolines to actual API implementations if the target address is outside the
; range of a rjmp instruction (can happen with large bootloader sections)
; Each trampoline is a 4-byte absolute jmp, so the 2-byte rjmp entries in
; the jump table below can reach implementations anywhere in flash.
.section .apitable_trampolines, "ax"
.global BootloaderAPI_Trampolines
BootloaderAPI_Trampolines:
BootloaderAPI_ErasePage_Trampoline:
jmp BootloaderAPI_ErasePage
BootloaderAPI_WritePage_Trampoline:
jmp BootloaderAPI_WritePage
BootloaderAPI_FillWord_Trampoline:
jmp BootloaderAPI_FillWord
BootloaderAPI_ReadSignature_Trampoline:
jmp BootloaderAPI_ReadSignature
BootloaderAPI_ReadFuse_Trampoline:
jmp BootloaderAPI_ReadFuse
BootloaderAPI_ReadLock_Trampoline:
jmp BootloaderAPI_ReadLock
BootloaderAPI_WriteLock_Trampoline:
jmp BootloaderAPI_WriteLock
; Unused slots return immediately; they reserve table positions for
; future API additions without shifting existing entry offsets.
BootloaderAPI_UNUSED1:
ret
BootloaderAPI_UNUSED2:
ret
BootloaderAPI_UNUSED3:
ret
BootloaderAPI_UNUSED4:
ret
BootloaderAPI_UNUSED5:
ret
; API function jump table
; Entry order and offsets are part of the public API - do not reorder.
.section .apitable_jumptable, "ax"
.global BootloaderAPI_JumpTable
BootloaderAPI_JumpTable:
rjmp BootloaderAPI_ErasePage_Trampoline
rjmp BootloaderAPI_WritePage_Trampoline
rjmp BootloaderAPI_FillWord_Trampoline
rjmp BootloaderAPI_ReadSignature_Trampoline
rjmp BootloaderAPI_ReadFuse_Trampoline
rjmp BootloaderAPI_ReadLock_Trampoline
rjmp BootloaderAPI_WriteLock_Trampoline
rjmp BootloaderAPI_UNUSED1 ; UNUSED ENTRY 1
rjmp BootloaderAPI_UNUSED2 ; UNUSED ENTRY 2
rjmp BootloaderAPI_UNUSED3 ; UNUSED ENTRY 3
rjmp BootloaderAPI_UNUSED4 ; UNUSED ENTRY 4
rjmp BootloaderAPI_UNUSED5 ; UNUSED ENTRY 5
; Bootloader table signatures and information
.section .apitable_signatures, "ax"
.global BootloaderAPI_Signatures
BootloaderAPI_Signatures:
.long BOOT_START_ADDR ; Start address of the bootloader
.word 0xDF30 ; Signature for the MS class bootloader, V1
.word 0xDCFB ; Signature for a LUFA class bootloader
|
abcminiuser/lufa
| 3,003
|
Bootloaders/DFU/BootloaderAPITable.S
|
/*
LUFA Library
Copyright (C) Dean Camera, 2021.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2021 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
; Trampolines to actual API implementations if the target address is outside the
; range of a rjmp instruction (can happen with large bootloader sections)
; Each trampoline is a 4-byte absolute jmp, so the 2-byte rjmp entries in
; the jump table below can reach implementations anywhere in flash.
.section .apitable_trampolines, "ax"
.global BootloaderAPI_Trampolines
BootloaderAPI_Trampolines:
BootloaderAPI_ErasePage_Trampoline:
jmp BootloaderAPI_ErasePage
BootloaderAPI_WritePage_Trampoline:
jmp BootloaderAPI_WritePage
BootloaderAPI_FillWord_Trampoline:
jmp BootloaderAPI_FillWord
BootloaderAPI_ReadSignature_Trampoline:
jmp BootloaderAPI_ReadSignature
BootloaderAPI_ReadFuse_Trampoline:
jmp BootloaderAPI_ReadFuse
BootloaderAPI_ReadLock_Trampoline:
jmp BootloaderAPI_ReadLock
BootloaderAPI_WriteLock_Trampoline:
jmp BootloaderAPI_WriteLock
; Unused slots return immediately; they reserve table positions for
; future API additions without shifting existing entry offsets.
BootloaderAPI_UNUSED1:
ret
BootloaderAPI_UNUSED2:
ret
BootloaderAPI_UNUSED3:
ret
BootloaderAPI_UNUSED4:
ret
BootloaderAPI_UNUSED5:
ret
; API function jump table
; Entry order and offsets are part of the public API - do not reorder.
.section .apitable_jumptable, "ax"
.global BootloaderAPI_JumpTable
BootloaderAPI_JumpTable:
rjmp BootloaderAPI_ErasePage_Trampoline
rjmp BootloaderAPI_WritePage_Trampoline
rjmp BootloaderAPI_FillWord_Trampoline
rjmp BootloaderAPI_ReadSignature_Trampoline
rjmp BootloaderAPI_ReadFuse_Trampoline
rjmp BootloaderAPI_ReadLock_Trampoline
rjmp BootloaderAPI_WriteLock_Trampoline
rjmp BootloaderAPI_UNUSED1 ; UNUSED ENTRY 1
rjmp BootloaderAPI_UNUSED2 ; UNUSED ENTRY 2
rjmp BootloaderAPI_UNUSED3 ; UNUSED ENTRY 3
rjmp BootloaderAPI_UNUSED4 ; UNUSED ENTRY 4
rjmp BootloaderAPI_UNUSED5 ; UNUSED ENTRY 5
; Bootloader table signatures and information
.section .apitable_signatures, "ax"
.global BootloaderAPI_Signatures
BootloaderAPI_Signatures:
.long BOOT_START_ADDR ; Start address of the bootloader
.word 0xDF10 ; Signature for the DFU class bootloader, V1
.word 0xDCFB ; Signature for a LUFA class bootloader
|
abcminiuser/lufa
| 3,003
|
Bootloaders/Printer/BootloaderAPITable.S
|
/*
LUFA Library
Copyright (C) Dean Camera, 2021.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2021 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
; Trampolines to actual API implementations if the target address is outside the
; range of a rjmp instruction (can happen with large bootloader sections)
; Each trampoline is a 4-byte absolute jmp, so the 2-byte rjmp entries in
; the jump table below can reach implementations anywhere in flash.
.section .apitable_trampolines, "ax"
.global BootloaderAPI_Trampolines
BootloaderAPI_Trampolines:
BootloaderAPI_ErasePage_Trampoline:
jmp BootloaderAPI_ErasePage
BootloaderAPI_WritePage_Trampoline:
jmp BootloaderAPI_WritePage
BootloaderAPI_FillWord_Trampoline:
jmp BootloaderAPI_FillWord
BootloaderAPI_ReadSignature_Trampoline:
jmp BootloaderAPI_ReadSignature
BootloaderAPI_ReadFuse_Trampoline:
jmp BootloaderAPI_ReadFuse
BootloaderAPI_ReadLock_Trampoline:
jmp BootloaderAPI_ReadLock
BootloaderAPI_WriteLock_Trampoline:
jmp BootloaderAPI_WriteLock
; Unused slots return immediately; they reserve table positions for
; future API additions without shifting existing entry offsets.
BootloaderAPI_UNUSED1:
ret
BootloaderAPI_UNUSED2:
ret
BootloaderAPI_UNUSED3:
ret
BootloaderAPI_UNUSED4:
ret
BootloaderAPI_UNUSED5:
ret
; API function jump table
; Entry order and offsets are part of the public API - do not reorder.
.section .apitable_jumptable, "ax"
.global BootloaderAPI_JumpTable
BootloaderAPI_JumpTable:
rjmp BootloaderAPI_ErasePage_Trampoline
rjmp BootloaderAPI_WritePage_Trampoline
rjmp BootloaderAPI_FillWord_Trampoline
rjmp BootloaderAPI_ReadSignature_Trampoline
rjmp BootloaderAPI_ReadFuse_Trampoline
rjmp BootloaderAPI_ReadLock_Trampoline
rjmp BootloaderAPI_WriteLock_Trampoline
rjmp BootloaderAPI_UNUSED1 ; UNUSED ENTRY 1
rjmp BootloaderAPI_UNUSED2 ; UNUSED ENTRY 2
rjmp BootloaderAPI_UNUSED3 ; UNUSED ENTRY 3
rjmp BootloaderAPI_UNUSED4 ; UNUSED ENTRY 4
rjmp BootloaderAPI_UNUSED5 ; UNUSED ENTRY 5
; Bootloader table signatures and information
.section .apitable_signatures, "ax"
.global BootloaderAPI_Signatures
BootloaderAPI_Signatures:
.long BOOT_START_ADDR ; Start address of the bootloader
.word 0xDF20 ; Signature for the Printer class bootloader
.word 0xDCFB ; Signature for a LUFA class bootloader
|
abcminiuser/lufa
| 3,064
|
LUFA/Platform/UC3/Exception.S
|
/*
LUFA Library
Copyright (C) Dean Camera, 2021.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2021 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
#if defined(__AVR32__)
#include <avr32/io.h>
// AVR32 UC3 exception vector table and interrupt dispatch stubs.
// All exception vectors below are placeholder handlers: "rjmp $" spins
// in place so a debugger can inspect the fault state.
.section .exception_handlers, "ax", @progbits
// ================= EXCEPTION TABLE ================
// The EVBA-relative table must be 0x200-aligned; each .org below places
// its handler at that vector's fixed byte offset from EVBA_Table.
.balign 0x200
.global EVBA_Table
EVBA_Table:
.org 0x000
Exception_Unrecoverable_Exception:
rjmp $
.org 0x004
Exception_TLB_Multiple_Hit:
rjmp $
.org 0x008
Exception_Bus_Error_Data_Fetch:
rjmp $
.org 0x00C
Exception_Bus_Error_Instruction_Fetch:
rjmp $
.org 0x010
Exception_NMI:
rjmp $
.org 0x014
Exception_Instruction_Address:
rjmp $
.org 0x018
Exception_ITLB_Protection:
rjmp $
.org 0x01C
Exception_OCD_Breakpoint:
rjmp $
.org 0x020
Exception_Illegal_Opcode:
rjmp $
.org 0x024
Exception_Unimplemented_Instruction:
rjmp $
.org 0x028
Exception_Privilege_Violation:
rjmp $
.org 0x02C
Exception_Floating_Point:
rjmp $
.org 0x030
Exception_Coprocessor_Absent:
rjmp $
.org 0x034
Exception_Data_Address_Read:
rjmp $
.org 0x038
Exception_Data_Address_Write:
rjmp $
.org 0x03C
Exception_DTLB_Protection_Read:
rjmp $
.org 0x040
Exception_DTLB_Protection_Write:
rjmp $
.org 0x044
Exception_DTLB_Modified:
rjmp $
.org 0x050
Exception_ITLB_Miss:
rjmp $
.org 0x060
Exception_DTLB_Miss_Read:
rjmp $
.org 0x070
Exception_DTLB_Miss_Write:
rjmp $
.org 0x100
Exception_Supervisor_Call:
rjmp $
// ============== END OF EXCEPTION TABLE =============
// ============= GENERAL INTERRUPT HANDLER ===========
// One stub is generated per interrupt priority level (INT0..INT3).
// Each stub passes its level in r12, asks the INTC driver for the
// registered handler (address comes back in r12), and jumps to it.
.balign 4
.irp Level, 0, 1, 2, 3
Exception_INT\Level:
mov r12, \Level
call INTC_GetInterruptHandler
mov pc, r12
.endr
// ========= END OF GENERAL INTERRUPT HANDLER ========
// ====== GENERAL INTERRUPT HANDLER OFFSET TABLE ======
// Each autovector word packs the interrupt priority level together with
// the dispatch stub's byte offset from EVBA_Table.
.balign 4
.global Autovector_Table
Autovector_Table:
.irp Level, 0, 1, 2, 3
.word ((AVR32_INTC_INT0 + \Level) << AVR32_INTC_IPR_INTLEVEL_OFFSET) | (Exception_INT\Level - EVBA_Table)
.endr
// === END OF GENERAL INTERRUPT HANDLER OFFSET TABLE ===
#endif
|
abcminiuser/lufa
| 1,402
|
BuildTests/ModuleTest/Dummy.S
|
/*
LUFA Library
Copyright (C) Dean Camera, 2021.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2021 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
.section .text
# Build-test stub: both symbols below are empty labels with no code behind
# them. They exist only so the module compiles and links; the result is
# never meant to execute.
# Mandatory entry point for successful compilation and link
.global main
main:
# Mandatory callback needed for base compile of the USB driver
.global CALLBACK_USB_GetDescriptor
CALLBACK_USB_GetDescriptor:
|
abcminiuser/lufa
| 1,463
|
BuildTests/SingleUSBModeTest/Dummy.S
|
/*
LUFA Library
Copyright (C) Dean Camera, 2021.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
*/
/*
Copyright 2021 Dean Camera (dean [at] fourwalledcubicle [dot] com)
Permission to use, copy, modify, distribute, and sell this
software and its documentation for any purpose is hereby granted
without fee, provided that the above copyright notice appear in
all copies and that both that the copyright notice and this
permission notice and warranty disclaimer appear in supporting
documentation, and that the name of the author not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
The author disclaims all warranties with regard to this
software, including all implied warranties of merchantability
and fitness. In no event shall the author be liable for any
special, indirect or consequential damages or any damages
whatsoever resulting from loss of use, data or profits, whether
in an action of contract, negligence or other tortious action,
arising out of or in connection with the use or performance of
this software.
*/
.section .text
# Build-test stub: links the USB stack but is never meant to execute.
# Note main has no ret - it falls through into the callback label below,
# which is acceptable only because this code never runs.
# Mandatory entry point for successful compilation and link
.global main
main:
# Force code generation of the base USB stack
call USB_Init
# Mandatory callback needed for base compile of the USB driver
.global CALLBACK_USB_GetDescriptor
CALLBACK_USB_GetDescriptor:
|
4d61726b/VirtualKD-Redux
| 2,433
|
VirtualKD-Redux/Lib/STLPort/src/sparc_atomic64.s
|
.section ".text",#alloc,#execinstr
.align 8
.skip 16
!-----------------------------------------------------------------------
! int _STLP_atomic_exchange (void *pvalue, int value)
! In:    %o0 = pointer to the 64-bit cell, %o1 = new value
! Out:   %o0 = value previously stored at [%o0]
! CASX compare-and-swap retry loop; the membar in the branch delay slot
! orders the cas before we return on both the retry and success paths.
!-----------------------------------------------------------------------
.type _STLP_atomic_exchange,#function
.global _STLP_atomic_exchange
.align 8
_STLP_atomic_exchange:
1:
ldx [%o0], %o2 ! %o2 = current (expected) value
mov %o1, %o3 ! %o3 = new value to install
casx [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]=%o3; %o3 <- old value
cmp %o2, %o3 ! succeeded iff old value matches expected
bne %xcc, 1b ! retry on failure; must test %xcc, not %icc:
! these are 64-bit values (icc only sees bits 31:0)
membar #LoadLoad | #LoadStore ! delay slot: fence before
! returning
retl ! return
mov %o2, %o0 ! return the old (exchanged-out) value
.size _STLP_atomic_exchange,(.-_STLP_atomic_exchange)
! int _STLP_atomic_increment (void *pvalue)
! In:    %o0 = pointer to the 64-bit counter
! Out:   %o0 = the incremented (new) value
.type _STLP_atomic_increment,#function
.global _STLP_atomic_increment
.align 8
_STLP_atomic_increment:
0:
ldx [%o0], %o2 ! %o2 = current value
add %o2, 0x1, %o3 ! %o3 = current + 1 (plain add: the old addx
! folded in a stale carry flag)
casx [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]=%o3; %o3 <- old value
cmp %o3, %o2 ! succeeded iff old value is what we read
bne %xcc, 0b ! retry on failure; 64-bit compare needs %xcc
membar #LoadLoad | #LoadStore ! delay slot: fence before
! returning
retl ! return
add %o2, 0x1, %o0 ! return new value (old code did mov %o1,%o0,
! but %o1 is never set in this function)
.size _STLP_atomic_increment,(.-_STLP_atomic_increment)
! /* int _STLP_atomic_decrement (void *pvalue) */
! In:    %o0 = pointer to the 64-bit counter
! Out:   %o0 = the decremented (new) value - callers test this for zero
.type _STLP_atomic_decrement,#function
.global _STLP_atomic_decrement
.align 8
_STLP_atomic_decrement:
0:
ldx [%o0], %o2 ! %o2 = current value
sub %o2, 0x1, %o3 ! %o3 = current - 1 (plain sub: the old subx
! folded in a stale carry flag)
casx [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]=%o3; %o3 <- old value
cmp %o3, %o2 ! succeeded iff old value is what we read
bne %xcc, 0b ! retry on failure; 64-bit compare needs %xcc
membar #LoadLoad | #LoadStore ! delay slot: fence before
! returning
retl ! return
sub %o2, 0x1, %o0 ! return new value (old code returned %o0
! untouched, i.e. the pointer argument)
.size _STLP_atomic_decrement,(.-_STLP_atomic_decrement)
|
4d61726b/VirtualKD-Redux
| 2,593
|
VirtualKD-Redux/Lib/STLPort/src/sparc_atomic.s
|
.section ".text",#alloc,#execinstr
.align 8
.skip 16
/*
** int _STLP_atomic_exchange (void *pvalue, int value)
**
** Atomically swaps *pvalue with value via a CAS retry loop and returns
** the previous contents of *pvalue.
*/
.type _STLP_atomic_exchange,#function
.global _STLP_atomic_exchange
.align 8
_STLP_atomic_exchange:
0:
ld [%o0], %o2 ! Set the current value
mov %o1, %o3 ! Set the new value
! swap [%o0], %o3 ! Do the compare and swap
cas [%o0], %o2, %o3
! cas: if [%o0]==%o2 then [%o0] and %o3 swap; afterwards %o3 holds the
! old memory value, so %o2==%o3 means the store succeeded.
cmp %o2, %o3 ! Check whether successful
bne 0b ! Retry upon failure
stbar ! delay slot: store barrier
mov %o2, %o0 ! return the old value
retl ! return
nop
.size _STLP_atomic_exchange,(.-_STLP_atomic_exchange)
/* int _STLP_atomic_increment (void *pvalue)
** In:  %o0 = pointer to the 32-bit counter
** Out: %o0 = the incremented (new) value
*/
.type _STLP_atomic_increment,#function
.global _STLP_atomic_increment
.align 8
_STLP_atomic_increment:
1:
ld [%o0], %o2 ! %o2 = current value
add %o2, 0x1, %o3 ! %o3 = current + 1
cas [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]=%o3; %o3 <- old value
cmp %o3, %o2 ! succeeded iff old value is what we read
bne 1b ! Retry if we failed.
membar #LoadLoad | #LoadStore ! delay slot: fence before
! returning
retl ! return
add %o2, 0x1, %o0 ! return new value (old code ended retl/nop,
! leaving the pointer argument in %o0)
.size _STLP_atomic_increment,(.-_STLP_atomic_increment)
/* int _STLP_atomic_decrement (void *pvalue)
** In:  %o0 = pointer to the 32-bit counter
** Out: %o0 = the decremented (new) value - callers test this for zero,
**      so returning the untouched pointer (as the old retl/nop did)
**      made "reached zero" checks never fire.
*/
.type _STLP_atomic_decrement,#function
.global _STLP_atomic_decrement
.align 8
_STLP_atomic_decrement:
2:
ld [%o0], %o2 ! %o2 = current value
sub %o2, 0x1, %o3 ! %o3 = current - 1
cas [%o0], %o2, %o3 ! if [%o0]==%o2: [%o0]=%o3; %o3 <- old value
cmp %o3, %o2 ! succeeded iff old value is what we read
bne 2b ! Retry if we failed.
membar #LoadLoad | #LoadStore ! delay slot: fence before
! returning
retl ! return
sub %o2, 0x1, %o0 ! return the decremented value
.size _STLP_atomic_decrement,(.-_STLP_atomic_decrement)
|
4ilo/ssd1306-stm32HAL
| 20,446
|
example/startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2017 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
* @brief This is the code that gets called when the processor first
* starts execution following a reset event. Only the absolutely
* necessary set is performed, after which the application
* supplied main() routine is called.
* @param None
* @retval : None
*/
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = byte offset into .data; the loop test lives at LoopCopyDataInit,
   which is entered first so an empty .data section copies nothing. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* load init word from flash (_sidata + r1) */
str r3, [r0, r1] /* store it to RAM (_sdata + r1) */
adds r1, r1, #4
LoopCopyDataInit:
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1 /* r2 = next destination address */
cmp r2, r3
bcc CopyDataInit /* keep copying while r2 < _edata */
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* *r2++ = 0 */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss /* keep zeroing while r2 < _ebss */
/* Call the clock system initialization function. */
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
bx lr /* not normally reached; returns if main() ever exits */
.size Reset_Handler, .-Reset_Handler
/**
* @brief This is the code that gets called when the processor receives an
* unexpected interrupt. This simply enters an infinite loop, preserving
* the system state for examination by a debugger.
* @param None
* @retval None
*/
.section .text.Default_Handler,"ax",%progbits
/* Catch-all handler: any unexpected interrupt parks the CPU in an
   infinite loop, preserving system state for a debugger. */
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex M3. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000.
*
*******************************************************************************/
/* Cortex-M4 vector table for STM32F411xE: initial SP, reset vector,
   system exceptions, then the device's external interrupt lines.
   Entry order is fixed by the hardware - do not reorder. */
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
.size g_pfnVectors, .-g_pfnVectors
/* NOTE(review): the .size above is emitted before the label, so it
   evaluates to 0; harmless at runtime, but the ELF symbol size is wrong.
   Confirm against the upstream ST template before changing. */
g_pfnVectors:
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition.
*
*******************************************************************************/
/* Every handler referenced by the vector table defaults to
   Default_Handler via a weak alias; an application-defined function of
   the same name overrides the alias at link time. */
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
4ilo/HD44780-Stm32HAL
| 21,686
|
F4_disco_example/startup/startup_stm32f411xe.s
|
/**
******************************************************************************
* @file startup_stm32f411xe.s
* @author MCD Application Team
* @brief STM32F411xExx Devices vector table for GCC based toolchains.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2017 STMicroelectronics</center></h2>
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of STMicroelectronics nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************
*/
/* Assembler/target configuration: unified ARM/Thumb syntax, Cortex-M4 core,
   software floating-point ABI, Thumb encoding (Cortex-M executes Thumb only). */
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
/* Symbols exported for the linker and debugger. */
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
 * @brief Reset handler: the code that runs when the processor starts
 *        execution following a reset event. Performs only the minimum
 *        setup -- stack pointer, .data copy from flash, .bss zero-fill,
 *        SystemInit(), C runtime constructors -- then calls the
 *        application-supplied main() routine.
 * @param None
 * @retval : None
 */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = running byte offset into .data; the loop is entered at its test so
   an empty .data section copies nothing. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = word from flash image at offset r1 */
str r3, [r0, r1] /* store to SRAM at _sdata + r1 */
adds r1, r1, #4
LoopCopyDataInit:
/* _sdata/_edata are reloaded from the literal pool each pass; simple but
   correct. Continue while _sdata + offset < _edata (unsigned compare). */
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* post-indexed store: *r2 = 0, then r2 += 4 */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* Call static constructors */
bl __libc_init_array
/* Call the application's entry point.*/
bl main
/* NOTE(review): if main() ever returns this branches to whatever LR holds,
   which is undefined after a hardware reset -- main() must not return. */
bx lr
.size Reset_Handler, .-Reset_Handler
/**
 * @brief This is the code that gets called when the processor receives an
 *        unexpected interrupt. This simply enters an infinite loop,
 *        preserving the system state for examination by a debugger.
 *        All handlers are weak-aliased to this entry below, so any IRQ
 *        without an application-defined handler lands here.
 * @param None
 * @retval None
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M4. Note that the proper constructs
* must be placed on this to ensure that it ends up at physical address
* 0x0000.0000 (the linker script places section .isr_vector at the boot
* address). Slot order is fixed by the architecture and the device's IRQ
* numbering; do not reorder or remove entries.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size directive precedes the label, so ".-g_pfnVectors"
   does not measure the table; kept as-is from the vendor template. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
/* Core exception vectors: initial SP, reset, NMI, faults, SVC, debug,
   PendSV, SysTick. Zero words are architecturally reserved slots. */
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word 0 /* Reserved */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word 0 /* Reserved */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word FPU_IRQHandler /* FPU */
.word 0 /* Reserved */
.word 0 /* Reserved */
.word SPI4_IRQHandler /* SPI4 */
.word SPI5_IRQHandler /* SPI5 */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition. .thumb_set is used (rather than .set) so the alias keeps
* the Thumb bit required for vector-table entries on Cortex-M.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
.weak SPI4_IRQHandler
.thumb_set SPI4_IRQHandler,Default_Handler
.weak SPI5_IRQHandler
.thumb_set SPI5_IRQHandler,Default_Handler
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
|
4ms/SMR
| 23,461
|
stm32/device/src/startup_stm32f4xx.s
|
/**
******************************************************************************
* @file startup_stm32f4xx.s
* @author MCD Application Team
* @version V1.0.0
* @date 30-September-2011
* @brief STM32F4xx Devices vector table for RIDE7 toolchain.
* This module performs:
* - Set the initial SP
* - Set the initial PC == Reset_Handler,
* - Set the vector table entries with the exceptions ISR address
* - Configure the clock system and the external SRAM mounted on
* STM324xG-EVAL board to be used as data memory (optional,
* to be enabled by user)
* - Branches to main in the C library (which eventually
* calls main()).
* After Reset the Cortex-M4 processor is in Thread mode,
* priority is Privileged, and the Stack is set to Main.
******************************************************************************
* @attention
*
* THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS
* WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE
* TIME. AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY
* DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING
* FROM THE CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE
* CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS.
*
* <h2><center>&copy; COPYRIGHT 2011 STMicroelectronics</center></h2>
******************************************************************************
*/
/* Assembler/target configuration: unified syntax, software FP ABI, Thumb.
   FIX(review): the vendor V1.0.0 template selected ".cpu cortex-m3" even
   though this startup file targets STM32F4 (Cortex-M4) devices, per the
   file header. Corrected to cortex-m4; the M4 instruction set is a superset
   of the M3 set for the code in this file, so nothing else changes. */
.syntax unified
.cpu cortex-m4
.fpu softvfp
.thumb
/* Symbols exported for the linker and debugger. */
.global g_pfnVectors
.global Default_Handler
/* start address for the initialization values of the .data section.
defined in linker script */
.word _sidata
/* start address for the .data section. defined in linker script */
.word _sdata
/* end address for the .data section. defined in linker script */
.word _edata
/* start address for the .bss section. defined in linker script */
.word _sbss
/* end address for the .bss section. defined in linker script */
.word _ebss
/* stack used for SystemInit_ExtMemCtl; always internal RAM used */
/**
 * @brief Reset handler: the code that runs when the processor starts
 *        execution following a reset event. Only the absolutely necessary
 *        setup is performed -- stack pointer, .data copy, .bss zero-fill,
 *        SystemInit() -- after which the application-supplied main()
 *        routine is called.
 * @param None
 * @retval : None
 */
.section .text.Reset_Handler
.weak Reset_Handler
.type Reset_Handler, %function
Reset_Handler:
/* FIX(review): set SP explicitly. After a hardware reset SP already holds
   _estack (loaded from vector slot 0), so this is a no-op then, but it is
   required when Reset_Handler is entered by a direct jump (bootloader,
   debugger). Matches the newer ST startup template. */
ldr sp, =_estack /* set stack pointer */
/* Copy the data segment initializers from flash to SRAM.
   r1 = running byte offset into .data; loop entered at the test so an
   empty .data section copies nothing. */
movs r1, #0
b LoopCopyDataInit
CopyDataInit:
ldr r3, =_sidata
ldr r3, [r3, r1] /* r3 = word from flash image at offset r1 */
str r3, [r0, r1] /* store to SRAM at _sdata + r1 */
adds r1, r1, #4
LoopCopyDataInit:
/* Continue while _sdata + offset < _edata (unsigned compare). */
ldr r0, =_sdata
ldr r3, =_edata
adds r2, r0, r1
cmp r2, r3
bcc CopyDataInit
ldr r2, =_sbss
b LoopFillZerobss
/* Zero fill the bss segment. */
FillZerobss:
movs r3, #0
str r3, [r2], #4 /* post-indexed store: *r2 = 0, then r2 += 4 */
LoopFillZerobss:
ldr r3, = _ebss
cmp r2, r3
bcc FillZerobss
/* Call the clock system initialization function.*/
bl SystemInit
/* NOTE(review): unlike the newer template, this version does not call
   __libc_init_array, so C++ static constructors do not run; left as-is to
   avoid a link-time dependency the project may not provide. */
/* Call the application's entry point.*/
bl main
/* NOTE(review): reached only if main() returns; LR is undefined after a
   hardware reset, so main() must not return. */
bx lr
.size Reset_Handler, .-Reset_Handler
/**
 * @brief This is the code that gets called when the processor receives an
 *        unexpected interrupt. This simply enters an infinite loop,
 *        preserving the system state for examination by a debugger.
 *        All handlers are weak-aliased to this entry below, so any IRQ
 *        without an application-defined handler lands here.
 * @param None
 * @retval None
 */
.section .text.Default_Handler,"ax",%progbits
Default_Handler:
Infinite_Loop:
b Infinite_Loop
.size Default_Handler, .-Default_Handler
/******************************************************************************
*
* The minimal vector table for a Cortex-M device. Note that the proper
* constructs must be placed on this to ensure that it ends up at physical
* address 0x0000.0000 (the linker script places .isr_vector at the boot
* address). Slot order is fixed by the architecture and the device IRQ
* numbering; do not reorder or remove entries.
*
*******************************************************************************/
.section .isr_vector,"a",%progbits
.type g_pfnVectors, %object
/* NOTE(review): this .size directive precedes the label, so ".-g_pfnVectors"
   does not measure the table; kept as-is from the vendor template. */
.size g_pfnVectors, .-g_pfnVectors
g_pfnVectors:
/* Core exception vectors: initial SP, reset, NMI, faults, SVC, debug,
   PendSV, SysTick. Zero words are architecturally reserved slots. */
.word _estack
.word Reset_Handler
.word NMI_Handler
.word HardFault_Handler
.word MemManage_Handler
.word BusFault_Handler
.word UsageFault_Handler
.word 0
.word 0
.word 0
.word 0
.word SVC_Handler
.word DebugMon_Handler
.word 0
.word PendSV_Handler
.word SysTick_Handler
/* External Interrupts */
.word WWDG_IRQHandler /* Window WatchDog */
.word PVD_IRQHandler /* PVD through EXTI Line detection */
.word TAMP_STAMP_IRQHandler /* Tamper and TimeStamps through the EXTI line */
.word RTC_WKUP_IRQHandler /* RTC Wakeup through the EXTI line */
.word FLASH_IRQHandler /* FLASH */
.word RCC_IRQHandler /* RCC */
.word EXTI0_IRQHandler /* EXTI Line0 */
.word EXTI1_IRQHandler /* EXTI Line1 */
.word EXTI2_IRQHandler /* EXTI Line2 */
.word EXTI3_IRQHandler /* EXTI Line3 */
.word EXTI4_IRQHandler /* EXTI Line4 */
.word DMA1_Stream0_IRQHandler /* DMA1 Stream 0 */
.word DMA1_Stream1_IRQHandler /* DMA1 Stream 1 */
.word DMA1_Stream2_IRQHandler /* DMA1 Stream 2 */
.word DMA1_Stream3_IRQHandler /* DMA1 Stream 3 */
.word DMA1_Stream4_IRQHandler /* DMA1 Stream 4 */
.word DMA1_Stream5_IRQHandler /* DMA1 Stream 5 */
.word DMA1_Stream6_IRQHandler /* DMA1 Stream 6 */
.word ADC_IRQHandler /* ADC1, ADC2 and ADC3s */
.word CAN1_TX_IRQHandler /* CAN1 TX */
.word CAN1_RX0_IRQHandler /* CAN1 RX0 */
.word CAN1_RX1_IRQHandler /* CAN1 RX1 */
.word CAN1_SCE_IRQHandler /* CAN1 SCE */
.word EXTI9_5_IRQHandler /* External Line[9:5]s */
.word TIM1_BRK_TIM9_IRQHandler /* TIM1 Break and TIM9 */
.word TIM1_UP_TIM10_IRQHandler /* TIM1 Update and TIM10 */
.word TIM1_TRG_COM_TIM11_IRQHandler /* TIM1 Trigger and Commutation and TIM11 */
.word TIM1_CC_IRQHandler /* TIM1 Capture Compare */
.word TIM2_IRQHandler /* TIM2 */
.word TIM3_IRQHandler /* TIM3 */
.word TIM4_IRQHandler /* TIM4 */
.word I2C1_EV_IRQHandler /* I2C1 Event */
.word I2C1_ER_IRQHandler /* I2C1 Error */
.word I2C2_EV_IRQHandler /* I2C2 Event */
.word I2C2_ER_IRQHandler /* I2C2 Error */
.word SPI1_IRQHandler /* SPI1 */
.word SPI2_IRQHandler /* SPI2 */
.word USART1_IRQHandler /* USART1 */
.word USART2_IRQHandler /* USART2 */
.word USART3_IRQHandler /* USART3 */
.word EXTI15_10_IRQHandler /* External Line[15:10]s */
.word RTC_Alarm_IRQHandler /* RTC Alarm (A and B) through EXTI Line */
.word OTG_FS_WKUP_IRQHandler /* USB OTG FS Wakeup through EXTI line */
.word TIM8_BRK_TIM12_IRQHandler /* TIM8 Break and TIM12 */
.word TIM8_UP_TIM13_IRQHandler /* TIM8 Update and TIM13 */
.word TIM8_TRG_COM_TIM14_IRQHandler /* TIM8 Trigger and Commutation and TIM14 */
.word TIM8_CC_IRQHandler /* TIM8 Capture Compare */
.word DMA1_Stream7_IRQHandler /* DMA1 Stream7 */
.word FSMC_IRQHandler /* FSMC */
.word SDIO_IRQHandler /* SDIO */
.word TIM5_IRQHandler /* TIM5 */
.word SPI3_IRQHandler /* SPI3 */
.word UART4_IRQHandler /* UART4 */
.word UART5_IRQHandler /* UART5 */
.word TIM6_DAC_IRQHandler /* TIM6 and DAC1&2 underrun errors */
.word TIM7_IRQHandler /* TIM7 */
.word DMA2_Stream0_IRQHandler /* DMA2 Stream 0 */
.word DMA2_Stream1_IRQHandler /* DMA2 Stream 1 */
.word DMA2_Stream2_IRQHandler /* DMA2 Stream 2 */
.word DMA2_Stream3_IRQHandler /* DMA2 Stream 3 */
.word DMA2_Stream4_IRQHandler /* DMA2 Stream 4 */
.word ETH_IRQHandler /* Ethernet */
.word ETH_WKUP_IRQHandler /* Ethernet Wakeup through EXTI line */
.word CAN2_TX_IRQHandler /* CAN2 TX */
.word CAN2_RX0_IRQHandler /* CAN2 RX0 */
.word CAN2_RX1_IRQHandler /* CAN2 RX1 */
.word CAN2_SCE_IRQHandler /* CAN2 SCE */
.word OTG_FS_IRQHandler /* USB OTG FS */
.word DMA2_Stream5_IRQHandler /* DMA2 Stream 5 */
.word DMA2_Stream6_IRQHandler /* DMA2 Stream 6 */
.word DMA2_Stream7_IRQHandler /* DMA2 Stream 7 */
.word USART6_IRQHandler /* USART6 */
.word I2C3_EV_IRQHandler /* I2C3 event */
.word I2C3_ER_IRQHandler /* I2C3 error */
.word OTG_HS_EP1_OUT_IRQHandler /* USB OTG HS End Point 1 Out */
.word OTG_HS_EP1_IN_IRQHandler /* USB OTG HS End Point 1 In */
.word OTG_HS_WKUP_IRQHandler /* USB OTG HS Wakeup through EXTI */
.word OTG_HS_IRQHandler /* USB OTG HS */
.word DCMI_IRQHandler /* DCMI */
.word CRYP_IRQHandler /* CRYP crypto */
.word HASH_RNG_IRQHandler /* Hash and Rng */
.word FPU_IRQHandler /* FPU */
/*******************************************************************************
*
* Provide weak aliases for each Exception handler to the Default_Handler.
* As they are weak aliases, any function with the same name will override
* this definition. .thumb_set is used (rather than .set) so the alias keeps
* the Thumb bit required for vector-table entries on Cortex-M.
*
*******************************************************************************/
.weak NMI_Handler
.thumb_set NMI_Handler,Default_Handler
.weak HardFault_Handler
.thumb_set HardFault_Handler,Default_Handler
.weak MemManage_Handler
.thumb_set MemManage_Handler,Default_Handler
.weak BusFault_Handler
.thumb_set BusFault_Handler,Default_Handler
.weak UsageFault_Handler
.thumb_set UsageFault_Handler,Default_Handler
.weak SVC_Handler
.thumb_set SVC_Handler,Default_Handler
.weak DebugMon_Handler
.thumb_set DebugMon_Handler,Default_Handler
.weak PendSV_Handler
.thumb_set PendSV_Handler,Default_Handler
.weak SysTick_Handler
.thumb_set SysTick_Handler,Default_Handler
.weak WWDG_IRQHandler
.thumb_set WWDG_IRQHandler,Default_Handler
.weak PVD_IRQHandler
.thumb_set PVD_IRQHandler,Default_Handler
.weak TAMP_STAMP_IRQHandler
.thumb_set TAMP_STAMP_IRQHandler,Default_Handler
.weak RTC_WKUP_IRQHandler
.thumb_set RTC_WKUP_IRQHandler,Default_Handler
.weak FLASH_IRQHandler
.thumb_set FLASH_IRQHandler,Default_Handler
.weak RCC_IRQHandler
.thumb_set RCC_IRQHandler,Default_Handler
.weak EXTI0_IRQHandler
.thumb_set EXTI0_IRQHandler,Default_Handler
.weak EXTI1_IRQHandler
.thumb_set EXTI1_IRQHandler,Default_Handler
.weak EXTI2_IRQHandler
.thumb_set EXTI2_IRQHandler,Default_Handler
.weak EXTI3_IRQHandler
.thumb_set EXTI3_IRQHandler,Default_Handler
.weak EXTI4_IRQHandler
.thumb_set EXTI4_IRQHandler,Default_Handler
.weak DMA1_Stream0_IRQHandler
.thumb_set DMA1_Stream0_IRQHandler,Default_Handler
.weak DMA1_Stream1_IRQHandler
.thumb_set DMA1_Stream1_IRQHandler,Default_Handler
.weak DMA1_Stream2_IRQHandler
.thumb_set DMA1_Stream2_IRQHandler,Default_Handler
.weak DMA1_Stream3_IRQHandler
.thumb_set DMA1_Stream3_IRQHandler,Default_Handler
.weak DMA1_Stream4_IRQHandler
.thumb_set DMA1_Stream4_IRQHandler,Default_Handler
.weak DMA1_Stream5_IRQHandler
.thumb_set DMA1_Stream5_IRQHandler,Default_Handler
.weak DMA1_Stream6_IRQHandler
.thumb_set DMA1_Stream6_IRQHandler,Default_Handler
.weak ADC_IRQHandler
.thumb_set ADC_IRQHandler,Default_Handler
.weak CAN1_TX_IRQHandler
.thumb_set CAN1_TX_IRQHandler,Default_Handler
.weak CAN1_RX0_IRQHandler
.thumb_set CAN1_RX0_IRQHandler,Default_Handler
.weak CAN1_RX1_IRQHandler
.thumb_set CAN1_RX1_IRQHandler,Default_Handler
.weak CAN1_SCE_IRQHandler
.thumb_set CAN1_SCE_IRQHandler,Default_Handler
.weak EXTI9_5_IRQHandler
.thumb_set EXTI9_5_IRQHandler,Default_Handler
.weak TIM1_BRK_TIM9_IRQHandler
.thumb_set TIM1_BRK_TIM9_IRQHandler,Default_Handler
.weak TIM1_UP_TIM10_IRQHandler
.thumb_set TIM1_UP_TIM10_IRQHandler,Default_Handler
.weak TIM1_TRG_COM_TIM11_IRQHandler
.thumb_set TIM1_TRG_COM_TIM11_IRQHandler,Default_Handler
.weak TIM1_CC_IRQHandler
.thumb_set TIM1_CC_IRQHandler,Default_Handler
.weak TIM2_IRQHandler
.thumb_set TIM2_IRQHandler,Default_Handler
.weak TIM3_IRQHandler
.thumb_set TIM3_IRQHandler,Default_Handler
.weak TIM4_IRQHandler
.thumb_set TIM4_IRQHandler,Default_Handler
.weak I2C1_EV_IRQHandler
.thumb_set I2C1_EV_IRQHandler,Default_Handler
.weak I2C1_ER_IRQHandler
.thumb_set I2C1_ER_IRQHandler,Default_Handler
.weak I2C2_EV_IRQHandler
.thumb_set I2C2_EV_IRQHandler,Default_Handler
.weak I2C2_ER_IRQHandler
.thumb_set I2C2_ER_IRQHandler,Default_Handler
.weak SPI1_IRQHandler
.thumb_set SPI1_IRQHandler,Default_Handler
.weak SPI2_IRQHandler
.thumb_set SPI2_IRQHandler,Default_Handler
.weak USART1_IRQHandler
.thumb_set USART1_IRQHandler,Default_Handler
.weak USART2_IRQHandler
.thumb_set USART2_IRQHandler,Default_Handler
.weak USART3_IRQHandler
.thumb_set USART3_IRQHandler,Default_Handler
.weak EXTI15_10_IRQHandler
.thumb_set EXTI15_10_IRQHandler,Default_Handler
.weak RTC_Alarm_IRQHandler
.thumb_set RTC_Alarm_IRQHandler,Default_Handler
.weak OTG_FS_WKUP_IRQHandler
.thumb_set OTG_FS_WKUP_IRQHandler,Default_Handler
.weak TIM8_BRK_TIM12_IRQHandler
.thumb_set TIM8_BRK_TIM12_IRQHandler,Default_Handler
.weak TIM8_UP_TIM13_IRQHandler
.thumb_set TIM8_UP_TIM13_IRQHandler,Default_Handler
.weak TIM8_TRG_COM_TIM14_IRQHandler
.thumb_set TIM8_TRG_COM_TIM14_IRQHandler,Default_Handler
.weak TIM8_CC_IRQHandler
.thumb_set TIM8_CC_IRQHandler,Default_Handler
.weak DMA1_Stream7_IRQHandler
.thumb_set DMA1_Stream7_IRQHandler,Default_Handler
.weak FSMC_IRQHandler
.thumb_set FSMC_IRQHandler,Default_Handler
.weak SDIO_IRQHandler
.thumb_set SDIO_IRQHandler,Default_Handler
.weak TIM5_IRQHandler
.thumb_set TIM5_IRQHandler,Default_Handler
.weak SPI3_IRQHandler
.thumb_set SPI3_IRQHandler,Default_Handler
.weak UART4_IRQHandler
.thumb_set UART4_IRQHandler,Default_Handler
.weak UART5_IRQHandler
.thumb_set UART5_IRQHandler,Default_Handler
.weak TIM6_DAC_IRQHandler
.thumb_set TIM6_DAC_IRQHandler,Default_Handler
.weak TIM7_IRQHandler
.thumb_set TIM7_IRQHandler,Default_Handler
.weak DMA2_Stream0_IRQHandler
.thumb_set DMA2_Stream0_IRQHandler,Default_Handler
.weak DMA2_Stream1_IRQHandler
.thumb_set DMA2_Stream1_IRQHandler,Default_Handler
.weak DMA2_Stream2_IRQHandler
.thumb_set DMA2_Stream2_IRQHandler,Default_Handler
.weak DMA2_Stream3_IRQHandler
.thumb_set DMA2_Stream3_IRQHandler,Default_Handler
.weak DMA2_Stream4_IRQHandler
.thumb_set DMA2_Stream4_IRQHandler,Default_Handler
.weak ETH_IRQHandler
.thumb_set ETH_IRQHandler,Default_Handler
.weak ETH_WKUP_IRQHandler
.thumb_set ETH_WKUP_IRQHandler,Default_Handler
.weak CAN2_TX_IRQHandler
.thumb_set CAN2_TX_IRQHandler,Default_Handler
.weak CAN2_RX0_IRQHandler
.thumb_set CAN2_RX0_IRQHandler,Default_Handler
.weak CAN2_RX1_IRQHandler
.thumb_set CAN2_RX1_IRQHandler,Default_Handler
.weak CAN2_SCE_IRQHandler
.thumb_set CAN2_SCE_IRQHandler,Default_Handler
.weak OTG_FS_IRQHandler
.thumb_set OTG_FS_IRQHandler,Default_Handler
.weak DMA2_Stream5_IRQHandler
.thumb_set DMA2_Stream5_IRQHandler,Default_Handler
.weak DMA2_Stream6_IRQHandler
.thumb_set DMA2_Stream6_IRQHandler,Default_Handler
.weak DMA2_Stream7_IRQHandler
.thumb_set DMA2_Stream7_IRQHandler,Default_Handler
.weak USART6_IRQHandler
.thumb_set USART6_IRQHandler,Default_Handler
.weak I2C3_EV_IRQHandler
.thumb_set I2C3_EV_IRQHandler,Default_Handler
.weak I2C3_ER_IRQHandler
.thumb_set I2C3_ER_IRQHandler,Default_Handler
.weak OTG_HS_EP1_OUT_IRQHandler
.thumb_set OTG_HS_EP1_OUT_IRQHandler,Default_Handler
.weak OTG_HS_EP1_IN_IRQHandler
.thumb_set OTG_HS_EP1_IN_IRQHandler,Default_Handler
.weak OTG_HS_WKUP_IRQHandler
.thumb_set OTG_HS_WKUP_IRQHandler,Default_Handler
.weak OTG_HS_IRQHandler
.thumb_set OTG_HS_IRQHandler,Default_Handler
.weak DCMI_IRQHandler
.thumb_set DCMI_IRQHandler,Default_Handler
.weak CRYP_IRQHandler
.thumb_set CRYP_IRQHandler,Default_Handler
.weak HASH_RNG_IRQHandler
.thumb_set HASH_RNG_IRQHandler,Default_Handler
.weak FPU_IRQHandler
.thumb_set FPU_IRQHandler,Default_Handler
/******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE****/
|
4ms/metamodule-plugin-sdk
| 1,661
|
plugin-libc/libgcc/config/nios2/crtn.S
|
/* Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Jonah Graham (jgraham@altera.com).
Contributed by Mentor Graphics, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just makes sure that the .fini and .init sections do in
   fact return. Users may put any desired instructions in those sections.
   This file is the last thing linked into any executable.
   The epilogues below must mirror the prologues emitted by crti.S exactly:
   the same 48-byte frame and the same register save slots (ra, r23..r16,
   fp), restored here before the frame is popped and control returns. */
.section ".init"
ldw ra, 44(sp)
ldw r23, 40(sp)
ldw r22, 36(sp)
ldw r21, 32(sp)
ldw r20, 28(sp)
ldw r19, 24(sp)
ldw r18, 20(sp)
ldw r17, 16(sp)
ldw r16, 12(sp)
ldw fp, 8(sp)
/* Pop the 48-byte frame allocated by _init's prologue in crti.S. */
addi sp, sp, 48
ret
.section ".fini"
ldw ra, 44(sp)
ldw r23, 40(sp)
ldw r22, 36(sp)
ldw r21, 32(sp)
ldw r20, 28(sp)
ldw r19, 24(sp)
ldw r18, 20(sp)
ldw r17, 16(sp)
ldw r16, 12(sp)
ldw fp, 8(sp)
/* Pop the 48-byte frame allocated by _fini's prologue in crti.S. */
addi sp, sp, 48
ret
|
4ms/metamodule-plugin-sdk
| 2,373
|
plugin-libc/libgcc/config/nios2/crti.S
|
/* Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Jonah Graham (jgraham@altera.com).
Contributed by Mentor Graphics, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just makes a stack frame for the contents of the .fini and
   .init sections. Users may put any desired instructions in those
   sections.
   While technically any code can be put in the init and fini sections
   most stuff will not work other than stuff which obeys the call frame
   and ABI. All the call-preserved registers are saved, the call clobbered
   registers should have been saved by the code calling init and fini.
   See crtstuff.c for an example of code that inserts itself in the init
   and fini sections.
   See crt0.s for the code that calls init and fini.
   Note: both sections are deliberately left open here; the matching
   restores/returns come from crtn.S, linked last, with object files'
   .init/.fini fragments concatenated in between. */
.section ".init"
.align 2
.global _init
_init:
/* Allocate a 48-byte frame and save all Nios II callee-saved registers
   (ra, r23..r16, fp); crtn.S restores them in the same slots. */
addi sp, sp, -48
stw ra, 44(sp)
stw r23, 40(sp)
stw r22, 36(sp)
stw r21, 32(sp)
stw r20, 28(sp)
stw r19, 24(sp)
stw r18, 20(sp)
stw r17, 16(sp)
stw r16, 12(sp)
stw fp, 8(sp)
addi fp, sp, 8
#ifdef linux
/* PIC: compute the GOT pointer in r22 relative to the current PC. */
nextpc r22
1: movhi r2, %hiadj(_gp_got - 1b)
addi r2, r2, %lo(_gp_got - 1b)
add r22, r22, r2
#endif
.section ".fini"
.align 2
.global _fini
_fini:
/* Same frame layout as _init above; must stay in sync with crtn.S. */
addi sp, sp, -48
stw ra, 44(sp)
stw r23, 40(sp)
stw r22, 36(sp)
stw r21, 32(sp)
stw r20, 28(sp)
stw r19, 24(sp)
stw r18, 20(sp)
stw r17, 16(sp)
stw r16, 12(sp)
stw fp, 8(sp)
addi fp, sp, 8
#ifdef linux
/* PIC: compute the GOT pointer in r22 relative to the current PC. */
nextpc r22
1: movhi r2, %hiadj(_gp_got - 1b)
addi r2, r2, %lo(_gp_got - 1b)
add r22, r22, r2
#endif
|
4ms/metamodule-plugin-sdk
| 2,499
|
plugin-libc/libgcc/config/tilepro/softmpy.S
|
/* 64-bit multiplication support for TILEPro.
Copyright (C) 2011-2022 Free Software Foundation, Inc.
Contributed by Walter Lee (walt@tilera.com)
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* 64-bit multiplication support. */
.file "softmpy.S"
/* Register-role aliases for __muldi3 below.
   lo0 is r9 rather than r0 because r0 (the incoming low word of n0) is
   reused as the result low word while the input is still live -- see the
   "move lo0, r0" at the top of __muldi3. */
/* Parameters */
#define lo0 r9 /* low 32 bits of n0 */
#define hi0 r1 /* high 32 bits of n0 */
#define lo1 r2 /* low 32 bits of n1 */
#define hi1 r3 /* high 32 bits of n1 */
/* temps */
#define result1_a r4
#define result1_b r5
#define tmp0 r6
#define tmp0_left_16 r7
#define tmp1 r8
/* long long __muldi3 (long long n0, long long n1)
   64x64 -> 64-bit multiply for TILEPro.  Per the #defines above, n0
   arrives in r0 (low)/r1 (high) and n1 in r2 (low)/r3 (high); the
   low 64 bits of the product are returned in r0 (low)/r1 (high).
   The VLIW bundles below interleave the half-word partial products of
   the two low words (carries tracked via the slt_u compares in
   tmp0/tmp1) with the lo*hi cross terms (result1_a/result1_b), which
   only contribute to the high result word.  Bundle order is
   significant -- do not reorder.  */
.section .text.__muldi3, "ax"
.align 8
.globl __muldi3
.type __muldi3, @function
__muldi3:
{
move lo0, r0 /* so we can write "out r0" while "in r0" alive */
mulhl_uu tmp0, lo1, r0
}
{
mulll_uu result1_a, lo1, hi0
}
{
move tmp1, tmp0
mulhla_uu tmp0, lo0, lo1
}
{
mulhlsa_uu result1_a, lo1, hi0
}
{
mulll_uu result1_b, lo0, hi1
slt_u tmp1, tmp0, tmp1 /* carry out of the mulhla accumulate */
}
{
mulhlsa_uu result1_a, lo0, hi1
shli r0, tmp0, 16
}
{
move tmp0_left_16, r0
mulhha_uu result1_b, lo0, lo1
}
{
mullla_uu r0, lo1, lo0 /* r0 = low 32 bits of the product */
shli tmp1, tmp1, 16
}
{
mulhlsa_uu result1_b, hi0, lo1
inthh tmp1, tmp1, tmp0
}
{
mulhlsa_uu result1_a, hi1, lo0
slt_u tmp0, r0, tmp0_left_16 /* carry from the low-word accumulate */
}
/* NOTE: this will stall for a cycle here. Oh well. */
{
add r1, tmp0, tmp1 /* fold both carries into the high word */
add result1_a, result1_a, result1_b
}
{
add r1, r1, result1_a
jrp lr
}
.size __muldi3,.-__muldi3
|
4ms/metamodule-plugin-sdk
| 1,392
|
plugin-libc/libgcc/config/m68k/crtn.S
|
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 1999-2022 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* This file supplies function epilogues for the .init and .fini sections.
* It is linked in after all other files.
*/
.ident "GNU C crtn.o"
/* Epilogue appended to the .init section: tear down the frame created
   by crti.o's linkw and return to _init's caller.  */
.section .init
unlk %fp
rts
/* Same epilogue for the .fini section, matching _fini's prologue.  */
.section .fini
unlk %fp
rts
|
4ms/metamodule-plugin-sdk
| 1,486
|
plugin-libc/libgcc/config/m68k/crti.S
|
/* Specialized code needed to support construction and destruction of
file-scope objects in C++ and Java code, and to support exception handling.
Copyright (C) 1999-2022 Free Software Foundation, Inc.
Contributed by Charles-Antoine Gauthier (charles.gauthier@iit.nrc.ca).
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* This file just supplies function prologues for the .init and .fini
* sections. It is linked in before crtbegin.o.
*/
.ident "GNU C crti.o"
/* Prologue for _init: establish an (empty) frame with linkw; the body
   is contributed by other objects' .init fragments, and crtn.o linked
   last supplies the matching unlk/rts.  */
.section .init
.globl _init
.type _init,@function
_init:
linkw %fp,#0
/* Prologue for _fini, same scheme as _init.  */
.section .fini
.globl _fini
.type _fini,@function
_fini:
linkw %fp,#0
|
4ms/metamodule-plugin-sdk
| 101,881
|
plugin-libc/libgcc/config/m68k/lb1sf68.S
|
/* libgcc routines for 68000 w/o floating-point hardware.
Copyright (C) 1994-2022 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/* Note that X is a function. */
#ifdef __ELF__
#define FUNC(x) .type SYM(x),function
#else
/* The .proc pseudo-op is accepted, but ignored, by GAS. We could just
define this to the empty string for non-ELF systems, but defining it
to .proc means that the information is available to the assembler if
the need arises. */
#define FUNC(x) .proc
#endif
/* Use the right prefix for registers. */
#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
/* Use the right prefix for immediate values. */
#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
#define pc REG (pc)
/* Provide a few macros to allow for PIC code support.
* With PIC, data is stored A5 relative so we've got to take a bit of special
* care to ensure that all loads of global data is via A5. PIC also requires
* jumps and subroutine calls to be PC relative rather than absolute. We cheat
* a little on this and in the PIC case, we use short offset branches and
* hope that the final object code is within range (which it should be).
*/
#ifndef __PIC__
/* Non PIC (absolute/relocatable) versions */
/* NOTE: the non-PIC versions clobber nothing beyond their output
   operand; several __PIC__ variants below go through a0 -- see the
   per-macro notes.  */
.macro PICCALL addr
jbsr \addr
.endm
.macro PICJUMP addr
jmp \addr
.endm
.macro PICLEA sym, reg
lea \sym, \reg
.endm
.macro PICPEA sym, areg
pea \sym
.endm
#else /* __PIC__ */
# if defined (__uClinux__)
/* Versions for uClinux */
# if defined(__ID_SHARED_LIBRARY__)
/* -mid-shared-library versions */
/* Load \sym through the GOT addressed off the per-library a5 slot.  */
.macro PICLEA sym, reg
movel a5@(_current_shared_library_a5_offset_), \reg
movel \sym@GOT(\reg), \reg
.endm
.macro PICPEA sym, areg
movel a5@(_current_shared_library_a5_offset_), \areg
movel \sym@GOT(\areg), sp@-
.endm
/* NOTE: clobbers a0 (used as the call/jump scratch register).  */
.macro PICCALL addr
PICLEA \addr,a0
jsr a0@
.endm
.macro PICJUMP addr
PICLEA \addr,a0
jmp a0@
.endm
# else /* !__ID_SHARED_LIBRARY__ */
/* Versions for -msep-data */
.macro PICLEA sym, reg
movel \sym@GOT(a5), \reg
.endm
.macro PICPEA sym, areg
movel \sym@GOT(a5), sp@-
.endm
/* NOTE: the ColdFire (no ISA B/C) paths below clobber a0.  */
.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
lea \addr-.-8,a0
jsr pc@(a0)
#else
jbsr \addr
#endif
.endm
.macro PICJUMP addr
/* ISA C has no bra.l instruction, and since this assembly file
gets assembled into multiple object files, we avoid the
bra instruction entirely. */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
lea \addr-.-8,a0
jmp pc@(a0)
#else
bra \addr
#endif
.endm
# endif
# else /* !__uClinux__ */
/* Versions for Linux */
/* Compute the GOT base pc-relatively (the -6 corrects for the length
   of the movel that loaded the @GOTPC value), then load \sym's slot.  */
.macro PICLEA sym, reg
movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
lea (-6, pc, \reg), \reg
movel \sym@GOT(\reg), \reg
.endm
.macro PICPEA sym, areg
movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
lea (-6, pc, \areg), \areg
movel \sym@GOT(\areg), sp@-
.endm
/* NOTE: the ColdFire (no ISA B/C) paths below clobber a0.  */
.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
lea \addr-.-8,a0
jsr pc@(a0)
#else
jbsr \addr
#endif
.endm
.macro PICJUMP addr
/* ISA C has no bra.l instruction, and since this assembly file
gets assembled into multiple object files, we avoid the
bra instruction entirely. */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
lea \addr-.-8,a0
jmp pc@(a0)
#else
bra \addr
#endif
.endm
# endif
#endif /* __PIC__ */
#ifdef L_floatex
| This is an attempt at a decent floating point (single, double and
| extended double) code for the GNU C compiler. It should be easy to
| adapt to other compilers (but beware of the local labels!).
| Starting date: 21 October, 1990
| It is convenient to introduce the notation (s,e,f) for a floating point
| number, where s=sign, e=exponent, f=fraction. We will call a floating
| point number fpn to abbreviate, independently of the precision.
| Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
| for doubles and 16383 for long doubles). We then have the following
| different cases:
| 1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
| (-1)^s x 1.f x 2^(e-bias-1).
| 2. Denormalized fpns have e=0. They correspond to numbers of the form
| (-1)^s x 0.f x 2^(-bias).
| 3. +/-INFINITY have e=MAX_EXP, f=0.
| 4. Quiet NaN (Not a Number) have all bits set.
| 5. Signaling NaN (Not a Number) have s=0, e=MAX_EXP, f=1.
|=============================================================================
| exceptions
|=============================================================================
| This is the floating point condition code register (_fpCCR):
|
| struct {
| short _exception_bits;
| short _trap_enable_bits;
| short _sticky_bits;
| short _rounding_mode;
| short _format;
| short _last_operation;
| union {
| float sf;
| double df;
| } _operand1;
| union {
| float sf;
| double df;
| } _operand2;
| } _fpCCR;
.data
.even
.globl SYM (_fpCCR)
SYM (_fpCCR):
__exception_bits:
.word 0
__trap_enable_bits:
.word 0
__sticky_bits:
.word 0
__rounding_mode:
.word ROUND_TO_NEAREST
__format:
.word NIL
__last_operation:
.word NOOP
__operand1:
.long 0
.long 0
__operand2:
.long 0
.long 0
| Offsets:
EBITS = __exception_bits - SYM (_fpCCR)
TRAPE = __trap_enable_bits - SYM (_fpCCR)
STICK = __sticky_bits - SYM (_fpCCR)
ROUND = __rounding_mode - SYM (_fpCCR)
FORMT = __format - SYM (_fpCCR)
LASTO = __last_operation - SYM (_fpCCR)
OPER1 = __operand1 - SYM (_fpCCR)
OPER2 = __operand2 - SYM (_fpCCR)
| The following exception types are supported:
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010
| The allowed rounding modes are:
UNKNOWN = -1
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
| The allowed values of format are:
NIL = 0
SINGLE_FLOAT = 1
DOUBLE_FLOAT = 2
LONG_FLOAT = 3
| The allowed values for the last operation are:
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7
|=============================================================================
| __clear_sticky_bits
|=============================================================================
| The sticky bits are normally not cleared (thus the name), whereas the
| exception type and exception value reflect the last computation.
| This routine is provided to clear them (you can also write to _fpCCR,
| since it is globally visible).
.globl SYM (__clear_sticky_bit)
.text
.even
| void __clear_sticky_bits(void);
| Clears only the __sticky_bits word of _fpCCR; all other fields keep
| reflecting the last computation.  Clobbers a0.
| NOTE(review): the prototype above says __clear_sticky_bits but the
| exported symbol is __clear_sticky_bit (no trailing s) -- confirm
| which name callers use before touching it; renaming would change
| the exported interface.
SYM (__clear_sticky_bit):
PICLEA SYM (_fpCCR),a0
#ifndef __mcoldfire__
movew IMM (0),a0@(STICK)
#else
clr.w a0@(STICK)
#endif
rts
|=============================================================================
| $_exception_handler
|=============================================================================
.globl $_exception_handler
.text
.even
| This is the common exit point if an exception occurs.
| NOTE: it is NOT callable from C!
| It expects the exception type in d7, the format (SINGLE_FLOAT,
| DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
| It sets the corresponding exception and sticky bits, and the format.
| Depending on the format if fills the corresponding slots for the
| operands which produced the exception (all this information is provided
| so if you write your own exception handlers you have enough information
| to deal with the problem).
| Then checks to see if the corresponding exception is trap-enabled,
| in which case it pushes the address of _fpCCR and traps through
| trap FPTRAP (15 for the moment).
FPTRAP = 15
| Common exception exit (see the comment block above).  Inputs:
| d7 = exception type, d6 = format, d5 = last operation.  Expects the
| faulting routine's frame in a6 so operands can be read at a6@(8..20);
| ends by popping that routine's saved registers and unlk'ing a6.
$_exception_handler:
PICLEA SYM (_fpCCR),a0
movew d7,a0@(EBITS) | set __exception_bits
#ifndef __mcoldfire__
orw d7,a0@(STICK) | and __sticky_bits
#else
movew a0@(STICK),d4
orl d7,d4
movew d4,a0@(STICK)
#endif
movew d6,a0@(FORMT) | and __format
movew d5,a0@(LASTO) | and __last_operation
| Now put the operands in place:
#ifndef __mcoldfire__
cmpw IMM (SINGLE_FLOAT),d6
#else
cmpl IMM (SINGLE_FLOAT),d6
#endif
beq 1f
| double/long format: copy two 8-byte operands from the caller's frame
movel a6@(8),a0@(OPER1)
movel a6@(12),a0@(OPER1+4)
movel a6@(16),a0@(OPER2)
movel a6@(20),a0@(OPER2+4)
bra 2f
1: movel a6@(8),a0@(OPER1)
movel a6@(12),a0@(OPER2)
2:
| And check whether the exception is trap-enabled:
#ifndef __mcoldfire__
andw a0@(TRAPE),d7 | is exception trap-enabled?
#else
clrl d6
movew a0@(TRAPE),d6
andl d6,d7
#endif
beq 1f | no, exit
PICPEA SYM (_fpCCR),a1 | yes, push address of _fpCCR
trap IMM (FPTRAP) | and trap
#ifndef __mcoldfire__
1: moveml sp@+,d2-d7 | restore data registers
#else
1: moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
#endif /* L_floatex */
#ifdef L_mulsi3
.text
FUNC(__mulsi3)
/* unsigned long __mulsi3 (unsigned long a, unsigned long b);
   32x32 -> 32-bit multiply from three 16-bit mulu's.  With
   a = (x0<<16)+x1 and b = (y0<<16)+y1 (x0/y0 the high words, at
   sp@(4)/sp@(8) on the big-endian stack), the low 32 bits of the
   product are ((x0*y1 + x1*y0) << 16) + x1*y1.  Result in d0;
   clobbers d1 and flags.  */
.globl SYM (__mulsi3)
.globl SYM (__mulsi3_internal)
.hidden SYM (__mulsi3_internal)
SYM (__mulsi3):
SYM (__mulsi3_internal):
movew sp@(4), d0 /* x0 -> d0 */
muluw sp@(10), d0 /* x0*y1 */
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(8), d1 /* x1*y0 */
#ifndef __mcoldfire__
addw d1, d0
#else
addl d1, d0
#endif
swap d0
clrw d0
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(10), d1 /* x1*y1 */
addl d1, d0
rts
#endif /* L_mulsi3 */
#ifdef L_udivsi3
.text
FUNC(__udivsi3)
/* unsigned long __udivsi3 (unsigned long a, unsigned long b);
   a (dividend) at sp@(4), b (divisor) at sp@(8); quotient in d0.
   Plain 68000: uses the 32/16-bit divu twice when the divisor fits in
   16 bits, otherwise shifts both operands right until it does and
   corrects the tentative quotient (off by at most one).  ColdFire:
   31-iteration non-restoring division.  */
.globl SYM (__udivsi3)
.globl SYM (__udivsi3_internal)
.hidden SYM (__udivsi3_internal)
SYM (__udivsi3):
SYM (__udivsi3_internal):
#ifndef __mcoldfire__
movel d2, sp@-
movel sp@(12), d1 /* d1 = divisor */
movel sp@(8), d0 /* d0 = dividend */
cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */
jcc L3 /* then try next algorithm */
movel d0, d2
clrw d2
swap d2
divu d1, d2 /* high quotient in lower word */
movew d2, d0 /* save high quotient */
swap d0
movew sp@(10), d2 /* get low dividend + high rest */
divu d1, d2 /* low quotient */
movew d2, d0
jra L6
L3: movel d1, d2 /* use d2 as divisor backup */
L4: lsrl IMM (1), d1 /* shift divisor */
lsrl IMM (1), d0 /* shift dividend */
cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
jcc L4
divu d1, d0 /* now we have 16-bit divisor */
andl IMM (0xffff), d0 /* mask out divisor, ignore remainder */
/* Multiply the 16-bit tentative quotient with the 32-bit divisor. Because of
the operand ranges, this might give a 33-bit product. If this product is
greater than the dividend, the tentative quotient was too large. */
movel d2, d1
mulu d0, d1 /* low part, 32 bits */
swap d2
mulu d0, d2 /* high part, at most 17 bits */
swap d2 /* align high part with low part */
tstw d2 /* high part 17 bits? */
jne L5 /* if 17 bits, quotient was too large */
addl d2, d1 /* add parts */
jcs L5 /* if sum is 33 bits, quotient was too large */
cmpl sp@(8), d1 /* compare the sum with the dividend */
jls L6 /* if sum > dividend, quotient was too large */
L5: subql IMM (1), d0 /* adjust quotient */
L6: movel sp@+, d2
rts
#else /* __mcoldfire__ */
/* ColdFire implementation of non-restoring division algorithm from
Hennessy & Patterson, Appendix A. */
link a6,IMM (-12)
moveml d2-d4,sp@
movel a6@(8),d0
movel a6@(12),d1
clrl d2 | clear p
moveq IMM (31),d4
L1: addl d0,d0 | shift reg pair (p,a) one bit left
addxl d2,d2
movl d2,d3 | subtract b from p, store in tmp.
subl d1,d3
jcs L2 | if no carry,
bset IMM (0),d0 | set the low order bit of a to 1,
movl d3,d2 | and store tmp in p.
L2: subql IMM (1),d4
jcc L1
moveml sp@,d2-d4 | restore data registers
unlk a6 | and return
rts
#endif /* __mcoldfire__ */
#endif /* L_udivsi3 */
#ifdef L_divsi3
.text
FUNC(__divsi3)
/* long __divsi3 (long a, long b);
   Signed divide: records the result's sign in d2 (starts at +1,
   negated once per negative operand), divides the absolute values
   through __udivsi3_internal, then negates the quotient if needed.
   Result in d0.  */
.globl SYM (__divsi3)
.globl SYM (__divsi3_internal)
.hidden SYM (__divsi3_internal)
SYM (__divsi3):
SYM (__divsi3_internal):
movel d2, sp@-
moveq IMM (1), d2 /* sign of result stored in d2 (=1 or =-1) */
movel sp@(12), d1 /* d1 = divisor */
jpl L1
negl d1
#ifndef __mcoldfire__
negb d2 /* change sign because divisor <0 */
#else
negl d2 /* change sign because divisor <0 */
#endif
L1: movel sp@(8), d0 /* d0 = dividend */
jpl L2
negl d0
#ifndef __mcoldfire__
negb d2
#else
negl d2
#endif
L2: movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__udivsi3_internal) /* divide abs(dividend) by abs(divisor) */
addql IMM (8), sp
tstb d2
jpl L3
negl d0
L3: movel sp@+, d2
rts
#endif /* L_divsi3 */
#ifdef L_umodsi3
.text
FUNC(__umodsi3)
/* unsigned long __umodsi3 (unsigned long a, unsigned long b);
   a mod b, computed as a - (a/b)*b via __udivsi3_internal (and
   __mulsi3_internal on plain 68000; ColdFire has a 32-bit mulsl).
   Result in d0; clobbers d1.  */
.globl SYM (__umodsi3)
SYM (__umodsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__udivsi3_internal)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__mulsi3_internal) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#endif
movel sp@(4), d1 /* d1 = dividend */
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
#endif /* L_umodsi3 */
#ifdef L_modsi3
.text
FUNC(__modsi3)
/* long __modsi3 (long a, long b);
   Signed remainder, a - (a/b)*b, using the truncating signed divide
   __divsi3_internal (so the result takes the dividend's sign).
   Result in d0; clobbers d1.  */
.globl SYM (__modsi3)
SYM (__modsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__divsi3_internal)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
PICCALL SYM (__mulsi3_internal) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#endif
movel sp@(4), d1 /* d1 = dividend */
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
#endif /* L_modsi3 */
#ifdef L_double
.globl SYM (_fpCCR)
.globl $_exception_handler
QUIET_NaN = 0xffffffff
D_MAX_EXP = 0x07ff
D_BIAS = 1022
DBL_MAX_EXP = D_MAX_EXP - D_BIAS
DBL_MIN_EXP = 1 - D_BIAS
DBL_MANT_DIG = 53
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010
DOUBLE_FLOAT = 2
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7
UNKNOWN = -1
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
| Entry points:
.globl SYM (__adddf3)
.globl SYM (__subdf3)
.globl SYM (__muldf3)
.globl SYM (__divdf3)
.globl SYM (__negdf2)
.globl SYM (__cmpdf2)
.globl SYM (__cmpdf2_internal)
.hidden SYM (__cmpdf2_internal)
.text
.even
| These are common routines to return and signal exceptions.
| On entry d7 is or'ed into the high result long (presumably holding
| the result's sign bit -- confirm against the callers); each stub then
| loads the exception mask into d7 and DOUBLE_FLOAT into d6 before
| tail-jumping to $_exception_handler, which restores registers and
| returns to the original caller.
Ld$den:
| Return and signal a denormalized number
orl d7,d0
movew IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
Ld$infty:
Ld$overflow:
| Return a properly signed INFINITY and set the exception flags
movel IMM (0x7ff00000),d0
movel IMM (0),d1
orl d7,d0
movew IMM (INEXACT_RESULT+OVERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
Ld$underflow:
| Return 0 and set the exception flags
movel IMM (0),d0
movel d0,d1
movew IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
Ld$inop:
| Return a quiet NaN and set the exception flags
movel IMM (QUIET_NaN),d0
movel d0,d1
movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
Ld$div$0:
| Return a properly signed INFINITY and set the exception flags
movel IMM (0x7ff00000),d0
movel IMM (0),d1
orl d7,d0
movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
|=============================================================================
|=============================================================================
| double precision routines
|=============================================================================
|=============================================================================
| A double precision floating point number (double) has the format:
|
| struct _double {
| unsigned int sign : 1; /* sign bit */
| unsigned int exponent : 11; /* exponent, shifted by 126 */
| unsigned int fraction : 52; /* fraction */
| } double;
|
| Thus sizeof(double) = 8 (64 bits).
|
| All the routines are callable from C programs, and return the result
| in the register pair d0-d1. They also preserve all registers except
| d0-d1 and a0-a1.
|=============================================================================
| __subdf3
|=============================================================================
| double __subdf3(double, double);
| Implemented by flipping the sign bit of the second operand (whose
| high long sits at sp@(12)) and falling straight into __adddf3,
| so a - b is computed as a + (-b).
FUNC(__subdf3)
SYM (__subdf3):
bchg IMM (31),sp@(12) | change sign of second operand
| and fall through, so we always add
|=============================================================================
| __adddf3
|=============================================================================
| double __adddf3(double, double);
FUNC(__adddf3)
SYM (__adddf3):
#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers and a2 (but d0-d1)
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get first operand
movel a6@(12),d1 |
movel a6@(16),d2 | get second operand
movel a6@(20),d3 |
movel d0,d7 | get d0's sign bit in d7 '
addl d1,d1 | check and clear sign bit of a, and gain one
addxl d0,d0 | bit of extra precision
beq Ladddf$b | if zero return second operand
movel d2,d6 | save sign in d6
addl d3,d3 | get rid of sign bit and gain one bit of
addxl d2,d2 | extra precision
beq Ladddf$a | if zero return first operand
andl IMM (0x80000000),d7 | isolate a's sign bit '
swap d6 | and also b's sign bit '
#ifndef __mcoldfire__
andw IMM (0x8000),d6 |
orw d6,d7 | and combine them into d7, so that a's sign '
| bit is in the high word and b's is in the '
| low word, so d6 is free to be used
#else
andl IMM (0x8000),d6
orl d6,d7
#endif
movel d7,a0 | now save d7 into a0, so d7 is free to
| be used also
| Get the exponents and check for denormalized and/or infinity.
movel IMM (0x001fffff),d6 | mask for the fraction
movel IMM (0x00200000),d7 | mask to put hidden bit back
movel d0,d4 |
andl d6,d0 | get fraction in d0
notl d6 | make d6 into mask for the exponent
andl d6,d4 | get exponent in d4
beq Ladddf$a$den | branch if a is denormalized
cmpl d6,d4 | check for INFINITY or NaN
beq Ladddf$nf |
orl d7,d0 | and put hidden bit back
Ladddf$1:
swap d4 | shift right exponent so that it starts
#ifndef __mcoldfire__
lsrw IMM (5),d4 | in bit 0 and not bit 20
#else
lsrl IMM (5),d4 | in bit 0 and not bit 20
#endif
| Now we have a's exponent in d4 and fraction in d0-d1 '
movel d2,d5 | save b to get exponent
andl d6,d5 | get exponent in d5
beq Ladddf$b$den | branch if b is denormalized
cmpl d6,d5 | check for INFINITY or NaN
beq Ladddf$nf
notl d6 | make d6 into mask for the fraction again
andl d6,d2 | and get fraction in d2
orl d7,d2 | and put hidden bit back
Ladddf$2:
swap d5 | shift right exponent so that it starts
#ifndef __mcoldfire__
lsrw IMM (5),d5 | in bit 0 and not bit 20
#else
lsrl IMM (5),d5 | in bit 0 and not bit 20
#endif
| Now we have b's exponent in d5 and fraction in d2-d3. '
| The situation now is as follows: the signs are combined in a0, the
| numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
| and d5 (b). To do the rounding correctly we need to keep all the
| bits until the end, so we need to use d0-d1-d2-d3 for the first number
| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
| exponents in a2-a3.
#ifndef __mcoldfire__
moveml a2-a3,sp@- | save the address registers
#else
movel a2,sp@-
movel a3,sp@-
movel a4,sp@-
#endif
movel d4,a2 | save the exponents
movel d5,a3 |
movel IMM (0),d7 | and move the numbers around
movel d7,d6 |
movel d3,d5 |
movel d2,d4 |
movel d7,d3 |
movel d7,d2 |
| Here we shift the numbers until the exponents are the same, and put
| the largest exponent in a2.
#ifndef __mcoldfire__
exg d4,a2 | get exponents back
exg d5,a3 |
cmpw d4,d5 | compare the exponents
#else
movel d4,a4 | get exponents back
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
cmpl d4,d5 | compare the exponents
#endif
beq Ladddf$3 | if equal don't shift '
bhi 9f | branch if second exponent is higher
| Here we have a's exponent larger than b's, so we have to shift b. We do
| this by using as counter d2:
1: movew d4,d2 | move largest exponent to d2
#ifndef __mcoldfire__
subw d5,d2 | and subtract second exponent
exg d4,a2 | get back the longs we saved
exg d5,a3 |
#else
subl d5,d2 | and subtract second exponent
movel d4,a4 | get back the longs we saved
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d2
#else
cmpl IMM (DBL_MANT_DIG+2),d2
#endif
bge Ladddf$b$small
#ifndef __mcoldfire__
cmpw IMM (32),d2 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d2 | if difference >= 32, shift by longs
#endif
bge 5f
2:
#ifndef __mcoldfire__
cmpw IMM (16),d2 | if difference >= 16, shift by words
#else
cmpl IMM (16),d2 | if difference >= 16, shift by words
#endif
bge 6f
bra 3f | enter dbra loop
4:
#ifndef __mcoldfire__
lsrl IMM (1),d4
roxrl IMM (1),d5
roxrl IMM (1),d6
roxrl IMM (1),d7
#else
lsrl IMM (1),d7
btst IMM (0),d6
beq 10f
bset IMM (31),d7
10: lsrl IMM (1),d6
btst IMM (0),d5
beq 11f
bset IMM (31),d6
11: lsrl IMM (1),d5
btst IMM (0),d4
beq 12f
bset IMM (31),d5
12: lsrl IMM (1),d4
#endif
3:
#ifndef __mcoldfire__
dbra d2,4b
#else
subql IMM (1),d2
bpl 4b
#endif
movel IMM (0),d2
movel d2,d3
bra Ladddf$4
5:
movel d6,d7
movel d5,d6
movel d4,d5
movel IMM (0),d4
#ifndef __mcoldfire__
subw IMM (32),d2
#else
subl IMM (32),d2
#endif
bra 2b
6:
movew d6,d7
swap d7
movew d5,d6
swap d6
movew d4,d5
swap d5
movew IMM (0),d4
swap d4
#ifndef __mcoldfire__
subw IMM (16),d2
#else
subl IMM (16),d2
#endif
bra 3b
9:
#ifndef __mcoldfire__
exg d4,d5
movew d4,d6
subw d5,d6 | keep d5 (largest exponent) in d4
exg d4,a2
exg d5,a3
#else
movel d5,d6
movel d4,d5
movel d6,d4
subl d5,d6
movel d4,a4
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d6
#else
cmpl IMM (DBL_MANT_DIG+2),d6
#endif
bge Ladddf$a$small
#ifndef __mcoldfire__
cmpw IMM (32),d6 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d6 | if difference >= 32, shift by longs
#endif
bge 5f
2:
#ifndef __mcoldfire__
cmpw IMM (16),d6 | if difference >= 16, shift by words
#else
cmpl IMM (16),d6 | if difference >= 16, shift by words
#endif
bge 6f
bra 3f | enter dbra loop
4:
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
#endif
3:
#ifndef __mcoldfire__
dbra d6,4b
#else
subql IMM (1),d6
bpl 4b
#endif
movel IMM (0),d7
movel d7,d6
bra Ladddf$4
5:
movel d2,d3
movel d1,d2
movel d0,d1
movel IMM (0),d0
#ifndef __mcoldfire__
subw IMM (32),d6
#else
subl IMM (32),d6
#endif
bra 2b
6:
movew d2,d3
swap d3
movew d1,d2
swap d2
movew d0,d1
swap d1
movew IMM (0),d0
swap d0
#ifndef __mcoldfire__
subw IMM (16),d6
#else
subl IMM (16),d6
#endif
bra 3b
Ladddf$3:
#ifndef __mcoldfire__
exg d4,a2
exg d5,a3
#else
movel d4,a4
movel a2,d4
movel a4,a2
movel d5,a4
movel a3,d5
movel a4,a3
#endif
Ladddf$4:
| Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
| the signs in a4.
| Here we have to decide whether to add or subtract the numbers:
#ifndef __mcoldfire__
exg d7,a0 | get the signs
exg d6,a3 | a3 is free to be used
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
movel d7,d6 |
movew IMM (0),d7 | get a's sign in d7 '
swap d6 |
movew IMM (0),d6 | and b's sign in d6 '
eorl d7,d6 | compare the signs
bmi Lsubdf$0 | if the signs are different we have
| to subtract
#ifndef __mcoldfire__
exg d7,a0 | else we add the numbers
exg d6,a3 |
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
addl d7,d3 |
addxl d6,d2 |
addxl d5,d1 |
addxl d4,d0 |
movel a2,d4 | return exponent to d4
movel a0,d7 |
andl IMM (0x80000000),d7 | d7 now has the sign
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
lea pc@(Ladddf$5),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Ladddf$5:
| Put back the exponent and check for overflow
#ifndef __mcoldfire__
cmpw IMM (0x7ff),d4 | is the exponent big?
#else
cmpl IMM (0x7ff),d4 | is the exponent big?
#endif
bge 1f
bclr IMM (DBL_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
bra Ladddf$ret
1:
moveq IMM (ADD),d5
bra Ld$overflow
Lsubdf$0:
| Here we do the subtraction.
#ifndef __mcoldfire__
exg d7,a0 | put sign back in a0
exg d6,a3 |
#else
movel d7,a4
movel a0,d7
movel a4,a0
movel d6,a4
movel a3,d6
movel a4,a3
#endif
subl d7,d3 |
subxl d6,d2 |
subxl d5,d1 |
subxl d4,d0 |
beq Ladddf$ret$1 | if zero just exit
bpl 1f | if positive skip the following
movel a0,d7 |
bchg IMM (31),d7 | change sign bit in d7
movel d7,a0 |
negl d3 |
negxl d2 |
negxl d1 | and negate result
negxl d0 |
1:
movel a2,d4 | return exponent to d4
movel a0,d7
andl IMM (0x80000000),d7 | isolate sign bit
#ifndef __mcoldfire__
moveml sp@+,a2-a3 |
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
lea pc@(Lsubdf$1),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lsubdf$1:
| Put back the exponent and sign (we don't have overflow). '
bclr IMM (DBL_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
bra Ladddf$ret
| If one of the numbers was too small (difference of exponents >=
| DBL_MANT_DIG+1) we return the other (and now we don't have to '
| check for finiteness or zero).
Ladddf$a$small:
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
movel a6@(16),d0
movel a6@(20),d1
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
Ladddf$b$small:
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
movel a6@(8),d0
movel a6@(12),d1
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
Ladddf$a$den:
movel d7,d4 | d7 contains 0x00200000
bra Ladddf$1
Ladddf$b$den:
movel d7,d5 | d7 contains 0x00200000
notl d6
bra Ladddf$2
Ladddf$b:
| Return b (if a is zero)
movel d2,d0
movel d3,d1
bne 1f | Check if b is -0
cmpl IMM (0x80000000),d0
bne 1f
andl IMM (0x80000000),d7 | Use the sign of a
clrl d0
bra Ladddf$ret
Ladddf$a:
movel a6@(8),d0
movel a6@(12),d1
1:
moveq IMM (ADD),d5
| Check for NaN and +/-INFINITY.
movel d0,d7 |
andl IMM (0x80000000),d7 |
bclr IMM (31),d0 |
cmpl IMM (0x7ff00000),d0 |
bge 2f |
movel d0,d0 | check for zero, since we don't '
bne Ladddf$ret | want to return -0 by mistake
bclr IMM (31),d7 |
bra Ladddf$ret |
2:
andl IMM (0x000fffff),d0 | check for NaN (nonzero fraction)
orl d1,d0 |
bne Ld$inop |
bra Ld$infty |
Ladddf$ret$1:
#ifndef __mcoldfire__
moveml sp@+,a2-a3 | restore regs and exit
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
Ladddf$ret:
| Normal exit.
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit back
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Ladddf$ret$den:
| Return a denormalized number.
#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right once more
roxrl IMM (1),d1 |
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif
bra Ladddf$ret
Ladddf$nf:
moveq IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
movel a6@(8),d0 | get the numbers back (remember that we
movel a6@(12),d1 | did some processing already)
movel a6@(16),d2 |
movel a6@(20),d3 |
movel IMM (0x7ff00000),d4 | useful constant (INFINITY)
movel d0,d7 | save sign bits
movel d2,d6 |
bclr IMM (31),d0 | clear sign bits
bclr IMM (31),d2 |
| We know that one of them is either NaN of +/-INFINITY
| Check for NaN (if either one is NaN return NaN)
cmpl d4,d0 | check first a (d0)
bhi Ld$inop | if d0 > 0x7ff00000 or equal and
bne 2f
tstl d1 | d1 > 0, a is NaN
bne Ld$inop |
2: cmpl d4,d2 | check now b (d1)
bhi Ld$inop |
bne 3f
tstl d3 |
bne Ld$inop |
3:
| Now comes the check for +/-INFINITY. We know that both are (maybe not
| finite) numbers, but we have to check if both are infinite whether we
| are adding or subtracting them.
eorl d7,d6 | to check sign bits
bmi 1f
andl IMM (0x80000000),d7 | get (common) sign bit
bra Ld$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they have to be infinite both, so we
| return NaN).
cmpl d2,d0 | are both infinite?
bne 1f | if d0 <> d2 they are not equal
cmpl d3,d1 | if d0 == d2 test d3 and d1
beq Ld$inop | if equal return NaN
1:
andl IMM (0x80000000),d7 | get a's sign bit '
cmpl d4,d0 | test now for infinity
beq Ld$infty | if a is INFINITY return with this sign
bchg IMM (31),d7 | else we know b is INFINITY and has
bra Ld$infty | the opposite sign
|=============================================================================
| __muldf3
|=============================================================================
| double __muldf3(double, double);
|
| IEEE double-precision multiply.  Operands are on the stack per the m68k
| C ABI: a at a6@(8)-a6@(12), b at a6@(16)-a6@(20); the product is returned
| in d0-d1.  The result sign is parked in a0 while d0-d7 are used as
| scratch; rounding plus the overflow/underflow checks are shared with
| __divdf3 via Lround$exit.  d2-d7 are saved here and restored by the
| common exit paths.
FUNC(__muldf3)
SYM (__muldf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save callee-saved data registers
#else
| ColdFire moveml cannot predecrement, so link reserves the 24 bytes
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0-d1
movel a6@(12),d1 |
movel a6@(16),d2 | and b into d2-d3
movel a6@(20),d3 |
movel d0,d7 | d7 will hold the sign of the product
eorl d2,d7 |
andl IMM (0x80000000),d7 |
movel d7,a0 | save sign bit into a0
movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
movel d7,d6 | another (mask for fraction)
notl d6 |
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d4 |
orl d1,d4 |
beq Lmuldf$a$0 | branch if a is zero
movel d0,d4 |
bclr IMM (31),d2 | get rid of b's sign bit '
movel d2,d5 |
orl d3,d5 |
beq Lmuldf$b$0 | branch if b is zero
movel d2,d5 |
| Neither operand is zero; now rule out NaN and INFINITY (exponent 0x7ff).
cmpl d7,d0 | is a big?
bhi Lmuldf$inop | if a is NaN return NaN
beq Lmuldf$a$nf | we still have to check d1 and b ...
cmpl d7,d2 | now compare b with INFINITY
bhi Lmuldf$inop | is b NaN?
beq Lmuldf$b$nf | we still have to check d3 ...
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d4 and d5.
andl d7,d4 | isolate exponent in d4
beq Lmuldf$a$den | if exponent zero, have denormalized
andl d6,d0 | isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
#endif
Lmuldf$1:
andl d7,d5 | same for b: isolate exponent ...
beq Lmuldf$b$den |
andl d6,d2 | ... and fraction
orl IMM (0x00100000),d2 | and put hidden bit back
swap d5 |
#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Lmuldf$2: |
#ifndef __mcoldfire__
addw d5,d4 | add exponents
subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
#else
addl d5,d4 | add exponents
subl IMM (D_BIAS+1),d4 | and subtract bias (plus one)
#endif
| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
| denormalized to start with!), which means that in the product bit 104
| (which will correspond to bit 8 of the fourth long) is set.
| Here we have to do the product.
| To do it we have to juggle the registers back and forth, as there are not
| enough to keep everything in them. So we use the address registers to keep
| some intermediate data.
#ifndef __mcoldfire__
moveml a2-a3,sp@- | save a2 and a3 for temporary use
#else
movel a2,sp@-
movel a3,sp@-
movel a4,sp@-
#endif
movel IMM (0),a2 | a2 is a null register
movel d4,a3 | and a3 will preserve the exponent
| First, shift d2-d3 so bit 20 becomes bit 31:
#ifndef __mcoldfire__
rorl IMM (5),d2 | rotate d2 5 places right
swap d2 | and swap it
rorl IMM (5),d3 | do the same thing with d3
swap d3 |
movew d3,d6 | get the rightmost 11 bits of d3
andw IMM (0x07ff),d6 |
orw d6,d2 | and put them into d2
andw IMM (0xf800),d3 | clear those bits in d3
#else
moveq IMM (11),d7 | left shift d2 11 bits
lsll d7,d2
movel d3,d6 | get a copy of d3
lsll d7,d3 | left shift d3 11 bits
andl IMM (0xffe00000),d6 | get the top 11 bits of d3
moveq IMM (21),d7 | right shift them 21 bits
lsrl d7,d6
orl d6,d2 | stick them at the end of d2
#endif
movel d2,d6 | move b into d6-d7
movel d3,d7 | move a into d4-d5
movel d0,d4 | and clear d0-d1-d2-d3 (to put result)
movel d1,d5 |
movel IMM (0),d3 |
movel d3,d2 |
movel d3,d1 |
movel d3,d0 |
| We use a1 as counter:
movel IMM (DBL_MANT_DIG-1),a1
#ifndef __mcoldfire__
exg d7,a1
#else
movel d7,a4
movel a1,d7
movel a4,a1
#endif
| Main shift-and-add multiply loop: one iteration per mantissa bit.
| Each pass shifts the 128-bit partial sum (d0-d1-d2-d3) left, shifts the
| multiplier b (d6-d7) left, and adds the multiplicand a (d4-d5) into the
| sum whenever the bit shifted out of d6 is set.  a2 holds the constant 0
| so the add can be carry-extended into d1 and d0 with addxl.
1:
#ifndef __mcoldfire__
exg d7,a1 | put counter back in a1
#else
movel d7,a4
movel a1,d7
movel a4,a1
#endif
addl d3,d3 | shift sum once left
addxl d2,d2 |
addxl d1,d1 |
addxl d0,d0 |
addl d7,d7 |
addxl d6,d6 |
bcc 2f | if bit clear skip the following
#ifndef __mcoldfire__
exg d7,a2 | borrow the zero in a2 so d7 reads as 0
#else
movel d7,a4
movel a2,d7
movel a4,a2
#endif
addl d5,d3 | else add a to the sum
addxl d4,d2 |
addxl d7,d1 | (d7 == 0 here, so these just propagate the carry)
addxl d7,d0 |
#ifndef __mcoldfire__
exg d7,a2 |
#else
movel d7,a4
movel a2,d7
movel a4,a2
#endif
2:
#ifndef __mcoldfire__
exg d7,a1 | put counter in d7
dbf d7,1b | decrement and branch
#else
movel d7,a4
movel a1,d7
movel a4,a1
subql IMM (1),d7
bpl 1b
#endif
movel a3,d4 | restore exponent
#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
movel sp@+,a3
movel sp@+,a2
#endif
| Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
| first thing to do now is to normalize it so bit 8 becomes bit
| DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
| The word moves below shift the whole 128-bit product left by 16 bits
| (bit 8 of d0 -> bit 24); the 3-bit right shift that follows then lands
| it on bit 21 == DBL_MANT_DIG-32.
swap d0
swap d1
movew d1,d0
swap d2
movew d2,d1
swap d3
movew d3,d2
movew IMM (0),d3
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
#else
| ColdFire has no roxrl; emulate the 3-bit 128-bit shift with masks
moveq IMM (29),d6
lsrl IMM (3),d3
movel d2,d7
lsll d6,d7
orl d7,d3
lsrl IMM (3),d2
movel d1,d7
lsll d6,d7
orl d7,d2
lsrl IMM (3),d1
movel d0,d7
lsll d6,d7
orl d7,d1
lsrl IMM (3),d0
#endif
| Now round, check for over- and underflow, and exit.
movel a0,d7 | get sign bit back into d7
moveq IMM (MULTIPLY),d5
btst IMM (DBL_MANT_DIG+1-32),d0
beq Lround$exit
| Carry out of the top mantissa bit: shift right once and bump exponent
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addl IMM (1),d4
#endif
bra Lround$exit
Lmuldf$inop:
| Multiply with a NaN operand: return a quiet NaN via the common handler.
moveq IMM (MULTIPLY),d5
bra Ld$inop
Lmuldf$b$nf:
| b has exponent 0x7ff: either NaN (nonzero fraction) or +/-INFINITY.
moveq IMM (MULTIPLY),d5
movel a0,d7 | get sign bit back into d7
tstl d3 | we know d2 == 0x7ff00000, so check d3
bne Ld$inop | if d3 <> 0 b is NaN
bra Ld$overflow | else we have overflow (since a is finite)
Lmuldf$a$nf:
| Same check for a (d0 holds its high word with the sign cleared).
moveq IMM (MULTIPLY),d5
movel a0,d7 | get sign bit back into d7
tstl d1 | we know d0 == 0x7ff00000, so check d1
bne Ld$inop | if d1 <> 0 a is NaN
bra Ld$overflow | else signal overflow
| If either number is zero return zero, unless the other is +/-INFINITY or
| NaN, in which case we return NaN.
| On exit d0-d1 hold the properly signed zero; d2 holds the other operand's
| high word (sign cleared) for the finiteness check at label 1.
Lmuldf$b$0:
moveq IMM (MULTIPLY),d5
#ifndef __mcoldfire__
exg d2,d0 | put b (==0) into d0-d1
exg d3,d1 | and a (with sign bit cleared) into d2-d3
movel a0,d0 | set result sign
#else
movel d0,d2 | put a into d2-d3
movel d1,d3
movel a0,d0 | put result zero into d0-d1
movq IMM(0),d1
#endif
bra 1f
Lmuldf$a$0:
movel a0,d0 | set result sign
movel a6@(16),d2 | put b into d2-d3 again
movel a6@(20),d3 |
bclr IMM (31),d2 | clear sign bit
1: cmpl IMM (0x7ff00000),d2 | check for non-finiteness
bge Ld$inop | in case NaN or +/-INFINITY return NaN
PICLEA SYM (_fpCCR),a0 | clear the exception status word
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
| If a number is denormalized we put an exponent of 1 but do not put the
| hidden bit back into the fraction; instead we shift left until bit 21
| (the hidden bit) is set, adjusting the exponent accordingly. We do this
| to ensure that the product of the fractions is close to 1.
| Each left shift decrements the (already unbiased) exponent, so the value
| represented is unchanged; control then rejoins the normal multiply path.
Lmuldf$a$den:
movel IMM (1),d4
andl d6,d0
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0 |
#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
#endif
btst IMM (20),d0 |
bne Lmuldf$1 | hidden bit reached: continue with b's exponent
bra 1b
Lmuldf$b$den:
movel IMM (1),d5
andl d6,d2
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2 |
#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
#endif
btst IMM (20),d2 |
bne Lmuldf$2 | hidden bit reached: continue with exponent math
bra 1b
|=============================================================================
| __divdf3
|=============================================================================
| double __divdf3(double, double);
|
| IEEE double-precision divide (a / b).  Operands on the stack:
| a at a6@(8)-a6@(12), b at a6@(16)-a6@(20); quotient returned in d0-d1.
| The quotient bits are produced one per iteration by restoring division
| (compare / subtract / shift); rounding and over/underflow checks are
| shared with __muldf3 via Lround$exit.  The result sign lives in a0.
FUNC(__divdf3)
SYM (__divdf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0-d1
movel a6@(12),d1 |
movel a6@(16),d2 | and b into d2-d3
movel a6@(20),d3 |
movel d0,d7 | d7 will hold the sign of the result
eorl d2,d7 |
andl IMM (0x80000000),d7
movel d7,a0 | save sign into a0
movel IMM (0x7ff00000),d7 | useful constant (+INFINITY)
movel d7,d6 | another (mask for fraction)
notl d6 |
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d4 |
orl d1,d4 |
beq Ldivdf$a$0 | branch if a is zero
movel d0,d4 |
bclr IMM (31),d2 | get rid of b's sign bit '
movel d2,d5 |
orl d3,d5 |
beq Ldivdf$b$0 | branch if b is zero
movel d2,d5
cmpl d7,d0 | is a big?
bhi Ldivdf$inop | if a is NaN return NaN
beq Ldivdf$a$nf | if d0 == 0x7ff00000 we check d1
cmpl d7,d2 | now compare b with INFINITY
bhi Ldivdf$inop | if b is NaN return NaN
beq Ldivdf$b$nf | if d2 == 0x7ff00000 we check d3
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d4 and d5 and normalize the numbers to
| ensure that the ratio of the fractions is around 1. We do this by
| making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
| set, even if they were denormalized to start with.
| Thus, the result will satisfy: 2 > result > 1/2.
andl d7,d4 | and isolate exponent in d4
beq Ldivdf$a$den | if exponent is zero we have a denormalized
andl d6,d0 | and isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
#endif
Ldivdf$1: |
andl d7,d5 | same for b
beq Ldivdf$b$den |
andl d6,d2 |
orl IMM (0x00100000),d2
swap d5 |
#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Ldivdf$2: |
#ifndef __mcoldfire__
subw d5,d4 | subtract exponents
addw IMM (D_BIAS),d4 | and add bias
#else
subl d5,d4 | subtract exponents
addl IMM (D_BIAS),d4 | and add bias
#endif
| We are now ready to do the division. We have prepared things in such a way
| that the ratio of the fractions will be less than 2 but greater than 1/2.
| At this point the registers in use are:
| d0-d1 hold a (first operand, bit DBL_MANT_DIG-32=0, bit
| DBL_MANT_DIG-1-32=1)
| d2-d3 hold b (second operand, bit DBL_MANT_DIG-32=1)
| d4 holds the difference of the exponents, corrected by the bias
| a0 holds the sign of the ratio
| To do the rounding correctly we need to keep information about the
| nonsignificant bits. One way to do this would be to do the division
| using four registers; another is to use two registers (as originally
| I did), but use a sticky bit to preserve information about the
| fractional part. Note that we can keep that info in a1, which is not
| used.
movel IMM (0),d6 | d6-d7 will hold the result
movel d6,d7 |
movel IMM (0),a1 | and a1 will hold the sticky bit
movel IMM (DBL_MANT_DIG-32+1),d5
| First quotient long: restoring division, one bit of d6 per iteration.
1: cmpl d0,d2 | is a < b?
bhi 3f | if b > a skip the following
beq 4f | if d0==d2 check d1 and d3
2: subl d3,d1 |
subxl d2,d0 | a <-- a - b
bset d5,d6 | set the corresponding bit in d6
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
bra 5f
4: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 3b | if d3 > d1 skip the subtraction
bra 2b | else go do it
5:
| Here we have to start setting the bits in the second long.
movel IMM (31),d5 | again d5 is counter
1: cmpl d0,d2 | is a < b?
bhi 3f | if b > a skip the following
beq 4f | if d0==d2 check d1 and d3
2: subl d3,d1 |
subxl d2,d0 | a <-- a - b
bset d5,d7 | set the corresponding bit in d7
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
bra 5f
4: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 3b | if d3 > d1 skip the subtraction
bra 2b | else go do it
5:
| Now go ahead checking until we hit a one, which we store in d2.
movel IMM (DBL_MANT_DIG),d5
1: cmpl d2,d0 | is a < b?
bhi 4f | if b < a, exit
beq 3f | if d0==d2 check d1 and d3
2: addl d1,d1 | shift a by 1
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bpl 1b
#endif
movel IMM (0),d2 | here no sticky bit was found
movel d2,d3
bra 5f
3: cmpl d1,d3 | here d0==d2, so check d1 and d3
bhi 2b | if d3 > d1 go back
4:
| Here put the sticky bit in d2-d3 (in the position which actually corresponds
| to it; if you don't do this the algorithm loses in some cases). '
movel IMM (0),d2
movel d2,d3
#ifndef __mcoldfire__
subw IMM (DBL_MANT_DIG),d5
addw IMM (63),d5
cmpw IMM (31),d5
#else
subl IMM (DBL_MANT_DIG),d5
addl IMM (63),d5
cmpl IMM (31),d5
#endif
| d5 is now the bit position within the d2:d3 pair.  When d5 > 31 the
| branch goes straight to "2:" without subtracting 32; this works because
| bset with a data-register destination takes the bit number modulo 32,
| which makes the sub IMM (32) instructions below unreachable but harmless.
bhi 2f
1: bset d5,d3
bra 5f
#ifndef __mcoldfire__
subw IMM (32),d5
#else
subl IMM (32),d5
#endif
2: bset d5,d2
5:
| Finally we are finished! Move the longs in the address registers to
| their final destination:
movel d6,d0
movel d7,d1
| NOTE(review): this clears d3 after the sticky placement above may have
| set a bit in it (the d5 <= 31 path); confirm against upstream whether
| low sticky bits are deliberately discarded here.
movel IMM (0),d3
| Here we have finished the division, with the result in d0-d1-d2-d3, with
| 2^21 <= d0 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
| If it is not, then definitely bit 21 is set. Normalize so bit 22 is
| not set:
btst IMM (DBL_MANT_DIG-32+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
roxrl IMM (1),d3
addw IMM (1),d4
#else
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| Now round, check for over- and underflow, and exit.
movel a0,d7 | restore sign bit to d7
moveq IMM (DIVIDE),d5
bra Lround$exit
Ldivdf$inop:
| Divide with a NaN operand: return a quiet NaN via the common handler.
moveq IMM (DIVIDE),d5
bra Ld$inop
Ldivdf$a$0:
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero.
moveq IMM (DIVIDE),d5
bclr IMM (31),d2 |
movel d2,d4 |
orl d3,d4 |
beq Ld$inop | if b is also zero return NaN
cmpl IMM (0x7ff00000),d2 | check for NaN
bhi Ld$inop |
blt 1f |
tstl d3 | d2 == 0x7ff00000: NaN iff low fraction nonzero
bne Ld$inop |
1: movel a0,d0 | else return signed zero
moveq IMM(0),d1 |
PICLEA SYM (_fpCCR),a0 | clear exception flags
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |
Ldivdf$b$0:
moveq IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
movel a0,d7 | put a's sign bit back in d7 '
cmpl IMM (0x7ff00000),d0 | compare d0 with INFINITY
bhi Ld$inop | if larger it is NaN
tstl d1 |
bne Ld$inop |
bra Ld$div$0 | else signal DIVIDE_BY_ZERO
Ldivdf$b$nf:
moveq IMM (DIVIDE),d5
| If d2 == 0x7ff00000 we have to check d3.
tstl d3 |
bne Ld$inop | if d3 <> 0, b is NaN
bra Ld$underflow | else b is +/-INFINITY, so signal underflow
Ldivdf$a$nf:
moveq IMM (DIVIDE),d5
| If d0 == 0x7ff00000 we have to check d1.
tstl d1 |
bne Ld$inop | if d1 <> 0, a is NaN
| If a is INFINITY we have to check b
cmpl d7,d2 | compare b with INFINITY
bge Ld$inop | if b is NaN or INFINITY return NaN
tstl d3 |
bne Ld$inop |
bra Ld$overflow | else return overflow
| If a number is denormalized we put an exponent of 1 but do not put the
| bit back into the fraction.
| Instead the fraction is shifted left until the hidden-bit position
| (bit DBL_MANT_DIG-32-1 of the high long) is set, decrementing the
| exponent once per shift, then control rejoins the normal divide path.
Ldivdf$a$den:
movel IMM (1),d4
andl d6,d0
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0
#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
#endif
btst IMM (DBL_MANT_DIG-32-1),d0
bne Ldivdf$1 | normalized: continue with b's exponent
bra 1b
Ldivdf$b$den:
movel IMM (1),d5
andl d6,d2
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2
#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
#endif
btst IMM (DBL_MANT_DIG-32-1),d2
bne Ldivdf$2 | normalized: continue with exponent math
bra 1b
Lround$exit:
| This is a common exit point for __muldf3 and __divdf3. When they enter
| this point the sign of the result is in d7, the result in d0-d1, normalized
| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
| First check for underlow in the exponent:
#ifndef __mcoldfire__
cmpw IMM (-DBL_MANT_DIG-1),d4
#else
cmpl IMM (-DBL_MANT_DIG-1),d4
#endif
blt Ld$underflow
| It could happen that the exponent is less than 1, in which case the
| number is denormalized. In this case we shift right and adjust the
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
movel d7,a0 | park the sign bit in a0 during the shifting
movel IMM (0),d6 | use d6-d7 to collect bits flushed right
movel d6,d7 | use d6-d7 to collect bits flushed right
#ifndef __mcoldfire__
cmpw IMM (1),d4 | if the exponent is less than 1 we
#else
cmpl IMM (1),d4 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
| Denormalization loop: one 192-bit right shift (d0..d3,d6,d7) per pass,
| bumping the exponent until it reaches 1.
1:
#ifndef __mcoldfire__
addw IMM (1),d4 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
roxrl IMM (1),d2 |
roxrl IMM (1),d3 |
roxrl IMM (1),d6 |
roxrl IMM (1),d7 |
cmpw IMM (1),d4 | is the exponent 1 already?
#else
addl IMM (1),d4 | adjust the exponent
lsrl IMM (1),d7
btst IMM (0),d6
beq 13f
bset IMM (31),d7
13: lsrl IMM (1),d6
btst IMM (0),d3
beq 14f
bset IMM (31),d6
14: lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
btst IMM (0),d1
beq 11f
bset IMM (31),d2
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 12f
bset IMM (31),d1
12: lsrl IMM (1),d0
cmpl IMM (1),d4 | is the exponent 1 already?
#endif
beq 2f | if so we are done denormalizing
bra 1b | else keep shifting
bra Ld$underflow | safety check, shouldn't execute '
2: orl d6,d2 | this is a trick so we don't lose '
orl d7,d3 | the bits which were flushed right
movel a0,d7 | get back sign bit into d7
| Now call the rounding routine (which takes care of denormalized numbers):
lea pc@(Lround$0),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lround$0:
| Here we have a correctly rounded result (either normalized or denormalized).
| Here we should have either a normalized number or a denormalized one, and
| the exponent is necessarily larger or equal to 1 (so we don't have to '
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 0x7ff).
#ifndef __mcoldfire__
cmpw IMM (0x07ff),d4
#else
cmpl IMM (0x07ff),d4
#endif
bge Ld$overflow
| Now check for a denormalized number (exponent==0):
movew d4,d4 | self-move just sets the CCR from the low word
beq Ld$den
1:
| Put back the exponents and sign and return.
#ifndef __mcoldfire__
lslw IMM (4),d4 | exponent back to fourth byte
#else
lsll IMM (4),d4 | exponent back to fourth byte
#endif
bclr IMM (DBL_MANT_DIG-32-1),d0 | clear the hidden bit
swap d0 | and put back exponent
#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
#endif
swap d0 |
orl d7,d0 | and sign also
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
|=============================================================================
| __negdf2
|=============================================================================
| double __negdf2(double);
|
| Negate a double: flip the sign bit, but return +0 for either zero,
| propagate NaN through the common handler, and re-sign INFINITY.
| The operand is read from a6@(8)-a6@(12); the result is in d0-d1.
FUNC(__negdf2)
SYM (__negdf2):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (NEGATE),d5
movel a6@(8),d0 | get number to negate in d0-d1
movel a6@(12),d1 |
bchg IMM (31),d0 | negate
movel d0,d2 | make a positive copy (for the tests)
bclr IMM (31),d2 |
movel d2,d4 | check for zero
orl d1,d4 |
beq 2f | if zero (either sign) return +zero
cmpl IMM (0x7ff00000),d2 | compare to +INFINITY
blt 1f | if finite, return
bhi Ld$inop | if larger (fraction not zero) is NaN
tstl d1 | if d2 == 0x7ff00000 check d1
bne Ld$inop |
movel d0,d7 | else get sign and return INFINITY
andl IMM (0x80000000),d7
bra Ld$infty
1: PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
2: bclr IMM (31),d0 | force +0 and take the normal return path
bra 1b
|=============================================================================
| __cmpdf2
|=============================================================================
GREATER = 1
LESS = -1
EQUAL = 0
| int __cmpdf2_internal(double, double, int);
|
| Compare two doubles: returns GREATER/LESS/EQUAL in d0.  The third
| argument (a6@(24)) is the value to return when either operand is NaN.
SYM (__cmpdf2_internal):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (COMPARE),d5
movel a6@(8),d0 | get first operand
movel a6@(12),d1 |
movel a6@(16),d2 | get second operand
movel a6@(20),d3 |
| First check if a and/or b are (+/-) zero and in that case clear
| the sign bit.
movel d0,d6 | copy signs into d6 (a) and d7(b)
bclr IMM (31),d0 | and clear signs in d0 and d2
movel d2,d7 |
bclr IMM (31),d2 |
cmpl IMM (0x7ff00000),d0 | check for a == NaN
bhi Lcmpd$inop | if d0 > 0x7ff00000, a is NaN
beq Lcmpdf$a$nf | if equal can be INFINITY, so check d1
movel d0,d4 | copy into d4 to test for zero
orl d1,d4 |
beq Lcmpdf$a$0 |
Lcmpdf$0:
cmpl IMM (0x7ff00000),d2 | check for b == NaN
bhi Lcmpd$inop | if d2 > 0x7ff00000, b is NaN
beq Lcmpdf$b$nf | if equal can be INFINITY, so check d3
movel d2,d4 |
orl d3,d4 |
beq Lcmpdf$b$0 |
Lcmpdf$1:
| Check the signs
eorl d6,d7
bpl 1f
| If the signs are not equal check if a >= 0
tstl d6
bpl Lcmpdf$a$gt$b | if (a >= 0 && b < 0) => a > b
bmi Lcmpdf$b$gt$a | if (a < 0 && b >= 0) => a < b
1:
| If the signs are equal check for < 0
tstl d6
bpl 1f
| If both are negative exchange them
| (the sign copies in d6/d7 are no longer needed past this point, so
| d7 is free to serve as the swap temporary on ColdFire)
#ifndef __mcoldfire__
exg d0,d2
exg d1,d3
#else
movel d0,d7
movel d2,d0
movel d7,d2
movel d1,d7
movel d3,d1
movel d7,d3
#endif
1:
| Now that they are positive we just compare them as longs (does this also
| work for denormalized numbers?).
| If both were negative the operands were swapped above, so the larger
| magnitude still maps to the correct GREATER/LESS label.
cmpl d0,d2
bhi Lcmpdf$b$gt$a | |b| > |a|
bne Lcmpdf$a$gt$b | |b| < |a|
| If we got here d0 == d2, so we compare d1 and d3.
cmpl d1,d3
bhi Lcmpdf$b$gt$a | |b| > |a|
bne Lcmpdf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpdf$a$gt$b:
movel IMM (GREATER),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpdf$b$gt$a:
movel IMM (LESS),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpdf$a$0:
| a is zero: drop its sign so -0 compares equal to +0
bclr IMM (31),d6
bra Lcmpdf$0
Lcmpdf$b$0:
bclr IMM (31),d7
bra Lcmpdf$1
Lcmpdf$a$nf:
| a has exponent 0x7ff: NaN iff the low fraction word is nonzero
tstl d1
bne Ld$inop
bra Lcmpdf$0
Lcmpdf$b$nf:
tstl d3
bne Ld$inop
bra Lcmpdf$1
Lcmpd$inop:
| NaN operand: return the caller-supplied third argument and raise
| INVALID_OPERATION through the exception handler.
movl a6@(24),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (DOUBLE_FLOAT),d6
PICJUMP $_exception_handler
| int __cmpdf2(double, double);
| Public wrapper: calls the internal routine with 1 as the NaN result.
FUNC(__cmpdf2)
SYM (__cmpdf2):
link a6,IMM (0)
pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
|=============================================================================
| rounding routines
|=============================================================================
| The rounding routines expect the number to be normalized in registers
| d0-d1-d2-d3, with the exponent in register d4. They assume that the
| exponent is larger or equal to 1. They return a properly normalized number
| if possible, and a denormalized number otherwise. The exponent is returned
| in d4.
| These are not called but jumped to; the caller leaves its continuation
| address in a0, and each routine returns with "jmp a0@".
Lround$to$nearest:
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
| Here we assume that the exponent is not too small (this should be checked
| before entering the rounding routine), but the number could be denormalized.
| Check for denormalized numbers:
1: btst IMM (DBL_MANT_DIG-32),d0
bne 2f | if set the number is normalized
| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -D_BIAS+1).
#ifndef __mcoldfire__
cmpw IMM (1),d4 | remember that the exponent is at least one
#else
cmpl IMM (1),d4 | remember that the exponent is at least one
#endif
beq 2f | an exponent of one means denormalized
addl d3,d3 | else shift and adjust the exponent
addxl d2,d2 |
addxl d1,d1 |
addxl d0,d0 |
#ifndef __mcoldfire__
dbra d4,1b |
#else
subql IMM (1), d4
bpl 1b
#endif
2:
| Now round: we do it as follows: after the shifting we can write the
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
| If delta < 1, do nothing. If delta > 1, add 1 to f.
| If delta == 1, we make sure the rounded number will be even (odd?)
| (after shifting).
btst IMM (0),d1 | is delta < 1?
beq 2f | if so, do not do anything
orl d2,d3 | any sticky bits below the guard bit?
bne 1f | if so delta > 1: just round up
| Exact tie (delta == 1): round to even by adding bit 1 of d1 to itself
movel d1,d3 |
andl IMM (2),d3 | bit 1 is the last significant bit
movel IMM (0),d2 |
addl d3,d1 |
addxl d2,d0 |
bra 2f |
1: movel IMM (1),d3 | else add 1
movel IMM (0),d2 |
addl d3,d1 |
addxl d2,d0
| Shift right once (because we used bit #DBL_MANT_DIG-32!).
2:
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif
| Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
| 'fraction overflow' ...).
btst IMM (DBL_MANT_DIG-32),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addl IMM (1),d4
#endif
1:
| If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
| have to put the exponent to zero and return a denormalized number.
btst IMM (DBL_MANT_DIG-32-1),d0
beq 1f
jmp a0@
1: movel IMM (0),d4
jmp a0@
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
| The directed rounding modes simply truncate: the extra bits in d2-d3
| are dropped and control returns to the caller unchanged.
jmp a0@
#endif /* L_double */
#ifdef L_float
| Single-precision (L_float) section: symbols imported from the support
| code (_fpCCR holds the fp control/status words; $_exception_handler is
| the common exception trampoline used by the Lf$* exit stubs below).
.globl SYM (_fpCCR)
.globl $_exception_handler
| Bit patterns for IEEE single precision special values (sign|exp|frac).
QUIET_NaN = 0xffffffff
SIGNL_NaN = 0x7f800001
INFINITY = 0x7f800000
| Exponent field limits; this library uses a bias of 126 (hidden-bit
| convention), matching the "shifted by 126" note in the format banner.
F_MAX_EXP = 0xff
F_BIAS = 126
FLT_MAX_EXP = F_MAX_EXP - F_BIAS
FLT_MIN_EXP = 1 - F_BIAS
FLT_MANT_DIG = 24
| Exception flag bits passed to $_exception_handler in d7.
INEXACT_RESULT = 0x0001
UNDERFLOW = 0x0002
OVERFLOW = 0x0004
DIVIDE_BY_ZERO = 0x0008
INVALID_OPERATION = 0x0010
| Precision tag passed to $_exception_handler in d6.
SINGLE_FLOAT = 1
| Operation codes: the routines load one of these into d5 before taking
| an exceptional exit, so the handler knows which operation trapped.
NOOP = 0
ADD = 1
MULTIPLY = 2
DIVIDE = 3
NEGATE = 4
COMPARE = 5
EXTENDSFDF = 6
TRUNCDFSF = 7
UNKNOWN = -1
| Rounding-mode values read from _fpCCR word at offset 6.
ROUND_TO_NEAREST = 0 | round result to nearest representable value
ROUND_TO_ZERO = 1 | round result towards zero
ROUND_TO_PLUS = 2 | round result towards plus infinity
ROUND_TO_MINUS = 3 | round result towards minus infinity
| Entry points:
.globl SYM (__addsf3)
.globl SYM (__subsf3)
.globl SYM (__mulsf3)
.globl SYM (__divsf3)
.globl SYM (__negsf2)
.globl SYM (__cmpsf2)
.globl SYM (__cmpsf2_internal)
.hidden SYM (__cmpsf2_internal)
Lf$den:
| Return and signal a denormalized number
orl d7,d0
moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
Lf$infty:
Lf$overflow:
| Return a properly signed INFINITY and set the exception flags
| (sign expected in d7 on entry; result built in d0).
movel IMM (INFINITY),d0
orl d7,d0
moveq IMM (INEXACT_RESULT+OVERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
Lf$underflow:
| Return 0 and set the exception flags
| (result flushed to +0; INEXACT_RESULT+UNDERFLOW signaled).
moveq IMM (0),d0
moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
Lf$inop:
| Return a quiet NaN and set the exception flags
| (used for invalid operations: 0*inf, inf-inf, NaN operands, ...).
movel IMM (QUIET_NaN),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
Lf$div$0:
| Return a properly signed INFINITY and set the exception flags
| (division by zero; sign of the quotient expected in d7 on entry).
movel IMM (INFINITY),d0
orl d7,d0
moveq IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
|=============================================================================
|=============================================================================
| single precision routines
|=============================================================================
|=============================================================================
| A single precision floating point number (float) has the format:
|
| struct _float {
| unsigned int sign : 1; /* sign bit */
| unsigned int exponent : 8; /* exponent, shifted by 126 */
| unsigned int fraction : 23; /* fraction */
| } float;
|
| Thus sizeof(float) = 4 (32 bits).
|
| All the routines are callable from C programs, and return the result
| in the single register d0. They also preserve all registers except
| d0-d1 and a0-a1.
|=============================================================================
| __subsf3
|=============================================================================
| float __subsf3(float, float);
| Implemented as a - b == a + (-b): toggle the sign bit of the second
| stack argument in place, then fall straight into __addsf3 below.
FUNC(__subsf3)
SYM (__subsf3):
bchg IMM (31),sp@(8) | change sign of second operand
| and fall through
|=============================================================================
| __addsf3
|=============================================================================
| float __addsf3(float, float);
| Register roles through this routine:
|   d0-d1 = extended mantissa of a   d2-d3 = extended mantissa of b
|   d4,d5 = masks (exponent/fraction, hidden bit)
|   d6,d7 = exponents of a and b     a0,a1 = saved sign-shifted copies
FUNC(__addsf3)
SYM (__addsf3):
#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers but d0-d1
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get first operand
movel a6@(12),d1 | get second operand
movel d0,a0 | get d0's sign bit '
addl d0,d0 | check and clear sign bit of a
beq Laddsf$b | if zero return second operand
movel d1,a1 | save b's sign bit '
addl d1,d1 | get rid of sign bit
beq Laddsf$a | if zero return first operand
| Get the exponents and check for denormalized and/or infinity.
| (Note: after the addl the number is left-shifted by one, so the
| fraction occupies bits 1..23 and the exponent bits 24..31.)
movel IMM (0x00ffffff),d4 | mask to get fraction
movel IMM (0x01000000),d5 | mask to put hidden bit back
movel d0,d6 | save a to get exponent
andl d4,d0 | get fraction in d0
notl d4 | make d4 into a mask for the exponent
andl d4,d6 | get exponent in d6
beq Laddsf$a$den | branch if a is denormalized
cmpl d4,d6 | check for INFINITY or NaN
beq Laddsf$nf
swap d6 | put exponent into first word
orl d5,d0 | and put hidden bit back
Laddsf$1:
| Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
movel d1,d7 | get exponent in d7
andl d4,d7 |
beq Laddsf$b$den | branch if b is denormalized
cmpl d4,d7 | check for INFINITY or NaN
beq Laddsf$nf
swap d7 | put exponent into first word
notl d4 | make d4 into a mask for the fraction
andl d4,d1 | get fraction in d1
orl d5,d1 | and put hidden bit back
Laddsf$2:
| Now we have b's exponent in d7 (second byte) and the mantissa in d1. '
| Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
| shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
| bit).
movel d1,d2 | move b to d2, since we want to use
| two registers to do the sum
movel IMM (0),d1 | and clear the new ones
movel d1,d3 |
| Here we shift the numbers in registers d0 and d1 so the exponents are the
| same, and put the largest exponent in d6. Note that we are using two
| registers for each number (see the discussion by D. Knuth in "Seminumerical
| Algorithms").
#ifndef __mcoldfire__
cmpw d6,d7 | compare exponents
#else
cmpl d6,d7 | compare exponents
#endif
beq Laddsf$3 | if equal don't shift '
bhi 5f | branch if second exponent largest
| --- exponent of a is larger: shift b (d2-d3) right by the difference ---
1:
subl d6,d7 | keep the largest exponent
negl d7
#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$b$small
#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 4f
2:
#ifndef __mcoldfire__
subw IMM (1),d7
#else
subql IMM (1), d7
#endif
3:
#ifndef __mcoldfire__
lsrl IMM (1),d2 | shift right second operand
roxrl IMM (1),d3
dbra d7,3b
#else
| ColdFire has no roxrl: propagate bit 0 of d2 into bit 31 of d3 by hand.
lsrl IMM (1),d3
btst IMM (0),d2
beq 10f
bset IMM (31),d3
10: lsrl IMM (1),d2
subql IMM (1), d7
bpl 3b
#endif
bra Laddsf$3
4:
| Fast path: shift by 16 in one step by rotating words through the pair.
movew d2,d3
swap d3
movew d3,d2
swap d2
#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
#endif
bne 2b | if still more bits, go back to normal case
bra Laddsf$3
| --- exponent of b is larger: shift a (d0-d1) right by the difference ---
5:
#ifndef __mcoldfire__
exg d6,d7 | exchange the exponents
#else
| ColdFire has no exg: swap d6/d7 with the classic triple-eor trick.
eorl d6,d7
eorl d7,d6
eorl d6,d7
#endif
subl d6,d7 | keep the largest exponent
negl d7 |
#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (and exit!) '
#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$a$small
#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 8f
6:
#ifndef __mcoldfire__
subw IMM (1),d7
#else
subl IMM (1),d7
#endif
7:
#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right first operand
roxrl IMM (1),d1
dbra d7,7b
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
subql IMM (1),d7
bpl 7b
#endif
bra Laddsf$3
8:
movew d0,d1
swap d1
movew d1,d0
swap d0
#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
#endif
bne 6b | if still more bits, go back to normal case
| otherwise we fall through
| Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
| signs are stored in a0 and a1).
Laddsf$3:
| Here we have to decide whether to add or subtract the numbers
#ifndef __mcoldfire__
exg d6,a0 | get signs back
exg d7,a1 | and save the exponents
#else
movel d6,d4
movel a0,d6
movel d4,a0
movel d7,d4
movel a1,d7
movel d4,a1
#endif
eorl d6,d7 | combine sign bits
bmi Lsubsf$0 | if negative a and b have opposite
| sign so we actually subtract the
| numbers
| Here we have both positive or both negative
#ifndef __mcoldfire__
exg d6,a0 | now we have the exponent in d6
#else
movel d6,d4
movel a0,d6
movel d4,a0
#endif
movel a0,d7 | and sign in d7
andl IMM (0x80000000),d7
| Here we do the addition.
addl d3,d1
addxl d2,d0 | addxl propagates the carry into the high long
| Note: now we have d2, d3, d4 and d5 to play with!
| Put the exponent, in the first byte, in d2, to use the "standard" rounding
| routines:
movel d6,d2
#ifndef __mcoldfire__
lsrw IMM (8),d2
#else
lsrl IMM (8),d2
#endif
| Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
btst IMM (FLT_MANT_DIG+1),d0
beq 1f
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
#endif
addl IMM (1),d2
1:
| Dispatch to the rounding routine selected by _fpCCR word 6; the
| rounding code returns to Laddsf$4 through a0.
lea pc@(Laddsf$4),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Laddsf$4:
| Put back the exponent, but check for overflow.
#ifndef __mcoldfire__
cmpw IMM (0xff),d2
#else
cmpl IMM (0xff),d2
#endif
bhi 1f
bclr IMM (FLT_MANT_DIG-1),d0 | clear the hidden bit before packing
#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
#endif
swap d2
orl d2,d0
bra Laddsf$ret
1:
moveq IMM (ADD),d5
bra Lf$overflow
Lsubsf$0:
| We are here if a > 0 and b < 0 (sign bits cleared).
| Here we do the subtraction.
movel d6,d7 | put sign in d7
andl IMM (0x80000000),d7
subl d3,d1 | result in d0-d1
subxl d2,d0 | subxl propagates the borrow into the high long
beq Laddsf$ret | if zero just exit
bpl 1f | if positive skip the following
bchg IMM (31),d7 | change sign bit in d7
negl d1 | negate the 64-bit value d0-d1:
negxl d0 | negl low, then negxl high (uses X flag)
1:
#ifndef __mcoldfire__
exg d2,a0 | now we have the exponent in d2
lsrw IMM (8),d2 | put it in the first byte
#else
movel d2,d4
movel a0,d2
movel d4,a0
lsrl IMM (8),d2 | put it in the first byte
#endif
| Now d0-d1 is positive and the sign bit is in d7.
| Note that we do not have to normalize, since in the subtraction bit
| #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
| the rounding routines themselves.
lea pc@(Lsubsf$1),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lsubsf$1:
| Put back the exponent (we can't have overflow!). '
bclr IMM (FLT_MANT_DIG-1),d0
#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
#endif
swap d2
orl d2,d0
bra Laddsf$ret
| If one of the numbers was too small (difference of exponents >=
| FLT_MANT_DIG+2) we return the other (and now we don't have to '
| check for finiteness or zero).
Laddsf$a$small:
| a is negligible: return b (the original second operand) unchanged.
movel a6@(12),d0
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@ | clear the exception-type word in _fpCCR
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
Laddsf$b$small:
| b is negligible: return a (the original first operand) unchanged.
movel a6@(8),d0
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
| If the numbers are denormalized remember to put exponent equal to 1.
Laddsf$a$den:
movel d5,d6 | d5 contains 0x01000000
swap d6 | so after swap d6 holds exponent 1 in its upper word
bra Laddsf$1
Laddsf$b$den:
movel d5,d7
swap d7
notl d4 | make d4 into a mask for the fraction
| (this was not executed after the jump)
bra Laddsf$2
| The rest is mainly code for the different results which can be
| returned (checking always for +/-INFINITY and NaN).
Laddsf$b:
| Return b (if a is zero).
movel a6@(12),d0
cmpl IMM (0x80000000),d0 | Check if b is -0
bne 1f
movel a0,d7
andl IMM (0x80000000),d7 | Use the sign of a
clrl d0
bra Laddsf$ret
Laddsf$a:
| Return a (if b is zero).
movel a6@(8),d0
1:
moveq IMM (ADD),d5
| We have to check for NaN and +/-infty.
movel d0,d7
andl IMM (0x80000000),d7 | put sign in d7
bclr IMM (31),d0 | clear sign
cmpl IMM (INFINITY),d0 | check for infty or NaN
bge 2f
movel d0,d0 | check for zero (we do this because we don't '
bne Laddsf$ret | want to return -0 by mistake
bclr IMM (31),d7 | if zero be sure to clear sign
bra Laddsf$ret | if everything OK just return
2:
| The value to be returned is either +/-infty or NaN
andl IMM (0x007fffff),d0 | check for NaN
bne Lf$inop | if mantissa not zero is NaN
bra Lf$infty
Laddsf$ret:
| Normal exit (a and b nonzero, result is not NaN nor +/-infty).
| We have to clear the exception flags (just the exception type).
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 | and return
rts
Laddsf$ret$den:
| Return a denormalized number (for addition we don't signal underflow) '
lsrl IMM (1),d0 | remember to shift right back once
bra Laddsf$ret | and return
| Note: when adding two floats of the same sign if either one is
| NaN we return NaN without regard to whether the other is finite or
| not. When subtracting them (i.e., when adding two numbers of
| opposite signs) things are more complicated: if both are INFINITY
| we return NaN, if only one is INFINITY and the other is NaN we return
| NaN, but if it is finite we return INFINITY with the corresponding sign.
Laddsf$nf:
moveq IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
movel a6@(8),d0 | get the numbers back (remember that we
movel a6@(12),d1 | did some processing already)
movel IMM (INFINITY),d4 | useful constant (INFINITY)
movel d0,d2 | save sign bits
movel d0,d7 | into d7 as well as we may need the sign
| bit before jumping to Lf$infty
movel d1,d3
bclr IMM (31),d0 | clear sign bits
bclr IMM (31),d1
| We know that one of them is either NaN or +/-INFINITY
| Check for NaN (if either one is NaN return NaN)
cmpl d4,d0 | check first a (d0)
bhi Lf$inop
cmpl d4,d1 | check now b (d1)
bhi Lf$inop
| Now comes the check for +/-INFINITY. We know that both are (maybe not
| finite) numbers, but we have to check if both are infinite whether we
| are adding or subtracting them.
eorl d3,d2 | to check sign bits
bmi 1f
andl IMM (0x80000000),d7 | get (common) sign bit
bra Lf$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they have to be infinite both, so we
| return NaN).
cmpl d1,d0 | are both infinite?
beq Lf$inop | if so return NaN
andl IMM (0x80000000),d7 | get a's sign bit '
cmpl d4,d0 | test now for infinity
beq Lf$infty | if a is INFINITY return with this sign
bchg IMM (31),d7 | else we know b is INFINITY and has
bra Lf$infty | the opposite sign
|=============================================================================
| __mulsf3
|=============================================================================
| float __mulsf3(float, float);
| Register roles: d0/d1 = operands then product; d2/d3 = exponents;
| d4 = hidden-bit mask; d5 = fraction mask then operation code;
| d6 = INFINITY mask; d7 = sign of the product.
FUNC(__mulsf3)
SYM (__mulsf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0
movel a6@(12),d1 | and b into d1
movel d0,d7 | d7 will hold the sign of the product
eorl d1,d7 |
andl IMM (0x80000000),d7
movel IMM (INFINITY),d6 | useful constant (+INFINITY)
movel d6,d5 | another (mask for fraction)
notl d5 |
movel IMM (0x00800000),d4 | this is to put hidden bit back
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d2 |
beq Lmulsf$a$0 | branch if a is zero
bclr IMM (31),d1 | get rid of b's sign bit '
movel d1,d3 |
beq Lmulsf$b$0 | branch if b is zero
cmpl d6,d0 | is a big?
bhi Lmulsf$inop | if a is NaN return NaN
beq Lmulsf$inf | if a is INFINITY we have to check b
cmpl d6,d1 | now compare b with INFINITY
bhi Lmulsf$inop | is b NaN?
beq Lmulsf$overflow | is b INFINITY?
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3.
andl d6,d2 | and isolate exponent in d2
beq Lmulsf$a$den | if exponent is zero we have a denormalized
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
#endif
Lmulsf$1: | number
andl d6,d3 |
beq Lmulsf$b$den |
andl d5,d1 |
orl d4,d1 |
swap d3 |
#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Lmulsf$2: |
#ifndef __mcoldfire__
addw d3,d2 | add exponents
subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#else
addl d3,d2 | add exponents
subl IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#endif
| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit FLT_MANT_DIG-1 set (even if they were
| denormalized to start with!), which means that in the product
| bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
| high long) is set.
| To do the multiplication let us move the number a little bit around ...
movel d1,d6 | second operand in d6
movel d0,d5 | first operand in d4-d5
movel IMM (0),d4
movel d4,d1 | the sums will go in d0-d1
movel d4,d0
| now bit FLT_MANT_DIG-1 becomes bit 31:
lsll IMM (31-FLT_MANT_DIG+1),d6
| Start the loop (we loop #FLT_MANT_DIG times):
| classic shift-and-add: each iteration doubles the partial product
| and conditionally adds the multiplicand when the next bit of b is set.
moveq IMM (FLT_MANT_DIG-1),d3
1: addl d1,d1 | shift sum
addxl d0,d0
lsll IMM (1),d6 | get bit bn
bcc 2f | if not set skip sum
addl d5,d1 | add a
addxl d4,d0
2:
#ifndef __mcoldfire__
dbf d3,1b | loop back
#else
subql IMM (1),d3
bpl 1b
#endif
| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
| FLT_MANT_DIG is set (to do the rounding).
#ifndef __mcoldfire__
rorl IMM (6),d1
swap d1
movew d1,d3
andw IMM (0x03ff),d3
andw IMM (0xfd00),d1
#else
movel d1,d3
lsll IMM (8),d1
addl d1,d1
addl d1,d1
moveq IMM (22),d5
lsrl d5,d3
orl d3,d1
andl IMM (0xfffffd00),d1
#endif
lsll IMM (8),d0 | shift product left by 10 (8 + two adds)
addl d0,d0
addl d0,d0
#ifndef __mcoldfire__
orw d3,d0
#else
orl d3,d0
#endif
moveq IMM (MULTIPLY),d5
btst IMM (FLT_MANT_DIG+1),d0
beq Lround$exit
#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d2
#else
lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
addql IMM (1),d2
#endif
bra Lround$exit
| Exceptional exits for __mulsf3: each loads the MULTIPLY operation code
| into d5 before branching to the shared Lf$* stubs.
Lmulsf$inop:
moveq IMM (MULTIPLY),d5
bra Lf$inop
Lmulsf$overflow:
moveq IMM (MULTIPLY),d5
bra Lf$overflow
Lmulsf$inf:
moveq IMM (MULTIPLY),d5
| If either is NaN return NaN; else both are (maybe infinite) numbers, so
| return INFINITY with the correct sign (which is in d7).
cmpl d6,d1 | is b NaN?
bhi Lf$inop | if so return NaN
bra Lf$overflow | else return +/-INFINITY
| If either number is zero return zero, unless the other is +/-INFINITY,
| or NaN, in which case we return NaN.
Lmulsf$b$0:
| Here d1 (==b) is zero.
movel a6@(8),d1 | get a again to check for non-finiteness
bra 1f
Lmulsf$a$0:
movel a6@(12),d1 | get b again to check for non-finiteness
1: bclr IMM (31),d1 | clear sign bit
cmpl IMM (INFINITY),d1 | and check for a large exponent
bge Lf$inop | if b is +/-INFINITY or NaN return NaN
movel d7,d0 | else return signed zero
PICLEA SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |
| If a number is denormalized we put an exponent of 1 but do not put the
| hidden bit back into the fraction; instead we shift left until bit 23
| (the hidden bit) is set, adjusting the exponent accordingly. We do this
| to ensure that the product of the fractions is close to 1.
Lmulsf$a$den:
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left (until bit 23 is set)
#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subql IMM (1),d2 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d0
bne Lmulsf$1 |
bra 1b | else loop back
Lmulsf$b$den:
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit 23 is set
#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subql IMM (1),d3 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d1
bne Lmulsf$2 |
bra 1b | else loop back
|=============================================================================
| __divsf3
|=============================================================================
| float __divsf3(float, float);
| Register roles mirror __mulsf3: d0/d1 = operands; d2/d3 = exponents;
| d4 = hidden-bit mask; d5 = fraction mask then operation code;
| d6 = INFINITY mask; d7 = sign of the quotient (saved in a0 during the
| division loop, which needs d6/d7 for the quotient bits).
FUNC(__divsf3)
SYM (__divsf3):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
movel a6@(8),d0 | get a into d0
movel a6@(12),d1 | and b into d1
movel d0,d7 | d7 will hold the sign of the result
eorl d1,d7 |
andl IMM (0x80000000),d7 |
movel IMM (INFINITY),d6 | useful constant (+INFINITY)
movel d6,d5 | another (mask for fraction)
notl d5 |
movel IMM (0x00800000),d4 | this is to put hidden bit back
bclr IMM (31),d0 | get rid of a's sign bit '
movel d0,d2 |
beq Ldivsf$a$0 | branch if a is zero
bclr IMM (31),d1 | get rid of b's sign bit '
movel d1,d3 |
beq Ldivsf$b$0 | branch if b is zero
cmpl d6,d0 | is a big?
bhi Ldivsf$inop | if a is NaN return NaN
beq Ldivsf$inf | if a is INFINITY we have to check b
cmpl d6,d1 | now compare b with INFINITY
bhi Ldivsf$inop | if b is NaN return NaN
beq Ldivsf$underflow
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3 and normalize the numbers to
| ensure that the ratio of the fractions is close to 1. We do this by
| making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
andl d6,d2 | and isolate exponent in d2
beq Ldivsf$a$den | if exponent is zero we have a denormalized
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
#endif
Ldivsf$1: |
andl d6,d3 |
beq Ldivsf$b$den |
andl d5,d1 |
orl d4,d1 |
swap d3 |
#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Ldivsf$2: |
#ifndef __mcoldfire__
subw d3,d2 | subtract exponents
addw IMM (F_BIAS),d2 | and add bias
#else
subl d3,d2 | subtract exponents
addl IMM (F_BIAS),d2 | and add bias
#endif
| We are now ready to do the division. We have prepared things in such a way
| that the ratio of the fractions will be less than 2 but greater than 1/2.
| At this point the registers in use are:
| d0 holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
| d1 holds b (second operand, bit FLT_MANT_DIG=1)
| d2 holds the difference of the exponents, corrected by the bias
| d7 holds the sign of the ratio
| d4, d5, d6 hold some constants
movel d7,a0 | d6-d7 will hold the ratio of the fractions
movel IMM (0),d6 |
movel d6,d7
| Non-restoring-style bit-by-bit division: each iteration compares the
| remainder against the divisor, sets a quotient bit on success, and
| doubles the remainder.
moveq IMM (FLT_MANT_DIG+1),d3
1: cmpl d0,d1 | is a < b?
bhi 2f |
bset d3,d6 | set a bit in d6
subl d1,d0 | if a >= b a <-- a-b
beq 3f | if a is zero, exit
2: addl d0,d0 | multiply a by 2
#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM (1),d3
bpl 1b
#endif
| Now we keep going to set the sticky bit ...
moveq IMM (FLT_MANT_DIG),d3
1: cmpl d0,d1
ble 2f
addl d0,d0
#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM(1),d3
bpl 1b
#endif
movel IMM (0),d1
bra 3f
2: movel IMM (0),d1
#ifndef __mcoldfire__
subw IMM (FLT_MANT_DIG),d3
addw IMM (31),d3
#else
subl IMM (FLT_MANT_DIG),d3
addl IMM (31),d3
#endif
bset d3,d1
3:
movel d6,d0 | put the ratio in d0-d1
movel a0,d7 | get sign back
| Because of the normalization we did before we are guaranteed that
| d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
| bit 25 could be set, and if it is not set then bit 24 is necessarily set.
btst IMM (FLT_MANT_DIG+1),d0
beq 1f | if it is not set, then bit 24 is set
lsrl IMM (1),d0 |
#ifndef __mcoldfire__
addw IMM (1),d2 |
#else
addl IMM (1),d2 |
#endif
1:
| Now round, check for over- and underflow, and exit.
moveq IMM (DIVIDE),d5
bra Lround$exit
| Exceptional exits for __divsf3: each loads the DIVIDE operation code
| into d5 before branching to the shared Lf$* stubs.
Ldivsf$inop:
moveq IMM (DIVIDE),d5
bra Lf$inop
Ldivsf$overflow:
moveq IMM (DIVIDE),d5
bra Lf$overflow
Ldivsf$underflow:
moveq IMM (DIVIDE),d5
bra Lf$underflow
Ldivsf$a$0:
moveq IMM (DIVIDE),d5
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero.
andl IMM (0x7fffffff),d1 | clear sign bit and test b
beq Lf$inop | if b is also zero return NaN
cmpl IMM (INFINITY),d1 | check for NaN
bhi Lf$inop |
movel d7,d0 | else return signed zero
PICLEA SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6 |
rts |
Ldivsf$b$0:
moveq IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
cmpl IMM (INFINITY),d0 | compare d0 with INFINITY
bhi Lf$inop | if larger it is NaN
bra Lf$div$0 | else signal DIVIDE_BY_ZERO
Ldivsf$inf:
moveq IMM (DIVIDE),d5
| If a is INFINITY we have to check b
cmpl IMM (INFINITY),d1 | compare b with INFINITY
bge Lf$inop | if b is NaN or INFINITY return NaN
bra Lf$overflow | else return overflow
| If a number is denormalized we put an exponent of 1 but do not put the
| bit back into the fraction.
Ldivsf$a$den:
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subl IMM (1),d2 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d0
bne Ldivsf$1
bra 1b
Ldivsf$b$den:
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subl IMM (1),d3 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d1
bne Ldivsf$2
bra 1b
Lround$exit:
| This is a common exit point for __mulsf3 and __divsf3.
| On entry: result mantissa in d0-d1, exponent in d2, sign in d7,
| operation code in d5.
| First check for underflow in the exponent:
#ifndef __mcoldfire__
cmpw IMM (-FLT_MANT_DIG-1),d2
#else
cmpl IMM (-FLT_MANT_DIG-1),d2
#endif
blt Lf$underflow
| It could happen that the exponent is less than 1, in which case the
| number is denormalized. In this case we shift right and adjust the
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
movel IMM (0),d6 | d6 is used temporarily
#ifndef __mcoldfire__
cmpw IMM (1),d2 | if the exponent is less than 1 we
#else
cmpl IMM (1),d2 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
#ifndef __mcoldfire__
addw IMM (1),d2 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
roxrl IMM (1),d6 | d6 collect bits we would lose otherwise
cmpw IMM (1),d2 | is the exponent 1 already?
#else
| ColdFire: emulate the 96-bit right shift d0:d1:d6 without roxrl.
addql IMM (1),d2 | adjust the exponent
lsrl IMM (1),d6
btst IMM (0),d1
beq 11f
bset IMM (31),d6
11: lsrl IMM (1),d1
btst IMM (0),d0
beq 10f
bset IMM (31),d1
10: lsrl IMM (1),d0
cmpl IMM (1),d2 | is the exponent 1 already?
#endif
beq 2f | if not loop back
bra 1b |
bra Lf$underflow | safety check, shouldn't execute '
2: orl d6,d1 | this is a trick so we don't lose '
| the extra bits which were flushed right
| Now call the rounding routine (which takes care of denormalized numbers):
| it returns to Lround$0 through a0.
lea pc@(Lround$0),a0 | to return from rounding routine
PICLEA SYM (_fpCCR),a1 | check the rounding mode
#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
#endif
bhi Lround$to$minus
blt Lround$to$zero
bra Lround$to$plus
Lround$0:
| Here we have a correctly rounded result (either normalized or denormalized).
| Here we should have either a normalized number or a denormalized one, and
| the exponent is necessarily larger or equal to 1 (so we don't have to '
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 255).
#ifndef __mcoldfire__
cmpw IMM (0x00ff),d2
#else
cmpl IMM (0x00ff),d2
#endif
bge Lf$overflow
| Now check for a denormalized number (exponent==0).
movew d2,d2 | self-move just sets the condition codes (tst)
beq Lf$den
1:
| Put back the exponents and sign and return.
#ifndef __mcoldfire__
lslw IMM (7),d2 | exponent back to fourth byte
#else
lsll IMM (7),d2 | exponent back to fourth byte
#endif
bclr IMM (FLT_MANT_DIG-1),d0 | clear hidden bit before packing
swap d0 | and put back exponent
#ifndef __mcoldfire__
orw d2,d0 |
#else
orl d2,d0
#endif
swap d0 |
orl d7,d0 | and sign also
PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@ | clear the exception-type word in _fpCCR
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
|=============================================================================
| __negsf2
|=============================================================================
| This is trivial and could be shorter if we didn't bother checking for NaN '
| and +/-INFINITY.
| float __negsf2(float);
| Flips the sign bit; +/-0 both come back as +0, NaN goes to Lf$inop,
| +/-INFINITY goes to Lf$infty with the (already negated) sign.
FUNC(__negsf2)
SYM (__negsf2):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (NEGATE),d5
movel a6@(8),d0 | get number to negate in d0
bchg IMM (31),d0 | negate
movel d0,d1 | make a positive copy
bclr IMM (31),d1 |
tstl d1 | check for zero
beq 2f | if zero (either sign) return +zero
cmpl IMM (INFINITY),d1 | compare to +INFINITY
blt 1f |
bhi Lf$inop | if larger (fraction not zero) is NaN
movel d0,d7 | else get sign and return INFINITY
andl IMM (0x80000000),d7
bra Lf$infty
1: PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
2: bclr IMM (31),d0
bra 1b
|=============================================================================
| __cmpsf2
|=============================================================================
| Return values for the comparison routines.
GREATER = 1
LESS = -1
EQUAL = 0
| int __cmpsf2_internal(float, float, int);
| Third argument is the value to return when either operand is a NaN
| (see Lcmpf$inop and the __cmpsf2 wrapper, which passes 1).
SYM (__cmpsf2_internal):
#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
moveq IMM (COMPARE),d5
movel a6@(8),d0 | get first operand
movel a6@(12),d1 | get second operand
| Check if either is NaN, and in that case return garbage and signal
| INVALID_OPERATION. Check also if either is zero, and clear the signs
| if necessary.
movel d0,d6
andl IMM (0x7fffffff),d0
beq Lcmpsf$a$0
cmpl IMM (0x7f800000),d0
bhi Lcmpf$inop
Lcmpsf$1:
movel d1,d7
andl IMM (0x7fffffff),d1
beq Lcmpsf$b$0
cmpl IMM (0x7f800000),d1
bhi Lcmpf$inop
Lcmpsf$2:
| Check the signs
eorl d6,d7
bpl 1f
| If the signs are not equal check if a >= 0
tstl d6
bpl Lcmpsf$a$gt$b | if (a >= 0 && b < 0) => a > b
bmi Lcmpsf$b$gt$a | if (a < 0 && b >= 0) => a < b
1:
| If the signs are equal check for < 0
tstl d6
bpl 1f
| If both are negative exchange them
#ifndef __mcoldfire__
exg d0,d1
#else
movel d0,d7
movel d1,d0
movel d7,d1
#endif
1:
| Now that they are positive we just compare them as longs (does this also
| work for denormalized numbers?).
cmpl d0,d1
bhi Lcmpsf$b$gt$a | |b| > |a|
bne Lcmpsf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
#endif
unlk a6
rts
Lcmpsf$a$gt$b:
movel IMM (GREATER),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpsf$b$gt$a:
movel IMM (LESS),d0
#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
| XXX if frame pointer is ever removed, stack pointer must
| be adjusted here.
#endif
unlk a6
rts
Lcmpsf$a$0:
| a is +/-0: force its sign copy (d6) positive so -0 compares equal to +0.
bclr IMM (31),d6
bra Lcmpsf$1
Lcmpsf$b$0:
bclr IMM (31),d7
bra Lcmpsf$2
Lcmpf$inop:
| NaN operand: return the caller-supplied third argument and signal
| INVALID_OPERATION through the common exception handler.
movl a6@(16),d0
moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (SINGLE_FLOAT),d6
PICJUMP $_exception_handler
| int __cmpsf2(float, float);
| Public entry point: re-pushes both operands and calls
| __cmpsf2_internal with 1 as the value returned for unordered (NaN)
| operands.
FUNC(__cmpsf2)
SYM (__cmpsf2):
link a6,IMM (0)
pea 1 | unordered-operands result
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
|=============================================================================
| rounding routines
|=============================================================================
| The rounding routines expect the number to be normalized in registers
| d0-d1, with the exponent in register d2. They assume that the
| exponent is larger or equal to 1. They return a properly normalized number
| if possible, and a denormalized number otherwise. The exponent is returned
| in d2.
| The return address is expected in a0 (each routine ends with "jmp a0@").
Lround$to$nearest:
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
| Here we assume that the exponent is not too small (this should be checked
| before entering the rounding routine), but the number could be denormalized.
| Check for denormalized numbers:
1: btst IMM (FLT_MANT_DIG),d0
bne 2f | if set the number is normalized
| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -F_BIAS+1).
#ifndef __mcoldfire__
cmpw IMM (1),d2 | remember that the exponent is at least one
#else
cmpl IMM (1),d2 | remember that the exponent is at least one
#endif
beq 2f | an exponent of one means denormalized
addl d1,d1 | else shift and adjust the exponent
addxl d0,d0 | (32-bit shift-left of the d0:d1 pair via add/addx)
#ifndef __mcoldfire__
dbra d2,1b |
#else
subql IMM (1),d2
bpl 1b
#endif
2:
| Now round: we do it as follows: after the shifting we can write the
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
| If delta < 1, do nothing. If delta > 1, add 1 to f.
| If delta == 1, we make sure the rounded number will be even (odd?)
| (after shifting).
btst IMM (0),d0 | is delta < 1?
beq 2f | if so, do not do anything
tstl d1 | is delta == 1?
bne 1f | if so round to even
movel d0,d1 |
andl IMM (2),d1 | bit 1 is the last significant bit
addl d1,d0 | tie case: round to even by adding the parity bit
bra 2f |
1: movel IMM (1),d1 | else add 1
addl d1,d0 |
| Shift right once (because we used bit #FLT_MANT_DIG!).
2: lsrl IMM (1),d0
| Now check again bit #FLT_MANT_DIG (rounding could have produced a
| 'fraction overflow' ...).
btst IMM (FLT_MANT_DIG),d0
beq 1f
lsrl IMM (1),d0
#ifndef __mcoldfire__
addw IMM (1),d2
#else
addql IMM (1),d2
#endif
1:
| If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
| have to put the exponent to zero and return a denormalized number.
btst IMM (FLT_MANT_DIG-1),d0
beq 1f
jmp a0@
1: movel IMM (0),d2
jmp a0@
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
| Truncating modes need no fraction adjustment here: just return.
jmp a0@
#endif /* L_float */
| gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
| __ledf2, __ltdf2 to all return the same value as a direct call to
| __cmpdf2 would. In this implementation, each of these routines
| simply calls __cmpdf2. It would be more efficient to give the
| __cmpdf2 routine several names, but separating them out will make it
| easier to write efficient versions of these routines someday.
| If the operands compare unordered, __gtdf2 and __gedf2 return -1.
| The other routines return 1.
| Each wrapper pushes its unordered-operands result (1 or -1) as the
| third argument of __cmpdf2_internal, followed by the two doubles
| (four 32-bit words from a6@(8)..a6@(23)).
#ifdef L_eqdf2
.text
FUNC(__eqdf2)
.globl SYM (__eqdf2)
SYM (__eqdf2):
link a6,IMM (0)
pea 1 | unordered => 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_eqdf2 */
#ifdef L_nedf2
.text
FUNC(__nedf2)
.globl SYM (__nedf2)
SYM (__nedf2):
link a6,IMM (0)
pea 1 | unordered => 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_nedf2 */
#ifdef L_gtdf2
.text
FUNC(__gtdf2)
.globl SYM (__gtdf2)
SYM (__gtdf2):
link a6,IMM (0)
pea -1 | unordered => -1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_gtdf2 */
#ifdef L_gedf2
.text
FUNC(__gedf2)
.globl SYM (__gedf2)
SYM (__gedf2):
link a6,IMM (0)
pea -1 | unordered => -1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_gedf2 */
#ifdef L_ltdf2
.text
FUNC(__ltdf2)
.globl SYM (__ltdf2)
SYM (__ltdf2):
link a6,IMM (0)
pea 1 | unordered => 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_ltdf2 */
#ifdef L_ledf2
.text
FUNC(__ledf2)
.globl SYM (__ledf2)
SYM (__ledf2):
link a6,IMM (0)
pea 1 | unordered => 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_ledf2 */
| The comments above about __eqdf2, et. al., also apply to __eqsf2,
| et. al., except that the latter call __cmpsf2 rather than __cmpdf2.
| Each wrapper pushes its unordered-operands result (1, or -1 for
| __gtsf2/__gesf2) followed by the two float operands.
#ifdef L_eqsf2
.text
FUNC(__eqsf2)
.globl SYM (__eqsf2)
SYM (__eqsf2):
link a6,IMM (0)
pea 1 | unordered => 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_eqsf2 */
#ifdef L_nesf2
.text
FUNC(__nesf2)
.globl SYM (__nesf2)
SYM (__nesf2):
link a6,IMM (0)
pea 1 | unordered => 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_nesf2 */
#ifdef L_gtsf2
.text
FUNC(__gtsf2)
.globl SYM (__gtsf2)
SYM (__gtsf2):
link a6,IMM (0)
pea -1 | unordered => -1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_gtsf2 */
#ifdef L_gesf2
.text
FUNC(__gesf2)
.globl SYM (__gesf2)
SYM (__gesf2):
link a6,IMM (0)
pea -1 | unordered => -1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_gesf2 */
#ifdef L_ltsf2
.text
FUNC(__ltsf2)
.globl SYM (__ltsf2)
SYM (__ltsf2):
link a6,IMM (0)
pea 1 | unordered => 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_ltsf2 */
#ifdef L_lesf2
.text
FUNC(__lesf2)
.globl SYM (__lesf2)
SYM (__lesf2):
link a6,IMM (0)
pea 1 | unordered => 1
movl a6@(12),sp@-
movl a6@(8),sp@-
PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_lesf2 */
#if defined (__ELF__) && defined (__linux__)
/* Make stack non-executable for ELF linux targets. */
.section .note.GNU-stack,"",@progbits
#endif
|
4ms/metamodule-plugin-sdk
| 1,802
|
plugin-libc/libgcc/config/rl78/subdi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
START_FUNC ___subdi3
;; ___subdi3: 64-bit subtract.
;; Result (r8..r15) = (DI)[sp+4] - (DI)[sp+12]
;; Low word via SUBW, bytes 2..5 via carry-propagating 8-bit SUBCs,
;; top word via SUBW with a manual borrow adjustment.
movw hl, sp ; use HL-based addressing (allows for direct subw)
movw ax, [hl+4]
subw ax, [hl+12] ; low 16 bits; CY = borrow out
movw r8, ax
mov a, [hl+6] ; middle bytes of the result are determined using 8-bit
subc a, [hl+14] ; SUBC insns which both account for and update the carry bit
mov r10, a ; (no SUBWC instruction is available)
mov a, [hl+7]
subc a, [hl+15]
mov r11, a
mov a, [hl+8]
subc a, [hl+16]
mov r12, a
mov a, [hl+9]
subc a, [hl+17]
mov r13, a
movw ax, [hl+10]
sknc ; account for the possible carry from the
decw ax ; latest 8-bit operation
subw ax, [hl+18] ; top 16 bits
movw r14, ax
ret
END_FUNC ___subdi3
|
4ms/metamodule-plugin-sdk
| 2,044
|
plugin-libc/libgcc/config/rl78/smindi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
START_FUNC ___smindi3
;; ___smindi3: signed 64-bit minimum.
;; r8..r15 = min((DI)[sp+4], (DI)[sp+12])
; copy first argument/operand to the output registers
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
movw ax, [sp+8]
movw r12, ax
movw ax, [sp+10]
movw r14, ax
; use 16-bit compares from the most significant words downto the least significant ones
movw ax, [sp+18]
cmpw ax, r14 ; CY = B.hi < A.hi (unsigned), Z = equal
xor1 CY, a.7 ; first compare accounts for the
xor1 CY, r15.7 ; sign bits of the two operands
bc $.L1 ; B < A (signed): result is B
bnz $.L2 ; B > A: keep A
movw ax, [sp+16] ; high words equal: compare the lower words unsigned
cmpw ax, r12
bc $.L1
bnz $.L2
movw ax, [sp+14]
cmpw ax, r10
bc $.L1
bnz $.L2
movw ax, [sp+12]
cmpw ax, r8
bc $.L1
ret ; equal (or A <= B): keep A
.L1:
; copy second argument/operand to the output registers
movw ax, [sp+12]
movw r8, ax
movw ax, [sp+14]
movw r10, ax
movw ax, [sp+16]
movw r12, ax
movw ax, [sp+18]
movw r14, ax
.L2:
ret
END_FUNC ___smindi3
|
4ms/metamodule-plugin-sdk
| 2,246
|
plugin-libc/libgcc/config/rl78/lshrsi3.S
|
; Copyright (C) 2011-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
START_FUNC ___lshrsi3
;; Logical (zero-fill) 32-bit shift right: r8..r11 = (SI)[sp+4] >> [sp+8]
;; input:
;;
;; [zero]
;; [count] <= $sp+8
;; [in MSB]
;; [in]
;; [in]
;; [in LSB] <- $sp+4
;; output:
;;
;; [r8..r11] result
;; registers:
;;
;; AX - temp for shift/rotate
;; B - count
mov a, [sp+8] ; A now contains the count
cmp a, #0x20
bc $.Lcount_is_normal ; CY => count < 32
;; count is out of bounds, just return zero.
movw r8, #0
movw r10, #0
ret
.Lcount_is_normal:
cmp0 a
bnz $.Lcount_is_nonzero
;; count is zero, just copy IN to OUT
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
ret
.Lcount_is_nonzero:
mov b, a ; B now contains the count also
bf a.4, $.Lcount_lt_16 ; bit 4 clear => count < 16
;; count >= 16, shift 16 at a time.
movw r10, #0
movw ax, [sp+6]
movw r8, ax
mov a, b
and a, #0x0f ; remaining shift count (count mod 16)
sknz
ret ; count was exactly 16: done
mov b, a ; B now contains the remaining count
inc b ; pre-bias: the loop decrements before shifting
br $.Lloop_top
.Lcount_lt_16:
;; count is nonzero. Do one shift here; the loop below does the rest.
movw ax, [sp+6]
shrw ax,1 ; shift the high word; CY = bit shifted out
movw r10, ax
mov a, [sp+5]
rorc a,1 ; rotate CY into the middle byte
mov r9, a
mov a, [sp+4]
rorc a,1 ; and on into the low byte
mov r8, a
;; we did one shift above; do as many more as we need now.
.Lloop_top:
dec b
sknz
ret
movw ax, r10
shrw ax,1
movw r10, ax
mov a, r9
rorc a,1
mov r9, a
mov a, r8
rorc a,1
mov r8, a
br $.Lloop_top
END_FUNC ___lshrsi3
|
4ms/metamodule-plugin-sdk
| 1,937
|
plugin-libc/libgcc/config/rl78/umindi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
START_FUNC ___umindi3
;; ___umindi3: unsigned 64-bit minimum.
;; r8..r15 = min((UDI)[sp+4], (UDI)[sp+12])
; copy first argument/operand to the output registers
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
movw ax, [sp+8]
movw r12, ax
movw ax, [sp+10]
movw r14, ax
; use 16-bit compares from the most significant words downto the least significant ones
movw ax, [sp+18]
cmpw ax, r14 ; CY = B.hi < A.hi (plain unsigned compare)
bc $.L1 ; B < A: result is B
bnz $.L2 ; B > A: keep A
movw ax, [sp+16] ; words equal so far: compare the next pair
cmpw ax, r12
bc $.L1
bnz $.L2
movw ax, [sp+14]
cmpw ax, r10
bc $.L1
bnz $.L2
movw ax, [sp+12]
cmpw ax, r8
bc $.L1
ret ; equal (or A <= B): keep A
.L1:
; copy second argument/operand to the output registers
movw ax, [sp+12]
movw r8, ax
movw ax, [sp+14]
movw r10, ax
movw ax, [sp+16]
movw r12, ax
movw ax, [sp+18]
movw r14, ax
.L2:
ret
END_FUNC ___umindi3
|
4ms/metamodule-plugin-sdk
| 1,583
|
plugin-libc/libgcc/config/rl78/anddi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
START_FUNC ___anddi3
;; ___anddi3: 64-bit bitwise AND.
;; r8..r15 = (DI)[sp+4] & (DI)[sp+12]
;; Every result byte depends only on the matching pair of input bytes,
;; so the eight bytes are combined independently, here from the most
;; significant one down to the least significant one.
movw hl, sp
mov a, [hl+11]
and a, [hl+19]
mov r15, a
mov a, [hl+10]
and a, [hl+18]
mov r14, a
mov a, [hl+9]
and a, [hl+17]
mov r13, a
mov a, [hl+8]
and a, [hl+16]
mov r12, a
mov a, [hl+7]
and a, [hl+15]
mov r11, a
mov a, [hl+6]
and a, [hl+14]
mov r10, a
mov a, [hl+5]
and a, [hl+13]
mov r9, a
mov a, [hl+4]
and a, [hl+12]
mov r8, a
ret
END_FUNC ___anddi3
|
4ms/metamodule-plugin-sdk
| 5,177
|
plugin-libc/libgcc/config/rl78/divmodqi.S
|
/* QImode div/mod functions for the GCC support library for the Renesas RL78 processors.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "vregs.h"
; MAKE_GENERIC which,need_result -- emit one generic unsigned 8-bit
; divide/modulus worker (shift-and-subtract algorithm).
;   need_result=1 emits __generic_qidiv  (quotient  in r8)
;   need_result=0 emits __generic_qimod  (remainder in r8)
; Both expect HL to point at the caller's frame: numerator at [hl+4],
; denominator at [hl+6].
.macro MAKE_GENERIC which,need_result
.if \need_result
quot = r8
num = r10
den = r12
bit = r14
.else
num = r8
quot = r10
den = r12
bit = r14
.endif
; NB: these #defines supersede the vreg equates above -- bit and den
; live in B and C so the combined word BC ("bitden") can be shifted
; right in a single MOVW/SHRW/MOVW sequence.
#define bit b
#define den c
#define bitden bc
START_FUNC __generic_qidivmod\which
num_lt_den\which:
; num < den: quotient is 0, remainder is the numerator itself.
.if \need_result
mov r8, #0
.else
mov a, [hl+4]
mov r8, a
.endif
ret
num_eq_den\which:
; num == den: quotient 1, remainder 0.
.if \need_result
mov r8, #1
.else
mov r8, #0
.endif
ret
den_is_zero\which:
; division by zero: return 0 (no trap on this target).
mov r8, #0x00
ret
;; These routines leave DE alone - the signed functions use DE
;; to store sign information that must remain intact
.if \need_result
.global __generic_qidiv
__generic_qidiv:
.else
.global __generic_qimod
__generic_qimod:
.endif
;; (quot,rem) = 4[hl] /% 6[hl]
mov a, [hl+4] ; num
cmp a, [hl+6] ; den
bz $num_eq_den\which
bnh $num_lt_den\which
;; copy numerator
; mov a, [hl+4] ; already there from above
mov num, a
;; copy denominator
mov a, [hl+6]
mov den, a
cmp0 den
bz $den_is_zero\which
den_not_zero\which:
.if \need_result
;; zero out quot
mov quot, #0
.endif
;; initialize bit to 1
mov bit, #1
; while (den < num && !(den & (1L << BITS_MINUS_1)))
shift_den_bit\which:
; SDB_ONE: one alignment step -- stop when den's top bit is set (a
; further shift would overflow) or when den has grown past num.
.macro SDB_ONE\which
mov a, den
mov1 cy,a.7
bc $enter_main_loop\which
cmp a, num
bh $enter_main_loop\which
;; den <<= 1
; mov a, den ; already has it from the cmp above
shl a, 1
mov den, a
;; bit <<= 1
shl bit, 1
.endm
; unrolled twice per pass through the alignment loop
SDB_ONE\which
SDB_ONE\which
br $shift_den_bit\which
main_loop\which:
;; if (num >= den) (cmp den > num)
mov a, den
cmp a, num
bh $next_loop\which
;; num -= den
mov a, num
sub a, den
mov num, a
.if \need_result
;; res |= bit
mov a, quot
or a, bit
mov quot, a
.endif
next_loop\which:
;; den, bit >>= 1
movw ax, bitden
shrw ax, 1
movw bitden, ax
enter_main_loop\which:
cmp0 bit
bnz $main_loop\which
main_loop_done\which:
ret
END_FUNC __generic_qidivmod\which
.endm
;----------------------------------------------------------------------
; Instantiate the generic workers: _d returns the quotient,
; _m returns the remainder.
MAKE_GENERIC _d 1
MAKE_GENERIC _m 0
;----------------------------------------------------------------------
; Unsigned entry points: just aim HL at the caller's frame and
; tail-branch into the generic worker (result comes back in r8).
START_FUNC ___udivqi3
;; r8 = 4[sp] / 6[sp]
movw hl, sp
br $!__generic_qidiv
END_FUNC ___udivqi3
START_FUNC ___umodqi3
;; r8 = 4[sp] % 6[sp]
movw hl, sp
br $!__generic_qimod
END_FUNC ___umodqi3
;----------------------------------------------------------------------
; NEG_AX: negate in place the byte that AX points at:
;   *(int8_t *)AX = -*(int8_t *)AX
; Clobbers A and HL.
.macro NEG_AX
movw hl, ax
mov a, #0
sub a, [hl]
mov [hl], a
.endm
;----------------------------------------------------------------------
START_FUNC ___divqi3
;; Signed 8-bit divide: r8 = (int8_t)4[sp] / (int8_t)6[sp]
;; Strategy: record the operand signs in D/E, negate negative operands
;; in place on the caller's stack, run the unsigned divide, then fix
;; the sign of the quotient and restore the stack'ed operands.
;; r8 = 4[sp] / 6[sp]
movw hl, sp
movw de, #0 ; D = "num was negative", E = "den was negative"
mov a, [sp+4]
mov1 cy, a.7 ; sign bit of the numerator
bc $div_signed_num
mov a, [sp+6]
mov1 cy, a.7 ; sign bit of the denominator
bc $div_signed_den
br $!__generic_qidiv
div_signed_num:
;; neg [sp+4]
mov a, #0
sub a, [hl+4]
mov [hl+4], a
mov d, #1
mov a, [sp+6]
mov1 cy, a.7 ; sign bit of the denominator (bug fix: was a.6, which
; tested bit 6 and mishandled denominators in 0x40..0x7f)
bnc $div_unsigned_den
div_signed_den:
;; neg [sp+6]
mov a, #0
sub a, [hl+6]
mov [hl+6], a
mov e, #1
div_unsigned_den:
call $!__generic_qidiv
mov a, d
cmp0 a
bz $div_skip_restore_num
;; We have to restore the numerator [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov a, d
div_skip_restore_num:
xor a, e ; quotient is negative iff exactly one operand was
bz $div_no_neg
movw ax, #r8
NEG_AX
div_no_neg:
mov a, e
cmp0 a
bz $div_skip_restore_den
;; Restore the denominator [sp+6]
movw ax, sp
addw ax, #6
NEG_AX
div_skip_restore_den:
ret
END_FUNC ___divqi3
START_FUNC ___modqi3
;; Signed 8-bit modulus: r8 = (int8_t)4[sp] % (int8_t)6[sp]
;; Like ___divqi3, but the remainder takes the sign of the numerator
;; only (the denominator's sign never affects the result's sign).
;; r8 = 4[sp] % 6[sp]
movw hl, sp
movw de, #0 ; D = "num was negative", E = "den was negative"
mov a, [hl+4]
mov1 cy, a.7 ; sign bit of the numerator
bc $mod_signed_num
mov a, [hl+6]
mov1 cy, a.7 ; sign bit of the denominator
bc $mod_signed_den
br $!__generic_qimod
mod_signed_num:
;; neg [sp+4]
mov a, #0
sub a, [hl+4]
mov [hl+4], a
mov d, #1
mov a, [hl+6]
mov1 cy, a.7
bnc $mod_unsigned_den
mod_signed_den:
;; neg [sp+6]
mov a, #0
sub a, [hl+6]
mov [hl+6], a
mov e, #1
mod_unsigned_den:
call $!__generic_qimod
mov a, d
cmp0 a
bz $mod_no_neg
; numerator was negative: negate the remainder ...
mov a, #0
sub a, r8
mov r8, a
;; Also restore numerator
movw ax, sp
addw ax, #4
NEG_AX
mod_no_neg:
mov a, e
cmp0 a
bz $mod_skip_restore_den
; ... and restore the negated denominator on the caller's stack
movw ax, sp
addw ax, #6
NEG_AX
mod_skip_restore_den:
ret
END_FUNC ___modqi3
|
4ms/metamodule-plugin-sdk
| 4,225
|
plugin-libc/libgcc/config/rl78/cmpsi2.S
|
; Copyright (C) 2011-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; int __cmpsi2 (signed long A, signed long B)
;;
;; Performs a signed comparison of A and B.
;; If A is less than B it returns 0. If A is greater
;; than B it returns 2. If they are equal it returns 1.
START_FUNC ___cmpsi2
;; A is at [sp+4]
;; B is at [sp+8]
;; Result put in R8
;; Initialise default return value.
onew bc ; BC = 1 ("equal") until a difference is found
;; Compare the high words.
movw ax, [sp + 10]
movw de, ax
movw ax, [sp + 6]
cmpw ax, de ; CY = A.hi < B.hi (unsigned), Z = equal
skz ; equal: skip the branch and compare the low words
br !!.Lconvert_to_signed
.Lcompare_bottom_words:
;; The top words are equal - compare the bottom words.
;; Note - code from __ucmpsi2 branches into here.
movw ax, [sp + 8]
movw de, ax
movw ax, [sp + 4]
cmpw ax, de
skz ; bug fix: was SKNZ, which inverted the result -- the branch
; below must be taken only when the low words DIFFER
br !!.Lless_than_or_greater_than
;; The words are equal - return 1.
;; Note - we could branch to the return code at the end of the
;; function but a branch instruction takes 4 bytes, and the
;; return sequence itself is only 4 bytes long...
movw ax, bc
movw r8, ax
ret
.Lconvert_to_signed:
;; The top words are different. Unfortunately the comparison
;; is always unsigned, so to get a signed result we XOR the CY
;; flag with the top bits of AX and DE.
xor1 cy, a.7
mov a, d
xor1 cy, a.7
;; Fall through.
.Lless_than_or_greater_than:
;; We now have a signed less than/greater than result in CY.
;; Return 0 for less than, 2 for greater than.
;; Note - code from __ucmpsi2 branches into here.
incw bc ; BC: 1 -> 2 (INCW leaves CY untouched)
sknc ; CY clear (A > B): keep 2; CY set (A < B): clear to 0
clrw bc
;; Get the result value, currently in BC, into r8
movw ax, bc
movw r8, ax
ret
END_FUNC ___cmpsi2
;; ------------------------------------------------------
;; int __ucmpsi2 (unsigned long A, unsigned long B)
;;
;; Performs an unsigned comparison of A and B.
;; If A is less than B it returns 0. If A is greater
;; than B it returns 2. If they are equal it returns 1.
;; Shares its tail with __cmpsi2 above: both routines must stay in
;; the same section for the local-label branches below to resolve.
START_FUNC ___ucmpsi2
;; A is at [sp+4]
;; B is at [sp+8]
;; Result put in R8..R9
;; Initialise default return value.
onew bc
;; Compare the high words.
movw ax, [sp + 10]
movw de, ax
movw ax, [sp + 6]
cmpw ax, de ; CY = A.hi < B.hi, Z = equal
skz ; equal: skip the first branch and compare the low words
;; Note: These branches go into the __cmpsi2 code!
br !!.Lless_than_or_greater_than
br !!.Lcompare_bottom_words
END_FUNC ___ucmpsi2
;; ------------------------------------------------------
;; signed int __gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
;; Result is negative if S1 is less than S2,
;; positive if S1 is greater, 0 if S1 and S2 are equal.
START_FUNC __gcc_bcmp
;; S1 is at [sp+4]
;; S2 is at [sp+6]
;; SIZE is at [sp+8]
;; Result in r8/r9
movw r10, #0 ; r10 = byte offset into both buffers
1:
;; Compare R10 against the SIZE parameter
movw ax, [sp+8]
subw ax, r10
sknz
br !!1f ; all SIZE bytes matched: exit with AX == 0
;; Load S2[r10] into R8
movw ax, [sp+6]
addw ax, r10
movw hl, ax
mov a, [hl]
mov r8, a
;; Load S1[r10] into A
movw ax, [sp+4]
addw ax, r10
movw hl, ax
mov a, [hl]
;; Increment offset
incw r10
;; Compare loaded bytes
cmp a, r8
sknz
br !!1b ; bytes equal: next iteration
;; They differ. Subtract *S2 from *S1 and return as the result.
mov x, a ; AX = *S1 zero-extended to 16 bits
mov a, #0
mov r9, #0 ; zero-extend *S2 in R8/R9 too
subw ax, r8
1:
movw r8, ax
ret
END_FUNC __gcc_bcmp
|
4ms/metamodule-plugin-sdk
| 2,044
|
plugin-libc/libgcc/config/rl78/smaxdi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
START_FUNC ___smaxdi3
;; ___smaxdi3: signed 64-bit maximum.
;; r8..r15 = max((DI)[sp+4], (DI)[sp+12])
; copy first argument/operand to the output registers
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
movw ax, [sp+8]
movw r12, ax
movw ax, [sp+10]
movw r14, ax
; use 16-bit compares from the most significant words downto the least significant ones
movw ax, [sp+18]
cmpw ax, r14 ; CY = B.hi < A.hi (unsigned), Z = equal
xor1 CY, a.7 ; first compare accounts for the
xor1 CY, r15.7 ; sign bits of the two operands
bh $.L1 ; B > A (signed): result is B
bnz $.L2 ; B < A: keep A
movw ax, [sp+16] ; high words equal: compare the lower words unsigned
cmpw ax, r12
bh $.L1
bnz $.L2
movw ax, [sp+14]
cmpw ax, r10
bh $.L1
bnz $.L2
movw ax, [sp+12]
cmpw ax, r8
bh $.L1
ret ; equal (or A >= B): keep A
.L1:
; copy second argument/operand to the output registers
movw ax, [sp+12]
movw r8, ax
movw ax, [sp+14]
movw r10, ax
movw ax, [sp+16]
movw r12, ax
movw ax, [sp+18]
movw r14, ax
.L2:
ret
END_FUNC ___smaxdi3
|
4ms/metamodule-plugin-sdk
| 15,937
|
plugin-libc/libgcc/config/rl78/fpmath-sf.S
|
; SF format is:
;
; [sign] 1.[23bits] E[8bits(n-127)]
;
; SEEEEEEE Emmmmmmm mmmmmmmm mmmmmmmm
;
; [A+0] mmmmmmmm
; [A+1] mmmmmmmm
; [A+2] Emmmmmmm
; [A+3] SEEEEEEE
;
; Special values (xxx != 0):
;
; r11 r10 r9 r8
; [HL+3] [HL+2] [HL+1] [HL+0]
; s1111111 10000000 00000000 00000000 infinity
; s1111111 1xxxxxxx xxxxxxxx xxxxxxxx NaN
; s0000000 00000000 00000000 00000000 zero
; s0000000 0xxxxxxx xxxxxxxx xxxxxxxx denormals
;
; Note that CMPtype is "signed char" for rl78
;
#include "vregs.h"
#define Z PSW.6
; External Functions:
;
; __int_isnan [HL] -> Z if NaN
; __int_iszero [HL] -> Z if zero
START_FUNC __int_isinf
;; [HL] points to value, returns Z if it's #Inf
;; Clobbers A and X.
mov a, [hl+2]
and a, #0x80 ; bit 7 of byte 2 = least significant exponent bit
mov x, a
mov a, [hl+3]
and a, #0x7f ; top 7 exponent bits, sign stripped
cmpw ax, #0x7f80 ; is the exponent field all ones?
skz
ret ; exponent not all ones: neither Inf nor NaN (returns NZ)
mov a, [hl+2]
and a, #0x7f
or a, [hl+1]
or a, [hl] ; Z iff the mantissa is all zero, i.e. the value is Inf
ret
END_FUNC __int_isinf
#define A_SIGN [hl+0] /* byte */
#define A_EXP [hl+2] /* word */
#define A_FRAC_L [hl+4] /* word */
#define A_FRAC_LH [hl+5] /* byte */
#define A_FRAC_H [hl+6] /* word or byte */
#define A_FRAC_HH [hl+7] /* byte */
#define B_SIGN [hl+8]
#define B_EXP [hl+10]
#define B_FRAC_L [hl+12]
#define B_FRAC_LH [hl+13]
#define B_FRAC_H [hl+14]
#define B_FRAC_HH [hl+15]
;; _int_unpack_sf: expand a packed 32-bit SFmode value into the 8-byte
;; working record ("A" layout) used by add/mul/div:
;;   [HL+0] sign byte (0x00 positive / 0xff negative),
;;   [HL+2] unbiased exponent (signed word),
;;   [HL+4..7] mantissa with explicit leading 1, pre-shifted left one
;;             bit so bit 0 acts as a rounding bit later.
;; In:       DE = pointer to packed float, HL = pointer to output record
;; Clobbers: A, X, B, C, flags
START_FUNC _int_unpack_sf
;; convert 32-bit SFmode [DE] to 6-byte struct [HL] ("A")
mov a, [de+3]
sar a, 7 ; arithmetic shift replicates the sign bit through A
mov A_SIGN, a
movw ax, [de+2]
and a, #0x7f ; strip the sign bit
shrw ax, 7 ; AX = biased 8-bit exponent
movw bc, ax ; remember if the exponent is all zeros
subw ax, #127 ; exponent is now non-biased
movw A_EXP, ax
movw ax, [de]
movw A_FRAC_L, ax
mov a, [de+2]
and a, #0x7f ; low 7 mantissa bits of byte 2
cmp0 c ; if the exp is all zeros, it's denormal
skz
or a, #0x80 ; normal number: make the implicit leading 1 explicit
mov A_FRAC_H, a
mov a, #0
mov A_FRAC_HH, a
;; rounding-bit-shift: shift the 24-bit mantissa left one bit so
;; later right-shifts keep a rounding bit in bit 0.
movw ax, A_FRAC_L
shlw ax, 1
movw A_FRAC_L, ax
mov a, A_FRAC_H
rolc a, 1
mov A_FRAC_H, a
mov a, A_FRAC_HH
rolc a, 1
mov A_FRAC_HH, a
ret
END_FUNC _int_unpack_sf
; func(SF a,SF b)
; [SP+4..7] a
; [SP+8..11] b
;; ___subsf3: SFmode subtraction, a - b.
;; In:  a at [SP+4..7], b at [SP+8..11] (after the call's return addr)
;; Out: result in R8..R11 (r8/r10 word pair)
START_FUNC ___subsf3
;; a - b => a + (-b)
;; Note - we cannot just change the sign of B on the stack and
;; then fall through into __addsf3. The stack'ed value may be
;; used again (it was created by our caller after all). Instead
;; we have to allocate some stack space of our own, copy A and B,
;; change the sign of B, call __addsf3, release the allocated stack
;; and then return.
subw sp, #8
;; Copy a to [SP+0..3] of the new frame.
movw ax, [sp+4+8]
movw [sp], ax
movw ax, [sp+4+2+8]
movw [sp+2], ax
;; Copy b to [SP+4..7], flipping its sign bit (top byte ^ 0x80).
movw ax, [sp+4+4+8]
movw [sp+4], ax
mov a, [sp+4+6+8]
mov [sp+6], a
mov a, [sp+4+7+8]
xor a, #0x80
mov [sp+7], a
;; After this call pushes its return address, ___addsf3 sees the
;; copies at its own [SP+4] and [SP+8].
call $!___addsf3
addw sp, #8
ret
END_FUNC ___subsf3
;; ___addsf3: SFmode addition, a + b.
;; In:  a at [SP+4..7], b at [SP+8..11]
;; Out: result in R8..R11
;; Strategy: handle NaN/Inf/zero specials first, then unpack both
;; operands (16 bytes of scratch, A record at [SP+0], B at [SP+8]),
;; align exponents, add or subtract mantissas, renormalize, and pack.
START_FUNC ___addsf3
;; if (isnan(a)) return a
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_isnan
bnz $1f
ret_a:
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
ret
1: ;; if (isnan (b)) return b;
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_isnan
bnz $2f
ret_b:
movw ax, [sp+8]
movw r8, ax
movw ax, [sp+10]
movw r10, ax
ret
2: ;; if (isinf (a))
movw ax, sp
addw ax, #4
movw hl, ax
call $!__int_isinf
bnz $3f
;; if (isinf (b) && a->sign != b->sign) return NaN
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bnz $ret_a
;; Both infinite: compare sign bits (byte 3 of each operand).
mov a, [sp+7]
mov h, a
mov a, [sp+11]
xor a, h
bf a.7, $ret_a
;; Inf + -Inf: return a quiet-ish NaN pattern 0x7f800001.
movw r8, #0x0001
movw r10, #0x7f80
ret
3: ;; if (isinf (b)) return b;
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bz $ret_b
;; if (iszero (b))
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bnz $4f
;; if (iszero (a))
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bnz $ret_a
;; 0 + 0: result sign = signA AND signB, so -0 + -0 = -0 but
;; +0 + -0 = +0 (round-to-nearest convention).
movw ax, [sp+4]
movw r8, ax
mov a, [sp+7]
mov h, a
movw ax, [sp+10]
and a, h
movw r10, ax
ret
4: ;; if (iszero (a)) return b;
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bz $ret_b
; Normalize the two numbers relative to each other. At this point,
; we need the numbers converted to their "unpacked" format.
subw sp, #16 ; Save room for two unpacked values.
;; Unpack a: HL = record at [SP+0], DE = packed a (now at [SP+20]).
movw ax, sp
movw hl, ax
addw ax, #16+4
movw de, ax
call $!_int_unpack_sf
;; Unpack b: HL = record at [SP+8], DE = packed b (now at [SP+24]).
movw ax, sp
addw ax, #8
movw hl, ax
addw ax, #16+8-8
movw de, ax
call $!_int_unpack_sf
movw ax, sp
movw hl, ax
;; diff = a.exponent - b.exponent
movw ax, B_EXP ; sign/exponent word
movw bc, ax
movw ax, A_EXP ; sign/exponent word
subw ax, bc ; a = a.exp - b.exp
movw de, ax ; d = sdiff
;; if (diff < 0) diff = -diff
bf a.7, $1f
xor a, #0xff
xor r_0, #0xff ; r_0 is X's saddr alias: complement the low byte too
incw ax ; a = diff
1:
;; if (diff >= 24) the smaller operand is insignificant; zero it
cmpw ax, #24
bc $.L661 ; if diff < 24 goto .L661 (carry = below)
;; zero out the smaller one
movw ax, de
bt a.7, $1f ; if sdiff < 0 (a_exp < b_exp) goto 1f
;; "zero out" b
movw ax, A_EXP
movw B_EXP, ax
movw ax, #0
movw B_FRAC_L, ax
movw B_FRAC_H, ax
br $5f
1:
;; "zero out" a
movw ax, B_EXP
movw A_EXP, ax
movw ax, #0
movw A_FRAC_L, ax
movw A_FRAC_H, ax
br $5f
.L661:
;; shift the smaller one so they have the same exponents
1:
;; While sdiff > 0: shift B's mantissa right, bump B's exponent.
movw ax, de
bt a.7, $1f
cmpw ax, #0 ; sdiff > 0
bnh $1f ; if (sdiff <= 0) goto 1f
decw de
incw B_EXP ; because it's [HL+byte]
movw ax, B_FRAC_H
shrw ax, 1
movw B_FRAC_H, ax
mov a, B_FRAC_LH
rorc a, 1
mov B_FRAC_LH, a
mov a, B_FRAC_L
rorc a, 1
mov B_FRAC_L, a
br $1b
1:
;; While sdiff < 0: shift A's mantissa right, bump A's exponent.
movw ax, de
bf a.7, $1f
incw de
incw A_EXP ; because it's [HL+byte]
movw ax, A_FRAC_H
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a
br $1b
1:
5: ;; At this point, A and B have the same exponent.
mov a, A_SIGN
cmp a, B_SIGN
bnz $1f
;; Same sign, just add.
movw ax, A_FRAC_L
addw ax, B_FRAC_L
movw A_FRAC_L, ax
mov a, A_FRAC_H
addc a, B_FRAC_H
mov A_FRAC_H, a
mov a, A_FRAC_HH
addc a, B_FRAC_HH
mov A_FRAC_HH, a
br $.L728
1: ;; Signs differ - A has A_SIGN still.
bf a.7, $.L696
;; A is negative, do B-A
movw ax, B_FRAC_L
subw ax, A_FRAC_L
movw A_FRAC_L, ax
mov a, B_FRAC_H
subc a, A_FRAC_H
mov A_FRAC_H, a
mov a, B_FRAC_HH
subc a, A_FRAC_HH
mov A_FRAC_HH, a
br $.L698
.L696:
;; B is negative, do A-B
movw ax, A_FRAC_L
subw ax, B_FRAC_L
movw A_FRAC_L, ax
mov a, A_FRAC_H
subc a, B_FRAC_H
mov A_FRAC_H, a
mov a, A_FRAC_HH
subc a, B_FRAC_HH
mov A_FRAC_HH, a
.L698:
;; A is still A_FRAC_HH
bt a.7, $.L706
;; subtraction was positive
mov a, #0
mov A_SIGN, a
br $.L712
.L706:
;; subtraction was negative
mov a, #0xff
mov A_SIGN, a
;; This negates A_FRAC
mov a, A_FRAC_L
xor a, #0xff ; XOR doesn't mess with carry
add a, #1 ; INC doesn't set the carry
mov A_FRAC_L, a
mov a, A_FRAC_LH
xor a, #0xff
addc a, #0
mov A_FRAC_LH, a
mov a, A_FRAC_H
xor a, #0xff
addc a, #0
mov A_FRAC_H, a
mov a, A_FRAC_HH
xor a, #0xff
addc a, #0
mov A_FRAC_HH, a
.L712:
;; Renormalize the subtraction
mov a, A_FRAC_L
or a, A_FRAC_LH
or a, A_FRAC_H
or a, A_FRAC_HH
bz $.L728
;; Mantissa is not zero, left shift until the MSB is in the
;; right place
1:
movw ax, A_FRAC_H
cmpw ax, #0x0200
bnc $.L728
decw A_EXP
movw ax, A_FRAC_L
shlw ax, 1
movw A_FRAC_L, ax
movw ax, A_FRAC_H
rolwc ax, 1
movw A_FRAC_H, ax
br $1b
.L728:
;; normalize A and pack it
movw ax, A_FRAC_H
cmpw ax, #0x01ff
bnh $1f
;; overflow in the mantissa; adjust
movw ax, A_FRAC_H
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a
incw A_EXP
1:
call $!__rl78_int_pack_a_r8
addw sp, #16
ret
END_FUNC ___addsf3
;; __rl78_int_pack_a_r8: pack the unpacked record "A" (at [HL], see
;; A_SIGN/A_EXP/A_FRAC_* defines) into a packed SFmode value in R8..R11.
;; Handles denormal generation, overflow to Inf, underflow, rounding
;; (undoing the unpack-time rounding-bit-shift), and zero exponent fixup.
;; In:       HL = pointer to the unpacked record
;; Out:      R8..R11 = packed float
;; Clobbers: A, X, B, C, flags; rewrites the record in place
;;
;; BUG FIX: the underflow/denormal loop below previously stored the
;; byte rotated in A with "movw ..., ax".  A word store writes the
;; stale X register (low byte of the shifted FRAC_H) into the target
;; byte and clobbers the adjacent mantissa byte with A, corrupting the
;; mantissa.  Byte rotates must be stored with byte "mov ..., a",
;; exactly as every other rotate sequence in this file does.
START_FUNC __rl78_int_pack_a_r8
;; pack A to R8
movw ax, A_EXP
addw ax, #126 ; not 127, we want the "bt/bf" test to check for denormals
bf a.7, $1f
;; make a denormal
2:
movw bc, ax
movw ax, A_FRAC_H
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a
movw ax, bc
incw ax
bt a.7, $2b
decw ax
1:
incw ax ; now it's as if we added 127
movw A_EXP, ax
cmpw ax, #0xfe
bnh $1f
;; store #Inf instead
mov a, A_SIGN
or a, #0x7f
mov x, #0x80
movw r10, ax
movw r8, #0
ret
1:
bf a.7, $1f ; note AX has EXP at top of loop
;; underflow, denormal?
movw ax, A_FRAC_H
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a ; byte store (was "movw ..., ax" - see BUG FIX above)
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a ; byte store (was "movw ..., ax" - see BUG FIX above)
incw A_EXP
movw ax, A_EXP
br $1b
1:
;; undo the rounding-bit-shift
mov a, A_FRAC_L
bf a.0, $1f
;; round up
movw ax, A_FRAC_L
addw ax, #1
movw A_FRAC_L, ax
bnc $1f
incw A_FRAC_H
;; If the rounding set the bit beyond the end of the fraction, increment the exponent.
mov a, A_FRAC_HH
bf a.1, $1f
incw A_EXP
1:
;; Final right shift drops the rounding bit back out of the mantissa.
movw ax, A_FRAC_H
shrw ax, 1
movw A_FRAC_H, ax
mov a, A_FRAC_LH
rorc a, 1
mov A_FRAC_LH, a
mov a, A_FRAC_L
rorc a, 1
mov A_FRAC_L, a
movw ax, A_FRAC_L
movw r8, ax
;; If the whole mantissa is zero, force the exponent to zero too.
or a, x
or a, A_FRAC_H
or a, A_FRAC_HH
bnz $1f
movw ax, #0
movw A_EXP, ax
1:
;; Assemble byte 2: exponent bit 0 in bit 7, top mantissa bits below.
mov a, A_FRAC_H
and a, #0x7f
mov b, a
mov a, A_EXP
shl a, 7
or a, b
mov r10, a
;; Assemble byte 3: sign bit plus exponent bits 7..1.
mov a, A_SIGN
and a, #0x80
mov b, a
mov a, A_EXP
shr a, 1
or a, b
mov r11, a
ret
END_FUNC __rl78_int_pack_a_r8
;; ___mulsf3: SFmode multiplication, a * b.
;; In:  a at [SP+4..7], b at [SP+8..11]
;; Out: result in R8..R11; result sign is always signA ^ signB
;; Strategy: handle specials, unpack both operands, multiply the
;; mantissas with ___muldi3, renormalize the 64-bit product, pack.
START_FUNC ___mulsf3
;; if (isnan(a)) return a
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_isnan
bnz $1f
mret_a:
;; Return a with its sign XORed with b's sign.
movw ax, [sp+4]
movw r8, ax
mov a, [sp+11]
and a, #0x80
mov b, a
movw ax, [sp+6]
xor a, b ; sign is always a ^ b
movw r10, ax
ret
1:
;; if (isnan (b)) return b;
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_isnan
bnz $1f
mret_b:
;; Return b with its sign XORed with a's sign.
movw ax, [sp+8]
movw r8, ax
mov a, [sp+7]
and a, #0x80
mov b, a
movw ax, [sp+10]
xor a, b ; sign is always a ^ b
movw r10, ax
ret
1:
;; if (isinf (a)) return (b==0) ? nan : a
movw ax, sp
addw ax, #4
movw hl, ax
call $!__int_isinf
bnz $.L805
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bnz $mret_a
movw r8, #0x0001 ; return NaN
movw r10, #0x7f80
ret
.L805:
;; if (isinf (b)) return (a==0) ? nan : b
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bnz $.L814
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bnz $mret_b
movw r8, #0x0001 ; return NaN
movw r10, #0x7f80
ret
.L814:
;; 0 * finite = signed 0; finite * 0 = signed 0.
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bz $mret_a
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bz $mret_b
;; at this point, we're doing the multiplication.
subw sp, #16 ; save room for two unpacked values
;; Unpack a to [SP+0], b to [SP+8] (packed values now 16 higher).
movw ax, sp
movw hl, ax
addw ax, #16+4
movw de, ax
call $!_int_unpack_sf
movw ax, sp
addw ax, #8
movw hl, ax
addw ax, #16+8-8
movw de, ax
call $!_int_unpack_sf
movw ax, sp
movw hl, ax
;; multiply SI a.FRAC * SI b.FRAC to DI r8
subw sp, #16
movw ax, A_FRAC_L
movw [sp+0], ax
movw ax, A_FRAC_H
movw [sp+2], ax
movw ax, B_FRAC_L
movw [sp+8], ax
movw ax, B_FRAC_H
movw [sp+10], ax
movw ax, #0
movw [sp+4], ax
movw [sp+6], ax
movw [sp+12], ax
movw [sp+14], ax
call !!___muldi3 ; MTMPa * MTMPb -> R8..R15
addw sp, #16
movw ax, sp
movw hl, ax
;; add the exponents together
movw ax, A_EXP
addw ax, B_EXP
movw bc, ax ; exponent in BC
;; now, re-normalize the DI value in R8..R15 to have the
;; MSB in the "right" place, adjusting BC as we shift it.
;; The value will normally be in this range:
;; R15 R8
;; 0001_0000_0000_0000
;; 0003_ffff_fc00_0001
;; so to speed it up, we normalize to:
;; 0001_xxxx_xxxx_xxxx
;; then extract the bytes we want (r11-r14)
1:
mov a, r15
cmp0 a
bnz $2f
mov a, r14
and a, #0xfe
bz $1f
2:
;; shift right, inc exponent
movw ax, r14
shrw ax, 1
movw r14, ax
mov a, r13
rorc a, 1
mov r13, a
mov a, r12
rorc a, 1
mov r12, a
mov a, r11
rorc a, 1
mov r11, a
;; we don't care about r8/r9/r10 if we're shifting this way
incw bc
br $1b
1:
;; If the top 24 bits are all clear, normalize leftwards instead.
mov a, r15
or a, r14
bnz $1f
;; shift left, dec exponent
movw ax, r8
shlw ax, 1
movw r8, ax
movw ax, r10
rolwc ax, 1
movw r10, ax
movw ax, r12
rolwc ax, 1
movw r12, ax
movw ax, r14
rolwc ax, 1
movw r14, ax
decw bc
br $1b
1:
;; at this point, FRAC is in R11..R14 and EXP is in BC
movw ax, bc
movw A_EXP, ax
mov a, r11
mov A_FRAC_L, a
mov a, r12
mov A_FRAC_LH, a
mov a, r13
mov A_FRAC_H, a
mov a, r14
mov A_FRAC_HH, a
;; Result sign = signA ^ signB; then pack and return.
mov a, A_SIGN
xor a, B_SIGN
mov A_SIGN, a
call $!__rl78_int_pack_a_r8
addw sp, #16
ret
END_FUNC ___mulsf3
;; ___divsf3: SFmode division, a / b.
;; In:  a at [SP+4..7], b at [SP+8..11]
;; Out: result in R8..R11; result sign is always signA ^ signB
;; Strategy: handle specials, unpack both operands, divide
;; (a.FRAC << 32) / b.FRAC with ___divdi3, renormalize, pack.
START_FUNC ___divsf3
;; if (isnan(a)) return a
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_isnan
bnz $1f
dret_a:
;; Return a with its sign XORed with b's sign.
movw ax, [sp+4]
movw r8, ax
mov a, [sp+11]
and a, #0x80
mov b, a
movw ax, [sp+6]
xor a, b ; sign is always a ^ b
movw r10, ax
ret
1:
;; if (isnan (b)) return b;
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_isnan
bnz $1f
dret_b:
;; Return b with its sign XORed with a's sign.
movw ax, [sp+8]
movw r8, ax
mov a, [sp+7]
and a, #0x80
mov b, a
movw ax, [sp+10]
xor a, b ; sign is always a ^ b
movw r10, ax
ret
1:
;; if (isinf (a)) return isinf(b) ? nan : a
movw ax, sp
addw ax, #4
movw hl, ax
call $!__int_isinf
bnz $1f
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bnz $dret_a
dret_nan:
movw r8, #0x0001 ; return NaN
movw r10, #0x7f80
ret
1:
;; if (iszero (a)) return iszero(b) ? nan : a
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_iszero
bnz $1f
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bnz $dret_a
br $dret_nan
1:
;; if (isinf (b)) return 0
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_isinf
bnz $1f
;; Signed zero: sign byte = signA ^ signB, everything else zero.
mov a, [sp+7]
mov b, a
mov a, [sp+11]
xor a, b
and a, #0x80
mov r11, a
movw r8, #0
mov r10, #0
ret
1:
;; if (iszero (b)) return Inf
movw ax, sp
addw ax, #8
movw hl, ax
call !!__int_iszero
bnz $1f
;; Signed Inf: sign ^ sign, exponent all ones, mantissa zero.
mov a, [sp+7]
mov b, a
mov a, [sp+11]
xor a, b
or a, #0x7f
mov r11, a
movw r8, #0
mov r10, #0x80
ret
1:
;; at this point, we're doing the division. Normalized
;; mantissas look like:
;; 01.xx.xx.xx
;; so we divide:
;; 01.xx.xx.xx.00.00.00.00
;; by 01.xx.xx.xx
;; to get approx 00.80.00.00.00 to 01.ff.ff.ff.00
subw sp, #16 ; save room for two unpacked values
;; Unpack a to [SP+0], b to [SP+8] (packed values now 16 higher).
movw ax, sp
movw hl, ax
addw ax, #16+4
movw de, ax
call $!_int_unpack_sf
movw ax, sp
addw ax, #8
movw hl, ax
addw ax, #16+8-8
movw de, ax
call $!_int_unpack_sf
movw ax, sp
movw hl, ax
;; divide DI a.FRAC / SI b.FRAC to DI r8
subw sp, #16
;; Dividend = A_FRAC shifted up 32 bits ([SP+0..3] stay zero).
movw ax, A_FRAC_L
movw [sp+4], ax
movw ax, A_FRAC_H
movw [sp+6], ax
movw ax, B_FRAC_L
movw [sp+8], ax
movw ax, B_FRAC_H
movw [sp+10], ax
movw ax, #0
movw [sp+0], ax
movw [sp+2], ax
movw [sp+12], ax
movw [sp+14], ax
call !!___divdi3 ; MTMPa / MTMPb -> R8..R15
addw sp, #16
movw ax, sp
movw hl, ax
;; subtract the exponents A - B
movw ax, A_EXP
subw ax, B_EXP
movw bc, ax ; exponent in BC
;; now, re-normalize the DI value in R8..R15 to have the
;; MSB in the "right" place, adjusting BC as we shift it.
;; The value will normally be in this range:
;; R15 R8
;; 0000_0000_8000_0000
;; 0000_0001_ffff_ff00
;; so to speed it up, we normalize to:
;; 0000_0001_xxxx_xxxx
;; then extract the bytes we want (r9-r12)
1:
movw ax, r14
cmpw ax, #0
bnz $2f
movw ax, r12
cmpw ax, #1
bnh $1f
2:
;; shift right, inc exponent
movw ax, r14
shrw ax, 1
movw r14, ax
mov a, r13
rorc a, 1
mov r13, a
mov a, r12
rorc a, 1
mov r12, a
mov a, r11
rorc a, 1
mov r11, a
mov a, r10
rorc a, 1
mov r10, a
mov a, r9
rorc a, 1
mov r9, a
mov a, r8
rorc a, 1
mov r8, a
incw bc
br $1b
1:
;; the previous loop leaves r15..r13 zero
mov a, r12
cmp0 a
bnz $1f
;; shift left, dec exponent
movw ax, r8
shlw ax, 1
movw r8, ax
movw ax, r10
rolwc ax, 1
movw r10, ax
movw ax, r12
rolwc ax, 1
movw r12, ax
;; don't need to do r14
decw bc
br $1b
1:
;; at this point, FRAC is in R8..R11 and EXP is in BC
movw ax, bc
movw A_EXP, ax
mov a, r9
mov A_FRAC_L, a
mov a, r10
mov A_FRAC_LH, a
mov a, r11
mov A_FRAC_H, a
mov a, r12
mov A_FRAC_HH, a
;; Result sign = signA ^ signB; then pack and return.
mov a, A_SIGN
xor a, B_SIGN
mov A_SIGN, a
call $!__rl78_int_pack_a_r8
addw sp, #16
ret
END_FUNC ___divsf3
|
4ms/metamodule-plugin-sdk
| 20,489
|
plugin-libc/libgcc/config/rl78/divmodsi.S
|
/* SImode div/mod functions for the GCC support library for the Renesas RL78 processors.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "vregs.h"
#if defined __RL78_MUL_G14__
;; ___divsi3 (G14 variant): signed 32-bit division using the DIVWU
;; instruction.  DIVWU is unsigned, so negative operands are negated
;; first and the quotient is negated when exactly one operand was
;; negative.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: quotient in R8..R11
START_FUNC ___divsi3
;; r8,r10 = 4[sp],6[sp] / 8[sp],10[sp]
;; Load and test for a negative denumerator.
movw ax, [sp+8]
movw de, ax
movw ax, [sp+10]
mov1 cy, a.7
movw hl, ax
bc $__div_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw bc, ax
movw ax, [sp+4]
bc $__div_neg_num
;; Neither are negative - we can use the unsigned divide instruction.
__div_no_convert:
push psw ; DIVWU must run with interrupts disabled
di ; (Renesas Technical Update TN-RL*-A025B/E)
divwu ; bcax = bcax / hlde, remainder in hlde
pop psw
movw r8, ax
movw ax, bc
movw r10, ax
ret
__div_neg_den:
;; Negate the denumerator (which is in HLDE)
clrw ax
subw ax, de
movw de, ax
clrw ax
sknc ; propagate the borrow into the high word
decw ax
subw ax, hl
movw hl, ax
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw bc, ax
movw ax, [sp+4]
;; If it is not negative then we perform the division and then negate the result.
bnc $__div_then_convert
;; Otherwise we negate the numerator and then go with a straightforward unsigned division.
;; The negation is complicated because AX, BC, DE and HL are already in use.
;; ax: numL bc: numH r8: r10:
xchw ax, bc
;; ax: numH bc: numL r8: r10:
movw r8, ax
;; ax: bc: numL r8: numH r10:
clrw ax
;; ax: 0 bc: numL r8: numH r10:
subw ax, bc
;; ax: -numL bc: r8: numH r10:
movw r10, ax
;; ax: bc: r8: numH r10: -numL
movw ax, r8
;; ax: numH bc: r8: r10: -numL
movw bc, ax
;; ax: bc: numH r8: r10: -numL
clrw ax
;; ax: 0 bc: numH r8: r10: -numL
sknc
decw ax
;; ax: -1 bc: numH r8: r10: -numL
subw ax, bc
;; ax: -numH bc: r8: r10: -numL
movw bc, ax
;; ax: bc: -numH r8: r10: -numL
movw ax, r10
;; ax: -numL bc: -numH r8: r10:
br $!__div_no_convert
__div_neg_num:
;; Negate the numerator (which is in BCAX)
;; We know that the denumerator is positive.
;; Note - we temporarily overwrite DE. We know that we can safely load it again off the stack again.
movw de, ax
clrw ax
subw ax, de
movw de, ax
clrw ax
sknc
decw ax
subw ax, bc
movw bc, ax
movw ax, [sp+8]
xchw ax, de ; restore DE from stack, leave -numL in AX
__div_then_convert:
push psw
di
divwu
pop psw
;; Negate result (in BCAX) and transfer into r8,r10
movw de, ax
clrw ax
subw ax, de
movw r8, ax
clrw ax
sknc
decw ax
subw ax, bc
movw r10, ax
ret
END_FUNC ___divsi3
;----------------------------------------------------------------------
;; ___udivsi3 (G14 variant): unsigned 32-bit division via DIVWU.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: quotient in R8..R11
START_FUNC ___udivsi3
;; r8,r10 = 4[sp],6[sp] / 8[sp],10[sp]
;; Used when compiling with -Os specified.
movw ax, [sp+10]
movw hl, ax
movw ax, [sp+8]
movw de, ax
movw ax, [sp+6]
movw bc, ax
movw ax, [sp+4]
push psw ; Save the current interrupt status
di ; Disable interrupts. See Renesas Technical update TN-RL*-A025B/E
divwu ; bcax = bcax / hlde
pop psw ; Restore saved interrupt status
movw r8, ax
movw ax, bc
movw r10, ax
ret
END_FUNC ___udivsi3
;----------------------------------------------------------------------
;; ___modsi3 (G14 variant): signed 32-bit modulo using DIVWU (which
;; leaves the remainder in HLDE).  Per C semantics the remainder takes
;; the numerator's sign, so the result is negated only when the
;; numerator was negative.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: remainder in R8..R11
START_FUNC ___modsi3
;; r8,r10 = 4[sp],6[sp] % 8[sp],10[sp]
;; Load and test for a negative denumerator.
movw ax, [sp+8]
movw de, ax
movw ax, [sp+10]
mov1 cy, a.7
movw hl, ax
bc $__mod_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw bc, ax
movw ax, [sp+4]
bc $__mod_neg_num
;; Neither are negative - we can use the unsigned divide instruction.
__mod_no_convert:
push psw
di
divwu ; remainder ends up in HLDE
pop psw
movw ax, de
movw r8, ax
movw ax, hl
movw r10, ax
ret
__mod_neg_den:
;; Negate the denumerator (which is in HLDE)
clrw ax
subw ax, de
movw de, ax
clrw ax
sknc
decw ax
subw ax, hl
movw hl, ax
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw bc, ax
movw ax, [sp+4]
;; If it is not negative then we perform the modulo operation without conversion
bnc $__mod_no_convert
;; Otherwise we negate the numerator and then go with a modulo followed by negation.
;; The negation is complicated because AX, BC, DE and HL are already in use.
xchw ax, bc
movw r8, ax ; stash numH
clrw ax
subw ax, bc ; negate numL (sets borrow if numL != 0)
movw r10, ax
movw ax, r8
movw bc, ax
clrw ax
sknc
decw ax
subw ax, bc ; negate numH with borrow
movw bc, ax
movw ax, r10
br $!__mod_then_convert
__mod_neg_num:
;; Negate the numerator (which is in BCAX)
;; We know that the denumerator is positive.
;; Note - we temporarily overwrite DE. We know that we can safely load it again off the stack again.
movw de, ax
clrw ax
subw ax, de
movw de, ax
clrw ax
sknc
decw ax
subw ax, bc
movw bc, ax
movw ax, [sp+8]
xchw ax, de ; restore DE from stack, leave -numL in AX
__mod_then_convert:
push psw
di
divwu
pop psw
;; Negate result (in HLDE) and transfer into r8,r10
clrw ax
subw ax, de
movw r8, ax
clrw ax
sknc
decw ax
subw ax, hl
movw r10, ax
ret
END_FUNC ___modsi3
;----------------------------------------------------------------------
;; ___umodsi3 (G14 variant): unsigned 32-bit modulo via DIVWU.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: remainder in R8..R11
START_FUNC ___umodsi3
;; r8,r10 = 4[sp],6[sp] % 8[sp],10[sp]
;; Used when compiling with -Os specified.
movw ax, [sp+10]
movw hl, ax
movw ax, [sp+8]
movw de, ax
movw ax, [sp+6]
movw bc, ax
movw ax, [sp+4]
push psw ; Save the current interrupt status
di ; Disable interrupts. See Renesas Technical update TN-RL*-A025B/E
divwu ; hlde = bcax %% hlde
pop psw ; Restore saved interrupt status
movw ax, de
movw r8, ax
movw ax, hl
movw r10, ax
ret
END_FUNC ___umodsi3
;----------------------------------------------------------------------
#elif defined __RL78_MUL_G13__
;----------------------------------------------------------------------
;; Hardware registers. Note - these values match the silicon, not the documentation.
MDAL = 0xffff0
MDAH = 0xffff2
MDBL = 0xffff6
MDBH = 0xffff4
MDCL = 0xf00e0
MDCH = 0xf00e2
MDUC = 0xf00e8
;; _Negate: two's-complement negate the 32-bit value held in the two
;; 16-bit memory words \low (bits 0-15) and \high (bits 16-31).
;; Clobbers AX and BC; on exit AX holds the negated high word.
.macro _Negate low, high
movw ax, \low
movw bc, ax
clrw ax
subw ax, bc ; 0 - low; CY set iff low != 0
movw \low, ax
movw ax, \high
movw bc, ax
clrw ax
sknc ; propagate the borrow into the high word
decw ax
subw ax, bc
movw \high, ax
.endm
;----------------------------------------------------------------------
;; ___divsi3 (G13 variant): signed 32-bit division using the on-chip
;; multiply/divide unit (MDU).  Dividend goes in MDA, divisor in MDB;
;; the quotient is read back from MDA.  The MDU is unsigned, so
;; operands are negated first and the result fixed up as needed.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: quotient in R8..R11
START_FUNC ___divsi3
;; r8,r10 = 4[sp],6[sp] / 8[sp],10[sp]
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
;; Load and test for a negative denumerator.
movw ax, [sp+8]
movw MDBL, ax
movw ax, [sp+10]
mov1 cy, a.7
movw MDBH, ax
bc $__div_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw MDAH, ax
movw ax, [sp+4]
movw MDAL, ax
bc $__div_neg_num
;; Neither are negative - we can use the unsigned divide hardware.
__div_no_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, MDAL ; Read the result
movw r8, ax
movw ax, MDAH
movw r10, ax
ret
__div_neg_den:
;; Negate the denumerator (which is in MDBL/MDBH)
_Negate MDBL MDBH
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw MDAH, ax
movw ax, [sp+4]
movw MDAL, ax
;; If it is not negative then we perform the division and then negate the result.
bnc $__div_then_convert
;; Otherwise we negate the numerator and then go with a straightforward unsigned division.
_Negate MDAL MDAH
br $!__div_no_convert
__div_neg_num:
;; Negate the numerator (which is in MDAL/MDAH)
;; We know that the denumerator is positive.
_Negate MDAL MDAH
__div_then_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
;; Negate result and transfer into r8,r10
_Negate MDAL MDAH ; FIXME: This could be coded more efficiently.
movw r10, ax ; _Negate leaves the negated high word in AX
movw ax, MDAL
movw r8, ax
ret
END_FUNC ___divsi3
;----------------------------------------------------------------------
;; ___modsi3 (G13 variant): signed 32-bit modulo using the on-chip
;; multiply/divide unit.  The remainder is read from MDC; per C
;; semantics it is negated iff the numerator was negative.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: remainder in R8..R11
START_FUNC ___modsi3
;; r8,r10 = 4[sp],6[sp] % 8[sp],10[sp]
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
;; Load and test for a negative denumerator.
movw ax, [sp+8]
movw MDBL, ax
movw ax, [sp+10]
mov1 cy, a.7
movw MDBH, ax
bc $__mod_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw MDAH, ax
movw ax, [sp+4]
movw MDAL, ax
bc $__mod_neg_num
;; Neither are negative - we can use the unsigned divide hardware
__mod_no_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDCL ; Read the remainder
movw r8, ax
movw ax, !MDCH
movw r10, ax
ret
__mod_neg_den:
;; Negate the denumerator (which is in MDBL/MDBH)
_Negate MDBL MDBH
;; Load and test for a negative numerator.
movw ax, [sp+6]
mov1 cy, a.7
movw MDAH, ax
movw ax, [sp+4]
movw MDAL, ax
;; If it is not negative then we perform the modulo operation without conversion
bnc $__mod_no_convert
;; Otherwise we negate the numerator and then go with a modulo followed by negation.
_Negate MDAL MDAH
br $!__mod_then_convert
__mod_neg_num:
;; Negate the numerator (which is in MDAL/MDAH)
;; We know that the denumerator is positive.
_Negate MDAL MDAH
__mod_then_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
;; Negate the remainder (in MDCL/MDCH) straight into r8,r10.
movw ax, !MDCL
movw bc, ax
clrw ax
subw ax, bc
movw r8, ax
movw ax, !MDCH
movw bc, ax
clrw ax
sknc ; propagate the borrow into the high word
decw ax
subw ax, bc
movw r10, ax
ret
END_FUNC ___modsi3
;----------------------------------------------------------------------
;; ___udivsi3 (G13 variant): unsigned 32-bit division via the MDU.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: quotient in R8..R11
;; NOTE(review): the original "divisor"/"dividend" comments below were
;; swapped; MDA holds the dividend and MDB the divisor (consistent
;; with ___divsi3 above).  Comments corrected, code unchanged.
START_FUNC ___udivsi3
;; r8,r10 = 4[sp],6[sp] / 8[sp],10[sp]
;; Used when compiling with -Os specified.
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
movw ax, [sp+4] ; Load the dividend (numerator)
movw MDAL, ax
movw ax, [sp+6]
movw MDAH, ax
movw ax, [sp+8] ; Load the divisor (denominator)
movw MDBL, ax
movw ax, [sp+10]
movw MDBH, ax
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDAL ; Read the result
movw r8, ax
movw ax, !MDAH
movw r10, ax
ret
END_FUNC ___udivsi3
;----------------------------------------------------------------------
;; ___umodsi3 (G13 variant): unsigned 32-bit modulo via the MDU.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: remainder in R8..R11 (read from MDC)
;; NOTE(review): the original "divisor"/"dividend" comments below were
;; swapped; MDA holds the dividend and MDB the divisor.  Comments
;; corrected, code unchanged.
START_FUNC ___umodsi3
;; r8,r10 = 4[sp],6[sp] % 8[sp],10[sp]
;; Used when compiling with -Os specified.
;; Note - hardware address match the silicon, not the documentation
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
movw ax, [sp+4] ; Load the dividend (numerator)
movw MDAL, ax
movw ax, [sp+6]
movw MDAH, ax
movw ax, [sp+8] ; Load the divisor (denominator)
movw MDBL, ax
movw ax, [sp+10]
movw MDBH, ax
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDCL ; Read the remainder
movw r8, ax
movw ax, !MDCH
movw r10, ax
ret
END_FUNC ___umodsi3
;----------------------------------------------------------------------
#elif defined __RL78_MUL_NONE__
;; MAKE_GENERIC: generate a software shift-and-subtract 32-bit
;; divide/modulo routine for parts with no hardware multiply/divide.
;;   \which       - label suffix (_d or _m)
;;   \need_result - 1: emit __generic_sidiv (quotient returned in r8..r11)
;;                  0: emit __generic_simod (remainder returned in r8..r11)
;; The routine runs a classic restoring division: "den" is shifted left
;; until it exceeds "num" (tracking the position in "bit"), then shifted
;; back down subtracting where it fits.  A second, HImode-only loop
;; takes over once all high words are zero, to halve the shift work.
;; num/quot/den/bit live in virtual registers r8..r23; den's high word
;; is kept in the real BC register (see the denH #define).
.macro MAKE_GENERIC which,need_result
.if \need_result
quot = r8
num = r12
den = r16
bit = r20
.else
num = r8
quot = r12
den = r16
bit = r20
.endif
quotH = quot+2
quotL = quot
quotB0 = quot
quotB1 = quot+1
quotB2 = quot+2
quotB3 = quot+3
numH = num+2
numL = num
numB0 = num
numB1 = num+1
numB2 = num+2
numB3 = num+3
#define denH bc
denL = den
denB0 = den
denB1 = den+1
#define denB2 c
#define denB3 b
bitH = bit+2
bitL = bit
bitB0 = bit
bitB1 = bit+1
bitB2 = bit+2
bitB3 = bit+3
;----------------------------------------------------------------------
START_FUNC __generic_sidivmod\which
;; Fast path: num < den, so quotient = 0 and remainder = num.
num_lt_den\which:
.if \need_result
movw r8, #0
movw r10, #0
.else
movw ax, [sp+8]
movw r8, ax
movw ax, [sp+10]
movw r10, ax
.endif
ret
;; Shortcut: move den up 16 bits in one go (bit likewise, or +16 when
;; bit is just a counter in the modulo variant).
shift_den_bit16\which:
movw ax, denL
movw denH, ax
movw denL, #0
.if \need_result
movw ax, bitL
movw bitH, ax
movw bitL, #0
.else
mov a, bit
add a, #16
mov bit, a
.endif
br $shift_den_bit\which
;; These routines leave DE alone - the signed functions use DE
;; to store sign information that must remain intact
.if \need_result
.global __generic_sidiv
__generic_sidiv:
.else
.global __generic_simod
__generic_simod:
.endif
;; (quot,rem) = 8[sp] /% 12[sp]
movw hl, sp
;; 32-bit compare den vs num: high words first, low words only on tie.
movw ax, [hl+14] ; denH
cmpw ax, [hl+10] ; numH
movw ax, [hl+12] ; denL
sknz
cmpw ax, [hl+8] ; numL
bh $num_lt_den\which
;; Preserve the scratch virtual registers we are about to use.
#ifdef __RL78_G10__
movw ax, denL
push ax
movw ax, bitL
push ax
movw ax, bitH
push ax
#else
sel rb2
push ax ; denL
; push bc ; denH
push de ; bitL
push hl ; bitH - stored in BC
sel rb0
#endif
;; (quot,rem) = 16[sp] /% 20[sp]
;; copy numerator
movw ax, [hl+8]
movw numL, ax
movw ax, [hl+10]
movw numH, ax
;; copy denominator
movw ax, [hl+12]
movw denL, ax
movw ax, [hl+14]
movw denH, ax
;; Division by zero: return 0 (no trap).
movw ax, denL
or a, denB2
or a, denB3 ; not x
cmpw ax, #0
bnz $den_not_zero\which
.if \need_result
movw quotL, #0
movw quotH, #0
.else
movw numL, #0
movw numH, #0
.endif
br $!main_loop_done_himode\which
den_not_zero\which:
.if \need_result
;; zero out quot
movw quotL, #0
movw quotH, #0
.endif
;; initialize bit to 1
movw bitL, #1
movw bitH, #0
; while (den < num && !(den & (1L << BITS_MINUS_1)))
.if 1
;; see if we can short-circuit a bunch of shifts
movw ax, denH
cmpw ax, #0
bnz $shift_den_bit\which
movw ax, denL
cmpw ax, numH
bnh $shift_den_bit16\which
.endif
;; Phase 1: shift den (and bit) left until den >= num or den's MSB set.
shift_den_bit\which:
movw ax, denH
mov1 cy,a.7
bc $enter_main_loop\which
cmpw ax, numH
movw ax, denL ; we re-use this below
sknz
cmpw ax, numL
bh $enter_main_loop\which
;; den <<= 1
; movw ax, denL ; already has it from the cmpw above
shlw ax, 1
movw denL, ax
; movw ax, denH
rolwc denH, 1
; movw denH, ax
;; bit <<= 1
.if \need_result
movw ax, bitL
shlw ax, 1
movw bitL, ax
movw ax, bitH
rolwc ax, 1
movw bitH, ax
.else
;; if we don't need to compute the quotient, we don't need an
;; actual bit *mask*, we just need to keep track of which bit
inc bitB0
.endif
br $shift_den_bit\which
;; Phase 2: full 32-bit subtract loop.
;; while (bit)
main_loop\which:
;; if (num >= den) (cmp den > num)
movw ax, numH
cmpw ax, denH
movw ax, numL
sknz
cmpw ax, denL
skz
bnh $next_loop\which
;; num -= den
; movw ax, numL ; already has it from the cmpw above
subw ax, denL
movw numL, ax
movw ax, numH
sknc
decw ax
subw ax, denH
movw numH, ax
.if \need_result
;; res |= bit
mov a, quotB0
or a, bitB0
mov quotB0, a
mov a, quotB1
or a, bitB1
mov quotB1, a
mov a, quotB2
or a, bitB2
mov quotB2, a
mov a, quotB3
or a, bitB3
mov quotB3, a
.endif
next_loop\which:
;; den >>= 1
movw ax, denH
shrw ax, 1
movw denH, ax
mov a, denB1
rorc a, 1
mov denB1, a
mov a, denB0
rorc a, 1
mov denB0, a
;; bit >>= 1
.if \need_result
movw ax, bitH
shrw ax, 1
movw bitH, ax
mov a, bitB1
rorc a, 1
mov bitB1, a
mov a, bitB0
rorc a, 1
mov bitB0, a
.else
dec bitB0
.endif
enter_main_loop\which:
.if \need_result
movw ax, bitH
cmpw ax, #0
bnz $main_loop\which
.else
cmp bitB0, #15
bh $main_loop\which
.endif
;; bit is HImode now; check others
movw ax, numH ; numerator
cmpw ax, #0
bnz $bit_high_set\which
movw ax, denH ; denominator
cmpw ax, #0
bz $switch_to_himode\which
bit_high_set\which:
.if \need_result
movw ax, bitL
cmpw ax, #0
.else
cmp0 bitB0
.endif
bnz $main_loop\which
switch_to_himode\which:
.if \need_result
movw ax, bitL
cmpw ax, #0
.else
cmp0 bitB0
.endif
bz $main_loop_done_himode\which
;; From here on in, r22, r14, and r18 are all zero
;; Phase 3: 16-bit-only subtract loop (high words all zero).
;; while (bit)
main_loop_himode\which:
;; if (num >= den) (cmp den > num)
movw ax, denL
cmpw ax, numL
bh $next_loop_himode\which
;; num -= den
movw ax, numL
subw ax, denL
movw numL, ax
movw ax, numH
sknc
decw ax
subw ax, denH
movw numH, ax
.if \need_result
;; res |= bit
mov a, quotB0
or a, bitB0
mov quotB0, a
mov a, quotB1
or a, bitB1
mov quotB1, a
.endif
next_loop_himode\which:
;; den >>= 1
movw ax, denL
shrw ax, 1
movw denL, ax
.if \need_result
;; bit >>= 1
movw ax, bitL
shrw ax, 1
movw bitL, ax
.else
dec bitB0
.endif
.if \need_result
movw ax, bitL
cmpw ax, #0
.else
cmp0 bitB0
.endif
bnz $main_loop_himode\which
main_loop_done_himode\which:
;; Restore the scratch virtual registers saved on entry.
#ifdef __RL78_G10__
pop ax
movw bitH, ax
pop ax
movw bitL, ax
pop ax
movw denL, ax
#else
sel rb2
pop hl ; bitH - stored in BC
pop de ; bitL
; pop bc ; denH
pop ax ; denL
sel rb0
#endif
ret
END_FUNC __generic_sidivmod\which
.endm
;----------------------------------------------------------------------
;; Instantiate the generic divider twice: __generic_sidiv (quotient)
;; and __generic_simod (remainder).
MAKE_GENERIC _d 1
MAKE_GENERIC _m 0
;----------------------------------------------------------------------
;; ___udivsi3 (software variant): thin wrapper over __generic_sidiv.
;; Result (quotient) is returned in r8..r11.
START_FUNC ___udivsi3
;; r8 = 4[sp] / 8[sp]
call $!__generic_sidiv
ret
END_FUNC ___udivsi3
;; ___umodsi3 (software variant): thin wrapper over __generic_simod.
;; Result (remainder) is returned in r8..r11.
START_FUNC ___umodsi3
;; r8 = 4[sp] % 8[sp]
call $!__generic_simod
ret
END_FUNC ___umodsi3
;----------------------------------------------------------------------
;; NEG_AX: two's-complement negate, in place, the 32-bit value at the
;; memory address held in AX.  Clobbers AX and HL.
.macro NEG_AX
movw hl, ax
movw ax, #0
subw ax, [hl] ; 0 - low word; CY set iff it was non-zero
movw [hl], ax
movw ax, #0
sknc ; propagate the borrow into the high word
decw ax
subw ax, [hl+2]
movw [hl+2], ax
.endm
;----------------------------------------------------------------------
;; ___divsi3 (software variant): signed division on top of the
;; unsigned __generic_sidiv.  Negative operands are negated *in place*
;; on the caller's stack, then restored afterwards; D and E record
;; which operands were negated so the quotient's sign can be fixed up.
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: quotient in R8..R11
START_FUNC ___divsi3
;; r8 = 4[sp] / 8[sp]
movw de, #0 ; d/e = "numerator/denominator was negated" flags
mov a, [sp+7]
mov1 cy, a.7
bc $div_signed_num
mov a, [sp+11]
mov1 cy, a.7
bc $div_signed_den
call $!__generic_sidiv
ret
div_signed_num:
;; neg [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov d, #1
mov a, [sp+11]
mov1 cy, a.7
bnc $div_unsigned_den
div_signed_den:
;; neg [sp+8]
movw ax, sp
addw ax, #8
NEG_AX
mov e, #1
div_unsigned_den:
call $!__generic_sidiv
mov a, d
cmp0 a
bz $div_skip_restore_num
;; We have to restore the numerator [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov a, d
div_skip_restore_num:
;; Quotient is negative iff exactly one operand was negative.
xor a, e
bz $div_no_neg
movw ax, #r8 ; address of the result virtual register
NEG_AX
div_no_neg:
mov a, e
cmp0 a
bz $div_skip_restore_den
;; We have to restore the denominator [sp+8]
movw ax, sp
addw ax, #8
NEG_AX
div_skip_restore_den:
ret
END_FUNC ___divsi3
;; ___modsi3 (software variant): signed modulo on top of the unsigned
;; __generic_simod.  Operands negated in place and restored as in
;; ___divsi3; per C semantics the remainder is negated iff the
;; numerator was negative (flag D).
;; In:  numerator at [SP+4..7], denominator at [SP+8..11]
;; Out: remainder in R8..R11
START_FUNC ___modsi3
;; r8 = 4[sp] % 8[sp]
movw de, #0 ; d/e = "numerator/denominator was negated" flags
mov a, [sp+7]
mov1 cy, a.7
bc $mod_signed_num
mov a, [sp+11]
mov1 cy, a.7
bc $mod_signed_den
call $!__generic_simod
ret
mod_signed_num:
;; neg [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov d, #1
mov a, [sp+11]
mov1 cy, a.7
bnc $mod_unsigned_den
mod_signed_den:
;; neg [sp+8]
movw ax, sp
addw ax, #8
NEG_AX
mov e, #1
mod_unsigned_den:
call $!__generic_simod
mov a, d
cmp0 a
bz $mod_no_neg
movw ax, #r8 ; address of the result virtual register
NEG_AX
;; We have to restore [sp+4] as well.
movw ax, sp
addw ax, #4
NEG_AX
mod_no_neg:
.if 1
mov a, e
cmp0 a
bz $mod_skip_restore_den
movw ax, sp
addw ax, #8
NEG_AX
mod_skip_restore_den:
.endif
ret
END_FUNC ___modsi3
;----------------------------------------------------------------------
#else
#error "Unknown RL78 hardware multiply/divide support"
#endif
|
4ms/metamodule-plugin-sdk
| 13,334
|
plugin-libc/libgcc/config/rl78/divmodhi.S
|
/* HImode div/mod functions for the GCC support library for the Renesas RL78 processors.
Copyright (C) 2012-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "vregs.h"
#if defined __RL78_MUL_G14__
;; int ___divhi3 (int num, int den) -- G14 core variant.
;; Signed 16-bit divide using the DIVHU instruction (unsigned AX / DE).
;; Negative operands are negated into registers first; the result is
;; negated when exactly one operand was negative. DIVHU is multi-cycle,
;; so interrupts are disabled (PSW saved/restored) around it.
START_FUNC ___divhi3
;; r8 = 4[sp] / 6[sp]
;; Test for a negative denominator.
movw ax, [sp+6]
mov1 cy, a.7
movw de, ax ; DE = denominator (DIVHU divisor)
bc $__div_neg_den
;; Test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
bc $__div_neg_num
;; Neither are negative - we can use the unsigned divide instruction.
__div_no_convert:
push psw
di ; DIVHU must not be interrupted
divhu ; AX = AX / DE, DE = remainder
pop psw
movw r8, ax
ret
__div_neg_den:
;; Negate the denominator (which is in DE)
clrw ax
subw ax, de
movw de, ax
;; Test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
;; If it is not negative then we perform the division and then negate the result.
bnc $__div_then_convert
;; Otherwise we negate the numerator and then go with an unsigned division.
movw bc, ax
clrw ax
subw ax, bc
br $__div_no_convert
__div_neg_num:
;; Negate the numerator (which is in AX)
;; We know that the denominator is positive.
movw bc, ax
clrw ax
subw ax, bc
__div_then_convert:
push psw
di
divhu
pop psw
;; Negate result and transfer into r8
movw bc, ax
clrw ax
subw ax, bc
movw r8, ax
ret
END_FUNC ___divhi3
;----------------------------------------------------------------------
;; int ___modhi3 (int num, int den) -- G14 core variant.
;; Signed 16-bit modulus via DIVHU (remainder is left in DE).
;; Per C semantics the remainder takes the numerator's sign, so the
;; result is negated only when the numerator was negative; the sign of
;; the denominator alone does not require conversion of the result.
START_FUNC ___modhi3
;; r8 = 4[sp] % 6[sp]
;; Test for a negative denominator.
movw ax, [sp+6]
mov1 cy, a.7
movw de, ax ; DE = denominator (DIVHU divisor)
bc $__mod_neg_den
;; Test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
bc $__mod_neg_num
;; Neither are negative - we can use the unsigned divide instruction.
__mod_no_convert:
push psw
di ; DIVHU must not be interrupted
divhu ; AX = quotient, DE = remainder
pop psw
movw ax, de
movw r8, ax
ret
__mod_neg_den:
;; Negate the denominator (which is in DE)
clrw ax
subw ax, de
movw de, ax
;; Test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
;; If it is not negative then we perform the modulo operation without conversion.
bnc $__mod_no_convert
;; Otherwise we negate the numerator and then go with an unsigned modulo operation.
movw bc, ax
clrw ax
subw ax, bc
br $__mod_then_convert
__mod_neg_num:
;; Negate the numerator (which is in AX)
;; We know that the denominator is positive.
movw bc, ax
clrw ax
subw ax, bc
__mod_then_convert:
push psw
di
divhu
pop psw
;; Negate result (remainder, in DE) and transfer into r8
clrw ax
subw ax, de
movw r8, ax
ret
END_FUNC ___modhi3
;----------------------------------------------------------------------
#elif defined __RL78_MUL_G13__
;; The G13 S2 core does not have a 16 bit divide peripheral.
;; So instead we perform a 32-bit divide and twiddle the inputs
;; as necessary.
;; Hardware registers. Note - these values match the silicon, not the documentation.
MDAL = 0xffff0
MDAH = 0xffff2
MDBL = 0xffff6
MDBH = 0xffff4
MDCL = 0xf00e0
MDCH = 0xf00e2
MDUC = 0xf00e8
;; _Negate src, dest: dest = -(*src) for 16-bit hardware-divider
;; registers addressed with absolute (!) addressing. Clobbers AX and BC.
.macro _Negate src, dest
movw ax, !\src
movw bc, ax
clrw ax
subw ax, bc ; AX = 0 - src
movw \dest, ax
.endm
;----------------------------------------------------------------------
;; int ___divhi3 (int num, int den) -- G13 core variant.
;; The G13 has no 16-bit divide instruction, so the 32-bit MDA/MDB
;; divider peripheral is used with the upper 16 bits cleared.
;; Negative operands are negated in the peripheral registers; the
;; result is negated when operand signs differ.
START_FUNC ___divhi3
;; r8 = 4[sp] / 6[sp] (signed division)
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
clrw ax ; Clear the top 16-bits of the divisor and dividend
movw MDBH, ax
movw MDAH, ax
;; Load and test for a negative denominator.
movw ax, [sp+6]
movw MDBL, ax
mov1 cy, a.7
bc $__div_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
movw MDAL, ax
bc $__div_neg_num
;; Neither are negative - we can use the unsigned divide hardware.
__div_no_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, MDAL ; Read the quotient
movw r8, ax
ret
__div_neg_den:
;; Negate the denominator (which is in MDBL)
_Negate MDBL MDBL
;; Load and test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
movw MDAL, ax
;; If it is not negative then we perform the division and then negate the result.
bnc $__div_then_convert
;; Otherwise we negate the numerator and then go with a straightforward unsigned division.
_Negate MDAL MDAL
br $!__div_no_convert
__div_neg_num:
;; Negate the numerator (which is in MDAL)
;; We know that the denominator is positive.
_Negate MDAL MDAL
__div_then_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
;; Negate result and transfer into r8
_Negate MDAL r8
ret
END_FUNC ___divhi3
;----------------------------------------------------------------------
;; int ___modhi3 (int num, int den) -- G13 core variant.
;; Same peripheral setup as ___divhi3 above, but the remainder is read
;; from MDCL. The result is negated only when the numerator was
;; negative (C remainder semantics: sign follows the numerator).
START_FUNC ___modhi3
;; r8 = 4[sp] % 6[sp] (signed modulus)
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
clrw ax ; Clear the top 16-bits of the divisor and dividend
movw MDBH, ax
movw MDAH, ax
;; Load and test for a negative denominator.
movw ax, [sp+6]
movw MDBL, ax
mov1 cy, a.7
bc $__mod_neg_den
;; Load and test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
movw MDAL, ax
bc $__mod_neg_num
;; Neither are negative - we can use the unsigned divide hardware
__mod_no_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDCL ; Read the remainder
movw r8, ax
ret
__mod_neg_den:
;; Negate the denominator (which is in MDBL)
_Negate MDBL MDBL
;; Load and test for a negative numerator.
movw ax, [sp+4]
mov1 cy, a.7
movw MDAL, ax
;; If it is not negative then we perform the modulo operation without conversion.
bnc $__mod_no_convert
;; Otherwise we negate the numerator and then go with a modulo followed by negation.
_Negate MDAL MDAL
br $!__mod_then_convert
__mod_neg_num:
;; Negate the numerator (which is in MDAL)
;; We know that the denominator is positive.
_Negate MDAL MDAL
__mod_then_convert:
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
_Negate MDCL r8 ; negate the remainder into r8
ret
END_FUNC ___modhi3
;----------------------------------------------------------------------
;; unsigned int ___udivhi3 (unsigned int num, unsigned int den)
;; G13 core: unsigned 16-bit divide via the MDA/MDB divider peripheral.
;; NOTE(review): the original per-line comments had "divisor"/"dividend"
;; swapped -- MDAL receives the dividend ([sp+4]) and MDBL the divisor
;; ([sp+6]); MDAL holds the quotient after the operation.
START_FUNC ___udivhi3
;; r8 = 4[sp] / 6[sp] (unsigned division)
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
movw ax, [sp+4] ; Load the dividend
movw MDAL, ax
movw ax, [sp+6] ; Load the divisor
movw MDBL, ax
clrw ax
movw MDAH, ax
movw MDBH, ax
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDAL ; Read the quotient
movw r8, ax
ret
END_FUNC ___udivhi3
;----------------------------------------------------------------------
;; unsigned int ___umodhi3 (unsigned int num, unsigned int den)
;; G13 core: unsigned 16-bit modulus via the divider peripheral.
;; MDAL receives the dividend ([sp+4]), MDBL the divisor ([sp+6]);
;; the remainder is read back from MDCL.
START_FUNC ___umodhi3
;; r8 = 4[sp] % 6[sp] (unsigned modulus)
mov a, #0xC0 ; Set DIVMODE=1 and MACMODE=1
mov !MDUC, a ; This preps the peripheral for division without interrupt generation
movw ax, [sp+4] ; Load the dividend
movw MDAL, ax
movw ax, [sp+6] ; Load the divisor
movw MDBL, ax
clrw ax
movw MDAH, ax
movw MDBH, ax
mov a, #0xC1 ; Set the DIVST bit in MDUC
mov !MDUC, a ; This starts the division op
1: mov a, !MDUC ; Wait 16 clocks or until DIVST is clear
bt a.0, $1b
movw ax, !MDCL ; Read the remainder
movw r8, ax
ret
END_FUNC ___umodhi3
;----------------------------------------------------------------------
#elif defined __RL78_MUL_NONE__
;; MAKE_GENERIC which,need_result
;; Expands to a bit-serial shift-and-subtract 16-bit divide/modulus
;; helper. With need_result=1 the quotient is accumulated (div); with
;; need_result=0 only the remainder survives (mod). The register role
;; assignments below put whichever value is returned into r8.
;; Division by zero yields 0. DE is preserved for the signed wrappers.
.macro MAKE_GENERIC which,need_result
.if \need_result
quot = r8
num = r10
den = r12
bit = r14
.else
num = r8
quot = r10
den = r12
bit = r14
.endif
quotB0 = quot
quotB1 = quot+1
numB0 = num
numB1 = num+1
denB0 = den
denB1 = den+1
bitB0 = bit
bitB1 = bit+1
#define bit bc
#define bitB0 c
#define bitB1 b
START_FUNC __generic_hidivmod\which
num_lt_den\which:
;; Fast path: numerator < denominator, so quotient = 0 and
;; remainder = numerator.
.if \need_result
movw r8, #0
.else
movw ax, [sp+8]
movw r8, ax
.endif
ret
;; These routines leave DE alone - the signed functions use DE
;; to store sign information that must remain intact
.if \need_result
.global __generic_hidiv
__generic_hidiv:
.else
.global __generic_himod
__generic_himod:
.endif
;; (quot,rem) = 8[sp] /% 10[sp]
movw hl, sp
movw ax, [hl+10] ; denH
cmpw ax, [hl+8] ; numH
bh $num_lt_den\which
;; (quot,rem) = 16[sp] /% 20[sp]
;; copy numerator
movw ax, [hl+8]
movw num, ax
;; copy denominator
movw ax, [hl+10]
movw den, ax
movw ax, den
cmpw ax, #0
bnz $den_not_zero\which
;; Division by zero: return 0.
.if \need_result
movw quot, #0
.else
movw num, #0
.endif
ret
den_not_zero\which:
.if \need_result
;; zero out quot
movw quot, #0
.endif
;; initialize bit to 1
movw bit, #1
; while (den < num && !(den & (1L << BITS_MINUS_1)))
shift_den_bit\which:
movw ax, den
mov1 cy,a.7
bc $enter_main_loop\which ; stop if den's top bit is already set
cmpw ax, num
bh $enter_main_loop\which ; stop once den >= num
;; den <<= 1
; movw ax, den ; already has it from the cmpw above
shlw ax, 1
movw den, ax
;; bit <<= 1
.if \need_result
#ifdef bit
shlw bit, 1
#else
movw ax, bit
shlw ax, 1
movw bit, ax
#endif
.else
;; if we don't need to compute the quotient, we don't need an
;; actual bit *mask*, we just need to keep track of which bit
inc bitB0
.endif
br $shift_den_bit\which
main_loop\which:
;; if (num >= den) (cmp den > num)
movw ax, den
cmpw ax, num
bh $next_loop\which
;; num -= den
movw ax, num
subw ax, den
movw num, ax
.if \need_result
;; res |= bit
mov a, quotB0
or a, bitB0
mov quotB0, a
mov a, quotB1
or a, bitB1
mov quotB1, a
.endif
next_loop\which:
;; den >>= 1
movw ax, den
shrw ax, 1
movw den, ax
.if \need_result
;; bit >>= 1
movw ax, bit
shrw ax, 1
movw bit, ax
.else
dec bitB0
.endif
enter_main_loop\which:
;; loop until the bit mask (or bit counter) reaches zero
.if \need_result
movw ax, bit
cmpw ax, #0
.else
cmp0 bitB0
.endif
bnz $main_loop\which
main_loop_done\which:
ret
END_FUNC __generic_hidivmod\which
.endm
;----------------------------------------------------------------------
MAKE_GENERIC _d 1
MAKE_GENERIC _m 0
;----------------------------------------------------------------------
;; unsigned int ___udivhi3 (unsigned int num, unsigned int den)
;; MUL_NONE variant: delegate to the bit-serial helper above.
START_FUNC ___udivhi3
;; r8 = 4[sp] / 6[sp]
call $!__generic_hidiv
ret
END_FUNC ___udivhi3
;; unsigned int ___umodhi3 (unsigned int num, unsigned int den)
;; MUL_NONE variant: delegate to the bit-serial helper above.
START_FUNC ___umodhi3
;; r8 = 4[sp] % 6[sp]
call $!__generic_himod
ret
END_FUNC ___umodhi3
;----------------------------------------------------------------------
;; NEG_AX: two's-complement negate the 16-bit value in memory at
;; address AX, in place. Clobbers AX and HL.
.macro NEG_AX
movw hl, ax
movw ax, #0
subw ax, [hl] ; AX = 0 - *HL
movw [hl], ax
.endm
;----------------------------------------------------------------------
;; int ___divhi3 (int num, int den) -- MUL_NONE variant.
;; Signed 16-bit divide on top of __generic_hidiv: negate negative
;; operands in place on the caller's stack, divide, negate the result
;; when signs differ, then restore the stack operands.
;; D = 1 if the numerator was negated, E = 1 if the denominator was
;; (the generic helper leaves DE intact).
START_FUNC ___divhi3
;; r8 = 4[sp] / 6[sp]
movw de, #0
mov a, [sp+5] ; sign byte of the numerator
mov1 cy, a.7
bc $div_signed_num
mov a, [sp+7] ; sign byte of the denominator
mov1 cy, a.7
bc $div_signed_den
call $!__generic_hidiv
ret
div_signed_num:
;; neg [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov d, #1 ; remember: numerator was negated
mov a, [sp+7]
mov1 cy, a.7
bnc $div_unsigned_den
div_signed_den:
;; neg [sp+6]
movw ax, sp
addw ax, #6
NEG_AX
mov e, #1 ; remember: denominator was negated
div_unsigned_den:
call $!__generic_hidiv
mov a, d
cmp0 a
bz $div_skip_restore_num
;; We have to restore the numerator [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov a, d
div_skip_restore_num:
xor a, e ; signs differed iff exactly one of D/E is set
bz $div_no_neg
movw ax, #r8 ; negate the quotient
NEG_AX
div_no_neg:
mov a, e
cmp0 a
bz $div_skip_restore_den
movw ax, sp
addw ax, #6
NEG_AX
div_skip_restore_den:
ret
END_FUNC ___divhi3
;; int ___modhi3 (int num, int den) -- MUL_NONE variant.
;; Signed 16-bit modulus on top of __generic_himod. The remainder takes
;; the numerator's sign (C semantics), so the result is negated only
;; when D (numerator-was-negated) is set. Negated stack operands are
;; restored before returning.
;; FIX(review): 'mov e, #1' was missing after negating the denominator,
;; which left the mod_skip_restore_den restore path dead (E stays 0 from
;; 'movw de, #0') and the caller's [sp+6] slot permanently negated.
;; The SI version (___modsi3) and ___divhi3 both set E here.
START_FUNC ___modhi3
;; r8 = 4[sp] % 6[sp]
movw de, #0
mov a, [sp+5] ; sign byte of the numerator
mov1 cy, a.7
bc $mod_signed_num
mov a, [sp+7] ; sign byte of the denominator
mov1 cy, a.7
bc $mod_signed_den
call $!__generic_himod
ret
mod_signed_num:
;; neg [sp+4]
movw ax, sp
addw ax, #4
NEG_AX
mov d, #1 ; remember: numerator was negated
mov a, [sp+7]
mov1 cy, a.7
bnc $mod_unsigned_den
mod_signed_den:
;; neg [sp+6]
movw ax, sp
addw ax, #6
NEG_AX
mov e, #1 ; remember: denominator was negated (enables restore below)
mod_unsigned_den:
call $!__generic_himod
mov a, d
cmp0 a
bz $mod_no_neg
movw ax, #r8 ; remainder sign follows the numerator
NEG_AX
;; Also restore numerator
movw ax, sp
addw ax, #4
NEG_AX
mod_no_neg:
mov a, e
cmp0 a
bz $mod_skip_restore_den
movw ax, sp
addw ax, #6
NEG_AX
mod_skip_restore_den:
ret
END_FUNC ___modhi3
;----------------------------------------------------------------------
#else
#error "Unknown RL78 hardware multiply/divide support"
#endif
|
4ms/metamodule-plugin-sdk
| 2,733
|
plugin-libc/libgcc/config/rl78/trampoline.S
|
/* libgcc routines for RL78
Copyright (C) 2011-2022 Free Software Foundation, Inc.
Contributed by Red Hat.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* RL78 Trampoline support
Since the RL78's RAM is not in the first 64k, we cannot "just" use a
function pointer to point to a trampoline on the stack. So, we
create N fixed trampolines that read from an array, and allocate
them as needed.
*/
#include "vregs.h"
.data
.p2align 1
trampoline_array:
;; stub n: emit one fixed trampoline (code in .text, bookkeeping slots
;; in .data). When executed, the trampoline loads its static-chain word
;; into virtual register r14 and jumps to the stored target address.
;; The TO_* offsets describe the layout of one .data record and are
;; (re)defined identically on each expansion.
.macro stub n
.text
trampoline_\n:
.type trampoline_\n, @function
movw ax, !trampoline_chain_\n ; static chain -> r14
movw r14, ax
movw ax, !trampoline_addr_\n ; real function address
br ax
.size trampoline_\n, .-trampoline_\n
.data
trampoline_frame_\n: ; SP of the allocating frame (for uninit)
.short 0
trampoline_stub_\n: ; address of the code stub above
.short trampoline_\n
trampoline_chain_\n: ; static chain value
.short 0
trampoline_addr_\n: ; target function; 0 marks a free slot
.short 0
#define TO_FRAME 0
#define TO_STUB 2
#define TO_CHAIN 4
#define TO_ADDR 6
#define TO_SIZE 8
.endm
stub 0
stub 1
stub 2
stub 3
stub 4
stub 5
trampoline_array_end:
/* Given the function pointer in R8 and the static chain
pointer in R10, allocate a trampoline and return its address in
R8. */
;; ___trampoline_init: allocate a trampoline slot.
;; In: R8 = target function pointer, R10 = static chain value.
;; Out: R8 = address of the allocated code stub.
;; Scans the fixed array for a record whose TO_ADDR is 0 (free);
;; executes BRK if every slot is in use.
START_FUNC ___trampoline_init
movw hl, #trampoline_array
1: movw ax, [hl + TO_ADDR]
cmpw ax, #0
bz $2f ; free slot found
movw ax, hl
addw ax, #TO_SIZE
movw hl, ax
cmpw ax, #trampoline_array_end
bnz $1b
brk ; no more slots?
2: movw ax, r8
movw [hl + TO_ADDR], ax ; record target function
movw ax, r10
movw [hl + TO_CHAIN], ax ; record static chain
movw ax, sp
movw [hl + TO_FRAME], ax ; record owning frame's SP
movw ax, [hl + TO_STUB]
movw r8, ax ; return the stub's code address
ret
END_FUNC ___trampoline_init
;; ___trampoline_uninit: free every trampoline slot whose owning frame
;; is at or below the current stack pointer (i.e. frames that have been
;; popped). Freeing just clears TO_ADDR.
START_FUNC ___trampoline_uninit
movw hl, #trampoline_array
movw ax, sp
movw bc, ax ; BC = current SP threshold
1: movw ax, [hl + TO_FRAME]
cmpw ax, bc
bc $2f ; frame below SP: still live, keep
clrw ax
movw [hl + TO_ADDR], ax ; mark slot free
2: movw ax, hl
addw ax, #TO_SIZE
movw hl, ax
cmpw ax, #trampoline_array_end
bnz $1b
ret
END_FUNC ___trampoline_uninit
|
4ms/metamodule-plugin-sdk
| 12,811
|
plugin-libc/libgcc/config/rl78/fpbit-sf.S
|
; SF format is:
;
; [sign] 1.[23bits] E[8bits(n-127)]
;
; SEEEEEEE Emmmmmmm mmmmmmmm mmmmmmmm
;
; [A+0] mmmmmmmm
; [A+1] mmmmmmmm
; [A+2] Emmmmmmm
; [A+3] SEEEEEEE
;
; Special values (xxx != 0):
;
; s1111111 10000000 00000000 00000000 infinity
; s1111111 1xxxxxxx xxxxxxxx xxxxxxxx NaN
; s0000000 00000000 00000000 00000000 zero
; s0000000 0xxxxxxx xxxxxxxx xxxxxxxx denormals
;
; Note that CMPtype is "signed char" for rl78
;
#include "vregs.h"
#define Z PSW.6
;; float ___negsf2 (float x): flip the IEEE sign bit.
;; A is the high byte of AX, so the XOR below toggles bit 31.
START_FUNC ___negsf2
;; Negate the floating point value.
;; Input at [SP+4]..[SP+7].
;; Output to R8..R11.
movw ax, [SP+4]
movw r8, ax
movw ax, [SP+6]
xor a, #0x80 ; toggle the sign bit (byte 3)
movw r10, ax
ret
END_FUNC ___negsf2
;; ------------------internal functions used by later code --------------
;; __int_isnan: [HL] points at a 4-byte float; returns with the Z flag
;; set iff the value is a NaN (exponent all ones AND mantissa nonzero).
;; An exponent of all ones with a zero mantissa is infinity, not NaN.
START_FUNC __int_isnan
;; [HL] points to value, returns Z if it's a NaN
;; Assemble AX = (sign-stripped byte 3):(exponent LSB from byte 2)
mov a, [hl+2]
and a, #0x80
mov x, a
mov a, [hl+3]
and a, #0x7f
cmpw ax, #0x7f80 ; exponent == 0xFF?
skz
ret ; return NZ if not NaN
;; Exponent is all ones: NaN iff any mantissa bit is set.
mov a, [hl+2]
and a, #0x7f
or a, [hl+1]
or a, [hl]
bnz $1f
clr1 Z ; Z, normal
ret
1:
set1 Z ; nan
ret
END_FUNC __int_isnan
;; __int_eithernan: test both float arguments of the caller's caller.
;; Stack offsets +8/+12 account for two return addresses (this call
;; plus the caller's own entry). Returns Z if either value is NaN.
START_FUNC __int_eithernan
;; call from toplevel functions, returns Z if either number is a NaN,
;; or NZ if both are OK.
movw ax, sp
addw ax, #8 ; first argument
movw hl, ax
call $!__int_isnan
bz $1f ; first is NaN: done (Z set)
movw ax, sp
addw ax, #12 ; second argument
movw hl, ax
call $!__int_isnan
1:
ret
END_FUNC __int_eithernan
;; __int_iszero: [HL] points at a 4-byte float; returns Z iff the
;; value is +0.0 or -0.0 (sign bit ignored).
START_FUNC __int_iszero
;; [HL] points to value, returns Z if it's zero
mov a, [hl+3]
and a, #0x7f ; strip the sign bit
or a, [hl+2]
or a, [hl+1]
or a, [hl]
ret
END_FUNC __int_iszero
;; __int_cmpsf: three-way float compare, result in A (-1 / 0 / +1).
;; Assumes neither argument is NaN (callers screen with
;; __int_eithernan first). Signed zeros compare equal. Because IEEE
;; floats of the same sign order the same way as their bit patterns,
;; same-sign comparison is done on the raw 32-bit words.
START_FUNC __int_cmpsf
;; This is always called from some other function here,
;; so the stack offsets are adjusted accordingly.
;; X [SP+8] <=> Y [SP+12] : <a> <=> 0
movw ax, sp
addw ax, #8
movw hl, ax
call $!__int_iszero
bnz $1f
movw ax, sp
addw ax, #12
movw hl, ax
call $!__int_iszero
bnz $2f
;; At this point, both args are zero.
mov a, #0
ret
2:
movw ax, sp
addw ax, #8
movw hl, ax
1:
;; At least one arg is non-zero so we can just compare magnitudes.
;; Args are [HL] and [HL+4].
mov a, [HL+3]
xor a, [HL+7] ; bit 7 set iff the signs differ
mov1 cy, a.7
bnc $1f
;; Signs differ: the negative one is smaller. Result from X's sign.
mov a, [HL+3]
sar a, 7 ; 0x00 if X positive, 0xff if negative
or a, #1 ; -> +1 or -1
ret
1: ;; Signs the same, compare magnitude. It's safe to lump
;; the sign bits, exponent, and mantissa together here, since they're
;; stored in the right sequence.
movw ax, [HL+2]
cmpw ax, [HL+6]
bc $ybig_cmpsf ; branch if X < Y
bnz $xbig_cmpsf ; branch if X > Y
movw ax, [HL]
cmpw ax, [HL+4]
bc $ybig_cmpsf ; branch if X < Y
bnz $xbig_cmpsf ; branch if X > Y
mov a, #0
ret
xbig_cmpsf: ; |X| > |Y| so return A = 1 if pos, 0xff if neg
mov a, [HL+3]
sar a, 7
or a, #1
ret
ybig_cmpsf: ; |X| < |Y| so return A = 0xff if pos, 1 if neg
mov a, [HL+3]
xor a, #0x80 ; flip the sign before extending it
sar a, 7
or a, #1
ret
END_FUNC __int_cmpsf
;; ----------------------------------------------------------
START_FUNC ___cmpsf2
;; This function calculates "A <=> B". That is, if A is less than B
;; they return -1, if A is greater than B, they return 1, and if A
;; and B are equal they return 0. If either argument is NaN the
;; behaviour is undefined (here: returns 1).
;; Input at [SP+4]..[SP+7].
;; Output to R8..R9.
call $!__int_eithernan
bnz $1f
movw r8, #1 ; NaN involved: arbitrary non-zero result
ret
1:
call $!__int_cmpsf
mov r8, a
sar a, 7 ; sign-extend the byte result into r9
mov r9, a
ret
END_FUNC ___cmpsf2
;; ----------------------------------------------------------
;; These functions are all basically the same as ___cmpsf2
;; except that they define how they handle NaNs.
;; eq/ne/le/lt all share one body: preload r8 with the desired
;; "either argument is NaN" result (+1, i.e. unordered => not equal /
;; not less), then fall into __int_cmp_common which overwrites r8
;; with the real three-way comparison when both args are ordered.
START_FUNC ___eqsf2
;; Returns zero iff neither argument is NaN
;; and both arguments are equal.
START_ANOTHER_FUNC ___nesf2
;; Returns non-zero iff either argument is NaN or the arguments are
;; unequal. Effectively __nesf2 is the same as __eqsf2
START_ANOTHER_FUNC ___lesf2
;; Returns a value less than or equal to zero if neither
;; argument is NaN, and the first is less than or equal to the second.
START_ANOTHER_FUNC ___ltsf2
;; Returns a value less than zero if neither argument is
;; NaN, and the first is strictly less than the second.
;; Input at [SP+4]..[SP+7].
;; Output to R8.
mov r8, #1
;;; Fall through
START_ANOTHER_FUNC __int_cmp_common
call $!__int_eithernan
sknz
;; return value (pre-filled-in by the entry points) for "either is nan"
ret
call $!__int_cmpsf
mov r8, a
ret
END_ANOTHER_FUNC __int_cmp_common
END_ANOTHER_FUNC ___ltsf2
END_ANOTHER_FUNC ___lesf2
END_ANOTHER_FUNC ___nesf2
END_FUNC ___eqsf2
;; ge/gt: preload r8 with -1 (the "either argument is NaN" result --
;; unordered => not greater) and reuse __int_cmp_common above.
;; FIX(review): was 'mov r8, #0xffff' -- an 8-bit MOV cannot take a
;; 16-bit immediate; 'movw' is required (cf. 'movw r8, #1' forms used
;; by the sibling functions in this file).
START_FUNC ___gesf2
;; Returns a value greater than or equal to zero if neither argument
;; is a NaN and the first is greater than or equal to the second.
START_ANOTHER_FUNC ___gtsf2
;; Returns a value greater than zero if neither argument
;; is NaN, and the first is strictly greater than the second.
movw r8, #0xffff
br $__int_cmp_common
END_ANOTHER_FUNC ___gtsf2
END_FUNC ___gesf2
;; ----------------------------------------------------------
START_FUNC ___unordsf2
;; Returns a nonzero value if either argument is NaN, otherwise 0.
call $!__int_eithernan
movw r8, #0 ; MOVW does not change flags
sknz ; this is from the call, not the movw
movw r8, #1
ret
END_FUNC ___unordsf2
;; ----------------------------------------------------------
START_FUNC ___fixsfsi
;; Converts its floating point argument into a signed long,
;; rounding toward zero.
;; The behaviour with NaNs and Infinities is not well defined.
;; We choose to return 0 for NaNs, 0x80000000 (INT_MIN) for -inf and
;; 0x7fffffff (INT_MAX) for +inf.
;; This matches the behaviour of the C function in libgcc2.c.
;; Input at [SP+4]..[SP+7], result is in (lsb) R8..R11 (msb).
;; Special case handling for infinities as __fixunssfsi
;; will not give us the values that we want.
movw ax, sp
addw ax, #4
movw hl, ax
call !!__int_isinf
bnz $1f
mov a, [SP+7]
bt a.7, $2f
;; +inf
movw r8, #-1
movw r10, #0x7fff
ret
;; -inf
;; FIX(review): these two used 8-bit 'mov' -- '#0x8000' does not fit a
;; byte immediate and r9/r11 were left stale; 'movw' (as in the +inf
;; path above) stores the full 0x00000000/0x8000xxxx words.
2: movw r8, #0
movw r10, #0x8000
ret
;; Load the value into r10:r11:X:A
1: movw ax, [SP+4]
movw r10, ax
movw ax, [SP+6]
;; If the value is positive we can just use __fixunssfsi
bf a.7, $__int_fixunssfsi
;; Otherwise we negate the value, call __fixunssfsi and
;; then negate its result.
clr1 a.7
call $!__int_fixunssfsi
movw ax, #0
subw ax, r8 ; 32-bit negate of r8..r11, low word first
movw r8, ax
movw ax, #0
sknc
decw ax ; propagate the borrow into the high word
subw ax, r10
movw r10, ax
;; Check for a positive result (which should only happen when
;; __fixunssfsi returns UINTMAX or 0). In such cases just return 0.
mov a, r11
bt a.7, $1f
movw r10,#0x0
movw r8, #0x0
1: ret
END_FUNC ___fixsfsi
;; unsigned long ___fixunssfsi (float x): truncate toward zero.
;; The internal entry __int_fixunssfsi takes the raw float in
;; (lsb) r10.r11.x.a (msb) and is also used by ___fixsfsi above.
;; Clobbers AX, BC, H, r12/r13 as scratch; result in r8..r11.
START_FUNC ___fixunssfsi
;; Converts its floating point argument into an unsigned long
;; rounding towards zero. Negative arguments all become zero.
;; We choose to return 0 for NaNs and -inf, but UINTMAX for +inf.
;; This matches the behaviour of the C function in libgcc2.c.
;; Input at [SP+4]..[SP+7], result is in (lsb) R8..R11 (msb)
;; Get the input value.
movw ax, [SP+4]
movw r10, ax
movw ax, [SP+6]
;; Fall through into the internal function.
.global __int_fixunssfsi
__int_fixunssfsi:
;; Input in (lsb) r10.r11.x.a (msb).
;; Test for a negative input. We shift the other bits at the
;; same time so that A ends up holding the whole exponent:
;;
;; before:
;; SEEEEEEE EMMMMMMM MMMMMMMM MMMMMMMM
;; A X R11 R10
;;
;; after:
;; EEEEEEEE MMMMMMM0 MMMMMMMM MMMMMMMM
;; A X R11 R10
shlw ax, 1
bnc $1f
;; Return zero.
2: movw r8, #0
movw r10, #0
ret
;; An exponent of -1 is either a NaN or infinity.
1: cmp a, #-1
bnz $3f
;; For NaN we return 0. For infinity we return UINTMAX.
mov a, x
or a, r10
or a, r11
cmp0 a
bnz $2b ; any mantissa bit set => NaN => 0
6: movw r8, #-1 ; -1 => UINT_MAX
movw r10, #-1
ret
;; If the exponent is negative the value is < 1 and so the
;; converted value is 0. Note we must allow for the bias
;; applied to the exponent. Thus a value of 127 in the
;; EEEEEEEE bits actually represents an exponent of 0, whilst
;; a value less than 127 actually represents a negative exponent.
;; Also if the EEEEEEEE bits are all zero then this represents
;; either a denormal value or 0.0. Either way for these values
;; we return 0.
3: sub a, #127
bc $2b
;; A now holds the bias adjusted exponent, which is known to be >= 0.
;; If the exponent is > 31 then the conversion will overflow.
cmp a, #32
bnc $6b
4:
;; Save the exponent in H. We increment it by one because we want
;; to be sure that the loop below will always execute at least once.
inc a
mov h, a
;; Get the top 24 bits of the mantissa into A:X:R10
;; Include the implicit 1-bit that is inherent in the IEEE fp format.
;;
;; before:
;; EEEEEEEE MMMMMMM0 MMMMMMMM MMMMMMMM
;; H X R11 R10
;; after:
;; EEEEEEEE 1MMMMMMM MMMMMMMM MMMMMMMM
;; H A X R10
mov a, r11
xch a, x
shr a, 1
set1 a.7
;; Clear B:C:R12:R13
movw bc, #0
movw r12, #0
;; Shift bits from the mantissa (A:X:R10) into (B:C:R12:R13),
;; decrementing the exponent as we go.
;; before:
;; MMMMMMMM MMMMMMMM MMMMMMMM xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
;; A X R10 B C R12 R13
;; first iter:
;; MMMMMMMM MMMMMMMM MMMMMMM0 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxM
;; A X R10 B C R12 R13
;; second iter:
;; MMMMMMMM MMMMMMMM MMMMMM00 xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxMM
;; A X R10 B C R12 R13
;; etc.
5:
xch a, r10
shl a, 1
xch a, r10
rolwc ax, 1
xch a, r13
rolc a, 1
xch a, r13
xch a, r12
rolc a, 1
xch a, r12
rolwc bc, 1
dec h
bnz $5b
;; Result is currently in (lsb) r13.r12. c. b. (msb),
;; Move it into (lsb) r8. r9. r10. r11 (msb).
mov a, r13
mov r8, a
mov a, r12
mov r9, a
mov a, c
mov r10, a
mov a, b
mov r11, a
ret
END_FUNC ___fixunssfsi
;; ------------------------------------------------------------------------
;; float ___floatsisf (long x) / float ___floatunsisf (unsigned long x)
;; Integer -> single-precision conversion with round-to-nearest-even.
;; The signed entry negates its argument and sets the sign bit on the
;; result; INT_MIN is special-cased since its negation overflows.
START_FUNC ___floatsisf
;; Converts its signed long argument into a floating point.
;; Argument in [SP+4]..[SP+7]. Result in R8..R11.
;; Get the argument.
movw ax, [SP+4]
movw bc, ax
movw ax, [SP+6]
;; Test the sign bit. If the value is positive then drop into
;; the unsigned conversion routine.
bf a.7, $2f
;; If negative convert to positive ...
movw hl, ax
movw ax, #0
subw ax, bc
movw bc, ax
movw ax, #0
sknc
decw ax
subw ax, hl
;; If the result is negative then the input was 0x80000000 (INT_MIN)
;; and we want to return -2147483648.0 (0xcf000000), which will not
;; happen if we call __int_floatunsisf.
bt a.7, $1f
;; Call the unsigned conversion routine.
call $!__int_floatunsisf
;; Negate the result.
set1 r11.7
;; Done.
ret
1: ;; Return -2147483648.0 aka 0xcf000000
clrb a
mov r8, a
mov r9, a
mov r10, a
mov a, #0xcf
mov r11, a
ret
START_ANOTHER_FUNC ___floatunsisf
;; Converts its unsigned long argument into a floating point.
;; Argument in [SP+4]..[SP+7]. Result in R8..R11.
;; Get the argument.
movw ax, [SP+4]
movw bc, ax
movw ax, [SP+6]
2: ;; Internal entry point from __floatsisf
;; Input in AX (high) and BC (low)
.global __int_floatunsisf
__int_floatunsisf:
;; Special case handling for zero.
cmpw ax, #0
bnz $1f
movw ax, bc
cmpw ax, #0
movw ax, #0
bnz $1f
;; Return 0.0
movw r8, ax
movw r10, ax
ret
1: ;; Pre-load the loop count/exponent.
;; Exponents are biased by 0x80 and we start the loop knowing that
;; we are going to skip the highest set bit. Hence the highest value
;; that we can get for the exponent is 0x1e (bits from input) + 0x80 = 0x9e.
mov h, #0x9e
;; Move bits off the top of AX:BC until we hit a 1 bit.
;; Decrement the count of remaining bits as we go.
2: shlw bc, 1
rolwc ax, 1
bc $3f
dec h
br $2b
;; Ignore the first one bit - it is implicit in the IEEE format.
;; The count of remaining bits is the exponent.
;; Assemble the final floating point value. We have...
;; before:
;; EEEEEEEE MMMMMMMM MMMMMMMM MMMMMMMM xxxxxxxx
;; H A X B C
;; after:
;; 0EEEEEEE EMMMMMMM MMMMMMMM MMMMMMMM
;; R11 R10 R9 R8
3: shrw ax, 1
mov r10, a
mov a, x
mov r9, a
mov a, b
rorc a, 1
;; If the bottom bit of B was set before we shifted it out then we
;; need to round the result up. Unless none of the bits in C are set.
;; In this case we are exactly half-way between two values, and we
;; round towards an even value. We round up by increasing the
;; mantissa by 1. If this results in a zero mantissa we have to
;; increment the exponent. We round down by ignoring the dropped bits.
bnc $4f
cmp0 c
sknz
bf a.0, $4f
5: ;; Round the mantissa up by 1.
add a, #1
addc r9, #0
addc r10, #0
bf r10.7, $4f
inc h ; mantissa overflowed: bump the exponent
clr1 r10.7
4: mov r8, a
mov a, h
shr a, 1 ; exponent LSB drops into CY...
mov r11, a
sknc
set1 r10.7 ; ...and lands in the top mantissa byte
ret
END_ANOTHER_FUNC ___floatunsisf
END_FUNC ___floatsisf
|
4ms/metamodule-plugin-sdk
| 1,937
|
plugin-libc/libgcc/config/rl78/umaxdi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; unsigned long long ___umaxdi3 (unsigned long long a, unsigned long long b)
;; Arguments at [sp+4..11] and [sp+12..19]; result in r8..r15.
;; The first operand is copied to the result registers up front, and
;; replaced by the second only if the second compares higher.
START_FUNC ___umaxdi3
; copy first argument/operand to the output registers
movw ax, [sp+4]
movw r8, ax
movw ax, [sp+6]
movw r10, ax
movw ax, [sp+8]
movw r12, ax
movw ax, [sp+10]
movw r14, ax
; use 16-bit compares from the most significant words down to the least significant ones
movw ax, [sp+18]
cmpw ax, r14
bh $.L1 ; second > first: take second
bnz $.L2 ; second < first: keep first
movw ax, [sp+16]
cmpw ax, r12
bh $.L1
bnz $.L2
movw ax, [sp+14]
cmpw ax, r10
bh $.L1
bnz $.L2
movw ax, [sp+12]
cmpw ax, r8
bh $.L1
ret ; equal or first larger: keep first
.L1:
; copy second argument/operand to the output registers
movw ax, [sp+12]
movw r8, ax
movw ax, [sp+14]
movw r10, ax
movw ax, [sp+16]
movw r12, ax
movw ax, [sp+18]
movw r14, ax
.L2:
ret
END_FUNC ___umaxdi3
|
4ms/metamodule-plugin-sdk
| 1,802
|
plugin-libc/libgcc/config/rl78/adddi3.S
|
; Copyright (C) 2017-2022 Free Software Foundation, Inc.
; Contributed by Sebastian Perta.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
.text
;; long long ___adddi3 (long long a, long long b)
;; 64-bit add: operands at [sp+4..11] and [sp+12..19], result in r8..r15.
;; Low word uses ADDW; the middle bytes use byte-wide ADDC to carry
;; through; the top word re-injects the final byte carry with SKNC/INCW.
START_FUNC ___adddi3
movw hl, sp ; use HL-based addressing (allows for direct addw)
movw ax, [hl+4]
addw ax, [hl+12]
movw r8, ax
mov a, [hl+6] ; middle bytes of the result are determined using 8-bit
addc a, [hl+14] ; ADDC insns which both account for and update the carry bit
mov r10, a ; (no ADDWC instruction is available)
mov a, [hl+7]
addc a, [hl+15]
mov r11, a
mov a, [hl+8]
addc a, [hl+16]
mov r12, a
mov a, [hl+9]
addc a, [hl+17]
mov r13, a
movw ax, [hl+10]
sknc ; account for the possible carry from the
incw ax ; latest 8-bit operation
addw ax, [hl+18]
movw r14, ax
ret
END_FUNC ___adddi3
|
4ms/metamodule-plugin-sdk
| 1,864
|
plugin-libc/libgcc/config/rl78/signbit.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
;; int signbitf (float X)
;; int signbit (double X)
;; int signbitl (long double X)
;;
;; `signbit' returns a nonzero value if the value of X has its sign
;; bit set.
;;
;; This is not the same as `x < 0.0', because IEEE 754 floating point
;; allows zero to be signed. The comparison `-0.0 < 0.0' is false,
;; but `signbit (-0.0)' will return a nonzero value.
;----------------------------------------------------------------------
.text
START_FUNC _signbit
START_ANOTHER_FUNC _signbitf
;; signbit/signbitf share one body: on RL78, double and float are both
;; 32-bit IEEE values, so the sign bit is bit 7 of the top byte.
;; X is at [sp+4]..[SP+7]
;; result is in R8..R9 (1 if the sign bit is set, else 0)
movw r8, #0
mov a, [sp+7]
mov1 cy, a.7 ; CY = sign bit
sknc
movw r8, #1
ret
END_ANOTHER_FUNC _signbitf
END_FUNC _signbit
START_FUNC _signbitl
;; signbitl: the long double argument is 8 bytes, so the sign bit is
;; bit 7 of its top byte.
;; X is at [sp+4]..[SP+11]
;; result is in R8..R9 (1 if the sign bit is set, else 0)
movw r8, #0
mov a, [sp+11]
mov1 cy, a.7 ; CY = sign bit
sknc
movw r8, #1
ret
END_FUNC _signbitl
|
4ms/metamodule-plugin-sdk
| 3,752
|
plugin-libc/libgcc/config/rl78/bit-count.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
#include "vregs.h"
START_FUNC ___clzhi2
;; Count leading zeros of a 16-bit value; returns 16 for zero input.
;; Argument is in [SP+4], return in R8.
movw ax, [SP+4]
.global __clzhi2_internal
__clzhi2_internal:
movw r8, #16 ; pre-load the all-zero answer
cmpw ax, #0
bz $clzhi2_is_zero
mov e, #0xff ; E = -1 so the first INC makes it 0
1:
inc e
shlw ax, 1 ; shift left until the first set bit lands in CY
bnc $1b
mov a, e ; E = number of shifts before the carry = clz
mov r8, a
clzhi2_is_zero:
ret
END_FUNC ___clzhi2
START_FUNC ___clzsi2
;; Count leading zeros of a 32-bit value.
;; Argument is in [SP+6]:[SP+4], return in R8.
movw ax, [SP+6]
cmpw ax, #0
bnz $__clzhi2_internal ; high word non-zero: result is clz(high)
movw ax, [SP+4]
call !__clzhi2_internal ; high word zero: result is clz(low) + 16
movw ax, r8
addw ax, #16
movw r8, ax
ret
END_FUNC ___clzsi2
START_FUNC ___ctzhi2
;; Count trailing zeros of a 16-bit value; returns 16 for zero input.
;; Argument is in [SP+4], return in R8.
movw ax, [SP+4]
.global __ctzhi2_internal
__ctzhi2_internal:
movw r8, #16 ; pre-load the all-zero answer
cmpw ax, #0
bz $ctzhi2_is_zero
mov e, #0xff ; E = -1 so the first INC makes it 0
1:
inc e
shrw ax, 1 ; shift right until the first set bit lands in CY
bnc $1b
mov a, e ; E = number of shifts before the carry = ctz
mov r8, a
ctzhi2_is_zero:
ret
END_FUNC ___ctzhi2
START_FUNC ___ctzsi2
;; Count trailing zeros of a 32-bit value.
;; Argument is in [SP+6]:[SP+4], return in R8.
movw ax, [SP+4]
cmpw ax, #0
bnz $__ctzhi2_internal ; low word non-zero: result is ctz(low)
movw ax, [SP+6]
call !__ctzhi2_internal ; low word zero: result is ctz(high) + 16
movw ax, r8
addw ax, #16
movw r8, ax
ret
END_FUNC ___ctzsi2
START_FUNC ___ffshi2
;; Find-first-set: 1-based index of the least significant set bit,
;; or 0 when the argument is zero.
;; Argument is in [SP+4], return in R8.
movw ax, [SP+4]
.global __ffshi2_internal
__ffshi2_internal:
movw r8, #0 ; pre-load the "no bit set" answer
cmpw ax, #0
bz $ffshi2_is_zero
mov e, #0 ; E counts shifts; result is 1-based
1:
inc e
shrw ax, 1 ; shift right until the first set bit lands in CY
bnc $1b
mov a, e
mov r8, a
ffshi2_is_zero:
ret
END_FUNC ___ffshi2
START_FUNC ___ffssi2
;; 32-bit find-first-set; returns 0 when the whole value is zero.
;; Argument is in [SP+6]:[SP+4], return in R8.
movw ax, [SP+4]
cmpw ax, #0
bnz $__ffshi2_internal ; low word non-zero: result is ffs(low)
movw ax, [SP+6]
cmpw ax, #0
bz $1f ; both words zero: AX == 0 becomes R8 = 0
call !__ffshi2_internal
movw ax, r8
addw ax, #16 ; bit found in the high word: bias by 16
1:
movw r8, ax
ret
END_FUNC ___ffssi2
START_FUNC ___parityqi_internal
;; Parity of the byte in A: R8 = 1 if an odd number of bits are set,
;; else 0.  Internal helper shared by the hi/si entry points below.
mov1 cy, a.0
xor1 cy, a.1 ; fold all eight bits into CY
xor1 cy, a.2
xor1 cy, a.3
xor1 cy, a.4
xor1 cy, a.5
xor1 cy, a.6
xor1 cy, a.7
movw ax, #0 ; MOVW does not disturb CY
bnc $1f
incw ax ; odd parity: return 1
1:
movw r8, ax
ret
END_FUNC ___parityqi_internal
START_FUNC ___parityhi2
;; 16-bit parity.  Argument is in [SP+4], return in R8.
;; parity(x) == parity(high_byte ^ low_byte), so fold and tail-branch.
movw ax, [SP+4]
xor a, x
br $___parityqi_internal
END_FUNC ___parityhi2
START_FUNC ___paritysi2
;; 32-bit parity.  Argument is in [SP+6]:[SP+4], return in R8.
;; XOR all four bytes together, then reuse the byte-parity helper.
movw ax, [SP+4]
xor a, x
mov b, a ; B = low-word fold
movw ax, [SP+6]
xor a, x
xor a, b ; A = fold of all four bytes
br $___parityqi_internal
END_FUNC ___paritysi2
START_FUNC ___popcounthi2
;; 16-bit popcount.  Argument is in [SP+4], return in R8.
;; Branch (not call) into the byte loop with D = byte count, so the
;; helper runs in the same frame and [sp+4] still addresses the arg.
mov d, #2
br $___popcountqi_internal
END_FUNC ___popcounthi2
START_FUNC ___popcountsi2
;; 32-bit popcount.  Argument is in [SP+6]:[SP+4], return in R8.
;; Branch (not call) into the byte loop with D = byte count, so the
;; helper runs in the same frame and [sp+4] still addresses the arg.
mov d, #4
br $___popcountqi_internal
END_FUNC ___popcountsi2
START_FUNC ___popcountqi_internal
;; Count the set bits in the D bytes starting at [sp+4]; return in R8.
;; Entered by branch from the hi/si entry points above.
;; There are D bytes starting at [HL]
;; store count in R8.
movw ax, sp
addw ax, #4
movw hl, ax
mov a, #0 ; A = running bit count
1:
xch a, b ; load the next byte into B without clobbering A
mov a, [hl]
xch a, b
mov e, #8
2:
shl b,1 ; shift each bit into CY ...
addc a, #0 ; ... and accumulate it into the count
dec e
bnz $2b
incw hl
dec d
bnz $1b
mov x, a ; zero-extend the byte count into the 16-bit result
mov a, #0
movw r8, ax
ret
END_FUNC ___popcountqi_internal
|
4ms/metamodule-plugin-sdk
| 4,963
|
plugin-libc/libgcc/config/rl78/mulsi3.S
|
; Copyright (C) 2011-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
;; 32x32=32 multiply
#include "vregs.h"
;----------------------------------------------------------------------
; Register use:
; RB0 RB1 RB2
; AX op2L res32L res32H
; BC op2H (resH) op1
; DE count (resL-tmp)
; HL [sp+4]
; Register use (G10):
;
; AX op2L
; BC op2H
; DE count
; HL [sp+4]
; r8/r9 res32L
; r10/r11 (resH)
; r12/r13 (resL-tmp)
; r16/r17 res32H
; r18/r19 op1
START_FUNC ___mulsi3
;; 32x32=32 multiply: result = A * B (truncated to 32 bits).
;; A is at [sp+4]
;; B is at [sp+8]
;; result is in R8..R11
;; NOTE(review): after the register saves below the stack pointer has
;; moved 4 bytes, so the operands are addressed as A = [sp+8]/[sp+10]
;; and B = [sp+12]/[sp+14] throughout the body.
#ifdef __RL78_G10__
movw ax, r16
push ax
movw ax, r18
push ax
#else
sel rb2
push ax
push bc
sel rb0
#endif
;; Clear the accumulator: R8 collects partial sums, R16 is scratch.
clrw ax
movw r8, ax
movw r16, ax
;; --- cross product: B.hi * A.lo (only its low 16 bits matter) ---
movw ax, [sp+14]
cmpw ax, #0
bz $1f ; B.hi == 0 contributes nothing
cmpw ax, #0xffff
bnz $2f
;; B.hi == 0xffff: shortcut, R8 -= A.lo (multiply by -1 mod 2^16)
movw ax, [sp+8]
#ifdef __RL78_G10__
push bc
movw bc, r8
xchw ax, bc
subw ax, bc
movw r8, ax
movw ax, bc
pop bc
#else
sel rb1
subw ax, r_0
sel rb0
#endif
br $1f
2:
movw bc, ax
movw ax, [sp+8]
cmpw ax, #0
skz
call !.Lmul_hi ; R8 += A.lo * B.hi
1:
;; --- cross product: A.hi * B.lo (same pattern as above) ---
movw ax, [sp+10]
cmpw ax, #0
bz $1f
cmpw ax, #0xffff
bnz $2f
movw ax, [sp+12]
#ifdef __RL78_G10__
push bc
movw bc, r8
xchw ax, bc
subw ax, bc
movw r8, ax
movw ax, bc
pop bc
#else
sel rb1
subw ax, r_0
sel rb0
#endif
br $1f
2:
movw bc, ax
movw ax, [sp+12]
cmpw ax, #0
skz
call !.Lmul_hi ; R8 += A.hi * B.lo
1:
;; The cross products form the high word of the result; move them to
;; R16 and restart R8 for the full 16x16->32 low product.
movw ax, r8
movw r16, ax
clrw ax
movw r8, ax
;; now do R16:R8 += op1L * op2L
;; op1 is in AX.0 (needs to shrw)
;; op2 is in BC.2 and BC.1 (bc can shlw/rolcw)
;; res is in AX.2 and AX.1 (needs to addw)
movw ax, [sp+8]
movw r10, ax ; BC.1
movw ax, [sp+12]
;; Keep the smaller operand in AX so the shift-add loop ends sooner.
cmpw ax, r10
bc $.Lmul_hisi_top
movw bc, r10
movw r10, ax
movw ax, bc
.Lmul_hisi_top:
movw bc, #0
.Lmul_hisi_loop:
shrw ax, 1
#ifdef __RL78_G10__
push ax
bnc $.Lmul_hisi_no_add_g10
movw ax, r8
addw ax, r10
movw r8, ax
sknc
incw r16
movw ax, r16
addw ax, r_2
movw r16, ax
.Lmul_hisi_no_add_g10:
movw ax, r10
shlw ax, 1
movw r10, ax
pop ax
#else
bnc $.Lmul_hisi_no_add
sel rb1
addw ax, bc
sel rb2
sknc
incw ax
addw ax, r_2
.Lmul_hisi_no_add:
sel rb1
shlw bc, 1
sel rb0
#endif
rolwc bc, 1
cmpw ax, #0
bz $.Lmul_hisi_done
;; Second, unrolled copy of the loop body.
shrw ax, 1
#ifdef __RL78_G10__
push ax
bnc $.Lmul_hisi_no_add2_g10
movw ax, r8
addw ax, r10
movw r8, ax
movw ax, r16
sknc
incw ax
addw ax, r_2
movw r16, ax
.Lmul_hisi_no_add2_g10:
movw ax, r10
shlw ax, 1
movw r10, ax
pop ax
#else
bnc $.Lmul_hisi_no_add2
sel rb1
addw ax, bc
sel rb2
sknc
incw ax
addw ax, r_2
.Lmul_hisi_no_add2:
sel rb1
shlw bc, 1
sel rb0
#endif
rolwc bc, 1
cmpw ax, #0
bnz $.Lmul_hisi_loop
.Lmul_hisi_done:
;; R10:R11 = high word of the result; restore the saved registers.
movw ax, r16
movw r10, ax
#ifdef __RL78_G10__
pop ax
movw r18, ax
pop ax
movw r16, ax
#else
sel rb2
pop bc
pop ax
sel rb0
#endif
ret
END_FUNC ___mulsi3
;----------------------------------------------------------------------
START_FUNC ___mulhi3
;; 16x16=16 multiply: R8 = [sp+4] * [sp+6].
;; Shift-add algorithm; the internal entry .Lmul_hi (R8 += AX * BC)
;; is also called from ___mulsi3 for the cross products.
movw r8, #0
movw ax, [sp+6]
movw bc, ax
movw ax, [sp+4]
;; R8 += AX * BC
.Lmul_hi:
;; Keep the smaller operand in AX: the loop ends when AX is exhausted.
cmpw ax, bc
skc
xchw ax, bc
br $.Lmul_hi_loop
.Lmul_hi_top:
;; A set bit fell out of AX: add the shifted BC into R8.
#ifdef __RL78_G10__
push ax
movw ax, r8
addw ax, r_2
movw r8, ax
pop ax
#else
sel rb1
addw ax, r_2
sel rb0
#endif
.Lmul_hi_no_add:
shlw bc, 1
.Lmul_hi_loop:
shrw ax, 1
bc $.Lmul_hi_top
cmpw ax, #0
bz $.Lmul_hi_done
;; Unrolled second iteration of the same loop body.
shlw bc, 1
shrw ax, 1
bc $.Lmul_hi_top
cmpw ax, #0
bnz $.Lmul_hi_no_add
.Lmul_hi_done:
ret
END_FUNC ___mulhi3
;;; --------------------------------------
#ifdef __RL78_G10__
;; 8x8=8 multiply for G10 parts (no register banks): R8 = [sp+4]*[sp+6].
;; Classic shift-add: r9 = multiplicand (doubled each pass),
;; r10 = multiplier (halved each pass), r11 = iteration bound,
;; r8 = accumulator.
START_FUNC ___mulqi3
mov a, [sp+4]
mov r9, a
mov a, [sp+6]
mov r10, a
mov a, #9
mov r11, a
clrb a
mov r8, a
.L2:
;; Exit when the multiplier is exhausted or the bound reaches zero.
cmp0 r10
skz
dec r11
sknz
ret
mov a, r10
and a, #1
mov r12, a
cmp0 r12
sknz
br !!.L3
;; Low multiplier bit set: accumulate the current multiplicand.
mov a, r9
mov l, a
mov a, r8
add a, l
mov r8, a
.L3:
mov a, r9
add a, a ; multiplicand <<= 1
mov r9, a
mov a, r10
shr a, 1 ; multiplier >>= 1
mov r10, a
br !!.L2
END_FUNC ___mulqi3
#endif
|
4ms/metamodule-plugin-sdk
| 3,011
|
plugin-libc/libgcc/config/msp430/srai.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
.text
.section .text.__mspabi_srai_n
;; Constant-count arithmetic right shifts (R12 >>= N).  Each entry
;; performs one RRA and falls through into the next smaller count, so
;; entering at __mspabi_srai_N executes N shifts before the shared
;; return at the bottom.
.macro _srai n
.global __mspabi_srai_\n
__mspabi_srai_\n:
RRA.W R12
.endm
/* Arithmetic Right Shift - R12 -> R12. */
_srai 15
_srai 14
_srai 13
_srai 12
_srai 11
_srai 10
_srai 9
_srai 8
_srai 7
_srai 6
_srai 5
_srai 4
_srai 3
_srai 2
_srai 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
.section .text.__mspabi_srai
;; Variable-count arithmetic right shift: R12 >>= R13.
;; The loop body sits above the entry label so the entry falls
;; straight into the count test (zero-count is a no-op).
1: ADD.W #-1,R13
RRA.W R12,R12
.global __mspabi_srai
__mspabi_srai:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
#ifdef __MSP430X__
.section .text.__gnu_mspabi_srap
;; Pointer-width (20-bit, 430X) arithmetic right shift: R12 >>= R13.
1: ADDA #-1,R13
RRAX.A R12,R12
.global __gnu_mspabi_srap
__gnu_mspabi_srap:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif /* __MSP430X_LARGE__ */
#endif /* __MSP430X__ */
/* Arithmetic Right Shift - R12:R13 -> R12:R13. */
.section .text.__mspabi_sral_n
;; Constant-count 32-bit arithmetic right shifts.  Each entry shifts
;; the R13:R12 pair right once (RRA on the high word, RRC to carry the
;; bit into the low word) and falls through to the next smaller count.
.macro _sral n
.global __mspabi_sral_\n
__mspabi_sral_\n:
RRA.W R13
RRC.W R12
.endm
_sral 15
_sral 14
_sral 13
_sral 12
_sral 11
_sral 10
_sral 9
_sral 8
_sral 7
_sral 6
_sral 5
_sral 4
_sral 3
_sral 2
_sral 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
.section .text.__mspabi_sral
;; Variable-count 32-bit arithmetic right shift: R13:R12 >>= R14.
;; Loop body above the entry label; zero-count falls through to return.
1: ADD.W #-1,R14
RRA.W R13
RRC.W R12
.global __mspabi_sral
__mspabi_sral:
CMP #0,R14
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
/* Arithmetic Right Shift - R8:R11 -> R12:R15
A 64-bit argument would normally be passed in R12:R15, but __mspabi_srall has
special conventions, so the 64-bit value to shift is passed in R8:R11.
According to the MSPABI, the shift amount is a 64-bit value in R12:R15, but
we only use the low word in R12. */
.section .text.__mspabi_srall
.global __mspabi_srall
__mspabi_srall:
;; Relocate the value into R12:R15 (return position), keeping the
;; shift count; then rotate the carry down through all four words.
MOV R11, R15 ; Free up R11 first
MOV R12, R11 ; Save the shift amount in R11
MOV R10, R14
MOV R9, R13
MOV R8, R12
CMP #0, R11 ; zero shift: value already in place
JNZ 1f
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
1:
RRA R15 ; arithmetic shift of the top word ...
RRC R14 ; ... carry ripples into the lower words
RRC R13
RRC R12
ADD #-1,R11
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
|
4ms/metamodule-plugin-sdk
| 1,510
|
plugin-libc/libgcc/config/msp430/epilogue.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
.text
;; Shared function epilogues (MSPABI helper convention).
;; __mspabi_func_epilog_N pops the N callee-saved registers
;; R(11-N)..R10 and returns: each entry pops one register and falls
;; through into the next, ending at the shared return.
.global __mspabi_func_epilog_7
.global __mspabi_func_epilog_6
.global __mspabi_func_epilog_5
.global __mspabi_func_epilog_4
.global __mspabi_func_epilog_3
.global __mspabi_func_epilog_2
.global __mspabi_func_epilog_1
__mspabi_func_epilog_7:
POP R4
__mspabi_func_epilog_6:
POP R5
__mspabi_func_epilog_5:
POP R6
__mspabi_func_epilog_4:
POP R7
__mspabi_func_epilog_3:
POP R8
__mspabi_func_epilog_2:
POP R9
__mspabi_func_epilog_1:
POP R10
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
|
4ms/metamodule-plugin-sdk
| 3,019
|
plugin-libc/libgcc/config/msp430/slli.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
.text
/* Logical Left Shift - R12 -> R12. */
.section .text.__mspabi_slli_n
;; Constant-count left shifts: ADD R12,R12 shifts left by one; each
;; entry falls through into the next smaller count, so entering at
;; __mspabi_slli_N shifts N times before the shared return.
.macro _slli n
.global __mspabi_slli_\n
__mspabi_slli_\n:
ADD.W R12,R12
.endm
_slli 15
_slli 14
_slli 13
_slli 12
_slli 11
_slli 10
_slli 9
_slli 8
_slli 7
_slli 6
_slli 5
_slli 4
_slli 3
_slli 2
_slli 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
.section .text.__mspabi_slli
;; Variable-count left shift: R12 <<= R13.
;; Loop body above the entry label; zero-count falls through to return.
1: ADD.W #-1,R13
ADD.W R12,R12
.global __mspabi_slli
__mspabi_slli:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
#ifdef __MSP430X__
.section .text.__gnu_mspabi_sllp
;; Pointer-width (20-bit, 430X) left shift: R12 <<= R13.
1: ADDA #-1,R13
ADDA R12,R12
.global __gnu_mspabi_sllp
__gnu_mspabi_sllp:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif /* __MSP430X_LARGE__ */
#endif /* __MSP430X__ */
/* Logical Left Shift - R12:R13 -> R12:R13. */
.section .text.__mspabi_slll_n
;; Constant-count 32-bit left shifts: ADD/ADDC shift the R13:R12 pair
;; left by one; each entry falls through to the next smaller count.
.macro _slll n
.global __mspabi_slll_\n
__mspabi_slll_\n:
ADD.W R12,R12
ADDC.W R13,R13
.endm
_slll 15
_slll 14
_slll 13
_slll 12
_slll 11
_slll 10
_slll 9
_slll 8
_slll 7
_slll 6
_slll 5
_slll 4
_slll 3
_slll 2
_slll 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
.section .text.__mspabi_slll
;; Variable-count 32-bit left shift: R13:R12 <<= R14.
;; Loop body above the entry label; zero-count falls through to return.
1: ADD.W #-1,R14
ADD.W R12,R12
ADDC.W R13,R13
.global __mspabi_slll
__mspabi_slll:
CMP #0,R14
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
/* Logical Left Shift - R8:R11 -> R12:R15
A 64-bit argument would normally be passed in R12:R15, but __mspabi_sllll has
special conventions, so the 64-bit value to shift is passed in R8:R11.
According to the MSPABI, the shift amount is a 64-bit value in R12:R15, but
we only use the low word in R12. */
.section .text.__mspabi_sllll
.global __mspabi_sllll
__mspabi_sllll:
;; Relocate the value into R12:R15 (return position), keeping the
;; shift count; then rotate the carry up through all four words.
MOV R11, R15 ; Free up R11 first
MOV R12, R11 ; Save the shift amount in R11
MOV R10, R14
MOV R9, R13
MOV R8, R12
CMP #0,R11 ; zero shift: value already in place
JNZ 1f
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
1:
RLA R12 ; shift the low word ...
RLC R13 ; ... carry ripples into the upper words
RLC R14
RLC R15
ADD #-1,R11
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
|
4ms/metamodule-plugin-sdk
| 3,037
|
plugin-libc/libgcc/config/msp430/srli.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
.text
.section .text.__mspabi_srli_n
;; Constant-count logical right shifts: CLRC + RRC shifts a zero into
;; the top bit; each entry falls through into the next smaller count,
;; so entering at __mspabi_srli_N shifts N times.
.macro _srli n
.global __mspabi_srli_\n
__mspabi_srli_\n:
CLRC
RRC.W R12
.endm
/* Logical Right Shift - R12 -> R12. */
_srli 15
_srli 14
_srli 13
_srli 12
_srli 11
_srli 10
_srli 9
_srli 8
_srli 7
_srli 6
_srli 5
_srli 4
_srli 3
_srli 2
_srli 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
.section .text.__mspabi_srli
;; Variable-count logical right shift: R12 >>= R13 (zero-fill).
;; Loop body above the entry label; zero-count falls through to return.
1: ADD.W #-1,R13
CLRC
RRC.W R12,R12
.global __mspabi_srli
__mspabi_srli:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
#ifdef __MSP430X__
.section .text.__gnu_mspabi_srlp
;; Pointer-width (20-bit, 430X) logical right shift: R12 >>= R13.
1: ADDA #-1,R13
CLRC
RRCX.A R12,R12
.global __gnu_mspabi_srlp
__gnu_mspabi_srlp:
CMP #0,R13
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif /* __MSP430X_LARGE__ */
#endif /* __MSP430X__ */
/* Logical Right Shift - R12:R13 -> R12:R13. */
.section .text.__mspabi_srll_n
;; Constant-count 32-bit logical right shifts: zero enters the top of
;; R13, the carry ripples into R12; entries fall through downwards.
.macro _srll n
.global __mspabi_srll_\n
__mspabi_srll_\n:
CLRC
RRC.W R13
RRC.W R12
.endm
_srll 15
_srll 14
_srll 13
_srll 12
_srll 11
_srll 10
_srll 9
_srll 8
_srll 7
_srll 6
_srll 5
_srll 4
_srll 3
_srll 2
_srll 1
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
.section .text.__mspabi_srll
;; Variable-count 32-bit logical right shift: R13:R12 >>= R14.
;; Loop body above the entry label; zero-count falls through to return.
1: ADD.W #-1,R14
CLRC
RRC.W R13
RRC.W R12
.global __mspabi_srll
__mspabi_srll:
CMP #0,R14
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
/* Logical Right Shift - R8:R11 -> R12:R15
A 64-bit argument would normally be passed in R12:R15, but __mspabi_srlll has
special conventions, so the 64-bit value to shift is passed in R8:R11.
According to the MSPABI, the shift amount is a 64-bit value in R12:R15, but
we only use the low word in R12. */
.section .text.__mspabi_srlll
.global __mspabi_srlll
__mspabi_srlll:
;; Relocate the value into R12:R15 (return position), keeping the
;; shift count; then shift a zero in at the top each iteration.
MOV R11, R15 ; Free up R11 first
MOV R12, R11 ; Save the shift amount in R11
MOV R10, R14
MOV R9, R13
MOV R8, R12
CMP #0,R11 ; zero shift: value already in place
JNZ 1f
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
1:
CLRC
RRC R15 ; zero enters the top word ...
RRC R14 ; ... carry ripples into the lower words
RRC R13
RRC R12
ADD #-1,R11
JNZ 1b
#ifdef __MSP430X_LARGE__
RETA
#else
RET
#endif
|
4ms/metamodule-plugin-sdk
| 16,846
|
plugin-libc/libgcc/config/msp430/lib2hw_mul.S
|
; Copyright (C) 2014-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
;; Macro to start a multiply function. Each function has three
;; names, and hence three entry points - although they all go
;; through the same code. The first name is the version generated
;; by GCC. The second is the MSP430 EABI mandated name for the
;; *software* version of the function. The third is the EABI
;; mandated name for the *hardware* version of the function.
;;
;; Since we are using the hardware and software names to point
;; to the same code this effectively means that we are mapping
;; the software function onto the hardware function. Thus if
;; the library containing this code is linked into an application
;; (before the libgcc.a library) *all* multiply functions will
;; be mapped onto the hardware versions.
;;
;; We construct each function in its own section so that linker
;; garbage collection can be used to delete any unused functions
;; from this file.
.macro start_func gcc_name eabi_soft_name eabi_hard_name
;; Open one multiply routine: emit all three entry points (GCC name,
;; EABI software name, EABI hardware name) at the same address, then
;; disable interrupts so the shared hardware-multiplier registers are
;; used atomically.  Matching close is the end_func macro below.
.pushsection .text.\gcc_name,"ax",@progbits
.p2align 1
.global \eabi_hard_name
.type \eabi_hard_name , @function
\eabi_hard_name:
.global \eabi_soft_name
.type \eabi_soft_name , @function
\eabi_soft_name:
.global \gcc_name
.type \gcc_name , @function
\gcc_name:
PUSH.W sr ; Save current interrupt state
DINT ; Disable interrupts
NOP ; Account for latency
.endm
;; End a function started with the start_func macro.
;; Restores the interrupt state saved by start_func and returns.
.macro end_func name
#ifdef __MSP430X_LARGE__
POP.W sr ; restore the caller's interrupt state
RETA
#else
RETI ; pops SR (pushed by start_func) and then PC
#endif
.size \name , . - \name
.popsection
.endm
;; Like the start_func macro except that it is used to
;; create a false entry point that just jumps to the
;; software function (implemented elsewhere).
;; The branch is a tail-call, so the software routine returns
;; directly to the original caller.
.macro fake_func gcc_name eabi_soft_name eabi_hard_name
.pushsection .text.\gcc_name,"ax",@progbits
.p2align 1
.global \eabi_hard_name
.type \eabi_hard_name , @function
\eabi_hard_name:
.global \gcc_name
.type \gcc_name , @function
\gcc_name:
#ifdef __MSP430X_LARGE__
BRA #\eabi_soft_name
#else
BR #\eabi_soft_name
#endif
.size \gcc_name , . - \gcc_name
.popsection
.endm
.macro mult16 OP1, OP2, RESULT
;* * 16-bit hardware multiply: int16 = int16 * int16
;*
;* - Operand 1 is in R12
;* - Operand 2 is in R13
;* - Result is in R12
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1 ; Load operand 1 into multiplier
MOV.W r13, &\OP2 ; Load operand 2 which triggers MPY
MOV.W &\RESULT, r12 ; Move result into return register
.endm
.macro mult1632 OP1, OP2, RESLO, RESHI
;* * 16-bit hardware multiply with a 32-bit result:
;* int32 = int16 * int16
;* uint32 = uint16 * uint16
;*
;* - Operand 1 is in R12
;* - Operand 2 is in R13
;* - Result is in R12, R13
;*
;* Signedness is selected by the caller via the OP1 macro argument
;* (MPY for unsigned, MPYS for signed).
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1 ; Load operand 1 into multiplier
MOV.W r13, &\OP2 ; Load operand 2 which triggers MPY
MOV.W &\RESLO, r12 ; Move low result into return register
MOV.W &\RESHI, r13 ; Move high result into return register
.endm
.macro mult32 OP1, OP2, MAC_OP1, MAC_OP2, RESLO, RESHI
;* * 32-bit hardware multiply with a 32-bit result using 16 multiply and accumulate:
;* int32 = int32 * int32
;*
;* - Operand 1 is in R12, R13
;* - Operand 2 is in R14, R15
;* - Result is in R12, R13
;*
;* Computes the three 16x16 partial products that contribute to the
;* truncated 32-bit result, accumulating the high-word terms via MAC.
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13, R14, R15
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1 ; Load operand 1 Low into multiplier
MOV.W r14, &\OP2 ; Load operand 2 Low which triggers MPY
MOV.W r12, &\MAC_OP1 ; Load operand 1 Low into mac
MOV.W &\RESLO, r12 ; Low 16-bits of result ready for return
MOV.W &\RESHI, &\RESLO ; MOV intermediate mpy high into low
MOV.W r15, &\MAC_OP2 ; Load operand 2 High, trigger MAC
MOV.W r13, &\MAC_OP1 ; Load operand 1 High
MOV.W r14, &\MAC_OP2 ; Load operand 2 Lo, trigger MAC
MOV.W &\RESLO, r13 ; Upper 16-bits result ready for return
.endm
.macro mult32_hw OP1_LO OP1_HI OP2_LO OP2_HI RESLO RESHI
;* * 32-bit hardware multiply with a 32-bit result
;* int32 = int32 * int32
;*
;* - Operand 1 is in R12, R13
;* - Operand 2 is in R14, R15
;* - Result is in R12, R13
;*
;* Uses the MPY32 peripheral directly: writing the high word of
;* operand 2 triggers the multiply.
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13, R14, R15
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1_LO ; Load operand 1 Low into multiplier
MOV.W r13, &\OP1_HI ; Load operand 1 High into multiplier
MOV.W r14, &\OP2_LO ; Load operand 2 Low into multiplier
MOV.W r15, &\OP2_HI ; Load operand 2 High, trigger MPY
MOV.W &\RESLO, r12 ; Ready low 16-bits for return
MOV.W &\RESHI, r13 ; Ready high 16-bits for return
.endm
.macro mult3264_hw OP1_LO OP1_HI OP2_LO OP2_HI RES0 RES1 RES2 RES3
;* * 32-bit hardware multiply with a 64-bit result
;* int64 = int32 * int32
;* uint64 = uint32 * uint32
;*
;* - Operand 1 is in R12, R13
;* - Operand 2 is in R14, R15
;* - Result is in R12, R13, R14, R15
;*
;* Signedness is selected by the caller via the OP1 macro arguments
;* (MPY32 registers for unsigned, MPYS32 for signed).
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R12, R13, R14, R15
;*
;* Macro arguments are the memory locations of the hardware registers.
MOV.W r12, &\OP1_LO ; Load operand 1 Low into multiplier
MOV.W r13, &\OP1_HI ; Load operand 1 High into multiplier
MOV.W r14, &\OP2_LO ; Load operand 2 Low into multiplier
MOV.W r15, &\OP2_HI ; Load operand 2 High, trigger MPY
MOV.W &\RES0, R12 ; Ready low 16-bits for return
MOV.W &\RES1, R13 ;
MOV.W &\RES2, R14 ;
MOV.W &\RES3, R15 ; Ready high 16-bits for return
.endm
.macro mult64_hw MPY32_LO MPY32_HI OP2_LO OP2_HI RES0 RES1 RES2 RES3
;* * 64-bit hardware multiply with a 64-bit result
;* int64 = int64 * int64
;*
;* - Operand 1 is in R8, R9, R10, R11
;* - Operand 2 is in R12, R13, R14, R15
;* - Result is in R12, R13, R14, R15
;*
;* 64-bit multiplication is achieved using the 32-bit hardware multiplier with
;* the following equation:
;* R12:R15 = (R8:R9 * R12:R13) + ((R8:R9 * R14:R15) << 32) + ((R10:R11 * R12:R13) << 32)
;*
;* The left shift by 32 is handled with minimal cost by saving the two low
;* words and discarding the two high words.
;*
;* To ensure that the multiply is performed atomically, interrupts are
;* disabled upon routine entry. Interrupt state is restored upon exit.
;*
;* Registers used: R6, R7, R8, R9, R10, R11, R12, R13, R14, R15
;*
;* Macro arguments are the memory locations of the hardware registers.
;*
#if defined(__MSP430X_LARGE__)
PUSHM.A #5, R10
#elif defined(__MSP430X__)
PUSHM.W #5, R10
#else
PUSH R10 { PUSH R9 { PUSH R8 { PUSH R7 { PUSH R6
#endif
; Multiply the low 32-bits of op0 and the high 32-bits of op1.
MOV.W R8, &\MPY32_LO
MOV.W R9, &\MPY32_HI
MOV.W R14, &\OP2_LO
MOV.W R15, &\OP2_HI
; Save the low 32-bits of the result.
MOV.W &\RES0, R6
MOV.W &\RES1, R7
; Multiply the high 32-bits of op0 and the low 32-bits of op1.
MOV.W R10, &\MPY32_LO
MOV.W R11, &\MPY32_HI
MOV.W R12, &\OP2_LO
MOV.W R13, &\OP2_HI
; Add the low 32-bits of the result to the previously saved result.
ADD.W &\RES0, R6
ADDC.W &\RES1, R7
; Multiply the low 32-bits of op0 and op1.
MOV.W R8, &\MPY32_LO
MOV.W R9, &\MPY32_HI
MOV.W R12, &\OP2_LO
MOV.W R13, &\OP2_HI
; Write the return values
MOV.W &\RES0, R12
MOV.W &\RES1, R13
MOV.W &\RES2, R14
MOV.W &\RES3, R15
; Add the saved low 32-bit results from earlier to the high 32-bits of
; this result, effectively shifting those two results left by 32 bits.
ADD.W R6, R14
ADDC.W R7, R15
#if defined(__MSP430X_LARGE__)
POPM.A #5, R10
#elif defined(__MSP430X__)
POPM.W #5, R10
#else
POP R6 { POP R7 { POP R8 { POP R9 { POP R10
#endif
.endm
;; EABI mandated names:
;;
;; int16 __mspabi_mpyi (int16 x, int16 y)
;; Multiply int by int.
;; int16 __mspabi_mpyi_hw (int16 x, int16 y)
;; Multiply int by int. Uses hardware MPY16 or MPY32.
;; int16 __mspabi_mpyi_f5hw (int16 x, int16 y)
;; Multiply int by int. Uses hardware MPY32 (F5xx devices and up).
;;
;; int32 __mspabi_mpyl (int32 x, int32 y);
;; Multiply long by long.
;; int32 __mspabi_mpyl_hw (int32 x, int32 y)
;; Multiply long by long. Uses hardware MPY16.
;; int32 __mspabi_mpyl_hw32 (int32 x, int32 y)
;; Multiply long by long. Uses hardware MPY32 (F4xx devices).
;; int32 __mspabi_mpyl_f5hw (int32 x, int32 y)
;; Multiply long by long. Uses hardware MPY32 (F5xx devices and up).
;;
;; int64 __mspabi_mpyll (int64 x, int64 y)
;; Multiply long long by long long.
;; int64 __mspabi_mpyll_hw (int64 x, int64 y)
;; Multiply long long by long long. Uses hardware MPY16.
;; int64 __mspabi_mpyll_hw32 (int64 x, int64 y)
;; Multiply long long by long long. Uses hardware MPY32 (F4xx devices).
;; int64 __mspabi_mpyll_f5hw (int64 x, int64 y)
;; Multiply long long by long long. Uses hardware MPY32 (F5xx devices and up).
;;
;; int32 __mspabi_mpysl (int16 x, int16 y)
;; Multiply int by int; result is long.
;; int32 __mspabi_mpysl_hw(int16 x, int16 y)
;; Multiply int by int; result is long. Uses hardware MPY16 or MPY32
;; int32 __mspabi_mpysl_f5hw(int16 x, int16 y)
;; Multiply int by int; result is long. Uses hardware MPY32 (F5xx devices and up).
;;
;; int64 __mspabi_mpysll(int32 x, int32 y)
;; Multiply long by long; result is long long.
;; int64 __mspabi_mpysll_hw(int32 x, int32 y)
;; Multiply long by long; result is long long. Uses hardware MPY16.
;; int64 __mspabi_mpysll_hw32(int32 x, int32 y)
;; Multiply long by long; result is long long. Uses hardware MPY32 (F4xx devices).
;; int64 __mspabi_mpysll_f5hw(int32 x, int32 y)
;; Multiply long by long; result is long long. Uses hardware MPY32 (F5xx devices and up).
;;
;; uint32 __mspabi_mpyul(uint16 x, uint16 y)
;; Multiply unsigned int by unsigned int; result is unsigned long.
;; uint32 __mspabi_mpyul_hw(uint16 x, uint16 y)
;; Multiply unsigned int by unsigned int; result is unsigned long. Uses hardware MPY16 or MPY32
;; uint32 __mspabi_mpyul_f5hw(uint16 x, uint16 y)
;; Multiply unsigned int by unsigned int; result is unsigned long. Uses hardware MPY32 (F5xx devices and up).
;;
;; uint64 __mspabi_mpyull(uint32 x, uint32 y)
;; Multiply unsigned long by unsigned long; result is unsigned long long.
;; uint64 __mspabi_mpyull_hw(uint32 x, uint32 y)
;; Multiply unsigned long by unsigned long; result is unsigned long long. Uses hardware MPY16
;; uint64 __mspabi_mpyull_hw32(uint32 x, uint32 y)
;; Multiply unsigned long by unsigned long; result is unsigned long long. Uses hardware MPY32 (F4xx devices).
;; uint64 __mspabi_mpyull_f5hw(uint32 x, uint32 y)
;; Multiply unsigned long by unsigned long; result is unsigned long long. Uses hardware MPY32 (F5xx devices and up)
;;;; The register names below are the standardised versions used across TI
;;;; literature.
;; Hardware multiply register addresses for devices with 16-bit hardware
;; multiply.
.set MPY, 0x0130
.set MPYS, 0x0132
.set MAC, 0x0134
.set OP2, 0x0138
.set RESLO, 0x013A
.set RESHI, 0x013C
;; Hardware multiply register addresses for devices with 32-bit (non-f5)
;; hardware multiply.
.set MPY32L, 0x0140
.set MPY32H, 0x0142
.set MPYS32L, 0x0144
.set MPYS32H, 0x0146
.set OP2L, 0x0150
.set OP2H, 0x0152
.set RES0, 0x0154
.set RES1, 0x0156
.set RES2, 0x0158
.set RES3, 0x015A
;; Hardware multiply register addresses for devices with f5series hardware
;; multiply.
;; The F5xxx series of MCUs support the same 16-bit and 32-bit multiply
;; as the second generation hardware, but they are accessed from different
;; memory registers.
;; These names AREN'T standard. We've appended _F5 to the standard names.
.set MPY_F5, 0x04C0
.set MPYS_F5, 0x04C2
.set MAC_F5, 0x04C4
.set OP2_F5, 0x04C8
.set RESLO_F5, 0x04CA
.set RESHI_F5, 0x04CC
.set MPY32L_F5, 0x04D0
.set MPY32H_F5, 0x04D2
.set MPYS32L_F5, 0x04D4
.set MPYS32H_F5, 0x04D6
.set OP2L_F5, 0x04E0
.set OP2H_F5, 0x04E2
.set RES0_F5, 0x04E4
.set RES1_F5, 0x04E6
.set RES2_F5, 0x04E8
.set RES3_F5, 0x04EA
#if defined MUL_16
;; First generation MSP430 hardware multiplies ...
;; NOTE(review): start_func/end_func/fake_func and the mult16/mult1632/
;; mult32/mult32_hw/mult3264_hw/mult64_hw macros are defined earlier in
;; this file (not visible here); they emit the exported entry points and
;; the register-write/result-read sequences for the multiplier peripheral.
start_func __mulhi2 __mspabi_mpyi __mspabi_mpyi_hw
mult16 MPY, OP2, RESLO
end_func __mulhi2
start_func __mulhisi2 __mspabi_mpysl __mspabi_mpysl_hw
mult1632 MPYS, OP2, RESLO, RESHI
end_func __mulhisi2
start_func __umulhisi2 __mspabi_mpyul __mspabi_mpyul_hw
mult1632 MPY, OP2, RESLO, RESHI
end_func __umulhisi2
start_func __mulsi2 __mspabi_mpyl __mspabi_mpyl_hw
mult32 MPY, OP2, MAC, OP2, RESLO, RESHI
end_func __mulsi2
;; FIXME: We do not have hardware implementations of these
;; routines, so just jump to the software versions instead.
fake_func __mulsidi2 __mspabi_mpysll __mspabi_mpysll_hw
fake_func __umulsidi2 __mspabi_mpyull __mspabi_mpyull_hw
fake_func __muldi3 __mspabi_mpyll __mspabi_mpyll_hw
#elif defined MUL_32
;; Second generation MSP430 hardware multiplies ...
;; The 16x16 routines use the legacy multiplier registers; the 32-bit
;; and 64-bit-result routines use the MPY32 register bank.
start_func __mulhi2 __mspabi_mpyi __mspabi_mpyi_hw
mult16 MPY, OP2, RESLO
end_func __mulhi2
start_func __mulhisi2 __mspabi_mpysl __mspabi_mpysl_hw
mult1632 MPYS, OP2, RESLO, RESHI
end_func __mulhisi2
start_func __umulhisi2 __mspabi_mpyul __mspabi_mpyul_hw
mult1632 MPY, OP2, RESLO, RESHI
end_func __umulhisi2
start_func __mulsi2 __mspabi_mpyl __mspabi_mpyl_hw32
mult32_hw MPY32L, MPY32H, OP2L, OP2H, RES0, RES1
end_func __mulsi2
start_func __mulsidi2 __mspabi_mpysll __mspabi_mpysll_hw32
mult3264_hw MPYS32L, MPYS32H, OP2L, OP2H, RES0, RES1, RES2, RES3
end_func __mulsidi2
start_func __umulsidi2 __mspabi_mpyull __mspabi_mpyull_hw32
mult3264_hw MPY32L, MPY32H, OP2L, OP2H, RES0, RES1, RES2, RES3
end_func __umulsidi2
start_func __muldi3 __mspabi_mpyll __mspabi_mpyll_hw32
mult64_hw MPY32L, MPY32H, OP2L, OP2H, RES0, RES1, RES2, RES3
end_func __muldi3
#elif defined MUL_F5
/* The F5xxx series of MCUs support the same 16-bit and 32-bit multiply
   as the second generation hardware, but they are accessed from different
   memory registers. */
start_func __mulhi2 __mspabi_mpyi __mspabi_mpyi_f5hw
mult16 MPY_F5, OP2_F5, RESLO_F5
end_func __mulhi2
start_func __mulhisi2 __mspabi_mpysl __mspabi_mpysl_f5hw
mult1632 MPYS_F5, OP2_F5, RESLO_F5, RESHI_F5
end_func __mulhisi2
start_func __umulhisi2 __mspabi_mpyul __mspabi_mpyul_f5hw
mult1632 MPY_F5, OP2_F5, RESLO_F5, RESHI_F5
end_func __umulhisi2
start_func __mulsi2 __mspabi_mpyl __mspabi_mpyl_f5hw
mult32_hw MPY32L_F5, MPY32H_F5, OP2L_F5, OP2H_F5, RES0_F5, RES1_F5
end_func __mulsi2
start_func __mulsidi2 __mspabi_mpysll __mspabi_mpysll_f5hw
mult3264_hw MPYS32L_F5, MPYS32H_F5, OP2L_F5, OP2H_F5, RES0_F5, RES1_F5, RES2_F5, RES3_F5
end_func __mulsidi2
start_func __umulsidi2 __mspabi_mpyull __mspabi_mpyull_f5hw
mult3264_hw MPY32L_F5, MPY32H_F5, OP2L_F5, OP2H_F5, RES0_F5, RES1_F5, RES2_F5, RES3_F5
end_func __umulsidi2
start_func __muldi3 __mspabi_mpyll __mspabi_mpyll_f5hw
mult64_hw MPY32L_F5, MPY32H_F5, OP2L_F5, OP2H_F5, RES0_F5, RES1_F5, RES2_F5, RES3_F5
end_func __muldi3
#else
#error MUL type not defined
#endif
|
4ms/metamodule-plugin-sdk
| 2,776
|
plugin-libc/libgcc/config/msp430/cmpsi2.S
|
; Copyright (C) 2012-2022 Free Software Foundation, Inc.
; Contributed by Red Hat.
;
; This file is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 3, or (at your option) any
; later version.
;
; This file is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; Under Section 7 of GPL version 3, you are granted additional
; permissions described in the GCC Runtime Library Exception, version
; 3.1, as published by the Free Software Foundation.
;
; You should have received a copy of the GNU General Public License and
; a copy of the GCC Runtime Library Exception along with this program;
; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
; <http://www.gnu.org/licenses/>.
;; ret_ expands to the 20-bit RETA in the MSP430X large memory model
;; (20-bit return addresses), and to the plain 16-bit RET otherwise.
#ifdef __MSP430X_LARGE__
#define ret_ RETA
#else
#define ret_ RET
#endif
.text
;; int __cmpsi2 (signed long A, signed long B)
;;
;; Performs a signed comparison of A and B.
;; If A is less than B it returns 0. If A is greater
;; than B it returns 2. If they are equal it returns 1.
;; Note - this code is also used by the __ucmpsi2 routine below.
.global __cmpsi2
.type __cmpsi2, @function
__cmpsi2:
;; A is in r12 (low), r13 (high)
;; B is in r14 (low), r15 (high)
;; Result put in r12
;; Compare the high words first (signed); only when they are equal do
;; the low words decide the result.
cmp.w r13, r15
jeq .L_compare_low
jge .L_less_than
.L_greater_than:
mov.w #2, r12
ret_
.L_less_than:
mov.w #0, r12
ret_
.L_compare_low:
;; The high words are equal, so compare the low words.  The low word of
;; a 32-bit value carries no sign and must be compared UNSIGNED, so use
;; JLO (jump if lower, C clear).  The signed JL used previously returned
;; the wrong result whenever the two low words differed in bit 15.
cmp.w r12, r14
jlo .L_greater_than
jne .L_less_than
mov.w #1, r12
ret_
.size __cmpsi2, . - __cmpsi2
;; int __ucmpsi2 (unsigned long A, unsigned long B)
;;
;; Performs an unsigned comparison of A and B.
;; If A is less than B it returns 0. If A is greater
;; than B it returns 2. If they are equal it returns 1.
;; Note - this function branches into the __cmpsi2 code above; it only
;; decides the result itself when exactly one of the top bits differs.
.global __ucmpsi2
.type __ucmpsi2, @function
__ucmpsi2:
;; A is in r12 (low), r13 (high)
;; B is in r14 (low), r15 (high)
;; Result put in r12
tst r13
jn .L_top_bit_set_in_A
tst r15
;; If the top bit of B is set, but A's is clear we know that A < B.
jn .L_less_than
;; Neither A nor B has their top bit set, so the signed comparison in
;; __cmpsi2 gives the correct unsigned answer and we can reuse it.
;; Note we use JC rather than BR as that saves two bytes. The TST insn
;; always sets the C bit, so the branch is unconditional in effect.
jc __cmpsi2
.L_top_bit_set_in_A:
tst r15
;; If both A and B have their top bit set we can use the __cmpsi2
;; routine: subtracting the common bias preserves the signed ordering.
jn __cmpsi2
;; Otherwise A has its top bit set and B does not so A > B.
;; (JC again acts as an unconditional branch after TST.)
jc .L_greater_than
.size __ucmpsi2, . - __ucmpsi2
|
4ms/metamodule-plugin-sdk
| 1,197
|
plugin-libc/libgcc/config/i386/crtn.S
|
/* crtn.S for x86.
Copyright (C) 1993-2022 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just supplies returns for the .init and .fini sections. It is
linked in after all other files. */
.ident "GNU C crtn.o"
; Terminate the _init body begun in crti.S: crtn.o is linked last, so
; this ret closes the function assembled piecewise across all objects.
.section .init
ret $0x0
; Likewise terminate the _fini body.
.section .fini
ret $0x0
|
4ms/metamodule-plugin-sdk
| 23,864
|
plugin-libc/libgcc/config/i386/morestack.S
|
# x86/x86_64 support for -fsplit-stack.
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
# Contributed by Ian Lance Taylor <iant@google.com>.
# This file is part of GCC.
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
#include "auto-host.h"
# Support for allocating more stack space when using -fsplit-stack.
# When a function discovers that it needs more stack space, it will
# call __morestack with the size of the stack frame and the size of
# the parameters to copy from the old stack frame to the new one.
# The __morestack function preserves the parameter registers and
# calls __generic_morestack to actually allocate the stack space.
# When this is called stack space is very low, but we ensure that
# there is enough space to push the parameter registers and to call
# __generic_morestack.
# When calling __generic_morestack, FRAME_SIZE points to the size of
# the desired frame when the function is called, and the function
# sets it to the size of the allocated stack. OLD_STACK points to
# the parameters on the old stack and PARAM_SIZE is the number of
# bytes of parameters to copy to the new stack. These are the
# parameters of the function that called __morestack. The
# __generic_morestack function returns the new stack pointer,
# pointing to the address of the first copied parameter. The return
# value minus the returned *FRAME_SIZE will be the first address on
# the stack which we should not use.
# void *__generic_morestack (size_t *frame_size, void *old_stack,
# size_t param_size);
# The __morestack routine has to arrange for the caller to return to a
# stub on the new stack. The stub is responsible for restoring the
# old stack pointer and returning to the caller's caller. This calls
# __generic_releasestack to retrieve the old stack pointer and release
# the newly allocated stack.
# void *__generic_releasestack (size_t *available);
# We do a little dance so that the processor's call/return return
# address prediction works out. The compiler arranges for the caller
# to look like this:
# call __generic_morestack
# ret
# L:
# // carry on with function
# After we allocate more stack, we call L, which is in our caller.
# When that returns (to the predicted instruction), we release the
# stack segment and reset the stack pointer. We then return to the
# predicted instruction, namely the ret instruction immediately after
# the call to __generic_morestack. That then returns to the caller of
# the original caller.
# The amount of extra space we ask for. In general this has to be
# enough for the dynamic loader to find a symbol and for a signal
# handler to run.
# BACKOFF: safety margin kept below the computed stack limit
# (1024 bytes for 32-bit, 3584 bytes for 64-bit).
#ifndef __x86_64__
#define BACKOFF (1024)
#else
#define BACKOFF (3584)
#endif
# The amount of space we ask for when calling non-split-stack code.
#define NON_SPLIT_STACK 0x100000
# This entry point is for split-stack code which calls non-split-stack
# code. When the linker sees this case, it converts the call to
# __morestack to call __morestack_non_split instead. We just bump the
# requested stack space by NON_SPLIT_STACK (1 MiB) plus some slop.
#include <cet.h>
# __morestack_non_split:
# Entry point substituted by the linker when split-stack code calls
# non-split-stack code.  If the current stack already has
# NON_SPLIT_STACK bytes of headroom it returns straight to the
# caller's caller; otherwise it enlarges the request and falls
# through into __morestack below.
.global __morestack_non_split
.hidden __morestack_non_split
#ifdef __ELF__
.type __morestack_non_split,@function
#endif
__morestack_non_split:
.cfi_startproc
#ifndef __x86_64__
# See below for an extended explanation of this.
.cfi_def_cfa %esp,16
pushl %eax # Save %eax in case it is a parameter.
.cfi_adjust_cfa_offset 4 # Account for pushed register.
movl %esp,%eax # Current stack,
subl 8(%esp),%eax # less required stack frame size,
subl $NON_SPLIT_STACK,%eax # less space for non-split code.
cmpl %gs:0x30,%eax # See if we have enough space.
jb 2f # Get more space if we need it.
# Here the stack is
# %esp + 20: stack pointer after two returns
# %esp + 16: return address of morestack caller's caller
# %esp + 12: size of parameters
# %esp + 8: new stack frame size
# %esp + 4: return address of this function
# %esp: saved %eax
#
# Since we aren't doing a full split stack, we don't need to
# do anything when our caller returns. So we return to our
# caller rather than calling it, and let it return as usual.
# To make that work we adjust the return address.
# This breaks call/return address prediction for the call to
# this function. I can't figure out a way to make it work
# short of copying the parameters down the stack, which will
# probably take more clock cycles than we will lose breaking
# call/return address prediction. We will only break
# prediction for this call, not for our caller.
movl 4(%esp),%eax # Increment the return address
cmpb $0xc3,(%eax) # to skip the ret instruction;
je 1f # see above.
addl $2,%eax
1: inc %eax
# If the instruction that we return to is
# leal 20(%ebp),{%eax,%ecx,%edx}
# then we have been called by a varargs function that expects
# %ebp to hold a real value. That can only work if we do the
# full stack split routine. FIXME: This is fragile.
cmpb $0x8d,(%eax)
jne 3f
cmpb $0x14,2(%eax)
jne 3f
cmpb $0x45,1(%eax)
je 2f
cmpb $0x4d,1(%eax)
je 2f
cmpb $0x55,1(%eax)
je 2f
3:
movl %eax,4(%esp) # Update return address.
popl %eax # Restore %eax and stack.
.cfi_adjust_cfa_offset -4 # Account for popped register.
ret $8 # Return to caller, popping args.
2:
.cfi_adjust_cfa_offset 4 # Back to where we were.
popl %eax # Restore %eax and stack.
.cfi_adjust_cfa_offset -4 # Account for popped register.
# Increment space we request.
addl $NON_SPLIT_STACK+0x1000+BACKOFF,4(%esp)
# Fall through into morestack.
#else
# See below for an extended explanation of this.
.cfi_def_cfa %rsp,16
pushq %rax # Save %rax in case caller is using
# it to preserve original %r10.
.cfi_adjust_cfa_offset 8 # Adjust for pushed register.
movq %rsp,%rax # Current stack,
subq %r10,%rax # less required stack frame size,
subq $NON_SPLIT_STACK,%rax # less space for non-split code.
#ifdef __LP64__
cmpq %fs:0x70,%rax # See if we have enough space.
#else
cmpl %fs:0x40,%eax # (x32: 32-bit guard at a different TCB offset.)
#endif
jb 2f # Get more space if we need it.
# If the instruction that we return to is
# leaq 24(%rbp), %r11
# then we have been called by a varargs function that expects
# %ebp to hold a real value. That can only work if we do the
# full stack split routine. FIXME: This is fragile.
movq 8(%rsp),%rax
incq %rax # Skip ret instruction in caller.
cmpl $0x185d8d4c,(%rax) # Opcode bytes of "leaq 24(%rbp),%r11".
je 2f
# This breaks call/return prediction, as described above.
incq 8(%rsp) # Increment the return address.
popq %rax # Restore register.
.cfi_adjust_cfa_offset -8 # Adjust for popped register.
ret # Return to caller.
2:
popq %rax # Restore register.
.cfi_adjust_cfa_offset -8 # Adjust for popped register.
# Increment space we request.
addq $NON_SPLIT_STACK+0x1000+BACKOFF,%r10
# Fall through into morestack.
#endif
.cfi_endproc
#ifdef __ELF__
.size __morestack_non_split, . - __morestack_non_split
#endif
# __morestack_non_split falls through into __morestack.
# The __morestack function.
# __morestack:
# Called by split-stack prologues when the current segment is too
# small.  Preserves the parameter registers, allocates a new segment
# via __generic_morestack, re-enters the caller on the new stack, and
# on return releases the segment and restores the old stack pointer.
# 32-bit: frame size and parameter size are passed on the stack.
# 64-bit: frame size in %r10, parameter size in %r11.
.global __morestack
.hidden __morestack
#ifdef __ELF__
.type __morestack,@function
#endif
__morestack:
.LFB1:
.cfi_startproc
#ifndef __x86_64__
# The 32-bit __morestack function.
# We use a cleanup to restore the stack guard if an exception
# is thrown through this code.
#ifndef __PIC__
.cfi_personality 0,__gcc_personality_v0
.cfi_lsda 0,.LLSDA1
#else
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#endif
# We return below with a ret $8. We will return to a single
# return instruction, which will return to the caller of our
# caller. We let the unwinder skip that single return
# instruction, and just return to the real caller.
# Here CFA points just past the return address on the stack,
# e.g., on function entry it is %esp + 4. The stack looks
# like this:
# CFA + 12: stack pointer after two returns
# CFA + 8: return address of morestack caller's caller
# CFA + 4: size of parameters
# CFA: new stack frame size
# CFA - 4: return address of this function
# CFA - 8: previous value of %ebp; %ebp points here
# Setting the new CFA to be the current CFA + 12 (i.e., %esp +
# 16) will make the unwinder pick up the right return address.
.cfi_def_cfa %esp,16
pushl %ebp
.cfi_adjust_cfa_offset 4
.cfi_offset %ebp, -20
movl %esp,%ebp
.cfi_def_cfa_register %ebp
# In 32-bit mode the parameters are pushed on the stack. The
# argument size is pushed then the new stack frame size is
# pushed.
# In the body of a non-leaf function, the stack pointer will
# be aligned to a 16-byte boundary. That is CFA + 12 in the
# stack picture above: (CFA + 12) % 16 == 0. At this point we
# have %esp == CFA - 8, so %esp % 16 == 12. We need some
# space for saving registers and passing parameters, and we
# need to wind up with %esp % 16 == 0.
subl $44,%esp
# Because our cleanup code may need to clobber %ebx, we need
# to save it here so the unwinder can restore the value used
# by the caller. Note that we don't have to restore the
# register, since we don't change it, we just have to save it
# for the unwinder.
movl %ebx,-4(%ebp)
.cfi_offset %ebx, -24
# In 32-bit mode the registers %eax, %edx, and %ecx may be
# used for parameters, depending on the regparm and fastcall
# attributes.
movl %eax,-8(%ebp)
movl %edx,-12(%ebp)
movl %ecx,-16(%ebp)
call __morestack_block_signals
movl 12(%ebp),%eax # The size of the parameters.
movl %eax,8(%esp)
leal 20(%ebp),%eax # Address of caller's parameters.
movl %eax,4(%esp)
addl $BACKOFF,8(%ebp) # Ask for backoff bytes.
leal 8(%ebp),%eax # The address of the new frame size.
movl %eax,(%esp)
call __generic_morestack
movl %eax,%esp # Switch to the new stack.
subl 8(%ebp),%eax # The end of the stack space.
addl $BACKOFF,%eax # Back off BACKOFF bytes.
.LEHB0:
# FIXME: The offset must match
# TARGET_THREAD_SPLIT_STACK_OFFSET in
# gcc/config/i386/linux.h.
movl %eax,%gs:0x30 # Save the new stack boundary.
call __morestack_unblock_signals
movl -12(%ebp),%edx # Restore registers.
movl -16(%ebp),%ecx
movl 4(%ebp),%eax # Increment the return address
cmpb $0xc3,(%eax) # to skip the ret instruction;
je 1f # see above.
addl $2,%eax
1: inc %eax
movl %eax,-12(%ebp) # Store return address in an
# unused slot.
movl -8(%ebp),%eax # Restore the last register.
call *-12(%ebp) # Call our caller!
# The caller will return here, as predicted.
# Save the registers which may hold a return value. We
# assume that __generic_releasestack does not touch any
# floating point or vector registers.
pushl %eax
pushl %edx
# Push the arguments to __generic_releasestack now so that the
# stack is at a 16-byte boundary for
# __morestack_block_signals.
pushl $0 # Where the available space is returned.
leal 0(%esp),%eax # Push its address.
push %eax
call __morestack_block_signals
call __generic_releasestack
subl 4(%esp),%eax # Subtract available space.
addl $BACKOFF,%eax # Back off BACKOFF bytes.
.LEHE0:
movl %eax,%gs:0x30 # Save the new stack boundary.
addl $8,%esp # Remove values from stack.
# We need to restore the old stack pointer, which is in %rbp,
# before we unblock signals. We also need to restore %eax and
# %edx after we unblock signals but before we return. Do this
# by moving %eax and %edx from the current stack to the old
# stack.
popl %edx # Pop return value from current stack.
popl %eax
movl %ebp,%esp # Restore stack pointer.
# As before, we now have %esp % 16 == 12.
pushl %eax # Push return value on old stack.
pushl %edx
subl $4,%esp # Align stack to 16-byte boundary.
call __morestack_unblock_signals
addl $4,%esp
popl %edx # Restore return value.
popl %eax
.cfi_remember_state
# We never changed %ebx, so we don't have to actually restore it.
.cfi_restore %ebx
popl %ebp
.cfi_restore %ebp
.cfi_def_cfa %esp, 16
ret $8 # Return to caller, which will
# immediately return. Pop
# arguments as we go.
# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.
.L1:
.cfi_restore_state
subl $16,%esp # Maintain 16 byte alignment.
movl %eax,4(%esp) # Save exception header.
movl %ebp,(%esp) # Stack pointer after resume.
call __generic_findstack
movl %ebp,%ecx # Get the stack pointer.
subl %eax,%ecx # Subtract available space.
addl $BACKOFF,%ecx # Back off BACKOFF bytes.
movl %ecx,%gs:0x30 # Save new stack boundary.
movl 4(%esp),%eax # Function argument.
movl %eax,(%esp)
#ifdef __PIC__
call __x86.get_pc_thunk.bx # %ebx may not be set up for us.
addl $_GLOBAL_OFFSET_TABLE_, %ebx
call _Unwind_Resume@PLT # Resume unwinding.
#else
call _Unwind_Resume
#endif
#else /* defined(__x86_64__) */
# The 64-bit __morestack function.
# We use a cleanup to restore the stack guard if an exception
# is thrown through this code.
#ifndef __PIC__
.cfi_personality 0x3,__gcc_personality_v0
.cfi_lsda 0x3,.LLSDA1
#else
.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
.cfi_lsda 0x1b,.LLSDA1
#endif
# We will return a single return instruction, which will
# return to the caller of our caller. Let the unwinder skip
# that single return instruction, and just return to the real
# caller.
.cfi_def_cfa %rsp,16
# Set up a normal backtrace.
pushq %rbp
.cfi_adjust_cfa_offset 8
.cfi_offset %rbp, -24
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
# In 64-bit mode the new stack frame size is passed in r10
# and the argument size is passed in r11.
addq $BACKOFF,%r10 # Ask for backoff bytes.
pushq %r10 # Save new frame size.
# In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8,
# and %r9 may be used for parameters. We also preserve %rax
# which the caller may use to hold %r10.
pushq %rax
pushq %rdi
pushq %rsi
pushq %rdx
pushq %rcx
pushq %r8
pushq %r9
pushq %r11
# We entered morestack with the stack pointer aligned to a
# 16-byte boundary (the call to morestack's caller used 8
# bytes, and the call to morestack used 8 bytes). We have now
# pushed 10 registers, so we are still aligned to a 16-byte
# boundary.
call __morestack_block_signals
leaq -8(%rbp),%rdi # Address of new frame size.
leaq 24(%rbp),%rsi # The caller's parameters.
popq %rdx # The size of the parameters.
subq $8,%rsp # Align stack.
call __generic_morestack
movq -8(%rbp),%r10 # Reload modified frame size
movq %rax,%rsp # Switch to the new stack.
subq %r10,%rax # The end of the stack space.
addq $BACKOFF,%rax # Back off BACKOFF bytes.
.LEHB0:
# FIXME: The offset must match
# TARGET_THREAD_SPLIT_STACK_OFFSET in
# gcc/config/i386/linux64.h.
# Macro to save the new stack boundary.
#ifdef __LP64__
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movq %r##reg,%fs:0x70
#else
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movl %e##reg,%fs:0x40
#endif
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
call __morestack_unblock_signals
movq -24(%rbp),%rdi # Restore registers.
movq -32(%rbp),%rsi
movq -40(%rbp),%rdx
movq -48(%rbp),%rcx
movq -56(%rbp),%r8
movq -64(%rbp),%r9
movq 8(%rbp),%r10 # Increment the return address
incq %r10 # to skip the ret instruction;
# see above.
movq -16(%rbp),%rax # Restore caller's %rax.
call *%r10 # Call our caller!
# The caller will return here, as predicted.
# Save the registers which may hold a return value. We
# assume that __generic_releasestack does not touch any
# floating point or vector registers.
pushq %rax
pushq %rdx
call __morestack_block_signals
pushq $0 # For alignment.
pushq $0 # Where the available space is returned.
leaq 0(%rsp),%rdi # Pass its address.
call __generic_releasestack
subq 0(%rsp),%rax # Subtract available space.
addq $BACKOFF,%rax # Back off BACKOFF bytes.
.LEHE0:
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
addq $16,%rsp # Remove values from stack.
# We need to restore the old stack pointer, which is in %rbp,
# before we unblock signals. We also need to restore %rax and
# %rdx after we unblock signals but before we return. Do this
# by moving %rax and %rdx from the current stack to the old
# stack.
popq %rdx # Pop return value from current stack.
popq %rax
movq %rbp,%rsp # Restore stack pointer.
# Now %rsp % 16 == 8.
subq $8,%rsp # For alignment.
pushq %rax # Push return value on old stack.
pushq %rdx
call __morestack_unblock_signals
popq %rdx # Restore return value.
popq %rax
addq $8,%rsp
.cfi_remember_state
popq %rbp
.cfi_restore %rbp
.cfi_def_cfa %rsp, 16
ret # Return to caller, which will
# immediately return.
# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.
.L1:
.cfi_restore_state
subq $16,%rsp # Maintain 16 byte alignment.
movq %rax,(%rsp) # Save exception header.
movq %rbp,%rdi # Stack pointer after resume.
call __generic_findstack
movq %rbp,%rcx # Get the stack pointer.
subq %rax,%rcx # Subtract available space.
addq $BACKOFF,%rcx # Back off BACKOFF bytes.
X86_64_SAVE_NEW_STACK_BOUNDARY (cx)
movq (%rsp),%rdi # Restore exception data for call.
#ifdef __PIC__
call _Unwind_Resume@PLT # Resume unwinding.
#else
call _Unwind_Resume # Resume unwinding.
#endif
#endif /* defined(__x86_64__) */
.cfi_endproc
#ifdef __ELF__
.size __morestack, . - __morestack
#endif
#if !defined(__x86_64__) && defined(__PIC__)
# Output the thunk to get PC into bx, since we use it above.
# Emitted in a COMDAT section so the linker keeps a single copy even if
# other objects emit the same thunk.
.section .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
.globl __x86.get_pc_thunk.bx
.hidden __x86.get_pc_thunk.bx
#ifdef __ELF__
.type __x86.get_pc_thunk.bx, @function
#endif
__x86.get_pc_thunk.bx:
.cfi_startproc
movl (%esp), %ebx # Load our own return address = caller's PC.
ret
.cfi_endproc
#ifdef __ELF__
.size __x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
#endif
#endif
# The exception table. This tells the personality routine to execute
# the exception handler (.L1) when unwinding through .LEHB0 .. .LEHE0.
.section .gcc_except_table,"a",@progbits
.align 4
.LLSDA1:
.byte 0xff # @LPStart format (omit)
.byte 0xff # @TType format (omit)
.byte 0x1 # call-site format (uleb128)
.uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length
.LLSDACSB1:
.uleb128 .LEHB0-.LFB1 # region 0 start
.uleb128 .LEHE0-.LEHB0 # length
.uleb128 .L1-.LFB1 # landing pad
.uleb128 0 # action
.LLSDACSE1:
.global __gcc_personality_v0
#ifdef __PIC__
# Build a position independent reference to the basic
# personality function.
.hidden DW.ref.__gcc_personality_v0
.weak DW.ref.__gcc_personality_v0
.section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
.type DW.ref.__gcc_personality_v0, @object
DW.ref.__gcc_personality_v0:
#ifndef __LP64__
.align 4
.size DW.ref.__gcc_personality_v0, 4
.long __gcc_personality_v0
#else
.align 8
.size DW.ref.__gcc_personality_v0, 8
.quad __gcc_personality_v0
#endif
#endif
#if defined __x86_64__ && defined __LP64__
# This entry point is used for the large model. With this entry point
# the upper 32 bits of %r10 hold the argument size and the lower 32
# bits hold the new stack frame size. There doesn't seem to be a way
# to know in the assembler code that we are assembling for the large
# model, and there doesn't seem to be a large model multilib anyhow.
# If one is developed, then the non-PIC code is probably OK since we
# will probably be close to the morestack code, but the PIC code
# almost certainly needs to be changed. FIXME.
.text
.global __morestack_large_model
.hidden __morestack_large_model
#ifdef __ELF__
.type __morestack_large_model,@function
#endif
__morestack_large_model:
.cfi_startproc
_CET_ENDBR
# Unpack %r10 into the normal __morestack convention:
# %r10 = frame size (low 32 bits), %r11 = argument size (high 32 bits).
movq %r10, %r11
andl $0xffffffff, %r10d
sarq $32, %r11
jmp __morestack # Tail-call into the regular entry.
.cfi_endproc
#ifdef __ELF__
.size __morestack_large_model, . - __morestack_large_model
#endif
#endif /* __x86_64__ && __LP64__ */
# Initialize the stack test value when the program starts or when a
# new thread starts. We don't know how large the main stack is, so we
# guess conservatively. We might be able to use getrlimit here.
# Registered as a high-priority constructor (see table below).
.text
.global __stack_split_initialize
.hidden __stack_split_initialize
#ifdef __ELF__
.type __stack_split_initialize, @function
#endif
__stack_split_initialize:
_CET_ENDBR
#ifndef __x86_64__
leal -16000(%esp),%eax # We should have at least 16K.
movl %eax,%gs:0x30 # Set the stack guard in the TCB.
subl $4,%esp # Align stack.
pushl $16000 # __generic_morestack_set_initial_sp(sp, 16000)
pushl %esp
#ifdef __PIC__
call __generic_morestack_set_initial_sp@PLT
#else
call __generic_morestack_set_initial_sp
#endif
addl $12,%esp
ret
#else /* defined(__x86_64__) */
leaq -16000(%rsp),%rax # We should have at least 16K.
X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
subq $8,%rsp # Align stack.
movq %rsp,%rdi # __generic_morestack_set_initial_sp(sp, 16000)
movq $16000,%rsi
#ifdef __PIC__
call __generic_morestack_set_initial_sp@PLT
#else
call __generic_morestack_set_initial_sp
#endif
addq $8,%rsp
ret
#endif /* defined(__x86_64__) */
#ifdef __ELF__
.size __stack_split_initialize, . - __stack_split_initialize
#endif
# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.
# void *__morestack_get_guard (void) returns the current stack guard.
# The guard lives in the thread control block at a fixed, ABI-defined
# offset (see TARGET_THREAD_SPLIT_STACK_OFFSET).
.text
.global __morestack_get_guard
.hidden __morestack_get_guard
#ifdef __ELF__
.type __morestack_get_guard,@function
#endif
__morestack_get_guard:
#ifndef __x86_64__
movl %gs:0x30,%eax
#else
#ifdef __LP64__
movq %fs:0x70,%rax
#else
movl %fs:0x40,%eax
#endif
#endif
ret
#ifdef __ELF__
.size __morestack_get_guard, . - __morestack_get_guard
#endif
# void __morestack_set_guard (void *) sets the stack guard.
# 32-bit: argument on the stack; 64-bit: argument in %rdi.
.global __morestack_set_guard
.hidden __morestack_set_guard
#ifdef __ELF__
.type __morestack_set_guard,@function
#endif
__morestack_set_guard:
#ifndef __x86_64__
movl 4(%esp),%eax
movl %eax,%gs:0x30
#else
X86_64_SAVE_NEW_STACK_BOUNDARY (di)
#endif
ret
#ifdef __ELF__
.size __morestack_set_guard, . - __morestack_set_guard
#endif
# void *__morestack_make_guard (void *, size_t) returns the stack
# guard value for a stack: stack_bottom - size + BACKOFF.
.global __morestack_make_guard
.hidden __morestack_make_guard
#ifdef __ELF__
.type __morestack_make_guard,@function
#endif
__morestack_make_guard:
#ifndef __x86_64__
movl 4(%esp),%eax # Stack pointer,
subl 8(%esp),%eax # less stack size,
addl $BACKOFF,%eax # plus the safety margin.
#else
subq %rsi,%rdi
addq $BACKOFF,%rdi
movq %rdi,%rax
#endif
ret
#ifdef __ELF__
.size __morestack_make_guard, . - __morestack_make_guard
#endif
# Make __stack_split_initialize a high priority constructor. FIXME:
# This is ELF specific.
#if HAVE_INITFINI_ARRAY_SUPPORT
.section .init_array.00000,"aw",@progbits
#else
.section .ctors.65535,"aw",@progbits
#endif
#ifndef __LP64__
.align 4
.long __stack_split_initialize
.long __morestack_load_mmap
#else
.align 8
.quad __stack_split_initialize
.quad __morestack_load_mmap
#endif
#ifdef __ELF__
# Mark the stack as non-executable and flag split-stack support for the
# linker via GNU note sections.
.section .note.GNU-stack,"",@progbits
.section .note.GNU-split-stack,"",@progbits
.section .note.GNU-no-split-stack,"",@progbits
#endif
|
4ms/stm32mp1-baremetal
| 3,323
|
bootloaders/mp1-boot/startup.s
|
.syntax unified
.cpu cortex-a7
// ARMv7-A CPSR mode-field encodings used when switching banked stacks.
.equ MODE_FIQ, 0x11
.equ MODE_IRQ, 0x12
.equ MODE_SVC, 0x13
.equ MODE_ABT, 0x17
.equ MODE_UND, 0x1B
.equ MODE_SYS, 0x1F
// Exception vector table: eight branch instructions at fixed 4-byte
// offsets; placed in its own section so the linker script can locate it.
.section .vector_table, "x"
.global _Reset
.global _start
_Reset:
b Reset_Handler // 0x0 Reset
b Undef_Handler // 0x4 Undefined Instruction
b SVC_Handler // 0x8 Software Interrupt (SVC)
b PAbt_Handler // 0xC Prefetch Abort
b DAbt_Handler // 0x10 Data Abort
b . // 0x14 Reserved
b IRQ_Handler // 0x18 IRQ
b FIQ_Handler // 0x1C FIQ
.section .text
// Reset_Handler:
// Bare-metal C runtime bring-up: disable caches/MMU, point VBAR at our
// vector table, fill and install a stack for each CPU mode (each with a
// distinct fill pattern to make stack usage visible in a debugger),
// copy .data from its load address, zero .bss, then run SystemInit,
// static constructors and main.  main is not expected to return.
Reset_Handler:
cpsid if // Mask Interrupts
mrc p15, 0, r0, c1, c0, 0 // Read System Control register (SCTLR)
bic r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
bic r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
bic r0, r0, #0x1 // Clear M bit 0 to disable MMU
bic r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
bic r0, r0, #(0x1 << 13) // Clear V bit 13 to disable High Vector Table Base Address
mcr p15, 0, r0, c1, c0, 0 // Write System Control register (SCTLR)
isb
// Configure ACTLR
mrc p15, 0, r0, c1, c0, 1 // Read CP15 Auxiliary Control Register
orr r0, r0, #(1 << 1) // Enable L2 prefetch hint
mcr p15, 0, r0, c1, c0, 1 // Write CP15 Auxiliary Control Register
// Set Vector Base Address Register (VBAR) to point to this application's vector table
// NOTE(review): 0x2FFC2500 is presumably where the linker script places
// .vector_table in SYSRAM -- confirm against the linker script.
ldr r0, =0x2FFC2500
mcr p15, 0, r0, c12, c0, 0
// FIQ stack: Fill with FEFF
msr cpsr_c, MODE_FIQ
ldr r1, =_fiq_stack_start
ldr sp, =_fiq_stack_end
movw r0, #0xFEFF
movt r0, #0xFEFF
fiq_loop:
cmp r1, sp
strlt r0, [r1], #4
blt fiq_loop
// IRQ stack: Fill with F1F1
msr cpsr_c, MODE_IRQ
ldr r1, =_irq_stack_start
ldr sp, =_irq_stack_end
movw r0, #0xF1F1
movt r0, #0xF1F1
irq_loop:
cmp r1, sp
strlt r0, [r1], #4
blt irq_loop
// Supervisor (SVC) stack: Fill with F5F5
msr cpsr_c, MODE_SVC
ldr r1, =_svc_stack_start
ldr sp, =_svc_stack_end
movw r0, #0xF5F5
movt r0, #0xF5F5
svc_loop:
cmp r1, sp
strlt r0, [r1], #4
blt svc_loop
// USER and SYS mode stack: Fill with F0F0
msr cpsr_c, MODE_SYS
ldr r1, =_user_stack_start
ldr sp, =_user_stack_end
movw r0, #0xF0F0
movt r0, #0xF0F0
usrsys_loop:
cmp r1, sp
strlt r0, [r1], #4
blt usrsys_loop
// Copying initialization values (.data): load image follows .text.
ldr r0, =_text_end
ldr r1, =_data_start
ldr r2, =_data_end
data_loop:
cmp r1, r2
ldrlt r3, [r0], #4
strlt r3, [r1], #4
blt data_loop
// Initialize .bss to zero.
mov r0, #0
ldr r1, =_bss_start
ldr r2, =_bss_end
bss_loop:
cmp r1, r2
strlt r0, [r1], #4
blt bss_loop
bl SystemInit // Setup MMU, TLB, Caches, FPU, IRQ
bl __libc_init_array // libc init (static constructors)
//Do not enable IRQ interrupts, this project doesn't use them
//cpsie i
run_main:
bl main
b Abort_Exception // main returned: hang.
// Exception stubs: all unexpected exceptions spin forever so a debugger
// can observe the faulting mode.
Abort_Exception:
b .
Undef_Handler:
b .
SVC_Handler:
b .
PAbt_Handler:
b .
DAbt_Handler:
b .
IRQ_Handler:
b .
FIQ_Handler:
b .
|
4ms/metamodule-plugin-sdk
| 1,313
|
plugin-libc/libgcc/config/i386/crti.S
|
/* crti.S for x86.
Copyright (C) 1993-2022 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file just supplies labeled starting points for the .init and .fini
sections. It is linked in before the values-Xx.o files and also before
crtbegin.o. */
.ident "GNU C crti.s"
.section .init
.globl _init
.type _init,@function
_init:
.section .fini
.globl _fini
.type _fini,@function
_fini:
|
4ms/metamodule-plugin-sdk
| 4,899
|
plugin-libc/libgcc/config/i386/cygwin.S
|
/* stuff needed for libgcc on win32.
*
* Copyright (C) 1996-2022 Free Software Foundation, Inc.
* Written By Steve Chamberlain
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 3, or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Under Section 7 of GPL version 3, you are granted additional
* permissions described in the GCC Runtime Library Exception, version
* 3.1, as published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License and
* a copy of the GCC Runtime Library Exception along with this program;
* see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
* <http://www.gnu.org/licenses/>.
*/
#include "i386-asm.h"
#ifdef HAVE_AS_CFI_SECTIONS
.cfi_sections .debug_frame
#endif
#ifdef L_chkstk
/* Function prologue calls __chkstk to probe the stack when allocating more
than CHECK_STACK_LIMIT bytes in one go. Touching the stack at 4K
increments is necessary to ensure that the guard pages used
by the OS virtual memory manger are allocated in correct sequence. */
.global ___chkstk
.global __alloca
#ifdef __x86_64__
/* __alloca is a normal function call, which uses %rcx as the argument. */
cfi_startproc()
__alloca:
movq %rcx, %rax
/* FALLTHRU */
/* ___chkstk is a *special* function call, which uses %rax as the argument.
We avoid clobbering the 4 integer argument registers, %rcx, %rdx,
%r8 and %r9, which leaves us with %rax, %r10, and %r11 to use. */
.align 4
___chkstk:
popq %r11 /* pop return address */
cfi_adjust_cfa_offset(-8) /* indicate return address in r11 */
cfi_register(%rip, %r11)
movq %rsp, %r10
cmpq $0x1000, %rax /* > 4k ?*/
jb 2f
1: subq $0x1000, %r10 /* yes, move pointer down 4k*/
orl $0x0, (%r10) /* probe there */
subq $0x1000, %rax /* decrement count */
cmpq $0x1000, %rax
ja 1b /* and do it again */
2: subq %rax, %r10
movq %rsp, %rax /* hold CFA until return */
cfi_def_cfa_register(%rax)
orl $0x0, (%r10) /* less than 4k, just peek here */
movq %r10, %rsp /* decrement stack */
/* Push the return value back. Doing this instead of just
jumping to %r11 preserves the cached call-return stack
used by most modern processors. */
pushq %r11
ret
cfi_endproc()
#else
cfi_startproc()
___chkstk:
__alloca:
pushl %ecx /* save temp */
cfi_push(%eax)
leal 8(%esp), %ecx /* point past return addr */
cmpl $0x1000, %eax /* > 4k ?*/
jb 2f
1: subl $0x1000, %ecx /* yes, move pointer down 4k*/
orl $0x0, (%ecx) /* probe there */
subl $0x1000, %eax /* decrement count */
cmpl $0x1000, %eax
ja 1b /* and do it again */
2: subl %eax, %ecx
orl $0x0, (%ecx) /* less than 4k, just peek here */
movl %esp, %eax /* save current stack pointer */
cfi_def_cfa_register(%eax)
movl %ecx, %esp /* decrement stack */
movl (%eax), %ecx /* recover saved temp */
/* Copy the return register. Doing this instead of just jumping to
the address preserves the cached call-return stack used by most
modern processors. */
pushl 4(%eax)
ret
cfi_endproc()
#endif /* __x86_64__ */
#endif /* L_chkstk */
#ifdef L_chkstk_ms
/* ___chkstk_ms is a *special* function call, which uses %rax as the argument.
We avoid clobbering any registers. Unlike ___chkstk, it just probes the
stack and does no stack allocation. */
.global ___chkstk_ms
#ifdef __x86_64__
cfi_startproc()
___chkstk_ms:
pushq %rcx /* save temps */
cfi_push(%rcx)
pushq %rax
cfi_push(%rax)
cmpq $0x1000, %rax /* > 4k ?*/
leaq 24(%rsp), %rcx /* point past return addr */
jb 2f
1: subq $0x1000, %rcx /* yes, move pointer down 4k */
orq $0x0, (%rcx) /* probe there */
subq $0x1000, %rax /* decrement count */
cmpq $0x1000, %rax
ja 1b /* and do it again */
2: subq %rax, %rcx
orq $0x0, (%rcx) /* less than 4k, just peek here */
popq %rax
cfi_pop(%rax)
popq %rcx
cfi_pop(%rcx)
ret
cfi_endproc()
#else
cfi_startproc()
___chkstk_ms:
pushl %ecx /* save temp */
cfi_push(%ecx)
pushl %eax
cfi_push(%eax)
cmpl $0x1000, %eax /* > 4k ?*/
leal 12(%esp), %ecx /* point past return addr */
jb 2f
1: subl $0x1000, %ecx /* yes, move pointer down 4k*/
orl $0x0, (%ecx) /* probe there */
subl $0x1000, %eax /* decrement count */
cmpl $0x1000, %eax
ja 1b /* and do it again */
2: subl %eax, %ecx
orl $0x0, (%ecx) /* less than 4k, just peek here */
popl %eax
cfi_pop(%eax)
popl %ecx
cfi_pop(%ecx)
ret
cfi_endproc()
#endif /* __x86_64__ */
#endif /* L_chkstk_ms */
|
4ms/metamodule-plugin-sdk
| 5,263
|
plugin-libc/libgcc/config/i386/sol2-c1.S
|
/* crt1.s for Solaris 2, x86
Copyright (C) 1993-2022 Free Software Foundation, Inc.
Written By Fred Fish, Nov 1992
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file takes control of the process from the kernel, as specified
in section 3 of the System V Application Binary Interface, Intel386
Processor Supplement. It has been constructed from information obtained
from the ABI, information obtained from single stepping existing
Solaris executables through their startup code with gdb, and from
information obtained by single stepping executables on other i386 SVR4
implementations. This file is the first thing linked into any
executable. */
#ifndef GCRT1
.ident "GNU C crt1.s"
#define CLEANUP _cleanup
#else
/* This is a modified crt1.s by J.W.Hawtin <oolon@ankh.org> 15/8/96,
to allow program profiling, by calling monstartup on entry and _mcleanup
on exit. */
.ident "GNU C gcrt1.s"
#define CLEANUP _mcleanup
#endif
.weak _cleanup
.weak _DYNAMIC
.text
/* Start creating the initial frame by pushing a NULL value for the return
address of the initial frame, and mark the end of the stack frame chain
(the innermost stack frame) with a NULL value, per page 3-32 of the ABI.
Initialize the first stack frame pointer in %ebp (the contents of which
are unspecified at process initialization). */
.globl _start
_start:
pushl $0x0
pushl $0x0
movl %esp,%ebp
/* As specified per page 3-32 of the ABI, %edx contains a function
pointer that should be registered with atexit(), for proper
shared object termination. Just push it onto the stack for now
to preserve it. We want to register _cleanup() first. */
pushl %edx
/* Check to see if there is an _cleanup() function linked in, and if
so, register it with atexit() as the last thing to be run by
atexit(). */
movl $CLEANUP,%eax
testl %eax,%eax
je .L1
pushl $CLEANUP
call atexit
addl $0x4,%esp
.L1:
/* Now check to see if we have an _DYNAMIC table, and if so then
we need to register the function pointer previously in %edx, but
now conveniently saved on the stack as the argument to pass to
atexit(). */
movl $_DYNAMIC,%eax
testl %eax,%eax
je .L2
call atexit
.L2:
/* Register _fini() with atexit(). We will take care of calling _init()
directly. */
pushl $_fini
call atexit
#ifdef GCRT1
/* Start profiling. */
pushl %ebp
movl %esp,%ebp
pushl $_etext
pushl $_start
call monstartup
addl $8,%esp
popl %ebp
#endif
/* Compute the address of the environment vector on the stack and load
it into the global variable _environ. Currently argc is at 8 off
the frame pointer. Fetch the argument count into %eax, scale by the
size of each arg (4 bytes) and compute the address of the environment
vector which is 16 bytes (the two zero words we pushed, plus argc,
plus the null word terminating the arg vector) further up the stack,
off the frame pointer (whew!). */
movl 8(%ebp),%eax
leal 16(%ebp,%eax,4),%edx
movl %edx,_environ
/* Push the environment vector pointer, the argument vector pointer,
and the argument count on to the stack to set up the arguments
for _init(), _fpstart(), and main(). Note that the environment
vector pointer and the arg count were previously loaded into
%edx and %eax respectively. The only new value we need to compute
is the argument vector pointer, which is at a fixed address off
the initial frame pointer. */
/* Make sure the stack is properly aligned. */
andl $0xfffffff0,%esp
subl $4,%esp
pushl %edx
leal 12(%ebp),%edx
pushl %edx
pushl %eax
/* Call _init(argc, argv, environ), _fpstart(argc, argv, environ), and
main(argc, argv, environ). */
call _init
call __fpstart
call main
/* Pop the argc, argv, and environ arguments off the stack, push the
value returned from main(), and call exit(). */
addl $12,%esp
pushl %eax
call exit
/* An inline equivalent of _exit, as specified in Figure 3-26 of the ABI. */
pushl $0x0
movl $0x1,%eax
lcall $7,$0
/* If all else fails, just try a halt! */
hlt
.type _start,@function
.size _start,.-_start
#ifndef GCRT1
/* A dummy profiling support routine for non-profiling executables,
in case we link in some objects that have been compiled for profiling. */
.weak _mcount
_mcount:
ret
.type _mcount,@function
.size _mcount,.-_mcount
#endif
|
4ms/metamodule-plugin-sdk
| 2,078
|
plugin-libc/libgcc/config/aarch64/crtn.S
|
# Machine description for AArch64 architecture.
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
# Contributed by ARM Ltd.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
# This file just makes sure that the .fini and .init sections do in
# fact return. Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.
# Note - this macro is complemented by the FUNC_START macro
# in crti.S. If you change this macro you must also change
# that macro match.
#
# Note - we do not try any fancy optimizations of the return
# sequences here, it is just not worth it. Instead keep things
# simple. Restore all the save resgisters, including the link
# register and then perform the correct function return instruction.
.macro FUNC_END
ldp x19, x20, [sp], #16
ldp x21, x22, [sp], #16
ldp x23, x24, [sp], #16
ldp x25, x26, [sp], #16
ldp x27, x28, [sp], #16
ldp x29, x30, [sp], #16
ret
.endm
.section ".init"
;;
FUNC_END
.section ".fini"
;;
FUNC_END
# end of crtn.S
|
4ms/metamodule-plugin-sdk
| 8,030
|
plugin-libc/libgcc/config/aarch64/lse.S
|
/* Out-of-line LSE atomics for AArch64 architecture.
Copyright (C) 2019-2022 Free Software Foundation, Inc.
Contributed by Linaro Ltd.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/*
* The problem that we are trying to solve is operating system deployment
* of ARMv8.1-Atomics, also known as Large System Exensions (LSE).
*
* There are a number of potential solutions for this problem which have
* been proposed and rejected for various reasons. To recap:
*
* (1) Multiple builds. The dynamic linker will examine /lib64/atomics/
* if HWCAP_ATOMICS is set, allowing entire libraries to be overwritten.
* However, not all Linux distributions are happy with multiple builds,
* and anyway it has no effect on main applications.
*
* (2) IFUNC. We could put these functions into libgcc_s.so, and have
* a single copy of each function for all DSOs. However, ARM is concerned
* that the branch-to-indirect-branch that is implied by using a PLT,
* as required by IFUNC, is too much overhead for smaller cpus.
*
* (3) Statically predicted direct branches. This is the approach that
* is taken here. These functions are linked into every DSO that uses them.
* All of the symbols are hidden, so that the functions are called via a
* direct branch. The choice of LSE vs non-LSE is done via one byte load
* followed by a well-predicted direct branch. The functions are compiled
* separately to minimize code size.
*/
#include "auto-target.h"
/* Tell the assembler to accept LSE instructions. */
#ifdef HAVE_AS_LSE
.arch armv8-a+lse
#else
.arch armv8-a
#endif
/* Declare the symbol gating the LSE implementations. */
.hidden __aarch64_have_lse_atomics
/* Turn size and memory model defines into mnemonic fragments. */
#if SIZE == 1
# define S b
# define UXT uxtb
# define B 0x00000000
#elif SIZE == 2
# define S h
# define UXT uxth
# define B 0x40000000
#elif SIZE == 4 || SIZE == 8 || SIZE == 16
# define S
# define UXT mov
# if SIZE == 4
# define B 0x80000000
# elif SIZE == 8
# define B 0xc0000000
# endif
#else
# error
#endif
#if MODEL == 1
# define SUFF _relax
# define A
# define L
# define M 0x000000
# define N 0x000000
# define BARRIER
#elif MODEL == 2
# define SUFF _acq
# define A a
# define L
# define M 0x400000
# define N 0x800000
# define BARRIER
#elif MODEL == 3
# define SUFF _rel
# define A
# define L l
# define M 0x008000
# define N 0x400000
# define BARRIER
#elif MODEL == 4
# define SUFF _acq_rel
# define A a
# define L l
# define M 0x408000
# define N 0xc00000
# define BARRIER
#elif MODEL == 5
# define SUFF _sync
#ifdef L_swp
/* swp has _acq semantics. */
# define A a
# define L
# define M 0x400000
# define N 0x800000
#else
/* All other _sync functions have _seq semantics. */
# define A a
# define L l
# define M 0x408000
# define N 0xc00000
#endif
# define BARRIER dmb ish
#else
# error
#endif
/* Concatenate symbols. */
#define glue2_(A, B) A ## B
#define glue2(A, B) glue2_(A, B)
#define glue3_(A, B, C) A ## B ## C
#define glue3(A, B, C) glue3_(A, B, C)
#define glue4_(A, B, C, D) A ## B ## C ## D
#define glue4(A, B, C, D) glue4_(A, B, C, D)
/* Select the size of a register, given a regno. */
#define x(N) glue2(x, N)
#define w(N) glue2(w, N)
#if SIZE < 8
# define s(N) w(N)
#else
# define s(N) x(N)
#endif
#define NAME(BASE) glue4(__aarch64_, BASE, SIZE, SUFF)
#if MODEL == 5
/* Drop A for _sync functions. */
# define LDXR glue3(ld, xr, S)
#else
# define LDXR glue4(ld, A, xr, S)
#endif
#define STXR glue4(st, L, xr, S)
/* Temporary registers used. Other than these, only the return value
register (x0) and the flags are modified. */
#define tmp0 16
#define tmp1 17
#define tmp2 15
#define BTI_C hint 34
/* Start and end a function. */
.macro STARTFN name
.text
.balign 16
.globl \name
.hidden \name
.type \name, %function
.cfi_startproc
\name:
BTI_C
.endm
.macro ENDFN name
.cfi_endproc
.size \name, . - \name
.endm
/* Branch to LABEL if LSE is disabled. */
.macro JUMP_IF_NOT_LSE label
adrp x(tmp0), __aarch64_have_lse_atomics
ldrb w(tmp0), [x(tmp0), :lo12:__aarch64_have_lse_atomics]
cbz w(tmp0), \label
.endm
#ifdef L_cas
STARTFN NAME(cas)
JUMP_IF_NOT_LSE 8f
#if SIZE < 16
#ifdef HAVE_AS_LSE
# define CAS glue4(cas, A, L, S) s(0), s(1), [x2]
#else
# define CAS .inst 0x08a07c41 + B + M
#endif
CAS /* s(0), s(1), [x2] */
ret
8: UXT s(tmp0), s(0)
0: LDXR s(0), [x2]
cmp s(0), s(tmp0)
bne 1f
STXR w(tmp1), s(1), [x2]
cbnz w(tmp1), 0b
1: BARRIER
ret
#else
#if MODEL == 5
/* Drop A for _sync functions. */
# define LDXP glue2(ld, xp)
#else
# define LDXP glue3(ld, A, xp)
#endif
#define STXP glue3(st, L, xp)
#ifdef HAVE_AS_LSE
# define CASP glue3(casp, A, L) x0, x1, x2, x3, [x4]
#else
# define CASP .inst 0x48207c82 + M
#endif
CASP /* x0, x1, x2, x3, [x4] */
ret
8: mov x(tmp0), x0
mov x(tmp1), x1
0: LDXP x0, x1, [x4]
cmp x0, x(tmp0)
ccmp x1, x(tmp1), #0, eq
bne 1f
STXP w(tmp2), x2, x3, [x4]
cbnz w(tmp2), 0b
1: BARRIER
ret
#endif
ENDFN NAME(cas)
#endif
#ifdef L_swp
#ifdef HAVE_AS_LSE
# define SWP glue4(swp, A, L, S) s(0), s(0), [x1]
#else
# define SWP .inst 0x38208020 + B + N
#endif
STARTFN NAME(swp)
JUMP_IF_NOT_LSE 8f
SWP /* s(0), s(0), [x1] */
ret
8: mov s(tmp0), s(0)
0: LDXR s(0), [x1]
STXR w(tmp1), s(tmp0), [x1]
cbnz w(tmp1), 0b
BARRIER
ret
ENDFN NAME(swp)
#endif
#if defined(L_ldadd) || defined(L_ldclr) \
|| defined(L_ldeor) || defined(L_ldset)
#ifdef L_ldadd
#define LDNM ldadd
#define OP add
#define OPN 0x0000
#elif defined(L_ldclr)
#define LDNM ldclr
#define OP bic
#define OPN 0x1000
#elif defined(L_ldeor)
#define LDNM ldeor
#define OP eor
#define OPN 0x2000
#elif defined(L_ldset)
#define LDNM ldset
#define OP orr
#define OPN 0x3000
#else
#error
#endif
#ifdef HAVE_AS_LSE
# define LDOP glue4(LDNM, A, L, S) s(0), s(0), [x1]
#else
# define LDOP .inst 0x38200020 + OPN + B + N
#endif
STARTFN NAME(LDNM)
JUMP_IF_NOT_LSE 8f
LDOP /* s(0), s(0), [x1] */
ret
8: mov s(tmp0), s(0)
0: LDXR s(0), [x1]
OP s(tmp1), s(0), s(tmp0)
STXR w(tmp2), s(tmp1), [x1]
cbnz w(tmp2), 0b
BARRIER
ret
ENDFN NAME(LDNM)
#endif
/* GNU_PROPERTY_AARCH64_* macros from elf.h for use in asm code. */
#define FEATURE_1_AND 0xc0000000
#define FEATURE_1_BTI 1
#define FEATURE_1_PAC 2
/* Supported features based on the code generation options. */
#if defined(__ARM_FEATURE_BTI_DEFAULT)
# define BTI_FLAG FEATURE_1_BTI
#else
# define BTI_FLAG 0
#endif
#if __ARM_FEATURE_PAC_DEFAULT & 3
# define PAC_FLAG FEATURE_1_PAC
#else
# define PAC_FLAG 0
#endif
/* Add a NT_GNU_PROPERTY_TYPE_0 note. */
#define GNU_PROPERTY(type, value) \
.section .note.gnu.property, "a"; \
.p2align 3; \
.word 4; \
.word 16; \
.word 5; \
.asciz "GNU"; \
.word type; \
.word 4; \
.word value; \
.word 0;
#if defined(__linux__) || defined(__FreeBSD__)
.section .note.GNU-stack, "", %progbits
/* Add GNU property note if built with branch protection. */
# if (BTI_FLAG|PAC_FLAG) != 0
GNU_PROPERTY (FEATURE_1_AND, BTI_FLAG|PAC_FLAG)
# endif
#endif
|
4ms/metamodule-plugin-sdk
| 1,995
|
plugin-libc/libgcc/config/aarch64/crti.S
|
# Machine description for AArch64 architecture.
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
# Contributed by ARM Ltd.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
/* An executable stack is *not* required for these functions. */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif
# This file creates a stack frame for the contents of the .fini and
# .init sections. Users may put any desired instructions in those
# sections.
#ifdef __ELF__
#define TYPE(x) .type x,function
#else
#define TYPE(x)
#endif
# Note - this macro is complemented by the FUNC_END macro
# in crtn.S. If you change this macro you must also change
# that macro match.
.macro FUNC_START
# Create a stack frame and save any call-preserved registers
stp x29, x30, [sp, #-16]!
stp x27, x28, [sp, #-16]!
stp x25, x26, [sp, #-16]!
stp x23, x24, [sp, #-16]!
stp x21, x22, [sp, #-16]!
stp x19, x20, [sp, #-16]!
.endm
.section ".init"
.align 2
.global _init
TYPE(_init)
_init:
FUNC_START
.section ".fini"
.align 2
.global _fini
TYPE(_fini)
_fini:
FUNC_START
# end of crti.S
|
4ms/metamodule-plugin-sdk
| 5,886
|
plugin-libc/libgcc/config/frv/lib1funcs.S
|
/* Library functions.
Copyright (C) 2000-2022 Free Software Foundation, Inc.
Contributed by Red Hat, Inc.
This file is part of GCC.
GCC is free software ; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include <frv-asm.h>
#ifdef L_cmpll
/* icc0 = __cmpll (long long a, long long b) */
.globl EXT(__cmpll)
.type EXT(__cmpll),@function
.text
.p2align 4
EXT(__cmpll):
cmp gr8, gr10, icc0
ckeq icc0, cc4
P(ccmp) gr9, gr11, cc4, 1
ret
.Lend:
.size EXT(__cmpll),.Lend-EXT(__cmpll)
#endif /* L_cmpll */
#ifdef L_cmpf
/* icc0 = __cmpf (float a, float b) */
/* Note, because this function returns the result in ICC0, it means it can't
handle NaNs. */
.globl EXT(__cmpf)
.type EXT(__cmpf),@function
.text
.p2align 4
EXT(__cmpf):
#ifdef __FRV_HARD_FLOAT__ /* floating point instructions available */
movgf gr8, fr0
P(movgf) gr9, fr1
setlos #1, gr8
fcmps fr0, fr1, fcc0
P(fcklt) fcc0, cc0
fckeq fcc0, cc1
csub gr0, gr8, gr8, cc0, 1
cmov gr0, gr8, cc1, 1
cmpi gr8, 0, icc0
ret
#else /* no floating point instructions available */
movsg lr, gr4
addi sp, #-16, sp
sti gr4, @(sp, 8)
st fp, @(sp, gr0)
mov sp, fp
call EXT(__cmpsf2)
cmpi gr8, #0, icc0
ldi @(sp, 8), gr4
movgs gr4, lr
ld @(sp,gr0), fp
addi sp, #16, sp
ret
#endif
.Lend:
.size EXT(__cmpf),.Lend-EXT(__cmpf)
#endif
#ifdef L_cmpd
/* icc0 = __cmpd (double a, double b) */
/* Note, because this function returns the result in ICC0, it means it can't
handle NaNs. */
.globl EXT(__cmpd)
.type EXT(__cmpd),@function
.text
.p2align 4
EXT(__cmpd):
movsg lr, gr4
addi sp, #-16, sp
sti gr4, @(sp, 8)
st fp, @(sp, gr0)
mov sp, fp
call EXT(__cmpdf2)
cmpi gr8, #0, icc0
ldi @(sp, 8), gr4
movgs gr4, lr
ld @(sp,gr0), fp
addi sp, #16, sp
ret
.Lend:
.size EXT(__cmpd),.Lend-EXT(__cmpd)
#endif
#ifdef L_addll
/* gr8,gr9 = __addll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__addll)
.type EXT(__addll),@function
.text
.p2align
EXT(__addll):
addcc gr9, gr11, gr9, icc0
addx gr8, gr10, gr8, icc0
ret
.Lend:
.size EXT(__addll),.Lend-EXT(__addll)
#endif
#ifdef L_subll
/* gr8,gr9 = __subll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__subll)
.type EXT(__subll),@function
.text
.p2align 4
EXT(__subll):
subcc gr9, gr11, gr9, icc0
subx gr8, gr10, gr8, icc0
ret
.Lend:
.size EXT(__subll),.Lend-EXT(__subll)
#endif
#ifdef L_andll
/* gr8,gr9 = __andll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__andll)
.type EXT(__andll),@function
.text
.p2align 4
EXT(__andll):
P(and) gr9, gr11, gr9
P2(and) gr8, gr10, gr8
ret
.Lend:
.size EXT(__andll),.Lend-EXT(__andll)
#endif
#ifdef L_orll
/* gr8,gr9 = __orll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__orll)
.type EXT(__orll),@function
.text
.p2align 4
EXT(__orll):
P(or) gr9, gr11, gr9
P2(or) gr8, gr10, gr8
ret
.Lend:
.size EXT(__orll),.Lend-EXT(__orll)
#endif
#ifdef L_xorll
/* gr8,gr9 = __xorll (long long a, long long b) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__xorll)
.type EXT(__xorll),@function
.text
.p2align 4
EXT(__xorll):
P(xor) gr9, gr11, gr9
P2(xor) gr8, gr10, gr8
ret
.Lend:
.size EXT(__xorll),.Lend-EXT(__xorll)
#endif
#ifdef L_notll
/* gr8,gr9 = __notll (long long a) */
/* Note, gcc will never call this function, but it is present in case an
ABI program calls it. */
.globl EXT(__notll)
.type EXT(__notll),@function
.text
.p2align 4
EXT(__notll):
P(not) gr9, gr9
P2(not) gr8, gr8
ret
.Lend:
.size EXT(__notll),.Lend-EXT(__notll)
#endif
#ifdef L_cmov
/* (void) __cmov (char *dest, const char *src, size_t len) */
/*
* void __cmov (char *dest, const char *src, size_t len)
* {
* size_t i;
*
* if (dest < src || dest > src+len)
* {
* for (i = 0; i < len; i++)
* dest[i] = src[i];
* }
* else
* {
* while (len-- > 0)
* dest[len] = src[len];
* }
* }
*/
.globl EXT(__cmov)
.type EXT(__cmov),@function
.text
.p2align 4
EXT(__cmov):
P(cmp) gr8, gr9, icc0
add gr9, gr10, gr4
P(cmp) gr8, gr4, icc1
bc icc0, 0, .Lfwd
bls icc1, 0, .Lback
.Lfwd:
/* move bytes in a forward direction */
P(setlos) #0, gr5
cmp gr0, gr10, icc0
P(subi) gr9, #1, gr9
P2(subi) gr8, #1, gr8
bnc icc0, 0, .Lret
.Lfloop:
/* forward byte move loop */
addi gr5, #1, gr5
P(ldsb) @(gr9, gr5), gr4
cmp gr5, gr10, icc0
P(stb) gr4, @(gr8, gr5)
bc icc0, 0, .Lfloop
ret
.Lbloop:
/* backward byte move loop body */
ldsb @(gr9,gr10),gr4
stb gr4,@(gr8,gr10)
.Lback:
P(cmpi) gr10, #0, icc0
addi gr10, #-1, gr10
bne icc0, 0, .Lbloop
.Lret:
ret
.Lend:
.size EXT(__cmov),.Lend-EXT(__cmov)
#endif
|
4ms/metamodule-plugin-sdk
| 1,155
|
plugin-libc/libgcc/config/epiphany/crtn.S
|
# End .init and .fini sections.
# Copyright (C) 2010-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.
.section .init
ldr lr,[sp,4]
add sp,sp,16
jr lr
.section .fini
ldr lr,[sp,4]
add sp,sp,16
jr lr
|
4ms/metamodule-plugin-sdk
| 2,155
|
plugin-libc/libgcc/config/epiphany/umodsi3.S
|
/* Unsigned 32 bit modulo optimized for Epiphany.
Copyright (C) 2009-2022 Free Software Foundation, Inc.
Contributed by Embecosm on behalf of Adapteva, Inc.
This file is part of GCC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#include "epiphany-asm.h"
FSTAB (__umodsi3,T_UINT)
.global SYM(__umodsi3)
.balign 4
HIDDEN_FUNC(__umodsi3)
SYM(__umodsi3):
mov r2,5
lsl r2,r2,29 ; 0xa0000000
orr r3,r2,r0
lsr r15,r0,16
movt r15,0xa800
movne r3,r15
lsr r16,r2,2 ; 0x28000000
and r15,r3,r16
fadd r12,r3,r15
orr r3,r2,r1
lsr r2,r1,16
movt r2,0xa800
movne r3,r2
and r2,r16,r3
fadd r3,r3,r2
sub r2,r0,r1
bltu .Lret_a
lsr r12,r12,23
mov r2,%low(.L0step)
movt r2,%high(.L0step)
lsr r3,r3,23
sub r3,r12,r3 ; calculate bit number difference.
lsl r3,r3,3
sub r2,r2,r3
jr r2
/* lsl_l r2,r1,n` sub r2,r0,r2` movgteu r0,r2 */
#define STEP(n) .long 0x0006441f | (n) << 5` sub r2,r0,r2` movgteu r0,r2
.balign 8,,2
STEP(31)` STEP(30)` STEP(29)` STEP(28)`
STEP(27)` STEP(26)` STEP(25)` STEP(24)`
STEP(23)` STEP(22)` STEP(21)` STEP(20)`
STEP(19)` STEP(18)` STEP(17)` STEP(16)`
STEP(15)` STEP(14)` STEP(13)` STEP(12)`
STEP(11)` STEP(10)` STEP(9)` STEP(8)`
STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
.L0step:STEP(0)
.Lret_a:rts
ENDFUNC(__umodsi3)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.