repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
abforce/xposed_art_n
| 5,218
|
runtime/interpreter/mterp/mips/footer.S
|
/*
 * ===========================================================================
 * Common subroutines and data
 * ===========================================================================
 */
.text
.align 2
/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */
/* Divide by zero detected: optionally log, then retry in the reference interpreter. */
common_errDivideByZero:
EXPORT_PC()
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpLogDivideByZeroException) # (self, shadow_frame)
#endif
b MterpCommonFallback
/* Array index out of range: optionally log, then retry in the reference interpreter. */
common_errArrayIndex:
EXPORT_PC()
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpLogArrayIndexException) # (self, shadow_frame)
#endif
b MterpCommonFallback
/* Negative array size: optionally log, then retry in the reference interpreter. */
common_errNegativeArraySize:
EXPORT_PC()
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpLogNegativeArraySizeException) # (self, shadow_frame)
#endif
b MterpCommonFallback
/* Method resolution failed: optionally log, then retry in the reference interpreter. */
common_errNoSuchMethod:
EXPORT_PC()
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpLogNoSuchMethodException) # (self, shadow_frame)
#endif
b MterpCommonFallback
/* Null object dereference: optionally log, then retry in the reference interpreter. */
common_errNullObject:
EXPORT_PC()
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpLogNullObjectException) # (self, shadow_frame)
#endif
b MterpCommonFallback
/* An exception was thrown: optionally log, then retry in the reference interpreter. */
common_exceptionThrown:
EXPORT_PC()
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpLogExceptionThrownException) # (self, shadow_frame)
#endif
b MterpCommonFallback
/* Suspend handling needs the reference interpreter: optionally log flags, then fall back. */
MterpSuspendFallback:
EXPORT_PC()
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
lw a2, THREAD_FLAGS_OFFSET(rSELF) # arg2: current thread flags
JAL(MterpLogSuspendFallback) # (self, shadow_frame, flags)
#endif
b MterpCommonFallback
/*
 * If we're here, something is out of the ordinary. If there is a pending
 * exception, handle it. Otherwise, roll back and retry with the reference
 * interpreter.
 */
MterpPossibleException:
lw a0, THREAD_EXCEPTION_OFFSET(rSELF)
beqz a0, MterpFallback # No pending exception: retry with the reference interpreter.
/* intentional fallthrough - handle pending exception. */
/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here - or need to bail out to caller?
 *
 */
MterpException:
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpHandleException) # (self, shadow_frame)
beqz v0, MterpExceptionReturn # no local catch, back to caller.
lw a0, OFF_FP_CODE_ITEM(rFP)
lw a1, OFF_FP_DEX_PC(rFP)
lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
addu rPC, a0, CODEITEM_INSNS_OFFSET # rPC <- &code_item->insns_[0]
sll a1, a1, 1 # dex pc is in 16-bit code units
addu rPC, rPC, a1 # generate new dex_pc_ptr
/* Do we need to switch interpreters? */
JAL(MterpShouldSwitchInterpreters)
bnez v0, MterpFallback
/* resume execution at catch block */
EXPORT_PC()
FETCH_INST()
GET_INST_OPCODE(t0)
GOTO_OPCODE(t0)
/* NOTE: no fallthrough */
/*
 * Check for suspend check request. Assumes rINST already loaded, rPC advanced and
 * still needs to get the opcode and branch to it, and flags are in ra.
 */
MterpCheckSuspendAndContinue:
lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh rIBASE
and ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) # suspend/checkpoint requested?
bnez ra, 1f
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
1:
EXPORT_PC()
move a0, rSELF
JAL(MterpSuspendCheck) # (self)
bnez v0, MterpFallback # nonzero: must switch interpreters
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST # rINST holds the branch offset
JAL(MterpLogOSR) # (self, shadow_frame, offset)
#endif
li v0, 1 # Signal normal return
b MterpDone
/*
 * Bail out to reference interpreter.
 */
MterpFallback:
EXPORT_PC()
#if MTERP_LOGGING
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
JAL(MterpLogFallback) # (self, shadow_frame)
#endif
MterpCommonFallback:
move v0, zero # signal retry with reference interpreter.
b MterpDone
/*
 * We pushed some registers on the stack in ExecuteMterpImpl, then saved
 * SP and LR. Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 * uint32_t* rFP (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
li v0, 1 # signal return to caller.
b MterpDone
/* Normal return: store the 64-bit result into the caller's JValue, then exit. */
MterpReturn:
lw a2, OFF_FP_RESULT_REGISTER(rFP)
sw v0, 0(a2) # result lo word
sw v1, 4(a2) # result hi word
li v0, 1 # signal return to caller.
MterpDone:
/* Restore from the stack and return. Frame size = STACK_SIZE */
STACK_LOAD_FULL()
jalr zero, ra # return to caller
.end ExecuteMterpImpl
|
abforce/xposed_art_n
| 1,507
|
runtime/interpreter/mterp/mips/op_cmp_long.S
|
/*
 * Compare two 64-bit values
 * x = y return 0
 * x < y return -1
 * x > y return 1
 *
 * I think I can improve on the ARM code by the following observation
 * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
 * sgt t1, x.hi, y.hi; # (x.hi > y.hi) ? 1:0
 * subu v0, t1, t0 # v0= -1:1:0 for [ < > = ]
 */
/* cmp-long vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
EAS2(a3, rFP, a3) # a3 <- &fp[CC]
LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
slt t0, a1, a3 # compare hi words (signed)
sgt t1, a1, a3
subu v0, t1, t0 # v0 <- (-1, 1, 0)
bnez v0, .L${opcode}_finish # hi words differ: result is decided
# at this point x.hi==y.hi
sltu t0, a0, a2 # compare lo words (unsigned)
sgtu t1, a0, a2
subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
.L${opcode}_finish:
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
|
abforce/xposed_art_n
| 1,479
|
runtime/interpreter/mterp/mips/op_aget.S
|
%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
 * Array get, 32 bits or less. vAA <- vBB[vCC].
 *
 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
 * instructions. We use a pair of FETCH_Bs instead.
 *
 * for: aget, aget-boolean, aget-byte, aget-char, aget-short
 *
 * NOTE: assumes data offset for arrays is the same for all non-wide types.
 * If this changes, specialize.
 */
/* op vAA, vBB, vCC */
FETCH_B(a2, 1, 0) # a2 <- BB
GET_OPA(rOBJ) # rOBJ <- AA
FETCH_B(a3, 1, 1) # a3 <- CC
GET_VREG(a0, a2) # a0 <- vBB (array object)
GET_VREG(a1, a3) # a1 <- vCC (requested index)
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
.if $shift
EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
.else
addu a0, a0, a1 # element width is one byte
.endif
# a1 >= a3; compare unsigned index
# (the element address computed above is only dereferenced after this bounds check)
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
$load a2, $data_offset(a0) # a2 <- vBB[vCC]
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
|
abforce/xposed_art_n
| 1,559
|
runtime/interpreter/mterp/mips/binop.S
|
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
 * Generic 32-bit binary operation. Provide an "instr" line that
 * specifies an instruction that performs "result = a0 op a1".
 * This could be a MIPS instruction or a function call. (If the result
 * comes back in a register other than a0, you can override "result".)
 *
 * If "chkzero" is set to 1, we perform a divide-by-zero check on
 * vCC (a1). Useful for integer division and modulus. Note that we
 * *don't* check for (INT_MIN / -1) here, because the CPU handles it
 * correctly.
 *
 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
 * xor-int, shl-int, shr-int, ushr-int
 */
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(rOBJ) # rOBJ <- AA
srl a3, a0, 8 # a3 <- CC
and a2, a0, 255 # a2 <- BB
GET_VREG(a1, a3) # a1 <- vCC
GET_VREG(a0, a2) # a0 <- vBB
.if $chkzero
# is second operand zero?
# (checked before rPC advances; common_errDivideByZero exports the PC itself)
beqz a1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
/* 11-14 instructions */
|
abforce/xposed_art_n
| 1,482
|
runtime/interpreter/mterp/mips/op_shl_long.S
|
/*
 * Long integer shift. This is different from the generic 32/64-bit
 * binary operations because vAA/vBB are 64-bit but vCC (the shift
 * distance) is 32-bit. Also, Dalvik requires us to mask off the low
 * 6 bits of the shift distance.
 */
/* shl-long vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(t2) # t2 <- AA
and a3, a0, 255 # a3 <- BB
srl a0, a0, 8 # a0 <- CC
EAS2(a3, rFP, a3) # a3 <- &fp[BB]
GET_VREG(a2, a0) # a2 <- vCC
LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
andi v1, a2, 0x20 # v1 <- shift & 0x20 (is shift >= 32?)
sll v0, a0, a2 # rlo<- alo << (shift&31)
bnez v1, .L${opcode}_finish # shift >= 32: lo word becomes zero
not v1, a2 # rhi<- 31-shift (shift is 5b)
srl a0, 1
srl a0, v1 # alo<- alo >> (32-(shift&31))
sll v1, a1, a2 # rhi<- ahi << (shift&31)
or v1, a0 # rhi<- rhi | alo
SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1
%break
.L${opcode}_finish:
# shift >= 32: result is (alo << (shift&31)) in the hi word, zero in the lo word
SET_VREG64_GOTO(zero, v0, t2, t0) # vAA/vAA+1 <- 0/v0
|
abforce/xposed_art_n
| 2,216
|
runtime/interpreter/mterp/mips/entry.S
|
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */
.text
.align 2
.global ExecuteMterpImpl
.ent ExecuteMterpImpl
.frame sp, STACK_SIZE, ra
/*
 * On entry:
 * a0 Thread* self
 * a1 code_item
 * a2 ShadowFrame
 * a3 JValue* result_register
 *
 */
ExecuteMterpImpl:
.set noreorder
.cpload t9
.set reorder
/* Save to the stack. Frame size = STACK_SIZE */
STACK_STORE_FULL()
/* This directive will make sure all subsequent jal restore gp at a known offset */
.cprestore STACK_OFFSET_GP
/* Remember the return register */
sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
/* Remember the code_item */
sw a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
/* set up "named" registers */
move rSELF, a0
lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[]
EAS1(rPC, rPC, a0) # Create direct pointer to 1st dex opcode
EXPORT_PC() # export starting pc before anything can throw
/* Starting ibase */
lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
/* start executing the instruction at rPC */
FETCH_INST() # load rINST from rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
/* NOTE: no fallthrough */
|
abforce/xposed_art_n
| 1,335
|
runtime/interpreter/mterp/mips/binop2addr.S
|
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
 * that specifies an instruction that performs "result = a0 op a1".
 * This could be an MIPS instruction or a function call.
 *
 * If "chkzero" is set to 1, we perform a divide-by-zero check on
 * vCC (a1). Useful for integer division and modulus.
 *
 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
 * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
 * shl-int/2addr, shr-int/2addr, ushr-int/2addr
 */
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG(a0, rOBJ) # a0 <- vA
GET_VREG(a1, a3) # a1 <- vB
.if $chkzero
# is second operand zero?
# (checked before rPC advances; common_errDivideByZero exports the PC itself)
beqz a1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO($result, rOBJ, t0) # vA <- $result
/* 10-13 instructions */
|
abforce/xposed_art_n
| 14,836
|
runtime/interpreter/mterp/mips/header.S
|
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
Art assembly interpreter notes:
First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
handle invoke, allows higher-level code to create frame & shadow frame.
Once that's working, support direct entry code & eliminate shadow frame (and
excess locals allocation.
Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
base of the vreg array within the shadow frame. Access the other fields,
dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
the shadow frame mechanism of double-storing object references - via rFP &
number_of_vregs_.
*/
#include "asm_support.h"
/* ISA feature detection; each nested test only applies when the enclosing one passed. */
#if (__mips==32) && (__mips_isa_rev>=2)
#define MIPS32REVGE2 /* mips32r2 and greater */
#if (__mips==32) && (__mips_isa_rev>=5)
#define FPU64 /* 64 bit FPU */
#if (__mips==32) && (__mips_isa_rev>=6)
#define MIPS32REVGE6 /* mips32r6 and greater */
#endif
#endif
#endif
/* MIPS definitions and declarations
reg nick purpose
s0 rPC interpreted program counter, used for fetching instructions
s1 rFP interpreted frame pointer, used for accessing locals and args
s2 rSELF self (Thread) pointer
s3 rIBASE interpreted instruction base pointer, used for computed goto
s4 rINST first 16-bit code unit of current instruction
s6 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
*/
/* single-purpose registers, given names for clarity */
#define rPC s0
#define rFP s1
#define rSELF s2
#define rIBASE s3
#define rINST s4
#define rOBJ s5
#define rREFS s6
#define rTEMP s7
#define rARG0 a0
#define rARG1 a1
#define rARG2 a2
#define rARG3 a3
#define rRESULT0 v0
#define rRESULT1 v1
/* GP register definitions ("$$" is the mterp template escape for a literal "$") */
#define zero $$0 /* always zero */
#define AT $$at /* assembler temp */
#define v0 $$2 /* return value */
#define v1 $$3
#define a0 $$4 /* argument registers */
#define a1 $$5
#define a2 $$6
#define a3 $$7
#define t0 $$8 /* temp registers (not saved across subroutine calls) */
#define t1 $$9
#define t2 $$10
#define t3 $$11
#define t4 $$12
#define t5 $$13
#define t6 $$14
#define t7 $$15
#define ta0 $$12 /* alias */
#define ta1 $$13
#define ta2 $$14
#define ta3 $$15
#define s0 $$16 /* saved across subroutine calls (callee saved) */
#define s1 $$17
#define s2 $$18
#define s3 $$19
#define s4 $$20
#define s5 $$21
#define s6 $$22
#define s7 $$23
#define t8 $$24 /* two more temp registers */
#define t9 $$25
#define k0 $$26 /* kernel temporary */
#define k1 $$27
#define gp $$28 /* global pointer */
#define sp $$29 /* stack pointer */
#define s8 $$30 /* one more callee saved */
#define ra $$31 /* return address */
/* FP register definitions (even/odd pairs; "f" suffix names the odd half of a pair) */
#define fv0 $$f0
#define fv0f $$f1
#define fv1 $$f2
#define fv1f $$f3
#define fa0 $$f12
#define fa0f $$f13
#define fa1 $$f14
#define fa1f $$f15
#define ft0 $$f4
#define ft0f $$f5
#define ft1 $$f6
#define ft1f $$f7
#define ft2 $$f8
#define ft2f $$f9
#define ft3 $$f10
#define ft3f $$f11
#define ft4 $$f16
#define ft4f $$f17
#define ft5 $$f18
#define ft5f $$f19
#define fs0 $$f20
#define fs0f $$f21
#define fs1 $$f22
#define fs1f $$f23
#define fs2 $$f24
#define fs2f $$f25
#define fs3 $$f26
#define fs3f $$f27
#define fs4 $$f28
#define fs4f $$f29
#define fs5 $$f30
#define fs5f $$f31
#ifndef MIPS32REVGE6
#define fcc0 $$fcc0
#define fcc1 $$fcc1
#endif
/*
 * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
 * to access other shadow frame fields, we need to use a backwards offset. Define those here.
 */
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
/* Feature switches: branch profiling feeds JIT/OSR decisions; logging is off by default. */
#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0
/*
 * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
 * be done *before* something throws.
 *
 * It's okay to do this more than once.
 *
 * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
 * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
 * offset into the code_items_[] array. For efficiency, we will "export" the
 * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
 * to convert to a dex pc when needed.
 */
#define EXPORT_PC() \
sw rPC, OFF_FP_DEX_PC_PTR(rFP)
#define EXPORT_DEX_PC(tmp) \
lw tmp, OFF_FP_CODE_ITEM(rFP) \
sw rPC, OFF_FP_DEX_PC_PTR(rFP) \
addu tmp, CODEITEM_INSNS_OFFSET \
subu tmp, rPC, tmp \
sra tmp, tmp, 1 \
sw tmp, OFF_FP_DEX_PC(rFP)
/*
 * Fetch the next instruction from rPC into rINST. Does not advance rPC.
 */
#define FETCH_INST() lhu rINST, (rPC)
/*
 * Fetch the next instruction from the specified offset. Advances rPC
 * to point to the next instruction. "_count" is in 16-bit code units.
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss. (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
addu rPC, rPC, ((_count) * 2)
/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
lhu _dreg, ((_count)*2)(_sreg) ; \
addu _sreg, _sreg, (_count)*2
/*
 * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
 * rINST ahead of possible exception point. Be sure to manually advance rPC
 * later.
 */
#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
/* Advance rPC by some number of code units. */
#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
/*
 * Fetch the next instruction from an offset specified by rd. Updates
 * rPC to point to the next instruction. "rd" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 */
#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
lhu rINST, (rPC)
/*
 * Fetch a half-word code unit from an offset past the current PC. The
 * "_count" value is in 16-bit code units. Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
/*
 * Fetch one byte from an offset past the current PC. Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(dreg, sreg) andi dreg, sreg, 255
/*
 * Begin executing the opcode in rd.
 */
#define GOTO_OPCODE(rd) sll rd, rd, ${handler_size_bits}; \
addu rd, rIBASE, rd; \
jalr zero, rd
#define GOTO_OPCODE_BASE(_base, rd) sll rd, rd, ${handler_size_bits}; \
addu rd, _base, rd; \
jalr zero, rd
/*
 * Get/set the 32-bit value from a Dalvik register.
 * Setters also update the parallel reference array at rREFS: zero for
 * non-object stores, a copy of the value for SET_VREG_OBJECT.
 */
#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
.set noat; l.s rd, (AT); .set at
#define SET_VREG(rd, rix) .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rd, 0(t8); \
addu t8, rREFS, AT; \
.set at; \
sw zero, 0(t8)
#define SET_VREG64(rlo, rhi, rix) .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rlo, 0(t8); \
sw rhi, 4(t8); \
addu t8, rREFS, AT; \
.set at; \
sw zero, 0(t8); \
sw zero, 4(t8)
#ifdef FPU64
#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
sll AT, rix, 2; \
addu t8, rREFS, AT; \
sw zero, 0(t8); \
sw zero, 4(t8); \
addu t8, rFP, AT; \
mfhc1 AT, rlo; \
sw AT, 4(t8); \
.set at; \
s.s rlo, 0(t8)
#else
#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
s.s rlo, 0(t8); \
s.s rhi, 4(t8); \
addu t8, rREFS, AT; \
.set at; \
sw zero, 0(t8); \
sw zero, 4(t8)
#endif
#define SET_VREG_OBJECT(rd, rix) .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rd, 0(t8); \
addu t8, rREFS, AT; \
.set at; \
sw rd, 0(t8)
/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
sll dst, dst, ${handler_size_bits}; \
addu dst, rIBASE, dst; \
.set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rd, 0(t8); \
addu t8, rREFS, AT; \
.set at; \
jalr zero, dst; \
sw zero, 0(t8); \
.set reorder
/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
sll dst, dst, ${handler_size_bits}; \
addu dst, rIBASE, dst; \
.set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rlo, 0(t8); \
sw rhi, 4(t8); \
addu t8, rREFS, AT; \
.set at; \
sw zero, 0(t8); \
jalr zero, dst; \
sw zero, 4(t8); \
.set reorder
#define SET_VREG_F(rd, rix) .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
s.s rd, 0(t8); \
addu t8, rREFS, AT; \
.set at; \
sw zero, 0(t8)
/* Instruction-field extractors: OPA = bits 8..15 of rINST, OPA4 = bits 8..11, OPB = bits 12..15. */
#define GET_OPA(rd) srl rd, rINST, 8
#ifdef MIPS32REVGE2
#define GET_OPA4(rd) ext rd, rINST, 8, 4
#else
#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
#endif
#define GET_OPB(rd) srl rd, rINST, 12
/*
 * Form an Effective Address rd = rbase + roff<<n;
 * Uses reg AT
 */
#define EASN(rd, rbase, roff, rshift) .set noat; \
sll AT, roff, rshift; \
addu rd, rbase, AT; \
.set at
#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
/*
 * Form an Effective Shift Right rd = rbase + roff>>n;
 * Uses reg AT
 */
#define ESRN(rd, rbase, roff, rshift) .set noat; \
srl AT, roff, rshift; \
addu rd, rbase, AT; \
.set at
#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
.set noat; lw rd, 0(AT); .set at
#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
.set noat; sw rd, 0(AT); .set at
#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
/* 64-bit values are kept as lo word at off, hi word at off+4. */
#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
sw rhi, (off+4)(rbase)
#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
lw rhi, (off+4)(rbase)
#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
#ifdef FPU64
#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
.set noat; \
mfhc1 AT, rlo; \
sw AT, (off+4)(rbase); \
.set at
#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
.set noat; \
lw AT, (off+4)(rbase); \
mthc1 AT, rlo; \
.set at
#else
#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
s.s rhi, (off+4)(rbase)
#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
l.s rhi, (off+4)(rbase)
#endif
#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
/* Stack frame helpers */
#define STACK_STORE(rd, off) sw rd, off(sp)
#define STACK_LOAD(rd, off) lw rd, off(sp)
#define CREATE_STACK(n) subu sp, sp, n
#define DELETE_STACK(n) addu sp, sp, n
#define LOAD_ADDR(dest, addr) la dest, addr
#define LOAD_IMM(dest, imm) li dest, imm
#define MOVE_REG(dest, src) move dest, src
#define STACK_SIZE 128
#define STACK_OFFSET_ARG04 16
#define STACK_OFFSET_ARG05 20
#define STACK_OFFSET_ARG06 24
#define STACK_OFFSET_ARG07 28
#define STACK_OFFSET_GP 84
#define JAL(n) jal n
#define BAL(n) bal n
/*
 * FP register usage restrictions:
 * 1) We don't use the callee save FP registers so we don't have to save them.
 * 2) We don't use the odd FP registers so we can share code with mips32r6.
 */
#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
STACK_STORE(ra, 124); \
STACK_STORE(s8, 120); \
STACK_STORE(s0, 116); \
STACK_STORE(s1, 112); \
STACK_STORE(s2, 108); \
STACK_STORE(s3, 104); \
STACK_STORE(s4, 100); \
STACK_STORE(s5, 96); \
STACK_STORE(s6, 92); \
STACK_STORE(s7, 88);
#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
STACK_LOAD(s7, 88); \
STACK_LOAD(s6, 92); \
STACK_LOAD(s5, 96); \
STACK_LOAD(s4, 100); \
STACK_LOAD(s3, 104); \
STACK_LOAD(s2, 108); \
STACK_LOAD(s1, 112); \
STACK_LOAD(s0, 116); \
STACK_LOAD(s8, 120); \
STACK_LOAD(ra, 124); \
DELETE_STACK(STACK_SIZE)
|
abforce/xposed_art_n
| 1,378
|
runtime/interpreter/mterp/mips/op_aput.S
|
%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
 * Array put, 32 bits or less. vBB[vCC] <- vAA.
 *
 * for: aput, aput-boolean, aput-byte, aput-char, aput-short
 *
 * NOTE: this assumes data offset for arrays is the same for all non-wide types.
 * If this changes, specialize.
 */
/* op vAA, vBB, vCC */
FETCH_B(a2, 1, 0) # a2 <- BB
GET_OPA(rOBJ) # rOBJ <- AA
FETCH_B(a3, 1, 1) # a3 <- CC
GET_VREG(a0, a2) # a0 <- vBB (array object)
GET_VREG(a1, a3) # a1 <- vCC (requested index)
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
.if $shift
EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
.else
addu a0, a0, a1 # element width is one byte
.endif
# unsigned bounds check (the element address above is only used after this check)
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_VREG(a2, rOBJ) # a2 <- vAA
GET_INST_OPCODE(t0) # extract opcode from rINST
$store a2, $data_offset(a0) # vBB[vCC] <- a2
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 1,268
|
runtime/interpreter/mterp/mips/op_shr_long_2addr.S
|
/*
 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
 * 32-bit shift distance. Arithmetic (sign-propagating) right shift.
 */
/* shr-long/2addr vA, vB */
GET_OPA4(t2) # t2 <- A+
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB
EAS2(t0, rFP, t2) # t0 <- &fp[A]
LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
andi v0, a2, 0x20 # shift & 0x20 (is shift >= 32?)
sra v1, a1, a2 # rhi<- ahi >> (shift&31)
bnez v0, .L${opcode}_finish # shift >= 32: hi word is all sign bits
srl v0, a0, a2 # rlo<- alo >> (shift&31)
not a0, a2 # alo<- 31-shift (shift is 5b)
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
SET_VREG64_GOTO(v0, v1, t2, t0) # vA/vA+1 <- v0/v1
%break
.L${opcode}_finish:
sra a3, a1, 31 # a3<- sign(ah)
SET_VREG64_GOTO(v1, a3, t2, t0) # vA/vA+1 <- v1/a3
|
abforce/xposed_art_n
| 1,349
|
runtime/interpreter/mterp/mips/zcmp.S
|
/*
 * Generic one-operand compare-and-branch operation. Provide a "revcmp"
 * fragment that specifies the *reverse* comparison to perform, e.g.
 * for "if-le" you would use "gt".
 *
 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
 */
/* if-cmp vAA, +BBBB */
GET_OPA(a0) # a0 <- AA
GET_VREG(a2, a0) # a2 <- vAA
FETCH_S(rINST, 1) # rINST <- branch offset, in code units
b${revcmp} a2, zero, 1f # branch to 1 if comparison failed
b 2f # comparison passed: keep real offset in rINST
1:
li rINST, 2 # rINST <- code-unit branch dist for not-taken
2:
#if MTERP_PROFILE_BRANCHES
EXPORT_PC()
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
JAL(MterpProfileBranch) # (self, shadow_frame, offset)
bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
#endif
addu a1, rINST, rINST # convert to bytes
FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
bgez a1, 3f # forward branch: skip the suspend check
lw ra, THREAD_FLAGS_OFFSET(rSELF)
b MterpCheckSuspendAndContinue
3:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 1,108
|
runtime/interpreter/mterp/mips/op_iget.S
|
%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
/*
 * General instance field get.
 *
 * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
 */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
move a3, rSELF # a3 <- self
JAL($helper) # (field_idx, obj, referrer, self)
lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
.if $is_object
SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
.else
SET_VREG(v0, a2) # fp[A] <- v0
.endif
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 1,639
|
runtime/interpreter/mterp/mips/op_goto.S
|
/*
 * Unconditional branch, 8-bit offset.
 *
 * The branch distance is a signed code-unit offset, which we need to
 * double to get a byte offset.
 *
 * Register use: rINST ends up holding the sign-extended code-unit
 * offset; a2 holds the doubled (byte) offset, and its sign drives the
 * backward-branch suspend check after rPC has been advanced.
 */
/* goto +AA */
#if MTERP_PROFILE_BRANCHES
sll a0, rINST, 16 # a0 <- AAxx0000
sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
EXPORT_PC()
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
JAL(MterpProfileBranch) # (self, shadow_frame, offset)
bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
addu a2, rINST, rINST # a2 <- byte offset
FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
/* If backwards branch refresh rIBASE */
bgez a2, 1f
lw ra, THREAD_FLAGS_OFFSET(rSELF)
b MterpCheckSuspendAndContinue
1:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
#else
sll a0, rINST, 16 # a0 <- AAxx0000
sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
addu a2, rINST, rINST # a2 <- byte offset
FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
/* If backwards branch refresh rIBASE */
bgez a2, 1f # BUGFIX: test a2 (byte offset); previously tested stale a1
lw ra, THREAD_FLAGS_OFFSET(rSELF)
b MterpCheckSuspendAndContinue
1:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
#endif
|
abforce/xposed_art_n
| 1,211
|
runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
|
/*
 * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
 * 32-bit shift distance. Logical (zero-filling) right shift.
 */
/* ushr-long/2addr vA, vB */
GET_OPA4(t3) # t3 <- A+
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB
EAS2(t0, rFP, t3) # t0 <- &fp[A]
LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
andi v0, a2, 0x20 # shift & 0x20 (is shift >= 32?)
srl v1, a1, a2 # rhi<- ahi >> (shift&31)
bnez v0, .L${opcode}_finish # shift >= 32: hi word becomes zero
srl v0, a0, a2 # rlo<- alo >> (shift&31)
not a0, a2 # alo<- 31-n (shift is 5b)
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
SET_VREG64_GOTO(v0, v1, t3, t0) # vA/vA+1 <- v0/v1
%break
.L${opcode}_finish:
SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- v1/0
|
abforce/xposed_art_n
| 1,055
|
runtime/interpreter/mterp/mips/unopNarrower.S
|
%default {"load":"LOAD64_F(fa0, fa0f, a3)"}
/*
 * Generic 64bit-to-32bit unary operation. Provide an "instr" line
 * that specifies an instruction that performs "result = op a0/a1", where
 * "result" is a 32-bit quantity in a0.
 *
 * For: long-to-float, double-to-int, double-to-float
 * If hard floating point support is available, use fa0 as the parameter,
 * except for long-to-float opcode.
 * (This would work for long-to-int, but that instruction is actually
 * an exact match for OP_MOVE.)
 */
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
GET_OPA4(rOBJ) # rOBJ <- A+
EAS2(a3, rFP, a3) # a3 <- &fp[B]
$load # load the 64-bit source operand
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$instr # perform the narrowing op
.L${opcode}_set_vreg_f:
SET_VREG_F(fv0, rOBJ) # vA <- result0
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 2,496
|
runtime/interpreter/mterp/mips/op_packed_switch.S
|
%default { "func":"MterpDoPackedSwitch" }
/*
 * Handle a packed-switch or sparse-switch instruction. In both cases
 * we decode it and hand it off to a helper function.
 *
 * We don't really expect backward branches in a switch statement, but
 * they're perfectly legal, so we check for them here.
 *
 * for: packed-switch, sparse-switch
 */
/* op vAA, +BBBB */
#if MTERP_PROFILE_BRANCHES
FETCH(a0, 1) # a0 <- bbbb (lo)
FETCH(a1, 2) # a1 <- BBBB (hi)
GET_OPA(a3) # a3 <- AA
sll t0, a1, 16
or a0, a0, t0 # a0 <- BBBBbbbb
GET_VREG(a1, a3) # a1 <- vAA
EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
JAL($func) # v0 <- code-unit branch offset
move rINST, v0
EXPORT_PC()
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
JAL(MterpProfileBranch) # (self, shadow_frame, offset)
bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
addu a1, rINST, rINST # a1 <- byte offset
FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
bgtz a1, .L${opcode}_finish # strictly forward: skip the suspend check
lw ra, THREAD_FLAGS_OFFSET(rSELF)
b MterpCheckSuspendAndContinue
#else
FETCH(a0, 1) # a0 <- bbbb (lo)
FETCH(a1, 2) # a1 <- BBBB (hi)
GET_OPA(a3) # a3 <- AA
sll t0, a1, 16
or a0, a0, t0 # a0 <- BBBBbbbb
GET_VREG(a1, a3) # a1 <- vAA
EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
JAL($func) # v0 <- code-unit branch offset
move rINST, v0
addu a1, rINST, rINST # a1 <- byte offset
FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
bgtz a1, 1f # strictly forward: skip the suspend check
lw ra, THREAD_FLAGS_OFFSET(rSELF)
b MterpCheckSuspendAndContinue
1:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
#endif
%break
.L${opcode}_finish:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 1,847
|
runtime/interpreter/mterp/mips/op_goto_32.S
|
/*
* Unconditional branch, 32-bit offset.
*
* The branch distance is a signed code-unit offset, which we need to
* double to get a byte offset.
*
* Unlike most opcodes, this one is allowed to branch to itself, so
* our "backward branch" test must be "<=0" instead of "<0".
*/
/* goto/32 +AAAAAAAA */
# goto/32 may branch to itself (offset 0), hence the "<= 0" suspend-check
# test (bgtz falls through to the suspend path for offsets <= 0).
#if MTERP_PROFILE_BRANCHES
FETCH(a0, 1) # a0 <- aaaa (lo)
FETCH(a1, 2) # a1 <- AAAA (hi)
sll a1, a1, 16
or rINST, a0, a1 # rINST <- AAAAaaaa
EXPORT_PC()
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
JAL(MterpProfileBranch) # (self, shadow_frame, offset)
bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
addu a1, rINST, rINST # a1 <- byte offset
FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
bgtz a1, 1f
lw ra, THREAD_FLAGS_OFFSET(rSELF) # delay slot: flags -> ra for suspend path
b MterpCheckSuspendAndContinue
1:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
#else
FETCH(a0, 1) # a0 <- aaaa (lo)
FETCH(a1, 2) # a1 <- AAAA (hi)
sll a1, a1, 16
or rINST, a0, a1 # rINST <- AAAAaaaa
addu a1, rINST, rINST # a1 <- byte offset
FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
bgtz a1, 1f
lw ra, THREAD_FLAGS_OFFSET(rSELF) # delay slot: flags -> ra for suspend path
b MterpCheckSuspendAndContinue
1:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
#endif
|
abforce/xposed_art_n
| 1,546
|
runtime/interpreter/mterp/mips/binopLit8.S
|
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
* Generic 32-bit "lit8" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0 op a1".
* This could be an MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus.
*
* For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
# binop/lit8 vAA, vBB, /* +CC */
# 32-bit binop with a sign-extended 8-bit literal: vAA = vBB op +CC.
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
GET_VREG(a0, a2) # a0 <- vBB
sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
.if $chkzero
# is second operand zero?
beqz a1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
/* 10-12 instructions */
|
abforce/xposed_art_n
| 1,213
|
runtime/interpreter/mterp/mips/op_float_to_int.S
|
%include "mips/funop.S" {"instr":"b f2i_doconv"}
%break
/*
 * Not an entry point as it is used only once !!
 *
 * Dalvik float-to-int semantics: clamp to int min/max on overflow and
 * return 0 for NaN; a plain trunc.w.s would not give these results, so
 * the range/NaN cases are filtered out before truncating.
 */
f2i_doconv:
#ifdef MIPS32REVGE6
l.s fa1, .LFLOAT_TO_INT_max
cmp.ule.s ft2, fa1, fa0 # fa0 >= maxint as float?
l.s fv0, .LFLOAT_TO_INT_ret_max
bc1nez ft2, .L${opcode}_set_vreg_f
l.s fa1, .LFLOAT_TO_INT_min
cmp.ule.s ft2, fa0, fa1 # fa0 <= minint as float?
l.s fv0, .LFLOAT_TO_INT_ret_min
bc1nez ft2, .L${opcode}_set_vreg_f
mov.s fa1, fa0
cmp.un.s ft2, fa0, fa1 # NaN? (unordered with itself)
li.s fv0, 0
bc1nez ft2, .L${opcode}_set_vreg_f
#else
l.s fa1, .LFLOAT_TO_INT_max
c.ole.s fcc0, fa1, fa0 # fa0 >= maxint as float?
l.s fv0, .LFLOAT_TO_INT_ret_max
bc1t .L${opcode}_set_vreg_f
l.s fa1, .LFLOAT_TO_INT_min
c.ole.s fcc0, fa0, fa1 # fa0 <= minint as float?
l.s fv0, .LFLOAT_TO_INT_ret_min
bc1t .L${opcode}_set_vreg_f
mov.s fa1, fa0
c.un.s fcc0, fa0, fa1 # NaN? (unordered with itself)
li.s fv0, 0
bc1t .L${opcode}_set_vreg_f
#endif
trunc.w.s fv0, fa0 # in-range: truncate toward zero
b .L${opcode}_set_vreg_f
.LFLOAT_TO_INT_max:
.word 0x4f000000 # (float)0x80000000, first float >= maxint
.LFLOAT_TO_INT_min:
.word 0xcf000000 # (float)-0x80000000 == minint exactly
.LFLOAT_TO_INT_ret_max:
.word 0x7fffffff # maxint
.LFLOAT_TO_INT_ret_min:
.word 0x80000000 # minint
|
abforce/xposed_art_n
| 1,616
|
runtime/interpreter/mterp/mips/op_double_to_int.S
|
%include "mips/unopNarrower.S" {"instr":"b d2i_doconv"}
/*
 * Convert the double in a0/a1 to an int in a0.
 *
 * We have to clip values to int min/max per the specification. The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer. The EABI convert function isn't doing this for us.
 * NaN is mapped to 0; out-of-range values clamp to int min/max.
 */
%break
d2i_doconv:
#ifdef MIPS32REVGE6
la t0, .LDOUBLE_TO_INT_max
LOAD64_F(fa1, fa1f, t0)
cmp.ule.d ft2, fa1, fa0 # fa0 >= max threshold?
l.s fv0, .LDOUBLE_TO_INT_maxret
bc1nez ft2, .L${opcode}_set_vreg_f
la t0, .LDOUBLE_TO_INT_min
LOAD64_F(fa1, fa1f, t0)
cmp.ule.d ft2, fa0, fa1 # fa0 <= minint as double?
l.s fv0, .LDOUBLE_TO_INT_minret
bc1nez ft2, .L${opcode}_set_vreg_f
mov.d fa1, fa0
cmp.un.d ft2, fa0, fa1 # NaN? (unordered with itself)
li.s fv0, 0
bc1nez ft2, .L${opcode}_set_vreg_f
#else
la t0, .LDOUBLE_TO_INT_max
LOAD64_F(fa1, fa1f, t0)
c.ole.d fcc0, fa1, fa0 # fa0 >= max threshold?
l.s fv0, .LDOUBLE_TO_INT_maxret
bc1t .L${opcode}_set_vreg_f
la t0, .LDOUBLE_TO_INT_min
LOAD64_F(fa1, fa1f, t0)
c.ole.d fcc0, fa0, fa1 # fa0 <= minint as double?
l.s fv0, .LDOUBLE_TO_INT_minret
bc1t .L${opcode}_set_vreg_f
mov.d fa1, fa0
c.un.d fcc0, fa0, fa1 # NaN? (unordered with itself)
li.s fv0, 0
bc1t .L${opcode}_set_vreg_f
#endif
trunc.w.d fv0, fa0 # in-range: truncate toward zero
b .L${opcode}_set_vreg_f
.LDOUBLE_TO_INT_max:
.dword 0x41dfffffffc00000 # (double)maxint, largest double <= maxint
.LDOUBLE_TO_INT_min:
.dword 0xc1e0000000000000 # minint, as a double
.LDOUBLE_TO_INT_maxret:
.word 0x7fffffff # maxint
.LDOUBLE_TO_INT_minret:
.word 0x80000000 # minint
|
abforce/xposed_art_n
| 1,450
|
runtime/interpreter/mterp/mips/binopLit16.S
|
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
* Generic 32-bit "lit16" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0 op a1".
* This could be an MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus.
*
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
# binop/lit16 vA, vB, /* +CCCC */
# 32-bit binop with a sign-extended 16-bit literal: vA = vB op +CCCC.
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
GET_OPA(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
and rOBJ, rOBJ, 15 # rOBJ <- A (mask off high nibble)
.if $chkzero
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO($result, rOBJ, t0) # vA <- $result
/* 10-13 instructions */
|
abforce/xposed_art_n
| 1,219
|
runtime/interpreter/mterp/mips/op_shl_long_2addr.S
|
/*
* Long integer shift, 2addr version. vA is 64-bit value/result, vB is
* 32-bit shift distance.
*/
/* shl-long/2addr vA, vB */
# 64-bit left shift, 2addr form: vA(64) = vA(64) << (vB & 63).
# Branch delay slots are in effect; ordering below is load-bearing.
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB (shift count)
EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
LOAD64(a0, a1, t2) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
andi v1, a2, 0x20 # shift & 0x20 (set => shift >= 32)
sll v0, a0, a2 # rlo<- alo << (shift&31)
bnez v1, .L${opcode}_finish
not v1, a2 # rhi<- 31-shift (shift is 5b) (delay slot)
srl a0, 1
srl a0, v1 # alo<- alo >> (32-(shift&31))
sll v1, a1, a2 # rhi<- ahi << (shift&31)
or v1, a0 # rhi<- rhi | alo
SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
%break
.L${opcode}_finish:
# Shift >= 32: low word is zero, high word takes alo << (shift&31).
SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vA/vA+1 <- zero/v0
|
abforce/xposed_art_n
| 1,064
|
runtime/interpreter/mterp/mips/fbinopWide.S
|
/*
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be an MIPS instruction or a function call.
*
* for: add-double, sub-double, mul-double, div-double,
* rem-double
*
*/
/* binop vAA, vBB, vCC */
# 64-bit FP binop: vAA(64) = vBB(64) $instr vCC(64), operands in fa0/fa1,
# result in fv0/fv0f.
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
EAS2(t1, rFP, a3) # t1 <- &fp[CC]
LOAD64_F(fa0, fa0f, a2)
LOAD64_F(fa1, fa1f, t1)
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
$instr
SET_VREG64_F(fv0, fv0f, rOBJ) # vAA/vAA+1 <- fv0/fv0f
b .L${opcode}_finish
%break
.L${opcode}_finish:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 1,079
|
runtime/interpreter/mterp/mips/op_aget_wide.S
|
/*
* Array get, 64 bits. vAA <- vBB[vCC].
*
* Arrays of long/double are 64-bit aligned.
*/
/* aget-wide vAA, vBB, vCC */
# 64-bit array load with null and bounds checks. Note a0 is advanced to
# the element address before the bounds check, but is only dereferenced
# after the bgeu has passed.
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
GET_VREG(a0, a2) # a0 <- vBB (array object)
GET_VREG(a1, a3) # a1 <- vCC (requested index)
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
bgeu a1, a3, common_errArrayIndex # index >= length, bail (unsigned cmp)
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO(a2, a3, rOBJ, t0) # vAA/vAA+1 <- a2/a3
|
abforce/xposed_art_n
| 1,657
|
runtime/interpreter/mterp/mips/op_double_to_long.S
|
%include "mips/funopWide.S" {"instr":"b d2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
%break
# Dalvik double-to-long: clamp to long min/max on overflow, 0 for NaN.
# The range/NaN cases are filtered before delegating to __fixdfdi.
d2l_doconv:
#ifdef MIPS32REVGE6
la t0, .LDOUBLE_TO_LONG_max
LOAD64_F(fa1, fa1f, t0)
cmp.ule.d ft2, fa1, fa0 # fa0 >= maxlong as double?
la t0, .LDOUBLE_TO_LONG_ret_max
LOAD64(rRESULT0, rRESULT1, t0)
bc1nez ft2, .L${opcode}_set_vreg
la t0, .LDOUBLE_TO_LONG_min
LOAD64_F(fa1, fa1f, t0)
cmp.ule.d ft2, fa0, fa1 # fa0 <= minlong as double?
la t0, .LDOUBLE_TO_LONG_ret_min
LOAD64(rRESULT0, rRESULT1, t0)
bc1nez ft2, .L${opcode}_set_vreg
mov.d fa1, fa0
cmp.un.d ft2, fa0, fa1 # NaN? (unordered with itself)
li rRESULT0, 0
li rRESULT1, 0
bc1nez ft2, .L${opcode}_set_vreg
#else
la t0, .LDOUBLE_TO_LONG_max
LOAD64_F(fa1, fa1f, t0)
c.ole.d fcc0, fa1, fa0 # fa0 >= maxlong as double?
la t0, .LDOUBLE_TO_LONG_ret_max
LOAD64(rRESULT0, rRESULT1, t0)
bc1t .L${opcode}_set_vreg
la t0, .LDOUBLE_TO_LONG_min
LOAD64_F(fa1, fa1f, t0)
c.ole.d fcc0, fa0, fa1 # fa0 <= minlong as double?
la t0, .LDOUBLE_TO_LONG_ret_min
LOAD64(rRESULT0, rRESULT1, t0)
bc1t .L${opcode}_set_vreg
mov.d fa1, fa0
c.un.d fcc0, fa0, fa1 # NaN? (unordered with itself)
li rRESULT0, 0
li rRESULT1, 0
bc1t .L${opcode}_set_vreg
#endif
JAL(__fixdfdi) # in-range: libgcc double -> int64 conversion
b .L${opcode}_set_vreg
.LDOUBLE_TO_LONG_max:
.dword 0x43e0000000000000 # maxlong, as a double
.LDOUBLE_TO_LONG_min:
.dword 0xc3e0000000000000 # minlong, as a double
.LDOUBLE_TO_LONG_ret_max:
.dword 0x7fffffffffffffff # maxlong
.LDOUBLE_TO_LONG_ret_min:
.dword 0x8000000000000000 # minlong
|
abforce/xposed_art_n
| 1,731
|
runtime/interpreter/mterp/mips/binopWide2addr.S
|
%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
/*
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
* rem-double/2addr
*/
/* binop/2addr vA, vB */
# 64-bit binop, 2addr form: vA(64) = vA(64) op vB(64).
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
LOAD64($arg2, $arg3, a1) # a2/a3 <- vB/vB+1
LOAD64($arg0, $arg1, t0) # a0/a1 <- vA/vA+1
.if $chkzero
or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
.endif
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$preinstr # optional op
$instr # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64($result0, $result1, rOBJ) # vA/vA+1 <- $result0/$result1
GOTO_OPCODE(t0) # jump to next instruction
/* 12-15 instructions */
|
abforce/xposed_art_n
| 1,784
|
runtime/interpreter/mterp/mips/binopWide.S
|
%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
/*
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
*
* IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
*/
/* binop vAA, vBB, vCC */
# 64-bit binop: vAA(64) = vBB(64) op vCC(64).
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
EAS2(t1, rFP, a3) # t1 <- &fp[CC]
LOAD64($arg0, $arg1, a2) # a0/a1 <- vBB/vBB+1
LOAD64($arg2, $arg3, t1) # a2/a3 <- vCC/vCC+1
.if $chkzero
or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
.endif
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
$preinstr # optional op
$instr # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vAA/vAA+1 <- $result0/$result1
/* 14-17 instructions */
|
abforce/xposed_art_n
| 1,219
|
runtime/interpreter/mterp/mips/op_float_to_long.S
|
%include "mips/funopWider.S" {"instr":"b f2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
%break
# Dalvik float-to-long: clamp to long min/max on overflow, 0 for NaN.
# The range/NaN cases are filtered before delegating to __fixsfdi.
f2l_doconv:
#ifdef MIPS32REVGE6
l.s fa1, .LLONG_TO_max
cmp.ule.s ft2, fa1, fa0 # fa0 >= maxlong as float?
li rRESULT0, ~0 # maxlong lo: 0xffffffff
li rRESULT1, ~0x80000000 # maxlong hi: 0x7fffffff
bc1nez ft2, .L${opcode}_set_vreg
l.s fa1, .LLONG_TO_min
cmp.ule.s ft2, fa0, fa1 # fa0 <= minlong as float?
li rRESULT0, 0 # minlong lo
li rRESULT1, 0x80000000 # minlong hi
bc1nez ft2, .L${opcode}_set_vreg
mov.s fa1, fa0
cmp.un.s ft2, fa0, fa1 # NaN? (unordered with itself)
li rRESULT0, 0
li rRESULT1, 0
bc1nez ft2, .L${opcode}_set_vreg
#else
l.s fa1, .LLONG_TO_max
c.ole.s fcc0, fa1, fa0 # fa0 >= maxlong as float?
li rRESULT0, ~0 # maxlong lo: 0xffffffff
li rRESULT1, ~0x80000000 # maxlong hi: 0x7fffffff
bc1t .L${opcode}_set_vreg
l.s fa1, .LLONG_TO_min
c.ole.s fcc0, fa0, fa1 # fa0 <= minlong as float?
li rRESULT0, 0 # minlong lo
li rRESULT1, 0x80000000 # minlong hi
bc1t .L${opcode}_set_vreg
mov.s fa1, fa0
c.un.s fcc0, fa0, fa1 # NaN? (unordered with itself)
li rRESULT0, 0
li rRESULT1, 0
bc1t .L${opcode}_set_vreg
#endif
JAL(__fixsfdi) # in-range: libgcc float -> int64 conversion
b .L${opcode}_set_vreg
.LLONG_TO_max:
.word 0x5f000000 # (float)0x8000000000000000, first float >= maxlong
.LLONG_TO_min:
.word 0xdf000000 # (float)-0x8000000000000000 == minlong exactly
|
abforce/xposed_art_n
| 1,497
|
runtime/interpreter/mterp/mips/bincmp.S
|
/*
* Generic two-operand compare-and-branch operation. Provide a "revcmp"
* fragment that specifies the *reverse* comparison to perform, e.g.
* for "if-le" you would use "gt".
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
# "revcmp" is the *reverse* condition: branching on it means the original
# comparison failed, so rINST is set to 2 (fall through to next insn).
GET_OPA4(a0) # a0 <- A+
GET_OPB(a1) # a1 <- B
GET_VREG(a3, a1) # a3 <- vB
GET_VREG(a2, a0) # a2 <- vA
b${revcmp} a2, a3, 1f # branch to 1 if comparison failed
FETCH_S(rINST, 1) # rINST<- branch offset, in code units
b 2f
1:
li rINST, 2 # rINST- BYTE branch dist for not-taken
2:
#if MTERP_PROFILE_BRANCHES
EXPORT_PC()
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
JAL(MterpProfileBranch) # (self, shadow_frame, offset)
bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
#endif
addu a2, rINST, rINST # convert to bytes
FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
bgez a2, .L_${opcode}_finish # forward branch: skip suspend check
lw ra, THREAD_FLAGS_OFFSET(rSELF) # delay slot: flags -> ra for suspend path
b MterpCheckSuspendAndContinue
%break
.L_${opcode}_finish:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 1,739
|
runtime/interpreter/mterp/mips/op_cmpl_double.S
|
%default { "naninst":"li rTEMP, -1" }
/*
* Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
* into the destination register (rTEMP) based on the comparison results.
*
* Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
* on what value we'd like to return when one of the operands is NaN.
*
* See op_cmpl_float for more details.
*
* For: cmpl-double, cmpg-double
*/
/* op vAA, vBB, vCC */
# Ordered comparisons: if all three (<, >, ==) fail, at least one operand
# is NaN and the $naninst result (+1 or -1) is used.
FETCH(a0, 1) # a0 <- CCBB
and rOBJ, a0, 255 # rOBJ (s5) <- BB
srl t0, a0, 8 # t0 <- CC
EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
EAS2(t0, rFP, t0) # t0 <- &fp[CC]
LOAD64_F(ft0, ft0f, rOBJ)
LOAD64_F(ft1, ft1f, t0)
#ifdef MIPS32REVGE6
cmp.ult.d ft2, ft0, ft1
li rTEMP, -1
bc1nez ft2, .L${opcode}_finish
cmp.ult.d ft2, ft1, ft0
li rTEMP, 1
bc1nez ft2, .L${opcode}_finish
cmp.eq.d ft2, ft0, ft1
li rTEMP, 0
bc1nez ft2, .L${opcode}_finish
b .L${opcode}_nan
#else
c.olt.d fcc0, ft0, ft1
li rTEMP, -1
bc1t fcc0, .L${opcode}_finish
c.olt.d fcc0, ft1, ft0
li rTEMP, 1
bc1t fcc0, .L${opcode}_finish
c.eq.d fcc0, ft0, ft1
li rTEMP, 0
bc1t fcc0, .L${opcode}_finish
b .L${opcode}_nan
#endif
%break
.L${opcode}_nan:
$naninst
.L${opcode}_finish:
GET_OPA(rOBJ)
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
|
abforce/xposed_art_n
| 1,295
|
runtime/interpreter/mterp/mips/op_aput_wide.S
|
/*
* Array put, 64 bits. vBB[vCC] <- vAA.
*
* Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
*/
/* aput-wide vAA, vBB, vCC */
# 64-bit array store with null and bounds checks.
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(t0) # t0 <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
GET_VREG(a0, a2) # a0 <- vBB (array object)
GET_VREG(a1, a3) # a1 <- vCC (requested index)
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
# compare unsigned index, length
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
GET_INST_OPCODE(t0) # extract opcode from rINST
STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # vBB[vCC] <- a2/a3
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 1,537
|
runtime/interpreter/mterp/mips/op_mul_long.S
|
/*
* Signed 64-bit integer multiply.
* a1 a0
* x a3 a2
* -------------
* a2a1 a2a0
* a3a0
* a3a1 (<= unused)
* ---------------
* v1 v0
*/
/* mul-long vAA, vBB, vCC */
# 64x64 -> 64 multiply from 32-bit partial products; the a3*a1 term only
# affects bits above 64 and is dropped (see diagram above).
FETCH(a0, 1) # a0 <- CCBB
and t0, a0, 255 # t0 <- BB
srl t1, a0, 8 # t1 <- CC
EAS2(t0, rFP, t0) # t0 <- &fp[BB]
LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
EAS2(t1, rFP, t1) # t1 <- &fp[CC]
LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
mul v1, a3, a0 # v1= a3a0
#ifdef MIPS32REVGE6
mulu v0, a2, a0 # v0= a2a0
muhu t1, a2, a0 # t1= hi(a2a0)
#else
multu a2, a0
mfhi t1 # t1= hi(a2a0)
mflo v0 # v0= a2a0
#endif
mul t0, a2, a1 # t0= a2a1
addu v1, v1, t1 # v1+= hi(a2a0)
addu v1, v1, t0 # v1= a3a0 + a2a1;
GET_OPA(a0) # a0 <- AA
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
b .L${opcode}_finish
%break
.L${opcode}_finish:
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64(v0, v1, a0) # vAA::vAA+1 <- v0(low) :: v1(high)
GOTO_OPCODE(t0) # jump to next instruction
|
abforce/xposed_art_n
| 1,455
|
runtime/interpreter/mterp/mips/op_goto_16.S
|
/*
* Unconditional branch, 16-bit offset.
*
* The branch distance is a signed code-unit offset, which we need to
* double to get a byte offset.
*/
/* goto/16 +AAAA */
# Unconditional branch; backward branches (offset < 0) go through the
# suspend check, forward branches resume dispatch directly.
#if MTERP_PROFILE_BRANCHES
FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
EXPORT_PC()
move a0, rSELF
addu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
JAL(MterpProfileBranch) # (self, shadow_frame, offset)
bnez v0, MterpOnStackReplacement # Note: offset must be in rINST
addu a1, rINST, rINST # a1 <- byte offset, flags set
FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
bgez a1, 1f
lw ra, THREAD_FLAGS_OFFSET(rSELF) # delay slot: flags -> ra for suspend path
b MterpCheckSuspendAndContinue
1:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
#else
FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
addu a1, rINST, rINST # a1 <- byte offset, flags set
FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
bgez a1, 1f
lw ra, THREAD_FLAGS_OFFSET(rSELF) # delay slot: flags -> ra for suspend path
b MterpCheckSuspendAndContinue
1:
GET_INST_OPCODE(t0) # extract opcode from rINST
GOTO_OPCODE(t0) # jump to next instruction
#endif
|
abforce/xposed_art_n
| 1,188
|
runtime/interpreter/mterp/mips/op_mul_long_2addr.S
|
/*
* See op_mul_long.S for more details
*/
/* mul-long/2addr vA, vB */
# Same partial-product scheme as op_mul_long, with vA as both source
# and destination.
GET_OPA4(rOBJ) # rOBJ <- A+
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
LOAD64(a0, a1, t0) # vA.low / high
GET_OPB(t1) # t1 <- B
EAS2(t1, rFP, t1) # t1 <- &fp[B]
LOAD64(a2, a3, t1) # vB.low / high
mul v1, a3, a0 # v1= a3a0
#ifdef MIPS32REVGE6
mulu v0, a2, a0 # v0= a2a0
muhu t1, a2, a0 # t1= hi(a2a0)
#else
multu a2, a0
mfhi t1 # t1= hi(a2a0)
mflo v0 # v0= a2a0
#endif
mul t2, a2, a1 # t2= a2a1
addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
addu v1, v1, t2 # v1= v1 + a2a1;
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t1) # extract opcode from rINST
# vA <- v0 (low)
SET_VREG64(v0, v1, rOBJ) # vA+1 <- v1 (high)
GOTO_OPCODE(t1) # jump to next instruction
|
abforce/xposed_art_n
| 1,478
|
runtime/interpreter/mterp/mips/op_ushr_long.S
|
/*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to mask off the low
* 6 bits of the shift distance.
*/
/* ushr-long vAA, vBB, vCC */
# 64-bit unsigned right shift: vAA(64) = vBB(64) >>> (vCC & 63).
# Branch delay slots are in effect; ordering below is load-bearing.
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(rOBJ) # rOBJ <- AA
and a3, a0, 255 # a3 <- BB
srl a0, a0, 8 # a0 <- CC
EAS2(a3, rFP, a3) # a3 <- &fp[BB]
GET_VREG(a2, a0) # a2 <- vCC (shift count)
LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
andi v0, a2, 0x20 # shift & 0x20 (set => shift >= 32)
srl v1, a1, a2 # rhi<- ahi >> (shift&31)
bnez v0, .L${opcode}_finish
srl v0, a0, a2 # rlo<- alo >> (shift&31) (delay slot)
not a0, a2 # alo<- 31-n (shift is 5b)
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
%break
.L${opcode}_finish:
# Shift >= 32: low word takes ahi >> (shift&31), high word is zero.
SET_VREG64_GOTO(v1, zero, rOBJ, t0) # vAA/vAA+1 <- v1/zero
|
abforce/xposed_art_n
| 2,387
|
runtime/arch/mips64/jni_entrypoints_mips64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_mips64.S"
.set noreorder
.balign 16
/*
* Jni dlsym lookup stub.
*/
.extern artFindNativeMethod
ENTRY art_jni_dlsym_lookup_stub
# Saves all eight MIPS64 integer argument registers plus $ra, calls
# artFindNativeMethod(Thread*), then either tail-jumps to the resolved
# native code (args restored intact) or returns to the caller if lookup
# failed. FP argument registers are not saved here.
daddiu $sp, $sp, -80 # save a0-a7 and $ra
.cfi_adjust_cfa_offset 80
sd $ra, 64($sp)
.cfi_rel_offset 31, 64
sd $a7, 56($sp)
.cfi_rel_offset 11, 56
sd $a6, 48($sp)
.cfi_rel_offset 10, 48
sd $a5, 40($sp)
.cfi_rel_offset 9, 40
sd $a4, 32($sp)
.cfi_rel_offset 8, 32
sd $a3, 24($sp)
.cfi_rel_offset 7, 24
sd $a2, 16($sp)
.cfi_rel_offset 6, 16
sd $a1, 8($sp)
.cfi_rel_offset 5, 8
sd $a0, 0($sp)
.cfi_rel_offset 4, 0
move $a0, $s1 # pass Thread::Current() (rSELF lives in $s1)
jal artFindNativeMethod # (Thread*)
.cpreturn # Restore gp from t8 in branch delay slot. gp is not used
# anymore, and t8 may be clobbered in artFindNativeMethod.
ld $a0, 0($sp) # restore registers from stack
.cfi_restore 4
ld $a1, 8($sp)
.cfi_restore 5
ld $a2, 16($sp)
.cfi_restore 6
ld $a3, 24($sp)
.cfi_restore 7
ld $a4, 32($sp)
.cfi_restore 8
ld $a5, 40($sp)
.cfi_restore 9
ld $a6, 48($sp)
.cfi_restore 10
ld $a7, 56($sp)
.cfi_restore 11
ld $ra, 64($sp)
.cfi_restore 31
beq $v0, $zero, .Lno_native_code_found
daddiu $sp, $sp, 80 # restore the stack (delay slot, both paths)
.cfi_adjust_cfa_offset -80
move $t9, $v0 # put method code result in $t9
jalr $zero, $t9 # leaf call to method's code
nop
.Lno_native_code_found:
jalr $zero, $ra # lookup failed: plain return
nop
END art_jni_dlsym_lookup_stub
|
abforce/xposed_art_n
| 3,070
|
runtime/arch/mips64/asm_support_mips64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
#define ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
#include "asm_support_mips64.h"
// Define special registers.
// Register holding suspend check count down.
#define rSUSPEND $s0
// Register holding Thread::Current().
#define rSELF $s1
// Declare a function called name, sets up $gp.
// This macro modifies t8.
.macro ENTRY name
.type \name, %function
.global \name
// Cache alignment for function entry.
.balign 16
\name:
.cfi_startproc
// Set up $gp and store the previous $gp value to $t8. It will be pushed to the
// stack after the frame has been constructed.
.cpsetup $t9, $t8, \name
// Ensure we get a sane starting CFA.
.cfi_def_cfa $sp,0
// Declare a local convenience label to be branched to when $gp is already set up.
.L\name\()_gp_set:
.endm
// Declare a function called name, doesn't set up $gp.
.macro ENTRY_NO_GP name
.type \name, %function
.global \name
// Cache alignment for function entry.
.balign 16
\name:
.cfi_startproc
// Ensure we get a sane starting CFA.
.cfi_def_cfa $sp,0
.endm
// Close a function opened with ENTRY/ENTRY_NO_GP: end CFI and set symbol size.
.macro END name
.cfi_endproc
.size \name, .-\name
.endm
// Stub out an entry point: executing it traps via the break instruction.
.macro UNIMPLEMENTED name
ENTRY \name
break
break
END \name
.endm
// Macros to poison (negate) the reference for heap poisoning.
// No-op unless USE_HEAP_POISONING is defined.
.macro POISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
subu \rRef, $zero, \rRef
#endif // USE_HEAP_POISONING
.endm
// Macros to unpoison (negate) the reference for heap poisoning.
// No-op unless USE_HEAP_POISONING is defined.
.macro UNPOISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
subu \rRef, $zero, \rRef
#endif // USE_HEAP_POISONING
.endm
// Based on contents of creg select the minimum integer
// At the end of the macro the original value of creg is lost
// (creg is used as scratch; dreg may alias rreg).
.macro MINint dreg,rreg,sreg,creg
.set push
.set noat
.ifc \dreg, \rreg
selnez \dreg, \rreg, \creg
seleqz \creg, \sreg, \creg
.else
seleqz \dreg, \sreg, \creg
selnez \creg, \rreg, \creg
.endif
or \dreg, \dreg, \creg
.set pop
.endm
// Find minimum of two signed registers
.macro MINs dreg,rreg,sreg
.set push
.set noat
slt $at, \rreg, \sreg
MINint \dreg, \rreg, \sreg, $at
.set pop
.endm
// Find minimum of two unsigned registers
.macro MINu dreg,rreg,sreg
.set push
.set noat
sltu $at, \rreg, \sreg
MINint \dreg, \rreg, \sreg, $at
.set pop
.endm
#endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
|
abforce/xposed_art_n
| 73,844
|
runtime/arch/mips64/quick_entrypoints_mips64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_mips64.S"
#include "arch/quick_alloc_entrypoints.S"
.set noreorder
.balign 16
/* Deliver the given exception */
.extern artDeliverExceptionFromCode
/* Deliver an exception pending on a thread */
.extern artDeliverPendingExceptionFromCode
/*
* Macro that sets up $gp and stores the previous $gp value to $t8.
* This macro modifies v1 and t8.
*/
.macro SETUP_GP
// Uses a bal-to-next-line to obtain the current PC in $ra, feeds it to
// .cpsetup, then restores the caller's $ra from $v1.
move $v1, $ra
bal 1f
nop
1:
.cpsetup $ra, $t8, 1b
move $ra, $v1
.endm
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
* callee-save: padding + $f24-$f31 + $s0-$s7 + $gp + $ra + $s8 = 19 total + 1x8 bytes padding
*/
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
// Builds the kSaveAll callee-save frame: all callee-saved GPRs and FPRs,
// the save-all ArtMethod* at the bottom, and publishes $sp to
// Thread::Current()->top_quick_frame.
daddiu $sp, $sp, -160
.cfi_adjust_cfa_offset 160
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 160)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
#endif
sd $ra, 152($sp)
.cfi_rel_offset 31, 152
sd $s8, 144($sp)
.cfi_rel_offset 30, 144
sd $t8, 136($sp) # t8 holds caller's gp, now save it to the stack.
.cfi_rel_offset 28, 136 # Value from gp is pushed, so set the cfi offset accordingly.
sd $s7, 128($sp)
.cfi_rel_offset 23, 128
sd $s6, 120($sp)
.cfi_rel_offset 22, 120
sd $s5, 112($sp)
.cfi_rel_offset 21, 112
sd $s4, 104($sp)
.cfi_rel_offset 20, 104
sd $s3, 96($sp)
.cfi_rel_offset 19, 96
sd $s2, 88($sp)
.cfi_rel_offset 18, 88
sd $s1, 80($sp)
.cfi_rel_offset 17, 80
sd $s0, 72($sp)
.cfi_rel_offset 16, 72
// FP callee-saves
s.d $f31, 64($sp)
s.d $f30, 56($sp)
s.d $f29, 48($sp)
s.d $f28, 40($sp)
s.d $f27, 32($sp)
s.d $f26, 24($sp)
s.d $f25, 16($sp)
s.d $f24, 8($sp)
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
ld $t1, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t1)
sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes
* non-moving GC.
* Does not include rSUSPEND or rSELF
* callee-save: padding + $s2-$s7 + $gp + $ra + $s8 = 9 total + 1x8 bytes padding
*/
.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// Builds the kRefsOnly callee-save frame ($s2-$s7, gp-in-$t8, $ra, $s8),
// places the refs-only ArtMethod* at the bottom, and publishes $sp to
// Thread::Current()->top_quick_frame.
daddiu $sp, $sp, -80
.cfi_adjust_cfa_offset 80
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 80)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
#endif
sd $ra, 72($sp)
.cfi_rel_offset 31, 72
sd $s8, 64($sp)
.cfi_rel_offset 30, 64
sd $t8, 56($sp) # t8 holds caller's gp, now save it to the stack.
.cfi_rel_offset 28, 56 # Value from gp is pushed, so set the cfi offset accordingly.
sd $s7, 48($sp)
.cfi_rel_offset 23, 48
sd $s6, 40($sp)
.cfi_rel_offset 22, 40
sd $s5, 32($sp)
.cfi_rel_offset 21, 32
sd $s4, 24($sp)
.cfi_rel_offset 20, 24
sd $s3, 16($sp)
.cfi_rel_offset 19, 16
sd $s2, 8($sp)
.cfi_rel_offset 18, 8
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
ld $t1, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t1)
sd $t1, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
// Tears down the kRefsOnly frame built above and restores $gp (.cpreturn).
// Does not return; see the _AND_RETURN variant for that.
ld $ra, 72($sp)
.cfi_restore 31
ld $s8, 64($sp)
.cfi_restore 30
ld $t8, 56($sp) # Restore gp back to it's temp storage.
.cfi_restore 28
ld $s7, 48($sp)
.cfi_restore 23
ld $s6, 40($sp)
.cfi_restore 22
ld $s5, 32($sp)
.cfi_restore 21
ld $s4, 24($sp)
.cfi_restore 20
ld $s3, 16($sp)
.cfi_restore 19
ld $s2, 8($sp)
.cfi_restore 18
daddiu $sp, $sp, 80
.cfi_adjust_cfa_offset -80
.cpreturn
.endm
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
// Same teardown as RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME, then returns to
// the caller with the stack pop in the jalr delay slot.
ld $ra, 72($sp)
.cfi_restore 31
ld $s8, 64($sp)
.cfi_restore 30
ld $t8, 56($sp) # Restore gp back to it's temp storage.
.cfi_restore 28
ld $s7, 48($sp)
.cfi_restore 23
ld $s6, 40($sp)
.cfi_restore 22
ld $s5, 32($sp)
.cfi_restore 21
ld $s4, 24($sp)
.cfi_restore 20
ld $s3, 16($sp)
.cfi_restore 19
ld $s2, 8($sp)
.cfi_restore 18
.cpreturn
jalr $zero, $ra
daddiu $sp, $sp, 80 # delay slot: restore the stack
.cfi_adjust_cfa_offset -80
.endm
// This assumes the top part of these stack frame types are identical.
#define REFS_AND_ARGS_MINUS_REFS_SIZE (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    # Allocates a 208-byte frame and saves callee-saves plus all managed-ABI
    # argument registers ($a1-$a7, $f12-$f19). The Method* slot at 0($sp) is
    # filled in by the callers of this macro.
    daddiu  $sp, $sp, -208
    .cfi_adjust_cfa_offset 208
    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 208)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(MIPS64) size not as expected."
#endif
    sd $ra, 200($sp)                # = kQuickCalleeSaveFrame_RefAndArgs_LrOffset
    .cfi_rel_offset 31, 200
    sd $s8, 192($sp)
    .cfi_rel_offset 30, 192
    sd $t8, 184($sp)                # t8 holds caller's gp, now save it to the stack.
    .cfi_rel_offset 28, 184         # Value from gp is pushed, so set the cfi offset accordingly.
    sd $s7, 176($sp)
    .cfi_rel_offset 23, 176
    sd $s6, 168($sp)
    .cfi_rel_offset 22, 168
    sd $s5, 160($sp)
    .cfi_rel_offset 21, 160
    sd $s4, 152($sp)
    .cfi_rel_offset 20, 152
    sd $s3, 144($sp)
    .cfi_rel_offset 19, 144
    sd $s2, 136($sp)
    .cfi_rel_offset 18, 136
    sd $a7, 128($sp)
    .cfi_rel_offset 11, 128
    sd $a6, 120($sp)
    .cfi_rel_offset 10, 120
    sd $a5, 112($sp)
    .cfi_rel_offset 9, 112
    sd $a4, 104($sp)
    .cfi_rel_offset 8, 104
    sd $a3, 96($sp)
    .cfi_rel_offset 7, 96
    sd $a2, 88($sp)
    .cfi_rel_offset 6, 88
    sd $a1, 80($sp)                 # = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset
    .cfi_rel_offset 5, 80
    s.d $f19, 72($sp)
    s.d $f18, 64($sp)
    s.d $f17, 56($sp)
    s.d $f16, 48($sp)
    s.d $f15, 40($sp)
    s.d $f14, 32($sp)
    s.d $f13, 24($sp)               # = kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset
    s.d $f12, 16($sp)               # This isn't necessary to store.
    # 1x8 bytes padding + Method*
.endm
    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes
     * non-moving GC.
     * callee-save: padding + $f12-$f19 + $a1-$a7 + $s2-$s7 + $gp + $ra + $s8 = 24 total + 1 word padding + Method*
     */
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    # load appropriate callee-save-method from the Runtime singleton
    ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
    ld $t1, 0($t1)
    ld $t1, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t1)
    sd $t1, 0($sp)                  # Place Method* at bottom of stack.
    sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
.endm
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
    # Variant of SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME that uses the ArtMethod*
    # already held in $a0 instead of loading the runtime's callee-save method.
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    sd $a0, 0($sp)                  # Place Method* at bottom of stack.
    sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
.endm
.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    # Restores everything saved by SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    # and pops the 208-byte frame. Does not return.
    ld $ra, 200($sp)
    .cfi_restore 31
    ld $s8, 192($sp)
    .cfi_restore 30
    ld $t8, 184($sp)                # Restore gp back to its temp storage.
    .cfi_restore 28
    ld $s7, 176($sp)
    .cfi_restore 23
    ld $s6, 168($sp)
    .cfi_restore 22
    ld $s5, 160($sp)
    .cfi_restore 21
    ld $s4, 152($sp)
    .cfi_restore 20
    ld $s3, 144($sp)
    .cfi_restore 19
    ld $s2, 136($sp)
    .cfi_restore 18
    ld $a7, 128($sp)
    .cfi_restore 11
    ld $a6, 120($sp)
    .cfi_restore 10
    ld $a5, 112($sp)
    .cfi_restore 9
    ld $a4, 104($sp)
    .cfi_restore 8
    ld $a3, 96($sp)
    .cfi_restore 7
    ld $a2, 88($sp)
    .cfi_restore 6
    ld $a1, 80($sp)
    .cfi_restore 5
    l.d $f19, 72($sp)
    l.d $f18, 64($sp)
    l.d $f17, 56($sp)
    l.d $f16, 48($sp)
    l.d $f15, 40($sp)
    l.d $f14, 32($sp)
    l.d $f13, 24($sp)
    l.d $f12, 16($sp)
    .cpreturn
    daddiu $sp, $sp, 208
    .cfi_adjust_cfa_offset -208
.endm
    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode,
     * where the pending
     * exception is Thread::Current()->exception_
     */
.macro DELIVER_PENDING_EXCEPTION
    SETUP_GP
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME     # save callee saves for throw
    dla $t9, artDeliverPendingExceptionFromCode
    jalr $zero, $t9                 # artDeliverPendingExceptionFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current (branch delay slot)
.endm
.macro RETURN_IF_NO_EXCEPTION
    # Returns to the caller if no exception is pending; otherwise delivers it.
    ld $t0, THREAD_EXCEPTION_OFFSET(rSELF)   # load Thread::Current()->exception_
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    bne $t0, $zero, 1f              # branch to deliver if an exception is pending
    nop
    jalr $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
.macro RETURN_IF_ZERO
    # Returns to the caller if $v0 == 0 (success); otherwise delivers the
    # pending exception.
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    bne $v0, $zero, 1f              # non-zero means failure
    nop
    jalr $zero, $ra                 # return on success
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    # Returns to the caller if $v0 != 0 (success); otherwise delivers the
    # pending exception.
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    beq $v0, $zero, 1f              # zero means failure
    nop
    jalr $zero, $ra                 # return on success
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
    /*
     * On stack replacement stub.
     * On entry:
     *   a0 = stack to copy
     *   a1 = size of stack
     *   a2 = pc to call
     *   a3 = JValue* result
     *   a4 = shorty
     *   a5 = thread
     */
ENTRY art_quick_osr_stub
    # Build a 16-byte-aligned save area, copy the interpreter's shadow stack
    # into a fresh quick frame, jump to the OSR-compiled code at a2, and store
    # the (possibly FP) result into the JValue* per the shorty's return type.
    move $t0, $sp                   # save stack pointer
    daddiu $t1, $sp, -112           # reserve stack space
    dsrl $t1, $t1, 4                # enforce 16 byte stack alignment
    dsll $sp, $t1, 4                # update stack pointer
    // Save callee general purpose registers, SP, T8(GP), RA, A3, and A4 (8x14 bytes)
    sd $ra, 104($sp)
    .cfi_rel_offset 31, 104
    sd $s8, 96($sp)
    .cfi_rel_offset 30, 96
    sd $t0, 88($sp)                 # save original stack pointer stored in t0
    .cfi_rel_offset 29, 88
    sd $t8, 80($sp)                 # t8 holds caller's gp, now save it to the stack.
    .cfi_rel_offset 28, 80          # Value from gp is pushed, so set the cfi offset accordingly.
    sd $s7, 72($sp)
    .cfi_rel_offset 23, 72
    sd $s6, 64($sp)
    .cfi_rel_offset 22, 64
    sd $s5, 56($sp)
    .cfi_rel_offset 21, 56
    sd $s4, 48($sp)
    .cfi_rel_offset 20, 48
    sd $s3, 40($sp)
    .cfi_rel_offset 19, 40
    sd $s2, 32($sp)
    .cfi_rel_offset 18, 32
    sd $s1, 24($sp)
    .cfi_rel_offset 17, 24
    sd $s0, 16($sp)
    .cfi_rel_offset 16, 16
    sd $a4, 8($sp)
    .cfi_rel_offset 8, 8
    sd $a3, 0($sp)
    .cfi_rel_offset 7, 0
    move rSELF, $a5                 # Save managed thread pointer into rSELF
    daddiu $sp, $sp, -16
    jal .Losr_entry
    sd $zero, 0($sp)                # Store null for ArtMethod* at bottom of frame
    daddiu $sp, $sp, 16
    // Restore return value address and shorty address
    ld $a4, 8($sp)                  # shorty address
    .cfi_restore 8
    ld $a3, 0($sp)                  # result value address
    .cfi_restore 7
    lbu $t1, 0($a4)                 # load return type
    li $t2, 'D'                     # put char 'D' into t2
    beq $t1, $t2, .Losr_fp_result   # branch if result type char == 'D'
    li $t2, 'F'                     # put char 'F' into t2 (delay slot)
    beq $t1, $t2, .Losr_fp_result   # branch if result type char == 'F'
    nop
    b .Losr_exit
    dsrl $v1, $v0, 32               # put high half of result in v1 (delay slot)
.Losr_fp_result:
    mfc1 $v0, $f0
    mfhc1 $v1, $f0                  # put high half of FP result in v1
.Losr_exit:
    sw $v0, 0($a3)                  # store low half of result
    sw $v1, 4($a3)                  # store high half of result
    // Restore callee registers
    ld $ra, 104($sp)
    .cfi_restore 31
    ld $s8, 96($sp)
    .cfi_restore 30
    ld $t0, 88($sp)                 # save SP into t0 for now
    .cfi_restore 29
    ld $t8, 80($sp)                 # Restore gp back to its temp storage.
    .cfi_restore 28
    ld $s7, 72($sp)
    .cfi_restore 23
    ld $s6, 64($sp)
    .cfi_restore 22
    ld $s5, 56($sp)
    .cfi_restore 21
    ld $s4, 48($sp)
    .cfi_restore 20
    ld $s3, 40($sp)
    .cfi_restore 19
    ld $s2, 32($sp)
    .cfi_restore 18
    ld $s1, 24($sp)
    .cfi_restore 17
    ld $s0, 16($sp)
    .cfi_restore 16
    jalr $zero, $ra
    move $sp, $t0                   # restore original SP in the delay slot
.Losr_entry:
    # Reserve the callee frame, park RA at its top (split into 32-bit halves
    # per the compiler ABI), then copy the argument words downward.
    dsubu $sp, $sp, $a1             # Reserve space for callee stack
    daddiu $a1, $a1, -8
    daddu $t0, $a1, $sp
    sw $ra, 0($t0)                  # Store low half of RA per compiler ABI
    dsrl $t1, $ra, 32
    sw $t1, 4($t0)                  # Store high half of RA per compiler ABI
    // Copy arguments into callee stack
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // a0 = source address
    // a1 = args length in bytes (does not include 8 bytes for RA)
    // sp = destination address
    beqz $a1, .Losr_loop_exit
    daddiu $a1, $a1, -4
    daddu $t1, $a0, $a1
    daddu $t2, $sp, $a1
.Losr_loop_entry:
    lw $t0, 0($t1)
    daddiu $t1, $t1, -4
    sw $t0, 0($t2)
    bne $sp, $t2, .Losr_loop_entry
    daddiu $t2, $t2, -4
.Losr_loop_exit:
    move $t9, $a2
    jalr $zero, $t9                 # Jump to the OSR entry point.
    nop
END art_quick_osr_stub
    /*
     * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
     * FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
     */
ENTRY_NO_GP art_quick_do_long_jump
    # Restores every FP register from fprs_ and (almost) every GPR from gprs_,
    # then jumps to the PC loaded into $t9. $a0 is reloaded last since it is
    # the base pointer for the GPR loads; $v0/$v1 are cleared on the way out.
    l.d $f0, 0($a1)
    l.d $f1, 8($a1)
    l.d $f2, 16($a1)
    l.d $f3, 24($a1)
    l.d $f4, 32($a1)
    l.d $f5, 40($a1)
    l.d $f6, 48($a1)
    l.d $f7, 56($a1)
    l.d $f8, 64($a1)
    l.d $f9, 72($a1)
    l.d $f10, 80($a1)
    l.d $f11, 88($a1)
    l.d $f12, 96($a1)
    l.d $f13, 104($a1)
    l.d $f14, 112($a1)
    l.d $f15, 120($a1)
    l.d $f16, 128($a1)
    l.d $f17, 136($a1)
    l.d $f18, 144($a1)
    l.d $f19, 152($a1)
    l.d $f20, 160($a1)
    l.d $f21, 168($a1)
    l.d $f22, 176($a1)
    l.d $f23, 184($a1)
    l.d $f24, 192($a1)
    l.d $f25, 200($a1)
    l.d $f26, 208($a1)
    l.d $f27, 216($a1)
    l.d $f28, 224($a1)
    l.d $f29, 232($a1)
    l.d $f30, 240($a1)
    l.d $f31, 248($a1)
    .set push
    .set nomacro
    .set noat                       # allow explicit use of $at
    # no need to load zero
    ld $at, 8($a0)
    .set pop
    ld $v0, 16($a0)
    ld $v1, 24($a0)
    # a0 has to be loaded last
    ld $a1, 40($a0)
    ld $a2, 48($a0)
    ld $a3, 56($a0)
    ld $a4, 64($a0)
    ld $a5, 72($a0)
    ld $a6, 80($a0)
    ld $a7, 88($a0)
    ld $t0, 96($a0)
    ld $t1, 104($a0)
    ld $t2, 112($a0)
    ld $t3, 120($a0)
    ld $s0, 128($a0)
    ld $s1, 136($a0)
    ld $s2, 144($a0)
    ld $s3, 152($a0)
    ld $s4, 160($a0)
    ld $s5, 168($a0)
    ld $s6, 176($a0)
    ld $s7, 184($a0)
    ld $t8, 192($a0)
    ld $t9, 200($a0)
    # no need to load k0, k1
    ld $gp, 224($a0)
    ld $sp, 232($a0)
    ld $s8, 240($a0)
    ld $ra, 248($a0)
    ld $a0, 32($a0)
    move $v0, $zero                 # clear result registers v0 and v1
    jalr $zero, $t9                 # do long jump (do not use ra, it must not be clobbered)
    move $v1, $zero                 # (delay slot)
END art_quick_do_long_jump
    /*
     * Called by managed code, saves most registers (forms basis of long jump
     * context) and passes the bottom of the stack.
     * artDeliverExceptionFromCode will place the callee save Method* at
     * the bottom of the thread. On entry a0 holds Throwable*
     */
ENTRY art_quick_deliver_exception
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    dla $t9, artDeliverExceptionFromCode
    jalr $zero, $t9                 # artDeliverExceptionFromCode(Throwable*, Thread*)
    move $a1, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_deliver_exception
    /*
     * Called by managed code to create and deliver a NullPointerException
     */
.extern artThrowNullPointerExceptionFromCode
ENTRY art_quick_throw_null_pointer_exception
    # The _gp_set label is a secondary entry used by stubs that have already
    # run SETUP_GP (e.g. lock/unlock and aput slow paths).
.Lart_quick_throw_null_pointer_exception_gp_set:
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    dla $t9, artThrowNullPointerExceptionFromCode
    jalr $zero, $t9                 # artThrowNullPointerExceptionFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_null_pointer_exception
    /*
     * Called by managed code to create and deliver an ArithmeticException
     */
.extern artThrowDivZeroFromCode
ENTRY art_quick_throw_div_zero
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    dla $t9, artThrowDivZeroFromCode
    jalr $zero, $t9                 # artThrowDivZeroFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_div_zero
    /*
     * Called by managed code to create and deliver an
     * ArrayIndexOutOfBoundsException
     */
.extern artThrowArrayBoundsFromCode
ENTRY art_quick_throw_array_bounds
    # On entry a0 = failing index, a1 = array length. The _gp_set label is a
    # secondary entry used after SETUP_GP has already run.
.Lart_quick_throw_array_bounds_gp_set:
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    dla $t9, artThrowArrayBoundsFromCode
    jalr $zero, $t9                 # artThrowArrayBoundsFromCode(index, limit, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_array_bounds
    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
.extern artThrowStackOverflowFromCode
ENTRY art_quick_throw_stack_overflow
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    dla $t9, artThrowStackOverflowFromCode
    jalr $zero, $t9                 # artThrowStackOverflowFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_stack_overflow
    /*
     * Called by managed code to create and deliver a NoSuchMethodError.
     */
.extern artThrowNoSuchMethodFromCode
ENTRY art_quick_throw_no_such_method
    # On entry a0 = method index of the missing method.
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    dla $t9, artThrowNoSuchMethodFromCode
    jalr $zero, $t9                 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
    move $a1, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_throw_no_such_method
    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
     * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
     * stack and call the appropriate C helper.
     * NOTE: "this" is first visible argument of the target, and so can be found in arg1/$a1.
     *
     * The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
     * of the target Method* in $v0 and method->code_ in $v1.
     *
     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the ra
     * pointing back to the original caller.
     */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
    .extern \cxx_name
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME  # save callee saves in case allocation triggers GC
    move $a2, rSELF                 # pass Thread::Current
    jal \cxx_name                   # (method_idx, this, Thread*, $sp)
    move $a3, $sp                   # pass $sp (branch delay slot)
    move $a0, $v0                   # save target Method*
    move $t9, $v1                   # save $v0->code_
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    beq $v0, $zero, 1f              # null Method* => pending exception
    nop
    jalr $zero, $t9                 # tail-jump to the resolved code
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
.macro INVOKE_TRAMPOLINE c_name, cxx_name
    # Emits an exported trampoline \c_name wrapping the C++ helper \cxx_name.
ENTRY \c_name
    INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm
# Instantiate one trampoline per invoke kind (interface/static/direct/super/virtual).
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
# Consumes one shorty character and loads the corresponding argument into
# either \gpu (int/long) or \fpu (float/double), also copying it into the
# out-args area at $v0. Branches to \label when the shorty is exhausted.
# On entry:
#   t0 = shorty
#   t1 = ptr to arg_array
#   t2 = number of argument bytes remain
#   v0 = ptr to stack frame where to copy arg_array
# This macro modifies t3, t9 and v0
.macro LOOP_OVER_SHORTY_LOADING_REG gpu, fpu, label
    lbu $t3, 0($t0)                 # get argument type from shorty
    beqz $t3, \label
    daddiu $t0, 1                   # advance shorty pointer (delay slot)
    li $t9, 68                      # put char 'D' into t9
    beq $t9, $t3, 1f                # branch if argument type char == 'D'
    li $t9, 70                      # put char 'F' into t9 (delay slot)
    beq $t9, $t3, 2f                # branch if argument type char == 'F'
    li $t9, 74                      # put char 'J' into t9 (delay slot)
    beq $t9, $t3, 3f                # branch if argument type char == 'J'
    nop
    # 32-bit integer argument
    lw $\gpu, 0($t1)
    sw $\gpu, 0($v0)
    daddiu $v0, 4
    daddiu $t1, 4
    b 4f
    daddiu $t2, -4                  # delay slot
1:  # found double
    lwu $t3, 0($t1)
    mtc1 $t3, $\fpu
    sw $t3, 0($v0)
    lwu $t3, 4($t1)
    mthc1 $t3, $\fpu
    sw $t3, 4($v0)
    daddiu $v0, 8
    daddiu $t1, 8
    b 4f
    daddiu $t2, -8                  # delay slot
2:  # found float
    lwu $t3, 0($t1)
    mtc1 $t3, $\fpu
    sw $t3, 0($v0)
    daddiu $v0, 4
    daddiu $t1, 4
    b 4f
    daddiu $t2, -4                  # delay slot
3:  # found long (8 bytes)
    lwu $t3, 0($t1)
    sw $t3, 0($v0)
    lwu $t9, 4($t1)
    sw $t9, 4($v0)
    dsll $t9, $t9, 32
    or $\gpu, $t9, $t3              # assemble 64-bit value from the two halves
    daddiu $v0, 8
    daddiu $t1, 8
    daddiu $t2, -8
4:
.endm
    /*
     * Invocation stub for quick code.
     * On entry:
     *   a0 = method pointer
     *   a1 = argument array that must at least contain the this ptr.
     *   a2 = size of argument array in bytes
     *   a3 = (managed) thread pointer
     *   a4 = JValue* result
     *   a5 = shorty
     */
ENTRY_NO_GP art_quick_invoke_stub
    # Marshals up to 6 register args (a2-a7 / f14-f19; a1 is "this") plus stack
    # overflow args from the arg array, calls the method's quick code, and
    # stores the result into the JValue* according to the shorty return type.
    # push a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra onto the stack
    daddiu $sp, $sp, -48
    .cfi_adjust_cfa_offset 48
    sd $ra, 40($sp)
    .cfi_rel_offset 31, 40
    sd $s8, 32($sp)
    .cfi_rel_offset 30, 32
    sd $s1, 24($sp)
    .cfi_rel_offset 17, 24
    sd $s0, 16($sp)
    .cfi_rel_offset 16, 16
    sd $a5, 8($sp)
    .cfi_rel_offset 9, 8
    sd $a4, 0($sp)
    .cfi_rel_offset 8, 0
    daddiu $s0, $zero, SUSPEND_CHECK_INTERVAL  # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
    move $s1, $a3                   # move managed thread pointer into s1 (rSELF)
    move $s8, $sp                   # save sp in s8 (fp)
    daddiu $t3, $a2, 24             # add 8 for ArtMethod* and 16 for stack alignment
    dsrl $t3, $t3, 4                # shift the frame size right 4
    dsll $t3, $t3, 4                # shift the frame size left 4 to align to 16 bytes
    dsubu $sp, $sp, $t3             # reserve stack space for argument array
    daddiu $t0, $a5, 1              # t0 = shorty[1] (skip 1 for return type)
    daddiu $t1, $a1, 4              # t1 = ptr to arg_array[4] (skip this ptr)
    daddiu $t2, $a2, -4             # t2 = number of argument bytes remain (skip this ptr)
    daddiu $v0, $sp, 12             # v0 points to where to copy arg_array
    LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_fn
    LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_fn
    LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_fn
    LOOP_OVER_SHORTY_LOADING_REG a5, f17, call_fn
    LOOP_OVER_SHORTY_LOADING_REG a6, f18, call_fn
    LOOP_OVER_SHORTY_LOADING_REG a7, f19, call_fn
    # copy remaining arguments onto stack (t2 should be multiples of 4)
    ble $t2, $zero, call_fn         # t2 = number of argument bytes remain
1:
    lw $t3, 0($t1)                  # load from argument array
    daddiu $t1, $t1, 4
    sw $t3, 0($v0)                  # save to stack
    daddiu $t2, -4
    bgt $t2, $zero, 1b              # t2 = number of argument bytes remain
    daddiu $v0, $v0, 4              # (delay slot)
call_fn:
    # call method (a0 and a1 have been untouched)
    lwu $a1, 0($a1)                 # make a1 = this ptr
    sw $a1, 8($sp)                  # copy this ptr (skip 8 bytes for ArtMethod*)
    sd $zero, 0($sp)                # store null for ArtMethod* at bottom of frame
    ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0)  # get pointer to the code
    jalr $t9                        # call the method
    nop
    move $sp, $s8                   # restore sp
    # pop a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra off of the stack
    ld $a4, 0($sp)
    .cfi_restore 8
    ld $a5, 8($sp)
    .cfi_restore 9
    ld $s0, 16($sp)
    .cfi_restore 16
    ld $s1, 24($sp)
    .cfi_restore 17
    ld $s8, 32($sp)
    .cfi_restore 30
    ld $ra, 40($sp)
    .cfi_restore 31
    daddiu $sp, $sp, 48
    .cfi_adjust_cfa_offset -48
    # a4 = JValue* result
    # a5 = shorty string
    lbu $t1, 0($a5)                 # get result type from shorty
    li $t2, 68                      # put char 'D' into t2
    beq $t1, $t2, 1f                # branch if result type char == 'D'
    li $t3, 70                      # put char 'F' into t3 (delay slot)
    beq $t1, $t3, 1f                # branch if result type char == 'F'
    sw $v0, 0($a4)                  # store the result (delay slot; harmless if branch taken)
    dsrl $v1, $v0, 32
    jalr $zero, $ra
    sw $v1, 4($a4)                  # store the other half of the result (delay slot)
1:
    mfc1 $v0, $f0
    mfhc1 $v1, $f0
    sw $v0, 0($a4)                  # store the result
    jalr $zero, $ra
    sw $v1, 4($a4)                  # store the other half of the result (delay slot)
END art_quick_invoke_stub
    /*
     * Invocation static stub for quick code.
     * On entry:
     *   a0 = method pointer
     *   a1 = argument array that must at least contain the this ptr.
     *   a2 = size of argument array in bytes
     *   a3 = (managed) thread pointer
     *   a4 = JValue* result
     *   a5 = shorty
     */
ENTRY_NO_GP art_quick_invoke_static_stub
    # Same as art_quick_invoke_stub but for static methods: there is no "this"
    # pointer, so argument marshalling starts at a1/f13 and at arg_array[0].
    # push a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra, onto the stack
    daddiu $sp, $sp, -48
    .cfi_adjust_cfa_offset 48
    sd $ra, 40($sp)
    .cfi_rel_offset 31, 40
    sd $s8, 32($sp)
    .cfi_rel_offset 30, 32
    sd $s1, 24($sp)
    .cfi_rel_offset 17, 24
    sd $s0, 16($sp)
    .cfi_rel_offset 16, 16
    sd $a5, 8($sp)
    .cfi_rel_offset 9, 8
    sd $a4, 0($sp)
    .cfi_rel_offset 8, 0
    daddiu $s0, $zero, SUSPEND_CHECK_INTERVAL  # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
    move $s1, $a3                   # move managed thread pointer into s1 (rSELF)
    move $s8, $sp                   # save sp in s8 (fp)
    daddiu $t3, $a2, 24             # add 8 for ArtMethod* and 16 for stack alignment
    dsrl $t3, $t3, 4                # shift the frame size right 4
    dsll $t3, $t3, 4                # shift the frame size left 4 to align to 16 bytes
    dsubu $sp, $sp, $t3             # reserve stack space for argument array
    daddiu $t0, $a5, 1              # t0 = shorty[1] (skip 1 for return type)
    move $t1, $a1                   # t1 = arg_array
    move $t2, $a2                   # t2 = number of argument bytes remain
    daddiu $v0, $sp, 8              # v0 points to where to copy arg_array
    LOOP_OVER_SHORTY_LOADING_REG a1, f13, call_sfn
    LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_sfn
    LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_sfn
    LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_sfn
    LOOP_OVER_SHORTY_LOADING_REG a5, f17, call_sfn
    LOOP_OVER_SHORTY_LOADING_REG a6, f18, call_sfn
    LOOP_OVER_SHORTY_LOADING_REG a7, f19, call_sfn
    # copy remaining arguments onto stack (t2 should be multiples of 4)
    ble $t2, $zero, call_sfn        # t2 = number of argument bytes remain
1:
    lw $t3, 0($t1)                  # load from argument array
    daddiu $t1, $t1, 4
    sw $t3, 0($v0)                  # save to stack
    daddiu $t2, -4
    bgt $t2, $zero, 1b              # t2 = number of argument bytes remain
    daddiu $v0, $v0, 4              # (delay slot)
call_sfn:
    # call method (a0 has been untouched)
    sd $zero, 0($sp)                # store null for ArtMethod* at bottom of frame
    ld $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0)  # get pointer to the code
    jalr $t9                        # call the method
    nop
    move $sp, $s8                   # restore sp
    # pop a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra off of the stack
    ld $a4, 0($sp)
    .cfi_restore 8
    ld $a5, 8($sp)
    .cfi_restore 9
    ld $s0, 16($sp)
    .cfi_restore 16
    ld $s1, 24($sp)
    .cfi_restore 17
    ld $s8, 32($sp)
    .cfi_restore 30
    ld $ra, 40($sp)
    .cfi_restore 31
    daddiu $sp, $sp, 48
    .cfi_adjust_cfa_offset -48
    # a4 = JValue* result
    # a5 = shorty string
    lbu $t1, 0($a5)                 # get result type from shorty
    li $t2, 68                      # put char 'D' into t2
    beq $t1, $t2, 1f                # branch if result type char == 'D'
    li $t3, 70                      # put char 'F' into t3 (delay slot)
    beq $t1, $t3, 1f                # branch if result type char == 'F'
    sw $v0, 0($a4)                  # store the result (delay slot; harmless if branch taken)
    dsrl $v1, $v0, 32
    jalr $zero, $ra
    sw $v1, 4($a4)                  # store the other half of the result (delay slot)
1:
    mfc1 $v0, $f0
    mfhc1 $v1, $f0
    sw $v0, 0($a4)                  # store the result
    jalr $zero, $ra
    sw $v1, 4($a4)                  # store the other half of the result (delay slot)
END art_quick_invoke_static_stub
    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and
     * delivers exception on failure.
     */
.extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case exception allocation triggers GC
    ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artHandleFillArrayDataFromCode         # (payload offset, Array*, method, Thread*)
    move $a3, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_handle_fill_data
    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC.
     */
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
    # Null object => throw NullPointerException via the shared _gp_set entry.
    beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
    nop
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case we block
    jal artLockObjectFromCode       # (Object* obj, Thread*)
    move $a1, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
    # Same as art_quick_lock_object; separate symbol for callers that must not
    # use the inlined fast path.
    beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
    nop
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case we block
    jal artLockObjectFromCode       # (Object* obj, Thread*)
    move $a1, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_lock_object_no_inline
    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
     */
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    # Null object => throw NullPointerException via the shared _gp_set entry.
    beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
    nop
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
    jal artUnlockObjectFromCode     # (Object* obj, Thread*)
    move $a1, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
    # Same as art_quick_unlock_object; separate symbol for callers that must
    # not use the inlined fast path.
    beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
    nop
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
    jal artUnlockObjectFromCode     # (Object* obj, Thread*)
    move $a1, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_ZERO
END art_quick_unlock_object_no_inline
    /*
     * Entry from managed code that calls artIsAssignableFromCode and, on
     * failure, tail-calls artThrowClassCastException.
     * On entry a0 = destination Class*, a1 = source Class*.
     */
.extern artThrowClassCastException
ENTRY art_quick_check_cast
    daddiu $sp, $sp, -32
    .cfi_adjust_cfa_offset 32
    sd $ra, 24($sp)
    .cfi_rel_offset 31, 24
    sd $t9, 16($sp)
    sd $a1, 8($sp)
    sd $a0, 0($sp)
    jal artIsAssignableFromCode
    .cpreturn                       # Restore gp from t8 in branch delay slot.
                                    # t8 may be clobbered in artIsAssignableFromCode.
    beq $v0, $zero, .Lthrow_class_cast_exception
    ld $ra, 24($sp)                 # (delay slot; needed on both paths)
    jalr $zero, $ra
    daddiu $sp, $sp, 32             # pop frame in the delay slot
    .cfi_adjust_cfa_offset -32
.Lthrow_class_cast_exception:
    # Restore the original arguments and throw.
    ld $t9, 16($sp)
    ld $a1, 8($sp)
    ld $a0, 0($sp)
    daddiu $sp, $sp, 32
    .cfi_adjust_cfa_offset -32
    SETUP_GP
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    dla $t9, artThrowClassCastException
    jalr $zero, $t9                 # artThrowClassCastException (Class*, Class*, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_check_cast
    /*
     * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
     * nReg is the register number for rReg (used for the CFI annotation).
     */
.macro POP_REG_NE rReg, nReg, offset, rExclude
    .ifnc \rReg, \rExclude
    ld \rReg, \offset($sp)          # restore rReg
    .cfi_restore \nReg
    .endif
.endm
    /*
     * Macro to insert read barrier, only used in art_quick_aput_obj.
     * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
     *
     * With USE_READ_BARRIER: saves the caller-visible registers, calls the
     * artReadBarrierSlow runtime slow path, and moves its result into rDest.
     * Without: a plain lwu + UNPOISON_HEAP_REF.
     */
.macro READ_BARRIER rDest, rObj, offset
#ifdef USE_READ_BARRIER
    # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 16B-aligned.
    daddiu $sp, $sp, -64
    .cfi_adjust_cfa_offset 64
    sd $ra, 56($sp)
    .cfi_rel_offset 31, 56
    sd $t9, 48($sp)
    .cfi_rel_offset 25, 48
    sd $t1, 40($sp)
    .cfi_rel_offset 13, 40
    sd $t0, 32($sp)
    .cfi_rel_offset 12, 32
    sd $a2, 16($sp)                 # padding slot at offset 24 (padding can be any slot in the 64B)
    .cfi_rel_offset 6, 16
    sd $a1, 8($sp)
    .cfi_rel_offset 5, 8
    sd $a0, 0($sp)
    .cfi_rel_offset 4, 0
    # move $a0, \rRef               # pass ref in a0 (no-op for now since parameter ref is unused)
    .ifnc \rObj, $a1
    move $a1, \rObj                 # pass rObj
    .endif
    daddiu $a2, $zero, \offset      # pass offset
    jal artReadBarrierSlow          # artReadBarrierSlow(ref, rObj, offset)
    .cpreturn                       # Restore gp from t8 in branch delay slot.
                                    # t8 may be clobbered in artReadBarrierSlow.
    # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
    move \rDest, $v0                # save return value in rDest
                                    # (rDest cannot be v0 in art_quick_aput_obj)
    ld $a0, 0($sp)                  # restore registers except rDest
                                    # (rDest can only be t0 or t1 in art_quick_aput_obj)
    .cfi_restore 4
    ld $a1, 8($sp)
    .cfi_restore 5
    ld $a2, 16($sp)
    .cfi_restore 6
    POP_REG_NE $t0, 12, 32, \rDest
    POP_REG_NE $t1, 13, 40, \rDest
    ld $t9, 48($sp)
    .cfi_restore 25
    ld $ra, 56($sp)                 # restore $ra
    .cfi_restore 31
    daddiu $sp, $sp, 64
    .cfi_adjust_cfa_offset -64
    SETUP_GP                        # set up gp because we are not returning
#else
    lwu \rDest, \offset(\rObj)
    UNPOISON_HEAP_REF \rDest
#endif  // USE_READ_BARRIER
.endm
    /*
     * Entry from managed code for array put operations of objects where the value being stored
     * needs to be checked for compatibility.
     * a0 = array, a1 = index, a2 = value
     */
ENTRY art_quick_aput_obj_with_null_and_bound_check
    # Null array => NPE; otherwise fall through to the bounds-checked variant.
    bne $a0, $zero, .Lart_quick_aput_obj_with_bound_check_gp_set
    nop
    b .Lart_quick_throw_null_pointer_exception_gp_set
    nop
END art_quick_aput_obj_with_null_and_bound_check
ENTRY art_quick_aput_obj_with_bound_check
    # Bounds check: index < length (unsigned) => proceed to aput; otherwise
    # throw ArrayIndexOutOfBoundsException with (index, length) as arguments.
    lwu $t0, MIRROR_ARRAY_LENGTH_OFFSET($a0)
    sltu $t1, $a1, $t0
    bne $t1, $zero, .Lart_quick_aput_obj_gp_set
    nop
    move $a0, $a1                   # a0 = failing index
    b .Lart_quick_throw_array_bounds_gp_set
    move $a1, $t0                   # a1 = array length (branch delay slot)
END art_quick_aput_obj_with_bound_check
ENTRY art_quick_aput_obj
    # Stores object a2 into array a0 at index a1 with an assignability check,
    # then marks the card table. Null stores skip the type check and card mark.
    beq $a2, $zero, .Ldo_aput_null
    nop
    READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
    READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
    READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
    bne $t1, $t0, .Lcheck_assignability  # branch to slow path unless value's type ==
    nop                                  # array's component type (trivially assignable)
.Ldo_aput:
    dsll $a1, $a1, 2
    daddu $t0, $a0, $a1
    POISON_HEAP_REF $a2
    sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
    # Mark the card for the array (card index = address >> 7).
    ld $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
    dsrl $t1, $a0, 7
    daddu $t1, $t1, $t0
    sb $t0, ($t1)
    jalr $zero, $ra
    .cpreturn                       # Restore gp from t8 in branch delay slot.
.Ldo_aput_null:
    # Storing null: no type check and no card mark needed.
    dsll $a1, $a1, 2
    daddu $t0, $a0, $a1
    sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
    jalr $zero, $ra
    .cpreturn                       # Restore gp from t8 in branch delay slot.
.Lcheck_assignability:
    # Slow path: ask the runtime whether t1 (value class) is assignable to
    # t0 (component type); throw ArrayStoreException if not.
    daddiu $sp, $sp, -64
    .cfi_adjust_cfa_offset 64
    sd $ra, 56($sp)
    .cfi_rel_offset 31, 56
    sd $t9, 24($sp)
    sd $a2, 16($sp)
    sd $a1, 8($sp)
    sd $a0, 0($sp)
    move $a1, $t1
    move $a0, $t0
    jal artIsAssignableFromCode     # (Class*, Class*)
    .cpreturn                       # Restore gp from t8 in branch delay slot.
                                    # t8 may be clobbered in artIsAssignableFromCode.
    ld $ra, 56($sp)
    ld $t9, 24($sp)
    ld $a2, 16($sp)
    ld $a1, 8($sp)
    ld $a0, 0($sp)
    daddiu $sp, $sp, 64
    .cfi_adjust_cfa_offset -64
    SETUP_GP
    bne $v0, $zero, .Ldo_aput       # assignable => perform the store
    nop
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    move $a1, $a2
    dla $t9, artThrowArrayStoreException
    jalr $zero, $t9                 # artThrowArrayStoreException(Class*, Class*, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
END art_quick_aput_obj
    /*
     * Called by managed code to resolve a static field and load a boolean primitive value.
     * a0 = field_idx; result in v0 unless an exception is pending.
     */
.extern artGetBooleanStaticFromCode
ENTRY art_quick_get_boolean_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGetBooleanStaticFromCode            # (uint32_t field_idx, const Method* referrer, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get_boolean_static
    /*
     * Called by managed code to resolve a static field and load a byte primitive value.
     * a0 = field_idx; result in v0 unless an exception is pending.
     */
.extern artGetByteStaticFromCode
ENTRY art_quick_get_byte_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGetByteStaticFromCode               # (uint32_t field_idx, const Method* referrer, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get_byte_static
    /*
     * Called by managed code to resolve a static field and load a char primitive value.
     * a0 = field_idx; result in v0 unless an exception is pending.
     */
.extern artGetCharStaticFromCode
ENTRY art_quick_get_char_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGetCharStaticFromCode               # (uint32_t field_idx, const Method* referrer, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get_char_static
    /*
     * Called by managed code to resolve a static field and load a short primitive value.
     * a0 = field_idx; result in v0 unless an exception is pending.
     */
.extern artGetShortStaticFromCode
ENTRY art_quick_get_short_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGetShortStaticFromCode              # (uint32_t field_idx, const Method* referrer, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get_short_static
    /*
     * Called by managed code to resolve a static field and load a 32-bit primitive value.
     * a0 = field_idx; result in v0 unless an exception is pending.
     */
.extern artGet32StaticFromCode
ENTRY art_quick_get32_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGet32StaticFromCode                 # (uint32_t field_idx, const Method* referrer, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get32_static
    /*
     * Called by managed code to resolve a static field and load a 64-bit primitive value.
     * a0 = field_idx; result in v0 unless an exception is pending.
     */
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGet64StaticFromCode                 # (uint32_t field_idx, const Method* referrer, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get64_static
    /*
     * Called by managed code to resolve a static field and load an object reference.
     * a0 = field_idx; result in v0 unless an exception is pending.
     */
.extern artGetObjStaticFromCode
ENTRY art_quick_get_obj_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGetObjStaticFromCode                # (uint32_t field_idx, const Method* referrer, Thread*)
    move $a2, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get_obj_static
    /*
     * Called by managed code to resolve an instance field and load a boolean primitive value.
     * a0 = field_idx, a1 = Object*; result in v0 unless an exception is pending.
     */
.extern artGetBooleanInstanceFromCode
ENTRY art_quick_get_boolean_instance
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGetBooleanInstanceFromCode          # (field_idx, Object*, referrer, Thread*)
    move $a3, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get_boolean_instance
    /*
     * Called by managed code to resolve an instance field and load a byte primitive value.
     * a0 = field_idx, a1 = Object*; result in v0 unless an exception is pending.
     */
.extern artGetByteInstanceFromCode
ENTRY art_quick_get_byte_instance
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          # save callee saves in case of GC
    ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal artGetByteInstanceFromCode             # (field_idx, Object*, referrer, Thread*)
    move $a3, rSELF                 # pass Thread::Current (branch delay slot)
    RETURN_IF_NO_EXCEPTION
END art_quick_get_byte_instance
/*
* Called by managed code to resolve an instance field and load a char primitive value.
*/
.extern artGetCharInstanceFromCode
ENTRY art_quick_get_char_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
END art_quick_get_char_instance
/*
* Called by managed code to resolve an instance field and load a short primitive value.
*/
.extern artGetShortInstanceFromCode
ENTRY art_quick_get_short_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
END art_quick_get_short_instance
/*
* Called by managed code to resolve an instance field and load a 32-bit primitive value.
*/
.extern artGet32InstanceFromCode
ENTRY art_quick_get32_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
END art_quick_get32_instance
/*
* Called by managed code to resolve an instance field and load a 64-bit primitive value.
*/
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
END art_quick_get64_instance
/*
* Called by managed code to resolve an instance field and load an object reference.
*/
.extern artGetObjInstanceFromCode
ENTRY art_quick_get_obj_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
ld $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
move $a3, rSELF # pass Thread::Current
RETURN_IF_NO_EXCEPTION
END art_quick_get_obj_instance
    /*
     * Static field setters. On entry $a0 = field_idx, $a1 = new value (except the
     * 64-bit variant, where the value arrives in $a2). RETURN_IF_ZERO: the C helper
     * returns 0 on success, non-zero when an exception must be delivered.
     */
    /*
     * Called by managed code to resolve a static field and store a 8-bit primitive value.
     */
    .extern artSet8StaticFromCode
ENTRY art_quick_set8_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSet8StaticFromCode       # (field_idx, new_val, referrer, Thread*)
    move   $a3, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set8_static
    /*
     * Called by managed code to resolve a static field and store a 16-bit primitive value.
     */
    .extern artSet16StaticFromCode
ENTRY art_quick_set16_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSet16StaticFromCode      # (field_idx, new_val, referrer, Thread*)
    move   $a3, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set16_static
    /*
     * Called by managed code to resolve a static field and store a 32-bit primitive value.
     */
    .extern artSet32StaticFromCode
ENTRY art_quick_set32_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSet32StaticFromCode      # (field_idx, new_val, referrer, Thread*)
    move   $a3, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set32_static
    /*
     * Called by managed code to resolve a static field and store a 64-bit primitive value.
     * Note the different argument order: the 64-bit new value is already in $a2, so the
     * referrer goes in $a1.
     */
    .extern artSet64StaticFromCode
ENTRY art_quick_set64_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
                                             # a2 contains the new val
    ld     $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSet64StaticFromCode      # (field_idx, referrer, new_val, Thread*)
    move   $a3, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set64_static
    /*
     * Called by managed code to resolve a static field and store an object reference.
     */
    .extern artSetObjStaticFromCode
ENTRY art_quick_set_obj_static
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSetObjStaticFromCode     # (field_idx, new_val, referrer, Thread*)
    move   $a3, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set_obj_static
    /*
     * Instance field setters. On entry $a0 = field_idx, $a1 = Object* (receiver),
     * $a2 = new value; the referrer ArtMethod* is passed in $a3 and Thread::Current
     * in $a4. The C helper returns 0 on success (RETURN_IF_ZERO).
     */
    /*
     * Called by managed code to resolve an instance field and store a 8-bit primitive value.
     */
    .extern artSet8InstanceFromCode
ENTRY art_quick_set8_instance
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSet8InstanceFromCode     # (field_idx, Object*, new_val, referrer, Thread*)
    move   $a4, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set8_instance
    /*
     * Called by managed code to resolve an instance field and store a 16-bit primitive value.
     */
    .extern artSet16InstanceFromCode
ENTRY art_quick_set16_instance
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSet16InstanceFromCode    # (field_idx, Object*, new_val, referrer, Thread*)
    move   $a4, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set16_instance
    /*
     * Called by managed code to resolve an instance field and store a 32-bit primitive value.
     */
    .extern artSet32InstanceFromCode
ENTRY art_quick_set32_instance
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSet32InstanceFromCode    # (field_idx, Object*, new_val, referrer, Thread*)
    move   $a4, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set32_instance
    /*
     * Called by managed code to resolve an instance field and store a 64-bit primitive value.
     */
    .extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSet64InstanceFromCode    # (field_idx, Object*, new_val, referrer, Thread*)
    move   $a4, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set64_instance
    /*
     * Called by managed code to resolve an instance field and store an object reference.
     */
    .extern artSetObjInstanceFromCode
ENTRY art_quick_set_obj_instance
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    ld     $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
    jal    artSetObjInstanceFromCode   # (field_idx, Object*, new_val, referrer, Thread*)
    move   $a4, rSELF                  # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set_obj_instance
// Macro to facilitate adding new allocation entrypoints.
// Generates a stub that saves the callee-save frame, appends Thread::Current as the
// last C argument (in the jal delay slot), calls \entrypoint, and finishes with the
// \return macro (e.g. RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER).
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    jal    \entrypoint
    move   $a1, rSELF                  # pass Thread::Current
    \return
END \name
.endm
// Macro to facilitate adding new allocation entrypoints.
// Same as ONE_ARG_DOWNCALL but with two managed arguments already in $a0/$a1.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    jal    \entrypoint
    move   $a2, rSELF                  # pass Thread::Current
    \return
END \name
.endm
// Three managed arguments in $a0-$a2; Thread::Current goes in $a3.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    jal    \entrypoint
    move   $a3, rSELF                  # pass Thread::Current
    \return
END \name
.endm
// Four managed arguments in $a0-$a3; Thread::Current goes in $a4.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves in case of GC
    jal    \entrypoint
    move   $a4, rSELF                  # pass Thread::Current
    \return
END \name
.endm
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
// Fast path: resolve the class from the dex cache, check it is initialized and
// non-finalizable, then pop an object off the thread-local rosalloc free list.
// Falls back to artAllocObjectFromCodeRosAlloc on any miss.
ENTRY art_quick_alloc_object_rosalloc
    # Fast path rosalloc allocation
    # a0: type_idx
    # a1: ArtMethod*
    # s1: Thread::Current
    # -----------------------------
    # t0: class
    # t1: object size
    # t2: rosalloc run
    # t3: thread stack top offset
    # a4: thread stack bottom offset
    # v0: free list head
    #
    # a5, a6 : temps
    ld     $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_64($a1)   # Load dex cache resolved types array.
    dsll   $a5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT        # Shift the value.
    daddu  $a5, $t0, $a5                                    # Compute the index.
    lwu    $t0, 0($a5)                                      # Load class (t0).
    beqzc  $t0, .Lart_quick_alloc_object_rosalloc_slow_path # Unresolved class -> slow path.
    li     $a6, MIRROR_CLASS_STATUS_INITIALIZED
    lwu    $a5, MIRROR_CLASS_STATUS_OFFSET($t0)             # Check class status.
    bnec   $a5, $a6, .Lart_quick_alloc_object_rosalloc_slow_path
    # Add a fake dependence from the following access flag and size loads to the status load. This
    # is to prevent those loads from being reordered above the status load and reading wrong values.
    xor    $a5, $a5, $a5
    daddu  $t0, $t0, $a5
    lwu    $a5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0)       # Check if access flags has
    li     $a6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE           # kAccClassIsFinalizable.
    and    $a6, $a5, $a6
    bnezc  $a6, .Lart_quick_alloc_object_rosalloc_slow_path
    ld     $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)    # Check if thread local allocation stack
    ld     $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1)    # has any room left.
    bgeuc  $t3, $a4, .Lart_quick_alloc_object_rosalloc_slow_path
    lwu    $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0)        # Load object size (t1).
    li     $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE      # Check if size is for a thread local
                                                            # allocation.
    bltuc  $a5, $t1, .Lart_quick_alloc_object_rosalloc_slow_path
    # Compute the rosalloc bracket index from the size. Align up the size by the rosalloc bracket
    # quantum size and divide by the quantum size and subtract by 1.
    daddiu $t1, $t1, -1                                     # Decrease obj size and shift right by
    dsrl   $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT    # quantum.
    dsll   $t2, $t1, POINTER_SIZE_SHIFT
    daddu  $t2, $t2, $s1
    ld     $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2)            # Load rosalloc run (t2).
    # Load the free list head (v0).
    # NOTE: this will be the return val.
    ld     $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
    beqzc  $v0, .Lart_quick_alloc_object_rosalloc_slow_path # Empty free list -> slow path.
    # Load the next pointer of the head and update the list head with the next pointer.
    ld     $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
    sd     $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
    # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
    # asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
    POISON_HEAP_REF $t0
    sw     $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
    # Push the new object onto the thread local allocation stack and increment the thread local
    # allocation stack top.
    # NOTE(review): 8-byte store here, but the stack top advances by
    # COMPRESSED_REFERENCE_SIZE (4) — confirm the alloc-stack entry width.
    sd     $v0, 0($t3)
    daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
    sd     $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
    # Decrement the size of the free list.
    lw     $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
    addiu  $a5, $a5, -1
    sw     $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
    sync                                                    # Fence.
    jalr   $zero, $ra
    .cpreturn                                               # Restore gp from t8 in branch delay slot.
.Lart_quick_alloc_object_rosalloc_slow_path:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
    jal    artAllocObjectFromCodeRosAlloc
    move   $a2 ,$s1                                         # Pass self as argument.
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
    /*
     * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
     * exception on error. On success the String is returned. A0 holds the string index. The fast
     * path check for hit in strings cache has already been performed.
     */
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    /*
     * Entry from managed code when uninitialized static storage, this stub will run the class
     * initializer and deliver the exception on error. On success the static storage base is
     * returned.
     */
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    /*
     * Entry from managed code when dex cache misses for a type_idx.
     */
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    /*
     * Entry from managed code when type_idx needs to be checked for access and dex cache may also
     * miss.
     */
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    /*
     * Called by managed code when the value in rSUSPEND has been decremented to 0.
     * Fast path: if the thread's flags halfword is clear, just reset rSUSPEND and
     * return; otherwise call artTestSuspendFromCode with a full callee-save frame.
     */
    .extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
    lh     $a0, THREAD_FLAGS_OFFSET(rSELF)   # load 16-bit thread flags
    bne    $a0, $zero, 1f
    daddiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL   # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
    jalr   $zero, $ra
    .cpreturn                                # Restore gp from t8 in branch delay slot.
1:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME        # save callee saves for stack crawl
    jal    artTestSuspendFromCode            # (Thread*)
    move   $a0, rSELF
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * a0 holds the proxy method; a1, a2 and a3 may contain arguments.
     * The 64-bit GPR result is mirrored into $f0/$f1 so either return convention works.
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
    move    $a2, rSELF             # pass Thread::Current
    jal     artQuickProxyInvokeHandler  # (Method* proxy method, receiver, Thread*, SP)
    move    $a3, $sp               # pass $sp
    ld      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
    daddiu  $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE  # skip a0-a7 and f12-f19
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    bne     $t0, $zero, 1f         # pending exception -> deliver
    dmtc1   $v0, $f0               # place return value to FP return value
    jalr    $zero, $ra
    dmtc1   $v1, $f1               # place return value to FP return value
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
    /*
     * Called to resolve an imt conflict.
     * a0 is the conflict ArtMethod.
     * t0 is a hidden argument that holds the target interface method's dex method index.
     *
     * Note that this stub writes to a0, t0 and t1.
     */
ENTRY art_quick_imt_conflict_trampoline
    ld      $t1, 0($sp)                                      # Load referrer.
    ld      $t1, ART_METHOD_DEX_CACHE_METHODS_OFFSET_64($t1) # Load dex cache methods array.
    dsll    $t0, $t0, POINTER_SIZE_SHIFT                     # Calculate offset.
    daddu   $t0, $t1, $t0                                    # Add offset to base.
    ld      $t0, 0($t0)                                      # Load interface method.
    ld      $a0, ART_METHOD_JNI_OFFSET_64($a0)               # Load ImtConflictTable.
.Limt_table_iterate:
    ld      $t1, 0($a0)                                      # Load next entry in ImtConflictTable.
    # Branch if found.
    beq     $t1, $t0, .Limt_table_found
    nop
    # If the entry is null, the interface method is not in the ImtConflictTable.
    beqzc   $t1, .Lconflict_trampoline
    # Iterate over the entries of the ImtConflictTable.
    daddiu  $a0, $a0, 2 * __SIZEOF_POINTER__                 # Iterate to the next entry.
    bc      .Limt_table_iterate
.Limt_table_found:
    # We successfully hit an entry in the table. Load the target method and jump to it.
    ld      $a0, __SIZEOF_POINTER__($a0)
    ld      $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0)
    jr      $t9
    .cpreturn                                                # Restore gp from t8 in branch delay slot.
.Lconflict_trampoline:
    # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
    # Resolves the called method, then tail-calls its code. artQuickResolutionTrampoline
    # returns the code pointer in $v0 (0 on error) and stores the resolved ArtMethod* at *SP.
    .extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    move    $a2, rSELF             # pass Thread::Current
    jal     artQuickResolutionTrampoline  # (Method* called, receiver, Thread*, SP)
    move    $a3, $sp               # pass $sp
    beq     $v0, $zero, 1f
    ld      $a0, 0($sp)            # load resolved method in $a0
                                   # artQuickResolutionTrampoline puts resolved method in *SP
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    move    $t9, $v0               # code pointer must be in $t9 to generate the global pointer
    jalr    $zero, $t9             # tail call to method
    nop
1:
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
    # Generic JNI bridge: artQuickGenericJniTrampoline builds the native call frame in an
    # alloca'd region; the native method is then called with its GPR/FPR args loaded from
    # that frame, and artQuickGenericJniEndTrampoline post-processes the result.
    .extern artQuickGenericJniTrampoline
    .extern artQuickGenericJniEndTrampoline
ENTRY art_quick_generic_jni_trampoline
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
    move    $s8, $sp               # save $sp
    # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
    move    $a0, rSELF             # pass Thread::Current
    move    $a1, $sp               # pass $sp
    jal     artQuickGenericJniTrampoline   # (Thread*, SP)
    daddiu  $sp, $sp, -5120        # reserve space on the stack
    # The C call will have registered the complete save-frame on success.
    # The result of the call is:
    # v0: ptr to native code, 0 on error.
    # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
    beq     $v0, $zero, 1f         # check entry error
    move    $t9, $v0               # save the code ptr
    move    $sp, $v1               # release part of the alloca
    # Load parameters from stack into registers
    ld      $a0,   0($sp)
    ld      $a1,   8($sp)
    ld      $a2,  16($sp)
    ld      $a3,  24($sp)
    ld      $a4,  32($sp)
    ld      $a5,  40($sp)
    ld      $a6,  48($sp)
    ld      $a7,  56($sp)
    # Load FPRs the same as GPRs. Look at BuildNativeCallFrameStateMachine.
    l.d     $f12,  0($sp)
    l.d     $f13,  8($sp)
    l.d     $f14, 16($sp)
    l.d     $f15, 24($sp)
    l.d     $f16, 32($sp)
    l.d     $f17, 40($sp)
    l.d     $f18, 48($sp)
    l.d     $f19, 56($sp)
    jalr    $t9                    # native call
    daddiu  $sp, $sp, 64           # pop the 64-byte arg area in the delay slot
    # result sign extension is handled in C code
    # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
    move    $a0, rSELF             # pass Thread::Current
    move    $a1, $v0
    jal     artQuickGenericJniEndTrampoline
    dmfc1   $a2, $f0
    ld      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
    bne     $t0, $zero, 1f         # check for pending exceptions
    move    $sp, $s8               # tear down the alloca
    # tear down the callee-save frame
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    jalr    $zero, $ra
    dmtc1   $v0, $f0               # place return value to FP return value
1:
    ld      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
    # This will create a new save-all frame, required by the runtime.
    DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
    # Bridge from compiled code into the interpreter. The 64-bit result is mirrored
    # into $f0/$f1 so either return convention works.
    .extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    move    $a1, rSELF             # pass Thread::Current
    jal     artQuickToInterpreterBridge    # (Method* method, Thread*, SP)
    move    $a2, $sp               # pass $sp
    ld      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
    daddiu  $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE  # skip a0-a7 and f12-f19
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
    bne     $t0, $zero, 1f         # pending exception -> deliver
    dmtc1   $v0, $f0               # place return value to FP return value
    jalr    $zero, $ra
    dmtc1   $v1, $f1               # place return value to FP return value
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
    /*
     * Routine that intercepts method calls and returns.
     * Entry saves $a0, asks the runtime for the instrumented code pointer, and calls it
     * with $ra pointing at art_quick_instrumentation_exit (intentional fallthrough below).
     */
    .extern artInstrumentationMethodEntryFromCode
    .extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    daddiu   $sp, $sp, -16     # space for saving arg0
    .cfi_adjust_cfa_offset 16
    sd       $a0, 0($sp)       # save arg0
    move     $a3, $ra          # pass $ra
    jal      artInstrumentationMethodEntryFromCode  # (Method*, Object*, Thread*, RA)
    move     $a2, rSELF        # pass Thread::Current
    move     $t9, $v0          # $t9 holds reference to code
    ld       $a0, 0($sp)       # restore arg0
    daddiu   $sp, $sp, 16      # remove args
    .cfi_adjust_cfa_offset -16
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    jalr     $t9               # call method
    nop
END art_quick_instrumentation_entry
    /* intentional fallthrough */
    .global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
    .cfi_startproc
    SETUP_GP
    move     $ra, $zero        # link register is to here, so clobber with 0 for later checks
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
    move     $t0, $sp          # remember bottom of caller's frame
    daddiu   $sp, $sp, -16     # save return values and set up args
    .cfi_adjust_cfa_offset 16
    sd       $v0, 0($sp)
    .cfi_rel_offset 2, 0
    s.d      $f0, 8($sp)
    mov.d    $f15, $f0         # pass fpr result
    move     $a2, $v0          # pass gpr result
    move     $a1, $t0          # pass $sp
    move     $a0, rSELF        # pass Thread::Current
    jal      artInstrumentationMethodExitFromCode  # (Thread*, SP, gpr_res, fpr_res)
    .cpreturn                  # Restore gp from t8 in branch delay slot. gp is not used anymore,
                               # and t8 may be clobbered in artInstrumentationMethodExitFromCode.
    move     $t9, $v0          # set aside returned link register
    move     $ra, $v1          # set link register for deoptimization
    ld       $v0, 0($sp)       # restore return values
    l.d      $f0, 8($sp)
    jalr     $zero, $t9        # return
    daddiu   $sp, $sp, 16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE  # 16 bytes of saved values + ref_only callee save frame
    .cfi_adjust_cfa_offset -(16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
END art_quick_instrumentation_exit
    /*
     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     * (artDeoptimize does not return; no epilogue is needed.)
     */
    .extern artDeoptimize
    .extern artEnterInterpreterFromDeoptimize
ENTRY art_quick_deoptimize
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    jal      artDeoptimize     # artDeoptimize(Thread*, SP)
                               # Returns caller method's frame size.
    move     $a0, rSELF        # pass Thread::current
END art_quick_deoptimize
    /*
     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    jal      artDeoptimizeFromCompiledCode    # artDeoptimizeFromCompiledCode(Thread*, SP)
                                              # Returns caller method's frame size.
    move     $a0, rSELF                       # pass Thread::current
END art_quick_deoptimize_from_compiled_code
  .set push
  .set noat
/* java.lang.String.compareTo(String anotherString) */
/* Both char pointers are advanced in-place, so the value-offset load stays constant. */
ENTRY_NO_GP art_quick_string_compareto
/* $a0 holds address of "this" */
/* $a1 holds address of "anotherString" */
  beq    $a0,$a1,9f     # this and anotherString are the same object
  move   $v0,$zero
  lw     $a2,MIRROR_STRING_COUNT_OFFSET($a0)   # this.length()
  lw     $a3,MIRROR_STRING_COUNT_OFFSET($a1)   # anotherString.length()
  MINu   $t2, $a2, $a3
# $t2 now holds min(this.length(),anotherString.length())
  beqz   $t2,9f         # while min(this.length(),anotherString.length())-i != 0
  subu   $v0,$a2,$a3    # if $t2==0 return
                        #     (this.length() - anotherString.length())
1:
  lhu    $t0,MIRROR_STRING_VALUE_OFFSET($a0)   # while this.charAt(i) == anotherString.charAt(i)
  lhu    $t1,MIRROR_STRING_VALUE_OFFSET($a1)
  bne    $t0,$t1,9f     # if this.charAt(i) != anotherString.charAt(i)
  subu   $v0,$t0,$t1    #     return (this.charAt(i) - anotherString.charAt(i))
  daddiu $a0,$a0,2      # point at this.charAt(i++)
  subu   $t2,$t2,1      # new value of
                        # min(this.length(),anotherString.length())-i
  bnez   $t2,1b
  daddiu $a1,$a1,2      # point at anotherString.charAt(i++)
  subu   $v0,$a2,$a3    # all compared chars equal: return length difference
9:
  j      $ra
  nop
END art_quick_string_compareto
/* java.lang.String.indexOf(int ch, int fromIndex=0) */
/* Returns the index of the first occurrence of ch at or after fromIndex, or -1. */
ENTRY_NO_GP art_quick_indexof
/* $a0 holds address of "this" */
/* $a1 holds "ch" */
/* $a2 holds "fromIndex" */
  lw     $t0,MIRROR_STRING_COUNT_OFFSET($a0)    # this.length()
  slt    $at, $a2, $zero # if fromIndex < 0
  seleqz $a2, $a2, $at   #     fromIndex = 0;
  subu   $t0,$t0,$a2     # this.length() - fromIndex
  blez   $t0,6f          # if this.length()-fromIndex <= 0
  li     $v0,-1          #     return -1;
  sll    $v0,$a2,1       # $a0 += $a2 * 2
  daddu  $a0,$a0,$v0     #  "  "   "  " "
  move   $v0,$a2         # Set i to fromIndex.
1:
  lhu    $t3,MIRROR_STRING_VALUE_OFFSET($a0)    # if this.charAt(i) == ch
  beq    $t3,$a1,6f                             #     return i;
  daddu  $a0,$a0,2       # i++
  subu   $t0,$t0,1       # this.length() - i
  bnez   $t0,1b          # while this.length() - i > 0
  addu   $v0,$v0,1       # i++
  li     $v0,-1          # if this.length() - i <= 0
                         #     return -1;
6:
  j      $ra
  nop
END art_quick_indexof
  .set pop
|
abforce/xposed_art_n
| 1,251
|
runtime/arch/mips64/memcmp16_mips64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
#define ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
#include "asm_support_mips64.S"
.set noreorder
// u4 __memcmp16(const u2*, const u2*, size_t);
// Compares $a2 16-bit units; returns the difference of the first mismatching pair,
// or 0 when the buffers are equal (or length is 0 / addresses are identical).
ENTRY_NO_GP __memcmp16
  move  $t0, $zero
  move  $t1, $zero
  beqz  $a2, done       /* 0 length string */
  nop
  beq   $a0, $a1, done  /* addresses are identical */
  nop
1:
  lhu   $t0, 0($a0)
  lhu   $t1, 0($a1)
  bne   $t0, $t1, done
  nop
  daddu $a0, 2
  daddu $a1, 2
  dsubu $a2, 1
  bnez  $a2, 1b
  nop
done:
  dsubu $v0, $t0, $t1   /* 0 if equal; signed difference otherwise */
  j     $ra
  nop
END __memcmp16
#endif // ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
|
abforce/xposed_art_n
| 1,743
|
runtime/arch/arm/jni_entrypoints_arm.S
|
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_arm.S"
    /*
     * Jni dlsym lookup stub.
     * Spills r0-r3/lr, calls artFindNativeMethod(), then tail-calls the resolved native
     * code (r12) or returns to the caller so it can handle the pending exception.
     */
    .extern artFindNativeMethod
ENTRY art_jni_dlsym_lookup_stub
    push   {r0, r1, r2, r3, lr}           @ spill regs
    .cfi_adjust_cfa_offset 20
    .cfi_rel_offset r0, 0
    .cfi_rel_offset r1, 4
    .cfi_rel_offset r2, 8
    .cfi_rel_offset r3, 12
    .cfi_rel_offset lr, 16
    sub    sp, #12                        @ pad stack pointer to align frame
    .cfi_adjust_cfa_offset 12
    blx    artFindNativeMethod
    mov    r12, r0                        @ save result in r12
    add    sp, #12                        @ restore stack pointer
    .cfi_adjust_cfa_offset -12
    cbz    r0, 1f                         @ is method code null?
    pop    {r0, r1, r2, r3, lr}           @ restore regs
    .cfi_adjust_cfa_offset -20
    .cfi_restore r0
    .cfi_restore r1
    .cfi_restore r2
    .cfi_restore r3
    .cfi_restore lr
    bx     r12                            @ if non-null, tail call to method's code
1:
    pop    {r0, r1, r2, r3, pc}           @ restore regs and return to caller to handle exception
END art_jni_dlsym_lookup_stub
|
abforce/xposed_art_n
| 1,524
|
runtime/arch/arm/instruction_set_features_assembly_tests.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_arm.S"
.section .text
// This function is used to check for the CPU's support for the sdiv
// instruction at runtime. It will either return the value 1 or
// will cause an invalid instruction trap (SIGILL signal). The
// caller must arrange for the signal handler to set the r0
// register to 0 and move the pc forward by 4 bytes (to skip
// the invalid instruction).
ENTRY artCheckForArmSdivInstruction
  mov r1,#1
  // depending on the architecture, the assembler will not allow an
  // sdiv instruction, so we will have to output the bytes directly.
  // sdiv r0,r1,r1 is two words: 0xfb91 0xf1f0. We need little endian.
  .byte 0x91,0xfb,0xf1,0xf0
  // if the divide worked, r0 will have the value #1 (result of sdiv).
  // It will have 0 otherwise (set by the signal handler)
  // the value is just returned from this function.
  bx lr
END artCheckForArmSdivInstruction
|
abforce/xposed_art_n
| 6,811
|
runtime/arch/arm/memcmp16_arm.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_ARM_MEMCMP16_ARM_S_
#define ART_RUNTIME_ARCH_ARM_MEMCMP16_ARM_S_
#include "asm_support_arm.S"
/*
 * Optimized memcmp16() for ARM9.
 * This would not be optimal on XScale or ARM11, where more prefetching
 * and use of pld will be needed.
 * The 2 major optimizations here are
 * (1) The main loop compares 16 bytes at a time
 * (2) The loads are scheduled in a way they won't stall
 */
ARM_ENTRY __memcmp16
        pld         [r0, #0]
        pld         [r1, #0]
        /* take care of the case where the length is null or the buffers are the same */
        cmp         r0, r1
        cmpne       r2, #0
        moveq       r0, #0
        bxeq        lr
        /* since r0 holds the result, move the first source
         * pointer somewhere else
         */
        mov         r3, r0
        /* make sure we have at least 12 words, this simplifies things below
         * and avoids some overhead for small blocks
         */
        cmp         r2, #12
        bpl         0f
        /* small blocks (less than 12 words) */
        pld         [r0, #32]
        pld         [r1, #32]
1:      ldrh        r0, [r3], #2
        ldrh        ip, [r1], #2
        subs        r0, r0, ip
        bxne        lr
        subs        r2, r2, #1
        bne         1b
        bx          lr
        /* save registers */
0:      push        {r4, lr}
        .cfi_def_cfa_offset 8
        .cfi_rel_offset r4, 0
        .cfi_rel_offset lr, 4
        /* align first pointer to word boundary */
        tst         r3, #2
        beq         0f
        ldrh        r0, [r3], #2
        ldrh        ip, [r1], #2
        sub         r2, r2, #1
        subs        r0, r0, ip
        /* restore registers and return */
        popne       {r4, lr}
        bxne        lr
0:      /* here the first pointer is aligned, and we have at least 3 words
         * to process.
         */
        /* see if the pointers are congruent */
        eor         r0, r3, r1
        ands        r0, r0, #2
        bne         5f
        /* congruent case, 16 half-words per iteration
         * We need to make sure there are at least 16+2 words left
         * because we effectively read ahead one long word, and we could
         * read past the buffer (and segfault) if we're not careful.
         */
        ldr         ip, [r1]
        subs        r2, r2, #(16 + 2)
        bmi         1f
0:
        pld         [r3, #64]
        pld         [r1, #64]
        ldr         r0, [r3], #4
        ldr         lr, [r1, #4]!
        eors        r0, r0, ip
        ldreq       r0, [r3], #4
        ldreq       ip, [r1, #4]!
        eorseq      r0, r0, lr
        ldreq       r0, [r3], #4
        ldreq       lr, [r1, #4]!
        eorseq      r0, r0, ip
        ldreq       r0, [r3], #4
        ldreq       ip, [r1, #4]!
        eorseq      r0, r0, lr
        ldreq       r0, [r3], #4
        ldreq       lr, [r1, #4]!
        eorseq      r0, r0, ip
        ldreq       r0, [r3], #4
        ldreq       ip, [r1, #4]!
        eorseq      r0, r0, lr
        ldreq       r0, [r3], #4
        ldreq       lr, [r1, #4]!
        eorseq      r0, r0, ip
        ldreq       r0, [r3], #4
        ldreq       ip, [r1, #4]!
        eorseq      r0, r0, lr
        bne         2f
        subs        r2, r2, #16
        bhs         0b
        /* do we have at least 2 words left? */
1:      adds        r2, r2, #(16 - 2 + 2)
        bmi         4f
        /* finish off 2 words at a time */
3:      ldr         r0, [r3], #4
        ldr         ip, [r1], #4
        eors        r0, r0, ip
        bne         2f
        subs        r2, r2, #2
        bhs         3b
        /* are we done? */
4:      adds        r2, r2, #2
        bne         8f
        /* restore registers and return */
        mov         r0, #0
        pop         {r4, lr}
        .cfi_restore r4
        .cfi_restore lr
        .cfi_adjust_cfa_offset -8
        bx          lr
2:      /* the last 2 words are different, restart them */
        ldrh        r0, [r3, #-4]
        ldrh        ip, [r1, #-4]
        subs        r0, r0, ip
        ldrheq      r0, [r3, #-2]
        ldrheq      ip, [r1, #-2]
        subseq      r0, r0, ip
        /* restore registers and return */
        pop         {r4, lr}
        .cfi_restore r4
        .cfi_restore lr
        .cfi_adjust_cfa_offset -8
        bx          lr
        /* process the last few words */
8:      ldrh        r0, [r3], #2
        ldrh        ip, [r1], #2
        subs        r0, r0, ip
        bne         9f
        subs        r2, r2, #1
        bne         8b
9:      /* restore registers and return */
        pop         {r4, lr}
        .cfi_restore r4
        .cfi_restore lr
        .cfi_adjust_cfa_offset -8
        bx          lr
5:      /*************** non-congruent case ***************/
        /* align the unaligned pointer */
        bic         r1, r1, #3
        ldr         lr, [r1], #4
        sub         r2, r2, #8
6:
        pld         [r3, #64]
        pld         [r1, #64]
        mov         ip, lr, lsr #16
        ldr         lr, [r1], #4
        ldr         r0, [r3], #4
        orr         ip, ip, lr, lsl #16
        eors        r0, r0, ip
        moveq       ip, lr, lsr #16
        ldreq       lr, [r1], #4
        ldreq       r0, [r3], #4
        orreq       ip, ip, lr, lsl #16
        eorseq      r0, r0, ip
        moveq       ip, lr, lsr #16
        ldreq       lr, [r1], #4
        ldreq       r0, [r3], #4
        orreq       ip, ip, lr, lsl #16
        eorseq      r0, r0, ip
        moveq       ip, lr, lsr #16
        ldreq       lr, [r1], #4
        ldreq       r0, [r3], #4
        orreq       ip, ip, lr, lsl #16
        eorseq      r0, r0, ip
        bne         7f
        subs        r2, r2, #8
        bhs         6b
        sub         r1, r1, #2
        /* are we done? */
        adds        r2, r2, #8
        moveq       r0, #0
        beq         9b
        /* finish off the remaining bytes */
        b           8b
7:      /* fix up the 2 pointers and fallthrough... */
        sub         r1, r1, #2
        b           2b
END __memcmp16
#endif // ART_RUNTIME_ARCH_ARM_MEMCMP16_ARM_S_
|
abforce/xposed_art_n
| 4,805
|
runtime/arch/arm/asm_support_arm.S
|
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
#include "asm_support_arm.h"
// Define special registers.
// Register holding suspend check count down.
#define rSUSPEND r4
// Register holding Thread::Current().
#define rSELF r9
.syntax unified
.arch armv7-a
.thumb
// Macro to generate the value of Runtime::Current into rDest clobbering rTemp. As it uses labels
// then the labels need to be unique. We bind these to the function name in the ENTRY macros.
// Materializes Runtime::instance_ into \rDest, clobbering \rTemp.
// The literal words referenced here (.Lgot_* / .Lruntime_instance_*) are
// emitted by the END macro, keyed on the .Lruntime_current<num>_used flag.
.macro RUNTIME_CURRENT name, num, rDest, rTemp
// Guard: each numbered variant may be used at most once per ENTRY/END pair,
// because END emits exactly one pair of literal words for it.
.if .Lruntime_current\num\()_used
.error
.endif
.set .Lruntime_current\num\()_used, 1
ldr \rDest, .Lgot_\name\()_\num @ Load offset of the GOT.
ldr \rTemp, .Lruntime_instance_\name\()_\num @ Load GOT offset of Runtime::instance_.
// PC-relative fixup point: the literal loaded above is GOT - (this pc + 4).
.Lload_got_\name\()_\num\():
add \rDest, pc @ Fixup GOT address.
ldr \rDest, [\rDest, \rTemp] @ Load address of Runtime::instance_.
ldr \rDest, [\rDest] @ Load Runtime::instance_.
.endm
// Common ENTRY declaration code for ARM and thumb, an ENTRY should always be paired with an END.
// Declares the RUNTIME_CURRENT[123] macros that can be used within an ENTRY and will have literals
// generated at END.
// Common ENTRY body shared by ARM and Thumb2 entry points: declares \name as a
// hidden global function symbol, opens the CFI/EH scopes (closed by END), and
// binds the per-function RUNTIME_CURRENT1..3 helper macros.
.macro DEF_ENTRY thumb_or_arm, name
\thumb_or_arm
// Clang ignores .thumb_func and requires an explicit .thumb. Investigate whether we should still
// carry around the .thumb_func.
.ifc \thumb_or_arm, .thumb_func
.thumb
.endif
.type \name, #function
.hidden \name // Hide this as a global symbol, so we do not incur plt calls.
.global \name
// Cache alignment for function entry.
.balign 16
\name:
.cfi_startproc
.fnstart
// Track whether RUNTIME_CURRENT was used.
.set .Lruntime_current1_used, 0
.set .Lruntime_current2_used, 0
.set .Lruntime_current3_used, 0
// The RUNTIME_CURRENT macros that are bound to the \name argument of DEF_ENTRY to ensure
// that label names are unique.
.macro RUNTIME_CURRENT1 rDest, rTemp
RUNTIME_CURRENT \name, 1, \rDest, \rTemp
.endm
.macro RUNTIME_CURRENT2 rDest, rTemp
RUNTIME_CURRENT \name, 2, \rDest, \rTemp
.endm
.macro RUNTIME_CURRENT3 rDest, rTemp
RUNTIME_CURRENT \name, 3, \rDest, \rTemp
.endm
.endm
// A thumb2 style ENTRY.
// Thin wrappers over DEF_ENTRY selecting the instruction set of the stub.
.macro ENTRY name
DEF_ENTRY .thumb_func, \name
.endm
// A ARM style ENTRY.
.macro ARM_ENTRY name
DEF_ENTRY .arm, \name
.endm
// Terminate an ENTRY and generate GOT references.
// For each RUNTIME_CURRENT<n> used in the function body, emit the two literal
// words it loads: the PC-relative offset to the GOT and the GOT offset of
// Runtime::instance_ (mangled C++ symbol _ZN3art7Runtime9instance_E).
.macro END name
// Generate offsets of GOT and Runtime::instance_ used in RUNTIME_CURRENT.
.if .Lruntime_current1_used
.Lgot_\name\()_1:
.word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_1+4)
.Lruntime_instance_\name\()_1:
.word _ZN3art7Runtime9instance_E(GOT)
.endif
.if .Lruntime_current2_used
.Lgot_\name\()_2:
.word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_2+4)
.Lruntime_instance_\name\()_2:
.word _ZN3art7Runtime9instance_E(GOT)
.endif
.if .Lruntime_current3_used
.Lgot_\name\()_3:
.word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_3+4)
.Lruntime_instance_\name\()_3:
.word _ZN3art7Runtime9instance_E(GOT)
.endif
// Remove the RUNTIME_CURRENTx macros so they get rebound in the next function entry.
.purgem RUNTIME_CURRENT1
.purgem RUNTIME_CURRENT2
.purgem RUNTIME_CURRENT3
.fnend
.cfi_endproc
.size \name, .-\name
.endm
// Declare an unimplemented ENTRY that will halt a debugger.
.macro UNIMPLEMENTED name
ENTRY \name
bkpt
bkpt
END \name
.endm
// Macros to poison (negate) the reference for heap poisoning.
// rsb with #0 computes the two's-complement negation of \rRef in place; the
// macro is a no-op unless USE_HEAP_POISONING is defined.
.macro POISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
rsb \rRef, \rRef, #0
#endif // USE_HEAP_POISONING
.endm
// Macros to unpoison (negate) the reference for heap poisoning.
// Negation is its own inverse, so unpoisoning uses the same instruction.
.macro UNPOISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
rsb \rRef, \rRef, #0
#endif // USE_HEAP_POISONING
.endm
#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
|
abforce/xposed_art_n
| 79,927
|
runtime/arch/arm/quick_entrypoints_arm.S
|
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_arm.S"
#include "arch/quick_alloc_entrypoints.S"
/* Deliver the given exception */
.extern artDeliverExceptionFromCode
/* Deliver an exception pending on a thread */
.extern artDeliverPendingException
/*
 * Macro to spill the GPRs.
 */
// Saves r4-r11 and lr (9 words) with matching CFI annotations.
.macro SPILL_ALL_CALLEE_SAVE_GPRS
push {r4-r11, lr} @ 9 words (36 bytes) of callee saves.
.cfi_adjust_cfa_offset 36
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset r7, 12
.cfi_rel_offset r8, 16
.cfi_rel_offset r9, 20
.cfi_rel_offset r10, 24
.cfi_rel_offset r11, 28
.cfi_rel_offset lr, 32
.endm
/*
 * Macro that sets up the callee save frame to conform with
 * Runtime::CreateCalleeSaveMethod(kSaveAll)
 */
// Frame layout (36 + 64 + 12 bytes): GPR spills, s16-s31, then 3 words whose
// bottom word holds the kSaveAll ArtMethod*. Clobbers \rTemp1 and \rTemp2 and
// publishes sp to Thread::Current()->top_quick_frame (r9 = self, see rSELF).
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp1, rTemp2
SPILL_ALL_CALLEE_SAVE_GPRS @ 9 words (36 bytes) of callee saves.
vpush {s16-s31} @ 16 words (64 bytes) of floats.
.cfi_adjust_cfa_offset 64
sub sp, #12 @ 3 words of space, bottom word will hold Method*
.cfi_adjust_cfa_offset 12
RUNTIME_CURRENT1 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
ldr \rTemp1, [\rTemp1, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kSaveAll Method*.
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 36 + 64 + 12)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM) size not as expected."
#endif
.endm
/*
 * Macro that sets up the callee save frame to conform with
 * Runtime::CreateCalleeSaveMethod(kRefsOnly).
 */
// Frame layout (28 + 4 bytes): 7 GPR spills plus one word holding the
// kRefsOnly ArtMethod*. Clobbers \rTemp1 and \rTemp2.
.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME rTemp1, rTemp2
push {r5-r8, r10-r11, lr} @ 7 words of callee saves
.cfi_adjust_cfa_offset 28
.cfi_rel_offset r5, 0
.cfi_rel_offset r6, 4
.cfi_rel_offset r7, 8
.cfi_rel_offset r8, 12
.cfi_rel_offset r10, 16
.cfi_rel_offset r11, 20
.cfi_rel_offset lr, 24
sub sp, #4 @ bottom word will hold Method*
.cfi_adjust_cfa_offset 4
RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 28 + 4)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM) size not as expected."
#endif
.endm
/*
 * Macro that sets up the callee save frame to conform with
 * Runtime::CreateCalleeSaveMethod(kRefsOnly)
 * and preserves the value of rTemp2 at entry.
 */
// Same as above, but \rTemp2 is parked in the Method* slot while
// RUNTIME_CURRENT2 clobbers it, then restored before the slot is overwritten.
.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_RTEMP2 rTemp1, rTemp2
push {r5-r8, r10-r11, lr} @ 7 words of callee saves
.cfi_adjust_cfa_offset 28
.cfi_rel_offset r5, 0
.cfi_rel_offset r6, 4
.cfi_rel_offset r7, 8
.cfi_rel_offset r8, 12
.cfi_rel_offset r10, 16
.cfi_rel_offset r11, 20
.cfi_rel_offset lr, 24
sub sp, #4 @ bottom word will hold Method*
.cfi_adjust_cfa_offset 4
str \rTemp2, [sp, #0] @ save rTemp2
RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
ldr \rTemp2, [sp, #0] @ restore rTemp2
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 28 + 4)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM) size not as expected."
#endif
.endm
// Tears down the kRefsOnly frame built by the SETUP macros above.
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
add sp, #4 @ bottom word holds Method*
.cfi_adjust_cfa_offset -4
pop {r5-r8, r10-r11, lr} @ 7 words of callee saves
.cfi_restore r5
.cfi_restore r6
.cfi_restore r7
.cfi_restore r8
.cfi_restore r10
.cfi_restore r11
.cfi_restore lr
.cfi_adjust_cfa_offset -28
.endm
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
bx lr @ return
.endm
/*
 * Macro that sets up the callee save frame to conform with
 * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
 */
// Frame layout (40 + 64 + 8 bytes): argument GPRs r1-r3 and callee saves,
// FP argument registers s0-s15, then 2 words whose bottom holds the Method*.
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves and args.
.cfi_adjust_cfa_offset 40
.cfi_rel_offset r1, 0
.cfi_rel_offset r2, 4
.cfi_rel_offset r3, 8
.cfi_rel_offset r5, 12
.cfi_rel_offset r6, 16
.cfi_rel_offset r7, 20
.cfi_rel_offset r8, 24
.cfi_rel_offset r10, 28
.cfi_rel_offset r11, 32
.cfi_rel_offset lr, 36
vpush {s0-s15} @ 16 words of float args.
.cfi_adjust_cfa_offset 64
sub sp, #8 @ 2 words of space, bottom word will hold Method*
.cfi_adjust_cfa_offset 8
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 40 + 64 + 8)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM) size not as expected."
#endif
.endm
// Full variant: fills the Method* slot with the runtime's kRefsAndArgs method
// and publishes sp as the top quick frame. Clobbers \rTemp1 and \rTemp2.
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp1, rTemp2
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
RUNTIME_CURRENT3 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
@ rTemp1 is kRefsAndArgs Method*.
ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
// Variant used when the caller already has the ArtMethod* in r0.
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
str r0, [sp, #0] @ Store ArtMethod* to bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
// Tears down the kRefsAndArgs frame, restoring argument and callee-save regs.
.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
add sp, #8 @ rewind sp
.cfi_adjust_cfa_offset -8
vpop {s0-s15}
.cfi_adjust_cfa_offset -64
pop {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
.cfi_restore r1
.cfi_restore r2
.cfi_restore r3
.cfi_restore r5
.cfi_restore r6
.cfi_restore r7
.cfi_restore r8
.cfi_restore r10
.cfi_restore r11
.cfi_restore lr
.cfi_adjust_cfa_offset -40
.endm
// Return to the caller iff r0 == 0; otherwise fall through.
.macro RETURN_IF_RESULT_IS_ZERO
cbnz r0, 1f @ result non-zero branch over
bx lr @ return
1:
.endm
// Return to the caller iff r0 != 0; otherwise fall through.
.macro RETURN_IF_RESULT_IS_NON_ZERO
cbz r0, 1f @ result zero branch over
bx lr @ return
1:
.endm
/*
 * Macro that sets up a call through to artDeliverPendingExceptionFromCode, where the pending
 * exception is Thread::Current()->exception_
 */
// Restarts the unwind scope (.fnend/.fnstart) because this macro is expanded
// mid-function after previous frame manipulation; never returns.
.macro DELIVER_PENDING_EXCEPTION
.fnend
.fnstart
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1 @ save callee saves for throw
mov r0, r9 @ pass Thread::Current
b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
.endm
// Generators for throw entry points: the N_ARG variants differ only in which
// registers already hold the exception's arguments (r0..rN-1) and therefore in
// which register receives Thread::Current.
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1 // save all registers as basis for long jump context
mov r0, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r1, r2 // save all registers as basis for long jump context
mov r1, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2, r3 // save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
// Return if no exception is pending on the current thread, else deliver it.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
ldr \reg, [r9, #THREAD_EXCEPTION_OFFSET] // Get exception field.
cbnz \reg, 1f
bx lr
1:
DELIVER_PENDING_EXCEPTION
.endm
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r1
.endm
.macro RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
.endm
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
RETURN_IF_RESULT_IS_NON_ZERO
DELIVER_PENDING_EXCEPTION
.endm
// Macros taking opportunity of code similarities for downcalls with referrer for non-wide fields.
// Each expands to a full ENTRY that builds a kRefsOnly frame, loads the
// referring ArtMethod* from the caller frame slot just above it, calls the
// C++ entrypoint with Thread::Current appended, and finishes with \return.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case of GC
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r2, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r3, r9 @ pass Thread::Current
bl \entrypoint @ (field_idx, Object*, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Three-arg variant: Thread* no longer fits in a register slot, so it is
// passed on the stack in a 16-byte (alignment-preserving) out-args area.
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
ldr r3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
\return
END \name
.endm
// Throw entry points generated from the *_RUNTIME_EXCEPTION macros above.
/*
 * Called by managed code, saves callee saves and then calls artThrowException
 * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
 */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
/*
 * Called by managed code to create and deliver a NullPointerException.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
 * Called by managed code to create and deliver an ArithmeticException.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
/*
 * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
 * index, arg2 holds limit.
 */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
 * Called by managed code to create and deliver a StackOverflowError.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
/*
 * Called by managed code to create and deliver a NoSuchMethodError.
 */
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
/*
 * All generated callsites for interface invokes and invocation slow paths will load arguments
 * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
 * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
 * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1.
 *
 * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
 * of the target Method* in r0 and method->code_ in r1.
 *
 * If unsuccessful, the helper will return null/null. There will be a pending exception in the
 * thread and we branch to another stub to deliver it.
 *
 * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
 * pointing back to the original caller.
 */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case allocation triggers GC
mov r2, r9 @ pass Thread::Current
mov r3, sp
bl \cxx_name @ (method_idx, this, Thread*, SP)
mov r12, r1 @ save Method*->code_
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
cbz r0, 1f @ did we find the target? if not go to exception delivery
bx r12 @ tail call to target
1:
DELIVER_PENDING_EXCEPTION
.endm
.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm
// One trampoline per invoke kind; they differ only in the resolver called.
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
 * Quick invocation stub internal.
 * On entry:
 *   r0 = method pointer
 *   r1 = argument array or null for no argument methods
 *   r2 = size of argument array in bytes
 *   r3 = (managed) thread pointer
 *   [sp] = JValue* result
 *   [sp + 4] = result_in_float
 *   [sp + 8] = core register argument array
 *   [sp + 12] = fp register argument array
 *  +-------------------------+
 *  | uint32_t* fp_reg_args  |
 *  | uint32_t* core_reg_args |
 *  |   result_in_float     | <- Caller frame
 *  |   Jvalue* result      |
 *  +-------------------------+
 *  |          lr           |
 *  |          r11          |
 *  |          r9           |
 *  |          r4           | <- r11
 *  +-------------------------+
 *  | uint32_t out[n-1]     |
 *  |    :      :           |        Outs
 *  | uint32_t out[0]       |
 *  | StackRef<ArtMethod>   | <- SP  value=null
 *  +-------------------------+
 */
ENTRY art_quick_invoke_stub_internal
SPILL_ALL_CALLEE_SAVE_GPRS @ spill regs (9)
mov r11, sp @ save the stack pointer
.cfi_def_cfa_register r11
mov r9, r3 @ move managed thread pointer into r9
add r4, r2, #4 @ create space for method pointer in frame
sub r4, sp, r4 @ reserve & align *stack* to 16 bytes: native calling
and r4, #0xFFFFFFF0 @ convention only aligns to 8B, so we have to ensure ART
mov sp, r4 @ 16B alignment ourselves.
mov r4, r0 @ save method*
add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
bl memcpy @ memcpy (dest, src, bytes)
mov ip, #0 @ set ip to 0
str ip, [sp] @ store null for method* at bottom of frame
// [r11, #36..48] index the caller-frame slots above the 9 spilled GPRs
// (36 bytes), matching the [sp]..[sp+12] layout documented above.
ldr ip, [r11, #48] @ load fp register argument array pointer
vldm ip, {s0-s15} @ copy s0 - s15
ldr ip, [r11, #44] @ load core register argument array pointer
mov r0, r4 @ restore method*
add ip, ip, #4 @ skip r0
ldm ip, {r1-r3} @ copy r1 - r3
#ifdef ARM_R4_SUSPEND_FLAG
mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
#endif
ldr ip, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
.cfi_def_cfa_register sp
ldr r4, [sp, #40] @ load result_is_float
ldr r9, [sp, #36] @ load the result pointer
cmp r4, #0
ite eq
strdeq r0, [r9] @ store r0/r1 into result pointer
vstrne d0, [r9] @ store s0-s1/d0 into result pointer
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} @ restore spill regs
END art_quick_invoke_stub_internal
/*
 * On stack replacement stub.
 * On entry:
 *   r0 = stack to copy
 *   r1 = size of stack
 *   r2 = pc to call
 *   r3 = JValue* result
 *   [sp] = shorty
 *   [sp + 4] = thread
 */
ENTRY art_quick_osr_stub
SPILL_ALL_CALLEE_SAVE_GPRS @ Spill regs (9)
mov r11, sp @ Save the stack pointer
mov r10, r1 @ Save size of stack
ldr r9, [r11, #40] @ Move managed thread pointer into r9
mov r8, r2 @ Save the pc to call
sub r7, sp, #12 @ Reserve space for stack pointer,
@ JValue* result, and ArtMethod* slot.
and r7, #0xFFFFFFF0 @ Align stack pointer
mov sp, r7 @ Update stack pointer
str r11, [sp, #4] @ Save old stack pointer
str r3, [sp, #8] @ Save JValue* result
mov ip, #0
str ip, [sp] @ Store null for ArtMethod* at bottom of frame
sub sp, sp, r1 @ Reserve space for callee stack
mov r2, r1
mov r1, r0
mov r0, sp
bl memcpy @ memcpy (dest r0, src r1, bytes r2)
// The bl sets lr; .Losr_entry stores it into the copied frame's return slot
// so the OSR-compiled code can return here normally.
bl .Losr_entry @ Call the method
ldr r10, [sp, #8] @ Restore JValue* result
ldr sp, [sp, #4] @ Restore saved stack pointer
ldr r4, [sp, #36] @ load shorty
ldrb r4, [r4, #0] @ load return type
// 68/70 are ASCII 'D'/'F' — wide/narrow float results come back in d0.
cmp r4, #68 @ Test if result type char == 'D'.
beq .Losr_fp_result
cmp r4, #70 @ Test if result type char == 'F'.
beq .Losr_fp_result
strd r0, [r10] @ Store r0/r1 into result pointer
b .Losr_exit
.Losr_fp_result:
vstr d0, [r10] @ Store s0-s1/d0 into result pointer
.Losr_exit:
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.Losr_entry:
sub r10, r10, #4
str lr, [sp, r10] @ Store link register per the compiler ABI
bx r8
END art_quick_osr_stub
/*
 * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_
 */
// Restores the full register context from the two arrays and jumps to the
// saved PC; r0 is loaded last (from gprs_[0]) since it holds the array base.
ARM_ENTRY art_quick_do_long_jump
vldm r1, {s0-s31} @ load all fprs from argument fprs_
ldr r2, [r0, #60] @ r2 = r15 (PC from gprs_ 60=4*15)
ldr r14, [r0, #56] @ (LR from gprs_ 56=4*14)
add r0, r0, #12 @ increment r0 to skip gprs_[0..2] 12=4*3
ldm r0, {r3-r13} @ load remaining gprs from argument gprs_
ldr r0, [r0, #-12] @ load r0 value
mov r1, #0 @ clear result register r1
bx r2 @ do long jump
END art_quick_do_long_jump
/*
 * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
 * failure.
 */
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
/*
 * Entry from managed code that calls artLockObjectFromCode, may block for GC. r0 holds the
 * possibly null object to lock.
 */
// Fast path: thin-lock via ldrex/strex on the object's lock word. Falls back
// to the C++ slow path for null, fat/forwarded state, recursion by another
// thread, or thin-lock count overflow.
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
cbz r0, .Lslow_lock
.Lretry_lock:
ldr r2, [r9, #THREAD_ID_OFFSET]
ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
mov r3, r1
and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
cbnz r3, .Lnot_unlocked @ already thin locked
@ unlocked case - r1: original lock word that's zero except for the read barrier bits.
orr r2, r1, r2 @ r2 holds thread id with count of 0 with preserved read barrier bits
strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
cbnz r3, .Llock_strex_fail @ store failed, retry
dmb ish @ full (LoadLoad|LoadStore) memory barrier
bx lr
.Lnot_unlocked: @ r1: original lock word, r2: thread_id with count of 0 and zero read barrier bits
lsr r3, r1, LOCK_WORD_STATE_SHIFT
cbnz r3, .Lslow_lock @ if either of the top two bits are set, go slow path
eor r2, r1, r2 @ lock_word.ThreadId() ^ self->ThreadId()
uxth r2, r2 @ zero top 16 bits
cbnz r2, .Lslow_lock @ lock word and self thread id's match -> recursive lock
@ else contention, go to slow path
mov r3, r1 @ copy the lock word to check count overflow.
and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits.
add r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count in lock word placing in r2 to check overflow
lsr r3, r2, #LOCK_WORD_READ_BARRIER_STATE_SHIFT @ if either of the upper two bits (28-29) are set, we overflowed.
cbnz r3, .Lslow_lock @ if we overflow the count go slow path
add r2, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count for real
strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
cbnz r3, .Llock_strex_fail @ strex failed, retry
bx lr
.Llock_strex_fail:
b .Lretry_lock @ retry
.Lslow_lock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_lock_object
// Variant with no inlined fast path: always calls the C++ entrypoint.
ENTRY art_quick_lock_object_no_inline
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_lock_object_no_inline
/*
 * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
 * r0 holds the possibly null object to unlock.
 */
// Fast path: decrement the thin-lock count or transition to unlocked. With
// USE_READ_BARRIER the lock word must be updated with ldrex/strex so the read
// barrier bits are preserved atomically; without it plain ldr/str suffice.
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
cbz r0, .Lslow_unlock
.Lretry_unlock:
#ifndef USE_READ_BARRIER
ldr r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ Need to use atomic instructions for read barrier
#endif
lsr r2, r1, #LOCK_WORD_STATE_SHIFT
cbnz r2, .Lslow_unlock @ if either of the top two bits are set, go slow path
ldr r2, [r9, #THREAD_ID_OFFSET]
mov r3, r1 @ copy lock word to check thread id equality
and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
eor r3, r3, r2 @ lock_word.ThreadId() ^ self->ThreadId()
uxth r3, r3 @ zero top 16 bits
cbnz r3, .Lslow_unlock @ do lock word and self thread id's match?
mov r3, r1 @ copy lock word to detect transition to unlocked
and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED @ zero the read barrier bits
cmp r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
bpl .Lrecursive_thin_unlock
@ transition to unlocked
mov r3, r1
and r3, #LOCK_WORD_READ_BARRIER_STATE_MASK @ r3: zero except for the preserved read barrier bits
dmb ish @ full (LoadStore|StoreStore) memory barrier
#ifndef USE_READ_BARRIER
str r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
cbnz r2, .Lunlock_strex_fail @ store failed, retry
#endif
bx lr
.Lrecursive_thin_unlock: @ r1: original lock word
sub r1, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ decrement count
#ifndef USE_READ_BARRIER
str r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
strex r2, r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
cbnz r2, .Lunlock_strex_fail @ store failed, retry
#endif
bx lr
.Lunlock_strex_fail:
b .Lretry_unlock @ retry
.Lslow_unlock:
@ save callee saves in case exception allocation triggers GC
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2
mov r1, r9 @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_unlock_object
// Variant with no inlined fast path: always calls the C++ entrypoint.
ENTRY art_quick_unlock_object_no_inline
@ save callee saves in case exception allocation triggers GC
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2
mov r1, r9 @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_unlock_object_no_inline
/*
 * Entry from managed code that calls artIsAssignableFromCode and on failure calls
 * artThrowClassCastException.
 */
// r0 = destination class, r1 = source class. r0/r1 are saved across the
// assignability call so the throw path can re-pass the original classes.
.extern artThrowClassCastException
ENTRY art_quick_check_cast
push {r0-r1, lr} @ save arguments, link register and pad
.cfi_adjust_cfa_offset 12
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
.cfi_rel_offset lr, 8
sub sp, #4
.cfi_adjust_cfa_offset 4
bl artIsAssignableFromCode
cbz r0, .Lthrow_class_cast_exception
add sp, #4
.cfi_adjust_cfa_offset -4
pop {r0-r1, pc}
.cfi_adjust_cfa_offset 4 @ Reset unwind info so following code unwinds.
.Lthrow_class_cast_exception:
add sp, #4
.cfi_adjust_cfa_offset -4
pop {r0-r1, lr}
.cfi_adjust_cfa_offset -12
.cfi_restore r0
.cfi_restore r1
.cfi_restore lr
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2, r3 // save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
b artThrowClassCastException @ (Class*, Class*, Thread*)
bkpt
END art_quick_check_cast
// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude.
// Used by READ_BARRIER so the register holding the result is not clobbered by
// the restore sequence.
.macro POP_REG_NE rReg, offset, rExclude
.ifnc \rReg, \rExclude
ldr \rReg, [sp, #\offset] @ restore rReg
.cfi_restore \rReg
.endif
.endm
/*
 * Macro to insert read barrier, only used in art_quick_aput_obj.
 * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
 * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
 */
// With USE_READ_BARRIER: spills r0-r3/ip/lr, calls artReadBarrierSlow and puts
// the result in \rDest. Without it: a plain (unpoisoned) field load.
.macro READ_BARRIER rDest, rObj, offset
#ifdef USE_READ_BARRIER
push {r0-r3, ip, lr} @ 6 words for saved registers (used in art_quick_aput_obj)
.cfi_adjust_cfa_offset 24
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
.cfi_rel_offset r2, 8
.cfi_rel_offset r3, 12
.cfi_rel_offset ip, 16
.cfi_rel_offset lr, 20
sub sp, #8 @ push padding
.cfi_adjust_cfa_offset 8
@ mov r0, \rRef @ pass ref in r0 (no-op for now since parameter ref is unused)
.ifnc \rObj, r1
mov r1, \rObj @ pass rObj
.endif
mov r2, #\offset @ pass offset
bl artReadBarrierSlow @ artReadBarrierSlow(ref, rObj, offset)
@ No need to unpoison return value in r0, artReadBarrierSlow() would do the unpoisoning.
.ifnc \rDest, r0
mov \rDest, r0 @ save return value in rDest
.endif
add sp, #8 @ pop padding
.cfi_adjust_cfa_offset -8
POP_REG_NE r0, 0, \rDest @ conditionally restore saved registers
POP_REG_NE r1, 4, \rDest
POP_REG_NE r2, 8, \rDest
POP_REG_NE r3, 12, \rDest
POP_REG_NE ip, 16, \rDest
add sp, #20
.cfi_adjust_cfa_offset -20
pop {lr} @ restore lr
.cfi_adjust_cfa_offset -4
.cfi_restore lr
#else
ldr \rDest, [\rObj, #\offset]
UNPOISON_HEAP_REF \rDest
#endif // USE_READ_BARRIER
.endm
/*
 * Entry from managed code for array put operations of objects where the value being stored
 * needs to be checked for compatibility.
 * r0 = array, r1 = index, r2 = value
 */
// Null-check variant: falls through to the bound check for non-null arrays.
ENTRY art_quick_aput_obj_with_null_and_bound_check
tst r0, r0
bne art_quick_aput_obj_with_bound_check
b art_quick_throw_null_pointer_exception
END art_quick_aput_obj_with_null_and_bound_check
// Bound-check variant: on failure, throws with index (r0) and length (r1).
.hidden art_quick_aput_obj_with_bound_check
ENTRY art_quick_aput_obj_with_bound_check
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]
cmp r3, r1
bhi art_quick_aput_obj
mov r0, r1
mov r1, r3
b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check
#ifdef USE_READ_BARRIER
.extern artReadBarrierSlow
#endif
.hidden art_quick_aput_obj
ENTRY art_quick_aput_obj
#ifdef USE_READ_BARRIER
@ The offset to .Ldo_aput_null is too large to use cbz due to expansion from READ_BARRIER macro.
tst r2, r2
beq .Ldo_aput_null
#else
cbz r2, .Ldo_aput_null
#endif // USE_READ_BARRIER
READ_BARRIER r3, r0, MIRROR_OBJECT_CLASS_OFFSET
READ_BARRIER ip, r2, MIRROR_OBJECT_CLASS_OFFSET
READ_BARRIER r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
cmp r3, ip @ value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
// Card-mark the array: store into card_table_base[array_address >> 7].
ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #7
strb r3, [r3, r0]
blx lr
.Ldo_aput_null:
// Storing null needs no type check and no card mark.
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
str r2, [r3, r1, lsl #2]
blx lr
.Lcheck_assignability:
push {r0-r2, lr} @ save arguments
.cfi_adjust_cfa_offset 16
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
.cfi_rel_offset r2, 8
.cfi_rel_offset lr, 12
mov r1, ip
mov r0, r3
bl artIsAssignableFromCode
cbz r0, .Lthrow_array_store_exception
pop {r0-r2, lr}
.cfi_restore r0
.cfi_restore r1
.cfi_restore r2
.cfi_restore lr
.cfi_adjust_cfa_offset -16
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #7
strb r3, [r3, r0]
blx lr
.Lthrow_array_store_exception:
pop {r0-r2, lr}
/* No need to repeat restore cfi directives, the ones above apply here. */
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r3, ip
mov r1, r2
mov r2, r9 @ pass Thread::Current
b artThrowArrayStoreException @ (Class*, Class*, Thread*)
bkpt @ unreached
END art_quick_aput_obj
// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case of GC
mov r1, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Macro to facilitate adding new allocation entrypoints.
// Same shape as ONE_ARG_DOWNCALL but keeps r0/r1 as the first two C arguments
// and passes Thread::Current() in r2.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Macro to facilitate adding new array allocation entrypoints.
// Keeps r0-r2 as the first three C arguments and passes Thread::Current() in r3.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
mov r3, r9 @ pass Thread::Current
@ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
bl \entrypoint
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Macro to facilitate adding new allocation entrypoints.
// All four core argument registers (r0-r3) are already in use, so
// Thread::Current() is passed as the fifth C argument on the stack; the extra
// 16 bytes keep AAPCS 8-byte stack alignment (16 keeps the frame layout simple).
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_RTEMP2 r12, r3 @ save callee saves in case of GC
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl \entrypoint
add sp, #16 @ strip the extra frame
.cfi_adjust_cfa_offset -16
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Class initialization / type resolution entrypoints. On success the resolved
// object is returned in r0; on failure the pending exception is delivered.
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
/*
 * Called by managed code to resolve a static field and load a non-wide value.
 * r0 = field_idx; success is distinguished by checking the thread's exception
 * slot (the _R1 variants use r1 for that check so a zero value is not
 * mistaken for failure).
 */
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
/*
 * Called by managed code to resolve a static field and load a 64-bit primitive value.
 * In: r0 = field_idx. Out: r0/r1 = value on success; otherwise the pending
 * exception is delivered. A 64-bit result can legitimately be zero, so success
 * is detected via the thread's exception slot rather than the return value.
 */
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r2, r9 @ pass Thread::Current
bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
cbnz r2, 1f @ branch to deliver if an exception is pending
bx lr @ return on success
1:
DELIVER_PENDING_EXCEPTION
END art_quick_get64_static
/*
 * Called by managed code to resolve an instance field and load a non-wide value.
 * r0 = field_idx, r1 = object; result in r0, failure detected via the thread's
 * exception slot (checked in r1 by the _R1 return macro).
 */
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
/*
 * Called by managed code to resolve an instance field and load a 64-bit primitive value.
 * In: r0 = field_idx, r1 = object. Out: r0/r1 = value on success; failure is
 * detected via the thread's exception slot (a 64-bit result may be zero).
 */
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r3, r9 @ pass Thread::Current
bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
cbnz r2, 1f @ branch to deliver if an exception is pending
bx lr @ return on success
1:
DELIVER_PENDING_EXCEPTION
END art_quick_get64_instance
/*
 * Called by managed code to resolve a static field and store a non-wide value.
 * r0 = field_idx, r1 = new_val; the entrypoint returns 0 on success.
 */
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
/*
 * Called by managed code to resolve a static field and store a 64-bit primitive value.
 * On entry r0 holds field index, r2:r3 hold new_val. Thread::Current() goes on
 * the stack as the fourth C argument (16 bytes reserved to keep a simple,
 * aligned out-args area). Returns 0 on success, delivers exception otherwise.
 */
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_set64_static
/*
 * Called by managed code to resolve an instance field and store a non-wide value.
 * r0 = field_idx, r1 = object, r2 = new_val; the entrypoint returns 0 on success.
 */
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
/*
 * Called by managed code to resolve an instance field and store a 64-bit primitive value.
 * In: r0 = field_idx, r1 = object, r2:r3 = new_val. The referrer and
 * Thread::Current() become the 4th/5th C arguments on the stack; the two
 * pushes total 16 bytes, preserving AAPCS stack alignment.
 */
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12, lr @ save callee saves in case of GC
@ r2:r3 contain the wide argument
ldr r12, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 12
str r12, [sp, #-4]! @ expand the frame and pass the referrer
.cfi_adjust_cfa_offset 4
bl artSet64InstanceFromCode @ (field_idx, Object*, new_val, Method* referrer, Thread*)
add sp, #16 @ release out args
.cfi_adjust_cfa_offset -16
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
RETURN_IF_RESULT_IS_ZERO
DELIVER_PENDING_EXCEPTION
END art_quick_set64_instance
/*
 * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
 * exception on error. On success the String is returned. R0 holds the string index. The fast
 * path check for hit in strings cache has already been performed.
 */
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
// (Hand-written fast paths below override the rosalloc/TLAB object variants.)
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
// Fast path: pop an object slot off the thread-local rosalloc free list for the
// right size bracket, install the class pointer, push the object on the
// thread-local allocation stack. Falls back to artAllocObjectFromCodeRosAlloc
// when the class is unresolved/uninitialized/finalizable, the alloc stack is
// full, the size is not thread-local, or the free list is empty.
ENTRY art_quick_alloc_object_rosalloc
// Fast path rosalloc allocation.
// r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
// r2, r3, r12: free.
ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
// Load the class (r2)
ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
// Check class status.
ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
bne .Lart_quick_alloc_object_rosalloc_slow_path
// Add a fake dependence from the
// following access flag and size
// loads to the status load.
// This is to prevent those loads
// from being reordered above the
// status load and reading wrong
// values (an alternative is to use
// a load-acquire for the status).
eor r3, r3, r3 // r3 = 0, but data-dependent on the status load
add r2, r2, r3 // r2 unchanged, now carries the dependence
// Check access flags has
// kAccClassIsFinalizable
ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
bne .Lart_quick_alloc_object_rosalloc_slow_path
ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// TODO: consider using ldrd.
ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp r3, r12
bhs .Lart_quick_alloc_object_rosalloc_slow_path
ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3)
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation
bhs .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size.
// Align up the size by the rosalloc
// bracket quantum size and divide
// by the quantum size and subtract
// by 1. This code is a shorter but
// equivalent version.
sub r3, r3, #1
lsr r3, r3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
// Load the rosalloc run (r12)
add r12, r9, r3, lsl #POINTER_SIZE_SHIFT
ldr r12, [r12, #THREAD_ROSALLOC_RUNS_OFFSET]
// Load the free list head (r3). This
// will be the return val.
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
cbz r3, .Lart_quick_alloc_object_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
// next pointer.
str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
// Store the class pointer in the
// header. This also overwrites the
// next pointer. The offsets are
// asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
POISON_HEAP_REF r2
str r2, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
// the class status load with respect
// to later accesses to the class
// object. Alternatively we could use
// "ishst" if we use load-acquire for
// the class status load.)
// Needs to be done before pushing on
// allocation since Heap::VisitObjects
// relies on seeing the class pointer.
// b/28790624
dmb ish
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
// allocation stack top.
ldr r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
str r3, [r1], #COMPRESSED_REFERENCE_SIZE // (Increment r1 as a side effect.)
str r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
// Decrement the size of the free list
ldr r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
sub r1, #1
// TODO: consider combining this store
// and the list head store above using
// strd.
str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
mov r0, r3 // Set the return value and return.
bx lr
.Lart_quick_alloc_object_rosalloc_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
//
// r0: type_idx/return value, r1: ArtMethod*, r2: class, r9: Thread::Current, r3, r12: free.
// Need to preserve r0 and r1 to the slow path.
// On success: bumps thread_local_pos by the (aligned) object size, stores the
// class pointer, fences, and returns the object in r0 (does NOT fall through).
// On any check failure: branches to \slowPathLabel with r0/r1 intact.
.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
cbz r2, \slowPathLabel // Check null class
// Check class status.
ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
bne \slowPathLabel
// Add a fake dependence from the
// following access flag and size
// loads to the status load.
// This is to prevent those loads
// from being reordered above the
// status load and reading wrong
// values (an alternative is to use
// a load-acquire for the status).
eor r3, r3, r3 // r3 = 0, but data-dependent on the status load
add r2, r2, r3 // r2 unchanged, now carries the dependence
// Check access flags has
// kAccClassIsFinalizable.
ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
bne \slowPathLabel
// Load thread_local_pos (r12) and
// thread_local_end (r3) with ldrd.
// Check constraints for ldrd.
#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
#endif
ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
sub r12, r3, r12 // Compute the remaining buf size.
ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3).
cmp r3, r12 // Check if it fits. OK to do this
// before rounding up the object size
// assuming the buf size alignment.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
// Round up the object size by the
// object alignment. (addr + 7) & ~7.
add r3, r3, #OBJECT_ALIGNMENT_MASK
and r3, r3, #OBJECT_ALIGNMENT_MASK_TOGGLED
// Reload old thread_local_pos (r0)
// for the return value.
ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET]
add r1, r0, r3
str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r1, r1, #1
str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
POISON_HEAP_REF r2
str r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
// that the code after this allocation
// site will see the right values in
// the fields of the class.
// Alternatively we could use "ishst"
// if we use load-acquire for the
// class status load.)
dmb ish
bx lr
.endm
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
// Resolves the class from the method's dex cache, then tries the shared TLAB
// fast path; falls back to artAllocObjectFromCodeTLAB on any failure.
// Not usable with read barriers: returns -1 immediately in that configuration.
ENTRY art_quick_alloc_object_tlab
// Fast path tlab allocation.
// r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
// r2, r3, r12: free.
#if defined(USE_READ_BARRIER)
mvn r0, #0 // Read barrier not supported here.
bx lr // Return -1.
#endif
ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
// Load the class (r2)
ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC.
mov r2, r9 // Pass Thread::Current.
bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_tlab
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
// Like the TLAB variant but requires read barriers: the class loaded from the
// dex cache is marked via artReadBarrierMark when GC is concurrently marking.
// Returns -1 immediately when built without read barrier support.
ENTRY art_quick_alloc_object_region_tlab
// Fast path tlab allocation.
// r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current, r2, r3, r12: free.
#if !defined(USE_READ_BARRIER)
eor r0, r0, r0 // Read barrier must be enabled here.
sub r0, r0, #1 // Return -1.
bx lr
#endif
ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
// Load the class (r2)
ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
// Read barrier for class load.
ldr r3, [r9, #THREAD_IS_GC_MARKING_OFFSET]
cbnz r3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
// The read barrier slow path. Mark
// the class.
push {r0, r1, r3, lr} // Save registers. r3 is pushed only
// to align sp by 16 bytes.
mov r0, r2 // Pass the class as the first param.
bl artReadBarrierMark
mov r2, r0 // Get the (marked) class back.
pop {r0, r1, r3, lr}
b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_region_tlab_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC.
mov r2, r9 // Pass Thread::Current.
bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_region_tlab
/*
 * Called by managed code when the value in rSUSPEND has been decremented to 0.
 * With ARM_R4_SUSPEND_FLAG, rSUSPEND is reset and the call is skipped when the
 * thread's flags are clear; otherwise artTestSuspendFromCode is always called.
 */
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
#ifdef ARM_R4_SUSPEND_FLAG
ldrh r0, [rSELF, #THREAD_FLAGS_OFFSET]
mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
cbnz r0, 1f @ check Thread::Current()->suspend_count_ == 0
bx lr @ return if suspend_count_ == 0
1:
#endif
mov r0, rSELF
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves for GC stack crawl
@ TODO: save FPRs to enable access in the debugger?
bl artTestSuspendFromCode @ (Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
@ Suspend check entered via an implicit (fault-based) mechanism; always calls
@ into the runtime with a crawlable stack, no flag fast path.
ENTRY art_quick_implicit_suspend
mov r0, rSELF
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves for stack crawl
bl artTestSuspendFromCode @ (Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend
/*
 * Called by managed code that is attempting to call a method on a proxy class. On entry
 * r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The
 * frame size of the invoked proxy method agrees with a ref and args callee save frame.
 * Returns the (possibly wide) result in r0/r1, mirrored into d0 for FP returns.
 */
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
// Tear down the callee-save frame. Skip arg registers.
add sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
.cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
cbnz r2, 1f @ branch to deliver if an exception is pending
vmov d0, r0, r1 @ store into fpr, for when it's a fpr return...
bx lr @ return on success
1:
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
/*
 * Called to resolve an imt conflict.
 * r0 is the conflict ArtMethod.
 * r12 is a hidden argument that holds the target interface method's dex method index.
 *
 * Note that this stub writes to r0, r4, and r12.
 * Walks the (interface method, implementation) pairs of the ImtConflictTable
 * and tail-calls the matching implementation; a null entry means a miss and
 * falls back to the interface-invoke trampoline.
 */
ENTRY art_quick_imt_conflict_trampoline
ldr r4, [sp, #0] // Load referrer
ldr r4, [r4, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_32] // Load dex cache methods array
ldr r12, [r4, r12, lsl #POINTER_SIZE_SHIFT] // Load interface method
ldr r0, [r0, #ART_METHOD_JNI_OFFSET_32] // Load ImtConflictTable
ldr r4, [r0] // Load first entry in ImtConflictTable.
.Limt_table_iterate:
cmp r4, r12
// Branch if found. Benchmarks have shown doing a branch here is better.
beq .Limt_table_found
// If the entry is null, the interface method is not in the ImtConflictTable.
cbz r4, .Lconflict_trampoline
// Iterate over the entries of the ImtConflictTable.
ldr r4, [r0, #(2 * __SIZEOF_POINTER__)]! // advance r0 to next pair (writeback)
b .Limt_table_iterate
.Limt_table_found:
// We successfully hit an entry in the table. Load the target method
// and jump to it.
ldr r0, [r0, #__SIZEOF_POINTER__]
ldr pc, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32]
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
@ Lazily resolves and links the called method, then tail-calls its code with
@ the original arguments restored. Delivers the pending exception when the
@ runtime returns a null code pointer.
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP)
cbz r0, 1f @ is code pointer null? goto exception
mov r12, r0
ldr r0, [sp, #0] @ load resolved method in r0
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
bx r12 @ tail-call into actual code
1:
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
/*
 * Called to do a generic JNI down-call.
 * artQuickGenericJniTrampoline builds the native call frame inside a large
 * alloca (handle scope, out args); the native method is then invoked with
 * args popped from that frame, and artQuickGenericJniEndTrampoline converts
 * the result / releases the JNI state. r10 snapshots the managed SP (and is
 * the CFA while the alloca is live); r11 preserves rSELF across the native call.
 */
ENTRY art_quick_generic_jni_trampoline
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
// Save rSELF
mov r11, rSELF
// Save SP , so we can have static CFI info. r10 is saved in ref_and_args.
mov r10, sp
.cfi_def_cfa_register r10
sub sp, sp, #5120 @ scratch space for the runtime to build the native frame
// prepare for artQuickGenericJniTrampoline call
// (Thread*, SP)
// r0 r1 <= C calling convention
// rSELF r10 <= where they are
mov r0, rSELF // Thread*
mov r1, r10
blx artQuickGenericJniTrampoline // (Thread*, sp)
// The C call will have registered the complete save-frame on success.
// The result of the call is:
// r0: pointer to native code, 0 on error.
// r1: pointer to the bottom of the used area of the alloca, can restore stack till there.
// Check for error = 0.
cbz r0, .Lexception_in_native
// Release part of the alloca.
mov sp, r1
// Save the code pointer
mov r12, r0
// Load parameters from frame into registers.
pop {r0-r3}
// Softfloat.
// TODO: Change to hardfloat when supported.
blx r12 // native call.
// result sign extension is handled in C code
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
// r0 r2,r3 stack <= C calling convention
// r11 r0,r1 r0,r1 <= where they are
sub sp, sp, #8 // Stack alignment.
push {r0-r1} @ result_f passed on the stack (same bits as the GPR result)
mov r3, r1
mov r2, r0
mov r0, r11
blx artQuickGenericJniEndTrampoline
// Restore self pointer.
mov r9, r11
// Pending exceptions possible.
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
cbnz r2, .Lexception_in_native
// Tear down the alloca.
mov sp, r10
.cfi_def_cfa_register sp
// Tear down the callee-save frame. Skip arg registers.
add sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
.cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
// store into fpr, for when it's a fpr return...
vmov d0, r0, r1
bx lr // ret
// Undo the unwinding information from above since it doesn't apply below.
.cfi_def_cfa_register r10
.cfi_adjust_cfa_offset FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
.Lexception_in_native:
ldr sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ unwind to the managed frame
.cfi_def_cfa_register sp
# This will create a new save-all frame, required by the runtime.
DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
@ Bridge from compiled code into the interpreter: runs the method via
@ artQuickToInterpreterBridge and returns its (possibly wide) result in
@ r0/r1 (mirrored into d0 for FP returns), or delivers a pending exception.
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r1, r2
mov r1, r9 @ pass Thread::Current
mov r2, sp @ pass SP
blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
// Tear down the callee-save frame. Skip arg registers.
add sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
.cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
cbnz r2, 1f @ branch to deliver if an exception is pending
vmov d0, r0, r1 @ store into fpr, for when it's a fpr return...
bx lr @ return on success
1:
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
/*
 * Routine that intercepts method calls and returns.
 * art_quick_instrumentation_entry notifies the runtime of method entry and
 * then calls the real code with lr pointed at art_quick_instrumentation_exit,
 * which in turn notifies the runtime of method exit and returns to the
 * address the runtime hands back (r0 = return pc, r1 = lr for deoptimization).
 * The exit stub is a bare label (no ENTRY) because it must be reachable by
 * straight-line fall-through from the entry stub.
 */
.extern artInstrumentationMethodEntryFromCode
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
@ Make stack crawlable and clobber r2 and r3 (post saving)
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3
@ preserve r0 (not normally an arg) knowing there is a spare slot in kRefsAndArgs.
str r0, [sp, #4]
mov r2, r9 @ pass Thread::Current
mov r3, lr @ pass LR
blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, LR)
mov r12, r0 @ r12 holds reference to code
ldr r0, [sp, #4] @ restore r0
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
blx r12 @ call method with lr set to art_quick_instrumentation_exit
@ Deliberate fall-through into art_quick_instrumentation_exit.
.type art_quick_instrumentation_exit, #function
.global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
mov lr, #0 @ link register is to here, so clobber with 0 for later checks
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ set up frame knowing r2 and r3 must be dead on exit
mov r12, sp @ remember bottom of caller's frame
push {r0-r1} @ save return value
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
vpush {d0} @ save fp return value
.cfi_adjust_cfa_offset 8
sub sp, #8 @ space for return value argument. Note: AAPCS stack alignment is 8B, no
@ need to align by 16.
.cfi_adjust_cfa_offset 8
vstr d0, [sp] @ d0 -> [sp] for fpr_res
mov r2, r0 @ pass return value as gpr_res
mov r3, r1
mov r0, r9 @ pass Thread::Current
mov r1, r12 @ pass SP
blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res, fpr_res)
add sp, #8
.cfi_adjust_cfa_offset -8
mov r2, r0 @ link register saved by instrumentation
mov lr, r1 @ r1 is holding link register if we're to bounce to deoptimize
vpop {d0} @ restore fp return value
.cfi_adjust_cfa_offset -8
pop {r0, r1} @ restore return value
.cfi_adjust_cfa_offset -8
.cfi_restore r0
.cfi_restore r1
add sp, #32 @ remove callee save frame
.cfi_adjust_cfa_offset -32
bx r2 @ return
END art_quick_instrumentation_entry
/*
 * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
 * will long jump to the upcall with a special exception of -1.
 * artDeoptimize does not return (hence no epilogue after the call).
 */
.extern artDeoptimize
ENTRY art_quick_deoptimize
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1
mov r0, r9 @ Set up args.
blx artDeoptimize @ artDeoptimize(Thread*)
END art_quick_deoptimize
/*
 * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
 * will long jump to the interpreter bridge.
 * artDeoptimizeFromCompiledCode does not return.
 */
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1
mov r0, r9 @ Set up args.
blx artDeoptimizeFromCompiledCode @ artDeoptimizeFromCompiledCode(Thread*)
END art_quick_deoptimize_from_compiled_code
/*
 * Signed 64-bit integer multiply.
 *
 * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
 * WX
 * x YZ
 * --------
 * ZW ZX
 * YW YX
 *
 * The low word of the result holds ZX, the high word holds
 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
 * it doesn't fit in the low 64 bits.
 *
 * Unlike most ARM math operations, multiply instructions have
 * restrictions on using the same register more than once (Rd and Rm
 * cannot be the same).
 * In: r1:r0 = a, r3:r2 = b. Out: r1:r0 = a * b. r9/r10 used as scratch
 * and preserved via the stack (they are reserved registers in ART).
 */
/* mul-long vAA, vBB, vCC */
ENTRY art_quick_mul_long
push {r9 - r10}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r9, 0
.cfi_rel_offset r10, 4
mul ip, r2, r1 @ ip<- ZxW
umull r9, r10, r2, r0 @ r9/r10 <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
mov r0,r9 @ low word of product
mov r1,r10 @ high word of product
pop {r9 - r10}
.cfi_adjust_cfa_offset -8
.cfi_restore r9
.cfi_restore r10
bx lr
END art_quick_mul_long
/*
 * Long integer shift. This is different from the generic 32/64-bit
 * binary operations because vAA/vBB are 64-bit but vCC (the shift
 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
 * 6 bits.
 * On entry:
 * r0: low word
 * r1: high word
 * r2: shift count
 */
/* shl-long vAA, vBB, vCC */
ARM_ENTRY art_quick_shl_long @ ARM code as thumb code requires spills
and r2, r2, #63 @ r2<- r2 & 0x3f
mov r1, r1, asl r2 @ r1<- r1 << r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
mov r0, r0, asl r2 @ r0<- r0 << r2
bx lr
END art_quick_shl_long
/*
 * Long integer shift (arithmetic). This is different from the generic 32/64-bit
 * binary operations because vAA/vBB are 64-bit but vCC (the shift
 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
 * 6 bits.
 * On entry:
 * r0: low word
 * r1: high word
 * r2: shift count
 */
/* shr-long vAA, vBB, vCC */
ARM_ENTRY art_quick_shr_long @ ARM code as thumb code requires spills
and r2, r2, #63 @ r2<- r2 & 0x3f
mov r0, r0, lsr r2 @ r0<- r0 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
mov r1, r1, asr r2 @ r1<- r1 >> r2 (arithmetic, keeps sign)
bx lr
END art_quick_shr_long
/*
 * Long integer shift (logical/unsigned). This is different from the generic
 * 32/64-bit binary operations because vAA/vBB are 64-bit but vCC (the shift
 * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
 * 6 bits.
 * On entry:
 * r0: low word
 * r1: high word
 * r2: shift count
 */
/* ushr-long vAA, vBB, vCC */
ARM_ENTRY art_quick_ushr_long @ ARM code as thumb code requires spills
and r2, r2, #63 @ r2<- r2 & 0x3f
mov r0, r0, lsr r2 @ r0<- r0 >>> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
mov r1, r1, lsr r2 @ r1<- r1 >>> r2 (logical, zero fill)
bx lr
END art_quick_ushr_long
/*
 * String's indexOf.
 *
 * On entry:
 * r0: string object (known non-null)
 * r1: char to match (known <= 0xFFFF)
 * r2: Starting offset in string data
 * Returns the index of the first occurrence in r0, or -1 if not found.
 * Compares four chars per iteration, then handles the remainder one at a
 * time; the match labels undo the loop's pointer pre-increment bias.
 */
ENTRY art_quick_indexof
push {r4, r10-r11, lr} @ 4 words of callee saves
.cfi_adjust_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r10, 4
.cfi_rel_offset r11, 8
.cfi_rel_offset lr, 12
ldr r3, [r0, #MIRROR_STRING_COUNT_OFFSET]
add r0, #MIRROR_STRING_VALUE_OFFSET
/* Clamp start to [0..count] */
cmp r2, #0
it lt
movlt r2, #0
cmp r2, r3
it gt
movgt r2, r3
/* Save a copy in r12 to later compute result */
mov r12, r0
/* Build pointer to start of data to compare and pre-bias */
add r0, r0, r2, lsl #1
sub r0, #2 @ pre-bias for the ldrh pre-increment below
/* Compute iteration count */
sub r2, r3, r2
/*
 * At this point we have:
 * r0: start of data to test
 * r1: char to compare
 * r2: iteration count
 * r12: original start of string data
 * r3, r4, r10, r11 available for loading string data
 */
subs r2, #4
blt .Lindexof_remainder
.Lindexof_loop4:
ldrh r3, [r0, #2]!
ldrh r4, [r0, #2]!
ldrh r10, [r0, #2]!
ldrh r11, [r0, #2]!
cmp r3, r1
beq .Lmatch_0
cmp r4, r1
beq .Lmatch_1
cmp r10, r1
beq .Lmatch_2
cmp r11, r1
beq .Lmatch_3
subs r2, #4
bge .Lindexof_loop4
.Lindexof_remainder:
adds r2, #4
beq .Lindexof_nomatch
.Lindexof_loop1:
ldrh r3, [r0, #2]!
cmp r3, r1
beq .Lmatch_3 @ r0 already points at the match: same fixup as match_3
subs r2, #1
bne .Lindexof_loop1
.Lindexof_nomatch:
mov r0, #-1
pop {r4, r10-r11, pc}
.Lmatch_0:
sub r0, #6 @ back up over the 3 chars loaded after the match
sub r0, r12
asr r0, r0, #1 @ byte offset -> char index
pop {r4, r10-r11, pc}
.Lmatch_1:
sub r0, #4
sub r0, r12
asr r0, r0, #1
pop {r4, r10-r11, pc}
.Lmatch_2:
sub r0, #2
sub r0, r12
asr r0, r0, #1
pop {r4, r10-r11, pc}
.Lmatch_3:
sub r0, r12
asr r0, r0, #1
pop {r4, r10-r11, pc}
END art_quick_indexof
/*
 * String's compareTo.
 *
 * Requires rARG0/rARG1 to have been previously checked for null. Will
 * return negative if this's string is < comp, 0 if they are the
 * same and positive if >.
 *
 * On entry:
 * r0: this object pointer
 * r1: comp object pointer
 *
 * Identical references return 0 immediately. Otherwise compares up to
 * min(count, count') chars (unrolled 2x, then 3x, falling back to
 * __memcmp16 for long strings) and returns the first char difference,
 * or the count difference if one string is a prefix of the other.
 */
.extern __memcmp16
ENTRY art_quick_string_compareto
mov r2, r0 @ this to r2, opening up r0 for return value
sub r0, r2, r1 @ Same?
cbnz r0,1f
bx lr @ identical references: result 0 already in r0
1: @ Different string objects: do the full comparison.
push {r4, r7-r12, lr} @ 8 words - keep alignment
.cfi_adjust_cfa_offset 32
.cfi_rel_offset r4, 0
.cfi_rel_offset r7, 4
.cfi_rel_offset r8, 8
.cfi_rel_offset r9, 12
.cfi_rel_offset r10, 16
.cfi_rel_offset r11, 20
.cfi_rel_offset r12, 24
.cfi_rel_offset lr, 28
ldr r7, [r2, #MIRROR_STRING_COUNT_OFFSET]
ldr r10, [r1, #MIRROR_STRING_COUNT_OFFSET]
add r2, #MIRROR_STRING_VALUE_OFFSET
add r1, #MIRROR_STRING_VALUE_OFFSET
/*
 * At this point, we have:
 * value: r2/r1
 * offset: r4/r9
 * count: r7/r10
 * We're going to compute
 * r11 <- countDiff
 * r10 <- minCount
 */
subs r11, r7, r10
it ls
movls r10, r7
/*
 * Note: data pointers point to previous element so we can use pre-index
 * mode with base writeback.
 */
subs r2, #2 @ offset to contents[-1]
subs r1, #2 @ offset to contents[-1]
/*
 * At this point we have:
 * r2: *this string data
 * r1: *comp string data
 * r10: iteration count for comparison
 * r11: value to return if the first part of the string is equal
 * r0: reserved for result
 * r3, r4, r7, r8, r9, r12 available for loading string data
 */
subs r10, #2
blt .Ldo_remainder2
/*
 * Unroll the first two checks so we can quickly catch early mismatch
 * on long strings (but preserve incoming alignment)
 */
ldrh r3, [r2, #2]!
ldrh r4, [r1, #2]!
ldrh r7, [r2, #2]!
ldrh r8, [r1, #2]!
subs r0, r3, r4
it eq
subseq r0, r7, r8
bne .Ldone @ early mismatch: first differing char diff in r0
cmp r10, #28
bgt .Ldo_memcmp16 @ long strings: hand off to __memcmp16
subs r10, #3
blt .Ldo_remainder
.Lloopback_triple:
ldrh r3, [r2, #2]!
ldrh r4, [r1, #2]!
ldrh r7, [r2, #2]!
ldrh r8, [r1, #2]!
ldrh r9, [r2, #2]!
ldrh r12,[r1, #2]!
subs r0, r3, r4
it eq
subseq r0, r7, r8
it eq
subseq r0, r9, r12
bne .Ldone
subs r10, #3
bge .Lloopback_triple
.Ldo_remainder:
adds r10, #3
beq .Lreturn_diff
.Lloopback_single:
ldrh r3, [r2, #2]!
ldrh r4, [r1, #2]!
subs r0, r3, r4
bne .Ldone
subs r10, #1
bne .Lloopback_single
.Lreturn_diff:
mov r0, r11 @ common prefix: result is the count difference
pop {r4, r7-r12, pc}
.Ldo_remainder2:
adds r10, #2
bne .Lloopback_single
mov r0, r11
pop {r4, r7-r12, pc}
/* Long string case */
.Ldo_memcmp16:
mov r7, r11 @ save countDiff across the call
add r0, r2, #2 @ undo the pre-index bias for the C-style call
add r1, r1, #2
mov r2, r10
bl __memcmp16
cmp r0, #0
it eq
moveq r0, r7 @ equal prefixes: fall back to count difference
.Ldone:
pop {r4, r7-r12, pc}
END art_quick_string_compareto
/* Assembly routines used to handle ABI differences. */
/* double fmod(double a, double b) */
/* Hardfloat-in, softfloat-call shim: moves d0/d1 into r0-r3 for the softfloat
 * libm fmod and moves the result back into d0. The push {lr} + sub sp, #4
 * together keep the AAPCS 8-byte stack alignment at the call. */
.extern fmod
ENTRY art_quick_fmod
push {lr}
.cfi_adjust_cfa_offset 4
.cfi_rel_offset lr, 0
sub sp, #4 @ maintain 8-byte stack alignment for the call
.cfi_adjust_cfa_offset 4
vmov r0, r1, d0 @ a -> r0:r1
vmov r2, r3, d1 @ b -> r2:r3
bl fmod
vmov d0, r0, r1 @ result -> d0
add sp, #4
.cfi_adjust_cfa_offset -4
pop {pc}
END art_quick_fmod
/* float fmodf(float a, float b) */
/* Hardfloat-in, softfloat-call shim: s0/s1 (packed in d0) are moved into
 * r0/r1 for the softfloat libm fmodf and the result is moved back into s0.
 * push {lr} + sub sp, #4 keep AAPCS 8-byte stack alignment at the call. */
.extern fmodf
ENTRY art_quick_fmodf
push {lr}
.cfi_adjust_cfa_offset 4
.cfi_rel_offset lr, 0
sub sp, #4 @ maintain 8-byte stack alignment for the call
.cfi_adjust_cfa_offset 4
vmov r0, r1, d0 @ d0 = {s0, s1} = {a, b} -> r0, r1
bl fmodf
vmov s0, r0 @ result -> s0
add sp, #4
.cfi_adjust_cfa_offset -4
pop {pc}
@ Bug fix: this END previously named art_quick_fmod, so .size/.cfi bookkeeping
@ was attached to the wrong symbol; it must close the matching ENTRY.
END art_quick_fmodf
/* int64_t art_d2l(double d) */
/* Tail-call shim: d0 -> r0:r1 per softfloat AAPCS, then branch (not call)
 * to the C helper, which returns straight to our caller in r0:r1. */
.extern art_d2l
ENTRY art_quick_d2l
vmov r0, r1, d0
b art_d2l @ tail call; art_d2l's return is our return
END art_quick_d2l
/* int64_t art_f2l(float f) */
/* Tail-call shim: s0 -> r0 per softfloat AAPCS, then branch (not call)
 * to the C helper, which returns straight to our caller in r0:r1. */
.extern art_f2l
ENTRY art_quick_f2l
vmov r0, s0
b art_f2l @ tail call; art_f2l's return is our return
END art_quick_f2l
/* float art_l2f(int64_t l) */
/* The int64 argument is already in r0:r1, so only the float result needs
 * moving back into s0. Cannot tail call: we must regain control after the
 * call to do the vmov. push {lr} + sub sp, #4 keep 8-byte stack alignment. */
.extern art_l2f
ENTRY art_quick_l2f
push {lr}
.cfi_adjust_cfa_offset 4
.cfi_rel_offset lr, 0
sub sp, #4 @ maintain 8-byte stack alignment for the call
.cfi_adjust_cfa_offset 4
bl art_l2f
vmov s0, r0 @ result -> s0
add sp, #4
.cfi_adjust_cfa_offset -4
pop {pc}
END art_quick_l2f
|
abforce/xposed_art_n
| 1,829
|
runtime/arch/arm64/asm_support_arm64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_S_
#define ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_S_
#include "asm_support_arm64.h"
// Define special registers.
// Register holding Thread::Current().
#define xSELF x19
// Frame Pointer
#define xFP x29
// Link Register
#define xLR x30
// Define the intraprocedural linkage temporary registers.
#define xIP0 x16
#define wIP0 w16
#define xIP1 x17
#define wIP1 w17
// Open a global (but ELF-hidden) function: emit the symbol bookkeeping,
// align the entry point to 16 bytes, and start the CFI region that the
// matching END macro closes.
.macro ENTRY fname
    .global \fname
    .hidden \fname              // Hidden visibility: direct calls, no PLT indirection.
    .type \fname, #function
    .balign 16                  // Cache-friendly alignment for the function entry.
\fname:
    .cfi_startproc
.endm
// Close a function opened with ENTRY: terminate its CFI region and record
// the symbol size as the distance from its label to the current location.
.macro END fname
    .cfi_endproc
    .size \fname, .-\fname
.endm
// Define a placeholder entry point whose body is a single breakpoint, so
// any call into a not-yet-implemented stub traps immediately.
.macro UNIMPLEMENTED fname
ENTRY \fname
    brk 0                       // Trap: this stub must never be reached.
END \fname
.endm
// Macros to poison (negate) the reference for heap poisoning.
// Poison a heap reference before it is stored: with heap poisoning enabled
// references live in memory negated, so this negates the register in place.
// Compiles to nothing when USE_HEAP_POISONING is off.
.macro POISON_HEAP_REF ref
#if defined(USE_HEAP_POISONING)
    neg \ref, \ref
#endif  // USE_HEAP_POISONING
.endm
// Macros to unpoison (negate) the reference for heap poisoning.
// Unpoison a heap reference after it is loaded: negation is its own
// inverse, so this undoes POISON_HEAP_REF. Compiles to nothing when
// USE_HEAP_POISONING is off.
.macro UNPOISON_HEAP_REF ref
#if defined(USE_HEAP_POISONING)
    neg \ref, \ref
#endif  // USE_HEAP_POISONING
.endm
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_S_
|
abforce/xposed_art_n
| 1,667
|
runtime/arch/arm64/jni_entrypoints_arm64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_arm64.S"
/*
* Jni dlsym lookup stub.
*/
.extern artFindNativeMethod
// TODO: Add CFI directives.
/*
 * Lazy JNI resolution stub. All native-call argument registers (x0-x7,
 * d0-d7) plus FP/LR are preserved across the call to artFindNativeMethod,
 * then the original arguments are reloaded so the resolved native code can
 * be tail-called as if it had been invoked directly. The result is parked
 * in x17 (IP1) because that scratch register survives the reload sequence
 * and is legal to clobber across the branch.
 */
ENTRY art_jni_dlsym_lookup_stub
// spill regs.
stp x29, x30, [sp, #-16]!
mov x29, sp
stp d6, d7, [sp, #-16]!
stp d4, d5, [sp, #-16]!
stp d2, d3, [sp, #-16]!
stp d0, d1, [sp, #-16]!
stp x6, x7, [sp, #-16]!
stp x4, x5, [sp, #-16]!
stp x2, x3, [sp, #-16]!
stp x0, x1, [sp, #-16]!
bl artFindNativeMethod
mov x17, x0 // store result in scratch reg.
// load spill regs.
ldp x0, x1, [sp], #16
ldp x2, x3, [sp], #16
ldp x4, x5, [sp], #16
ldp x6, x7, [sp], #16
ldp d0, d1, [sp], #16
ldp d2, d3, [sp], #16
ldp d4, d5, [sp], #16
ldp d6, d7, [sp], #16
ldp x29, x30, [sp], #16
cbz x17, 1f // is method code null ?
br x17 // if non-null, tail call to method's code.
1:
ret // restore regs and return to caller to handle exception.
END art_jni_dlsym_lookup_stub
|
abforce/xposed_art_n
| 4,283
|
runtime/arch/arm64/memcmp16_arm64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Assumptions:
*
* ARMv8-a, AArch64
*/
#ifndef ART_RUNTIME_ARCH_ARM64_MEMCMP16_ARM64_S_
#define ART_RUNTIME_ARCH_ARM64_MEMCMP16_ARM64_S_
#include "asm_support_arm64.S"
/* Parameters and result. */
#define src1 x0
#define src2 x1
#define limit x2
#define result x0
/* Internal variables. */
#define data1 x3
#define data1w w3
#define data2 x4
#define data2w w4
#define has_nul x5
#define diff x6
#define endloop x7
#define tmp1 x8
#define tmp2 x9
#define tmp3 x10
#define limit_wd x12
#define mask x13
// WARNING: If you change this code to use x14 and x15, you must also change
// art_quick_string_compareto, which relies on these temps being unused.
/*
 * __memcmp16(src1, src2, limit): compare `limit` 16-bit units.
 * Returns 0 when equal, otherwise (first-differing-halfword of src1) -
 * (same of src2). Fast path compares 8 bytes at a time when both sources
 * share 8-byte alignment; mutually-misaligned inputs fall back to a
 * halfword-at-a-time loop.
 */
ENTRY __memcmp16
cbz limit, .Lret0
lsl limit, limit, #1 /* Half-words to bytes. */
eor tmp1, src1, src2
tst tmp1, #7                     // Same alignment mod 8 for both pointers?
b.ne .Lmisaligned8
ands tmp1, src1, #7
b.ne .Lmutual_align
add limit_wd, limit, #7          // limit_wd = number of dwords, rounded up.
lsr limit_wd, limit_wd, #3
/* Start of performance-critical section -- one 64B cache line. */
.Lloop_aligned:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned:
subs limit_wd, limit_wd, #1
eor diff, data1, data2 /* Non-zero if differences found. */
csinv endloop, diff, xzr, ne /* Last Dword or differences. */
cbz endloop, .Lloop_aligned
/* End of performance-critical section -- one 64B cache line. */
/* Not reached the limit, must have found a diff. */
cbnz limit_wd, .Lnot_limit
/* Limit % 8 == 0 => all bytes significant. */
ands limit, limit, #7
b.eq .Lnot_limit
// Final partial dword: mask off the bytes past `limit` so they cannot
// produce a spurious difference.
lsl limit, limit, #3 /* Bits -> bytes. */
mov mask, #~0
lsl mask, mask, limit
bic data1, data1, mask
bic data2, data2, mask
.Lnot_limit:
// Swap the byte order of diff. Exact reverse is not important, as we only need to detect
// the half-word.
rev diff, diff
// The most significant bit of DIFF marks the least significant bit of change between DATA1/2
clz diff, diff
// Mask off 0xF to have shift amount. Why does ARM64 not have BIC with immediate?!?!
bfi diff, xzr, #0, #4
// Create a 16b mask
mov mask, #0xFFFF
// Shift to the right half-word.
lsr data1, data1, diff
lsr data2, data2, diff
// Mask the lowest half-word.
and data1, data1, mask
and data2, data2, mask
// Compute difference.
sub result, data1, data2
ret
.Lmutual_align:
/* Sources are mutually aligned, but are not currently at an
alignment boundary. Round down the addresses and then mask off
the bytes that precede the start point. */
bic src1, src1, #7
bic src2, src2, #7
add limit, limit, tmp1 /* Adjust the limit for the extra. */
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
ldr data1, [src1], #8
neg tmp1, tmp1 /* Bits to alignment -64. */
ldr data2, [src2], #8
mov tmp2, #~0
/* Little-endian. Early bytes are at LSB. */
lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
add limit_wd, limit, #7
// Force the pre-start bytes identical in both words so they compare equal.
orr data1, data1, tmp2
orr data2, data2, tmp2
lsr limit_wd, limit_wd, #3
b .Lstart_realigned
.Lret0:
mov result, #0
ret
.p2align 6
.Lmisaligned8:
sub limit, limit, #1
1:
/* Perhaps we can do better than this. */
ldrh data1w, [src1], #2
ldrh data2w, [src2], #2
subs limit, limit, #2
// Continue while bytes remain (carry set) AND halfwords were equal.
ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
b.eq 1b
sub result, data1, data2
ret
END __memcmp16
#endif // ART_RUNTIME_ARCH_ARM64_MEMCMP16_ARM64_S_
|
abforce/xposed_art_n
| 83,959
|
runtime/arch/arm64/quick_entrypoints_arm64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_arm64.S"
#include "arch/quick_alloc_entrypoints.S"
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
// Builds the 176-byte kSaveAll callee-save frame: spills the FP and GP
// callee-saves, stores the runtime's kSaveAll ArtMethod* at the frame
// bottom, and publishes sp as the thread's top quick frame.
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kSaveAll] .
// Loads appropriate callee-save-method.
ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
sub sp, sp, #176
.cfi_adjust_cfa_offset 176
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 176)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
// Stack alignment filler [sp, #8].
// FP callee-saves.
stp d8, d9, [sp, #16]
stp d10, d11, [sp, #32]
stp d12, d13, [sp, #48]
stp d14, d15, [sp, #64]
// GP callee-saves
stp x19, x20, [sp, #80]
.cfi_rel_offset x19, 80
.cfi_rel_offset x20, 88
stp x21, x22, [sp, #96]
.cfi_rel_offset x21, 96
.cfi_rel_offset x22, 104
stp x23, x24, [sp, #112]
.cfi_rel_offset x23, 112
.cfi_rel_offset x24, 120
stp x25, x26, [sp, #128]
.cfi_rel_offset x25, 128
.cfi_rel_offset x26, 136
stp x27, x28, [sp, #144]
.cfi_rel_offset x27, 144
.cfi_rel_offset x28, 152
stp x29, xLR, [sp, #160]
.cfi_rel_offset x29, 160
.cfi_rel_offset x30, 168
// Store ArtMethod* Runtime::callee_save_methods_[kSaveAll] at the frame bottom.
str xIP0, [sp]
// Place sp in Thread::Current()->top_quick_frame.
mov xIP0, sp
str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly).
*/
// Builds the 96-byte kRefsOnly callee-save frame: spills the GP
// callee-saves (no FP/arg registers), stores the runtime's kRefsOnly
// ArtMethod* at the frame bottom, and publishes sp as the thread's top
// quick frame.
.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsOnly] .
// Loads appropriate callee-save-method.
ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
sub sp, sp, #96
.cfi_adjust_cfa_offset 96
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 96)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
// GP callee-saves.
// x20 paired with ArtMethod* - see below.
stp x21, x22, [sp, #16]
.cfi_rel_offset x21, 16
.cfi_rel_offset x22, 24
stp x23, x24, [sp, #32]
.cfi_rel_offset x23, 32
.cfi_rel_offset x24, 40
stp x25, x26, [sp, #48]
.cfi_rel_offset x25, 48
.cfi_rel_offset x26, 56
stp x27, x28, [sp, #64]
.cfi_rel_offset x27, 64
.cfi_rel_offset x28, 72
stp x29, xLR, [sp, #80]
.cfi_rel_offset x29, 80
.cfi_rel_offset x30, 88
// Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly].
stp xIP0, x20, [sp]
.cfi_rel_offset x20, 8
// Place sp in Thread::Current()->top_quick_frame.
mov xIP0, sp
str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
// TODO: Probably no need to restore registers preserved by aapcs64.
// Reloads the GP callee-saves spilled by SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// and pops the 96-byte frame. Mirrors the setup layout exactly (x20 lives
// at [sp, #8], paired with the ArtMethod* slot).
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
// Callee-saves.
ldr x20, [sp, #8]
.cfi_restore x20
ldp x21, x22, [sp, #16]
.cfi_restore x21
.cfi_restore x22
ldp x23, x24, [sp, #32]
.cfi_restore x23
.cfi_restore x24
ldp x25, x26, [sp, #48]
.cfi_restore x25
.cfi_restore x26
ldp x27, x28, [sp, #64]
.cfi_restore x27
.cfi_restore x28
ldp x29, xLR, [sp, #80]
.cfi_restore x29
.cfi_restore x30
add sp, sp, #96
.cfi_adjust_cfa_offset -96
.endm
// Discards the 96-byte kRefsOnly frame WITHOUT reloading any registers —
// for paths that never clobbered the callee-saves after the setup.
.macro POP_REFS_ONLY_CALLEE_SAVE_FRAME
add sp, sp, #96
.cfi_adjust_cfa_offset - 96
.endm
// Convenience: restore the kRefsOnly frame and return to the caller.
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
ret
.endm
// Spills the 224-byte kRefsAndArgs frame body: FP argument registers
// d0-d7, core argument registers x1-x7, and the GP callee-saves. Does NOT
// fill the ArtMethod* slot at [sp] — the wrapper macros below do that.
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
sub sp, sp, #224
.cfi_adjust_cfa_offset 224
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 224)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
// Stack alignment filler [sp, #8].
// FP args.
stp d0, d1, [sp, #16]
stp d2, d3, [sp, #32]
stp d4, d5, [sp, #48]
stp d6, d7, [sp, #64]
// Core args.
stp x1, x2, [sp, #80]
.cfi_rel_offset x1, 80
.cfi_rel_offset x2, 88
stp x3, x4, [sp, #96]
.cfi_rel_offset x3, 96
.cfi_rel_offset x4, 104
stp x5, x6, [sp, #112]
.cfi_rel_offset x5, 112
.cfi_rel_offset x6, 120
// x7, Callee-saves.
stp x7, x20, [sp, #128]
.cfi_rel_offset x7, 128
.cfi_rel_offset x20, 136
stp x21, x22, [sp, #144]
.cfi_rel_offset x21, 144
.cfi_rel_offset x22, 152
stp x23, x24, [sp, #160]
.cfi_rel_offset x23, 160
.cfi_rel_offset x24, 168
stp x25, x26, [sp, #176]
.cfi_rel_offset x25, 176
.cfi_rel_offset x26, 184
stp x27, x28, [sp, #192]
.cfi_rel_offset x27, 192
.cfi_rel_offset x28, 200
// x29(callee-save) and LR.
stp x29, xLR, [sp, #208]
.cfi_rel_offset x29, 208
.cfi_rel_offset x30, 216
.endm
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
*
* TODO This is probably too conservative - saving FP & LR.
*/
// Builds the full kRefsAndArgs frame: spills via the _INTERNAL helper,
// stores the runtime's kRefsAndArgs ArtMethod* at the frame bottom, and
// publishes sp as the thread's top quick frame.
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
adrp xIP0, :got:_ZN3art7Runtime9instance_E
ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsAndArgs] .
ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
// Place sp in Thread::Current()->top_quick_frame.
mov xIP0, sp
str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
// Variant of the above for callers that already hold the ArtMethod* in x0:
// skips the runtime lookup and stores x0 at the frame bottom instead.
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
str x0, [sp, #0] // Store ArtMethod* to bottom of stack.
// Place sp in Thread::Current()->top_quick_frame.
mov xIP0, sp
str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
.endm
// TODO: Probably no need to restore registers preserved by aapcs64.
// Reloads everything spilled by the kRefsAndArgs setup (FP args, core
// args, GP callee-saves, FP/LR) and pops the 224-byte frame. Offsets
// mirror SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL exactly.
.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
// FP args.
ldp d0, d1, [sp, #16]
ldp d2, d3, [sp, #32]
ldp d4, d5, [sp, #48]
ldp d6, d7, [sp, #64]
// Core args.
ldp x1, x2, [sp, #80]
.cfi_restore x1
.cfi_restore x2
ldp x3, x4, [sp, #96]
.cfi_restore x3
.cfi_restore x4
ldp x5, x6, [sp, #112]
.cfi_restore x5
.cfi_restore x6
// x7, Callee-saves.
ldp x7, x20, [sp, #128]
.cfi_restore x7
.cfi_restore x20
ldp x21, x22, [sp, #144]
.cfi_restore x21
.cfi_restore x22
ldp x23, x24, [sp, #160]
.cfi_restore x23
.cfi_restore x24
ldp x25, x26, [sp, #176]
.cfi_restore x25
.cfi_restore x26
ldp x27, x28, [sp, #192]
.cfi_restore x27
.cfi_restore x28
// x29(callee-save) and LR.
ldp x29, xLR, [sp, #208]
.cfi_restore x29
.cfi_restore x30
add sp, sp, #224
.cfi_adjust_cfa_offset -224
.endm
// Return to the caller when the helper's result (x0) is zero; otherwise
// fall through to whatever follows the macro expansion.
.macro RETURN_IF_RESULT_IS_ZERO
cbnz x0, 1f // result non-zero branch over
ret // return
1:
.endm
// Return to the caller when the helper's result (x0) is non-zero;
// otherwise fall through to whatever follows the macro expansion.
.macro RETURN_IF_RESULT_IS_NON_ZERO
cbz x0, 1f // result zero branch over
ret // return
1:
.endm
/*
* Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
* exception is Thread::Current()->exception_
*/
// Hands control to the runtime to deliver Thread::Current()->exception_.
// Builds a kSaveAll frame as the long-jump context; the C++ helper never
// returns, so the trailing brk is a defensive trap.
.macro DELIVER_PENDING_EXCEPTION
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
mov x0, xSELF
// Point of no return.
b artDeliverPendingExceptionFromCode // artDeliverPendingExceptionFromCode(Thread*)
brk 0 // Unreached
.endm
// Return normally when no exception is pending; otherwise deliver it.
// \reg is used (and clobbered) to test the thread's exception field.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field.
cbnz \reg, 1f
ret
1:
DELIVER_PENDING_EXCEPTION
.endm
// Default variant: scratches xIP0 for the exception check.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
.endm
// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
// Variant that scratches x1 instead, for stubs that must keep xIP0 live.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
.endm
// Return when the helper reported success (w0 == 0); otherwise deliver
// the pending exception the helper left on the thread.
.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
cbnz w0, 1f // result non-zero branch over
ret // return
1:
DELIVER_PENDING_EXCEPTION
.endm
// Generates a stub that raises a runtime exception taking no extra
// arguments: builds the kSaveAll long-jump context and tail-branches to
// the noreturn C++ thrower.
// Fix: added the defensive `brk 0` after the tail branch, matching the
// ONE_ARG/TWO_ARG variants of this macro — if the supposedly-noreturn
// helper ever returned, we trap instead of executing arbitrary bytes.
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov x0, xSELF // pass Thread::Current
b \cxx_name // \cxx_name(Thread*)
brk 0 // Unreached: \cxx_name never returns.
END \c_name
.endm
// Generates a stub that raises a runtime exception taking one argument
// (already in x0): builds the kSaveAll long-jump context and tail-branches
// to the noreturn C++ thrower; brk traps if it ever returned.
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context.
mov x1, xSELF // pass Thread::Current.
b \cxx_name // \cxx_name(arg, Thread*).
brk 0
END \c_name
.endm
// Generates a stub that raises a runtime exception taking two arguments
// (already in x0/x1): builds the kSaveAll long-jump context and
// tail-branches to the noreturn C++ thrower; brk traps if it ever returned.
.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
b \cxx_name // \cxx_name(arg1, arg2, Thread*)
brk 0
END \c_name
.endm
/*
 * Called by managed code; saves callee saves and then calls the C++ thrower,
 * which will place a mock Method* at the bottom of the stack. Arg1 holds the
 * exception object.
 */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
/*
 * Called by managed code to create and deliver a NullPointerException.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
 * Called by managed code to create and deliver an ArithmeticException.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
/*
 * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
 * index, arg2 holds limit.
 */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
 * Called by managed code to create and deliver a StackOverflowError.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
/*
 * Called by managed code to create and deliver a NoSuchMethodError.
 */
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
* the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
* NOTE: "this" is first visible argument of the target, and so can be found in arg1/x1.
*
* The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
* of the target Method* in x0 and method->code_ in x1.
*
* If unsuccessful, the helper will return null/????. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
* pointing back to the original caller.
*
* Adapted from ARM32 code.
*
* Clobbers xIP0.
*/
// Body of the invoke resolution trampolines: calls the C++ resolver with
// (method_idx in x0, this in x1, Thread*, sp), then either tail-calls the
// resolved method's code (returned in x1, method in x0) with all argument
// registers restored, or delivers the pending exception on failure.
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
mov x2, xSELF // pass Thread::Current
mov x3, sp
bl \cxx_name // (method_idx, this, Thread*, SP)
mov xIP0, x1 // save Method*->code_
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
cbz x0, 1f // did we find the target? if not go to exception delivery
br xIP0 // tail call to target
1:
DELIVER_PENDING_EXCEPTION
.endm
// Emit a named invoke trampoline: wraps INVOKE_TRAMPOLINE_BODY for the
// given C++ resolution helper in an ENTRY/END pair named \stub_name.
.macro INVOKE_TRAMPOLINE stub_name, helper_name
ENTRY \stub_name
    INVOKE_TRAMPOLINE_BODY \helper_name
END \stub_name
.endm
// Instantiate the resolution trampolines used by compiled code for
// invoke-interface/-static/-direct/-super/-virtual with access checks.
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
// Shared prologue for the invoke stubs: carves out a 16-byte-aligned frame
// big enough for the saves + ArtMethod* slot + the w2 bytes of outgoing
// args, spills the callee-saves and the result/shorty pointers (x4/x5),
// installs xFP, copies the managed args onto the stack, and zeroes the
// ArtMethod* slot at [sp]. On exit: x9 = first out slot (sp + 8).
.macro INVOKE_STUB_CREATE_FRAME
SAVE_SIZE=15*8 // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
mov x9, sp // Save stack pointer.
.cfi_register sp,x9
add x10, x2, # SAVE_SIZE_AND_METHOD // calculate size of frame.
sub x10, sp, x10 // Calculate SP position - saves + ArtMethod* + args
and x10, x10, # ~0xf // Enforce 16 byte stack alignment.
mov sp, x10 // Set new SP.
sub x10, x9, #SAVE_SIZE // Calculate new FP (later). Done here as we must move SP
.cfi_def_cfa_register x10 // before this.
.cfi_adjust_cfa_offset SAVE_SIZE
str x28, [x10, #112]
.cfi_rel_offset x28, 112
stp x26, x27, [x10, #96]
.cfi_rel_offset x26, 96
.cfi_rel_offset x27, 104
stp x24, x25, [x10, #80]
.cfi_rel_offset x24, 80
.cfi_rel_offset x25, 88
stp x22, x23, [x10, #64]
.cfi_rel_offset x22, 64
.cfi_rel_offset x23, 72
stp x20, x21, [x10, #48]
.cfi_rel_offset x20, 48
.cfi_rel_offset x21, 56
stp x9, x19, [x10, #32] // Save old stack pointer and x19.
.cfi_rel_offset sp, 32
.cfi_rel_offset x19, 40
stp x4, x5, [x10, #16] // Save result and shorty addresses.
.cfi_rel_offset x4, 16
.cfi_rel_offset x5, 24
stp xFP, xLR, [x10] // Store LR & FP.
.cfi_rel_offset x29, 0
.cfi_rel_offset x30, 8
mov xFP, x10 // Use xFP now, as it's callee-saved.
.cfi_def_cfa_register x29
mov xSELF, x3 // Move thread pointer into SELF register.
// Copy arguments into stack frame.
// Use simple copy routine for now.
// 4 bytes per slot.
// X1 - source address
// W2 - args length
// X9 - destination address.
// W10 - temporary
add x9, sp, #8 // Destination address is bottom of stack + null.
// Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler
// does not have unique-id variables.
1:
cmp w2, #0
beq 2f
sub w2, w2, #4 // Need 65536 bytes of range.
ldr w10, [x1, x2]
str w10, [x9, x2]
b 1b
2:
// Store null into ArtMethod* at bottom of frame.
str xzr, [sp]
.endm
// Shared epilogue for the invoke stubs: calls the method's quick code,
// reloads the callee-saves and the result/shorty pointers from the frame
// anchored at xFP, stores the return value (x0/s0/d0) into the JValue
// according to the shorty's return character, then unwinds and returns.
.macro INVOKE_STUB_CALL_AND_RETURN
// load method-> METHOD_QUICK_CODE_OFFSET
ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
// Branch to method.
blr x9
// Restore return value address and shorty address.
ldp x4,x5, [xFP, #16]
.cfi_restore x4
.cfi_restore x5
ldr x28, [xFP, #112]
.cfi_restore x28
ldp x26, x27, [xFP, #96]
.cfi_restore x26
.cfi_restore x27
ldp x24, x25, [xFP, #80]
.cfi_restore x24
.cfi_restore x25
ldp x22, x23, [xFP, #64]
.cfi_restore x22
.cfi_restore x23
ldp x20, x21, [xFP, #48]
.cfi_restore x20
.cfi_restore x21
// Store result (w0/x0/s0/d0) appropriately, depending on resultType.
ldrb w10, [x5]
// Check the return type and store the correct register into the jvalue in memory.
// Use numeric label as this is a macro and Clang's assembler does not have unique-id variables.
// Don't set anything for a void type.
cmp w10, #'V'
beq 3f
// Is it a double?
cmp w10, #'D'
bne 1f
str d0, [x4]
b 3f
1: // Is it a float?
cmp w10, #'F'
bne 2f
str s0, [x4]
b 3f
2: // Just store x0. Doesn't matter if it is 64 or 32 bits.
str x0, [x4]
3: // Finish up.
ldp x2, x19, [xFP, #32] // Restore stack pointer and x19.
.cfi_restore x19
mov sp, x2
.cfi_restore sp
ldp xFP, xLR, [xFP] // Restore old frame pointer and link register.
.cfi_restore x29
.cfi_restore x30
ret
.endm
/*
* extern"C" void art_quick_invoke_stub(ArtMethod *method, x0
* uint32_t *args, x1
* uint32_t argsize, w2
* Thread *self, x3
* JValue *result, x4
* char *shorty); x5
* +----------------------+
* | |
* | C/C++ frame |
* | LR'' |
* | FP'' | <- SP'
* +----------------------+
* +----------------------+
* | x28 | <- TODO: Remove callee-saves.
* | : |
* | x19 |
* | SP' |
* | X5 |
* | X4 | Saved registers
* | LR' |
* | FP' | <- FP
* +----------------------+
* | uint32_t out[n-1] |
* | : : | Outs
* | uint32_t out[0] |
* | ArtMethod* | <- SP value=null
* +----------------------+
*
* Outgoing registers:
* x0 - Method*
* x1-x7 - integer parameters.
* d0-d7 - Floating point parameters.
* xSELF = self
* SP = & of ArtMethod*
* x1 = "this" pointer.
*
*/
/*
 * Instance-method invoke stub: builds the out frame, then walks the shorty
 * to scatter the packed 32-bit arg slots into the AAPCS64 registers
 * (w/x2-w/x7 for ints/longs — x1 is pre-loaded with "this" — and s/d0-s/d7
 * for floats/doubles) via small computed-goto load tables in which each
 * LOADREG entry is 12 bytes, hence the `counter += 12` stride and the
 * `N*12` fullness checks. Finally calls the method and stores the result.
 */
ENTRY art_quick_invoke_stub
// Spill registers as per AACPS64 calling convention.
INVOKE_STUB_CREATE_FRAME
// Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
// Parse the passed shorty to determine which register to load.
// Load addresses for routines that load WXSD registers.
adr x11, .LstoreW2
adr x12, .LstoreX2
adr x13, .LstoreS0
adr x14, .LstoreD0
// Initialize routine offsets to 0 for integers and floats.
// x8 for integers, x15 for floating point.
mov x8, #0
mov x15, #0
add x10, x5, #1 // Load shorty address, plus one to skip return value.
ldr w1, [x9],#4 // Load "this" parameter, and increment arg pointer.
// Loop to fill registers.
.LfillRegisters:
ldrb w17, [x10], #1 // Load next character in signature, and increment.
cbz w17, .LcallFunction // Exit at end of signature. Shorty 0 terminated.
cmp w17, #'F' // is this a float?
bne .LisDouble
cmp x15, # 8*12 // Skip this load if all registers full.
beq .Ladvance4
add x17, x13, x15 // Calculate subroutine to jump to.
br x17
.LisDouble:
cmp w17, #'D' // is this a double?
bne .LisLong
cmp x15, # 8*12 // Skip this load if all registers full.
beq .Ladvance8
add x17, x14, x15 // Calculate subroutine to jump to.
br x17
.LisLong:
cmp w17, #'J' // is this a long?
bne .LisOther
cmp x8, # 6*12 // Skip this load if all registers full.
beq .Ladvance8
add x17, x12, x8 // Calculate subroutine to jump to.
br x17
.LisOther: // Everything else takes one vReg.
cmp x8, # 6*12 // Skip this load if all registers full.
beq .Ladvance4
add x17, x11, x8 // Calculate subroutine to jump to.
br x17
.Ladvance4:
add x9, x9, #4
b .LfillRegisters
.Ladvance8:
add x9, x9, #8
b .LfillRegisters
// Macro for loading a parameter into a register.
// counter - the register with offset into these tables
// size - the size of the register - 4 or 8 bytes.
// register - the name of the register to be loaded.
.macro LOADREG counter size register return
ldr \register , [x9], #\size
add \counter, \counter, 12
b \return
.endm
// Store ints.
.LstoreW2:
LOADREG x8 4 w2 .LfillRegisters
LOADREG x8 4 w3 .LfillRegisters
LOADREG x8 4 w4 .LfillRegisters
LOADREG x8 4 w5 .LfillRegisters
LOADREG x8 4 w6 .LfillRegisters
LOADREG x8 4 w7 .LfillRegisters
// Store longs.
.LstoreX2:
LOADREG x8 8 x2 .LfillRegisters
LOADREG x8 8 x3 .LfillRegisters
LOADREG x8 8 x4 .LfillRegisters
LOADREG x8 8 x5 .LfillRegisters
LOADREG x8 8 x6 .LfillRegisters
LOADREG x8 8 x7 .LfillRegisters
// Store singles.
.LstoreS0:
LOADREG x15 4 s0 .LfillRegisters
LOADREG x15 4 s1 .LfillRegisters
LOADREG x15 4 s2 .LfillRegisters
LOADREG x15 4 s3 .LfillRegisters
LOADREG x15 4 s4 .LfillRegisters
LOADREG x15 4 s5 .LfillRegisters
LOADREG x15 4 s6 .LfillRegisters
LOADREG x15 4 s7 .LfillRegisters
// Store doubles.
.LstoreD0:
LOADREG x15 8 d0 .LfillRegisters
LOADREG x15 8 d1 .LfillRegisters
LOADREG x15 8 d2 .LfillRegisters
LOADREG x15 8 d3 .LfillRegisters
LOADREG x15 8 d4 .LfillRegisters
LOADREG x15 8 d5 .LfillRegisters
LOADREG x15 8 d6 .LfillRegisters
LOADREG x15 8 d7 .LfillRegisters
.LcallFunction:
INVOKE_STUB_CALL_AND_RETURN
END art_quick_invoke_stub
/* extern"C"
* void art_quick_invoke_static_stub(ArtMethod *method, x0
* uint32_t *args, x1
* uint32_t argsize, w2
* Thread *self, x3
* JValue *result, x4
* char *shorty); x5
*/
/*
 * Static-method invoke stub: identical to art_quick_invoke_stub except
 * that there is no "this" to pre-load, so the integer tables start at
 * w/x1 and hold 7 entries (hence the `7*12` fullness checks against the
 * 12-byte LOADREG stride).
 */
ENTRY art_quick_invoke_static_stub
// Spill registers as per AACPS64 calling convention.
INVOKE_STUB_CREATE_FRAME
// Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
// Parse the passed shorty to determine which register to load.
// Load addresses for routines that load WXSD registers.
adr x11, .LstoreW1_2
adr x12, .LstoreX1_2
adr x13, .LstoreS0_2
adr x14, .LstoreD0_2
// Initialize routine offsets to 0 for integers and floats.
// x8 for integers, x15 for floating point.
mov x8, #0
mov x15, #0
add x10, x5, #1 // Load shorty address, plus one to skip return value.
// Loop to fill registers.
.LfillRegisters2:
ldrb w17, [x10], #1 // Load next character in signature, and increment.
cbz w17, .LcallFunction2 // Exit at end of signature. Shorty 0 terminated.
cmp w17, #'F' // is this a float?
bne .LisDouble2
cmp x15, # 8*12 // Skip this load if all registers full.
beq .Ladvance4_2
add x17, x13, x15 // Calculate subroutine to jump to.
br x17
.LisDouble2:
cmp w17, #'D' // is this a double?
bne .LisLong2
cmp x15, # 8*12 // Skip this load if all registers full.
beq .Ladvance8_2
add x17, x14, x15 // Calculate subroutine to jump to.
br x17
.LisLong2:
cmp w17, #'J' // is this a long?
bne .LisOther2
cmp x8, # 7*12 // Skip this load if all registers full.
beq .Ladvance8_2
add x17, x12, x8 // Calculate subroutine to jump to.
br x17
.LisOther2: // Everything else takes one vReg.
cmp x8, # 7*12 // Skip this load if all registers full.
beq .Ladvance4_2
add x17, x11, x8 // Calculate subroutine to jump to.
br x17
.Ladvance4_2:
add x9, x9, #4
b .LfillRegisters2
.Ladvance8_2:
add x9, x9, #8
b .LfillRegisters2
// Store ints.
.LstoreW1_2:
LOADREG x8 4 w1 .LfillRegisters2
LOADREG x8 4 w2 .LfillRegisters2
LOADREG x8 4 w3 .LfillRegisters2
LOADREG x8 4 w4 .LfillRegisters2
LOADREG x8 4 w5 .LfillRegisters2
LOADREG x8 4 w6 .LfillRegisters2
LOADREG x8 4 w7 .LfillRegisters2
// Store longs.
.LstoreX1_2:
LOADREG x8 8 x1 .LfillRegisters2
LOADREG x8 8 x2 .LfillRegisters2
LOADREG x8 8 x3 .LfillRegisters2
LOADREG x8 8 x4 .LfillRegisters2
LOADREG x8 8 x5 .LfillRegisters2
LOADREG x8 8 x6 .LfillRegisters2
LOADREG x8 8 x7 .LfillRegisters2
// Store singles.
.LstoreS0_2:
LOADREG x15 4 s0 .LfillRegisters2
LOADREG x15 4 s1 .LfillRegisters2
LOADREG x15 4 s2 .LfillRegisters2
LOADREG x15 4 s3 .LfillRegisters2
LOADREG x15 4 s4 .LfillRegisters2
LOADREG x15 4 s5 .LfillRegisters2
LOADREG x15 4 s6 .LfillRegisters2
LOADREG x15 4 s7 .LfillRegisters2
// Store doubles.
.LstoreD0_2:
LOADREG x15 8 d0 .LfillRegisters2
LOADREG x15 8 d1 .LfillRegisters2
LOADREG x15 8 d2 .LfillRegisters2
LOADREG x15 8 d3 .LfillRegisters2
LOADREG x15 8 d4 .LfillRegisters2
LOADREG x15 8 d5 .LfillRegisters2
LOADREG x15 8 d6 .LfillRegisters2
LOADREG x15 8 d7 .LfillRegisters2
.LcallFunction2:
INVOKE_STUB_CALL_AND_RETURN
END art_quick_invoke_static_stub
/* extern"C" void art_quick_osr_stub(void** stack, x0
* size_t stack_size_in_bytes, x1
* const uin8_t* native_pc, x2
* JValue *result, x3
* char *shorty, x4
* Thread *self) x5
*/
/*
 * On-stack-replacement stub: saves the callee-saves and the result/shorty
 * pointers, "calls" .Losr_entry via bl so lr points back here, and
 * .Losr_entry rebuilds the interpreter frame on the stack (args copied,
 * lr planted in the callee's return slot) before branching to the OSR
 * native pc in x2. When the compiled code returns, the result register is
 * stored into the JValue per the shorty's return character and the old
 * frame is restored.
 */
ENTRY art_quick_osr_stub
SAVE_SIZE=15*8 // x3, x4, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
mov x9, sp // Save stack pointer.
.cfi_register sp,x9
sub x10, sp, # SAVE_SIZE
and x10, x10, # ~0xf // Enforce 16 byte stack alignment.
mov sp, x10 // Set new SP.
str x28, [sp, #112]
stp x26, x27, [sp, #96]
stp x24, x25, [sp, #80]
stp x22, x23, [sp, #64]
stp x20, x21, [sp, #48]
stp x9, x19, [sp, #32] // Save old stack pointer and x19.
stp x3, x4, [sp, #16] // Save result and shorty addresses.
stp xFP, xLR, [sp] // Store LR & FP.
mov xSELF, x5 // Move thread pointer into SELF register.
sub sp, sp, #16
str xzr, [sp] // Store null for ArtMethod* slot
// Branch to stub.
bl .Losr_entry
add sp, sp, #16
// Restore return value address and shorty address.
ldp x3,x4, [sp, #16]
ldr x28, [sp, #112]
ldp x26, x27, [sp, #96]
ldp x24, x25, [sp, #80]
ldp x22, x23, [sp, #64]
ldp x20, x21, [sp, #48]
// Store result (w0/x0/s0/d0) appropriately, depending on resultType.
ldrb w10, [x4]
// Check the return type and store the correct register into the jvalue in memory.
// Don't set anything for a void type.
cmp w10, #'V'
beq .Losr_exit
// Is it a double?
cmp w10, #'D'
bne .Lno_double
str d0, [x3]
b .Losr_exit
.Lno_double: // Is it a float?
cmp w10, #'F'
bne .Lno_float
str s0, [x3]
b .Losr_exit
.Lno_float: // Just store x0. Doesn't matter if it is 64 or 32 bits.
str x0, [x3]
.Losr_exit: // Finish up.
ldp x2, x19, [sp, #32] // Restore stack pointer and x19.
ldp xFP, xLR, [sp] // Restore old frame pointer and link register.
mov sp, x2
ret
.Losr_entry:
// Update stack pointer for the callee
sub sp, sp, x1
// Update link register slot expected by the callee.
sub w1, w1, #8
str lr, [sp, x1]
// Copy arguments into stack frame.
// Use simple copy routine for now.
// 4 bytes per slot.
// X0 - source address
// W1 - args length
// SP - destination address.
// W10 - temporary
.Losr_loop_entry:
cmp w1, #0
beq .Losr_loop_exit
sub w1, w1, #4
ldr w10, [x0, x1]
str w10, [sp, x1]
b .Losr_loop_entry
.Losr_loop_exit:
// Branch to the OSR entry point.
br x2
END art_quick_osr_stub
/*
* On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_
*/
/*
 * Long-jump: reload the full register context (all 32 FP regs from fprs_
 * in x1, then the GP regs from gprs_ in x0, walking the array backwards
 * with post-decrement pairs), install the saved SP, and branch to the
 * saved PC. x0 and x1 themselves are loaded last since they address the
 * context while it is being consumed.
 */
ENTRY art_quick_do_long_jump
// Load FPRs
ldp d0, d1, [x1], #16
ldp d2, d3, [x1], #16
ldp d4, d5, [x1], #16
ldp d6, d7, [x1], #16
ldp d8, d9, [x1], #16
ldp d10, d11, [x1], #16
ldp d12, d13, [x1], #16
ldp d14, d15, [x1], #16
ldp d16, d17, [x1], #16
ldp d18, d19, [x1], #16
ldp d20, d21, [x1], #16
ldp d22, d23, [x1], #16
ldp d24, d25, [x1], #16
ldp d26, d27, [x1], #16
ldp d28, d29, [x1], #16
ldp d30, d31, [x1]
// Load GPRs
// TODO: lots of those are smashed, could optimize.
add x0, x0, #30*8
ldp x30, x1, [x0], #-16 // LR & SP
ldp x28, x29, [x0], #-16
ldp x26, x27, [x0], #-16
ldp x24, x25, [x0], #-16
ldp x22, x23, [x0], #-16
ldp x20, x21, [x0], #-16
ldp x18, x19, [x0], #-16
ldp x16, x17, [x0], #-16
ldp x14, x15, [x0], #-16
ldp x12, x13, [x0], #-16
ldp x10, x11, [x0], #-16
ldp x8, x9, [x0], #-16
ldp x6, x7, [x0], #-16
ldp x4, x5, [x0], #-16
ldp x2, x3, [x0], #-16
mov sp, x1
// Need to load PC, it's at the end (after the space for the unused XZR). Use x1.
ldr x1, [x0, #33*8]
// And the value of x0.
ldr x0, [x0]
br x1
END art_quick_do_long_jump
/*
* Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
* possibly null object to lock.
*
* Fast path: thin-lock via ldxr/stxr on the object's lock word; falls back to
* the C++ slow path for null objects, fat locks, id mismatch or count overflow.
*
* Derived from arm32 code.
*/
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
cbz w0, .Lslow_lock
add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore
.Lretry_lock:
ldr w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
ldxr w1, [x4] // exclusive-load the lock word
mov x3, x1
and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
cbnz w3, .Lnot_unlocked // already thin locked
// unlocked case - x1: original lock word that's zero except for the read barrier bits.
orr x2, x1, x2 // x2 holds thread id with count of 0 with preserved read barrier bits
stxr w3, w2, [x4]
cbnz w3, .Llock_stxr_fail // store failed, retry
dmb ishld // acquire barrier (LoadLoad|LoadStore) so later accesses see the locked state
ret
.Lnot_unlocked: // x1: original lock word
lsr w3, w1, LOCK_WORD_STATE_SHIFT
cbnz w3, .Lslow_lock // if either of the top two bits are set, go slow path
eor w2, w1, w2 // lock_word.ThreadId() ^ self->ThreadId()
uxth w2, w2 // zero top 16 bits
cbnz w2, .Lslow_lock // thread ids differ -> contention, go slow path
// same thread -> recursive thin lock; bump the count, checking for overflow first
mov x3, x1 // copy the lock word to check count overflow.
and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits.
add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count in lock word placing in w2 to check overflow
lsr w3, w2, LOCK_WORD_READ_BARRIER_STATE_SHIFT // if either of the upper two bits (28-29) are set, we overflowed.
cbnz w3, .Lslow_lock // if we overflow the count go slow path
add w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count for real
stxr w3, w2, [x4]
cbnz w3, .Llock_stxr_fail // store failed, retry
ret
.Llock_stxr_fail:
b .Lretry_lock // retry
.Lslow_lock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block
mov x1, xSELF // pass Thread::Current
bl artLockObjectFromCode // (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object
// Out-of-line variant of art_quick_lock_object: no thin-lock fast path,
// always calls the artLockObjectFromCode slow path. x0 = object (may be null).
ENTRY art_quick_lock_object_no_inline
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block
mov x1, xSELF // pass Thread::Current
bl artLockObjectFromCode // (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline
/*
* Entry from managed code that calls artUnlockObjectFromCode and delivers exception on
* failure. x0 holds the possibly null object to unlock.
*
* Fast path: thin-lock release (either full release or recursive-count
* decrement); slow path for null, fat locks, or a thread-id mismatch.
*
* Derived from arm32 code.
*/
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
cbz x0, .Lslow_unlock
add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore
.Lretry_unlock:
#ifndef USE_READ_BARRIER
ldr w1, [x4]
#else
ldxr w1, [x4] // Need to use atomic instructions for read barrier
#endif
lsr w2, w1, LOCK_WORD_STATE_SHIFT
cbnz w2, .Lslow_unlock // if either of the top two bits are set, go slow path
ldr w2, [xSELF, #THREAD_ID_OFFSET]
mov x3, x1 // copy lock word to check thread id equality
and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
eor w3, w3, w2 // lock_word.ThreadId() ^ self->ThreadId()
uxth w3, w3 // zero top 16 bits
cbnz w3, .Lslow_unlock // not the owning thread -> slow path
mov x3, x1 // copy lock word to detect transition to unlocked
and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED // zero the read barrier bits
cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
bpl .Lrecursive_thin_unlock // count >= 1 -> just decrement
// transition to unlocked
mov x3, x1
and w3, w3, #LOCK_WORD_READ_BARRIER_STATE_MASK // w3: zero except for the preserved read barrier bits
dmb ish // release barrier (LoadStore|StoreStore) before publishing the unlocked word
#ifndef USE_READ_BARRIER
str w3, [x4]
#else
stxr w2, w3, [x4] // Need to use atomic instructions for read barrier
cbnz w2, .Lunlock_stxr_fail // store failed, retry
#endif
ret
.Lrecursive_thin_unlock: // w1: original lock word
sub w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // decrement count
#ifndef USE_READ_BARRIER
str w1, [x4]
#else
stxr w2, w1, [x4] // Need to use atomic instructions for read barrier
cbnz w2, .Lunlock_stxr_fail // store failed, retry
#endif
ret
.Lunlock_stxr_fail:
b .Lretry_unlock // retry
.Lslow_unlock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC
mov x1, xSELF // pass Thread::Current
bl artUnlockObjectFromCode // (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object
// Out-of-line variant of art_quick_unlock_object: no thin-lock fast path,
// always calls the artUnlockObjectFromCode slow path. x0 = object (may be null).
ENTRY art_quick_unlock_object_no_inline
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC
mov x1, xSELF // pass Thread::Current
bl artUnlockObjectFromCode // (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline
/*
* Entry from managed code that calls artIsAssignableFromCode and on failure calls
* artThrowClassCastException.
* x0/x1 are passed straight through to artIsAssignableFromCode and, on failure,
* to artThrowClassCastException (as the two Class* arguments).
*/
.extern artThrowClassCastException
ENTRY art_quick_check_cast
// Store arguments and link register
// Stack needs to be 16B aligned on calls.
stp x0, x1, [sp,#-32]!
.cfi_adjust_cfa_offset 32
.cfi_rel_offset x0, 0
.cfi_rel_offset x1, 8
str xLR, [sp, #24]
.cfi_rel_offset x30, 24
// Call runtime code
bl artIsAssignableFromCode
// Check for exception
cbz x0, .Lthrow_class_cast_exception
// Restore and return
ldr xLR, [sp, #24]
.cfi_restore x30
ldp x0, x1, [sp], #32
.cfi_restore x0
.cfi_restore x1
.cfi_adjust_cfa_offset -32
ret
.cfi_adjust_cfa_offset 32 // Reset unwind info so following code unwinds.
.Lthrow_class_cast_exception:
// Restore the original x0/x1 (the two Class*) before throwing.
ldr xLR, [sp, #24]
.cfi_restore x30
ldp x0, x1, [sp], #32
.cfi_restore x0
.cfi_restore x1
.cfi_adjust_cfa_offset -32
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
b artThrowClassCastException // (Class*, Class*, Thread*)
brk 0 // We should not return here...
END art_quick_check_cast
// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
// Used to restore caller-saved registers around a call while skipping the
// register that carries the call's result.
.macro POP_REG_NE xReg, offset, xExclude
.ifnc \xReg, \xExclude
ldr \xReg, [sp, #\offset] // restore xReg
.cfi_restore \xReg
.endif
.endm
/*
* Macro to insert read barrier, only used in art_quick_aput_obj.
* xDest, wDest and xObj are registers, offset is a defined literal such as
* MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle
* name mismatch between instructions. This macro uses the lower 32b of register when possible.
* With USE_READ_BARRIER it spills x0-x4/LR and calls artReadBarrierSlow; without
* it, it is a plain (unpoisoned) 32-bit heap-reference load.
* TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
*/
.macro READ_BARRIER xDest, wDest, xObj, offset
#ifdef USE_READ_BARRIER
// Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
stp x0, x1, [sp, #-48]!
.cfi_adjust_cfa_offset 48
.cfi_rel_offset x0, 0
.cfi_rel_offset x1, 8
stp x2, x3, [sp, #16]
.cfi_rel_offset x2, 16
.cfi_rel_offset x3, 24
stp x4, xLR, [sp, #32]
.cfi_rel_offset x4, 32
.cfi_rel_offset x30, 40
// mov x0, \xRef // pass ref in x0 (no-op for now since parameter ref is unused)
.ifnc \xObj, x1
mov x1, \xObj // pass xObj
.endif
mov w2, #\offset // pass offset
bl artReadBarrierSlow // artReadBarrierSlow(ref, xObj, offset)
// No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning.
.ifnc \wDest, w0
mov \wDest, w0 // save return value in wDest
.endif
// Conditionally restore saved registers, skipping the one that now holds the result.
POP_REG_NE x0, 0, \xDest
POP_REG_NE x1, 8, \xDest
POP_REG_NE x2, 16, \xDest
POP_REG_NE x3, 24, \xDest
POP_REG_NE x4, 32, \xDest
ldr xLR, [sp, #40]
.cfi_restore x30
add sp, sp, #48
.cfi_adjust_cfa_offset -48
#else
ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
UNPOISON_HEAP_REF \wDest
#endif // USE_READ_BARRIER
.endm
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
* x0 = array, x1 = index, x2 = value
*
* Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32b. We
* assume, though, that the upper 32b are zeroed out. At least for x1/w1 we can do better by
* using index-zero-extension in load/stores.
*
* Temporaries: x3, x4
* TODO: x4 OK? ip seems wrong here.
*/
// Null-check the array (x0) and delegate to the bounds-checking aput stub;
// a null array goes straight to the NullPointerException throw stub.
// Both targets overwrite the flags before any conditional use, so no flag
// state needs to be preserved here.
ENTRY art_quick_aput_obj_with_null_and_bound_check
cbnz x0, art_quick_aput_obj_with_bound_check // non-null array -> bounds check + store
b art_quick_throw_null_pointer_exception // null array -> throw NPE
END art_quick_aput_obj_with_null_and_bound_check
// Bounds-check x1 against the array length, then fall into art_quick_aput_obj.
// x0 = array (non-null), x1 = index, x2 = value. The unsigned compare (bhi)
// also rejects negative indices reinterpreted as large unsigned values.
ENTRY art_quick_aput_obj_with_bound_check
ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]
cmp w3, w1
bhi art_quick_aput_obj
// Out of bounds: pass index (x0) and length (x1) to the throw stub.
mov x0, x1
mov x1, x3
b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check
#ifdef USE_READ_BARRIER
.extern artReadBarrierSlow
#endif
// Object array store with type check.
// x0 = array (non-null, in bounds), x1 = index, x2 = value (reference or null).
// Fast path: value's class == array's component class. Otherwise calls
// artIsAssignableFromCode, and throws ArrayStoreException on failure.
ENTRY art_quick_aput_obj
cbz x2, .Ldo_aput_null // storing null needs no type check or card mark
READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b
// This also zero-extends to x3
READ_BARRIER x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b
// This also zero-extends to x4
READ_BARRIER x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET // Heap reference = 32b
// This also zero-extends to x3
cmp w3, w4 // value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
POISON_HEAP_REF w2
str w2, [x3, x1, lsl #2] // Heap reference = 32b
// Mark the GC card for the array.
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
lsr x0, x0, #7 // 7 = card shift; NOTE(review): presumably matches the card table's kCardShift - confirm
strb w3, [x3, x0]
ret
.Ldo_aput_null:
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
str w2, [x3, x1, lsl #2] // Heap reference = 32b
ret
.Lcheck_assignability:
// Store arguments and link register
stp x0, x1, [sp,#-32]!
.cfi_adjust_cfa_offset 32
.cfi_rel_offset x0, 0
.cfi_rel_offset x1, 8
stp x2, xLR, [sp, #16]
.cfi_rel_offset x2, 16
.cfi_rel_offset x30, 24
// Call runtime code
mov x0, x3 // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
mov x1, x4 // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
bl artIsAssignableFromCode
// Check for exception
cbz x0, .Lthrow_array_store_exception
// Restore, then redo the store + card mark (same as .Ldo_aput).
ldp x2, x30, [sp, #16]
.cfi_restore x2
.cfi_restore x30
ldp x0, x1, [sp], #32
.cfi_restore x0
.cfi_restore x1
.cfi_adjust_cfa_offset -32
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
POISON_HEAP_REF w2
str w2, [x3, x1, lsl #2] // Heap reference = 32b
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
lsr x0, x0, #7
strb w3, [x3, x0]
ret
.cfi_adjust_cfa_offset 32 // 4 restores after cbz for unwinding.
.Lthrow_array_store_exception:
ldp x2, x30, [sp, #16]
.cfi_restore x2
.cfi_restore x30
ldp x0, x1, [sp], #32
.cfi_restore x0
.cfi_restore x1
.cfi_adjust_cfa_offset -32
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
mov x1, x2 // Pass value.
mov x2, xSELF // Pass Thread::Current.
b artThrowArrayStoreException // (Object*, Object*, Thread*).
brk 0 // Unreached.
END art_quick_aput_obj
// Macro to facilitate adding new allocation entrypoints.
// Generates a stub that forwards x0 plus Thread::Current (in x1) to
// \entrypoint, then executes \return (which must end control flow).
.macro ONE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x1, xSELF // pass Thread::Current
bl \entrypoint // (arg0, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Macro to facilitate adding new allocation entrypoints.
// Forwards x0, x1 plus Thread::Current (in x2) to \entrypoint, then \return.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x2, xSELF // pass Thread::Current
bl \entrypoint // (arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Macro to facilitate adding new allocation entrypoints.
// Forwards x0-x2 plus Thread::Current (in x3) to \entrypoint, then \return.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x3, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Macro to facilitate adding new allocation entrypoints.
// Forwards x0-x3 plus Thread::Current (in x4) to \entrypoint, then \return.
// Note: a trailing DELIVER_PENDING_EXCEPTION after \return was removed — it
// was unreachable dead code, since every \return macro used with these
// downcalls ends control flow (ret or exception delivery). This also makes
// the macro consistent with ONE/TWO/THREE_ARG_DOWNCALL above.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x4, xSELF // pass Thread::Current
bl \entrypoint // (arg0, arg1, arg2, arg3, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return // must not fall through
END \name
.endm
// Macros taking opportunity of code similarities for downcalls with referrer.
// Like ONE_ARG_DOWNCALL, but additionally loads the referrer ArtMethod* (the
// method saved at the top of the refs-only callee-save frame) into x1.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x2, xSELF // pass Thread::Current
bl \entrypoint // (arg0, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Like TWO_ARG_DOWNCALL, but loads the referrer ArtMethod* into x2 and
// Thread::Current into x3.
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
ldr x2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x3, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Like THREE_ARG_DOWNCALL, but loads the referrer ArtMethod* into x3 and
// Thread::Current into x4.
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
ldr x3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
mov x4, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
\return
END \name
.endm
// Return-handler for downcalls: a non-zero w0 is a success value and is
// returned; zero means the callee set a pending exception, so deliver it.
// Never falls through.
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
cbz w0, 1f // result zero branch over
ret // return
1:
DELIVER_PENDING_EXCEPTION
.endm
/*
* Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
* failure.
*/
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
/*
* Entry from managed code when uninitialized static storage, this stub will run the class
* initializer and deliver the exception on error. On success the static storage base is
* returned.
*/
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Type resolution entrypoints: return the resolved type or deliver the exception.
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Static field getters: x0 = field index; referrer loaded by the macro.
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
// Instance field getters: x0 = field index, x1 = object.
TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
// Static field setters: x0 = field index, x1 = new value.
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
// Instance field setters: x0 = field index, x1 = object, x2 = new value.
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
// This is separated out as the argument order is different.
// x0 = field index, x2 = 64-bit new value (x1 is skipped so the wide value
// stays in an even-ish slot); referrer goes in x1, Thread::Current in x3.
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
// x2 contains the parameter
mov x3, xSELF // pass Thread::Current
bl artSet64StaticFromCode // (field_idx, referrer, new_val, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_set64_static
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
* exception on error. On success the String is returned. w0 holds the string index. The fast
* path check for hit in strings cache has already been performed.
*/
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
// (The rosalloc / tlab / region-tlab object-alloc variants below override
// the generated versions with hand-written fast paths.)
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
// Fast path: pop a slot from the thread-local rosalloc free list; any check
// failure (unresolved/uninitialized/finalizable class, full alloc stack,
// oversized object, empty free list) falls back to the C++ allocator.
ENTRY art_quick_alloc_object_rosalloc
// Fast path rosalloc allocation.
// x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
// x2-x7: free.
ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
// Check class status.
ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
bne .Lart_quick_alloc_object_rosalloc_slow_path
// Add a fake dependence from the
// following access flag and size
// loads to the status load.
// This is to prevent those loads
// from being reordered above the
// status load and reading wrong
// values (an alternative is to use
// a load-acquire for the status).
eor x3, x3, x3
add x2, x2, x3
// Check access flags has
// kAccClassIsFinalizable
ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
tst x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
bne .Lart_quick_alloc_object_rosalloc_slow_path
ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// ldp won't work due to large offset.
ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp x3, x4
bhs .Lart_quick_alloc_object_rosalloc_slow_path
ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (x3)
cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation
bhs .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size.
// Align up the size by the rosalloc
// bracket quantum size and divide
// by the quantum size and subtract
// by 1. This code is a shorter but
// equivalent version.
sub x3, x3, #1
lsr x3, x3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
// Load the rosalloc run (x4)
add x4, xSELF, x3, lsl #POINTER_SIZE_SHIFT
ldr x4, [x4, #THREAD_ROSALLOC_RUNS_OFFSET]
// Load the free list head (x3). This
// will be the return val.
ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
// next pointer.
str x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
// Store the class pointer in the
// header. This also overwrites the
// next pointer. The offsets are
// asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
POISON_HEAP_REF w2
str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
// the class status load with respect
// to later accesses to the class
// object. Alternatively we could use
// "ishst" if we use load-acquire for
// the class status load.)
// Needs to be done before pushing on
// allocation since Heap::VisitObjects
// relies on seeing the class pointer.
// b/28790624
dmb ish
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
// allocation stack top.
ldr x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
str w3, [x1], #COMPRESSED_REFERENCE_SIZE // (Increment x1 as a side effect.)
str x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
// Decrement the size of the free list
ldr w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
sub x1, x1, #1
// TODO: consider combining this store
// and the list head store above using
// strd.
str w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
mov x0, x3 // Set the return value and return.
ret
.Lart_quick_alloc_object_rosalloc_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x2, xSELF // pass Thread::Current
bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
//
// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
// x3-x7: free.
// Need to preserve x0 and x1 to the slow path.
// On success this macro bump-allocates from the thread-local buffer and
// executes `ret` itself; it only falls through via \slowPathLabel.
.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
cbz x2, \slowPathLabel // Check null class
// Check class status.
ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
bne \slowPathLabel
// Add a fake dependence from the
// following access flag and size
// loads to the status load.
// This is to prevent those loads
// from being reordered above the
// status load and reading wrong
// values (an alternative is to use
// a load-acquire for the status).
eor x3, x3, x3
add x2, x2, x3
// Check access flags has
// kAccClassIsFinalizable.
ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
tbnz x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT, \slowPathLabel
// Load thread_local_pos (x4) and
// thread_local_end (x5).
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
sub x6, x5, x4 // Compute the remaining buf size.
ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (x7).
cmp x7, x6 // Check if it fits. OK to do this
// before rounding up the object size
// assuming the buf size alignment.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
// Round up the object size by the
// object alignment. (addr + 7) & ~7.
add x7, x7, #OBJECT_ALIGNMENT_MASK
and x7, x7, #OBJECT_ALIGNMENT_MASK_TOGGLED
// Move old thread_local_pos to x0
// for the return value.
mov x0, x4
add x5, x0, x7
str x5, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add x5, x5, #1
str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
POISON_HEAP_REF w2
str w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
// that the code after this allocation
// site will see the right values in
// the fields of the class.
// Alternatively we could use "ishst"
// if we use load-acquire for the
// class status load.)
dmb ish
ret
.endm
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
// Resolves the class from the dex cache, then tries the shared TLAB fast
// path; otherwise calls artAllocObjectFromCodeTLAB.
ENTRY art_quick_alloc_object_tlab
// Fast path tlab allocation.
// x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
// x2-x7: free.
#if defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier not supported here.
ret // Return -1.
#endif
ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_tlab
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
// Same as the TLAB variant, but marks the loaded class through
// artReadBarrierMark when the GC is concurrently marking.
ENTRY art_quick_alloc_object_region_tlab
// Fast path region tlab allocation.
// x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
// x2-x7: free.
#if !defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier must be enabled here.
ret // Return -1.
#endif
ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
// Read barrier for class load.
ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
cbnz x3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
// The read barrier slow path. Mark
// the class.
stp x0, x1, [sp, #-32]! // Save registers (x0, x1, lr).
str xLR, [sp, #16] // Align sp by 16 bytes.
mov x0, x2 // Pass the class as the first param.
bl artReadBarrierMark
mov x2, x0 // Get the (marked) class back.
ldp x0, x1, [sp, #0] // Restore registers.
ldr xLR, [sp, #16]
add sp, sp, #32
b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_region_tlab_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_region_tlab
/*
* Called by managed code when the thread has been asked to suspend.
* Fast-exits when no flags are set; otherwise calls artTestSuspendFromCode
* with a callee-save frame so the stack can be crawled during the suspend.
*/
.extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
ldrh w0, [xSELF, #THREAD_FLAGS_OFFSET] // get xSELF->state_and_flags.as_struct.flags (16-bit load)
cbnz w0, .Lneed_suspend // check flags == 0
ret // return if flags == 0
.Lneed_suspend:
mov x0, xSELF
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
bl artTestSuspendFromCode // (Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
// Entry for implicit suspend checks: like art_quick_test_suspend but without
// the flag check (the caller has already decided a suspend is needed).
ENTRY art_quick_implicit_suspend
mov x0, xSELF
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
bl artTestSuspendFromCode // (Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
* x0 holds the proxy method and x1 holds the receiver; The frame size of the invoked proxy
* method agrees with a ref and args callee save frame.
* The 64-bit result comes back in x0 and is mirrored into d0 because the
* caller may expect a float/double return.
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP)
ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
cbnz x2, .Lexception_in_proxy // success if no exception is pending
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
fmov d0, x0 // Store result in d0 in case it was float or double
ret // return on success
.Lexception_in_proxy:
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
* x0 is the conflict ArtMethod.
* xIP1 is a hidden argument that holds the target interface method's dex method index.
*
* Note that this stub writes to xIP0, xIP1, and x0.
*
* Linearly scans the ImtConflictTable (pairs of {interface method, target
* method}, null-terminated) for the interface method; on a miss, falls back
* to the runtime trampoline which populates the table.
*/
.extern artInvokeInterfaceTrampoline
ENTRY art_quick_imt_conflict_trampoline
ldr xIP0, [sp, #0] // Load referrer
ldr xIP0, [xIP0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_64] // Load dex cache methods array
ldr xIP0, [xIP0, xIP1, lsl #POINTER_SIZE_SHIFT] // Load interface method
ldr xIP1, [x0, #ART_METHOD_JNI_OFFSET_64] // Load ImtConflictTable
ldr x0, [xIP1] // Load first entry in ImtConflictTable.
.Limt_table_iterate:
cmp x0, xIP0
// Branch if found. Benchmarks have shown doing a branch here is better.
beq .Limt_table_found
// If the entry is null, the interface method is not in the ImtConflictTable.
cbz x0, .Lconflict_trampoline
// Iterate over the entries of the ImtConflictTable.
ldr x0, [xIP1, #(2 * __SIZEOF_POINTER__)]! // pre-index: advance xIP1 to the next pair
b .Limt_table_iterate
.Limt_table_found:
// We successfully hit an entry in the table. Load the target method
// and jump to it.
ldr x0, [xIP1, #__SIZEOF_POINTER__]
ldr xIP0, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
br xIP0
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
// Resolution trampoline: x0 = called method (unresolved), x1 = receiver.
// artQuickResolutionTrampoline resolves the method, stores it at *SP, and
// returns its code pointer (or null with a pending exception).
ENTRY art_quick_resolution_trampoline
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
mov x2, xSELF
mov x3, sp
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
cbz x0, 1f // null code pointer -> exception pending
mov xIP0, x0 // Remember returned code pointer in xIP0.
ldr x0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
br xIP0 // Tail-call into the resolved method's code.
1:
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
/*
* Generic JNI frame layout:
*
* #-------------------#
* | |
* | caller method... |
* #-------------------# <--- SP on entry
* | Return X30/LR |
* | X29/FP | callee save
* | X28 | callee save
* | X27 | callee save
* | X26 | callee save
* | X25 | callee save
* | X24 | callee save
* | X23 | callee save
* | X22 | callee save
* | X21 | callee save
* | X20 | callee save
* | X19 | callee save
* | X7 | arg7
* | X6 | arg6
* | X5 | arg5
* | X4 | arg4
* | X3 | arg3
* | X2 | arg2
* | X1 | arg1
* | D7 | float arg 8
* | D6 | float arg 7
* | D5 | float arg 6
* | D4 | float arg 5
* | D3 | float arg 4
* | D2 | float arg 3
* | D1 | float arg 2
* | D0 | float arg 1
* | Method* | <- X0
* #-------------------#
* | local ref cookie | // 4B
* | handle scope size | // 4B
* #-------------------#
* | JNI Call Stack |
* #-------------------# <--- SP on native call
* | |
* | Stack for Regs | The trampoline assembly will pop these values
* | | into registers for native call
* #-------------------#
* | Native code ptr |
* #-------------------#
* | Free scratch |
* #-------------------#
* | Ptr to (1) | <--- SP
* #-------------------#
*/
/*
* Called to do a generic JNI down-call
*/
ENTRY art_quick_generic_jni_trampoline
    // Generic bridge from managed code to an arbitrary native (JNI) method.
    // On entry: x0 = native Method*, x1..x7/d0..d7 = managed call args.
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
    // Save SP, so we can have static CFI info.
    mov x28, sp
    .cfi_def_cfa_register x28
    // This looks the same, but is different: this will be updated to point to the bottom
    // of the frame when the handle scope is inserted.
    mov xFP, sp
    mov xIP0, #5120         // Reserved scratch area for handle scope + native stack args.
    sub sp, sp, xIP0
    // prepare for artQuickGenericJniTrampoline call
    // (Thread*, SP)
    //    x0    x1   <= C calling convention
    //  xSELF  xFP   <= where they are
    mov x0, xSELF   // Thread*
    mov x1, xFP
    bl artQuickGenericJniTrampoline  // (Thread*, sp)
    // The C call will have registered the complete save-frame on success.
    // The result of the call is:
    // x0: pointer to native code, 0 on error.
    // x1: pointer to the bottom of the used area of the alloca, can restore stack till there.
    // Check for error = 0.
    cbz x0, .Lexception_in_native
    // Release part of the alloca.
    mov sp, x1
    // Save the code pointer
    mov xIP0, x0
    // Load parameters from frame into registers.
    // TODO Check with artQuickGenericJniTrampoline.
    //          Also, check again APPCS64 - the stack arguments are interleaved.
    // The C trampoline laid out x0..x7 then d0..d7 contiguously at the new SP.
    ldp x0, x1, [sp]
    ldp x2, x3, [sp, #16]
    ldp x4, x5, [sp, #32]
    ldp x6, x7, [sp, #48]
    ldp d0, d1, [sp, #64]
    ldp d2, d3, [sp, #80]
    ldp d4, d5, [sp, #96]
    ldp d6, d7, [sp, #112]
    add sp, sp, #128        // Pop the 16 register slots just loaded.
    blr xIP0        // native call.
    // result sign extension is handled in C code
    // prepare for artQuickGenericJniEndTrampoline call
    // (Thread*, result, result_f)
    //   x0       x1       x2        <= C calling convention
    mov x1, x0      // Result (from saved).
    mov x0, xSELF   // Thread register.
    fmov x2, d0     // d0 will contain floating point result, but needs to go into x2
    bl artQuickGenericJniEndTrampoline
    // Pending exceptions possible.
    ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
    cbnz x2, .Lexception_in_native
    // Tear down the alloca.
    mov sp, x28
    .cfi_def_cfa_register sp
    // Tear down the callee-save frame.
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    // store into fpr, for when it's a fpr return...
    fmov d0, x0
    ret
.Lexception_in_native:
    // Move to x1 then sp to please assembler.
    ldr x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
    mov sp, x1
    .cfi_def_cfa_register sp
    # This will create a new save-all frame, required by the runtime.
    DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
/*
* Called to bridge from the quick to interpreter ABI. On entry the arguments match those
* of a quick call:
* x0 = method being called/to bridge to.
* x1..x7, d0..d7 = arguments to that method.
*/
ENTRY art_quick_to_interpreter_bridge
    // Executes the method in x0 via the interpreter; result is returned in
    // x0 (integral) and mirrored into d0 (floating point), per quick ABI.
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // Set up frame and save arguments.
    //  x0 will contain mirror::ArtMethod* method.
    mov x1, xSELF   // How to get Thread::Current() ???
    mov x2, sp
    // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
    //                                      mirror::ArtMethod** sp)
    bl artQuickToInterpreterBridge
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // TODO: no need to restore arguments in this case.
    fmov d0, x0     // Copy result so fp returns are also correct.
    RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
//
// Instrumentation-related stubs
//
.extern artInstrumentationMethodEntryFromCode
ENTRY art_quick_instrumentation_entry
    // Reports method entry to the instrumentation, then tail-calls the method
    // with LR pointing at art_quick_instrumentation_exit so the exit is also reported.
    // On entry: x0 = Method*, x1 = receiver object (passed through as arg2 below).
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    mov x20, x0             // Preserve method reference in a callee-save.
    mov x2, xSELF
    mov x3, xLR
    bl artInstrumentationMethodEntryFromCode  // (Method*, Object*, Thread*, LR)
    mov xIP0, x0            // x0 = result of call.
    mov x0, x20             // Reload method reference.
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME  // Note: will restore xSELF
    adr xLR, art_quick_instrumentation_exit
    br xIP0                 // Tail-call method with lr set to art_quick_instrumentation_exit.
END art_quick_instrumentation_entry
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_exit
    // Reached via the faked LR installed by art_quick_instrumentation_entry.
    // Reports method exit to the instrumentation and then either returns to the
    // real caller or bounces to the deoptimization entry (address comes back in x0/x1).
    mov   xLR, #0             // Clobber LR for later checks.
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
    // We need to save x0 and d0. We could use a callee-save from SETUP_REF_ONLY, but then
    // we would need to fully restore it. As there are a lot of callee-save registers, it seems
    // easier to have an extra small stack area.
    str x0, [sp, #-16]!       // Save integer result.
    .cfi_adjust_cfa_offset 16
    str d0, [sp, #8]          // Save floating-point result.
    add   x1, sp, #16         // Pass SP.
    mov   x2, x0              // Pass integer result.
    fmov  x3, d0              // Pass floating-point result.
    mov   x0, xSELF           // Pass Thread.
    bl    artInstrumentationMethodExitFromCode  // (Thread*, SP, gpr_res, fpr_res)
    mov   xIP0, x0            // Return address from instrumentation call.
    mov   xLR, x1             // r1 is holding link register if we're to bounce to deoptimize
    ldr   d0, [sp, #8]        // Restore floating-point result.
    ldr   x0, [sp], 16        // Restore integer result, and drop stack area.
    // BUG FIX: popping 16 bytes shrinks the frame, so the CFA offset must be
    // adjusted by -16 (the original "+16" left the unwind info permanently off
    // by 32 from here on).
    .cfi_adjust_cfa_offset -16
    POP_REFS_ONLY_CALLEE_SAVE_FRAME
    br    xIP0                // Tail-call out.
END art_quick_instrumentation_exit
/*
* Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
* will long jump to the upcall with a special exception of -1.
*/
.extern artDeoptimize
ENTRY art_quick_deoptimize
    // Entered when instrumentation requests deoptimization; artDeoptimize long-jumps
    // into the interpreter and never returns (brk 0 traps if it ever does).
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov    x0, xSELF          // Pass thread.
    bl     artDeoptimize      // artDeoptimize(Thread*)
    brk 0                     // Unreachable: artDeoptimize does not return.
END art_quick_deoptimize
/*
* Compiled code has requested that we deoptimize into the interpreter. The deoptimization
* will long jump to the upcall with a special exception of -1.
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
    // Entered when compiled code itself requests deoptimization; the C routine
    // long-jumps into the interpreter and never returns (brk 0 traps if it ever does).
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov    x0, xSELF                      // Pass thread.
    bl     artDeoptimizeFromCompiledCode  // artDeoptimizeFromCompiledCode(Thread*)
    brk 0                                 // Unreachable: the call does not return.
END art_quick_deoptimize_from_compiled_code
/*
* String's indexOf.
*
* TODO: Not very optimized.
* On entry:
* x0: string object (known non-null)
* w1: char to match (known <= 0xFFFF)
* w2: Starting offset in string data
*/
ENTRY art_quick_indexof
    // Returns the index of the first occurrence of char w1 in string x0 at or
    // after offset w2, or -1 if not found.  Uses a 4x-unrolled scan.
    ldr   w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
    add   x0, x0, #MIRROR_STRING_VALUE_OFFSET
    /* Clamp start to [0..count] */
    cmp   w2, #0
    csel  w2, wzr, w2, lt
    cmp   w2, w3
    csel  w2, w3, w2, gt
    /* Save a copy to compute result */
    mov   x5, x0
    /* Build pointer to start of data to compare and pre-bias */
    // Pre-bias by -2 because the loop loads with [x0, #2]! pre-increment.
    add   x0, x0, x2, lsl #1
    sub   x0, x0, #2
    /* Compute iteration count */
    sub   w2, w3, w2
    /*
     * At this point we have:
     *  x0: start of the data to test
     *  w1: char to compare
     *  w2: iteration count
     *  x5: original start of string data
     */
    subs  w2, w2, #4
    b.lt  .Lindexof_remainder
.Lindexof_loop4:
    // Load 4 chars; pre-increment keeps x0 pointing at the last char loaded.
    ldrh  w6, [x0, #2]!
    ldrh  w7, [x0, #2]!
    ldrh  wIP0, [x0, #2]!
    ldrh  wIP1, [x0, #2]!
    cmp   w6, w1
    b.eq  .Lmatch_0
    cmp   w7, w1
    b.eq  .Lmatch_1
    cmp   wIP0, w1
    b.eq  .Lmatch_2
    cmp   wIP1, w1
    b.eq  .Lmatch_3
    subs  w2, w2, #4
    b.ge  .Lindexof_loop4
.Lindexof_remainder:
    adds  w2, w2, #4
    b.eq  .Lindexof_nomatch
.Lindexof_loop1:
    ldrh  w6, [x0, #2]!
    cmp   w6, w1
    b.eq  .Lmatch_3
    subs  w2, w2, #1
    b.ne  .Lindexof_loop1
.Lindexof_nomatch:
    mov   x0, #-1
    ret
    // At a match, x0 points at the 4th loaded char; .Lmatch_N backs up by
    // (3-N)*2 bytes to the matching char, then converts the byte offset from
    // the string start (x5) into a char index.
.Lmatch_0:
    sub   x0, x0, #6
    sub   x0, x0, x5
    asr   x0, x0, #1
    ret
.Lmatch_1:
    sub   x0, x0, #4
    sub   x0, x0, x5
    asr   x0, x0, #1
    ret
.Lmatch_2:
    sub   x0, x0, #2
    sub   x0, x0, x5
    asr   x0, x0, #1
    ret
.Lmatch_3:
    sub   x0, x0, x5
    asr   x0, x0, #1
    ret
END art_quick_indexof
/*
* String's compareTo.
*
* TODO: Not very optimized.
*
* On entry:
* x0: this object pointer
* x1: comp object pointer
*
*/
.extern __memcmp16
ENTRY art_quick_string_compareto
    // String.compareTo: returns <0 / 0 / >0.  Compares up to min(len1, len2)
    // chars; if that prefix is equal, the length difference is the result.
    mov    x2, x0         // x0 is return, use x2 for first input.
    sub    x0, x2, x1     // Same string object?
    cbnz   x0,1f
    ret
1:                        // Different string objects.
    ldr    w4, [x2, #MIRROR_STRING_COUNT_OFFSET]
    ldr    w3, [x1, #MIRROR_STRING_COUNT_OFFSET]
    add    x2, x2, #MIRROR_STRING_VALUE_OFFSET
    add    x1, x1, #MIRROR_STRING_VALUE_OFFSET
    /*
     * Now:           Data*  Count
     *    first arg    x2      w4
     *   second arg    x1      w3
     */
    // x0 := str1.length(w4) - str2.length(w3).  ldr zero-extended w3/w4 into x3/x4.
    subs x0, x4, x3
    // Min(count1, count2) into w3.
    csel x3, x3, x4, ge
    // TODO: Tune this value.
    // Check for long string, do memcmp16 for them.
    cmp   w3, #28  // Constant from arm32.
    bgt   .Ldo_memcmp16
    /*
     * Now:
     *   x2: *first string data
     *   x1: *second string data
     *   w3: iteration count
     *   x0: return value if comparison equal
     *   x4, x5, x6, x7: free
     */
    // Do a simple unrolled loop.
.Lloop:
    // At least two more elements?
    subs  w3, w3, #2
    b.lt  .Lremainder_or_done
    ldrh  w4, [x2], #2
    ldrh  w5, [x1], #2
    ldrh  w6, [x2], #2
    ldrh  w7, [x1], #2
    subs  w4, w4, w5
    b.ne  .Lw4_result
    subs  w6, w6, w7
    b.ne  .Lw6_result
    b     .Lloop
.Lremainder_or_done:
    adds  w3, w3, #1
    b.eq  .Lremainder
    ret                   // Prefix equal: x0 still holds the length difference.
.Lremainder:
    ldrh  w4, [x2], #2
    ldrh  w5, [x1], #2
    subs  w4, w4, w5
    b.ne  .Lw4_result
    ret
// Result is in w4
.Lw4_result:
    sxtw  x0, w4
    ret
// Result is in w6
.Lw6_result:
    sxtw  x0, w6
    ret
.Ldo_memcmp16:
    mov   x14, x0                  // Save x0 and LR. __memcmp16 does not use these temps.
    mov   x15, xLR                 //                 TODO: Codify and check that?
    mov   x0, x2
    uxtw  x2, w3                   // Count (in chars) is the third __memcmp16 argument.
    bl    __memcmp16
    mov   xLR, x15                 // Restore LR.
    cmp   x0, #0                   // Check the memcmp difference.
    csel  x0, x0, x14, ne          // x0 := (memcmp != 0) ? x0 (memcmp result) : x14 (length diff).
    ret
END art_quick_string_compareto
|
abforce/xposed_art_n
| 5,818
|
runtime/arch/x86/asm_support_x86.S
|
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
#include "asm_support_x86.h"
// Regular gas(1) & current clang/llvm assembler support named macro parameters.
#define MACRO0(macro_name) .macro macro_name
#define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
#define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
#define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
#define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4
#define MACRO5(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5
#define END_MACRO .endm
#if defined(__clang__)
// Clang/llvm does not support .altmacro. However, the clang/llvm preprocessor doesn't
// separate the backslash and parameter by a space. Everything just works.
#define RAW_VAR(name) \name
#define VAR(name) \name
#define CALLVAR(name) SYMBOL(\name)
#define PLT_VAR(name) \name@PLT
#define REG_VAR(name) %\name
#define CALL_MACRO(name) \name
#else
// Regular gas(1) uses \argument_name for macro arguments.
// We need to turn on alternate macro syntax so we can use & instead or the preprocessor
// will screw us by inserting a space between the \ and the name. Even in this mode there's
// no special meaning to $, so literals are still just $x. The use of altmacro means % is a
// special character meaning care needs to be taken when passing registers as macro
// arguments.
.altmacro
#define RAW_VAR(name) name&
#define VAR(name) name&
#define CALLVAR(name) SYMBOL(name&)
#define PLT_VAR(name) name&@PLT
#define REG_VAR(name) %name
#define CALL_MACRO(name) name&
#endif
#define LITERAL(value) $value
#if defined(__APPLE__)
#define MACRO_LITERAL(value) $(value)
#else
#define MACRO_LITERAL(value) $value
#endif
#if defined(__APPLE__)
#define FUNCTION_TYPE(name)
#define SIZE(name)
#else
#define FUNCTION_TYPE(name) .type name, @function
#define SIZE(name) .size name, .-name
#endif
// CFI support.
#if !defined(__APPLE__)
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_ADJUST_CFA_OFFSET(size) .cfi_adjust_cfa_offset size
#define CFI_DEF_CFA(reg,size) .cfi_def_cfa reg,size
#define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
#define CFI_RESTORE(reg) .cfi_restore reg
#define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_REMEMBER_STATE .cfi_remember_state
#else
// Mac OS' doesn't like cfi_* directives.
#define CFI_STARTPROC
#define CFI_ENDPROC
#define CFI_ADJUST_CFA_OFFSET(size)
#define CFI_DEF_CFA(reg,size)
#define CFI_DEF_CFA_REGISTER(reg)
#define CFI_RESTORE(reg)
#define CFI_REL_OFFSET(reg,size)
#define CFI_RESTORE_STATE
#define CFI_REMEMBER_STATE
#endif
// Symbols. On a Mac, we need a leading underscore.
#if !defined(__APPLE__)
#define SYMBOL(name) name
#define PLT_SYMBOL(name) name ## @PLT
#else
// Mac OS' symbols have an _ prefix.
#define SYMBOL(name) _ ## name
#define PLT_SYMBOL(name) _ ## name
#endif
// Directive to hide a function symbol.
#if defined(__APPLE__)
#define ASM_HIDDEN .private_extern
#else
#define ASM_HIDDEN .hidden
#endif
/* Cache alignment for function entry */
/* Cache alignment for function entry */
MACRO0(ALIGN_FUNCTION_ENTRY)
    .balign 16
END_MACRO
// Opens a global, hidden, 16-byte-aligned function and starts its CFI scope.
// Pair every DEFINE_FUNCTION with END_FUNCTION using the same name.
MACRO1(DEFINE_FUNCTION, c_name)
    FUNCTION_TYPE(SYMBOL(\c_name))
    ASM_HIDDEN CALLVAR(c_name)
    .globl CALLVAR(c_name)
    ALIGN_FUNCTION_ENTRY
CALLVAR(c_name):
    CFI_STARTPROC
    // Ensure we get a sane starting CFA.
    CFI_DEF_CFA(esp, 4)
END_MACRO
// Closes the CFI scope and emits the ELF size for a DEFINE_FUNCTION function.
MACRO1(END_FUNCTION, c_name)
    CFI_ENDPROC
    SIZE(SYMBOL(\c_name))
END_MACRO
// Push a register and keep the CFI call-frame information in sync
// (CFA grows by 4; the register's saved location is recorded).
MACRO1(PUSH, reg)
    pushl REG_VAR(reg)
    CFI_ADJUST_CFA_OFFSET(4)
    CFI_REL_OFFSET(REG_VAR(reg), 0)
END_MACRO
// Pop a register and undo the corresponding CFI adjustments.
MACRO1(POP, reg)
    popl REG_VAR(reg)
    CFI_ADJUST_CFA_OFFSET(-4)
    CFI_RESTORE(REG_VAR(reg))
END_MACRO
// Mark a register as restored in CFI without emitting any instruction
// (for cases where the register was reloaded by other means).
MACRO1(CFI_RESTORE_REG, reg)
    CFI_RESTORE(REG_VAR(reg))
END_MACRO
// int3 raises SIGTRAP; used to mark code paths that must never execute.
#define UNREACHABLE int3
// Emits a global function stub that traps immediately if ever called.
MACRO1(UNIMPLEMENTED,name)
    FUNCTION_TYPE(\name)
    .globl VAR(name)
    ALIGN_FUNCTION_ENTRY
VAR(name):
    CFI_STARTPROC
    UNREACHABLE
    UNREACHABLE
    CFI_ENDPROC
    SIZE(\name)
END_MACRO
// Loads the GOT base into got_reg (must be ebx) for PIC data access.
// Clobbers got_reg without saving it; no-op on Apple targets.
MACRO1(SETUP_GOT_NOSAVE, got_reg)
#ifndef __APPLE__
    .ifc VAR(got_reg), ebx
      call __x86.get_pc_thunk.bx
      addl $_GLOBAL_OFFSET_TABLE_, %ebx
    .else
      .error "Unknown GOT register \got_reg"
    .endif
#endif
END_MACRO
// Macros to poison (negate) the reference for heap poisoning.
// No-ops unless the build defines USE_HEAP_POISONING.
MACRO1(POISON_HEAP_REF, rRef)
#ifdef USE_HEAP_POISONING
    neg REG_VAR(rRef)
#endif  // USE_HEAP_POISONING
END_MACRO
// Macros to unpoison (negate) the reference for heap poisoning.
// Negation is its own inverse, so the same instruction undoes POISON_HEAP_REF.
MACRO1(UNPOISON_HEAP_REF, rRef)
#ifdef USE_HEAP_POISONING
    neg REG_VAR(rRef)
#endif  // USE_HEAP_POISONING
END_MACRO
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
|
abforce/xposed_art_n
| 82,315
|
runtime/arch/x86/quick_entrypoints_x86.S
|
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_x86.S"
#include "arch/quick_alloc_entrypoints.S"
// For x86, the CFA is esp+4, the address above the pushed return address on the stack.
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
// Builds the kSaveAll callee-save frame and publishes it as the thread's
// top quick frame.  Clobbers got_reg and temp_reg.
MACRO2(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME, got_reg, temp_reg)
    PUSH edi  // Save callee saves (ebx is saved/restored by the upcall)
    PUSH esi
    PUSH ebp
    subl MACRO_LITERAL(12), %esp  // Grow stack by 3 words.
    CFI_ADJUST_CFA_OFFSET(12)
    SETUP_GOT_NOSAVE RAW_VAR(got_reg)
    // Load Runtime::instance_ from GOT.
    movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
    movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
    // Push save all callee-save method.
    pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
    CFI_ADJUST_CFA_OFFSET(4)
    // Store esp as the top quick frame.
    movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
    // Ugly compile-time check, but we only have the preprocessor.
    // Last +4: implicit return address pushed on stack when caller made call.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 3*4 + 16 + 4)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(X86) size not as expected."
#endif
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly)
*/
// Builds the kRefsOnly callee-save frame and publishes it as the thread's
// top quick frame.  Clobbers got_reg and temp_reg.
MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME, got_reg, temp_reg)
    PUSH edi  // Save callee saves (ebx is saved/restored by the upcall)
    PUSH esi
    PUSH ebp
    subl MACRO_LITERAL(12), %esp  // Grow stack by 3 words.
    CFI_ADJUST_CFA_OFFSET(12)
    SETUP_GOT_NOSAVE RAW_VAR(got_reg)
    // Load Runtime::instance_ from GOT.
    movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
    movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
    // Push refs-only callee-save method.
    pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
    CFI_ADJUST_CFA_OFFSET(4)
    // Store esp as the top quick frame.
    movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
    // Ugly compile-time check, but we only have the preprocessor.
    // Last +4: implicit return address pushed on stack when caller made call.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 3*4 + 16 + 4)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86) size not as expected."
#endif
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly)
* and preserves the value of got_reg at entry.
*/
/*
 * Macro that sets up the callee save frame to conform with
 * Runtime::CreateCalleeSaveMethod(kRefsOnly)
 * and preserves the value of got_reg at entry.
 * Clobbers temp_reg; got_reg is saved on the stack and restored before exit.
 */
MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_GOT_REG, got_reg, temp_reg)
    PUSH edi  // Save callee saves (ebx is saved/restored by the upcall)
    PUSH esi
    PUSH ebp
    pushl REG_VAR(got_reg)  // Save got_reg
    // BUG FIX: the raw pushl above grows the frame by 4 bytes but the original
    // code never told CFI, leaving the CFA off by 4 for the rest of the frame
    // (every other push in this file is paired with an adjustment).
    CFI_ADJUST_CFA_OFFSET(4)
    subl MACRO_LITERAL(8), %esp  // Grow stack by 2 words.
    CFI_ADJUST_CFA_OFFSET(8)
    SETUP_GOT_NOSAVE RAW_VAR(got_reg)
    // Load Runtime::instance_ from GOT.
    movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
    movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
    // Push refs-only callee-save method.
    pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
    CFI_ADJUST_CFA_OFFSET(4)
    // Store esp as the top quick frame.
    movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
    // Restore got_reg: [esp] = method, [esp+4..11] = padding, [esp+12] = saved got_reg.
    movl 12(%esp), REG_VAR(got_reg)
    // Ugly compile-time check, but we only have the preprocessor.
    // Last +4: implicit return address pushed on stack when caller made call.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 3*4 + 16 + 4)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86) size not as expected."
#endif
END_MACRO
// Tears down the frame built by SETUP_REFS_ONLY_CALLEE_SAVE_FRAME:
// drops the method* + padding words, then restores the callee saves.
MACRO0(RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME)
    addl MACRO_LITERAL(16), %esp  // Unwind stack up to saved values
    CFI_ADJUST_CFA_OFFSET(-16)
    POP ebp  // Restore callee saves (ebx is saved/restored by the upcall)
    POP esi
    POP edi
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
*/
// Builds the kRefsAndArgs callee-save frame (callee saves + GPR args + FPR
// args) and publishes it as the thread's top quick frame.  Clobbers got_reg
// and temp_reg.
MACRO2(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME, got_reg, temp_reg)
    PUSH edi  // Save callee saves
    PUSH esi
    PUSH ebp
    PUSH ebx  // Save args
    PUSH edx
    PUSH ecx
    // Create space for FPR args.
    subl MACRO_LITERAL(4 * 8), %esp
    CFI_ADJUST_CFA_OFFSET(4 * 8)
    // Save FPRs.
    movsd %xmm0, 0(%esp)
    movsd %xmm1, 8(%esp)
    movsd %xmm2, 16(%esp)
    movsd %xmm3, 24(%esp)
    SETUP_GOT_NOSAVE RAW_VAR(got_reg)
    // Load Runtime::instance_ from GOT.
    movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
    movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
    // Push refs-and-args callee-save method.
    pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
    CFI_ADJUST_CFA_OFFSET(4)
    // Store esp as the top quick frame.
    movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
    // Ugly compile-time check, but we only have the preprocessor.
    // Last +4: implicit return address pushed on stack when caller made call.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 7*4 + 4*8 + 4)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86) size not as expected."
#endif
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsAndArgs) where the method is passed in EAX.
*/
// Same frame layout as SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME but the
// ArtMethod* is already in EAX, so no GOT/runtime lookup is needed.
MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX)
    // Save callee and GPR args, mixed together to agree with core spills bitmap.
    PUSH edi  // Save callee saves
    PUSH esi
    PUSH ebp
    PUSH ebx  // Save args
    PUSH edx
    PUSH ecx
    // Create space for FPR args.
    subl MACRO_LITERAL(32), %esp
    CFI_ADJUST_CFA_OFFSET(32)
    // Save FPRs.
    movsd %xmm0, 0(%esp)
    movsd %xmm1, 8(%esp)
    movsd %xmm2, 16(%esp)
    movsd %xmm3, 24(%esp)
    PUSH eax  // Store the ArtMethod reference at the bottom of the stack.
    // Store esp as the top quick frame.
    movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
END_MACRO
// Tears down the kRefsAndArgs frame: reloads FPR args, drops the method slot
// and FPR area in one adjustment, then restores GPR args and callee saves.
// EAX (the method*) is intentionally not restored.
MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
    // Restore FPRs. EAX is still on the stack.
    movsd 4(%esp), %xmm0
    movsd 12(%esp), %xmm1
    movsd 20(%esp), %xmm2
    movsd 28(%esp), %xmm3
    addl MACRO_LITERAL(36), %esp  // Remove FPRs and EAX.
    CFI_ADJUST_CFA_OFFSET(-36)
    POP ecx  // Restore args except eax
    POP edx
    POP ebx
    POP ebp  // Restore callee saves
    POP esi
    POP edi
END_MACRO
// Restore register and jump to routine
// Inputs: EDI contains pointer to code.
// Notes: Need to pop EAX too (restores Method*)
// Restore register and jump to routine
// Inputs:  EDI contains pointer to code.
// Notes: Need to pop EAX too (restores Method*)
// The final xchgl swaps saved EDI with the code pointer so the stack holds
// only the target address, and ret "returns" into the target method.
MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP)
    POP eax  // Restore Method*
    // Restore FPRs.
    movsd 0(%esp), %xmm0
    movsd 8(%esp), %xmm1
    movsd 16(%esp), %xmm2
    movsd 24(%esp), %xmm3
    addl MACRO_LITERAL(32), %esp  // Remove FPRs.
    CFI_ADJUST_CFA_OFFSET(-32)
    POP ecx  // Restore args except eax
    POP edx
    POP ebx
    POP ebp  // Restore callee saves
    POP esi
    xchgl 0(%esp),%edi  // restore EDI and place code pointer as only value on stack
    ret
END_MACRO
/*
* Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
* exception is Thread::Current()->exception_.
*/
// Delivers Thread::Current()->exception_ via the C runtime; never returns
// (artDeliverPendingExceptionFromCode long-jumps to the handler).
MACRO0(DELIVER_PENDING_EXCEPTION)
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx  // save callee saves for throw
    // Outgoing argument set up
    subl MACRO_LITERAL(12), %esp               // Alignment padding
    CFI_ADJUST_CFA_OFFSET(12)
    pushl %fs:THREAD_SELF_OFFSET               // pass Thread::Current()
    CFI_ADJUST_CFA_OFFSET(4)
    call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
    UNREACHABLE
END_MACRO
// Defines entrypoint c_name that throws via cxx_name(Thread*); never returns.
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
    DEFINE_FUNCTION VAR(c_name)
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx  // save all registers as basis for long jump context
    // Outgoing argument set up
    subl MACRO_LITERAL(12), %esp               // alignment padding
    CFI_ADJUST_CFA_OFFSET(12)
    pushl %fs:THREAD_SELF_OFFSET               // pass Thread::Current()
    CFI_ADJUST_CFA_OFFSET(4)
    call CALLVAR(cxx_name)                     // cxx_name(Thread*)
    UNREACHABLE
    END_FUNCTION VAR(c_name)
END_MACRO
// Defines entrypoint c_name that throws via cxx_name(arg1, Thread*);
// arg1 arrives in EAX.  Never returns.
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
    DEFINE_FUNCTION VAR(c_name)
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx  // save all registers as basis for long jump context
    mov %esp, %ecx  // NOTE(review): %ecx does not appear to be read afterwards — confirm against upstream.
    // Outgoing argument set up
    subl MACRO_LITERAL(8), %esp                // alignment padding
    CFI_ADJUST_CFA_OFFSET(8)
    pushl %fs:THREAD_SELF_OFFSET               // pass Thread::Current()
    CFI_ADJUST_CFA_OFFSET(4)
    PUSH eax                                   // pass arg1
    call CALLVAR(cxx_name)                     // cxx_name(arg1, Thread*)
    UNREACHABLE
    END_FUNCTION VAR(c_name)
END_MACRO
// Defines entrypoint c_name that throws via cxx_name(arg1, arg2, Thread*);
// arg1/arg2 arrive in EAX/ECX.  The extra PUSH eax only pads for alignment.
// Never returns.
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
    DEFINE_FUNCTION VAR(c_name)
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx  // save all registers as basis for long jump context
    // Outgoing argument set up
    PUSH eax                                   // alignment padding
    pushl %fs:THREAD_SELF_OFFSET               // pass Thread::Current()
    CFI_ADJUST_CFA_OFFSET(4)
    PUSH ecx                                   // pass arg2
    PUSH eax                                   // pass arg1
    call CALLVAR(cxx_name)                     // cxx_name(arg1, arg2, Thread*)
    UNREACHABLE
    END_FUNCTION VAR(c_name)
END_MACRO
/*
* Called by managed code to create and deliver a NullPointerException.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
* Called by managed code to create and deliver an ArithmeticException.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
/*
* Called by managed code to create and deliver a StackOverflowError.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
*/
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
/*
* Called by managed code to create and deliver a NoSuchMethodError.
*/
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
/*
* Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
* index, arg2 holds limit.
*/
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
* the method_idx. This wrapper will save arg1-arg3 and call the appropriate C helper.
* NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1.
*
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
 * If unsuccessful, the helper will return null/null and there will be a pending
 * exception in the thread, and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
* pointing back to the original caller.
*/
// Common body for the invoke trampolines: calls the C resolver
// cxx_name(method_idx, this, Thread*, SP) which returns the resolved
// Method* in EAX and its code pointer in EDX, then tail-calls the code
// with arguments restored, or delivers the pending exception on failure.
MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name)
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx
    movl %esp, %edx  // remember SP
    // Outgoing argument set up
    PUSH edx                      // pass SP
    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
    CFI_ADJUST_CFA_OFFSET(4)
    PUSH ecx                      // pass arg2
    PUSH eax                      // pass arg1
    call CALLVAR(cxx_name)        // cxx_name(arg1, arg2, Thread*, SP)
    movl %edx, %edi               // save code pointer in EDI
    addl MACRO_LITERAL(20), %esp  // Pop arguments skip eax
    CFI_ADJUST_CFA_OFFSET(-20)
    // Restore FPRs.
    movsd 0(%esp), %xmm0
    movsd 8(%esp), %xmm1
    movsd 16(%esp), %xmm2
    movsd 24(%esp), %xmm3
    // Remove space for FPR args.
    addl MACRO_LITERAL(4 * 8), %esp
    CFI_ADJUST_CFA_OFFSET(-4 * 8)
    POP ecx  // Restore args except eax
    POP edx
    POP ebx
    POP ebp  // Restore callee saves
    POP esi
    // Swap EDI callee save with code pointer.
    xchgl %edi, (%esp)
    testl %eax, %eax  // EAX = resolved Method*; null means an exception is pending.
    jz 1f             // Branch forward to exception delivery if resolution failed.
    // Tail call to intended method.
    ret
1:
    addl MACRO_LITERAL(4), %esp   // Pop code pointer off stack
    CFI_ADJUST_CFA_OFFSET(-4)
    DELIVER_PENDING_EXCEPTION
END_MACRO
// Wraps INVOKE_TRAMPOLINE_BODY in a named, CFI-scoped function c_name.
MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
    DEFINE_FUNCTION VAR(c_name)
    INVOKE_TRAMPOLINE_BODY RAW_VAR(cxx_name)
    END_FUNCTION VAR(c_name)
END_MACRO
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
* Helper for quick invocation stub to set up XMM registers.
* Increments shorty and arg_array and clobbers temp_char.
* Branches to finished if it encounters the end of the shorty.
*/
/*
 * Helper for quick invocation stub to set up XMM registers.
 * Increments shorty and arg_array and clobbers temp_char.
 * Branches to finished if it encounters the end of the shorty.
 * Shorty chars are compared by ASCII code: 68='D', 70='F', 74='J'.
 */
MACRO5(LOOP_OVER_SHORTY_LOADING_XMMS, xmm_reg, shorty, arg_array, temp_char, finished)
1: // LOOP
    movb (REG_VAR(shorty)), REG_VAR(temp_char)      // temp_char := *shorty
    addl MACRO_LITERAL(1), REG_VAR(shorty)          // shorty++
    cmpb MACRO_LITERAL(0), REG_VAR(temp_char)       // if (temp_char == '\0')
    je VAR(finished)                                //   goto finished
    cmpb MACRO_LITERAL(68), REG_VAR(temp_char)      // if (temp_char == 'D')
    je 2f                                           //   goto FOUND_DOUBLE
    cmpb MACRO_LITERAL(70), REG_VAR(temp_char)      // if (temp_char == 'F')
    je 3f                                           //   goto FOUND_FLOAT
    addl MACRO_LITERAL(4), REG_VAR(arg_array)       // arg_array++
    // Handle extra space in arg array taken by a long.
    cmpb MACRO_LITERAL(74), REG_VAR(temp_char)      // if (temp_char != 'J')
    jne 1b                                          //   goto LOOP
    addl MACRO_LITERAL(4), REG_VAR(arg_array)       // arg_array++ (second word of the long)
    jmp 1b                                          // goto LOOP
2:  // FOUND_DOUBLE
    movsd (REG_VAR(arg_array)), REG_VAR(xmm_reg)
    addl MACRO_LITERAL(8), REG_VAR(arg_array)       // arg_array+=2
    jmp 4f
3:  // FOUND_FLOAT
    movss (REG_VAR(arg_array)), REG_VAR(xmm_reg)
    addl MACRO_LITERAL(4), REG_VAR(arg_array)       // arg_array++
4:
END_MACRO
/*
* Helper for quick invocation stub to set up GPR registers.
* Increments shorty and arg_array, and returns the current short character in
* temp_char. Branches to finished if it encounters the end of the shorty.
*/
/*
 * Helper for quick invocation stub to set up GPR registers.
 * Increments shorty and arg_array, and returns the current short character in
 * temp_char. Branches to finished if it encounters the end of the shorty.
 * Skips float ('F', code 70) and double ('D', code 68) entries, which were
 * already consumed into XMM registers.
 */
MACRO4(SKIP_OVER_FLOATS, shorty, arg_array, temp_char, finished)
1: // LOOP:
    movb (REG_VAR(shorty)), REG_VAR(temp_char)      // temp_char := *shorty
    addl MACRO_LITERAL(1), REG_VAR(shorty)          // shorty++
    cmpb MACRO_LITERAL(0), REG_VAR(temp_char)       // if (temp_char == '\0')
    je VAR(finished)                                //   goto finished
    cmpb MACRO_LITERAL(70), REG_VAR(temp_char)      // if (temp_char == 'F')
    je 3f                                           //   goto SKIP_FLOAT
    cmpb MACRO_LITERAL(68), REG_VAR(temp_char)      // if (temp_char == 'D')
    je 4f                                           //   goto SKIP_DOUBLE
    jmp 5f                                          // goto end
3:  // SKIP_FLOAT
    addl MACRO_LITERAL(4), REG_VAR(arg_array)       // arg_array++
    jmp 1b                                          // goto LOOP
4:  // SKIP_DOUBLE
    addl MACRO_LITERAL(8), REG_VAR(arg_array)       // arg_array+=2
    jmp 1b                                          // goto LOOP
5:
END_MACRO
/*
* Quick invocation stub (non-static).
* On entry:
* [sp] = return address
* [sp + 4] = method pointer
* [sp + 8] = argument array or null for no argument methods
* [sp + 12] = size of argument array in bytes
* [sp + 16] = (managed) thread pointer
* [sp + 20] = JValue* result
* [sp + 24] = shorty
*/
DEFINE_FUNCTION art_quick_invoke_stub
    // Non-static invoke stub: marshals a flat argument array into the quick
    // calling convention (ECX = this, EDX/EBX = next GPR args, XMM0-3 = fp
    // args, rest on stack) and calls the method's quick code.
    // Save the non-volatiles.
    PUSH ebp  // save ebp
    PUSH ebx  // save ebx
    PUSH esi  // save esi
    PUSH edi  // save edi
    // Set up argument XMM registers.
    mov 24+16(%esp), %esi       // ESI := shorty + 1  ; ie skip return arg character.
    addl LITERAL(1), %esi
    mov 8+16(%esp), %edi        // EDI := arg_array + 4 ; ie skip this pointer.
    addl LITERAL(4), %edi
    // Clobbers ESI, EDI, EAX.
    LOOP_OVER_SHORTY_LOADING_XMMS xmm0, esi, edi, al, .Lxmm_setup_finished
    LOOP_OVER_SHORTY_LOADING_XMMS xmm1, esi, edi, al, .Lxmm_setup_finished
    LOOP_OVER_SHORTY_LOADING_XMMS xmm2, esi, edi, al, .Lxmm_setup_finished
    LOOP_OVER_SHORTY_LOADING_XMMS xmm3, esi, edi, al, .Lxmm_setup_finished
    .balign 16
.Lxmm_setup_finished:
    mov %esp, %ebp              // copy value of stack pointer into base pointer
    CFI_DEF_CFA_REGISTER(ebp)
    mov 28(%ebp), %ebx          // get arg array size
    // reserve space for return addr, method*, ebx, ebp, esi, and edi in frame
    addl LITERAL(36), %ebx
    // align frame size to 16 bytes
    andl LITERAL(0xFFFFFFF0), %ebx
    subl LITERAL(20), %ebx      // remove space for return address, ebx, ebp, esi and edi
    subl %ebx, %esp             // reserve stack space for argument array
    movl LITERAL(0), (%esp)     // store null for method*
    // Copy arg array into stack.
    movl 28(%ebp), %ecx         // ECX = size of args
    movl 24(%ebp), %esi         // ESI = argument array
    leal 4(%esp), %edi          // EDI = just after Method* in stack arguments
    rep movsb                   // while (ecx--) { *edi++ = *esi++ }
    mov 40(%ebp), %esi          // ESI := shorty + 1  ; ie skip return arg character.
    addl LITERAL(1), %esi
    mov 24(%ebp), %edi          // EDI := arg_array
    mov 0(%edi), %ecx           // ECX := this pointer
    addl LITERAL(4), %edi       // EDI := arg_array + 4 ; ie skip this pointer.
    // Enumerate the possible cases for loading GPRS.
    // edx (and maybe ebx):
    SKIP_OVER_FLOATS esi, edi, al, .Lgpr_setup_finished
    cmpb LITERAL(74), %al       // if (al == 'J') goto FOUND_LONG
    je .LfirstLong
    // Must be an integer value.
    movl (%edi), %edx
    addl LITERAL(4), %edi       // arg_array++
    // Now check ebx
    SKIP_OVER_FLOATS esi, edi, al, .Lgpr_setup_finished
    // Must be first word of a long, or an integer. First word of long doesn't
    // go into EBX, but can be loaded there anyways, as it is harmless.
    movl (%edi), %ebx
    jmp .Lgpr_setup_finished
.LfirstLong:
    movl (%edi), %edx           // Long occupies EDX (low word) and EBX (high word).
    movl 4(%edi), %ebx
    // Nothing left to load.
.Lgpr_setup_finished:
    mov 20(%ebp), %eax          // move method pointer into eax
    call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax)  // call the method
    mov %ebp, %esp              // restore stack pointer
    CFI_DEF_CFA_REGISTER(esp)
    POP edi                     // pop edi
    POP esi                     // pop esi
    POP ebx                     // pop ebx
    POP ebp                     // pop ebp
    mov 20(%esp), %ecx          // get result pointer
    mov %eax, (%ecx)            // store the result assuming its a long, int or Object*
    mov %edx, 4(%ecx)           // store the other half of the result
    mov 24(%esp), %edx          // get the shorty
    cmpb LITERAL(68), (%edx)    // test if result type char == 'D'
    je .Lreturn_double_quick
    cmpb LITERAL(70), (%edx)    // test if result type char == 'F'
    je .Lreturn_float_quick
    ret
.Lreturn_double_quick:
    movsd %xmm0, (%ecx)         // store the floating point result
    ret
.Lreturn_float_quick:
    movss %xmm0, (%ecx)         // store the floating point result
    ret
END_FUNCTION art_quick_invoke_stub
/*
* Quick invocation stub (static).
* On entry:
* [sp] = return address
* [sp + 4] = method pointer
* [sp + 8] = argument array or null for no argument methods
* [sp + 12] = size of argument array in bytes
* [sp + 16] = (managed) thread pointer
* [sp + 20] = JValue* result
* [sp + 24] = shorty
*/
// Unlike the instance stub above, there is no implicit 'this' argument, so the
// shorty-driven loaders start at arg_array[0], and the first integer/long
// argument goes into ECX (managed-code calling convention for statics).
DEFINE_FUNCTION art_quick_invoke_static_stub
// Save the non-volatiles.
PUSH ebp // save ebp
PUSH ebx // save ebx
PUSH esi // save esi
PUSH edi // save edi
// Set up argument XMM registers. Offsets below add 16 for the four pushes above.
mov 24+16(%esp), %esi // ESI := shorty + 1 ; ie skip return arg character.
addl LITERAL(1), %esi
mov 8+16(%esp), %edi // EDI := arg_array
// Clobbers ESI, EDI, EAX. Each macro loads one F/D arg (if next in shorty) and
// falls through; branches to .Lxmm_setup_finished2 when the shorty is exhausted.
LOOP_OVER_SHORTY_LOADING_XMMS xmm0, esi, edi, al, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm1, esi, edi, al, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm2, esi, edi, al, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm3, esi, edi, al, .Lxmm_setup_finished2
.balign 16
.Lxmm_setup_finished2:
mov %esp, %ebp // copy value of stack pointer into base pointer
CFI_DEF_CFA_REGISTER(ebp)
mov 28(%ebp), %ebx // get arg array size
// reserve space for return addr, method*, ebx, ebp, esi, and edi in frame
addl LITERAL(36), %ebx
// align frame size to 16 bytes
andl LITERAL(0xFFFFFFF0), %ebx
subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
movl LITERAL(0), (%esp) // store null for method*
// Copy arg array into stack.
movl 28(%ebp), %ecx // ECX = size of args
movl 24(%ebp), %esi // ESI = argument array
leal 4(%esp), %edi // EDI = just after Method* in stack arguments
rep movsb // while (ecx--) { *edi++ = *esi++ }
// Now load GPR arguments from the start of the shorty/arg array again.
mov 40(%ebp), %esi // ESI := shorty + 1 ; ie skip return arg character.
addl LITERAL(1), %esi
mov 24(%ebp), %edi // EDI := arg_array
// Enumerate the possible cases for loading GPRS.
// ecx (and maybe edx)
SKIP_OVER_FLOATS esi, edi, al, .Lgpr_setup_finished2
cmpb LITERAL(74), %al // if (al == 'J') goto FOUND_LONG
je .LfirstLong2
// Must be an integer value. Load into ECX.
movl (%edi), %ecx
addl LITERAL(4), %edi // arg_array++
// Now check edx (and maybe ebx).
SKIP_OVER_FLOATS esi, edi, al, .Lgpr_setup_finished2
cmpb LITERAL(74), %al // if (al == 'J') goto FOUND_LONG
je .LSecondLong2
// Must be an integer. Load into EDX.
movl (%edi), %edx
addl LITERAL(4), %edi // arg_array++
// Is there anything for ebx?
SKIP_OVER_FLOATS esi, edi, al, .Lgpr_setup_finished2
// Must be first word of a long, or an integer. First word of long doesn't
// go into EBX, but can be loaded there anyways, as it is harmless.
movl (%edi), %ebx
jmp .Lgpr_setup_finished2
.LSecondLong2:
// EDX:EBX is long. That is all.
movl (%edi), %edx
movl 4(%edi), %ebx
jmp .Lgpr_setup_finished2
.LfirstLong2:
// ECX:EDX is a long
movl (%edi), %ecx
movl 4(%edi), %edx
addl LITERAL(8), %edi // arg_array += 2
// Anything for EBX?
SKIP_OVER_FLOATS esi, edi, al, .Lgpr_setup_finished2
// Must be first word of a long, or an integer. First word of long doesn't
// go into EBX, but can be loaded there anyways, as it is harmless.
movl (%edi), %ebx
jmp .Lgpr_setup_finished2
// Nothing left to load.
.Lgpr_setup_finished2:
mov 20(%ebp), %eax // move method pointer into eax
call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
POP edi // pop edi
POP esi // pop esi
POP ebx // pop ebx
POP ebp // pop ebp
// Store the result. Integral/reference results come back in EAX(:EDX); the
// 64-bit store is harmless for 32-bit results since JValue is 8 bytes.
mov 20(%esp), %ecx // get result pointer
mov %eax, (%ecx) // store the result assuming its a long, int or Object*
mov %edx, 4(%ecx) // store the other half of the result
mov 24(%esp), %edx // get the shorty
cmpb LITERAL(68), (%edx) // test if result type char == 'D'
je .Lreturn_double_quick2
cmpb LITERAL(70), (%edx) // test if result type char == 'F'
je .Lreturn_float_quick2
ret
.Lreturn_double_quick2:
movsd %xmm0, (%ecx) // store the floating point result
ret
.Lreturn_float_quick2:
movss %xmm0, (%ecx) // store the floating point result
ret
END_FUNCTION art_quick_invoke_static_stub
// Generates a stub that calls a zero-argument C++ runtime entrypoint,
// cxx_name(Thread*), under a refs-only callee-save frame so the GC can
// walk the stack. The 12 bytes of padding plus the 4-byte Thread* keep
// ESP 16-byte aligned at the call.
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // push padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
call CALLVAR(cxx_name) // cxx_name(Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
// Generates a stub that forwards one managed argument (in EAX) to
// cxx_name(arg1, Thread*) under a refs-only callee-save frame.
// 8 bytes padding + Thread* + arg1 = 16 bytes, keeping ESP aligned.
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl MACRO_LITERAL(8), %esp // push padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
// Generates a stub that forwards two managed arguments (EAX, ECX) to
// cxx_name(arg1, arg2, Thread*) under a refs-only callee-save frame.
// The extra PUSH eax is alignment padding only; its value is never read.
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
PUSH eax // push padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
// Generates a stub that forwards three managed arguments (EAX, ECX, EDX) to
// cxx_name(arg1, arg2, arg3, Thread*) under a refs-only callee-save frame.
// Four 4-byte pushes total 16 bytes, so no extra padding is needed.
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
// Generates a stub that forwards four managed arguments (EAX, ECX, EDX, EBX)
// to cxx_name(arg1, arg2, arg3, arg4, Thread*). Uses the GOT-preserving frame
// setup because EBX carries arg4 and must not be clobbered by GOT setup.
// 12 padding + 5 pushes = 32 bytes popped after the call.
MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_GOT_REG ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass arg4
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
addl MACRO_LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
// Like ONE_ARG_DOWNCALL but additionally passes the referring ArtMethod*,
// read from the caller's stack slot just above the callee-save frame, as
// cxx_name(arg1, referrer, Thread*). Used for static field accessors.
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer
PUSH eax // push padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass referrer
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, referrer, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
// Like TWO_ARG_DOWNCALL but additionally passes the referring ArtMethod*
// (from the slot above the callee-save frame) as
// cxx_name(arg1, arg2, referrer, Thread*). Used for instance field getters.
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %edx // get referrer
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass referrer
PUSH ecx // pass arg2
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, referrer, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
// Like THREE_ARG_DOWNCALL but additionally passes the referring ArtMethod*
// as cxx_name(arg1, arg2, arg3, referrer, Thread*). Used for instance field
// setters. 12 padding + 5 pushes = 32 bytes popped after the call.
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass referrer
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, referrer,
// Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
// Return-path macro: the entrypoint's result is in EAX. A non-zero result is
// success (e.g. an allocated object) and falls through to ret; zero means the
// runtime raised an exception, which is delivered on the current thread.
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
testl %eax, %eax // eax == 0 ?
jz 1f // if eax == 0 goto 1
ret // return
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
// Return-path macro: inverse convention of the one above. EAX == 0 is
// success and returns; non-zero indicates a pending exception to deliver.
MACRO0(RETURN_IF_EAX_ZERO)
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
ret // return
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
// Return-path macro: inspects the thread-local exception field directly
// (via %fs) instead of EAX, so the entrypoint's return value is preserved.
// Used by getters whose result may legitimately be zero.
MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
cmpl MACRO_LITERAL(0),%fs:THREAD_EXCEPTION_OFFSET // exception field == 0 ?
jne 1f // if exception field != 0 goto 1
ret // return
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
// Fast-path object allocation from the thread-local RosAlloc runs. Falls back
// to the C++ slow path whenever the class is unresolved/uninitialized, is
// finalizable, the object is too large for a thread-local bracket, the
// thread-local allocation stack is full, or the bracket free list is empty.
DEFINE_FUNCTION art_quick_alloc_object_rosalloc
// Fast path rosalloc allocation.
// eax: uint32_t type_idx/return value, ecx: ArtMethod*
// ebx, edx: free
PUSH edi
movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
// Load the class (edx)
movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_rosalloc_slow_path
// Check class status
cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%edx)
jne .Lart_quick_alloc_object_rosalloc_slow_path
// No fake dependence needed on x86
// between status and flags load,
// since each load is a load-acquire,
// no loads reordering.
// Check access flags has
// kAccClassIsFinalizable
testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%edx)
jnz .Lart_quick_alloc_object_rosalloc_slow_path
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Check if the thread local allocation
// stack has room
movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi
cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %edi
jae .Lart_quick_alloc_object_rosalloc_slow_path
movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%edx), %edi // Load the object size (edi)
// Check if the size is for a thread
// local allocation
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %edi
ja .Lart_quick_alloc_object_rosalloc_slow_path
decl %edi
shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %edi // Calculate the rosalloc bracket index
// from object size.
// Align up the size by the rosalloc
// bracket quantum size and divide
// by the quantum size and subtract
// by 1. This code is a shorter but
// equivalent version.
// Load thread local rosalloc run (ebx)
movl THREAD_ROSALLOC_RUNS_OFFSET(%ebx, %edi, __SIZEOF_POINTER__), %ebx
// Load free_list head (edi),
// this will be the return value.
movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %edi
test %edi, %edi
jz .Lart_quick_alloc_object_rosalloc_slow_path
// Point of no slow path. Won't go to
// the slow path from here on. Ok to
// clobber eax and ecx.
movl %edi, %eax
// Load the next pointer of the head
// and update head of free list with
// next pointer
movl ROSALLOC_SLOT_NEXT_OFFSET(%eax), %edi
movl %edi, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx)
// Decrement size of free list by 1
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%ebx)
// Store the class pointer in the
// header. This also overwrites the
// next pointer. The offsets are
// asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
// allocation stack top.
movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi
movl %eax, (%edi)
addl LITERAL(COMPRESSED_REFERENCE_SIZE), %edi
movl %edi, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx)
// No fence needed for x86.
POP edi
ret
.Lart_quick_alloc_object_rosalloc_slow_path:
POP edi
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass ArtMethod* (arg1)
PUSH eax // pass type_idx (arg0)
call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_alloc_object_rosalloc
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
// Inline fast-path monitor-enter. eax = object (also the null-checked input).
// Handles the unlocked and thin-lock-recursive cases with a lock cmpxchg so
// the read-barrier state bits in the lock word are preserved; anything else
// (fat lock, contention, count overflow) goes to artLockObjectFromCode.
DEFINE_FUNCTION art_quick_lock_object
testl %eax, %eax // null check object/eax
jz .Lslow_lock
.Lretry_lock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
test LITERAL(LOCK_WORD_STATE_MASK), %ecx // test the 2 high bits.
jne .Lslow_lock // slow path if either of the two high bits are set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
test %ecx, %ecx
jnz .Lalready_thin // lock word contains a thin lock
// unlocked case - edx: original lock word, eax: obj.
movl %eax, %ecx // remember object in case of retry
movl %edx, %eax // eax: lock word zero except for read barrier bits.
movl %fs:THREAD_ID_OFFSET, %edx // load thread id.
or %eax, %edx // edx: thread id with count of 0 + read barrier bits.
lock cmpxchg %edx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%ecx) // eax: old val, edx: new val.
jnz .Llock_cmpxchg_fail // cmpxchg failed retry
ret
.Lalready_thin: // edx: lock word (with high 2 bits zero and original rb bits), eax: obj.
movl %fs:THREAD_ID_OFFSET, %ecx // ecx := thread id
cmpw %cx, %dx // do we hold the lock already?
jne .Lslow_lock
movl %edx, %ecx // copy the lock word to check count overflow.
andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %ecx // increment recursion count for overflow check.
test LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // overflowed if either of the upper two bits (28-29) are set.
jne .Lslow_lock // count overflowed so go slow
movl %eax, %ecx // save obj to use eax for cmpxchg.
movl %edx, %eax // copy the lock word as the old val for cmpxchg.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx // increment recursion count again for real.
// update lockword, cmpxchg necessary for read barrier bits.
lock cmpxchg %edx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%ecx) // eax: old val, edx: new val.
jnz .Llock_cmpxchg_fail // cmpxchg failed retry
ret
.Llock_cmpxchg_fail:
movl %ecx, %eax // restore eax
jmp .Lretry_lock
.Lslow_lock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
// Monitor-enter without the inline fast path: always calls the C++ runtime
// artLockObjectFromCode(object, Thread*). eax = object.
DEFINE_FUNCTION art_quick_lock_object_no_inline
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object_no_inline
// Inline fast-path monitor-exit. eax = object. Handles thin locks held by the
// current thread: either clears the lock word (count reaching zero) or
// decrements the recursion count. Without USE_READ_BARRIER a plain store
// suffices; with it, a lock cmpxchg preserves concurrently-updated rb bits.
// Fat locks, wrong-owner, and null go to artUnlockObjectFromCode.
DEFINE_FUNCTION art_quick_unlock_object
testl %eax, %eax // null check object/eax
jz .Lslow_unlock
.Lretry_unlock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
movl %fs:THREAD_ID_OFFSET, %edx // edx := thread id
test LITERAL(LOCK_WORD_STATE_MASK), %ecx
jnz .Lslow_unlock // lock word contains a monitor
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
movl %ecx, %edx // copy the lock word to detect new count of 0.
andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %edx // zero the read barrier bits.
cmpl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx
jae .Lrecursive_thin_unlock
// count would reach zero: release by storing a lock word with only rb bits.
// update lockword, cmpxchg necessary for read barrier bits.
movl %eax, %edx // edx: obj
movl %ecx, %eax // eax: old lock word.
andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // ecx: new lock word zero except original rb bits.
#ifndef USE_READ_BARRIER
movl %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
#else
lock cmpxchg %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx) // eax: old val, ecx: new val.
jnz .Lunlock_cmpxchg_fail // cmpxchg failed retry
#endif
ret
.Lrecursive_thin_unlock: // ecx: original lock word, eax: obj
// update lockword, cmpxchg necessary for read barrier bits.
movl %eax, %edx // edx: obj
movl %ecx, %eax // eax: old lock word.
subl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %ecx // ecx: new lock word with decremented count.
#ifndef USE_READ_BARRIER
mov %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
#else
lock cmpxchg %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx) // eax: old val, ecx: new val.
jnz .Lunlock_cmpxchg_fail // cmpxchg failed retry
#endif
ret
.Lunlock_cmpxchg_fail: // edx: obj
movl %edx, %eax // restore eax
jmp .Lretry_unlock
.Lslow_unlock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
// Monitor-exit without the inline fast path: always calls the C++ runtime
// artUnlockObjectFromCode(object, Thread*). eax = object.
DEFINE_FUNCTION art_quick_unlock_object_no_inline
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object_no_inline
// Thin wrapper over artIsAssignableFromCode(Class* klass, Class* ref_klass).
// eax = checked class, ecx = object's class; result is returned in eax.
// No callee-save frame is needed since the C++ helper cannot throw here.
DEFINE_FUNCTION art_quick_is_assignable
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_is_assignable
// checkcast helper: eax = target class, ecx = object's class. Returns
// normally when assignable; otherwise sets up a save-all frame and throws
// ClassCastException via artThrowClassCastException (does not return).
DEFINE_FUNCTION art_quick_check_cast
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testl %eax, %eax
jz 1f // jump forward if not assignable
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
CFI_ADJUST_CFA_OFFSET(12) // Reset unwind info so following code unwinds.
1:
// Restore the original eax/ecx (clobbered by the call) from the pushes above.
POP eax // pop arguments
POP ecx
addl LITERAL(4), %esp
CFI_ADJUST_CFA_OFFSET(-4)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
UNREACHABLE
END_FUNCTION art_quick_check_cast
// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
// Used to pop saved caller registers without clobbering the one that now
// holds a freshly-computed result (the read barrier's dest_reg).
MACRO2(POP_REG_NE, reg, exclude_reg)
.ifc RAW_VAR(reg), RAW_VAR(exclude_reg)
addl MACRO_LITERAL(4), %esp // discard the saved slot; keep exclude_reg's new value
CFI_ADJUST_CFA_OFFSET(-4)
.else
POP RAW_VAR(reg)
.endif
END_MACRO
/*
* Macro to insert read barrier, only used in art_quick_aput_obj.
* obj_reg and dest_reg are registers, offset is a defined literal such as
* MIRROR_OBJECT_CLASS_OFFSET.
* pop_eax is a boolean flag, indicating if eax is popped after the call.
* TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
*/
// With USE_READ_BARRIER: saves eax/ebx/edx/ecx, calls
// artReadBarrierSlow(ref, obj_reg, offset), moves the result into dest_reg,
// then restores the saved registers except dest_reg (and except eax when
// pop_eax != true — in that case eax's save slot is left on the stack for
// the caller to pop, see the asymmetric pop in art_quick_aput_obj).
// Without USE_READ_BARRIER: a plain load plus heap-reference unpoisoning.
MACRO4(READ_BARRIER, obj_reg, offset, dest_reg, pop_eax)
#ifdef USE_READ_BARRIER
PUSH eax // save registers used in art_quick_aput_obj
PUSH ebx
PUSH edx
PUSH ecx
// Outgoing argument set up
pushl MACRO_LITERAL((RAW_VAR(offset))) // pass offset, double parentheses are necessary
CFI_ADJUST_CFA_OFFSET(4)
PUSH RAW_VAR(obj_reg) // pass obj_reg
PUSH eax // pass ref, just pass eax for now since parameter ref is unused
call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
// No need to unpoison return value in eax, artReadBarrierSlow() would do the unpoisoning.
.ifnc RAW_VAR(dest_reg), eax
movl %eax, REG_VAR(dest_reg) // save loaded ref in dest_reg
.endif
addl MACRO_LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
POP_REG_NE ecx, RAW_VAR(dest_reg) // Restore args except dest_reg
POP_REG_NE edx, RAW_VAR(dest_reg)
POP_REG_NE ebx, RAW_VAR(dest_reg)
.ifc RAW_VAR(pop_eax), true
POP_REG_NE eax, RAW_VAR(dest_reg)
.endif
#else
movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg)
UNPOISON_HEAP_REF RAW_VAR(dest_reg)
#endif // USE_READ_BARRIER
END_MACRO
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
* eax = array, ecx = index, edx = value
*/
// Null-checks the array, then tail-jumps to the bound-checking variant;
// a null array tail-jumps to the NPE throw stub with registers intact.
DEFINE_FUNCTION art_quick_aput_obj_with_null_and_bound_check
testl %eax, %eax
jnz SYMBOL(art_quick_aput_obj_with_bound_check)
jmp SYMBOL(art_quick_throw_null_pointer_exception)
END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
// Bounds-checks index against the array length then tail-jumps to the store;
// on failure sets up (index, length) in (eax, ecx) for the bounds-throw stub.
// eax = array (non-null), ecx = index, edx = value. Clobbers ebx.
DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
movl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ebx
cmpl %ebx, %ecx
jb SYMBOL(art_quick_aput_obj) // unsigned compare also rejects negative indices
mov %ecx, %eax
mov %ebx, %ecx
jmp SYMBOL(art_quick_throw_array_bounds)
END_FUNCTION art_quick_aput_obj_with_bound_check
// Checked object array store: eax = array, ecx = index, edx = value.
// Null stores go straight through; otherwise the value's class is compared
// to the array's component type, falling back to artIsAssignableFromCode,
// and an incompatible store throws ArrayStoreException. Successful stores
// mark the card table for the GC write barrier.
DEFINE_FUNCTION art_quick_aput_obj
test %edx, %edx // store of null
jz .Ldo_aput_null
READ_BARRIER eax, MIRROR_OBJECT_CLASS_OFFSET, ebx, true
READ_BARRIER ebx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ebx, true
// value's type == array's component type - trivial assignability
#if defined(USE_READ_BARRIER)
READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, false
cmpl %eax, %ebx
POP eax // restore eax from the push in the beginning of READ_BARRIER macro
// This asymmetric push/pop saves a push of eax and maintains stack alignment.
#elif defined(USE_HEAP_POISONING)
PUSH eax // save eax
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
UNPOISON_HEAP_REF eax
cmpl %eax, %ebx
POP eax // restore eax
#else
cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx
#endif
jne .Lcheck_assignability
.Ldo_aput:
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
// Card-table write barrier: card[array >> 7] = card_table base byte.
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(7), %eax
movb %dl, (%edx, %eax)
ret
.Ldo_aput_null:
// Storing null needs neither a type check nor a card mark.
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
ret
.Lcheck_assignability:
PUSH eax // save arguments
PUSH ecx
PUSH edx
#if defined(USE_READ_BARRIER)
subl LITERAL(4), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(4)
READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, true
subl LITERAL(4), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg2 - type of the value to be stored
#elif defined(USE_HEAP_POISONING)
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
UNPOISON_HEAP_REF eax
PUSH eax // pass arg2 - type of the value to be stored
#else
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl MIRROR_OBJECT_CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
CFI_ADJUST_CFA_OFFSET(4)
#endif
PUSH ebx // pass arg1 - component type of the array
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
testl %eax, %eax
jz .Lthrow_array_store_exception
// Assignable: restore the original array/index/value and redo the store.
POP edx
POP ecx
POP eax
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) // do the aput
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(7), %eax
movb %dl, (%edx, %eax)
ret
CFI_ADJUST_CFA_OFFSET(12) // 3 POP after the jz for unwinding.
.Lthrow_array_store_exception:
POP edx
POP ecx
POP eax
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass arg2 - value
PUSH eax // pass arg1 - array
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
UNREACHABLE
END_FUNCTION art_quick_aput_obj
// Forwards (dst=eax, src=ecx, size=edx) to libc memcpy through the PLT.
// SETUP_GOT_NOSAVE is needed for the PIC PLT call and clobbers EBX.
DEFINE_FUNCTION art_quick_memcpy
SETUP_GOT_NOSAVE ebx // clobbers EBX
PUSH edx // pass arg3
PUSH ecx // pass arg2
PUSH eax // pass arg1
call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_memcpy
NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
// double -> long conversion: spills the xmm0 argument to the stack and calls
// the C helper art_d2l(jdouble); the jlong result comes back in edx:eax.
DEFINE_FUNCTION art_quick_d2l
subl LITERAL(12), %esp // alignment padding, room for argument
CFI_ADJUST_CFA_OFFSET(12)
movsd %xmm0, 0(%esp) // arg a
call SYMBOL(art_d2l) // (jdouble a)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_d2l
// float -> long conversion: spills the xmm0 argument to the stack and calls
// the C helper art_f2l(jfloat); the jlong result comes back in edx:eax.
DEFINE_FUNCTION art_quick_f2l
subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
movss %xmm0, 0(%esp) // arg a
call SYMBOL(art_f2l) // (jfloat a)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_f2l
// 64-bit signed division: a = ecx:eax, b = ebx:edx (hi:lo pairs).
// Calls artLdiv(jlong a, jlong b); result returned in edx:eax.
DEFINE_FUNCTION art_quick_ldiv
subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass arg4 b.hi
PUSH edx // pass arg3 b.lo
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
call SYMBOL(artLdiv) // (jlong a, jlong b)
addl LITERAL(28), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-28)
ret
END_FUNCTION art_quick_ldiv
// 64-bit signed remainder: a = ecx:eax, b = ebx:edx (hi:lo pairs).
// Calls artLmod(jlong a, jlong b); result returned in edx:eax.
DEFINE_FUNCTION art_quick_lmod
subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
PUSH ebx // pass arg4 b.hi
PUSH edx // pass arg3 b.lo
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
call SYMBOL(artLmod) // (jlong a, jlong b)
addl LITERAL(28), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-28)
ret
END_FUNCTION art_quick_lmod
// 64-bit multiply via three 32-bit multiplies (schoolbook, low 64 bits only):
// a = ecx:eax, b = ebx:edx on entry; result a*b returned in edx:eax.
DEFINE_FUNCTION art_quick_lmul
imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx)
imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx)
mul %edx // edx:eax = a.lo(eax) * b.lo(edx)
add %ebx, %ecx
add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi)
ret
END_FUNCTION art_quick_lmul
// 64-bit shift left: value in ecx:eax (hi:lo), shift amount in edx.
// x86 shld/shl only use cl mod 32, so shifts >= 32 are fixed up by moving
// the low word into the high word and zeroing the low word.
DEFINE_FUNCTION art_quick_lshl
// ecx:eax << edx
xchg %edx, %ecx // cl := shift amount; edx := high word
shld %cl,%eax,%edx // high word gets bits shifted in from the low word
shl %cl,%eax
test LITERAL(32), %cl // was the shift amount >= 32?
jz 1f
mov %eax, %edx
xor %eax, %eax
1:
ret
END_FUNCTION art_quick_lshl
// 64-bit arithmetic shift right: value in ecx:eax (hi:lo), amount in edx.
// For shifts >= 32 the high word moves to the low word and the high word is
// filled with the sign (sar by 31).
DEFINE_FUNCTION art_quick_lshr
// ecx:eax >> edx
xchg %edx, %ecx // cl := shift amount; edx := high word
shrd %cl,%edx,%eax // low word gets bits shifted in from the high word
sar %cl,%edx
test LITERAL(32),%cl // was the shift amount >= 32?
jz 1f
mov %edx, %eax
sar LITERAL(31), %edx // replicate the sign bit into the high word
1:
ret
END_FUNCTION art_quick_lshr
// 64-bit logical shift right: value in ecx:eax (hi:lo), amount in edx.
// For shifts >= 32 the high word moves to the low word and the high word is
// zeroed.
DEFINE_FUNCTION art_quick_lushr
// ecx:eax >>> edx
xchg %edx, %ecx // cl := shift amount; edx := high word
shrd %cl,%edx,%eax // low word gets bits shifted in from the high word
shr %cl,%edx
test LITERAL(32),%cl // was the shift amount >= 32?
jz 1f
mov %edx, %eax
xor %edx, %edx
1:
ret
END_FUNCTION art_quick_lushr
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
// Call artSet64InstanceFromCode with 4 word size arguments and the referrer.
DEFINE_FUNCTION art_quick_set64_instance
movd %ebx, %xmm0
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
movd %xmm0, %ebx
// Outgoing argument set up
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
pushl (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+12)(%esp) // pass referrer
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass high half of new_val
PUSH edx // pass low half of new_val
PUSH ecx // pass object
PUSH eax // pass field_idx
call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_instance
// Call artSet64StaticFromCode with 3 word size arguments plus with the referrer in the 2nd position
// so that new_val is aligned on even registers were we passing arguments in registers.
DEFINE_FUNCTION art_quick_set64_static
// TODO: Implement SETUP_GOT_NOSAVE for got_reg = ecx to avoid moving around the registers.
movd %ebx, %xmm0
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
movd %xmm0, %ebx
mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer
subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass high half of new_val
PUSH edx // pass low half of new_val
PUSH ecx // pass referrer
PUSH eax // pass field_idx
call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX
PUSH esp // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass receiver
PUSH eax // pass proxy method
call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
addl LITERAL(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE), %esp
CFI_ADJUST_CFA_OFFSET(-(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE))
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
* eax is the conflict ArtMethod.
* xmm7 is a hidden argument that holds the target interface method's dex method index.
*
* Note that this stub writes to eax.
* Because of lack of free registers, it also saves and restores edi.
*/
DEFINE_FUNCTION art_quick_imt_conflict_trampoline
PUSH EDI
movl 8(%esp), %edi // Load referrer
movl ART_METHOD_DEX_CACHE_METHODS_OFFSET_32(%edi), %edi // Load dex cache methods array
pushl ART_METHOD_JNI_OFFSET_32(%eax) // Push ImtConflictTable.
CFI_ADJUST_CFA_OFFSET(4)
movd %xmm7, %eax // get target method index stored in xmm7
movl 0(%edi, %eax, __SIZEOF_POINTER__), %edi // Load interface method
popl %eax // Pop ImtConflictTable.
CFI_ADJUST_CFA_OFFSET(-4)
.Limt_table_iterate:
cmpl %edi, 0(%eax)
jne .Limt_table_next_entry
// We successfully hit an entry in the table. Load the target method
// and jump to it.
POP EDI
movl __SIZEOF_POINTER__(%eax), %eax
jmp *ART_METHOD_QUICK_CODE_OFFSET_32(%eax)
.Limt_table_next_entry:
// If the entry is null, the interface method is not in the ImtConflictTable.
cmpl LITERAL(0), 0(%eax)
jz .Lconflict_trampoline
// Iterate over the entries of the ImtConflictTable.
addl LITERAL(2 * __SIZEOF_POINTER__), %eax
jmp .Limt_table_iterate
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
POP EDI
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END_FUNCTION art_quick_imt_conflict_trampoline
DEFINE_FUNCTION art_quick_resolution_trampoline
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx
movl %esp, %edi
PUSH EDI // pass SP. do not just PUSH ESP; that messes up unwinding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass receiver
PUSH eax // pass method
call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
test %eax, %eax // if code pointer is null goto deliver pending exception
jz 1f
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP
1:
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_resolution_trampoline
DEFINE_FUNCTION art_quick_generic_jni_trampoline
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX
movl %esp, %ebp // save SP at callee-save frame
CFI_DEF_CFA_REGISTER(ebp)
subl LITERAL(5120), %esp
// prepare for artQuickGenericJniTrampoline call
// (Thread*, SP)
// (esp) 4(esp) <= C calling convention
// fs:... ebp <= where they are
subl LITERAL(8), %esp // Padding for 16B alignment.
pushl %ebp // Pass SP (to ArtMethod).
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
call SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
// The C call will have registered the complete save-frame on success.
// The result of the call is:
// eax: pointer to native code, 0 on error.
// edx: pointer to the bottom of the used area of the alloca, can restore stack till there.
// Check for error = 0.
test %eax, %eax
jz .Lexception_in_native
// Release part of the alloca.
movl %edx, %esp
// On x86 there are no registers passed, so nothing to pop here.
// Native call.
call *%eax
// result sign extension is handled in C code
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
// (esp) 4(esp) 12(esp) <= C calling convention
// fs:... eax:edx fp0 <= where they are
subl LITERAL(20), %esp // Padding & pass float result.
fstpl (%esp)
pushl %edx // Pass int result.
pushl %eax
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
call SYMBOL(artQuickGenericJniEndTrampoline)
// Pending exceptions possible.
mov %fs:THREAD_EXCEPTION_OFFSET, %ebx
testl %ebx, %ebx
jnz .Lexception_in_native
// Tear down the alloca.
movl %ebp, %esp
CFI_DEF_CFA_REGISTER(esp)
// Tear down the callee-save frame.
// Remove space for FPR args and EAX
addl LITERAL(4 + 4 * 8), %esp
CFI_ADJUST_CFA_OFFSET(-(4 + 4 * 8))
POP ecx
addl LITERAL(4), %esp // Avoid edx, as it may be part of the result.
CFI_ADJUST_CFA_OFFSET(-4)
POP ebx
POP ebp // Restore callee saves
POP esi
POP edi
// Quick expects the return value to be in xmm0.
movd %eax, %xmm0
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
ret
.Lexception_in_native:
movl %fs:THREAD_TOP_QUICK_FRAME_OFFSET, %esp
// Do a call to push a new save-all frame required by the runtime.
call .Lexception_call
.Lexception_call:
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
DEFINE_FUNCTION art_quick_to_interpreter_bridge
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx // save frame
mov %esp, %edx // remember SP
PUSH eax // alignment padding
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass method
call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
// Return eax:edx in xmm0 also.
movd %eax, %xmm0
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
addl LITERAL(48), %esp // Remove FPRs and EAX, ECX, EDX, EBX.
CFI_ADJUST_CFA_OFFSET(-48)
POP ebp // Restore callee saves
POP esi
POP edi
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_to_interpreter_bridge
/*
* Routine that intercepts method calls and returns.
*/
DEFINE_FUNCTION art_quick_instrumentation_entry
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, edx
PUSH eax // Save eax which will be clobbered by the callee-save method.
subl LITERAL(12), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(12)
pushl FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-4+16(%esp) // Pass LR.
CFI_ADJUST_CFA_OFFSET(4)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // Pass receiver.
PUSH eax // Pass Method*.
call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR)
addl LITERAL(28), %esp // Pop arguments upto saved Method*.
CFI_ADJUST_CFA_OFFSET(-28)
movl 60(%esp), %edi // Restore edi.
movl %eax, 60(%esp) // Place code* over edi, just under return pc.
movl SYMBOL(art_quick_instrumentation_exit)@GOT(%ebx), %ebx
// Place instrumentation exit as return pc. ebx holds the GOT computed on entry.
movl %ebx, 64(%esp)
movl 0(%esp), %eax // Restore eax.
// Restore FPRs (extra 4 bytes of offset due to EAX push at top).
movsd 8(%esp), %xmm0
movsd 16(%esp), %xmm1
movsd 24(%esp), %xmm2
movsd 32(%esp), %xmm3
// Restore GPRs.
movl 40(%esp), %ecx // Restore ecx.
movl 44(%esp), %edx // Restore edx.
movl 48(%esp), %ebx // Restore ebx.
movl 52(%esp), %ebp // Restore ebp.
movl 56(%esp), %esi // Restore esi.
addl LITERAL(60), %esp // Wind stack back upto code*.
CFI_ADJUST_CFA_OFFSET(-60)
ret // Call method (and pop).
END_FUNCTION art_quick_instrumentation_entry
DEFINE_FUNCTION art_quick_instrumentation_exit
pushl LITERAL(0) // Push a fake return PC as there will be none on the stack.
CFI_ADJUST_CFA_OFFSET(4)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx
mov %esp, %ecx // Remember SP
subl LITERAL(8), %esp // Save float return value.
CFI_ADJUST_CFA_OFFSET(8)
movq %xmm0, (%esp)
PUSH edx // Save gpr return value.
PUSH eax
subl LITERAL(16), %esp // Align stack
CFI_ADJUST_CFA_OFFSET(16)
movq %xmm0, (%esp) // Pass float return value.
PUSH edx // Pass gpr return value.
PUSH eax
PUSH ecx // Pass SP.
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current.
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result)
mov %eax, %ecx // Move returned link register.
addl LITERAL(32), %esp // Pop arguments.
CFI_ADJUST_CFA_OFFSET(-32)
movl %edx, %ebx // Move returned link register for deopt
// (ebx is pretending to be our LR).
POP eax // Restore gpr return value.
POP edx
movq (%esp), %xmm0 // Restore fpr return value.
addl LITERAL(8), %esp
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
addl LITERAL(4), %esp // Remove fake return pc.
CFI_ADJUST_CFA_OFFSET(-4)
jmp *%ecx // Return.
END_FUNCTION art_quick_instrumentation_exit
/*
* Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
* will long jump to the upcall with a special exception of -1.
*/
DEFINE_FUNCTION art_quick_deoptimize
PUSH ebx // Entry point for a jump. Fake that we were called.
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx
subl LITERAL(12), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artDeoptimize) // artDeoptimize(Thread*)
UNREACHABLE
END_FUNCTION art_quick_deoptimize
/*
* Compiled code has requested that we deoptimize into the interpreter. The deoptimization
* will long jump to the interpreter bridge.
*/
DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx
subl LITERAL(12), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artDeoptimizeFromCompiledCode) // artDeoptimizeFromCompiledCode(Thread*)
UNREACHABLE
END_FUNCTION art_quick_deoptimize_from_compiled_code
/*
* String's compareTo.
*
* On entry:
* eax: this string object (known non-null)
* ecx: comp string object (known non-null)
*/
DEFINE_FUNCTION art_quick_string_compareto
PUSH esi // push callee save reg
PUSH edi // push callee save reg
mov MIRROR_STRING_COUNT_OFFSET(%eax), %edx
mov MIRROR_STRING_COUNT_OFFSET(%ecx), %ebx
lea MIRROR_STRING_VALUE_OFFSET(%eax), %esi
lea MIRROR_STRING_VALUE_OFFSET(%ecx), %edi
/* Calculate min length and count diff */
mov %edx, %ecx
mov %edx, %eax
subl %ebx, %eax
cmovg %ebx, %ecx
/*
* At this point we have:
* eax: value to return if first part of strings are equal
* ecx: minimum among the lengths of the two strings
* esi: pointer to this string data
* edi: pointer to comp string data
*/
jecxz .Lkeep_length
repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx
jne .Lnot_equal
.Lkeep_length:
POP edi // pop callee save reg
POP esi // pop callee save reg
ret
.balign 16
.Lnot_equal:
movzwl -2(%esi), %eax // get last compared char from this string
movzwl -2(%edi), %ecx // get last compared char from comp string
subl %ecx, %eax // return the difference
POP edi // pop callee save reg
POP esi // pop callee save reg
ret
END_FUNCTION art_quick_string_compareto
// Return from a nested signal:
// Entry:
// eax: address of jmp_buf in TLS
DEFINE_FUNCTION art_nested_signal_return
SETUP_GOT_NOSAVE ebx // sets %ebx for call into PLT
movl LITERAL(1), %ecx
PUSH ecx // second arg to longjmp (1)
PUSH eax // first arg to longjmp (jmp_buf)
call PLT_SYMBOL(longjmp)
UNREACHABLE
END_FUNCTION art_nested_signal_return
DEFINE_FUNCTION art_quick_read_barrier_mark
PUSH eax // pass arg1 - obj
call SYMBOL(artReadBarrierMark) // artReadBarrierMark(obj)
addl LITERAL(4), %esp // pop argument
CFI_ADJUST_CFA_OFFSET(-4)
ret
END_FUNCTION art_quick_read_barrier_mark
DEFINE_FUNCTION art_quick_read_barrier_slow
PUSH edx // pass arg3 - offset
PUSH ecx // pass arg2 - obj
PUSH eax // pass arg1 - ref
call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_read_barrier_slow
DEFINE_FUNCTION art_quick_read_barrier_for_root_slow
PUSH eax // pass arg1 - root
call SYMBOL(artReadBarrierForRootSlow) // artReadBarrierForRootSlow(root)
addl LITERAL(4), %esp // pop argument
CFI_ADJUST_CFA_OFFSET(-4)
ret
END_FUNCTION art_quick_read_barrier_for_root_slow
/*
* On stack replacement stub.
* On entry:
* [sp] = return address
* [sp + 4] = stack to copy
* [sp + 8] = size of stack
* [sp + 12] = pc to call
* [sp + 16] = JValue* result
* [sp + 20] = shorty
* [sp + 24] = thread
*/
DEFINE_FUNCTION art_quick_osr_stub
// Save native callee saves.
PUSH ebp
PUSH ebx
PUSH esi
PUSH edi
mov 4+16(%esp), %esi // ESI = argument array
mov 8+16(%esp), %ecx // ECX = size of args
mov 12+16(%esp), %ebx // EBX = pc to call
mov %esp, %ebp // Save stack pointer
andl LITERAL(0xFFFFFFF0), %esp // Align stack
PUSH ebp // Save old stack pointer
subl LITERAL(12), %esp // Align stack
movl LITERAL(0), (%esp) // Store null for ArtMethod* slot
call .Losr_entry
// Restore stack pointer.
addl LITERAL(12), %esp
POP ebp
mov %ebp, %esp
// Restore callee saves.
POP edi
POP esi
POP ebx
POP ebp
mov 16(%esp), %ecx // Get JValue result
mov %eax, (%ecx) // Store the result assuming it is a long, int or Object*
mov %edx, 4(%ecx) // Store the other half of the result
mov 20(%esp), %edx // Get the shorty
cmpb LITERAL(68), (%edx) // Test if result type char == 'D'
je .Losr_return_double_quick
cmpb LITERAL(70), (%edx) // Test if result type char == 'F'
je .Losr_return_float_quick
ret
.Losr_return_double_quick:
movsd %xmm0, (%ecx) // Store the floating point result
ret
.Losr_return_float_quick:
movss %xmm0, (%ecx) // Store the floating point result
ret
.Losr_entry:
subl LITERAL(4), %ecx // Given stack size contains pushed frame pointer, substract it.
subl %ecx, %esp
mov %esp, %edi // EDI = beginning of stack
rep movsb // while (ecx--) { *edi++ = *esi++ }
jmp *%ebx
END_FUNCTION art_quick_osr_stub
// TODO: implement these!
UNIMPLEMENTED art_quick_memcmp16
|
abforce/xposed_art_n
| 1,344
|
runtime/arch/x86/jni_entrypoints_x86.S
|
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_x86.S"
/*
* Jni dlsym lookup stub.
*/
DEFINE_FUNCTION art_jni_dlsym_lookup_stub
subl LITERAL(8), %esp // align stack
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artFindNativeMethod) // (Thread*)
addl LITERAL(12), %esp // remove argument & padding
CFI_ADJUST_CFA_OFFSET(-12)
testl %eax, %eax // check if returned method code is null
jz .Lno_native_code_found // if null, jump to return to handle
jmp *%eax // otherwise, tail call to intended method
.Lno_native_code_found:
ret
END_FUNCTION art_jni_dlsym_lookup_stub
|
abforce/xposed_art_n
| 24,766
|
runtime/arch/x86/memcmp16_x86.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_x86.S"
#define MEMCMP __memcmp16
/* int32_t memcmp16_compare(const uint16_t* s0, const uint16_t* s1, size_t count); */
#ifndef L
# define L(label) .L##label
#endif
#define CFI_PUSH(REG) \
CFI_ADJUST_CFA_OFFSET(4); \
CFI_REL_OFFSET(REG, 0)
#define CFI_POP(REG) \
CFI_ADJUST_CFA_OFFSET(-4); \
CFI_RESTORE(REG)
#define PUSH(REG) pushl REG; CFI_PUSH (REG)
#define POP(REG) popl REG; CFI_POP (REG)
#define PARMS 4
#define BLK1 PARMS
#define BLK2 BLK1+4
#define LEN BLK2+4
#define RETURN_END POP (%edi); POP (%esi); POP (%ebx); ret
#define RETURN RETURN_END; CFI_RESTORE_STATE; CFI_REMEMBER_STATE
DEFINE_FUNCTION MEMCMP
movl LEN(%esp), %ecx
shl $1, %ecx
jz L(zero)
movl BLK1(%esp), %eax
cmp $48, %ecx
movl BLK2(%esp), %edx
jae L(48bytesormore)
PUSH (%ebx)
add %ecx, %edx
add %ecx, %eax
jmp L(less48bytes)
CFI_POP (%ebx)
.p2align 4
L(zero):
xor %eax, %eax
ret
.p2align 4
L(48bytesormore):
PUSH (%ebx)
PUSH (%esi)
PUSH (%edi)
CFI_REMEMBER_STATE
movdqu (%eax), %xmm3
movdqu (%edx), %xmm0
movl %eax, %edi
movl %edx, %esi
pcmpeqb %xmm0, %xmm3
pmovmskb %xmm3, %edx
lea 16(%edi), %edi
sub $0xffff, %edx
lea 16(%esi), %esi
jnz L(less16bytes)
mov %edi, %edx
and $0xf, %edx
xor %edx, %edi
sub %edx, %esi
add %edx, %ecx
mov %esi, %edx
and $0xf, %edx
jz L(shr_0)
xor %edx, %esi
cmp $0, %edx
je L(shr_0)
cmp $2, %edx
je L(shr_2)
cmp $4, %edx
je L(shr_4)
cmp $6, %edx
je L(shr_6)
cmp $8, %edx
je L(shr_8)
cmp $10, %edx
je L(shr_10)
cmp $12, %edx
je L(shr_12)
jmp L(shr_14)
.p2align 4
L(shr_0):
cmp $80, %ecx
jae L(shr_0_gobble)
lea -48(%ecx), %ecx
xor %eax, %eax
movaps (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movaps 16(%esi), %xmm2
pcmpeqb 16(%edi), %xmm2
pand %xmm1, %xmm2
pmovmskb %xmm2, %edx
add $32, %edi
add $32, %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea (%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_0_gobble):
lea -48(%ecx), %ecx
movdqa (%esi), %xmm0
xor %eax, %eax
pcmpeqb (%edi), %xmm0
sub $32, %ecx
movdqa 16(%esi), %xmm2
pcmpeqb 16(%edi), %xmm2
L(shr_0_gobble_loop):
pand %xmm0, %xmm2
sub $32, %ecx
pmovmskb %xmm2, %edx
movdqa %xmm0, %xmm1
movdqa 32(%esi), %xmm0
movdqa 48(%esi), %xmm2
sbb $0xffff, %edx
pcmpeqb 32(%edi), %xmm0
pcmpeqb 48(%edi), %xmm2
lea 32(%edi), %edi
lea 32(%esi), %esi
jz L(shr_0_gobble_loop)
pand %xmm0, %xmm2
cmp $0, %ecx
jge L(shr_0_gobble_loop_next)
inc %edx
add $32, %ecx
L(shr_0_gobble_loop_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm2, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea (%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_2):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_2_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $2,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $2,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 2(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_2_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $2,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $2,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_2_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $2,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $2,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_2_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_2_gobble_next)
inc %edx
add $32, %ecx
L(shr_2_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 2(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_4):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_4_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $4,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $4,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 4(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_4_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $4,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $4,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_4_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $4,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $4,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_4_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_4_gobble_next)
inc %edx
add $32, %ecx
L(shr_4_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 4(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_6):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_6_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $6,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $6,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 6(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_6_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $6,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $6,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_6_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $6,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $6,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_6_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_6_gobble_next)
inc %edx
add $32, %ecx
L(shr_6_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 6(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_8):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_8_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $8,(%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $8,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 8(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_8_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $8,(%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $8,16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_8_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $8,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $8,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_8_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_8_gobble_next)
inc %edx
add $32, %ecx
L(shr_8_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 8(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_10):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_10_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $10, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $10,%xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 10(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_10_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $10, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $10, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_10_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $10,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $10,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_10_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_10_gobble_next)
inc %edx
add $32, %ecx
L(shr_10_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 10(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_12):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_12_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $12, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $12, %xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 12(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_12_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $12, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $12, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_12_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $12,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $12,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_12_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_12_gobble_next)
inc %edx
add $32, %ecx
L(shr_12_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 12(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_14):
cmp $80, %ecx
lea -48(%ecx), %ecx
mov %edx, %eax
jae L(shr_14_gobble)
movdqa 16(%esi), %xmm1
movdqa %xmm1, %xmm2
palignr $14, (%esi), %xmm1
pcmpeqb (%edi), %xmm1
movdqa 32(%esi), %xmm3
palignr $14, %xmm2, %xmm3
pcmpeqb 16(%edi), %xmm3
pand %xmm1, %xmm3
pmovmskb %xmm3, %edx
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 14(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(shr_14_gobble):
sub $32, %ecx
movdqa 16(%esi), %xmm0
palignr $14, (%esi), %xmm0
pcmpeqb (%edi), %xmm0
movdqa 32(%esi), %xmm3
palignr $14, 16(%esi), %xmm3
pcmpeqb 16(%edi), %xmm3
L(shr_14_gobble_loop):
pand %xmm0, %xmm3
sub $32, %ecx
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
movdqa 64(%esi), %xmm3
palignr $14,48(%esi), %xmm3
sbb $0xffff, %edx
movdqa 48(%esi), %xmm0
palignr $14,32(%esi), %xmm0
pcmpeqb 32(%edi), %xmm0
lea 32(%esi), %esi
pcmpeqb 48(%edi), %xmm3
lea 32(%edi), %edi
jz L(shr_14_gobble_loop)
pand %xmm0, %xmm3
cmp $0, %ecx
jge L(shr_14_gobble_next)
inc %edx
add $32, %ecx
L(shr_14_gobble_next):
test %edx, %edx
jnz L(exit)
pmovmskb %xmm3, %edx
movdqa %xmm0, %xmm1
lea 32(%edi), %edi
lea 32(%esi), %esi
sub $0xffff, %edx
jnz L(exit)
lea (%ecx, %edi,1), %eax
lea 14(%ecx, %esi,1), %edx
POP (%edi)
POP (%esi)
jmp L(less48bytes)
CFI_RESTORE_STATE
CFI_REMEMBER_STATE
.p2align 4
L(exit):
pmovmskb %xmm1, %ebx
sub $0xffff, %ebx
jz L(first16bytes)
lea -16(%esi), %esi
lea -16(%edi), %edi
mov %ebx, %edx
L(first16bytes):
add %eax, %esi
L(less16bytes):
test %dl, %dl
jz L(next_four_words)
test $15, %dl
jz L(second_two_words)
test $3, %dl
jz L(second_word)
movzwl -16(%edi), %eax
movzwl -16(%esi), %ebx
subl %ebx, %eax
RETURN
.p2align 4
L(second_word):
movzwl -14(%edi), %eax
movzwl -14(%esi), %ebx
subl %ebx, %eax
RETURN
.p2align 4
L(second_two_words):
test $63, %dl
jz L(fourth_word)
movzwl -12(%edi), %eax
movzwl -12(%esi), %ebx
subl %ebx, %eax
RETURN
.p2align 4
L(fourth_word):
movzwl -10(%edi), %eax
movzwl -10(%esi), %ebx
subl %ebx, %eax
RETURN
.p2align 4
L(next_four_words):
test $15, %dh
jz L(fourth_two_words)
test $3, %dh
jz L(sixth_word)
movzwl -8(%edi), %eax
movzwl -8(%esi), %ebx
subl %ebx, %eax
RETURN
.p2align 4
L(sixth_word):
movzwl -6(%edi), %eax
movzwl -6(%esi), %ebx
subl %ebx, %eax
RETURN
.p2align 4
L(fourth_two_words):
test $63, %dh
jz L(eighth_word)
movzwl -4(%edi), %eax
movzwl -4(%esi), %ebx
subl %ebx, %eax
RETURN
.p2align 4
L(eighth_word):
movzwl -2(%edi), %eax
movzwl -2(%esi), %ebx
subl %ebx, %eax
RETURN
CFI_PUSH (%ebx)
.p2align 4
L(more8bytes):
cmp $16, %ecx
jae L(more16bytes)
cmp $8, %ecx
je L(8bytes)
cmp $10, %ecx
je L(10bytes)
cmp $12, %ecx
je L(12bytes)
jmp L(14bytes)
.p2align 4
L(more16bytes):
cmp $24, %ecx
jae L(more24bytes)
cmp $16, %ecx
je L(16bytes)
cmp $18, %ecx
je L(18bytes)
cmp $20, %ecx
je L(20bytes)
jmp L(22bytes)
.p2align 4
L(more24bytes):
cmp $32, %ecx
jae L(more32bytes)
cmp $24, %ecx
je L(24bytes)
cmp $26, %ecx
je L(26bytes)
cmp $28, %ecx
je L(28bytes)
jmp L(30bytes)
.p2align 4
L(more32bytes):
cmp $40, %ecx
jae L(more40bytes)
cmp $32, %ecx
je L(32bytes)
cmp $34, %ecx
je L(34bytes)
cmp $36, %ecx
je L(36bytes)
jmp L(38bytes)
.p2align 4
L(less48bytes):
cmp $8, %ecx
jae L(more8bytes)
cmp $2, %ecx
je L(2bytes)
cmp $4, %ecx
je L(4bytes)
jmp L(6bytes)
.p2align 4
L(more40bytes):
cmp $40, %ecx
je L(40bytes)
cmp $42, %ecx
je L(42bytes)
cmp $44, %ecx
je L(44bytes)
jmp L(46bytes)
.p2align 4
L(46bytes):
movzwl -46(%eax), %ecx
movzwl -46(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(44bytes):
movzwl -44(%eax), %ecx
movzwl -44(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(42bytes):
movzwl -42(%eax), %ecx
movzwl -42(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(40bytes):
movzwl -40(%eax), %ecx
movzwl -40(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(38bytes):
movzwl -38(%eax), %ecx
movzwl -38(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(36bytes):
movzwl -36(%eax), %ecx
movzwl -36(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(34bytes):
movzwl -34(%eax), %ecx
movzwl -34(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(32bytes):
movzwl -32(%eax), %ecx
movzwl -32(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(30bytes):
movzwl -30(%eax), %ecx
movzwl -30(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(28bytes):
movzwl -28(%eax), %ecx
movzwl -28(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(26bytes):
movzwl -26(%eax), %ecx
movzwl -26(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(24bytes):
movzwl -24(%eax), %ecx
movzwl -24(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(22bytes):
movzwl -22(%eax), %ecx
movzwl -22(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(20bytes):
movzwl -20(%eax), %ecx
movzwl -20(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(18bytes):
movzwl -18(%eax), %ecx
movzwl -18(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(16bytes):
movzwl -16(%eax), %ecx
movzwl -16(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(14bytes):
movzwl -14(%eax), %ecx
movzwl -14(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(12bytes):
movzwl -12(%eax), %ecx
movzwl -12(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(10bytes):
movzwl -10(%eax), %ecx
movzwl -10(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(8bytes):
movzwl -8(%eax), %ecx
movzwl -8(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(6bytes):
movzwl -6(%eax), %ecx
movzwl -6(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(4bytes):
movzwl -4(%eax), %ecx
movzwl -4(%edx), %ebx
subl %ebx, %ecx
jne L(memcmp16_exit)
L(2bytes):
movzwl -2(%eax), %eax
movzwl -2(%edx), %ebx
subl %ebx, %eax
POP (%ebx)
ret
CFI_PUSH (%ebx)
.p2align 4
L(memcmp16_exit):
POP (%ebx)
mov %ecx, %eax
ret
END_FUNCTION MEMCMP
|
abforce/xposed_art_n
| 5,219
|
runtime/arch/x86_64/asm_support_x86_64.S
|
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
#define ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
#include "asm_support_x86_64.h"
// Regular gas(1) & current clang/llvm assembler support named macro parameters.
// MACROn declares an assembler macro taking n named arguments; close with END_MACRO.
#define MACRO0(macro_name) .macro macro_name
#define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
#define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
#define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
#define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4
#define END_MACRO .endm
#if defined(__clang__)
// Clang/llvm does not support .altmacro. However, the clang/llvm preprocessor doesn't
// separate the backslash and parameter by a space. Everything just works.
#define RAW_VAR(name) \name
#define VAR(name) SYMBOL(\name)
#define PLT_VAR(name) \name@PLT
#define REG_VAR(name) %\name
#define CALL_MACRO(name) \name
#else
// Regular gas(1) uses \argument_name for macro arguments.
// We need to turn on alternate macro syntax so we can use & instead or the preprocessor
// will screw us by inserting a space between the \ and the name. Even in this mode there's
// no special meaning to $, so literals are still just $x. The use of altmacro means % is a
// special character meaning care needs to be taken when passing registers as macro
// arguments.
.altmacro
#define RAW_VAR(name) name&
#define VAR(name) name&
#define PLT_VAR(name) name&@PLT
#define REG_VAR(name) %name
#define CALL_MACRO(name) name&
#endif
// LITERAL wraps an immediate for direct use; MACRO_LITERAL is the same but usable
// inside macro bodies (Apple's assembler needs $$ there to escape macro expansion).
#define LITERAL(value) $value
#if defined(__APPLE__)
#define MACRO_LITERAL(value) $$(value)
#else
#define MACRO_LITERAL(value) $value
#endif
// ELF symbol type/size annotations; Mach-O (Apple) has no equivalents, so they expand
// to nothing there.
#if defined(__APPLE__)
#define FUNCTION_TYPE(name)
#define SIZE(name)
#else
#define FUNCTION_TYPE(name) .type name, @function
#define SIZE(name) .size name, .-name
#endif
// CFI support.
#if !defined(__APPLE__)
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_ADJUST_CFA_OFFSET(size) .cfi_adjust_cfa_offset size
#define CFI_DEF_CFA(reg,size) .cfi_def_cfa reg,size
#define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
#define CFI_RESTORE(reg) .cfi_restore reg
#define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
#else
// Mac OS doesn't like cfi_* directives.
#define CFI_STARTPROC
#define CFI_ENDPROC
#define CFI_ADJUST_CFA_OFFSET(size)
#define CFI_DEF_CFA(reg,size)
#define CFI_DEF_CFA_REGISTER(reg)
#define CFI_RESTORE(reg)
#define CFI_REL_OFFSET(reg,size)
#endif
// Symbols. Mach-O mangles C-visible symbols with a leading underscore; ELF does not.
#if !defined(__APPLE__)
#define SYMBOL(name) name
#define PLT_SYMBOL(name) name ## @PLT
#else
#define SYMBOL(name) _ ## name
#define PLT_SYMBOL(name) _ ## name
#endif
/* Cache alignment for function entry */
MACRO0(ALIGN_FUNCTION_ENTRY)
.balign 16
END_MACRO
// TODO: we might need to use SYMBOL() here to add the underscore prefix
// for mac builds.
MACRO1(DEFINE_FUNCTION, c_name)
FUNCTION_TYPE(SYMBOL(\c_name))
ASM_HIDDEN SYMBOL(\c_name)
.globl VAR(c_name)
ALIGN_FUNCTION_ENTRY
VAR(c_name):
CFI_STARTPROC
// Ensure we get a sane starting CFA.
CFI_DEF_CFA(rsp, 8)
END_MACRO
MACRO1(END_FUNCTION, c_name)
CFI_ENDPROC
SIZE(SYMBOL(\c_name))
END_MACRO
MACRO1(PUSH, reg)
pushq REG_VAR(reg)
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(REG_VAR(reg), 0)
END_MACRO
MACRO1(POP, reg)
popq REG_VAR(reg)
CFI_ADJUST_CFA_OFFSET(-8)
CFI_RESTORE(REG_VAR(reg))
END_MACRO
MACRO1(UNIMPLEMENTED,name)
FUNCTION_TYPE(SYMBOL(\name))
ASM_HIDDEN VAR(name)
.globl VAR(name)
ALIGN_FUNCTION_ENTRY
VAR(name):
CFI_STARTPROC
int3
int3
CFI_ENDPROC
SIZE(SYMBOL(\name))
END_MACRO
MACRO0(UNREACHABLE)
int3
END_MACRO
MACRO0(UNTESTED)
int3
END_MACRO
// Macros to poison (negate) the reference for heap poisoning.
MACRO1(POISON_HEAP_REF, rRef)
#ifdef USE_HEAP_POISONING
negl REG_VAR(rRef)
#endif // USE_HEAP_POISONING
END_MACRO
// Macros to unpoison (negate) the reference for heap poisoning.
MACRO1(UNPOISON_HEAP_REF, rRef)
#ifdef USE_HEAP_POISONING
negl REG_VAR(rRef)
#endif // USE_HEAP_POISONING
END_MACRO
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
|
abforce/xposed_art_n
| 2,389
|
runtime/arch/x86_64/jni_entrypoints_x86_64.S
|
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_x86_64.S"
/*
* Jni dlsym lookup stub.
*/
// Looks up the native implementation for the current JNI method and tail-calls it.
// All integer and FP argument registers are preserved around the C call to
// artFindNativeMethod(Thread*) so the target receives the original JNI arguments
// untouched. xmm12-xmm15 are also saved/restored here — presumably because they are
// treated as callee-saved by ART-compiled managed code (TODO confirm against the
// managed-code FP spill convention used elsewhere in this runtime).
DEFINE_FUNCTION art_jni_dlsym_lookup_stub
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH r9 // Arg.
PUSH r8 // Arg.
PUSH rdi // JniEnv.
PUSH rsi // Arg.
PUSH rdx // Arg.
PUSH rcx // Arg.
// Create space for FPR args, plus padding for alignment
// (96 bytes of xmm saves + 8 bytes padding keeps rsp 16-byte aligned at the call).
subq LITERAL(72 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(72 + 4 * 8)
// Save FPRs.
movq %xmm0, 0(%rsp)
movq %xmm1, 8(%rsp)
movq %xmm2, 16(%rsp)
movq %xmm3, 24(%rsp)
movq %xmm4, 32(%rsp)
movq %xmm5, 40(%rsp)
movq %xmm6, 48(%rsp)
movq %xmm7, 56(%rsp)
movq %xmm12, 64(%rsp)
movq %xmm13, 72(%rsp)
movq %xmm14, 80(%rsp)
movq %xmm15, 88(%rsp)
// prepare call
movq %gs:THREAD_SELF_OFFSET, %rdi // RDI := Thread::Current()
// call
call PLT_SYMBOL(artFindNativeMethod) // (Thread*)
// restore arguments
movq 0(%rsp), %xmm0
movq 8(%rsp), %xmm1
movq 16(%rsp), %xmm2
movq 24(%rsp), %xmm3
movq 32(%rsp), %xmm4
movq 40(%rsp), %xmm5
movq 48(%rsp), %xmm6
movq 56(%rsp), %xmm7
movq 64(%rsp), %xmm12
movq 72(%rsp), %xmm13
movq 80(%rsp), %xmm14
movq 88(%rsp), %xmm15
addq LITERAL(72 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(-72 - 4 * 8)
POP rcx // Arg.
POP rdx // Arg.
POP rsi // Arg.
POP rdi // JniEnv.
POP r8 // Arg.
POP r9 // Arg.
testq %rax, %rax // check if returned method code is null
jz .Lno_native_code_found // if null, jump to return to handle
jmp *%rax // otherwise, tail call to intended method
.Lno_native_code_found:
ret
END_FUNCTION art_jni_dlsym_lookup_stub
|
abforce/xposed_art_n
| 29,915
|
runtime/arch/x86_64/memcmp16_x86_64.S
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_x86_64.S"
#define MEMCMP __memcmp16
/*
* Half of Silvermont L1 Data Cache size
*(see original file cache.h in bionic/libc/arch-x86_64/).
* This value is used for specific optimization on big lengths.
*/
#define DATA_CACHE_SIZE_HALF (12*1024)
#ifndef L
# define L(label) .L##label
#endif
#ifndef ALIGN
# define ALIGN(n) .p2align n
#endif
/* JMPTBL stores jump-table entries as 32-bit offsets relative to the table base,
* so the table is position independent. */
#define JMPTBL(I, B) (I - B)
/* Computed goto: load the 32-bit offset at TABLE[INDEX] (scaled by SCALE),
* add the RIP-relative table base, and jump there. Clobbers r11 and rcx.
* The trailing ud2 traps if the indirect jump ever falls through. */
#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
lea TABLE(%rip), %r11; \
movslq (%r11, INDEX, SCALE), %rcx; \
add %r11, %rcx; \
jmp *%rcx; \
ud2
/*
* int32_t __memcmp16(const uint16_t* s1, const uint16_t* s2, size_t count)
* In:  rdi = s1, rsi = s2, rdx = count in 16-bit elements.
* xmm0 is kept zero for the whole function: with a zero mask,
* "ptest %xmmN, %xmm0" sets CF iff xmmN is all-zero, so after pxor of the two
* inputs, "jnc" is taken exactly when a difference was found.
* Short inputs (<= 79 bytes) dispatch through L(table_64bytes), indexed by the
* byte count, with both pointers advanced to one-past-the-end first so the tail
* handlers can use negative offsets.
*/
DEFINE_FUNCTION MEMCMP
pxor %xmm0, %xmm0 /* zero mask for the ptest idiom above */
shl $1, %rdx /* rdx := byte count (elements * 2) */
cmp $79, %rdx
ja L(79bytesormore)
add %rdx, %rsi /* point both at end of buffer */
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
ALIGN (4)
/*
* >= 80 bytes: compare the first 16 unaligned bytes, then round rsi up to a
* 16-byte boundary, shift rdi/rdx by the same amount, and pick a path based on
* whether rdi also ended up aligned and on the remaining length.
*/
L(79bytesormore):
movdqu (%rsi), %xmm1
movdqu (%rdi), %xmm2
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
mov %rsi, %rcx
and $-16, %rsi /* align rsi down ... */
add $16, %rsi /* ... then up to the next 16-byte boundary */
sub %rsi, %rcx /* rcx = -(bytes skipped) */
sub %rcx, %rdi /* advance rdi by the same amount */
add %rcx, %rdx /* and shrink the remaining length */
test $0xf, %rdi
jz L(2aligned) /* both operands now 16-byte aligned */
cmp $128, %rdx
ja L(128bytesormore)
/* 64..128 bytes left, rsi aligned / rdi unaligned: four 16-byte compares,
* optionally two more, then table-dispatch on the remainder. */
L(less128bytes):
sub $64, %rdx
movdqu (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqu 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
movdqu 32(%rdi), %xmm2
pxor 32(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(48bytesin256)
movdqu 48(%rdi), %xmm2
pxor 48(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(64bytesin256)
cmp $32, %rdx
jb L(less32bytesin64)
movdqu 64(%rdi), %xmm2
pxor 64(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(80bytesin256)
movdqu 80(%rdi), %xmm2
pxor 80(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(96bytesin256)
sub $32, %rdx
add $32, %rdi
add $32, %rsi
L(less32bytesin64):
add $64, %rdi
add $64, %rsi
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
L(128bytesormore):
cmp $512, %rdx
ja L(512bytesormore)
cmp $256, %rdx
ja L(less512bytes)
/* 128..256 bytes left: unrolled 8x16-byte compares, then loop back into the
* smaller handlers for the remainder. */
L(less256bytes):
sub $128, %rdx
movdqu (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqu 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
movdqu 32(%rdi), %xmm2
pxor 32(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(48bytesin256)
movdqu 48(%rdi), %xmm2
pxor 48(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(64bytesin256)
movdqu 64(%rdi), %xmm2
pxor 64(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(80bytesin256)
movdqu 80(%rdi), %xmm2
pxor 80(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(96bytesin256)
movdqu 96(%rdi), %xmm2
pxor 96(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(112bytesin256)
movdqu 112(%rdi), %xmm2
pxor 112(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(128bytesin256)
add $128, %rsi
add $128, %rdi
cmp $64, %rdx
jae L(less128bytes)
cmp $32, %rdx
jb L(less32bytesin128)
movdqu (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqu 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
sub $32, %rdx
add $32, %rdi
add $32, %rsi
L(less32bytesin128):
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
/* 256..512 bytes left (rdi unaligned): unrolled 16x16-byte compares, then fall
 * back to the smaller handlers / table dispatch for the remainder. */
L(less512bytes):
sub $256, %rdx
movdqu (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqu 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
movdqu 32(%rdi), %xmm2
pxor 32(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(48bytesin256)
movdqu 48(%rdi), %xmm2
pxor 48(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(64bytesin256)
movdqu 64(%rdi), %xmm2
pxor 64(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(80bytesin256)
movdqu 80(%rdi), %xmm2
pxor 80(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(96bytesin256)
movdqu 96(%rdi), %xmm2
pxor 96(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(112bytesin256)
movdqu 112(%rdi), %xmm2
pxor 112(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(128bytesin256)
movdqu 128(%rdi), %xmm2
pxor 128(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(144bytesin256)
movdqu 144(%rdi), %xmm2
pxor 144(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(160bytesin256)
movdqu 160(%rdi), %xmm2
pxor 160(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(176bytesin256)
movdqu 176(%rdi), %xmm2
pxor 176(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(192bytesin256)
movdqu 192(%rdi), %xmm2
pxor 192(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(208bytesin256)
movdqu 208(%rdi), %xmm2
pxor 208(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(224bytesin256)
movdqu 224(%rdi), %xmm2
pxor 224(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(240bytesin256)
movdqu 240(%rdi), %xmm2
pxor 240(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(256bytesin256)
add $256, %rsi
add $256, %rdi
cmp $128, %rdx
jae L(less256bytes)
cmp $64, %rdx
jae L(less128bytes)
cmp $32, %rdx
jb L(less32bytesin256)
movdqu (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqu 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
sub $32, %rdx
add $32, %rdi
add $32, %rsi
L(less32bytesin256):
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
ALIGN (4)
/*
 * > 512 bytes (rdi unaligned): choose between a plain 64-byte-per-iteration
 * loop and a prefetching loop depending on whether the length exceeds
 * 1.5 * DATA_CACHE_SIZE_HALF (i.e. the data likely lives in L2/L3).
 * Each iteration ORs the xor-differences of four 16-byte chunks into xmm1 so a
 * single ptest per 64 bytes detects any mismatch; the mismatching chunk is then
 * located at L(64bytesormore_loop_end).
 */
L(512bytesormore):
#ifdef DATA_CACHE_SIZE_HALF
mov $DATA_CACHE_SIZE_HALF, %r8
#else
mov __x86_64_data_cache_size_half(%rip), %r8
#endif
mov %r8, %r9
shr $1, %r8
add %r9, %r8 /* r8 = 1.5 * (half L1d size) */
cmp %r8, %rdx
ja L(L2_L3_cache_unaglined)
sub $64, %rdx
ALIGN (4)
L(64bytesormore_loop):
movdqu (%rdi), %xmm2
pxor (%rsi), %xmm2
movdqa %xmm2, %xmm1
movdqu 16(%rdi), %xmm3
pxor 16(%rsi), %xmm3
por %xmm3, %xmm1
movdqu 32(%rdi), %xmm4
pxor 32(%rsi), %xmm4
por %xmm4, %xmm1
movdqu 48(%rdi), %xmm5
pxor 48(%rsi), %xmm5
por %xmm5, %xmm1
ptest %xmm1, %xmm0
jnc L(64bytesormore_loop_end)
add $64, %rsi
add $64, %rdi
sub $64, %rdx
jae L(64bytesormore_loop)
add $64, %rdx
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
/* Same loop with non-temporal prefetch 448 bytes ahead for lengths that
 * overflow the L1 data cache. */
L(L2_L3_cache_unaglined):
sub $64, %rdx
ALIGN (4)
L(L2_L3_unaligned_128bytes_loop):
prefetchnta 0x1c0(%rdi)
prefetchnta 0x1c0(%rsi)
movdqu (%rdi), %xmm2
pxor (%rsi), %xmm2
movdqa %xmm2, %xmm1
movdqu 16(%rdi), %xmm3
pxor 16(%rsi), %xmm3
por %xmm3, %xmm1
movdqu 32(%rdi), %xmm4
pxor 32(%rsi), %xmm4
por %xmm4, %xmm1
movdqu 48(%rdi), %xmm5
pxor 48(%rsi), %xmm5
por %xmm5, %xmm1
ptest %xmm1, %xmm0
jnc L(64bytesormore_loop_end)
add $64, %rsi
add $64, %rdi
sub $64, %rdx
jae L(L2_L3_unaligned_128bytes_loop)
add $64, %rdx
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
/*
* This case is for machines which are sensitive for unaligned instructions.
* Both rdi and rsi are 16-byte aligned here, so movdqa can be used; otherwise
* the structure mirrors the unaligned paths above.
*/
ALIGN (4)
L(2aligned):
cmp $128, %rdx
ja L(128bytesormorein2aligned)
L(less128bytesin2aligned):
sub $64, %rdx
movdqa (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqa 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
movdqa 32(%rdi), %xmm2
pxor 32(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(48bytesin256)
movdqa 48(%rdi), %xmm2
pxor 48(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(64bytesin256)
cmp $32, %rdx
jb L(less32bytesin64in2alinged)
movdqa 64(%rdi), %xmm2
pxor 64(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(80bytesin256)
movdqa 80(%rdi), %xmm2
pxor 80(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(96bytesin256)
sub $32, %rdx
add $32, %rdi
add $32, %rsi
L(less32bytesin64in2alinged):
add $64, %rdi
add $64, %rsi
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
ALIGN (4)
L(128bytesormorein2aligned):
cmp $512, %rdx
ja L(512bytesormorein2aligned)
cmp $256, %rdx
ja L(256bytesormorein2aligned)
L(less256bytesin2alinged):
sub $128, %rdx
movdqa (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqa 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
movdqa 32(%rdi), %xmm2
pxor 32(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(48bytesin256)
movdqa 48(%rdi), %xmm2
pxor 48(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(64bytesin256)
movdqa 64(%rdi), %xmm2
pxor 64(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(80bytesin256)
movdqa 80(%rdi), %xmm2
pxor 80(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(96bytesin256)
movdqa 96(%rdi), %xmm2
pxor 96(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(112bytesin256)
movdqa 112(%rdi), %xmm2
pxor 112(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(128bytesin256)
add $128, %rsi
add $128, %rdi
cmp $64, %rdx
jae L(less128bytesin2aligned)
cmp $32, %rdx
jb L(less32bytesin128in2aligned)
movdqu (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqu 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
sub $32, %rdx
add $32, %rdi
add $32, %rsi
L(less32bytesin128in2aligned):
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
ALIGN (4)
/* 256..512 bytes, both operands 16-byte aligned: unrolled 16x16-byte aligned
 * compares, then fall back to the smaller aligned handlers. */
L(256bytesormorein2aligned):
sub $256, %rdx
movdqa (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqa 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
movdqa 32(%rdi), %xmm2
pxor 32(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(48bytesin256)
movdqa 48(%rdi), %xmm2
pxor 48(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(64bytesin256)
movdqa 64(%rdi), %xmm2
pxor 64(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(80bytesin256)
movdqa 80(%rdi), %xmm2
pxor 80(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(96bytesin256)
movdqa 96(%rdi), %xmm2
pxor 96(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(112bytesin256)
movdqa 112(%rdi), %xmm2
pxor 112(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(128bytesin256)
movdqa 128(%rdi), %xmm2
pxor 128(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(144bytesin256)
movdqa 144(%rdi), %xmm2
pxor 144(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(160bytesin256)
movdqa 160(%rdi), %xmm2
pxor 160(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(176bytesin256)
movdqa 176(%rdi), %xmm2
pxor 176(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(192bytesin256)
movdqa 192(%rdi), %xmm2
pxor 192(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(208bytesin256)
movdqa 208(%rdi), %xmm2
pxor 208(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(224bytesin256)
movdqa 224(%rdi), %xmm2
pxor 224(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(240bytesin256)
movdqa 240(%rdi), %xmm2
pxor 240(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(256bytesin256)
add $256, %rsi
add $256, %rdi
cmp $128, %rdx
jae L(less256bytesin2alinged)
cmp $64, %rdx
jae L(less128bytesin2aligned)
cmp $32, %rdx
jb L(less32bytesin256in2alinged)
movdqa (%rdi), %xmm2
pxor (%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(16bytesin256)
movdqa 16(%rdi), %xmm2
pxor 16(%rsi), %xmm2
ptest %xmm2, %xmm0
jnc L(32bytesin256)
sub $32, %rdx
add $32, %rdi
add $32, %rsi
L(less32bytesin256in2alinged):
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
ALIGN (4)
/*
 * > 512 bytes, both operands aligned: same 64-bytes-per-iteration structure as
 * the unaligned big loops, but with movdqa loads, again switching to a
 * prefetching variant for lengths beyond 1.5 * DATA_CACHE_SIZE_HALF.
 */
L(512bytesormorein2aligned):
#ifdef DATA_CACHE_SIZE_HALF
mov $DATA_CACHE_SIZE_HALF, %r8
#else
mov __x86_64_data_cache_size_half(%rip), %r8
#endif
mov %r8, %r9
shr $1, %r8
add %r9, %r8 /* r8 = 1.5 * (half L1d size) */
cmp %r8, %rdx
ja L(L2_L3_cache_aglined)
sub $64, %rdx
ALIGN (4)
L(64bytesormore_loopin2aligned):
movdqa (%rdi), %xmm2
pxor (%rsi), %xmm2
movdqa %xmm2, %xmm1
movdqa 16(%rdi), %xmm3
pxor 16(%rsi), %xmm3
por %xmm3, %xmm1
movdqa 32(%rdi), %xmm4
pxor 32(%rsi), %xmm4
por %xmm4, %xmm1
movdqa 48(%rdi), %xmm5
pxor 48(%rsi), %xmm5
por %xmm5, %xmm1
ptest %xmm1, %xmm0
jnc L(64bytesormore_loop_end)
add $64, %rsi
add $64, %rdi
sub $64, %rdx
jae L(64bytesormore_loopin2aligned)
add $64, %rdx
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
L(L2_L3_cache_aglined):
sub $64, %rdx
ALIGN (4)
L(L2_L3_aligned_128bytes_loop):
prefetchnta 0x1c0(%rdi)
prefetchnta 0x1c0(%rsi)
movdqa (%rdi), %xmm2
pxor (%rsi), %xmm2
movdqa %xmm2, %xmm1
movdqa 16(%rdi), %xmm3
pxor 16(%rsi), %xmm3
por %xmm3, %xmm1
movdqa 32(%rdi), %xmm4
pxor 32(%rsi), %xmm4
por %xmm4, %xmm1
movdqa 48(%rdi), %xmm5
pxor 48(%rsi), %xmm5
por %xmm5, %xmm1
ptest %xmm1, %xmm0
jnc L(64bytesormore_loop_end)
add $64, %rsi
add $64, %rdi
sub $64, %rdx
jae L(L2_L3_aligned_128bytes_loop)
add $64, %rdx
add %rdx, %rsi
add %rdx, %rdi
BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
ALIGN (4)
/*
 * A 64-byte loop iteration found a mismatch. xmm2..xmm5 still hold the xor of
 * the four 16-byte chunks; re-test each to find the first differing chunk,
 * advancing the pointers so L(16bytes) can inspect the 16 bytes just passed.
 */
L(64bytesormore_loop_end):
add $16, %rdi
add $16, %rsi
ptest %xmm2, %xmm0
jnc L(16bytes)
add $16, %rdi
add $16, %rsi
ptest %xmm3, %xmm0
jnc L(16bytes)
add $16, %rdi
add $16, %rsi
ptest %xmm4, %xmm0
jnc L(16bytes)
add $16, %rdi
add $16, %rsi
jmp L(16bytes)
/*
 * L(Nbytesin256): the unrolled compare at offset N-16 found a mismatch.
 * Advance both pointers past that chunk and let L(16bytes) locate the
 * differing 16-bit element within the preceding 16 bytes.
 */
L(256bytesin256):
add $256, %rdi
add $256, %rsi
jmp L(16bytes)
L(240bytesin256):
add $240, %rdi
add $240, %rsi
jmp L(16bytes)
L(224bytesin256):
add $224, %rdi
add $224, %rsi
jmp L(16bytes)
L(208bytesin256):
add $208, %rdi
add $208, %rsi
jmp L(16bytes)
L(192bytesin256):
add $192, %rdi
add $192, %rsi
jmp L(16bytes)
L(176bytesin256):
add $176, %rdi
add $176, %rsi
jmp L(16bytes)
L(160bytesin256):
add $160, %rdi
add $160, %rsi
jmp L(16bytes)
L(144bytesin256):
add $144, %rdi
add $144, %rsi
jmp L(16bytes)
L(128bytesin256):
add $128, %rdi
add $128, %rsi
jmp L(16bytes)
L(112bytesin256):
add $112, %rdi
add $112, %rsi
jmp L(16bytes)
L(96bytesin256):
add $96, %rdi
add $96, %rsi
jmp L(16bytes)
L(80bytesin256):
add $80, %rdi
add $80, %rsi
jmp L(16bytes)
L(64bytesin256):
add $64, %rdi
add $64, %rsi
jmp L(16bytes)
/* 48/32/16 fall through, adding 16 at each step. */
L(48bytesin256):
add $16, %rdi
add $16, %rsi
L(32bytesin256):
add $16, %rdi
add $16, %rsi
L(16bytesin256):
add $16, %rdi
add $16, %rsi
/*
 * Tail handlers. On entry rdi/rsi point one past the end of the region to
 * check; each L(Nbytes) label compares the last N bytes using negative
 * offsets. Differences funnel into L(diffin8bytes)/L(diffin4bytes)/L(end).
 */
L(16bytes):
mov -16(%rdi), %rax
mov -16(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
L(8bytes):
mov -8(%rdi), %rax
mov -8(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
xor %eax, %eax /* equal */
ret
ALIGN (4)
L(12bytes):
mov -12(%rdi), %rax
mov -12(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
L(4bytes):
mov -4(%rsi), %ecx
mov -4(%rdi), %eax
cmp %eax, %ecx
jne L(diffin4bytes)
L(0bytes):
xor %eax, %eax /* equal */
ret
ALIGN (4)
/* 66/50/34-byte tails: 16-byte SSE compares; dl records the (negative) offset
 * of the chunk for L(less16bytes) to pinpoint the difference. */
L(66bytes):
movdqu -66(%rdi), %xmm1
movdqu -66(%rsi), %xmm2
mov $-66, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(50bytes):
movdqu -50(%rdi), %xmm1
movdqu -50(%rsi), %xmm2
mov $-50, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(34bytes):
movdqu -34(%rdi), %xmm1
movdqu -34(%rsi), %xmm2
mov $-34, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(18bytes):
mov -18(%rdi), %rax
mov -18(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
L(10bytes):
mov -10(%rdi), %rax
mov -10(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
movzwl -2(%rdi), %eax
movzwl -2(%rsi), %ecx
cmp %cl, %al
jne L(end)
and $0xffff, %eax
and $0xffff, %ecx
sub %ecx, %eax /* unsigned 16-bit difference of the last element */
ret
ALIGN (4)
L(14bytes):
mov -14(%rdi), %rax
mov -14(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
mov -8(%rdi), %rax
mov -8(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
xor %eax, %eax
ret
ALIGN (4)
L(6bytes):
mov -6(%rdi), %eax
mov -6(%rsi), %ecx
cmp %eax, %ecx
jne L(diffin4bytes)
L(2bytes):
movzwl -2(%rsi), %ecx
movzwl -2(%rdi), %eax
cmp %cl, %al
jne L(end)
and $0xffff, %eax
and $0xffff, %ecx
sub %ecx, %eax
ret
ALIGN (4)
/*
 * Remaining even-length tails (68..64 bytes and their 16-byte-stepped
 * sub-cases). Same pattern as above: SSE compare of a trailing 16-byte chunk
 * with dl holding the chunk's negative end-relative offset, then scalar
 * compares for the final <16 bytes.
 */
L(68bytes):
movdqu -68(%rdi), %xmm2
movdqu -68(%rsi), %xmm1
mov $-68, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(52bytes):
movdqu -52(%rdi), %xmm2
movdqu -52(%rsi), %xmm1
mov $-52, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(36bytes):
movdqu -36(%rdi), %xmm2
movdqu -36(%rsi), %xmm1
mov $-36, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(20bytes):
movdqu -20(%rdi), %xmm2
movdqu -20(%rsi), %xmm1
mov $-20, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
mov -4(%rdi), %eax
mov -4(%rsi), %ecx
cmp %eax, %ecx
jne L(diffin4bytes)
xor %eax, %eax
ret
ALIGN (4)
L(70bytes):
movdqu -70(%rsi), %xmm1
movdqu -70(%rdi), %xmm2
mov $-70, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(54bytes):
movdqu -54(%rsi), %xmm1
movdqu -54(%rdi), %xmm2
mov $-54, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(38bytes):
movdqu -38(%rsi), %xmm1
movdqu -38(%rdi), %xmm2
mov $-38, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(22bytes):
movdqu -22(%rsi), %xmm1
movdqu -22(%rdi), %xmm2
mov $-22, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
mov -8(%rdi), %rax
mov -8(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
xor %eax, %eax
ret
ALIGN (4)
L(72bytes):
movdqu -72(%rsi), %xmm1
movdqu -72(%rdi), %xmm2
mov $-72, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(56bytes):
movdqu -56(%rdi), %xmm2
movdqu -56(%rsi), %xmm1
mov $-56, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(40bytes):
movdqu -40(%rdi), %xmm2
movdqu -40(%rsi), %xmm1
mov $-40, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(24bytes):
movdqu -24(%rdi), %xmm2
movdqu -24(%rsi), %xmm1
mov $-24, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
mov -8(%rdi), %rax
mov -8(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
xor %eax, %eax
ret
ALIGN (4)
L(74bytes):
movdqu -74(%rsi), %xmm1
movdqu -74(%rdi), %xmm2
mov $-74, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(58bytes):
movdqu -58(%rdi), %xmm2
movdqu -58(%rsi), %xmm1
mov $-58, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(42bytes):
movdqu -42(%rdi), %xmm2
movdqu -42(%rsi), %xmm1
mov $-42, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(26bytes):
movdqu -26(%rdi), %xmm2
movdqu -26(%rsi), %xmm1
mov $-26, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
mov -10(%rdi), %rax
mov -10(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
movzwl -2(%rdi), %eax
movzwl -2(%rsi), %ecx
jmp L(end)
ALIGN (4)
L(76bytes):
movdqu -76(%rsi), %xmm1
movdqu -76(%rdi), %xmm2
mov $-76, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(60bytes):
movdqu -60(%rdi), %xmm2
movdqu -60(%rsi), %xmm1
mov $-60, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(44bytes):
movdqu -44(%rdi), %xmm2
movdqu -44(%rsi), %xmm1
mov $-44, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(28bytes):
movdqu -28(%rdi), %xmm2
movdqu -28(%rsi), %xmm1
mov $-28, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
mov -12(%rdi), %rax
mov -12(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
mov -4(%rdi), %eax
mov -4(%rsi), %ecx
cmp %eax, %ecx
jne L(diffin4bytes)
xor %eax, %eax
ret
ALIGN (4)
L(78bytes):
movdqu -78(%rsi), %xmm1
movdqu -78(%rdi), %xmm2
mov $-78, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(62bytes):
movdqu -62(%rdi), %xmm2
movdqu -62(%rsi), %xmm1
mov $-62, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(46bytes):
movdqu -46(%rdi), %xmm2
movdqu -46(%rsi), %xmm1
mov $-46, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(30bytes):
movdqu -30(%rdi), %xmm2
movdqu -30(%rsi), %xmm1
mov $-30, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
mov -14(%rdi), %rax
mov -14(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
mov -8(%rdi), %rax
mov -8(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
xor %eax, %eax
ret
ALIGN (4)
L(64bytes):
movdqu -64(%rdi), %xmm2
movdqu -64(%rsi), %xmm1
mov $-64, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(48bytes):
movdqu -48(%rdi), %xmm2
movdqu -48(%rsi), %xmm1
mov $-48, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
L(32bytes):
movdqu -32(%rdi), %xmm2
movdqu -32(%rsi), %xmm1
mov $-32, %dl
pxor %xmm1, %xmm2
ptest %xmm2, %xmm0
jnc L(less16bytes)
mov -16(%rdi), %rax
mov -16(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
mov -8(%rdi), %rax
mov -8(%rsi), %rcx
cmp %rax, %rcx
jne L(diffin8bytes)
xor %eax, %eax
ret
/*
* Aligned 8 bytes to avoid 2 branch "taken" in one 16 aligned code block.
* A 16-byte chunk at end-relative offset dl (negative) differed: reload its two
* 8-byte halves, then narrow the first differing 64-bit word down 32 -> 16 bits
* and return the unsigned difference of the first mismatching 16-bit element.
*/
ALIGN (3)
L(less16bytes):
movsbq %dl, %rdx /* rdx = negative offset of the differing chunk */
mov (%rsi, %rdx), %rcx
mov (%rdi, %rdx), %rax
cmp %rax, %rcx
jne L(diffin8bytes)
mov 8(%rsi, %rdx), %rcx
mov 8(%rdi, %rdx), %rax
L(diffin8bytes):
cmp %eax, %ecx /* difference in the low 32 bits? */
jne L(diffin4bytes)
shr $32, %rcx /* no: it is in the high halves */
shr $32, %rax
L(diffin4bytes):
cmp %cx, %ax /* difference in the low 16 bits? */
jne L(end)
shr $16, %ecx /* no: it is in the upper 16 bits */
shr $16, %eax
jmp L(end)
ALIGN (4)
L(end):
and $0xffff, %eax
and $0xffff, %ecx
sub %ecx, %eax /* result = *(uint16_t*)s1 - *(uint16_t*)s2 */
ret
END_FUNCTION MEMCMP
ALIGN (3)
/* Dispatch table indexed by remaining byte count / 2 (counts are always even
 * since lengths are in 16-bit elements). Entries are table-relative offsets;
 * see BRANCH_TO_JMPTBL_ENTRY. */
L(table_64bytes):
.int JMPTBL (L(0bytes), L(table_64bytes))
.int JMPTBL (L(2bytes), L(table_64bytes))
.int JMPTBL (L(4bytes), L(table_64bytes))
.int JMPTBL (L(6bytes), L(table_64bytes))
.int JMPTBL (L(8bytes), L(table_64bytes))
.int JMPTBL (L(10bytes), L(table_64bytes))
.int JMPTBL (L(12bytes), L(table_64bytes))
.int JMPTBL (L(14bytes), L(table_64bytes))
.int JMPTBL (L(16bytes), L(table_64bytes))
.int JMPTBL (L(18bytes), L(table_64bytes))
.int JMPTBL (L(20bytes), L(table_64bytes))
.int JMPTBL (L(22bytes), L(table_64bytes))
.int JMPTBL (L(24bytes), L(table_64bytes))
.int JMPTBL (L(26bytes), L(table_64bytes))
.int JMPTBL (L(28bytes), L(table_64bytes))
.int JMPTBL (L(30bytes), L(table_64bytes))
.int JMPTBL (L(32bytes), L(table_64bytes))
.int JMPTBL (L(34bytes), L(table_64bytes))
.int JMPTBL (L(36bytes), L(table_64bytes))
.int JMPTBL (L(38bytes), L(table_64bytes))
.int JMPTBL (L(40bytes), L(table_64bytes))
.int JMPTBL (L(42bytes), L(table_64bytes))
.int JMPTBL (L(44bytes), L(table_64bytes))
.int JMPTBL (L(46bytes), L(table_64bytes))
.int JMPTBL (L(48bytes), L(table_64bytes))
.int JMPTBL (L(50bytes), L(table_64bytes))
.int JMPTBL (L(52bytes), L(table_64bytes))
.int JMPTBL (L(54bytes), L(table_64bytes))
.int JMPTBL (L(56bytes), L(table_64bytes))
.int JMPTBL (L(58bytes), L(table_64bytes))
.int JMPTBL (L(60bytes), L(table_64bytes))
.int JMPTBL (L(62bytes), L(table_64bytes))
.int JMPTBL (L(64bytes), L(table_64bytes))
.int JMPTBL (L(66bytes), L(table_64bytes))
.int JMPTBL (L(68bytes), L(table_64bytes))
.int JMPTBL (L(70bytes), L(table_64bytes))
.int JMPTBL (L(72bytes), L(table_64bytes))
.int JMPTBL (L(74bytes), L(table_64bytes))
.int JMPTBL (L(76bytes), L(table_64bytes))
.int JMPTBL (L(78bytes), L(table_64bytes))
|
abforce/xposed_art_n
| 76,862
|
runtime/arch/x86_64/quick_entrypoints_x86_64.S
|
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_x86_64.S"
#include "arch/quick_alloc_entrypoints.S"
// Spills xmm12-xmm15 (the ART FP callee-saved registers) into a fresh 32-byte
// stack area. Pair with RESTORE_FP_CALLEE_SAVE_FRAME.
MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
// Create space for ART FP callee-saved registers
subq MACRO_LITERAL(4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(4 * 8)
movq %xmm12, 0(%rsp)
movq %xmm13, 8(%rsp)
movq %xmm14, 16(%rsp)
movq %xmm15, 24(%rsp)
END_MACRO
// Reloads xmm12-xmm15 from the area created by SETUP_FP_CALLEE_SAVE_FRAME and
// releases it.
MACRO0(RESTORE_FP_CALLEE_SAVE_FRAME)
// Restore ART FP callee-saved registers
movq 0(%rsp), %xmm12
movq 8(%rsp), %xmm13
movq 16(%rsp), %xmm14
movq 24(%rsp), %xmm15
addq MACRO_LITERAL(4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(- 4 * 8)
END_MACRO
// For x86, the CFA is esp+4, the address above the pushed return address on the stack.
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
// Builds the kSaveAll callee-save frame: pushes the GPR callee-saves, spills
// xmm12-xmm15, stores the save-all ArtMethod* at the bottom of the frame, and
// publishes rsp as the thread's top quick frame. Clobbers r10.
MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
#if defined(__APPLE__)
int3
int3
#else
// R10 := Runtime::Current()
movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
movq (%r10), %r10
// Save callee save registers to agree with core spills bitmap.
PUSH r15 // Callee save.
PUSH r14 // Callee save.
PUSH r13 // Callee save.
PUSH r12 // Callee save.
PUSH rbp // Callee save.
PUSH rbx // Callee save.
// Create space for FPR args, plus space for ArtMethod*.
subq MACRO_LITERAL(4 * 8 + 8), %rsp
CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
// Save FPRs.
movq %xmm12, 8(%rsp)
movq %xmm13, 16(%rsp)
movq %xmm14, 24(%rsp)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for save all callee save frame method.
movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the top quick frame.
movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 6 * 8 + 4 * 8 + 8 + 8)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(X86_64) size not as expected."
#endif
#endif  // __APPLE__
END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly)
*/
// Builds the kRefsOnly callee-save frame: same GPR/FPR spill layout as the
// save-all frame but stores the refs-only ArtMethod*. Clobbers r10.
// Pair with RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME.
MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME)
#if defined(__APPLE__)
int3
int3
#else
// R10 := Runtime::Current()
movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
movq (%r10), %r10
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH r15 // Callee save.
PUSH r14 // Callee save.
PUSH r13 // Callee save.
PUSH r12 // Callee save.
PUSH rbp // Callee save.
PUSH rbx // Callee save.
// Create space for FPR args, plus space for ArtMethod*.
subq LITERAL(8 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
// Save FPRs.
movq %xmm12, 8(%rsp)
movq %xmm13, 16(%rsp)
movq %xmm14, 24(%rsp)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for refs only callee save frame method.
movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the top quick frame.
movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 6 * 8 + 4 * 8 + 8 + 8)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86_64) size not as expected."
#endif
#endif  // __APPLE__
END_MACRO
/*
 * Tears down the frame built by SETUP_REFS_ONLY_CALLEE_SAVE_FRAME:
 * reloads xmm12-xmm15, pops the FPR/ArtMethod* area, then pops the
 * callee-saved GPRs in reverse push order.
 */
MACRO0(RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME)
movq 8(%rsp), %xmm12
movq 16(%rsp), %xmm13
movq 24(%rsp), %xmm14
movq 32(%rsp), %xmm15
addq LITERAL(8 + 4*8), %rsp
CFI_ADJUST_CFA_OFFSET(-8 - 4*8)
// TODO: optimize by not restoring callee-saves restored by the ABI
POP rbx
POP rbp
POP r12
POP r13
POP r14
POP r15
END_MACRO
/*
 * Macro that sets up the callee save frame to conform with
 * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
 * Spills callee saves interleaved with the GPR argument registers (to match the
 * core spills bitmap), then xmm0-xmm7 (FP args) and xmm12-xmm15, stores the
 * runtime's refs-and-args callee-save ArtMethod* at the bottom of the frame,
 * and publishes rsp as the thread's top quick frame. Clobbers r10.
 */
MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
#if defined(__APPLE__)
int3
int3
#else
// R10 := Runtime::Current()
movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
movq (%r10), %r10
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH r15 // Callee save.
PUSH r14 // Callee save.
PUSH r13 // Callee save.
PUSH r12 // Callee save.
PUSH r9 // Quick arg 5.
PUSH r8 // Quick arg 4.
PUSH rsi // Quick arg 1.
PUSH rbp // Callee save.
PUSH rbx // Callee save.
PUSH rdx // Quick arg 2.
PUSH rcx // Quick arg 3.
// Create space for FPR args and create 2 slots for ArtMethod*.
subq MACRO_LITERAL(80 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Save FPRs.
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
movq %xmm4, 48(%rsp)
movq %xmm5, 56(%rsp)
movq %xmm6, 64(%rsp)
movq %xmm7, 72(%rsp)
movq %xmm12, 80(%rsp)
movq %xmm13, 88(%rsp)
movq %xmm14, 96(%rsp)
movq %xmm15, 104(%rsp)
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the top quick frame.
movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
// Ugly compile-time check, but we only have the preprocessor.
// Last +8: implicit return address pushed on stack when caller made call.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11 * 8 + 4 * 8 + 80 + 8)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86_64) size not as expected."
#endif
#endif  // __APPLE__
END_MACRO
/*
 * Same layout as SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME, but stores the
 * ArtMethod* already held in rdi instead of loading the runtime's
 * callee-save method (so it does not touch r10 or the Runtime instance).
 */
MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI)
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH r15 // Callee save.
PUSH r14 // Callee save.
PUSH r13 // Callee save.
PUSH r12 // Callee save.
PUSH r9 // Quick arg 5.
PUSH r8 // Quick arg 4.
PUSH rsi // Quick arg 1.
PUSH rbp // Callee save.
PUSH rbx // Callee save.
PUSH rdx // Quick arg 2.
PUSH rcx // Quick arg 3.
// Create space for FPR args and create 2 slots for ArtMethod*.
subq LITERAL(80 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
// Save FPRs.
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
movq %xmm2, 32(%rsp)
movq %xmm3, 40(%rsp)
movq %xmm4, 48(%rsp)
movq %xmm5, 56(%rsp)
movq %xmm6, 64(%rsp)
movq %xmm7, 72(%rsp)
movq %xmm12, 80(%rsp)
movq %xmm13, 88(%rsp)
movq %xmm14, 96(%rsp)
movq %xmm15, 104(%rsp)
// Store ArtMethod to bottom of stack.
movq %rdi, 0(%rsp)
// Store rsp as the top quick frame.
movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
END_MACRO
/*
 * Tears down the frame built by SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME:
 * reloads all spilled FPRs, pops the FPR/ArtMethod* area, then pops the
 * GPRs (args and callee saves) in reverse push order.
 */
MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
// Restore FPRs.
movq 16(%rsp), %xmm0
movq 24(%rsp), %xmm1
movq 32(%rsp), %xmm2
movq 40(%rsp), %xmm3
movq 48(%rsp), %xmm4
movq 56(%rsp), %xmm5
movq 64(%rsp), %xmm6
movq 72(%rsp), %xmm7
movq 80(%rsp), %xmm12
movq 88(%rsp), %xmm13
movq 96(%rsp), %xmm14
movq 104(%rsp), %xmm15
addq MACRO_LITERAL(80 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(-(80 + 4 * 8))
// Restore callee and GPR args, mixed together to agree with core spills bitmap.
POP rcx
POP rdx
POP rbx
POP rbp
POP rsi
POP r8
POP r9
POP r12
POP r13
POP r14
POP r15
END_MACRO
/*
 * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
 * exception is Thread::Current()->exception_. Does not return (the callee long-jumps
 * to the exception handler; UNREACHABLE follows the call).
 */
MACRO0(DELIVER_PENDING_EXCEPTION)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save callee saves for throw
// (Thread*) setup
movq %gs:THREAD_SELF_OFFSET, %rdi
call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
UNREACHABLE
END_MACRO
/*
 * Defines a no-return entrypoint c_name that throws a runtime exception by
 * calling cxx_name(Thread*). Thread::Current() is the only argument.
 */
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
call VAR(cxx_name) // cxx_name(Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Defines a no-return entrypoint c_name that throws a runtime exception by
 * calling cxx_name(arg0, Thread*). arg0 is already in rdi; only Thread* is added.
 */
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call VAR(cxx_name) // cxx_name(arg0, Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Defines a no-return entrypoint c_name that throws a runtime exception by
 * calling cxx_name(arg0, arg1, Thread*). arg0/arg1 are already in rdi/rsi.
 */
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
// Instantiations of the *_RUNTIME_EXCEPTION macros above: each line generates one
// no-return throw entrypoint that forwards to the corresponding artThrow* helper.
/*
 * Called by managed code to create and deliver a NullPointerException.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
 * Called by managed code to create and deliver an ArithmeticException.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
/*
 * Called by managed code to create and deliver a StackOverflowError.
 */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
/*
 * Called by managed code, saves callee saves and then calls artThrowException
 * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
 */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
/*
 * Called by managed code to create and deliver a NoSuchMethodError.
 */
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
/*
 * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
 * index, arg2 holds limit.
 */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
 * All generated callsites for interface invokes and invocation slow paths will load arguments
 * as usual - except instead of loading arg0/rdi with the target Method*, arg0/rdi will contain
 * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
 * NOTE: "this" is first visible argument of the target, and so can be found in arg1/rsi.
 *
 * The helper will attempt to locate the target and return a 128-bit result in rax/rdx consisting
 * of the target Method* in rax and method->code_ in rdx.
 *
 * If unsuccessful, the helper will return null/????. There will be a pending exception in the
 * thread and we branch to another stub to deliver it.
 *
 * On success this wrapper will restore arguments and *jump* to the target, leaving the return
 * location on the stack.
 *
 * Adapted from x86 code.
 */
MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name)
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread
movq %rsp, %rcx // pass SP
call VAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP)
// Shuffle the 128-bit result: Method* (rax) goes to rdi for the null check,
// code pointer (rdx) goes to rax for the tail jump. rdi/rax survive the
// frame restore below because it only pops the argument/callee-save slots.
movq %rax, %rdi
movq %rdx, %rax
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
testq %rdi, %rdi // Method* == null => lookup failed, pending exception set.
jz 1f
// Tail call to intended method.
jmp *%rax
1:
DELIVER_PENDING_EXCEPTION
END_MACRO
/*
 * Defines an invoke trampoline entrypoint c_name whose body resolves the target
 * via cxx_name and tail-calls it (see INVOKE_TRAMPOLINE_BODY above).
 */
MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
INVOKE_TRAMPOLINE_BODY RAW_VAR(cxx_name)
END_FUNCTION VAR(c_name)
END_MACRO
// One trampoline per invoke kind with access checks.
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
 * Helper for quick invocation stub to set up XMM registers. Assumes r10 == shorty,
 * r11 == arg_array. Clobbers r10, r11 and al. Branches to xmm_setup_finished if it encounters
 * the end of the shorty.
 * Loads at most one FP value (into xmm_reg); non-FP entries only advance r11
 * (longs advance it twice), so each expansion fills the next FP arg register.
 * Shorty characters compared by ASCII code: 68='D', 70='F', 74='J'.
 */
MACRO2(LOOP_OVER_SHORTY_LOADING_XMMS, xmm_reg, finished)
1: // LOOP
movb (%r10), %al // al := *shorty
addq MACRO_LITERAL(1), %r10 // shorty++
cmpb MACRO_LITERAL(0), %al // if (al == '\0') goto xmm_setup_finished
je VAR(finished)
cmpb MACRO_LITERAL(68), %al // if (al == 'D') goto FOUND_DOUBLE
je 2f
cmpb MACRO_LITERAL(70), %al // if (al == 'F') goto FOUND_FLOAT
je 3f
addq MACRO_LITERAL(4), %r11 // arg_array++
// Handle extra space in arg array taken by a long.
cmpb MACRO_LITERAL(74), %al // if (al != 'J') goto LOOP
jne 1b
addq MACRO_LITERAL(4), %r11 // arg_array++
jmp 1b // goto LOOP
2: // FOUND_DOUBLE
movsd (%r11), REG_VAR(xmm_reg)
addq MACRO_LITERAL(8), %r11 // arg_array+=2
jmp 4f
3: // FOUND_FLOAT
movss (%r11), REG_VAR(xmm_reg)
addq MACRO_LITERAL(4), %r11 // arg_array++
4:
END_MACRO
/*
 * Helper for quick invocation stub to set up GPR registers. Assumes r10 == shorty,
 * r11 == arg_array. Clobbers r10, r11 and al. Branches to gpr_setup_finished if it encounters
 * the end of the shorty.
 * Mirror image of LOOP_OVER_SHORTY_LOADING_XMMS: loads at most one integer value
 * (64-bit into gpr_reg64 for 'J', else 32-bit into gpr_reg32) and skips FP entries,
 * advancing r11 by the entry's size either way.
 * Shorty characters compared by ASCII code: 68='D', 70='F', 74='J'.
 */
MACRO3(LOOP_OVER_SHORTY_LOADING_GPRS, gpr_reg64, gpr_reg32, finished)
1: // LOOP
movb (%r10), %al // al := *shorty
addq MACRO_LITERAL(1), %r10 // shorty++
cmpb MACRO_LITERAL(0), %al // if (al == '\0') goto gpr_setup_finished
je VAR(finished)
cmpb MACRO_LITERAL(74), %al // if (al == 'J') goto FOUND_LONG
je 2f
cmpb MACRO_LITERAL(70), %al // if (al == 'F') goto SKIP_FLOAT
je 3f
cmpb MACRO_LITERAL(68), %al // if (al == 'D') goto SKIP_DOUBLE
je 4f
movl (%r11), REG_VAR(gpr_reg32)
addq MACRO_LITERAL(4), %r11 // arg_array++
jmp 5f
2: // FOUND_LONG
movq (%r11), REG_VAR(gpr_reg64)
addq MACRO_LITERAL(8), %r11 // arg_array+=2
jmp 5f
3: // SKIP_FLOAT
addq MACRO_LITERAL(4), %r11 // arg_array++
jmp 1b
4: // SKIP_DOUBLE
addq MACRO_LITERAL(8), %r11 // arg_array+=2
jmp 1b
5:
END_MACRO
/*
 * Quick invocation stub (instance methods: arg_array starts with "this").
 * On entry:
 *   [sp] = return address
 *   rdi = method pointer
 *   rsi = argument array that must at least contain the this pointer.
 *   rdx = size of argument array in bytes
 *   rcx = (managed) thread pointer
 *   r8 = JValue* result
 *   r9 = char* shorty
 * Builds a quick frame on the stack (null method* slot + copied arg array),
 * distributes args into FP/GPR registers per the shorty, calls the method's
 * quick code, then stores the result into *r8 according to the shorty's
 * return-type character.
 */
DEFINE_FUNCTION art_quick_invoke_stub
#if defined(__APPLE__)
int3
int3
#else
// Set up argument XMM registers.
leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character.
leaq 4(%rsi), %r11 // R11 := arg_array + 4 ; ie skip this pointer.
LOOP_OVER_SHORTY_LOADING_XMMS xmm0, .Lxmm_setup_finished
LOOP_OVER_SHORTY_LOADING_XMMS xmm1, .Lxmm_setup_finished
LOOP_OVER_SHORTY_LOADING_XMMS xmm2, .Lxmm_setup_finished
LOOP_OVER_SHORTY_LOADING_XMMS xmm3, .Lxmm_setup_finished
LOOP_OVER_SHORTY_LOADING_XMMS xmm4, .Lxmm_setup_finished
LOOP_OVER_SHORTY_LOADING_XMMS xmm5, .Lxmm_setup_finished
LOOP_OVER_SHORTY_LOADING_XMMS xmm6, .Lxmm_setup_finished
LOOP_OVER_SHORTY_LOADING_XMMS xmm7, .Lxmm_setup_finished
.balign 16
.Lxmm_setup_finished:
PUSH rbp // Save rbp.
PUSH r8 // Save r8/result*.
PUSH r9 // Save r9/shorty*.
PUSH rbx // Save native callee save rbx
PUSH r12 // Save native callee save r12
PUSH r13 // Save native callee save r13
PUSH r14 // Save native callee save r14
PUSH r15 // Save native callee save r15
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
movl %edx, %r10d // r10d := arg array size, kept for the copy below.
addl LITERAL(100), %edx // Reserve space for return addr, StackReference<method>, rbp,
// r8, r9, rbx, r12, r13, r14, and r15 in frame.
andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
subl LITERAL(72), %edx // Remove space for return address, rbp, r8, r9, rbx, r12,
// r13, r14, and r15
subq %rdx, %rsp // Reserve stack space for argument array.
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
movq LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
movq %rsi, %r11 // r11 := arg_array
leaq 8(%rsp), %rdi // rdi is pointing just above the ArtMethod* in the stack
// arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
leaq 1(%r9), %r10 // r10 := shorty + 1 ; ie skip return arg character
movq %rax, %rdi // rdi := method to be called
movl (%r11), %esi // rsi := this pointer
addq LITERAL(4), %r11 // arg_array++
LOOP_OVER_SHORTY_LOADING_GPRS rdx, edx, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS rcx, ecx, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished
.Lgpr_setup_finished:
call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
POP r15 // Pop r15
POP r14 // Pop r14
POP r13 // Pop r13
POP r12 // Pop r12
POP rbx // Pop rbx
POP r9 // Pop r9 - shorty*
POP r8 // Pop r8 - result*.
POP rbp // Pop rbp
cmpb LITERAL(68), (%r9) // Test if result type char == 'D'.
je .Lreturn_double_quick
cmpb LITERAL(70), (%r9) // Test if result type char == 'F'.
je .Lreturn_float_quick
movq %rax, (%r8) // Store the result assuming its a long, int or Object*
ret
.Lreturn_double_quick:
movsd %xmm0, (%r8) // Store the double floating point result.
ret
.Lreturn_float_quick:
movss %xmm0, (%r8) // Store the floating point result.
ret
#endif  // __APPLE__
END_FUNCTION art_quick_invoke_stub
/*
 * Quick invocation stub (static methods: no implicit "this" in arg_array).
 * On entry:
 *   [sp] = return address
 *   rdi = method pointer
 *   rsi = argument array or null if no arguments.
 *   rdx = size of argument array in bytes
 *   rcx = (managed) thread pointer
 *   r8 = JValue* result
 *   r9 = char* shorty
 * Identical structure to art_quick_invoke_stub above, except the arg array is
 * consumed from its start (no this-pointer skip) and rsi is filled from the
 * shorty like the other GPR args.
 */
DEFINE_FUNCTION art_quick_invoke_static_stub
#if defined(__APPLE__)
int3
int3
#else
// Set up argument XMM registers.
leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
movq %rsi, %r11 // R11 := arg_array
LOOP_OVER_SHORTY_LOADING_XMMS xmm0, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm1, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm2, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm3, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm4, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm5, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm6, .Lxmm_setup_finished2
LOOP_OVER_SHORTY_LOADING_XMMS xmm7, .Lxmm_setup_finished2
.balign 16
.Lxmm_setup_finished2:
PUSH rbp // Save rbp.
PUSH r8 // Save r8/result*.
PUSH r9 // Save r9/shorty*.
PUSH rbx // Save rbx
PUSH r12 // Save r12
PUSH r13 // Save r13
PUSH r14 // Save r14
PUSH r15 // Save r15
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
movl %edx, %r10d // r10d := arg array size, kept for the copy below.
addl LITERAL(100), %edx // Reserve space for return addr, StackReference<method>, rbp,
// r8, r9, rbx, r12, r13, r14, and r15 in frame.
andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
subl LITERAL(72), %edx // Remove space for return address, rbp, r8, r9, rbx, r12,
// r13, r14, and r15.
subq %rdx, %rsp // Reserve stack space for argument array.
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
movq LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
movq %rsi, %r11 // r11 := arg_array
leaq 8(%rsp), %rdi // rdi is pointing just above the ArtMethod* in the
// stack arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
leaq 1(%r9), %r10 // r10 := shorty + 1 ; ie skip return arg character
movq %rax, %rdi // rdi := method to be called
LOOP_OVER_SHORTY_LOADING_GPRS rsi, esi, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS rdx, edx, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS rcx, ecx, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2
.Lgpr_setup_finished2:
call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
POP r15 // Pop r15
POP r14 // Pop r14
POP r13 // Pop r13
POP r12 // Pop r12
POP rbx // Pop rbx
POP r9 // Pop r9 - shorty*.
POP r8 // Pop r8 - result*.
POP rbp // Pop rbp
cmpb LITERAL(68), (%r9) // Test if result type char == 'D'.
je .Lreturn_double_quick2
cmpb LITERAL(70), (%r9) // Test if result type char == 'F'.
je .Lreturn_float_quick2
movq %rax, (%r8) // Store the result assuming its a long, int or Object*
ret
.Lreturn_double_quick2:
movsd %xmm0, (%r8) // Store the double floating point result.
ret
.Lreturn_float_quick2:
movss %xmm0, (%r8) // Store the floating point result.
ret
#endif  // __APPLE__
END_FUNCTION art_quick_invoke_static_stub
/*
 * Long jump stub.
 * On entry:
 *   rdi = gprs
 *   rsi = fprs
 * Restores all 16 FPRs from fprs, then points rsp at the gprs array and pops
 * every GPR from it; the saved rsp value is loaded last, and the final ret
 * consumes the saved rip stored at the top of the target stack.
 */
DEFINE_FUNCTION art_quick_do_long_jump
#if defined(__APPLE__)
int3
int3
#else
// Restore FPRs.
movq 0(%rsi), %xmm0
movq 8(%rsi), %xmm1
movq 16(%rsi), %xmm2
movq 24(%rsi), %xmm3
movq 32(%rsi), %xmm4
movq 40(%rsi), %xmm5
movq 48(%rsi), %xmm6
movq 56(%rsi), %xmm7
movq 64(%rsi), %xmm8
movq 72(%rsi), %xmm9
movq 80(%rsi), %xmm10
movq 88(%rsi), %xmm11
movq 96(%rsi), %xmm12
movq 104(%rsi), %xmm13
movq 112(%rsi), %xmm14
movq 120(%rsi), %xmm15
// Restore GPRs.
movq %rdi, %rsp // RSP points to gprs.
// Load all registers except RSP and RIP with values in gprs.
popq %r15
popq %r14
popq %r13
popq %r12
popq %r11
popq %r10
popq %r9
popq %r8
popq %rdi
popq %rsi
popq %rbp
addq LITERAL(8), %rsp // Skip rsp
popq %rbx
popq %rdx
popq %rcx
popq %rax
popq %rsp // Load stack pointer.
ret // From higher in the stack pop rip.
#endif  // __APPLE__
END_FUNCTION art_quick_do_long_jump
/*
 * Defines entrypoint c_name that calls cxx_name(Thread*) under a refs-only
 * callee-save frame, then returns or delivers via return_macro.
 */
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
call VAR(cxx_name) // cxx_name(Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Defines entrypoint c_name that calls cxx_name(arg0, Thread*) under a
 * refs-only callee-save frame. arg0 is already in rdi.
 */
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call VAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Defines entrypoint c_name that calls cxx_name(arg0, arg1, Thread*) under a
 * refs-only callee-save frame. arg0/arg1 are already in rdi/rsi.
 */
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Defines entrypoint c_name that calls cxx_name(arg0, arg1, arg2, Thread*)
 * under a refs-only callee-save frame. arg0-arg2 are already in rdi/rsi/rdx.
 */
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Defines entrypoint c_name that calls cxx_name(arg0, arg1, arg2, arg3, Thread*)
 * under a refs-only callee-save frame. arg0-arg3 are already in rdi/rsi/rdx/rcx.
 */
MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, arg3, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Like ONE_ARG_DOWNCALL, but additionally passes the caller's ArtMethod*
 * (the referrer, read from the caller's frame at 8(%rsp) before the new
 * frame is built) as the second argument.
 */
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rsi // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 is in rdi
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call VAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Like TWO_ARG_DOWNCALL, but additionally passes the caller's ArtMethod*
 * (the referrer, read from the caller's frame at 8(%rsp) before the new
 * frame is built) as the third argument.
 */
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rdx // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 and arg1 are in rdi/rsi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
call VAR(cxx_name) // (arg0, arg1, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Like THREE_ARG_DOWNCALL, but additionally passes the caller's ArtMethod*
 * (the referrer, read from the caller's frame at 8(%rsp) before the new
 * frame is built) as the fourth argument.
 */
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rcx // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro) // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
/*
 * Return macro for downcalls whose C++ helper returns a pointer in rax:
 * non-null means success (return it), null means a pending exception.
 */
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
testq %rax, %rax // rax == 0 ?
jz 1f // if rax == 0 goto 1
ret // return
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
/*
 * Return macro for downcalls whose C++ helper returns an int status in eax:
 * zero means success (return), non-zero means a pending exception.
 */
MACRO0(RETURN_IF_EAX_ZERO)
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
ret // return
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
/*
 * Return macro that checks the thread's exception slot directly: return if no
 * exception is pending, otherwise deliver it. Clobbers rcx.
 */
MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
movq %gs:THREAD_EXCEPTION_OFFSET, %rcx // get exception field
testq %rcx, %rcx // rcx == 0 ?
jnz 1f // if rcx != 0 goto 1
ret // return
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
// Fast path: resolve the class from the dex cache, verify it is initialized,
// non-finalizable, and small enough for a thread-local rosalloc bracket, then
// pop an object off the bracket's free list. Any check failure falls through
// to the C++ slow path.
DEFINE_FUNCTION art_quick_alloc_object_rosalloc
// Fast path rosalloc allocation.
// RDI: type_idx, RSI: ArtMethod*, RAX: return value
// RDX, RCX, R8, R9: free.
movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
// Load the class (edx)
movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_rosalloc_slow_path
// Check class status.
cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
jne .Lart_quick_alloc_object_rosalloc_slow_path
// We don't need a fence (between the
// the status and the access flag
// loads) here because every load is
// a load acquire on x86.
// Check access flags has
// kAccClassIsFinalizable
testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
jnz .Lart_quick_alloc_object_rosalloc_slow_path
// Check if the thread local
// allocation stack has room.
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top.
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
jae .Lart_quick_alloc_object_rosalloc_slow_path
// Load the object size
movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %eax
// Check if the size is for a thread
// local allocation
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
ja .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size.
// Align up the size by the rosalloc
// bracket quantum size and divide
// by the quantum size and subtract
// by 1. This code is a shorter but
// equivalent version.
subq LITERAL(1), %rax
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
// Load the rosalloc run (r9)
movq THREAD_ROSALLOC_RUNS_OFFSET(%r8, %rax, __SIZEOF_POINTER__), %r9
// Load the free list head (rax). This
// will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
testq %rax, %rax
jz .Lart_quick_alloc_object_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi.
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
// allocation stack top.
movl %eax, (%rcx)
addq LITERAL(COMPRESSED_REFERENCE_SIZE), %rcx
movq %rcx, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8)
// Load the next pointer of the head
// and update the list head with the
// next pointer.
movq ROSALLOC_SLOT_NEXT_OFFSET(%rax), %rcx
movq %rcx, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9)
// Store the class pointer in the
// header. This also overwrites the
// next pointer. The offsets are
// asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
// Decrement the size of the free list
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9)
// No fence necessary for x86.
ret
.Lart_quick_alloc_object_rosalloc_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_alloc_object_rosalloc
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
// Fast path: resolve and validate the class like the rosalloc variant, then bump
// thread_local_pos by the aligned object size if it fits in the TLAB.
DEFINE_FUNCTION art_quick_alloc_object_tlab
// Fast path tlab allocation.
// RDI: uint32_t type_idx, RSI: ArtMethod*
// RDX, RCX, R8, R9: free. RAX: return val.
// TODO: Add read barrier when this function is used.
// Note this function can/should implement read barrier fast path only
// (no read barrier slow path) because this is the fast path of tlab allocation.
// We can fall back to the allocation slow path to do the read barrier slow path.
#if defined(USE_READ_BARRIER)
int3
int3
#endif
// Might need a special macro since rsi and edx is 32b/64b mismatched.
movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
// TODO: Add read barrier when this function is used.
// Might need to break down into multiple instructions to get the base address in a register.
// Load the class
movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_tlab_slow_path
// Check class status.
cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
jne .Lart_quick_alloc_object_tlab_slow_path
// Check access flags has kAccClassIsFinalizable
testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
jnz .Lart_quick_alloc_object_tlab_slow_path
movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size.
addl LITERAL(OBJECT_ALIGNMENT_MASK), %ecx // Align the size by 8. (addr + 7) & ~7.
andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %ecx
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
movq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Load thread_local_pos.
addq %rax, %rcx // Add the object size.
cmpq THREAD_LOCAL_END_OFFSET(%r8), %rcx // Check if it fits.
ja .Lart_quick_alloc_object_tlab_slow_path
movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos.
addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increment thread_local_objects.
// Store the class pointer in the header.
// No fence needed for x86.
movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
ret // Fast path succeeded.
.Lart_quick_alloc_object_tlab_slow_path:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call SYMBOL(artAllocObjectFromCodeTLAB) // cxx_name(arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_alloc_object_tlab
// Region-TLAB allocator uses the generic generated entrypoint.
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
// Resolution / initialization downcalls; each returns a non-null result on
// success or delivers the pending exception.
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
/*
 * art_quick_lock_object(Object* obj /* rdi */)
 * Fast-path monitor enter: acquires an unlocked thin lock, or bumps the
 * recursion count of a thin lock this thread already holds, preserving the
 * read barrier bits via cmpxchg. Null objects, inflated locks (high state
 * bits set), other owners, and count overflow all go to the C++ slow path.
 */
DEFINE_FUNCTION art_quick_lock_object
testl %edi, %edi // Null check object/rdi.
jz .Lslow_lock
.Lretry_lock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word.
test LITERAL(LOCK_WORD_STATE_MASK), %ecx // Test the 2 high bits.
jne .Lslow_lock // Slow path if either of the two high bits are set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
test %ecx, %ecx
jnz .Lalready_thin // Lock word contains a thin lock.
// unlocked case - edx: original lock word, edi: obj.
movl %edx, %eax // eax: lock word zero except for read barrier bits.
movl %gs:THREAD_ID_OFFSET, %edx // edx := thread id
or %eax, %edx // edx: thread id with count of 0 + read barrier bits.
lock cmpxchg %edx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
jnz .Lretry_lock // cmpxchg failed retry
ret
.Lalready_thin: // edx: lock word (with high 2 bits zero and original rb bits), edi: obj.
movl %gs:THREAD_ID_OFFSET, %ecx // ecx := thread id
cmpw %cx, %dx // do we hold the lock already?
jne .Lslow_lock
movl %edx, %ecx // copy the lock word to check count overflow.
andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %ecx // zero the read barrier bits.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %ecx // increment recursion count
test LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // overflowed if either of the upper two bits (28-29) are set
jne .Lslow_lock // count overflowed so go slow
movl %edx, %eax // copy the lock word as the old val for cmpxchg.
addl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx // increment recursion count again for real.
// update lockword, cmpxchg necessary for read barrier bits.
lock cmpxchg %edx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi) // eax: old val, edx: new val.
jnz .Lretry_lock // cmpxchg failed retry
ret
.Lslow_lock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
/*
 * art_quick_lock_object_no_inline(Object* obj /* rdi */)
 * Variant without the assembly fast path: always calls artLockObjectFromCode.
 */
DEFINE_FUNCTION art_quick_lock_object_no_inline
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object_no_inline
/*
 * art_quick_unlock_object(Object* obj /* rdi */)
 * Fast-path monitor exit: clears a thin lock held by this thread (or
 * decrements its recursion count), preserving the read barrier bits. Under
 * USE_READ_BARRIER the store must be a cmpxchg; otherwise a plain store
 * suffices. Null objects, inflated locks, and other owners go slow.
 */
DEFINE_FUNCTION art_quick_unlock_object
testl %edi, %edi // null check object/edi
jz .Lslow_unlock
.Lretry_unlock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word
movl %gs:THREAD_ID_OFFSET, %edx // edx := thread id
test LITERAL(LOCK_WORD_STATE_MASK), %ecx
jnz .Lslow_unlock // lock word contains a monitor
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
movl %ecx, %edx // copy the lock word to detect new count of 0.
andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), %edx // zero the read barrier bits.
cmpl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %edx
jae .Lrecursive_thin_unlock
// update lockword, cmpxchg necessary for read barrier bits.
movl %ecx, %eax // eax: old lock word.
andl LITERAL(LOCK_WORD_READ_BARRIER_STATE_MASK), %ecx // ecx: new lock word zero except original rb bits.
#ifndef USE_READ_BARRIER
movl %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
#else
lock cmpxchg %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi) // eax: old val, ecx: new val.
jnz .Lretry_unlock // cmpxchg failed retry
#endif
ret
.Lrecursive_thin_unlock: // ecx: original lock word, edi: obj
// update lockword, cmpxchg necessary for read barrier bits.
movl %ecx, %eax // eax: old lock word.
subl LITERAL(LOCK_WORD_THIN_LOCK_COUNT_ONE), %ecx
#ifndef USE_READ_BARRIER
mov %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi)
#else
lock cmpxchg %ecx, MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi) // eax: old val, ecx: new val.
jnz .Lretry_unlock // cmpxchg failed retry
#endif
ret
.Lslow_unlock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
/*
 * Slow-path-only object unlock: unconditionally calls into the runtime,
 * skipping the inline thin-lock fast path.
 * On entry: rdi = object. Returns via RETURN_IF_EAX_ZERO.
 */
DEFINE_FUNCTION art_quick_unlock_object_no_inline
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object_no_inline
/*
 * checkcast entrypoint: rdi = destination Class*, rsi = Class* of the object
 * being cast. Returns normally if assignable; otherwise throws
 * ClassCastException (never returns).
 * Args are pushed up front so they can be recovered for the throw path after
 * artIsAssignableFromCode has clobbered the argument registers.
 */
DEFINE_FUNCTION art_quick_check_cast
PUSH rdi // Save args for exc
PUSH rsi
subq LITERAL(8), %rsp // Alignment padding.
CFI_ADJUST_CFA_OFFSET(8)
SETUP_FP_CALLEE_SAVE_FRAME
call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testq %rax, %rax
jz 1f // jump forward if not assignable
RESTORE_FP_CALLEE_SAVE_FRAME
// Success: pop padding + both saved args (8 + 8 + 8 = 24) in one adjustment.
addq LITERAL(24), %rsp // pop arguments
CFI_ADJUST_CFA_OFFSET(-24)
ret
CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds.
1:
RESTORE_FP_CALLEE_SAVE_FRAME
addq LITERAL(8), %rsp // pop padding
CFI_ADJUST_CFA_OFFSET(-8)
POP rsi // Pop arguments
POP rdi
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
UNREACHABLE
END_FUNCTION art_quick_check_cast
// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
// Used after READ_BARRIER so that the register holding the freshly-loaded
// reference is not clobbered by the register-restore sequence.
MACRO2(POP_REG_NE, reg, exclude_reg)
.ifc RAW_VAR(reg), RAW_VAR(exclude_reg)
addq MACRO_LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
.else
POP RAW_VAR(reg)
.endif
END_MACRO
/*
 * Macro to insert read barrier, used in art_quick_aput_obj.
 * obj_reg and dest_reg{32|64} are registers, offset is a defined literal such as
 * MIRROR_OBJECT_CLASS_OFFSET. dest_reg needs two versions to handle the mismatch between
 * 64b PUSH/POP and 32b argument.
 * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
 *
 * With USE_READ_BARRIER: spills the volatile argument registers, calls
 * artReadBarrierSlow(ref, obj_reg, offset) and moves the returned reference
 * into dest_reg. Without it: a plain (unpoisoned) 32-bit heap-reference load.
 *
 * As with art_quick_aput_obj* functions, the 64b versions are in comments.
 */
MACRO4(READ_BARRIER, obj_reg, offset, dest_reg32, dest_reg64)
#ifdef USE_READ_BARRIER
PUSH rax // save registers that might be used
PUSH rdi
PUSH rsi
PUSH rdx
PUSH rcx
SETUP_FP_CALLEE_SAVE_FRAME
// Outgoing argument set up
// movl REG_VAR(ref_reg32), %edi // pass ref, no-op for now since parameter ref is unused
// // movq REG_VAR(ref_reg64), %rdi
movl REG_VAR(obj_reg), %esi // pass obj_reg
// movq REG_VAR(obj_reg), %rsi
movl MACRO_LITERAL((RAW_VAR(offset))), %edx // pass offset, double parentheses are necessary
// movq MACRO_LITERAL((RAW_VAR(offset))), %rdx
call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
// No need to unpoison return value in rax, artReadBarrierSlow() would do the unpoisoning.
.ifnc RAW_VAR(dest_reg32), eax
// .ifnc RAW_VAR(dest_reg64), rax
movl %eax, REG_VAR(dest_reg32) // save loaded ref in dest_reg
// movq %rax, REG_VAR(dest_reg64)
.endif
RESTORE_FP_CALLEE_SAVE_FRAME
POP_REG_NE rcx, RAW_VAR(dest_reg64) // Restore registers except dest_reg
POP_REG_NE rdx, RAW_VAR(dest_reg64)
POP_REG_NE rsi, RAW_VAR(dest_reg64)
POP_REG_NE rdi, RAW_VAR(dest_reg64)
POP_REG_NE rax, RAW_VAR(dest_reg64)
#else
movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg32)
// movq RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg64)
UNPOISON_HEAP_REF RAW_VAR(dest_reg32) // UNPOISON_HEAP_REF only takes a 32b register
#endif // USE_READ_BARRIER
END_MACRO
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
*
* Currently all the parameters should fit into the 32b portions of the registers. Index always
* will. So we optimize for a tighter encoding. The 64b versions are in comments.
*
* rdi(edi) = array, rsi(esi) = index, rdx(edx) = value
*/
/*
 * aput-object with full checking: first null-checks the array, then falls
 * through to the bound-checking variant. rdi(edi) = array, rsi = index,
 * rdx = value. Throws NPE on a null array. (Stubbed out with int3 on Apple
 * targets, which this file does not support.)
 */
DEFINE_FUNCTION art_quick_aput_obj_with_null_and_bound_check
#if defined(__APPLE__)
int3
int3
#else
testl %edi, %edi
// testq %rdi, %rdi
jnz art_quick_aput_obj_with_bound_check
jmp art_quick_throw_null_pointer_exception
#endif // __APPLE__
END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
/*
 * aput-object with bounds checking: rdi(edi) = non-null array, rsi(esi) =
 * index, rdx = value. Unsigned compare (jb) handles negative indices too,
 * since they become huge unsigned values. On failure the (index, length)
 * pair is moved into the arg registers for art_quick_throw_array_bounds.
 */
DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
#if defined(__APPLE__)
int3
int3
#else
movl MIRROR_ARRAY_LENGTH_OFFSET(%edi), %ecx
// movl MIRROR_ARRAY_LENGTH_OFFSET(%rdi), %ecx // This zero-extends, so value(%rcx)=value(%ecx)
cmpl %ecx, %esi
jb art_quick_aput_obj
mov %esi, %edi
// mov %rsi, %rdi
mov %ecx, %esi
// mov %rcx, %rsi
jmp art_quick_throw_array_bounds
#endif // __APPLE__
END_FUNCTION art_quick_aput_obj_with_bound_check
/*
 * aput-object store with type check: rdi(edi) = array (non-null, index in
 * bounds), rsi(esi) = index, rdx(edx) = value.
 * Fast path: null store, or value's class == array's component type.
 * Slow path: artIsAssignableFromCode; on failure throws
 * ArrayStoreException. Non-null stores mark the card table
 * (card = addr >> 7, i.e. gc::accounting::CardTable granularity).
 */
DEFINE_FUNCTION art_quick_aput_obj
testl %edx, %edx // store of null
// test %rdx, %rdx
jz .Ldo_aput_null
READ_BARRIER edi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
// READ_BARRIER rdi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
READ_BARRIER ecx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
// READ_BARRIER rcx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax // rax is free.
// READ_BARRIER rdx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax
cmpl %eax, %ecx // value's type == array's component type - trivial assignability
#else
cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability
// cmpq MIRROR_CLASS_OFFSET(%rdx), %rcx
#endif
jne .Lcheck_assignability
.Ldo_aput:
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
// Mark the card for the stored-into array (write-barrier).
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
shrl LITERAL(7), %edi
// shrl LITERAL(7), %rdi
movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero
ret
.Ldo_aput_null:
// Null stores need no type check and no card mark.
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
ret
.Lcheck_assignability:
// Save arguments.
PUSH rdi
PUSH rsi
PUSH rdx
SETUP_FP_CALLEE_SAVE_FRAME
#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
// The load of MIRROR_OBJECT_CLASS_OFFSET(%edx) is redundant, eax still holds the value.
movl %eax, %esi // Pass arg2 = value's class.
// movq %rax, %rsi
#else
// "Uncompress" = do nothing, as already zero-extended on load.
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
#endif
movq %rcx, %rdi // Pass arg1 = array's component type.
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
// Exception?
testq %rax, %rax
jz .Lthrow_array_store_exception
RESTORE_FP_CALLEE_SAVE_FRAME
// Restore arguments.
POP rdx
POP rsi
POP rdi
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
shrl LITERAL(7), %edi
// shrl LITERAL(7), %rdi
movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero
// movb %dl, (%rdx, %rdi)
ret
CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds.
.Lthrow_array_store_exception:
RESTORE_FP_CALLEE_SAVE_FRAME
// Restore arguments.
POP rdx
POP rsi
POP rdi
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // Save all registers as basis for long jump context.
// Outgoing argument set up.
movq %rdx, %rsi // Pass arg 2 = value.
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
// Pass arg 1 = array.
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
UNREACHABLE
END_FUNCTION art_quick_aput_obj
// TODO: This is quite silly on X86_64 now.
// Thin tail-wrapper around libc memcpy; args are already in the SysV
// registers (rdi = dst, rsi = src, rdx = size), so just call through.
DEFINE_FUNCTION art_quick_memcpy
call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
ret
END_FUNCTION art_quick_memcpy
NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
// 64-bit arithmetic helpers are not needed on x86-64 (native 64-bit ops).
UNIMPLEMENTED art_quick_ldiv
UNIMPLEMENTED art_quick_lmod
UNIMPLEMENTED art_quick_lmul
UNIMPLEMENTED art_quick_lshl
UNIMPLEMENTED art_quick_lshr
UNIMPLEMENTED art_quick_lushr
// Instance field setters: (field_idx, obj, new_val, referrer) -> 0 on success.
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
// Instance field getters: return value or deliver the pending exception.
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
// Static field setters (set64_static is defined separately below: different arg order).
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO
// Static field getters.
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
// This is singled out as the argument order is different.
/*
 * art_quick_set64_static: rdi = field_idx, rdx = new_val (already in place),
 * referrer is read from the caller's stack slot before the callee-save frame
 * is set up (which would move %rsp).
 */
DEFINE_FUNCTION art_quick_set64_static
// new_val is already in %rdx
movq 8(%rsp), %rsi // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// field_idx is in rdi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
/*
 * Entry point for proxy method invocations: rdi = proxy method, rsi =
 * receiver. Builds a refs-and-args frame and hands off to
 * artQuickProxyInvokeHandler; the integer result is mirrored into xmm0 so
 * both integer and FP callers see the return value.
 */
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
movq %rsp, %rcx // Pass SP.
call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
movq %rax, %xmm0 // Copy return value in case of float returns.
RETURN_OR_DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
* rdi is the conflict ArtMethod.
* rax is a hidden argument that holds the target interface method's dex method index.
*
* Note that this stub writes to r10 and rdi.
*/
/*
 * IMT conflict resolution (see the comment block above for register
 * contract). Linearly scans the ImtConflictTable — an array of
 * (interface method, target method) pointer pairs terminated by a null
 * entry — and tail-calls the matching target's quick code. Falls back to
 * artInvokeInterfaceTrampoline when no entry matches.
 */
DEFINE_FUNCTION art_quick_imt_conflict_trampoline
#if defined(__APPLE__)
int3
int3
#else
movq __SIZEOF_POINTER__(%rsp), %r10 // Load referrer
movq ART_METHOD_DEX_CACHE_METHODS_OFFSET_64(%r10), %r10 // Load dex cache methods array
movq 0(%r10, %rax, __SIZEOF_POINTER__), %r10 // Load interface method
movq ART_METHOD_JNI_OFFSET_64(%rdi), %rdi // Load ImtConflictTable
.Limt_table_iterate:
cmpq %r10, 0(%rdi)
jne .Limt_table_next_entry
// We successfully hit an entry in the table. Load the target method
// and jump to it.
movq __SIZEOF_POINTER__(%rdi), %rdi
jmp *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi)
.Limt_table_next_entry:
// If the entry is null, the interface method is not in the ImtConflictTable.
cmpq LITERAL(0), 0(%rdi)
jz .Lconflict_trampoline
// Iterate over the entries of the ImtConflictTable.
addq LITERAL(2 * __SIZEOF_POINTER__), %rdi
jmp .Limt_table_iterate
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
#endif // __APPLE__
END_FUNCTION art_quick_imt_conflict_trampoline
/*
 * Method resolution trampoline: resolves the called method via
 * artQuickResolutionTrampoline, then tail-calls the returned code pointer
 * with the (possibly updated) ArtMethod* reloaded from the frame into RDI.
 * A null code pointer means an exception is pending.
 */
DEFINE_FUNCTION art_quick_resolution_trampoline
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rdx
movq %rsp, %rcx
call SYMBOL(artQuickResolutionTrampoline) // (called, receiver, Thread*, SP)
movq %rax, %r10 // Remember returned code pointer in R10.
movq (%rsp), %rdi // Load called method into RDI.
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
testq %r10, %r10 // If code pointer is null goto deliver pending exception.
jz 1f
jmp *%r10 // Tail call into method.
1:
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_resolution_trampoline
/* Generic JNI frame layout:
*
* #-------------------#
* | |
* | caller method... |
* #-------------------# <--- SP on entry
*
* |
* V
*
* #-------------------#
* | caller method... |
* #-------------------#
* | Return |
* | R15 | callee save
* | R14 | callee save
* | R13 | callee save
* | R12 | callee save
* | R9 | arg5
* | R8 | arg4
* | RSI/R6 | arg1
* | RBP/R5 | callee save
* | RBX/R3 | callee save
* | RDX/R2 | arg2
* | RCX/R1 | arg3
* | XMM7 | float arg 8
* | XMM6 | float arg 7
* | XMM5 | float arg 6
* | XMM4 | float arg 5
* | XMM3 | float arg 4
* | XMM2 | float arg 3
* | XMM1 | float arg 2
* | XMM0 | float arg 1
* | RDI/Method* | <- sp
* #-------------------#
* | Scratch Alloca | 5K scratch space
* #---------#---------#
* | | sp* |
* | Tramp. #---------#
* | args | thread |
* | Tramp. #---------#
* | | method |
* #-------------------# <--- SP on artQuickGenericJniTrampoline
*
* |
* v artQuickGenericJniTrampoline
*
* #-------------------#
* | caller method... |
* #-------------------#
* | Return |
* | Callee-Save Data |
* #-------------------#
* | handle scope |
* #-------------------#
* | Method* | <--- (1)
* #-------------------#
* | local ref cookie | // 4B
* | handle scope size | // 4B TODO: roll into call stack alignment?
* #-------------------#
* | JNI Call Stack |
* #-------------------# <--- SP on native call
* | |
* | Stack for Regs | The trampoline assembly will pop these values
* | | into registers for native call
* #-------------------#
* | Native code ptr |
* #-------------------#
* | Free scratch |
* #-------------------#
* | Ptr to (1) | <--- RSP
* #-------------------#
*/
/*
* Called to do a generic JNI down-call
*/
/*
 * Generic JNI down-call (frame layout documented in the comment block
 * above). rdi = ArtMethod*. Builds the managed frame, allocates a 5K
 * scratch alloca, lets artQuickGenericJniTrampoline lay out the native call
 * stack and handle scope, pops the native arg registers from the alloca,
 * calls the native code, and post-processes the result through
 * artQuickGenericJniEndTrampoline. rbp anchors the managed frame so the
 * alloca can be torn down with a single mov.
 */
DEFINE_FUNCTION art_quick_generic_jni_trampoline
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI
movq %rsp, %rbp // save SP at (old) callee-save frame
CFI_DEF_CFA_REGISTER(rbp)
//
// reserve a lot of space
//
// 4 local state ref
// 4 padding
// 4196 4k scratch space, enough for 2x 256 8-byte parameters (TODO: handle scope overhead?)
// 16 handle scope member fields ?
// + 112 14x 8-byte stack-2-register space
// ------
// 4332
// 16-byte aligned: 4336
// Note: 14x8 = 7*16, so the stack stays aligned for the native call...
// Also means: the padding is somewhere in the middle
//
//
// New test: use 5K and release
// 5k = 5120
subq LITERAL(5120), %rsp
// prepare for artQuickGenericJniTrampoline call
// (Thread*, SP)
// rdi rsi <= C calling convention
// gs:... rbp <= where they are
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rbp, %rsi
call SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
// The C call will have registered the complete save-frame on success.
// The result of the call is:
// %rax: pointer to native code, 0 on error.
// %rdx: pointer to the bottom of the used area of the alloca, can restore stack till there.
// Check for error = 0.
test %rax, %rax
jz .Lexception_in_native
// Release part of the alloca.
movq %rdx, %rsp
// pop from the register-passing alloca region
// what's the right layout?
// Six GPR args in SysV order, then eight FP args, laid down by the C helper.
popq %rdi
popq %rsi
popq %rdx
popq %rcx
popq %r8
popq %r9
// TODO: skip floating point if unused, some flag.
movq 0(%rsp), %xmm0
movq 8(%rsp), %xmm1
movq 16(%rsp), %xmm2
movq 24(%rsp), %xmm3
movq 32(%rsp), %xmm4
movq 40(%rsp), %xmm5
movq 48(%rsp), %xmm6
movq 56(%rsp), %xmm7
addq LITERAL(64), %rsp // floating-point done
// native call
call *%rax
// result sign extension is handled in C code
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
// rdi rsi rdx <= C calling convention
// gs:... rax xmm0 <= where they are
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rax, %rsi
movq %xmm0, %rdx
call SYMBOL(artQuickGenericJniEndTrampoline)
// Pending exceptions possible.
// TODO: use cmpq, needs direct encoding because of gas bug
movq %gs:THREAD_EXCEPTION_OFFSET, %rcx
test %rcx, %rcx
jnz .Lexception_in_native
// Tear down the alloca.
movq %rbp, %rsp
CFI_DEF_CFA_REGISTER(rsp)
// Tear down the callee-save frame.
// Load FPRs.
// movq %xmm0, 16(%rsp) // doesn't make sense!!!
movq 24(%rsp), %xmm1 // neither does this!!!
movq 32(%rsp), %xmm2
movq 40(%rsp), %xmm3
movq 48(%rsp), %xmm4
movq 56(%rsp), %xmm5
movq 64(%rsp), %xmm6
movq 72(%rsp), %xmm7
movq 80(%rsp), %xmm12
movq 88(%rsp), %xmm13
movq 96(%rsp), %xmm14
movq 104(%rsp), %xmm15
// was 80 bytes
addq LITERAL(80 + 4*8), %rsp
CFI_ADJUST_CFA_OFFSET(-80 - 4*8)
// Save callee and GPR args, mixed together to agree with core spills bitmap.
POP rcx // Arg.
POP rdx // Arg.
POP rbx // Callee save.
POP rbp // Callee save.
POP rsi // Arg.
POP r8 // Arg.
POP r9 // Arg.
POP r12 // Callee save.
POP r13 // Callee save.
POP r14 // Callee save.
POP r15 // Callee save.
// store into fpr, for when it's a fpr return...
movq %rax, %xmm0
ret
.Lexception_in_native:
movq %gs:THREAD_TOP_QUICK_FRAME_OFFSET, %rsp
CFI_DEF_CFA_REGISTER(rsp)
// Do a call to push a new save-all frame required by the runtime.
call .Lexception_call
.Lexception_call:
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
/*
* Called to bridge from the quick to interpreter ABI. On entry the arguments match those
* of a quick call:
* RDI = method being called / to bridge to.
* RSI, RDX, RCX, R8, R9 are arguments to that method.
*/
/*
 * Quick-to-interpreter bridge (register contract in the comment above):
 * spills the quick-ABI arguments into a refs-and-args frame and lets
 * artQuickToInterpreterBridge run the method in the interpreter. The
 * integer result is mirrored into xmm0 for FP-returning methods.
 */
DEFINE_FUNCTION art_quick_to_interpreter_bridge
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread::Current()
movq %rsp, %rdx // RDX := sp
call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
movq %rax, %xmm0 // Place return value also into floating point return value.
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_to_interpreter_bridge
/*
* Routine that intercepts method calls and returns.
*/
/*
 * Instrumentation entry hook: notifies the runtime of the method entry and
 * redirects the return address to art_quick_instrumentation_exit, then
 * tail-calls the real code pointer returned by
 * artInstrumentationMethodEntryFromCode. r12 (callee-saved, spilled by the
 * frame setup) temporarily preserves the method pointer across the call.
 */
DEFINE_FUNCTION art_quick_instrumentation_entry
#if defined(__APPLE__)
int3
int3
#else
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
movq %rdi, %r12 // Preserve method pointer in a callee-save.
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass thread.
movq FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp), %rcx // Pass return PC.
call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR)
// %rax = result of call.
movq %r12, %rdi // Reload method pointer.
leaq art_quick_instrumentation_exit(%rip), %r12 // Set up return through instrumentation
movq %r12, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp) // exit.
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
jmp *%rax // Tail call to intended method.
#endif // __APPLE__
END_FUNCTION art_quick_instrumentation_entry
/*
 * Instrumentation exit hook: reached instead of the real return address
 * (installed by art_quick_instrumentation_entry). Preserves both the integer
 * (rax) and FP (xmm0) results across the runtime call, then jumps to the
 * true return PC that artInstrumentationMethodExitFromCode hands back in rax
 * (with a second PC in rdx for the deoptimization path).
 */
DEFINE_FUNCTION art_quick_instrumentation_exit
pushq LITERAL(0) // Push a fake return PC as there will be none on the stack.
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// We need to save rax and xmm0. We could use a callee-save from SETUP_REF_ONLY, but then
// we would need to fully restore it. As there are a good number of callee-save registers, it
// seems easier to have an extra small stack area. But this should be revisited.
movq %rsp, %rsi // Pass SP.
PUSH rax // Save integer result.
subq LITERAL(8), %rsp // Save floating-point result.
CFI_ADJUST_CFA_OFFSET(8)
movq %xmm0, (%rsp)
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
movq %rax, %rdx // Pass integer result.
movq %xmm0, %rcx // Pass floating-point result.
call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_res, fpr_res)
movq %rax, %rdi // Store return PC
movq %rdx, %rsi // Store second return PC in hidden arg.
movq (%rsp), %xmm0 // Restore floating-point result.
addq LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
POP rax // Restore integer result.
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
addq LITERAL(8), %rsp // Drop fake return pc.
jmp *%rdi // Return.
END_FUNCTION art_quick_instrumentation_exit
/*
* Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
* will long jump to the upcall with a special exception of -1.
*/
/*
 * Deoptimization entry (see comment above): entered via jump with the fake
 * return address in rsi, so push it to look like a call, then hand off to
 * artDeoptimize which long-jumps into the interpreter. Never returns.
 */
DEFINE_FUNCTION art_quick_deoptimize
pushq %rsi // Entry point for a jump. Fake that we were called.
// Use hidden arg.
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
// Stack should be aligned now.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
call SYMBOL(artDeoptimize) // artDeoptimize(Thread*)
UNREACHABLE
END_FUNCTION art_quick_deoptimize
/*
* Compiled code has requested that we deoptimize into the interpreter. The deoptimization
* will long jump to the interpreter bridge.
*/
/*
 * Deoptimization requested by compiled code itself (see comment above):
 * unlike art_quick_deoptimize this is entered by a normal call, so no fake
 * return PC is needed. Never returns.
 */
DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
// Stack should be aligned now.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
call SYMBOL(artDeoptimizeFromCompiledCode) // artDeoptimizeFromCompiledCode(Thread*)
UNREACHABLE
END_FUNCTION art_quick_deoptimize_from_compiled_code
/*
* String's compareTo.
*
* On entry:
* rdi: this string object (known non-null)
* rsi: comp string object (known non-null)
*/
/*
 * String.compareTo intrinsic (register contract in the comment above).
 * Returns this.length - comp.length when one string is a prefix of the
 * other, otherwise the difference of the first mismatching UTF-16 chars.
 * Uses `repe cmpsw` to scan 16-bit chars; 32-bit address forms are safe
 * because heap references fit in 32 bits.
 */
DEFINE_FUNCTION art_quick_string_compareto
movl MIRROR_STRING_COUNT_OFFSET(%edi), %r8d
movl MIRROR_STRING_COUNT_OFFSET(%esi), %r9d
/* Build pointers to the start of string data */
leal MIRROR_STRING_VALUE_OFFSET(%edi), %edi
leal MIRROR_STRING_VALUE_OFFSET(%esi), %esi
/* Calculate min length and count diff */
movl %r8d, %ecx
movl %r8d, %eax
subl %r9d, %eax
cmovg %r9d, %ecx
/*
 * At this point we have:
 * eax: value to return if first part of strings are equal
 * ecx: minimum among the lengths of the two strings
 * esi: pointer to comp string data
 * edi: pointer to this string data
 */
jecxz .Lkeep_length
repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx
jne .Lnot_equal
.Lkeep_length:
ret
.balign 16
.Lnot_equal:
// cmpsw post-increments, so the mismatching chars sit 2 bytes behind.
movzwl -2(%edi), %eax // get last compared char from this string
movzwl -2(%esi), %ecx // get last compared char from comp string
subl %ecx, %eax // return the difference
ret
END_FUNCTION art_quick_string_compareto
// memcmp16 has no quick stub on x86-64; calls trap via UNIMPLEMENTED.
UNIMPLEMENTED art_quick_memcmp16
/*
 * Thin wrapper for artIsAssignableFromCode(klass, ref_klass): preserves the
 * caller-saved FP registers managed code relies on and keeps the stack
 * 16-byte aligned around the C call. Result in rax.
 */
DEFINE_FUNCTION art_quick_assignable_from_code
SETUP_FP_CALLEE_SAVE_FRAME
subq LITERAL(8), %rsp // Alignment padding.
CFI_ADJUST_CFA_OFFSET(8)
call SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*)
addq LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_FP_CALLEE_SAVE_FRAME
ret
END_FUNCTION art_quick_assignable_from_code
// Return from a nested signal:
// Entry:
// rdi: address of jmp_buf in TLS
/*
 * Unwind out of a nested signal via longjmp(jmp_buf, 1); rdi already holds
 * the jmp_buf address (see the comment above). Never returns.
 */
DEFINE_FUNCTION art_nested_signal_return
// first arg to longjmp is already in correct register
movq LITERAL(1), %rsi // second arg to longjmp (1)
call PLT_SYMBOL(longjmp)
UNREACHABLE
END_FUNCTION art_nested_signal_return
/*
 * Read-barrier mark stub: wraps artReadBarrierMark(obj) (obj in rdi, marked
 * reference returned in rax), preserving the caller-saved FP registers and
 * keeping the stack 16-byte aligned around the C call.
 */
DEFINE_FUNCTION art_quick_read_barrier_mark
SETUP_FP_CALLEE_SAVE_FRAME
subq LITERAL(8), %rsp // Alignment padding.
CFI_ADJUST_CFA_OFFSET(8)
call SYMBOL(artReadBarrierMark) // artReadBarrierMark(obj)
addq LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_FP_CALLEE_SAVE_FRAME
ret
// Fix: this function was erroneously closed with
// "END_FUNCTION art_quick_read_barrier_slow", which mismatches the
// DEFINE_FUNCTION name above (feeding the wrong symbol to the size/end
// bookkeeping) and duplicates the end marker of art_quick_read_barrier_slow
// defined just below.
END_FUNCTION art_quick_read_barrier_mark
/*
 * Read-barrier slow path: wraps artReadBarrierSlow(ref, obj, offset) with
 * args already in rdi/rsi/rdx; loaded reference returned in rax. Preserves
 * caller-saved FP registers and stack alignment around the C call.
 */
DEFINE_FUNCTION art_quick_read_barrier_slow
SETUP_FP_CALLEE_SAVE_FRAME
subq LITERAL(8), %rsp // Alignment padding.
CFI_ADJUST_CFA_OFFSET(8)
call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset)
addq LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_FP_CALLEE_SAVE_FRAME
ret
END_FUNCTION art_quick_read_barrier_slow
/*
 * Read-barrier slow path for GC roots: wraps artReadBarrierForRootSlow(root)
 * (root in rdi, result in rax), preserving caller-saved FP registers and
 * stack alignment around the C call.
 */
DEFINE_FUNCTION art_quick_read_barrier_for_root_slow
SETUP_FP_CALLEE_SAVE_FRAME
subq LITERAL(8), %rsp // Alignment padding.
CFI_ADJUST_CFA_OFFSET(8)
call SYMBOL(artReadBarrierForRootSlow) // artReadBarrierForRootSlow(root)
addq LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_FP_CALLEE_SAVE_FRAME
ret
END_FUNCTION art_quick_read_barrier_for_root_slow
/*
* On stack replacement stub.
* On entry:
* [sp] = return address
* rdi = stack to copy
* rsi = size of stack
* rdx = pc to call
* rcx = JValue* result
* r8 = shorty
* r9 = thread
*
* Note that the native C ABI already aligned the stack to 16-byte.
*/
/*
 * On-stack-replacement stub (register contract in the comment above).
 * Saves non-volatiles, then calls .Losr_entry, which copies the prepared
 * interpreter frame onto the stack (rep movsb) and jumps into the OSR code
 * at rdx; the pushed return address from `call` brings control back here.
 * The result is stored through the JValue* according to the shorty's return
 * char ('D' = 68, 'F' = 70, anything else treated as GPR-sized).
 */
DEFINE_FUNCTION art_quick_osr_stub
// Save the non-volatiles.
PUSH rbp // Save rbp.
PUSH rcx // Save rcx/result*.
PUSH r8 // Save r8/shorty*.
// Save callee saves.
PUSH rbx
PUSH r12
PUSH r13
PUSH r14
PUSH r15
pushq LITERAL(0) // Push null for ArtMethod*.
movl %esi, %ecx // rcx := size of stack
movq %rdi, %rsi // rsi := stack to copy
call .Losr_entry
// Restore stack and callee-saves.
addq LITERAL(8), %rsp
POP r15
POP r14
POP r13
POP r12
POP rbx
POP r8
POP rcx
POP rbp
cmpb LITERAL(68), (%r8) // Test if result type char == 'D'.
je .Losr_return_double_quick
cmpb LITERAL(70), (%r8) // Test if result type char == 'F'.
je .Losr_return_float_quick
movq %rax, (%rcx) // Store the result assuming its a long, int or Object*
ret
.Losr_return_double_quick:
movsd %xmm0, (%rcx) // Store the double floating point result.
ret
.Losr_return_float_quick:
movss %xmm0, (%rcx) // Store the floating point result.
ret
.Losr_entry:
subl LITERAL(8), %ecx // Given stack size contains pushed frame pointer, substract it.
subq %rcx, %rsp
movq %rsp, %rdi // rdi := beginning of stack
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
jmp *%rdx
END_FUNCTION art_quick_osr_stub
|
abforce/xposed_art_n
| 1,939
|
runtime/arch/mips/jni_entrypoints_mips.S
|
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_mips.S"
.set noreorder
.balign 4
/*
* Jni dlsym lookup stub.
*/
.extern artFindNativeMethod
/*
 * MIPS32 JNI dlsym lookup stub: saves the o32 argument registers ($a0-$a3)
 * and FP arg pairs ($f12/$f13, $f14/$f15) plus $ra, calls
 * artFindNativeMethod(Thread*), restores them, and either tail-calls the
 * resolved native code (in $v0, via $t9 as o32 PIC requires) or returns to
 * the caller if lookup failed. Branch delay slots are used throughout —
 * the instruction after each jump/branch executes before the transfer.
 */
ENTRY art_jni_dlsym_lookup_stub
addiu $sp, $sp, -48 # leave room for $f12, $f13, $f14, $f15, $a0, $a1, $a2, $a3, and $ra
.cfi_adjust_cfa_offset 48
sw $ra, 32($sp)
.cfi_rel_offset 31, 32
SDu $f14, $f15, 24, $sp, $t0
SDu $f12, $f13, 16, $sp, $t0
sw $a3, 12($sp)
.cfi_rel_offset 7, 12
sw $a2, 8($sp)
.cfi_rel_offset 6, 8
sw $a1, 4($sp)
.cfi_rel_offset 5, 4
sw $a0, 0($sp)
.cfi_rel_offset 4, 0
la $t9, artFindNativeMethod
jalr $t9 # (Thread*)
move $a0, $s1 # pass Thread::Current()
lw $a0, 0($sp) # restore registers from stack
lw $a1, 4($sp)
lw $a2, 8($sp)
lw $a3, 12($sp)
LDu $f12, $f13, 16, $sp, $t0
LDu $f14, $f15, 24, $sp, $t0
lw $ra, 32($sp)
beq $v0, $zero, .Lno_native_code_found
addiu $sp, $sp, 48 # restore the stack
.cfi_adjust_cfa_offset -48
move $t9, $v0 # put method code result in $t9
jalr $zero, $t9 # leaf call to method's code
nop
.Lno_native_code_found:
jalr $zero, $ra
nop
END art_jni_dlsym_lookup_stub
|
abforce/xposed_art_n
| 79,949
|
runtime/arch/mips/quick_entrypoints_mips.S
|
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "asm_support_mips.S"
#include "arch/quick_alloc_entrypoints.S"
.set noreorder
.balign 4
/* Deliver the given exception */
.extern artDeliverExceptionFromCode
/* Deliver an exception pending on a thread */
.extern artDeliverPendingExceptionFromCode
#define ARG_SLOT_SIZE 32 // space for a0-a3 plus 4 more words
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
* Callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word for Method*
* Clobbers $t0 and $sp
* Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
* Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
*/
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
# See the comment block above for the frame contract: saves $s0-$s8, $gp,
# $ra and the FP callee-saves, stores the runtime's kSaveAll method at the
# bottom, records $sp in Thread::Current()->top_quick_frame, then reserves
# ARG_SLOT_SIZE outgoing-argument bytes below the frame.
addiu $sp, $sp, -96
.cfi_adjust_cfa_offset 96
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 96)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(MIPS) size not as expected."
#endif
sw $ra, 92($sp)
.cfi_rel_offset 31, 92
sw $s8, 88($sp)
.cfi_rel_offset 30, 88
sw $gp, 84($sp)
.cfi_rel_offset 28, 84
sw $s7, 80($sp)
.cfi_rel_offset 23, 80
sw $s6, 76($sp)
.cfi_rel_offset 22, 76
sw $s5, 72($sp)
.cfi_rel_offset 21, 72
sw $s4, 68($sp)
.cfi_rel_offset 20, 68
sw $s3, 64($sp)
.cfi_rel_offset 19, 64
sw $s2, 60($sp)
.cfi_rel_offset 18, 60
sw $s1, 56($sp)
.cfi_rel_offset 17, 56
sw $s0, 52($sp)
.cfi_rel_offset 16, 52
SDu $f30, $f31, 44, $sp, $t1
SDu $f28, $f29, 36, $sp, $t1
SDu $f26, $f27, 28, $sp, $t1
SDu $f24, $f25, 20, $sp, $t1
SDu $f22, $f23, 12, $sp, $t1
SDu $f20, $f21, 4, $sp, $t1
# 1 word for holding Method*
# Load Runtime::instance_ via the GOT, then fetch its kSaveAll method.
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
.cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC.
* Does not include rSUSPEND or rSELF
* callee-save: $s2-$s8 + $gp + $ra, 9 total + 2 words padding + 1 word to hold Method*
* Clobbers $t0 and $sp
* Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
* Reserves FRAME_SIZE_REFS_ONLY_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
*/
.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
# See the comment block above: saves $s2-$s8, $gp, $ra, stores the
# runtime's kRefsOnly method at the bottom, records $sp in
# Thread::Current()->top_quick_frame, then reserves ARG_SLOT_SIZE bytes.
addiu $sp, $sp, -48
.cfi_adjust_cfa_offset 48
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 48)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(MIPS) size not as expected."
#endif
sw $ra, 44($sp)
.cfi_rel_offset 31, 44
sw $s8, 40($sp)
.cfi_rel_offset 30, 40
sw $gp, 36($sp)
.cfi_rel_offset 28, 36
sw $s7, 32($sp)
.cfi_rel_offset 23, 32
sw $s6, 28($sp)
.cfi_rel_offset 22, 28
sw $s5, 24($sp)
.cfi_rel_offset 21, 24
sw $s4, 20($sp)
.cfi_rel_offset 20, 20
sw $s3, 16($sp)
.cfi_rel_offset 19, 16
sw $s2, 12($sp)
.cfi_rel_offset 18, 12
# 2 words for alignment and bottom word will hold Method*
# Load Runtime::instance_ via the GOT, then fetch its kRefsOnly method.
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
lw $t0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
.cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
    # Tear down the frame built by SETUP_REFS_ONLY_CALLEE_SAVE_FRAME:
    # drop the arg slots, reload the saved registers, then pop the 48-byte frame.
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
.cfi_adjust_cfa_offset -ARG_SLOT_SIZE
lw $ra, 44($sp)
.cfi_restore 31
lw $s8, 40($sp)
.cfi_restore 30
lw $gp, 36($sp)
.cfi_restore 28
lw $s7, 32($sp)
.cfi_restore 23
lw $s6, 28($sp)
.cfi_restore 22
lw $s5, 24($sp)
.cfi_restore 21
lw $s4, 20($sp)
.cfi_restore 20
lw $s3, 16($sp)
.cfi_restore 19
lw $s2, 12($sp)
.cfi_restore 18
addiu $sp, $sp, 48
.cfi_adjust_cfa_offset -48
.endm
    # Restore the refs-only frame and return to the caller
    # ($ra is reloaded by the restore macro; nop fills the delay slot).
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
jalr $zero, $ra
nop
.endm
    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
     * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
     * Saves registers only; the Method* slot at 0($sp) is filled in by callers
     * (either with a runtime method or with $a0).
     */
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
addiu $sp, $sp, -80
.cfi_adjust_cfa_offset 80
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 80)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(MIPS) size not as expected."
#endif
sw $ra, 76($sp)
.cfi_rel_offset 31, 76
sw $s8, 72($sp)
.cfi_rel_offset 30, 72
sw $gp, 68($sp)
.cfi_rel_offset 28, 68
sw $s7, 64($sp)
.cfi_rel_offset 23, 64
sw $s6, 60($sp)
.cfi_rel_offset 22, 60
sw $s5, 56($sp)
.cfi_rel_offset 21, 56
sw $s4, 52($sp)
.cfi_rel_offset 20, 52
sw $s3, 48($sp)
.cfi_rel_offset 19, 48
sw $s2, 44($sp)
.cfi_rel_offset 18, 44
    # Incoming managed arguments $a1-$a3 and FP args $f12-$f15 are preserved
    # so the call can be restarted after the runtime helper returns.
sw $a3, 40($sp)
.cfi_rel_offset 7, 40
sw $a2, 36($sp)
.cfi_rel_offset 6, 36
sw $a1, 32($sp)
.cfi_rel_offset 5, 32
SDu $f14, $f15, 24, $sp, $t0
SDu $f12, $f13, 16, $sp, $t0
# bottom will hold Method*
.endm
    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
     * callee-save: $a1-$a3, $f12-$f15, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
     * Clobbers $t0 and $sp
     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     * Reserves FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
     */
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
    # Fill the Method* slot with the refs-and-args runtime method
    # fetched from Runtime::instance_ (via the GOT).
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
lw $t0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
.cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
     * callee-save: $a1-$a3, $f12-$f15, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
     * Clobbers $sp
     * Use $a0 as the Method* and loads it into bottom of stack.
     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     * Reserves FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE + ARG_SLOT_SIZE bytes on the stack
     */
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
sw $a0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
addiu $sp, $sp, -ARG_SLOT_SIZE # reserve argument slots on the stack
.cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm
    # Tear down the frame built by SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME:
    # drop arg slots, reload callee saves and the managed args, pop 80 bytes.
.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack
.cfi_adjust_cfa_offset -ARG_SLOT_SIZE
lw $ra, 76($sp)
.cfi_restore 31
lw $s8, 72($sp)
.cfi_restore 30
lw $gp, 68($sp)
.cfi_restore 28
lw $s7, 64($sp)
.cfi_restore 23
lw $s6, 60($sp)
.cfi_restore 22
lw $s5, 56($sp)
.cfi_restore 21
lw $s4, 52($sp)
.cfi_restore 20
lw $s3, 48($sp)
.cfi_restore 19
lw $s2, 44($sp)
.cfi_restore 18
lw $a3, 40($sp)
.cfi_restore 7
lw $a2, 36($sp)
.cfi_restore 6
lw $a1, 32($sp)
.cfi_restore 5
    # LDu reloads an even/odd FP pair as one 64-bit double ($t1 is scratch).
LDu $f14, $f15, 24, $sp, $t1
LDu $f12, $f13, 16, $sp, $t1
addiu $sp, $sp, 80 # pop frame
.cfi_adjust_cfa_offset -80
.endm
    /*
     * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
     * exception is Thread::Current()->exception_
     * Tail-jumps (never returns); the move into $a0 executes in the delay slot.
     */
.macro DELIVER_PENDING_EXCEPTION
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME # save callee saves for throw
la $t9, artDeliverPendingExceptionFromCode
jalr $zero, $t9 # artDeliverPendingExceptionFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
.endm
    # Common epilogue: return to the caller if no exception is pending on the
    # current thread, otherwise deliver the pending exception (does not return).
.macro RETURN_IF_NO_EXCEPTION
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
bnez $t0, 1f # branch to 1f (deliver) if an exception is pending
nop
jalr $zero, $ra # return on success (no pending exception)
nop
1:
DELIVER_PENDING_EXCEPTION
.endm
    # Common epilogue: a zero result in $v0 means success -> return;
    # nonzero means a pending exception -> deliver it (does not return).
.macro RETURN_IF_ZERO
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
bnez $v0, 1f # success?
nop
jalr $zero, $ra # return on success
nop
1:
DELIVER_PENDING_EXCEPTION
.endm
    # Common epilogue: a nonzero result in $v0 means success -> return;
    # zero means a pending exception -> deliver it (does not return).
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
beqz $v0, 1f # success?
nop
jalr $zero, $ra # return on success
nop
1:
DELIVER_PENDING_EXCEPTION
.endm
    /*
     * On stack replacement stub.
     * On entry:
     * a0 = stack to copy
     * a1 = size of stack
     * a2 = pc to call
     * a3 = JValue* result
     * [sp + 16] = shorty
     * [sp + 20] = thread
     */
ENTRY art_quick_osr_stub
// Save callee general purpose registers, RA and GP.
addiu $sp, $sp, -48
.cfi_adjust_cfa_offset 48
sw $ra, 44($sp)
.cfi_rel_offset 31, 44
sw $s8, 40($sp)
.cfi_rel_offset 30, 40
sw $gp, 36($sp)
.cfi_rel_offset 28, 36
sw $s7, 32($sp)
.cfi_rel_offset 23, 32
sw $s6, 28($sp)
.cfi_rel_offset 22, 28
sw $s5, 24($sp)
.cfi_rel_offset 21, 24
sw $s4, 20($sp)
.cfi_rel_offset 20, 20
sw $s3, 16($sp)
.cfi_rel_offset 19, 16
sw $s2, 12($sp)
.cfi_rel_offset 18, 12
sw $s1, 8($sp)
.cfi_rel_offset 17, 8
sw $s0, 4($sp)
.cfi_rel_offset 16, 4
move $s8, $sp # Save the stack pointer
move $s7, $a1 # Save size of stack
move $s6, $a2 # Save the pc to call
lw rSELF, 48+20($sp) # Load managed thread pointer into rSELF (thread arg at entry sp+20)
addiu $t0, $sp, -12 # Reserve space for stack pointer,
# JValue* result, and ArtMethod* slot.
    # Align down to a 16-byte boundary by clearing the low 4 bits.
srl $t0, $t0, 4 # Align stack pointer to 16 bytes
sll $sp, $t0, 4 # Update stack pointer
sw $s8, 4($sp) # Save old stack pointer
sw $a3, 8($sp) # Save JValue* result
sw $zero, 0($sp) # Store null for ArtMethod* at bottom of frame
    # Copy the interpreter frame (a1 bytes from a0) onto the new stack.
subu $sp, $a1 # Reserve space for callee stack
move $a2, $a1
move $a1, $a0
move $a0, $sp
la $t9, memcpy
jalr $t9 # memcpy (dest a0, src a1, bytes a2)
addiu $sp, $sp, -16 # make space for argument slots for memcpy
    # bal sets $ra to the instruction after the delay slot; the stack
    # restore below executes in the delay slot before .Losr_entry runs.
bal .Losr_entry # Call the method
addiu $sp, $sp, 16 # restore stack after memcpy
lw $a2, 8($sp) # Restore JValue* result
lw $sp, 4($sp) # Restore saved stack pointer
lw $a0, 48+16($sp) # load shorty
lbu $a0, 0($a0) # load return type
li $a1, 'D' # put char 'D' into a1
beq $a0, $a1, .Losr_fp_result # Test if result type char == 'D'
li $a1, 'F' # put char 'F' into a1
beq $a0, $a1, .Losr_fp_result # Test if result type char == 'F'
nop
    # Integer/long result: store v0 and (in the delay slot) v1.
sw $v0, 0($a2)
b .Losr_exit
sw $v1, 4($a2) # store v0/v1 into result
.Losr_fp_result:
SDu $f0, $f1, 0, $a2, $t0 # store f0/f1 into result
.Losr_exit:
lw $ra, 44($sp)
.cfi_restore 31
lw $s8, 40($sp)
.cfi_restore 30
lw $gp, 36($sp)
.cfi_restore 28
lw $s7, 32($sp)
.cfi_restore 23
lw $s6, 28($sp)
.cfi_restore 22
lw $s5, 24($sp)
.cfi_restore 21
lw $s4, 20($sp)
.cfi_restore 20
lw $s3, 16($sp)
.cfi_restore 19
lw $s2, 12($sp)
.cfi_restore 18
lw $s1, 8($sp)
.cfi_restore 17
lw $s0, 4($sp)
.cfi_restore 16
jalr $zero, $ra
addiu $sp, $sp, 48
.cfi_adjust_cfa_offset -48
.Losr_entry:
    # Store $ra into the top word of the copied frame (s7 = frame size),
    # then jump to the OSR entry pc saved in $s6.
addiu $s7, $s7, -4
addu $t0, $s7, $sp
move $t9, $s6
jalr $zero, $t9
sw $ra, 0($t0) # Store RA per the compiler ABI
END art_quick_osr_stub
    /*
     * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
     * FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
     * Restores all FP regs and most GPRs from the context arrays, then jumps
     * to the pc held in $t9. $a0 is reloaded last since it is the base pointer.
     */
ENTRY art_quick_do_long_jump
LDu $f0, $f1, 0*8, $a1, $t1
LDu $f2, $f3, 1*8, $a1, $t1
LDu $f4, $f5, 2*8, $a1, $t1
LDu $f6, $f7, 3*8, $a1, $t1
LDu $f8, $f9, 4*8, $a1, $t1
LDu $f10, $f11, 5*8, $a1, $t1
LDu $f12, $f13, 6*8, $a1, $t1
LDu $f14, $f15, 7*8, $a1, $t1
LDu $f16, $f17, 8*8, $a1, $t1
LDu $f18, $f19, 9*8, $a1, $t1
LDu $f20, $f21, 10*8, $a1, $t1
LDu $f22, $f23, 11*8, $a1, $t1
LDu $f24, $f25, 12*8, $a1, $t1
LDu $f26, $f27, 13*8, $a1, $t1
LDu $f28, $f29, 14*8, $a1, $t1
LDu $f30, $f31, 15*8, $a1, $t1
    # $at is normally assembler-reserved; noat lets us restore it explicitly.
.set push
.set nomacro
.set noat
lw $at, 4($a0)
.set pop
lw $v0, 8($a0)
lw $v1, 12($a0)
lw $a1, 20($a0)
lw $a2, 24($a0)
lw $a3, 28($a0)
lw $t0, 32($a0)
lw $t1, 36($a0)
lw $t2, 40($a0)
lw $t3, 44($a0)
lw $t4, 48($a0)
lw $t5, 52($a0)
lw $t6, 56($a0)
lw $t7, 60($a0)
lw $s0, 64($a0)
lw $s1, 68($a0)
lw $s2, 72($a0)
lw $s3, 76($a0)
lw $s4, 80($a0)
lw $s5, 84($a0)
lw $s6, 88($a0)
lw $s7, 92($a0)
lw $t8, 96($a0)
lw $t9, 100($a0)
    # Offsets 104/108 ($k0/$k1, kernel-reserved) are intentionally skipped.
lw $gp, 112($a0)
lw $sp, 116($a0)
lw $fp, 120($a0)
lw $ra, 124($a0)
lw $a0, 16($a0)
move $v0, $zero # clear result registers v0 and v1
jalr $zero, $t9 # do long jump (target pc was loaded into $t9 above)
move $v1, $zero # (delay slot) v1 = 0
END art_quick_do_long_jump
    /*
     * Called by managed code, saves most registers (forms basis of long jump context) and passes
     * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
     * the bottom of the thread. On entry a0 holds Throwable*
     * Tail-jumps into the runtime; never returns here.
     */
ENTRY art_quick_deliver_exception
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
la $t9, artDeliverExceptionFromCode
jalr $zero, $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
move $a1, rSELF # pass Thread::Current (delay slot)
END art_quick_deliver_exception
    /*
     * Called by managed code to create and deliver a NullPointerException
     * (never returns; the move into $a0 executes in the delay slot).
     */
.extern artThrowNullPointerExceptionFromCode
ENTRY art_quick_throw_null_pointer_exception
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
la $t9, artThrowNullPointerExceptionFromCode
jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
END art_quick_throw_null_pointer_exception
    /*
     * Called by managed code to create and deliver an ArithmeticException
     * (never returns; the move into $a0 executes in the delay slot).
     */
.extern artThrowDivZeroFromCode
ENTRY art_quick_throw_div_zero
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
la $t9, artThrowDivZeroFromCode
jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
END art_quick_throw_div_zero
    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException.
     * On entry a0 = index, a1 = limit; never returns.
     */
.extern artThrowArrayBoundsFromCode
ENTRY art_quick_throw_array_bounds
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
la $t9, artThrowArrayBoundsFromCode
jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current (delay slot)
END art_quick_throw_array_bounds
    /*
     * Called by managed code to create and deliver a StackOverflowError.
     * Never returns; the move into $a0 executes in the delay slot.
     */
.extern artThrowStackOverflowFromCode
ENTRY art_quick_throw_stack_overflow
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
la $t9, artThrowStackOverflowFromCode
jalr $zero, $t9 # artThrowStackOverflowFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
END art_quick_throw_stack_overflow
    /*
     * Called by managed code to create and deliver a NoSuchMethodError.
     * On entry a0 = method_idx; never returns.
     */
.extern artThrowNoSuchMethodFromCode
ENTRY art_quick_throw_no_such_method
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
la $t9, artThrowNoSuchMethodFromCode
jalr $zero, $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
move $a1, rSELF # pass Thread::Current (delay slot)
END art_quick_throw_no_such_method
    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
     * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
     * NOTE: "this" is first visible argument of the target, and so can be found in arg1/$a1.
     *
     * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
     * of the target Method* in $v0 and method->code_ in $v1.
     *
     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
     * pointing back to the original caller.
     */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
move $a2, rSELF # pass Thread::Current
la $t9, \cxx_name
jalr $t9 # (method_idx, this, Thread*, $sp)
addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots) (delay slot)
move $a0, $v0 # save target Method*
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
beqz $v0, 1f # null Method* -> pending exception
move $t9, $v1 # save $v0->code_ (delay slot, harmless if branch taken)
jalr $zero, $t9 # tail-call the resolved method's code
nop
1:
DELIVER_PENDING_EXCEPTION
.endm
    # Wrap INVOKE_TRAMPOLINE_BODY in an exported entry point named \c_name.
.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm
    # Instantiate one trampoline per invoke kind (interface/static/direct/super/virtual).
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
    # Load one 32-bit argument into GPR \reg and continue the shorty loop at
    # \label; the index increment executes in the branch delay slot.
.macro LOAD_WORD_TO_REG reg, next_arg, index, label
lw $\reg, -4($\next_arg) # next_arg points to argument after the current one (offset is 4)
b \label
addiu $\index, 1
.endm
    # Load a 64-bit argument into the GPR pair \reg1/\reg2 and continue at
    # \label; the index update executes in the branch delay slot.
.macro LOAD_LONG_TO_REG reg1, reg2, next_arg, index, label
lw $\reg1, -8($\next_arg) # next_arg points to argument after the current one (offset is 8)
lw $\reg2, -4($\next_arg)
b \label
li $\index, 4 # long can be loaded only to a2_a3 pair so index will be always 4
.endm
    # Load one float argument into FP register \reg and continue at \label;
    # the index increment executes in the branch delay slot.
.macro LOAD_FLOAT_TO_REG reg, next_arg, index, label
lwc1 $\reg, -4($\next_arg) # next_arg points to argument after the current one (offset is 4)
b \label
addiu $\index, 1
.endm
    # Load one double argument into the FP pair \reg1/\reg2 (via the LDu
    # macro) and continue at \label; index increment is in the delay slot.
.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index, tmp, label
LDu $\reg1, $\reg2, -8, $\next_arg, $\tmp # next_arg points to argument after the current one
# (offset is 8)
b \label
addiu $\index, 1
.endm
#define SPILL_SIZE 32
    /*
     * Invocation stub for quick code.
     * On entry:
     * a0 = method pointer
     * a1 = argument array or null for no argument methods
     * a2 = size of argument array in bytes
     * a3 = (managed) thread pointer
     * [sp + 16] = JValue* result
     * [sp + 20] = shorty
     * Copies the arg array onto a fresh 16B-aligned frame, walks the shorty to
     * place args into a1-a3 / f12-f15, calls the method's quick code, then
     * stores the result (v0/v1 or f0/f1) through the JValue* pointer.
     */
ENTRY art_quick_invoke_stub
sw $a0, 0($sp) # save out a0
addiu $sp, $sp, -SPILL_SIZE # spill s0, s1, fp, ra and gp
.cfi_adjust_cfa_offset SPILL_SIZE
sw $gp, 16($sp)
sw $ra, 12($sp)
.cfi_rel_offset 31, 12
sw $fp, 8($sp)
.cfi_rel_offset 30, 8
sw $s1, 4($sp)
.cfi_rel_offset 17, 4
sw $s0, 0($sp)
.cfi_rel_offset 16, 0
move $fp, $sp # save sp in fp
.cfi_def_cfa_register 30
move $s1, $a3 # move managed thread pointer into s1
addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval
addiu $t0, $a2, 4 # create space for ArtMethod* in frame.
subu $t0, $sp, $t0 # reserve & align *stack* to 16 bytes:
srl $t0, $t0, 4 # native calling convention only aligns to 8B,
sll $sp, $t0, 4 # so we have to ensure ART 16B alignment ourselves.
addiu $a0, $sp, 4 # pass stack pointer + ArtMethod* as dest for memcpy
la $t9, memcpy
jalr $t9 # (dest, src, bytes)
addiu $sp, $sp, -16 # make space for argument slots for memcpy (delay slot)
addiu $sp, $sp, 16 # restore stack after memcpy
lw $gp, 16($fp) # restore $gp
lw $a0, SPILL_SIZE($fp) # restore ArtMethod*
lw $a1, 4($sp) # a1 = this*
addiu $t0, $sp, 8 # t0 = pointer to the current argument (skip ArtMethod* and this*)
li $t3, 2 # t3 = gpr_index = 2 (skip A0 and A1)
move $t4, $zero # t4 = fp_index = 0
lw $t1, 20 + SPILL_SIZE($fp) # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
# as the $fp is SPILL_SIZE bytes below the $sp on entry)
addiu $t1, 1 # t1 = shorty + 1 (skip 1 for return type)
    # Shorty walk: dispatch each arg char to the matching register loader.
loop:
lbu $t2, 0($t1) # t2 = shorty[i]
beqz $t2, loopEnd # finish getting args when shorty[i] == '\0'
addiu $t1, 1
li $t9, 'J' # put char 'J' into t9
beq $t9, $t2, isLong # branch if arg type char == 'J'
li $t9, 'D' # put char 'D' into t9
beq $t9, $t2, isDouble # branch if arg type char == 'D'
li $t9, 'F' # put char 'F' into t9
beq $t9, $t2, isSingle # branch if arg type char == 'F'
addiu $t0, 4 # next_arg = curr_arg + 4 (in branch delay slot,
# for both, int and single)
li $t5, 2 # skip a0 and a1 (ArtMethod* and this*)
bne $t5, $t3, 1f # if (gpr_index == 2)
addiu $t5, 1
LOAD_WORD_TO_REG a2, t0, t3, loop # a2 = current argument, gpr_index++
1: bne $t5, $t3, loop # else if (gpr_index == 3)
nop
LOAD_WORD_TO_REG a3, t0, t3, loop # a3 = current argument, gpr_index++
isLong:
addiu $t0, 8 # next_arg = curr_arg + 8
slti $t5, $t3, 3
beqz $t5, 2f # if (gpr_index < 3)
nop
LOAD_LONG_TO_REG a2, a3, t0, t3, loop # a2_a3 = curr_arg, gpr_index = 4
2: b loop # else
li $t3, 4 # gpr_index = 4
isDouble:
addiu $t0, 8 # next_arg = curr_arg + 8
li $t5, 0
bne $t5, $t4, 3f # if (fp_index == 0)
addiu $t5, 1
LOAD_DOUBLE_TO_REG f12, f13, t0, t4, t9, loop # f12_f13 = curr_arg, fp_index++
3: bne $t5, $t4, loop # else if (fp_index == 1)
nop
LOAD_DOUBLE_TO_REG f14, f15, t0, t4, t9, loop # f14_f15 = curr_arg, fp_index++
isSingle:
li $t5, 0
bne $t5, $t4, 4f # if (fp_index == 0)
addiu $t5, 1
LOAD_FLOAT_TO_REG f12, t0, t4, loop # f12 = curr_arg, fp_index++
4: bne $t5, $t4, loop # else if (fp_index == 1)
nop
LOAD_FLOAT_TO_REG f14, t0, t4, loop # f14 = curr_arg, fp_index++
loopEnd:
lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store null for ArtMethod* at bottom of frame (delay slot)
move $sp, $fp # restore the stack
lw $s0, 0($sp)
.cfi_restore 16
lw $s1, 4($sp)
.cfi_restore 17
lw $fp, 8($sp)
.cfi_restore 30
lw $ra, 12($sp)
.cfi_restore 31
addiu $sp, $sp, SPILL_SIZE
.cfi_adjust_cfa_offset -SPILL_SIZE
lw $t0, 16($sp) # get result pointer
lw $t1, 20($sp) # get shorty
lb $t1, 0($t1) # get result type char
li $t2, 'D' # put char 'D' into t2
beq $t1, $t2, 5f # branch if result type char == 'D'
li $t3, 'F' # put char 'F' into t3
beq $t1, $t3, 5f # branch if result type char == 'F'
sw $v0, 0($t0) # store the result
jalr $zero, $ra
sw $v1, 4($t0) # store the other half of the result (delay slot)
5:
SDu $f0, $f1, 0, $t0, $t1 # store floating point result
jalr $zero, $ra
nop
END art_quick_invoke_stub
    /*
     * Invocation static stub for quick code.
     * On entry:
     * a0 = method pointer
     * a1 = argument array or null for no argument methods
     * a2 = size of argument array in bytes
     * a3 = (managed) thread pointer
     * [sp + 16] = JValue* result
     * [sp + 20] = shorty
     * Same shape as art_quick_invoke_stub, except there is no implicit this*:
     * the first argument goes into a1 (gpr_index starts at 1, not 2).
     */
ENTRY art_quick_invoke_static_stub
sw $a0, 0($sp) # save out a0
addiu $sp, $sp, -SPILL_SIZE # spill s0, s1, fp, ra and gp
.cfi_adjust_cfa_offset SPILL_SIZE
sw $gp, 16($sp)
sw $ra, 12($sp)
.cfi_rel_offset 31, 12
sw $fp, 8($sp)
.cfi_rel_offset 30, 8
sw $s1, 4($sp)
.cfi_rel_offset 17, 4
sw $s0, 0($sp)
.cfi_rel_offset 16, 0
move $fp, $sp # save sp in fp
.cfi_def_cfa_register 30
move $s1, $a3 # move managed thread pointer into s1
addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval
addiu $t0, $a2, 4 # create space for ArtMethod* in frame.
subu $t0, $sp, $t0 # reserve & align *stack* to 16 bytes:
srl $t0, $t0, 4 # native calling convention only aligns to 8B,
sll $sp, $t0, 4 # so we have to ensure ART 16B alignment ourselves.
addiu $a0, $sp, 4 # pass stack pointer + ArtMethod* as dest for memcpy
la $t9, memcpy
jalr $t9 # (dest, src, bytes)
addiu $sp, $sp, -16 # make space for argument slots for memcpy (delay slot)
addiu $sp, $sp, 16 # restore stack after memcpy
lw $gp, 16($fp) # restore $gp
lw $a0, SPILL_SIZE($fp) # restore ArtMethod*
addiu $t0, $sp, 4 # t0 = pointer to the current argument (skip ArtMethod*)
li $t3, 1 # t3 = gpr_index = 1 (skip A0)
move $t4, $zero # t4 = fp_index = 0
lw $t1, 20 + SPILL_SIZE($fp) # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
# as the $fp is SPILL_SIZE bytes below the $sp on entry)
addiu $t1, 1 # t1 = shorty + 1 (skip 1 for return type)
    # Shorty walk: dispatch each arg char to the matching register loader.
loopS:
lbu $t2, 0($t1) # t2 = shorty[i]
beqz $t2, loopEndS # finish getting args when shorty[i] == '\0'
addiu $t1, 1
li $t9, 'J' # put char 'J' into t9
beq $t9, $t2, isLongS # branch if arg type char == 'J'
li $t9, 'D' # put char 'D' into t9
beq $t9, $t2, isDoubleS # branch if arg type char == 'D'
li $t9, 'F' # put char 'F' into t9
beq $t9, $t2, isSingleS # branch if arg type char == 'F'
addiu $t0, 4 # next_arg = curr_arg + 4 (in branch delay slot,
# for both, int and single)
li $t5, 1 # skip a0 (ArtMethod*)
bne $t5, $t3, 1f # if (gpr_index == 1)
addiu $t5, 1
LOAD_WORD_TO_REG a1, t0, t3, loopS # a1 = current argument, gpr_index++
1: bne $t5, $t3, 2f # else if (gpr_index == 2)
addiu $t5, 1
LOAD_WORD_TO_REG a2, t0, t3, loopS # a2 = current argument, gpr_index++
2: bne $t5, $t3, loopS # else if (gpr_index == 3)
nop
LOAD_WORD_TO_REG a3, t0, t3, loopS # a3 = current argument, gpr_index++
isLongS:
addiu $t0, 8 # next_arg = curr_arg + 8
slti $t5, $t3, 3
beqz $t5, 3f # if (gpr_index < 3)
nop
LOAD_LONG_TO_REG a2, a3, t0, t3, loopS # a2_a3 = curr_arg, gpr_index = 4
3: b loopS # else
li $t3, 4 # gpr_index = 4
isDoubleS:
addiu $t0, 8 # next_arg = curr_arg + 8
li $t5, 0
bne $t5, $t4, 4f # if (fp_index == 0)
addiu $t5, 1
LOAD_DOUBLE_TO_REG f12, f13, t0, t4, t9, loopS # f12_f13 = curr_arg, fp_index++
4: bne $t5, $t4, loopS # else if (fp_index == 1)
nop
LOAD_DOUBLE_TO_REG f14, f15, t0, t4, t9, loopS # f14_f15 = curr_arg, fp_index++
isSingleS:
li $t5, 0
bne $t5, $t4, 5f # if (fp_index == 0)
addiu $t5, 1
LOAD_FLOAT_TO_REG f12, t0, t4, loopS # f12 = curr_arg, fp_index++
5: bne $t5, $t4, loopS # else if (fp_index == 1)
nop
LOAD_FLOAT_TO_REG f14, t0, t4, loopS # f14 = curr_arg, fp_index++
loopEndS:
lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store null for ArtMethod* at bottom of frame (delay slot)
move $sp, $fp # restore the stack
lw $s0, 0($sp)
.cfi_restore 16
lw $s1, 4($sp)
.cfi_restore 17
lw $fp, 8($sp)
.cfi_restore 30
lw $ra, 12($sp)
.cfi_restore 31
addiu $sp, $sp, SPILL_SIZE
.cfi_adjust_cfa_offset -SPILL_SIZE
lw $t0, 16($sp) # get result pointer
lw $t1, 20($sp) # get shorty
lb $t1, 0($t1) # get result type char
li $t2, 'D' # put char 'D' into t2
beq $t1, $t2, 6f # branch if result type char == 'D'
li $t3, 'F' # put char 'F' into t3
beq $t1, $t3, 6f # branch if result type char == 'F'
sw $v0, 0($t0) # store the result
jalr $zero, $ra
sw $v1, 4($t0) # store the other half of the result (delay slot)
6:
SDu $f0, $f1, 0, $t0, $t1 # store floating point result
jalr $zero, $ra
nop
END art_quick_invoke_static_stub
#undef SPILL_SIZE
    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     * Note: the referrer Method* must be read from 0($sp) BEFORE the callee-save
     * frame is set up, while the caller's frame is still at the bottom of the stack.
     */
.extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
lw $a2, 0($sp) # pass referrer's Method*
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artHandleFillArrayDataFromCode
jalr $t9 # (payload offset, Array*, method, Thread*)
move $a3, rSELF # pass Thread::Current (delay slot)
RETURN_IF_ZERO
END art_quick_handle_fill_data
    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC.
     * a0 holds the object; a null object throws NullPointerException.
     */
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
la $t9, artLockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current (delay slot)
RETURN_IF_ZERO
END art_quick_lock_object
    # Same as art_quick_lock_object; separate symbol for call sites that must
    # not use the inlined fast path.
ENTRY art_quick_lock_object_no_inline
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
la $t9, artLockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current (delay slot)
RETURN_IF_ZERO
END art_quick_lock_object_no_inline
    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
     * a0 holds the object; a null object throws NullPointerException.
     */
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artUnlockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current (delay slot)
RETURN_IF_ZERO
END art_quick_unlock_object
    # Same as art_quick_unlock_object; separate symbol for call sites that must
    # not use the inlined fast path.
ENTRY art_quick_unlock_object_no_inline
beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
nop
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artUnlockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current (delay slot)
RETURN_IF_ZERO
END art_quick_unlock_object_no_inline
    /*
     * Entry from managed code that calls artIsAssignableFromCode and, on failure,
     * throws via artThrowClassCastException.
     * a0/a1 hold the two Class* operands (presumably a0 = target class,
     * a1 = object's class — TODO(review): confirm against callers).
     */
.extern artThrowClassCastException
ENTRY art_quick_check_cast
    # Hand-rolled 32-byte frame: keep a0/a1/t9 live across the C call so the
    # slow path can rebuild the throw arguments.
addiu $sp, $sp, -32
.cfi_adjust_cfa_offset 32
sw $gp, 16($sp)
sw $ra, 12($sp)
.cfi_rel_offset 31, 12
sw $t9, 8($sp)
sw $a1, 4($sp)
sw $a0, 0($sp)
la $t9, artIsAssignableFromCode
jalr $t9
addiu $sp, $sp, -16 # reserve argument slots on the stack (delay slot)
addiu $sp, $sp, 16
lw $gp, 16($sp)
beqz $v0, .Lthrow_class_cast_exception # not assignable -> throw
lw $ra, 12($sp) # (delay slot) reload $ra either way
jalr $zero, $ra # assignable: return to caller
addiu $sp, $sp, 32 # (delay slot) pop frame
.cfi_adjust_cfa_offset -32
.Lthrow_class_cast_exception:
    # Restore the original arguments, drop the scratch frame, then tail-jump
    # into the runtime throw path (never returns).
lw $t9, 8($sp)
lw $a1, 4($sp)
lw $a0, 0($sp)
addiu $sp, $sp, 32
.cfi_adjust_cfa_offset -32
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
la $t9, artThrowClassCastException
jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
move $a2, rSELF # pass Thread::Current (delay slot)
END art_quick_check_cast
    /*
     * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
     * nReg is the register number for rReg (used for the CFI restore directive).
     */
.macro POP_REG_NE rReg, nReg, offset, rExclude
.ifnc \rReg, \rExclude
lw \rReg, \offset($sp) # restore rReg
.cfi_restore \nReg
.endif
.endm
    /*
     * Macro to insert read barrier, only used in art_quick_aput_obj.
     * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
     * With USE_READ_BARRIER: spills the caller-clobbered registers that
     * art_quick_aput_obj keeps live, calls artReadBarrierSlow, and restores
     * everything except rDest. Without it: a plain load + heap-ref unpoison.
     */
.macro READ_BARRIER rDest, rObj, offset
#ifdef USE_READ_BARRIER
# saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment.
addiu $sp, $sp, -32
.cfi_adjust_cfa_offset 32
sw $ra, 28($sp)
.cfi_rel_offset 31, 28
sw $t9, 24($sp)
.cfi_rel_offset 25, 24
sw $t1, 20($sp)
.cfi_rel_offset 9, 20
sw $t0, 16($sp)
.cfi_rel_offset 8, 16
sw $a2, 8($sp) # padding slot at offset 12 (padding can be any slot in the 32B)
.cfi_rel_offset 6, 8
sw $a1, 4($sp)
.cfi_rel_offset 5, 4
sw $a0, 0($sp)
.cfi_rel_offset 4, 0
# move $a0, \rRef # pass ref in a0 (no-op for now since parameter ref is unused)
.ifnc \rObj, $a1
move $a1, \rObj # pass rObj
.endif
addiu $a2, $zero, \offset # pass offset
la $t9, artReadBarrierSlow
jalr $t9 # artReadBarrierSlow(ref, rObj, offset)
addiu $sp, $sp, -16 # Use branch delay slot to reserve argument slots on the stack
# before the call to artReadBarrierSlow.
addiu $sp, $sp, 16 # restore stack after call to artReadBarrierSlow
# No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
move \rDest, $v0 # save return value in rDest
# (rDest cannot be v0 in art_quick_aput_obj)
lw $a0, 0($sp) # restore registers except rDest
# (rDest can only be t0 or t1 in art_quick_aput_obj)
.cfi_restore 4
lw $a1, 4($sp)
.cfi_restore 5
lw $a2, 8($sp)
.cfi_restore 6
POP_REG_NE $t0, 8, 16, \rDest
POP_REG_NE $t1, 9, 20, \rDest
lw $t9, 24($sp)
.cfi_restore 25
lw $ra, 28($sp) # restore $ra
.cfi_restore 31
addiu $sp, $sp, 32
.cfi_adjust_cfa_offset -32
#else
lw \rDest, \offset(\rObj)
UNPOISON_HEAP_REF \rDest
#endif // USE_READ_BARRIER
.endm
    /*
     * Entry from managed code for array put operations of objects where the value being stored
     * needs to be checked for compatibility.
     * a0 = array, a1 = index, a2 = value
     * Null-checks the array, then falls into the bound-check variant.
     */
ENTRY art_quick_aput_obj_with_null_and_bound_check
bnez $a0, .Lart_quick_aput_obj_with_bound_check_gp_set
nop
b .Lart_quick_throw_null_pointer_exception_gp_set
nop
END art_quick_aput_obj_with_null_and_bound_check
    # Bounds-check index (a1) against the array length, then fall into
    # art_quick_aput_obj. On failure, re-marshal (index, limit) into (a0, a1)
    # and jump to the array-bounds throw path.
ENTRY art_quick_aput_obj_with_bound_check
lw $t0, MIRROR_ARRAY_LENGTH_OFFSET($a0)
sltu $t1, $a1, $t0 # unsigned compare: index < length?
bnez $t1, .Lart_quick_aput_obj_gp_set
nop
move $a0, $a1
b .Lart_quick_throw_array_bounds_gp_set
move $a1, $t0 # (delay slot) a1 = array length
END art_quick_aput_obj_with_bound_check
#ifdef USE_READ_BARRIER
.extern artReadBarrierSlow
#endif
    # Object array store: a0 = array, a1 = index, a2 = value.
    # Fast path stores when the value's class equals the array's component
    # type; otherwise falls back to artIsAssignableFromCode, and throws
    # ArrayStoreException if not assignable. Marks the card table on stores.
ENTRY art_quick_aput_obj
beqz $a2, .Ldo_aput_null # storing null needs no type check or card mark
nop
READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
bne $t1, $t0, .Lcheck_assignability # branch if classes differ; fall through on exact match (trivially assignable)
nop
.Ldo_aput:
sll $a1, $a1, 2 # element offset = index * 4
add $t0, $a0, $a1
POISON_HEAP_REF $a2
sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
    # Card mark: card = card_table_base + (array >> 7); the byte stored is the
    # low byte of the card-table base (ART's dirty-card value convention).
lw $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
srl $t1, $a0, 7
add $t1, $t1, $t0
sb $t0, ($t1)
jalr $zero, $ra
nop
.Ldo_aput_null:
sll $a1, $a1, 2
add $t0, $a0, $a1
sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
jalr $zero, $ra
nop
.Lcheck_assignability:
    # Slow path: spill live registers, ask the runtime whether
    # t1 (value class) is assignable to t0 (component type).
addiu $sp, $sp, -32
.cfi_adjust_cfa_offset 32
sw $ra, 28($sp)
.cfi_rel_offset 31, 28
sw $gp, 16($sp)
sw $t9, 12($sp)
sw $a2, 8($sp)
sw $a1, 4($sp)
sw $a0, 0($sp)
move $a1, $t1
move $a0, $t0
la $t9, artIsAssignableFromCode
jalr $t9 # (Class*, Class*)
addiu $sp, $sp, -16 # reserve argument slots on the stack (delay slot)
addiu $sp, $sp, 16
lw $ra, 28($sp)
lw $gp, 16($sp)
lw $t9, 12($sp)
lw $a2, 8($sp)
lw $a1, 4($sp)
lw $a0, 0($sp)
addiu $sp, 32
.cfi_adjust_cfa_offset -32
bnez $v0, .Ldo_aput # assignable -> perform the store
nop
    # Not assignable: throw ArrayStoreException(value, array) — never returns.
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
move $a1, $a2
la $t9, artThrowArrayStoreException
jalr $zero, $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
move $a2, rSELF # pass Thread::Current (delay slot)
END art_quick_aput_obj
    /*
     * Static field getters. Common shape for all of them:
     *   a0 = field_idx (from managed code); the referrer's Method* is read from
     *   the caller's out-args area at 0($sp) BEFORE the callee-save frame is
     *   built; Thread* is passed in the jalr delay slot.
     *   RETURN_IF_NO_EXCEPTION returns the loaded value or delivers a pending
     *   exception.
     */
    /*
     * Called by managed code to resolve a static field and load a boolean primitive value.
     */
    .extern artGetBooleanStaticFromCode
ENTRY art_quick_get_boolean_static
    lw    $a1, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetBooleanStaticFromCode
    jalr  $t9                          # (uint32_t field_idx, const Method* referrer, Thread*)
    move  $a2, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_boolean_static
    /*
     * Called by managed code to resolve a static field and load a byte primitive value.
     */
    .extern artGetByteStaticFromCode
ENTRY art_quick_get_byte_static
    lw    $a1, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetByteStaticFromCode
    jalr  $t9                          # (uint32_t field_idx, const Method* referrer, Thread*)
    move  $a2, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_byte_static
    /*
     * Called by managed code to resolve a static field and load a char primitive value.
     */
    .extern artGetCharStaticFromCode
ENTRY art_quick_get_char_static
    lw    $a1, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetCharStaticFromCode
    jalr  $t9                          # (uint32_t field_idx, const Method* referrer, Thread*)
    move  $a2, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_char_static
    /*
     * Called by managed code to resolve a static field and load a short primitive value.
     */
    .extern artGetShortStaticFromCode
ENTRY art_quick_get_short_static
    lw    $a1, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetShortStaticFromCode
    jalr  $t9                          # (uint32_t field_idx, const Method* referrer, Thread*)
    move  $a2, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_short_static
    /*
     * Called by managed code to resolve a static field and load a 32-bit primitive value.
     */
    .extern artGet32StaticFromCode
ENTRY art_quick_get32_static
    lw    $a1, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGet32StaticFromCode
    jalr  $t9                          # (uint32_t field_idx, const Method* referrer, Thread*)
    move  $a2, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get32_static
    /*
     * Called by managed code to resolve a static field and load a 64-bit primitive value.
     * (64-bit result comes back in the v0/v1 pair per o32 convention.)
     */
    .extern artGet64StaticFromCode
ENTRY art_quick_get64_static
    lw    $a1, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGet64StaticFromCode
    jalr  $t9                          # (uint32_t field_idx, const Method* referrer, Thread*)
    move  $a2, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get64_static
    /*
     * Called by managed code to resolve a static field and load an object reference.
     */
    .extern artGetObjStaticFromCode
ENTRY art_quick_get_obj_static
    lw    $a1, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetObjStaticFromCode
    jalr  $t9                          # (uint32_t field_idx, const Method* referrer, Thread*)
    move  $a2, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_obj_static
    /*
     * Instance field getters. Common shape:
     *   a0 = field_idx, a1 = object (from managed code); the referrer's Method*
     *   is read from 0($sp) into a2 BEFORE the callee-save frame is built;
     *   Thread* is passed in the jalr delay slot.
     */
    /*
     * Called by managed code to resolve an instance field and load a boolean primitive value.
     */
    .extern artGetBooleanInstanceFromCode
ENTRY art_quick_get_boolean_instance
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetBooleanInstanceFromCode
    jalr  $t9                          # (field_idx, Object*, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_boolean_instance
    /*
     * Called by managed code to resolve an instance field and load a byte primitive value.
     */
    .extern artGetByteInstanceFromCode
ENTRY art_quick_get_byte_instance
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetByteInstanceFromCode
    jalr  $t9                          # (field_idx, Object*, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_byte_instance
    /*
     * Called by managed code to resolve an instance field and load a char primitive value.
     */
    .extern artGetCharInstanceFromCode
ENTRY art_quick_get_char_instance
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetCharInstanceFromCode
    jalr  $t9                          # (field_idx, Object*, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_char_instance
    /*
     * Called by managed code to resolve an instance field and load a short primitive value.
     */
    .extern artGetShortInstanceFromCode
ENTRY art_quick_get_short_instance
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetShortInstanceFromCode
    jalr  $t9                          # (field_idx, Object*, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_short_instance
    /*
     * Called by managed code to resolve an instance field and load a 32-bit primitive value.
     */
    .extern artGet32InstanceFromCode
ENTRY art_quick_get32_instance
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGet32InstanceFromCode
    jalr  $t9                          # (field_idx, Object*, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get32_instance
    /*
     * Called by managed code to resolve an instance field and load a 64-bit primitive value.
     * (64-bit result comes back in the v0/v1 pair per o32 convention.)
     */
    .extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGet64InstanceFromCode
    jalr  $t9                          # (field_idx, Object*, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get64_instance
    /*
     * Called by managed code to resolve an instance field and load an object reference.
     */
    .extern artGetObjInstanceFromCode
ENTRY art_quick_get_obj_instance
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artGetObjInstanceFromCode
    jalr  $t9                          # (field_idx, Object*, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_NO_EXCEPTION
END art_quick_get_obj_instance
    /*
     * Static field setters. a0 = field_idx, a1 = new value (a2/a3 pair for the
     * 64-bit variant); the referrer's Method* is read from 0($sp) before the
     * callee-save frame is built. RETURN_IF_ZERO returns on success (0) or
     * delivers the pending exception.
     */
    /*
     * Called by managed code to resolve a static field and store a 8-bit primitive value.
     */
    .extern artSet8StaticFromCode
ENTRY art_quick_set8_static
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSet8StaticFromCode
    jalr  $t9                          # (field_idx, new_val, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set8_static
    /*
     * Called by managed code to resolve a static field and store a 16-bit primitive value.
     */
    .extern artSet16StaticFromCode
ENTRY art_quick_set16_static
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSet16StaticFromCode
    jalr  $t9                          # (field_idx, new_val, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set16_static
    /*
     * Called by managed code to resolve a static field and store a 32-bit primitive value.
     */
    .extern artSet32StaticFromCode
ENTRY art_quick_set32_static
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSet32StaticFromCode
    jalr  $t9                          # (field_idx, new_val, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set32_static
    /*
     * Called by managed code to resolve a static field and store a 64-bit primitive value.
     * The referrer goes in a1 (a2:a3 hold the 64-bit value), so Thread* must be
     * passed on the stack in the first out-arg slot beyond the register args.
     */
    .extern artSet64StaticFromCode
ENTRY art_quick_set64_static
    lw    $a1, 0($sp)                  # pass referrer's Method*
                                       # 64 bit new_val is in a2:a3 pair
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSet64StaticFromCode
    jalr  $t9                          # (field_idx, referrer, new_val, Thread*)
    sw    rSELF, 16($sp)               # delay slot: pass Thread::Current on the stack
    RETURN_IF_ZERO
END art_quick_set64_static
    /*
     * Called by managed code to resolve a static field and store an object reference.
     */
    .extern artSetObjStaticFromCode
ENTRY art_quick_set_obj_static
    lw    $a2, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSetObjStaticFromCode
    jalr  $t9                          # (field_idx, new_val, referrer, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    RETURN_IF_ZERO
END art_quick_set_obj_static
    /*
     * Instance field setters. a0 = field_idx, a1 = object, a2 = new value
     * (a2:a3 pair for the 64-bit variant); the referrer's Method* is read from
     * 0($sp) before the callee-save frame is built. Arguments past a3 go on the
     * stack starting at the 16($sp) out-arg slot per the o32 convention.
     */
    /*
     * Called by managed code to resolve an instance field and store a 8-bit primitive value.
     */
    .extern artSet8InstanceFromCode
ENTRY art_quick_set8_instance
    lw    $a3, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSet8InstanceFromCode
    jalr  $t9                          # (field_idx, Object*, new_val, referrer, Thread*)
    sw    rSELF, 16($sp)               # delay slot: pass Thread::Current on the stack
    RETURN_IF_ZERO
END art_quick_set8_instance
    /*
     * Called by managed code to resolve an instance field and store a 16-bit primitive value.
     */
    .extern artSet16InstanceFromCode
ENTRY art_quick_set16_instance
    lw    $a3, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSet16InstanceFromCode
    jalr  $t9                          # (field_idx, Object*, new_val, referrer, Thread*)
    sw    rSELF, 16($sp)               # delay slot: pass Thread::Current on the stack
    RETURN_IF_ZERO
END art_quick_set16_instance
    /*
     * Called by managed code to resolve an instance field and store a 32-bit primitive value.
     */
    .extern artSet32InstanceFromCode
ENTRY art_quick_set32_instance
    lw    $a3, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSet32InstanceFromCode
    jalr  $t9                          # (field_idx, Object*, new_val, referrer, Thread*)
    sw    rSELF, 16($sp)               # delay slot: pass Thread::Current on the stack
    RETURN_IF_ZERO
END art_quick_set32_instance
    /*
     * Called by managed code to resolve an instance field and store a 64-bit primitive value.
     * a2:a3 hold the value, so both the referrer and Thread* must be passed on
     * the stack (out-arg slots 16 and 20).
     */
    .extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
    lw    $t1, 0($sp)                  # load referrer's Method*
                                       # 64 bit new_val is in a2:a3 pair
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    sw    rSELF, 20($sp)               # pass Thread::Current
    la    $t9, artSet64InstanceFromCode
    jalr  $t9                          # (field_idx, Object*, new_val, referrer, Thread*)
    sw    $t1, 16($sp)                 # delay slot: pass referrer's Method* on the stack
    RETURN_IF_ZERO
END art_quick_set64_instance
    /*
     * Called by managed code to resolve an instance field and store an object reference.
     */
    .extern artSetObjInstanceFromCode
ENTRY art_quick_set_obj_instance
    lw    $a3, 0($sp)                  # pass referrer's Method*
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, artSetObjInstanceFromCode
    jalr  $t9                          # (field_idx, Object*, new_val, referrer, Thread*)
    sw    rSELF, 16($sp)               # delay slot: pass Thread::Current on the stack
    RETURN_IF_ZERO
END art_quick_set_obj_instance
// Macro to facilitate adding new allocation entrypoints.
// Each *_ARG_DOWNCALL macro defines a stub that forwards N managed-code
// register args to \entrypoint, appends Thread::Current as the last argument
// (in the jalr delay slot), and finishes with the \return macro.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, \entrypoint
    jalr  $t9                          # (arg0, Thread*)
    move  $a1, rSELF                   # delay slot: pass Thread::Current
    \return
END \name
.endm
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, \entrypoint
    jalr  $t9                          # (arg0, arg1, Thread*)
    move  $a2, rSELF                   # delay slot: pass Thread::Current
    \return
END \name
.endm
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, \entrypoint
    jalr  $t9                          # (arg0, arg1, arg2, Thread*)
    move  $a3, rSELF                   # delay slot: pass Thread::Current
    \return
END \name
.endm
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
    la    $t9, \entrypoint
    jalr  $t9                          # (arg0..arg3, Thread*)
    sw    rSELF, 16($sp)               # delay slot: pass Thread::Current on the stack
    \return
END \name
.endm
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
// Fast path: allocate from the thread-local RosAlloc run free list; falls back
// to artAllocObjectFromCodeRosAlloc on any fast-path precondition failure.
ENTRY art_quick_alloc_object_rosalloc
    # Fast path rosalloc allocation
    # a0: type_idx
    # a1: ArtMethod*
    # s1: Thread::Current
    # -----------------------------
    # t0: class
    # t1: object size
    # t2: rosalloc run
    # t3: thread stack top offset
    # t4: thread stack bottom offset
    # v0: free list head
    #
    # t5, t6 : temps
    lw    $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_32($a1)   # Load dex cache resolved types
                                                           # array.
    sll   $t5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT        # Shift the value.
    addu  $t5, $t0, $t5                                    # Compute the index.
    lw    $t0, 0($t5)                                      # Load class (t0).
    beqz  $t0, .Lart_quick_alloc_object_rosalloc_slow_path # Unresolved class -> slow path.
    li    $t6, MIRROR_CLASS_STATUS_INITIALIZED             # Delay slot (runs on both paths).
    lw    $t5, MIRROR_CLASS_STATUS_OFFSET($t0)             # Check class status.
    bne   $t5, $t6, .Lart_quick_alloc_object_rosalloc_slow_path
    # Add a fake dependence from the following access flag and size loads to the status load. This
    # is to prevent those loads from being reordered above the status load and reading wrong values.
    # (The xor below also sits in the bne's delay slot; it is harmless on both paths.)
    xor   $t5, $t5, $t5
    addu  $t0, $t0, $t5
    lw    $t5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0)       # Check if access flags has
    li    $t6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE           # kAccClassIsFinalizable.
    and   $t6, $t5, $t6
    bnez  $t6, .Lart_quick_alloc_object_rosalloc_slow_path # Finalizable -> slow path.
    lw    $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)    # Check if thread local allocation
    lw    $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1)    # stack has any room left.
    bgeu  $t3, $t4, .Lart_quick_alloc_object_rosalloc_slow_path
    lw    $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0)        # Load object size (t1).
    li    $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE      # Check if size is for a thread local
                                                           # allocation.
    bgtu  $t1, $t5, .Lart_quick_alloc_object_rosalloc_slow_path
    # Compute the rosalloc bracket index from the size. Align up the size by the rosalloc bracket
    # quantum size and divide by the quantum size and subtract by 1.
    addiu $t1, $t1, -1                                     # Decrease obj size and shift right
    srl   $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT    # by quantum.
    sll   $t2, $t1, POINTER_SIZE_SHIFT
    addu  $t2, $t2, $s1
    lw    $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2)            # Load rosalloc run (t2).
    # Load the free list head (v0).
    # NOTE: this will be the return val.
    lw    $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
    beqz  $v0, .Lart_quick_alloc_object_rosalloc_slow_path # Empty free list -> slow path.
    nop
    # Load the next pointer of the head and update the list head with the next pointer.
    lw    $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
    sw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
    # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
    # asserted to match.
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
    POISON_HEAP_REF $t0
    sw    $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
    # Push the new object onto the thread local allocation stack and increment the thread local
    # allocation stack top.
    sw    $v0, 0($t3)
    addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
    sw    $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
    # Decrement the size of the free list.
    lw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
    addiu $t5, $t5, -1
    sw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
    sync                                                   # Fence.
    jalr  $zero, $ra
    nop
.Lart_quick_alloc_object_rosalloc_slow_path:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
    la    $t9, artAllocObjectFromCodeRosAlloc
    jalr  $t9
    move  $a2, $s1                                         # Delay slot: pass self as argument.
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_alloc_object_rosalloc
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
    /*
     * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
     * exception on error. On success the String is returned. A0 holds the string index. The fast
     * path check for hit in strings cache has already been performed.
     */
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    /*
     * Entry from managed code when uninitialized static storage, this stub will run the class
     * initializer and deliver the exception on error. On success the static storage base is
     * returned.
     */
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    /*
     * Entry from managed code when dex cache misses for a type_idx.
     */
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    /*
     * Entry from managed code when type_idx needs to be checked for access and dex cache may also
     * miss.
     */
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    /*
     * Called by managed code when the value in rSUSPEND has been decremented to 0.
     * Reloads the suspend-check interval and, if any thread flag is set, calls
     * out to the runtime for the actual suspend check.
     */
    .extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
    lh     $a0, THREAD_FLAGS_OFFSET(rSELF)           # load 16-bit thread flags
    bnez   $a0, 1f                                   # any flag set -> call runtime
    addiu  rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL   # delay slot: reset rSUSPEND to
                                                     # SUSPEND_CHECK_INTERVAL (both paths)
    jalr   $zero, $ra                                # no flags: return immediately
    nop
1:
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME                # save callee saves for stack crawl
    la     $t9, artTestSuspendFromCode
    jalr   $t9                                       # (Thread*)
    move   $a0, rSELF                                # delay slot: pass Thread::Current
    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * a0 holds the proxy method; a1, a2 and a3 may contain arguments.
     * The C handler returns the result in v0/v1 (and f0/f1 for floats via MTD);
     * a pending exception is checked after the call.
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
    move    $a2, rSELF                     # pass Thread::Current
    la      $t9, artQuickProxyInvokeHandler
    jalr    $t9                            # (Method* proxy method, receiver, Thread*, SP)
    addiu   $a3, $sp, ARG_SLOT_SIZE        # delay slot: pass $sp (remove arg slots)
    lw      $t0, THREAD_EXCEPTION_OFFSET(rSELF)  # load Thread::Current()->exception_
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    bnez    $t0, 1f                        # pending exception -> deliver
    # don't care if $v0 and/or $v1 are modified, when exception branch taken
    MTD     $v0, $v1, $f0, $f1             # move float value to return value
    jalr    $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
    /*
     * Called to resolve an imt conflict.
     * a0 is the conflict ArtMethod.
     * t0 is a hidden argument that holds the target interface method's dex method index.
     *
     * Note that this stub writes to a0, t0 and t1.
     * Walks the (method, target) entry pairs of the ImtConflictTable; on a hit
     * tail-calls the resolved method, otherwise falls back to the interface
     * invoke trampoline.
     */
ENTRY art_quick_imt_conflict_trampoline
    lw      $t1, 0($sp)                                      # Load referrer.
    lw      $t1, ART_METHOD_DEX_CACHE_METHODS_OFFSET_32($t1) # Load dex cache methods array.
    sll     $t0, $t0, POINTER_SIZE_SHIFT                     # Calculate offset.
    addu    $t0, $t1, $t0                                    # Add offset to base.
    lw      $t0, 0($t0)                                      # Load interface method.
    lw      $a0, ART_METHOD_JNI_OFFSET_32($a0)               # Load ImtConflictTable.
.Limt_table_iterate:
    lw      $t1, 0($a0)                                      # Load next entry in ImtConflictTable.
    # Branch if found.
    beq     $t1, $t0, .Limt_table_found
    nop
    # If the entry is null, the interface method is not in the ImtConflictTable.
    beqz    $t1, .Lconflict_trampoline
    nop
    # Iterate over the entries of the ImtConflictTable.
    b       .Limt_table_iterate
    addiu   $a0, $a0, 2 * __SIZEOF_POINTER__                 # Delay slot: advance to next
                                                             # (method, target) pair.
.Limt_table_found:
    # We successfully hit an entry in the table. Load the target method and jump to it.
    lw      $a0, __SIZEOF_POINTER__($a0)
    lw      $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)
    jr      $t9
    nop
.Lconflict_trampoline:
    # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
    /*
     * Resolution trampoline: calls artQuickResolutionTrampoline to resolve the
     * called method, then tail-calls the returned code pointer (v0). On failure
     * (v0 == 0) delivers the pending exception.
     */
    .extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    move    $a2, rSELF                     # pass Thread::Current
    la      $t9, artQuickResolutionTrampoline
    jalr    $t9                            # (Method* called, receiver, Thread*, SP)
    addiu   $a3, $sp, ARG_SLOT_SIZE        # delay slot: pass $sp (remove arg slots)
    beqz    $v0, 1f                        # no code pointer -> exception path
    lw      $a0, ARG_SLOT_SIZE($sp)        # delay slot: load resolved method to $a0
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    move    $t9, $v0                       # code pointer must be in $t9 to generate the global pointer
    jalr    $zero, $t9                     # tail call to method
    nop
1:
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
    /*
     * Generic JNI trampoline: builds the native call frame via
     * artQuickGenericJniTrampoline, invokes the native code, then finishes via
     * artQuickGenericJniEndTrampoline. $s8 preserves the pre-alloca $sp and $s3
     * preserves $gp across the native call.
     */
    .extern artQuickGenericJniTrampoline
    .extern artQuickGenericJniEndTrampoline
ENTRY art_quick_generic_jni_trampoline
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
    move    $s8, $sp                       # save $sp to $s8
    move    $s3, $gp                       # save $gp to $s3
    # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
    move    $a0, rSELF                     # pass Thread::Current
    addiu   $a1, $sp, ARG_SLOT_SIZE        # save $sp (remove arg slots)
    la      $t9, artQuickGenericJniTrampoline
    jalr    $t9                            # (Thread*, SP)
    addiu   $sp, $sp, -5120                # delay slot: reserve space on the stack
    # The C call will have registered the complete save-frame on success.
    # The result of the call is:
    # v0: ptr to native code, 0 on error.
    # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
    beq     $v0, $zero, 1f                 # check entry error
    move    $t9, $v0                       # delay slot: save the code ptr
    move    $sp, $v1                       # release part of the alloca
    # Load parameters from stack into registers
    lw      $a0, 0($sp)
    lw      $a1, 4($sp)
    lw      $a2, 8($sp)
    # Load FPRs the same as GPRs. Look at BuildNativeCallFrameStateMachine.
    jalr    $t9                            # native call
    lw      $a3, 12($sp)                   # delay slot: load 4th argument
    addiu   $sp, $sp, 16                   # remove arg slots
    move    $gp, $s3                       # restore $gp from $s3
    # result sign extension is handled in C code
    # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
    move    $a0, rSELF                     # pass Thread::Current
    move    $a2, $v0                       # pass result (low word)
    move    $a3, $v1                       # pass result (high word)
    addiu   $sp, $sp, -24                  # reserve arg slots
    la      $t9, artQuickGenericJniEndTrampoline
    jalr    $t9
    s.d     $f0, 16($sp)                   # delay slot: pass result_f on the stack
    lw      $t0, THREAD_EXCEPTION_OFFSET(rSELF)  # load Thread::Current()->exception_
    bne     $t0, $zero, 1f                 # check for pending exceptions
    move    $sp, $s8                       # delay slot: tear down the alloca (both paths)
    # tear down the callee-save frame
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    MTD     $v0, $v1, $f0, $f1             # move float value to return value
    jalr    $zero, $ra
    nop
1:
    lw      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
    # This will create a new save-all frame, required by the runtime.
    DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline
    /*
     * Bridge from compiled code into the interpreter: forwards the method to
     * artQuickToInterpreterBridge, then either returns the result (v0/v1, plus
     * f0/f1 via MTD) or delivers a pending exception.
     */
    .extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    move    $a1, rSELF                     # pass Thread::Current
    la      $t9, artQuickToInterpreterBridge
    jalr    $t9                            # (Method* method, Thread*, SP)
    addiu   $a2, $sp, ARG_SLOT_SIZE        # delay slot: pass $sp (remove arg slots)
    lw      $t0, THREAD_EXCEPTION_OFFSET(rSELF)  # load Thread::Current()->exception_
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    bnez    $t0, 1f                        # pending exception -> deliver
    # don't care if $v0 and/or $v1 are modified, when exception branch taken
    MTD     $v0, $v1, $f0, $f1             # move float value to return value
    jalr    $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
    /*
     * Routine that intercepts method calls and returns.
     * Entry side: notifies the runtime via artInstrumentationMethodEntryFromCode,
     * which returns (in v0) the code address to actually invoke.
     */
    .extern artInstrumentationMethodEntryFromCode
    .extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    sw      $a0, 28($sp)                   # save arg0 in free arg slot
    move    $a3, $ra                       # pass $ra
    la      $t9, artInstrumentationMethodEntryFromCode
    jalr    $t9                            # (Method*, Object*, Thread*, LR)
    move    $a2, rSELF                     # delay slot: pass Thread::Current
    move    $t9, $v0                       # $t9 holds reference to code
    lw      $a0, 28($sp)                   # restore arg0 from free arg slot
    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
    jalr    $t9                            # call method
    nop
END art_quick_instrumentation_entry
    /* intentional fallthrough */
    /*
     * Exit side of instrumentation: hand-written prologue (no ENTRY macro) so
     * $gp can be rebuilt from the return address. Saves the GPR/FPR return
     * values, calls artInstrumentationMethodExitFromCode, then returns through
     * the link register the runtime hands back (v0), with v1 as the
     * deoptimization return address.
     */
    .global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
    .cfi_startproc
    addiu   $t9, $ra, 4                    # put current address into $t9 to rebuild $gp
    .cpload $t9
    move    $ra, $zero                     # link register is to here, so clobber with 0 for later checks
    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
    addiu   $sp, $sp, -16                  # allocate temp storage on the stack
    .cfi_adjust_cfa_offset 16
    sw      $v0, ARG_SLOT_SIZE+12($sp)     # preserve GPR return values
    .cfi_rel_offset 2, ARG_SLOT_SIZE+12
    sw      $v1, ARG_SLOT_SIZE+8($sp)
    .cfi_rel_offset 3, ARG_SLOT_SIZE+8
    s.d     $f0, ARG_SLOT_SIZE($sp)        # preserve FPR return value
    s.d     $f0, 16($sp)                   # pass fpr result in the stack arg area
                                           # NOTE(review): same byte offset as the
                                           # store above iff ARG_SLOT_SIZE == 16 — confirm
    move    $a2, $v0                       # pass gpr result (low)
    move    $a3, $v1                       # pass gpr result (high)
    addiu   $a1, $sp, ARG_SLOT_SIZE+16     # pass $sp (remove arg slots and temp storage)
    la      $t9, artInstrumentationMethodExitFromCode
    jalr    $t9                            # (Thread*, SP, gpr_res, fpr_res)
    move    $a0, rSELF                     # delay slot: pass Thread::Current
    move    $t9, $v0                       # set aside returned link register
    move    $ra, $v1                       # set link register for deoptimization
    lw      $v0, ARG_SLOT_SIZE+12($sp)     # restore return values
    lw      $v1, ARG_SLOT_SIZE+8($sp)
    l.d     $f0, ARG_SLOT_SIZE($sp)
    jalr    $zero, $t9                     # return
    addiu   $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+16  # delay slot: restore stack
    .cfi_adjust_cfa_offset -(ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE+16)
END art_quick_instrumentation_exit
    /*
     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     * (artDeoptimize does not return here.)
     */
    .extern artDeoptimize
ENTRY art_quick_deoptimize
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    la      $t9, artDeoptimize
    jalr    $t9                            # artDeoptimize(Thread*)
                                           # Returns caller method's frame size.
    move    $a0, rSELF                     # delay slot: pass Thread::Current
END art_quick_deoptimize
    /*
     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     * (artDeoptimizeFromCompiledCode does not return here.)
     */
    .extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    la      $t9, artDeoptimizeFromCompiledCode
    jalr    $t9                            # artDeoptimizeFromCompiledCode(Thread*)
                                           # Returns caller method's frame size.
    move    $a0, rSELF                     # delay slot: pass Thread::Current
END art_quick_deoptimize_from_compiled_code
    /*
     * Long integer shift. This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     * Result: v0 = low word, v1 = high word. Note the `or` sits in the beqz
     * delay slot and executes on both paths.
     */
ENTRY_NO_GP art_quick_shl_long
    /* shl-long vAA, vBB, vCC */
    sll     $v0, $a0, $a2                  #  rlo<- alo << (shift&31)
    not     $v1, $a2                       #  rhi<- 31-shift  (shift is 5b)
    srl     $a0, 1
    srl     $a0, $v1                       #  alo<- alo >> (32-(shift&31))
    sll     $v1, $a1, $a2                  #  rhi<- ahi << (shift&31)
    andi    $a2, 0x20                      #  shift <- shift & 0x20
    beqz    $a2, 1f                        #  shift < 32: result is ready
    or      $v1, $a0                       #  rhi<- rhi | alo (delay slot, both paths)
    move    $v1, $v0                       #  rhi<- rlo (if shift&0x20)
    move    $v0, $zero                     #  rlo<- 0 (if shift&0x20)
1:  jalr    $zero, $ra
    nop
END art_quick_shl_long
    /*
     * Long integer arithmetic (sign-propagating) shift right. vAA/vBB are
     * 64-bit but vCC (the shift distance) is 32-bit; Dalvik requires us to
     * ignore all but the low 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     * Result: v0 = low word, v1 = high word. The `or` sits in the beqz delay
     * slot and executes on both paths.
     */
ENTRY_NO_GP art_quick_shr_long
    sra     $v1, $a1, $a2                  #  rhi<- ahi >> (shift&31)
    srl     $v0, $a0, $a2                  #  rlo<- alo >> (shift&31)
    sra     $a3, $a1, 31                   #  $a3<- sign(ah)
    not     $a0, $a2                       #  alo<- 31-shift (shift is 5b)
    sll     $a1, 1
    sll     $a1, $a0                       #  ahi<- ahi << (32-(shift&31))
    andi    $a2, 0x20                      #  shift & 0x20
    beqz    $a2, 1f                        #  shift < 32: result is ready
    or      $v0, $a1                       #  rlo<- rlo | ahi (delay slot, both paths)
    move    $v0, $v1                       #  rlo<- rhi (if shift&0x20)
    move    $v1, $a3                       #  rhi<- sign(ahi) (if shift&0x20)
1:  jalr    $zero, $ra
    nop
END art_quick_shr_long
    /*
     * Long integer logical (zero-filling) shift right. vAA/vBB are 64-bit but
     * vCC (the shift distance) is 32-bit; Dalvik requires us to ignore all but
     * the low 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     * Result: v0 = low word, v1 = high word. The `or` sits in the beqz delay
     * slot and executes on both paths.
     */
    /* ushr-long vAA, vBB, vCC */
ENTRY_NO_GP art_quick_ushr_long
    srl     $v1, $a1, $a2                  #  rhi<- ahi >> (shift&31)
    srl     $v0, $a0, $a2                  #  rlo<- alo >> (shift&31)
    not     $a0, $a2                       #  alo<- 31-shift (shift is 5b)
    sll     $a1, 1
    sll     $a1, $a0                       #  ahi<- ahi << (32-(shift&31))
    andi    $a2, 0x20                      #  shift & 0x20
    beqz    $a2, 1f                        #  shift < 32: result is ready
    or      $v0, $a1                       #  rlo<- rlo | ahi (delay slot, both paths)
    move    $v0, $v1                       #  rlo<- rhi (if shift&0x20)
    move    $v1, $zero                     #  rhi<- 0 (if shift&0x20)
1:  jalr    $zero, $ra
    nop
END art_quick_ushr_long
    /* java.lang.String.indexOf(int ch, int fromIndex=0) */
    /*
     * Linear scan over the string's 16-bit char array.
     * Returns v0 = index of first occurrence of ch at or after fromIndex,
     * or -1 if not found. A negative fromIndex is clamped to 0.
     */
ENTRY_NO_GP art_quick_indexof
    /* $a0 holds address of "this" */
    /* $a1 holds "ch" */
    /* $a2 holds "fromIndex" */
    lw      $t0, MIRROR_STRING_COUNT_OFFSET($a0)   # this.length()
    slt     $t1, $a2, $zero                        # if fromIndex < 0
#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
    seleqz  $a2, $a2, $t1                          #     fromIndex = 0;
#else
    movn    $a2, $zero, $t1                        #     fromIndex = 0;
#endif
    subu    $t0, $t0, $a2                          # this.length() - fromIndex
    blez    $t0, 6f                                # if this.length()-fromIndex <= 0
    li      $v0, -1                                #     return -1; (delay slot)
    sll     $v0, $a2, 1                            # $a0 += $a2 * 2
    addu    $a0, $a0, $v0                          #  "  "   "  "  "
    move    $v0, $a2                               # Set i to fromIndex.
1:
    lhu     $t3, MIRROR_STRING_VALUE_OFFSET($a0)   # if this.charAt(i) == ch
    beq     $t3, $a1, 6f                           #     return i;
    addu    $a0, $a0, 2                            # i++ (char pointer; delay slot, both paths)
    subu    $t0, $t0, 1                            # this.length() - i
    bnez    $t0, 1b                                # while this.length() - i > 0
    addu    $v0, $v0, 1                            # i++ (delay slot, both paths)
    li      $v0, -1                                # if this.length() - i <= 0
                                                   #     return -1;
6:
    j       $ra
    nop
END art_quick_indexof
    /* java.lang.String.compareTo(String anotherString) */
    /*
     * Returns v0 = 0 for identical objects; otherwise the difference of the
     * first mismatching chars, or the length difference if one string is a
     * prefix of the other.
     */
ENTRY_NO_GP art_quick_string_compareto
    /* $a0 holds address of "this" */
    /* $a1 holds address of "anotherString" */
    beq     $a0, $a1, 9f                   # this and anotherString are the same object
    move    $v0, $zero                     # delay slot: result 0 for identical objects
    lw      $a2, MIRROR_STRING_COUNT_OFFSET($a0)   # this.length()
    lw      $a3, MIRROR_STRING_COUNT_OFFSET($a1)   # anotherString.length()
    MINu    $t2, $a2, $a3
    # $t2 now holds min(this.length(),anotherString.length())
    beqz    $t2, 9f                        # while min(this.length(),anotherString.length())-i != 0
    subu    $v0, $a2, $a3                  # delay slot: if $t2==0 return
                                           #     (this.length() - anotherString.length())
1:
    lhu     $t0, MIRROR_STRING_VALUE_OFFSET($a0)   # while this.charAt(i) == anotherString.charAt(i)
    lhu     $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne     $t0, $t1, 9f                   # if this.charAt(i) != anotherString.charAt(i)
    subu    $v0, $t0, $t1                  # delay slot: return (this.charAt(i) - anotherString.charAt(i))
    addiu   $a0, $a0, 2                    # point at this.charAt(i++)
    subu    $t2, $t2, 1                    # new value of
                                           # min(this.length(),anotherString.length())-i
    bnez    $t2, 1b
    addiu   $a1, $a1, 2                    # delay slot: point at anotherString.charAt(i++)
    subu    $v0, $a2, $a3                  # prefix case: return the length difference
9:
    j       $ra
    nop
END art_quick_string_compareto
# ---------------------------------------------------------------------------
# Concatenated-source boundary (repo: abforce/xposed_art_n, 1,170 bytes):
# runtime/arch/mips/memcmp16_mips.S
# ---------------------------------------------------------------------------
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
#define ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
#include "asm_support_mips.S"
// u4 __memcmp16(const u2*, const u2*, size_t);
// Compares $a2 16-bit units at $a0 and $a1.
// Returns 0 if equal (or zero length, or same pointer); otherwise the
// difference (first mismatching u2 from $a0) - (corresponding u2 from $a1).
// NOTE(review): no ".set noreorder" appears here, so the assembler is
// presumably filling branch/jump delay slots itself — confirm build flags.
ENTRY_NO_GP __memcmp16
li $t0,0
li $t1,0
beqz $a2,done /* 0 length string */
beq $a0,$a1,done /* strings are identical */
/* Unoptimized... */
1: lhu $t0,0($a0)
lhu $t1,0($a1)
addu $a1,2
bne $t0,$t1,done
addu $a0,2
subu $a2,1
bnez $a2,1b
done:
subu $v0,$t0,$t1 # $t0/$t1 hold the last (possibly mismatching) pair, or 0,0
j $ra
END __memcmp16
#endif // ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
|
abforce/xposed_art_n
| 4,430
|
runtime/arch/mips/asm_support_mips.S
|
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
#include "asm_support_mips.h"
// Define special registers.
// Register holding suspend check count down.
#define rSUSPEND $s0
// Register holding Thread::Current().
#define rSELF $s1
// Declare a function called name, sets up $gp.
// Emits symbol type/visibility, 16-byte entry alignment, the CFI
// prologue, and PIC $gp setup via .cpload.
.macro ENTRY name
.type \name, %function
.global \name
// Cache alignment for function entry.
.balign 16
\name:
.cfi_startproc
// Ensure we get a sane starting CFA.
.cfi_def_cfa $sp,0
// Load $gp. We expect that ".set noreorder" is in effect.
.cpload $t9
// Declare a local convenience label to be branched to when $gp is already set up.
.L\name\()_gp_set:
.endm
// Declare a function called name, doesn't set up $gp.
// Same as ENTRY but without the .cpload — for code that never uses $gp.
.macro ENTRY_NO_GP name
.type \name, %function
.global \name
// Cache alignment for function entry.
.balign 16
\name:
.cfi_startproc
// Ensure we get a sane starting CFA.
.cfi_def_cfa $sp,0
.endm
// Close a function opened by ENTRY/ENTRY_NO_GP: terminate the CFI
// region and record the symbol's size for the ELF symbol table.
.macro END name
.cfi_endproc
.size \name, .-\name
.endm
// Define a stub entry point that traps (via break) if ever reached.
.macro UNIMPLEMENTED name
ENTRY \name
break
break
END \name
.endm
#if defined(__mips_isa_rev) && __mips_isa_rev > 2
/* mips32r5 & mips32r6 have mthc1 op, and have 64-bit fp regs,
and in FPXX abi we avoid referring to odd-numbered fp regs */
/* LDu: Load 64-bit floating-point value to float reg feven,
from unaligned (mod-4-aligned) mem location disp(base) */
.macro LDu feven,fodd,disp,base,temp
l.s \feven, \disp(\base) # low 32 bits
lw \temp, \disp+4(\base)
mthc1 \temp, \feven # high 32 bits of the 64-bit fp reg
.endm
/* SDu: Store 64-bit floating-point value from float reg feven,
to unaligned (mod-4-aligned) mem location disp(base) */
.macro SDu feven,fodd,disp,base,temp
mfhc1 \temp, \feven # read high 32 bits
s.s \feven, \disp(\base) # store low 32 bits
sw \temp, \disp+4(\base)
.endm
/* MTD: Move double, from general regpair (reven,rodd)
to float regpair (feven,fodd) */
.macro MTD reven,rodd,feven,fodd
mtc1 \reven, \feven
mthc1 \rodd, \feven # fodd unused on >=r5: the double lives entirely in feven
.endm
#else
/* mips32r1 has no mthc1 op;
mips32r1 and mips32r2 use 32-bit floating point register mode (FR=0),
and always hold doubles as (feven, fodd) fp reg pair */
.macro LDu feven,fodd,disp,base,temp
l.s \feven, \disp(\base)
l.s \fodd, \disp+4(\base)
.endm
.macro SDu feven,fodd,disp,base,temp
s.s \feven, \disp(\base)
s.s \fodd, \disp+4(\base)
.endm
.macro MTD reven,rodd,feven,fodd
mtc1 \reven, \feven
mtc1 \rodd, \fodd
.endm
#endif /* mips_isa_rev */
// Macros to poison (negate) the reference for heap poisoning.
// Poisoning is two's-complement negation, so it is its own inverse.
// No-op unless USE_HEAP_POISONING is defined.
.macro POISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
subu \rRef, $zero, \rRef
#endif // USE_HEAP_POISONING
.endm
// Macros to unpoison (negate) the reference for heap poisoning.
// Identical operation to POISON_HEAP_REF: negation undoes negation.
.macro UNPOISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
subu \rRef, $zero, \rRef
#endif // USE_HEAP_POISONING
.endm
// Based on contents of creg select the minimum integer
// At the end of the macro the original value of creg is lost
// dreg = (creg != 0) ? rreg : sreg
.macro MINint dreg,rreg,sreg,creg
.set push
.set noat
#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
// R6 removed movn/movz; build the select from selnez/seleqz + or.
// The .ifc ordering avoids clobbering dreg before it is read when
// dreg aliases rreg.
.ifc \dreg, \rreg
selnez \dreg, \rreg, \creg
seleqz \creg, \sreg, \creg
.else
seleqz \dreg, \sreg, \creg
selnez \creg, \rreg, \creg
.endif
or \dreg, \dreg, \creg
#else
movn \dreg, \rreg, \creg # dreg = rreg if creg != 0
movz \dreg, \sreg, \creg # dreg = sreg if creg == 0
#endif
.set pop
.endm
// Find minimum of two signed registers: dreg = min(rreg, sreg).
// Uses $at as the condition scratch (hence noat).
.macro MINs dreg,rreg,sreg
.set push
.set noat
slt $at, \rreg, \sreg # $at = (rreg < sreg), signed
MINint \dreg, \rreg, \sreg, $at
.set pop
.endm
// Find minimum of two unsigned registers: dreg = min(rreg, sreg).
.macro MINu dreg,rreg,sreg
.set push
.set noat
sltu $at, \rreg, \sreg # $at = (rreg < sreg), unsigned
MINint \dreg, \rreg, \sreg, $at
.set pop
.endm
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
|
abhra0897/stm32_tiny_monitor
| 3,997
|
libopencm3/ld/linker.ld.S
|
/*
* This file is part of the libopencm3 project.
*
* Copyright (C) 2009 Uwe Hermann <uwe@hermann-uwe.de>
* Copyright (C) 2013 Frantisek Burian <BuFran@seznam.cz>
*
* This library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library. If not, see <http://www.gnu.org/licenses/>.
*/
/* Generic linker script for all targets using libopencm3. */
/* Enforce emmition of the vector table. */
EXTERN(vector_table)
/* Define the entry point of the output file. */
ENTRY(reset_handler)
/* Define memory regions.
 * Each region's origin/length comes from -D_xxx_OFF/-D_xxx macros supplied
 * by the build; optional regions appear only when their macro is defined. */
MEMORY
{
/* RAM is always used */
ram (rwx) : ORIGIN = _RAM_OFF, LENGTH = _RAM
#if defined(_ROM)
rom (rx) : ORIGIN = _ROM_OFF, LENGTH = _ROM
#endif
#if defined(_ROM1)
rom1 (rx) : ORIGIN = _ROM1_OFF, LENGTH = _ROM1
#endif
#if defined(_ROM2)
rom2 (rx) : ORIGIN = _ROM2_OFF, LENGTH = _ROM2
#endif
#if defined(_RAM1)
ram1 (rwx) : ORIGIN = _RAM1_OFF, LENGTH = _RAM1
#endif
#if defined(_RAM2)
ram2 (rwx) : ORIGIN = _RAM2_OFF, LENGTH = _RAM2
#endif
#if defined(_RAM3)
ram3 (rwx) : ORIGIN = _RAM3_OFF, LENGTH = _RAM3
#endif
#if defined(_CCM)
ccm (rwx) : ORIGIN = _CCM_OFF, LENGTH = _CCM
#endif
#if defined(_EEP)
eep (r) : ORIGIN = _EEP_OFF, LENGTH = _EEP
#endif
#if defined(_XSRAM)
xsram (rw) : ORIGIN = _XSRAM_OFF, LENGTH = _XSRAM
#endif
#if defined(_XDRAM)
xdram (rw) : ORIGIN = _XDRAM_OFF, LENGTH = _XDRAM
#endif
#if defined(_NFCRAM)
/* FIX: was "ORIGIN _NFCRAM_OFF" — the missing '=' is an ld syntax error
 * whenever _NFCRAM is defined. */
nfcram (rw) : ORIGIN = _NFCRAM_OFF, LENGTH = _NFCRAM
#endif
}
/* Define sections.
 * Layout: code + read-only data in ROM; initialized data in RAM with its
 * load image in ROM (copied at startup); zero-init data in RAM; optional
 * per-region sections guarded by the same _xxx macros as MEMORY. */
SECTIONS
{
.text : {
*(.vectors) /* Vector table */
*(.text*) /* Program code */
. = ALIGN(4);
*(.rodata*) /* Read-only data */
. = ALIGN(4);
} >rom
/* C++ Static constructors/destructors, also used for
* __attribute__((constructor)) and the likes.
*/
.preinit_array : {
. = ALIGN(4);
__preinit_array_start = .;
KEEP (*(.preinit_array))
__preinit_array_end = .;
} >rom
.init_array : {
. = ALIGN(4);
__init_array_start = .;
KEEP (*(SORT(.init_array.*)))
KEEP (*(.init_array))
__init_array_end = .;
} >rom
.fini_array : {
. = ALIGN(4);
__fini_array_start = .;
KEEP (*(.fini_array))
KEEP (*(SORT(.fini_array.*)))
__fini_array_end = .;
} >rom
/*
* Another section used by C++ stuff, appears when using newlib with
* 64bit (long long) printf support
*/
.ARM.extab : {
*(.ARM.extab*)
} >rom
.ARM.exidx : {
__exidx_start = .;
*(.ARM.exidx*)
__exidx_end = .;
} >rom
. = ALIGN(4);
_etext = .;
/* .data runs in RAM but its initial image is placed in ROM (AT >rom);
* startup code copies from _data_loadaddr to _data.. _edata. */
.data : {
_data = .;
*(.data*) /* Read-write initialized data */
. = ALIGN(4);
_edata = .;
} >ram AT >rom
_data_loadaddr = LOADADDR(.data);
.bss : {
*(.bss*) /* Read-write zero initialized data */
*(COMMON)
. = ALIGN(4);
_ebss = .;
} >ram
#if defined(_CCM)
.ccm : {
*(.ccmram*)
. = ALIGN(4);
} >ccm
#endif
#if defined(_RAM1)
.ram1 : {
*(.ram1*)
. = ALIGN(4);
} >ram1
#endif
#if defined(_RAM2)
.ram2 : {
*(.ram2*)
. = ALIGN(4);
} >ram2
#endif
#if defined(_RAM3)
.ram3 : {
*(.ram3*)
. = ALIGN(4);
} >ram3
#endif
#if defined(_XSRAM)
.xsram : {
*(.xsram*)
. = ALIGN(4);
} >xsram
#endif
#if defined(_XDRAM)
.xdram : {
*(.xdram*)
. = ALIGN(4);
} >xdram
#endif
#if defined(_NFCRAM)
.nfcram : {
*(.nfcram*)
. = ALIGN(4);
} >nfcram
#endif
/*
* The .eh_frame section appears to be used for C++ exception handling.
* You may need to fix this if you're using C++.
*/
/DISCARD/ : { *(.eh_frame) }
. = ALIGN(4);
end = .;
}
/* Default initial stack pointer: one past the top of the main RAM region.
 * PROVIDE lets the application override _stack with its own definition. */
PROVIDE(_stack = ORIGIN(ram) + LENGTH(ram));
|
abmfy/cod23-grp04
| 151,859
|
asm/rv32i.s
|
# Entry point: branch over the padding to reset_vector.
# "beq x0, x0, ..." is an always-taken branch (x0 is hardwired zero).
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop # spin forever (trap/fallback target)
.zero 68 # padding; presumably reserves a fixed-size vector area — confirm
# Clear every general-purpose register (x1..x31) to a known zero state,
# then fall through into the self-test sequence starting at test_2.
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
# ADD self-test #2: 0 + 0 must equal 0.
# Pattern used by every test below: gp holds the test number (presumably
# reported by the fail handler — confirm), the operation result is compared
# against the expected value in t2, and any mismatch jumps to fail.
test_2:
li gp,2
li ra,0
li sp,0
add a4,ra,sp
li t2,0
beq a4,t2,test_3
j fail
test_3:
li gp,3
li ra,1
li sp,1
add a4,ra,sp
li t2,2
beq a4,t2,test_4
j fail
test_4:
li gp,4
li ra,3
li sp,7
add a4,ra,sp
li t2,10
beq a4,t2,test_5
j fail
test_5:
li gp,5
li ra,0
lui sp,0xffff8
add a4,ra,sp
lui t2,0xffff8
beq a4,t2,test_6
j fail
test_6:
li gp,6
lui ra,0x80000
li sp,0
add a4,ra,sp
lui t2,0x80000
beq a4,t2,test_7
j fail
test_7:
li gp,7
lui ra,0x80000
lui sp,0xffff8
add a4,ra,sp
lui t2,0x7fff8
beq a4,t2,test_8
j fail
test_8:
li gp,8
li ra,0
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
add a4,ra,sp
lui t2,0x8
addi t2,t2,-1 # _start-0x7fff8001
beq a4,t2,test_9
j fail
test_9:
li gp,9
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,0
add a4,ra,sp
lui t2,0x80000
addi t2,t2,-1 # _end+0xffff5f2f
beq a4,t2,test_10
j fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
add a4,ra,sp
lui t2,0x80008
addi t2,t2,-2 # _end+0xffffdf2e
beq a4,t2,test_11
j fail
test_11:
li gp,11
lui ra,0x80000
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
add a4,ra,sp
lui t2,0x80008
addi t2,t2,-1 # _end+0xffffdf2f
beq a4,t2,test_12
j fail
test_12:
li gp,12
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0xffff8
add a4,ra,sp
lui t2,0x7fff8
addi t2,t2,-1 # _start-0x8001
beq a4,t2,test_13
j fail
test_13:
li gp,13
li ra,0
li sp,-1
add a4,ra,sp
li t2,-1
beq a4,t2,test_14
j fail
test_14:
li gp,14
li ra,-1
li sp,1
add a4,ra,sp
li t2,0
beq a4,t2,test_15
j fail
test_15:
li gp,15
li ra,-1
li sp,-1
add a4,ra,sp
li t2,-2
beq a4,t2,test_16
j fail
test_16:
li gp,16
li ra,1
lui sp,0x80000
addi sp,sp,-1 # _end+0xffff5f2f
add a4,ra,sp
lui t2,0x80000
beq a4,t2,test_17
j fail
test_17:
li gp,17
li ra,13
li sp,11
add ra,ra,sp
li t2,24
beq ra,t2,test_18
j fail
test_18:
li gp,18
li ra,14
li sp,11
add sp,ra,sp
li t2,25
beq sp,t2,test_19
j fail
test_19:
li gp,19
li ra,13
add ra,ra,ra
li t2,26
beq ra,t2,test_20
j fail
test_20:
li gp,20
li tp,0
li ra,13
li sp,11
add a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
li t2,24
beq t1,t2,test_21
j fail
test_21:
li gp,21
li tp,0
li ra,14
li sp,11
add a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
li t2,25
beq t1,t2,test_22
j fail
test_22:
li gp,22
li tp,0
li ra,15
li sp,11
add a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
li t2,26
beq t1,t2,test_23
j fail
test_23:
li gp,23
li tp,0
li ra,13
li sp,11
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
li t2,24
beq a4,t2,test_24
j fail
test_24:
li gp,24
li tp,0
li ra,14
li sp,11
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_24+0x8
li t2,25
beq a4,t2,test_25
j fail
test_25:
li gp,25
li tp,0
li ra,15
li sp,11
nop
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_25+0x8
li t2,26
beq a4,t2,test_26
j fail
test_26:
li gp,26
li tp,0
li ra,13
nop
li sp,11
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_26+0x8
li t2,24
beq a4,t2,test_27
j fail
test_27:
li gp,27
li tp,0
li ra,14
nop
li sp,11
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_27+0x8
li t2,25
beq a4,t2,test_28
j fail
test_28:
li gp,28
li tp,0
li ra,15
nop
nop
li sp,11
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_28+0x8
li t2,26
beq a4,t2,test_29
j fail
test_29:
li gp,29
li tp,0
li sp,11
li ra,13
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_29+0x8
li t2,24
beq a4,t2,test_30
j fail
test_30:
li gp,30
li tp,0
li sp,11
li ra,14
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_30+0x8
li t2,25
beq a4,t2,test_31
j fail
test_31:
li gp,31
li tp,0
li sp,11
li ra,15
nop
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_31+0x8
li t2,26
beq a4,t2,test_32
j fail
test_32:
li gp,32
li tp,0
li sp,11
nop
li ra,13
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_32+0x8
li t2,24
beq a4,t2,test_33
j fail
test_33:
li gp,33
li tp,0
li sp,11
nop
li ra,14
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_33+0x8
li t2,25
beq a4,t2,test_34
j fail
test_34:
li gp,34
li tp,0
li sp,11
nop
nop
li ra,15
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_34+0x8
li t2,26
beq a4,t2,test_35
j fail
test_35:
li gp,35
li ra,15
add sp,zero,ra
li t2,15
beq sp,t2,test_36
j fail
test_36:
li gp,36
li ra,32
add sp,ra,zero
li t2,32
beq sp,t2,test_37
j fail
test_37:
li gp,37
add ra,zero,zero
li t2,0
beq ra,t2,test_38
j fail
test_38:
li gp,38
li ra,16
li sp,30
add zero,ra,sp
li t2,0
beq zero,t2,test_102
j fail
test_102:
li gp,102
li ra,0
mv a4,ra
li t2,0
beq a4,t2,test_103
j fail
test_103:
li gp,103
li ra,1
addi a4,ra,1
li t2,2
beq a4,t2,test_104
j fail
test_104:
li gp,104
li ra,3
addi a4,ra,7
li t2,10
beq a4,t2,test_105
j fail
test_105:
li gp,105
li ra,0
addi a4,ra,-2048
li t2,-2048
beq a4,t2,test_106
j fail
test_106:
li gp,106
lui ra,0x80000
mv a4,ra
lui t2,0x80000
beq a4,t2,test_107
j fail
test_107:
li gp,107
lui ra,0x80000
addi a4,ra,-2048 # _end+0xffff5730
lui t2,0x80000
addi t2,t2,-2048 # _end+0xffff5730
beq a4,t2,test_108
j fail
test_108:
li gp,108
li ra,0
addi a4,ra,2047
li t2,2047
beq a4,t2,test_109
j fail
test_109:
li gp,109
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
mv a4,ra
lui t2,0x80000
addi t2,t2,-1 # _end+0xffff5f2f
beq a4,t2,test_110
j fail
test_110:
li gp,110
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
addi a4,ra,2047
lui t2,0x80000
addi t2,t2,2046 # _end+0xffff672e
beq a4,t2,test_111
j fail
test_111:
li gp,111
lui ra,0x80000
addi a4,ra,2047 # _end+0xffff672f
lui t2,0x80000
addi t2,t2,2047 # _end+0xffff672f
beq a4,t2,test_112
j fail
test_112:
li gp,112
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
addi a4,ra,-2048
lui t2,0x7ffff
addi t2,t2,2047 # _start-0x801
beq a4,t2,test_113
j fail
test_113:
li gp,113
li ra,0
addi a4,ra,-1
li t2,-1
beq a4,t2,test_114
j fail
test_114:
li gp,114
li ra,-1
addi a4,ra,1
li t2,0
beq a4,t2,test_115
j fail
test_115:
li gp,115
li ra,-1
addi a4,ra,-1
li t2,-2
beq a4,t2,test_116
j fail
test_116:
li gp,116
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
addi a4,ra,1
lui t2,0x80000
beq a4,t2,test_117
j fail
test_117:
li gp,117
li ra,13
addi ra,ra,11
li t2,24
beq ra,t2,test_118
j fail
test_118:
li gp,118
li tp,0
li ra,13
addi a4,ra,11
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_118+0x8
li t2,24
beq t1,t2,test_119
j fail
test_119:
li gp,119
li tp,0
li ra,13
addi a4,ra,10
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_119+0x8
li t2,23
beq t1,t2,test_120
j fail
test_120:
li gp,120
li tp,0
li ra,13
addi a4,ra,9
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_120+0x8
li t2,22
beq t1,t2,test_121
j fail
test_121:
li gp,121
li tp,0
li ra,13
addi a4,ra,11
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_121+0x8
li t2,24
beq a4,t2,test_122
j fail
test_122:
li gp,122
li tp,0
li ra,13
nop
addi a4,ra,10
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_122+0x8
li t2,23
beq a4,t2,test_123
j fail
test_123:
li gp,123
li tp,0
li ra,13
nop
nop
addi a4,ra,9
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_123+0x8
li t2,22
beq a4,t2,test_124
j fail
test_124:
li gp,124
li ra,32
li t2,32
beq ra,t2,test_125
j fail
test_125:
li gp,125
li ra,33
addi zero,ra,50
li t2,0
beq zero,t2,test_202
j fail
test_202:
li gp,202
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_203
j fail
test_203:
li gp,203
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
and a4,ra,sp
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_204
j fail
test_204:
li gp,204
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_205
j fail
test_205:
li gp,205
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
and a4,ra,sp
lui t2,0xf000f
beq a4,t2,test_206
j fail
test_206:
li gp,206
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and ra,ra,sp
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq ra,t2,test_207
j fail
test_207:
li gp,207
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
and sp,ra,sp
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq sp,t2,test_208
j fail
test_208:
li gp,208
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
and ra,ra,ra
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq ra,t2,test_209
j fail
test_209:
li gp,209
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_209+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq t1,t2,test_210
j fail
test_210:
li gp,210
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
and a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_210+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq t1,t2,test_211
j fail
test_211:
li gp,211
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_211+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq t1,t2,test_212
j fail
test_212:
li gp,212
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_212+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_213
j fail
test_213:
li gp,213
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_213+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_214
j fail
test_214:
li gp,214
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_214+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_215
j fail
test_215:
li gp,215
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_215+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_216
j fail
test_216:
li gp,216
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_216+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_217
j fail
test_217:
li gp,217
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_217+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_218
j fail
test_218:
li gp,218
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_218+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_219
j fail
test_219:
li gp,219
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_219+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_220
j fail
test_220:
li gp,220
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_220+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_221
j fail
test_221:
li gp,221
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_221+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_222
j fail
test_222:
li gp,222
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_222+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_223
j fail
test_223:
li gp,223
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_223+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_224
j fail
test_224:
li gp,224
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
and sp,zero,ra
li t2,0
beq sp,t2,test_225
j fail
test_225:
li gp,225
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
and sp,ra,zero
li t2,0
beq sp,t2,test_226
j fail
test_226:
li gp,226
and ra,zero,zero
li t2,0
beq ra,t2,test_227
j fail
test_227:
li gp,227
lui ra,0x11111
addi ra,ra,273 # _start-0x6eeeeeef
lui sp,0x22222
addi sp,sp,546 # _start-0x5dddddde
and zero,ra,sp
li t2,0
beq zero,t2,test_302
j fail
test_302:
li gp,302
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
andi a4,ra,-241
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_303
j fail
test_303:
li gp,303
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
andi a4,ra,240
li t2,240
beq a4,t2,test_304
j fail
test_304:
li gp,304
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
andi a4,ra,1807
li t2,15
beq a4,t2,test_305
j fail
test_305:
li gp,305
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
andi a4,ra,240
li t2,0
beq a4,t2,test_306
j fail
test_306:
li gp,306
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
andi ra,ra,240
li t2,0
beq ra,t2,test_307
j fail
test_307:
li gp,307
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
andi a4,ra,1807
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_307+0x8
li t2,1792
beq t1,t2,test_308
j fail
test_308:
li gp,308
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
andi a4,ra,240
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_308+0x8
li t2,240
beq t1,t2,test_309
j fail
test_309:
li gp,309
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
andi a4,ra,-241
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_309+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq t1,t2,test_310
j fail
test_310:
li gp,310
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
andi a4,ra,1807
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_310+0x8
li t2,1792
beq a4,t2,test_311
j fail
test_311:
li gp,311
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
andi a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_311+0x8
li t2,240
beq a4,t2,test_312
j fail
test_312:
li gp,312
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
nop
nop
andi a4,ra,1807
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_312+0x8
li t2,15
beq a4,t2,test_313
j fail
test_313:
li gp,313
andi ra,zero,240
li t2,0
beq ra,t2,test_314
j fail
test_314:
li gp,314
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
andi zero,ra,1807
li t2,0
beq zero,t2,test_402
j fail
test_402:
li gp,402
nop
auipc a0,0x2
addi a0,a0,1820 # test_1807+0x20
jal a1,test_402+0x14
sub a0,a0,a1
lui t2,0x2
addi t2,t2,1808 # _start-0x7fffd8f0
beq a0,t2,test_403
j fail
test_403:
li gp,403
nop
auipc a0,0xffffe
addi a0,a0,-1796 # _start-0x16ac
jal a1,test_403+0x14
sub a0,a0,a1
lui t2,0xffffe
addi t2,t2,-1808 # _end+0x7fff3820
beq a0,t2,test_502
j fail
test_502:
li gp,502
li ra,0
li sp,0
beq ra,sp,test_502+0x1c
beq zero,gp,test_502+0x18
j fail
bne zero,gp,test_503
beq ra,sp,test_502+0x18
beq zero,gp,test_503
j fail
test_503:
li gp,503
li ra,1
li sp,1
beq ra,sp,test_503+0x1c
beq zero,gp,test_503+0x18
j fail
bne zero,gp,test_504
beq ra,sp,test_503+0x18
beq zero,gp,test_504
j fail
test_504:
li gp,504
li ra,-1
li sp,-1
beq ra,sp,test_504+0x1c
beq zero,gp,test_504+0x18
j fail
bne zero,gp,test_505
beq ra,sp,test_504+0x18
beq zero,gp,test_505
j fail
test_505:
li gp,505
li ra,0
li sp,1
beq ra,sp,test_505+0x14
bne zero,gp,test_505+0x1c
beq zero,gp,test_505+0x1c
j fail
beq ra,sp,test_505+0x14
test_506:
li gp,506
li ra,1
li sp,0
beq ra,sp,test_506+0x14
bne zero,gp,test_506+0x1c
beq zero,gp,test_506+0x1c
j fail
beq ra,sp,test_506+0x14
test_507:
li gp,507
li ra,-1
li sp,1
beq ra,sp,test_507+0x14
bne zero,gp,test_507+0x1c
beq zero,gp,test_507+0x1c
j fail
beq ra,sp,test_507+0x14
test_508:
li gp,508
li ra,1
li sp,-1
beq ra,sp,test_508+0x14
bne zero,gp,test_508+0x1c
beq zero,gp,test_508+0x1c
j fail
beq ra,sp,test_508+0x14
test_509:
li gp,509
li tp,0
li ra,0
li sp,-1
bne ra,sp,test_509+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_509+0x8
test_510:
li gp,510
li tp,0
li ra,0
li sp,-1
nop
bne ra,sp,test_510+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_510+0x8
test_511:
li gp,511
li tp,0
li ra,0
li sp,-1
nop
nop
bne ra,sp,test_511+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_511+0x8
test_512:
li gp,512
li tp,0
li ra,0
nop
li sp,-1
bne ra,sp,test_512+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_512+0x8
test_513:
li gp,513
li tp,0
li ra,0
nop
li sp,-1
nop
bne ra,sp,test_513+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_513+0x8
test_514:
li gp,514
li tp,0
li ra,0
nop
nop
li sp,-1
bne ra,sp,test_514+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_514+0x8
test_515:
li gp,515
li tp,0
li ra,0
li sp,-1
bne ra,sp,test_515+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_515+0x8
test_516:
li gp,516
li tp,0
li ra,0
li sp,-1
nop
bne ra,sp,test_516+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_516+0x8
test_517:
li gp,517
li tp,0
li ra,0
li sp,-1
nop
nop
bne ra,sp,test_517+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_517+0x8
test_518:
li gp,518
li tp,0
li ra,0
nop
li sp,-1
bne ra,sp,test_518+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_518+0x8
test_519:
li gp,519
li tp,0
li ra,0
nop
li sp,-1
nop
bne ra,sp,test_519+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_519+0x8
test_520:
li gp,520
li tp,0
li ra,0
nop
nop
li sp,-1
bne ra,sp,test_520+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_520+0x8
test_602:
li gp,602
li ra,0
li sp,0
bge ra,sp,test_602+0x1c
beq zero,gp,test_602+0x18
j fail
bne zero,gp,test_603
bge ra,sp,test_602+0x18
beq zero,gp,test_603
j fail
test_603:
li gp,603
li ra,1
li sp,1
bge ra,sp,test_603+0x1c
beq zero,gp,test_603+0x18
j fail
bne zero,gp,test_604
bge ra,sp,test_603+0x18
beq zero,gp,test_604
j fail
test_604:
li gp,604
li ra,-1
li sp,-1
bge ra,sp,test_604+0x1c
beq zero,gp,test_604+0x18
j fail
bne zero,gp,test_605
bge ra,sp,test_604+0x18
beq zero,gp,test_605
j fail
test_605:
li gp,605
li ra,1
li sp,0
bge ra,sp,test_605+0x1c
beq zero,gp,test_605+0x18
j fail
bne zero,gp,test_606
bge ra,sp,test_605+0x18
beq zero,gp,test_606
j fail
test_606:
li gp,606
li ra,1
li sp,-1
bge ra,sp,test_606+0x1c
beq zero,gp,test_606+0x18
j fail
bne zero,gp,test_607
bge ra,sp,test_606+0x18
beq zero,gp,test_607
j fail
test_607:
li gp,607
li ra,-1
li sp,-2
bge ra,sp,test_607+0x1c
beq zero,gp,test_607+0x18
j fail
bne zero,gp,test_608
bge ra,sp,test_607+0x18
beq zero,gp,test_608
j fail
test_608:
li gp,608
li ra,0
li sp,1
bge ra,sp,test_608+0x14
bne zero,gp,test_608+0x1c
beq zero,gp,test_608+0x1c
j fail
bge ra,sp,test_608+0x14
test_609:
li gp,609
li ra,-1
li sp,1
bge ra,sp,test_609+0x14
bne zero,gp,test_609+0x1c
beq zero,gp,test_609+0x1c
j fail
bge ra,sp,test_609+0x14
test_610:
li gp,610
li ra,-2
li sp,-1
bge ra,sp,test_610+0x14
bne zero,gp,test_610+0x1c
beq zero,gp,test_610+0x1c
j fail
bge ra,sp,test_610+0x14
test_611:
li gp,611
li ra,-2
li sp,1
bge ra,sp,test_611+0x14
bne zero,gp,test_611+0x1c
beq zero,gp,test_611+0x1c
j fail
bge ra,sp,test_611+0x14
test_612:
li gp,612
li tp,0
li ra,-1
li sp,0
blt ra,sp,test_612+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_612+0x8
test_613:
li gp,613
li tp,0
li ra,-1
li sp,0
nop
blt ra,sp,test_613+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_613+0x8
test_614:
li gp,614
li tp,0
li ra,-1
li sp,0
nop
nop
blt ra,sp,test_614+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_614+0x8
test_615:
li gp,615
li tp,0
li ra,-1
nop
li sp,0
blt ra,sp,test_615+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_615+0x8
test_616:
li gp,616
li tp,0
li ra,-1
nop
li sp,0
nop
blt ra,sp,test_616+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_616+0x8
test_617:
li gp,617
li tp,0
li ra,-1
nop
nop
li sp,0
blt ra,sp,test_617+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_617+0x8
test_618:
li gp,618
li tp,0
li ra,-1
li sp,0
blt ra,sp,test_618+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_618+0x8
test_619:
li gp,619
li tp,0
li ra,-1
li sp,0
nop
blt ra,sp,test_619+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_619+0x8
test_620:
li gp,620
li tp,0
li ra,-1
li sp,0
nop
nop
blt ra,sp,test_620+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_620+0x8
test_621:
li gp,621
li tp,0
li ra,-1
nop
li sp,0
blt ra,sp,test_621+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_621+0x8
test_622:
li gp,622
li tp,0
li ra,-1
nop
li sp,0
nop
blt ra,sp,test_622+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_622+0x8
test_623:
li gp,623
li tp,0
li ra,-1
nop
nop
li sp,0
blt ra,sp,test_623+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_623+0x8
jalr_test_2:
li gp,702
li t0,0
auipc t1,0x0
addi t1,t1,16 # jalr_target_2
jalr t0,t1
jalr_linkaddr_2:
j fail
jalr_target_2:
auipc t1,0x0
addi t1,t1,-4 # jalr_linkaddr_2
beq t0,t1,jalr_test_3
j fail
jalr_test_3:
li gp,703
auipc t0,0x0
addi t0,t0,16 # jalr_target_3
jalr t0,t0
jalr_linkaddr_3:
j fail
jalr_target_3:
auipc t1,0x0
addi t1,t1,-4 # jalr_linkaddr_3
beq t0,t1,test_704
j fail
test_704:
li gp,704
li tp,0
auipc t1,0x0
addi t1,t1,20 # test_704+0x1c
jalr a3,t1
beq zero,gp,test_704+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_704+0x8
test_705:
li gp,705
li tp,0
auipc t1,0x0
addi t1,t1,24 # test_705+0x20
nop
jalr a3,t1
beq zero,gp,test_705+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_705+0x8
test_706:
li gp,706
li tp,0
auipc t1,0x0
addi t1,t1,28 # test_706+0x24
nop
nop
jalr a3,t1
beq zero,gp,test_706+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_706+0x8
test_802:
li gp,802
li a5,-1
auipc ra,0x9
addi ra,ra,-1944 # begin_signature
lb a4,0(ra)
li t2,-1
beq a4,t2,test_803
j fail
test_803:
li gp,803
li a5,0
auipc ra,0x9
addi ra,ra,-1976 # begin_signature
lb a4,1(ra)
li t2,0
beq a4,t2,test_804
j fail
test_804:
li gp,804
li a5,-16
auipc ra,0x9
addi ra,ra,-2008 # begin_signature
lb a4,2(ra)
li t2,-16
beq a4,t2,test_805
j fail
test_805:
li gp,805
li a5,15
auipc ra,0x9
addi ra,ra,-2040 # begin_signature
lb a4,3(ra)
li t2,15
beq a4,t2,test_806
j fail
test_806:
li gp,806
li a5,-1
auipc ra,0x8
addi ra,ra,2027 # lb_tdat4
lb a4,-3(ra)
li t2,-1
beq a4,t2,test_807
j fail
test_807:
li gp,807
li a5,0
auipc ra,0x8
addi ra,ra,1995 # lb_tdat4
lb a4,-2(ra)
li t2,0
beq a4,t2,test_808
j fail
test_808:
li gp,808
li a5,-16
auipc ra,0x8
addi ra,ra,1963 # lb_tdat4
lb a4,-1(ra)
li t2,-16
beq a4,t2,test_809
j fail
test_809:
li gp,809
li a5,15
auipc ra,0x8
addi ra,ra,1931 # lb_tdat4
lb a4,0(ra)
li t2,15
beq a4,t2,test_810
j fail
test_810:
li gp,810
auipc ra,0x8
addi ra,ra,1900 # begin_signature
addi ra,ra,-32
lb t0,32(ra)
li t2,-1
beq t0,t2,test_811
j fail
test_811:
li gp,811
auipc ra,0x8
addi ra,ra,1868 # begin_signature
addi ra,ra,-6
lb t0,7(ra)
li t2,0
beq t0,t2,test_812
j fail
test_812:
li gp,812
li tp,0
auipc ra,0x8
addi ra,ra,1833 # lb_tdat2
lb a4,1(ra)
mv t1,a4
li t2,-16
beq t1,t2,test_812+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_812+0x8
test_813:
li gp,813
li tp,0
auipc ra,0x8
addi ra,ra,1786 # lb_tdat3
lb a4,1(ra)
nop
mv t1,a4
li t2,15
beq t1,t2,test_813+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_813+0x8
test_814:
li gp,814
li tp,0
auipc ra,0x8
addi ra,ra,1732 # begin_signature
lb a4,1(ra)
nop
nop
mv t1,a4
li t2,0
beq t1,t2,test_814+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_814+0x8
test_815:
li gp,815
li tp,0
auipc ra,0x8
addi ra,ra,1677 # lb_tdat2
lb a4,1(ra)
li t2,-16
beq a4,t2,test_815+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_815+0x8
test_816:
li gp,816
li tp,0
auipc ra,0x8
addi ra,ra,1634 # lb_tdat3
nop
lb a4,1(ra)
li t2,15
beq a4,t2,test_816+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_816+0x8
test_817:
li gp,817
li tp,0
auipc ra,0x8
addi ra,ra,1584 # begin_signature
nop
nop
lb a4,1(ra)
li t2,0
beq a4,t2,test_817+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_817+0x8
test_818:
li gp,818
auipc t0,0x8
addi t0,t0,1536 # begin_signature
lb sp,0(t0)
li sp,2
li t2,2
beq sp,t2,test_819
j fail
test_819:
li gp,819
auipc t0,0x8
addi t0,t0,1504 # begin_signature
lb sp,0(t0)
nop
li sp,2
li t2,2
beq sp,t2,test_902
j fail
test_902:
li gp,902
auipc ra,0x8
addi ra,ra,1484 # sb_tdat
li sp,-86
auipc a5,0x0
addi a5,a5,20 # test_902+0x24
sb sp,0(ra)
lb a4,0(ra)
j test_902+0x28
mv a4,sp
li t2,-86
beq a4,t2,test_903
j fail
test_903:
li gp,903
auipc ra,0x8
addi ra,ra,1432 # sb_tdat
li sp,0
auipc a5,0x0
addi a5,a5,20 # test_903+0x24
sb sp,1(ra)
lb a4,1(ra)
j test_903+0x28
mv a4,sp
li t2,0
beq a4,t2,test_904
j fail
test_904:
li gp,904
auipc ra,0x8
addi ra,ra,1380 # sb_tdat
lui sp,0xfffff
addi sp,sp,-96 # _end+0x7fff4ed0
auipc a5,0x0
addi a5,a5,20 # test_904+0x28
sb sp,2(ra)
lh a4,2(ra)
j test_904+0x2c
mv a4,sp
lui t2,0xfffff
addi t2,t2,-96 # _end+0x7fff4ed0
beq a4,t2,test_905
j fail
test_905:
li gp,905
auipc ra,0x8
addi ra,ra,1320 # sb_tdat
li sp,10
auipc a5,0x0
addi a5,a5,20 # test_905+0x24
sb sp,3(ra)
lb a4,3(ra)
j test_905+0x28
mv a4,sp
li t2,10
beq a4,t2,test_906
j fail
test_906:
li gp,906
auipc ra,0x8
addi ra,ra,1275 # sb_tdat8
li sp,-86
auipc a5,0x0
addi a5,a5,20 # test_906+0x24
sb sp,-3(ra)
lb a4,-3(ra)
j test_906+0x28
mv a4,sp
li t2,-86
beq a4,t2,test_907
j fail
test_907:
li gp,907
auipc ra,0x8
addi ra,ra,1223 # sb_tdat8
li sp,0
auipc a5,0x0
addi a5,a5,20 # test_907+0x24
sb sp,-2(ra)
lb a4,-2(ra)
j test_907+0x28
mv a4,sp
li t2,0
beq a4,t2,test_908
j fail
test_908:
li gp,908
auipc ra,0x8
addi ra,ra,1171 # sb_tdat8
li sp,-96
auipc a5,0x0
addi a5,a5,20 # test_908+0x24
sb sp,-1(ra)
lb a4,-1(ra)
j test_908+0x28
mv a4,sp
li t2,-96
beq a4,t2,test_909
j fail
test_909:
li gp,909
auipc ra,0x8
addi ra,ra,1119 # sb_tdat8
li sp,10
auipc a5,0x0
addi a5,a5,20 # test_909+0x24
sb sp,0(ra)
lb a4,0(ra)
j test_909+0x28
mv a4,sp
li t2,10
beq a4,t2,test_910
j fail
test_910:
li gp,910
auipc ra,0x8
addi ra,ra,1068 # sb_tdat9
lui sp,0x12345
addi sp,sp,1656 # _start-0x6dcba988
addi tp,ra,-32
sb sp,32(tp) # _start-0x7fffffe0
lb t0,0(ra)
li t2,120
beq t0,t2,test_911
j fail
test_911:
li gp,911
auipc ra,0x8
addi ra,ra,1024 # sb_tdat9
lui sp,0x3
addi sp,sp,152 # _start-0x7fffcf68
addi ra,ra,-6
sb sp,7(ra)
auipc tp,0x8
addi tp,tp,1001 # sb_tdat10
lb t0,0(tp) # _start-0x80000000
li t2,-104
beq t0,t2,test_912
j fail
test_912:
li gp,912
li tp,0
li ra,-35
auipc sp,0x8
addi sp,sp,956 # sb_tdat
sb ra,0(sp)
lb a4,0(sp)
li t2,-35
beq a4,t2,test_912+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_912+0x8
test_913:
li gp,913
li tp,0
li ra,-51
auipc sp,0x8
addi sp,sp,904 # sb_tdat
nop
sb ra,1(sp)
lb a4,1(sp)
li t2,-51
beq a4,t2,test_913+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_913+0x8
test_914:
li gp,914
li tp,0
li ra,-52
auipc sp,0x8
addi sp,sp,848 # sb_tdat
nop
nop
sb ra,2(sp)
lb a4,2(sp)
li t2,-52
beq a4,t2,test_914+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_914+0x8
test_915:
li gp,915
li tp,0
li ra,-68
nop
auipc sp,0x8
addi sp,sp,784 # sb_tdat
sb ra,3(sp)
lb a4,3(sp)
li t2,-68
beq a4,t2,test_915+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_915+0x8
test_916:
li gp,916
li tp,0
li ra,-69
nop
auipc sp,0x8
addi sp,sp,728 # sb_tdat
nop
sb ra,4(sp)
lb a4,4(sp)
li t2,-69
beq a4,t2,test_916+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_916+0x8
test_917:
li gp,917
li tp,0
li ra,-85
nop
nop
auipc sp,0x8
addi sp,sp,664 # sb_tdat
sb ra,5(sp)
lb a4,5(sp)
li t2,-85
beq a4,t2,test_917+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_917+0x8
test_918:
li gp,918
li tp,0
auipc sp,0x8
addi sp,sp,616 # sb_tdat
li ra,51
sb ra,0(sp)
lb a4,0(sp)
li t2,51
beq a4,t2,test_918+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_918+0x8
test_919:
li gp,919
li tp,0
auipc sp,0x8
addi sp,sp,564 # sb_tdat
li ra,35
nop
sb ra,1(sp)
lb a4,1(sp)
li t2,35
beq a4,t2,test_919+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_919+0x8
test_920:
li gp,920
li tp,0
auipc sp,0x8
addi sp,sp,508 # sb_tdat
li ra,34
nop
nop
sb ra,2(sp)
lb a4,2(sp)
li t2,34
beq a4,t2,test_920+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_920+0x8
test_921:
li gp,921
li tp,0
auipc sp,0x8
addi sp,sp,448 # sb_tdat
nop
li ra,18
sb ra,3(sp)
lb a4,3(sp)
li t2,18
beq a4,t2,test_921+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_921+0x8
test_922:
li gp,922
li tp,0
auipc sp,0x8
addi sp,sp,392 # sb_tdat
nop
li ra,17
nop
sb ra,4(sp)
lb a4,4(sp)
li t2,17
beq a4,t2,test_922+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_922+0x8
test_923:
li gp,923
li tp,0
auipc sp,0x8
addi sp,sp,332 # sb_tdat
nop
nop
li ra,1
sb ra,5(sp)
lb a4,5(sp)
li t2,1
beq a4,t2,test_923+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_923+0x8
li a0,239
auipc a1,0x8
addi a1,a1,276 # sb_tdat
sb a0,3(a1)
test_1002:
li gp,1002
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq a4,t2,test_1003
j fail
test_1003:
li gp,1003
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
xor a4,ra,sp
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_1004
j fail
test_1004:
li gp,1004
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1005
j fail
test_1005:
li gp,1005
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
xor a4,ra,sp
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq a4,t2,test_1006
j fail
test_1006:
li gp,1006
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor ra,ra,sp
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq ra,t2,test_1007
j fail
test_1007:
li gp,1007
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor sp,ra,sp
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq sp,t2,test_1008
j fail
test_1008:
li gp,1008
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
xor ra,ra,ra
li t2,0
beq ra,t2,test_1009
j fail
test_1009:
li gp,1009
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1009+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq t1,t2,test_1010
j fail
test_1010:
li gp,1010
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
xor a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1010+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq t1,t2,test_1011
j fail
test_1011:
li gp,1011
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1011+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq t1,t2,test_1012
j fail
test_1012:
li gp,1012
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1012+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq a4,t2,test_1013
j fail
test_1013:
li gp,1013
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1013+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_1014
j fail
test_1014:
li gp,1014
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1014+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1015
j fail
test_1015:
li gp,1015
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1015+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq a4,t2,test_1016
j fail
test_1016:
li gp,1016
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1016+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_1017
j fail
test_1017:
li gp,1017
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1017+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1018
j fail
test_1018:
li gp,1018
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1018+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq a4,t2,test_1019
j fail
test_1019:
li gp,1019
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1019+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_1020
j fail
test_1020:
li gp,1020
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1020+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1021
j fail
test_1021:
li gp,1021
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1021+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq a4,t2,test_1022
j fail
test_1022:
li gp,1022
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1022+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_1023
j fail
test_1023:
li gp,1023
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1023+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1024
j fail
test_1024:
li gp,1024
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
xor sp,zero,ra
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq sp,t2,test_1025
j fail
test_1025:
li gp,1025
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
xor sp,ra,zero
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq sp,t2,test_1026
j fail
test_1026:
li gp,1026
xor ra,zero,zero
li t2,0
beq ra,t2,test_1027
j fail
test_1027:
li gp,1027
lui ra,0x11111
addi ra,ra,273 # _start-0x6eeeeeef
lui sp,0x22222
addi sp,sp,546 # _start-0x5dddddde
xor zero,ra,sp
li t2,0
beq zero,t2,test_1102
j fail
test_1102:
li gp,1102
li ra,0
li sp,1
blt ra,sp,test_1102+0x1c
beq zero,gp,test_1102+0x18
j fail
bne zero,gp,test_1103
blt ra,sp,test_1102+0x18
beq zero,gp,test_1103
j fail
test_1103:
li gp,1103
li ra,-1
li sp,1
blt ra,sp,test_1103+0x1c
beq zero,gp,test_1103+0x18
j fail
bne zero,gp,test_1104
blt ra,sp,test_1103+0x18
beq zero,gp,test_1104
j fail
test_1104:
li gp,1104
li ra,-2
li sp,-1
blt ra,sp,test_1104+0x1c
beq zero,gp,test_1104+0x18
j fail
bne zero,gp,test_1105
blt ra,sp,test_1104+0x18
beq zero,gp,test_1105
j fail
test_1105:
li gp,1105
li ra,1
li sp,0
blt ra,sp,test_1105+0x14
bne zero,gp,test_1105+0x1c
beq zero,gp,test_1105+0x1c
j fail
blt ra,sp,test_1105+0x14
test_1106:
li gp,1106
li ra,1
li sp,-1
blt ra,sp,test_1106+0x14
bne zero,gp,test_1106+0x1c
beq zero,gp,test_1106+0x1c
j fail
blt ra,sp,test_1106+0x14
test_1107:
li gp,1107
li ra,-1
li sp,-2
blt ra,sp,test_1107+0x14
bne zero,gp,test_1107+0x1c
beq zero,gp,test_1107+0x1c
j fail
blt ra,sp,test_1107+0x14
test_1108:
li gp,1108
li ra,1
li sp,-2
blt ra,sp,test_1108+0x14
bne zero,gp,test_1108+0x1c
beq zero,gp,test_1108+0x1c
j fail
blt ra,sp,test_1108+0x14
test_1109:
li gp,1109
li tp,0
li ra,0
li sp,-1
bge ra,sp,test_1109+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1109+0x8
test_1110:
li gp,1110
li tp,0
li ra,0
li sp,-1
nop
bge ra,sp,test_1110+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1110+0x8
test_1111:
li gp,1111
li tp,0
li ra,0
li sp,-1
nop
nop
bge ra,sp,test_1111+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1111+0x8
test_1112:
li gp,1112
li tp,0
li ra,0
nop
li sp,-1
bge ra,sp,test_1112+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1112+0x8
test_1113:
li gp,1113
li tp,0
li ra,0
nop
li sp,-1
nop
bge ra,sp,test_1113+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1113+0x8
test_1114:
li gp,1114
li tp,0
li ra,0
nop
nop
li sp,-1
bge ra,sp,test_1114+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1114+0x8
test_1115:
li gp,1115
li tp,0
li ra,0
li sp,-1
bge ra,sp,test_1115+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1115+0x8
test_1116:
li gp,1116
li tp,0
li ra,0
li sp,-1
nop
bge ra,sp,test_1116+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1116+0x8
test_1117:
li gp,1117
li tp,0
li ra,0
li sp,-1
nop
nop
bge ra,sp,test_1117+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1117+0x8
test_1118:
li gp,1118
li tp,0
li ra,0
nop
li sp,-1
bge ra,sp,test_1118+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1118+0x8
test_1119:
li gp,1119
li tp,0
li ra,0
nop
li sp,-1
nop
bge ra,sp,test_1119+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1119+0x8
test_1120:
li gp,1120
li tp,0
li ra,0
nop
nop
li sp,-1
bge ra,sp,test_1120+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1120+0x8
test_1121:
li gp,1121
li ra,1
bgtz ra,test_1121+0x1c
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3
beq ra,t2,test_1202
j fail
test_1202:
li gp,1202
li ra,0
li sp,1
bne ra,sp,test_1202+0x1c
beq zero,gp,test_1202+0x18
j fail
bne zero,gp,test_1203
bne ra,sp,test_1202+0x18
beq zero,gp,test_1203
j fail
test_1203:
li gp,1203
li ra,1
li sp,0
bne ra,sp,test_1203+0x1c
beq zero,gp,test_1203+0x18
j fail
bne zero,gp,test_1204
bne ra,sp,test_1203+0x18
beq zero,gp,test_1204
j fail
test_1204:
li gp,1204
li ra,-1
li sp,1
bne ra,sp,test_1204+0x1c
beq zero,gp,test_1204+0x18
j fail
bne zero,gp,test_1205
bne ra,sp,test_1204+0x18
beq zero,gp,test_1205
j fail
test_1205:
li gp,1205
li ra,1
li sp,-1
bne ra,sp,test_1205+0x1c
beq zero,gp,test_1205+0x18
j fail
bne zero,gp,test_1206
bne ra,sp,test_1205+0x18
beq zero,gp,test_1206
j fail
test_1206:
li gp,1206
li ra,0
li sp,0
bne ra,sp,test_1206+0x14
bne zero,gp,test_1206+0x1c
beq zero,gp,test_1206+0x1c
j fail
bne ra,sp,test_1206+0x14
test_1207:
li gp,1207
li ra,1
li sp,1
bne ra,sp,test_1207+0x14
bne zero,gp,test_1207+0x1c
beq zero,gp,test_1207+0x1c
j fail
bne ra,sp,test_1207+0x14
test_1208:
li gp,1208
li ra,-1
li sp,-1
bne ra,sp,test_1208+0x14
bne zero,gp,test_1208+0x1c
beq zero,gp,test_1208+0x1c
j fail
bne ra,sp,test_1208+0x14
test_1209:
li gp,1209
li tp,0
li ra,0
li sp,0
beq ra,sp,test_1209+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1209+0x8
test_1210:
li gp,1210
li tp,0
li ra,0
li sp,0
nop
beq ra,sp,test_1210+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1210+0x8
test_1211:
li gp,1211
li tp,0
li ra,0
li sp,0
nop
nop
beq ra,sp,test_1211+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1211+0x8
test_1212:
li gp,1212
li tp,0
li ra,0
nop
li sp,0
beq ra,sp,test_1212+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1212+0x8
test_1213:
li gp,1213
li tp,0
li ra,0
nop
li sp,0
nop
beq ra,sp,test_1213+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1213+0x8
test_1214:
li gp,1214
li tp,0
li ra,0
nop
nop
li sp,0
beq ra,sp,test_1214+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1214+0x8
test_1215:
li gp,1215
li tp,0
li ra,0
li sp,0
beq ra,sp,test_1215+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1215+0x8
test_1216:
li gp,1216
li tp,0
li ra,0
li sp,0
nop
beq ra,sp,test_1216+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1216+0x8
test_1217:
li gp,1217
li tp,0
li ra,0
li sp,0
nop
nop
beq ra,sp,test_1217+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1217+0x8
test_1218:
li gp,1218
li tp,0
li ra,0
nop
li sp,0
beq ra,sp,test_1218+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1218+0x8
test_1219:
li gp,1219
li tp,0
li ra,0
nop
li sp,0
nop
beq ra,sp,test_1219+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1219+0x8
test_1220:
li gp,1220
li tp,0
li ra,0
nop
nop
li sp,0
beq ra,sp,test_1220+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1220+0x8
test_1221:
li gp,1221
li ra,1
bnez ra,test_1221+0x1c
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3
beq ra,t2,test_1221+0x30
j fail
li a3,111
auipc a0,0x7
lh a0,1464(a0) # fence_i_insn
auipc a1,0x7
lh a1,1458(a1) # fence_i_insn+0x2
nop
nop
auipc t0,0x7
sh a0,1444(t0) # fence_i_insn+0x4
auipc t0,0x7
sh a1,1438(t0) # fence_i_insn+0x6
fence.i
auipc a5,0x7
addi a5,a5,1424 # fence_i_insn+0x4
jalr a6,a5
test_1302:
li gp,1302
nop
li t2,444
beq a3,t2,test_1302+0x14
j fail
li a4,100
addi a4,a4,-1
bnez a4,test_1302+0x18
auipc t0,0x7
sh a0,1388(t0) # fence_i_insn+0xc
auipc t0,0x7
sh a1,1382(t0) # fence_i_insn+0xe
fence.i
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
auipc a5,0x7
addi a5,a5,1324 # fence_i_insn+0xc
jalr a6,a5
test_1303:
li gp,1303
nop
li t2,777
beq a3,t2,jal_test_2
j fail
jal_test_2:
li gp,1402
li ra,0
jal tp,jal_target_2
jal_linkaddr_2:
nop
nop
j fail
jal_target_2:
auipc sp,0x0
addi sp,sp,-12 # jal_linkaddr_2
beq sp,tp,test_1403
j fail
test_1403:
li gp,1403
li ra,1
j test_1403+0x1c
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3
beq ra,t2,test_1502
j fail
test_1502:
li gp,1502
li a5,255
auipc ra,0x7
addi ra,ra,1216 # lh_tdat
lh a4,0(ra)
li t2,255
beq a4,t2,test_1503
j fail
test_1503:
li gp,1503
li a5,-256
auipc ra,0x7
addi ra,ra,1184 # lh_tdat
lh a4,2(ra)
li t2,-256
beq a4,t2,test_1504
j fail
test_1504:
li gp,1504
lui a5,0x1
addi a5,a5,-16 # _start-0x7ffff010
auipc ra,0x7
addi ra,ra,1148 # lh_tdat
lh a4,4(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
beq a4,t2,test_1505
j fail
test_1505:
li gp,1505
lui a5,0xfffff
addi a5,a5,15 # _end+0x7fff4f3f
auipc ra,0x7
addi ra,ra,1108 # lh_tdat
lh a4,6(ra)
lui t2,0xfffff
addi t2,t2,15 # _end+0x7fff4f3f
beq a4,t2,test_1506
j fail
test_1506:
li gp,1506
li a5,255
auipc ra,0x7
addi ra,ra,1078 # lh_tdat4
lh a4,-6(ra)
li t2,255
beq a4,t2,test_1507
j fail
test_1507:
li gp,1507
li a5,-256
auipc ra,0x7
addi ra,ra,1046 # lh_tdat4
lh a4,-4(ra)
li t2,-256
beq a4,t2,test_1508
j fail
test_1508:
li gp,1508
lui a5,0x1
addi a5,a5,-16 # _start-0x7ffff010
auipc ra,0x7
addi ra,ra,1010 # lh_tdat4
lh a4,-2(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
beq a4,t2,test_1509
j fail
test_1509:
li gp,1509
lui a5,0xfffff
addi a5,a5,15 # _end+0x7fff4f3f
auipc ra,0x7
addi ra,ra,970 # lh_tdat4
lh a4,0(ra)
lui t2,0xfffff
addi t2,t2,15 # _end+0x7fff4f3f
beq a4,t2,test_1510
j fail
test_1510:
li gp,1510
auipc ra,0x7
addi ra,ra,932 # lh_tdat
addi ra,ra,-32
lh t0,32(ra)
li t2,255
beq t0,t2,test_1511
j fail
test_1511:
li gp,1511
auipc ra,0x7
addi ra,ra,900 # lh_tdat
addi ra,ra,-5
lh t0,7(ra)
li t2,-256
beq t0,t2,test_1512
j fail
test_1512:
li gp,1512
li tp,0
auipc ra,0x7
addi ra,ra,866 # lh_tdat2
lh a4,2(ra)
mv t1,a4
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
beq t1,t2,test_1512+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1512+0x8
test_1513:
li gp,1513
li tp,0
auipc ra,0x7
addi ra,ra,816 # lh_tdat3
lh a4,2(ra)
nop
mv t1,a4
lui t2,0xfffff
addi t2,t2,15 # _end+0x7fff4f3f
beq t1,t2,test_1513+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1513+0x8
test_1514:
li gp,1514
li tp,0
auipc ra,0x7
addi ra,ra,756 # lh_tdat
lh a4,2(ra)
nop
nop
mv t1,a4
li t2,-256
beq t1,t2,test_1514+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1514+0x8
test_1515:
li gp,1515
li tp,0
auipc ra,0x7
addi ra,ra,702 # lh_tdat2
lh a4,2(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
beq a4,t2,test_1515+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1515+0x8
test_1516:
li gp,1516
li tp,0
auipc ra,0x7
addi ra,ra,656 # lh_tdat3
nop
lh a4,2(ra)
lui t2,0xfffff
addi t2,t2,15 # _end+0x7fff4f3f
beq a4,t2,test_1516+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1516+0x8
test_1517:
li gp,1517
li tp,0
auipc ra,0x7
addi ra,ra,600 # lh_tdat
nop
nop
lh a4,2(ra)
li t2,-256
beq a4,t2,test_1517+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1517+0x8
test_1518:
li gp,1518
auipc t0,0x7
addi t0,t0,552 # lh_tdat
lh sp,0(t0)
li sp,2
li t2,2
beq sp,t2,test_1519
j fail
test_1519:
li gp,1519
auipc t0,0x7
addi t0,t0,520 # lh_tdat
lh sp,0(t0)
nop
li sp,2
li t2,2
beq sp,t2,test_1602
j fail
test_1602:
li gp,1602
lui a5,0xff0
addi a5,a5,255 # _start-0x7f00ff01
auipc ra,0x7
addi ra,ra,492 # lw_tdat
lw a4,0(ra)
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq a4,t2,test_1603
j fail
test_1603:
li gp,1603
lui a5,0xff010
addi a5,a5,-256 # _end+0x7f005e30
auipc ra,0x7
addi ra,ra,452 # lw_tdat
lw a4,4(ra)
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_1604
j fail
test_1604:
li gp,1604
lui a5,0xff01
addi a5,a5,-16 # _start-0x700ff010
auipc ra,0x7
addi ra,ra,412 # lw_tdat
lw a4,8(ra)
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1605
j fail
test_1605:
li gp,1605
lui a5,0xf00ff
addi a5,a5,15 # _end+0x700f4f3f
auipc ra,0x7
addi ra,ra,372 # lw_tdat
lw a4,12(ra)
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq a4,t2,test_1606
j fail
test_1606:
li gp,1606
lui a5,0xff0
addi a5,a5,255 # _start-0x7f00ff01
auipc ra,0x7
addi ra,ra,344 # lw_tdat4
lw a4,-12(ra)
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq a4,t2,test_1607
j fail
test_1607:
li gp,1607
lui a5,0xff010
addi a5,a5,-256 # _end+0x7f005e30
auipc ra,0x7
addi ra,ra,304 # lw_tdat4
lw a4,-8(ra)
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_1608
j fail
test_1608:
li gp,1608
lui a5,0xff01
addi a5,a5,-16 # _start-0x700ff010
auipc ra,0x7
addi ra,ra,264 # lw_tdat4
lw a4,-4(ra)
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1609
j fail
test_1609:
li gp,1609
lui a5,0xf00ff
addi a5,a5,15 # _end+0x700f4f3f
auipc ra,0x7
addi ra,ra,224 # lw_tdat4
lw a4,0(ra)
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq a4,t2,test_1610
j fail
test_1610:
li gp,1610
auipc ra,0x7
addi ra,ra,180 # lw_tdat
addi ra,ra,-32
lw t0,32(ra)
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq t0,t2,test_1611
j fail
test_1611:
li gp,1611
auipc ra,0x7
addi ra,ra,144 # lw_tdat
addi ra,ra,-3
lw t0,7(ra)
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq t0,t2,test_1612
j fail
test_1612:
li gp,1612
li tp,0
auipc ra,0x7
addi ra,ra,108 # lw_tdat2
lw a4,4(ra)
mv t1,a4
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq t1,t2,test_1612+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1612+0x8
test_1613:
li gp,1613
li tp,0
auipc ra,0x7
addi ra,ra,60 # lw_tdat3
lw a4,4(ra)
nop
mv t1,a4
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq t1,t2,test_1613+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1613+0x8
test_1614:
li gp,1614
li tp,0
auipc ra,0x7
addi ra,ra,-4 # lw_tdat
lw a4,4(ra)
nop
nop
mv t1,a4
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq t1,t2,test_1614+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1614+0x8
test_1615:
li gp,1615
li tp,0
auipc ra,0x7
addi ra,ra,-60 # lw_tdat2
lw a4,4(ra)
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1615+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1615+0x8
test_1616:
li gp,1616
li tp,0
auipc ra,0x7
addi ra,ra,-104 # lw_tdat3
nop
lw a4,4(ra)
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f4f3f
beq a4,t2,test_1616+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1616+0x8
test_1617:
li gp,1617
li tp,0
auipc ra,0x7
addi ra,ra,-164 # lw_tdat
nop
nop
lw a4,4(ra)
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq a4,t2,test_1617+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1617+0x8
test_1618:
li gp,1618
auipc t0,0x7
addi t0,t0,-216 # lw_tdat
lw sp,0(t0)
li sp,2
li t2,2
beq sp,t2,test_1619
j fail
test_1619:
li gp,1619
auipc t0,0x7
addi t0,t0,-248 # lw_tdat
lw sp,0(t0)
nop
li sp,2
li t2,2
beq sp,t2,test_1702
j fail
test_1702:
li gp,1702
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0f5e3f
beq a4,t2,test_1703
j fail
test_1703:
li gp,1703
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
or a4,ra,sp
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff05f20
beq a4,t2,test_1704
j fail
test_1704:
li gp,1704
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
beq a4,t2,test_1705
j fail
test_1705:
li gp,1705
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
or a4,ra,sp
lui t2,0xf0fff
addi t2,t2,255 # _end+0x70ff502f
beq a4,t2,test_1706
j fail
test_1706:
li gp,1706
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or ra,ra,sp
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0f5e3f
beq ra,t2,test_1707
j fail
test_1707:
li gp,1707
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or sp,ra,sp
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0f5e3f
beq sp,t2,test_1708
j fail
test_1708:
li gp,1708
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
or ra,ra,ra
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq ra,t2,test_1709
j fail
test_1709:
li gp,1709
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1709+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0f5e3f
beq t1,t2,test_1710
j fail
test_1710:
li gp,1710
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
or a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1710+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff05f20
beq t1,t2,test_1711
j fail
test_1711:
li gp,1711
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1711+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
beq t1,t2,test_1712
j fail
test_1712:
li gp,1712
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1712+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0f5e3f
beq a4,t2,test_1713
j fail
test_1713:
li gp,1713
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1713+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff05f20
beq a4,t2,test_1714
j fail
test_1714:
li gp,1714
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1714+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
beq a4,t2,test_1715
j fail
test_1715:
li gp,1715
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1715+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0f5e3f
beq a4,t2,test_1716
j fail
test_1716:
li gp,1716
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1716+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff05f20
beq a4,t2,test_1717
j fail
test_1717:
li gp,1717
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1717+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
beq a4,t2,test_1718
j fail
test_1718:
li gp,1718
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1718+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0f5e3f
beq a4,t2,test_1719
j fail
test_1719:
li gp,1719
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1719+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff05f20
beq a4,t2,test_1720
j fail
test_1720:
li gp,1720
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1720+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
beq a4,t2,test_1721
j fail
test_1721:
li gp,1721
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1721+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0f5e3f
beq a4,t2,test_1722
j fail
test_1722:
li gp,1722
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f05020
nop
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1722+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff05f20
beq a4,t2,test_1723
j fail
test_1723:
li gp,1723
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1723+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
beq a4,t2,test_1724
j fail
test_1724:
li gp,1724
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
or sp,zero,ra
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f005e30
beq sp,t2,test_1725
j fail
test_1725:
li gp,1725
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
or sp,ra,zero
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq sp,t2,test_1726
j fail
test_1726:
li gp,1726
or ra,zero,zero
li t2,0
beq ra,t2,test_1727
j fail
test_1727:
li gp,1727
lui ra,0x11111
addi ra,ra,273 # _start-0x6eeeeeef
lui sp,0x22222
addi sp,sp,546 # _start-0x5dddddde
or zero,ra,sp
li t2,0
beq zero,t2,test_1802
j fail
test_1802:
li gp,1802
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
ori a4,ra,-241
li t2,-241
beq a4,t2,test_1803
j fail
test_1803:
li gp,1803
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
ori a4,ra,240
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1804
j fail
test_1804:
li gp,1804
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
ori a4,ra,1807
lui t2,0xff0
addi t2,t2,2047 # _start-0x7f00f801
beq a4,t2,test_1805
j fail
test_1805:
li gp,1805
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
ori a4,ra,240
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700f502f
beq a4,t2,test_1806
j fail
test_1806:
li gp,1806
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f005e30
ori ra,ra,240
lui t2,0xff010
addi t2,t2,-16 # _end+0x7f005f20
beq ra,t2,test_1807
j fail
test_1807:
li gp,1807
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
ori a4,ra,240
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1807+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq t1,t2,test_1808
j fail
test_1808:
li gp,1808
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
ori a4,ra,1807
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1808+0x8
lui t2,0xff0
addi t2,t2,2047 # _start-0x7f00f801
beq t1,t2,test_1809
j fail
test_1809:
li gp,1809
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
ori a4,ra,240
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1809+0x8
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700f502f
beq t1,t2,test_1810
j fail
test_1810:
li gp,1810
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
ori a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1810+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1811
j fail
test_1811:
li gp,1811
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
ori a4,ra,-241
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1811+0x8
li t2,-1
beq a4,t2,test_1812
j fail
test_1812:
li gp,1812
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
nop
nop
ori a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1812+0x8
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700f502f
beq a4,t2,test_1813
j fail
test_1813:
li gp,1813
ori ra,zero,240
li t2,240
beq ra,t2,test_1814
j fail
test_1814:
li gp,1814
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
ori zero,ra,1807
li t2,0
beq zero,t2,test_1902
j fail
test_1902:
li gp,1902
auipc ra,0x6
addi ra,ra,1984 # sh_tdat
li sp,170
auipc a5,0x0
addi a5,a5,20 # test_1902+0x24
sh sp,0(ra)
lh a4,0(ra)
j test_1902+0x28
mv a4,sp
li t2,170
beq a4,t2,test_1903
j fail
test_1903:
li gp,1903
auipc ra,0x6
addi ra,ra,1932 # sh_tdat
lui sp,0xffffb
addi sp,sp,-1536 # _end+0x7fff0930
auipc a5,0x0
addi a5,a5,20 # test_1903+0x28
sh sp,2(ra)
lh a4,2(ra)
j test_1903+0x2c
mv a4,sp
lui t2,0xffffb
addi t2,t2,-1536 # _end+0x7fff0930
beq a4,t2,test_1904
j fail
test_1904:
li gp,1904
auipc ra,0x6
addi ra,ra,1872 # sh_tdat
lui sp,0xbeef1
addi sp,sp,-1376 # _end+0x3eee69d0
auipc a5,0x0
addi a5,a5,20 # test_1904+0x28
sh sp,4(ra)
lw a4,4(ra)
j test_1904+0x2c
mv a4,sp
lui t2,0xbeef1
addi t2,t2,-1376 # _end+0x3eee69d0
beq a4,t2,test_1905
j fail
test_1905:
li gp,1905
auipc ra,0x6
addi ra,ra,1812 # sh_tdat
lui sp,0xffffa
addi sp,sp,10 # _end+0x7ffeff3a
auipc a5,0x0
addi a5,a5,20 # test_1905+0x28
sh sp,6(ra)
lh a4,6(ra)
j test_1905+0x2c
mv a4,sp
lui t2,0xffffa
addi t2,t2,10 # _end+0x7ffeff3a
beq a4,t2,test_1906
j fail
test_1906:
li gp,1906
auipc ra,0x6
addi ra,ra,1766 # sh_tdat8
li sp,170
auipc a5,0x0
addi a5,a5,20 # test_1906+0x24
sh sp,-6(ra)
lh a4,-6(ra)
j test_1906+0x28
mv a4,sp
li t2,170
beq a4,t2,test_1907
j fail
test_1907:
li gp,1907
auipc ra,0x6
addi ra,ra,1714 # sh_tdat8
lui sp,0xffffb
addi sp,sp,-1536 # _end+0x7fff0930
auipc a5,0x0
addi a5,a5,20 # test_1907+0x28
sh sp,-4(ra)
lh a4,-4(ra)
j test_1907+0x2c
mv a4,sp
lui t2,0xffffb
addi t2,t2,-1536 # _end+0x7fff0930
beq a4,t2,test_1908
j fail
test_1908:
li gp,1908
auipc ra,0x6
addi ra,ra,1654 # sh_tdat8
lui sp,0x1
addi sp,sp,-1376 # _start-0x7ffff560
auipc a5,0x0
addi a5,a5,20 # test_1908+0x28
sh sp,-2(ra)
lh a4,-2(ra)
j test_1908+0x2c
mv a4,sp
lui t2,0x1
addi t2,t2,-1376 # _start-0x7ffff560
beq a4,t2,test_1909
j fail
test_1909:
li gp,1909
auipc ra,0x6
addi ra,ra,1594 # sh_tdat8
lui sp,0xffffa
addi sp,sp,10 # _end+0x7ffeff3a
auipc a5,0x0
addi a5,a5,20 # test_1909+0x28
sh sp,0(ra)
lh a4,0(ra)
j test_1909+0x2c
mv a4,sp
lui t2,0xffffa
addi t2,t2,10 # _end+0x7ffeff3a
beq a4,t2,test_1910
j fail
test_1910:
li gp,1910
auipc ra,0x6
addi ra,ra,1536 # sh_tdat9
lui sp,0x12345
addi sp,sp,1656 # _start-0x6dcba988
addi tp,ra,-32
sh sp,32(tp) # _start-0x7fffffe0
lh t0,0(ra)
lui t2,0x5
addi t2,t2,1656 # _start-0x7fffa988
beq t0,t2,test_1911
j fail
test_1911:
li gp,1911
auipc ra,0x6
addi ra,ra,1488 # sh_tdat9
lui sp,0x3
addi sp,sp,152 # _start-0x7fffcf68
addi ra,ra,-5
sh sp,7(ra)
auipc tp,0x6
addi tp,tp,1466 # sh_tdat10
lh t0,0(tp) # _start-0x80000000
lui t2,0x3
addi t2,t2,152 # _start-0x7fffcf68
beq t0,t2,test_1912
j fail
test_1912:
li gp,1912
li tp,0
lui ra,0xffffd
addi ra,ra,-803 # _end+0x7fff2c0d
auipc sp,0x6
addi sp,sp,1404 # sh_tdat
sh ra,0(sp)
lh a4,0(sp)
lui t2,0xffffd
addi t2,t2,-803 # _end+0x7fff2c0d
beq a4,t2,test_1912+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1912+0x8
test_1913:
li gp,1913
li tp,0
lui ra,0xffffc
addi ra,ra,-819 # _end+0x7fff1bfd
auipc sp,0x6
addi sp,sp,1344 # sh_tdat
nop
sh ra,2(sp)
lh a4,2(sp)
lui t2,0xffffc
addi t2,t2,-819 # _end+0x7fff1bfd
beq a4,t2,test_1913+0x34
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1913+0x8
test_1914:
li gp,1914
li tp,0
lui ra,0xffffc
addi ra,ra,-1076 # _end+0x7fff1afc
auipc sp,0x6
addi sp,sp,1280 # sh_tdat
nop
nop
sh ra,4(sp)
lh a4,4(sp)
lui t2,0xffffc
addi t2,t2,-1076 # _end+0x7fff1afc
beq a4,t2,test_1914+0x38
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1914+0x8
test_1915:
li gp,1915
li tp,0
lui ra,0xffffb
addi ra,ra,-1092 # _end+0x7fff0aec
nop
auipc sp,0x6
addi sp,sp,1208 # sh_tdat
sh ra,6(sp)
lh a4,6(sp)
lui t2,0xffffb
addi t2,t2,-1092 # _end+0x7fff0aec
beq a4,t2,test_1915+0x34
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1915+0x8
test_1916:
li gp,1916
li tp,0
lui ra,0xffffb
addi ra,ra,-1349 # _end+0x7fff09eb
nop
auipc sp,0x6
addi sp,sp,1144 # sh_tdat
nop
sh ra,8(sp)
lh a4,8(sp)
lui t2,0xffffb
addi t2,t2,-1349 # _end+0x7fff09eb
beq a4,t2,test_1916+0x38
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1916+0x8
test_1917:
li gp,1917
li tp,0
lui ra,0xffffe
addi ra,ra,-1365 # _end+0x7fff39db
nop
nop
auipc sp,0x6
addi sp,sp,1072 # sh_tdat
sh ra,10(sp)
lh a4,10(sp)
lui t2,0xffffe
addi t2,t2,-1365 # _end+0x7fff39db
beq a4,t2,test_1917+0x38
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1917+0x8
test_1918:
li gp,1918
li tp,0
auipc sp,0x6
addi sp,sp,1020 # sh_tdat
lui ra,0x2
addi ra,ra,563 # _start-0x7fffddcd
sh ra,0(sp)
lh a4,0(sp)
lui t2,0x2
addi t2,t2,563 # _start-0x7fffddcd
beq a4,t2,test_1918+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1918+0x8
test_1919:
li gp,1919
li tp,0
auipc sp,0x6
addi sp,sp,960 # sh_tdat
lui ra,0x1
addi ra,ra,547 # _start-0x7fffeddd
nop
sh ra,2(sp)
lh a4,2(sp)
lui t2,0x1
addi t2,t2,547 # _start-0x7fffeddd
beq a4,t2,test_1919+0x34
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1919+0x8
test_1920:
li gp,1920
li tp,0
auipc sp,0x6
addi sp,sp,896 # sh_tdat
lui ra,0x1
addi ra,ra,290 # _start-0x7fffeede
nop
nop
sh ra,4(sp)
lh a4,4(sp)
lui t2,0x1
addi t2,t2,290 # _start-0x7fffeede
beq a4,t2,test_1920+0x38
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1920+0x8
test_1921:
li gp,1921
li tp,0
auipc sp,0x6
addi sp,sp,828 # sh_tdat
nop
li ra,274
sh ra,6(sp)
lh a4,6(sp)
li t2,274
beq a4,t2,test_1921+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1921+0x8
test_1922:
li gp,1922
li tp,0
auipc sp,0x6
addi sp,sp,772 # sh_tdat
nop
li ra,17
nop
sh ra,8(sp)
lh a4,8(sp)
li t2,17
beq a4,t2,test_1922+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1922+0x8
test_1923:
li gp,1923
li tp,0
auipc sp,0x6
addi sp,sp,712 # sh_tdat
nop
nop
lui ra,0x3
addi ra,ra,1 # _start-0x7fffcfff
sh ra,10(sp)
lh a4,10(sp)
lui t2,0x3
addi t2,t2,1 # _start-0x7fffcfff
beq a4,t2,test_1923+0x38
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1923+0x8
lui a0,0xc
addi a0,a0,-273 # _start-0x7fff4111
auipc a1,0x6
addi a1,a1,644 # sh_tdat
sh a0,6(a1)
test_2002:
li gp,2002
li ra,1
li sp,0
sll a4,ra,sp
li t2,1
beq a4,t2,test_2003
j fail
test_2003:
li gp,2003
li ra,1
li sp,1
sll a4,ra,sp
li t2,2
beq a4,t2,test_2004
j fail
test_2004:
li gp,2004
li ra,1
li sp,7
sll a4,ra,sp
li t2,128
beq a4,t2,test_2005
j fail
test_2005:
li gp,2005
li ra,1
li sp,14
sll a4,ra,sp
lui t2,0x4
beq a4,t2,test_2006
j fail
test_2006:
li gp,2006
li ra,1
li sp,31
sll a4,ra,sp
lui t2,0x80000
beq a4,t2,test_2007
j fail
test_2007:
li gp,2007
li ra,-1
li sp,0
sll a4,ra,sp
li t2,-1
beq a4,t2,test_2008
j fail
test_2008:
li gp,2008
li ra,-1
li sp,1
sll a4,ra,sp
li t2,-2
beq a4,t2,test_2009
j fail
test_2009:
li gp,2009
li ra,-1
li sp,7
sll a4,ra,sp
li t2,-128
beq a4,t2,test_2010
j fail
test_2010:
li gp,2010
li ra,-1
li sp,14
sll a4,ra,sp
lui t2,0xffffc
beq a4,t2,test_2011
j fail
test_2011:
li gp,2011
li ra,-1
li sp,31
sll a4,ra,sp
lui t2,0x80000
beq a4,t2,test_2012
j fail
test_2012:
li gp,2012
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,0
sll a4,ra,sp
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
beq a4,t2,test_2013
j fail
test_2013:
li gp,2013
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,1
sll a4,ra,sp
lui t2,0x42424
addi t2,t2,578 # _start-0x3dbdbdbe
beq a4,t2,test_2014
j fail
test_2014:
li gp,2014
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,7
sll a4,ra,sp
lui t2,0x90909
addi t2,t2,128 # _end+0x108fefb0
beq a4,t2,test_2015
j fail
test_2015:
li gp,2015
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,14
sll a4,ra,sp
lui t2,0x48484
beq a4,t2,test_2016
j fail
test_2016:
li gp,2016
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,31
sll a4,ra,sp
lui t2,0x80000
beq a4,t2,test_2017
j fail
test_2017:
li gp,2017
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-64
sll a4,ra,sp
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
beq a4,t2,test_2018
j fail
test_2018:
li gp,2018
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-63
sll a4,ra,sp
lui t2,0x42424
addi t2,t2,578 # _start-0x3dbdbdbe
beq a4,t2,test_2019
j fail
test_2019:
li gp,2019
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-57
sll a4,ra,sp
lui t2,0x90909
addi t2,t2,128 # _end+0x108fefb0
beq a4,t2,test_2020
j fail
test_2020:
li gp,2020
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-50
sll a4,ra,sp
lui t2,0x48484
beq a4,t2,test_2022
j fail
test_2022:
li gp,2022
li ra,1
li sp,7
sll ra,ra,sp
li t2,128
beq ra,t2,test_2023
j fail
test_2023:
li gp,2023
li ra,1
li sp,14
sll sp,ra,sp
lui t2,0x4
beq sp,t2,test_2024
j fail
test_2024:
li gp,2024
li ra,3
sll ra,ra,ra
li t2,24
beq ra,t2,test_2025
j fail
test_2025:
li gp,2025
li tp,0
li ra,1
li sp,7
sll a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2025+0x8
li t2,128
beq t1,t2,test_2026
j fail
test_2026:
li gp,2026
li tp,0
li ra,1
li sp,14
sll a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2026+0x8
lui t2,0x4
beq t1,t2,test_2027
j fail
test_2027:
li gp,2027
li tp,0
li ra,1
li sp,31
sll a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2027+0x8
lui t2,0x80000
beq t1,t2,test_2028
j fail
test_2028:
li gp,2028
li tp,0
li ra,1
li sp,7
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2028+0x8
li t2,128
beq a4,t2,test_2029
j fail
test_2029:
li gp,2029
li tp,0
li ra,1
li sp,14
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2029+0x8
lui t2,0x4
beq a4,t2,test_2030
j fail
test_2030:
li gp,2030
li tp,0
li ra,1
li sp,31
nop
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2030+0x8
lui t2,0x80000
beq a4,t2,test_2031
j fail
test_2031:
li gp,2031
li tp,0
li ra,1
nop
li sp,7
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2031+0x8
li t2,128
beq a4,t2,test_2032
j fail
test_2032:
li gp,2032
li tp,0
li ra,1
nop
li sp,14
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2032+0x8
lui t2,0x4
beq a4,t2,test_2033
j fail
test_2033:
li gp,2033
li tp,0
li ra,1
nop
nop
li sp,31
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2033+0x8
lui t2,0x80000
beq a4,t2,test_2034
j fail
test_2034:
li gp,2034
li tp,0
li sp,7
li ra,1
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2034+0x8
li t2,128
beq a4,t2,test_2035
j fail
test_2035:
li gp,2035
li tp,0
li sp,14
li ra,1
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2035+0x8
lui t2,0x4
beq a4,t2,test_2036
j fail
test_2036:
li gp,2036
li tp,0
li sp,31
li ra,1
nop
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2036+0x8
lui t2,0x80000
beq a4,t2,test_2037
j fail
test_2037:
li gp,2037
li tp,0
li sp,7
nop
li ra,1
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2037+0x8
li t2,128
beq a4,t2,test_2038
j fail
test_2038:
li gp,2038
li tp,0
li sp,14
nop
li ra,1
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2038+0x8
lui t2,0x4
beq a4,t2,test_2039
j fail
test_2039:
li gp,2039
li tp,0
li sp,31
nop
nop
li ra,1
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2039+0x8
lui t2,0x80000
beq a4,t2,test_2040
j fail
test_2040:
li gp,2040
li ra,15
sll sp,zero,ra
li t2,0
beq sp,t2,test_2041
j fail
test_2041:
li gp,2041
li ra,32
sll sp,ra,zero
li t2,32
beq sp,t2,test_2042
j fail
test_2042:
li gp,2042
sll ra,zero,zero
li t2,0
beq ra,t2,test_2043
j fail
test_2043:
li gp,2043
li ra,1024
lui sp,0x1
addi sp,sp,-2048 # _start-0x7ffff800
sll zero,ra,sp
li t2,0
beq zero,t2,test_2102
j fail
test_2102:
lui gp,0x1
addi gp,gp,-1994 # _start-0x7ffff7ca
li ra,1
slli a4,ra,0x0
li t2,1
beq a4,t2,test_2103
j fail
test_2103:
lui gp,0x1
addi gp,gp,-1993 # _start-0x7ffff7c9
li ra,1
slli a4,ra,0x1
li t2,2
beq a4,t2,test_2104
j fail
test_2104:
lui gp,0x1
addi gp,gp,-1992 # _start-0x7ffff7c8
li ra,1
slli a4,ra,0x7
li t2,128
beq a4,t2,test_2105
j fail
test_2105:
lui gp,0x1
addi gp,gp,-1991 # _start-0x7ffff7c7
li ra,1
slli a4,ra,0xe
lui t2,0x4
beq a4,t2,test_2106
j fail
test_2106:
lui gp,0x1
addi gp,gp,-1990 # _start-0x7ffff7c6
li ra,1
slli a4,ra,0x1f
lui t2,0x80000
beq a4,t2,test_2107
j fail
test_2107:
lui gp,0x1
addi gp,gp,-1989 # _start-0x7ffff7c5
li ra,-1
slli a4,ra,0x0
li t2,-1
beq a4,t2,test_2108
j fail
test_2108:
lui gp,0x1
addi gp,gp,-1988 # _start-0x7ffff7c4
li ra,-1
slli a4,ra,0x1
li t2,-2
beq a4,t2,test_2109
j fail
test_2109:
lui gp,0x1
addi gp,gp,-1987 # _start-0x7ffff7c3
li ra,-1
slli a4,ra,0x7
li t2,-128
beq a4,t2,test_2110
j fail
test_2110:
lui gp,0x1
addi gp,gp,-1986 # _start-0x7ffff7c2
li ra,-1
slli a4,ra,0xe
lui t2,0xffffc
beq a4,t2,test_2111
j fail
test_2111:
lui gp,0x1
addi gp,gp,-1985 # _start-0x7ffff7c1
li ra,-1
slli a4,ra,0x1f
lui t2,0x80000
beq a4,t2,test_2112
j fail
test_2112:
lui gp,0x1
addi gp,gp,-1984 # _start-0x7ffff7c0
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0x0
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
beq a4,t2,test_2113
j fail
test_2113:
lui gp,0x1
addi gp,gp,-1983 # _start-0x7ffff7bf
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0x1
lui t2,0x42424
addi t2,t2,578 # _start-0x3dbdbdbe
beq a4,t2,test_2114
j fail
test_2114:
lui gp,0x1
addi gp,gp,-1982 # _start-0x7ffff7be
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0x7
lui t2,0x90909
addi t2,t2,128 # _end+0x108fefb0
beq a4,t2,test_2115
j fail
test_2115:
lui gp,0x1
addi gp,gp,-1981 # _start-0x7ffff7bd
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0xe
lui t2,0x48484
beq a4,t2,test_2116
j fail
test_2116:
lui gp,0x1
addi gp,gp,-1980 # _start-0x7ffff7bc
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0x1f
lui t2,0x80000
beq a4,t2,test_2117
j fail
test_2117:
lui gp,0x1
addi gp,gp,-1979 # _start-0x7ffff7bb
li ra,1
slli ra,ra,0x7
li t2,128
beq ra,t2,test_2118
j fail
test_2118:
lui gp,0x1
addi gp,gp,-1978 # _start-0x7ffff7ba
li tp,0
li ra,1
slli a4,ra,0x7
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2118+0xc
li t2,128
beq t1,t2,test_2119
j fail
test_2119:
lui gp,0x1
addi gp,gp,-1977 # _start-0x7ffff7b9
li tp,0
li ra,1
slli a4,ra,0xe
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2119+0xc
lui t2,0x4
beq t1,t2,test_2120
j fail
test_2120:
lui gp,0x1
addi gp,gp,-1976 # _start-0x7ffff7b8
li tp,0
li ra,1
slli a4,ra,0x1f
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2120+0xc
lui t2,0x80000
beq t1,t2,test_2121
j fail
test_2121:
lui gp,0x1
addi gp,gp,-1975 # _start-0x7ffff7b7
li tp,0
li ra,1
slli a4,ra,0x7
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2121+0xc
li t2,128
beq a4,t2,test_2122
j fail
test_2122:
lui gp,0x1
addi gp,gp,-1974 # _start-0x7ffff7b6
li tp,0
li ra,1
nop
slli a4,ra,0xe
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2122+0xc
lui t2,0x4
beq a4,t2,test_2123
j fail
test_2123:
lui gp,0x1
addi gp,gp,-1973 # _start-0x7ffff7b5
li tp,0
li ra,1
nop
nop
slli a4,ra,0x1f
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2123+0xc
lui t2,0x80000
beq a4,t2,test_2124
j fail
test_2124:
lui gp,0x1
addi gp,gp,-1972 # _start-0x7ffff7b4
slli ra,zero,0x1f
li t2,0
beq ra,t2,test_2125
j fail
test_2125:
lui gp,0x1
addi gp,gp,-1971 # _start-0x7ffff7b3
li ra,33
slli zero,ra,0x14
li t2,0
beq zero,t2,test_2202
j fail
test_2202:
lui gp,0x1
addi gp,gp,-1894 # _start-0x7ffff766
li ra,0
li sp,0
slt a4,ra,sp
li t2,0
beq a4,t2,test_2203
j fail
test_2203:
lui gp,0x1
addi gp,gp,-1893 # _start-0x7ffff765
li ra,1
li sp,1
slt a4,ra,sp
li t2,0
beq a4,t2,test_2204
j fail
test_2204:
lui gp,0x1
addi gp,gp,-1892 # _start-0x7ffff764
li ra,3
li sp,7
slt a4,ra,sp
li t2,1
beq a4,t2,test_2205
j fail
test_2205:
lui gp,0x1
addi gp,gp,-1891 # _start-0x7ffff763
li ra,7
li sp,3
slt a4,ra,sp
li t2,0
beq a4,t2,test_2206
j fail
test_2206:
lui gp,0x1
addi gp,gp,-1890 # _start-0x7ffff762
li ra,0
lui sp,0xffff8
slt a4,ra,sp
li t2,0
beq a4,t2,test_2207
j fail
test_2207:
lui gp,0x1
addi gp,gp,-1889 # _start-0x7ffff761
lui ra,0x80000
li sp,0
slt a4,ra,sp
li t2,1
beq a4,t2,test_2208
j fail
test_2208:
lui gp,0x1
addi gp,gp,-1888 # _start-0x7ffff760
lui ra,0x80000
lui sp,0xffff8
slt a4,ra,sp
li t2,1
beq a4,t2,test_2209
j fail
test_2209:
lui gp,0x1
addi gp,gp,-1887 # _start-0x7ffff75f
li ra,0
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
slt a4,ra,sp
li t2,1
beq a4,t2,test_2210
j fail
test_2210:
lui gp,0x1
addi gp,gp,-1886 # _start-0x7ffff75e
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,0
slt a4,ra,sp
li t2,0
beq a4,t2,test_2211
j fail
test_2211:
lui gp,0x1
addi gp,gp,-1885 # _start-0x7ffff75d
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
slt a4,ra,sp
li t2,0
beq a4,t2,test_2212
j fail
test_2212:
lui gp,0x1
addi gp,gp,-1884 # _start-0x7ffff75c
lui ra,0x80000
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
slt a4,ra,sp
li t2,1
beq a4,t2,test_2213
j fail
test_2213:
lui gp,0x1
addi gp,gp,-1883 # _start-0x7ffff75b
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0xffff8
slt a4,ra,sp
li t2,0
beq a4,t2,test_2214
j fail
test_2214:
lui gp,0x1
addi gp,gp,-1882 # _start-0x7ffff75a
li ra,0
li sp,-1
slt a4,ra,sp
li t2,0
beq a4,t2,test_2215
j fail
test_2215:
lui gp,0x1
addi gp,gp,-1881 # _start-0x7ffff759
li ra,-1
li sp,1
slt a4,ra,sp
li t2,1
beq a4,t2,test_2216
j fail
test_2216:
lui gp,0x1
addi gp,gp,-1880 # _start-0x7ffff758
li ra,-1
li sp,-1
slt a4,ra,sp
li t2,0
beq a4,t2,test_2217
j fail
test_2217:
lui gp,0x1
addi gp,gp,-1879 # _start-0x7ffff757
li ra,14
li sp,13
slt ra,ra,sp
li t2,0
beq ra,t2,test_2218
j fail
test_2218:
lui gp,0x1
addi gp,gp,-1878 # _start-0x7ffff756
li ra,11
li sp,13
slt sp,ra,sp
li t2,1
beq sp,t2,test_2219
j fail
test_2219:
lui gp,0x1
addi gp,gp,-1877 # _start-0x7ffff755
li ra,13
slt ra,ra,ra
li t2,0
beq ra,t2,test_2220
j fail
test_2220:
lui gp,0x1
addi gp,gp,-1876 # _start-0x7ffff754
li tp,0
li ra,11
li sp,13
slt a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2220+0xc
li t2,1
beq t1,t2,test_2221
j fail
test_2221:
lui gp,0x1
addi gp,gp,-1875 # _start-0x7ffff753
li tp,0
li ra,14
li sp,13
slt a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2221+0xc
li t2,0
beq t1,t2,test_2222
j fail
test_2222:
lui gp,0x1
addi gp,gp,-1874 # _start-0x7ffff752
li tp,0
li ra,12
li sp,13
slt a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2222+0xc
li t2,1
beq t1,t2,test_2223
j fail
test_2223:
lui gp,0x1
addi gp,gp,-1873 # _start-0x7ffff751
li tp,0
li ra,14
li sp,13
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2223+0xc
li t2,0
beq a4,t2,test_2224
j fail
test_2224:
lui gp,0x1
addi gp,gp,-1872 # _start-0x7ffff750
li tp,0
li ra,11
li sp,13
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2224+0xc
li t2,1
beq a4,t2,test_2225
j fail
test_2225:
lui gp,0x1
addi gp,gp,-1871 # _start-0x7ffff74f
li tp,0
li ra,15
li sp,13
nop
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2225+0xc
li t2,0
beq a4,t2,test_2226
j fail
test_2226:
lui gp,0x1
addi gp,gp,-1870 # _start-0x7ffff74e
li tp,0
li ra,10
nop
li sp,13
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2226+0xc
li t2,1
beq a4,t2,test_2227
j fail
test_2227:
lui gp,0x1
addi gp,gp,-1869 # _start-0x7ffff74d
li tp,0
li ra,16
nop
li sp,13
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2227+0xc
li t2,0
beq a4,t2,test_2228
j fail
test_2228:
lui gp,0x1
addi gp,gp,-1868 # _start-0x7ffff74c
li tp,0
li ra,9
nop
nop
li sp,13
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2228+0xc
li t2,1
beq a4,t2,test_2229
j fail
test_2229:
lui gp,0x1
addi gp,gp,-1867 # _start-0x7ffff74b
li tp,0
li sp,13
li ra,17
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2229+0xc
li t2,0
beq a4,t2,test_2230
j fail
test_2230:
lui gp,0x1
addi gp,gp,-1866 # _start-0x7ffff74a
li tp,0
li sp,13
li ra,8
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2230+0xc
li t2,1
beq a4,t2,test_2231
j fail
test_2231:
lui gp,0x1
addi gp,gp,-1865 # _start-0x7ffff749
li tp,0
li sp,13
li ra,18
nop
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2231+0xc
li t2,0
beq a4,t2,test_2232
j fail
test_2232:
lui gp,0x1
addi gp,gp,-1864 # _start-0x7ffff748
li tp,0
li sp,13
nop
li ra,7
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2232+0xc
li t2,1
beq a4,t2,test_2233
j fail
test_2233:
lui gp,0x1
addi gp,gp,-1863 # _start-0x7ffff747
li tp,0
li sp,13
nop
li ra,19
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2233+0xc
li t2,0
beq a4,t2,test_2234
j fail
test_2234:
lui gp,0x1
addi gp,gp,-1862 # _start-0x7ffff746
li tp,0
li sp,13
nop
nop
li ra,6
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2234+0xc
li t2,1
beq a4,t2,test_2235
j fail
test_2235:
lui gp,0x1
addi gp,gp,-1861 # _start-0x7ffff745
li ra,-1
sgtz sp,ra
li t2,0
beq sp,t2,test_2236
j fail
test_2236:
lui gp,0x1
addi gp,gp,-1860 # _start-0x7ffff744
li ra,-1
sltz sp,ra
li t2,1
beq sp,t2,test_2237
j fail
test_2237:
lui gp,0x1
addi gp,gp,-1859 # _start-0x7ffff743
sltz ra,zero
li t2,0
beq ra,t2,test_2238
j fail
test_2238:
lui gp,0x1
addi gp,gp,-1858 # _start-0x7ffff742
li ra,16
li sp,30
slt zero,ra,sp
li t2,0
beq zero,t2,test_2302
j fail
test_2302:
lui gp,0x1
addi gp,gp,-1794 # _start-0x7ffff702
li ra,0
slti a4,ra,0
li t2,0
beq a4,t2,test_2303
j fail
test_2303:
lui gp,0x1
addi gp,gp,-1793 # _start-0x7ffff701
li ra,1
slti a4,ra,1
li t2,0
beq a4,t2,test_2304
j fail
test_2304:
lui gp,0x1
addi gp,gp,-1792 # _start-0x7ffff700
li ra,3
slti a4,ra,7
li t2,1
beq a4,t2,test_2305
j fail
test_2305:
lui gp,0x1
addi gp,gp,-1791 # _start-0x7ffff6ff
li ra,7
slti a4,ra,3
li t2,0
beq a4,t2,test_2306
j fail
test_2306:
lui gp,0x1
addi gp,gp,-1790 # _start-0x7ffff6fe
li ra,0
slti a4,ra,-2048
li t2,0
beq a4,t2,test_2307
j fail
test_2307:
lui gp,0x1
addi gp,gp,-1789 # _start-0x7ffff6fd
lui ra,0x80000
slti a4,ra,0
li t2,1
beq a4,t2,test_2308
j fail
test_2308:
lui gp,0x1
addi gp,gp,-1788 # _start-0x7ffff6fc
lui ra,0x80000
slti a4,ra,-2048
li t2,1
beq a4,t2,test_2309
j fail
test_2309:
lui gp,0x1
addi gp,gp,-1787 # _start-0x7ffff6fb
li ra,0
slti a4,ra,2047
li t2,1
beq a4,t2,test_2310
j fail
test_2310:
lui gp,0x1
addi gp,gp,-1786 # _start-0x7ffff6fa
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
slti a4,ra,0
li t2,0
beq a4,t2,test_2311
j fail
test_2311:
lui gp,0x1
addi gp,gp,-1785 # _start-0x7ffff6f9
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
slti a4,ra,2047
li t2,0
beq a4,t2,test_2312
j fail
test_2312:
lui gp,0x1
addi gp,gp,-1784 # _start-0x7ffff6f8
lui ra,0x80000
slti a4,ra,2047
li t2,1
beq a4,t2,test_2313
j fail
test_2313:
lui gp,0x1
addi gp,gp,-1783 # _start-0x7ffff6f7
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
slti a4,ra,-2048
li t2,0
beq a4,t2,test_2314
j fail
test_2314:
lui gp,0x1
addi gp,gp,-1782 # _start-0x7ffff6f6
li ra,0
slti a4,ra,-1
li t2,0
beq a4,t2,test_2315
j fail
test_2315:
lui gp,0x1
addi gp,gp,-1781 # _start-0x7ffff6f5
li ra,-1
slti a4,ra,1
li t2,1
beq a4,t2,test_2316
j fail
test_2316:
lui gp,0x1
addi gp,gp,-1780 # _start-0x7ffff6f4
li ra,-1
slti a4,ra,-1
li t2,0
beq a4,t2,test_2317
j fail
test_2317:
lui gp,0x1
addi gp,gp,-1779 # _start-0x7ffff6f3
li ra,11
slti ra,ra,13
li t2,1
beq ra,t2,test_2318
j fail
test_2318:
lui gp,0x1
addi gp,gp,-1778 # _start-0x7ffff6f2
li tp,0
li ra,15
slti a4,ra,10
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2318+0xc
li t2,0
beq t1,t2,test_2319
j fail
test_2319:
lui gp,0x1
addi gp,gp,-1777 # _start-0x7ffff6f1
li tp,0
li ra,10
slti a4,ra,16
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2319+0xc
li t2,1
beq t1,t2,test_2320
j fail
test_2320:
lui gp,0x1
addi gp,gp,-1776 # _start-0x7ffff6f0
li tp,0
li ra,16
slti a4,ra,9
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2320+0xc
li t2,0
beq t1,t2,test_2321
j fail
test_2321:
lui gp,0x1
addi gp,gp,-1775 # _start-0x7ffff6ef
li tp,0
li ra,11
slti a4,ra,15
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2321+0xc
li t2,1
beq a4,t2,test_2322
j fail
test_2322:
lui gp,0x1
addi gp,gp,-1774 # _start-0x7ffff6ee
li tp,0
li ra,17
nop
slti a4,ra,8
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2322+0xc
li t2,0
beq a4,t2,test_2323
j fail
test_2323:
lui gp,0x1
addi gp,gp,-1773 # _start-0x7ffff6ed
li tp,0
li ra,12
nop
nop
slti a4,ra,14
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2323+0xc
li t2,1
beq a4,t2,test_2324
j fail
test_2324:
lui gp,0x1
addi gp,gp,-1772 # _start-0x7ffff6ec
slti ra,zero,-1
li t2,0
beq ra,t2,test_2325
j fail
test_2325:
lui gp,0x1
addi gp,gp,-1771 # _start-0x7ffff6eb
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
slti zero,ra,-1
li t2,0
beq zero,t2,test_2402
j fail
test_2402:
lui gp,0x1
addi gp,gp,-1694 # _start-0x7ffff69e
li ra,0
li sp,0
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2403
j fail
test_2403:
lui gp,0x1
addi gp,gp,-1693 # _start-0x7ffff69d
li ra,1
li sp,1
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2404
j fail
test_2404:
lui gp,0x1
addi gp,gp,-1692 # _start-0x7ffff69c
li ra,3
li sp,7
sltu a4,ra,sp
li t2,1
beq a4,t2,test_2405
j fail
test_2405:
lui gp,0x1
addi gp,gp,-1691 # _start-0x7ffff69b
li ra,7
li sp,3
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2406
j fail
test_2406:
lui gp,0x1
addi gp,gp,-1690 # _start-0x7ffff69a
li ra,0
lui sp,0xffff8
sltu a4,ra,sp
li t2,1
beq a4,t2,test_2407
j fail
test_2407:
lui gp,0x1
addi gp,gp,-1689 # _start-0x7ffff699
lui ra,0x80000
li sp,0
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2408
j fail
test_2408:
lui gp,0x1
addi gp,gp,-1688 # _start-0x7ffff698
lui ra,0x80000
lui sp,0xffff8
sltu a4,ra,sp
li t2,1
beq a4,t2,test_2409
j fail
test_2409:
lui gp,0x1
addi gp,gp,-1687 # _start-0x7ffff697
li ra,0
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sltu a4,ra,sp
li t2,1
beq a4,t2,test_2410
j fail
test_2410:
lui gp,0x1
addi gp,gp,-1686 # _start-0x7ffff696
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,0
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2411
j fail
test_2411:
lui gp,0x1
addi gp,gp,-1685 # _start-0x7ffff695
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2412
j fail
test_2412:
lui gp,0x1
addi gp,gp,-1684 # _start-0x7ffff694
lui ra,0x80000
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2413
j fail
test_2413:
lui gp,0x1
addi gp,gp,-1683 # _start-0x7ffff693
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0xffff8
sltu a4,ra,sp
li t2,1
beq a4,t2,test_2414
j fail
test_2414:
lui gp,0x1
addi gp,gp,-1682 # _start-0x7ffff692
li ra,0
li sp,-1
sltu a4,ra,sp
li t2,1
beq a4,t2,test_2415
j fail
test_2415:
lui gp,0x1
addi gp,gp,-1681 # _start-0x7ffff691
li ra,-1
li sp,1
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2416
j fail
test_2416:
lui gp,0x1
addi gp,gp,-1680 # _start-0x7ffff690
li ra,-1
li sp,-1
sltu a4,ra,sp
li t2,0
beq a4,t2,test_2417
j fail
test_2417:
lui gp,0x1
addi gp,gp,-1679 # _start-0x7ffff68f
li ra,14
li sp,13
sltu ra,ra,sp
li t2,0
beq ra,t2,test_2418
j fail
test_2418:
lui gp,0x1
addi gp,gp,-1678 # _start-0x7ffff68e
li ra,11
li sp,13
sltu sp,ra,sp
li t2,1
beq sp,t2,test_2419
j fail
test_2419:
lui gp,0x1
addi gp,gp,-1677 # _start-0x7ffff68d
li ra,13
sltu ra,ra,ra
li t2,0
beq ra,t2,test_2420
j fail
test_2420:
lui gp,0x1
addi gp,gp,-1676 # _start-0x7ffff68c
li tp,0
li ra,11
li sp,13
sltu a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2420+0xc
li t2,1
beq t1,t2,test_2421
j fail
test_2421:
lui gp,0x1
addi gp,gp,-1675 # _start-0x7ffff68b
li tp,0
li ra,14
li sp,13
sltu a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2421+0xc
li t2,0
beq t1,t2,test_2422
j fail
test_2422:
lui gp,0x1
addi gp,gp,-1674 # _start-0x7ffff68a
li tp,0
li ra,12
li sp,13
sltu a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2422+0xc
li t2,1
beq t1,t2,test_2423
j fail
test_2423:
lui gp,0x1
addi gp,gp,-1673 # _start-0x7ffff689
li tp,0
li ra,14
li sp,13
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2423+0xc
li t2,0
beq a4,t2,test_2424
j fail
test_2424:
lui gp,0x1
addi gp,gp,-1672 # _start-0x7ffff688
li tp,0
li ra,11
li sp,13
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2424+0xc
li t2,1
beq a4,t2,test_2425
j fail
test_2425:
lui gp,0x1
addi gp,gp,-1671 # _start-0x7ffff687
li tp,0
li ra,15
li sp,13
nop
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2425+0xc
li t2,0
beq a4,t2,test_2426
j fail
test_2426:
lui gp,0x1
addi gp,gp,-1670 # _start-0x7ffff686
li tp,0
li ra,10
nop
li sp,13
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2426+0xc
li t2,1
beq a4,t2,test_2427
j fail
test_2427:
lui gp,0x1
addi gp,gp,-1669 # _start-0x7ffff685
li tp,0
li ra,16
nop
li sp,13
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2427+0xc
li t2,0
beq a4,t2,test_2428
j fail
test_2428:
lui gp,0x1
addi gp,gp,-1668 # _start-0x7ffff684
li tp,0
li ra,9
nop
nop
li sp,13
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2428+0xc
li t2,1
beq a4,t2,test_2429
j fail
test_2429:
lui gp,0x1
addi gp,gp,-1667 # _start-0x7ffff683
li tp,0
li sp,13
li ra,17
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2429+0xc
li t2,0
beq a4,t2,test_2430
j fail
test_2430:
lui gp,0x1
addi gp,gp,-1666 # _start-0x7ffff682
li tp,0
li sp,13
li ra,8
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2430+0xc
li t2,1
beq a4,t2,test_2431
j fail
test_2431:
lui gp,0x1
addi gp,gp,-1665 # _start-0x7ffff681
li tp,0
li sp,13
li ra,18
nop
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2431+0xc
li t2,0
beq a4,t2,test_2432
j fail
test_2432:
lui gp,0x1
addi gp,gp,-1664 # _start-0x7ffff680
li tp,0
li sp,13
nop
li ra,7
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2432+0xc
li t2,1
beq a4,t2,test_2433
j fail
test_2433:
lui gp,0x1
addi gp,gp,-1663 # _start-0x7ffff67f
li tp,0
li sp,13
nop
li ra,19
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2433+0xc
li t2,0
beq a4,t2,test_2434
j fail
test_2434:
lui gp,0x1
addi gp,gp,-1662 # _start-0x7ffff67e
li tp,0
li sp,13
nop
nop
li ra,6
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2434+0xc
li t2,1
beq a4,t2,test_2435
j fail
test_2435:
lui gp,0x1
addi gp,gp,-1661 # _start-0x7ffff67d
li ra,-1
snez sp,ra
li t2,1
beq sp,t2,test_2436
j fail
test_2436:
lui gp,0x1
addi gp,gp,-1660 # _start-0x7ffff67c
li ra,-1
sltu sp,ra,zero
li t2,0
beq sp,t2,test_2437
j fail
test_2437:
lui gp,0x1
addi gp,gp,-1659 # _start-0x7ffff67b
snez ra,zero
li t2,0
beq ra,t2,test_2438
j fail
test_2438:
lui gp,0x1
addi gp,gp,-1658 # _start-0x7ffff67a
li ra,16
li sp,30
sltu zero,ra,sp
li t2,0
beq zero,t2,test_2502
j fail
test_2502:
lui gp,0x1
addi gp,gp,-1594 # _start-0x7ffff63a
li ra,0
sltiu a4,ra,0
li t2,0
beq a4,t2,test_2503
j fail
test_2503:
lui gp,0x1
addi gp,gp,-1593 # _start-0x7ffff639
li ra,1
seqz a4,ra
li t2,0
beq a4,t2,test_2504
j fail
test_2504:
lui gp,0x1
addi gp,gp,-1592 # _start-0x7ffff638
li ra,3
sltiu a4,ra,7
li t2,1
beq a4,t2,test_2505
j fail
test_2505:
lui gp,0x1
addi gp,gp,-1591 # _start-0x7ffff637
li ra,7
sltiu a4,ra,3
li t2,0
beq a4,t2,test_2506
j fail
test_2506:
lui gp,0x1
addi gp,gp,-1590 # _start-0x7ffff636
li ra,0
sltiu a4,ra,-2048
li t2,1
beq a4,t2,test_2507
j fail
test_2507:
lui gp,0x1
addi gp,gp,-1589 # _start-0x7ffff635
lui ra,0x80000
sltiu a4,ra,0
li t2,0
beq a4,t2,test_2508
j fail
test_2508:
lui gp,0x1
addi gp,gp,-1588 # _start-0x7ffff634
lui ra,0x80000
sltiu a4,ra,-2048
li t2,1
beq a4,t2,test_2509
j fail
test_2509:
lui gp,0x1
addi gp,gp,-1587 # _start-0x7ffff633
li ra,0
sltiu a4,ra,2047
li t2,1
beq a4,t2,test_2510
j fail
test_2510:
lui gp,0x1
addi gp,gp,-1586 # _start-0x7ffff632
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
sltiu a4,ra,0
li t2,0
beq a4,t2,test_2511
j fail
test_2511:
lui gp,0x1
addi gp,gp,-1585 # _start-0x7ffff631
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
sltiu a4,ra,2047
li t2,0
beq a4,t2,test_2512
j fail
test_2512:
lui gp,0x1
addi gp,gp,-1584 # _start-0x7ffff630
lui ra,0x80000
sltiu a4,ra,2047
li t2,0
beq a4,t2,test_2513
j fail
test_2513:
lui gp,0x1
addi gp,gp,-1583 # _start-0x7ffff62f
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
sltiu a4,ra,-2048
li t2,1
beq a4,t2,test_2514
j fail
test_2514:
lui gp,0x1
addi gp,gp,-1582 # _start-0x7ffff62e
li ra,0
sltiu a4,ra,-1
li t2,1
beq a4,t2,test_2515
j fail
test_2515:
lui gp,0x1
addi gp,gp,-1581 # _start-0x7ffff62d
li ra,-1
seqz a4,ra
li t2,0
beq a4,t2,test_2516
j fail
test_2516:
lui gp,0x1
addi gp,gp,-1580 # _start-0x7ffff62c
li ra,-1
sltiu a4,ra,-1
li t2,0
beq a4,t2,test_2517
j fail
test_2517:
lui gp,0x1
addi gp,gp,-1579 # _start-0x7ffff62b
li ra,11
sltiu ra,ra,13
li t2,1
beq ra,t2,test_2518
j fail
test_2518:
lui gp,0x1
addi gp,gp,-1578 # _start-0x7ffff62a
li tp,0
li ra,15
sltiu a4,ra,10
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2518+0xc
li t2,0
beq t1,t2,test_2519
j fail
test_2519:
lui gp,0x1
addi gp,gp,-1577 # _start-0x7ffff629
li tp,0
li ra,10
sltiu a4,ra,16
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2519+0xc
li t2,1
beq t1,t2,test_2520
j fail
test_2520:
lui gp,0x1
addi gp,gp,-1576 # _start-0x7ffff628
li tp,0
li ra,16
sltiu a4,ra,9
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2520+0xc
li t2,0
beq t1,t2,test_2521
j fail
test_2521:
lui gp,0x1
addi gp,gp,-1575 # _start-0x7ffff627
li tp,0
li ra,11
sltiu a4,ra,15
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2521+0xc
li t2,1
beq a4,t2,test_2522
j fail
test_2522:
lui gp,0x1
addi gp,gp,-1574 # _start-0x7ffff626
li tp,0
li ra,17
nop
sltiu a4,ra,8
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2522+0xc
li t2,0
beq a4,t2,test_2523
j fail
test_2523:
lui gp,0x1
addi gp,gp,-1573 # _start-0x7ffff625
li tp,0
li ra,12
nop
nop
sltiu a4,ra,14
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2523+0xc
li t2,1
beq a4,t2,test_2524
j fail
test_2524:
lui gp,0x1
addi gp,gp,-1572 # _start-0x7ffff624
sltiu ra,zero,-1
li t2,1
beq ra,t2,test_2525
j fail
test_2525:
lui gp,0x1
addi gp,gp,-1571 # _start-0x7ffff623
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
sltiu zero,ra,-1
li t2,0
beq zero,t2,test_2602
j fail
test_2602:
lui gp,0x1
addi gp,gp,-1494 # _start-0x7ffff5d6
lui ra,0x80000
li sp,0
sra a4,ra,sp
lui t2,0x80000
beq a4,t2,test_2603
j fail
test_2603:
lui gp,0x1
addi gp,gp,-1493 # _start-0x7ffff5d5
lui ra,0x80000
li sp,1
sra a4,ra,sp
lui t2,0xc0000
beq a4,t2,test_2604
j fail
test_2604:
lui gp,0x1
addi gp,gp,-1492 # _start-0x7ffff5d4
lui ra,0x80000
li sp,7
sra a4,ra,sp
lui t2,0xff000
beq a4,t2,test_2605
j fail
test_2605:
lui gp,0x1
addi gp,gp,-1491 # _start-0x7ffff5d3
lui ra,0x80000
li sp,14
sra a4,ra,sp
lui t2,0xfffe0
beq a4,t2,test_2606
j fail
test_2606:
lui gp,0x1
addi gp,gp,-1490 # _start-0x7ffff5d2
lui ra,0x80000
addi ra,ra,1 # _end+0xffff5f31
li sp,31
sra a4,ra,sp
li t2,-1
beq a4,t2,test_2607
j fail
test_2607:
lui gp,0x1
addi gp,gp,-1489 # _start-0x7ffff5d1
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,0
sra a4,ra,sp
lui t2,0x80000
addi t2,t2,-1 # _end+0xffff5f2f
beq a4,t2,test_2608
j fail
test_2608:
lui gp,0x1
addi gp,gp,-1488 # _start-0x7ffff5d0
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,1
sra a4,ra,sp
lui t2,0x40000
addi t2,t2,-1 # _start-0x40000001
beq a4,t2,test_2609
j fail
test_2609:
lui gp,0x1
addi gp,gp,-1487 # _start-0x7ffff5cf
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,7
sra a4,ra,sp
lui t2,0x1000
addi t2,t2,-1 # _start-0x7f000001
beq a4,t2,test_2610
j fail
test_2610:
lui gp,0x1
addi gp,gp,-1486 # _start-0x7ffff5ce
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,14
sra a4,ra,sp
lui t2,0x20
addi t2,t2,-1 # _start-0x7ffe0001
beq a4,t2,test_2611
j fail
test_2611:
lui gp,0x1
addi gp,gp,-1485 # _start-0x7ffff5cd
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,31
sra a4,ra,sp
li t2,0
beq a4,t2,test_2612
j fail
test_2612:
lui gp,0x1
addi gp,gp,-1484 # _start-0x7ffff5cc
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,0
sra a4,ra,sp
lui t2,0x81818
addi t2,t2,385 # _end+0x180e0b1
beq a4,t2,test_2613
j fail
test_2613:
lui gp,0x1
addi gp,gp,-1483 # _start-0x7ffff5cb
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,1
sra a4,ra,sp
lui t2,0xc0c0c
addi t2,t2,192 # _end+0x40c01ff0
beq a4,t2,test_2614
j fail
test_2614:
lui gp,0x1
addi gp,gp,-1482 # _start-0x7ffff5ca
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,7
sra a4,ra,sp
lui t2,0xff030
addi t2,t2,771 # _end+0x7f026233
beq a4,t2,test_2615
j fail
test_2615:
lui gp,0x1
addi gp,gp,-1481 # _start-0x7ffff5c9
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,14
sra a4,ra,sp
lui t2,0xfffe0
addi t2,t2,1542 # _end+0x7ffd6536
beq a4,t2,test_2616
j fail
test_2616:
lui gp,0x1
addi gp,gp,-1480 # _start-0x7ffff5c8
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,31
sra a4,ra,sp
li t2,-1
beq a4,t2,test_2617
j fail
test_2617:
lui gp,0x1
addi gp,gp,-1479 # _start-0x7ffff5c7
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,-64
sra a4,ra,sp
lui t2,0x81818
addi t2,t2,385 # _end+0x180e0b1
beq a4,t2,test_2618
j fail
test_2618:
lui gp,0x1
addi gp,gp,-1478 # _start-0x7ffff5c6
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,-63
sra a4,ra,sp
lui t2,0xc0c0c
addi t2,t2,192 # _end+0x40c01ff0
beq a4,t2,test_2619
j fail
test_2619:
lui gp,0x1
addi gp,gp,-1477 # _start-0x7ffff5c5
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,-57
sra a4,ra,sp
lui t2,0xff030
addi t2,t2,771 # _end+0x7f026233
beq a4,t2,test_2620
j fail
test_2620:
lui gp,0x1
addi gp,gp,-1476 # _start-0x7ffff5c4
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,-50
sra a4,ra,sp
lui t2,0xfffe0
addi t2,t2,1542 # _end+0x7ffd6536
beq a4,t2,test_2621
j fail
test_2621:
lui gp,0x1
addi gp,gp,-1475 # _start-0x7ffff5c3
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
li sp,-1
sra a4,ra,sp
li t2,-1
beq a4,t2,test_2622
j fail
test_2622:
lui gp,0x1
addi gp,gp,-1474 # _start-0x7ffff5c2
lui ra,0x80000
li sp,7
sra ra,ra,sp
lui t2,0xff000
beq ra,t2,test_2623
j fail
test_2623:
lui gp,0x1
addi gp,gp,-1473 # _start-0x7ffff5c1
lui ra,0x80000
li sp,14
sra sp,ra,sp
lui t2,0xfffe0
beq sp,t2,test_2624
j fail
test_2624:
lui gp,0x1
addi gp,gp,-1472 # _start-0x7ffff5c0
li ra,7
sra ra,ra,ra
li t2,0
beq ra,t2,test_2625
j fail
test_2625:
lui gp,0x1
addi gp,gp,-1471 # _start-0x7ffff5bf
li tp,0
lui ra,0x80000
li sp,7
sra a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2625+0xc
lui t2,0xff000
beq t1,t2,test_2626
j fail
test_2626:
lui gp,0x1
addi gp,gp,-1470 # _start-0x7ffff5be
li tp,0
lui ra,0x80000
li sp,14
sra a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2626+0xc
lui t2,0xfffe0
beq t1,t2,test_2627
j fail
test_2627:
lui gp,0x1
addi gp,gp,-1469 # _start-0x7ffff5bd
li tp,0
lui ra,0x80000
li sp,31
sra a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2627+0xc
li t2,-1
beq t1,t2,test_2628
j fail
test_2628:
lui gp,0x1
addi gp,gp,-1468 # _start-0x7ffff5bc
li tp,0
lui ra,0x80000
li sp,7
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2628+0xc
lui t2,0xff000
beq a4,t2,test_2629
j fail
test_2629:
lui gp,0x1
addi gp,gp,-1467 # _start-0x7ffff5bb
li tp,0
lui ra,0x80000
li sp,14
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2629+0xc
lui t2,0xfffe0
beq a4,t2,test_2630
j fail
test_2630:
lui gp,0x1
addi gp,gp,-1466 # _start-0x7ffff5ba
li tp,0
lui ra,0x80000
li sp,31
nop
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2630+0xc
li t2,-1
beq a4,t2,test_2631
j fail
test_2631:
lui gp,0x1
addi gp,gp,-1465 # _start-0x7ffff5b9
li tp,0
lui ra,0x80000
nop
li sp,7
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2631+0xc
lui t2,0xff000
beq a4,t2,test_2632
j fail
test_2632:
lui gp,0x1
addi gp,gp,-1464 # _start-0x7ffff5b8
li tp,0
lui ra,0x80000
nop
li sp,14
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2632+0xc
lui t2,0xfffe0
beq a4,t2,test_2633
j fail
test_2633:
lui gp,0x1
addi gp,gp,-1463 # _start-0x7ffff5b7
li tp,0
lui ra,0x80000
nop
nop
li sp,31
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2633+0xc
li t2,-1
beq a4,t2,test_2634
j fail
test_2634:
lui gp,0x1
addi gp,gp,-1462 # _start-0x7ffff5b6
li tp,0
li sp,7
lui ra,0x80000
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2634+0xc
lui t2,0xff000
beq a4,t2,test_2635
j fail
test_2635:
lui gp,0x1
addi gp,gp,-1461 # _start-0x7ffff5b5
li tp,0
li sp,14
lui ra,0x80000
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2635+0xc
lui t2,0xfffe0
beq a4,t2,test_2636
j fail
test_2636:
lui gp,0x1
addi gp,gp,-1460 # _start-0x7ffff5b4
li tp,0
li sp,31
lui ra,0x80000
nop
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2636+0xc
li t2,-1
beq a4,t2,test_2637
j fail
test_2637:
lui gp,0x1
addi gp,gp,-1459 # _start-0x7ffff5b3
li tp,0
li sp,7
nop
lui ra,0x80000
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2637+0xc
lui t2,0xff000
beq a4,t2,test_2638
j fail
test_2638:
lui gp,0x1
addi gp,gp,-1458 # _start-0x7ffff5b2
li tp,0
li sp,14
nop
lui ra,0x80000
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2638+0xc
lui t2,0xfffe0
beq a4,t2,test_2639
j fail
test_2639:
lui gp,0x1
addi gp,gp,-1457 # _start-0x7ffff5b1
li tp,0
li sp,31
nop
nop
lui ra,0x80000
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2639+0xc
li t2,-1
beq a4,t2,test_2640
j fail
test_2640:
lui gp,0x1
addi gp,gp,-1456 # _start-0x7ffff5b0
li ra,15
sra sp,zero,ra
li t2,0
beq sp,t2,test_2641
j fail
test_2641:
lui gp,0x1
addi gp,gp,-1455 # _start-0x7ffff5af
li ra,32
sra sp,ra,zero
li t2,32
beq sp,t2,test_2642
j fail
test_2642:
lui gp,0x1
addi gp,gp,-1454 # _start-0x7ffff5ae
sra ra,zero,zero
li t2,0
beq ra,t2,test_2643
j fail
test_2643:
lui gp,0x1
addi gp,gp,-1453 # _start-0x7ffff5ad
li ra,1024
lui sp,0x1
addi sp,sp,-2048 # _start-0x7ffff800
sra zero,ra,sp
li t2,0
beq zero,t2,test_2702
j fail
test_2702:
lui gp,0x1
addi gp,gp,-1394 # _start-0x7ffff572
li ra,0
srai a4,ra,0x0
li t2,0
beq a4,t2,test_2703
j fail
test_2703:
lui gp,0x1
addi gp,gp,-1393 # _start-0x7ffff571
lui ra,0x80000
srai a4,ra,0x1
lui t2,0xc0000
beq a4,t2,test_2704
j fail
test_2704:
lui gp,0x1
addi gp,gp,-1392 # _start-0x7ffff570
lui ra,0x80000
srai a4,ra,0x7
lui t2,0xff000
beq a4,t2,test_2705
j fail
test_2705:
lui gp,0x1
addi gp,gp,-1391 # _start-0x7ffff56f
lui ra,0x80000
srai a4,ra,0xe
lui t2,0xfffe0
beq a4,t2,test_2706
j fail
test_2706:
lui gp,0x1
addi gp,gp,-1390 # _start-0x7ffff56e
lui ra,0x80000
addi ra,ra,1 # _end+0xffff5f31
srai a4,ra,0x1f
li t2,-1
beq a4,t2,test_2707
j fail
test_2707:
lui gp,0x1
addi gp,gp,-1389 # _start-0x7ffff56d
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
srai a4,ra,0x0
lui t2,0x80000
addi t2,t2,-1 # _end+0xffff5f2f
beq a4,t2,test_2708
j fail
test_2708:
lui gp,0x1
addi gp,gp,-1388 # _start-0x7ffff56c
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
srai a4,ra,0x1
lui t2,0x40000
addi t2,t2,-1 # _start-0x40000001
beq a4,t2,test_2709
j fail
test_2709:
lui gp,0x1
addi gp,gp,-1387 # _start-0x7ffff56b
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
srai a4,ra,0x7
lui t2,0x1000
addi t2,t2,-1 # _start-0x7f000001
beq a4,t2,test_2710
j fail
test_2710:
lui gp,0x1
addi gp,gp,-1386 # _start-0x7ffff56a
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
srai a4,ra,0xe
lui t2,0x20
addi t2,t2,-1 # _start-0x7ffe0001
beq a4,t2,test_2711
j fail
test_2711:
lui gp,0x1
addi gp,gp,-1385 # _start-0x7ffff569
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
srai a4,ra,0x1f
li t2,0
beq a4,t2,test_2712
j fail
test_2712:
lui gp,0x1
addi gp,gp,-1384 # _start-0x7ffff568
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
srai a4,ra,0x0
lui t2,0x81818
addi t2,t2,385 # _end+0x180e0b1
beq a4,t2,test_2713
j fail
test_2713:
lui gp,0x1
addi gp,gp,-1383 # _start-0x7ffff567
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
srai a4,ra,0x1
lui t2,0xc0c0c
addi t2,t2,192 # _end+0x40c01ff0
beq a4,t2,test_2714
j fail
test_2714:
lui gp,0x1
addi gp,gp,-1382 # _start-0x7ffff566
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
srai a4,ra,0x7
lui t2,0xff030
addi t2,t2,771 # _end+0x7f026233
beq a4,t2,test_2715
j fail
test_2715:
lui gp,0x1
addi gp,gp,-1381 # _start-0x7ffff565
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
srai a4,ra,0xe
lui t2,0xfffe0
addi t2,t2,1542 # _end+0x7ffd6536
beq a4,t2,test_2716
j fail
test_2716:
lui gp,0x1
addi gp,gp,-1380 # _start-0x7ffff564
lui ra,0x81818
addi ra,ra,385 # _end+0x180e0b1
srai a4,ra,0x1f
li t2,-1
beq a4,t2,test_2717
j fail
test_2717:
lui gp,0x1
addi gp,gp,-1379 # _start-0x7ffff563
lui ra,0x80000
srai ra,ra,0x7
lui t2,0xff000
beq ra,t2,test_2718
j fail
test_2718:
lui gp,0x1
addi gp,gp,-1378 # _start-0x7ffff562
li tp,0
lui ra,0x80000
srai a4,ra,0x7
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2718+0xc
lui t2,0xff000
beq t1,t2,test_2719
j fail
test_2719:
lui gp,0x1
addi gp,gp,-1377 # _start-0x7ffff561
li tp,0
lui ra,0x80000
srai a4,ra,0xe
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2719+0xc
lui t2,0xfffe0
beq t1,t2,test_2720
j fail
test_2720:
lui gp,0x1
addi gp,gp,-1376 # _start-0x7ffff560
li tp,0
lui ra,0x80000
addi ra,ra,1 # _end+0xffff5f31
srai a4,ra,0x1f
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2720+0xc
li t2,-1
beq t1,t2,test_2721
j fail
test_2721:
lui gp,0x1
addi gp,gp,-1375 # _start-0x7ffff55f
li tp,0
lui ra,0x80000
srai a4,ra,0x7
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2721+0xc
lui t2,0xff000
beq a4,t2,test_2722
j fail
test_2722:
lui gp,0x1
addi gp,gp,-1374 # _start-0x7ffff55e
li tp,0
lui ra,0x80000
nop
srai a4,ra,0xe
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2722+0xc
lui t2,0xfffe0
beq a4,t2,test_2723
j fail
test_2723:
lui gp,0x1
addi gp,gp,-1373 # _start-0x7ffff55d
li tp,0
lui ra,0x80000
addi ra,ra,1 # _end+0xffff5f31
nop
nop
srai a4,ra,0x1f
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2723+0xc
li t2,-1
beq a4,t2,test_2724
j fail
test_2724:
lui gp,0x1
addi gp,gp,-1372 # _start-0x7ffff55c
srai ra,zero,0x4
li t2,0
beq ra,t2,test_2725
j fail
test_2725:
lui gp,0x1
addi gp,gp,-1371 # _start-0x7ffff55b
li ra,33
srai zero,ra,0xa
li t2,0
beq zero,t2,test_2802
j fail
test_2802:
lui gp,0x1
addi gp,gp,-1294 # _start-0x7ffff50e
lui ra,0x80000
li sp,0
srl a4,ra,sp
lui t2,0x80000
beq a4,t2,test_2803
j fail
test_2803:
lui gp,0x1
addi gp,gp,-1293 # _start-0x7ffff50d
lui ra,0x80000
li sp,1
srl a4,ra,sp
lui t2,0x40000
beq a4,t2,test_2804
j fail
test_2804:
lui gp,0x1
addi gp,gp,-1292 # _start-0x7ffff50c
lui ra,0x80000
li sp,7
srl a4,ra,sp
lui t2,0x1000
beq a4,t2,test_2805
j fail
test_2805:
lui gp,0x1
addi gp,gp,-1291 # _start-0x7ffff50b
lui ra,0x80000
li sp,14
srl a4,ra,sp
lui t2,0x20
beq a4,t2,test_2806
j fail
test_2806:
lui gp,0x1
addi gp,gp,-1290 # _start-0x7ffff50a
lui ra,0x80000
addi ra,ra,1 # _end+0xffff5f31
li sp,31
srl a4,ra,sp
li t2,1
beq a4,t2,test_2807
j fail
test_2807:
lui gp,0x1
addi gp,gp,-1289 # _start-0x7ffff509
li ra,-1
li sp,0
srl a4,ra,sp
li t2,-1
beq a4,t2,test_2808
j fail
test_2808:
lui gp,0x1
addi gp,gp,-1288 # _start-0x7ffff508
li ra,-1
li sp,1
srl a4,ra,sp
lui t2,0x80000
addi t2,t2,-1 # _end+0xffff5f2f
beq a4,t2,test_2809
j fail
test_2809:
lui gp,0x1
addi gp,gp,-1287 # _start-0x7ffff507
li ra,-1
li sp,7
srl a4,ra,sp
lui t2,0x2000
addi t2,t2,-1 # _start-0x7e000001
beq a4,t2,test_2810
j fail
test_2810:
lui gp,0x1
addi gp,gp,-1286 # _start-0x7ffff506
li ra,-1
li sp,14
srl a4,ra,sp
lui t2,0x40
addi t2,t2,-1 # _start-0x7ffc0001
beq a4,t2,test_2811
j fail
test_2811:
lui gp,0x1
addi gp,gp,-1285 # _start-0x7ffff505
li ra,-1
li sp,31
srl a4,ra,sp
li t2,1
beq a4,t2,test_2812
j fail
test_2812:
lui gp,0x1
addi gp,gp,-1284 # _start-0x7ffff504
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,0
srl a4,ra,sp
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
beq a4,t2,test_2813
j fail
test_2813:
lui gp,0x1
addi gp,gp,-1283 # _start-0x7ffff503
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,1
srl a4,ra,sp
lui t2,0x10909
addi t2,t2,144 # _start-0x6f6f6f70
beq a4,t2,test_2814
j fail
test_2814:
lui gp,0x1
addi gp,gp,-1282 # _start-0x7ffff502
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,7
srl a4,ra,sp
lui t2,0x424
addi t2,t2,578 # _start-0x7fbdbdbe
beq a4,t2,test_2815
j fail
test_2815:
lui gp,0x1
addi gp,gp,-1281 # _start-0x7ffff501
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,14
srl a4,ra,sp
lui t2,0x8
addi t2,t2,1156 # _start-0x7fff7b7c
beq a4,t2,test_2816
j fail
test_2816:
lui gp,0x1
addi gp,gp,-1280 # _start-0x7ffff500
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,31
srl a4,ra,sp
li t2,0
beq a4,t2,test_2817
j fail
test_2817:
lui gp,0x1
addi gp,gp,-1279 # _start-0x7ffff4ff
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-64
srl a4,ra,sp
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
beq a4,t2,test_2818
j fail
test_2818:
lui gp,0x1
addi gp,gp,-1278 # _start-0x7ffff4fe
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-63
srl a4,ra,sp
lui t2,0x10909
addi t2,t2,144 # _start-0x6f6f6f70
beq a4,t2,test_2819
j fail
test_2819:
lui gp,0x1
addi gp,gp,-1277 # _start-0x7ffff4fd
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-57
srl a4,ra,sp
lui t2,0x424
addi t2,t2,578 # _start-0x7fbdbdbe
beq a4,t2,test_2820
j fail
test_2820:
lui gp,0x1
addi gp,gp,-1276 # _start-0x7ffff4fc
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-50
srl a4,ra,sp
lui t2,0x8
addi t2,t2,1156 # _start-0x7fff7b7c
beq a4,t2,test_2821
j fail
test_2821:
lui gp,0x1
addi gp,gp,-1275 # _start-0x7ffff4fb
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-1
srl a4,ra,sp
li t2,0
beq a4,t2,test_2822
j fail
test_2822:
lui gp,0x1
addi gp,gp,-1274 # _start-0x7ffff4fa
lui ra,0x80000
li sp,7
srl ra,ra,sp
lui t2,0x1000
beq ra,t2,test_2823
j fail
test_2823:
lui gp,0x1
addi gp,gp,-1273 # _start-0x7ffff4f9
lui ra,0x80000
li sp,14
srl sp,ra,sp
lui t2,0x20
beq sp,t2,test_2824
j fail
test_2824:
lui gp,0x1
addi gp,gp,-1272 # _start-0x7ffff4f8
li ra,7
srl ra,ra,ra
li t2,0
beq ra,t2,test_2825
j fail
test_2825:
lui gp,0x1
addi gp,gp,-1271 # _start-0x7ffff4f7
li tp,0
lui ra,0x80000
li sp,7
srl a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2825+0xc
lui t2,0x1000
beq t1,t2,test_2826
j fail
test_2826:
lui gp,0x1
addi gp,gp,-1270 # _start-0x7ffff4f6
li tp,0
lui ra,0x80000
li sp,14
srl a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2826+0xc
lui t2,0x20
beq t1,t2,test_2827
j fail
test_2827:
lui gp,0x1
addi gp,gp,-1269 # _start-0x7ffff4f5
li tp,0
lui ra,0x80000
li sp,31
srl a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2827+0xc
li t2,1
beq t1,t2,test_2828
j fail
test_2828:
lui gp,0x1
addi gp,gp,-1268 # _start-0x7ffff4f4
li tp,0
lui ra,0x80000
li sp,7
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2828+0xc
lui t2,0x1000
beq a4,t2,test_2829
j fail
test_2829:
lui gp,0x1
addi gp,gp,-1267 # _start-0x7ffff4f3
li tp,0
lui ra,0x80000
li sp,14
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2829+0xc
lui t2,0x20
beq a4,t2,test_2830
j fail
test_2830:
lui gp,0x1
addi gp,gp,-1266 # _start-0x7ffff4f2
li tp,0
lui ra,0x80000
li sp,31
nop
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2830+0xc
li t2,1
beq a4,t2,test_2831
j fail
test_2831:
lui gp,0x1
addi gp,gp,-1265 # _start-0x7ffff4f1
li tp,0
lui ra,0x80000
nop
li sp,7
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2831+0xc
lui t2,0x1000
beq a4,t2,test_2832
j fail
test_2832:
lui gp,0x1
addi gp,gp,-1264 # _start-0x7ffff4f0
li tp,0
lui ra,0x80000
nop
li sp,14
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2832+0xc
lui t2,0x20
beq a4,t2,test_2833
j fail
test_2833:
lui gp,0x1
addi gp,gp,-1263 # _start-0x7ffff4ef
li tp,0
lui ra,0x80000
nop
nop
li sp,31
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2833+0xc
li t2,1
beq a4,t2,test_2834
j fail
test_2834:
lui gp,0x1
addi gp,gp,-1262 # _start-0x7ffff4ee
li tp,0
li sp,7
lui ra,0x80000
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2834+0xc
lui t2,0x1000
beq a4,t2,test_2835
j fail
test_2835:
lui gp,0x1
addi gp,gp,-1261 # _start-0x7ffff4ed
li tp,0
li sp,14
lui ra,0x80000
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2835+0xc
lui t2,0x20
beq a4,t2,test_2836
j fail
test_2836:
lui gp,0x1
addi gp,gp,-1260 # _start-0x7ffff4ec
li tp,0
li sp,31
lui ra,0x80000
nop
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2836+0xc
li t2,1
beq a4,t2,test_2837
j fail
test_2837:
lui gp,0x1
addi gp,gp,-1259 # _start-0x7ffff4eb
li tp,0
li sp,7
nop
lui ra,0x80000
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2837+0xc
lui t2,0x1000
beq a4,t2,test_2838
j fail
test_2838:
lui gp,0x1
addi gp,gp,-1258 # _start-0x7ffff4ea
li tp,0
li sp,14
nop
lui ra,0x80000
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2838+0xc
lui t2,0x20
beq a4,t2,test_2839
j fail
test_2839:
lui gp,0x1
addi gp,gp,-1257 # _start-0x7ffff4e9
li tp,0
li sp,31
nop
nop
lui ra,0x80000
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2839+0xc
li t2,1
beq a4,t2,test_2840
j fail
test_2840:
lui gp,0x1
addi gp,gp,-1256 # _start-0x7ffff4e8
li ra,15
srl sp,zero,ra
li t2,0
beq sp,t2,test_2841
j fail
test_2841:
lui gp,0x1
addi gp,gp,-1255 # _start-0x7ffff4e7
li ra,32
srl sp,ra,zero
li t2,32
beq sp,t2,test_2842
j fail
test_2842:
lui gp,0x1
addi gp,gp,-1254 # _start-0x7ffff4e6
srl ra,zero,zero
li t2,0
beq ra,t2,test_2843
j fail
test_2843:
lui gp,0x1
addi gp,gp,-1253 # _start-0x7ffff4e5
li ra,1024
lui sp,0x1
addi sp,sp,-2048 # _start-0x7ffff800
srl zero,ra,sp
li t2,0
beq zero,t2,test_2902
j fail
test_2902:
lui gp,0x1
addi gp,gp,-1194 # _start-0x7ffff4aa
lui ra,0x80000
srli a4,ra,0x0
lui t2,0x80000
beq a4,t2,test_2903
j fail
test_2903:
lui gp,0x1
addi gp,gp,-1193 # _start-0x7ffff4a9
lui ra,0x80000
srli a4,ra,0x1
lui t2,0x40000
beq a4,t2,test_2904
j fail
test_2904:
lui gp,0x1
addi gp,gp,-1192 # _start-0x7ffff4a8
lui ra,0x80000
srli a4,ra,0x7
lui t2,0x1000
beq a4,t2,test_2905
j fail
test_2905:
lui gp,0x1
addi gp,gp,-1191 # _start-0x7ffff4a7
lui ra,0x80000
srli a4,ra,0xe
lui t2,0x20
beq a4,t2,test_2906
j fail
test_2906:
lui gp,0x1
addi gp,gp,-1190 # _start-0x7ffff4a6
lui ra,0x80000
addi ra,ra,1 # _end+0xffff5f31
srli a4,ra,0x1f
li t2,1
beq a4,t2,test_2907
j fail
test_2907:
lui gp,0x1
addi gp,gp,-1189 # _start-0x7ffff4a5
li ra,-1
srli a4,ra,0x0
li t2,-1
beq a4,t2,test_2908
j fail
test_2908:
lui gp,0x1
addi gp,gp,-1188 # _start-0x7ffff4a4
li ra,-1
srli a4,ra,0x1
lui t2,0x80000
addi t2,t2,-1 # _end+0xffff5f2f
beq a4,t2,test_2909
j fail
test_2909:
lui gp,0x1
addi gp,gp,-1187 # _start-0x7ffff4a3
li ra,-1
srli a4,ra,0x7
lui t2,0x2000
addi t2,t2,-1 # _start-0x7e000001
beq a4,t2,test_2910
j fail
test_2910:
lui gp,0x1
addi gp,gp,-1186 # _start-0x7ffff4a2
li ra,-1
srli a4,ra,0xe
lui t2,0x40
addi t2,t2,-1 # _start-0x7ffc0001
beq a4,t2,test_2911
j fail
test_2911:
lui gp,0x1
addi gp,gp,-1185 # _start-0x7ffff4a1
li ra,-1
srli a4,ra,0x1f
li t2,1
beq a4,t2,test_2912
j fail
test_2912:
lui gp,0x1
addi gp,gp,-1184 # _start-0x7ffff4a0
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0x0
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
beq a4,t2,test_2913
j fail
test_2913:
lui gp,0x1
addi gp,gp,-1183 # _start-0x7ffff49f
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0x1
lui t2,0x10909
addi t2,t2,144 # _start-0x6f6f6f70
beq a4,t2,test_2914
j fail
test_2914:
lui gp,0x1
addi gp,gp,-1182 # _start-0x7ffff49e
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0x7
lui t2,0x424
addi t2,t2,578 # _start-0x7fbdbdbe
beq a4,t2,test_2915
j fail
test_2915:
lui gp,0x1
addi gp,gp,-1181 # _start-0x7ffff49d
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0xe
lui t2,0x8
addi t2,t2,1156 # _start-0x7fff7b7c
beq a4,t2,test_2916
j fail
test_2916:
lui gp,0x1
addi gp,gp,-1180 # _start-0x7ffff49c
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0x1f
li t2,0
beq a4,t2,test_2917
j fail
test_2917:
lui gp,0x1
addi gp,gp,-1179 # _start-0x7ffff49b
lui ra,0x80000
srli ra,ra,0x7
lui t2,0x1000
beq ra,t2,test_2918
j fail
test_2918:
lui gp,0x1
addi gp,gp,-1178 # _start-0x7ffff49a
li tp,0
lui ra,0x80000
srli a4,ra,0x7
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2918+0xc
lui t2,0x1000
beq t1,t2,test_2919
j fail
test_2919:
lui gp,0x1
addi gp,gp,-1177 # _start-0x7ffff499
li tp,0
lui ra,0x80000
srli a4,ra,0xe
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2919+0xc
lui t2,0x20
beq t1,t2,test_2920
j fail
test_2920:
lui gp,0x1
addi gp,gp,-1176 # _start-0x7ffff498
li tp,0
lui ra,0x80000
addi ra,ra,1 # _end+0xffff5f31
srli a4,ra,0x1f
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2920+0xc
li t2,1
beq t1,t2,test_2921
j fail
test_2921:
lui gp,0x1
addi gp,gp,-1175 # _start-0x7ffff497
li tp,0
lui ra,0x80000
srli a4,ra,0x7
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2921+0xc
lui t2,0x1000
beq a4,t2,test_2922
j fail
test_2922:
lui gp,0x1
addi gp,gp,-1174 # _start-0x7ffff496
li tp,0
lui ra,0x80000
nop
srli a4,ra,0xe
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2922+0xc
lui t2,0x20
beq a4,t2,test_2923
j fail
test_2923:
lui gp,0x1
addi gp,gp,-1173 # _start-0x7ffff495
li tp,0
lui ra,0x80000
addi ra,ra,1 # _end+0xffff5f31
nop
nop
srli a4,ra,0x1f
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_2923+0xc
li t2,1
beq a4,t2,test_2924
j fail
test_2924:
lui gp,0x1
addi gp,gp,-1172 # _start-0x7ffff494
srli ra,zero,0x4
li t2,0
beq ra,t2,test_2925
j fail
test_2925:
lui gp,0x1
addi gp,gp,-1171 # _start-0x7ffff493
li ra,33
srli zero,ra,0xa
li t2,0
beq zero,t2,test_3002
j fail
test_3002:
lui gp,0x1
addi gp,gp,-1094 # _start-0x7ffff446
li ra,0
li sp,0
sub a4,ra,sp
li t2,0
beq a4,t2,test_3003
j fail
test_3003:
lui gp,0x1
addi gp,gp,-1093 # _start-0x7ffff445
li ra,1
li sp,1
sub a4,ra,sp
li t2,0
beq a4,t2,test_3004
j fail
test_3004:
lui gp,0x1
addi gp,gp,-1092 # _start-0x7ffff444
li ra,3
li sp,7
sub a4,ra,sp
li t2,-4
beq a4,t2,test_3005
j fail
test_3005:
lui gp,0x1
addi gp,gp,-1091 # _start-0x7ffff443
li ra,0
lui sp,0xffff8
sub a4,ra,sp
lui t2,0x8
beq a4,t2,test_3006
j fail
test_3006:
lui gp,0x1
addi gp,gp,-1090 # _start-0x7ffff442
lui ra,0x80000
li sp,0
sub a4,ra,sp
lui t2,0x80000
beq a4,t2,test_3007
j fail
test_3007:
lui gp,0x1
addi gp,gp,-1089 # _start-0x7ffff441
lui ra,0x80000
lui sp,0xffff8
sub a4,ra,sp
lui t2,0x80008
beq a4,t2,test_3008
j fail
test_3008:
lui gp,0x1
addi gp,gp,-1088 # _start-0x7ffff440
li ra,0
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sub a4,ra,sp
lui t2,0xffff8
addi t2,t2,1 # _end+0x7ffedf31
beq a4,t2,test_3009
j fail
test_3009:
lui gp,0x1
addi gp,gp,-1087 # _start-0x7ffff43f
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
li sp,0
sub a4,ra,sp
lui t2,0x80000
addi t2,t2,-1 # _end+0xffff5f2f
beq a4,t2,test_3010
j fail
test_3010:
lui gp,0x1
addi gp,gp,-1086 # _start-0x7ffff43e
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sub a4,ra,sp
lui t2,0x7fff8
beq a4,t2,test_3011
j fail
test_3011:
lui gp,0x1
addi gp,gp,-1085 # _start-0x7ffff43d
lui ra,0x80000
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sub a4,ra,sp
lui t2,0x7fff8
addi t2,t2,1 # _start-0x7fff
beq a4,t2,test_3012
j fail
test_3012:
lui gp,0x1
addi gp,gp,-1084 # _start-0x7ffff43c
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0xffff8
sub a4,ra,sp
lui t2,0x80008
addi t2,t2,-1 # _end+0xffffdf2f
beq a4,t2,test_3013
j fail
test_3013:
lui gp,0x1
addi gp,gp,-1083 # _start-0x7ffff43b
li ra,0
li sp,-1
sub a4,ra,sp
li t2,1
beq a4,t2,test_3014
j fail
test_3014:
lui gp,0x1
addi gp,gp,-1082 # _start-0x7ffff43a
li ra,-1
li sp,1
sub a4,ra,sp
li t2,-2
beq a4,t2,test_3015
j fail
test_3015:
lui gp,0x1
addi gp,gp,-1081 # _start-0x7ffff439
li ra,-1
li sp,-1
sub a4,ra,sp
li t2,0
beq a4,t2,test_3016
j fail
test_3016:
lui gp,0x1
addi gp,gp,-1080 # _start-0x7ffff438
li ra,13
li sp,11
sub ra,ra,sp
li t2,2
beq ra,t2,test_3017
j fail
test_3017:
lui gp,0x1
addi gp,gp,-1079 # _start-0x7ffff437
li ra,14
li sp,11
sub sp,ra,sp
li t2,3
beq sp,t2,test_3018
j fail
test_3018:
lui gp,0x1
addi gp,gp,-1078 # _start-0x7ffff436
li ra,13
sub ra,ra,ra
li t2,0
beq ra,t2,test_3019
j fail
test_3019:
lui gp,0x1
addi gp,gp,-1077 # _start-0x7ffff435
li tp,0
li ra,13
li sp,11
sub a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3019+0xc
li t2,2
beq t1,t2,test_3020
j fail
test_3020:
lui gp,0x1
addi gp,gp,-1076 # _start-0x7ffff434
li tp,0
li ra,14
li sp,11
sub a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3020+0xc
li t2,3
beq t1,t2,test_3021
j fail
test_3021:
lui gp,0x1
addi gp,gp,-1075 # _start-0x7ffff433
li tp,0
li ra,15
li sp,11
sub a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3021+0xc
li t2,4
beq t1,t2,test_3022
j fail
test_3022:
lui gp,0x1
addi gp,gp,-1074 # _start-0x7ffff432
li tp,0
li ra,13
li sp,11
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3022+0xc
li t2,2
beq a4,t2,test_3023
j fail
test_3023:
lui gp,0x1
addi gp,gp,-1073 # _start-0x7ffff431
li tp,0
li ra,14
li sp,11
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3023+0xc
li t2,3
beq a4,t2,test_3024
j fail
test_3024:
lui gp,0x1
addi gp,gp,-1072 # _start-0x7ffff430
li tp,0
li ra,15
li sp,11
nop
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3024+0xc
li t2,4
beq a4,t2,test_3025
j fail
test_3025:
lui gp,0x1
addi gp,gp,-1071 # _start-0x7ffff42f
li tp,0
li ra,13
nop
li sp,11
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3025+0xc
li t2,2
beq a4,t2,test_3026
j fail
test_3026:
lui gp,0x1
addi gp,gp,-1070 # _start-0x7ffff42e
li tp,0
li ra,14
nop
li sp,11
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3026+0xc
li t2,3
beq a4,t2,test_3027
j fail
test_3027:
lui gp,0x1
addi gp,gp,-1069 # _start-0x7ffff42d
li tp,0
li ra,15
nop
nop
li sp,11
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3027+0xc
li t2,4
beq a4,t2,test_3028
j fail
test_3028:
lui gp,0x1
addi gp,gp,-1068 # _start-0x7ffff42c
li tp,0
li sp,11
li ra,13
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3028+0xc
li t2,2
beq a4,t2,test_3029
j fail
test_3029:
lui gp,0x1
addi gp,gp,-1067 # _start-0x7ffff42b
li tp,0
li sp,11
li ra,14
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3029+0xc
li t2,3
beq a4,t2,test_3030
j fail
test_3030:
lui gp,0x1
addi gp,gp,-1066 # _start-0x7ffff42a
li tp,0
li sp,11
li ra,15
nop
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3030+0xc
li t2,4
beq a4,t2,test_3031
j fail
test_3031:
lui gp,0x1
addi gp,gp,-1065 # _start-0x7ffff429
li tp,0
li sp,11
nop
li ra,13
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3031+0xc
li t2,2
beq a4,t2,test_3032
j fail
test_3032:
lui gp,0x1
addi gp,gp,-1064 # _start-0x7ffff428
li tp,0
li sp,11
nop
li ra,14
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3032+0xc
li t2,3
beq a4,t2,test_3033
j fail
test_3033:
lui gp,0x1
addi gp,gp,-1063 # _start-0x7ffff427
li tp,0
li sp,11
nop
nop
li ra,15
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3033+0xc
li t2,4
beq a4,t2,test_3034
j fail
test_3034:
lui gp,0x1
addi gp,gp,-1062 # _start-0x7ffff426
li ra,-15
neg sp,ra
li t2,15
beq sp,t2,test_3035
j fail
test_3035:
lui gp,0x1
addi gp,gp,-1061 # _start-0x7ffff425
li ra,32
sub sp,ra,zero
li t2,32
beq sp,t2,test_3036
j fail
test_3036:
lui gp,0x1
addi gp,gp,-1060 # _start-0x7ffff424
neg ra,zero
li t2,0
beq ra,t2,test_3037
j fail
test_3037:
lui gp,0x1
addi gp,gp,-1059 # _start-0x7ffff423
li ra,16
li sp,30
sub zero,ra,sp
li t2,0
beq zero,t2,test_3102
j fail
test_3102:
lui gp,0x1
addi gp,gp,-994 # _start-0x7ffff3e2
lui ra,0xff1
addi ra,ra,-256 # _start-0x7f00f100
xori a4,ra,-241
lui t2,0xff00f
addi t2,t2,15 # _end+0x7f004f3f
beq a4,t2,test_3103
j fail
test_3103:
lui gp,0x1
addi gp,gp,-993 # _start-0x7ffff3e1
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
xori a4,ra,240
lui t2,0xff01
addi t2,t2,-256 # _start-0x700ff100
beq a4,t2,test_3104
j fail
test_3104:
lui gp,0x1
addi gp,gp,-992 # _start-0x7ffff3e0
lui ra,0xff1
addi ra,ra,-1793 # _start-0x7f00f701
xori a4,ra,1807
lui t2,0xff1
addi t2,t2,-16 # _start-0x7f00f010
beq a4,t2,test_3105
j fail
test_3105:
lui gp,0x1
addi gp,gp,-991 # _start-0x7ffff3df
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
xori a4,ra,240
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700f502f
beq a4,t2,test_3106
j fail
test_3106:
lui gp,0x1
addi gp,gp,-990 # _start-0x7ffff3de
lui ra,0xff00f
addi ra,ra,1792 # _end+0x7f005630
xori ra,ra,1807
lui t2,0xff00f
addi t2,t2,15 # _end+0x7f004f3f
beq ra,t2,test_3107
j fail
test_3107:
lui gp,0x1
addi gp,gp,-989 # _start-0x7ffff3dd
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
xori a4,ra,240
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3107+0xc
lui t2,0xff01
addi t2,t2,-256 # _start-0x700ff100
beq t1,t2,test_3108
j fail
test_3108:
lui gp,0x1
addi gp,gp,-988 # _start-0x7ffff3dc
li tp,0
lui ra,0xff1
addi ra,ra,-1793 # _start-0x7f00f701
xori a4,ra,1807
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3108+0xc
lui t2,0xff1
addi t2,t2,-16 # _start-0x7f00f010
beq t1,t2,test_3109
j fail
test_3109:
lui gp,0x1
addi gp,gp,-987 # _start-0x7ffff3db
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
xori a4,ra,240
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3109+0xc
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700f502f
beq t1,t2,test_3110
j fail
test_3110:
lui gp,0x1
addi gp,gp,-986 # _start-0x7ffff3da
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
xori a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3110+0xc
lui t2,0xff01
addi t2,t2,-256 # _start-0x700ff100
beq a4,t2,test_3111
j fail
test_3111:
lui gp,0x1
addi gp,gp,-985 # _start-0x7ffff3d9
li tp,0
lui ra,0xff1
addi ra,ra,-1 # _start-0x7f00f001
nop
xori a4,ra,15
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3111+0xc
lui t2,0xff1
addi t2,t2,-16 # _start-0x7f00f010
beq a4,t2,test_3112
j fail
test_3112:
lui gp,0x1
addi gp,gp,-984 # _start-0x7ffff3d8
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f4f3f
nop
nop
xori a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3112+0xc
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700f502f
beq a4,t2,test_3113
j fail
test_3113:
lui gp,0x1
addi gp,gp,-983 # _start-0x7ffff3d7
xori ra,zero,240
li t2,240
beq ra,t2,test_3114
j fail
test_3114:
lui gp,0x1
addi gp,gp,-982 # _start-0x7ffff3d6
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
xori zero,ra,1807
li t2,0
beq zero,t2,test_3202
j fail
test_3202:
lui gp,0x1
addi gp,gp,-894 # _start-0x7ffff37e
li ra,0
li sp,0
bgeu ra,sp,test_3202+0x20
beq zero,gp,test_3202+0x1c
j fail
bne zero,gp,test_3203
bgeu ra,sp,test_3202+0x1c
beq zero,gp,test_3203
j fail
test_3203:
lui gp,0x1
addi gp,gp,-893 # _start-0x7ffff37d
li ra,1
li sp,1
bgeu ra,sp,test_3203+0x20
beq zero,gp,test_3203+0x1c
j fail
bne zero,gp,test_3204
bgeu ra,sp,test_3203+0x1c
beq zero,gp,test_3204
j fail
test_3204:
lui gp,0x1
addi gp,gp,-892 # _start-0x7ffff37c
li ra,-1
li sp,-1
bgeu ra,sp,test_3204+0x20
beq zero,gp,test_3204+0x1c
j fail
bne zero,gp,test_3205
bgeu ra,sp,test_3204+0x1c
beq zero,gp,test_3205
j fail
test_3205:
lui gp,0x1
addi gp,gp,-891 # _start-0x7ffff37b
li ra,1
li sp,0
bgeu ra,sp,test_3205+0x20
beq zero,gp,test_3205+0x1c
j fail
bne zero,gp,test_3206
bgeu ra,sp,test_3205+0x1c
beq zero,gp,test_3206
j fail
test_3206:
lui gp,0x1
addi gp,gp,-890 # _start-0x7ffff37a
li ra,-1
li sp,-2
bgeu ra,sp,test_3206+0x20
beq zero,gp,test_3206+0x1c
j fail
bne zero,gp,test_3207
bgeu ra,sp,test_3206+0x1c
beq zero,gp,test_3207
j fail
test_3207:
lui gp,0x1
addi gp,gp,-889 # _start-0x7ffff379
li ra,-1
li sp,0
bgeu ra,sp,test_3207+0x20
beq zero,gp,test_3207+0x1c
j fail
bne zero,gp,test_3208
bgeu ra,sp,test_3207+0x1c
beq zero,gp,test_3208
j fail
test_3208:
lui gp,0x1
addi gp,gp,-888 # _start-0x7ffff378
li ra,0
li sp,1
bgeu ra,sp,test_3208+0x18
bne zero,gp,test_3208+0x20
beq zero,gp,test_3208+0x20
j fail
bgeu ra,sp,test_3208+0x18
test_3209:
lui gp,0x1
addi gp,gp,-887 # _start-0x7ffff377
li ra,-2
li sp,-1
bgeu ra,sp,test_3209+0x18
bne zero,gp,test_3209+0x20
beq zero,gp,test_3209+0x20
j fail
bgeu ra,sp,test_3209+0x18
test_3210:
lui gp,0x1
addi gp,gp,-886 # _start-0x7ffff376
li ra,0
li sp,-1
bgeu ra,sp,test_3210+0x18
bne zero,gp,test_3210+0x20
beq zero,gp,test_3210+0x20
j fail
bgeu ra,sp,test_3210+0x18
test_3211:
lui gp,0x1
addi gp,gp,-885 # _start-0x7ffff375
lui ra,0x80000
addi ra,ra,-1 # _end+0xffff5f2f
lui sp,0x80000
bgeu ra,sp,test_3211+0x1c
bne zero,gp,test_3211+0x24
beq zero,gp,test_3211+0x24
j fail
bgeu ra,sp,test_3211+0x1c
test_3212:
lui gp,0x1
addi gp,gp,-884 # _start-0x7ffff374
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
lui sp,0xf0000
bltu ra,sp,test_3212+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3212+0xc
test_3213:
lui gp,0x1
addi gp,gp,-883 # _start-0x7ffff373
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
lui sp,0xf0000
nop
bltu ra,sp,test_3213+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3213+0xc
test_3214:
lui gp,0x1
addi gp,gp,-882 # _start-0x7ffff372
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
lui sp,0xf0000
nop
nop
bltu ra,sp,test_3214+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3214+0xc
test_3215:
lui gp,0x1
addi gp,gp,-881 # _start-0x7ffff371
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
nop
lui sp,0xf0000
bltu ra,sp,test_3215+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3215+0xc
test_3216:
lui gp,0x1
addi gp,gp,-880 # _start-0x7ffff370
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
nop
lui sp,0xf0000
nop
bltu ra,sp,test_3216+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3216+0xc
test_3217:
lui gp,0x1
addi gp,gp,-879 # _start-0x7ffff36f
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
nop
nop
lui sp,0xf0000
bltu ra,sp,test_3217+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3217+0xc
test_3218:
lui gp,0x1
addi gp,gp,-878 # _start-0x7ffff36e
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3218+0xc
test_3219:
lui gp,0x1
addi gp,gp,-877 # _start-0x7ffff36d
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
lui sp,0xf0000
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3219+0xc
test_3220:
lui gp,0x1
addi gp,gp,-876 # _start-0x7ffff36c
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
lui sp,0xf0000
nop
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3220+0xc
test_3221:
lui gp,0x1
addi gp,gp,-875 # _start-0x7ffff36b
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
nop
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3221+0xc
test_3222:
lui gp,0x1
addi gp,gp,-874 # _start-0x7ffff36a
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
nop
lui sp,0xf0000
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3222+0xc
test_3223:
lui gp,0x1
addi gp,gp,-873 # _start-0x7ffff369
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fff5f2f
nop
nop
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3223+0xc
test_3224:
lui gp,0x1
addi gp,gp,-872 # _start-0x7ffff368
li ra,1
bgeu ra,zero,test_3224+0x20
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3
bne ra,t2,fail
test_3302:
lui gp,0x1
addi gp,gp,-794 # _start-0x7ffff31a
li ra,0
li sp,1
bltu ra,sp,test_3302+0x1c
bne zero,gp,fail
bne zero,gp,test_3303
bltu ra,sp,test_3302+0x18
bne zero,gp,fail
test_3303:
lui gp,0x1
addi gp,gp,-793 # _start-0x7ffff319
li ra,-2
li sp,-1
bltu ra,sp,test_3303+0x1c
bne zero,gp,fail
bne zero,gp,test_3304
bltu ra,sp,test_3303+0x18
bne zero,gp,fail
test_3304:
lui gp,0x1
addi gp,gp,-792 # _start-0x7ffff318
li ra,0
li sp,-1
bltu ra,sp,test_3304+0x1c
bne zero,gp,fail
bne zero,gp,test_3305
bltu ra,sp,test_3304+0x18
bne zero,gp,fail
test_3305:
lui gp,0x1
addi gp,gp,-791 # _start-0x7ffff317
li ra,1
li sp,0
bltu ra,sp,test_3305+0x18
bne zero,gp,test_3305+0x1c
bne zero,gp,fail
bltu ra,sp,test_3305+0x18
test_3306:
lui gp,0x1
addi gp,gp,-790 # _start-0x7ffff316
li ra,-1
li sp,-2
bltu ra,sp,test_3306+0x18
bne zero,gp,test_3306+0x1c
bne zero,gp,fail
bltu ra,sp,test_3306+0x18
test_3307:
lui gp,0x1
addi gp,gp,-789 # _start-0x7ffff315
li ra,-1
li sp,0
bltu ra,sp,test_3307+0x18
bne zero,gp,test_3307+0x1c
bne zero,gp,fail
bltu ra,sp,test_3307+0x18
test_3308:
lui gp,0x1
addi gp,gp,-788 # _start-0x7ffff314
lui ra,0x80000
lui sp,0x80000
addi sp,sp,-1 # _end+0xffff5f2f
bltu ra,sp,test_3308+0x1c
bne zero,gp,test_3308+0x20
bne zero,gp,fail
bltu ra,sp,test_3308+0x1c
test_3309:
lui gp,0x1
addi gp,gp,-787 # _start-0x7ffff313
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3309+0xc
test_3310:
lui gp,0x1
addi gp,gp,-786 # _start-0x7ffff312
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3310+0xc
test_3311:
lui gp,0x1
addi gp,gp,-785 # _start-0x7ffff311
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
nop
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3311+0xc
test_3312:
lui gp,0x1
addi gp,gp,-784 # _start-0x7ffff310
li tp,0
lui ra,0xf0000
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3312+0xc
test_3313:
lui gp,0x1
addi gp,gp,-783 # _start-0x7ffff30f
li tp,0
lui ra,0xf0000
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3313+0xc
test_3314:
lui gp,0x1
addi gp,gp,-782 # _start-0x7ffff30e
li tp,0
lui ra,0xf0000
nop
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3314+0xc
test_3315:
lui gp,0x1
addi gp,gp,-781 # _start-0x7ffff30d
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3315+0xc
test_3316:
lui gp,0x1
addi gp,gp,-780 # _start-0x7ffff30c
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3316+0xc
test_3317:
lui gp,0x1
addi gp,gp,-779 # _start-0x7ffff30b
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
nop
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3317+0xc
test_3318:
lui gp,0x1
addi gp,gp,-778 # _start-0x7ffff30a
li tp,0
lui ra,0xf0000
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3318+0xc
test_3319:
lui gp,0x1
addi gp,gp,-777 # _start-0x7ffff309
li tp,0
lui ra,0xf0000
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3319+0xc
test_3320:
lui gp,0x1
addi gp,gp,-776 # _start-0x7ffff308
li tp,0
lui ra,0xf0000
nop
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fff5f2f
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3320+0xc
test_3321:
lui gp,0x1
addi gp,gp,-775 # _start-0x7ffff307
li ra,1
bltu zero,ra,test_3321+0x20
addi ra,ra,1 # _end+0x6fff5f31
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3
bne ra,t2,fail
test_3402:
lui gp,0x1
addi gp,gp,-694 # _start-0x7ffff2b6
li a5,255
auipc ra,0x2
addi ra,ra,820 # lbu_tdat
lbu a4,0(ra)
li t2,255
bne a4,t2,fail
test_3403:
lui gp,0x1
addi gp,gp,-693 # _start-0x7ffff2b5
li a5,0
auipc ra,0x2
addi ra,ra,788 # lbu_tdat
lbu a4,1(ra)
li t2,0
bne a4,t2,fail
test_3404:
lui gp,0x1
addi gp,gp,-692 # _start-0x7ffff2b4
li a5,240
auipc ra,0x2
addi ra,ra,756 # lbu_tdat
lbu a4,2(ra)
li t2,240
bne a4,t2,fail
test_3405:
lui gp,0x1
addi gp,gp,-691 # _start-0x7ffff2b3
li a5,15
auipc ra,0x2
addi ra,ra,724 # lbu_tdat
lbu a4,3(ra)
li t2,15
bne a4,t2,fail
test_3406:
lui gp,0x1
addi gp,gp,-690 # _start-0x7ffff2b2
li a5,255
auipc ra,0x2
addi ra,ra,695 # lbu_tdat4
lbu a4,-3(ra)
li t2,255
bne a4,t2,fail
test_3407:
lui gp,0x1
addi gp,gp,-689 # _start-0x7ffff2b1
li a5,0
auipc ra,0x2
addi ra,ra,663 # lbu_tdat4
lbu a4,-2(ra)
li t2,0
bne a4,t2,fail
test_3408:
lui gp,0x1
addi gp,gp,-688 # _start-0x7ffff2b0
li a5,240
auipc ra,0x2
addi ra,ra,631 # lbu_tdat4
lbu a4,-1(ra)
li t2,240
bne a4,t2,fail
test_3409:
lui gp,0x1
addi gp,gp,-687 # _start-0x7ffff2af
li a5,15
auipc ra,0x2
addi ra,ra,599 # lbu_tdat4
lbu a4,0(ra)
li t2,15
bne a4,t2,fail
test_3410:
lui gp,0x1
addi gp,gp,-686 # _start-0x7ffff2ae
auipc ra,0x2
addi ra,ra,568 # lbu_tdat
addi ra,ra,-32
lbu t0,32(ra)
li t2,255
bne t0,t2,fail
test_3411:
lui gp,0x1
addi gp,gp,-685 # _start-0x7ffff2ad
auipc ra,0x2
addi ra,ra,536 # lbu_tdat
addi ra,ra,-6
lbu t0,7(ra)
li t2,0
bne t0,t2,fail
test_3412:
lui gp,0x1
addi gp,gp,-684 # _start-0x7ffff2ac
li tp,0
auipc ra,0x2
addi ra,ra,501 # lbu_tdat2
lbu a4,1(ra)
mv t1,a4
li t2,240
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3412+0xc
test_3413:
lui gp,0x1
addi gp,gp,-683 # _start-0x7ffff2ab
li tp,0
auipc ra,0x2
addi ra,ra,454 # lbu_tdat3
lbu a4,1(ra)
nop
mv t1,a4
li t2,15
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3413+0xc
test_3414:
lui gp,0x1
addi gp,gp,-682 # _start-0x7ffff2aa
li tp,0
auipc ra,0x2
addi ra,ra,400 # lbu_tdat
lbu a4,1(ra)
nop
nop
mv t1,a4
li t2,0
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3414+0xc
test_3415:
lui gp,0x1
addi gp,gp,-681 # _start-0x7ffff2a9
li tp,0
auipc ra,0x2
addi ra,ra,345 # lbu_tdat2
lbu a4,1(ra)
li t2,240
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3415+0xc
test_3416:
lui gp,0x1
addi gp,gp,-680 # _start-0x7ffff2a8
li tp,0
auipc ra,0x2
addi ra,ra,302 # lbu_tdat3
nop
lbu a4,1(ra)
li t2,15
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3416+0xc
test_3417:
lui gp,0x1
addi gp,gp,-679 # _start-0x7ffff2a7
li tp,0
auipc ra,0x2
addi ra,ra,252 # lbu_tdat
nop
nop
lbu a4,1(ra)
li t2,0
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3417+0xc
test_3418:
lui gp,0x1
addi gp,gp,-678 # _start-0x7ffff2a6
auipc t0,0x2
addi t0,t0,204 # lbu_tdat
lbu sp,0(t0)
li sp,2
li t2,2
bne sp,t2,fail
test_3419:
lui gp,0x1
addi gp,gp,-677 # _start-0x7ffff2a5
auipc t0,0x2
addi t0,t0,172 # lbu_tdat
lbu sp,0(t0)
nop
li sp,2
li t2,2
bne sp,t2,fail
test_3502:
lui gp,0x1
addi gp,gp,-594 # _start-0x7ffff252
li a5,255
auipc ra,0x2
addi ra,ra,148 # lhu_tdat
lhu a4,0(ra)
li t2,255
bne a4,t2,fail
test_3503:
lui gp,0x1
addi gp,gp,-593 # _start-0x7ffff251
lui a5,0x10
addi a5,a5,-256 # _start-0x7fff0100
auipc ra,0x2
addi ra,ra,112 # lhu_tdat
lhu a4,2(ra)
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne a4,t2,fail
test_3504:
lui gp,0x1
addi gp,gp,-592 # _start-0x7ffff250
lui a5,0x1
addi a5,a5,-16 # _start-0x7ffff010
auipc ra,0x2
addi ra,ra,72 # lhu_tdat
lhu a4,4(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
test_3505:
lui gp,0x1
addi gp,gp,-591 # _start-0x7ffff24f
lui a5,0xf
addi a5,a5,15 # _start-0x7fff0ff1
auipc ra,0x2
addi ra,ra,32 # lhu_tdat
lhu a4,6(ra)
lui t2,0xf
addi t2,t2,15 # _start-0x7fff0ff1
bne a4,t2,fail
test_3506:
lui gp,0x1
addi gp,gp,-590 # _start-0x7ffff24e
li a5,255
auipc ra,0x2
addi ra,ra,2 # lhu_tdat4
lhu a4,-6(ra)
li t2,255
bne a4,t2,fail
test_3507:
lui gp,0x1
addi gp,gp,-589 # _start-0x7ffff24d
lui a5,0x10
addi a5,a5,-256 # _start-0x7fff0100
auipc ra,0x2
addi ra,ra,-34 # lhu_tdat4
lhu a4,-4(ra)
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne a4,t2,fail
test_3508:
lui gp,0x1
addi gp,gp,-588 # _start-0x7ffff24c
lui a5,0x1
addi a5,a5,-16 # _start-0x7ffff010
auipc ra,0x2
addi ra,ra,-74 # lhu_tdat4
lhu a4,-2(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
test_3509:
lui gp,0x1
addi gp,gp,-587 # _start-0x7ffff24b
lui a5,0xf
addi a5,a5,15 # _start-0x7fff0ff1
auipc ra,0x2
addi ra,ra,-114 # lhu_tdat4
lhu a4,0(ra)
lui t2,0xf
addi t2,t2,15 # _start-0x7fff0ff1
bne a4,t2,fail
test_3510:
lui gp,0x1
addi gp,gp,-586 # _start-0x7ffff24a
auipc ra,0x2
addi ra,ra,-152 # lhu_tdat
addi ra,ra,-32
lhu t0,32(ra)
li t2,255
bne t0,t2,fail
test_3511:
lui gp,0x1
addi gp,gp,-585 # _start-0x7ffff249
auipc ra,0x2
addi ra,ra,-184 # lhu_tdat
addi ra,ra,-5
lhu t0,7(ra)
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne t0,t2,fail
test_3512:
lui gp,0x1
addi gp,gp,-584 # _start-0x7ffff248
li tp,0
auipc ra,0x2
addi ra,ra,-222 # lhu_tdat2
lhu a4,2(ra)
mv t1,a4
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3512+0xc
test_3513:
lui gp,0x1
addi gp,gp,-583 # _start-0x7ffff247
li tp,0
auipc ra,0x2
addi ra,ra,-272 # lhu_tdat3
lhu a4,2(ra)
nop
mv t1,a4
lui t2,0xf
addi t2,t2,15 # _start-0x7fff0ff1
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3513+0xc
test_3514:
lui gp,0x1
addi gp,gp,-582 # _start-0x7ffff246
li tp,0
auipc ra,0x2
addi ra,ra,-332 # lhu_tdat
lhu a4,2(ra)
nop
nop
mv t1,a4
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3514+0xc
test_3515:
lui gp,0x1
addi gp,gp,-581 # _start-0x7ffff245
li tp,0
auipc ra,0x2
addi ra,ra,-390 # lhu_tdat2
lhu a4,2(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3515+0xc
test_3516:
lui gp,0x1
addi gp,gp,-580 # _start-0x7ffff244
li tp,0
auipc ra,0x2
addi ra,ra,-436 # lhu_tdat3
nop
lhu a4,2(ra)
lui t2,0xf
addi t2,t2,15 # _start-0x7fff0ff1
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3516+0xc
test_3517:
lui gp,0x1
addi gp,gp,-579 # _start-0x7ffff243
li tp,0
auipc ra,0x2
addi ra,ra,-492 # lhu_tdat
nop
nop
lhu a4,2(ra)
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3517+0xc
test_3518:
lui gp,0x1
addi gp,gp,-578 # _start-0x7ffff242
auipc t0,0x2
addi t0,t0,-544 # lhu_tdat
lhu sp,0(t0)
li sp,2
li t2,2
bne sp,t2,fail
test_3519:
lui gp,0x1
addi gp,gp,-577 # _start-0x7ffff241
auipc t0,0x2
addi t0,t0,-576 # lhu_tdat
lhu sp,0(t0)
nop
li sp,2
li t2,2
bne sp,t2,fail
test_3602:
lui gp,0x1
addi gp,gp,-494 # _start-0x7ffff1ee
lui ra,0x0
li t2,0
bne ra,t2,fail
test_3603:
lui gp,0x1
addi gp,gp,-493 # _start-0x7ffff1ed
lui ra,0xfffff
srai ra,ra,0x1
li t2,-2048
bne ra,t2,fail
test_3604:
lui gp,0x1
addi gp,gp,-492 # _start-0x7ffff1ec
lui ra,0x7ffff
srai ra,ra,0x14
li t2,2047
bne ra,t2,fail
test_3605:
lui gp,0x1
addi gp,gp,-491 # _start-0x7ffff1eb
lui ra,0x80000
srai ra,ra,0x14
li t2,-2048
bne ra,t2,fail
test_3606:
lui gp,0x1
addi gp,gp,-490 # _start-0x7ffff1ea
lui zero,0x80000
li t2,0
bne zero,t2,fail
test_3702:
lui gp,0x1
addi gp,gp,-394 # _start-0x7ffff18a
auipc ra,0x2
addi ra,ra,-708 # sw_tdat
lui sp,0xaa0
addi sp,sp,170 # _start-0x7f55ff56
auipc a5,0x0
addi a5,a5,20 # test_3702+0x2c
sw sp,0(ra)
lw a4,0(ra)
j test_3702+0x30
mv a4,sp
lui t2,0xaa0
addi t2,t2,170 # _start-0x7f55ff56
bne a4,t2,fail
test_3703:
lui gp,0x1
addi gp,gp,-393 # _start-0x7ffff189
auipc ra,0x2
addi ra,ra,-768 # sw_tdat
lui sp,0xaa00b
addi sp,sp,-1536 # _end+0x2a000930
auipc a5,0x0
addi a5,a5,20 # test_3703+0x2c
sw sp,4(ra)
lw a4,4(ra)
j test_3703+0x30
mv a4,sp
lui t2,0xaa00b
addi t2,t2,-1536 # _end+0x2a000930
bne a4,t2,fail
test_3704:
lui gp,0x1
addi gp,gp,-392 # _start-0x7ffff188
auipc ra,0x2
addi ra,ra,-828 # sw_tdat
lui sp,0xaa01
addi sp,sp,-1376 # _start-0x755ff560
auipc a5,0x0
addi a5,a5,20 # test_3704+0x2c
sw sp,8(ra)
lw a4,8(ra)
j test_3704+0x30
mv a4,sp
lui t2,0xaa01
addi t2,t2,-1376 # _start-0x755ff560
bne a4,t2,fail
test_3705:
lui gp,0x1
addi gp,gp,-391 # _start-0x7ffff187
auipc ra,0x2
addi ra,ra,-888 # sw_tdat
lui sp,0xa00aa
addi sp,sp,10 # _end+0x2009ff3a
auipc a5,0x0
addi a5,a5,20 # test_3705+0x2c
sw sp,12(ra)
lw a4,12(ra)
j test_3705+0x30
mv a4,sp
lui t2,0xa00aa
addi t2,t2,10 # _end+0x2009ff3a
bne a4,t2,fail
test_3706:
lui gp,0x1
addi gp,gp,-390 # _start-0x7ffff186
auipc ra,0x2
addi ra,ra,-920 # sw_tdat8
lui sp,0xaa0
addi sp,sp,170 # _start-0x7f55ff56
auipc a5,0x0
addi a5,a5,20 # test_3706+0x2c
sw sp,-12(ra)
lw a4,-12(ra)
j test_3706+0x30
mv a4,sp
lui t2,0xaa0
addi t2,t2,170 # _start-0x7f55ff56
bne a4,t2,fail
test_3707:
lui gp,0x1
addi gp,gp,-389 # _start-0x7ffff185
auipc ra,0x2
addi ra,ra,-980 # sw_tdat8
lui sp,0xaa00b
addi sp,sp,-1536 # _end+0x2a000930
auipc a5,0x0
addi a5,a5,20 # test_3707+0x2c
sw sp,-8(ra)
lw a4,-8(ra)
j test_3707+0x30
mv a4,sp
lui t2,0xaa00b
addi t2,t2,-1536 # _end+0x2a000930
bne a4,t2,fail
test_3708:
lui gp,0x1
addi gp,gp,-388 # _start-0x7ffff184
auipc ra,0x2
addi ra,ra,-1040 # sw_tdat8
lui sp,0xaa01
addi sp,sp,-1376 # _start-0x755ff560
auipc a5,0x0
addi a5,a5,20 # test_3708+0x2c
sw sp,-4(ra)
lw a4,-4(ra)
j test_3708+0x30
mv a4,sp
lui t2,0xaa01
addi t2,t2,-1376 # _start-0x755ff560
bne a4,t2,fail
test_3709:
lui gp,0x1
addi gp,gp,-387 # _start-0x7ffff183
auipc ra,0x2
addi ra,ra,-1100 # sw_tdat8
lui sp,0xa00aa
addi sp,sp,10 # _end+0x2009ff3a
auipc a5,0x0
addi a5,a5,20 # test_3709+0x2c
sw sp,0(ra)
lw a4,0(ra)
j test_3709+0x30
mv a4,sp
lui t2,0xa00aa
addi t2,t2,10 # _end+0x2009ff3a
bne a4,t2,fail
test_3710:
lui gp,0x1
addi gp,gp,-386 # _start-0x7ffff182
auipc ra,0x2
addi ra,ra,-1156 # sw_tdat9
lui sp,0x12345
addi sp,sp,1656 # _start-0x6dcba988
addi tp,ra,-32
sw sp,32(tp) # _start-0x7fffffe0
lw t0,0(ra)
lui t2,0x12345
addi t2,t2,1656 # _start-0x6dcba988
bne t0,t2,fail
test_3711:
lui gp,0x1
addi gp,gp,-385 # _start-0x7ffff181
auipc ra,0x2
addi ra,ra,-1204 # sw_tdat9
lui sp,0x58213
addi sp,sp,152 # _start-0x27decf68
addi ra,ra,-3
sw sp,7(ra)
auipc tp,0x2
addi tp,tp,-1224 # sw_tdat10
lw t0,0(tp) # _start-0x80000000
lui t2,0x58213
addi t2,t2,152 # _start-0x27decf68
bne t0,t2,fail
test_3712:
lui gp,0x1
addi gp,gp,-384 # _start-0x7ffff180
li tp,0
lui ra,0xaabbd
addi ra,ra,-803 # _end+0x2abb2c0d
auipc sp,0x2
addi sp,sp,-1304 # sw_tdat
sw ra,0(sp)
lw a4,0(sp)
lui t2,0xaabbd
addi t2,t2,-803 # _end+0x2abb2c0d
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3712+0xc
test_3713:
lui gp,0x1
addi gp,gp,-383 # _start-0x7ffff17f
li tp,0
lui ra,0xdaabc
addi ra,ra,-819 # _end+0x5aab1bfd
auipc sp,0x2
addi sp,sp,-1364 # sw_tdat
nop
sw ra,4(sp)
lw a4,4(sp)
lui t2,0xdaabc
addi t2,t2,-819 # _end+0x5aab1bfd
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3713+0xc
test_3714:
lui gp,0x1
addi gp,gp,-382 # _start-0x7ffff17e
li tp,0
lui ra,0xddaac
addi ra,ra,-1076 # _end+0x5daa1afc
auipc sp,0x2
addi sp,sp,-1428 # sw_tdat
nop
nop
sw ra,8(sp)
lw a4,8(sp)
lui t2,0xddaac
addi t2,t2,-1076 # _end+0x5daa1afc
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3714+0xc
test_3715:
lui gp,0x1
addi gp,gp,-381 # _start-0x7ffff17d
li tp,0
lui ra,0xcddab
addi ra,ra,-1092 # _end+0x4dda0aec
nop
auipc sp,0x2
addi sp,sp,-1500 # sw_tdat
sw ra,12(sp)
lw a4,12(sp)
lui t2,0xcddab
addi t2,t2,-1092 # _end+0x4dda0aec
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3715+0xc
test_3716:
lui gp,0x1
addi gp,gp,-380 # _start-0x7ffff17c
li tp,0
lui ra,0xccddb
addi ra,ra,-1349 # _end+0x4cdd09eb
nop
auipc sp,0x2
addi sp,sp,-1564 # sw_tdat
nop
sw ra,16(sp)
lw a4,16(sp)
lui t2,0xccddb
addi t2,t2,-1349 # _end+0x4cdd09eb
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3716+0xc
test_3717:
lui gp,0x1
addi gp,gp,-379 # _start-0x7ffff17b
li tp,0
lui ra,0xbccde
addi ra,ra,-1365 # _end+0x3ccd39db
nop
nop
auipc sp,0x2
addi sp,sp,-1636 # sw_tdat
sw ra,20(sp)
lw a4,20(sp)
lui t2,0xbccde
addi t2,t2,-1365 # _end+0x3ccd39db
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3717+0xc
test_3718:
lui gp,0x1
addi gp,gp,-378 # _start-0x7ffff17a
li tp,0
auipc sp,0x2
addi sp,sp,-1688 # sw_tdat
lui ra,0x112
addi ra,ra,563 # _start-0x7feeddcd
sw ra,0(sp)
lw a4,0(sp)
lui t2,0x112
addi t2,t2,563 # _start-0x7feeddcd
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3718+0xc
test_3719:
lui gp,0x1
addi gp,gp,-377 # _start-0x7ffff179
li tp,0
auipc sp,0x2
addi sp,sp,-1748 # sw_tdat
lui ra,0x30011
addi ra,ra,547 # _start-0x4ffeeddd
nop
sw ra,4(sp)
lw a4,4(sp)
lui t2,0x30011
addi t2,t2,547 # _start-0x4ffeeddd
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3719+0xc
test_3720:
lui gp,0x1
addi gp,gp,-376 # _start-0x7ffff178
li tp,0
auipc sp,0x2
addi sp,sp,-1812 # sw_tdat
lui ra,0x33001
addi ra,ra,290 # _start-0x4cffeede
nop
nop
sw ra,8(sp)
lw a4,8(sp)
lui t2,0x33001
addi t2,t2,290 # _start-0x4cffeede
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3720+0xc
test_3721:
lui gp,0x1
addi gp,gp,-375 # _start-0x7ffff177
li tp,0
auipc sp,0x2
addi sp,sp,-1880 # sw_tdat
nop
lui ra,0x23300
addi ra,ra,274 # _start-0x5ccffeee
sw ra,12(sp)
lw a4,12(sp)
lui t2,0x23300
addi t2,t2,274 # _start-0x5ccffeee
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3721+0xc
test_3722:
lui gp,0x1
addi gp,gp,-374 # _start-0x7ffff176
li tp,0
auipc sp,0x2
addi sp,sp,-1944 # sw_tdat
nop
lui ra,0x22330
addi ra,ra,17 # _start-0x5dccffef
nop
sw ra,16(sp)
lw a4,16(sp)
lui t2,0x22330
addi t2,t2,17 # _start-0x5dccffef
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3722+0xc
test_3723:
lui gp,0x1
addi gp,gp,-373 # _start-0x7ffff175
li tp,0
auipc sp,0x2
addi sp,sp,-2012 # sw_tdat
nop
nop
lui ra,0x12233
addi ra,ra,1 # _start-0x6ddccfff
sw ra,20(sp)
lw a4,20(sp)
lui t2,0x12233
addi t2,t2,1 # _start-0x6ddccfff
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_3723+0xc
bne zero,gp,pass
fail:
# Report test failure over the memory-mapped UART, then park forever.
# Polls the status byte at base+5 for bit 0x20 (transmit-ready —
# presumably a 16550-style LSR.THRE at 0x10000000; TODO confirm
# against the board's memory map), then writes 'F' to the data port.
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail              # spin until UART can accept a byte
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop                # `loop` is the idle loop defined earlier in this file
pass:
# Report test success: same UART handshake, emits 'P' instead.
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass              # spin until UART can accept a byte
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
.data
# Test fixtures for the load/store tests above. The tests reach these
# labels via auipc/addi PC-relative sequences, so neither the values
# nor the layout (including the .zero padding) may change.
# `begin_signature` marks the start of the riscv-tests-style signature
# region; its byte doubles as the first lb test datum.
begin_signature:
.byte 0xff
lb_tdat2:
.byte 0x00
lb_tdat3:
.byte 0xf0
lb_tdat4:
.byte 0x0f
.zero 12
# sb (store-byte) scratch slots; each pre-filled with a sentinel 0xef
# that the corresponding test overwrites and reads back.
sb_tdat:
.byte 0xef
sb_tdat2:
.byte 0xef
sb_tdat3:
.byte 0xef
sb_tdat4:
.byte 0xef
sb_tdat5:
.byte 0xef
sb_tdat6:
.byte 0xef
sb_tdat7:
.byte 0xef
sb_tdat8:
.byte 0xef
sb_tdat9:
.byte 0xef
sb_tdat10:
.byte 0xef
.zero 6
# Executable instructions deliberately placed in .data: target of the
# fence.i self-modifying-code test (the test patches/jumps here).
fence_i_insn:
addi a3,a3,333
addi a3,a3,222
jalr a5,a6
addi a3,a3,555
jalr a5,a6
.zero 12
# lh (load-halfword, sign-extending) fixtures.
lh_tdat:
.half 0x00ff
lh_tdat2:
.half 0xff00
lh_tdat3:
.half 0x0ff0
lh_tdat4:
.half 0xf00f
.zero 8
# lw (load-word) fixtures.
lw_tdat:
.word 0x00ff00ff
lw_tdat2:
.word 0xff00ff00
lw_tdat3:
.word 0x0ff00ff0
lw_tdat4:
.word 0xf00ff00f
# sh (store-halfword) scratch slots, sentinel 0xbeef.
sh_tdat:
.half 0xbeef
sh_tdat2:
.half 0xbeef
sh_tdat3:
.half 0xbeef
sh_tdat4:
.half 0xbeef
sh_tdat5:
.half 0xbeef
sh_tdat6:
.half 0xbeef
sh_tdat7:
.half 0xbeef
sh_tdat8:
.half 0xbeef
sh_tdat9:
.half 0xbeef
sh_tdat10:
.half 0xbeef
.zero 12
# lbu (load-byte, zero-extending) fixtures — same values as lb above,
# but the tests expect zero-extension instead of sign-extension.
lbu_tdat:
.byte 0xff
lbu_tdat2:
.byte 0x00
lbu_tdat3:
.byte 0xf0
lbu_tdat4:
.byte 0x0f
.zero 12
# lhu (load-halfword, zero-extending) fixtures.
lhu_tdat:
.half 0x00ff
lhu_tdat2:
.half 0xff00
lhu_tdat3:
.half 0x0ff0
lhu_tdat4:
.half 0xf00f
.zero 8
# sw (store-word) scratch slots, sentinel 0xdeadbeef.
sw_tdat:
.word 0xdeadbeef
sw_tdat2:
.word 0xdeadbeef
sw_tdat3:
.word 0xdeadbeef
sw_tdat4:
.word 0xdeadbeef
sw_tdat5:
.word 0xdeadbeef
sw_tdat6:
.word 0xdeadbeef
sw_tdat7:
.word 0xdeadbeef
sw_tdat8:
.word 0xdeadbeef
sw_tdat9:
.word 0xdeadbeef
sw_tdat10:
.word 0xdeadbeef
.zero 8
|
abmfy/cod23-grp04
| 1,376
|
asm/exception.s
|
# M-mode trap/interrupt smoke test.
# Flow: _start -> test (installs `tvec` as the trap handler, arms a
# CLINT timer ~10000 ticks ahead, enables the machine timer interrupt,
# then mret's to `user`). `user` executes ecall/ebreak; each trap
# prints 'a' + (mcause & 0xff) over the UART and advances mepc by 4.
_start:
beq x0, x0, test
loop:
beq x0, x0, loop                # final idle loop after pass/fail output
fail:
# Report failure: poll UART status (offset 5, bit 0x20 = tx-ready —
# presumably 16550 LSR.THRE at 0x10000000; TODO confirm), emit 'F'.
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
# Report success: same UART handshake, emit 'P'.
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
# Padding before the trap vector (alignment filler).
nop
nop
nop
tvec:
# Trap handler entry (mtvec points here, direct mode).
# NOTE(review): expects mstatus to read 0 inside the handler — that is
# implementation-specific (a spec-complete core would have MPIE/MPP
# set after a trap); confirm against the target core's behavior.
csrr s0, mstatus
bnez s0, fail
output:
# Wait for UART ready, then print 'a' + low byte of mcause so each
# distinct exception/interrupt code maps to a distinct letter.
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, output
addi a0, zero, 'a'
csrr t2, mcause
andi t2, t2, 0xff
add a0, a0, t2
sb a0, 0(t0)
# Skip the trapping instruction on return: mepc += 4.
csrr t3, mepc
addi t3, t3, 4
csrw mepc, t3
# If timer interrupt, disable it
csrr t2, mcause                 # mcause == (interrupt bit | code 7) => M-mode timer
csrr t0, mcause
li t1, 1 << 31 | 7
bne t0, t1, end
li t0, 1 << 7                   # mie.MTIE
csrc mie, t0
end:
mret
test:
# Setup trap handler
la t0, tvec
csrw mtvec, t0
la t0, user
csrw mepc, t0                   # mret below will jump to `user`
# Setup timer for 10000 cycles
li t0, 0x200bff8                # CLINT mtime (low word) — TODO confirm platform map
lw t1, 0(t0)
lw t2, 4(t0)
li t3, 10000
add t3, t1, t3                  # 64-bit add of 10000 across two words:
sltu t1, t3, t1                 # carry out of the low-word addition
add t2, t2, t1
li t0, 0x2004000                # CLINT mtimecmp — TODO confirm platform map
sw t2, 4(t0)                    # high word written first, then low word
sw t3, 0(t0)
# Enable timer interrupt
li t0, 1 << 7                   # mie.MTIE
csrs mie, t0
mret                            # drop to `user` (mepc set above)
user:
ecall                           # trap #1: environment call
ebreak                          # trap #2: breakpoint
spin:
j spin                          # wait here for the timer interrupt
# NOTE(review): the lines below are unreachable — `spin` loops forever,
# so this mstatus check and the jump to `pass` never execute. The test
# apparently ends via the handler's UART output instead; confirm intent.
csrr s0, mstatus
li s1, 1 << 7
bne s0, s1, fail
j pass
|
abmfy/cod23-grp04
| 1,966
|
asm/zicsr.s
|
# =====================================================================
# asm/zicsr.s — Zicsr extension test.
#
# Part 1 exercises csrrw/csrrs/csrrc and their immediate forms on
# mscratch; each test checks the OLD value returned by the previous
# read-modify-write, so the tests are order-dependent.  On success 'P'
# is printed and execution continues into part 2, which dumps two
# 64-bit rdtime samples as raw bytes over the serial port.
#
# Serial port: UART at 0x10000000; status byte at offset 5, bit 0x20 =
# transmitter ready (presumably 16550-style LSR — confirm).
# =====================================================================
_start:
beq x0, x0, test
loop:
beq x0, x0, loop
# Print 'F' (fail) once the UART is ready, then hang.
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
# Print 'P' (pass), then fall into the timer dump instead of hanging.
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, test_timer
nop
nop
nop
# Basic write/read-back of mscratch.
test:
li t0, 0x114514
csrw mscratch, t0
csrr t1, mscratch
bne t0, t1, fail
# Writing zero must read back as zero.
test_20:
li gp, 20
csrw mscratch, zero
csrr a0, mscratch
li x7, 0
bne a0, x7, fail
# csrrwi returns the OLD value: second swap must yield the 0 written first.
test_21:
li gp, 21
csrrwi a0, mscratch, 0
csrrwi a0, mscratch, 0xF
li x7, 0
bne a0, x7, fail
# csrrsi with rd=x0 still sets bits: 0xF | 0x10 = 0x1f.
test_22:
li gp, 22
csrrsi x0, mscratch, 0x10
csrr a0, mscratch
li x7, 0x1f
bne a0, x7, fail
csrwi mscratch, 3
# mscratch == 3 after the csrwi above.
test_2:
li gp, 2
csrr a0, mscratch
li x7, 3
bne a0, x7, fail
# csrrci: old value 3 returned, mscratch becomes 3 & ~1 = 2.
test_3:
li gp, 3
csrrci a1, mscratch, 1
li x7, 3
bne a1, x7, fail
# csrrsi: old value 2 returned, mscratch becomes 2 | 4 = 6.
test_4:
li gp, 4
csrrsi a2, mscratch, 4
li x7, 2
bne a2, x7, fail
# csrrwi: old value 6 returned, mscratch becomes 2.
test_5:
li gp, 5
csrrwi a3, mscratch, 2
li x7, 6
bne a3, x7, fail
# Register forms.  csrrw: old value 2 returned, mscratch = 0xbad1dea.
test_6:
li gp, 6
li a0, 0xbad1dea
csrrw a1, mscratch, a0
li x7, 2
bne a1, x7, fail
# csrrc: old 0xbad1dea returned, mscratch = 0xbad1dea & ~0x1dea = 0xbad0000.
test_7:
li gp, 7
li a0, 0x0001dea
csrrc a1, mscratch, a0
li x7, 0xbad1dea
bne a1, x7, fail
# csrrs: old 0xbad0000 returned, mscratch = 0xbad0000 | 0xbeef = 0xbadbeef.
test_8:
li gp, 8
li a0, 0x000beef
csrrs a1, mscratch, a0
li x7, 0xbad0000
bne a1, x7, fail
# Same three ops with rd == rs1: the old CSR value must land in a0.
# csrrw: a0 <- 0xbadbeef, mscratch = 0xbad1dea.
test_9:
li gp, 9
li a0, 0xbad1dea
csrrw a0, mscratch, a0
li x7, 0xbadbeef
bne a0, x7, fail
# csrrc: a0 <- 0xbad1dea, mscratch = 0xbad0000.
test_10:
li gp, 10
li a0, 0x0001dea
csrrc a0, mscratch, a0
li x7, 0xbad1dea
bne a0, x7, fail
# csrrs: a0 <- 0xbad0000, mscratch = 0xbadbeef.
test_11:
li gp, 11
li a0, 0x000beef
csrrs a0, mscratch, a0
li x7, 0xbad0000
bne a0, x7, fail
# Final read-back of the accumulated value.
test_12:
li gp, 12
csrr a0, mscratch
li x7, 0xbadbeef
bne a0, x7, fail
j pass
# Part 2: dump two 64-bit time samples (t5 = iteration count).
test_timer:
li t5, 2
# Torn-read-safe 64-bit time read: retry while the high half changed
# between the two rdtimeh samples.
.load:
rdtimeh x3
rdtime x2
rdtimeh x4
bne x3, x4, .load
li t6, 4
# Emit the low word as 4 raw bytes, least-significant first.
.low:
mv a0, x2
call output
addi t6, t6, -1
srli x2, x2, 8
bne t6, zero, .low
li t6, 4
# Emit the high word the same way.
.high:
mv a0, x4
call output
addi t6, t6, -1
srli x4, x4, 8
bne t6, zero, .high
addi t5, t5, -1
bne t5, zero, .load
j loop
# output: busy-wait for UART ready, then transmit the byte in a0.
# Clobbers t0/t1.
output:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, output
sb a0, 0(t0)
ret
|
abmfy/cod23-grp04
| 1,041
|
asm/lab6.s
|
# =====================================================================
# asm/lab6.s — compute 1 + 2 + ... + 100, store the sum (5050) to RAM
# at 0x80000100, then print "done!" over the memory-mapped serial port
# and park the PC in an infinite loop.
#
# Serial port at 0x10000000: status byte at offset 5, bit 0x20 means
# the transmitter can accept another byte; data register at offset 0.
# =====================================================================
li t0, 0                        # i   = 0
li t1, 100                      # loop bound
li t2, 0                        # acc = 0
sum_loop:
addi t0, t0, 1                  # i += 1
add t2, t2, t0                  # acc += i
bne t0, t1, sum_loop            # repeat until i reaches 100
# Store the result at base RAM + 0x100.
lui t0, 0x80000
sw t2, 0x100(t0)
# Emit "done!" one byte at a time, waiting for the transmitter
# between bytes so no character is dropped.
lui t0, 0x10000
.wait_d:
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, .wait_d
li a0, 'd'
sb a0, 0(t0)
.wait_o:
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, .wait_o
li a0, 'o'
sb a0, 0(t0)
.wait_n:
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, .wait_n
li a0, 'n'
sb a0, 0(t0)
.wait_e:
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, .wait_e
li a0, 'e'
sb a0, 0(t0)
.wait_bang:
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, .wait_bang
li a0, '!'
sb a0, 0(t0)
# Park here forever so the PC stays in known code.
halt:
j halt
|
abmfy/cod23-grp04
| 62,296
|
asm/test19.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li ra,0
li sp,0
add a4,ra,sp
li t2,0
beq a4,t2,test_3
j fail
test_3:
li gp,3
li ra,1
li sp,1
add a4,ra,sp
li t2,2
beq a4,t2,test_4
j fail
test_4:
li gp,4
li ra,3
li sp,7
add a4,ra,sp
li t2,10
beq a4,t2,test_5
j fail
test_5:
li gp,5
li ra,0
lui sp,0xffff8
add a4,ra,sp
lui t2,0xffff8
beq a4,t2,test_6
j fail
test_6:
li gp,6
lui ra,0x80000
li sp,0
add a4,ra,sp
lui t2,0x80000
beq a4,t2,test_7
j fail
test_7:
li gp,7
lui ra,0x80000
lui sp,0xffff8
add a4,ra,sp
lui t2,0x7fff8
beq a4,t2,test_8
j fail
test_8:
li gp,8
li ra,0
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
add a4,ra,sp
lui t2,0x8
addi t2,t2,-1 # _start-0x7fff8001
beq a4,t2,test_9
j fail
test_9:
li gp,9
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffaf4f
li sp,0
add a4,ra,sp
lui t2,0x80000
addi t2,t2,-1 # _end+0xffffaf4f
beq a4,t2,test_10
j fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffaf4f
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
add a4,ra,sp
lui t2,0x80008
addi t2,t2,-2 # _end+0x2f4e
beq a4,t2,test_11
j fail
test_11:
li gp,11
lui ra,0x80000
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
add a4,ra,sp
lui t2,0x80008
addi t2,t2,-1 # _end+0x2f4f
beq a4,t2,test_12
j fail
test_12:
li gp,12
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffaf4f
lui sp,0xffff8
add a4,ra,sp
lui t2,0x7fff8
addi t2,t2,-1 # _start-0x8001
beq a4,t2,test_13
j fail
test_13:
li gp,13
li ra,0
li sp,-1
add a4,ra,sp
li t2,-1
beq a4,t2,test_14
j fail
test_14:
li gp,14
li ra,-1
li sp,1
add a4,ra,sp
li t2,0
beq a4,t2,test_15
j fail
test_15:
li gp,15
li ra,-1
li sp,-1
add a4,ra,sp
li t2,-2
beq a4,t2,test_16
j fail
test_16:
li gp,16
li ra,1
lui sp,0x80000
addi sp,sp,-1 # _end+0xffffaf4f
add a4,ra,sp
lui t2,0x80000
beq a4,t2,test_17
j fail
test_17:
li gp,17
li ra,13
li sp,11
add ra,ra,sp
li t2,24
beq ra,t2,test_18
j fail
test_18:
li gp,18
li ra,14
li sp,11
add sp,ra,sp
li t2,25
beq sp,t2,test_19
j fail
test_19:
li gp,19
li ra,13
add ra,ra,ra
li t2,26
beq ra,t2,test_20
j fail
test_20:
li gp,20
li tp,0
li ra,13
li sp,11
add a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
li t2,24
beq t1,t2,test_21
j fail
test_21:
li gp,21
li tp,0
li ra,14
li sp,11
add a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
li t2,25
beq t1,t2,test_22
j fail
test_22:
li gp,22
li tp,0
li ra,15
li sp,11
add a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
li t2,26
beq t1,t2,test_23
j fail
test_23:
li gp,23
li tp,0
li ra,13
li sp,11
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
li t2,24
beq a4,t2,test_24
j fail
test_24:
li gp,24
li tp,0
li ra,14
li sp,11
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_24+0x8
li t2,25
beq a4,t2,test_25
j fail
test_25:
li gp,25
li tp,0
li ra,15
li sp,11
nop
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_25+0x8
li t2,26
beq a4,t2,test_26
j fail
test_26:
li gp,26
li tp,0
li ra,13
nop
li sp,11
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_26+0x8
li t2,24
beq a4,t2,test_27
j fail
test_27:
li gp,27
li tp,0
li ra,14
nop
li sp,11
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_27+0x8
li t2,25
beq a4,t2,test_28
j fail
test_28:
li gp,28
li tp,0
li ra,15
nop
nop
li sp,11
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_28+0x8
li t2,26
beq a4,t2,test_29
j fail
test_29:
li gp,29
li tp,0
li sp,11
li ra,13
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_29+0x8
li t2,24
beq a4,t2,test_30
j fail
test_30:
li gp,30
li tp,0
li sp,11
li ra,14
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_30+0x8
li t2,25
beq a4,t2,test_31
j fail
test_31:
li gp,31
li tp,0
li sp,11
li ra,15
nop
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_31+0x8
li t2,26
beq a4,t2,test_32
j fail
test_32:
li gp,32
li tp,0
li sp,11
nop
li ra,13
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_32+0x8
li t2,24
beq a4,t2,test_33
j fail
test_33:
li gp,33
li tp,0
li sp,11
nop
li ra,14
nop
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_33+0x8
li t2,25
beq a4,t2,test_34
j fail
test_34:
li gp,34
li tp,0
li sp,11
nop
nop
li ra,15
add a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_34+0x8
li t2,26
beq a4,t2,test_35
j fail
test_35:
li gp,35
li ra,15
add sp,zero,ra
li t2,15
beq sp,t2,test_36
j fail
test_36:
li gp,36
li ra,32
add sp,ra,zero
li t2,32
beq sp,t2,test_37
j fail
test_37:
li gp,37
add ra,zero,zero
li t2,0
beq ra,t2,test_38
j fail
test_38:
li gp,38
li ra,16
li sp,30
add zero,ra,sp
li t2,0
beq zero,t2,test_102
j fail
test_102:
li gp,102
li ra,0
mv a4,ra
li t2,0
beq a4,t2,test_103
j fail
test_103:
li gp,103
li ra,1
addi a4,ra,1
li t2,2
beq a4,t2,test_104
j fail
test_104:
li gp,104
li ra,3
addi a4,ra,7
li t2,10
beq a4,t2,test_105
j fail
test_105:
li gp,105
li ra,0
addi a4,ra,-2048
li t2,-2048
beq a4,t2,test_106
j fail
test_106:
li gp,106
lui ra,0x80000
mv a4,ra
lui t2,0x80000
beq a4,t2,test_107
j fail
test_107:
li gp,107
lui ra,0x80000
addi a4,ra,-2048 # _end+0xffffa750
lui t2,0x80000
addi t2,t2,-2048 # _end+0xffffa750
beq a4,t2,test_108
j fail
test_108:
li gp,108
li ra,0
addi a4,ra,2047
li t2,2047
beq a4,t2,test_109
j fail
test_109:
li gp,109
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffaf4f
mv a4,ra
lui t2,0x80000
addi t2,t2,-1 # _end+0xffffaf4f
beq a4,t2,test_110
j fail
test_110:
li gp,110
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffaf4f
addi a4,ra,2047
lui t2,0x80000
addi t2,t2,2046 # _end+0xffffb74e
beq a4,t2,test_111
j fail
test_111:
li gp,111
lui ra,0x80000
addi a4,ra,2047 # _end+0xffffb74f
lui t2,0x80000
addi t2,t2,2047 # _end+0xffffb74f
beq a4,t2,test_112
j fail
test_112:
li gp,112
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffaf4f
addi a4,ra,-2048
lui t2,0x7ffff
addi t2,t2,2047 # _start-0x801
beq a4,t2,test_113
j fail
test_113:
li gp,113
li ra,0
addi a4,ra,-1
li t2,-1
beq a4,t2,test_114
j fail
test_114:
li gp,114
li ra,-1
addi a4,ra,1
li t2,0
beq a4,t2,test_115
j fail
test_115:
li gp,115
li ra,-1
addi a4,ra,-1
li t2,-2
beq a4,t2,test_116
j fail
test_116:
li gp,116
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffaf4f
addi a4,ra,1
lui t2,0x80000
beq a4,t2,test_117
j fail
test_117:
li gp,117
li ra,13
addi ra,ra,11
li t2,24
beq ra,t2,test_118
j fail
test_118:
li gp,118
li tp,0
li ra,13
addi a4,ra,11
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_118+0x8
li t2,24
beq t1,t2,test_119
j fail
test_119:
li gp,119
li tp,0
li ra,13
addi a4,ra,10
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_119+0x8
li t2,23
beq t1,t2,test_120
j fail
test_120:
li gp,120
li tp,0
li ra,13
addi a4,ra,9
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_120+0x8
li t2,22
beq t1,t2,test_121
j fail
test_121:
li gp,121
li tp,0
li ra,13
addi a4,ra,11
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_121+0x8
li t2,24
beq a4,t2,test_122
j fail
test_122:
li gp,122
li tp,0
li ra,13
nop
addi a4,ra,10
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_122+0x8
li t2,23
beq a4,t2,test_123
j fail
test_123:
li gp,123
li tp,0
li ra,13
nop
nop
addi a4,ra,9
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_123+0x8
li t2,22
beq a4,t2,test_124
j fail
test_124:
li gp,124
li ra,32
li t2,32
beq ra,t2,test_125
j fail
test_125:
li gp,125
li ra,33
addi zero,ra,50
li t2,0
beq zero,t2,test_202
j fail
test_202:
li gp,202
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_203
j fail
test_203:
li gp,203
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
and a4,ra,sp
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_204
j fail
test_204:
li gp,204
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_205
j fail
test_205:
li gp,205
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
and a4,ra,sp
lui t2,0xf000f
beq a4,t2,test_206
j fail
test_206:
li gp,206
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and ra,ra,sp
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq ra,t2,test_207
j fail
test_207:
li gp,207
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
and sp,ra,sp
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq sp,t2,test_208
j fail
test_208:
li gp,208
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
and ra,ra,ra
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq ra,t2,test_209
j fail
test_209:
li gp,209
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_209+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq t1,t2,test_210
j fail
test_210:
li gp,210
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
and a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_210+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq t1,t2,test_211
j fail
test_211:
li gp,211
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_211+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq t1,t2,test_212
j fail
test_212:
li gp,212
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_212+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_213
j fail
test_213:
li gp,213
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_213+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_214
j fail
test_214:
li gp,214
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_214+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_215
j fail
test_215:
li gp,215
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_215+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_216
j fail
test_216:
li gp,216
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_216+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_217
j fail
test_217:
li gp,217
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_217+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_218
j fail
test_218:
li gp,218
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_218+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_219
j fail
test_219:
li gp,219
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_219+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_220
j fail
test_220:
li gp,220
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_220+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_221
j fail
test_221:
li gp,221
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_221+0x8
lui t2,0xf001
addi t2,t2,-256 # _start-0x70fff100
beq a4,t2,test_222
j fail
test_222:
li gp,222
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_222+0x8
lui t2,0xf00
addi t2,t2,240 # _start-0x7f0fff10
beq a4,t2,test_223
j fail
test_223:
li gp,223
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
and a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_223+0x8
lui t2,0xf0
addi t2,t2,15 # _start-0x7ff0fff1
beq a4,t2,test_224
j fail
test_224:
li gp,224
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
and sp,zero,ra
li t2,0
beq sp,t2,test_225
j fail
test_225:
li gp,225
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
and sp,ra,zero
li t2,0
beq sp,t2,test_226
j fail
test_226:
li gp,226
and ra,zero,zero
li t2,0
beq ra,t2,test_227
j fail
test_227:
li gp,227
lui ra,0x11111
addi ra,ra,273 # _start-0x6eeeeeef
lui sp,0x22222
addi sp,sp,546 # _start-0x5dddddde
and zero,ra,sp
li t2,0
beq zero,t2,test_302
j fail
test_302:
li gp,302
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
andi a4,ra,-241
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_303
j fail
test_303:
li gp,303
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
andi a4,ra,240
li t2,240
beq a4,t2,test_304
j fail
test_304:
li gp,304
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
andi a4,ra,1807
li t2,15
beq a4,t2,test_305
j fail
test_305:
li gp,305
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
andi a4,ra,240
li t2,0
beq a4,t2,test_306
j fail
test_306:
li gp,306
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
andi ra,ra,240
li t2,0
beq ra,t2,test_307
j fail
test_307:
li gp,307
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
andi a4,ra,1807
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_307+0x8
li t2,1792
beq t1,t2,test_308
j fail
test_308:
li gp,308
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
andi a4,ra,240
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_308+0x8
li t2,240
beq t1,t2,test_309
j fail
test_309:
li gp,309
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
andi a4,ra,-241
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_309+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq t1,t2,test_310
j fail
test_310:
li gp,310
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
andi a4,ra,1807
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_310+0x8
li t2,1792
beq a4,t2,test_311
j fail
test_311:
li gp,311
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
andi a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_311+0x8
li t2,240
beq a4,t2,test_312
j fail
test_312:
li gp,312
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
nop
nop
andi a4,ra,1807
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_312+0x8
li t2,15
beq a4,t2,test_313
j fail
test_313:
li gp,313
andi ra,zero,240
li t2,0
beq ra,t2,test_314
j fail
test_314:
li gp,314
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
andi zero,ra,1807
li t2,0
beq zero,t2,test_402
j fail
test_402:
li gp,402
nop
auipc a0,0x2
addi a0,a0,1820 # test_1818+0x28
jal a1,test_402+0x14
li a2,-1
xor a1,a1,a2
addi a1,a1,1
add a0,a0,a1
lui t2,0x2
addi t2,t2,1808 # _start-0x7fffd8f0
beq a0,t2,test_403
j fail
test_403:
li gp,403
auipc a0,0xffffe
addi a0,a0,-1796 # _start-0x16a4
jal a1,test_403+0x10
li a2,-1
xor a1,a1,a2
addi a1,a1,1
add a0,a0,a1
lui t2,0xffffe
addi t2,t2,-1808 # _end+0x7fff8840
beq a0,t2,test_502
j fail
test_502:
li gp,502
li ra,0
li sp,0
beq ra,sp,test_502+0x1c
beq zero,gp,test_502+0x18
j fail
bne zero,gp,test_503
beq ra,sp,test_502+0x18
beq zero,gp,test_503
j fail
test_503:
li gp,503
li ra,1
li sp,1
beq ra,sp,test_503+0x1c
beq zero,gp,test_503+0x18
j fail
bne zero,gp,test_504
beq ra,sp,test_503+0x18
beq zero,gp,test_504
j fail
test_504:
li gp,504
li ra,-1
li sp,-1
beq ra,sp,test_504+0x1c
beq zero,gp,test_504+0x18
j fail
bne zero,gp,test_505
beq ra,sp,test_504+0x18
beq zero,gp,test_505
j fail
test_505:
li gp,505
li ra,0
li sp,1
beq ra,sp,test_505+0x14
bne zero,gp,test_505+0x1c
beq zero,gp,test_505+0x1c
j fail
beq ra,sp,test_505+0x14
test_506:
li gp,506
li ra,1
li sp,0
beq ra,sp,test_506+0x14
bne zero,gp,test_506+0x1c
beq zero,gp,test_506+0x1c
j fail
beq ra,sp,test_506+0x14
test_507:
li gp,507
li ra,-1
li sp,1
beq ra,sp,test_507+0x14
bne zero,gp,test_507+0x1c
beq zero,gp,test_507+0x1c
j fail
beq ra,sp,test_507+0x14
test_508:
li gp,508
li ra,1
li sp,-1
beq ra,sp,test_508+0x14
bne zero,gp,test_508+0x1c
beq zero,gp,test_508+0x1c
j fail
beq ra,sp,test_508+0x14
test_509:
li gp,509
li tp,0
li ra,0
li sp,-1
bne ra,sp,test_509+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_509+0x8
test_510:
li gp,510
li tp,0
li ra,0
li sp,-1
nop
bne ra,sp,test_510+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_510+0x8
test_511:
li gp,511
li tp,0
li ra,0
li sp,-1
nop
nop
bne ra,sp,test_511+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_511+0x8
test_512:
li gp,512
li tp,0
li ra,0
nop
li sp,-1
bne ra,sp,test_512+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_512+0x8
test_513:
li gp,513
li tp,0
li ra,0
nop
li sp,-1
nop
bne ra,sp,test_513+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_513+0x8
test_514:
li gp,514
li tp,0
li ra,0
nop
nop
li sp,-1
bne ra,sp,test_514+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_514+0x8
test_515:
li gp,515
li tp,0
li ra,0
li sp,-1
bne ra,sp,test_515+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_515+0x8
test_516:
li gp,516
li tp,0
li ra,0
li sp,-1
nop
bne ra,sp,test_516+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_516+0x8
test_517:
li gp,517
li tp,0
li ra,0
li sp,-1
nop
nop
bne ra,sp,test_517+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_517+0x8
test_518:
li gp,518
li tp,0
li ra,0
nop
li sp,-1
bne ra,sp,test_518+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_518+0x8
test_519:
li gp,519
li tp,0
li ra,0
nop
li sp,-1
nop
bne ra,sp,test_519+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_519+0x8
test_520:
li gp,520
li tp,0
li ra,0
nop
nop
li sp,-1
bne ra,sp,test_520+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_520+0x8
jalr_test_2:
li gp,602
li t0,0
auipc t1,0x0
addi t1,t1,16 # jalr_target_2
jalr t0,t1
jalr_linkaddr_2:
j fail
jalr_target_2:
auipc t1,0x0
addi t1,t1,-4 # jalr_linkaddr_2
beq t0,t1,jalr_test_3
j fail
jalr_test_3:
li gp,603
auipc t0,0x0
addi t0,t0,16 # jalr_target_3
jalr t0,t0
jalr_linkaddr_3:
j fail
jalr_target_3:
auipc t1,0x0
addi t1,t1,-4 # jalr_linkaddr_3
beq t0,t1,test_604
j fail
test_604:
li gp,604
li tp,0
auipc t1,0x0
addi t1,t1,20 # test_604+0x1c
jalr a3,t1
beq zero,gp,test_604+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_604+0x8
test_605:
li gp,605
li tp,0
auipc t1,0x0
addi t1,t1,24 # test_605+0x20
nop
jalr a3,t1
beq zero,gp,test_605+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_605+0x8
test_606:
li gp,606
li tp,0
auipc t1,0x0
addi t1,t1,28 # test_606+0x24
nop
nop
jalr a3,t1
beq zero,gp,test_606+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_606+0x8
test_702:
li gp,702
li a5,-1
auipc ra,0x4
addi ra,ra,-1100 # begin_signature
lb a4,0(ra)
li t2,-1
beq a4,t2,test_703
j fail
test_703:
li gp,703
li a5,0
auipc ra,0x4
addi ra,ra,-1132 # begin_signature
lb a4,1(ra)
li t2,0
beq a4,t2,test_704
j fail
test_704:
li gp,704
li a5,-16
auipc ra,0x4
addi ra,ra,-1164 # begin_signature
lb a4,2(ra)
li t2,-16
beq a4,t2,test_705
j fail
test_705:
li gp,705
li a5,15
auipc ra,0x4
addi ra,ra,-1196 # begin_signature
lb a4,3(ra)
li t2,15
beq a4,t2,test_706
j fail
test_706:
li gp,706
li a5,-1
auipc ra,0x4
addi ra,ra,-1225 # lb_tdat4
lb a4,-3(ra)
li t2,-1
beq a4,t2,test_707
j fail
test_707:
li gp,707
li a5,0
auipc ra,0x4
addi ra,ra,-1257 # lb_tdat4
lb a4,-2(ra)
li t2,0
beq a4,t2,test_708
j fail
test_708:
li gp,708
li a5,-16
auipc ra,0x4
addi ra,ra,-1289 # lb_tdat4
lb a4,-1(ra)
li t2,-16
beq a4,t2,test_709
j fail
test_709:
li gp,709
li a5,15
auipc ra,0x4
addi ra,ra,-1321 # lb_tdat4
lb a4,0(ra)
li t2,15
beq a4,t2,test_710
j fail
test_710:
li gp,710
auipc ra,0x4
addi ra,ra,-1352 # begin_signature
addi ra,ra,-32
lb t0,32(ra)
li t2,-1
beq t0,t2,test_711
j fail
test_711:
li gp,711
auipc ra,0x4
addi ra,ra,-1384 # begin_signature
addi ra,ra,-6
lb t0,7(ra)
li t2,0
beq t0,t2,test_712
j fail
test_712:
li gp,712
li tp,0
auipc ra,0x4
addi ra,ra,-1419 # lb_tdat2
lb a4,1(ra)
mv t1,a4
li t2,-16
beq t1,t2,test_712+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_712+0x8
test_713:
li gp,713
li tp,0
auipc ra,0x4
addi ra,ra,-1466 # lb_tdat3
lb a4,1(ra)
nop
mv t1,a4
li t2,15
beq t1,t2,test_713+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_713+0x8
test_714:
li gp,714
li tp,0
auipc ra,0x4
addi ra,ra,-1520 # begin_signature
lb a4,1(ra)
nop
nop
mv t1,a4
li t2,0
beq t1,t2,test_714+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_714+0x8
test_715:
li gp,715
li tp,0
auipc ra,0x4
addi ra,ra,-1575 # lb_tdat2
lb a4,1(ra)
li t2,-16
beq a4,t2,test_715+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_715+0x8
test_716:
li gp,716
li tp,0
auipc ra,0x4
addi ra,ra,-1618 # lb_tdat3
nop
lb a4,1(ra)
li t2,15
beq a4,t2,test_716+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_716+0x8
test_717:
li gp,717
li tp,0
auipc ra,0x4
addi ra,ra,-1668 # begin_signature
nop
nop
lb a4,1(ra)
li t2,0
beq a4,t2,test_717+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_717+0x8
test_718:
li gp,718
auipc t0,0x4
addi t0,t0,-1716 # begin_signature
lb sp,0(t0)
li sp,2
li t2,2
beq sp,t2,test_719
j fail
test_719:
li gp,719
auipc t0,0x4
addi t0,t0,-1748 # begin_signature
lb sp,0(t0)
nop
li sp,2
li t2,2
beq sp,t2,test_802
j fail
test_802:
li gp,802
auipc ra,0x4
addi ra,ra,-1768 # sb_tdat
li sp,-86
auipc a5,0x0
addi a5,a5,20 # test_802+0x24
sb sp,0(ra)
lb a4,0(ra)
j test_802+0x28
mv a4,sp
li t2,-86
beq a4,t2,test_803
j fail
test_803:
li gp,803
auipc ra,0x4
addi ra,ra,-1820 # sb_tdat
li sp,0
auipc a5,0x0
addi a5,a5,20 # test_803+0x24
sb sp,1(ra)
lb a4,1(ra)
j test_803+0x28
mv a4,sp
li t2,0
beq a4,t2,test_804
j fail
test_804:
li gp,804
auipc ra,0x4
addi ra,ra,-1872 # sb_tdat
li sp,-96
auipc a5,0x0
addi a5,a5,20 # test_804+0x24
sb sp,2(ra)
lb a4,2(ra)
j test_804+0x28
mv a4,sp
li t2,-96
beq a4,t2,test_805
j fail
test_805:
li gp,805
auipc ra,0x4
addi ra,ra,-1924 # sb_tdat
li sp,10
auipc a5,0x0
addi a5,a5,20 # test_805+0x24
sb sp,3(ra)
lb a4,3(ra)
j test_805+0x28
mv a4,sp
li t2,10
beq a4,t2,test_806
j fail
test_806:
li gp,806
auipc ra,0x4
addi ra,ra,-1969 # sb_tdat8
li sp,-86
auipc a5,0x0
addi a5,a5,20 # test_806+0x24
sb sp,-3(ra)
lb a4,-3(ra)
j test_806+0x28
mv a4,sp
li t2,-86
beq a4,t2,test_807
j fail
test_807:
li gp,807
auipc ra,0x4
addi ra,ra,-2021 # sb_tdat8
li sp,0
auipc a5,0x0
addi a5,a5,20 # test_807+0x24
sb sp,-2(ra)
lb a4,-2(ra)
j test_807+0x28
mv a4,sp
li t2,0
beq a4,t2,test_808
j fail
test_808:
li gp,808
auipc ra,0x3
addi ra,ra,2023 # sb_tdat8
li sp,-96
auipc a5,0x0
addi a5,a5,20 # test_808+0x24
sb sp,-1(ra)
lb a4,-1(ra)
j test_808+0x28
mv a4,sp
li t2,-96
beq a4,t2,test_809
j fail
test_809:
li gp,809
auipc ra,0x3
addi ra,ra,1971 # sb_tdat8
li sp,10
auipc a5,0x0
addi a5,a5,20 # test_809+0x24
sb sp,0(ra)
lb a4,0(ra)
j test_809+0x28
mv a4,sp
li t2,10
beq a4,t2,test_810
j fail
test_810:
li gp,810
auipc ra,0x3
addi ra,ra,1920 # sb_tdat9
lui sp,0x12345
addi sp,sp,1656 # _start-0x6dcba988
addi tp,ra,-32
sb sp,32(tp) # _start-0x7fffffe0
lb t0,0(ra)
li t2,120
beq t0,t2,test_811
j fail
test_811:
li gp,811
auipc ra,0x3
addi ra,ra,1876 # sb_tdat9
lui sp,0x3
addi sp,sp,152 # _start-0x7fffcf68
addi ra,ra,-6
sb sp,7(ra)
auipc tp,0x3
addi tp,tp,1853 # sb_tdat10
lb t0,0(tp) # _start-0x80000000
li t2,-104
beq t0,t2,test_812
j fail
test_812:
li gp,812
li tp,0
li ra,-35
auipc sp,0x3
addi sp,sp,1808 # sb_tdat
sb ra,0(sp)
lb a4,0(sp)
li t2,-35
beq a4,t2,test_812+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_812+0x8
test_813:
li gp,813
li tp,0
li ra,-51
auipc sp,0x3
addi sp,sp,1756 # sb_tdat
nop
sb ra,1(sp)
lb a4,1(sp)
li t2,-51
beq a4,t2,test_813+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_813+0x8
test_814:
li gp,814
li tp,0
li ra,-52
auipc sp,0x3
addi sp,sp,1700 # sb_tdat
nop
nop
sb ra,2(sp)
lb a4,2(sp)
li t2,-52
beq a4,t2,test_814+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_814+0x8
test_815:
li gp,815
li tp,0
li ra,-68
nop
auipc sp,0x3
addi sp,sp,1636 # sb_tdat
sb ra,3(sp)
lb a4,3(sp)
li t2,-68
beq a4,t2,test_815+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_815+0x8
test_816:
li gp,816
li tp,0
li ra,-69
nop
auipc sp,0x3
addi sp,sp,1580 # sb_tdat
nop
sb ra,4(sp)
lb a4,4(sp)
li t2,-69
beq a4,t2,test_816+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_816+0x8
test_817:
li gp,817
li tp,0
li ra,-85
nop
nop
auipc sp,0x3
addi sp,sp,1516 # sb_tdat
sb ra,5(sp)
lb a4,5(sp)
li t2,-85
beq a4,t2,test_817+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_817+0x8
test_818:
li gp,818
li tp,0
auipc sp,0x3
addi sp,sp,1468 # sb_tdat
li ra,51
sb ra,0(sp)
lb a4,0(sp)
li t2,51
beq a4,t2,test_818+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_818+0x8
test_819:
li gp,819
li tp,0
auipc sp,0x3
addi sp,sp,1416 # sb_tdat
li ra,35
nop
sb ra,1(sp)
lb a4,1(sp)
li t2,35
beq a4,t2,test_819+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_819+0x8
test_820:
li gp,820
li tp,0
auipc sp,0x3
addi sp,sp,1360 # sb_tdat
li ra,34
nop
nop
sb ra,2(sp)
lb a4,2(sp)
li t2,34
beq a4,t2,test_820+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_820+0x8
test_821:
li gp,821
li tp,0
auipc sp,0x3
addi sp,sp,1300 # sb_tdat
nop
li ra,18
sb ra,3(sp)
lb a4,3(sp)
li t2,18
beq a4,t2,test_821+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_821+0x8
test_822:
li gp,822
li tp,0
auipc sp,0x3
addi sp,sp,1244 # sb_tdat
nop
li ra,17
nop
sb ra,4(sp)
lb a4,4(sp)
li t2,17
beq a4,t2,test_822+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_822+0x8
test_823:
li gp,823
li tp,0
auipc sp,0x3
addi sp,sp,1184 # sb_tdat
nop
nop
li ra,1
sb ra,5(sp)
lb a4,5(sp)
li t2,1
beq a4,t2,test_823+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_823+0x8
li a0,239
auipc a1,0x3
addi a1,a1,1128 # sb_tdat
sb a0,3(a1)
test_902:
li gp,902
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq a4,t2,test_903
j fail
test_903:
li gp,903
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
xor a4,ra,sp
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_904
j fail
test_904:
li gp,904
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_905
j fail
test_905:
li gp,905
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
xor a4,ra,sp
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq a4,t2,test_906
j fail
test_906:
li gp,906
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor ra,ra,sp
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq ra,t2,test_907
j fail
test_907:
li gp,907
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor sp,ra,sp
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq sp,t2,test_908
j fail
test_908:
li gp,908
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
xor ra,ra,ra
li t2,0
beq ra,t2,test_909
j fail
test_909:
li gp,909
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_909+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq t1,t2,test_910
j fail
test_910:
li gp,910
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
xor a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_910+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq t1,t2,test_911
j fail
test_911:
li gp,911
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_911+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq t1,t2,test_912
j fail
test_912:
li gp,912
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_912+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq a4,t2,test_913
j fail
test_913:
li gp,913
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_913+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_914
j fail
test_914:
li gp,914
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_914+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_915
j fail
test_915:
li gp,915
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_915+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq a4,t2,test_916
j fail
test_916:
li gp,916
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_916+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_917
j fail
test_917:
li gp,917
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_917+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_918
j fail
test_918:
li gp,918
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_918+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq a4,t2,test_919
j fail
test_919:
li gp,919
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_919+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_920
j fail
test_920:
li gp,920
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_920+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_921
j fail
test_921:
li gp,921
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_921+0x8
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq a4,t2,test_922
j fail
test_922:
li gp,922
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_922+0x8
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_923
j fail
test_923:
li gp,923
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
xor a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_923+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_924
j fail
test_924:
li gp,924
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
xor sp,zero,ra
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq sp,t2,test_925
j fail
test_925:
li gp,925
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
xor sp,ra,zero
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq sp,t2,test_926
j fail
test_926:
li gp,926
xor ra,zero,zero
li t2,0
beq ra,t2,test_927
j fail
test_927:
li gp,927
lui ra,0x11111
addi ra,ra,273 # _start-0x6eeeeeef
lui sp,0x22222
addi sp,sp,546 # _start-0x5dddddde
xor zero,ra,sp
li t2,0
beq zero,t2,test_1002
j fail
test_1002:
li gp,1002
li ra,0
li sp,1
bne ra,sp,test_1002+0x1c
beq zero,gp,test_1002+0x18
j fail
bne zero,gp,test_1003
bne ra,sp,test_1002+0x18
beq zero,gp,test_1003
j fail
test_1003:
li gp,1003
li ra,1
li sp,0
bne ra,sp,test_1003+0x1c
beq zero,gp,test_1003+0x18
j fail
bne zero,gp,test_1004
bne ra,sp,test_1003+0x18
beq zero,gp,test_1004
j fail
test_1004:
li gp,1004
li ra,-1
li sp,1
bne ra,sp,test_1004+0x1c
beq zero,gp,test_1004+0x18
j fail
bne zero,gp,test_1005
bne ra,sp,test_1004+0x18
beq zero,gp,test_1005
j fail
test_1005:
li gp,1005
li ra,1
li sp,-1
bne ra,sp,test_1005+0x1c
beq zero,gp,test_1005+0x18
j fail
bne zero,gp,test_1006
bne ra,sp,test_1005+0x18
beq zero,gp,test_1006
j fail
test_1006:
li gp,1006
li ra,0
li sp,0
bne ra,sp,test_1006+0x14
bne zero,gp,test_1006+0x1c
beq zero,gp,test_1006+0x1c
j fail
bne ra,sp,test_1006+0x14
test_1007:
li gp,1007
li ra,1
li sp,1
bne ra,sp,test_1007+0x14
bne zero,gp,test_1007+0x1c
beq zero,gp,test_1007+0x1c
j fail
bne ra,sp,test_1007+0x14
test_1008:
li gp,1008
li ra,-1
li sp,-1
bne ra,sp,test_1008+0x14
bne zero,gp,test_1008+0x1c
beq zero,gp,test_1008+0x1c
j fail
bne ra,sp,test_1008+0x14
test_1009:
li gp,1009
li tp,0
li ra,0
li sp,0
beq ra,sp,test_1009+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1009+0x8
test_1010:
li gp,1010
li tp,0
li ra,0
li sp,0
nop
beq ra,sp,test_1010+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1010+0x8
test_1011:
li gp,1011
li tp,0
li ra,0
li sp,0
nop
nop
beq ra,sp,test_1011+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1011+0x8
test_1012:
li gp,1012
li tp,0
li ra,0
nop
li sp,0
beq ra,sp,test_1012+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1012+0x8
test_1013:
li gp,1013
li tp,0
li ra,0
nop
li sp,0
nop
beq ra,sp,test_1013+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1013+0x8
test_1014:
li gp,1014
li tp,0
li ra,0
nop
nop
li sp,0
beq ra,sp,test_1014+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1014+0x8
test_1015:
li gp,1015
li tp,0
li ra,0
li sp,0
beq ra,sp,test_1015+0x18
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1015+0x8
test_1016:
li gp,1016
li tp,0
li ra,0
li sp,0
nop
beq ra,sp,test_1016+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1016+0x8
test_1017:
li gp,1017
li tp,0
li ra,0
li sp,0
nop
nop
beq ra,sp,test_1017+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1017+0x8
test_1018:
li gp,1018
li tp,0
li ra,0
nop
li sp,0
beq ra,sp,test_1018+0x1c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1018+0x8
test_1019:
li gp,1019
li tp,0
li ra,0
nop
li sp,0
nop
beq ra,sp,test_1019+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1019+0x8
test_1020:
li gp,1020
li tp,0
li ra,0
nop
nop
li sp,0
beq ra,sp,test_1020+0x20
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1020+0x8
test_1021:
li gp,1021
li ra,1
bnez ra,test_1021+0x1c
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3
beq ra,t2,jal_test_2
j fail
jal_test_2:
li gp,1102
li ra,0
jal tp,jal_target_2
jal_linkaddr_2:
nop
nop
j fail
jal_target_2:
auipc sp,0x0
addi sp,sp,-12 # jal_linkaddr_2
beq sp,tp,test_1103
j fail
test_1103:
li gp,1103
li ra,1
j test_1103+0x1c
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3
beq ra,t2,test_1202
j fail
test_1202:
li gp,1202
lui a5,0xff0
addi a5,a5,255 # _start-0x7f00ff01
auipc ra,0x3
addi ra,ra,-1068 # lw_tdat
lw a4,0(ra)
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq a4,t2,test_1203
j fail
test_1203:
li gp,1203
lui a5,0xff010
addi a5,a5,-256 # _end+0x7f00ae50
auipc ra,0x3
addi ra,ra,-1108 # lw_tdat
lw a4,4(ra)
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_1204
j fail
test_1204:
li gp,1204
lui a5,0xff01
addi a5,a5,-16 # _start-0x700ff010
auipc ra,0x3
addi ra,ra,-1148 # lw_tdat
lw a4,8(ra)
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1205
j fail
test_1205:
li gp,1205
lui a5,0xf00ff
addi a5,a5,15 # _end+0x700f9f5f
auipc ra,0x3
addi ra,ra,-1188 # lw_tdat
lw a4,12(ra)
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq a4,t2,test_1206
j fail
test_1206:
li gp,1206
lui a5,0xff0
addi a5,a5,255 # _start-0x7f00ff01
auipc ra,0x3
addi ra,ra,-1216 # lw_tdat4
lw a4,-12(ra)
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq a4,t2,test_1207
j fail
test_1207:
li gp,1207
lui a5,0xff010
addi a5,a5,-256 # _end+0x7f00ae50
auipc ra,0x3
addi ra,ra,-1256 # lw_tdat4
lw a4,-8(ra)
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_1208
j fail
test_1208:
li gp,1208
lui a5,0xff01
addi a5,a5,-16 # _start-0x700ff010
auipc ra,0x3
addi ra,ra,-1296 # lw_tdat4
lw a4,-4(ra)
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1209
j fail
test_1209:
li gp,1209
lui a5,0xf00ff
addi a5,a5,15 # _end+0x700f9f5f
auipc ra,0x3
addi ra,ra,-1336 # lw_tdat4
lw a4,0(ra)
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq a4,t2,test_1210
j fail
test_1210:
li gp,1210
auipc ra,0x3
addi ra,ra,-1380 # lw_tdat
addi ra,ra,-32
lw t0,32(ra)
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
beq t0,t2,test_1211
j fail
test_1211:
li gp,1211
auipc ra,0x3
addi ra,ra,-1416 # lw_tdat
addi ra,ra,-3
lw t0,7(ra)
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq t0,t2,test_1212
j fail
test_1212:
li gp,1212
li tp,0
auipc ra,0x3
addi ra,ra,-1452 # lw_tdat2
lw a4,4(ra)
mv t1,a4
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq t1,t2,test_1212+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1212+0x8
test_1213:
li gp,1213
li tp,0
auipc ra,0x3
addi ra,ra,-1500 # lw_tdat3
lw a4,4(ra)
nop
mv t1,a4
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq t1,t2,test_1213+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1213+0x8
test_1214:
li gp,1214
li tp,0
auipc ra,0x3
addi ra,ra,-1564 # lw_tdat
lw a4,4(ra)
nop
nop
mv t1,a4
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq t1,t2,test_1214+0x30
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1214+0x8
test_1215:
li gp,1215
li tp,0
auipc ra,0x3
addi ra,ra,-1620 # lw_tdat2
lw a4,4(ra)
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
beq a4,t2,test_1215+0x24
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1215+0x8
test_1216:
li gp,1216
li tp,0
auipc ra,0x3
addi ra,ra,-1664 # lw_tdat3
nop
lw a4,4(ra)
lui t2,0xf00ff
addi t2,t2,15 # _end+0x700f9f5f
beq a4,t2,test_1216+0x28
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1216+0x8
test_1217:
li gp,1217
li tp,0
auipc ra,0x3
addi ra,ra,-1724 # lw_tdat
nop
nop
lw a4,4(ra)
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq a4,t2,test_1217+0x2c
j fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1217+0x8
test_1218:
li gp,1218
auipc t0,0x3
addi t0,t0,-1776 # lw_tdat
lw sp,0(t0)
li sp,2
li t2,2
beq sp,t2,test_1219
j fail
test_1219:
li gp,1219
auipc t0,0x3
addi t0,t0,-1808 # lw_tdat
lw sp,0(t0)
nop
li sp,2
li t2,2
beq sp,t2,test_1302
j fail
test_1302:
li gp,1302
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0fae5f
beq a4,t2,test_1303
j fail
test_1303:
li gp,1303
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
or a4,ra,sp
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff0af40
beq a4,t2,test_1304
j fail
test_1304:
li gp,1304
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
beq a4,t2,test_1305
j fail
test_1305:
li gp,1305
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
or a4,ra,sp
lui t2,0xf0fff
addi t2,t2,255 # _end+0x70ffa04f
beq a4,t2,test_1306
j fail
test_1306:
li gp,1306
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or ra,ra,sp
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0fae5f
beq ra,t2,test_1307
j fail
test_1307:
li gp,1307
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or sp,ra,sp
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0fae5f
beq sp,t2,test_1308
j fail
test_1308:
li gp,1308
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
or ra,ra,ra
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
beq ra,t2,test_1309
j fail
test_1309:
li gp,1309
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1309+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0fae5f
bne t1,t2,fail
test_1310:
li gp,1310
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
or a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1310+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff0af40
bne t1,t2,fail
test_1311:
li gp,1311
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1311+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
bne t1,t2,fail
test_1312:
li gp,1312
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1312+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0fae5f
bne a4,t2,fail
test_1313:
li gp,1313
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1313+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff0af40
bne a4,t2,fail
test_1314:
li gp,1314
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1314+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
bne a4,t2,fail
test_1315:
li gp,1315
li tp,0
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1315+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0fae5f
bne a4,t2,fail
test_1316:
li gp,1316
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1316+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff0af40
bne a4,t2,fail
test_1317:
li gp,1317
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1317+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
bne a4,t2,fail
test_1318:
li gp,1318
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1318+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0fae5f
bne a4,t2,fail
test_1319:
li gp,1319
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1319+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff0af40
bne a4,t2,fail
test_1320:
li gp,1320
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1320+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
bne a4,t2,fail
test_1321:
li gp,1321
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1321+0x8
lui t2,0xff100
addi t2,t2,-241 # _end+0x7f0fae5f
bne a4,t2,fail
test_1322:
li gp,1322
li tp,0
lui sp,0xf0f0f
addi sp,sp,240 # _end+0x70f0a040
nop
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
nop
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1322+0x8
lui t2,0xfff10
addi t2,t2,-16 # _end+0x7ff0af40
bne a4,t2,fail
test_1323:
li gp,1323
li tp,0
lui sp,0xf0f1
addi sp,sp,-241 # _start-0x70f0f0f1
nop
nop
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
or a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1323+0x8
lui t2,0xfff1
addi t2,t2,-1 # _start-0x7000f001
bne a4,t2,fail
test_1324:
li gp,1324
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
or sp,zero,ra
lui t2,0xff010
addi t2,t2,-256 # _end+0x7f00ae50
bne sp,t2,fail
test_1325:
li gp,1325
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
or sp,ra,zero
lui t2,0xff0
addi t2,t2,255 # _start-0x7f00ff01
bne sp,t2,fail
test_1326:
li gp,1326
or ra,zero,zero
li t2,0
bne ra,t2,fail
test_1327:
li gp,1327
lui ra,0x11111
addi ra,ra,273 # _start-0x6eeeeeef
lui sp,0x22222
addi sp,sp,546 # _start-0x5dddddde
or zero,ra,sp
li t2,0
bne zero,t2,fail
test_1402:
li gp,1402
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
ori a4,ra,-241
li t2,-241
bne a4,t2,fail
test_1403:
li gp,1403
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
ori a4,ra,240
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
bne a4,t2,fail
test_1404:
li gp,1404
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
ori a4,ra,1807
lui t2,0xff0
addi t2,t2,2047 # _start-0x7f00f801
bne a4,t2,fail
test_1405:
li gp,1405
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
ori a4,ra,240
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700fa04f
bne a4,t2,fail
test_1406:
li gp,1406
lui ra,0xff010
addi ra,ra,-256 # _end+0x7f00ae50
ori ra,ra,240
lui t2,0xff010
addi t2,t2,-16 # _end+0x7f00af40
bne ra,t2,fail
test_1407:
li gp,1407
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
ori a4,ra,240
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1407+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
bne t1,t2,fail
test_1408:
li gp,1408
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
ori a4,ra,1807
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1408+0x8
lui t2,0xff0
addi t2,t2,2047 # _start-0x7f00f801
bne t1,t2,fail
test_1409:
li gp,1409
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
ori a4,ra,240
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1409+0x8
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700fa04f
bne t1,t2,fail
test_1410:
li gp,1410
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
ori a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1410+0x8
lui t2,0xff01
addi t2,t2,-16 # _start-0x700ff010
bne a4,t2,fail
test_1411:
li gp,1411
li tp,0
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
nop
ori a4,ra,-241
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1411+0x8
li t2,-1
bne a4,t2,fail
test_1412:
li gp,1412
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700f9f5f
nop
nop
ori a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1412+0x8
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700fa04f
bne a4,t2,fail
test_1413:
li gp,1413
ori ra,zero,240
li t2,240
bne ra,t2,fail
test_1414:
li gp,1414
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
ori zero,ra,1807
li t2,0
bne zero,t2,fail
test_1502:
li gp,1502
li ra,1
slli a4,ra,0x0
li t2,1
bne a4,t2,fail
test_1503:
li gp,1503
li ra,1
slli a4,ra,0x1
li t2,2
bne a4,t2,fail
test_1504:
li gp,1504
li ra,1
slli a4,ra,0x7
li t2,128
bne a4,t2,fail
test_1505:
li gp,1505
li ra,1
slli a4,ra,0xe
lui t2,0x4
bne a4,t2,fail
test_1506:
li gp,1506
li ra,1
slli a4,ra,0x1f
lui t2,0x80000
bne a4,t2,fail
test_1507:
li gp,1507
li ra,-1
slli a4,ra,0x0
li t2,-1
bne a4,t2,fail
test_1508:
li gp,1508
li ra,-1
slli a4,ra,0x1
li t2,-2
bne a4,t2,fail
test_1509:
li gp,1509
li ra,-1
slli a4,ra,0x7
li t2,-128
bne a4,t2,fail
test_1510:
li gp,1510
li ra,-1
slli a4,ra,0xe
lui t2,0xffffc
bne a4,t2,fail
test_1511:
li gp,1511
li ra,-1
slli a4,ra,0x1f
lui t2,0x80000
bne a4,t2,fail
test_1512:
li gp,1512
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0x0
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
bne a4,t2,fail
test_1513:
li gp,1513
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0x1
lui t2,0x42424
addi t2,t2,578 # _start-0x3dbdbdbe
bne a4,t2,fail
test_1514:
li gp,1514
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0x7
lui t2,0x90909
addi t2,t2,128 # _end+0x10903fd0
bne a4,t2,fail
test_1515:
li gp,1515
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0xe
lui t2,0x48484
bne a4,t2,fail
test_1516:
li gp,1516
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
slli a4,ra,0x1f
lui t2,0x80000
bne a4,t2,fail
test_1517:
li gp,1517
li ra,1
slli ra,ra,0x7
li t2,128
bne ra,t2,fail
test_1518:
li gp,1518
li tp,0
li ra,1
slli a4,ra,0x7
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1518+0x8
li t2,128
bne t1,t2,fail
test_1519:
li gp,1519
li tp,0
li ra,1
slli a4,ra,0xe
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1519+0x8
lui t2,0x4
bne t1,t2,fail
test_1520:
li gp,1520
li tp,0
li ra,1
slli a4,ra,0x1f
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1520+0x8
lui t2,0x80000
bne t1,t2,fail
test_1521:
li gp,1521
li tp,0
li ra,1
slli a4,ra,0x7
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1521+0x8
li t2,128
bne a4,t2,fail
test_1522:
li gp,1522
li tp,0
li ra,1
nop
slli a4,ra,0xe
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1522+0x8
lui t2,0x4
bne a4,t2,fail
test_1523:
li gp,1523
li tp,0
li ra,1
nop
nop
slli a4,ra,0x1f
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1523+0x8
lui t2,0x80000
bne a4,t2,fail
test_1524:
li gp,1524
slli ra,zero,0x1f
li t2,0
bne ra,t2,fail
test_1525:
li gp,1525
li ra,33
slli zero,ra,0x14
li t2,0
bne zero,t2,fail
test_1602:
li gp,1602
lui ra,0x80000
srli a4,ra,0x0
lui t2,0x80000
bne a4,t2,fail
test_1603:
li gp,1603
lui ra,0x80000
srli a4,ra,0x1
lui t2,0x40000
bne a4,t2,fail
test_1604:
li gp,1604
lui ra,0x80000
srli a4,ra,0x7
lui t2,0x1000
bne a4,t2,fail
test_1605:
li gp,1605
lui ra,0x80000
srli a4,ra,0xe
lui t2,0x20
bne a4,t2,fail
test_1606:
li gp,1606
lui ra,0x80000
addi ra,ra,1 # _end+0xffffaf51
srli a4,ra,0x1f
li t2,1
bne a4,t2,fail
test_1607:
li gp,1607
li ra,-1
srli a4,ra,0x0
li t2,-1
bne a4,t2,fail
test_1608:
li gp,1608
li ra,-1
srli a4,ra,0x1
lui t2,0x80000
addi t2,t2,-1 # _end+0xffffaf4f
bne a4,t2,fail
test_1609:
li gp,1609
li ra,-1
srli a4,ra,0x7
lui t2,0x2000
addi t2,t2,-1 # _start-0x7e000001
bne a4,t2,fail
test_1610:
li gp,1610
li ra,-1
srli a4,ra,0xe
lui t2,0x40
addi t2,t2,-1 # _start-0x7ffc0001
bne a4,t2,fail
test_1611:
li gp,1611
li ra,-1
srli a4,ra,0x1f
li t2,1
bne a4,t2,fail
test_1612:
li gp,1612
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0x0
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
bne a4,t2,fail
test_1613:
li gp,1613
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0x1
lui t2,0x10909
addi t2,t2,144 # _start-0x6f6f6f70
bne a4,t2,fail
test_1614:
li gp,1614
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0x7
lui t2,0x424
addi t2,t2,578 # _start-0x7fbdbdbe
bne a4,t2,fail
test_1615:
li gp,1615
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0xe
lui t2,0x8
addi t2,t2,1156 # _start-0x7fff7b7c
bne a4,t2,fail
test_1616:
li gp,1616
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
srli a4,ra,0x1f
li t2,0
bne a4,t2,fail
test_1617:
li gp,1617
lui ra,0x80000
srli ra,ra,0x7
lui t2,0x1000
bne ra,t2,fail
test_1618:
li gp,1618
li tp,0
lui ra,0x80000
srli a4,ra,0x7
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1618+0x8
lui t2,0x1000
bne t1,t2,fail
test_1619:
li gp,1619
li tp,0
lui ra,0x80000
srli a4,ra,0xe
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1619+0x8
lui t2,0x20
bne t1,t2,fail
test_1620:
li gp,1620
li tp,0
lui ra,0x80000
addi ra,ra,1 # _end+0xffffaf51
srli a4,ra,0x1f
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1620+0x8
li t2,1
bne t1,t2,fail
test_1621:
li gp,1621
li tp,0
lui ra,0x80000
srli a4,ra,0x7
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1621+0x8
lui t2,0x1000
bne a4,t2,fail
test_1622:
li gp,1622
li tp,0
lui ra,0x80000
nop
srli a4,ra,0xe
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1622+0x8
lui t2,0x20
bne a4,t2,fail
test_1623:
li gp,1623
li tp,0
lui ra,0x80000
addi ra,ra,1 # _end+0xffffaf51
nop
nop
srli a4,ra,0x1f
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1623+0x8
li t2,1
bne a4,t2,fail
test_1624:
li gp,1624
srli ra,zero,0x4
li t2,0
bne ra,t2,fail
test_1625:
li gp,1625
li ra,33
srli zero,ra,0xa
li t2,0
bne zero,t2,fail
test_1702:
li gp,1702
lui ra,0x0
li t2,0
bne ra,t2,fail
test_1703:
li gp,1703
lui ra,0xfffff
srli ra,ra,0x1
lui t2,0x80000
addi t2,t2,-2048 # _end+0xffffa750
bne ra,t2,fail
test_1704:
li gp,1704
lui ra,0x7ffff
srli ra,ra,0x14
li t2,2047
bne ra,t2,fail
test_1705:
li gp,1705
lui ra,0x80000
srli ra,ra,0x14
lui t2,0x1
addi t2,t2,-2048 # _start-0x7ffff800
bne ra,t2,fail
test_1706:
li gp,1706
lui zero,0x80000
li t2,0
bne zero,t2,fail
test_1802:
li gp,1802
auipc ra,0x2
addi ra,ra,-792 # sw_tdat
lui sp,0xaa0
addi sp,sp,170 # _start-0x7f55ff56
auipc a5,0x0
addi a5,a5,20 # test_1802+0x28
sw sp,0(ra)
lw a4,0(ra)
j test_1802+0x2c
mv a4,sp
lui t2,0xaa0
addi t2,t2,170 # _start-0x7f55ff56
bne a4,t2,fail
test_1803:
li gp,1803
auipc ra,0x2
addi ra,ra,-848 # sw_tdat
lui sp,0xaa00b
addi sp,sp,-1536 # _end+0x2a005950
auipc a5,0x0
addi a5,a5,20 # test_1803+0x28
sw sp,4(ra)
lw a4,4(ra)
j test_1803+0x2c
mv a4,sp
lui t2,0xaa00b
addi t2,t2,-1536 # _end+0x2a005950
bne a4,t2,fail
test_1804:
li gp,1804
auipc ra,0x2
addi ra,ra,-904 # sw_tdat
lui sp,0xaa01
addi sp,sp,-1376 # _start-0x755ff560
auipc a5,0x0
addi a5,a5,20 # test_1804+0x28
sw sp,8(ra)
lw a4,8(ra)
j test_1804+0x2c
mv a4,sp
lui t2,0xaa01
addi t2,t2,-1376 # _start-0x755ff560
bne a4,t2,fail
test_1805:
li gp,1805
auipc ra,0x2
addi ra,ra,-960 # sw_tdat
lui sp,0xa00aa
addi sp,sp,10 # _end+0x200a4f5a
auipc a5,0x0
addi a5,a5,20 # test_1805+0x28
sw sp,12(ra)
lw a4,12(ra)
j test_1805+0x2c
mv a4,sp
lui t2,0xa00aa
addi t2,t2,10 # _end+0x200a4f5a
bne a4,t2,fail
test_1806:
li gp,1806
auipc ra,0x2
addi ra,ra,-988 # sw_tdat8
lui sp,0xaa0
addi sp,sp,170 # _start-0x7f55ff56
auipc a5,0x0
addi a5,a5,20 # test_1806+0x28
sw sp,-12(ra)
lw a4,-12(ra)
j test_1806+0x2c
mv a4,sp
lui t2,0xaa0
addi t2,t2,170 # _start-0x7f55ff56
bne a4,t2,fail
test_1807:
li gp,1807
auipc ra,0x2
addi ra,ra,-1044 # sw_tdat8
lui sp,0xaa00b
addi sp,sp,-1536 # _end+0x2a005950
auipc a5,0x0
addi a5,a5,20 # test_1807+0x28
sw sp,-8(ra)
lw a4,-8(ra)
j test_1807+0x2c
mv a4,sp
lui t2,0xaa00b
addi t2,t2,-1536 # _end+0x2a005950
bne a4,t2,fail
test_1808:
li gp,1808
auipc ra,0x2
addi ra,ra,-1100 # sw_tdat8
lui sp,0xaa01
addi sp,sp,-1376 # _start-0x755ff560
auipc a5,0x0
addi a5,a5,20 # test_1808+0x28
sw sp,-4(ra)
lw a4,-4(ra)
j test_1808+0x2c
mv a4,sp
lui t2,0xaa01
addi t2,t2,-1376 # _start-0x755ff560
bne a4,t2,fail
test_1809:
li gp,1809
auipc ra,0x2
addi ra,ra,-1156 # sw_tdat8
lui sp,0xa00aa
addi sp,sp,10 # _end+0x200a4f5a
auipc a5,0x0
addi a5,a5,20 # test_1809+0x28
sw sp,0(ra)
lw a4,0(ra)
j test_1809+0x2c
mv a4,sp
lui t2,0xa00aa
addi t2,t2,10 # _end+0x200a4f5a
bne a4,t2,fail
test_1810:
li gp,1810
auipc ra,0x2
addi ra,ra,-1208 # sw_tdat9
lui sp,0x12345
addi sp,sp,1656 # _start-0x6dcba988
addi tp,ra,-32
sw sp,32(tp) # _start-0x7fffffe0
lw t0,0(ra)
lui t2,0x12345
addi t2,t2,1656 # _start-0x6dcba988
bne t0,t2,fail
test_1811:
li gp,1811
auipc ra,0x2
addi ra,ra,-1252 # sw_tdat9
lui sp,0x58213
addi sp,sp,152 # _start-0x27decf68
addi ra,ra,-3
sw sp,7(ra)
auipc tp,0x2
addi tp,tp,-1272 # sw_tdat10
lw t0,0(tp) # _start-0x80000000
lui t2,0x58213
addi t2,t2,152 # _start-0x27decf68
bne t0,t2,fail
test_1812:
li gp,1812
li tp,0
lui ra,0xaabbd
addi ra,ra,-803 # _end+0x2abb7c2d
auipc sp,0x2
addi sp,sp,-1348 # sw_tdat
sw ra,0(sp)
lw a4,0(sp)
lui t2,0xaabbd
addi t2,t2,-803 # _end+0x2abb7c2d
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1812+0x8
test_1813:
li gp,1813
li tp,0
lui ra,0xdaabc
addi ra,ra,-819 # _end+0x5aab6c1d
auipc sp,0x2
addi sp,sp,-1404 # sw_tdat
nop
sw ra,4(sp)
lw a4,4(sp)
lui t2,0xdaabc
addi t2,t2,-819 # _end+0x5aab6c1d
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1813+0x8
test_1814:
li gp,1814
li tp,0
lui ra,0xddaac
addi ra,ra,-1076 # _end+0x5daa6b1c
auipc sp,0x2
addi sp,sp,-1464 # sw_tdat
nop
nop
sw ra,8(sp)
lw a4,8(sp)
lui t2,0xddaac
addi t2,t2,-1076 # _end+0x5daa6b1c
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1814+0x8
test_1815:
li gp,1815
li tp,0
lui ra,0xcddab
addi ra,ra,-1092 # _end+0x4dda5b0c
nop
auipc sp,0x2
addi sp,sp,-1532 # sw_tdat
sw ra,12(sp)
lw a4,12(sp)
lui t2,0xcddab
addi t2,t2,-1092 # _end+0x4dda5b0c
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1815+0x8
test_1816:
li gp,1816
li tp,0
lui ra,0xccddb
addi ra,ra,-1349 # _end+0x4cdd5a0b
nop
auipc sp,0x2
addi sp,sp,-1592 # sw_tdat
nop
sw ra,16(sp)
lw a4,16(sp)
lui t2,0xccddb
addi t2,t2,-1349 # _end+0x4cdd5a0b
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1816+0x8
test_1817:
li gp,1817
li tp,0
lui ra,0xbccde
addi ra,ra,-1365 # _end+0x3ccd89fb
nop
nop
auipc sp,0x2
addi sp,sp,-1660 # sw_tdat
sw ra,20(sp)
lw a4,20(sp)
lui t2,0xbccde
addi t2,t2,-1365 # _end+0x3ccd89fb
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1817+0x8
test_1818:
li gp,1818
li tp,0
auipc sp,0x2
addi sp,sp,-1708 # sw_tdat
lui ra,0x112
addi ra,ra,563 # _start-0x7feeddcd
sw ra,0(sp)
lw a4,0(sp)
lui t2,0x112
addi t2,t2,563 # _start-0x7feeddcd
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1818+0x8
test_1819:
li gp,1819
li tp,0
auipc sp,0x2
addi sp,sp,-1764 # sw_tdat
lui ra,0x30011
addi ra,ra,547 # _start-0x4ffeeddd
nop
sw ra,4(sp)
lw a4,4(sp)
lui t2,0x30011
addi t2,t2,547 # _start-0x4ffeeddd
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1819+0x8
test_1820:
li gp,1820
li tp,0
auipc sp,0x2
addi sp,sp,-1824 # sw_tdat
lui ra,0x33001
addi ra,ra,290 # _start-0x4cffeede
nop
nop
sw ra,8(sp)
lw a4,8(sp)
lui t2,0x33001
addi t2,t2,290 # _start-0x4cffeede
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1820+0x8
test_1821:
li gp,1821
li tp,0
auipc sp,0x2
addi sp,sp,-1888 # sw_tdat
nop
lui ra,0x23300
addi ra,ra,274 # _start-0x5ccffeee
sw ra,12(sp)
lw a4,12(sp)
lui t2,0x23300
addi t2,t2,274 # _start-0x5ccffeee
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1821+0x8
test_1822:
li gp,1822
li tp,0
auipc sp,0x2
addi sp,sp,-1948 # sw_tdat
nop
lui ra,0x22330
addi ra,ra,17 # _start-0x5dccffef
nop
sw ra,16(sp)
lw a4,16(sp)
lui t2,0x22330
addi t2,t2,17 # _start-0x5dccffef
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1822+0x8
test_1823:
li gp,1823
li tp,0
auipc sp,0x2
addi sp,sp,-2012 # sw_tdat
nop
nop
lui ra,0x12233
addi ra,ra,1 # _start-0x6ddccfff
sw ra,20(sp)
lw a4,20(sp)
lui t2,0x12233
addi t2,t2,1 # _start-0x6ddccfff
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_1823+0x8
bne zero,gp,pass
.data
begin_signature:
.byte 0xff
lb_tdat2:
.byte 0x00
lb_tdat3:
.byte 0xf0
lb_tdat4:
.byte 0x0f
.zero 12
sb_tdat:
.byte 0
sb_tdat2:
.byte 0
sb_tdat3:
.byte 0
sb_tdat4:
.byte 0
sb_tdat5:
.byte 0
sb_tdat6:
.byte 0
sb_tdat7:
.byte 0
sb_tdat8:
.byte 0
sb_tdat9:
.byte 0
sb_tdat10:
.byte 0
.zero 6
lh_tdat:
.half 0
lh_tdat2:
.half 0
lh_tdat3:
.half 0
lh_tdat4:
.half 0
.zero 8
lw_tdat:
.word 0x00ff00ff
lw_tdat2:
.word 0xff00ff00
lw_tdat3:
.word 0x0ff00ff0
lw_tdat4:
.word 0xf00ff00f
sh_tdat:
.half 0
sh_tdat2:
.half 0
sh_tdat3:
.half 0
sh_tdat4:
.half 0
sh_tdat5:
.half 0
sh_tdat6:
.half 0
sh_tdat7:
.half 0
sh_tdat8:
.half 0
sh_tdat9:
.half 0
sh_tdat10:
.half 0
.zero 12
lbu_tdat:
.byte 0
lbu_tdat2:
.byte 0
lbu_tdat3:
.byte 0
lbu_tdat4:
.byte 0
.zero 12
lhu_tdat:
.half 0
lhu_tdat2:
.half 0
lhu_tdat3:
.half 0
lhu_tdat4:
.half 0
.zero 8
sw_tdat:
.word 0
sw_tdat2:
.word 0
sw_tdat3:
.word 0
sw_tdat4:
.word 0
sw_tdat5:
.word 0
sw_tdat6:
.word 0
sw_tdat7:
.word 0
sw_tdat8:
.word 0
sw_tdat9:
.word 0
sw_tdat10:
.word 0
|
abmfy/cod23-grp04
| 3,771
|
asm/rv32i/lhu.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li a5,255
auipc ra,0x2
addi ra,ra,-208 # begin_signature
lhu a4,0(ra)
li t2,255
bne a4,t2,fail
test_3:
li gp,3
lui a5,0x10
addi a5,a5,-256 # _start-0x7fff0100
auipc ra,0x2
addi ra,ra,-240 # begin_signature
lhu a4,2(ra)
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne a4,t2,fail
test_4:
li gp,4
lui a5,0x1
addi a5,a5,-16 # _start-0x7ffff010
auipc ra,0x2
addi ra,ra,-276 # begin_signature
lhu a4,4(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
test_5:
li gp,5
lui a5,0xf
addi a5,a5,15 # _start-0x7fff0ff1
auipc ra,0x2
addi ra,ra,-312 # begin_signature
lhu a4,6(ra)
lui t2,0xf
addi t2,t2,15 # _start-0x7fff0ff1
bne a4,t2,fail
test_6:
li gp,6
li a5,255
auipc ra,0x2
addi ra,ra,-338 # tdat4
lhu a4,-6(ra)
li t2,255
bne a4,t2,fail
test_7:
li gp,7
lui a5,0x10
addi a5,a5,-256 # _start-0x7fff0100
auipc ra,0x2
addi ra,ra,-370 # tdat4
lhu a4,-4(ra)
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne a4,t2,fail
test_8:
li gp,8
lui a5,0x1
addi a5,a5,-16 # _start-0x7ffff010
auipc ra,0x2
addi ra,ra,-406 # tdat4
lhu a4,-2(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
test_9:
li gp,9
lui a5,0xf
addi a5,a5,15 # _start-0x7fff0ff1
auipc ra,0x2
addi ra,ra,-442 # tdat4
lhu a4,0(ra)
lui t2,0xf
addi t2,t2,15 # _start-0x7fff0ff1
bne a4,t2,fail
test_10:
li gp,10
auipc ra,0x2
addi ra,ra,-476 # begin_signature
addi ra,ra,-32
lhu t0,32(ra)
li t2,255
bne t0,t2,fail
test_11:
li gp,11
auipc ra,0x2
addi ra,ra,-504 # begin_signature
addi ra,ra,-5
lhu t0,7(ra)
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne t0,t2,fail
test_12:
li gp,12
li tp,0
auipc ra,0x2
addi ra,ra,-538 # tdat2
lhu a4,2(ra)
mv t1,a4
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
test_13:
li gp,13
li tp,0
auipc ra,0x2
addi ra,ra,-584 # tdat3
lhu a4,2(ra)
nop
mv t1,a4
lui t2,0xf
addi t2,t2,15 # _start-0x7fff0ff1
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_13+0x8
test_14:
li gp,14
li tp,0
auipc ra,0x2
addi ra,ra,-640 # begin_signature
lhu a4,2(ra)
nop
nop
mv t1,a4
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_14+0x8
test_15:
li gp,15
li tp,0
auipc ra,0x2
addi ra,ra,-694 # tdat2
lhu a4,2(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_15+0x8
test_16:
li gp,16
li tp,0
auipc ra,0x2
addi ra,ra,-736 # tdat3
nop
lhu a4,2(ra)
lui t2,0xf
addi t2,t2,15 # _start-0x7fff0ff1
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_16+0x8
test_17:
li gp,17
li tp,0
auipc ra,0x2
addi ra,ra,-788 # begin_signature
nop
nop
lhu a4,2(ra)
lui t2,0x10
addi t2,t2,-256 # _start-0x7fff0100
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_17+0x8
test_18:
li gp,18
auipc t0,0x2
addi t0,t0,-836 # begin_signature
lhu sp,0(t0)
li sp,2
li t2,2
bne sp,t2,fail
test_19:
li gp,19
auipc t0,0x2
addi t0,t0,-864 # begin_signature
lhu sp,0(t0)
nop
li sp,2
li t2,2
bne sp,t2,fail
bne zero,gp,pass
.data
begin_signature:
.half 0x00ff
tdat2:
.half 0xff00
tdat3:
.half 0x0ff0
tdat4:
.half 0xf00f
|
abmfy/cod23-grp04
| 2,832
|
asm/rv32i/xori.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
lui ra,0xff1
addi ra,ra,-256 # _start-0x7f00f100
xori a4,ra,-241
lui t2,0xff00f
addi t2,t2,15 # _end+0x7f00d00f
bne a4,t2,fail
test_3:
li gp,3
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
xori a4,ra,240
lui t2,0xff01
addi t2,t2,-256 # _start-0x700ff100
bne a4,t2,fail
test_4:
li gp,4
lui ra,0xff1
addi ra,ra,-1793 # _start-0x7f00f701
xori a4,ra,1807
lui t2,0xff1
addi t2,t2,-16 # _start-0x7f00f010
bne a4,t2,fail
test_5:
li gp,5
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700fd00f
xori a4,ra,240
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700fd0ff
bne a4,t2,fail
test_6:
li gp,6
lui ra,0xff00f
addi ra,ra,1792 # _end+0x7f00d700
xori ra,ra,1807
lui t2,0xff00f
addi t2,t2,15 # _end+0x7f00d00f
bne ra,t2,fail
test_7:
li gp,7
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
xori a4,ra,240
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_7+0x8
lui t2,0xff01
addi t2,t2,-256 # _start-0x700ff100
bne t1,t2,fail
test_8:
li gp,8
li tp,0
lui ra,0xff1
addi ra,ra,-1793 # _start-0x7f00f701
xori a4,ra,1807
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_8+0x8
lui t2,0xff1
addi t2,t2,-16 # _start-0x7f00f010
bne t1,t2,fail
test_9:
li gp,9
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700fd00f
xori a4,ra,240
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_9+0x8
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700fd0ff
bne t1,t2,fail
test_10:
li gp,10
li tp,0
lui ra,0xff01
addi ra,ra,-16 # _start-0x700ff010
xori a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_10+0x8
lui t2,0xff01
addi t2,t2,-256 # _start-0x700ff100
bne a4,t2,fail
test_11:
li gp,11
li tp,0
lui ra,0xff1
addi ra,ra,-1 # _start-0x7f00f001
nop
xori a4,ra,15
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_11+0x8
lui t2,0xff1
addi t2,t2,-16 # _start-0x7f00f010
bne a4,t2,fail
test_12:
li gp,12
li tp,0
lui ra,0xf00ff
addi ra,ra,15 # _end+0x700fd00f
nop
nop
xori a4,ra,240
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
lui t2,0xf00ff
addi t2,t2,255 # _end+0x700fd0ff
bne a4,t2,fail
test_13:
li gp,13
xori ra,zero,240
li t2,240
bne ra,t2,fail
test_14:
li gp,14
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
xori zero,ra,1807
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 1,354
|
asm/rv32i/fence_i.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
li a3,111
auipc a0,0x2
lh a0,-204(a0) # begin_signature
auipc a1,0x2
lh a1,-210(a1) # begin_signature+0x2
nop
nop
nop
nop
nop
nop
nop
nop
nop
auipc t0,0x2
sh a0,-252(t0) # begin_signature+0x4
auipc t0,0x2
sh a1,-258(t0) # begin_signature+0x6
fence.i
auipc a5,0x2
addi a5,a5,-272 # begin_signature+0x4
jalr t1,a5
test_2:
li gp,2
nop
li t2,444
bne a3,t2,fail
li a4,100
addi a4,a4,-1
bnez a4,test_2+0x14
auipc t0,0x2
sh a0,-304(t0) # begin_signature+0xc
auipc t0,0x2
sh a1,-310(t0) # begin_signature+0xe
fence.i
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
auipc a5,0x2
addi a5,a5,-372 # begin_signature+0xc
jalr t1,a5
test_3:
li gp,3
nop
li t2,777
bne a3,t2,fail
bne zero,gp,pass
.data
begin_signature:
addi a3,a3,333
addi a3,a3,222
jalr a5,t1
addi a3,a3,555
jalr a5,t1
|
abmfy/cod23-grp04
| 2,802
|
asm/rv32i/sltiu.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li ra,0
sltiu a4,ra,0
li t2,0
bne a4,t2,fail
test_3:
li gp,3
li ra,1
seqz a4,ra
li t2,0
bne a4,t2,fail
test_4:
li gp,4
li ra,3
sltiu a4,ra,7
li t2,1
bne a4,t2,fail
test_5:
li gp,5
li ra,7
sltiu a4,ra,3
li t2,0
bne a4,t2,fail
test_6:
li gp,6
li ra,0
sltiu a4,ra,-2048
li t2,1
bne a4,t2,fail
test_7:
li gp,7
lui ra,0x80000
sltiu a4,ra,0
li t2,0
bne a4,t2,fail
test_8:
li gp,8
lui ra,0x80000
sltiu a4,ra,-2048
li t2,1
bne a4,t2,fail
test_9:
li gp,9
li ra,0
sltiu a4,ra,2047
li t2,1
bne a4,t2,fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
sltiu a4,ra,0
li t2,0
bne a4,t2,fail
test_11:
li gp,11
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
sltiu a4,ra,2047
li t2,0
bne a4,t2,fail
test_12:
li gp,12
lui ra,0x80000
sltiu a4,ra,2047
li t2,0
bne a4,t2,fail
test_13:
li gp,13
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
sltiu a4,ra,-2048
li t2,1
bne a4,t2,fail
test_14:
li gp,14
li ra,0
sltiu a4,ra,-1
li t2,1
bne a4,t2,fail
test_15:
li gp,15
li ra,-1
seqz a4,ra
li t2,0
bne a4,t2,fail
test_16:
li gp,16
li ra,-1
sltiu a4,ra,-1
li t2,0
bne a4,t2,fail
test_17:
li gp,17
li ra,11
sltiu ra,ra,13
li t2,1
bne ra,t2,fail
test_18:
li gp,18
li tp,0
li ra,15
sltiu a4,ra,10
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_18+0x8
li t2,0
bne t1,t2,fail
test_19:
li gp,19
li tp,0
li ra,10
sltiu a4,ra,16
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
li t2,1
bne t1,t2,fail
test_20:
li gp,20
li tp,0
li ra,16
sltiu a4,ra,9
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
li t2,0
bne t1,t2,fail
test_21:
li gp,21
li tp,0
li ra,11
sltiu a4,ra,15
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
li t2,1
bne a4,t2,fail
test_22:
li gp,22
li tp,0
li ra,17
nop
sltiu a4,ra,8
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
li t2,0
bne a4,t2,fail
test_23:
li gp,23
li tp,0
li ra,12
nop
nop
sltiu a4,ra,14
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
li t2,1
bne a4,t2,fail
test_24:
li gp,24
sltiu ra,zero,-1
li t2,1
bne ra,t2,fail
test_25:
li gp,25
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
sltiu zero,ra,-1
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 2,784
|
asm/rv32i/slti.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li ra,0
slti a4,ra,0
li t2,0
bne a4,t2,fail
test_3:
li gp,3
li ra,1
slti a4,ra,1
li t2,0
bne a4,t2,fail
test_4:
li gp,4
li ra,3
slti a4,ra,7
li t2,1
bne a4,t2,fail
test_5:
li gp,5
li ra,7
slti a4,ra,3
li t2,0
bne a4,t2,fail
test_6:
li gp,6
li ra,0
slti a4,ra,-2048
li t2,0
bne a4,t2,fail
test_7:
li gp,7
lui ra,0x80000
slti a4,ra,0
li t2,1
bne a4,t2,fail
test_8:
li gp,8
lui ra,0x80000
slti a4,ra,-2048
li t2,1
bne a4,t2,fail
test_9:
li gp,9
li ra,0
slti a4,ra,2047
li t2,1
bne a4,t2,fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
slti a4,ra,0
li t2,0
bne a4,t2,fail
test_11:
li gp,11
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
slti a4,ra,2047
li t2,0
bne a4,t2,fail
test_12:
li gp,12
lui ra,0x80000
slti a4,ra,2047
li t2,1
bne a4,t2,fail
test_13:
li gp,13
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
slti a4,ra,-2048
li t2,0
bne a4,t2,fail
test_14:
li gp,14
li ra,0
slti a4,ra,-1
li t2,0
bne a4,t2,fail
test_15:
li gp,15
li ra,-1
slti a4,ra,1
li t2,1
bne a4,t2,fail
test_16:
li gp,16
li ra,-1
slti a4,ra,-1
li t2,0
bne a4,t2,fail
test_17:
li gp,17
li ra,11
slti ra,ra,13
li t2,1
bne ra,t2,fail
test_18:
li gp,18
li tp,0
li ra,15
slti a4,ra,10
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_18+0x8
li t2,0
bne t1,t2,fail
test_19:
li gp,19
li tp,0
li ra,10
slti a4,ra,16
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
li t2,1
bne t1,t2,fail
test_20:
li gp,20
li tp,0
li ra,16
slti a4,ra,9
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
li t2,0
bne t1,t2,fail
test_21:
li gp,21
li tp,0
li ra,11
slti a4,ra,15
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
li t2,1
bne a4,t2,fail
test_22:
li gp,22
li tp,0
li ra,17
nop
slti a4,ra,8
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
li t2,0
bne a4,t2,fail
test_23:
li gp,23
li tp,0
li ra,12
nop
nop
slti a4,ra,14
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
li t2,1
bne a4,t2,fail
test_24:
li gp,24
slti ra,zero,-1
li t2,0
bne ra,t2,fail
test_25:
li gp,25
lui ra,0xff0
addi ra,ra,255 # _start-0x7f00ff01
slti zero,ra,-1
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 4,644
|
asm/rv32i/slt.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li ra,0
li sp,0
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_3:
li gp,3
li ra,1
li sp,1
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_4:
li gp,4
li ra,3
li sp,7
slt a4,ra,sp
li t2,1
bne a4,t2,fail
test_5:
li gp,5
li ra,7
li sp,3
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_6:
li gp,6
li ra,0
lui sp,0xffff8
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_7:
li gp,7
lui ra,0x80000
li sp,0
slt a4,ra,sp
li t2,1
bne a4,t2,fail
test_8:
li gp,8
lui ra,0x80000
lui sp,0xffff8
slt a4,ra,sp
li t2,1
bne a4,t2,fail
test_9:
li gp,9
li ra,0
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
slt a4,ra,sp
li t2,1
bne a4,t2,fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
li sp,0
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_11:
li gp,11
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_12:
li gp,12
lui ra,0x80000
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
slt a4,ra,sp
li t2,1
bne a4,t2,fail
test_13:
li gp,13
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
lui sp,0xffff8
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_14:
li gp,14
li ra,0
li sp,-1
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_15:
li gp,15
li ra,-1
li sp,1
slt a4,ra,sp
li t2,1
bne a4,t2,fail
test_16:
li gp,16
li ra,-1
li sp,-1
slt a4,ra,sp
li t2,0
bne a4,t2,fail
test_17:
li gp,17
li ra,14
li sp,13
slt ra,ra,sp
li t2,0
bne ra,t2,fail
test_18:
li gp,18
li ra,11
li sp,13
slt sp,ra,sp
li t2,1
bne sp,t2,fail
test_19:
li gp,19
li ra,13
slt ra,ra,ra
li t2,0
bne ra,t2,fail
test_20:
li gp,20
li tp,0
li ra,11
li sp,13
slt a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
li t2,1
bne t1,t2,fail
test_21:
li gp,21
li tp,0
li ra,14
li sp,13
slt a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
li t2,0
bne t1,t2,fail
test_22:
li gp,22
li tp,0
li ra,12
li sp,13
slt a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
li t2,1
bne t1,t2,fail
test_23:
li gp,23
li tp,0
li ra,14
li sp,13
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
li t2,0
bne a4,t2,fail
test_24:
li gp,24
li tp,0
li ra,11
li sp,13
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_24+0x8
li t2,1
bne a4,t2,fail
test_25:
li gp,25
li tp,0
li ra,15
li sp,13
nop
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_25+0x8
li t2,0
bne a4,t2,fail
test_26:
li gp,26
li tp,0
li ra,10
nop
li sp,13
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_26+0x8
li t2,1
bne a4,t2,fail
test_27:
li gp,27
li tp,0
li ra,16
nop
li sp,13
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_27+0x8
li t2,0
bne a4,t2,fail
test_28:
li gp,28
li tp,0
li ra,9
nop
nop
li sp,13
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_28+0x8
li t2,1
bne a4,t2,fail
test_29:
li gp,29
li tp,0
li sp,13
li ra,17
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_29+0x8
li t2,0
bne a4,t2,fail
test_30:
li gp,30
li tp,0
li sp,13
li ra,8
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_30+0x8
li t2,1
bne a4,t2,fail
test_31:
li gp,31
li tp,0
li sp,13
li ra,18
nop
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_31+0x8
li t2,0
bne a4,t2,fail
test_32:
li gp,32
li tp,0
li sp,13
nop
li ra,7
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_32+0x8
li t2,1
bne a4,t2,fail
test_33:
li gp,33
li tp,0
li sp,13
nop
li ra,19
nop
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_33+0x8
li t2,0
bne a4,t2,fail
test_34:
li gp,34
li tp,0
li sp,13
nop
nop
li ra,6
slt a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_34+0x8
li t2,1
bne a4,t2,fail
test_35:
li gp,35
li ra,-1
sgtz sp,ra
li t2,0
bne sp,t2,fail
test_36:
li gp,36
li ra,-1
sltz sp,ra
li t2,1
bne sp,t2,fail
test_37:
li gp,37
sltz ra,zero
li t2,0
bne ra,t2,fail
test_38:
li gp,38
li ra,16
li sp,30
slt zero,ra,sp
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 3,559
|
asm/rv32i/srai.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li ra,0
srai a4,ra,0x0
li t2,0
bne a4,t2,fail
test_3:
li gp,3
lui ra,0x80000
srai a4,ra,0x1
lui t2,0xc0000
bne a4,t2,fail
test_4:
li gp,4
lui ra,0x80000
srai a4,ra,0x7
lui t2,0xff000
bne a4,t2,fail
test_5:
li gp,5
lui ra,0x80000
srai a4,ra,0xe
lui t2,0xfffe0
bne a4,t2,fail
test_6:
li gp,6
lui ra,0x80000
addi ra,ra,1 # _end+0xffffe001
srai a4,ra,0x1f
li t2,-1
bne a4,t2,fail
test_7:
li gp,7
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
srai a4,ra,0x0
lui t2,0x80000
addi t2,t2,-1 # _end+0xffffdfff
bne a4,t2,fail
test_8:
li gp,8
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
srai a4,ra,0x1
lui t2,0x40000
addi t2,t2,-1 # _start-0x40000001
bne a4,t2,fail
test_9:
li gp,9
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
srai a4,ra,0x7
lui t2,0x1000
addi t2,t2,-1 # _start-0x7f000001
bne a4,t2,fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
srai a4,ra,0xe
lui t2,0x20
addi t2,t2,-1 # _start-0x7ffe0001
bne a4,t2,fail
test_11:
li gp,11
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
srai a4,ra,0x1f
li t2,0
bne a4,t2,fail
test_12:
li gp,12
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
srai a4,ra,0x0
lui t2,0x81818
addi t2,t2,385 # _end+0x1816181
bne a4,t2,fail
test_13:
li gp,13
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
srai a4,ra,0x1
lui t2,0xc0c0c
addi t2,t2,192 # _end+0x40c0a0c0
bne a4,t2,fail
test_14:
li gp,14
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
srai a4,ra,0x7
lui t2,0xff030
addi t2,t2,771 # _end+0x7f02e303
bne a4,t2,fail
test_15:
li gp,15
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
srai a4,ra,0xe
lui t2,0xfffe0
addi t2,t2,1542 # _end+0x7ffde606
bne a4,t2,fail
test_16:
li gp,16
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
srai a4,ra,0x1f
li t2,-1
bne a4,t2,fail
test_17:
li gp,17
lui ra,0x80000
srai ra,ra,0x7
lui t2,0xff000
bne ra,t2,fail
test_18:
li gp,18
li tp,0
lui ra,0x80000
srai a4,ra,0x7
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_18+0x8
lui t2,0xff000
bne t1,t2,fail
test_19:
li gp,19
li tp,0
lui ra,0x80000
srai a4,ra,0xe
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
lui t2,0xfffe0
bne t1,t2,fail
test_20:
li gp,20
li tp,0
lui ra,0x80000
addi ra,ra,1 # _end+0xffffe001
srai a4,ra,0x1f
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
li t2,-1
bne t1,t2,fail
test_21:
li gp,21
li tp,0
lui ra,0x80000
srai a4,ra,0x7
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
lui t2,0xff000
bne a4,t2,fail
test_22:
li gp,22
li tp,0
lui ra,0x80000
nop
srai a4,ra,0xe
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
lui t2,0xfffe0
bne a4,t2,fail
test_23:
li gp,23
li tp,0
lui ra,0x80000
addi ra,ra,1 # _end+0xffffe001
nop
nop
srai a4,ra,0x1f
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
li t2,-1
bne a4,t2,fail
test_24:
li gp,24
srai ra,zero,0x4
li t2,0
bne ra,t2,fail
test_25:
li gp,25
li ra,33
srai zero,ra,0xa
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 4,756
|
asm/rv32i/sub.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li ra,0
li sp,0
sub a4,ra,sp
li t2,0
bne a4,t2,fail
test_3:
li gp,3
li ra,1
li sp,1
sub a4,ra,sp
li t2,0
bne a4,t2,fail
test_4:
li gp,4
li ra,3
li sp,7
sub a4,ra,sp
li t2,-4
bne a4,t2,fail
test_5:
li gp,5
li ra,0
lui sp,0xffff8
sub a4,ra,sp
lui t2,0x8
bne a4,t2,fail
test_6:
li gp,6
lui ra,0x80000
li sp,0
sub a4,ra,sp
lui t2,0x80000
bne a4,t2,fail
test_7:
li gp,7
lui ra,0x80000
lui sp,0xffff8
sub a4,ra,sp
lui t2,0x80008
bne a4,t2,fail
test_8:
li gp,8
li ra,0
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sub a4,ra,sp
lui t2,0xffff8
addi t2,t2,1 # _end+0x7fff6001
bne a4,t2,fail
test_9:
li gp,9
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
li sp,0
sub a4,ra,sp
lui t2,0x80000
addi t2,t2,-1 # _end+0xffffdfff
bne a4,t2,fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sub a4,ra,sp
lui t2,0x7fff8
bne a4,t2,fail
test_11:
li gp,11
lui ra,0x80000
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sub a4,ra,sp
lui t2,0x7fff8
addi t2,t2,1 # _start-0x7fff
bne a4,t2,fail
test_12:
li gp,12
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
lui sp,0xffff8
sub a4,ra,sp
lui t2,0x80008
addi t2,t2,-1 # _end+0x5fff
bne a4,t2,fail
test_13:
li gp,13
li ra,0
li sp,-1
sub a4,ra,sp
li t2,1
bne a4,t2,fail
test_14:
li gp,14
li ra,-1
li sp,1
sub a4,ra,sp
li t2,-2
bne a4,t2,fail
test_15:
li gp,15
li ra,-1
li sp,-1
sub a4,ra,sp
li t2,0
bne a4,t2,fail
test_16:
li gp,16
li ra,13
li sp,11
sub ra,ra,sp
li t2,2
bne ra,t2,fail
test_17:
li gp,17
li ra,14
li sp,11
sub sp,ra,sp
li t2,3
bne sp,t2,fail
test_18:
li gp,18
li ra,13
sub ra,ra,ra
li t2,0
bne ra,t2,fail
test_19:
li gp,19
li tp,0
li ra,13
li sp,11
sub a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
li t2,2
bne t1,t2,fail
test_20:
li gp,20
li tp,0
li ra,14
li sp,11
sub a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
li t2,3
bne t1,t2,fail
test_21:
li gp,21
li tp,0
li ra,15
li sp,11
sub a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
li t2,4
bne t1,t2,fail
test_22:
li gp,22
li tp,0
li ra,13
li sp,11
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
li t2,2
bne a4,t2,fail
test_23:
li gp,23
li tp,0
li ra,14
li sp,11
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
li t2,3
bne a4,t2,fail
test_24:
li gp,24
li tp,0
li ra,15
li sp,11
nop
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_24+0x8
li t2,4
bne a4,t2,fail
test_25:
li gp,25
li tp,0
li ra,13
nop
li sp,11
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_25+0x8
li t2,2
bne a4,t2,fail
test_26:
li gp,26
li tp,0
li ra,14
nop
li sp,11
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_26+0x8
li t2,3
bne a4,t2,fail
test_27:
li gp,27
li tp,0
li ra,15
nop
nop
li sp,11
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_27+0x8
li t2,4
bne a4,t2,fail
test_28:
li gp,28
li tp,0
li sp,11
li ra,13
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_28+0x8
li t2,2
bne a4,t2,fail
test_29:
li gp,29
li tp,0
li sp,11
li ra,14
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_29+0x8
li t2,3
bne a4,t2,fail
test_30:
li gp,30
li tp,0
li sp,11
li ra,15
nop
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_30+0x8
li t2,4
bne a4,t2,fail
test_31:
li gp,31
li tp,0
li sp,11
nop
li ra,13
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_31+0x8
li t2,2
bne a4,t2,fail
test_32:
li gp,32
li tp,0
li sp,11
nop
li ra,14
nop
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_32+0x8
li t2,3
bne a4,t2,fail
test_33:
li gp,33
li tp,0
li sp,11
nop
nop
li ra,15
sub a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_33+0x8
li t2,4
bne a4,t2,fail
test_34:
li gp,34
li ra,-15
neg sp,ra
li t2,15
bne sp,t2,fail
test_35:
li gp,35
li ra,32
sub sp,ra,zero
li t2,32
bne sp,t2,fail
test_36:
li gp,36
neg ra,zero
li t2,0
bne ra,t2,fail
test_37:
li gp,37
li ra,16
li sp,30
sub zero,ra,sp
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 6,530
|
asm/rv32i/sh.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
auipc ra,0x2
addi ra,ra,-204 # begin_signature
li sp,170
auipc a5,0x0
addi a5,a5,20 # test_2+0x24
sh sp,0(ra)
lh a4,0(ra)
j test_2+0x28
mv a4,sp
li t2,170
bne a4,t2,fail
test_3:
li gp,3
auipc ra,0x2
addi ra,ra,-252 # begin_signature
lui sp,0xffffb
addi sp,sp,-1536 # _end+0x7fff89e0
auipc a5,0x0
addi a5,a5,20 # test_3+0x28
sh sp,2(ra)
lh a4,2(ra)
j test_3+0x2c
mv a4,sp
lui t2,0xffffb
addi t2,t2,-1536 # _end+0x7fff89e0
bne a4,t2,fail
test_4:
li gp,4
auipc ra,0x2
addi ra,ra,-308 # begin_signature
lui sp,0xbeef1
addi sp,sp,-1376 # _end+0x3eeeea80
auipc a5,0x0
addi a5,a5,20 # test_4+0x28
sh sp,4(ra)
lw a4,4(ra)
j test_4+0x2c
mv a4,sp
lui t2,0xbeef1
addi t2,t2,-1376 # _end+0x3eeeea80
bne a4,t2,fail
test_5:
li gp,5
auipc ra,0x2
addi ra,ra,-364 # begin_signature
lui sp,0xffffa
addi sp,sp,10 # _end+0x7fff7fea
auipc a5,0x0
addi a5,a5,20 # test_5+0x28
sh sp,6(ra)
lh a4,6(ra)
j test_5+0x2c
mv a4,sp
lui t2,0xffffa
addi t2,t2,10 # _end+0x7fff7fea
bne a4,t2,fail
test_6:
li gp,6
auipc ra,0x2
addi ra,ra,-406 # tdat8
li sp,170
auipc a5,0x0
addi a5,a5,20 # test_6+0x24
sh sp,-6(ra)
lh a4,-6(ra)
j test_6+0x28
mv a4,sp
li t2,170
bne a4,t2,fail
test_7:
li gp,7
auipc ra,0x2
addi ra,ra,-454 # tdat8
lui sp,0xffffb
addi sp,sp,-1536 # _end+0x7fff89e0
auipc a5,0x0
addi a5,a5,20 # test_7+0x28
sh sp,-4(ra)
lh a4,-4(ra)
j test_7+0x2c
mv a4,sp
lui t2,0xffffb
addi t2,t2,-1536 # _end+0x7fff89e0
bne a4,t2,fail
test_8:
li gp,8
auipc ra,0x2
addi ra,ra,-510 # tdat8
lui sp,0x1
addi sp,sp,-1376 # _start-0x7ffff560
auipc a5,0x0
addi a5,a5,20 # test_8+0x28
sh sp,-2(ra)
lh a4,-2(ra)
j test_8+0x2c
mv a4,sp
lui t2,0x1
addi t2,t2,-1376 # _start-0x7ffff560
bne a4,t2,fail
test_9:
li gp,9
auipc ra,0x2
addi ra,ra,-566 # tdat8
lui sp,0xffffa
addi sp,sp,10 # _end+0x7fff7fea
auipc a5,0x0
addi a5,a5,20 # test_9+0x28
sh sp,0(ra)
lh a4,0(ra)
j test_9+0x2c
mv a4,sp
lui t2,0xffffa
addi t2,t2,10 # _end+0x7fff7fea
bne a4,t2,fail
test_10:
li gp,10
auipc ra,0x2
addi ra,ra,-620 # tdat9
lui sp,0x12345
addi sp,sp,1656 # _start-0x6dcba988
addi tp,ra,-32
sh sp,32(tp) # _start-0x7fffffe0
lh t0,0(ra)
lui t2,0x5
addi t2,t2,1656 # _start-0x7fffa988
bne t0,t2,fail
test_11:
li gp,11
auipc ra,0x2
addi ra,ra,-664 # tdat9
lui sp,0x3
addi sp,sp,152 # _start-0x7fffcf68
addi ra,ra,-5
sh sp,7(ra)
auipc tp,0x2
addi tp,tp,-686 # tdat10
lh t0,0(tp) # _start-0x80000000
lui t2,0x3
addi t2,t2,152 # _start-0x7fffcf68
bne t0,t2,fail
test_12:
li gp,12
li tp,0
lui ra,0xffffd
addi ra,ra,-803 # _end+0x7fffacbd
auipc sp,0x2
addi sp,sp,-744 # begin_signature
sh ra,0(sp)
lh a4,0(sp)
lui t2,0xffffd
addi t2,t2,-803 # _end+0x7fffacbd
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
test_13:
li gp,13
li tp,0
lui ra,0xffffc
addi ra,ra,-819 # _end+0x7fff9cad
auipc sp,0x2
addi sp,sp,-800 # begin_signature
nop
sh ra,2(sp)
lh a4,2(sp)
lui t2,0xffffc
addi t2,t2,-819 # _end+0x7fff9cad
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_13+0x8
test_14:
li gp,14
li tp,0
lui ra,0xffffc
addi ra,ra,-1076 # _end+0x7fff9bac
auipc sp,0x2
addi sp,sp,-860 # begin_signature
nop
nop
sh ra,4(sp)
lh a4,4(sp)
lui t2,0xffffc
addi t2,t2,-1076 # _end+0x7fff9bac
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_14+0x8
test_15:
li gp,15
li tp,0
lui ra,0xffffb
addi ra,ra,-1092 # _end+0x7fff8b9c
nop
auipc sp,0x2
addi sp,sp,-928 # begin_signature
sh ra,6(sp)
lh a4,6(sp)
lui t2,0xffffb
addi t2,t2,-1092 # _end+0x7fff8b9c
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_15+0x8
test_16:
li gp,16
li tp,0
lui ra,0xffffb
addi ra,ra,-1349 # _end+0x7fff8a9b
nop
auipc sp,0x2
addi sp,sp,-988 # begin_signature
nop
sh ra,8(sp)
lh a4,8(sp)
lui t2,0xffffb
addi t2,t2,-1349 # _end+0x7fff8a9b
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_16+0x8
test_17:
li gp,17
li tp,0
lui ra,0xffffe
addi ra,ra,-1365 # _end+0x7fffba8b
nop
nop
auipc sp,0x2
addi sp,sp,-1056 # begin_signature
sh ra,10(sp)
lh a4,10(sp)
lui t2,0xffffe
addi t2,t2,-1365 # _end+0x7fffba8b
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_17+0x8
test_18:
li gp,18
li tp,0
auipc sp,0x2
addi sp,sp,-1104 # begin_signature
lui ra,0x2
addi ra,ra,563 # _start-0x7fffddcd
sh ra,0(sp)
lh a4,0(sp)
lui t2,0x2
addi t2,t2,563 # _start-0x7fffddcd
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_18+0x8
test_19:
li gp,19
li tp,0
auipc sp,0x2
addi sp,sp,-1160 # begin_signature
lui ra,0x1
addi ra,ra,547 # _start-0x7fffeddd
nop
sh ra,2(sp)
lh a4,2(sp)
lui t2,0x1
addi t2,t2,547 # _start-0x7fffeddd
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
test_20:
li gp,20
li tp,0
auipc sp,0x2
addi sp,sp,-1220 # begin_signature
lui ra,0x1
addi ra,ra,290 # _start-0x7fffeede
nop
nop
sh ra,4(sp)
lh a4,4(sp)
lui t2,0x1
addi t2,t2,290 # _start-0x7fffeede
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
test_21:
li gp,21
li tp,0
auipc sp,0x2
addi sp,sp,-1284 # begin_signature
nop
li ra,274
sh ra,6(sp)
lh a4,6(sp)
li t2,274
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
test_22:
li gp,22
li tp,0
auipc sp,0x2
addi sp,sp,-1336 # begin_signature
nop
li ra,17
nop
sh ra,8(sp)
lh a4,8(sp)
li t2,17
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
test_23:
li gp,23
li tp,0
auipc sp,0x2
addi sp,sp,-1392 # begin_signature
nop
nop
lui ra,0x3
addi ra,ra,1 # _start-0x7fffcfff
sh ra,10(sp)
lh a4,10(sp)
lui t2,0x3
addi t2,t2,1 # _start-0x7fffcfff
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
lui a0,0xc
addi a0,a0,-273 # _start-0x7fff4111
auipc a1,0x2
addi a1,a1,-1456 # begin_signature
sh a0,6(a1)
bne zero,gp,pass
.data
begin_signature:
.half 0xbeef
tdat2:
.half 0xbeef
tdat3:
.half 0xbeef
tdat4:
.half 0xbeef
tdat5:
.half 0xbeef
tdat6:
.half 0xbeef
tdat7:
.half 0xbeef
tdat8:
.half 0xbeef
tdat9:
.half 0xbeef
tdat10:
.half 0xbeef
|
abmfy/cod23-grp04
| 6,142
|
asm/rv32i/sra.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
lui ra,0x80000
li sp,0
sra a4,ra,sp
lui t2,0x80000
bne a4,t2,fail
test_3:
li gp,3
lui ra,0x80000
li sp,1
sra a4,ra,sp
lui t2,0xc0000
bne a4,t2,fail
test_4:
li gp,4
lui ra,0x80000
li sp,7
sra a4,ra,sp
lui t2,0xff000
bne a4,t2,fail
test_5:
li gp,5
lui ra,0x80000
li sp,14
sra a4,ra,sp
lui t2,0xfffe0
bne a4,t2,fail
test_6:
li gp,6
lui ra,0x80000
addi ra,ra,1 # _end+0xffffe001
li sp,31
sra a4,ra,sp
li t2,-1
bne a4,t2,fail
test_7:
li gp,7
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
li sp,0
sra a4,ra,sp
lui t2,0x80000
addi t2,t2,-1 # _end+0xffffdfff
bne a4,t2,fail
test_8:
li gp,8
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
li sp,1
sra a4,ra,sp
lui t2,0x40000
addi t2,t2,-1 # _start-0x40000001
bne a4,t2,fail
test_9:
li gp,9
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
li sp,7
sra a4,ra,sp
lui t2,0x1000
addi t2,t2,-1 # _start-0x7f000001
bne a4,t2,fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
li sp,14
sra a4,ra,sp
lui t2,0x20
addi t2,t2,-1 # _start-0x7ffe0001
bne a4,t2,fail
test_11:
li gp,11
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
li sp,31
sra a4,ra,sp
li t2,0
bne a4,t2,fail
test_12:
li gp,12
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,0
sra a4,ra,sp
lui t2,0x81818
addi t2,t2,385 # _end+0x1816181
bne a4,t2,fail
test_13:
li gp,13
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,1
sra a4,ra,sp
lui t2,0xc0c0c
addi t2,t2,192 # _end+0x40c0a0c0
bne a4,t2,fail
test_14:
li gp,14
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,7
sra a4,ra,sp
lui t2,0xff030
addi t2,t2,771 # _end+0x7f02e303
bne a4,t2,fail
test_15:
li gp,15
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,14
sra a4,ra,sp
lui t2,0xfffe0
addi t2,t2,1542 # _end+0x7ffde606
bne a4,t2,fail
test_16:
li gp,16
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,31
sra a4,ra,sp
li t2,-1
bne a4,t2,fail
test_17:
li gp,17
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,-64
sra a4,ra,sp
lui t2,0x81818
addi t2,t2,385 # _end+0x1816181
bne a4,t2,fail
test_18:
li gp,18
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,-63
sra a4,ra,sp
lui t2,0xc0c0c
addi t2,t2,192 # _end+0x40c0a0c0
bne a4,t2,fail
test_19:
li gp,19
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,-57
sra a4,ra,sp
lui t2,0xff030
addi t2,t2,771 # _end+0x7f02e303
bne a4,t2,fail
test_20:
li gp,20
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,-50
sra a4,ra,sp
lui t2,0xfffe0
addi t2,t2,1542 # _end+0x7ffde606
bne a4,t2,fail
test_21:
li gp,21
lui ra,0x81818
addi ra,ra,385 # _end+0x1816181
li sp,-1
sra a4,ra,sp
li t2,-1
bne a4,t2,fail
test_22:
li gp,22
lui ra,0x80000
li sp,7
sra ra,ra,sp
lui t2,0xff000
bne ra,t2,fail
test_23:
li gp,23
lui ra,0x80000
li sp,14
sra sp,ra,sp
lui t2,0xfffe0
bne sp,t2,fail
test_24:
li gp,24
li ra,7
sra ra,ra,ra
li t2,0
bne ra,t2,fail
test_25:
li gp,25
li tp,0
lui ra,0x80000
li sp,7
sra a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_25+0x8
lui t2,0xff000
bne t1,t2,fail
test_26:
li gp,26
li tp,0
lui ra,0x80000
li sp,14
sra a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_26+0x8
lui t2,0xfffe0
bne t1,t2,fail
test_27:
li gp,27
li tp,0
lui ra,0x80000
li sp,31
sra a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_27+0x8
li t2,-1
bne t1,t2,fail
test_28:
li gp,28
li tp,0
lui ra,0x80000
li sp,7
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_28+0x8
lui t2,0xff000
bne a4,t2,fail
test_29:
li gp,29
li tp,0
lui ra,0x80000
li sp,14
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_29+0x8
lui t2,0xfffe0
bne a4,t2,fail
test_30:
li gp,30
li tp,0
lui ra,0x80000
li sp,31
nop
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_30+0x8
li t2,-1
bne a4,t2,fail
test_31:
li gp,31
li tp,0
lui ra,0x80000
nop
li sp,7
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_31+0x8
lui t2,0xff000
bne a4,t2,fail
test_32:
li gp,32
li tp,0
lui ra,0x80000
nop
li sp,14
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_32+0x8
lui t2,0xfffe0
bne a4,t2,fail
test_33:
li gp,33
li tp,0
lui ra,0x80000
nop
nop
li sp,31
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_33+0x8
li t2,-1
bne a4,t2,fail
test_34:
li gp,34
li tp,0
li sp,7
lui ra,0x80000
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_34+0x8
lui t2,0xff000
bne a4,t2,fail
test_35:
li gp,35
li tp,0
li sp,14
lui ra,0x80000
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_35+0x8
lui t2,0xfffe0
bne a4,t2,fail
test_36:
li gp,36
li tp,0
li sp,31
lui ra,0x80000
nop
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_36+0x8
li t2,-1
bne a4,t2,fail
test_37:
li gp,37
li tp,0
li sp,7
nop
lui ra,0x80000
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_37+0x8
lui t2,0xff000
bne a4,t2,fail
test_38:
li gp,38
li tp,0
li sp,14
nop
lui ra,0x80000
nop
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_38+0x8
lui t2,0xfffe0
bne a4,t2,fail
test_39:
li gp,39
li tp,0
li sp,31
nop
nop
lui ra,0x80000
sra a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_39+0x8
li t2,-1
bne a4,t2,fail
test_40:
li gp,40
li ra,15
sra sp,zero,ra
li t2,0
bne sp,t2,fail
test_41:
li gp,41
li ra,32
sra sp,ra,zero
li t2,32
bne sp,t2,fail
test_42:
li gp,42
sra ra,zero,zero
li t2,0
bne ra,t2,fail
test_43:
li gp,43
li ra,1024
lui sp,0x1
addi sp,sp,-2048 # _start-0x7ffff800
sra zero,ra,sp
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 4,166
|
asm/rv32i/bgeu.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li ra,0
li sp,0
bgeu ra,sp,test_2+0x18
bne zero,gp,fail
bne zero,gp,test_3
bgeu ra,sp,test_2+0x14
bne zero,gp,fail
test_3:
li gp,3
li ra,1
li sp,1
bgeu ra,sp,test_3+0x18
bne zero,gp,fail
bne zero,gp,test_4
bgeu ra,sp,test_3+0x14
bne zero,gp,fail
test_4:
li gp,4
li ra,-1
li sp,-1
bgeu ra,sp,test_4+0x18
bne zero,gp,fail
bne zero,gp,test_5
bgeu ra,sp,test_4+0x14
bne zero,gp,fail
test_5:
li gp,5
li ra,1
li sp,0
bgeu ra,sp,test_5+0x18
bne zero,gp,fail
bne zero,gp,test_6
bgeu ra,sp,test_5+0x14
bne zero,gp,fail
test_6:
li gp,6
li ra,-1
li sp,-2
bgeu ra,sp,test_6+0x18
bne zero,gp,fail
bne zero,gp,test_7
bgeu ra,sp,test_6+0x14
bne zero,gp,fail
test_7:
li gp,7
li ra,-1
li sp,0
bgeu ra,sp,test_7+0x18
bne zero,gp,fail
bne zero,gp,test_8
bgeu ra,sp,test_7+0x14
bne zero,gp,fail
test_8:
li gp,8
li ra,0
li sp,1
bgeu ra,sp,test_8+0x14
bne zero,gp,test_8+0x18
bne zero,gp,fail
bgeu ra,sp,test_8+0x14
test_9:
li gp,9
li ra,-2
li sp,-1
bgeu ra,sp,test_9+0x14
bne zero,gp,test_9+0x18
bne zero,gp,fail
bgeu ra,sp,test_9+0x14
test_10:
li gp,10
li ra,0
li sp,-1
bgeu ra,sp,test_10+0x14
bne zero,gp,test_10+0x18
bne zero,gp,fail
bgeu ra,sp,test_10+0x14
test_11:
li gp,11
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
lui sp,0x80000
bgeu ra,sp,test_11+0x18
bne zero,gp,test_11+0x1c
bne zero,gp,fail
bgeu ra,sp,test_11+0x18
test_12:
li gp,12
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
test_13:
li gp,13
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
lui sp,0xf0000
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_13+0x8
test_14:
li gp,14
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
lui sp,0xf0000
nop
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_14+0x8
test_15:
li gp,15
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
nop
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_15+0x8
test_16:
li gp,16
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
nop
lui sp,0xf0000
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_16+0x8
test_17:
li gp,17
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
nop
nop
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_17+0x8
test_18:
li gp,18
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_18+0x8
test_19:
li gp,19
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
lui sp,0xf0000
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
test_20:
li gp,20
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
lui sp,0xf0000
nop
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
test_21:
li gp,21
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
nop
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
test_22:
li gp,22
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
nop
lui sp,0xf0000
nop
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
test_23:
li gp,23
li tp,0
lui ra,0xf0000
addi ra,ra,-1 # _end+0x6fffdfff
nop
nop
lui sp,0xf0000
bgeu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
test_24:
li gp,24
li ra,1
bgeu ra,zero,test_24+0x1c
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3
bne ra,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 5,912
|
asm/rv32i/srl.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
lui ra,0x80000
li sp,0
srl a4,ra,sp
lui t2,0x80000
bne a4,t2,fail
test_3:
li gp,3
lui ra,0x80000
li sp,1
srl a4,ra,sp
lui t2,0x40000
bne a4,t2,fail
test_4:
li gp,4
lui ra,0x80000
li sp,7
srl a4,ra,sp
lui t2,0x1000
bne a4,t2,fail
test_5:
li gp,5
lui ra,0x80000
li sp,14
srl a4,ra,sp
lui t2,0x20
bne a4,t2,fail
test_6:
li gp,6
lui ra,0x80000
addi ra,ra,1 # _end+0xffffe001
li sp,31
srl a4,ra,sp
li t2,1
bne a4,t2,fail
test_7:
li gp,7
li ra,-1
li sp,0
srl a4,ra,sp
li t2,-1
bne a4,t2,fail
test_8:
li gp,8
li ra,-1
li sp,1
srl a4,ra,sp
lui t2,0x80000
addi t2,t2,-1 # _end+0xffffdfff
bne a4,t2,fail
test_9:
li gp,9
li ra,-1
li sp,7
srl a4,ra,sp
lui t2,0x2000
addi t2,t2,-1 # _start-0x7e000001
bne a4,t2,fail
test_10:
li gp,10
li ra,-1
li sp,14
srl a4,ra,sp
lui t2,0x40
addi t2,t2,-1 # _start-0x7ffc0001
bne a4,t2,fail
test_11:
li gp,11
li ra,-1
li sp,31
srl a4,ra,sp
li t2,1
bne a4,t2,fail
test_12:
li gp,12
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,0
srl a4,ra,sp
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
bne a4,t2,fail
test_13:
li gp,13
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,1
srl a4,ra,sp
lui t2,0x10909
addi t2,t2,144 # _start-0x6f6f6f70
bne a4,t2,fail
test_14:
li gp,14
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,7
srl a4,ra,sp
lui t2,0x424
addi t2,t2,578 # _start-0x7fbdbdbe
bne a4,t2,fail
test_15:
li gp,15
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,14
srl a4,ra,sp
lui t2,0x8
addi t2,t2,1156 # _start-0x7fff7b7c
bne a4,t2,fail
test_16:
li gp,16
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,31
srl a4,ra,sp
li t2,0
bne a4,t2,fail
test_17:
li gp,17
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-64
srl a4,ra,sp
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
bne a4,t2,fail
test_18:
li gp,18
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-63
srl a4,ra,sp
lui t2,0x10909
addi t2,t2,144 # _start-0x6f6f6f70
bne a4,t2,fail
test_19:
li gp,19
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-57
srl a4,ra,sp
lui t2,0x424
addi t2,t2,578 # _start-0x7fbdbdbe
bne a4,t2,fail
test_20:
li gp,20
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-50
srl a4,ra,sp
lui t2,0x8
addi t2,t2,1156 # _start-0x7fff7b7c
bne a4,t2,fail
test_21:
li gp,21
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-1
srl a4,ra,sp
li t2,0
bne a4,t2,fail
test_22:
li gp,22
lui ra,0x80000
li sp,7
srl ra,ra,sp
lui t2,0x1000
bne ra,t2,fail
test_23:
li gp,23
lui ra,0x80000
li sp,14
srl sp,ra,sp
lui t2,0x20
bne sp,t2,fail
test_24:
li gp,24
li ra,7
srl ra,ra,ra
li t2,0
bne ra,t2,fail
test_25:
li gp,25
li tp,0
lui ra,0x80000
li sp,7
srl a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_25+0x8
lui t2,0x1000
bne t1,t2,fail
test_26:
li gp,26
li tp,0
lui ra,0x80000
li sp,14
srl a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_26+0x8
lui t2,0x20
bne t1,t2,fail
test_27:
li gp,27
li tp,0
lui ra,0x80000
li sp,31
srl a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_27+0x8
li t2,1
bne t1,t2,fail
test_28:
li gp,28
li tp,0
lui ra,0x80000
li sp,7
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_28+0x8
lui t2,0x1000
bne a4,t2,fail
test_29:
li gp,29
li tp,0
lui ra,0x80000
li sp,14
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_29+0x8
lui t2,0x20
bne a4,t2,fail
test_30:
li gp,30
li tp,0
lui ra,0x80000
li sp,31
nop
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_30+0x8
li t2,1
bne a4,t2,fail
test_31:
li gp,31
li tp,0
lui ra,0x80000
nop
li sp,7
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_31+0x8
lui t2,0x1000
bne a4,t2,fail
test_32:
li gp,32
li tp,0
lui ra,0x80000
nop
li sp,14
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_32+0x8
lui t2,0x20
bne a4,t2,fail
test_33:
li gp,33
li tp,0
lui ra,0x80000
nop
nop
li sp,31
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_33+0x8
li t2,1
bne a4,t2,fail
test_34:
li gp,34
li tp,0
li sp,7
lui ra,0x80000
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_34+0x8
lui t2,0x1000
bne a4,t2,fail
test_35:
li gp,35
li tp,0
li sp,14
lui ra,0x80000
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_35+0x8
lui t2,0x20
bne a4,t2,fail
test_36:
li gp,36
li tp,0
li sp,31
lui ra,0x80000
nop
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_36+0x8
li t2,1
bne a4,t2,fail
test_37:
li gp,37
li tp,0
li sp,7
nop
lui ra,0x80000
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_37+0x8
lui t2,0x1000
bne a4,t2,fail
test_38:
li gp,38
li tp,0
li sp,14
nop
lui ra,0x80000
nop
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_38+0x8
lui t2,0x20
bne a4,t2,fail
test_39:
li gp,39
li tp,0
li sp,31
nop
nop
lui ra,0x80000
srl a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_39+0x8
li t2,1
bne a4,t2,fail
test_40:
li gp,40
li ra,15
srl sp,zero,ra
li t2,0
bne sp,t2,fail
test_41:
li gp,41
li ra,32
srl sp,ra,zero
li t2,32
bne sp,t2,fail
test_42:
li gp,42
srl ra,zero,zero
li t2,0
bne ra,t2,fail
test_43:
li gp,43
li ra,1024
lui sp,0x1
addi sp,sp,-2048 # _start-0x7ffff800
srl zero,ra,sp
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 3,506
|
asm/rv32i/lh.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li a5,255
auipc ra,0x2
addi ra,ra,-208 # begin_signature
lh a4,0(ra)
li t2,255
bne a4,t2,fail
test_3:
li gp,3
li a5,-256
auipc ra,0x2
addi ra,ra,-236 # begin_signature
lh a4,2(ra)
li t2,-256
bne a4,t2,fail
test_4:
li gp,4
lui a5,0x1
addi a5,a5,-16 # _start-0x7ffff010
auipc ra,0x2
addi ra,ra,-268 # begin_signature
lh a4,4(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
test_5:
li gp,5
lui a5,0xfffff
addi a5,a5,15 # _end+0x7fffcfff
auipc ra,0x2
addi ra,ra,-304 # begin_signature
lh a4,6(ra)
lui t2,0xfffff
addi t2,t2,15 # _end+0x7fffcfff
bne a4,t2,fail
test_6:
li gp,6
li a5,255
auipc ra,0x2
addi ra,ra,-330 # tdat4
lh a4,-6(ra)
li t2,255
bne a4,t2,fail
test_7:
li gp,7
li a5,-256
auipc ra,0x2
addi ra,ra,-358 # tdat4
lh a4,-4(ra)
li t2,-256
bne a4,t2,fail
test_8:
li gp,8
lui a5,0x1
addi a5,a5,-16 # _start-0x7ffff010
auipc ra,0x2
addi ra,ra,-390 # tdat4
lh a4,-2(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
test_9:
li gp,9
lui a5,0xfffff
addi a5,a5,15 # _end+0x7fffcfff
auipc ra,0x2
addi ra,ra,-426 # tdat4
lh a4,0(ra)
lui t2,0xfffff
addi t2,t2,15 # _end+0x7fffcfff
bne a4,t2,fail
test_10:
li gp,10
auipc ra,0x2
addi ra,ra,-460 # begin_signature
addi ra,ra,-32
lh t0,32(ra)
li t2,255
bne t0,t2,fail
test_11:
li gp,11
auipc ra,0x2
addi ra,ra,-488 # begin_signature
addi ra,ra,-5
lh t0,7(ra)
li t2,-256
bne t0,t2,fail
test_12:
li gp,12
li tp,0
auipc ra,0x2
addi ra,ra,-518 # tdat2
lh a4,2(ra)
mv t1,a4
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
test_13:
li gp,13
li tp,0
auipc ra,0x2
addi ra,ra,-564 # tdat3
lh a4,2(ra)
nop
mv t1,a4
lui t2,0xfffff
addi t2,t2,15 # _end+0x7fffcfff
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_13+0x8
test_14:
li gp,14
li tp,0
auipc ra,0x2
addi ra,ra,-620 # begin_signature
lh a4,2(ra)
nop
nop
mv t1,a4
li t2,-256
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_14+0x8
test_15:
li gp,15
li tp,0
auipc ra,0x2
addi ra,ra,-670 # tdat2
lh a4,2(ra)
lui t2,0x1
addi t2,t2,-16 # _start-0x7ffff010
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_15+0x8
test_16:
li gp,16
li tp,0
auipc ra,0x2
addi ra,ra,-712 # tdat3
nop
lh a4,2(ra)
lui t2,0xfffff
addi t2,t2,15 # _end+0x7fffcfff
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_16+0x8
test_17:
li gp,17
li tp,0
auipc ra,0x2
addi ra,ra,-764 # begin_signature
nop
nop
lh a4,2(ra)
li t2,-256
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_17+0x8
test_18:
li gp,18
auipc t0,0x2
addi t0,t0,-808 # begin_signature
lh sp,0(t0)
li sp,2
li t2,2
bne sp,t2,fail
test_19:
li gp,19
auipc t0,0x2
addi t0,t0,-836 # begin_signature
lh sp,0(t0)
nop
li sp,2
li t2,2
bne sp,t2,fail
bne zero,gp,pass
.data
begin_signature:
.half 0x00ff
tdat2:
.half 0xff00
tdat3:
.half 0x0ff0
tdat4:
.half 0xf00f
|
abmfy/cod23-grp04
| 3,051
|
asm/rv32i/lbu.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li a5,255
auipc ra,0x2
addi ra,ra,-208 # begin_signature
lbu a4,0(ra)
li t2,255
bne a4,t2,fail
test_3:
li gp,3
li a5,0
auipc ra,0x2
addi ra,ra,-236 # begin_signature
lbu a4,1(ra)
li t2,0
bne a4,t2,fail
test_4:
li gp,4
li a5,240
auipc ra,0x2
addi ra,ra,-264 # begin_signature
lbu a4,2(ra)
li t2,240
bne a4,t2,fail
test_5:
li gp,5
li a5,15
auipc ra,0x2
addi ra,ra,-292 # begin_signature
lbu a4,3(ra)
li t2,15
bne a4,t2,fail
test_6:
li gp,6
li a5,255
auipc ra,0x2
addi ra,ra,-317 # tdat4
lbu a4,-3(ra)
li t2,255
bne a4,t2,fail
test_7:
li gp,7
li a5,0
auipc ra,0x2
addi ra,ra,-345 # tdat4
lbu a4,-2(ra)
li t2,0
bne a4,t2,fail
test_8:
li gp,8
li a5,240
auipc ra,0x2
addi ra,ra,-373 # tdat4
lbu a4,-1(ra)
li t2,240
bne a4,t2,fail
test_9:
li gp,9
li a5,15
auipc ra,0x2
addi ra,ra,-401 # tdat4
lbu a4,0(ra)
li t2,15
bne a4,t2,fail
test_10:
li gp,10
auipc ra,0x2
addi ra,ra,-428 # begin_signature
addi ra,ra,-32
lbu t0,32(ra)
li t2,255
bne t0,t2,fail
test_11:
li gp,11
auipc ra,0x2
addi ra,ra,-456 # begin_signature
addi ra,ra,-6
lbu t0,7(ra)
li t2,0
bne t0,t2,fail
test_12:
li gp,12
li tp,0
auipc ra,0x2
addi ra,ra,-487 # tdat2
lbu a4,1(ra)
mv t1,a4
li t2,240
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
test_13:
li gp,13
li tp,0
auipc ra,0x2
addi ra,ra,-530 # tdat3
lbu a4,1(ra)
nop
mv t1,a4
li t2,15
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_13+0x8
test_14:
li gp,14
li tp,0
auipc ra,0x2
addi ra,ra,-580 # begin_signature
lbu a4,1(ra)
nop
nop
mv t1,a4
li t2,0
bne t1,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_14+0x8
test_15:
li gp,15
li tp,0
auipc ra,0x2
addi ra,ra,-631 # tdat2
lbu a4,1(ra)
li t2,240
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_15+0x8
test_16:
li gp,16
li tp,0
auipc ra,0x2
addi ra,ra,-670 # tdat3
nop
lbu a4,1(ra)
li t2,15
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_16+0x8
test_17:
li gp,17
li tp,0
auipc ra,0x2
addi ra,ra,-716 # begin_signature
nop
nop
lbu a4,1(ra)
li t2,0
bne a4,t2,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_17+0x8
test_18:
li gp,18
auipc t0,0x2
addi t0,t0,-760 # begin_signature
lbu sp,0(t0)
li sp,2
li t2,2
bne sp,t2,fail
test_19:
li gp,19
auipc t0,0x2
addi t0,t0,-788 # begin_signature
lbu sp,0(t0)
nop
li sp,2
li t2,2
bne sp,t2,fail
bne zero,gp,pass
.data
begin_signature:
.byte 0xff
tdat2:
.byte 0x00
tdat3:
.byte 0xf0
tdat4:
.byte 0x0f
|
abmfy/cod23-grp04
| 4,683
|
asm/rv32i/sltu.s
|
_start:
beq x0, x0, reset_vector
loop:
beq x0, x0, loop
fail:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
test_2:
li gp,2
li ra,0
li sp,0
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_3:
li gp,3
li ra,1
li sp,1
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_4:
li gp,4
li ra,3
li sp,7
sltu a4,ra,sp
li t2,1
bne a4,t2,fail
test_5:
li gp,5
li ra,7
li sp,3
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_6:
li gp,6
li ra,0
lui sp,0xffff8
sltu a4,ra,sp
li t2,1
bne a4,t2,fail
test_7:
li gp,7
lui ra,0x80000
li sp,0
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_8:
li gp,8
lui ra,0x80000
lui sp,0xffff8
sltu a4,ra,sp
li t2,1
bne a4,t2,fail
test_9:
li gp,9
li ra,0
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sltu a4,ra,sp
li t2,1
bne a4,t2,fail
test_10:
li gp,10
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
li sp,0
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_11:
li gp,11
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_12:
li gp,12
lui ra,0x80000
lui sp,0x8
addi sp,sp,-1 # _start-0x7fff8001
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_13:
li gp,13
lui ra,0x80000
addi ra,ra,-1 # _end+0xffffdfff
lui sp,0xffff8
sltu a4,ra,sp
li t2,1
bne a4,t2,fail
test_14:
li gp,14
li ra,0
li sp,-1
sltu a4,ra,sp
li t2,1
bne a4,t2,fail
test_15:
li gp,15
li ra,-1
li sp,1
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_16:
li gp,16
li ra,-1
li sp,-1
sltu a4,ra,sp
li t2,0
bne a4,t2,fail
test_17:
li gp,17
li ra,14
li sp,13
sltu ra,ra,sp
li t2,0
bne ra,t2,fail
test_18:
li gp,18
li ra,11
li sp,13
sltu sp,ra,sp
li t2,1
bne sp,t2,fail
test_19:
li gp,19
li ra,13
sltu ra,ra,ra
li t2,0
bne ra,t2,fail
test_20:
li gp,20
li tp,0
li ra,11
li sp,13
sltu a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
li t2,1
bne t1,t2,fail
test_21:
li gp,21
li tp,0
li ra,14
li sp,13
sltu a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
li t2,0
bne t1,t2,fail
test_22:
li gp,22
li tp,0
li ra,12
li sp,13
sltu a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
li t2,1
bne t1,t2,fail
test_23:
li gp,23
li tp,0
li ra,14
li sp,13
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
li t2,0
bne a4,t2,fail
test_24:
li gp,24
li tp,0
li ra,11
li sp,13
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_24+0x8
li t2,1
bne a4,t2,fail
test_25:
li gp,25
li tp,0
li ra,15
li sp,13
nop
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_25+0x8
li t2,0
bne a4,t2,fail
test_26:
li gp,26
li tp,0
li ra,10
nop
li sp,13
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_26+0x8
li t2,1
bne a4,t2,fail
test_27:
li gp,27
li tp,0
li ra,16
nop
li sp,13
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_27+0x8
li t2,0
bne a4,t2,fail
test_28:
li gp,28
li tp,0
li ra,9
nop
nop
li sp,13
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_28+0x8
li t2,1
bne a4,t2,fail
test_29:
li gp,29
li tp,0
li sp,13
li ra,17
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_29+0x8
li t2,0
bne a4,t2,fail
test_30:
li gp,30
li tp,0
li sp,13
li ra,8
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_30+0x8
li t2,1
bne a4,t2,fail
test_31:
li gp,31
li tp,0
li sp,13
li ra,18
nop
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_31+0x8
li t2,0
bne a4,t2,fail
test_32:
li gp,32
li tp,0
li sp,13
nop
li ra,7
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_32+0x8
li t2,1
bne a4,t2,fail
test_33:
li gp,33
li tp,0
li sp,13
nop
li ra,19
nop
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_33+0x8
li t2,0
bne a4,t2,fail
test_34:
li gp,34
li tp,0
li sp,13
nop
nop
li ra,6
sltu a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_34+0x8
li t2,1
bne a4,t2,fail
test_35:
li gp,35
li ra,-1
snez sp,ra
li t2,1
bne sp,t2,fail
test_36:
li gp,36
li ra,-1
sltu sp,ra,zero
li t2,0
bne sp,t2,fail
test_37:
li gp,37
snez ra,zero
li t2,0
bne ra,t2,fail
test_38:
li gp,38
li ra,16
li sp,30
sltu zero,ra,sp
li t2,0
bne zero,t2,fail
bne zero,gp,pass
|
abmfy/cod23-grp04
| 5,451
|
asm/rv32i/sll.s
|
# ---------------------------------------------------------------------------
# rv32i SLL (shift-left-logical) self-test, riscv-tests style.
# gp holds the number of the test currently running; any mismatch between
# the computed and the expected value branches to `fail`.
# The fail/pass stubs poll a memory-mapped UART at 0x10000000 (byte 5 is
# presumably a line-status register, bit 0x20 = transmitter ready -- TODO
# confirm against the target SoC memory map) and emit 'F' or 'P'.
# NOTE(review): branch targets spelled `test_N+0x8` etc. are fixed byte
# offsets from the label, so instructions inside those tests must not be
# added, removed or reordered without recomputing the offsets.
# ---------------------------------------------------------------------------
_start:
beq x0, x0, reset_vector        # skip over the loop/fail/pass stubs
loop:                           # terminal idle loop (test run is finished)
beq x0, x0, loop
fail:                           # report failure: wait for UART, print 'F'
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail              # spin until the UART can accept a byte
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:                           # report success: wait for UART, print 'P'
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:                   # put every GPR into a known (zero) state
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
# --- Basic shifts of 1: 1 << shamt -----------------------------------------
test_2:                         # 1 << 0 == 1
li gp,2
li ra,1
li sp,0
sll a4,ra,sp
li t2,1
bne a4,t2,fail
test_3:                         # 1 << 1 == 2
li gp,3
li ra,1
li sp,1
sll a4,ra,sp
li t2,2
bne a4,t2,fail
test_4:                         # 1 << 7 == 0x80
li gp,4
li ra,1
li sp,7
sll a4,ra,sp
li t2,128
bne a4,t2,fail
test_5:                         # 1 << 14 == 0x4000
li gp,5
li ra,1
li sp,14
sll a4,ra,sp
lui t2,0x4
bne a4,t2,fail
test_6:                         # 1 << 31 == 0x80000000
li gp,6
li ra,1
li sp,31
sll a4,ra,sp
lui t2,0x80000
bne a4,t2,fail
# --- Shifts of 0xffffffff: zeros shifted into the low bits ------------------
test_7:
li gp,7
li ra,-1
li sp,0
sll a4,ra,sp
li t2,-1
bne a4,t2,fail
test_8:
li gp,8
li ra,-1
li sp,1
sll a4,ra,sp
li t2,-2
bne a4,t2,fail
test_9:
li gp,9
li ra,-1
li sp,7
sll a4,ra,sp
li t2,-128
bne a4,t2,fail
test_10:
li gp,10
li ra,-1
li sp,14
sll a4,ra,sp
lui t2,0xffffc
bne a4,t2,fail
test_11:
li gp,11
li ra,-1
li sp,31
sll a4,ra,sp
lui t2,0x80000
bne a4,t2,fail
# --- Shifts of the bit pattern 0x21212121 -----------------------------------
test_12:
li gp,12
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,0
sll a4,ra,sp
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
bne a4,t2,fail
test_13:
li gp,13
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,1
sll a4,ra,sp
lui t2,0x42424
addi t2,t2,578 # _start-0x3dbdbdbe
bne a4,t2,fail
test_14:
li gp,14
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,7
sll a4,ra,sp
lui t2,0x90909
addi t2,t2,128 # _end+0x10907080
bne a4,t2,fail
test_15:
li gp,15
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,14
sll a4,ra,sp
lui t2,0x48484
bne a4,t2,fail
test_16:
li gp,16
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,31
sll a4,ra,sp
lui t2,0x80000
bne a4,t2,fail
# --- Negative shift amounts: only the low 5 bits of rs2 are used ------------
test_17:                        # -64 & 31 == 0
li gp,17
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-64
sll a4,ra,sp
lui t2,0x21212
addi t2,t2,289 # _start-0x5edededf
bne a4,t2,fail
test_18:                        # -63 & 31 == 1
li gp,18
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-63
sll a4,ra,sp
lui t2,0x42424
addi t2,t2,578 # _start-0x3dbdbdbe
bne a4,t2,fail
test_19:                        # -57 & 31 == 7
li gp,19
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-57
sll a4,ra,sp
lui t2,0x90909
addi t2,t2,128 # _end+0x10907080
bne a4,t2,fail
test_20:                        # -50 & 31 == 14
li gp,20
lui ra,0x21212
addi ra,ra,289 # _start-0x5edededf
li sp,-50
sll a4,ra,sp
lui t2,0x48484
bne a4,t2,fail
# --- Source/destination register aliasing -----------------------------------
test_22:                        # rd == rs1
li gp,22
li ra,1
li sp,7
sll ra,ra,sp
li t2,128
bne ra,t2,fail
test_23:                        # rd == rs2
li gp,23
li ra,1
li sp,14
sll sp,ra,sp
lui t2,0x4
bne sp,t2,fail
test_24:                        # rd == rs1 == rs2: 3 << 3 == 24
li gp,24
li ra,3
sll ra,ra,ra
li t2,24
bne ra,t2,fail
# --- Result-bypass tests: consumer follows sll after 0/1/2 nops; the tp
# --- counter makes each test body execute twice (see bne ...,test_N+0x8)
test_25:
li gp,25
li tp,0
li ra,1
li sp,7
sll a4,ra,sp
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_25+0x8
li t2,128
bne t1,t2,fail
test_26:
li gp,26
li tp,0
li ra,1
li sp,14
sll a4,ra,sp
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_26+0x8
lui t2,0x4
bne t1,t2,fail
test_27:
li gp,27
li tp,0
li ra,1
li sp,31
sll a4,ra,sp
nop
nop
mv t1,a4
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_27+0x8
lui t2,0x80000
bne t1,t2,fail
test_28:
li gp,28
li tp,0
li ra,1
li sp,7
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_28+0x8
li t2,128
bne a4,t2,fail
test_29:
li gp,29
li tp,0
li ra,1
li sp,14
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_29+0x8
lui t2,0x4
bne a4,t2,fail
test_30:
li gp,30
li tp,0
li ra,1
li sp,31
nop
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_30+0x8
lui t2,0x80000
bne a4,t2,fail
# --- Operand-bypass tests: nops between operand setup and the sll -----------
test_31:
li gp,31
li tp,0
li ra,1
nop
li sp,7
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_31+0x8
li t2,128
bne a4,t2,fail
test_32:
li gp,32
li tp,0
li ra,1
nop
li sp,14
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_32+0x8
lui t2,0x4
bne a4,t2,fail
test_33:
li gp,33
li tp,0
li ra,1
nop
nop
li sp,31
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_33+0x8
lui t2,0x80000
bne a4,t2,fail
# --- Same, with the operand setup order reversed (rs2 before rs1) -----------
test_34:
li gp,34
li tp,0
li sp,7
li ra,1
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_34+0x8
li t2,128
bne a4,t2,fail
test_35:
li gp,35
li tp,0
li sp,14
li ra,1
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_35+0x8
lui t2,0x4
bne a4,t2,fail
test_36:
li gp,36
li tp,0
li sp,31
li ra,1
nop
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_36+0x8
lui t2,0x80000
bne a4,t2,fail
test_37:
li gp,37
li tp,0
li sp,7
nop
li ra,1
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_37+0x8
li t2,128
bne a4,t2,fail
test_38:
li gp,38
li tp,0
li sp,14
nop
li ra,1
nop
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_38+0x8
lui t2,0x4
bne a4,t2,fail
test_39:
li gp,39
li tp,0
li sp,31
nop
nop
li ra,1
sll a4,ra,sp
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_39+0x8
lui t2,0x80000
bne a4,t2,fail
# --- x0 as source operand or destination ------------------------------------
test_40:                        # 0 << 15 == 0
li gp,40
li ra,15
sll sp,zero,ra
li t2,0
bne sp,t2,fail
test_41:                        # 32 << 0 == 32
li gp,41
li ra,32
sll sp,ra,zero
li t2,32
bne sp,t2,fail
test_42:                        # 0 << 0 == 0
li gp,42
sll ra,zero,zero
li t2,0
bne ra,t2,fail
test_43:                        # write to x0 is discarded; x0 stays 0
li gp,43
li ra,1024
lui sp,0x1
addi sp,sp,-2048 # _start-0x7ffff800
sll zero,ra,sp
li t2,0
bne zero,t2,fail
bne zero,gp,pass                # gp != 0 here: all tests passed
|
abmfy/cod23-grp04
| 3,139
|
asm/rv32i/blt.s
|
# ---------------------------------------------------------------------------
# rv32i BLT (branch if less-than, signed) self-test, riscv-tests style.
# gp holds the current test number; an unexpected branch outcome falls
# into a `bne zero,gp,fail` and reports 'F' over the UART at 0x10000000
# (byte 5 presumably the line-status register, bit 0x20 = TX ready --
# TODO confirm against the SoC memory map).
# NOTE(review): targets like `test_N+0x18` are fixed byte offsets, so
# instructions inside a test must not be added, removed or reordered.
# ---------------------------------------------------------------------------
_start:
beq x0, x0, reset_vector        # skip over the loop/fail/pass stubs
loop:                           # terminal idle loop
beq x0, x0, loop
fail:                           # report failure: wait for UART, print 'F'
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:                           # report success: wait for UART, print 'P'
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:                   # put every GPR into a known (zero) state
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
# --- Branch-taken cases: ra < sp (signed), forward and backward branches ----
test_2:
li gp,2
li ra,0
li sp,1
blt ra,sp,test_2+0x18
bne zero,gp,fail
bne zero,gp,test_3
blt ra,sp,test_2+0x14
bne zero,gp,fail
test_3:
li gp,3
li ra,-1
li sp,1
blt ra,sp,test_3+0x18
bne zero,gp,fail
bne zero,gp,test_4
blt ra,sp,test_3+0x14
bne zero,gp,fail
test_4:
li gp,4
li ra,-2
li sp,-1
blt ra,sp,test_4+0x18
bne zero,gp,fail
bne zero,gp,test_5
blt ra,sp,test_4+0x14
bne zero,gp,fail
# --- Branch-not-taken cases: ra >= sp (signed) ------------------------------
test_5:
li gp,5
li ra,1
li sp,0
blt ra,sp,test_5+0x14
bne zero,gp,test_5+0x18
bne zero,gp,fail
blt ra,sp,test_5+0x14
test_6:
li gp,6
li ra,1
li sp,-1
blt ra,sp,test_6+0x14
bne zero,gp,test_6+0x18
bne zero,gp,fail
blt ra,sp,test_6+0x14
test_7:
li gp,7
li ra,-1
li sp,-2
blt ra,sp,test_7+0x14
bne zero,gp,test_7+0x18
bne zero,gp,fail
blt ra,sp,test_7+0x14
test_8:
li gp,8
li ra,1
li sp,-2
blt ra,sp,test_8+0x14
bne zero,gp,test_8+0x18
bne zero,gp,fail
blt ra,sp,test_8+0x14
# --- Operand-bypass tests: nops vary the distance between operand setup
# --- and the branch; the tp counter runs each test body twice ---------------
test_9:
li gp,9
li tp,0
li ra,0
li sp,-1
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_9+0x8
test_10:
li gp,10
li tp,0
li ra,0
li sp,-1
nop
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_10+0x8
test_11:
li gp,11
li tp,0
li ra,0
li sp,-1
nop
nop
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_11+0x8
test_12:
li gp,12
li tp,0
li ra,0
nop
li sp,-1
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
test_13:
li gp,13
li tp,0
li ra,0
nop
li sp,-1
nop
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_13+0x8
test_14:
li gp,14
li tp,0
li ra,0
nop
nop
li sp,-1
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_14+0x8
test_15:
li gp,15
li tp,0
li ra,0
li sp,-1
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_15+0x8
test_16:
li gp,16
li tp,0
li ra,0
li sp,-1
nop
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_16+0x8
test_17:
li gp,17
li tp,0
li ra,0
li sp,-1
nop
nop
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_17+0x8
test_18:
li gp,18
li tp,0
li ra,0
nop
li sp,-1
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_18+0x8
test_19:
li gp,19
li tp,0
li ra,0
nop
li sp,-1
nop
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
test_20:
li gp,20
li tp,0
li ra,0
nop
nop
li sp,-1
blt ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
# --- A taken branch must skip the instruction stream it jumps over ----------
test_21:
li gp,21
li ra,1
bgtz ra,test_21+0x1c
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3                         # only the last two addi's should execute
bne ra,t2,fail
bne zero,gp,pass                # gp != 0 here: all tests passed
|
abmfy/cod23-grp04
| 3,544
|
asm/rv32i/bge.s
|
# ---------------------------------------------------------------------------
# rv32i BGE (branch if greater-or-equal, signed) self-test, riscv-tests
# style. gp holds the current test number; a wrong branch outcome reaches
# a `bne zero,gp,fail` and reports 'F' over the UART at 0x10000000
# (byte 5 presumably the line-status register, bit 0x20 = TX ready --
# TODO confirm against the SoC memory map).
# NOTE(review): targets like `test_N+0x18` are fixed byte offsets, so
# instructions inside a test must not be added, removed or reordered.
# ---------------------------------------------------------------------------
_start:
beq x0, x0, reset_vector        # skip over the loop/fail/pass stubs
loop:                           # terminal idle loop
beq x0, x0, loop
fail:                           # report failure: wait for UART, print 'F'
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:                           # report success: wait for UART, print 'P'
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:                   # put every GPR into a known (zero) state
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
# --- Branch-taken cases: ra >= sp (signed) ----------------------------------
test_2:
li gp,2
li ra,0
li sp,0
bge ra,sp,test_2+0x18
bne zero,gp,fail
bne zero,gp,test_3
bge ra,sp,test_2+0x14
bne zero,gp,fail
test_3:
li gp,3
li ra,1
li sp,1
bge ra,sp,test_3+0x18
bne zero,gp,fail
bne zero,gp,test_4
bge ra,sp,test_3+0x14
bne zero,gp,fail
test_4:
li gp,4
li ra,-1
li sp,-1
bge ra,sp,test_4+0x18
bne zero,gp,fail
bne zero,gp,test_5
bge ra,sp,test_4+0x14
bne zero,gp,fail
test_5:
li gp,5
li ra,1
li sp,0
bge ra,sp,test_5+0x18
bne zero,gp,fail
bne zero,gp,test_6
bge ra,sp,test_5+0x14
bne zero,gp,fail
test_6:
li gp,6
li ra,1
li sp,-1
bge ra,sp,test_6+0x18
bne zero,gp,fail
bne zero,gp,test_7
bge ra,sp,test_6+0x14
bne zero,gp,fail
test_7:
li gp,7
li ra,-1
li sp,-2
bge ra,sp,test_7+0x18
bne zero,gp,fail
bne zero,gp,test_8
bge ra,sp,test_7+0x14
bne zero,gp,fail
# --- Branch-not-taken cases: ra < sp (signed) -------------------------------
test_8:
li gp,8
li ra,0
li sp,1
bge ra,sp,test_8+0x14
bne zero,gp,test_8+0x18
bne zero,gp,fail
bge ra,sp,test_8+0x14
test_9:
li gp,9
li ra,-1
li sp,1
bge ra,sp,test_9+0x14
bne zero,gp,test_9+0x18
bne zero,gp,fail
bge ra,sp,test_9+0x14
test_10:
li gp,10
li ra,-2
li sp,-1
bge ra,sp,test_10+0x14
bne zero,gp,test_10+0x18
bne zero,gp,fail
bge ra,sp,test_10+0x14
test_11:
li gp,11
li ra,-2
li sp,1
bge ra,sp,test_11+0x14
bne zero,gp,test_11+0x18
bne zero,gp,fail
bge ra,sp,test_11+0x14
# --- Operand-bypass tests: nops vary the distance between operand setup
# --- and the branch; the tp counter runs each test body twice ---------------
test_12:
li gp,12
li tp,0
li ra,-1
li sp,0
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
test_13:
li gp,13
li tp,0
li ra,-1
li sp,0
nop
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_13+0x8
test_14:
li gp,14
li tp,0
li ra,-1
li sp,0
nop
nop
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_14+0x8
test_15:
li gp,15
li tp,0
li ra,-1
nop
li sp,0
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_15+0x8
test_16:
li gp,16
li tp,0
li ra,-1
nop
li sp,0
nop
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_16+0x8
test_17:
li gp,17
li tp,0
li ra,-1
nop
nop
li sp,0
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_17+0x8
test_18:
li gp,18
li tp,0
li ra,-1
li sp,0
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_18+0x8
test_19:
li gp,19
li tp,0
li ra,-1
li sp,0
nop
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
test_20:
li gp,20
li tp,0
li ra,-1
li sp,0
nop
nop
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
test_21:
li gp,21
li tp,0
li ra,-1
nop
li sp,0
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_21+0x8
test_22:
li gp,22
li tp,0
li ra,-1
nop
li sp,0
nop
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_22+0x8
test_23:
li gp,23
li tp,0
li ra,-1
nop
nop
li sp,0
bge ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_23+0x8
# --- A taken branch must skip the instruction stream it jumps over ----------
test_24:
li gp,24
li ra,1
bgez ra,test_24+0x1c
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3                         # only the last two addi's should execute
bne ra,t2,fail
bne zero,gp,pass                # gp != 0 here: all tests passed
|
abmfy/cod23-grp04
| 3,773
|
asm/rv32i/bltu.s
|
# ---------------------------------------------------------------------------
# rv32i BLTU (branch if less-than, unsigned) self-test, riscv-tests style.
# gp holds the current test number; a wrong branch outcome reaches a
# `bne zero,gp,fail` and reports 'F' over the UART at 0x10000000
# (byte 5 presumably the line-status register, bit 0x20 = TX ready --
# TODO confirm against the SoC memory map).
# NOTE(review): targets like `test_N+0x18` are fixed byte offsets, so
# instructions inside a test must not be added, removed or reordered.
# ---------------------------------------------------------------------------
_start:
beq x0, x0, reset_vector        # skip over the loop/fail/pass stubs
loop:                           # terminal idle loop
beq x0, x0, loop
fail:                           # report failure: wait for UART, print 'F'
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, fail
addi a0, zero, 'F'
sb a0, 0(t0)
beq x0, x0, loop
pass:                           # report success: wait for UART, print 'P'
lui t0, 0x10000
lb t1, 5(t0)
andi t1, t1, 0x20
beq t1, zero, pass
addi a0, zero, 'P'
sb a0, 0(t0)
beq x0, x0, loop
nop
nop
nop
reset_vector:                   # put every GPR into a known (zero) state
li ra,0
li sp,0
li gp,0
li tp,0
li t0,0
li t1,0
li t2,0
li s0,0
li s1,0
li a0,0
li a1,0
li a2,0
li a3,0
li a4,0
li a5,0
li a6,0
li a7,0
li s2,0
li s3,0
li s4,0
li s5,0
li s6,0
li s7,0
li s8,0
li s9,0
li s10,0
li s11,0
li t3,0
li t4,0
li t5,0
li t6,0
# --- Branch-taken cases: ra < sp as UNSIGNED values -------------------------
test_2:
li gp,2
li ra,0
li sp,1
bltu ra,sp,test_2+0x18
bne zero,gp,fail
bne zero,gp,test_3
bltu ra,sp,test_2+0x14
bne zero,gp,fail
test_3:                         # 0xfffffffe < 0xffffffff unsigned
li gp,3
li ra,-2
li sp,-1
bltu ra,sp,test_3+0x18
bne zero,gp,fail
bne zero,gp,test_4
bltu ra,sp,test_3+0x14
bne zero,gp,fail
test_4:                         # 0 < 0xffffffff unsigned (would fail signed)
li gp,4
li ra,0
li sp,-1
bltu ra,sp,test_4+0x18
bne zero,gp,fail
bne zero,gp,test_5
bltu ra,sp,test_4+0x14
bne zero,gp,fail
# --- Branch-not-taken cases: ra >= sp unsigned ------------------------------
test_5:
li gp,5
li ra,1
li sp,0
bltu ra,sp,test_5+0x14
bne zero,gp,test_5+0x18
bne zero,gp,fail
bltu ra,sp,test_5+0x14
test_6:
li gp,6
li ra,-1
li sp,-2
bltu ra,sp,test_6+0x14
bne zero,gp,test_6+0x18
bne zero,gp,fail
bltu ra,sp,test_6+0x14
test_7:                         # 0xffffffff >= 0 unsigned
li gp,7
li ra,-1
li sp,0
bltu ra,sp,test_7+0x14
bne zero,gp,test_7+0x18
bne zero,gp,fail
bltu ra,sp,test_7+0x14
test_8:                         # 0x80000000 >= 0x7fffffff unsigned boundary
li gp,8
lui ra,0x80000
lui sp,0x80000
addi sp,sp,-1 # _end+0xffffdfff
bltu ra,sp,test_8+0x18
bne zero,gp,test_8+0x1c
bne zero,gp,fail
bltu ra,sp,test_8+0x18
# --- Operand-bypass tests: nops vary the distance between operand setup
# --- and the branch; the tp counter runs each test body twice ---------------
test_9:
li gp,9
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_9+0x8
test_10:
li gp,10
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_10+0x8
test_11:
li gp,11
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
nop
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_11+0x8
test_12:
li gp,12
li tp,0
lui ra,0xf0000
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_12+0x8
test_13:
li gp,13
li tp,0
lui ra,0xf0000
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_13+0x8
test_14:
li gp,14
li tp,0
lui ra,0xf0000
nop
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_14+0x8
test_15:
li gp,15
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_15+0x8
test_16:
li gp,16
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_16+0x8
test_17:
li gp,17
li tp,0
lui ra,0xf0000
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
nop
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_17+0x8
test_18:
li gp,18
li tp,0
lui ra,0xf0000
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_18+0x8
test_19:
li gp,19
li tp,0
lui ra,0xf0000
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
nop
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_19+0x8
test_20:
li gp,20
li tp,0
lui ra,0xf0000
nop
nop
lui sp,0xf0000
addi sp,sp,-1 # _end+0x6fffdfff
bltu ra,sp,fail
addi tp,tp,1 # _start-0x7fffffff
li t0,2
bne tp,t0,test_20+0x8
# --- A taken branch must skip the instruction stream it jumps over ----------
test_21:
li gp,21
li ra,1
bltu zero,ra,test_21+0x1c
addi ra,ra,1 # _end+0x6fffe001
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
addi ra,ra,1
li t2,3                         # only the last two addi's should execute
bne ra,t2,fail
bne zero,gp,pass                # gp != 0 here: all tests passed
|
abnoname/iceZ0mb1e
| 2,655
|
firmware/crt0/crt0.s
|
;--------------------------------------------------------------------------
; crt0.s - Generic crt0.s for a Z80
;
; Copyright (C) 2000, Michael Hope
; Modified for iceZ0mb1e - FPGA 8-Bit TV80 SoC (C) 2018, Franz Neumann
;
; This library is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License as published by the
; Free Software Foundation; either version 2.1, or (at your option) any
; later version.
;
; This library is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; GNU General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this library; see the file COPYING. If not, write to the
; Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
; MA 02110-1301, USA.
;
; As a special exception, if you link this library with other files,
; some of which are compiled with SDCC, to produce an executable,
; this library does not by itself cause the resulting executable to
; be covered by the GNU General Public License. This exception does
; not however invalidate any other reasons why the executable file
; might be covered by the GNU General Public License.
;--------------------------------------------------------------------------
stacktop = #0x9FFF              ;; initial stack pointer: top of RAM
.module crt0
.globl _main
.area _HEADER (ABS)
;; Reset vector
.org 0
jp init
;; RST 08h..38h restart / interrupt-mode-1 vectors: unused, return at once
.org 0x08
reti
.org 0x10
reti
.org 0x18
reti
.org 0x20
reti
.org 0x28
reti
.org 0x30
reti
.org 0x38
reti
;; Startup code placed above the vector table
.org 0x100
init:
ld sp,#stacktop ;; Stack at the top of memory.
;; Initialise global variables
call gsinit
call _main
jp _exit
;; Ordering of segments for the linker.
.area _HOME
.area _CODE
.area _GSINIT
.area _GSFINAL
.area _DATA
.area _BSEG
.area _BSS
.area _HEAP
.area _CODE
__clock::
;; Emulator hook: RST 08h with A = 2 requests the clock value
ld a,#2
rst 0x08
ret
_exit::
;; Exit - special code to the emulator
ld a,#0
rst 0x08
1$:
halt                            ;; wait for interrupt, then loop forever
jr 1$
.area _GSINIT
gsinit::
;; Compiler-emitted initialiser code lands in _GSINIT; the closing ret
;; below sits in _GSFINAL so it executes after all initialisers.
.area _GSFINAL
ret
|
AbsInt/CompCert
| 5,864
|
runtime/aarch64/vararg.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, Collège de France and INRIA Paris
//
// Copyright (c) Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for variadic functions <stdarg.h>. AArch64 version.
#include "sysdeps.h"
// For the standard ABI:
// struct __va_list {
// void *__stack; // next stack parameter
// void *__gr_top; // top of the save area for int regs
// void *__vr_top; // top of the save area for float regs
// int __gr_offs; // offset from gr_top to next int reg; 0 = regs exhausted
// int __vr_offs; // offset from vr_top to next FP reg; 0 = regs exhausted
// }
// typedef struct __va_list va_list; // struct passed by reference
// For the Apple ABI:
// typedef char * va_list; // a single pointer passed by reference
// // points to the next parameter, always on stack
// In both cases:
// unsigned int __compcert_va_int32(va_list * ap);
// unsigned long long __compcert_va_int64(va_list * ap);
// double __compcert_va_float64(va_list * ap);
#ifdef ABI_standard
// Fetch the next 32-bit integer argument; x0 = ap.
FUNCTION(__compcert_va_int32)
ldr w1, [x0, #24] // w1 = gr_offs
cbz w1, 1f
// gr_offs is not zero: load from int save area and update gr_offs
ldr x2, [x0, #8] // x2 = gr_top
ldr w2, [x2, w1, sxtw] // w2 = the next integer (gr_offs is signed)
add w1, w1, #8 // int args occupy 8-byte save slots
str w1, [x0, #24] // update gr_offs
mov w0, w2
ret
// gr_offs is zero: load from stack save area and update stack pointer
1: ldr x1, [x0, #0] // x1 = stack
ldr w2, [x1, #0] // w2 = the next integer
add x1, x1, #8
str x1, [x0, #0] // update stack
mov w0, w2
ret
ENDFUNCTION(__compcert_va_int32)
// Fetch the next 64-bit integer argument; x0 = ap.
FUNCTION(__compcert_va_int64)
ldr w1, [x0, #24] // w1 = gr_offs
cbz w1, 1f
// gr_offs is not zero: load from int save area and update gr_offs
ldr x2, [x0, #8] // x2 = gr_top
ldr x2, [x2, w1, sxtw] // x2 = the next long integer
add w1, w1, #8
str w1, [x0, #24] // update gr_offs
mov x0, x2
ret
// gr_offs is zero: load from stack save area and update stack pointer
1: ldr x1, [x0, #0] // x1 = stack
ldr x2, [x1, #0] // x2 = the next long integer
add x1, x1, #8
str x1, [x0, #0] // update stack
mov x0, x2
ret
ENDFUNCTION(__compcert_va_int64)
// Fetch the next double argument; x0 = ap.
FUNCTION(__compcert_va_float64)
ldr w1, [x0, #28] // w1 = vr_offs
cbz w1, 1f
// vr_offs is not zero: load from float save area and update vr_offs
ldr x2, [x0, #16] // x2 = vr_top
ldr d0, [x2, w1, sxtw] // d0 = the next float
add w1, w1, #16 // FP args occupy 16-byte save slots
str w1, [x0, #28] // update vr_offs
ret
// vr_offs is zero: load from stack save area and update stack pointer
1: ldr x1, [x0, #0] // x1 = stack
ldr d0, [x1, #0] // d0 = the next float
add x1, x1, #8
str x1, [x0, #0] // update stack
ret
ENDFUNCTION(__compcert_va_float64)
#endif
#ifdef ABI_apple
// Apple ABI: all variadic arguments live on the stack; ap is a plain
// pointer to the next 8-byte argument slot.
FUNCTION(__compcert_va_int32)
ldr x1, [x0, #0] // x1 = stack pointer
ldr w2, [x1, #0] // w2 = the next integer
add x1, x1, #8
str x1, [x0, #0] // update stack
mov w0, w2
ret
ENDFUNCTION(__compcert_va_int32)
FUNCTION(__compcert_va_int64)
ldr x1, [x0, #0] // x1 = stack pointer
ldr x2, [x1, #0] // x2 = the next long integer
add x1, x1, #8
str x1, [x0, #0] // update stack
mov x0, x2
ret
ENDFUNCTION(__compcert_va_int64)
FUNCTION(__compcert_va_float64)
ldr x1, [x0, #0] // x1 = stack pointer
ldr d0, [x1, #0] // d0 = the next float
add x1, x1, #8
str x1, [x0, #0] // update stack
ret
ENDFUNCTION(__compcert_va_float64)
#endif
// Right now we pass structs by reference. This is not ABI conformant.
// A composite is therefore fetched exactly like a 64-bit pointer.
FUNCTION(__compcert_va_composite)
b GLOB(__compcert_va_int64)
ENDFUNCTION(__compcert_va_composite)
|
AbsInt/CompCert
| 2,896
|
runtime/powerpc64/i64_dtou.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC 64 version.
#include "sysdeps.h"
.text
// Conversion from double float to unsigned long
// In:  f1 = the double to convert
// Out: r3 = high 32 bits, r4 = low 32 bits of the unsigned 64-bit result
//      (register pair return; word order assumes big-endian memory layout)
// Values >= 2^63 exceed the range of the signed fctidz conversion, so
// they are biased down by 2^63 first and the result corrected afterwards.
.balign 16
.globl __compcert_i64_dtou
__compcert_i64_dtou:
lis r0, 0x5f00 // 0x5f00_0000 = 2^63 in binary32 format
stwu r0, -16(r1) // push 16-byte scratch frame containing 2^63
lfs f2, 0(r1) // f2 = 2^63
fcmpu cr0, f1, f2 // crbit 0 is f1 < f2
bf 0, 1f // branch if f1 >= 2^63 (or f1 is NaN)
fctidz f1, f1 // convert as signed
stfd f1, 0(r1)
lwz r3, 0(r1) // high word of result
lwz r4, 4(r1) // low word of result
addi r1, r1, 16 // pop scratch frame
blr
1: fsub f1, f1, f2 // shift argument down by 2^63
fctidz f1, f1 // convert as signed
stfd f1, 0(r1)
lwz r3, 0(r1)
lwz r4, 4(r1)
addis r3, r3, 0x8000 // shift result up by 2^63
addi r1, r1, 16
blr
.type __compcert_i64_dtou, @function
.size __compcert_i64_dtou, .-__compcert_i64_dtou
|
AbsInt/CompCert
| 3,072
|
runtime/powerpc64/i64_stof.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC 64 version.
#include "sysdeps.h"
.text
// Conversion from signed long to single float
// In:  r3 = high 32 bits, r4 = low 32 bits of the signed 64-bit argument
// Out: f1 = the value rounded to single precision
// A direct int64 -> double -> single path would round twice; for inputs
// outside [-2^53, 2^53) the low bits are pre-conditioned ("round to odd")
// so the final frsp produces the correctly-rounded single result.
.balign 16
.globl __compcert_i64_stof
__compcert_i64_stof:
rldimi r4, r3, 32, 0 // reassemble (r3,r4) as a 64-bit integer in r4
// Check whether -2^53 <= X < 2^53
sradi r5, r4, 53
addi r5, r5, 1
cmpldi r5, 2
blt 1f
// X is large enough that double rounding can occur.
// Avoid it by nudging X away from the points where double rounding
// occurs (the "round to odd" technique)
rldicl r5, r4, 0, 53 // extract bits 0 to 11 of X
addi r5, r5, 0x7FF // r5 = (X & 0x7FF) + 0x7FF
// bit 12 of r5 is 0 if all low 12 bits of X are 0, 1 otherwise
// bits 13-63 of r5 are 0
or r4, r4, r5 // correct bit number 12 of X
rldicr r4, r4, 0, 52 // set to 0 bits 0 to 11 of X
// Convert to double, then round to single
1: stdu r4, -16(r1) // spill X to a 16-byte scratch frame
lfd f1, 0(r1)
fcfid f1, f1 // exact int64 -> double conversion
frsp f1, f1 // final (single) rounding step
addi r1, r1, 16
blr
.type __compcert_i64_stof, @function
.size __compcert_i64_stof, .-__compcert_i64_stof
|
AbsInt/CompCert
| 3,447
|
runtime/powerpc64/i64_utod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC 64 version.
#include "sysdeps.h"
.text
// Conversion from unsigned long to double float
// In:  r3 = high 32 bits, r4 = low 32 bits of the unsigned 64-bit argument
// Out: f1 = (double)(hi * 2^32 + lo)
// Each 32-bit half converts to double exactly, so the single rounding
// happens in the final fmadd.
.balign 16
.globl __compcert_i64_utod
__compcert_i64_utod:
rldicl r3, r3, 0, 32 // clear top 32 bits
rldicl r4, r4, 0, 32 // clear top 32 bits
lis r5, 0x4f80 // 0x4f80_0000 = 2^32 in binary32 format
stdu r3, -32(r1) // push 32-byte scratch frame: hi, lo, 2^32
std r4, 8(r1)
stw r5, 16(r1)
lfd f1, 0(r1) // high 32 bits of argument
lfd f2, 8(r1) // low 32 bits of argument
lfs f3, 16(r1) // 2^32
fcfid f1, f1 // convert both 32-bit halves to FP (exactly)
fcfid f2, f2
fmadd f1, f1, f3, f2 // compute hi * 2^32 + lo
addi r1, r1, 32 // pop scratch frame
blr
.type __compcert_i64_utod, @function
.size __compcert_i64_utod, .-__compcert_i64_utod
// Alternate implementation using round-to-odd:
// rldimi r4, r3, 32, 0 # reassemble (r3,r4) as a 64-bit integer in r4
// cmpdi r4, 0 # is r4 >= 2^63 ?
// blt 1f
// stdu r4, -16(r1) # r4 < 2^63: convert as signed
// lfd f1, 0(r1)
// fcfid f1, f1
// addi r1, r1, 16
// blr
//1: rldicl r0, r4, 0, 63 # extract low bit of r4
// srdi r4, r4, 1
// or r4, r4, r0 # round r4 to 63 bits, using round-to-odd
// stdu r4, -16(r1) # convert to binary64
// lfd f1, 0(r1)
// fcfid f1, f1
// fadd f1, f1, f1 # multiply result by 2
// addi r1, r1, 16
// blr
|
AbsInt/CompCert
| 6,886
|
runtime/powerpc64/vararg.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for variadic functions <stdarg.h>. PowerPC version
// typedef struct {
// unsigned char ireg; // index of next integer register
// unsigned char freg; // index of next FP register
// char * stk; // pointer to next argument in stack
// struct {
// int iregs[8];
// double fregs[8];
// } * regs; // pointer to saved register area
// } va_list[1];
//
// unsigned int __compcert_va_int32(va_list ap);
// unsigned long long __compcert_va_int64(va_list ap);
// double __compcert_va_float64(va_list ap);
#include "sysdeps.h"
.text
        .balign 16
        .globl  __compcert_va_int32
// unsigned int __compcert_va_int32(va_list ap)
// Return the next 32-bit integer vararg and advance ap.
// In:  r3 = ap (layout per the header comment above: byte 0 = ireg
//      index, word at +4 = stk pointer, word at +8 = regs pointer).
// Out: r3 = argument value.  Clobbers r4, r5, r6, cr0.
__compcert_va_int32:
        // r3 = ap = address of va_list structure
        lbz     r4, 0(r3)               // r4 = ap->ireg = next integer register
        cmplwi  r4, 8
        bge     1f                      // all 8 integer regs consumed -> on stack
        // Next argument was passed in an integer register
        lwz     r5, 8(r3)               // r5 = ap->regs = base of saved register area
        rlwinm  r6, r4, 2, 0, 29        // r6 = r4 * 4
        addi    r4, r4, 1               // increment ap->ireg
        stb     r4, 0(r3)
        lwzx    r3, r5, r6              // load argument in r3
        blr
        // Next argument was passed on stack
1:      lwz     r5, 4(r3)               // r5 = ap->stk = next argument passed on stack
        addi    r5, r5, 4               // advance ap->stk by 4
        stw     r5, 4(r3)
        lwz     r3, -4(r5)              // load argument in r3
        blr
        .type   __compcert_va_int32, @function
        .size   __compcert_va_int32, .-__compcert_va_int32
        .balign 16
        .globl  __compcert_va_int64
// unsigned long long __compcert_va_int64(va_list ap)
// Return the next 64-bit integer vararg in r3:r4 and advance ap.
// A register-passed int64 occupies an even/odd pair of argument
// registers; a stack-passed one occupies an 8-aligned stack slot.
__compcert_va_int64:
        // r3 = ap = address of va_list structure
        lbz     r4, 0(r3)               // r4 = ap->ireg = next integer register
        cmplwi  r4, 7
        bge     1f                      // needs a pair of regs, so ireg >= 7 -> stack
        // Next argument was passed in two consecutive integer register
        lwz     r5, 8(r3)               // r5 = ap->regs = base of saved register area
        addi    r4, r4, 3               // round r4 up to an even number and add 2
        rlwinm  r4, r4, 0, 0, 30
        rlwinm  r6, r4, 2, 0, 29        // r6 = r4 * 4
        add     r5, r5, r6              // r5 = address of argument + 8
        stb     r4, 0(r3)               // update ap->ireg
        lwz     r3, -8(r5)              // load argument in r3:r4
        lwz     r4, -4(r5)
        blr
        // Next argument was passed on stack
1:      lwz     r5, 4(r3)               // r5 = ap->stk = next argument passed on stack
        li      r4, 8
        stb     r4, 0(r3)               // set ap->ireg = 8 so that no ireg is left
        addi    r5, r5, 15              // round r5 to a multiple of 8 and add 8
        rlwinm  r5, r5, 0, 0, 28
        stw     r5, 4(r3)               // update ap->stk
        lwz     r3, -8(r5)              // load argument in r3:r4
        lwz     r4, -4(r5)
        blr
        .type   __compcert_va_int64, @function
        .size   __compcert_va_int64, .-__compcert_va_int64
.balign 16
.globl __compcert_va_float64
__compcert_va_float64:
// r3 = ap = address of va_list structure
lbz r4, 1(r3) // r4 = ap->freg = next float register
cmplwi r4, 8
bge 1f
// Next argument was passed in a FP register
lwz r5, 8(r3) // r5 = ap->regs = base of saved register area
rlwinm r6, r4, 3, 0, 28 // r6 = r4 * 8
add r5, r5, r6
lfd f1, 32(r5) // load argument in f1
addi r4, r4, 1 // increment ap->freg
stb r4, 1(r3)
blr
// Next argument was passed on stack
1: lwz r5, 4(r3) // r5 = ap->stk = next argument passed on stack
addi r5, r5, 15 // round r5 to a multiple of 8 and add 8
rlwinm r5, r5, 0, 0, 28
lfd f1, -8(r5) // load argument in f1
stw r5, 4(r3) // update ap->stk
blr
.type __compcert_va_float64, @function
.size __compcert_va_float64, .-__compcert_va_int64
        .balign 16
        .globl  __compcert_va_composite
// Fetch the next composite (struct) vararg.
// NOTE(review): composites appear to be passed by reference, i.e. as a
// 32-bit pointer (cf. the explicit note in the RISC-V version), so this
// simply delegates to the 32-bit integer fetcher — confirm.
__compcert_va_composite:
        b       __compcert_va_int32     // tail-call; result returned in r3
        .type   __compcert_va_composite, @function
        .size   __compcert_va_composite, .-__compcert_va_composite
// Save integer and FP registers at beginning of vararg function
        .balign 16
        .globl  __compcert_va_saveregs
// Spill the 8 integer argument registers r3-r10 (and, if CR bit 6 is
// set, the 8 FP argument registers f1-f8) into the 96-byte register
// save area located just below the top of the current frame, so that
// the __compcert_va_* fetchers above can read them back.
// Clobbers r11 only.
__compcert_va_saveregs:
        lwz     r11, 0(r1)              // r11 point to top of our frame
        stwu    r3, -96(r11)            // register save area is 96 bytes below
                                        // (stwu also rebases r11 to that area)
        stw     r4, 4(r11)
        stw     r5, 8(r11)
        stw     r6, 12(r11)
        stw     r7, 16(r11)
        stw     r8, 20(r11)
        stw     r9, 24(r11)
        stw     r10, 28(r11)
        bf      6, 1f                   // don't save FP regs if CR6 bit is clear
        stfd    f1, 32(r11)             // FP save slots start at offset 32
        stfd    f2, 40(r11)
        stfd    f3, 48(r11)
        stfd    f4, 56(r11)
        stfd    f5, 64(r11)
        stfd    f6, 72(r11)
        stfd    f7, 80(r11)
        stfd    f8, 88(r11)
1:      blr
        .type   __compcert_va_saveregs, @function
        .size   __compcert_va_saveregs, .-__compcert_va_saveregs
|
AbsInt/CompCert
| 2,952
|
runtime/powerpc64/i64_utof.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Conversion from unsigned long to single float
        .balign 16
        .globl  __compcert_i64_utof
// float __compcert_i64_utof(unsigned long long X)
// Converts via double precision, using the "round to odd" trick to
// avoid incorrect results from double rounding when X >= 2^53.
// In:  r3 = high word of X, r4 = low word.  Out: f1 = result.
// NOTE(review): lr is parked in r9 across the call, so this relies on
// __compcert_i64_utod not clobbering r9 — confirm.
__compcert_i64_utof:
        mflr    r9
        // Check whether X < 2^53
        andis.  r0, r3, 0xFFE0          // test bits 53...63 of X
        beq     1f
        // X is large enough that double rounding can occur.
        // Avoid it by nudging X away from the points where double rounding
        // occurs (the "round to odd" technique)
        rlwinm  r5, r4, 0, 21, 31       // extract bits 0 to 11 of X
        addi    r5, r5, 0x7FF           // r5 = (X & 0x7FF) + 0x7FF
        // bit 12 of r5 is 0 if all low 12 bits of X are 0, 1 otherwise
        // bits 13-31 of r5 are 0
        or      r4, r4, r5              // correct bit number 12 of X
        rlwinm  r4, r4, 0, 0, 20        // set to 0 bits 0 to 11 of X
        // Convert to double, then round to single
1:      bl      __compcert_i64_utod
        mtlr    r9
        frsp    f1, f1
        blr
        .type   __compcert_i64_utof, @function
        .size   __compcert_i64_utof, .-__compcert_i64_utof
|
AbsInt/CompCert
| 3,489
|
runtime/riscV/vararg.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
// Prashanth Mundkur, SRI International
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// The contributions by Prashanth Mundkur are reused and adapted
// under the terms of a Contributor License Agreement between
// SRI International and INRIA.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for variadic functions <stdarg.h>. RISC-V version.
#include "sysdeps.h"
// typedef void * va_list;
// unsigned int __compcert_va_int32(va_list * ap);
// unsigned long long __compcert_va_int64(va_list * ap);
// double __compcert_va_float64(va_list * ap);
FUNCTION(__compcert_va_int32)
        # unsigned int __compcert_va_int32(va_list * ap)
        # Return the next 32-bit integer vararg and advance *ap by one
        # machine word.  Clobbers t5.
        # a0 = ap parameter
        lptr    t5, 0(a0)               # t5 = pointer to next argument
        addi    t5, t5, WORDSIZE        # advance ap
        sptr    t5, 0(a0)               # update ap
        lw      a0, -WORDSIZE(t5)       # load it and return it in a0
        jr      ra
ENDFUNCTION(__compcert_va_int32)
FUNCTION(__compcert_va_int64)
        # unsigned long long __compcert_va_int64(va_list * ap)
        # Return the next 64-bit integer vararg (8-aligned slot) and
        # advance *ap past it.  Clobbers t5.
        # a0 = ap parameter
        lptr    t5, 0(a0)               # t5 = pointer to next argument
        addi    t5, t5, 15              # 8-align and advance by 8
        and     t5, t5, -8
        sptr    t5, 0(a0)               # update ap
#ifdef MODEL_64
        ld      a0, -8(t5)              # return it in a0
#else
        lw      a0, -8(t5)              # return it in [a0,a1]
        lw      a1, -4(t5)
#endif
        jr      ra
ENDFUNCTION(__compcert_va_int64)
FUNCTION(__compcert_va_float64)
        # double __compcert_va_float64(va_list * ap)
        # Return the next double FP vararg (8-aligned slot) and advance
        # *ap past it.  Clobbers t5.
        # a0 = ap parameter
        lptr    t5, 0(a0)               # t5 = pointer to next argument
        addi    t5, t5, 15              # 8-align and advance by 8
        and     t5, t5, -8
        sptr    t5, 0(a0)               # update ap
        fld     fa0, -8(t5)             # return it in fa0
        jr      ra
ENDFUNCTION(__compcert_va_float64)
// Right now we pass structs by reference. This is not ABI conformant.
FUNCTION(__compcert_va_composite)
        # A composite vararg is therefore represented as one pointer,
        # i.e. a single machine word: delegate to the integer fetcher
        # of the matching width.
#ifdef MODEL_64
        j       __compcert_va_int64
#else
        j       __compcert_va_int32
#endif
ENDFUNCTION(__compcert_va_composite)
|
AbsInt/CompCert
| 2,788
|
runtime/arm/i64_umulh.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris
@
@ Copyright (c) 2016 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Multiply-high unsigned
@ X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
@ unsigned long long __compcert_i64_umulh(unsigned long long X,
@                                         unsigned long long Y)
@ In:  Reg0 = X, Reg1 = Y.  Out: Reg0 = bits 127-64 of the 128-bit
@ product X * Y.  r4-r7 are saved/restored; flags are clobbered.
FUNCTION(__compcert_i64_umulh)
        push    {r4, r5, r6, r7}
        @@@ r7:r6 accumulate bits 95-32 of the full product
        umull   r4, r6, Reg0LO, Reg1LO  @ r6 = high half of XL.YL product
        umull   r4, r5, Reg0LO, Reg1HI  @ r5:r4 = product XL.YH
        adds    r6, r6, r4
        ADC     r7, r5, #0              @ no carry out
        umull   r4, r5, Reg0HI, Reg1LO  @ r5:r4 = product XH.YL
        adds    r6, r6, r4
        adcs    r7, r7, r5              @ carry out is possible
        @@@ r6:r7 accumulate bits 127-64 of the full product
        mov     r6, #0
        ADC     r6, r6, #0              @ put carry out in bits 127-96
        umull   r4, r5, Reg0HI, Reg1HI  @ r5:r4 = product XH.YH
        adds    Reg0LO, r7, r4
        ADC     Reg0HI, r6, r5
        pop     {r4, r5, r6, r7}
        bx      lr
ENDFUNCTION(__compcert_i64_umulh)
|
AbsInt/CompCert
| 2,108
|
runtime/arm/i64_shr.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Shift right unsigned
@ unsigned long long __compcert_i64_shr(unsigned long long X, int amount)
@ In:  Reg0 = X, r2 = shift amount.  Out: Reg0 = X >> (amount & 63).
FUNCTION(__compcert_i64_shr)
        AND     r2, r2, #63             @ normalize amount to 0...63
        LSHR(Reg0, Reg0, r2, r3)        @ 64-bit logical shift right (macro; uses r3 as scratch)
        bx      lr
ENDFUNCTION(__compcert_i64_shr)
|
AbsInt/CompCert
| 2,166
|
runtime/arm/i64_umod.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Unsigned remainder
@ Computes X % Y by calling __compcert_i64_udivmod, which leaves the
@ remainder in r0:r1 (= Reg0), so no move is needed afterwards.
@ r4-r8 are saved because udivmod uses them.
FUNCTION(__compcert_i64_umod)
        push    {r4, r5, r6, r7, r8, lr}
        bl      __compcert_i64_udivmod  @ remainder is already in r0,r1
        pop     {r4, r5, r6, r7, r8, lr}
        bx      lr
ENDFUNCTION(__compcert_i64_umod)
|
AbsInt/CompCert
| 3,392
|
runtime/arm/i64_dtou.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Conversion from double float to unsigned 64-bit integer
@ unsigned long long __compcert_i64_dtou(double X)
@ Decodes the IEEE-754 bit pattern by hand: extracts the exponent and
@ mantissa, then shifts the mantissa into place.  Negatives and values
@ < 1.0 give 0; values >= 2^64 give all-ones.
FUNCTION(__compcert_i64_dtou)
#ifndef ABI_eabi
        vmov    Reg0LO, Reg0HI, d0      @ hard-float variant: argument arrives in d0
#endif
        cmp     Reg0HI, #0              @ is double < 0 ?
        blt     1f                      @ then it converts to 0
        @ extract unbiased exponent ((HI & 0x7FF00000) >> 20) - (1023 + 52) in r2
        @ note: 1023 + 52 = 1075 = 1024 + 51
        @ note: (HI & 0x7FF00000) >> 20 = (HI << 1) >> 21
        LSL     r2, Reg0HI, #1
        LSR     r2, r2, #21
        SUB     r2, r2, #51
        SUB     r2, r2, #1024
        @ check range of exponent
        cmn     r2, #52                 @ if EXP < -52, double is < 1.0
        blt     1f
        cmp     r2, #12                 @ if EXP >= 64 - 52, double is >= 2^64
        bge     2f
        @ extract true mantissa
        BIC     Reg0HI, Reg0HI, #0xFF000000
        BIC     Reg0HI, Reg0HI, #0x00F00000 @ HI &= ~0xFFF00000
        ORR     Reg0HI, Reg0HI, #0x00100000 @ HI |= 0x00100000 (implicit leading 1)
        @ shift it appropriately
        cmp     r2, #0
        blt     3f
        @ EXP >= 0: shift left by EXP. Note that EXP < 12
        LSHL_small(Reg0, Reg0, r2, r3)
        bx      lr
        @ EXP < 0: shift right by -EXP. Note that -EXP <= 52 but can be >= 32
3:      RSB     r2, r2, #0              @ r2 = -EXP = shift amount
        LSHR(Reg0, Reg0, r2, r3)
        bx      lr
        @ special cases
1:      MOV     Reg0LO, #0              @ result is 0
        MOV     Reg0HI, #0
        bx      lr
2:      mvn     Reg0LO, #0              @ result is 0xFF....FF (MAX_UINT)
        MOV     Reg0HI, Reg0LO
        bx      lr
ENDFUNCTION(__compcert_i64_dtou)
|
AbsInt/CompCert
| 3,380
|
runtime/arm/i64_stof.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Conversion from signed 64-bit integer to single float
@ float __compcert_i64_stof(long long X)
@ Converts via double precision; when |X| >= 2^53 it first applies the
@ "round to odd" trick so the double->single rounding step cannot
@ produce a doubly-rounded (incorrect) result.
FUNCTION(__compcert_i64_stof)
        @ Check whether -2^53 <= X < 2^53
        ASR     r2, Reg0HI, #21         @ r2 = high 32 bits of X >> 53
        @ -2^53 <= X < 2^53 iff r2 is -1 or 0, that is, iff r2 + 1 is 0 or 1
        adds    r2, r2, #1
        cmp     r2, #2
        blo     1f
        @ X is large enough that double rounding can occur.
        @ Avoid it by nudging X away from the points where double rounding
        @ occurs (the "round to odd" technique)
        MOV     r2, #0x700
        ORR     r2, r2, #0xFF           @ r2 = 0x7FF
        AND     r3, Reg0LO, r2          @ extract bits 0 to 11 of X
        ADD     r3, r3, r2              @ r3 = (X & 0x7FF) + 0x7FF
        @ bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
        @ bits 13-31 of r3 are 0
        ORR     Reg0LO, Reg0LO, r3      @ correct bit number 12 of X
        BIC     Reg0LO, Reg0LO, r2      @ set to 0 bits 0 to 11 of X
        @ Convert to double
1:      vmov    s0, Reg0LO
        vcvt.f64.u32 d0, s0             @ convert low half to double (unsigned)
        vmov    s2, Reg0HI
        vcvt.f64.s32 d1, s2             @ convert high half to double (signed)
        vldr    d2, .LC1                @ d2 = 2^32
        vmla.f64 d0, d1, d2             @ d0 = d0 + d1 * d2 = double value of int64
        @ Round to single
        vcvt.f32.f64 s0, d0
#ifdef ABI_eabi
        @ Return result in r0
        vmov    r0, s0
#endif
        bx      lr
ENDFUNCTION(__compcert_i64_stof)
        .balign 8
.LC1:   .quad   0x41f0000000000000      @ 2^32 in double precision
|
AbsInt/CompCert
| 2,584
|
runtime/arm/i64_utod.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Conversion from unsigned 64-bit integer to double float
@ double __compcert_i64_utod(unsigned long long X)
@ In:  Reg0 = X.  Out: d0 (hard-float) or r0:r1 (ABI_eabi).
@ Computes (double)lo + (double)hi * 2^32; both halves are exactly
@ representable, so only the final add rounds.
FUNCTION(__compcert_i64_utod)
        @ Fixed: removed the redundant "__compcert_i64_utod:" label that
        @ followed FUNCTION() — the macro already defines that symbol
        @ (as in the sibling i64_*.S files), so repeating it is a
        @ "symbol already defined" assembler error.
        vmov    s0, Reg0LO
        vcvt.f64.u32 d0, s0             @ convert low half to double (unsigned)
        vmov    s2, Reg0HI
        vcvt.f64.u32 d1, s2             @ convert high half to double (unsigned)
        vldr    d2, .LC1                @ d2 = 2^32
        vmla.f64 d0, d1, d2             @ d0 = d0 + d1 * d2 = double value of int64
#ifdef ABI_eabi
        vmov    Reg0LO, Reg0HI, d0      @ return result in register pair r0:r1
#endif
        bx      lr
ENDFUNCTION(__compcert_i64_utod)
        .balign 8
.LC1:   .quad   0x41f0000000000000      @ 2^32 in double precision
|
AbsInt/CompCert
| 3,888
|
runtime/arm/vararg.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for variadic functions <stdarg.h>. ARM version
#include "sysdeps.h"
@ typedef void * va_list;
@ unsigned int __compcert_va_int32(va_list * ap);
@ unsigned long long __compcert_va_int64(va_list * ap);
@ double __compcert_va_float64(va_list * ap);
FUNCTION(__compcert_va_int32)
        @ unsigned int __compcert_va_int32(va_list * ap)
        @ Return the next 32-bit integer vararg and advance *ap by 4.
        @ Clobbers r1.
        @ r0 = ap parameter
        ldr     r1, [r0, #0]            @ r1 = pointer to next argument
        ADD     r1, r1, #4              @ advance ap by 4
        str     r1, [r0, #0]
        ldr     r0, [r1, #-4]           @ load next argument and return it in r0
        bx      lr
ENDFUNCTION(__compcert_va_int32)
FUNCTION(__compcert_va_int64)
        @ unsigned long long __compcert_va_int64(va_list * ap)
        @ Return the next 64-bit integer vararg (8-aligned slot) in r0:r1
        @ and advance *ap past it.
        @ r0 = ap parameter
        ldr     r1, [r0, #0]            @ r1 = pointer to next argument
        ADD     r1, r1, #15             @ 8-align and advance by 8
        BIC     r1, r1, #7
        str     r1, [r0, #0]            @ update ap
        ldr     r0, [r1, #-8]           @ load next argument and return it in r0,r1
        ldr     r1, [r1, #-4]
        bx      lr
ENDFUNCTION(__compcert_va_int64)
FUNCTION(__compcert_va_float64)
        @ double __compcert_va_float64(va_list * ap)
        @ Return the next double FP vararg (8-aligned slot) and advance
        @ *ap past it.  Result in r0:r1 (ABI_eabi) or d0 (hard-float).
        @ r0 = ap parameter
        ldr     r1, [r0, #0]            @ r1 = pointer to next argument
        ADD     r1, r1, #15             @ 8-align and advance by 8
        BIC     r1, r1, #7
        str     r1, [r0, #0]            @ update ap
#ifdef ABI_eabi
        ldr     r0, [r1, #-8]           @ load next argument and return it in r0,r1
        ldr     r1, [r1, #-4]
#else
        vldr    d0, [r1, #-8]           @ load next argument and return it in d0
#endif
        bx      lr
ENDFUNCTION(__compcert_va_float64)
FUNCTION(__compcert_va_composite)
        @ void * __compcert_va_composite(va_list * ap, unsigned size)
        @ Return a pointer to the next composite (struct) vararg, which
        @ is stored inline in the vararg area, and advance *ap past it
        @ (4-aligned).  Clobbers r2, r3.
        @ r0 = ap parameter
        @ r1 = size of the composite, in bytes
        ldr     r2, [r0, #0]            @ r2 = pointer to next argument
        ADD     r3, r2, r1              @ advance by size
        ADD     r3, r3, #3              @ 4-align
        BIC     r3, r3, #3
        str     r3, [r0, #0]            @ update ap
        mov     r0, r2                  @ result is pointer to composite in stack
        bx      lr
ENDFUNCTION(__compcert_va_composite)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.