Repository: abforce/xposed_art_n

=== runtime/arch/quick_alloc_entrypoints.S (19,721 bytes) ===
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
// Called by managed code to allocate an object.
TWO_ARG_DOWNCALL art_quick_alloc_object\c_suffix, artAllocObjectFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object of a resolved class.
TWO_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object of an initialized class.
TWO_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object when the caller doesn't know whether it has access
// to the created type.
TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check\c_suffix, artAllocObjectFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array.
THREE_ARG_DOWNCALL art_quick_alloc_array\c_suffix, artAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array of a resolved class.
THREE_ARG_DOWNCALL art_quick_alloc_array_resolved\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array when the caller doesn't know whether it has access
// to the created type.
THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check\c_suffix, artAllocArrayFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array\c_suffix, artCheckAndAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check\c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate a string from bytes
FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes\c_suffix, artAllocStringFromBytesFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate a string from chars
THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars\c_suffix, artAllocStringFromCharsFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate a string from string
ONE_ARG_DOWNCALL art_quick_alloc_string_from_string\c_suffix, artAllocStringFromStringFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.endm
.macro GENERATE_ALL_ALLOC_ENTRYPOINTS
GENERATE_ALLOC_ENTRYPOINTS _dlmalloc, DlMalloc
GENERATE_ALLOC_ENTRYPOINTS _dlmalloc_instrumented, DlMallocInstrumented
GENERATE_ALLOC_ENTRYPOINTS _rosalloc, RosAlloc
GENERATE_ALLOC_ENTRYPOINTS _rosalloc_instrumented, RosAllocInstrumented
GENERATE_ALLOC_ENTRYPOINTS _bump_pointer, BumpPointer
GENERATE_ALLOC_ENTRYPOINTS _bump_pointer_instrumented, BumpPointerInstrumented
GENERATE_ALLOC_ENTRYPOINTS _tlab, TLAB
GENERATE_ALLOC_ENTRYPOINTS _tlab_instrumented, TLABInstrumented
GENERATE_ALLOC_ENTRYPOINTS _region, Region
GENERATE_ALLOC_ENTRYPOINTS _region_instrumented, RegionInstrumented
GENERATE_ALLOC_ENTRYPOINTS _region_tlab, RegionTLAB
GENERATE_ALLOC_ENTRYPOINTS _region_tlab_instrumented, RegionTLABInstrumented
.endm
// Generate the allocation entrypoints for each allocator. This is used as an alternative to
// GENERATE_ALL_ALLOC_ENTRYPOINTS for selectively implementing allocation fast paths in
// hand-written assembly.
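// For example, a port that hand-writes only the object-allocation fast path for
// RosAlloc (as the commented-out _rosalloc line further below indicates) would
// define art_quick_alloc_object_rosalloc by hand and then invoke the remaining
// per-entrypoint macros, along the lines of:
//
//   GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
//   GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
//   ... and so on for the other _rosalloc entrypoints.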
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check ## c_suffix, artAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array ## c_suffix, artCheckAndAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check ## c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab_instrumented, RegionTLABInstrumented)
.endm

=== runtime/interpreter/mterp/mips64/footer.S (5,243 bytes) ===
/*
* We've detected a condition that will result in an exception, but the exception
* has not yet been thrown. Just bail out to the reference interpreter to deal with it.
* TUNING: for consistency, we may want to just go ahead and handle these here.
*/
.extern MterpLogDivideByZeroException
common_errDivideByZero:
EXPORT_PC
#if MTERP_LOGGING
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
jal MterpLogDivideByZeroException
#endif
b MterpCommonFallback
.extern MterpLogArrayIndexException
common_errArrayIndex:
EXPORT_PC
#if MTERP_LOGGING
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
jal MterpLogArrayIndexException
#endif
b MterpCommonFallback
.extern MterpLogNullObjectException
common_errNullObject:
EXPORT_PC
#if MTERP_LOGGING
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
jal MterpLogNullObjectException
#endif
b MterpCommonFallback
/*
* If we're here, something is out of the ordinary. If there is a pending
* exception, handle it. Otherwise, roll back and retry with the reference
* interpreter.
*/
MterpPossibleException:
ld a0, THREAD_EXCEPTION_OFFSET(rSELF)
beqzc a0, MterpFallback # If not, fall back to reference interpreter.
/* intentional fallthrough - handle pending exception. */
/*
* On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here, or do we need to bail out to the caller?
*
*/
.extern MterpHandleException
.extern MterpShouldSwitchInterpreters
MterpException:
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
jal MterpHandleException # (self, shadow_frame)
beqzc v0, MterpExceptionReturn # no local catch, back to caller.
ld a0, OFF_FP_CODE_ITEM(rFP)
lwu a1, OFF_FP_DEX_PC(rFP)
REFRESH_IBASE
daddu rPC, a0, CODEITEM_INSNS_OFFSET
dlsa rPC, a1, rPC, 1 # generate new dex_pc_ptr
/* Do we need to switch interpreters? */
jal MterpShouldSwitchInterpreters
bnezc v0, MterpFallback
/* resume execution at catch block */
EXPORT_PC
FETCH_INST
GET_INST_OPCODE v0
GOTO_OPCODE v0
/* NOTE: no fallthrough */
/*
 * Check for a suspend request. Assumes rINST is already loaded and rPC has been
 * advanced; we still need to extract the opcode and branch to it. The thread
 * flags are expected in ra.
*/
.extern MterpSuspendCheck
MterpCheckSuspendAndContinue:
REFRESH_IBASE
and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
bnez ra, check1
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction
check1:
EXPORT_PC
move a0, rSELF
jal MterpSuspendCheck # (self)
bnezc v0, MterpFallback # Something in the environment changed, switch interpreters
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction
/*
* On-stack replacement has happened, and now we've returned from the compiled method.
*/
MterpOnStackReplacement:
#if MTERP_LOGGING
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST # rINST contains offset
jal MterpLogOSR
#endif
li v0, 1 # Signal normal return
b MterpDone
/*
* Bail out to reference interpreter.
*/
.extern MterpLogFallback
MterpFallback:
EXPORT_PC
#if MTERP_LOGGING
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
jal MterpLogFallback
#endif
MterpCommonFallback:
li v0, 0 # signal retry with reference interpreter.
b MterpDone
/*
* We pushed some registers on the stack in ExecuteMterpImpl, then saved
* SP and RA. Here we restore SP, restore the registers, and then restore
* RA to PC.
*
* On entry:
* uint32_t* rFP (should still be live, pointer to base of vregs)
*/
MterpExceptionReturn:
li v0, 1 # signal return to caller.
b MterpDone
/*
* Returned value is expected in a0 and if it's not 64-bit, the 32 most
* significant bits of a0 must be 0.
*/
MterpReturn:
ld a2, OFF_FP_RESULT_REGISTER(rFP)
sd a0, 0(a2)
li v0, 1 # signal return to caller.
MterpDone:
ld s5, STACK_OFFSET_S5(sp)
.cfi_restore 21
ld s4, STACK_OFFSET_S4(sp)
.cfi_restore 20
ld s3, STACK_OFFSET_S3(sp)
.cfi_restore 19
ld s2, STACK_OFFSET_S2(sp)
.cfi_restore 18
ld s1, STACK_OFFSET_S1(sp)
.cfi_restore 17
ld s0, STACK_OFFSET_S0(sp)
.cfi_restore 16
ld ra, STACK_OFFSET_RA(sp)
.cfi_restore 31
ld t8, STACK_OFFSET_GP(sp)
.cpreturn
.cfi_restore 28
.set noreorder
jr ra
daddu sp, sp, STACK_SIZE
.cfi_adjust_cfa_offset -STACK_SIZE
.cfi_endproc
.size ExecuteMterpImpl, .-ExecuteMterpImpl

=== runtime/interpreter/mterp/mips64/op_aget.S (1,425 bytes) ===
%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* for: aget, aget-boolean, aget-byte, aget-char, aget-short
*
* NOTE: assumes data offset for arrays is the same for all non-wide types.
* If this changes, specialize.
*/
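/*
 * Conceptually, the handler below is the following C (an illustrative sketch;
 * Array, vregs, and the throw_* helpers are assumed names, not symbols from
 * this file):
 *
 *   Array* arr = (Array*)(uintptr_t)vregs[BB];         // GET_VREG_U
 *   uint32_t idx = vregs[CC];                          // GET_VREG
 *   if (arr == NULL) throw_null_object();              // common_errNullObject
 *   if (idx >= (uint32_t)arr->length) throw_index();   // bgeu, unsigned compare
 *   vregs[AA] = arr->data[idx];                        // $load at $data_offset
 */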
/* op vAA, vBB, vCC */
lbu a2, 2(rPC) # a2 <- BB
lbu a3, 3(rPC) # a3 <- CC
srl a4, rINST, 8 # a4 <- AA
GET_VREG_U a0, a2 # a0 <- vBB (array object)
GET_VREG a1, a3 # a1 <- vCC (requested index)
beqz a0, common_errNullObject # bail if null array object
lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
.if $shift
# [d]lsa does not support shift count of 0.
dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width
.else
daddu a0, a1, a0 # a0 <- arrayObj + index*width
.endif
bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
$load a2, $data_offset(a0) # a2 <- vBB[vCC]
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG a2, a4 # vAA <- a2
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/binop.S (1,492 bytes) ===
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
* Generic 32-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0 op a1".
* This could be a MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
* *don't* check for (INT_MIN / -1) here, because the CPU handles it
* correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
*/
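/*
 * A concrete instantiation makes the template options clearer. An opcode file
 * would pull this template in roughly as follows (an illustrative sketch; the
 * exact option strings belong to the individual op_*.S files):
 *
 *   %include "mips64/binop.S" {"instr":"addu a0, a0, a1"}                # add-int
 *   %include "mips64/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}  # div-int
 */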
/* binop vAA, vBB, vCC */
srl a4, rINST, 8 # a4 <- AA
lbu a2, 2(rPC) # a2 <- BB
lbu a3, 3(rPC) # a3 <- CC
GET_VREG a0, a2 # a0 <- vBB
GET_VREG a1, a3 # a1 <- vCC
.if $chkzero
beqz a1, common_errDivideByZero # is second operand zero?
.endif
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG $result, a4 # vAA <- $result
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/entry.S (2,346 bytes) ===
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Interpreter entry point.
*/
.set reorder
.text
.global ExecuteMterpImpl
.type ExecuteMterpImpl, %function
.balign 16
/*
* On entry:
* a0 Thread* self
* a1 code_item
* a2 ShadowFrame
* a3 JValue* result_register
*
*/
ExecuteMterpImpl:
.cfi_startproc
.cpsetup t9, t8, ExecuteMterpImpl
.cfi_def_cfa sp, 0
daddu sp, sp, -STACK_SIZE
.cfi_adjust_cfa_offset STACK_SIZE
sd t8, STACK_OFFSET_GP(sp)
.cfi_rel_offset 28, STACK_OFFSET_GP
sd ra, STACK_OFFSET_RA(sp)
.cfi_rel_offset 31, STACK_OFFSET_RA
sd s0, STACK_OFFSET_S0(sp)
.cfi_rel_offset 16, STACK_OFFSET_S0
sd s1, STACK_OFFSET_S1(sp)
.cfi_rel_offset 17, STACK_OFFSET_S1
sd s2, STACK_OFFSET_S2(sp)
.cfi_rel_offset 18, STACK_OFFSET_S2
sd s3, STACK_OFFSET_S3(sp)
.cfi_rel_offset 19, STACK_OFFSET_S3
sd s4, STACK_OFFSET_S4(sp)
.cfi_rel_offset 20, STACK_OFFSET_S4
sd s5, STACK_OFFSET_S5(sp)
.cfi_rel_offset 21, STACK_OFFSET_S5
/* Remember the return register */
sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
/* Remember the code_item */
sd a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2)
/* set up "named" registers */
move rSELF, a0
daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
dlsa rREFS, v0, rFP, 2
daddu rPC, a1, CODEITEM_INSNS_OFFSET
lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
dlsa rPC, v0, rPC, 1
EXPORT_PC
/* Starting ibase */
REFRESH_IBASE
/* start executing the instruction at rPC */
FETCH_INST
GET_INST_OPCODE v0
GOTO_OPCODE v0
/* NOTE: no fallthrough */

=== runtime/interpreter/mterp/mips64/binop2addr.S (1,521 bytes) ===
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0 op a1".
* This could be a MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vB (a1). Useful for integer division and modulus. Note that we
* *don't* check for (INT_MIN / -1) here, because the CPU handles it
* correctly.
*
* For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
* rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
* shl-int/2addr, shr-int/2addr, ushr-int/2addr
*/
/* binop/2addr vA, vB */
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
GET_VREG a0, a2 # a0 <- vA
GET_VREG a1, a3 # a1 <- vB
.if $chkzero
beqz a1, common_errDivideByZero # is second operand zero?
.endif
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG $result, a2 # vA <- $result
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/header.S (7,784 bytes) ===
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <machine/regdef.h>
/* TODO: add the missing file and use its FP register definitions. */
/* #include <machine/fpregdef.h> */
/* FP register definitions */
#define f0 $$f0
#define f1 $$f1
#define f2 $$f2
#define f3 $$f3
#define f12 $$f12
#define f13 $$f13
/*
* It looks like the GNU assembler currently does not support the blec and bgtc
* idioms, which should translate into bgec and bltc respectively with swapped
* left and right register operands.
* TODO: remove these macros when the assembler is fixed.
*/
.macro blec lreg, rreg, target
bgec \rreg, \lreg, \target
.endm
.macro bgtc lreg, rreg, target
bltc \rreg, \lreg, \target
.endm
/*
Mterp and MIPS64 notes:
The following registers have fixed assignments:
reg nick purpose
s0 rPC interpreted program counter, used for fetching instructions
s1 rFP interpreted frame pointer, used for accessing locals and args
s2 rSELF self (Thread) pointer
s3 rINST first 16-bit code unit of current instruction
s4 rIBASE interpreted instruction base pointer, used for computed goto
s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
*/
/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rPC s0
#define rFP s1
#define rSELF s2
#define rINST s3
#define rIBASE s4
#define rREFS s5
/*
* This is a #include, not a %include, because we want the C pre-processor
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
/*
* Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
* to access other shadow frame fields, we need to use a backwards offset. Define those here.
*/
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0
/*
* "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
* be done *before* something throws.
*
* It's okay to do this more than once.
*
* NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
* dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
 * offset into the code_items_[] array. For efficiency, we will "export" the
* current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
* to convert to a dex pc when needed.
*/
.macro EXPORT_PC
sd rPC, OFF_FP_DEX_PC_PTR(rFP)
.endm
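/*
 * The pointer-to-offset conversion mentioned above amounts to, in C (an
 * illustrative sketch; the real conversion lives on the runtime side):
 *
 *   uint32_t DexPcFromPointer(const uint16_t* dex_pc_ptr, const uint16_t* insns) {
 *       return (uint32_t)(dex_pc_ptr - insns);  // dex pc counts 16-bit code units
 *   }
 */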
/*
* Refresh handler table.
*/
.macro REFRESH_IBASE
ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
.endm
/*
* Fetch the next instruction from rPC into rINST. Does not advance rPC.
*/
.macro FETCH_INST
lhu rINST, 0(rPC)
.endm
/* Advance rPC by some number of code units. */
.macro ADVANCE count
daddu rPC, rPC, (\count) * 2
.endm
/*
* Fetch the next instruction from the specified offset. Advances rPC
* to point to the next instruction.
*
* This must come AFTER anything that can throw an exception, or the
* exception catch may miss. (This also implies that it must come after
* EXPORT_PC.)
*/
.macro FETCH_ADVANCE_INST count
ADVANCE \count
FETCH_INST
.endm
/*
* Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
* rINST ahead of possible exception point. Be sure to manually advance rPC
* later.
*/
.macro PREFETCH_INST count
lhu rINST, ((\count) * 2)(rPC)
.endm
/*
* Put the instruction's opcode field into the specified register.
*/
.macro GET_INST_OPCODE reg
and \reg, rINST, 255
.endm
/*
* Begin executing the opcode in _reg.
*/
.macro GOTO_OPCODE reg
.set noat
sll AT, \reg, 7
daddu AT, rIBASE, AT
jic AT, 0
.set at
.endm
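/*
 * In C terms, GOTO_OPCODE is a computed goto into a table of fixed-size
 * handlers (a sketch; the 128-byte stride matches the `sll AT, \reg, 7` above):
 *
 *   typedef void (*Handler)(void);
 *   void Dispatch(const char* ibase, uint32_t opcode) {
 *       Handler h = (Handler)(ibase + ((uintptr_t)opcode << 7));  // 128 bytes each
 *       h();                                                      // jic AT, 0
 *   }
 */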
/*
* Get/set the 32-bit value from a Dalvik register.
* Note, GET_VREG does sign extension to 64 bits while
* GET_VREG_U does zero extension to 64 bits.
* One is useful for arithmetic while the other is
* useful for storing the result value as 64-bit.
*/
.macro GET_VREG reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
lw \reg, 0(AT)
.set at
.endm
.macro GET_VREG_U reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
lwu \reg, 0(AT)
.set at
.endm
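/*
 * The sign/zero-extension distinction above, expressed in C (illustrative):
 *
 *   int64_t GetVReg(const uint32_t* fp, uint32_t v) {
 *       return (int32_t)fp[v];   // lw: sign-extend, good for arithmetic
 *   }
 *   uint64_t GetVRegU(const uint32_t* fp, uint32_t v) {
 *       return fp[v];            // lwu: zero-extend, e.g. object references
 *   }
 */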
.macro GET_VREG_FLOAT reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
lwc1 \reg, 0(AT)
.set at
.endm
.macro SET_VREG reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
sw \reg, 0(AT)
dlsa AT, \vreg, rREFS, 2
sw zero, 0(AT)
.set at
.endm
.macro SET_VREG_OBJECT reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
sw \reg, 0(AT)
dlsa AT, \vreg, rREFS, 2
sw \reg, 0(AT)
.set at
.endm
.macro SET_VREG_FLOAT reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
swc1 \reg, 0(AT)
dlsa AT, \vreg, rREFS, 2
sw zero, 0(AT)
.set at
.endm
/*
* Get/set the 64-bit value from a Dalvik register.
* Avoid unaligned memory accesses.
* Note, SET_VREG_WIDE clobbers the register containing the value being stored.
* Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
*/
.macro GET_VREG_WIDE reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
lw \reg, 0(AT)
lw AT, 4(AT)
dinsu \reg, AT, 32, 32
.set at
.endm
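/*
 * Equivalent C for the 64-bit read (a sketch): two aligned 32-bit loads are
 * merged because a vreg pair is only guaranteed 4-byte alignment:
 *
 *   int64_t GetVRegWide(const uint32_t* fp, uint32_t v) {
 *       uint32_t lo = fp[v];                          // lw \reg, 0(AT)
 *       uint32_t hi = fp[v + 1];                      // lw AT, 4(AT)
 *       return (int64_t)(((uint64_t)hi << 32) | lo);  // dinsu
 *   }
 */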
.macro GET_VREG_DOUBLE reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
lwc1 \reg, 0(AT)
lw AT, 4(AT)
mthc1 AT, \reg
.set at
.endm
.macro SET_VREG_WIDE reg, vreg
.set noat
dlsa AT, \vreg, rFP, 2
sw \reg, 0(AT)
drotr32 \reg, \reg, 0
sw \reg, 4(AT)
dlsa AT, \vreg, rREFS, 2
sw zero, 0(AT)
sw zero, 4(AT)
.set at
.endm
.macro SET_VREG_DOUBLE reg, vreg
.set noat
dlsa AT, \vreg, rREFS, 2
sw zero, 0(AT)
sw zero, 4(AT)
dlsa AT, \vreg, rFP, 2
swc1 \reg, 0(AT)
mfhc1 \vreg, \reg
sw \vreg, 4(AT)
.set at
.endm
/*
* On-stack offsets for spilling/unspilling callee-saved registers
* and the frame size.
*/
#define STACK_OFFSET_RA 0
#define STACK_OFFSET_GP 8
#define STACK_OFFSET_S0 16
#define STACK_OFFSET_S1 24
#define STACK_OFFSET_S2 32
#define STACK_OFFSET_S3 40
#define STACK_OFFSET_S4 48
#define STACK_OFFSET_S5 56
#define STACK_SIZE 64
/* Constants for float/double_to_int/long conversions */
#define INT_MIN 0x80000000
#define INT_MIN_AS_FLOAT 0xCF000000
#define INT_MIN_AS_DOUBLE 0xC1E0000000000000
#define LONG_MIN 0x8000000000000000
#define LONG_MIN_AS_FLOAT 0xDF000000
#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000

=== runtime/interpreter/mterp/mips64/op_aput.S (1,431 bytes) ===
%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA.
*
* for: aput, aput-boolean, aput-byte, aput-char, aput-short
*
* NOTE: this assumes data offset for arrays is the same for all non-wide types.
* If this changes, specialize.
*/
/* op vAA, vBB, vCC */
lbu a2, 2(rPC) # a2 <- BB
lbu a3, 3(rPC) # a3 <- CC
srl a4, rINST, 8 # a4 <- AA
GET_VREG_U a0, a2 # a0 <- vBB (array object)
GET_VREG a1, a3 # a1 <- vCC (requested index)
beqz a0, common_errNullObject # bail if null array object
lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
.if $shift
# [d]lsa does not support shift count of 0.
dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width
.else
daddu a0, a1, a0 # a0 <- arrayObj + index*width
.endif
bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_VREG a2, a4 # a2 <- vAA
GET_INST_OPCODE v0 # extract opcode from rINST
$store a2, $data_offset(a0) # vBB[vCC] <- a2
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/fcmpWide.S (1,254 bytes) ===
%default {}
/*
* Compare two floating-point values. Puts 0, 1, or -1 into the
* destination register based on the results of the comparison.
*
* For: cmpl-double, cmpg-double
*/
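/*
 * The gt_bias option decides how an unordered comparison (a NaN operand) is
 * resolved, matching Dalvik's cmpg vs. cmpl semantics; in C (a sketch):
 *
 *   int CompareDouble(double x, double y, int gt_bias) {
 *       if (x == y) return 0;
 *       if (x < y)  return -1;
 *       if (x > y)  return 1;
 *       return gt_bias ? 1 : -1;   // unordered: NaN involved
 *   }
 */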
/* op vAA, vBB, vCC */
srl a4, rINST, 8 # a4 <- AA
lbu a2, 2(rPC) # a2 <- BB
lbu a3, 3(rPC) # a3 <- CC
GET_VREG_DOUBLE f0, a2 # f0 <- vBB
GET_VREG_DOUBLE f1, a3 # f1 <- vCC
cmp.eq.d f2, f0, f1
li a0, 0
bc1nez f2, 1f # done if vBB == vCC (ordered)
.if $gt_bias
cmp.lt.d f2, f0, f1
li a0, -1
bc1nez f2, 1f # done if vBB < vCC (ordered)
li a0, 1 # vBB > vCC or unordered
.else
cmp.lt.d f2, f1, f0
li a0, 1
bc1nez f2, 1f # done if vBB > vCC (ordered)
li a0, -1 # vBB < vCC or unordered
.endif
1:
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG a0, a4 # vAA <- a0
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/zcmp.S (1,363 bytes) ===
/*
* Generic one-operand compare-and-branch operation. Provide a "condition"
* fragment that specifies the comparison to perform, e.g. for
* "if-lez" you would use "le".
*
* For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
*/
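/*
 * The branch decision below is, in C terms (a sketch; `cond` stands for the
 * substituted condition and offsets count 16-bit code units):
 *
 *   int32_t off = cond((int32_t)vregs[AA], 0) ? (int16_t)insns[1] : 2;
 *   pc += off;   // a backward branch (off < 0) also runs the suspend check
 */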
/* if-cmp vAA, +BBBB */
.extern MterpProfileBranch
srl a2, rINST, 8 # a2 <- AA
lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
GET_VREG a0, a2 # a0 <- vAA
b${condition}zc a0, 1f
li rINST, 2 # offset if branch not taken
1:
#if MTERP_PROFILE_BRANCHES
EXPORT_PC
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
jal MterpProfileBranch # (self, shadow_frame, offset)
bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST
#endif
dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2
lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
move a0, rINST # a0 <- offset
FETCH_INST # load rINST
bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/op_goto.S (1,089 bytes) ===
/*
* Unconditional branch, 8-bit offset.
*
* The branch distance is a signed code-unit offset, which we need to
* double to get a byte offset.
*/
/* goto +AA */
.extern MterpProfileBranch
srl rINST, rINST, 8
seb rINST, rINST # rINST <- offset (sign-extended AA)
#if MTERP_PROFILE_BRANCHES
EXPORT_PC
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
jal MterpProfileBranch # (self, shadow_frame, offset)
bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST
#endif
dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2
lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
move a0, rINST # a0 <- offset
FETCH_INST # load rINST
bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/op_packed_switch.S (1,649 bytes) ===
%default { "func":"MterpDoPackedSwitch" }
/*
* Handle a packed-switch or sparse-switch instruction. In both cases
* we decode it and hand it off to a helper function.
*
* We don't really expect backward branches in a switch statement, but
* they're perfectly legal, so we check for them here.
*
* for: packed-switch, sparse-switch
*/
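/*
 * In C terms the dispatch is roughly (an illustrative sketch; `func` is the
 * helper named in the %default above):
 *
 *   const uint16_t* payload = pc + (int32_t)((uint32_t)insns[2] << 16 | insns[1]);
 *   int32_t off = func(payload, vregs[AA]);   // code-unit branch offset
 *   pc += off;   // off <= 0 additionally routes through the suspend check
 */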
/* op vAA, +BBBBBBBB */
.extern $func
.extern MterpProfileBranch
lh a0, 2(rPC) # a0 <- bbbb (lo)
lh a1, 4(rPC) # a1 <- BBBB (hi)
srl a3, rINST, 8 # a3 <- AA
ins a0, a1, 16, 16 # a0 <- BBBBbbbb
GET_VREG a1, a3 # a1 <- vAA
dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
jal $func # v0 <- code-unit branch offset
move rINST, v0
#if MTERP_PROFILE_BRANCHES
EXPORT_PC
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
jal MterpProfileBranch # (self, shadow_frame, offset)
bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST
#endif
dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2
lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
move a0, rINST # a0 <- offset
FETCH_INST # load rINST
blez a0, MterpCheckSuspendAndContinue # suspend check if backwards branch
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/fcmp.S (1,252 bytes) ===
%default {}
/*
* Compare two floating-point values. Puts 0, 1, or -1 into the
* destination register based on the results of the comparison.
*
* For: cmpl-float, cmpg-float
*/
/* op vAA, vBB, vCC */
srl a4, rINST, 8 # a4 <- AA
lbu a2, 2(rPC) # a2 <- BB
lbu a3, 3(rPC) # a3 <- CC
GET_VREG_FLOAT f0, a2 # f0 <- vBB
GET_VREG_FLOAT f1, a3 # f1 <- vCC
cmp.eq.s f2, f0, f1
li a0, 0
bc1nez f2, 1f # done if vBB == vCC (ordered)
.if $gt_bias
cmp.lt.s f2, f0, f1
li a0, -1
bc1nez f2, 1f # done if vBB < vCC (ordered)
li a0, 1 # vBB > vCC or unordered
.else
cmp.lt.s f2, f1, f0
li a0, 1
bc1nez f2, 1f # done if vBB > vCC (ordered)
li a0, -1 # vBB < vCC or unordered
.endif
1:
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG a0, a4 # vAA <- a0
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/op_goto_32.S (1,343 bytes) ===
/*
* Unconditional branch, 32-bit offset.
*
* The branch distance is a signed code-unit offset, which we need to
* double to get a byte offset.
*
* Unlike most opcodes, this one is allowed to branch to itself, so
* our "backward branch" test must be "<=0" instead of "<0".
*/
/* goto/32 +AAAAAAAA */
.extern MterpProfileBranch
lh rINST, 2(rPC) # rINST <- aaaa (low)
lh a1, 4(rPC) # a1 <- AAAA (high)
ins rINST, a1, 16, 16 # rINST <- offset (sign-extended AAAAaaaa)
#if MTERP_PROFILE_BRANCHES
EXPORT_PC
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
jal MterpProfileBranch # (self, shadow_frame, offset)
bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST
#endif
dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2
lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
move a0, rINST # a0 <- offset
FETCH_INST # load rINST
blez a0, MterpCheckSuspendAndContinue # suspend check if backwards branch
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/binopLit8.S (1,431 bytes) ===
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
* Generic 32-bit "lit8" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0 op a1".
 * This could be a MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* CC (a1). Useful for integer division and modulus.
*
* For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
lbu a3, 2(rPC) # a3 <- BB
lb a1, 3(rPC) # a1 <- sign-extended CC
srl a2, rINST, 8 # a2 <- AA
GET_VREG a0, a3 # a0 <- vBB
.if $chkzero
beqz a1, common_errDivideByZero # is second operand zero?
.endif
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG $result, a2 # vAA <- $result
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/binopLit16.S (1,380 bytes) ===
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
* Generic 32-bit "lit16" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0 op a1".
 * This could be a MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* CCCC (a1). Useful for integer division and modulus.
*
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
/* binop/lit16 vA, vB, #+CCCC */
lh a1, 2(rPC) # a1 <- sign-extended CCCC
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
GET_VREG a0, a3 # a0 <- vB
.if $chkzero
beqz a1, common_errDivideByZero # is second operand zero?
.endif
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG $result, a2 # vA <- $result
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/op_aget_wide.S (1,101 bytes) ===
/*
* Array get, 64 bits. vAA <- vBB[vCC].
*
*/
/* aget-wide vAA, vBB, vCC */
lbu a2, 2(rPC) # a2 <- BB
lbu a3, 3(rPC) # a3 <- CC
srl a4, rINST, 8 # a4 <- AA
GET_VREG_U a0, a2 # a0 <- vBB (array object)
GET_VREG a1, a3 # a1 <- vCC (requested index)
beqz a0, common_errNullObject # bail if null array object
lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
dinsu a2, a3, 32, 32 # a2 <- vBB[vCC]
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG_WIDE a2, a4 # vAA <- a2
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/binopWide2addr.S (1,533 bytes) ===
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0 op a1".
* This could be a MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vB (a1). Useful for integer division and modulus. Note that we
* *don't* check for (LONG_MIN / -1) here, because the CPU handles it
* correctly.
*
* For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
* rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
* shl-long/2addr, shr-long/2addr, ushr-long/2addr
*/
/* binop/2addr vA, vB */
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
GET_VREG_WIDE a0, a2 # a0 <- vA
GET_VREG_WIDE a1, a3 # a1 <- vB
.if $chkzero
beqz a1, common_errDivideByZero # is second operand zero?
.endif
FETCH_ADVANCE_INST 1 # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG_WIDE $result, a2 # vA <- $result
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/binopWide.S (1,504 bytes) ===
%default {"preinstr":"", "result":"a0", "chkzero":"0"}
/*
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0 op a1".
* This could be a MIPS instruction or a function call. (If the result
* comes back in a register other than a0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
* *don't* check for (LONG_MIN / -1) here, because the CPU handles it
* correctly.
*
* For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
* xor-long, shl-long, shr-long, ushr-long
*/
/* binop vAA, vBB, vCC */
srl a4, rINST, 8 # a4 <- AA
lbu a2, 2(rPC) # a2 <- BB
lbu a3, 3(rPC) # a3 <- CC
GET_VREG_WIDE a0, a2 # a0 <- vBB
GET_VREG_WIDE a1, a3 # a1 <- vCC
.if $chkzero
beqz a1, common_errDivideByZero # is second operand zero?
.endif
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE v0 # extract opcode from rINST
SET_VREG_WIDE $result, a4 # vAA <- $result
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/bincmp.S (1,461 bytes) ===
/*
* Generic two-operand compare-and-branch operation. Provide a "condition"
* fragment that specifies the comparison to perform, e.g. for
* "if-le" you would use "le".
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
.extern MterpProfileBranch
ext a2, rINST, 8, 4 # a2 <- A
ext a3, rINST, 12, 4 # a3 <- B
lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
GET_VREG a0, a2 # a0 <- vA
GET_VREG a1, a3 # a1 <- vB
b${condition}c a0, a1, 1f
li rINST, 2 # offset if branch not taken
1:
#if MTERP_PROFILE_BRANCHES
EXPORT_PC
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
jal MterpProfileBranch # (self, shadow_frame, offset)
bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST
#endif
dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2
lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
move a0, rINST # a0 <- offset
FETCH_INST # load rINST
bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/op_aput_wide.S (1,084 bytes) ===
/*
* Array put, 64 bits. vBB[vCC] <- vAA.
*
*/
/* aput-wide vAA, vBB, vCC */
lbu a2, 2(rPC) # a2 <- BB
lbu a3, 3(rPC) # a3 <- CC
srl a4, rINST, 8 # a4 <- AA
GET_VREG_U a0, a2 # a0 <- vBB (array object)
GET_VREG a1, a3 # a1 <- vCC (requested index)
beqz a0, common_errNullObject # bail if null array object
lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
GET_VREG_WIDE a2, a4 # a2 <- vAA
FETCH_ADVANCE_INST 2 # advance rPC, load rINST
GET_INST_OPCODE v0 # extract opcode from rINST
sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
dsrl32 a2, a2, 0
sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/mips64/op_goto_16.S (1,069 bytes) ===
/*
* Unconditional branch, 16-bit offset.
*
* The branch distance is a signed code-unit offset, which we need to
* double to get a byte offset.
*/
/* goto/16 +AAAA */
.extern MterpProfileBranch
lh rINST, 2(rPC) # rINST <- offset (sign-extended AAAA)
#if MTERP_PROFILE_BRANCHES
EXPORT_PC
move a0, rSELF
daddu a1, rFP, OFF_FP_SHADOWFRAME
move a2, rINST
jal MterpProfileBranch # (self, shadow_frame, offset)
bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST
#endif
dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2
lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue
move a0, rINST # a0 <- offset
FETCH_INST # load rINST
bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch
GET_INST_OPCODE v0 # extract opcode from rINST
GOTO_OPCODE v0 # jump to next instruction

=== runtime/interpreter/mterp/arm/op_cmpg_double.S (1,404 bytes) ===
/*
* Compare two floating-point values. Puts 0, 1, or -1 into the
* destination register based on the results of the comparison.
*
* int compare(x, y) {
* if (x == y) {
* return 0;
* } else if (x < y) {
* return -1;
* } else if (x > y) {
* return 1;
* } else {
* return 1;
* }
* }
*/
/* op vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
fldd d0, [r2] @ d0<- vBB
fldd d1, [r3] @ d1<- vCC
vcmpe.f64 d0, d1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, #1 @ r0<- 1 (default)
GET_INST_OPCODE ip @ extract opcode from rINST
fmstat @ export status flags
mvnmi r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction

=== runtime/interpreter/mterp/arm/op_cmpl_float.S (1,408 bytes) ===
/*
* Compare two floating-point values. Puts 0, 1, or -1 into the
* destination register based on the results of the comparison.
*
* int compare(x, y) {
* if (x == y) {
* return 0;
* } else if (x > y) {
* return 1;
* } else if (x < y) {
* return -1;
* } else {
* return -1;
* }
* }
*/
/* op vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
flds s0, [r2] @ s0<- vBB
flds s1, [r3] @ s1<- vCC
vcmpe.f32 s0, s1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mvn r0, #0 @ r0<- -1 (default)
GET_INST_OPCODE ip @ extract opcode from rINST
fmstat @ export status flags
movgt r0, #1 @ (greater than) r0<- 1
moveq r0, #0 @ (equal) r0<- 0
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction

=== runtime/interpreter/mterp/arm/op_shr_long.S (1,474 bytes) ===
/*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
 * distance) is 32-bit. Also, Dalvik requires us to mask the shift
 * distance to its low 6 bits.
*/
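/*
 * The pair-register sequence below builds the arithmetic 64-bit shift from
 * 32-bit halves; in C (a sketch: for shift counts of 0 or >= 32 some of these
 * C expressions are undefined behavior, while ARM's register-specified
 * shifter produces exactly the values the code relies on):
 *
 *   uint32_t n  = vCC & 63;                                     // mask to 6 bits
 *   uint32_t lo = (loHalf >> n) | (hiHalf << (32 - n));
 *   if (n >= 32) lo = (uint32_t)((int32_t)hiHalf >> (n - 32));  // movpl path
 *   uint32_t hi = (uint32_t)((int32_t)hiHalf >> n);             // asr
 */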
/* shr-long vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r2<- r2 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r0, r0, lsr r2 @ r0<- r0 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r1, r1, asr r2 @ r1<- r1 >> r2
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction

=== runtime/interpreter/mterp/arm/footer.S (10,103 bytes) ===
/*
* ===========================================================================
* Common subroutines and data
* ===========================================================================
*/
.text
.align 2
/*
* We've detected a condition that will result in an exception, but the exception
* has not yet been thrown. Just bail out to the reference interpreter to deal with it.
* TUNING: for consistency, we may want to just go ahead and handle these here.
*/
common_errDivideByZero:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogDivideByZeroException
#endif
b MterpCommonFallback
common_errArrayIndex:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogArrayIndexException
#endif
b MterpCommonFallback
common_errNegativeArraySize:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogNegativeArraySizeException
#endif
b MterpCommonFallback
common_errNoSuchMethod:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogNoSuchMethodException
#endif
b MterpCommonFallback
common_errNullObject:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogNullObjectException
#endif
b MterpCommonFallback
common_exceptionThrown:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogExceptionThrownException
#endif
b MterpCommonFallback
MterpSuspendFallback:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
bl MterpLogSuspendFallback
#endif
b MterpCommonFallback
/*
* If we're here, something is out of the ordinary. If there is a pending
* exception, handle it. Otherwise, roll back and retry with the reference
* interpreter.
*/
MterpPossibleException:
ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
cmp r0, #0 @ Exception pending?
beq MterpFallback @ If not, fall back to reference interpreter.
/* intentional fallthrough - handle pending exception. */
/*
* On return from a runtime helper routine, we've found a pending exception.
* Can we handle it here - or need to bail out to caller?
*
*/
MterpException:
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpHandleException @ (self, shadow_frame)
cmp r0, #0
beq MterpExceptionReturn @ no local catch, back to caller.
ldr r0, [rFP, #OFF_FP_CODE_ITEM]
ldr r1, [rFP, #OFF_FP_DEX_PC]
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
add rPC, r0, #CODEITEM_INSNS_OFFSET
add rPC, rPC, r1, lsl #1 @ generate new dex_pc_ptr
/* Do we need to switch interpreters? */
bl MterpShouldSwitchInterpreters
cmp r0, #0
bne MterpFallback
/* resume execution at catch block */
EXPORT_PC
FETCH_INST
GET_INST_OPCODE ip
GOTO_OPCODE ip
/* NOTE: no fallthrough */
/*
* Common handling for branches with support for Jit profiling.
* On entry:
* rINST <= signed offset
* rPROFILE <= signed hotness countdown (expanded to 32 bits)
* condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
*
* We have quite a few different cases for branch profiling, OSR detection and
* suspend check support here.
*
* Taken backward branches:
* If profiling active, do hotness countdown and report if we hit zero.
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
* Is there a pending suspend request? If so, suspend.
*
* Taken forward branches and not-taken backward branches:
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
*
* Our most common case is expected to be a taken backward branch with active jit profiling,
* but no full OSR check and no pending suspend request.
* Next most common case is not-taken branch with no full OSR check.
*
*/
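/*
 * Rough C sketch of the policy implemented below (a simplification for
 * orientation only; the helper names are hypothetical):
 *
 *   if (offset > 0) {                              // taken forward branch
 *       if (profile == JIT_CHECK_OSR) maybe_osr(offset);
 *   } else {                                       // taken backward branch
 *       if (profile == JIT_CHECK_OSR) maybe_osr(offset);
 *       else if (--profile == 0) report_hotness_batch();
 *       if (suspend_or_checkpoint_requested()) suspend_check();
 *   }
 *   resume_at_target();
 */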
MterpCommonTakenBranchNoFlags:
cmp rINST, #0
MterpCommonTakenBranch:
bgt .L_forward_branch @ don't add forward branches to hotness
/*
* We need to subtract 1 from positive values and we should not see 0 here,
* so we may use the result of the comparison with -1.
*/
#if JIT_CHECK_OSR != -1
# error "JIT_CHECK_OSR must be -1."
#endif
cmp rPROFILE, #JIT_CHECK_OSR
beq .L_osr_check
subgts rPROFILE, #1
beq .L_add_batch @ counted down to zero - report
.L_resume_backward_branch:
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
REFRESH_IBASE
add r2, rINST, rINST @ r2<- byte offset
FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
bne .L_suspend_request_pending
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
.L_suspend_request_pending:
EXPORT_PC
mov r0, rSELF
bl MterpSuspendCheck @ (self)
cmp r0, #0
bne MterpFallback
REFRESH_IBASE @ might have changed during suspend
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
.L_no_count_backwards:
cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
bne .L_resume_backward_branch
.L_osr_check:
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rINST
EXPORT_PC
bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
cmp r0, #0
bne MterpOnStackReplacement
b .L_resume_backward_branch
.L_forward_branch:
cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
beq .L_check_osr_forward
.L_resume_forward_branch:
add r2, rINST, rINST @ r2<- byte offset
FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
.L_check_osr_forward:
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rINST
EXPORT_PC
bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
cmp r0, #0
bne MterpOnStackReplacement
b .L_resume_forward_branch
.L_add_batch:
add r1, rFP, #OFF_FP_SHADOWFRAME
strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
ldr r0, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
bl MterpAddHotnessBatch @ (method, shadow_frame, self)
mov rPROFILE, r0 @ restore new hotness countdown to rPROFILE
b .L_no_count_backwards
/*
* Entered from the conditional branch handlers when OSR check request active on
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, #2
EXPORT_PC
bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
cmp r0, #0
bne MterpOnStackReplacement
FETCH_ADVANCE_INST 2
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
/*
* On-stack replacement has happened, and now we've returned from the compiled method.
*/
MterpOnStackReplacement:
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rINST
bl MterpLogOSR
#endif
mov r0, #1 @ Signal normal return
b MterpDone
/*
* Bail out to reference interpreter.
*/
MterpFallback:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogFallback
#endif
MterpCommonFallback:
mov r0, #0 @ signal retry with reference interpreter.
b MterpDone
/*
* We pushed some registers on the stack in ExecuteMterpImpl, then saved
* SP and LR. Here we restore SP, restore the registers, and then restore
* LR to PC.
*
* On entry:
* uint32_t* rFP (should still be live, pointer to base of vregs)
*/
MterpExceptionReturn:
mov r0, #1 @ signal return to caller.
b MterpDone
MterpReturn:
ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
str r0, [r2]
str r1, [r2, #4]
mov r0, #1 @ signal return to caller.
MterpDone:
/*
* At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
* checking for OSR. If greater than zero, we might have unreported hotness to register
* (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
* should only reach zero immediately after a hotness decrement, and is then reset to either
* a negative special state or the new non-zero countdown value.
*/
cmp rPROFILE, #0
bgt MterpProfileActive @ if > 0, we may have some counts to report.
ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
MterpProfileActive:
mov rINST, r0 @ stash return value
/* Report cached hotness counts */
ldr r0, [rFP, #OFF_FP_METHOD]
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rSELF
strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
bl MterpAddHotnessBatch @ (method, shadow_frame, self)
mov r0, rINST @ restore return value
ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
.fnend
.size ExecuteMterpImpl, .-ExecuteMterpImpl
/* ===== runtime/interpreter/mterp/arm/op_cmp_long.S ===== */
/*
* Compare two 64-bit values. Puts 0, 1, or -1 into the destination
* register based on the results of the comparison.
*/
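/*
 * Equivalently, in C (a sketch, not part of the original source):
 *
 *   int cmp_long(int64_t x, int64_t y) {
 *       return (x == y) ? 0 : ((x < y) ? -1 : 1);
 *   }
 *
 * The sbcs below evaluates the 64-bit signed less-than without a branch; the
 * cmpeq re-check is required because sbcs alone does not yield usable EQ/NE.
 */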
/* cmp-long vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
cmp r0, r2
sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
mov ip, #0
mvnlt ip, #0 @ -1
cmpeq r0, r2 @ For correct EQ/NE, we may need to repeat the first CMP
orrne ip, #1
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
SET_VREG ip, r9 @ vAA<- ip
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_aget.S ===== */
%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
* instructions. We use a pair of FETCH_Bs instead.
*
* for: aget, aget-boolean, aget-byte, aget-char, aget-short
*
* NOTE: assumes data offset for arrays is the same for all non-wide types.
* If this changes, specialize.
*/
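/*
 * For illustration, this template might be specialized for aget-char roughly
 * as follows (parameter values assumed, shown as a sketch):
 *
 *   %include "arm/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
 */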
/* op vAA, vBB, vCC */
FETCH_B r2, 1, 0 @ r2<- BB
mov r9, rINST, lsr #8 @ r9<- AA
FETCH_B r3, 1, 1 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
cmp r1, r3 @ compare unsigned index, length
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$load r2, [r0, #$data_offset] @ r2<- vBB[vCC]
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r2, r9 @ vAA<- r2
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/binop.S ===== */
%default {"preinstr":"", "result":"r0", "chkzero":"0"}
/*
* Generic 32-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus. Note that we
* *don't* check for (INT_MIN / -1) here, because the ARM math lib
* handles it correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
* mul-float, div-float, rem-float
*/
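/*
 * Example instantiations (sketches of how an opcode file might fill in the
 * "instr" parameter; exact contents may differ per opcode):
 *
 *   %include "arm/binop.S" {"instr":"add r0, r0, r1"}   @ e.g. add-int
 *   %include "arm/binop.S" {"instr":"and r0, r0, r1"}   @ e.g. and-int
 */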
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
GET_VREG r1, r3 @ r1<- vCC
GET_VREG r0, r2 @ r0<- vBB
.if $chkzero
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
GOTO_OPCODE ip @ jump to next instruction
/* 11-14 instructions */
/* ===== runtime/interpreter/mterp/arm/op_rem_int_lit8.S ===== */
%default {}
/*
* Specialized 32-bit binary operation
*
* Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
* depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
* ARMv7 CPUs that have hardware division support).
*
* NOTE: idivmod returns quotient in r0 and remainder in r1
*
* rem-int/lit8
*
*/
FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r2, r0, r1
mls r1, r1, r2, r0 @ r1<- op
#else
bl __aeabi_idivmod @ r1<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r1, r9 @ vAA<- r1
GOTO_OPCODE ip @ jump to next instruction
/* 10-12 instructions */
/* ===== runtime/interpreter/mterp/arm/op_shl_long.S ===== */
/*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to mask off the low
* 6 bits of the shift distance.
*/
/* shl-long vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r2<- r2 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r1, r1, asl r2 @ r1<- r1 << r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, r0, asl r2 @ r0<- r0 << r2
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/entry.S ===== */
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Interpreter entry point.
*/
.text
.align 2
.global ExecuteMterpImpl
.type ExecuteMterpImpl, %function
/*
* On entry:
* r0 Thread* self
* r1 code_item
* r2 ShadowFrame
* r3 JValue* result_register
*
*/
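/*
 * Seen from C++, this entry point corresponds roughly to (a sketch; the
 * authoritative declaration lives in the interpreter headers):
 *
 *   extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
 *                                    ShadowFrame* shadow_frame, JValue* result_register);
 */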
ExecuteMterpImpl:
.fnstart
.save {r3-r10,fp,lr}
stmfd sp!, {r3-r10,fp,lr} @ save 10 regs, (r3 just to align 64)
/* Remember the return register */
str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
/* Remember the code_item */
str r1, [r2, #SHADOWFRAME_CODE_ITEM_OFFSET]
/* set up "named" registers */
mov rSELF, r0
ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
EXPORT_PC
/* Starting ibase */
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
/* Set up for backwards branches & osr profiling */
ldr r0, [rFP, #OFF_FP_METHOD]
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpSetUpHotnessCountdown
mov rPROFILE, r0 @ Starting hotness countdown to rPROFILE
/* start executing the instruction at rPC */
FETCH_INST @ load rINST from rPC
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
/* NOTE: no fallthrough */
/* ===== runtime/interpreter/mterp/arm/binop2addr.S ===== */
%default {"preinstr":"", "result":"r0", "chkzero":"0"}
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
* rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
* shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
* sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
.if $chkzero
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */
/* ===== runtime/interpreter/mterp/arm/header.S ===== */
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Art assembly interpreter notes:
First validate assembly code by implementing an ExecuteXXXImpl()-style body (doesn't
handle invoke; allows higher-level code to create the frame & shadow frame).
Once that's working, support direct entry code & eliminate the shadow frame (and
excess locals allocation).
Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
base of the vreg array within the shadow frame. Access the other fields,
dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
the shadow frame mechanism of double-storing object references - via rFP &
number_of_vregs_.
*/
/*
ARM EABI general notes:
r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly
r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0
Callee must save/restore r4+ (except r12) if it modifies them. If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.
Stack is "full descending". Only the arguments that don't fit in the first 4
registers are placed on the stack. "sp" points at the first stacked argument
(i.e. the 5th arg).
VFP: single-precision results in s0, double-precision results in d0.
In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/
/*
Mterp and ARM notes:
The following registers have fixed assignments:
reg nick purpose
r4 rPC interpreted program counter, used for fetching instructions
r5 rFP interpreted frame pointer, used for accessing locals and args
r6 rSELF self (Thread) pointer
r7 rINST first 16-bit code unit of current instruction
r8 rIBASE interpreted instruction base pointer, used for computed goto
r10 rPROFILE branch profiling countdown
r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
Macros are provided for common operations. Each macro MUST emit only
one instruction to make instruction-counting easier. They MUST NOT alter
unspecified registers or condition codes.
*/
/*
* This is a #include, not a %include, because we want the C pre-processor
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0
/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rPC r4
#define rFP r5
#define rSELF r6
#define rINST r7
#define rIBASE r8
#define rPROFILE r10
#define rREFS r11
/*
* Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
* to access other shadow frame fields, we need to use a backwards offset. Define those here.
*/
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
/*
* "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
* be done *before* something throws.
*
* It's okay to do this more than once.
*
* NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
* dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
offset into the code_items_[] array. For efficiency, we will "export" the
* current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
* to convert to a dex pc when needed.
*/
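/*
 * The pointer-to-offset conversion is plain arithmetic; in C it would look
 * roughly like (a sketch, field names assumed):
 *
 *   uint32_t dex_pc = (uint16_t*)pc_ptr - code_item->insns_;  // 16-bit code units
 *
 * which is what EXPORT_DEX_PC below computes with a subtract and a shift.
 */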
.macro EXPORT_PC
str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
.endm
.macro EXPORT_DEX_PC tmp
ldr \tmp, [rFP, #OFF_FP_CODE_ITEM]
str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
add \tmp, #CODEITEM_INSNS_OFFSET
sub \tmp, rPC, \tmp
asr \tmp, #1
str \tmp, [rFP, #OFF_FP_DEX_PC]
.endm
/*
* Fetch the next instruction from rPC into rINST. Does not advance rPC.
*/
.macro FETCH_INST
ldrh rINST, [rPC]
.endm
/*
* Fetch the next instruction from the specified offset. Advances rPC
* to point to the next instruction. "_count" is in 16-bit code units.
*
* Because of the limited size of immediate constants on ARM, this is only
* suitable for small forward movements (i.e. don't try to implement "goto"
* with this).
*
* This must come AFTER anything that can throw an exception, or the
* exception catch may miss. (This also implies that it must come after
* EXPORT_PC.)
*/
.macro FETCH_ADVANCE_INST count
ldrh rINST, [rPC, #((\count)*2)]!
.endm
/*
* The operation performed here is similar to FETCH_ADVANCE_INST, except the
* src and dest registers are parameterized (not hard-wired to rPC and rINST).
*/
.macro PREFETCH_ADVANCE_INST dreg, sreg, count
ldrh \dreg, [\sreg, #((\count)*2)]!
.endm
/*
* Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
* rINST ahead of possible exception point. Be sure to manually advance rPC
* later.
*/
.macro PREFETCH_INST count
ldrh rINST, [rPC, #((\count)*2)]
.endm
/* Advance rPC by some number of code units. */
.macro ADVANCE count
add rPC, #((\count)*2)
.endm
/*
* Fetch the next instruction from an offset specified by _reg. Updates
* rPC to point to the next instruction. "_reg" must specify the distance
* in bytes, *not* 16-bit code units, and may be a signed value.
*
* We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
* bits that hold the shift distance are used for the half/byte/sign flags.
* In some cases we can pre-double _reg for free, so we require a byte offset
* here.
*/
.macro FETCH_ADVANCE_INST_RB reg
ldrh rINST, [rPC, \reg]!
.endm
/*
* Fetch a half-word code unit from an offset past the current PC. The
* "_count" value is in 16-bit code units. Does not advance rPC.
*
* The "_S" variant works the same but treats the value as signed.
*/
.macro FETCH reg, count
ldrh \reg, [rPC, #((\count)*2)]
.endm
.macro FETCH_S reg, count
ldrsh \reg, [rPC, #((\count)*2)]
.endm
/*
* Fetch one byte from an offset past the current PC. Pass in the same
* "_count" as you would for FETCH, and an additional 0/1 indicating which
* byte of the halfword you want (lo/hi).
*/
.macro FETCH_B reg, count, byte
ldrb \reg, [rPC, #((\count)*2+(\byte))]
.endm
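/*
 * Example: for a vAA,vBB,vCC instruction the second code unit holds CCBB, so
 * the two operand indices are fetched as (illustrative use, as in op_aget):
 *
 *   FETCH_B r2, 1, 0   @ r2<- BB (low byte of code unit 1)
 *   FETCH_B r3, 1, 1   @ r3<- CC (high byte of code unit 1)
 */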
/*
* Put the instruction's opcode field into the specified register.
*/
.macro GET_INST_OPCODE reg
and \reg, rINST, #255
.endm
/*
* Put the prefetched instruction's opcode field into the specified register.
*/
.macro GET_PREFETCHED_OPCODE oreg, ireg
and \oreg, \ireg, #255
.endm
/*
* Begin executing the opcode in _reg. Because this only jumps within the
* interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
*/
.macro GOTO_OPCODE reg
add pc, rIBASE, \reg, lsl #${handler_size_bits}
.endm
.macro GOTO_OPCODE_BASE base,reg
add pc, \base, \reg, lsl #${handler_size_bits}
.endm
/*
* Get/set the 32-bit value from a Dalvik register.
*/
.macro GET_VREG reg, vreg
ldr \reg, [rFP, \vreg, lsl #2]
.endm
.macro SET_VREG reg, vreg
str \reg, [rFP, \vreg, lsl #2]
mov \reg, #0
str \reg, [rREFS, \vreg, lsl #2]
.endm
.macro SET_VREG_OBJECT reg, vreg, tmpreg
str \reg, [rFP, \vreg, lsl #2]
str \reg, [rREFS, \vreg, lsl #2]
.endm
.macro SET_VREG_SHADOW reg, vreg
str \reg, [rREFS, \vreg, lsl #2]
.endm
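/*
 * In C terms, the double-store scheme above is roughly (a sketch; array names
 * are hypothetical):
 *
 *   fp[v] = value;  refs[v] = 0;    // SET_VREG: primitive store clears the ref slot
 *   fp[v] = obj;    refs[v] = obj;  // SET_VREG_OBJECT: reference mirrored in both
 */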
/*
* Clear the corresponding shadow regs for a vreg pair
*/
.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
mov \tmp1, #0
add \tmp2, \vreg, #1
SET_VREG_SHADOW \tmp1, \vreg
SET_VREG_SHADOW \tmp1, \tmp2
.endm
/*
* Convert a virtual register index into an address.
*/
.macro VREG_INDEX_TO_ADDR reg, vreg
add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
.endm
/*
* Refresh handler table.
*/
.macro REFRESH_IBASE
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
.endm
/* ===== runtime/interpreter/mterp/arm/op_aput.S ===== */
%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA.
*
* Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
* instructions. We use a pair of FETCH_Bs instead.
*
* for: aput, aput-boolean, aput-byte, aput-char, aput-short
*
* NOTE: this assumes data offset for arrays is the same for all non-wide types.
* If this changes, specialize.
*/
/* op vAA, vBB, vCC */
FETCH_B r2, 1, 0 @ r2<- BB
mov r9, rINST, lsr #8 @ r9<- AA
FETCH_B r3, 1, 1 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
cmp r1, r3 @ compare unsigned index, length
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_VREG r2, r9 @ r2<- vAA
GET_INST_OPCODE ip @ extract opcode from rINST
$store r2, [r0, #$data_offset] @ vBB[vCC]<- r2
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_rem_int.S ===== */
%default {}
/*
* Specialized 32-bit binary operation
*
* Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
* depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
* ARMv7 CPUs that have hardware division support).
*
* NOTE: idivmod returns quotient in r0 and remainder in r1
*
* rem-int
*
*/
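/*
 * The sdiv/mls pair below computes the remainder without a helper call; in C
 * terms (a sketch): q = a / b; r = a - q * b. Here "mls r1, r1, r2, r0"
 * evaluates r1 = r0 - r1*r2, i.e. vBB - divisor*quotient.
 */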
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
GET_VREG r1, r3 @ r1<- vCC
GET_VREG r0, r2 @ r0<- vBB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r2, r0, r1
mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
#else
bl __aeabi_idivmod @ r1<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r1, r9 @ vAA<- r1
GOTO_OPCODE ip @ jump to next instruction
/* 11-14 instructions */
/* ===== runtime/interpreter/mterp/arm/op_instance_of.S ===== */
/*
* Check to see if an object reference is an instance of a class.
*
* Most common situation is a non-null object, being compared against
* an already-resolved class.
*/
/* instance-of vA, vB, class@CCCC */
EXPORT_PC
FETCH r0, 1 @ r0<- CCCC
mov r1, rINST, lsr #12 @ r1<- B
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
mov r3, rSELF @ r3<- self
bl MterpInstanceOf @ (index, &obj, method, self)
ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r9, rINST, #8, #4 @ r9<- A
PREFETCH_INST 2
cmp r1, #0 @ exception pending?
bne MterpException
ADVANCE 2 @ advance rPC
SET_VREG r0, r9 @ vA<- r0
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_rem_int_lit16.S ===== */
%default {}
/*
* Specialized 32-bit binary operation
*
* Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
* depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
* ARMv7 CPUs that have hardware division support).
*
* NOTE: idivmod returns quotient in r0 and remainder in r1
*
* rem-int/lit16
*
*/
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r2, r0, r1
mls r1, r1, r2, r0 @ r1<- op
#else
bl __aeabi_idivmod @ r1<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r1, r9 @ vAA<- r1
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */
/* ===== runtime/interpreter/mterp/arm/op_shr_long_2addr.S ===== */
/*
* Long integer shift, 2addr version. vA is 64-bit value/result, vB is
* 32-bit shift distance.
*/
/* shr-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r0 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
mov r1, r1, asr r2 @ r1<- r1 >> r2
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_iget.S ===== */
%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
/*
* General instance field get.
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
EXPORT_PC
FETCH r0, 1 @ r0<- field ref CCCC
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
mov r3, rSELF @ r3<- self
bl $helper
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
ubfx r2, rINST, #8, #4 @ r2<- A
PREFETCH_INST 2
cmp r3, #0
bne MterpPossibleException @ bail out
.if $is_object
SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
.else
SET_VREG r0, r2 @ fp[A]<- r0
.endif
ADVANCE 2
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_div_int_2addr.S ===== */
%default {}
/*
* Specialized 32-bit binary operation
*
* Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
* depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
* ARMv7 CPUs that have hardware division support).
*
* div-int/2addr
*
*/
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r0, r0, r1 @ r0<- op
#else
bl __aeabi_idiv @ r0<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */
/* ===== runtime/interpreter/mterp/arm/op_ushr_long_2addr.S ===== */
/*
* Long integer shift, 2addr version. vA is 64-bit value/result, vB is
* 32-bit shift distance.
*/
/* ushr-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r0 >>> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
mov r1, r1, lsr r2 @ r1<- r1 >>> r2
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/unopWide.S ===== */
%default {"preinstr":""}
/*
* Generic 64-bit unary operation. Provide an "instr" line that
* specifies an instruction that performs "result = op r0/r1".
* This could be an ARM instruction or a function call.
*
* For: neg-long, not-long, neg-double, long-to-double, double-to-long
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ r0/r1<- op, r2-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* 10-11 instructions */
/* ===== runtime/interpreter/mterp/arm/op_cmpg_float.S ===== */
/*
* Compare two floating-point values. Puts 0, 1, or -1 into the
* destination register based on the results of the comparison.
*
* int compare(x, y) {
* if (x == y) {
* return 0;
* } else if (x < y) {
* return -1;
* } else if (x > y) {
* return 1;
* } else {
* return 1;
* }
* }
*/
/* op vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
flds s0, [r2] @ s0<- vBB
flds s1, [r3] @ s1<- vCC
vcmpe.f32 s0, s1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, #1 @ r0<- 1 (default)
GET_INST_OPCODE ip @ extract opcode from rINST
fmstat @ export status flags
mvnmi r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/unopNarrower.S ===== */
%default {"preinstr":""}
/*
* Generic 64bit-to-32bit unary operation. Provide an "instr" line
* that specifies an instruction that performs "result = op r0/r1", where
* "result" is a 32-bit quantity in r0.
*
* For: long-to-float, double-to-int, double-to-float
*
* (This would work for long-to-int, but that instruction is actually
* an exact match for op_move.)
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 9-10 instructions */
/* ===== runtime/interpreter/mterp/arm/unopWider.S ===== */
%default {"preinstr":""}
/*
* Generic 32bit-to-64bit unary operation. Provide an "instr" line
* that specifies an instruction that performs "result = op r0", where
* "result" is a 64-bit quantity in r0/r1.
*
* For: int-to-long, int-to-double, float-to-long, float-to-double
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
$preinstr @ optional op; may set condition codes
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* 9-10 instructions */
/* ===== runtime/interpreter/mterp/arm/binopLit8.S ===== */
%default {"preinstr":"", "result":"r0", "chkzero":"0"}
/*
* Generic 32-bit "lit8" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
.if $chkzero
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
GOTO_OPCODE ip @ jump to next instruction
/* 10-12 instructions */
/* ===== runtime/interpreter/mterp/arm/fbinop.S ===== */
/*
* Generic 32-bit floating-point operation. Provide an "instr" line that
* specifies an instruction that performs "s2 = s0 op s1". Because we
* use the "softfp" ABI, this must be an instruction, not a function call.
*
* For: add-float, sub-float, mul-float, div-float
*/
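/*
 * Example instantiation (a sketch of the expected parameterization):
 *
 *   %include "arm/fbinop.S" {"instr":"fadds s2, s0, s1"}   @ e.g. add-float
 */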
/* floatop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
flds s1, [r3] @ s1<- vCC
flds s0, [r2] @ s0<- vBB
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fsts s2, [r9] @ vAA<- s2
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_rem_int_2addr.S ===== */
%default {}
/*
* Specialized 32-bit binary operation
*
* Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
* depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
* ARMv7 CPUs that have hardware division support).
*
* NOTE: idivmod returns quotient in r0 and remainder in r1
*
* rem-int/2addr
*
*/
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r2, r0, r1
mls r1, r1, r2, r0 @ r1<- op
#else
bl __aeabi_idivmod @ r1<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r1, r9 @ vAA<- r1
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */
/* ===== runtime/interpreter/mterp/arm/binopLit16.S ===== */
%default {"result":"r0", "chkzero":"0"}
/*
* Generic 32-bit "lit16" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
.if $chkzero
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */
/* ===== runtime/interpreter/mterp/arm/op_shl_long_2addr.S ===== */
/*
* Long integer shift, 2addr version. vA is 64-bit value/result, vB is
* 32-bit shift distance.
*/
/* shl-long/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r1, r1, asl r2 @ r1<- r1 << r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
mov r0, r0, asl r2 @ r0<- r0 << r2
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_div_int_lit16.S ===== */
%default {}
/*
* Specialized 32-bit binary operation
*
* Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
* depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
* ARMv7 CPUs that have hardware division support).
*
* div-int/lit16
*
*/
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r0, r0, r1 @ r0<- op
#else
bl __aeabi_idiv @ r0<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */
/* ===== runtime/interpreter/mterp/arm/fbinopWide.S ===== */
/*
* Generic 64-bit double-precision floating point binary operation.
* Provide an "instr" line that specifies an instruction that performs
* "d2 = d0 op d1".
*
* for: add-double, sub-double, mul-double, div-double
*/
/* doubleop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
fldd d1, [r3] @ d1<- vCC
fldd d0, [r2] @ d0<- vBB
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ d2<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fstd d2, [r9] @ vAA<- d2
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_aget_wide.S ===== */
/*
* Array get, 64 bits. vAA <- vBB[vCC].
*
* Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
*/
/* aget-wide vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
cmp r1, r3 @ compare unsigned index, length
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_double_to_long.S ===== */
@include "arm/unopWide.S" {"instr":"bl __aeabi_d2lz"}
%include "arm/unopWide.S" {"instr":"bl d2l_doconv"}
%break
/*
* Convert the double in r0/r1 to a long in r0/r1.
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
* to modest integer. The EABI convert function isn't doing this for us.
*/
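/*
 * The clamping below is equivalent to (a C sketch, for orientation only):
 *
 *   int64_t d2l(double d) {
 *       if (d >= 9223372036854775808.0)  return INT64_MAX;  // 0x43e00000 high word
 *       if (d <= -9223372036854775808.0) return INT64_MIN;  // 0xc3e00000 high word
 *       if (d != d)                      return 0;          // NaN
 *       return (int64_t)d;                                  // __aeabi_d2lz
 *   }
 */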
d2l_doconv:
stmfd sp!, {r4, r5, lr} @ save regs
mov r3, #0x43000000 @ maxlong, as a double (high word)
add r3, #0x00e00000 @ 0x43e00000
mov r2, #0 @ maxlong, as a double (low word)
sub sp, sp, #4 @ align for EABI
mov r4, r0 @ save a copy of r0
mov r5, r1 @ and r1
bl __aeabi_dcmpge @ is arg >= maxlong?
cmp r0, #0 @ nonzero == yes
mvnne r0, #0 @ return maxlong (7fffffffffffffff)
mvnne r1, #0x80000000
bne 1f
mov r0, r4 @ recover arg
mov r1, r5
mov r3, #0xc3000000 @ minlong, as a double (high word)
add r3, #0x00e00000 @ 0xc3e00000
mov r2, #0 @ minlong, as a double (low word)
bl __aeabi_dcmple @ is arg <= minlong?
cmp r0, #0 @ nonzero == yes
movne r0, #0 @ return minlong (8000000000000000)
movne r1, #0x80000000
bne 1f
mov r0, r4 @ recover arg
mov r1, r5
mov r2, r4 @ compare against self
mov r3, r5
bl __aeabi_dcmpeq @ is arg == self?
cmp r0, #0 @ zero == no
moveq r1, #0 @ return zero for NaN
beq 1f
mov r0, r4 @ recover arg
mov r1, r5
bl __aeabi_d2lz @ convert double to long
1:
add sp, sp, #4
ldmfd sp!, {r4, r5, pc}
/* ===== runtime/interpreter/mterp/arm/op_div_int_lit8.S ===== */
%default {}
/*
* Specialized 32-bit binary operation
*
* Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
* depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
* ARMv7 CPUs that have hardware division support).
*
* div-int/lit8
*
*/
FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
@cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r0, r0, r1 @ r0<- op
#else
bl __aeabi_idiv @ r0<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 10-12 instructions */
/* ===== runtime/interpreter/mterp/arm/binopWide2addr.S ===== */
%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
/*
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0-r1 op r2-r3".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
* sub-double/2addr, mul-double/2addr, div-double/2addr,
* rem-double/2addr
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
GOTO_OPCODE ip @ jump to next instruction
/* 12-15 instructions */
/* ===== runtime/interpreter/mterp/arm/binopWide.S ===== */
%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
/*
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = r0-r1 op r2-r3".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long, add-double, sub-double, mul-double, div-double,
* rem-double
*
* IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
*/
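/*
 * Example instantiation (a sketch; add-long pairs "preinstr" and "instr" to
 * form the 64-bit add):
 *
 *   %include "arm/binopWide.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}
 */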
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
GOTO_OPCODE ip @ jump to next instruction
/* 14-17 instructions */
/* ===== runtime/interpreter/mterp/arm/op_float_to_long.S ===== */
@include "arm/unopWider.S" {"instr":"bl __aeabi_f2lz"}
%include "arm/unopWider.S" {"instr":"bl f2l_doconv"}
%break
/*
* Convert the float in r0 to a long in r0/r1.
*
* We have to clip values to long min/max per the specification. The
* expected common case is a "reasonable" value that converts directly
* to modest integer. The EABI convert function isn't doing this for us.
*/
f2l_doconv:
stmfd sp!, {r4, lr}
mov r1, #0x5f000000 @ (float)maxlong
mov r4, r0
bl __aeabi_fcmpge @ is arg >= maxlong?
cmp r0, #0 @ nonzero == yes
mvnne r0, #0 @ return maxlong (7fffffffffffffff)
mvnne r1, #0x80000000
popne {r4, pc}
mov r0, r4 @ recover arg
mov r1, #0xdf000000 @ (float)minlong
bl __aeabi_fcmple @ is arg <= minlong?
cmp r0, #0 @ nonzero == yes
movne r0, #0 @ return minlong (8000000000000000)
movne r1, #0x80000000
popne {r4, pc}
mov r0, r4 @ recover arg
mov r1, r4
bl __aeabi_fcmpeq @ is arg == self?
cmp r0, #0 @ zero == no
moveq r1, #0 @ return zero for NaN
popeq {r4, pc}
mov r0, r4 @ recover arg
bl __aeabi_f2lz @ convert float to long
ldmfd sp!, {r4, pc}
/* ===== runtime/interpreter/mterp/arm/op_cmpl_double.S ===== */
/*
* Compare two floating-point values. Puts 0, 1, or -1 into the
* destination register based on the results of the comparison.
*
* int compare(x, y) {
* if (x == y) {
* return 0;
* } else if (x > y) {
* return 1;
* } else if (x < y) {
* return -1;
* } else {
* return -1;
* }
* }
*/
/* op vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
fldd d0, [r2] @ d0<- vBB
fldd d1, [r3] @ d1<- vCC
vcmpe.f64 d0, d1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mvn r0, #0 @ r0<- -1 (default)
GET_INST_OPCODE ip @ extract opcode from rINST
fmstat @ export status flags
movgt r0, #1 @ (greater than) r0<- 1
moveq r0, #0 @ (equal) r0<- 0
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_aput_wide.S ===== */
/*
* Array put, 64 bits. vBB[vCC] <- vAA.
*
* Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
*/
/* aput-wide vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
cmp r1, r3 @ compare unsigned index, length
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
GET_INST_OPCODE ip @ extract opcode from rINST
strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_mul_long.S ===== */
/*
* Signed 64-bit integer multiply.
*
* Consider WXxYZ (r1r0 x r3r2) with a long multiply:
*          WX
*        x YZ
*    --------
*       ZW ZX
*    YW YX
*
* The low word of the result holds ZX, the high word holds
* (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
* it doesn't fit in the low 64 bits.
*
* Unlike most ARM math operations, multiply instructions have
* restrictions on using the same register more than once (Rd and Rm
* cannot be the same).
*/
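/*
 * The same decomposition in C (a sketch; variable names are illustrative):
 *
 *   uint64_t mul64(uint64_t a, uint64_t b) {
 *       uint32_t x = (uint32_t)a, w = (uint32_t)(a >> 32);
 *       uint32_t z = (uint32_t)b, y = (uint32_t)(b >> 32);
 *       uint64_t zx = (uint64_t)z * x;                        // umull
 *       uint32_t hi = (uint32_t)(zx >> 32) + z*w + y*x;       // mla + add
 *       return ((uint64_t)hi << 32) | (uint32_t)zx;
 *   }
 */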
/* mul-long vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
mul ip, r2, r1 @ ip<- ZxW
umull r1, lr, r2, r0 @ r1/lr <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
mov r0, rINST, lsr #8 @ r0<- AA
add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_mul_long_2addr.S ===== */
/*
* Signed 64-bit integer multiply, "/2addr" version.
*
* See op_mul_long for an explanation.
*
* We get a little tight on registers, so to avoid looking up &fp[A]
* again we stuff it into rINST.
*/
/* mul-long/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
mul ip, r2, r1 @ ip<- ZxW
umull r1, lr, r2, r0 @ r1/lr <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
mov r0, rINST @ r0<- &fp[A] (free up rINST)
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
add r2, r2, lr @ r2<- r2 + low(ZxW + (YxX))
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm/op_div_int.S ===== */
%default {}
/*
* Specialized 32-bit binary operation
*
* Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
* depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
* ARMv7 CPUs that have hardware division support).
*
* div-int
*
*/
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
GET_VREG r1, r3 @ r1<- vCC
GET_VREG r0, r2 @ r0<- vBB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r0, r0, r1 @ r0<- op
#else
bl __aeabi_idiv @ r0<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 11-14 instructions */
/* ===== runtime/interpreter/mterp/arm/op_ushr_long.S ===== */
/*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to mask off the low
* 6 bits of the shift distance.
*/
/* ushr-long vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r2<- r2 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r0, r0, lsr r2 @ r0<- r0 >>> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
subs ip, r2, #32 @ ip<- r2 - 32
movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r1, r1, lsr r2 @ r1<- r1 >>> r2
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* ===== runtime/interpreter/mterp/arm64/footer.S ===== */
/*
* ===========================================================================
* Common subroutines and data
* ===========================================================================
*/
/*
* We've detected a condition that will result in an exception, but the exception
* has not yet been thrown. Just bail out to the reference interpreter to deal with it.
* TUNING: for consistency, we may want to just go ahead and handle these here.
*/
common_errDivideByZero:
EXPORT_PC
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpLogDivideByZeroException
#endif
b MterpCommonFallback
common_errArrayIndex:
EXPORT_PC
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpLogArrayIndexException
#endif
b MterpCommonFallback
common_errNegativeArraySize:
EXPORT_PC
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpLogNegativeArraySizeException
#endif
b MterpCommonFallback
common_errNoSuchMethod:
EXPORT_PC
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpLogNoSuchMethodException
#endif
b MterpCommonFallback
common_errNullObject:
EXPORT_PC
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpLogNullObjectException
#endif
b MterpCommonFallback
common_exceptionThrown:
EXPORT_PC
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpLogExceptionThrownException
#endif
b MterpCommonFallback
MterpSuspendFallback:
EXPORT_PC
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
bl MterpLogSuspendFallback
#endif
b MterpCommonFallback
/*
* If we're here, something is out of the ordinary. If there is a pending
* exception, handle it. Otherwise, roll back and retry with the reference
* interpreter.
*/
MterpPossibleException:
ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
cbz x0, MterpFallback // If not, fall back to reference interpreter.
/* intentional fallthrough - handle pending exception. */
/*
* On return from a runtime helper routine, we've found a pending exception.
* Can we handle it here - or need to bail out to caller?
*
*/
MterpException:
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpHandleException // (self, shadow_frame)
cbz w0, MterpExceptionReturn // no local catch, back to caller.
ldr x0, [xFP, #OFF_FP_CODE_ITEM]
ldr w1, [xFP, #OFF_FP_DEX_PC]
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
add xPC, x0, #CODEITEM_INSNS_OFFSET
add xPC, xPC, x1, lsl #1 // generate new dex_pc_ptr
/* Do we need to switch interpreters? */
bl MterpShouldSwitchInterpreters
cbnz w0, MterpFallback
/* resume execution at catch block */
EXPORT_PC
FETCH_INST
GET_INST_OPCODE ip
GOTO_OPCODE ip
/* NOTE: no fallthrough */
/*
* Common handling for branches with support for Jit profiling.
* On entry:
* wINST <= signed offset
* wPROFILE <= signed hotness countdown (expanded to 32 bits)
* condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
*
* We have quite a few different cases for branch profiling, OSR detection and
* suspend check support here.
*
* Taken backward branches:
* If profiling active, do hotness countdown and report if we hit zero.
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
* Is there a pending suspend request? If so, suspend.
*
* Taken forward branches and not-taken backward branches:
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
*
* Our most common case is expected to be a taken backward branch with active jit profiling,
* but no full OSR check and no pending suspend request.
* Next most common case is not-taken branch with no full OSR check.
*
*/
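/*
 * For illustration only: an approximate C rendering of the flow below. The
 * helper names are simplified stand-ins for the Mterp* routines actually
 * called, not real symbols.
 *
 *   if (offset > 0) {                                 // taken forward branch
 *       if (profile == JIT_CHECK_OSR) maybe_osr(offset);
 *   } else {                                          // taken backward branch
 *       if (profile >= 0 && --profile == 0)
 *           profile = report_hotness_batch();         // .L_add_batch
 *       if (profile == JIT_CHECK_OSR) maybe_osr(offset);
 *       else if (suspend_or_checkpoint_requested()) do_suspend_check();
 *   }
 *   resume_at(pc + 2 * offset);
 */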
MterpCommonTakenBranchNoFlags:
cmp wINST, #0
b.gt .L_forward_branch // don't add forward branches to hotness
tbnz wPROFILE, #31, .L_no_count_backwards // go if negative
subs wPROFILE, wPROFILE, #1 // countdown
b.eq .L_add_batch // counted down to zero - report
.L_resume_backward_branch:
ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
add w2, wINST, wINST // w2<- byte offset
FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
REFRESH_IBASE
ands lr, lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
b.ne .L_suspend_request_pending
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
.L_suspend_request_pending:
EXPORT_PC
mov x0, xSELF
bl MterpSuspendCheck // (self)
cbnz x0, MterpFallback
REFRESH_IBASE // might have changed during suspend
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
.L_no_count_backwards:
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.ne .L_resume_backward_branch
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
mov x2, xINST
EXPORT_PC
bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
cbnz x0, MterpOnStackReplacement
b .L_resume_backward_branch
.L_forward_branch:
cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
b.eq .L_check_osr_forward
.L_resume_forward_branch:
add w2, wINST, wINST // w2<- byte offset
FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
.L_check_osr_forward:
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
mov x2, xINST
EXPORT_PC
bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
cbnz x0, MterpOnStackReplacement
b .L_resume_forward_branch
.L_add_batch:
add x1, xFP, #OFF_FP_SHADOWFRAME
strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
ldr x0, [xFP, #OFF_FP_METHOD]
mov x2, xSELF
bl MterpAddHotnessBatch // (method, shadow_frame, self)
mov wPROFILE, w0 // restore new hotness countdown to wPROFILE
b .L_no_count_backwards
/*
* Entered from the conditional branch handlers when OSR check request active on
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
mov x2, #2
EXPORT_PC
bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
cbnz x0, MterpOnStackReplacement
FETCH_ADVANCE_INST 2
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
/*
* Check for a pending suspend request. Assumes wINST is already loaded, xPC has
* been advanced, and the thread flags are in w7; we still need to extract the
* opcode and branch to it.
*/
MterpCheckSuspendAndContinue:
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
ands w7, w7, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
b.ne check1
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
check1:
EXPORT_PC
mov x0, xSELF
bl MterpSuspendCheck // (self)
cbnz x0, MterpFallback // Something in the environment changed, switch interpreters
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
/*
* On-stack replacement has happened, and now we've returned from the compiled method.
*/
MterpOnStackReplacement:
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
sbfm x2, xINST, 0, 31
bl MterpLogOSR
#endif
mov x0, #1 // Signal normal return
b MterpDone
/*
* Bail out to reference interpreter.
*/
MterpFallback:
EXPORT_PC
#if MTERP_LOGGING
mov x0, xSELF
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpLogFallback
#endif
MterpCommonFallback:
mov x0, #0 // signal retry with reference interpreter.
b MterpDone
/*
* We pushed some registers on the stack in ExecuteMterpImpl, then saved
* SP and LR. Here we restore SP, restore the registers, and then restore
* LR to PC.
*
* On entry:
* uint32_t* xFP (should still be live, pointer to base of vregs)
*/
MterpExceptionReturn:
mov x0, #1 // signal return to caller.
b MterpDone
MterpReturn:
ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
str x0, [x2]
mov x0, #1 // signal return to caller.
MterpDone:
/*
* At this point, we expect wPROFILE to be non-zero. If negative, hotness is disabled or we're
* checking for OSR. If greater than zero, we might have unreported hotness to register
* (the difference between the ending wPROFILE and the cached hotness counter). wPROFILE
* should only reach zero immediately after a hotness decrement, and is then reset to either
* a negative special state or the new non-zero countdown value.
*/
cmp wPROFILE, #0
bgt MterpProfileActive // if > 0, we may have some counts to report.
ldp fp, lr, [sp, #64]
ldp xPC, xFP, [sp, #48]
ldp xSELF, xINST, [sp, #32]
ldp xIBASE, xREFS, [sp, #16]
ldp xPROFILE, x27, [sp], #80
ret
MterpProfileActive:
mov xINST, x0 // stash return value
/* Report cached hotness counts */
ldr x0, [xFP, #OFF_FP_METHOD]
add x1, xFP, #OFF_FP_SHADOWFRAME
mov x2, xSELF
strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
bl MterpAddHotnessBatch // (method, shadow_frame, self)
mov x0, xINST // restore return value
ldp fp, lr, [sp, #64]
ldp xPC, xFP, [sp, #48]
ldp xSELF, xINST, [sp, #32]
ldp xIBASE, xREFS, [sp, #16]
ldp xPROFILE, x27, [sp], #80
ret
.cfi_endproc
.size ExecuteMterpImpl, .-ExecuteMterpImpl
runtime/interpreter/mterp/arm64/op_aget.S
%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
* Array get, 32 bits or less. vAA <- vBB[vCC].
*
* Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
* instructions. We use a pair of FETCH_Bs instead.
*
* for: aget, aget-boolean, aget-byte, aget-char, aget-short
*
* NOTE: assumes data offset for arrays is the same for all non-wide types.
* If this changes, specialize.
*/
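/*
 * For illustration only: the checks below in C, with an illustrative array
 * layout (the real offsets come from the MIRROR_* constants in asm_support.h)
 * and throw_* standing in for the common_err* handlers.
 *
 *   #include <stdint.h>
 *   extern void throw_null_object(void);              // does not return
 *   extern void throw_array_index(void);              // does not return
 *   struct IntArray { uint32_t length; int32_t data[1]; };
 *   static int32_t aget(struct IntArray* arr, uint32_t idx) {
 *       if (arr == 0) throw_null_object();            // cbz -> common_errNullObject
 *       if (idx >= arr->length) throw_array_index();  // unsigned cmp, bcs
 *       return arr->data[idx];
 *   }
 */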
/* op vAA, vBB, vCC */
FETCH_B w2, 1, 0 // w2<- BB
lsr w9, wINST, #8 // w9<- AA
FETCH_B w3, 1, 1 // w3<- CC
GET_VREG w0, w2 // w0<- vBB (array object)
GET_VREG w1, w3 // w1<- vCC (requested index)
cbz x0, common_errNullObject // bail if null array object.
ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
add x0, x0, w1, uxtw #$shift // x0<- arrayObj + index*width
cmp w1, w3 // compare unsigned index, length
bcs common_errArrayIndex // index >= length, bail
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
$load w2, [x0, #$data_offset] // w2<- vBB[vCC]
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG w2, w9 // vAA<- w2
GOTO_OPCODE ip // jump to next instruction
runtime/interpreter/mterp/arm64/binop.S
%default {"preinstr":"", "result":"w0", "chkzero":"0"}
/*
* Generic 32-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = w0 op w1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus. Note that we
* *don't* check for (INT_MIN / -1) here, because the ARM math lib
* handles it correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
* mul-float, div-float, rem-float
*/
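/*
 * For illustration only: what one instantiation of this template computes. For
 * div-int, $chkzero is 1 and $instr is an sdiv, which in C terms behaves like
 * the sketch here (throw_div_by_zero is an illustrative stand-in).
 *
 *   #include <stdint.h>
 *   extern void throw_div_by_zero(void);              // does not return
 *   static int32_t div_int(int32_t vbb, int32_t vcc) {
 *       if (vcc == 0) throw_div_by_zero();            // cbz w1, common_errDivideByZero
 *       if (vbb == INT32_MIN && vcc == -1)            // UB in C; sdiv just wraps,
 *           return INT32_MIN;                         // which is why no check is emitted
 *       return vbb / vcc;                             // $instr, e.g. "sdiv w0, w0, w1"
 *   }
 */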
/* binop vAA, vBB, vCC */
FETCH w0, 1 // w0<- CCBB
lsr w9, wINST, #8 // w9<- AA
lsr w3, w0, #8 // w3<- CC
and w2, w0, #255 // w2<- BB
GET_VREG w1, w3 // w1<- vCC
GET_VREG w0, w2 // w0<- vBB
.if $chkzero
cbz w1, common_errDivideByZero // is second operand zero?
.endif
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
$preinstr // optional op; may set condition codes
$instr // $result<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG $result, w9 // vAA<- $result
GOTO_OPCODE ip // jump to next instruction
/* 11-14 instructions */
runtime/interpreter/mterp/arm64/entry.S
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
.text
/*
* Interpreter entry point.
* On entry:
* x0 Thread* self
* x1 code_item
* x2 ShadowFrame
* x3 JValue* result_register
*
*/
.global ExecuteMterpImpl
.type ExecuteMterpImpl, %function
.balign 16
ExecuteMterpImpl:
.cfi_startproc
stp xPROFILE, x27, [sp, #-80]!
stp xIBASE, xREFS, [sp, #16]
stp xSELF, xINST, [sp, #32]
stp xPC, xFP, [sp, #48]
stp fp, lr, [sp, #64]
add fp, sp, #64
/* Remember the return register */
str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
/* Remember the code_item */
str x1, [x2, #SHADOWFRAME_CODE_ITEM_OFFSET]
/* set up "named" registers */
mov xSELF, x0
ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[]
add xPC, xPC, w0, lsl #1 // Create direct pointer to 1st dex opcode
EXPORT_PC
/* Starting ibase */
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
/* Set up for backwards branches & osr profiling */
ldr x0, [xFP, #OFF_FP_METHOD]
add x1, xFP, #OFF_FP_SHADOWFRAME
bl MterpSetUpHotnessCountdown
mov wPROFILE, w0 // Starting hotness countdown in wPROFILE
/* start executing the instruction at rPC */
FETCH_INST // load wINST from rPC
GET_INST_OPCODE ip // extract opcode from wINST
GOTO_OPCODE ip // jump to next instruction
/* NOTE: no fallthrough */
runtime/interpreter/mterp/arm64/binop2addr.S
%default {"preinstr":"", "result":"w0", "chkzero":"0"}
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = w0 op w1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
* For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
* rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
* shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
* sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
lsr w3, wINST, #12 // w3<- B
ubfx w9, wINST, #8, #4 // w9<- A
GET_VREG w1, w3 // w1<- vB
GET_VREG w0, w9 // w0<- vA
.if $chkzero
cbz w1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
$preinstr // optional op; may set condition codes
$instr // $result<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG $result, w9 // vAA<- $result
GOTO_OPCODE ip // jump to next instruction
/* 10-13 instructions */
runtime/interpreter/mterp/arm64/header.S
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Art assembly interpreter notes:
First validate assembly code by implementing an ExecuteXXXImpl() style body (doesn't
handle invoke; allows higher-level code to create the frame & shadow frame).
Once that's working, support direct entry code & eliminate the shadow frame (and
excess locals allocation).
Some (hopefully) temporary ugliness. We'll treat xFP as pointing to the
base of the vreg array within the shadow frame. Access the other fields,
dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
the shadow frame mechanism of double-storing object references - via xFP &
number_of_vregs_.
*/
/*
ARM64 Runtime register usage conventions.
r0 : w0 is 32-bit return register and x0 is 64-bit.
r0-r7 : Argument registers.
r8-r15 : Caller save registers (used as temporary registers).
r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
the linker, by the trampolines and other stubs (the backend uses
these as temporary registers).
r18 : Caller save register (used as temporary register).
r19 : Pointer to thread-local storage.
r20-r29: Callee save registers.
r30 : (lr) is reserved (the link register).
rsp : (sp) is reserved (the stack pointer).
rzr : (zr) is reserved (the zero register).
Floating-point registers
v0-v31
v0 : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
This is analogous to the C/C++ (hard-float) calling convention.
v0-v7 : Floating-point argument registers in both Dalvik and C/C++ conventions.
Also used as temporary and codegen scratch registers.
v0-v7 and v16-v31 : trashed across C calls.
v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
v16-v31: Used as codegen temp/scratch.
v8-v15 : Can be used for promotion.
Must maintain 16-byte stack alignment.
Mterp notes:
The following registers have fixed assignments:
reg nick purpose
x20 xPC interpreted program counter, used for fetching instructions
x21 xFP interpreted frame pointer, used for accessing locals and args
x22 xSELF self (Thread) pointer
x23 xINST first 16-bit code unit of current instruction
x24 xIBASE interpreted instruction base pointer, used for computed goto
x25 xREFS base of object references in shadow frame (ideally, we'll get rid of this later).
x26 wPROFILE jit profile hotness countdown
x16 ip scratch reg
x17 ip2 scratch reg (used by macros)
Macros are provided for common operations. They MUST NOT alter unspecified registers or condition
codes.
*/
/*
* This is a #include, not a %include, because we want the C pre-processor
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0
/* During bringup, we'll use the shadow frame model instead of xFP */
/* single-purpose registers, given names for clarity */
#define xPC x20
#define xFP x21
#define xSELF x22
#define xINST x23
#define wINST w23
#define xIBASE x24
#define xREFS x25
#define wPROFILE w26
#define xPROFILE x26
#define ip x16
#define ip2 x17
/*
* Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs. So,
* to access other shadow frame fields, we need to use a backwards offset. Define those here.
*/
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
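/*
 * For illustration only: the same negative-offset trick in C, with an
 * illustrative subset of ShadowFrame (the real offsets come from asm_support.h).
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *   struct ShadowFrame {
 *       uint32_t number_of_vregs;
 *       uint32_t dex_pc;
 *       uint32_t vregs[1];                 // xFP points at vregs[0]
 *   };
 *   #define OFF(f) ((int)offsetof(struct ShadowFrame, f) - \
 *                   (int)offsetof(struct ShadowFrame, vregs))
 *   // e.g. dex_pc lives at (char*)xFP + OFF(dex_pc), a negative offset.
 */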
/*
* "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
* be done *before* something throws.
*
* It's okay to do this more than once.
*
* NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
* dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
* offset into the code_items_[] array. For efficiency, we will "export" the
* current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
* to convert to a dex pc when needed.
*/
.macro EXPORT_PC
str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
.endm
/*
* Fetch the next instruction from xPC into wINST. Does not advance xPC.
*/
.macro FETCH_INST
ldrh wINST, [xPC]
.endm
/*
* Fetch the next instruction from the specified offset. Advances xPC
* to point to the next instruction. "_count" is in 16-bit code units.
*
* Because of the limited size of immediate constants on ARM, this is only
* suitable for small forward movements (i.e. don't try to implement "goto"
* with this).
*
* This must come AFTER anything that can throw an exception, or the
* exception catch may miss. (This also implies that it must come after
* EXPORT_PC.)
*/
.macro FETCH_ADVANCE_INST count
ldrh wINST, [xPC, #((\count)*2)]!
.endm
/*
* The operation performed here is similar to FETCH_ADVANCE_INST, except the
* src and dest registers are parameterized (not hard-wired to xPC and xINST).
*/
.macro PREFETCH_ADVANCE_INST dreg, sreg, count
ldrh \dreg, [\sreg, #((\count)*2)]!
.endm
/*
* Similar to FETCH_ADVANCE_INST, but does not update xPC. Used to load
* xINST ahead of possible exception point. Be sure to manually advance xPC
* later.
*/
.macro PREFETCH_INST count
ldrh wINST, [xPC, #((\count)*2)]
.endm
/* Advance xPC by some number of code units. */
.macro ADVANCE count
add xPC, xPC, #((\count)*2)
.endm
/*
* Fetch the next instruction from an offset specified by _reg, advancing xPC
* to point to the next instruction. "_reg" must specify the distance
* in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
*
*/
.macro FETCH_ADVANCE_INST_RB reg
add xPC, xPC, \reg, sxtw
ldrh wINST, [xPC]
.endm
/*
* Fetch a half-word code unit from an offset past the current PC. The
* "_count" value is in 16-bit code units. Does not advance xPC.
*
* The "_S" variant works the same but treats the value as signed.
*/
.macro FETCH reg, count
ldrh \reg, [xPC, #((\count)*2)]
.endm
.macro FETCH_S reg, count
ldrsh \reg, [xPC, #((\count)*2)]
.endm
/*
* Fetch one byte from an offset past the current PC. Pass in the same
* "_count" as you would for FETCH, and an additional 0/1 indicating which
* byte of the halfword you want (lo/hi).
*/
.macro FETCH_B reg, count, byte
ldrb \reg, [xPC, #((\count)*2+(\byte))]
.endm
/*
* Put the instruction's opcode field into the specified register.
*/
.macro GET_INST_OPCODE reg
and \reg, xINST, #255
.endm
/*
* Put the prefetched instruction's opcode field into the specified register.
*/
.macro GET_PREFETCHED_OPCODE oreg, ireg
and \oreg, \ireg, #255
.endm
/*
* Begin executing the opcode in _reg. Clobbers reg
*/
.macro GOTO_OPCODE reg
add \reg, xIBASE, \reg, lsl #${handler_size_bits}
br \reg
.endm
.macro GOTO_OPCODE_BASE base,reg
add \reg, \base, \reg, lsl #${handler_size_bits}
br \reg
.endm
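/*
 * For illustration only: computed-goto dispatch in C. Every handler is padded
 * to the same power-of-two size, so the target is just base + (opcode << bits);
 * the real code tail-jumps (br) rather than calling.
 *
 *   #include <stdint.h>
 *   typedef void (*Handler)(void);
 *   static void goto_opcode(uintptr_t ibase, uint32_t opcode, unsigned bits) {
 *       Handler h = (Handler)(ibase + ((uintptr_t)opcode << bits));
 *       h();
 *   }
 */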
/*
* Get/set the 32-bit value from a Dalvik register.
*/
.macro GET_VREG reg, vreg
ldr \reg, [xFP, \vreg, uxtw #2]
.endm
.macro SET_VREG reg, vreg
str \reg, [xFP, \vreg, uxtw #2]
str wzr, [xREFS, \vreg, uxtw #2]
.endm
.macro SET_VREG_OBJECT reg, vreg, tmpreg
str \reg, [xFP, \vreg, uxtw #2]
str \reg, [xREFS, \vreg, uxtw #2]
.endm
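/*
 * For illustration only: the double-store scheme in C. Every vreg write also
 * updates the parallel reference array, so the GC sees the object pointer for
 * reference writes and zero for everything else.
 *
 *   #include <stdint.h>
 *   static void set_vreg(uint32_t* fp, uint32_t* refs, uint32_t v,
 *                        uint32_t value, int is_object) {
 *       fp[v] = value;                    // str reg, [xFP, ...]
 *       refs[v] = is_object ? value : 0;  // str reg/wzr, [xREFS, ...]
 *   }
 */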
/*
* Get/set the 64-bit value from a Dalvik register.
* TUNING: can we do better here?
*/
.macro GET_VREG_WIDE reg, vreg
add ip2, xFP, \vreg, lsl #2
ldr \reg, [ip2]
.endm
.macro SET_VREG_WIDE reg, vreg
add ip2, xFP, \vreg, lsl #2
str \reg, [ip2]
add ip2, xREFS, \vreg, lsl #2
str xzr, [ip2]
.endm
/*
* Convert a virtual register index into an address.
*/
.macro VREG_INDEX_TO_ADDR reg, vreg
add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */
.endm
/*
* Refresh handler table.
*/
.macro REFRESH_IBASE
ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
.endm
runtime/interpreter/mterp/arm64/op_aput.S
%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
* Array put, 32 bits or less. vBB[vCC] <- vAA.
*
* Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
* instructions. We use a pair of FETCH_Bs instead.
*
* for: aput, aput-boolean, aput-byte, aput-char, aput-short
*
* NOTE: this assumes data offset for arrays is the same for all non-wide types.
* If this changes, specialize.
*/
/* op vAA, vBB, vCC */
FETCH_B w2, 1, 0 // w2<- BB
lsr w9, wINST, #8 // w9<- AA
FETCH_B w3, 1, 1 // w3<- CC
GET_VREG w0, w2 // w0<- vBB (array object)
GET_VREG w1, w3 // w1<- vCC (requested index)
cbz w0, common_errNullObject // bail if null
ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
add x0, x0, w1, lsl #$shift // x0<- arrayObj + index*width
cmp w1, w3 // compare unsigned index, length
bcs common_errArrayIndex // index >= length, bail
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
GET_VREG w2, w9 // w2<- vAA
GET_INST_OPCODE ip // extract opcode from rINST
$store w2, [x0, #$data_offset] // vBB[vCC]<- w2
GOTO_OPCODE ip // jump to next instruction
runtime/interpreter/mterp/arm64/op_instance_of.S
/*
* Check to see if an object reference is an instance of a class.
*
* Most common situation is a non-null object, being compared against
* an already-resolved class.
*/
/* instance-of vA, vB, class//CCCC */
EXPORT_PC
FETCH w0, 1 // w0<- CCCC
lsr w1, wINST, #12 // w1<- B
VREG_INDEX_TO_ADDR x1, w1 // x1<- &object
ldr x2, [xFP, #OFF_FP_METHOD] // x2<- method
mov x3, xSELF // x3<- self
bl MterpInstanceOf // (index, &obj, method, self)
ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
lsr w2, wINST, #8 // w2<- A+
and w2, w2, #15 // w2<- A
PREFETCH_INST 2
cbnz x1, MterpException
ADVANCE 2 // advance rPC
SET_VREG w0, w2 // vA<- w0
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
runtime/interpreter/mterp/arm64/op_iget.S
%default { "extend":"", "is_object":"0", "helper":"artGet32InstanceFromCode"}
/*
* General instance field get.
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
EXPORT_PC
FETCH w0, 1 // w0<- field ref CCCC
lsr w1, wINST, #12 // w1<- B
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ldr x2, [xFP, #OFF_FP_METHOD] // x2<- referrer
mov x3, xSELF // x3<- self
bl $helper
ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
$extend
ubfx w2, wINST, #8, #4 // w2<- A
PREFETCH_INST 2
cbnz x3, MterpPossibleException // bail out
.if $is_object
SET_VREG_OBJECT w0, w2 // fp[A]<- w0
.else
SET_VREG w0, w2 // fp[A]<- w0
.endif
ADVANCE 2
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
runtime/interpreter/mterp/arm64/binopLit8.S
%default {"preinstr":"", "result":"w0", "chkzero":"0"}
/*
* Generic 32-bit "lit8" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = w0 op w1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
* For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
lsr w9, wINST, #8 // w9<- AA
and w2, w3, #255 // w2<- BB
GET_VREG w0, w2 // w0<- vBB
asr w1, w3, #8 // w1<- ssssssCC (sign extended)
.if $chkzero
cbz w1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
$preinstr // optional op; may set condition codes
$instr // $result<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG $result, w9 // vAA<- $result
GOTO_OPCODE ip // jump to next instruction
/* 10-12 instructions */
runtime/interpreter/mterp/arm64/binopLit16.S
%default {"preinstr":"", "result":"w0", "chkzero":"0"}
/*
* Generic 32-bit "lit16" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = w0 op w1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than w0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
lsr w2, wINST, #12 // w2<- B
ubfx w9, wINST, #8, #4 // w9<- A
GET_VREG w0, w2 // w0<- vB
.if $chkzero
cbz w1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
$preinstr
$instr // $result<- op, w0-w3 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG $result, w9 // vAA<- $result
GOTO_OPCODE ip // jump to next instruction
/* 10-13 instructions */
runtime/interpreter/mterp/arm64/op_aget_wide.S
/*
* Array get, 64 bits. vAA <- vBB[vCC].
*
*/
/* aget-wide vAA, vBB, vCC */
FETCH w0, 1 // w0<- CCBB
lsr w4, wINST, #8 // w4<- AA
and w2, w0, #255 // w2<- BB
lsr w3, w0, #8 // w3<- CC
GET_VREG w0, w2 // w0<- vBB (array object)
GET_VREG w1, w3 // w1<- vCC (requested index)
cbz w0, common_errNullObject // bail if null array object
ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
add x0, x0, w1, lsl #3 // x0<- arrayObj + index*width
cmp w1, w3 // compare unsigned index, length
bcs common_errArrayIndex // index >= length, bail
FETCH_ADVANCE_INST 2 // advance rPC, load wINST
ldr x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] // x2<- vBB[vCC]
GET_INST_OPCODE ip // extract opcode from wINST
SET_VREG_WIDE x2, w4
GOTO_OPCODE ip // jump to next instruction
runtime/interpreter/mterp/arm64/binopWide2addr.S
%default {"preinstr":"", "instr":"add x0, x0, x1", "r0":"x0", "r1":"x1", "chkzero":"0"}
/*
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "x0 = x0 op x1".
* This must not be a function call, as we keep w2 live across it.
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr,
* shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
* sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
*/
/* binop/2addr vA, vB */
lsr w1, wINST, #12 // w1<- B
ubfx w2, wINST, #8, #4 // w2<- A
GET_VREG_WIDE $r1, w1 // x1<- vB
GET_VREG_WIDE $r0, w2 // x0<- vA
.if $chkzero
cbz $r1, common_errDivideByZero
.endif
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
$preinstr
$instr // result<- op
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG_WIDE $r0, w2 // vAA<- result
GOTO_OPCODE ip // jump to next instruction
/* 10-13 instructions */
runtime/interpreter/mterp/arm64/binopWide.S
%default {"preinstr":"", "instr":"add x0, x1, x2", "result":"x0", "r1":"x1", "r2":"x2", "chkzero":"0"}
/*
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = x1 op x2".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than x0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (w1). Useful for integer division and modulus.
*
* For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
* xor-long, add-double, sub-double, mul-double, div-double, rem-double
*/
/* binop vAA, vBB, vCC */
FETCH w0, 1 // w0<- CCBB
lsr w4, wINST, #8 // w4<- AA
lsr w2, w0, #8 // w2<- CC
and w1, w0, #255 // w1<- BB
GET_VREG_WIDE $r2, w2 // w2<- vCC
GET_VREG_WIDE $r1, w1 // w1<- vBB
.if $chkzero
cbz $r2, common_errDivideByZero // is second operand zero?
.endif
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
$preinstr
$instr // $result<- op, w0-w4 changed
GET_INST_OPCODE ip // extract opcode from rINST
SET_VREG_WIDE $result, w4 // vAA<- $result
GOTO_OPCODE ip // jump to next instruction
/* 11-14 instructions */
runtime/interpreter/mterp/arm64/op_aput_wide.S
/*
* Array put, 64 bits. vBB[vCC] <- vAA.
*
*/
/* aput-wide vAA, vBB, vCC */
FETCH w0, 1 // w0<- CCBB
lsr w4, wINST, #8 // w4<- AA
and w2, w0, #255 // w2<- BB
lsr w3, w0, #8 // w3<- CC
GET_VREG w0, w2 // w0<- vBB (array object)
GET_VREG w1, w3 // w1<- vCC (requested index)
cbz w0, common_errNullObject // bail if null
ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
add x0, x0, w1, lsl #3 // x0<- arrayObj + index*width
cmp w1, w3 // compare unsigned index, length
bcs common_errArrayIndex // index >= length, bail
GET_VREG_WIDE x1, w4
FETCH_ADVANCE_INST 2 // advance rPC, load wINST
GET_INST_OPCODE ip // extract opcode from wINST
str x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
GOTO_OPCODE ip // jump to next instruction
runtime/interpreter/mterp/x86/op_shr_long.S
/*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to mask off the low
* 6 bits of the shift distance. x86 shifts automatically mask off
* the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
* case specially.
*/
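/*
 * For illustration only: the shrd/sar sequence plus the shift >= 32 fixup in C
 * (assumes arithmetic >> on signed values, as the handler does).
 *
 *   #include <stdint.h>
 *   static int64_t shr_long(int64_t v, uint32_t shift) {
 *       uint32_t lo = (uint32_t)v;
 *       int32_t  hi = (int32_t)(v >> 32);
 *       uint32_t s  = shift & 31;                     // what shrd/sar see in %cl
 *       uint32_t rlo = s ? (lo >> s) | ((uint32_t)hi << (32 - s)) : lo;  // shrdl
 *       int32_t  rhi = hi >> s;                       // sarl %cl
 *       if (shift & 32) {                             // testb $32, %cl
 *           rlo = (uint32_t)rhi;
 *           rhi = hi >> 31;                           // sign fill
 *       }
 *       return (int64_t)(((uint64_t)(uint32_t)rhi << 32) | rlo);
 *   }
 */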
/* shr-long vAA, vBB, vCC */
/* ecx gets shift count */
/* Need to spill rIBASE */
/* rINSTw gets AA */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
GET_VREG_HIGH rIBASE, %eax # rIBASE<- v[BB+1]
GET_VREG %ecx, %ecx # ecx <- vCC
GET_VREG %eax, %eax # eax <- v[BB+0]
shrdl rIBASE, %eax
sarl %cl, rIBASE
testb $$32, %cl
je 2f
movl rIBASE, %eax
sarl $$31, rIBASE
2:
SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
runtime/interpreter/mterp/x86/footer.S
/*
* ===========================================================================
* Common subroutines and data
* ===========================================================================
*/
.text
.align 2
/*
* We've detected a condition that will result in an exception, but the exception
* has not yet been thrown. Just bail out to the reference interpreter to deal with it.
* TUNING: for consistency, we may want to just go ahead and handle these here.
*/
common_errDivideByZero:
EXPORT_PC
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpLogDivideByZeroException)
#endif
jmp MterpCommonFallback
common_errArrayIndex:
EXPORT_PC
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpLogArrayIndexException)
#endif
jmp MterpCommonFallback
common_errNegativeArraySize:
EXPORT_PC
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpLogNegativeArraySizeException)
#endif
jmp MterpCommonFallback
common_errNoSuchMethod:
EXPORT_PC
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpLogNoSuchMethodException)
#endif
jmp MterpCommonFallback
common_errNullObject:
EXPORT_PC
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpLogNullObjectException)
#endif
jmp MterpCommonFallback
common_exceptionThrown:
EXPORT_PC
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpLogExceptionThrownException)
#endif
jmp MterpCommonFallback
MterpSuspendFallback:
EXPORT_PC
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
movl THREAD_FLAGS_OFFSET(%eax), %eax
movl %eax, OUT_ARG2(%esp)
call SYMBOL(MterpLogSuspendFallback)
#endif
jmp MterpCommonFallback
/*
* If we're here, something is out of the ordinary. If there is a pending
* exception, handle it. Otherwise, roll back and retry with the reference
* interpreter.
*/
MterpPossibleException:
movl rSELF, %eax
testl $$-1, THREAD_EXCEPTION_OFFSET(%eax)
jz MterpFallback
/* intentional fallthrough - handle pending exception. */
/*
* On return from a runtime helper routine, we've found a pending exception.
* Can we handle it here - or need to bail out to caller?
*
*/
MterpException:
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpHandleException)
testb %al, %al
jz MterpExceptionReturn
movl OFF_FP_CODE_ITEM(rFP), %eax
movl OFF_FP_DEX_PC(rFP), %ecx
lea CODEITEM_INSNS_OFFSET(%eax), rPC
lea (rPC, %ecx, 2), rPC
movl rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
call SYMBOL(MterpShouldSwitchInterpreters)
testb %al, %al
jnz MterpFallback
/* resume execution at catch block */
REFRESH_IBASE
FETCH_INST
GOTO_NEXT
/* NOTE: no fallthrough */
/*
* Common handling for branches with support for Jit profiling.
* On entry:
* rINST <= signed offset
* condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
*
* We have quite a few different cases for branch profiling, OSR detection and
* suspend check support here.
*
* Taken backward branches:
* If profiling active, do hotness countdown and report if we hit zero.
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
* Is there a pending suspend request? If so, suspend.
*
* Taken forward branches and not-taken backward branches:
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
*
* Our most common case is expected to be a taken backward branch with active jit profiling,
* but no full OSR check and no pending suspend request.
* Next most common case is not-taken branch with no full OSR check.
*
*/
MterpCommonTakenBranch:
jg .L_forward_branch # don't add forward branches to hotness
/*
* We need to subtract 1 from positive values and we should not see 0 here,
* so we may use the result of the comparison with -1.
*/
#if JIT_CHECK_OSR != -1
# error "JIT_CHECK_OSR must be -1."
#endif
cmpw $$JIT_CHECK_OSR, rPROFILE
je .L_osr_check
decw rPROFILE
je .L_add_batch # counted down to zero - report
.L_resume_backward_branch:
movl rSELF, %eax
testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
leal (rPC, rINST, 2), rPC
FETCH_INST
jnz .L_suspend_request_pending
REFRESH_IBASE
GOTO_NEXT
.L_suspend_request_pending:
EXPORT_PC
movl %eax, OUT_ARG0(%esp) # rSELF in eax
call SYMBOL(MterpSuspendCheck) # (self)
testb %al, %al
jnz MterpFallback
REFRESH_IBASE # might have changed during suspend
GOTO_NEXT
.L_no_count_backwards:
cmpw $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
jne .L_resume_backward_branch
.L_osr_check:
EXPORT_PC
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
movl rINST, OUT_ARG2(%esp)
call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
testb %al, %al
jz .L_resume_backward_branch
jmp MterpOnStackReplacement
.L_forward_branch:
cmpw $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
je .L_check_osr_forward
.L_resume_forward_branch:
leal (rPC, rINST, 2), rPC
FETCH_INST
GOTO_NEXT
.L_check_osr_forward:
EXPORT_PC
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
movl rINST, OUT_ARG2(%esp)
call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
testb %al, %al
REFRESH_IBASE
jz .L_resume_forward_branch
jmp MterpOnStackReplacement
.L_add_batch:
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG2(%esp)
call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
jmp .L_no_count_backwards
/*
* Entered from the conditional branch handlers when OSR check request active on
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
EXPORT_PC
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
movl $$2, OUT_ARG2(%esp)
call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
testb %al, %al
REFRESH_IBASE
jnz MterpOnStackReplacement
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/*
* On-stack replacement has happened, and now we've returned from the compiled method.
*/
MterpOnStackReplacement:
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
movl rINST, OUT_ARG2(%esp)
call SYMBOL(MterpLogOSR)
#endif
movl $$1, %eax
jmp MterpDone
/*
* Bail out to reference interpreter.
*/
MterpFallback:
EXPORT_PC
#if MTERP_LOGGING
movl rSELF, %eax
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpLogFallback)
#endif
MterpCommonFallback:
xor %eax, %eax
jmp MterpDone
/*
* On entry:
* uint32_t* rFP (should still be live, pointer to base of vregs)
*/
MterpExceptionReturn:
movl $$1, %eax
jmp MterpDone
MterpReturn:
movl OFF_FP_RESULT_REGISTER(rFP), %edx
movl %eax, (%edx)
movl %ecx, 4(%edx)
mov $$1, %eax
MterpDone:
/*
* At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
* checking for OSR. If greater than zero, we might have unreported hotness to register
* (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
* should only reach zero immediately after a hotness decrement, and is then reset to either
* a negative special state or the new non-zero countdown value.
*/
cmpw $$0, rPROFILE
jle MRestoreFrame # if <= 0, no counts to report; otherwise fall through.
movl %eax, rINST # stash return value
/* Report cached hotness counts */
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG2(%esp)
call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
movl rINST, %eax # restore return value
/* pop up frame */
MRestoreFrame:
addl $$FRAME_SIZE, %esp
.cfi_adjust_cfa_offset -FRAME_SIZE
/* Restore callee save register */
POP %ebx
POP %esi
POP %edi
POP %ebp
ret
.cfi_endproc
SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
runtime/interpreter/mterp/x86/op_shl_long.S
/*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to mask off the low
* 6 bits of the shift distance. x86 shifts automatically mask off
* the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
* case specially.
*/
/* shl-long vAA, vBB, vCC */
/* ecx gets shift count */
/* Need to spill rIBASE */
/* rINSTw gets AA */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
GET_VREG %ecx, %ecx # ecx <- vCC
GET_VREG %eax, %eax # eax <- v[BB+0]
shldl %eax,rIBASE
sall %cl, %eax
testb $$32, %cl
je 2f
movl %eax, rIBASE
xorl %eax, %eax
2:
SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
SET_VREG %eax, rINST # v[AA+0] <- %eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
runtime/interpreter/mterp/x86/entry.S
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Interpreter entry point.
*/
.text
.global SYMBOL(ExecuteMterpImpl)
FUNCTION_TYPE(ExecuteMterpImpl)
/*
* On entry:
* 0 Thread* self
* 1 code_item
* 2 ShadowFrame
* 3 JValue* result_register
*
*/
SYMBOL(ExecuteMterpImpl):
.cfi_startproc
.cfi_def_cfa esp, 4
/* Spill callee save regs */
PUSH %ebp
PUSH %edi
PUSH %esi
PUSH %ebx
/* Allocate frame */
subl $$FRAME_SIZE, %esp
.cfi_adjust_cfa_offset FRAME_SIZE
/* Load ShadowFrame pointer */
movl IN_ARG2(%esp), %edx
/* Remember the return register */
movl IN_ARG3(%esp), %eax
movl %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
/* Remember the code_item */
movl IN_ARG1(%esp), %ecx
movl %ecx, SHADOWFRAME_CODE_ITEM_OFFSET(%edx)
/* set up "named" registers */
movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
leal SHADOWFRAME_VREGS_OFFSET(%edx), rFP
leal (rFP, %eax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
lea CODEITEM_INSNS_OFFSET(%ecx), rPC
lea (rPC, %eax, 2), rPC
EXPORT_PC
/* Set up for backwards branches & osr profiling */
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
call SYMBOL(MterpSetUpHotnessCountdown)
/* Starting ibase */
REFRESH_IBASE
/* start executing the instruction at rPC */
FETCH_INST
GOTO_NEXT
/* NOTE: no fallthrough */
runtime/interpreter/mterp/x86/header.S
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Art assembly interpreter notes:
First validate assembly code by implementing an ExecuteXXXImpl() style body (doesn't
handle invoke; allows higher-level code to create the frame & shadow frame).
Once that's working, support direct entry code & eliminate the shadow frame (and
excess locals allocation).
Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
base of the vreg array within the shadow frame. Access the other fields,
dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
the shadow frame mechanism of double-storing object references - via rFP &
number_of_vregs_.
*/
/*
x86 ABI general notes:
Caller save set:
eax, edx, ecx, st(0)-st(7)
Callee save set:
ebx, esi, edi, ebp
Return regs:
32-bit in eax
64-bit in edx:eax (low-order 32 in eax)
fp on top of fp stack st(0)
Parameters passed on stack, pushed right-to-left. On entry to target, first
parm is at 4(%esp). Traditional entry code is:
functEntry:
push %ebp # save old frame pointer
mov %esp,%ebp # establish new frame pointer
sub FrameSize,%esp # Allocate storage for spill, locals & outs
Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
Stack must be 16-byte aligned to support SSE in native code.
If we're not doing variable stack allocation (alloca), the frame pointer can be
eliminated and all arg references adjusted to be esp relative.
*/
/*
Mterp and x86 notes:
Some key interpreter variables will be assigned to registers.
nick reg purpose
rPC esi interpreted program counter, used for fetching instructions
rFP edi interpreted frame pointer, used for accessing locals and args
rINSTw bx first 16-bit code unit of current instruction
rINSTbl bl opcode portion of instruction word
rINSTbh bh high byte of inst word, usually contains src/tgt reg names
rIBASE edx base of instruction handler table
rREFS ebp base of object references in shadow frame.
Notes:
o High order 16 bits of ebx must be zero on entry to handler
o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
o eax and ecx are scratch, rINSTw/ebx sometimes scratch
Macros are provided for common operations. Each macro MUST emit only
one instruction to make instruction-counting easier. They MUST NOT alter
unspecified registers or condition codes.
*/
/*
* This is a #include, not a %include, because we want the C pre-processor
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
/*
* Handle mac compiler specific
*/
#if defined(__APPLE__)
#define MACRO_LITERAL(value) $$(value)
#define FUNCTION_TYPE(name)
#define SIZE(start,end)
// Mac OS' symbols have an _ prefix.
#define SYMBOL(name) _ ## name
#else
#define MACRO_LITERAL(value) $$value
#define FUNCTION_TYPE(name) .type name, @function
#define SIZE(start,end) .size start, .-end
#define SYMBOL(name) name
#endif
.macro PUSH _reg
pushl \_reg
.cfi_adjust_cfa_offset 4
.cfi_rel_offset \_reg, 0
.endm
.macro POP _reg
popl \_reg
.cfi_adjust_cfa_offset -4
.cfi_restore \_reg
.endm
/*
* Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
* to access other shadow frame fields, we need to use a backwards offset. Define those here.
*/
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
/* Frame size must keep the stack 16-byte aligned.
* Remember to allow 4 bytes for the return address + 4 * 4 for the callee-save spills.
*/
#define FRAME_SIZE 28
/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
#define IN_ARG3 (FRAME_SIZE + 16 + 16)
#define IN_ARG2 (FRAME_SIZE + 16 + 12)
#define IN_ARG1 (FRAME_SIZE + 16 + 8)
#define IN_ARG0 (FRAME_SIZE + 16 + 4)
/* Spill offsets relative to %esp */
#define LOCAL0 (FRAME_SIZE - 4)
#define LOCAL1 (FRAME_SIZE - 8)
#define LOCAL2 (FRAME_SIZE - 12)
/* Out Arg offsets, relative to %esp */
#define OUT_ARG3 ( 12)
#define OUT_ARG2 ( 8)
#define OUT_ARG1 ( 4)
#define OUT_ARG0 ( 0) /* <- ExecuteMterpImpl esp + 0 */
/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rSELF IN_ARG0(%esp)
#define rPC %esi
#define rFP %edi
#define rINST %ebx
#define rINSTw %bx
#define rINSTbh %bh
#define rINSTbl %bl
#define rIBASE %edx
#define rREFS %ebp
#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
#define MTERP_LOGGING 0
/*
* "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
* be done *before* something throws.
*
* It's okay to do this more than once.
*
* NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
* dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
* offset into the code_items_[] array. For efficiency, we will "export" the
* current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
* to convert to a dex pc when needed.
*/
.macro EXPORT_PC
movl rPC, OFF_FP_DEX_PC_PTR(rFP)
.endm
/*
* Refresh handler table.
*/
.macro REFRESH_IBASE
movl rSELF, rIBASE
movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm
/*
* Refresh handler table.
* rIBASE lives in a caller-save register (%edx), so we must restore it after each call.
* It is also clobbered as an output of some 64-bit operations (like imul), so we
* should restore it in those cases as well.
*
* TODO: Consider spilling the IBase instead of restoring it from Thread structure.
*/
.macro RESTORE_IBASE
movl rSELF, rIBASE
movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm
/*
* If rSELF is already loaded in a register, we can refresh rIBASE directly from it.
*/
.macro RESTORE_IBASE_FROM_SELF _reg
movl THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
.endm
/*
* Refresh rINST.
* On entry to a handler, rINST does not contain the opcode number.
* However some utilities require the full value, so this macro
* restores the opcode number.
*/
.macro REFRESH_INST _opnum
movb rINSTbl, rINSTbh
movb MACRO_LITERAL(\_opnum), rINSTbl
.endm
/*
* Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
*/
.macro FETCH_INST
movzwl (rPC), rINST
.endm
/*
* Remove opcode from rINST, compute the address of handler and jump to it.
*/
.macro GOTO_NEXT
movzx rINSTbl,%eax
movzbl rINSTbh,rINST
shll MACRO_LITERAL(${handler_size_bits}), %eax
addl rIBASE, %eax
jmp *%eax
.endm
/*
* Advance rPC by instruction count.
*/
.macro ADVANCE_PC _count
leal 2*\_count(rPC), rPC
.endm
/*
* Advance rPC by instruction count, fetch instruction and jump to handler.
*/
.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
ADVANCE_PC \_count
FETCH_INST
GOTO_NEXT
.endm
/*
* Get/set the 32-bit value from a Dalvik register.
*/
#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
.macro GET_VREG _reg _vreg
movl (rFP,\_vreg,4), \_reg
.endm
/* Read wide value to xmm. */
.macro GET_WIDE_FP_VREG _reg _vreg
movq (rFP,\_vreg,4), \_reg
.endm
.macro SET_VREG _reg _vreg
movl \_reg, (rFP,\_vreg,4)
movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm
/* Write wide value from xmm. xmm is clobbered. */
.macro SET_WIDE_FP_VREG _reg _vreg
movq \_reg, (rFP,\_vreg,4)
pxor \_reg, \_reg
movq \_reg, (rREFS,\_vreg,4)
.endm
.macro SET_VREG_OBJECT _reg _vreg
movl \_reg, (rFP,\_vreg,4)
movl \_reg, (rREFS,\_vreg,4)
.endm
.macro GET_VREG_HIGH _reg _vreg
movl 4(rFP,\_vreg,4), \_reg
.endm
.macro SET_VREG_HIGH _reg _vreg
movl \_reg, 4(rFP,\_vreg,4)
movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
.macro CLEAR_REF _vreg
movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm
.macro CLEAR_WIDE_REF _vreg
movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
runtime/interpreter/mterp/x86/op_iget.S
%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
/*
* General instance field get.
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
EXPORT_PC
movzwl 2(rPC), %eax # eax <- 0000CCCC
movl %eax, OUT_ARG0(%esp) # field ref CCCC
movzbl rINSTbl, %ecx # ecx <- BA
sarl $$4, %ecx # ecx <- B
GET_VREG %ecx, %ecx
movl %ecx, OUT_ARG1(%esp) # the object pointer
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
call SYMBOL($helper)
movl rSELF, %ecx
RESTORE_IBASE_FROM_SELF %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $$0xf, rINSTbl # rINST <- A
.if $is_object
SET_VREG_OBJECT %eax, rINST # fp[A] <-value
.else
SET_VREG %eax, rINST # fp[A] <-value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
runtime/interpreter/mterp/x86/cvtfp_int.S
%default {"srcdouble":"1","tgtlong":"1"}
/* On fp to int conversions, Java requires that
* if the result > maxint, it should be clamped to maxint. If it is less
* than minint, it should be clamped to minint. If it is a nan, the result
* should be zero. Further, the rounding mode is to truncate. This model
* differs from what is delivered normally via the x86 fpu, so we have
* to play some games.
*/
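/*
 * For illustration only: the required semantics for the double -> int case in C.
 * The code below gets there differently (fistp with round-to-zero, then a fixup
 * of the 0x80000000 "indefinite" result), but the observable behavior matches.
 *
 *   #include <stdint.h>
 *   static int32_t d2i(double v) {
 *       if (v != v) return 0;                         // NaN -> 0
 *       if (v >= (double)INT32_MAX) return INT32_MAX; // clamp high
 *       if (v <= (double)INT32_MIN) return INT32_MIN; // clamp low
 *       return (int32_t)v;                            // truncate toward zero
 *   }
 */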
/* float/double to int/long vA, vB */
movzbl rINSTbl, %ecx # ecx <- A+
sarl $$4, rINST # rINST <- B
.if $srcdouble
fldl VREG_ADDRESS(rINST) # %st0 <- vB
.else
flds VREG_ADDRESS(rINST) # %st0 <- vB
.endif
ftst
fnstcw LOCAL0(%esp) # remember original rounding mode
movzwl LOCAL0(%esp), %eax
movb $$0xc, %ah
movw %ax, LOCAL0+2(%esp)
fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
andb $$0xf, %cl # ecx <- A
.if $tgtlong
fistpll VREG_ADDRESS(%ecx) # convert and store
.else
fistpl VREG_ADDRESS(%ecx) # convert and store
.endif
fldcw LOCAL0(%esp) # restore previous rounding mode
.if $tgtlong
movl $$0x80000000, %eax
xorl VREG_HIGH_ADDRESS(%ecx), %eax
orl VREG_ADDRESS(%ecx), %eax
.else
cmpl $$0x80000000, VREG_ADDRESS(%ecx)
.endif
je .L${opcode}_special_case # fix up result
.L${opcode}_finish:
xor %eax, %eax
mov %eax, VREG_REF_ADDRESS(%ecx)
.if $tgtlong
mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
.L${opcode}_special_case:
fnstsw %ax
sahf
jp .L${opcode}_isNaN
adcl $$-1, VREG_ADDRESS(%ecx)
.if $tgtlong
adcl $$-1, VREG_HIGH_ADDRESS(%ecx)
.endif
jmp .L${opcode}_finish
.L${opcode}_isNaN:
movl $$0, VREG_ADDRESS(%ecx)
.if $tgtlong
movl $$0, VREG_HIGH_ADDRESS(%ecx)
.endif
jmp .L${opcode}_finish
runtime/interpreter/mterp/x86/bindiv.S
%default {"result":"","special":"","rem":""}
/*
* 32-bit binary div/rem operation. Handles special case of op0=minint and
* op1=-1.
*/
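/*
 * For illustration only: the special cases in C. x86 idiv faults on both a
 * zero divisor and INT_MIN / -1, so the handler filters them first; throw_*
 * is an illustrative stand-in for common_errDivideByZero.
 *
 *   #include <stdint.h>
 *   extern void throw_div_by_zero(void);              // does not return
 *   static int32_t div_or_rem(int32_t num, int32_t den, int want_rem) {
 *       if (den == 0) throw_div_by_zero();
 *       if (num == INT32_MIN && den == -1)
 *           return want_rem ? 0 : INT32_MIN;          // the $special result
 *       return want_rem ? num % den : num / den;
 *   }
 */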
/* div/rem vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
GET_VREG %eax, %eax # eax <- vBB
GET_VREG %ecx, %ecx # ecx <- vCC
mov rIBASE, LOCAL0(%esp)
testl %ecx, %ecx
je common_errDivideByZero
movl %eax, %edx
orl %ecx, %edx
testl $$0xFFFFFF00, %edx # If both arguments are less
# than 8-bit and +ve
jz .L${opcode}_8 # Do 8-bit divide
testl $$0xFFFF0000, %edx # If both arguments are less
# than 16-bit and +ve
jz .L${opcode}_16 # Do 16-bit divide
cmpl $$-1, %ecx
jne .L${opcode}_32
cmpl $$0x80000000, %eax
jne .L${opcode}_32
movl $special, $result
jmp .L${opcode}_finish
.L${opcode}_32:
cltd
idivl %ecx
jmp .L${opcode}_finish
.L${opcode}_8:
div %cl # 8-bit divide otherwise.
# Remainder in %ah, quotient in %al
.if $rem
movl %eax, %edx
shr $$8, %edx
.else
andl $$0x000000FF, %eax
.endif
jmp .L${opcode}_finish
.L${opcode}_16:
xorl %edx, %edx # Clear %edx before divide
div %cx
.L${opcode}_finish:
SET_VREG $result, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
runtime/interpreter/mterp/x86/op_mul_long.S
/*
* Signed 64-bit integer multiply.
*
* We could definitely use more free registers for
* this code. We spill rINSTw (ebx),
* giving us eax, ebx, ecx and edx as computational
* temps. On top of that, we'll spill rPC (esi)
* for use as the vB pointer and rFP (edi) for use
* as the vC pointer. Yuck.
*
*/
/* mul-long vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
mov rPC, LOCAL0(%esp) # save Interpreter PC
mov rFP, LOCAL1(%esp) # save FP
mov rIBASE, LOCAL2(%esp) # save rIBASE
leal (rFP,%eax,4), %esi # esi <- &v[B]
leal (rFP,%ecx,4), rFP # rFP <- &v[C]
movl 4(%esi), %ecx # ecx <- Bmsw
imull (rFP), %ecx # ecx <- (Bmsw*Clsw)
movl 4(rFP), %eax # eax <- Cmsw
imull (%esi), %eax # eax <- (Cmsw*Blsw)
addl %eax, %ecx # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
movl (rFP), %eax # eax <- Clsw
mull (%esi) # rIBASE:eax <- (Clsw*Blsw)
mov LOCAL0(%esp), rPC # restore Interpreter PC
mov LOCAL1(%esp), rFP # restore FP
leal (%ecx,rIBASE), rIBASE # full result now in rIBASE:%eax
SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
mov LOCAL2(%esp), rIBASE # restore IBASE
SET_VREG %eax, rINST # v[AA] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
runtime/interpreter/mterp/x86/op_mul_long_2addr.S
/*
* Signed 64-bit integer multiply, 2-addr version
*
* We could definitely use more free registers for
* this code. We must spill %edx (rIBASE) because it
* is used by imul. We'll also spill rINST (ebx),
* giving us eax, ebx, ecx and rIBASE as computational
* temps. On top of that, we'll spill rPC (esi)
* for use as the vA pointer and rFP (edi) for use
* as the vB pointer. Yuck.
*/
/* mul-long/2addr vA, vB */
movzbl rINSTbl, %eax # eax <- BA
andb $$0xf, %al # eax <- A
CLEAR_WIDE_REF %eax # clear refs in advance
sarl $$4, rINST # rINST <- B
mov rPC, LOCAL0(%esp) # save Interpreter PC
mov rFP, LOCAL1(%esp) # save FP
mov rIBASE, LOCAL2(%esp) # save rIBASE
leal (rFP,%eax,4), %esi # esi <- &v[A]
leal (rFP,rINST,4), rFP # rFP <- &v[B]
movl 4(%esi), %ecx # ecx <- Amsw
imull (rFP), %ecx # ecx <- (Amsw*Blsw)
movl 4(rFP), %eax # eax <- Bmsw
imull (%esi), %eax # eax <- (Bmsw*Alsw)
addl %eax, %ecx # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
movl (rFP), %eax # eax <- Blsw
mull (%esi) # eax <- (Blsw*Alsw)
leal (%ecx,rIBASE), rIBASE # full result now in %edx:%eax
movl rIBASE, 4(%esi) # v[A+1] <- rIBASE
movl %eax, (%esi) # v[A] <- %eax
mov LOCAL0(%esp), rPC # restore Interpreter PC
mov LOCAL2(%esp), rIBASE # restore IBASE
mov LOCAL1(%esp), rFP # restore FP
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
runtime/interpreter/mterp/x86/op_ushr_long.S
/*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to mask off the low
* 6 bits of the shift distance. x86 shifts automatically mask off
* the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
* case specially.
*/
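/*
 * Illustrative C sketch of the semantics (not part of the handler): with the
 * distance masked to 6 bits, shifts of 32..63 move the high word into the
 * low word.
 *
 *   static uint64_t java_ushr64(uint64_t v, uint32_t cc) {
 *       uint32_t shift = cc & 63;  // Dalvik masks to 6 bits
 *       uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);
 *       if (shift & 32) { lo = hi >> (shift & 31); hi = 0; }
 *       else if (shift) { lo = (lo >> shift) | (hi << (32 - shift)); hi >>= shift; }
 *       return ((uint64_t)hi << 32) | lo;
 *   }
 */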
/* ushr-long vAA, vBB, vCC */
/* ecx gets shift count */
/* Need to spill rIBASE */
/* rINSTw gets AA */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
GET_VREG %ecx, %ecx # ecx <- vCC
GET_VREG %eax, %eax # eax <- v[BB+0]
shrdl rIBASE, %eax
shrl %cl, rIBASE
testb $$32, %cl
je 2f
movl rIBASE, %eax
xorl rIBASE, rIBASE
2:
SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
runtime/interpreter/mterp/x86_64/footer.S
/*
* ===========================================================================
* Common subroutines and data
* ===========================================================================
*/
.text
.align 2
/*
* We've detected a condition that will result in an exception, but the exception
* has not yet been thrown. Just bail out to the reference interpreter to deal with it.
* TUNING: for consistency, we may want to just go ahead and handle these here.
*/
common_errDivideByZero:
EXPORT_PC
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpLogDivideByZeroException)
#endif
jmp MterpCommonFallback
common_errArrayIndex:
EXPORT_PC
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpLogArrayIndexException)
#endif
jmp MterpCommonFallback
common_errNegativeArraySize:
EXPORT_PC
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpLogNegativeArraySizeException)
#endif
jmp MterpCommonFallback
common_errNoSuchMethod:
EXPORT_PC
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpLogNoSuchMethodException)
#endif
jmp MterpCommonFallback
common_errNullObject:
EXPORT_PC
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpLogNullObjectException)
#endif
jmp MterpCommonFallback
common_exceptionThrown:
EXPORT_PC
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpLogExceptionThrownException)
#endif
jmp MterpCommonFallback
MterpSuspendFallback:
EXPORT_PC
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movl THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
call SYMBOL(MterpLogSuspendFallback)
#endif
jmp MterpCommonFallback
/*
* If we're here, something is out of the ordinary. If there is a pending
* exception, handle it. Otherwise, roll back and retry with the reference
* interpreter.
*/
MterpPossibleException:
movq rSELF, %rcx
cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
jz MterpFallback
/* intentional fallthrough - handle pending exception. */
/*
* On return from a runtime helper routine, we've found a pending exception.
* Can we handle it here - or need to bail out to caller?
*
*/
MterpException:
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpHandleException)
testb %al, %al
jz MterpExceptionReturn
movq OFF_FP_CODE_ITEM(rFP), %rax
mov OFF_FP_DEX_PC(rFP), %ecx
leaq CODEITEM_INSNS_OFFSET(%rax), rPC
leaq (rPC, %rcx, 2), rPC
movq rPC, OFF_FP_DEX_PC_PTR(rFP)
/* Do we need to switch interpreters? */
call SYMBOL(MterpShouldSwitchInterpreters)
testb %al, %al
jnz MterpFallback
/* resume execution at catch block */
REFRESH_IBASE
FETCH_INST
GOTO_NEXT
/* NOTE: no fallthrough */
/*
* Common handling for branches with support for Jit profiling.
* On entry:
* rINST <= signed offset
* rPROFILE <= signed hotness countdown (expanded to 32 bits)
* condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
*
* We have quite a few different cases for branch profiling, OSR detection and
* suspend check support here.
*
* Taken backward branches:
* If profiling active, do hotness countdown and report if we hit zero.
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
* Is there a pending suspend request? If so, suspend.
*
* Taken forward branches and not-taken backward branches:
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
*
* Our most common case is expected to be a taken backward branch with active jit profiling,
* but no full OSR check and no pending suspend request.
* Next most common case is not-taken branch with no full OSR check.
*
*/
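/*
 * The same decision tree in C-like pseudocode (illustrative; the helper
 * names are placeholders, not real runtime entry points):
 *
 *   if (offset > 0) {                              // taken forward branch
 *       if (rPROFILE == JIT_CHECK_OSR) maybe_do_osr(offset);
 *   } else {                                       // taken backward branch
 *       if (rPROFILE == JIT_CHECK_OSR) maybe_do_osr(offset);
 *       else if (--rPROFILE == 0) report_hotness_batch();
 *       if (suspend_or_checkpoint_requested()) suspend_check();
 *   }
 */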
MterpCommonTakenBranch:
jg .L_forward_branch # don't add forward branches to hotness
/*
* We need to subtract 1 from positive values and we should not see 0 here,
* so we may use the result of the comparison with -1.
*/
#if JIT_CHECK_OSR != -1
# error "JIT_CHECK_OSR must be -1."
#endif
cmpl $$JIT_CHECK_OSR, rPROFILE
je .L_osr_check
decl rPROFILE
je .L_add_batch # counted down to zero - report
.L_resume_backward_branch:
movq rSELF, %rax
testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
REFRESH_IBASE
leaq (rPC, rINSTq, 2), rPC
FETCH_INST
jnz .L_suspend_request_pending
GOTO_NEXT
.L_suspend_request_pending:
EXPORT_PC
movq rSELF, OUT_ARG0
call SYMBOL(MterpSuspendCheck) # (self)
testb %al, %al
jnz MterpFallback
REFRESH_IBASE # might have changed during suspend
GOTO_NEXT
.L_no_count_backwards:
cmpl $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
jne .L_resume_backward_branch
.L_osr_check:
EXPORT_PC
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movq rINSTq, OUT_ARG2
call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
testb %al, %al
jz .L_resume_backward_branch
jmp MterpOnStackReplacement
.L_forward_branch:
cmpl $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
je .L_check_osr_forward
.L_resume_forward_branch:
leaq (rPC, rINSTq, 2), rPC
FETCH_INST
GOTO_NEXT
.L_check_osr_forward:
EXPORT_PC
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movq rINSTq, OUT_ARG2
call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
testb %al, %al
jz .L_resume_forward_branch
jmp MterpOnStackReplacement
.L_add_batch:
movl rPROFILE, %eax
movq OFF_FP_METHOD(rFP), OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
movq rSELF, OUT_ARG2
call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
movswl %ax, rPROFILE
jmp .L_no_count_backwards
/*
* Entered from the conditional branch handlers when OSR check request active on
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
EXPORT_PC
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movl $$2, OUT_32_ARG2
call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
testb %al, %al
jnz MterpOnStackReplacement
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/*
* On-stack replacement has happened, and now we've returned from the compiled method.
*/
MterpOnStackReplacement:
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movl rINST, OUT_32_ARG2
call SYMBOL(MterpLogOSR)
#endif
movl $$1, %eax
jmp MterpDone
/*
* Bail out to reference interpreter.
*/
MterpFallback:
EXPORT_PC
#if MTERP_LOGGING
movq rSELF, OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpLogFallback)
#endif
MterpCommonFallback:
xorl %eax, %eax
jmp MterpDone
/*
* On entry:
* uint32_t* rFP (should still be live, pointer to base of vregs)
*/
MterpExceptionReturn:
movl $$1, %eax
jmp MterpDone
MterpReturn:
movq OFF_FP_RESULT_REGISTER(rFP), %rdx
movq %rax, (%rdx)
movl $$1, %eax
MterpDone:
/*
* At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
* checking for OSR. If greater than zero, we might have unreported hotness to register
* (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
* should only reach zero immediately after a hotness decrement, and is then reset to either
* a negative special state or the new non-zero countdown value.
*/
testl rPROFILE, rPROFILE
jle MRestoreFrame # if > 0, we may have some counts to report.
movl %eax, rINST # stash return value
/* Report cached hotness counts */
movl rPROFILE, %eax
movq OFF_FP_METHOD(rFP), OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
movq rSELF, OUT_ARG2
call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
movl rINST, %eax # restore return value
/* pop up frame */
MRestoreFrame:
addq $$FRAME_SIZE, %rsp
.cfi_adjust_cfa_offset -FRAME_SIZE
/* Restore callee save register */
POP %r15
POP %r14
POP %r13
POP %r12
POP %rbp
POP %rbx
ret
.cfi_endproc
SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
runtime/interpreter/mterp/x86_64/entry.S
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Interpreter entry point.
*/
.text
.global SYMBOL(ExecuteMterpImpl)
FUNCTION_TYPE(ExecuteMterpImpl)
/*
* On entry:
* 0 Thread* self
* 1 code_item
* 2 ShadowFrame
* 3 JValue* result_register
*
*/
SYMBOL(ExecuteMterpImpl):
.cfi_startproc
.cfi_def_cfa rsp, 8
/* Spill callee save regs */
PUSH %rbx
PUSH %rbp
PUSH %r12
PUSH %r13
PUSH %r14
PUSH %r15
/* Allocate frame */
subq $$FRAME_SIZE, %rsp
.cfi_adjust_cfa_offset FRAME_SIZE
/* Remember the return register */
movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
/* Remember the code_item */
movq IN_ARG1, SHADOWFRAME_CODE_ITEM_OFFSET(IN_ARG2)
/* set up "named" registers */
movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
leaq (rFP, %rax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
leaq CODEITEM_INSNS_OFFSET(IN_ARG1), rPC
leaq (rPC, %rax, 2), rPC
EXPORT_PC
/* Starting ibase */
movq IN_ARG0, rSELF
REFRESH_IBASE
/* Set up for backwards branches & osr profiling */
movq OFF_FP_METHOD(rFP), OUT_ARG0
leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
call SYMBOL(MterpSetUpHotnessCountdown)
movswl %ax, rPROFILE
/* start executing the instruction at rPC */
FETCH_INST
GOTO_NEXT
/* NOTE: no fallthrough */
runtime/interpreter/mterp/x86_64/header.S
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Art assembly interpreter notes:
First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
handle invoke; allows higher-level code to create frame & shadow frame).
Once that's working, support direct entry code & eliminate shadow frame (and
excess locals allocation).
Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
base of the vreg array within the shadow frame. Access the other fields,
dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
the shadow frame mechanism of double-storing object references - via rFP &
number_of_vregs_.
*/
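/*
 Sketch of that double-store model in C terms (illustrative; names follow
 the registers defined below):

   uint32_t* fp;    // rFP: base of the vreg array in the shadow frame
   uint32_t* refs;  // rREFS: fp + number_of_vregs_
   // non-reference write: value into fp[v], zero into refs[v]
   // reference write:     the same value into both fp[v] and refs[v]
 */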
/*
x86_64 ABI general notes:
Caller save set:
rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
Callee save set:
rbx, rbp, r12-r15
Return regs:
32-bit in eax
64-bit in rax
fp on xmm0
First 8 fp parameters come in xmm0-xmm7.
First 6 non-fp parameters come in rdi, rsi, rdx, rcx, r8, r9.
Other parameters are passed on the stack, pushed right-to-left. On entry to the
target, the first stack param is at 8(%rsp).
Stack must be 16-byte aligned to support SSE in native code.
If we're not doing variable stack allocation (alloca), the frame pointer can be
eliminated and all arg references adjusted to be esp relative.
*/
/*
Mterp and x86_64 notes:
Some key interpreter variables will be assigned to registers.
nick reg purpose
rPROFILE rbp countdown register for jit profiling
rPC r12 interpreted program counter, used for fetching instructions
rFP r13 interpreted frame pointer, used for accessing locals and args
rINSTw bx first 16-bit code of current instruction
rINSTbl bl opcode portion of instruction word
rINSTbh bh high byte of inst word, usually contains src/tgt reg names
rIBASE r14 base of instruction handler table
rREFS r15 base of object references in shadow frame.
Notes:
o High order 16 bits of ebx must be zero on entry to handler
o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
o eax and ecx are scratch, rINSTw/ebx sometimes scratch
Macros are provided for common operations. Each macro MUST emit only
one instruction to make instruction-counting easier. They MUST NOT alter
unspecified registers or condition codes.
*/
/*
* This is a #include, not a %include, because we want the C pre-processor
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
/*
* Handle Mac compiler specifics.
*/
#if defined(__APPLE__)
#define MACRO_LITERAL(value) $$(value)
#define FUNCTION_TYPE(name)
#define SIZE(start,end)
// Mac OS' symbols have an _ prefix.
#define SYMBOL(name) _ ## name
#else
#define MACRO_LITERAL(value) $$value
#define FUNCTION_TYPE(name) .type name, @function
#define SIZE(start,end) .size start, .-end
#define SYMBOL(name) name
#endif
.macro PUSH _reg
pushq \_reg
.cfi_adjust_cfa_offset 8
.cfi_rel_offset \_reg, 0
.endm
.macro POP _reg
popq \_reg
.cfi_adjust_cfa_offset -8
.cfi_restore \_reg
.endm
/*
* Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
* to access other shadow frame fields, we need to use a backwards offset. Define those here.
*/
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
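/*
 * Worked example (illustrative): rFP points at shadow_frame->vregs_, so the
 * method pointer is reached with a negative displacement:
 *   OFF_FP_METHOD(rFP) == rFP + (SHADOWFRAME_METHOD_OFFSET - SHADOWFRAME_VREGS_OFFSET)
 */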
/* Frame size must be 16-byte aligned.
* Remember: 8 bytes for the return address + 6 * 8 bytes for spills.
*/
#define FRAME_SIZE 8
/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
#define IN_ARG3 %rcx
#define IN_ARG2 %rdx
#define IN_ARG1 %rsi
#define IN_ARG0 %rdi
/* Spill offsets relative to %rsp */
#define SELF_SPILL (FRAME_SIZE - 8)
/* Out Args */
#define OUT_ARG3 %rcx
#define OUT_ARG2 %rdx
#define OUT_ARG1 %rsi
#define OUT_ARG0 %rdi
#define OUT_32_ARG3 %ecx
#define OUT_32_ARG2 %edx
#define OUT_32_ARG1 %esi
#define OUT_32_ARG0 %edi
#define OUT_FP_ARG1 %xmm1
#define OUT_FP_ARG0 %xmm0
/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rSELF SELF_SPILL(%rsp)
#define rPC %r12
#define rFP %r13
#define rINST %ebx
#define rINSTq %rbx
#define rINSTw %bx
#define rINSTbh %bh
#define rINSTbl %bl
#define rIBASE %r14
#define rREFS %r15
#define rPROFILE %ebp
#define MTERP_LOGGING 0
/*
* "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
* be done *before* something throws.
*
* It's okay to do this more than once.
*
* NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
* dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
* offset into the code_items_[] array. For efficiency, we will "export" the
* current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
* to convert to a dex pc when needed.
*/
.macro EXPORT_PC
movq rPC, OFF_FP_DEX_PC_PTR(rFP)
.endm
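/*
 * Sketch of the conversion GetDexPC is expected to perform later
 * (illustrative; field names assumed from the code item layout):
 *
 *   uint32_t dex_pc = (uint16_t*)dex_pc_ptr - code_item->insns_;  // 16-bit code units
 */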
/*
* Refresh handler table.
 * rIBASE (the handler table base) may be clobbered or go stale across calls,
 * so we must restore it after each call. It is also used as the result of
 * some 64-bit operations (like imul), and we should restore it in such
 * cases as well.
*
*/
.macro REFRESH_IBASE
movq rSELF, rIBASE
movq THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm
/*
* Refresh rINST.
 * On entry to a handler, rINST does not contain the opcode number.
 * However, some utilities require the full instruction word, so this
 * macro restores the opcode byte.
*/
.macro REFRESH_INST _opnum
movb rINSTbl, rINSTbh
movb $$\_opnum, rINSTbl
.endm
/*
* Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
*/
.macro FETCH_INST
movzwq (rPC), rINSTq
.endm
/*
* Remove opcode from rINST, compute the address of handler and jump to it.
*/
.macro GOTO_NEXT
movzx rINSTbl,%eax
movzbl rINSTbh,rINST
shll MACRO_LITERAL(${handler_size_bits}), %eax
addq rIBASE, %rax
jmp *%rax
.endm
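/*
 * Illustrative address computation performed above: each handler occupies a
 * fixed-size slot, so
 *   handler = rIBASE + (opcode << ${handler_size_bits})
 */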
/*
* Advance rPC by instruction count.
*/
.macro ADVANCE_PC _count
leaq 2*\_count(rPC), rPC
.endm
/*
* Advance rPC by instruction count, fetch instruction and jump to handler.
*/
.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
ADVANCE_PC \_count
FETCH_INST
GOTO_NEXT
.endm
/*
* Get/set the 32-bit value from a Dalvik register.
*/
#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
.macro GET_VREG _reg _vreg
movl (rFP,\_vreg,4), \_reg
.endm
/* Read wide value. */
.macro GET_WIDE_VREG _reg _vreg
movq (rFP,\_vreg,4), \_reg
.endm
.macro SET_VREG _reg _vreg
movl \_reg, (rFP,\_vreg,4)
movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm
/* Write wide value. reg is clobbered. */
.macro SET_WIDE_VREG _reg _vreg
movq \_reg, (rFP,\_vreg,4)
xorq \_reg, \_reg
movq \_reg, (rREFS,\_vreg,4)
.endm
.macro SET_VREG_OBJECT _reg _vreg
movl \_reg, (rFP,\_vreg,4)
movl \_reg, (rREFS,\_vreg,4)
.endm
.macro GET_VREG_HIGH _reg _vreg
movl 4(rFP,\_vreg,4), \_reg
.endm
.macro SET_VREG_HIGH _reg _vreg
movl \_reg, 4(rFP,\_vreg,4)
movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
.macro CLEAR_REF _vreg
movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm
.macro CLEAR_WIDE_REF _vreg
movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
runtime/interpreter/mterp/x86_64/op_iget.S
%default { "is_object":"0", "helper":"artGet32InstanceFromCode", "wide":"0"}
/*
* General instance field get.
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
*/
EXPORT_PC
movzbq rINSTbl, %rcx # rcx <- BA
movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
sarl $$4, %ecx # ecx <- B
GET_VREG OUT_32_ARG1, %rcx # the object pointer
movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
movq rSELF, OUT_ARG3
call SYMBOL($helper)
movq rSELF, %rcx
cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
jnz MterpException # bail out
andb $$0xf, rINSTbl # rINST <- A
.if $is_object
SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
.else
.if $wide
SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
.else
SET_VREG %eax, rINSTq # fp[A] <- value
.endif
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
runtime/interpreter/mterp/x86_64/bindiv2addr.S
%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
/*
* Binary div/rem operation (32-bit, or 64-bit when "wide"). Handles the special case of op1 = -1.
*/
/* div/rem/2addr vA, vB */
movl rINST, %ecx # rcx <- BA
sarl $$4, %ecx # rcx <- B
andb $$0xf, rINSTbl # rINST <- A
.if $wide
GET_WIDE_VREG %rax, rINSTq # rax <- vA
GET_WIDE_VREG $second, %rcx # rcx <- vB
.else
GET_VREG %eax, rINSTq # eax <- vA
GET_VREG $second, %rcx # ecx <- vB
.endif
test${suffix} $second, $second
jz common_errDivideByZero
cmp${suffix} $$-1, $second
je 2f
$ext # rdx:rax <- sign-extension of rax
idiv${suffix} $second
1:
.if $wide
SET_WIDE_VREG $result, rINSTq # vA <- result
.else
SET_VREG $result, rINSTq # vA <- result
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
2:
.if $rem
xor${suffix} $result, $result
.else
neg${suffix} $result
.endif
jmp 1b
runtime/interpreter/mterp/x86_64/cvtfp_int.S
%default {"fp_suffix":"","i_suffix":"","max_const":"","result_reg":"","wide":""}
/* On fp to int conversions, Java requires that
* if the result > maxint, it should be clamped to maxint. If it is less
* than minint, it should be clamped to minint. If it is a nan, the result
* should be zero. Further, the rounding mode is to truncate.
*/
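/*
 * How the SSE sequence below gets there (descriptive, not normative): the
 * max constant is preloaded into the result register; comis clears CF when
 * vB >= max (keep the clamp via jae), sets PF for NaN (zero via jp), and
 * otherwise cvtts truncates -- negative overflow produces the x86 "integer
 * indefinite" value, which is exactly minint.
 */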
/* float/double to int/long vA, vB */
movl rINST, %ecx # rcx <- A+
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
movs${fp_suffix} VREG_ADDRESS(rINSTq), %xmm0
mov${i_suffix} ${max_const}, ${result_reg}
cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
comis${fp_suffix} %xmm1, %xmm0
jae 1f
jp 2f
cvtts${fp_suffix}2si${i_suffix} %xmm0, ${result_reg}
jmp 1f
2:
xor${i_suffix} ${result_reg}, ${result_reg}
1:
.if $wide
SET_WIDE_VREG ${result_reg}, %rcx
.else
SET_VREG ${result_reg}, %rcx
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
runtime/interpreter/mterp/x86_64/bindiv.S
%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
/*
* Binary div/rem operation (32-bit, or 64-bit when "wide"). Handles the special case of op1 = -1.
*/
/* div/rem vAA, vBB, vCC */
movzbq 2(rPC), %rax # rax <- BB
movzbq 3(rPC), %rcx # rcx <- CC
.if $wide
GET_WIDE_VREG %rax, %rax # rax <- vBB
GET_WIDE_VREG $second, %rcx # rcx <- vCC
.else
GET_VREG %eax, %rax # eax <- vBB
GET_VREG $second, %rcx # ecx <- vCC
.endif
test${suffix} $second, $second
jz common_errDivideByZero
cmp${suffix} $$-1, $second
je 2f
$ext # rdx:rax <- sign-extension of rax
idiv${suffix} $second
1:
.if $wide
SET_WIDE_VREG $result, rINSTq # vAA <- result
.else
SET_VREG $result, rINSTq # vAA <- result
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
2:
.if $rem
xor${suffix} $result, $result
.else
neg${suffix} $result
.endif
jmp 1b
runtime/interpreter/mterp/mips/op_cmpl_float.S
%default { "naninst":"li rTEMP, -1" }
/*
* Compare two floating-point values. Puts 0, 1, or -1 into the
* destination register rTEMP based on the results of the comparison.
*
* Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
* on what value we'd like to return when one of the operands is NaN.
*
* The operation we're implementing is:
* if (x == y)
* return 0;
* else if (x < y)
* return -1;
* else if (x > y)
* return 1;
* else
* return {-1 or 1}; // one or both operands were NaN
*
* for: cmpl-float, cmpg-float
*/
/* op vAA, vBB, vCC */
/* "clasic" form */
FETCH(a0, 1) # a0 <- CCBB
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8
GET_VREG_F(ft0, a2)
GET_VREG_F(ft1, a3)
#ifdef MIPS32REVGE6
cmp.ult.s ft2, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1nez ft2, .L${opcode}_finish
cmp.ult.s ft2, ft1, ft0
li rTEMP, 1
bc1nez ft2, .L${opcode}_finish
cmp.eq.s ft2, ft0, ft1
li rTEMP, 0
bc1nez ft2, .L${opcode}_finish
b .L${opcode}_nan
#else
c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
li rTEMP, -1
bc1t fcc0, .L${opcode}_finish
c.olt.s fcc0, ft1, ft0
li rTEMP, 1
bc1t fcc0, .L${opcode}_finish
c.eq.s fcc0, ft0, ft1
li rTEMP, 0
bc1t fcc0, .L${opcode}_finish
b .L${opcode}_nan
#endif
%break
.L${opcode}_nan:
$naninst
.L${opcode}_finish:
GET_OPA(rOBJ)
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
runtime/interpreter/mterp/mips/op_shr_long.S
/*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to mask off the low
* 6 bits of the shift distance.
*/
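/*
 * Illustrative C sketch (not part of the handler; assumes arithmetic >> on
 * signed types, as on the targets ART supports):
 *
 *   static int64_t java_shr64(int64_t v, uint32_t cc) {
 *       return v >> (cc & 63);  // Dalvik masks the distance to 6 bits
 *   }
 */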
/* shr-long vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
GET_OPA(t3) # t3 <- AA
and a3, a0, 255 # a3 <- BB
srl a0, a0, 8 # a0 <- CC
EAS2(a3, rFP, a3) # a3 <- &fp[BB]
GET_VREG(a2, a0) # a2 <- vCC
LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
andi v0, a2, 0x20 # shift & 0x20
sra v1, a1, a2 # rhi<- ahi >> (shift&31)
bnez v0, .L${opcode}_finish
srl v0, a0, a2 # rlo<- alo >> (shift&31)
not a0, a2 # alo<- 31-shift (shift is 5b)
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/vAA+1 <- v0/v1
%break
.L${opcode}_finish:
sra a3, a1, 31 # a3<- sign(ah)
SET_VREG64_GOTO(v1, a3, t3, t0) # vAA/vAA+1 <- rlo/rhi