text
stringlengths
1
1.05M
db 100; define byte dw 0xaa55; define two byte dd 0x12345678; define four byte db "hello world!!!" ,12,13; define string
;; ;; Copyright (c) Microsoft. All rights reserved. ;; Licensed under the MIT license. See LICENSE file in the project root for full license information. ;; #include "AsmMacros.h" TEXTAREA ;;----------------------------------------------------------------------------- ;; This helper routine enregisters the appropriate arguments and makes the ;; actual call. ;;----------------------------------------------------------------------------- ;;void RhCallDescrWorker(CallDescrData * pCallDescrData); NESTED_ENTRY RhCallDescrWorker PROLOG_PUSH {r4,r5,r7,lr} PROLOG_STACK_SAVE r7 mov r5,r0 ; save pCallDescrData in r5 ldr r1, [r5,#OFFSETOF__CallDescrData__numStackSlots] cbz r1, Ldonestack ;; Add frame padding to ensure frame size is a multiple of 8 (a requirement of the OS ABI). ;; We push four registers (above) and numStackSlots arguments (below). If this comes to an odd number ;; of slots we must pad with another. This simplifies to "if the low bit of numStackSlots is set, ;; extend the stack another four bytes". lsls r2, r1, #2 and r3, r2, #4 sub sp, sp, r3 ;; This loop copies numStackSlots words ;; from [pSrcEnd-4,pSrcEnd-8,...] to [sp-4,sp-8,...] ldr r0, [r5,#OFFSETOF__CallDescrData__pSrc] add r0,r0,r2 Lstackloop ldr r2, [r0,#-4]! str r2, [sp,#-4]! subs r1, r1, #1 bne Lstackloop Ldonestack ;; If FP arguments are supplied in registers (r3 != NULL) then initialize all of them from the pointer ;; given in r3. Do not use "it" since it faults in floating point even when the instruction is not executed. ldr r3, [r5,#OFFSETOF__CallDescrData__pFloatArgumentRegisters] cbz r3, LNoFloatingPoint vldm r3, {s0-s15} LNoFloatingPoint ;; Copy [pArgumentRegisters, ..., pArgumentRegisters + 12] ;; into r0, ..., r3 ldr r4, [r5,#OFFSETOF__CallDescrData__pArgumentRegisters] ldm r4, {r0-r3} CHECK_STACK_ALIGNMENT ;; call pTarget ;; Note that remoting expect target in r4. 
ldr r4, [r5,#OFFSETOF__CallDescrData__pTarget] blx r4 LABELED_RETURN_ADDRESS ReturnFromCallDescrThunk ldr r3, [r5,#OFFSETOF__CallDescrData__fpReturnSize] ;; Save FP return value if appropriate cbz r3, LFloatingPointReturnDone ;; Float return case ;; Do not use "it" since it faults in floating point even when the instruction is not executed. cmp r3, #4 bne LNoFloatReturn vmov r0, s0 b LFloatingPointReturnDone LNoFloatReturn ;; Double return case ;; Do not use "it" since it faults in floating point even when the instruction is not executed. cmp r3, #8 bne LNoDoubleReturn vmov r0, r1, s0, s1 b LFloatingPointReturnDone LNoDoubleReturn ; Unlike desktop returnValue is a pointer to a return buffer, not the buffer itself ldr r2, [r5, #OFFSETOF__CallDescrData__pReturnBuffer] cmp r3, #16 bne LNoFloatHFAReturn vstm r2, {s0-s3} b LReturnDone LNoFloatHFAReturn cmp r3, #32 bne LNoDoubleHFAReturn vstm r2, {d0-d3} b LReturnDone LNoDoubleHFAReturn EMIT_BREAKPOINT ; Unreachable LFloatingPointReturnDone ; Unlike desktop returnValue is a pointer to a return buffer, not the buffer itself ldr r5, [r5, #OFFSETOF__CallDescrData__pReturnBuffer] ;; Save return value into retbuf str r0, [r5, #(0)] str r1, [r5, #(4)] LReturnDone #ifdef _DEBUG ;; trash the floating point registers to ensure that the HFA return values ;; won't survive by accident vldm sp, {d0-d3} #endif EPILOG_STACK_RESTORE r7 EPILOG_POP {r4,r5,r7,pc} NESTED_END RhCallDescrWorker END
; A033486: a(n) = n*(n + 1)*(n + 2)*(n + 3)/2. ; 0,12,60,180,420,840,1512,2520,3960,5940,8580,12012,16380,21840,28560,36720,46512,58140,71820,87780,106260,127512,151800,179400,210600,245700,285012,328860,377580,431520,491040,556512,628320,706860,792540,885780,987012,1096680,1215240,1343160,1480920,1629012,1787940,1958220,2140380,2334960,2542512,2763600,2998800,3248700,3513900,3795012,4092660,4407480,4740120,5091240,5461512,5851620,6262260,6694140,7147980,7624512,8124480,8648640,9197760,9772620,10374012,11002740,11659620,12345480,13061160 sub $1,$0 bin $1,4 mul $1,12 mov $0,$1
// Copyright 2016-2021 Doug Moen // Licensed under the Apache License, version 2.0 // See accompanying file LICENSE or https://www.apache.org/licenses/LICENSE-2.0 #include <libcurv/io/builtin.h> #include <libcurv/io/cpp_program.h> #include <libcurv/analyser.h> #include <libcurv/context.h> #include <libcurv/exception.h> #include <libcurv/frame.h> #include <libcurv/function.h> #include <libcurv/meanings.h> #include <libcurv/sc_compiler.h> #include <libcurv/record.h> namespace curv { namespace io { // Run a unit test by compiling it to C++, thus testing the SC compiler's // C++ code generator. void run_cpp_test(const Context& cx, Source_State& ss, Shared<const Function> func) { Cpp_Program cpp{ss}; cpp.define_function("test", SC_Type::Bool(), SC_Type::Bool(), func, cx); cpp.compile(cx); auto test = (void(*)(const bool*,bool*))cpp.get_function("test"); bool arg = true; bool result = false; test(&arg, &result); if (!result) { cpp.preserve_tempfile(); throw Exception(cx, stringify( "assertion failed in C++; see ",cpp.path_)); } } struct SC_Test_Action : public Just_Action { Shared<Operation> arg_; SC_Test_Action( Shared<const Phrase> syntax, Shared<Operation> arg) : Just_Action(move(syntax)), arg_(move(arg)) {} virtual void exec(Frame& f, Executor&) const override { Value arg = arg_->eval(f); At_Phrase cx(*arg_->syntax_, f); auto rec = arg.to<Record>(cx); Value nil = Value{make_tail_array<List>(0)}; rec->each_field(cx, [&](Symbol_Ref name, Value val)->void { At_Field test_cx{name.c_str(), cx}; auto func = maybe_function(val, test_cx); if (func == nullptr) throw Exception(test_cx, stringify(val," is not a function")); bool test_result = call_func({func}, nil, syntax_, f) .to_bool(test_cx); if (!test_result) throw Exception(test_cx, "assertion failed in interpreter"); run_cpp_test(test_cx, f.sstate_, func); }); } }; struct SC_Test_Metafunction : public Metafunction { using Metafunction::Metafunction; virtual Shared<Meaning> call(const Call_Phrase& ph, Environ& env) 
override { return make<SC_Test_Action>(share(ph), analyse_op(*ph.arg_, env)); } virtual void print_help(std::ostream& out) const override { out << "sc_test {tagname1: testfunction1, tagname2: testfunction2, ...}\n" " Each test function maps an argument (which is ignored) onto a boolean value.\n" " Execute each test function, and if any test function returns false, report an assertion failure,\n" " mentioning the tagname in the error message. Each testfunction is evaluated twice,\n" " once in the interpreter, and once by compiling to native code using the SubCurv compiler.\n" " This is used to write SubCurv unit tests.\n"; } }; void add_builtins(System_Impl& sys) { sys.std_namespace_[make_symbol("sc_test")] = make<Builtin_Meaning<SC_Test_Metafunction>>(); } }} // namespaces
; A052640: E.g.f. x*(1-x)/(1-2*x-x^2+x^3). ; Submitted by Jon Maiga ; 0,1,2,18,144,1680,22320,352800,6330240,128096640,2877638400,71131737600,1917922406400,56024506137600,1762396334899200,59401108166400000,2135568241078272000,81575844571533312000 mov $2,$0 seq $0,52994 ; Expansion of 2x(1-x)/(1-2x-x^2+x^3). lpb $2 mul $0,$2 sub $2,1 lpe div $0,2
; A034692: a(n+1) = smallest number that is not the sum of a(n) or fewer terms of a(1),...,a(n). ; Submitted by Christian Krause ; 1,2,5,23,455,197447,38895873863,1512881323770591465287,2288809899755012359449577849239960517955399,5238650757216549725917660910593720468102050623548424798300898740084824366637074960199 mov $2,1 mov $4,1 lpb $0 sub $0,1 add $4,$2 mov $3,$4 mul $3,$2 add $2,$3 lpe mov $0,$4
############################################################################### # Copyright 2019 Intel Corporation # All Rights Reserved. # # If this software was obtained under the Intel Simplified Software License, # the following terms apply: # # The source code, information and material ("Material") contained herein is # owned by Intel Corporation or its suppliers or licensors, and title to such # Material remains with Intel Corporation or its suppliers or licensors. The # Material contains proprietary information of Intel or its suppliers and # licensors. The Material is protected by worldwide copyright laws and treaty # provisions. No part of the Material may be used, copied, reproduced, # modified, published, uploaded, posted, transmitted, distributed or disclosed # in any way without Intel's prior express written permission. No license under # any patent, copyright or other intellectual property rights in the Material # is granted to or conferred upon you, either expressly, by implication, # inducement, estoppel or otherwise. Any license under such intellectual # property rights must be express and approved by Intel in writing. # # Unless otherwise agreed by Intel in writing, you may not remove or alter this # notice or any other notice embedded in Materials by Intel or Intel's # suppliers or licensors in any way. # # # If this software was obtained under the Apache License, Version 2.0 (the # "License"), the following terms apply: # # You may not use this file except in compliance with the License. You may # obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. 
############################################################################### .section .note.GNU-stack,"",%progbits .text .p2align 6, 0x90 .globl n0_cpAdd_BNU .type n0_cpAdd_BNU, @function n0_cpAdd_BNU: movslq %ecx, %rcx xor %rax, %rax cmp $(2), %rcx jge .LADD_GE2gas_1 add %rax, %rax movq (%rsi), %r8 adcq (%rdx), %r8 movq %r8, (%rdi) sbb %rax, %rax jmp .LFINALgas_1 .LADD_GE2gas_1: jg .LADD_GT2gas_1 add %rax, %rax movq (%rsi), %r8 adcq (%rdx), %r8 movq (8)(%rsi), %r9 adcq (8)(%rdx), %r9 movq %r8, (%rdi) movq %r9, (8)(%rdi) sbb %rax, %rax jmp .LFINALgas_1 .LADD_GT2gas_1: cmp $(4), %rcx jge .LADD_GE4gas_1 add %rax, %rax movq (%rsi), %r8 adcq (%rdx), %r8 movq (8)(%rsi), %r9 adcq (8)(%rdx), %r9 movq (16)(%rsi), %r10 adcq (16)(%rdx), %r10 movq %r8, (%rdi) movq %r9, (8)(%rdi) movq %r10, (16)(%rdi) sbb %rax, %rax jmp .LFINALgas_1 .LADD_GE4gas_1: jg .LADD_GT4gas_1 add %rax, %rax movq (%rsi), %r8 adcq (%rdx), %r8 movq (8)(%rsi), %r9 adcq (8)(%rdx), %r9 movq (16)(%rsi), %r10 adcq (16)(%rdx), %r10 movq (24)(%rsi), %r11 adcq (24)(%rdx), %r11 movq %r8, (%rdi) movq %r9, (8)(%rdi) movq %r10, (16)(%rdi) movq %r11, (24)(%rdi) sbb %rax, %rax jmp .LFINALgas_1 .LADD_GT4gas_1: cmp $(6), %rcx jge .LADD_GE6gas_1 add %rax, %rax movq (%rsi), %r8 adcq (%rdx), %r8 movq (8)(%rsi), %r9 adcq (8)(%rdx), %r9 movq (16)(%rsi), %r10 adcq (16)(%rdx), %r10 movq (24)(%rsi), %r11 adcq (24)(%rdx), %r11 movq (32)(%rsi), %rcx adcq (32)(%rdx), %rcx movq %r8, (%rdi) movq %r9, (8)(%rdi) movq %r10, (16)(%rdi) movq %r11, (24)(%rdi) movq %rcx, (32)(%rdi) sbb %rax, %rax jmp .LFINALgas_1 .LADD_GE6gas_1: jg .LADD_GT6gas_1 add %rax, %rax movq (%rsi), %r8 adcq (%rdx), %r8 movq (8)(%rsi), %r9 adcq (8)(%rdx), %r9 movq (16)(%rsi), %r10 adcq (16)(%rdx), %r10 movq (24)(%rsi), %r11 adcq (24)(%rdx), %r11 movq (32)(%rsi), %rcx adcq (32)(%rdx), %rcx movq (40)(%rsi), %rsi adcq (40)(%rdx), %rsi movq %r8, (%rdi) movq %r9, (8)(%rdi) movq %r10, (16)(%rdi) movq %r11, (24)(%rdi) movq %rcx, (32)(%rdi) movq %rsi, (40)(%rdi) sbb %rax, 
%rax jmp .LFINALgas_1 .LADD_GT6gas_1: cmp $(8), %rcx jge .LADD_GE8gas_1 .LADD_EQ7gas_1: add %rax, %rax movq (%rsi), %r8 adcq (%rdx), %r8 movq (8)(%rsi), %r9 adcq (8)(%rdx), %r9 movq (16)(%rsi), %r10 adcq (16)(%rdx), %r10 movq (24)(%rsi), %r11 adcq (24)(%rdx), %r11 movq (32)(%rsi), %rcx adcq (32)(%rdx), %rcx movq %r8, (%rdi) movq (40)(%rsi), %r8 adcq (40)(%rdx), %r8 movq (48)(%rsi), %rsi adcq (48)(%rdx), %rsi movq %r9, (8)(%rdi) movq %r10, (16)(%rdi) movq %r11, (24)(%rdi) movq %rcx, (32)(%rdi) movq %r8, (40)(%rdi) movq %rsi, (48)(%rdi) sbb %rax, %rax jmp .LFINALgas_1 .LADD_GE8gas_1: jg .LADD_GT8gas_1 add %rax, %rax movq (%rsi), %r8 adcq (%rdx), %r8 movq (8)(%rsi), %r9 adcq (8)(%rdx), %r9 movq (16)(%rsi), %r10 adcq (16)(%rdx), %r10 movq (24)(%rsi), %r11 adcq (24)(%rdx), %r11 movq (32)(%rsi), %rcx adcq (32)(%rdx), %rcx movq %r8, (%rdi) movq (40)(%rsi), %r8 adcq (40)(%rdx), %r8 movq %r9, (8)(%rdi) movq (48)(%rsi), %r9 adcq (48)(%rdx), %r9 movq (56)(%rsi), %rsi adcq (56)(%rdx), %rsi movq %r10, (16)(%rdi) movq %r11, (24)(%rdi) movq %rcx, (32)(%rdi) movq %r8, (40)(%rdi) movq %r9, (48)(%rdi) movq %rsi, (56)(%rdi) sbb %rax, %rax jmp .LFINALgas_1 .LADD_GT8gas_1: mov %rax, %r8 mov %rcx, %rax and $(3), %rcx xor %rax, %rcx lea (%rsi,%rcx,8), %rsi lea (%rdx,%rcx,8), %rdx lea (%rdi,%rcx,8), %rdi neg %rcx add %r8, %r8 jmp .LADD_GLOOPgas_1 .p2align 6, 0x90 .LADD_GLOOPgas_1: movq (%rsi,%rcx,8), %r8 movq (8)(%rsi,%rcx,8), %r9 movq (16)(%rsi,%rcx,8), %r10 movq (24)(%rsi,%rcx,8), %r11 adcq (%rdx,%rcx,8), %r8 adcq (8)(%rdx,%rcx,8), %r9 adcq (16)(%rdx,%rcx,8), %r10 adcq (24)(%rdx,%rcx,8), %r11 movq %r8, (%rdi,%rcx,8) movq %r9, (8)(%rdi,%rcx,8) movq %r10, (16)(%rdi,%rcx,8) movq %r11, (24)(%rdi,%rcx,8) lea (4)(%rcx), %rcx jrcxz .LADD_LLAST0gas_1 jmp .LADD_GLOOPgas_1 .LADD_LLAST0gas_1: sbb %rcx, %rcx and $(3), %rax jz .LFIN0gas_1 .LADD_LLOOPgas_1: test $(2), %rax jz .LADD_LLAST1gas_1 add %rcx, %rcx movq (%rsi), %r8 movq (8)(%rsi), %r9 adcq (%rdx), %r8 adcq (8)(%rdx), %r9 movq %r8, (%rdi) 
movq %r9, (8)(%rdi) sbb %rcx, %rcx test $(1), %rax jz .LFIN0gas_1 add $(16), %rsi add $(16), %rdx add $(16), %rdi .LADD_LLAST1gas_1: add %rcx, %rcx movq (%rsi), %r8 adcq (%rdx), %r8 movq %r8, (%rdi) sbb %rcx, %rcx .LFIN0gas_1: mov %rcx, %rax .LFINALgas_1: neg %rax ret .Lfe1: .size n0_cpAdd_BNU, .Lfe1-(n0_cpAdd_BNU)
.global s_prepare_buffers s_prepare_buffers: push %r10 push %r14 push %r15 push %rax push %rcx push %rdi push %rdx push %rsi lea addresses_A_ht+0xf57f, %rsi lea addresses_UC_ht+0x9ecf, %rdi nop nop nop nop inc %r14 mov $101, %rcx rep movsb and $1978, %rax lea addresses_WC_ht+0x1157f, %r15 nop nop nop nop nop add $16515, %rcx movups (%r15), %xmm6 vpextrq $0, %xmm6, %rax nop nop nop nop nop sub $7941, %rcx lea addresses_normal_ht+0xfa0f, %rsi lea addresses_WC_ht+0x7e0f, %rdi nop nop xor %rdx, %rdx mov $115, %rcx rep movsq nop nop nop inc %r14 lea addresses_WC_ht+0x197f, %rsi lea addresses_A_ht+0x1ca3f, %rdi nop nop nop nop nop xor %r10, %r10 mov $102, %rcx rep movsq nop and $48786, %r10 lea addresses_WC_ht+0x1657f, %r10 clflush (%r10) nop nop nop dec %r14 movups (%r10), %xmm6 vpextrq $1, %xmm6, %r15 nop nop cmp %r15, %r15 lea addresses_D_ht+0x6fa7, %rsi nop inc %r10 mov (%rsi), %edi dec %rdi lea addresses_WC_ht+0x1f3f, %rdx nop nop cmp $4816, %r14 movb (%rdx), %r15b nop nop dec %rsi lea addresses_A_ht+0x81df, %rcx nop nop nop nop nop inc %rdi mov $0x6162636465666768, %r14 movq %r14, %xmm2 and $0xffffffffffffffc0, %rcx movaps %xmm2, (%rcx) and %rdx, %rdx lea addresses_A_ht+0x1257f, %r10 nop nop nop add %rsi, %rsi and $0xffffffffffffffc0, %r10 movntdqa (%r10), %xmm1 vpextrq $1, %xmm1, %r15 nop nop nop nop inc %rcx lea addresses_WT_ht+0x47f, %rsi lea addresses_WT_ht+0x9bdf, %rdi cmp %rdx, %rdx mov $117, %rcx rep movsw and %rdx, %rdx lea addresses_normal_ht+0xbe22, %rsi lea addresses_A_ht+0x1267f, %rdi nop nop xor $35986, %rax mov $47, %rcx rep movsb nop xor %rax, %rax lea addresses_D_ht+0x1d20b, %r15 nop nop nop xor $21039, %rcx mov $0x6162636465666768, %rdi movq %rdi, %xmm0 movups %xmm0, (%r15) sub $34692, %rsi lea addresses_A_ht+0x851f, %rdi nop nop xor $28042, %r14 mov (%rdi), %eax nop nop nop dec %r15 pop %rsi pop %rdx pop %rdi pop %rcx pop %rax pop %r15 pop %r14 pop %r10 ret .global s_faulty_load s_faulty_load: push %r12 push %r13 push %r15 push %r8 push %r9 push 
%rbp push %rdi // Store lea addresses_WT+0x16d7f, %r13 nop cmp $35779, %rdi movw $0x5152, (%r13) // Exception!!! nop nop mov (0), %r13 nop nop cmp %rbp, %rbp // Store mov $0x250de60000000bff, %r13 nop nop nop nop xor %rdi, %rdi mov $0x5152535455565758, %r12 movq %r12, %xmm0 vmovups %ymm0, (%r13) nop add %rdi, %rdi // Store lea addresses_WT+0x14bab, %r13 nop nop nop and $23453, %rbp mov $0x5152535455565758, %rdi movq %rdi, %xmm5 vmovups %ymm5, (%r13) sub $37112, %rbp // Store lea addresses_D+0x1737f, %r8 clflush (%r8) nop nop nop nop dec %r9 mov $0x5152535455565758, %r15 movq %r15, %xmm7 vmovups %ymm7, (%r8) nop nop nop nop nop xor %r12, %r12 // Store lea addresses_WT+0x757f, %r8 nop nop nop nop xor %rdi, %rdi mov $0x5152535455565758, %rbp movq %rbp, %xmm5 movups %xmm5, (%r8) nop nop sub $61838, %r15 // Store lea addresses_RW+0x18885, %rdi nop dec %r12 movb $0x51, (%rdi) nop nop nop nop and %r9, %r9 // Store mov $0x6256630000000d7f, %r15 nop and %rbp, %rbp mov $0x5152535455565758, %rdi movq %rdi, %xmm1 vmovups %ymm1, (%r15) nop nop nop nop inc %r15 // Faulty Load mov $0x6256630000000d7f, %rbp nop nop cmp %r12, %r12 movb (%rbp), %r8b lea oracles, %r9 and $0xff, %r8 shlq $12, %r8 mov (%r9,%r8,1), %r8 pop %rdi pop %rbp pop %r9 pop %r8 pop %r15 pop %r13 pop %r12 ret /* <gen_faulty_load> [REF] {'src': {'type': 'addresses_NC', 'same': False, 'size': 4, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'dst': {'type': 'addresses_WT', 'same': False, 'size': 2, 'congruent': 11, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_NC', 'same': False, 'size': 32, 'congruent': 7, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_WT', 'same': False, 'size': 32, 'congruent': 2, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_D', 'same': False, 'size': 32, 'congruent': 8, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_WT', 'same': False, 'size': 16, 'congruent': 9, 
'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_RW', 'same': False, 'size': 1, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_NC', 'same': True, 'size': 32, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} [Faulty Load] {'src': {'type': 'addresses_NC', 'same': True, 'size': 1, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} <gen_prepare_buffer> {'src': {'type': 'addresses_A_ht', 'congruent': 11, 'same': False}, 'dst': {'type': 'addresses_UC_ht', 'congruent': 4, 'same': False}, 'OP': 'REPM'} {'src': {'type': 'addresses_WC_ht', 'same': False, 'size': 16, 'congruent': 9, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'src': {'type': 'addresses_normal_ht', 'congruent': 2, 'same': False}, 'dst': {'type': 'addresses_WC_ht', 'congruent': 1, 'same': False}, 'OP': 'REPM'} {'src': {'type': 'addresses_WC_ht', 'congruent': 10, 'same': False}, 'dst': {'type': 'addresses_A_ht', 'congruent': 6, 'same': False}, 'OP': 'REPM'} {'src': {'type': 'addresses_WC_ht', 'same': False, 'size': 16, 'congruent': 10, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'src': {'type': 'addresses_D_ht', 'same': False, 'size': 4, 'congruent': 3, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'src': {'type': 'addresses_WC_ht', 'same': False, 'size': 1, 'congruent': 4, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'dst': {'type': 'addresses_A_ht', 'same': False, 'size': 16, 'congruent': 5, 'NT': True, 'AVXalign': True}, 'OP': 'STOR'} {'src': {'type': 'addresses_A_ht', 'same': False, 'size': 16, 'congruent': 3, 'NT': True, 'AVXalign': False}, 'OP': 'LOAD'} {'src': {'type': 'addresses_WT_ht', 'congruent': 8, 'same': False}, 'dst': {'type': 'addresses_WT_ht', 'congruent': 4, 'same': False}, 'OP': 'REPM'} {'src': {'type': 'addresses_normal_ht', 'congruent': 0, 'same': False}, 'dst': {'type': 'addresses_A_ht', 'congruent': 8, 'same': False}, 'OP': 'REPM'} {'dst': {'type': 'addresses_D_ht', 'same': False, 
'size': 16, 'congruent': 2, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'src': {'type': 'addresses_A_ht', 'same': True, 'size': 4, 'congruent': 5, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'00': 934, '58': 20865, '52': 30} 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 00 58 58 58 58 58 58 58 58 00 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 00 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 52 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 00 58 58 58 58 58 00 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 00 00 58 58 58 58 58 58 58 58 
58 58 58 58 58 58 58 00 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 00 58 58 58 00 58 58 58 58 58 58 58 58 00 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 00 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 58 00 58 58 58 00 58 58 */
;--------------------------------------------------------------- ; Tick Counter ; ; The tick counter provides a measure of elapsed time that is ; based on a periodic interrupts from ctc0 channel 3. The choice ; of channel 3 is based on the fact that due to package ; constraints of the CTC component, channel 3 has no zero count ; output (ZC/TC pin) that could be used for timing another ; hardware device. ; ; Based on a system clock of 3.6864 MHz, ctc0 channel 3 can be ; configured to use the timer mode with the prescaler set to ; 256 and the time constant set to 144 to arrive at a timer ; frequency of 100 Hz. ; ; period = TC * pre_scaler / clock rate ; = 144 * 256 / 3.6864 MHz ; = 10,000 microseconds ; = 10 milliseconds ; frequency = 1 / period = 1 / 0.01 seconds = 100 Hz ; ; This choice of time constant isn't ideal, however, because it ; doesn't allow the same firmware to be used with a 7.3728 MHz (2x) ; clock while requiring only a configuration change via a switch. ; If we double the time constant, the result will not fit in the ; 8-bit register of the CTC. We therefore instead choose 72 as ; the time constant with a 3.6864 MHz clock, resulting in a timer ; frequency of 200 Hz. When the clock speed is doubled, we ; also double the time constant to 144, to arrive at the same ; timer frequency. ; ; We divide the timer frequency by 2 using a flag that ; is complemented at each timer interrupt, incrementing the ; 32-bit tkcnt variable in system memory each time the flag ; is set to zero (at every other interrupt). This allows the ; tick counter to reflect the desired 100 Hz tick frequency. 
;--------------------------------------------------------------- .name tk .extern gpin .extern isrtab .extern tkcnt .extern tkflag .extern d32x8 .include memory.asm .include ports.asm .include isr.asm .include ctc_defs.asm tk_ctc_ctrl .equ ctc_ei+ctc_timer+ctc_pre256+ctc_falling+ctc_tc+ctc_ctrl tk_ctc_tc .equ 72 ; time constant (pre-scale=256) tk_ctc_ch .equ ctc0_ch3 tk_ctc_isr .equ isr_ctc0_ch3 tk_5ms_flag .equ $1 .cseg ;--------------------------------------------------------------- ; tkinit: ; Initializes the tick counter and sets up ctc0 channel 3. ; tkinit:: ; zero out the flag and the counter xor a ld (tkflag),a ld hl,tkcnt ld b,4 tkinit_10: ld (hl),a inc hl djnz tkinit_10 ; Set interrupt vector ld hl,isrtab+tk_ctc_isr ld (hl),low(tkisr) inc hl ld (hl),high(tkisr) ld c,tk_ctc_tc ; time constant for "normal" clock ld a,(gpin) ; get config switch positions rla ; put "turbo" switch into carry jr c,tkinit_20 ; "normal" when switch is set to 1 rl c ; double time constant for "turbo" tkinit_20: ; configure CTC channel ld a,tk_ctc_ctrl out (tk_ctc_ch),a ; output control word ld a,c out (tk_ctc_ch),a ; output time constant ret ;--------------------------------------------------------------- ; tkisr: ; Tick count interrupt service routine. ; ; This ISR increments the 32-bit `tkcnt` variable defined in ; memory.asm each time that ctc0 channel 3 interrupts the CPU. 
; tkisr:: ei push af push hl ; use a 1 flag to divide timer frequency by 2 ld a,(tkflag) xor tk_5ms_flag ld (tkflag),a ; skip counter update at every other interrupt and tk_5ms_flag jr nz,tkisr_10 ld hl,tkcnt ; HL -> 32-bit counter inc (hl) ; increment 1st byte jr nz,tkisr_10 ; go if no ripple inc hl ; ripple into 2nd byte inc (hl) ; increment it jr nz,tkisr_10 ; go if no ripple inc hl ; ripple into 3rd byte inc (hl) ; increment it jr nz,tkisr_10 ; go if no ripple inc hl ; ripple into 4th byte inc (hl) ; increment it tkisr_10: pop hl pop af reti ;--------------------------------------------------------------- ; tkrd16: ; Reads the least significant 16 bits of the tick counter. This ; function is useful for relatively short interval measurement. ; ; On return: ; HL = least significant 16 bits of the tick counter ; tkrd16:: push de ld hl,tkcnt di ld e,(hl) inc hl ld d,(hl) ei ex de,hl pop de ret ;--------------------------------------------------------------- ; tkrd32: ; Reads the 32-bit tick counter. ; ; On return: ; DEHL = 32-bit tick counter ; tkrd32:: push bc ld hl,tkcnt di ld c,(hl) inc hl ld b,(hl) inc hl ld e,(hl) inc hl ld d,(hl) ei ld l,c ld h,b pop bc ret ;--------------------------------------------------------------- ; tkrdut: ; Converts the tick count into a system uptime in a caller- ; provided buffer. 
; ; On entry: ; IY = caller's 6-byte buffer for the result ; ; On return: ; AF is destroyed ; ; Caller's buffer is updated with system uptime as follows: ; buf+0 = days (2 bytes, unsigned integer) ; buf+2 = hours (1 byte, 0..23) ; buf+3 = minutes (1 byte, 0..59) ; buf+4 = seconds (1 byte, 0..59) ; buf+5 = hundreds (1 byte, 0..99) ; tkrdut:: push bc push de push hl ; load DEHL with the 32-bit counter ld hl,tkcnt di ld c,(hl) inc hl ld b,(hl) inc hl ld e,(hl) inc hl ld d,(hl) ei ld l,c ld h,b ; divide hundreths by 100 to get seconds with ; hundredths as the remainder ld c,100 call d32x8 ; DEHL is now seconds ld (iy+5),a ; store hundredths ; divide seconds by 60 to get minutes with ; seconds as the remainder ld c,60 call d32x8 ; DEHL is now minutes ld (iy+4),a ; store seconds ; divide minutes by 60 to get hours with ; minutes as the remainder call d32x8 ; DEHL is now hours ld (iy+3),a ; store minutes ; divide hours by 24 to get days with ; hours as the remainder ld c,24 call d32x8 ; HL is now days ld (iy+2),a ; store hours ; store days ld (iy+0),l ld (iy+1),h pop hl pop de pop bc ret .end
; A017523: a(n) = (12*n)^3. ; 0,1728,13824,46656,110592,216000,373248,592704,884736,1259712,1728000,2299968,2985984,3796416,4741632,5832000,7077888,8489664,10077696,11852352,13824000,16003008,18399744,21024576,23887872,27000000,30371328,34012224,37933056,42144192,46656000,51478848,56623104,62099136,67917312,74088000,80621568,87528384,94818816,102503232,110592000,119095488,128024064,137388096,147197952,157464000,168196608,179406144,191102976,203297472,216000000,229220928,242970624,257259456,272097792,287496000,303464448,320013504,337153536,354894912,373248000,392223168,411830784,432081216,452984832,474552000,496793088,519718464,543338496,567663552,592704000,618470208,644972544,672221376,700227072,729000000,758550528,788889024,820025856,851971392,884736000,918330048,952763904,988047936,1024192512,1061208000,1099104768,1137893184,1177583616,1218186432,1259712000,1302170688,1345572864,1389928896,1435249152,1481544000,1528823808,1577098944,1626379776,1676676672,1728000000,1780360128,1833767424,1888232256,1943764992,2000376000,2058075648,2116874304,2176782336,2237810112,2299968000,2363266368,2427715584,2493326016,2560108032,2628072000,2697228288,2767587264,2839159296,2911954752,2985984000,3061257408,3137785344,3215578176,3294646272,3375000000,3456649728,3539605824,3623878656,3709478592,3796416000,3884701248,3974344704,4065356736,4157747712,4251528000,4346707968,4443297984,4541308416,4640749632,4741632000,4843965888,4947761664,5053029696,5159780352,5268024000,5377771008,5489031744,5601816576,5716135872,5832000000,5949419328,6068404224,6188965056,6311112192,6434856000,6560206848,6687175104,6815771136,6946005312,7077888000,7211429568,7346640384,7483530816,7622111232,7762392000,7904383488,8048096064,8193540096,8340725952,8489664000,8640364608,8792838144,8947094976,9103145472,9261000000,9420668928,9582162624,9745491456,9910665792,10077696000,10246592448,10417365504,10590025536,10764582912,10941048000,11119431168,11299742784,11481993216,11666192832,11852352000,12040481088,12230
590464,12422690496,12616791552,12812904000,13011038208,13211204544,13413413376,13617675072,13824000000,14032398528,14242881024,14455457856,14670139392,14886936000,15105858048,15326915904,15550119936,15775480512,16003008000,16232712768,16464605184,16698695616,16934994432,17173512000,17414258688,17657244864,17902480896,18149977152,18399744000,18651791808,18906130944,19162771776,19421724672,19683000000,19946608128,20212559424,20480864256,20751532992,21024576000,21300003648,21577826304,21858054336,22140698112,22425768000,22713274368,23003227584,23295638016,23590516032,23887872000,24187716288,24490059264,24794911296,25102282752,25412184000,25724625408,26039617344,26357170176,26677294272 mul $0,12 mov $1,$0 pow $1,3
#include <iostream> #include <vector> int AddEvenNumbers(const std::vector<int>& vec) { int sum {0}; for (const int element : vec) { if (!(element % 2)) sum += element; } return sum; } // int main() { // std::vector<int> vec{1, 2, 3, 4, 5}; // std::cout << AddEvenNumbers(vec) << "\n"; // return 0; // }
; (Shamelessely stolen | Adapted) from http://www.osdever.net/

global _idt_load
[extern _idtp]

; Load the IDT register from the C-side IDT pointer structure `_idtp`.
_idt_load:
    lidt [_idtp]
    ret

;;;;;;;;;;;;;;;;;;;;;;;; INTERRUPT SERVICE ROUTINES ;;;;;;;;;;;;;;;;;;;;;;;;;;

; Every stub disables interrupts and leaves a uniform frame of
; [error code][interrupt number] on the stack before jumping to the
; common handler.
;
; ISR_NOERR is for exceptions where the CPU does NOT push an error code:
; a dummy 0 is pushed so the stack layout matches the error-code cases.
%macro ISR_NOERR 1
global _isr%1
_isr%1:
    cli
    push byte 0             ; dummy error code
    push byte %1            ; interrupt number
    jmp isr_common_stub
%endmacro

; ISR_ERR is for exceptions where the CPU already pushed a real error code
; (vectors 8 and 10-14), so only the interrupt number is pushed here.
%macro ISR_ERR 1
global _isr%1
_isr%1:
    cli
    push byte %1            ; interrupt number (error code already on stack)
    jmp isr_common_stub
%endmacro

ISR_NOERR 0                 ; divide error
ISR_NOERR 1
ISR_NOERR 2
ISR_NOERR 3
ISR_NOERR 4
ISR_NOERR 5
ISR_NOERR 6                 ; (a stray Bochs magic breakpoint "xchg bx, bx" was removed here)
ISR_NOERR 7
ISR_ERR   8                 ; double fault
ISR_NOERR 9
ISR_ERR   10
ISR_ERR   11
ISR_ERR   12
ISR_ERR   13                ; general protection fault
ISR_ERR   14                ; page fault
ISR_NOERR 15
ISR_NOERR 16
ISR_NOERR 17
ISR_NOERR 18
ISR_NOERR 19
ISR_NOERR 20
ISR_NOERR 21
ISR_NOERR 22
ISR_NOERR 23
ISR_NOERR 24
ISR_NOERR 25
ISR_NOERR 26
ISR_NOERR 27
ISR_NOERR 28
ISR_NOERR 29
ISR_NOERR 30
ISR_NOERR 31

extern _fault_handler

; Common exception entry: save the CPU state, switch to the kernel data
; segments, and hand the C fault handler a pointer to the saved frame.
isr_common_stub:
    pusha
    push ds
    push es
    push fs
    push gs
    mov ax, 0x10            ; Load the Kernel Data Segment descriptor!
    mov ds, ax
    mov es, ax
    mov fs, ax
    mov gs, ax
    mov eax, esp            ; pass a pointer to the saved register frame
    push eax
    mov eax, _fault_handler
    call eax                ; indirect call keeps every stub the same size
    pop eax
    pop gs
    pop fs
    pop es
    pop ds
    popa
    add esp, 8              ; drop the pushed error code and ISR number
    iret                    ; restores EIP, CS, EFLAGS (plus ESP and SS on a privilege change)

;;;;;;;;;;;;;;;;;;;;;;;; INTERRUPT REQUESTS ;;;;;;;;;;;;;;;;;;;;;;;;;;

; IRQ stubs for the remapped hardware interrupts 32..47. Hardware IRQs
; never carry a CPU error code, so a dummy 0 keeps the frame uniform.
%macro IRQ 2
global irq%1
irq%1:
    cli
    push byte 0             ; dummy error code
    push byte %2            ; remapped interrupt number
    jmp irq_common_stub
%endmacro

IRQ 0, 32
IRQ 1, 33
IRQ 2, 34
IRQ 3, 35
IRQ 4, 36
IRQ 5, 37
IRQ 6, 38
IRQ 7, 39
IRQ 8, 40
IRQ 9, 41
IRQ 10, 42
IRQ 11, 43
IRQ 12, 44
IRQ 13, 45
IRQ 14, 46
IRQ 15, 47

extern _irq_handler         ; fixed: was "[extern] _irq_handler", which NASM rejects

; Same save/restore dance as isr_common_stub, but dispatches to the
; C-side IRQ handler instead of the fault handler.
irq_common_stub:
    pusha
    push ds
    push es
    push fs
    push gs
    mov ax, 0x10
    mov ds, ax
    mov es, ax
    mov fs, ax
    mov gs, ax
    mov eax, esp
    push eax
    mov eax, _irq_handler
    call eax
    pop eax
    pop gs
    pop fs
    pop es
    pop ds
    popa
    add esp, 8
    iret
; A112063: Positive integers i for which A112049(i) == 3. ; 11,12,23,36,47,48,71,72,83,96,107,108,131,132,143,156,167,168,191,192,203,216,227,228,251,252,263,276,287,288,311,312,323,336,347,348,371,372,383,396,407,408,431,432,443,456,467,468,491,492,503,516,527 lpb $0,1 mov $2,$0 sub $2,1 cal $2,112133 ; First differences of A112063. sub $0,1 add $1,$2 lpe add $1,11
;-----------------------------------------------------------------------------
; Kernel-mode agent bootstrap: allocates a pool buffer, signals the
; introspection engine via INT3, starts a system thread inside the
; injected agent, and cleans up after the agent returns.
;-----------------------------------------------------------------------------
[bits 64]

; Global symbols
global _start

;-----------------------------------------------------------------------------
section .code

;================================================================================================================================
; Save all registers - we don't know who and what state we've interrupted.
push rax
push rcx
push rdx
push rbx
push rbp
push rsi
push rdi
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15

; The stack must be kept 16-bytes aligned. If we're not 16-bytes aligned here, do it.
; r14 remembers the padding (0 or 8) so it can be undone symmetrically later.
xor r14,r14
test rsp, 0xF
jz _start
mov r14, 0x8
sub rsp, r14

_start:
; The stack is 16-bytes aligned here.
; Alloc the agent buffer
xor ecx, ecx                            ; Alloc in nonpaged pool
mov edx, dword [rel dwAgentSize]        ; The size, as given by the introcore
mov r8d, dword [rel dwAgentTag]         ; The tag, as given by the introcore
sub rsp, 0x20                           ; Alloc shadow stack space.
call qword [rel ExAllocatePoolWithTag]  ; Alloc the agent space
add rsp, 0x20
add rsp, r14                            ; Introcore may decide to de-alloc the bootstrap, so put the stack as it was before.
xor rsi, rsi
test rax, rax
jnz _signal_alloc
mov rsi, 0xC000009A                     ; STATUS_INSUFFICIENT_RESOURCES if the allocation failed
_signal_alloc:
mov rcx, rax                            ; Save return address in rcx
mov rdx, qword [rel token1]             ; Token in edx
int3                                    ; Signal introcore that the buffer has been allocated
sub rsp, r14                            ; Re-align the stack to 16 bytes
mov r15, rcx                            ; Copy back the buffer address
mov r13, r15                            ; Keep the copy in r13 so that we can de-alloc it if thread creation fails (r13 non-volatile, should be OK)
mov r8d, dword [rel dwAgentEp]
add r15, r8                             ; r15 = agent entry point (buffer + EP offset)

; Create a thread inside the agent. An even number of 8 bytes must be allocated on the stack, as it must
; be kept 16-bytes aligned.
sub rsp, 8
mov rcx, rsp                            ; out: thread handle (written to the slot just reserved)
mov edx, 0x001F0000                     ; Desired access
xor r8, r8                              ; object attributes
xor r9, r9                              ; process handle
push r15                                ; start context
lea rax, [rel ThreadHandler]            ; The entry point
push rax                                ; thread function/agent entry point
push 0
sub rsp, 8 * 4                          ; shadow space for the 4 register arguments
call qword [rel PsCreateSystemThread]   ; Create the thread!
add rsp, 8 * 8                          ; Clear the stack - 7 arguments + the thread handle
test eax, eax
jns _notify_intro_thread
; Thread creation failed: free the agent buffer before reporting.
push rax
sub rsp, 0x28                           ; 0x28 because 0x20 is for the shadow stack space, and 0x8 in order to keep the stack aligned to 16 bytes (as we previously pushed RAX)
mov rcx, r13                            ; r13 is the previously saved copy of the allocated agent address
mov edx, dword [rel dwAgentTag]
call qword [rel ExFreePoolWithTag]
add rsp, 0x28
pop rax

_notify_intro_thread:
; Restore the stack, as we previously aligned it.
add rsp, r14
mov rcx, rax
mov rdx, qword [rel token2]
int3                                    ; Notify that the thread has been started

; Restore every saved register - this will be done by the HVI. Also, jumping to the
; interrupted code will also be done by the HVI.
;pop r15
;pop r14
;pop r13
;pop r12
;pop r11
;pop r10
;pop r9
;pop r8
;pop rdi
;pop rsi
;pop rbp
;pop rbx
;pop rdx
;pop rcx
;pop rax
; Jump back to the original code that we interrupted.
;ret

;================================================================================================================================
ThreadHandler:
; On entry, the stack is not 16 bytes aligned. On each call, the stack must be aligned before the call;
; the call will push 8 more bytes on the stack, unaligning it.
; IMPORTANT: Save any non-volatile register that is used here (http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx)
; RAX, RCX, RDX, R8, R9, R10 and R11 can be safely modified.
; RCX holds the agent entry point (the thread start context).
_spin:
pause
cmp dword [rel dwSemaphore], 0          ; wait until introcore releases the semaphore
jz _spin
; Save RCX - this also aligns the stack.
push rcx
; Alloc shadow stack space.
sub rsp, 0x20
; Aligned stack here.
call rcx                                ; run the agent
; Restore RCX, as it may have been modified.
mov rcx, qword [rsp + 0x20]
; Zero out the agent region.
push rdi
mov rdi, rcx                            ; rdi = agent EP address
mov ecx, dword [rel dwAgentEp]
sub rdi, rcx                            ; rdi = agent base (EP - EP offset)
mov r8, rdi                             ; keep the base for the free call below
mov ecx, dword [rel dwAgentSize]
xor eax, eax
rep stosb                               ; wipe the whole buffer
mov rcx, r8
pop rdi
; Free the driver memory.
mov edx, dword [rel dwAgentTag]
call qword [rel ExFreePoolWithTag]
; Clear out the stack.
add rsp, 0x28
; We're done here. We can do the cleanup now.
mov rdx, qword [rel token3]
int3
; Jump back to the trampoline. We're done.
jmp qword [rel jumpback]
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop

;================================================================================================================================
; These variables will be filled by the introspection engine upon initialization.
ExAllocatePoolWithTag   dq 0
ExFreePoolWithTag       dq 0
PsCreateSystemThread    dq 0

; These will be filled in by the introspection engine on agent injection
dwAgentSize             dd 0
dwAgentTag              dd 0
dwAgentEp               dd 0
dwSemaphore             dd 0
token1                  dq 0
token2                  dq 0
token3                  dq 0
jumpback                dq 0
reserved                dq 0
; ********************************************************************************* ; ********************************************************************************* ; ; File: multiply.asm ; Purpose: 16 bit unsigned multiply ; Date : 8th November 2018 ; Author: paul@robsons.org.uk ; ; ********************************************************************************* ; ********************************************************************************* ; ********************************************************************************* ; ; Does HL = HL * DE ; ; ********************************************************************************* MULTMultiply16: push bc push de ld b,h ; get multipliers in DE/BC ld c,l ld hl,0 ; zero total __Core__Mult_Loop: bit 0,c ; lsb of shifter is non-zero jr z,__Core__Mult_Shift add hl,de ; add adder to total __Core__Mult_Shift: srl b ; shift BC right. rr c ex de,hl ; shift DE left add hl,hl ex de,hl ld a,b ; loop back if BC is nonzero or c jr nz,__Core__Mult_Loop pop de pop bc ret
; A141952: Primes congruent to 7 mod 27. ; Submitted by Jon Maiga ; 7,61,223,277,331,439,547,601,709,1033,1087,1249,1303,1627,1789,1951,2113,2221,2383,2437,2707,3301,3463,3517,3571,3733,4003,4057,4111,4219,4273,4327,4597,4651,4759,4813,5407,5569,5623,5839,6163,6217,6271,6379,6703,7027,7243,7297,7351,7459,7621,8053,8161,8269,8377,8431,8539,8647,8863,8971,9133,9187,9241,9349,9403,9511,9619,9781,10159,10267,10321,10429,10753,10861,11131,11239,11617,11779,11833,11887,11941,12049,12157,12211,12373,12589,12697,12967,13183,13291,13399,13669,13723,13831,14479,14533 mov $2,$0 pow $2,2 mov $4,6 lpb $2 mov $3,$4 seq $3,10051 ; Characteristic function of primes: 1 if n is prime, else 0. sub $0,$3 mov $1,$0 max $1,0 cmp $1,$0 mul $2,$1 sub $2,1 add $4,54 lpe mov $0,$4 add $0,1
<% from pwnlib.shellcraft.mips.linux import syscall %> <%page args="path, mode"/> <%docstring> Invokes the syscall mkdir. See 'man 2 mkdir' for more information. Arguments: path(char): path mode(mode_t): mode </%docstring> ${syscall('SYS_mkdir', path, mode)}
; A070665: n^6 mod 42. ; 0,1,22,15,22,1,36,7,22,15,22,1,36,1,28,15,22,1,36,1,22,21,22,1,36,1,22,15,28,1,36,1,22,15,22,7,36,1,22,15,22,1,0,1,22,15,22,1,36,7,22,15,22,1,36,1,28,15,22,1,36,1,22,21,22,1,36,1,22,15,28,1,36,1,22,15,22,7 pow $0,6 mod $0,42 mov $1,$0
db DEX_HITMONCHAN ; pokedex id db 50, 105, 79, 76, 35 ; hp atk def spd spc db FIGHTING, FIGHTING ; type db 45 ; catch rate db 140 ; base exp INCBIN "gfx/pokemon/front/hitmonchan.pic", 0, 1 ; sprite dimensions dw HitmonchanPicFront, HitmonchanPicBack db COMET_PUNCH, AGILITY, NO_MOVE, NO_MOVE ; level 1 learnset db GROWTH_MEDIUM_FAST ; growth rate ; tm/hm learnset tmhm MEGA_PUNCH, MEGA_KICK, TOXIC, BODY_SLAM, TAKE_DOWN, \ DOUBLE_EDGE, SUBMISSION, COUNTER, SEISMIC_TOSS, RAGE, \ MIMIC, DOUBLE_TEAM, BIDE, METRONOME, SWIFT, \ SKULL_BASH, REST, SUBSTITUTE, STRENGTH ; end db 0 ; padding
; ; Size-optimized LZSA2 decompressor by spke & uniabis (139 bytes) ; ; ver.00 by spke for LZSA 1.0.0 (02-09/06/2019, 145 bytes); ; ver.01 by spke for LZSA 1.0.5 (24/07/2019, added support for backward decompression); ; ver.02 by uniabis (30/07/2019, 144(-1) bytes, +3.3% speed and support for Hitachi HD64180); ; ver.03 by spke for LZSA 1.0.7 (01/08/2019, 140(-4) bytes, -1.4% speed and small re-organization of macros); ; ver.04 by spke for LZSA 1.1.0 (26/09/2019, removed usage of IY, added full revision history) ; ver.05 by spke for LZSA 1.1.1 (11/10/2019, 139(-1) bytes, +0.1% speed) ; ; The data must be compressed using the command line compressor by Emmanuel Marty ; The compression is done as follows: ; ; lzsa.exe -f2 -r <sourcefile> <outfile> ; ; where option -r asks for the generation of raw (frame-less) data. ; ; The decompression is done in the standard way: ; ; ld hl,FirstByteOfCompressedData ; ld de,FirstByteOfMemoryForDecompressedData ; call DecompressLZSA2 ; ; Backward compression is also supported; you can compress files backward using: ; ; lzsa.exe -f2 -r -b <sourcefile> <outfile> ; ; and decompress the resulting files using: ; ; ld hl,LastByteOfCompressedData ; ld de,LastByteOfMemoryForDecompressedData ; call DecompressLZSA2 ; ; (do not forget to uncomment the BACKWARD_DECOMPRESS option in the decompressor). ; ; Of course, LZSA2 compression algorithms are (c) 2019 Emmanuel Marty, ; see https://github.com/emmanuel-marty/lzsa for more information ; ; Drop me an email if you have any comments/ideas/suggestions: zxintrospec@gmail.com ; ; This software is provided 'as-is', without any express or implied ; warranty. In no event will the authors be held liable for any damages ; arising from the use of this software. ; ; Permission is granted to anyone to use this software for any purpose, ; including commercial applications, and to alter it and redistribute it ; freely, subject to the following restrictions: ; ; 1. 
;  The origin of this software must not be misrepresented; you must not
;  claim that you wrote the original software. If you use this software
;  in a product, an acknowledgment in the product documentation would be
;  appreciated but is not required.
;  2. Altered source versions must be plainly marked as such, and must not be
;  misrepresented as being the original software.
;  3. This notice may not be removed or altered from any source distribution.

;	DEFINE	BACKWARD_DECOMPRESS	; uncomment for data compressed with option -b
;	DEFINE	HD64180			; uncomment for systems using Hitachi HD64180

	IFNDEF	BACKWARD_DECOMPRESS

		; Forward decompression: source/dest pointers move up.
		MACRO NEXT_HL
		inc hl
		ENDM

		MACRO ADD_OFFSET
		ex de,hl : add hl,de
		ENDM

		MACRO BLOCKCOPY
		ldir
		ENDM

	ELSE

		; Backward decompression: source/dest pointers move down.
		MACRO NEXT_HL
		dec hl
		ENDM

		MACRO ADD_OFFSET
		push hl : or a : sbc hl,de : pop de		; 11+4+15+10 = 40t / 5 bytes
		ENDM

		MACRO BLOCKCOPY
		lddr
		ENDM

	ENDIF

	IFNDEF	HD64180

		; Z80: half-index-register moves are available.
		MACRO LD_IX_DE
		ld ixl,e : ld ixh,d
		ENDM

		MACRO LD_DE_IX
		ld e,ixl : ld d,ixh
		ENDM

	ELSE

		; HD64180 has no ixl/ixh access; go through the stack instead.
		MACRO LD_IX_DE
		push de : pop ix
		ENDM

		MACRO LD_DE_IX
		push ix : pop de
		ENDM

	ENDIF

; Entry: HL = compressed data, DE = destination buffer.
; A'/B track the nibble reservoir; IX caches the last match offset.
@DecompressLZSA2:
		xor a : ld b,a : exa : jr ReadToken

; Token %00x: 5-bit offset, built from a nibble plus one token bit.
CASE00x:	call ReadNibble
		ld e,a : ld a,c
		cp %00100000 : rl e : jr SaveOffset

; Token %0xx: short offsets; D preloaded with #FF (high byte of -offset).
CASE0xx		ld d,#FF : cp %01000000 : jr c,CASE00x

; Token %01x: 9-bit offset, one bit from the token plus a byte below.
CASE01x:	cp %01100000 : rl d

OffsetReadE:	ld e,(hl) : NEXT_HL

SaveOffset:	LD_IX_DE			; remember offset for "repeat match" (%111)

MatchLen:	and %00000111 : add 2 : cp 9 : call z,ExtendedCode

CopyMatch:	ld c,a
		ex (sp),hl			; BC = len, DE = -offset, HL = dest, SP -> [src]
		ADD_OFFSET			; BC = len, DE = dest, HL = dest+(-offset), SP -> [src]
		BLOCKCOPY			; BC = 0, DE = dest
		pop hl				; HL = src

; Main loop: fetch a token, copy its literals, then decode the match.
ReadToken:	ld a,(hl) : NEXT_HL : push af
		and %00011000 : jr z,NoLiterals
		rrca : rrca : rrca
		call pe,ExtendedCode		; literal count 3 means "extended"
		ld c,a
		BLOCKCOPY

NoLiterals:	pop af : push de
		or a : jp p,CASE0xx

; Token %1xx: 13/16-bit offsets, or repeat the previous offset.
CASE1xx		cp %11000000 : jr nc,CASE11x

; Token %10x: 13-bit offset from a nibble, a token bit, and a byte.
CASE10x:	call ReadNibble
		ld d,a : ld a,c
		cp %10100000 ;: rl d
		dec d : rl d : DB #CA		; jr OffsetReadE ; #CA is JP Z,.. to skip all commands in CASE110 before jr OffsetReadE

; Token %110: 16-bit offset, high byte first.
CASE110:	ld d,(hl) : NEXT_HL : jr OffsetReadE

CASE11x		cp %11100000 : jr c,CASE110

; Token %111: reuse the previous match offset kept in IX.
CASE111:	LD_DE_IX : jr MatchLen

; Extends a saturated literal/match length via nibble, byte or word.
; A word-sized length of zero is the end-of-data marker: pop and return
; to the original caller of @DecompressLZSA2.
ExtendedCode:	call ReadNibble : inc a : jr z,ExtraByte
		sub #F0+1 : add c : ret
ExtraByte	ld a,15 : add c : add (hl) : NEXT_HL : ret nc
		ld a,(hl) : NEXT_HL
		ld b,(hl) : NEXT_HL : ret nz
		pop de : pop de			; RET is not needed, because RET from ReadNibble is sufficient

; Returns the next 4-bit value in A; the spare nibble is banked in A'.
ReadNibble:	ld c,a : xor a : exa : ret m
UpdateNibble	ld a,(hl) : or #F0 : exa
		ld a,(hl) : NEXT_HL : or #0F
		rrca : rrca : rrca : rrca : ret
class Solution { public: array<vector<int>, 26> GetIndicesArray(const string& T) { array<vector<int>, 26> indicesArr{}; for (int i = 0; i < (int)T.length(); ++i) { indicesArr[T[i] - 'a'].push_back(i); } return indicesArr; } int FindNextInd(const vector<int>& Indices, int LastInd) { int lo = 0; int hi = (int)Indices.size(); while (hi > lo) { int mid = lo + ((hi - lo) / 2); int num = Indices[mid]; if (num <= LastInd) { lo = mid + 1; } else { hi = mid; } } if (lo == (int)Indices.size()) { return -1; } return Indices[lo]; } bool isSubsequence(string s, string t) { auto indicesArr = GetIndicesArray(t); int lastInd = -1; for (char c : s) { auto& indices = indicesArr[c - 'a']; lastInd = FindNextInd(indices, lastInd); if (lastInd == -1) { return false; } } return true; } };
;; Bootstrap loader. Loads an image into RAM.
;;
;; Requires image to be in the first 64K and the SD card to not be set
;; up for sector addressing. Also needs the destination to not overlap
;; the loader!
;;
;; Put this in the first block of the SD card, so that the
;; monitor can load it, and it can then load the rest of the
;; image into RAM.

#target ROM

source:   equ $0200          ; Source offset on disk.
dest:     equ $dc00          ; Destination address in memory.
start:    equ $f200          ; Entry point
len:      equ 7176           ; Data length in bytes.
len_blks: equ (len+511)/512  ; Data length in blocks.

#code ENTRY,0x1000

top:
    jr main

    ; Put a string near the start for identification
    defb "Dirac CP/M system disk", $0a, $00

; TODO: Spot failures, retry.
main:
    ; Load the data
    ld b,len_blks
    ld de,dest
    ld hl,source
ld_loop:
    call read_block
    inc h               ; DE already incremented...
    inc h               ; but move HL to next disk block.
    ld a, '.'           ; progress dot per block
    call sio_wr
    djnz ld_loop
    ; New line.
    ld a, 10
    call sio_wr
    ; Make the lowest page of memory R/W RAM.
    LD bc,$0000
    LD a,$c0
    OUT (bc), a
    jp start

; Disk offset in HL, memory location in DE.
; Reads 512 bytes, increments DE.
read_block:
    push bc
    push hl
    push de
    ; Send CMD17 - Single block read
    ld c,$40 + 17       ; CMD17
    ld de,$0000         ; All within first 64k. HL has lower address.
    call send_cmd
    pop de
    ; Exit if failed...
    cp $00
    jr nz, rb_ret
    ; Expect data token.
data_tok:
    call recv_byte
    cp $ff
    jp z,data_tok
    cp $fe              ; $FE = single-block data token
    jr nz, rb_ret
    ; DE contains destination address.
    ; Read the sector's worth of data.
    call read256
    call read256
    ; Read the CRC.
    call recv_byte
    call recv_byte
rb_ret:
    pop hl
    pop bc
    ret

; Read 256 bytes worth of data into DE.
read256:
    ld b,$00            ; 256 iterations (djnz wraps)
r256:
    call recv_byte
    ld (de),a
    inc de
    djnz r256
    ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Command-sending routines

; Send a command, command in C, args in DE, HL.
send_cmd:
    push bc
    ; Wait until receiver's ready.
wait_rdy:
    call recv_byte
    cp $ff
    jp nz,wait_rdy
    pop bc
    ; Send command: command byte, 4 argument bytes, then a CRC stub.
    call send_byte
    ld c,d
    call send_byte
    ld c,e
    call send_byte
    ld c,h
    call send_byte
    ld c,l
    call send_byte
    ld c,1              ; dummy CRC with stop bit
    call send_byte
    call find_resp
    ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Low-level bit-twiddling SD card I/O
;;

; Byte to send in C
; Modifies A.
send_byte:
    scf                 ; sentinel bit marks the end of the byte
    rl c
sb_loop:
    ld a,0              ; CS low, clock low.
    rla
    out ($30),a
    xor a,$2            ; Flip clock bit.
    rl c
    out ($30),a         ; Doesn't affect NZ.
    jp nz,sb_loop
    ret

; Search for the start of a response. This is a 0
; bit from the SD card, which is inverted by the time
; it hits our I/O port.
;
; TODO: Should arrive within 16 cycles. Time out, if needed.
find_resp:
    ld a,$01            ; CS low, data high, -ive clk edge to shift.
    out ($30),a
    in a,($30)
    rra                 ; Next bit saved in carry flag...
    ld a,$03            ; CS low, data high, +ive clk edge to latch.
    out ($30),a
    jp nc,find_resp     ; Loop if CD card sent 0.
    ; Received a 1. Let's read the rest of the byte.
    ld c,$03
    jp rc_loop

; Read a byte into A. Modifies C.
recv_byte:
    ld c,$01            ; sentinel bit: full byte read when it reaches carry
rc_loop:
    ld a,$01            ; CS low, data high, -ive clk edge to shift.
    out ($30),a
    in a,($30)
    rra                 ; Next bit saved in carry flag...
    ld a,$03            ; CS low, data high, +ive clk edge to latch.
    out ($30),a
    rl c                ; And carry flag rotated into C.
    jp nc,rc_loop
    ld a,c
    cpl                 ; Data is inverted when it hits us.
    ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Serial port I/O
;;

; Write one character to initialised serial port
sio_wr:
    push af
    ld a, $80
    out ($70), a
    ; Wait until transmit buffer has space
sio_wr_lp:
    in a, ($21)
    and $04
    jr z, sio_wr_lp
    ; Stick the data in the data register
    pop af
    out ($20), a
    ld a, $40
    out ($70), a
    ret

#end
; A003231: a(n) = floor(n*(sqrt(5)+5)/2). ; 3,7,10,14,18,21,25,28,32,36,39,43,47,50,54,57,61,65,68,72,75,79,83,86,90,94,97,101,104,108,112,115,119,123,126,130,133,137,141,144,148,151,155,159,162,166,170,173,177,180,184,188,191,195,198,202,206,209 mov $3,$0 mov $4,$0 add $4,1 lpb $4,1 mov $0,$3 sub $4,1 sub $0,$4 mov $7,$0 mov $9,2 lpb $9,1 sub $9,1 add $0,$9 sub $0,1 mov $2,$0 mov $6,$0 lpb $2,1 add $6,1 lpb $6,1 mov $6,$2 add $2,2 pow $6,2 lpe sub $2,1 add $6,$0 lpe mov $5,$2 mov $10,$9 lpb $10,1 mov $8,$5 sub $10,1 lpe lpe lpb $7,1 mov $7,0 sub $8,$5 lpe mov $5,$8 add $5,3 add $1,$5 lpe
; int isgreaterequal(float x, float y) __z88dk_callee SECTION code_clib SECTION code_fp_math48 PUBLIC cm48_sdcciy_isgreaterequal_callee EXTERN am48_isgreaterequal, cm48_sdcciyp_dcallee2 cm48_sdcciy_isgreaterequal_callee: call cm48_sdcciyp_dcallee2 ; AC'= y ; AC = x jp am48_isgreaterequal
; A098354: Multiplication table of the powers of 2 read by antidiagonals. ; 4,8,8,16,16,16,32,32,32,32,64,64,64,64,64,128,128,128,128,128,128,256,256,256,256,256,256,256,512,512,512,512,512,512,512,512,1024,1024,1024,1024,1024,1024,1024,1024,1024,2048,2048,2048,2048,2048,2048,2048,2048 mov $1,4 lpb $0,1 sub $0,1 add $2,1 trn $0,$2 mul $1,2 lpe
/* * Copyright 2018-2021 Mahdi Khanalizadeh * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section .text .global linux_syscall3 linux_syscall3: push {r7} // save r7 mov r7, r3 swi 0x0 pop {r7} // restore r7 bx lr
/* file: input.cpp */
/*******************************************************************************
* Copyright 2014-2019 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#include <jni.h>

#include "com_intel_daal_algorithms_qr_Input.h"
#include "com_intel_daal_algorithms_qr_DistributedStep2MasterInput.h"
#include "com_intel_daal_algorithms_qr_DistributedStep3LocalInput.h"

#include "common_helpers.h"

#include "com_intel_daal_algorithms_qr_DistributedStep3LocalInputId.h"
#define inputOfStep3FromStep1Id com_intel_daal_algorithms_qr_DistributedStep3LocalInputId_inputOfStep3FromStep1Id
#define inputOfStep3FromStep2Id com_intel_daal_algorithms_qr_DistributedStep3LocalInputId_inputOfStep3FromStep2Id

USING_COMMON_NAMESPACES()
using namespace daal::algorithms::qr;

/*
 * Class:     com_intel_daal_algorithms_qr_Input
 * Method:    cSetInputTable
 * Signature:(JIJ)I
 */
JNIEXPORT void JNICALL Java_com_intel_daal_algorithms_qr_Input_cSetInputTable
(JNIEnv *env, jobject thisObj, jlong inputAddr, jint id, jlong ntAddr)
{
    // Only the `data` input id is valid here; anything else is ignored.
    if(id != data) { return; }

    jniInput<qr::Input>::set<qr::InputId, NumericTable>(inputAddr, id, ntAddr);
}

/*
 * Class:     com_intel_daal_algorithms_qr_Input
 * Method:    cGetInputTable
 * Signature: (JI)J
 */
JNIEXPORT jlong JNICALL Java_com_intel_daal_algorithms_qr_Input_cGetInputTable
(JNIEnv *env, jobject thisObj, jlong inputAddr, jint id)
{
    // -1 signals an invalid input id back to the Java side.
    if(id != data) { return (jlong) - 1; }

    return jniInput<qr::Input>::get<qr::InputId, NumericTable>(inputAddr, id);
}

/*
 * Class:     com_intel_daal_algorithms_qr_DistributedStep2MasterInput
 * Method:    cAddDataCollection
 * Signature:(JIIIJ)I
 */
JNIEXPORT void JNICALL Java_com_intel_daal_algorithms_qr_DistributedStep2MasterInput_cAddDataCollection
(JNIEnv *env, jobject thisObj, jlong inputAddr, jint key, jlong dcAddr)
{
    // Adds the partial result collection of one local node (keyed by node id).
    jniInput<qr::DistributedStep2Input>::add<qr::MasterInputId, DataCollection>(inputAddr, qr::inputOfStep2FromStep1, key, dcAddr);
}

/*
 * Class:     com_intel_daal_algorithms_qr_DistributedStep3LocalInput
 * Method:    cSetDataCollection
 * Signature:(JIIIJ)I
 */
JNIEXPORT void JNICALL Java_com_intel_daal_algorithms_qr_DistributedStep3LocalInput_cSetDataCollection
(JNIEnv *env, jobject thisObj, jlong inputAddr, jint id, jlong dcAddr)
{
    // Step 3 accepts only the two finalize-on-local input ids.
    if( id != inputOfStep3FromStep1 && id != inputOfStep3FromStep2 ) { return; }

    jniInput<qr::DistributedStep3Input>::set<qr::FinalizeOnLocalInputId, DataCollection>(inputAddr, id, dcAddr);
}
; A237133: Values of x in the solutions to x^2 - 3xy + y^2 + 19 = 0, where 0 < x < y. ; 4,5,7,11,17,28,44,73,115,191,301,500,788,1309,2063,3427,5401,8972,14140,23489,37019,61495,96917,160996,253732,421493,664279,1103483,1739105,2888956,4553036,7563385,11920003,19801199,31206973,51840212,81700916,135719437,213895775,355318099,559986409,930234860,1466063452,2435386481,3838203947,6375924583,10048548389,16692387268,26307441220,43701237221,68873775271,114411324395,180313884593,299532735964,472067878508,784186883497,1235889750931,2053027914527,3235601374285,5374896860084,8470914371924,14071662665725,22177141741487,36840091137091,58060510852537,96448610745548,152004390816124,252505741099553,397952661595835,661068612553111,1041853593971381,1730700096559780,2727608120318308,4531031677126229,7140970766983543,11862394934818907,18695304180632321,31056153127330492,48944941774913420,81306064447172569,128139521144107939,212862040214187215,335473621657410397,557280056195389076,878281343828123252,1458978128371980013,2299370409826959359,3819654328920550963,6019829885652754825,9999984858389672876,15760119247131305116,26180300246248467665,41260527855741160523,68540915880355730119,108021464320092176453,179442447394818722692,282803865104535368836,469786426304100437957,740390130993513930055,1229916831517482591179 mov $2,1 lpb $0 add $2,1 add $1,$2 add $1,1 sub $1,$0 trn $1,$0 sub $0,1 add $2,3 add $2,$1 sub $2,2 lpe add $1,4 mov $0,$1
/* * Copyright (C) 2011 Ericsson AB. All rights reserved. * Copyright (C) 2012 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * 3. Neither the name of Ericsson nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include "modules/mediastream/UserMediaRequest.h"

#include "bindings/core/v8/Dictionary.h"
#include "bindings/core/v8/ExceptionMessages.h"
#include "bindings/core/v8/ExceptionState.h"
#include "core/dom/Document.h"
#include "core/dom/ExceptionCode.h"
#include "core/dom/SpaceSplitString.h"
#include "core/frame/Deprecation.h"
#include "core/frame/HostsUsingFeatures.h"
#include "modules/mediastream/MediaConstraintsImpl.h"
#include "modules/mediastream/MediaStream.h"
#include "modules/mediastream/MediaStreamConstraints.h"
#include "modules/mediastream/MediaTrackConstraints.h"
#include "modules/mediastream/UserMediaController.h"
#include "platform/mediastream/MediaStreamCenter.h"
#include "platform/mediastream/MediaStreamDescriptor.h"

namespace blink {

// Converts one member of the getUserMedia() options union
// (null / boolean / MediaTrackConstraints) into WebMediaConstraints.
// A null or `false` member yields null constraints; `true` yields
// unconstrained defaults. Parse errors are reported via errorState.
static WebMediaConstraints parseOptions(
    ExecutionContext* context,
    const BooleanOrMediaTrackConstraints& options,
    MediaErrorState& errorState) {
  WebMediaConstraints constraints;

  Dictionary constraintsDictionary;
  if (options.isNull()) {
    // Do nothing.
  } else if (options.isMediaTrackConstraints()) {
    constraints = MediaConstraintsImpl::create(
        context, options.getAsMediaTrackConstraints(), errorState);
  } else {
    DCHECK(options.isBoolean());
    if (options.getAsBoolean()) {
      constraints = MediaConstraintsImpl::create();
    }
  }

  return constraints;
}

// Factory: parses the audio/video constraint options and rejects a
// request that asks for neither. Returns nullptr on any parse error
// (errorState carries the exception).
UserMediaRequest* UserMediaRequest::create(
    ExecutionContext* context,
    UserMediaController* controller,
    const MediaStreamConstraints& options,
    NavigatorUserMediaSuccessCallback* successCallback,
    NavigatorUserMediaErrorCallback* errorCallback,
    MediaErrorState& errorState) {
  WebMediaConstraints audio =
      parseOptions(context, options.audio(), errorState);
  if (errorState.hadException())
    return nullptr;

  WebMediaConstraints video =
      parseOptions(context, options.video(), errorState);
  if (errorState.hadException())
    return nullptr;

  if (audio.isNull() && video.isNull()) {
    errorState.throwTypeError(
        "At least one of audio and video must be requested");
    return nullptr;
  }

  return new UserMediaRequest(context, controller, audio, video,
                              successCallback, errorCallback);
}

// Test-only factory: no execution context, controller, or callbacks.
UserMediaRequest* UserMediaRequest::createForTesting(
    const WebMediaConstraints& audio,
    const WebMediaConstraints& video) {
  return new UserMediaRequest(nullptr, nullptr, audio, video, nullptr,
                              nullptr);
}

UserMediaRequest::UserMediaRequest(
    ExecutionContext* context,
    UserMediaController* controller,
    WebMediaConstraints audio,
    WebMediaConstraints video,
    NavigatorUserMediaSuccessCallback* successCallback,
    NavigatorUserMediaErrorCallback* errorCallback)
    : ContextLifecycleObserver(context),
      m_audio(audio),
      m_video(video),
      m_controller(controller),
      m_successCallback(successCallback),
      m_errorCallback(errorCallback) {}

UserMediaRequest::~UserMediaRequest() {}

// True when the request asks for an audio track.
bool UserMediaRequest::audio() const {
  return !m_audio.isNull();
}

// True when the request asks for a video track.
bool UserMediaRequest::video() const {
  return !m_video.isNull();
}

WebMediaConstraints UserMediaRequest::audioConstraints() const {
  return m_audio;
}

WebMediaConstraints UserMediaRequest::videoConstraints() const {
  return m_video;
}

// Returns true if the owning document is a secure context; records
// use counters either way. On failure, errorMessage is filled in by
// isSecureContext().
bool UserMediaRequest::isSecureContextUse(String& errorMessage) {
  Document* document = ownerDocument();

  if (document->isSecureContext(errorMessage)) {
    UseCounter::count(document->frame(),
                      UseCounter::GetUserMediaSecureOrigin);
    UseCounter::countCrossOriginIframe(
        *document, UseCounter::GetUserMediaSecureOriginIframe);
    HostsUsingFeatures::countAnyWorld(
        *document, HostsUsingFeatures::Feature::GetUserMediaSecureHost);
    return true;
  }

  // While getUserMedia is blocked on insecure origins, we still want to
  // count attempts to use it.
  Deprecation::countDeprecation(document->frame(),
                                UseCounter::GetUserMediaInsecureOrigin);
  Deprecation::countDeprecationCrossOriginIframe(
      *document, UseCounter::GetUserMediaInsecureOriginIframe);
  HostsUsingFeatures::countAnyWorld(
      *document, HostsUsingFeatures::Feature::GetUserMediaInsecureHost);
  return false;
}

// Returns the Document owning this request, or 0 once the execution
// context is gone.
Document* UserMediaRequest::ownerDocument() {
  if (ExecutionContext* context = getExecutionContext()) {
    return toDocument(context);
  }

  return 0;
}

// Forwards the request to the controller (no-op if already detached).
void UserMediaRequest::start() {
  if (m_controller)
    m_controller->requestUserMedia(this);
}

// Success path: wraps the descriptor in a MediaStream, stamps the
// requested constraints onto every track and its source, and invokes
// the JS success callback.
void UserMediaRequest::succeed(MediaStreamDescriptor* streamDescriptor) {
  if (!getExecutionContext())
    return;

  MediaStream* stream =
      MediaStream::create(getExecutionContext(), streamDescriptor);

  MediaStreamTrackVector audioTracks = stream->getAudioTracks();
  for (MediaStreamTrackVector::iterator iter = audioTracks.begin();
       iter != audioTracks.end(); ++iter) {
    (*iter)->component()->source()->setConstraints(m_audio);
    (*iter)->setConstraints(m_audio);
  }

  MediaStreamTrackVector videoTracks = stream->getVideoTracks();
  for (MediaStreamTrackVector::iterator iter = videoTracks.begin();
       iter != videoTracks.end(); ++iter) {
    (*iter)->component()->source()->setConstraints(m_video);
    (*iter)->setConstraints(m_video);
  }

  m_successCallback->handleEvent(stream);
}

// Failure path: user or UA denied permission.
void UserMediaRequest::failPermissionDenied(const String& message) {
  if (!getExecutionContext())
    return;
  m_errorCallback->handleEvent(NavigatorUserMediaError::create(
      NavigatorUserMediaError::NamePermissionDenied, message, String()));
}

// Failure path: a specific constraint could not be satisfied.
void UserMediaRequest::failConstraint(const String& constraintName,
                                      const String& message) {
  DCHECK(!constraintName.isEmpty());
  if (!getExecutionContext())
    return;
  m_errorCallback->handleEvent(NavigatorUserMediaError::create(
      NavigatorUserMediaError::NameConstraintNotSatisfied, message,
      constraintName));
}

// Failure path: UA-specific error with a caller-supplied name.
void UserMediaRequest::failUASpecific(const String& name,
                                      const String& message,
                                      const String& constraintName) {
  DCHECK(!name.isEmpty());
  if (!getExecutionContext())
    return;
  m_errorCallback->handleEvent(
      NavigatorUserMediaError::create(name, message, constraintName));
}

// Cancels the in-flight request when the owning context is destroyed.
void UserMediaRequest::contextDestroyed(ExecutionContext*) {
  if (m_controller) {
    m_controller->cancelUserMediaRequest(this);
    m_controller = nullptr;
  }
}

DEFINE_TRACE(UserMediaRequest) {
  visitor->trace(m_controller);
  visitor->trace(m_successCallback);
  visitor->trace(m_errorCallback);
  ContextLifecycleObserver::trace(visitor);
}

}  // namespace blink
; TITLE RWCHAR - Character read/write for MSHERC.
;***
;RWCHAR
;
; Copyright <C> 1987, 1988, Microsoft Corporation
;
;Purpose:
; Single character read and write.
;
;******************************************************************************
        include hgcdefs.inc

code    segment para public 'code'
        assume  cs:code,ds:code

        Public  GReadAttrChar
        Public  GWriteChar
        Extrn   WriteGrChar:near
        Extrn   MapGraphXYToVideoOffset:near
        Extrn   CharBuff:byte
        Extrn   HFont:byte
        Extrn   ZERO:word
        Extrn   CharHeight:byte

;-------------------------------------------------------------------------------
; A WRITE CHARACTER ONLY AT CURRENT CURSOR POSITION
;       BH => Display Page
;       CX => Count of chars to write
;       AL => Character
;-------------------------------------------------------------------------------
GWriteChar proc near
        mov     bh,1                    ;use attribute 1??
;-----Write the Graphics Character to the screen-----
        jmp     WriteGrChar             ;tail-call: WriteGrChar returns to caller
GWriteChar endp

;-------------------------------------------------------------------------------
; 8 READ ATTRIBUTE/CHARACTER AT CURRENT CURSOR POSITION
;       BH => Display Page
;       AL <= Character
;
; Reads back the 14 scan lines under the cursor, matches them against the
; resident font table (HFont) to recover the ASCII code, and validates the
; 9th-pixel column for block (179-223) vs. normal characters.
;-------------------------------------------------------------------------------
GReadAttrChar proc near

;------ Calculate the graphic y coordinate ------
        mov     bl,bh                   ;bx = page number
        xor     bh,bh
        mov     cx,Scr1_Buf_Seg         ;assume page 1
        or      bl,bl
        jnz     RdPg1                   ;go if was page 1
        mov     cx,Scr0_Buf_Seg         ;use page 0
RdPg1:
; FIX(review): the "push cx" below used to sit BEFORE the RdPg1 label, so on
; the page-1 path nothing was pushed, while the unconditional "pop es" later
; popped it regardless -- unbalancing the stack and loading a garbage segment
; into ES for page 1. CX holds the correct buffer segment on BOTH paths at
; this point, so the save belongs here, after the label.
        push    cx                      ;save page # segment for later
        shl     bx,1
        mov     bx,es:BIOSCURS[bx]      ;Fetch start coords.
        mov     al,bh                   ;ax = row coordinate
        xor     ah,ah
        mul     CharHeight              ;Calculate y coordinate
        push    ax                      ;Save y coordinate

;------ Character width is 9 bits ----
        xor     bh,bh                   ;bx = column coordinate
        mov     cx,bx
        shl     bx,1                    ;cols*2
        shl     bx,1                    ;cols*4
        shl     bx,1                    ;cols*8
        add     cx,bx                   ;cols*9 = start x coordinate

;------ Calculate the video buffer offset ------
        pop     dx                      ;Retrieve the y coordinate
        Call    MapGraphXYToVideoOffset ;Calculate buffer offset
        mov     di,cx                   ;Pnt di to video offset
        mov     bl,dl                   ;bl = bit position
        neg     bl                      ;bl = - x mod 8
        add     bl,7                    ;bl = 7 - x mod 8 = bit position
        pop     es                      ;set ES for specified page
        xor     cx,cx
        mov     cl,14                   ;cx = char height
        mov     si,offset CharBuff      ;place to store graphics char
        mov     dx,1                    ;DH=accumulate non-0 9th bits
                                        ;DL=accumulate equal 9th bits

; Read each of the 14 scan rows: left-justify the 9 pixels, store the first
; 8 into CharBuff for the font compare, and fold the 9th pixel into DH/DL.
RdCharLp:
        mov     ah,es:[di]              ;get first byte of char
        mov     al,es:[di+1]            ; and second
        xchg    cx,bx
        sal     ax,cl                   ;left justify both bytes in word
        xchg    cx,bx
        mov     [si],ah                 ;save first byte for comparison
        inc     si
        rol     al,1                    ;move 9th bit to least sig bit
        or      dh,al                   ;non-0 9th bits
        xor     al,ah                   ;AL=0 where AL==AH
        not     al                      ;AL=1 where AL==AH
        and     dl,al                   ;DL stays 1 if 9th bit == 8th bit
        add     di,GrNextScan           ;Inc to next scan
        jns     ScanOk                  ;Go to check for more rows
        sub     di,NextScanAdjust       ;Pnt di back on screen
ScanOk:
        loop    RdCharLp                ;read entire character
        push    dx                      ;save 9th bit info

; setup for font table compare
        xor     cx,cx
        mov     cl,14                   ;(cx) = font size
        mov     dx,cx                   ;(dx) = font size copy
        push    ds
        pop     es
        mov     di,offset HFont         ;(es:di) = @font table
        add     di,cx                   ;skip <NULL> in font table
        mov     ax,1                    ;(ax) = ascii of current char

; find match loop: linear scan of the font table for a bitmap identical to
; the 14 bytes captured in CharBuff.
MatchLp:
        push    di                      ;save current position
        mov     si,offset CharBuff      ;(ds:si) = data buffer
        repe    cmpsb                   ;cmp buffer with current char
        pop     di
        jz      Found                   ;if match, exit loop
        inc     ax                      ;if not, advance ascii count
        mov     cx,dx                   ;restore font size
        add     di,cx                   ;advance to next font entry
        cmp     ax,0ffh                 ;at end of font table?
        jbe     MatchLp                 ; if not, loop
        xor     ax,ax                   ;if no match, ascii=0

Found:
        pop     dx                      ;recover 9th bit info
; NOTE(review): "jbe" below also routes code 179 itself to NotBlock even
; though the comment says the block range is 179-223 inclusive -- verify
; against the original MSHERC sources whether "jb" was intended.
        cmp     al,179                  ;such chars are in range (179-223)
        jbe     NotBlock                ;go if not
        cmp     al,223
        ja      NotBlock                ;go if not
        or      dl,dl                   ;DL=1 if all 9th bits == 8th bits
        jnz     RdDun                   ;go if so, OK block character
RdBad:
        xor     ax,ax                   ;invalid block character
        jmp     short RdDun
NotBlock:
        and     dh,1                    ;any non-zero 9th bits
        jnz     RdBad                   ;go if so, not good for normal char
RdDun:
        mov     FunData.FunAX,AX
        mov     es,ZERO
        Ret                             ;Finished Read A Character/Attribute
GReadAttrChar endp

code    ends
        end
.size 8000 .text@48 jp lstatint .text@100 jp lbegin .data@143 80 .text@150 lbegin: ld c, 41 ld b, 02 ld d, 03 lbegin_waitm2: ldff a, (c) and a, d cmp a, b jrnz lbegin_waitm2 ld a, 08 ldff(c), a xor a, a ldff(0f), a ld a, 02 ldff(ff), a ei ld c, 0f .text@1000 lstatint: nop .text@1064 xor a, a ldff(41), a nop nop nop nop nop nop nop nop nop ldff a, (c) and a, 03 jp lprint_a .text@7000 lprint_a: push af ld b, 91 call lwaitly_b xor a, a ldff(40), a pop af ld(9800), a ld bc, 7a00 ld hl, 8000 ld d, a0 lprint_copytiles: ld a, (bc) inc bc ld(hl++), a dec d jrnz lprint_copytiles ld a, c0 ldff(47), a ld a, 80 ldff(68), a ld a, ff ldff(69), a ldff(69), a ldff(69), a ldff(69), a ldff(69), a ldff(69), a xor a, a ldff(69), a ldff(69), a ldff(43), a ld a, 91 ldff(40), a lprint_limbo: jr lprint_limbo .text@7400 lwaitly_b: ld c, 44 lwaitly_b_loop: ldff a, (c) cmp a, b jrnz lwaitly_b_loop ret .data@7a00 00 00 7f 7f 41 41 41 41 41 41 41 41 41 41 7f 7f 00 00 08 08 08 08 08 08 08 08 08 08 08 08 08 08 00 00 7f 7f 01 01 01 01 7f 7f 40 40 40 40 7f 7f 00 00 7f 7f 01 01 01 01 3f 3f 01 01 01 01 7f 7f 00 00 41 41 41 41 41 41 7f 7f 01 01 01 01 01 01 00 00 7f 7f 40 40 40 40 7e 7e 01 01 01 01 7e 7e 00 00 7f 7f 40 40 40 40 7f 7f 41 41 41 41 7f 7f 00 00 7f 7f 01 01 02 02 04 04 08 08 10 10 10 10 00 00 3e 3e 41 41 41 41 3e 3e 41 41 41 41 3e 3e 00 00 7f 7f 41 41 41 41 7f 7f 01 01 01 01 7f 7f
; A161834: a(n) = A161828(n)/3.
; Submitted by Jamie Morken(s4)
; 0,0,1,1,3,3,5,7,11,13
; LODA assembly: $0 holds the input n on entry. "trn" is truncated
; subtraction (result clamped at zero); "sub" may go negative.
mov $1,$0  ; $1 = n (keep a copy of the input)
div $0,2   ; $0 = floor(n/2)
sub $1,3   ; $1 = n - 3 (may be negative)
trn $1,3   ; $1 = max($1 - 3, 0)
add $0,$1  ; combine the two components
mul $0,2   ; double
trn $0,1   ; clamp: $0 = max($0 - 1, 0); final value of $0 is a(n)
; Exported console-capability constants for the VDM driver.
; NOTE(review): z80asm-style syntax ("defc", "@" binary-literal prefix);
; the CAP_GENCON_UNDERLINE symbol comes from the included ioctl.def.
PUBLIC VDM_CAPS
PUBLIC VDM_CAPS_MASK

INCLUDE "ioctl.def"

defc VDM_CAPS = CAP_GENCON_UNDERLINE
; @00001000 is a binary literal (value 8) -- presumably the underline
; capability bit; verify against ioctl.def.
defc VDM_CAPS_MASK = @00001000
// Copyright (c) 2011-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2017 The PIVX developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#if defined(HAVE_CONFIG_H)
#include "config/localtrade-config.h"
#endif

#include "optionsmodel.h"

#include "bitcoinunits.h"
#include "guiutil.h"

#include "amount.h"
#include "init.h"
#include "main.h"
#include "net.h"
#include "txdb.h" // for -dbcache defaults

#ifdef ENABLE_WALLET
#include "masternodeconfig.h"
#include "wallet.h"
#include "walletdb.h"
#endif

#include <QNetworkProxy>
#include <QSettings>
#include <QStringList>

OptionsModel::OptionsModel(QObject* parent) : QAbstractListModel(parent)
{
    Init();
}

// Records that |option| was overridden on the command line so the UI can
// show "option=value" for every setting it cannot change at runtime.
void OptionsModel::addOverriddenOption(const std::string& option)
{
    strOverriddenByCommandLine += QString::fromStdString(option) + "=" + QString::fromStdString(mapArgs[option]) + " ";
}

// Writes all missing QSettings with their default values
void OptionsModel::Init()
{
    resetSettings = false;
    QSettings settings;

    // Ensure restart flag is unset on client startup
    setRestartRequired(false);

    // These are Qt-only settings:

    // Window
    if (!settings.contains("fMinimizeToTray"))
        settings.setValue("fMinimizeToTray", false);
    fMinimizeToTray = settings.value("fMinimizeToTray").toBool();

    if (!settings.contains("fMinimizeOnClose"))
        settings.setValue("fMinimizeOnClose", false);
    fMinimizeOnClose = settings.value("fMinimizeOnClose").toBool();

    // Display
    if (!settings.contains("nDisplayUnit"))
        settings.setValue("nDisplayUnit", BitcoinUnits::LOT);
    nDisplayUnit = settings.value("nDisplayUnit").toInt();

    if (!settings.contains("strThirdPartyTxUrls"))
        settings.setValue("strThirdPartyTxUrls", "");
    strThirdPartyTxUrls = settings.value("strThirdPartyTxUrls", "").toString();

    if (!settings.contains("fCoinControlFeatures"))
        settings.setValue("fCoinControlFeatures", false);
    fCoinControlFeatures = settings.value("fCoinControlFeatures", false).toBool();

    if (!settings.contains("nPreferredDenom"))
        settings.setValue("nPreferredDenom", 0);
    // NOTE(review): fallback default here is the string "0" while the value
    // written above is the int 0 -- harmless via QVariant conversion, but
    // inconsistent; confirm intent.
    nPreferredDenom = settings.value("nPreferredDenom", "0").toLongLong();

    if (!settings.contains("nZeromintPercentage"))
        settings.setValue("nZeromintPercentage", 10);
    nZeromintPercentage = settings.value("nZeromintPercentage").toLongLong();

    if (!settings.contains("nAnonymizeLocalTradeAmount"))
        settings.setValue("nAnonymizeLocalTradeAmount", 1000);
    nAnonymizeLocalTradeAmount = settings.value("nAnonymizeLocalTradeAmount").toLongLong();

    // NOTE(review): getCount() is stored where a bool is read back later --
    // presumably "show the tab iff at least one masternode is configured";
    // verify this relies on int->bool QVariant conversion intentionally.
    if (!settings.contains("fShowMasternodesTab"))
        settings.setValue("fShowMasternodesTab", masternodeConfig.getCount());

    // These are shared with the core or have a command-line parameter
    // and we want command-line parameters to overwrite the GUI settings.
    //
    // If setting doesn't exist create it with defaults.
    //
    // If SoftSetArg() or SoftSetBoolArg() return false we were overridden
    // by command-line and show this in the UI.

    // Main
    if (!settings.contains("nDatabaseCache"))
        settings.setValue("nDatabaseCache", (qint64)nDefaultDbCache);
    if (!SoftSetArg("-dbcache", settings.value("nDatabaseCache").toString().toStdString()))
        addOverriddenOption("-dbcache");

    if (!settings.contains("nThreadsScriptVerif"))
        settings.setValue("nThreadsScriptVerif", DEFAULT_SCRIPTCHECK_THREADS);
    if (!SoftSetArg("-par", settings.value("nThreadsScriptVerif").toString().toStdString()))
        addOverriddenOption("-par");

    // Wallet
#ifdef ENABLE_WALLET
    if (!settings.contains("bSpendZeroConfChange"))
        settings.setValue("bSpendZeroConfChange", false);
    if (!SoftSetBoolArg("-spendzeroconfchange", settings.value("bSpendZeroConfChange").toBool()))
        addOverriddenOption("-spendzeroconfchange");
#endif

    // Network
    if (!settings.contains("fUseUPnP"))
        settings.setValue("fUseUPnP", DEFAULT_UPNP);
    if (!SoftSetBoolArg("-upnp", settings.value("fUseUPnP").toBool()))
        addOverriddenOption("-upnp");

    if (!settings.contains("fListen"))
        settings.setValue("fListen", DEFAULT_LISTEN);
    if (!SoftSetBoolArg("-listen", settings.value("fListen").toBool()))
        addOverriddenOption("-listen");

    if (!settings.contains("fUseProxy"))
        settings.setValue("fUseProxy", false);
    if (!settings.contains("addrProxy"))
        settings.setValue("addrProxy", "127.0.0.1:9050");
    // Only try to set -proxy, if user has enabled fUseProxy
    if (settings.value("fUseProxy").toBool() && !SoftSetArg("-proxy", settings.value("addrProxy").toString().toStdString()))
        addOverriddenOption("-proxy");
    else if (!settings.value("fUseProxy").toBool() && !GetArg("-proxy", "").empty())
        addOverriddenOption("-proxy");

    // Display
    if (!settings.contains("digits"))
        settings.setValue("digits", "2");
    if (!settings.contains("theme"))
        settings.setValue("theme", "");
    if (!settings.contains("fCSSexternal"))
        settings.setValue("fCSSexternal", false);
    if (!settings.contains("language"))
        settings.setValue("language", "");
    if (!SoftSetArg("-lang", settings.value("language").toString().toStdString()))
        addOverriddenOption("-lang");

    if (settings.contains("nZeromintPercentage"))
        SoftSetArg("-zeromintpercentage", settings.value("nZeromintPercentage").toString().toStdString());
    if (settings.contains("nPreferredDenom"))
        SoftSetArg("-preferredDenom", settings.value("nPreferredDenom").toString().toStdString());
    if (settings.contains("nAnonymizeLocalTradeAmount"))
        SoftSetArg("-anonymizelocaltradeamount", settings.value("nAnonymizeLocalTradeAmount").toString().toStdString());

    language = settings.value("language").toString();
}

// Clears every stored setting and flags the reset so window positions are
// also removed during shutdown.
void OptionsModel::Reset()
{
    QSettings settings;

    // Remove all entries from our QSettings object
    settings.clear();
    resetSettings = true; // Needed in localtrade.cpp during shutdown to also remove the window positions

    // default setting for OptionsModel::StartAtStartup - disabled
    if (GUIUtil::GetStartOnSystemStartup())
        GUIUtil::SetStartOnSystemStartup(false);
}

int OptionsModel::rowCount(const QModelIndex& parent) const
{
    return OptionIDRowCount;
}

// read QSettings values and return them
QVariant OptionsModel::data(const QModelIndex& index, int role) const
{
    if (role == Qt::EditRole) {
        QSettings settings;
        switch (index.row()) {
        case StartAtStartup:
            return GUIUtil::GetStartOnSystemStartup();
        case MinimizeToTray:
            return fMinimizeToTray;
        case MapPortUPnP:
#ifdef USE_UPNP
            return settings.value("fUseUPnP");
#else
            return false;
#endif
        case MinimizeOnClose:
            return fMinimizeOnClose;

        // default proxy
        case ProxyUse:
            return settings.value("fUseProxy", false);
        case ProxyIP: {
            // contains IP at index 0 and port at index 1
            QStringList strlIpPort = settings.value("addrProxy").toString().split(":", QString::SkipEmptyParts);
            return strlIpPort.at(0);
        }
        case ProxyPort: {
            // contains IP at index 0 and port at index 1
            QStringList strlIpPort = settings.value("addrProxy").toString().split(":", QString::SkipEmptyParts);
            return strlIpPort.at(1);
        }

#ifdef ENABLE_WALLET
        case SpendZeroConfChange:
            return settings.value("bSpendZeroConfChange");
        case ShowMasternodesTab:
            return settings.value("fShowMasternodesTab");
#endif
        case DisplayUnit:
            return nDisplayUnit;
        case ThirdPartyTxUrls:
            return strThirdPartyTxUrls;
        case Digits:
            return settings.value("digits");
        case Theme:
            return settings.value("theme");
        case Language:
            return settings.value("language");
        case CoinControlFeatures:
            return fCoinControlFeatures;
        case DatabaseCache:
            return settings.value("nDatabaseCache");
        case ThreadsScriptVerif:
            return settings.value("nThreadsScriptVerif");
        case ZeromintPercentage:
            return QVariant(nZeromintPercentage);
        case ZeromintPrefDenom:
            return QVariant(nPreferredDenom);
        case AnonymizeLocalTradeAmount:
            return QVariant(nAnonymizeLocalTradeAmount);
        case Listen:
            return settings.value("fListen");
        default:
            return QVariant();
        }
    }
    return QVariant();
}

// write QSettings values
bool OptionsModel::setData(const QModelIndex& index, const QVariant& value, int role)
{
    bool successful = true; /* set to false on parse error */
    if (role == Qt::EditRole) {
        QSettings settings;
        switch (index.row()) {
        case StartAtStartup:
            successful = GUIUtil::SetStartOnSystemStartup(value.toBool());
            break;
        case MinimizeToTray:
            fMinimizeToTray = value.toBool();
            settings.setValue("fMinimizeToTray", fMinimizeToTray);
            break;
        case MapPortUPnP: // core option - can be changed on-the-fly
            settings.setValue("fUseUPnP", value.toBool());
            MapPort(value.toBool());
            break;
        case MinimizeOnClose:
            fMinimizeOnClose = value.toBool();
            settings.setValue("fMinimizeOnClose", fMinimizeOnClose);
            break;

        // default proxy
        case ProxyUse:
            if (settings.value("fUseProxy") != value) {
                settings.setValue("fUseProxy", value.toBool());
                setRestartRequired(true);
            }
            break;
        case ProxyIP: {
            // contains current IP at index 0 and current port at index 1
            QStringList strlIpPort = settings.value("addrProxy").toString().split(":", QString::SkipEmptyParts);
            // if that key doesn't exist or has a changed IP
            if (!settings.contains("addrProxy") || strlIpPort.at(0) != value.toString()) {
                // construct new value from new IP and current port
                QString strNewValue = value.toString() + ":" + strlIpPort.at(1);
                settings.setValue("addrProxy", strNewValue);
                setRestartRequired(true);
            }
        } break;
        case ProxyPort: {
            // contains current IP at index 0 and current port at index 1
            QStringList strlIpPort = settings.value("addrProxy").toString().split(":", QString::SkipEmptyParts);
            // if that key doesn't exist or has a changed port
            if (!settings.contains("addrProxy") || strlIpPort.at(1) != value.toString()) {
                // construct new value from current IP and new port
                QString strNewValue = strlIpPort.at(0) + ":" + value.toString();
                settings.setValue("addrProxy", strNewValue);
                setRestartRequired(true);
            }
        } break;
#ifdef ENABLE_WALLET
        case SpendZeroConfChange:
            if (settings.value("bSpendZeroConfChange") != value) {
                settings.setValue("bSpendZeroConfChange", value);
                setRestartRequired(true);
            }
            break;
        case ShowMasternodesTab:
            if (settings.value("fShowMasternodesTab") != value) {
                settings.setValue("fShowMasternodesTab", value);
                setRestartRequired(true);
            }
            break;
#endif
        case DisplayUnit:
            setDisplayUnit(value);
            break;
        case ThirdPartyTxUrls:
            if (strThirdPartyTxUrls != value.toString()) {
                strThirdPartyTxUrls = value.toString();
                settings.setValue("strThirdPartyTxUrls", strThirdPartyTxUrls);
                setRestartRequired(true);
            }
            break;
        case Digits:
            if (settings.value("digits") != value) {
                settings.setValue("digits", value);
                setRestartRequired(true);
            }
            break;
        case Theme:
            if (settings.value("theme") != value) {
                settings.setValue("theme", value);
                setRestartRequired(true);
            }
            break;
        case Language:
            if (settings.value("language") != value) {
                settings.setValue("language", value);
                setRestartRequired(true);
            }
            break;
        case ZeromintPercentage:
            nZeromintPercentage = value.toInt();
            settings.setValue("nZeromintPercentage", nZeromintPercentage);
            emit zeromintPercentageChanged(nZeromintPercentage);
            break;
        case ZeromintPrefDenom:
            nPreferredDenom = value.toInt();
            settings.setValue("nPreferredDenom", nPreferredDenom);
            emit preferredDenomChanged(nPreferredDenom);
            break;
        case AnonymizeLocalTradeAmount:
            nAnonymizeLocalTradeAmount = value.toInt();
            settings.setValue("nAnonymizeLocalTradeAmount", nAnonymizeLocalTradeAmount);
            emit anonymizeLocalTradeAmountChanged(nAnonymizeLocalTradeAmount);
            break;
        case CoinControlFeatures:
            fCoinControlFeatures = value.toBool();
            settings.setValue("fCoinControlFeatures", fCoinControlFeatures);
            emit coinControlFeaturesChanged(fCoinControlFeatures);
            break;
        case DatabaseCache:
            if (settings.value("nDatabaseCache") != value) {
                settings.setValue("nDatabaseCache", value);
                setRestartRequired(true);
            }
            break;
        case ThreadsScriptVerif:
            if (settings.value("nThreadsScriptVerif") != value) {
                settings.setValue("nThreadsScriptVerif", value);
                setRestartRequired(true);
            }
            break;
        case Listen:
            if (settings.value("fListen") != value) {
                settings.setValue("fListen", value);
                setRestartRequired(true);
            }
            break;
        default:
            break;
        }
    }
    emit dataChanged(index, index);

    return successful;
}

/** Updates current unit in memory, settings and emits displayUnitChanged(newUnit) signal */
void OptionsModel::setDisplayUnit(const QVariant& value)
{
    if (!value.isNull()) {
        QSettings settings;
        nDisplayUnit = value.toInt();
        settings.setValue("nDisplayUnit", nDisplayUnit);
        emit displayUnitChanged(nDisplayUnit);
    }
}

// Fills |proxy| from the active core proxy configuration; returns whether a
// proxy is configured.
bool OptionsModel::getProxySettings(QNetworkProxy& proxy) const
{
    // Directly query current base proxy, because
    // GUI settings can be overridden with -proxy.
    proxyType curProxy;
    if (GetProxy(NET_IPV4, curProxy)) {
        proxy.setType(QNetworkProxy::Socks5Proxy);
        proxy.setHostName(QString::fromStdString(curProxy.proxy.ToStringIP()));
        proxy.setPort(curProxy.proxy.GetPort());

        return true;
    } else
        proxy.setType(QNetworkProxy::NoProxy);

    return false;
}

void OptionsModel::setRestartRequired(bool fRequired)
{
    QSettings settings;
    return settings.setValue("fRestartRequired", fRequired);
}

bool OptionsModel::isRestartRequired()
{
    QSettings settings;
    return settings.value("fRestartRequired", false).toBool();
}
; A108732: a(0)=22; if n odd, a(n) = a(n-1)/2, otherwise a(n) = 4*a(n-1).
; 22,11,44,22,88,44,176,88,352,176,704,352,1408,704,2816,1408,5632,2816,11264,5632,22528,11264,45056,22528,90112,45056,180224,90112,360448,180224,720896,360448,1441792,720896
; LODA assembly; $0 is the input n.
mov $1,$0  ; $1 = n
gcd $1,2   ; $1 = gcd(n,2): 2 when n is even, 1 when n is odd
lpb $0,1   ; loop while $0 remains positive (decreasing each pass)
sub $0,2   ;   consume two steps of n per iteration
mul $1,2   ;   double the accumulator
lpe
mul $1,11  ; scale by 11; result is held in $1 (output register of this program)
; A123903: Total number of "Emperors" in all tournaments on n labeled nodes.
; 0,1,2,6,32,320,6144,229376,16777216,2415919104,687194767360,387028092977152,432345564227567616,959230691832896684032,4231240368651202111471616,37138201178561408246973726720,649037107316853453566312041152512
; LODA assembly computing a(n) = n * 2^C(n-1,2), which matches the listed
; terms (e.g. a(3) = 3*2^1 = 6, a(4) = 4*2^3 = 32). $0 is the input n.
mov $1,2   ; $1 = 2 (base of the power)
mov $2,$0  ; $2 = n
sub $2,1   ; $2 = n-1
bin $2,2   ; $2 = C(n-1, 2)
pow $1,$2  ; $1 = 2^C(n-1,2)
mul $0,$1  ; $0 = n * 2^C(n-1,2)  -- output
Name: zel_enmy3.asm Type: file Size: 334969 Last-Modified: '2016-05-13T04:36:32Z' SHA-1: 03E16D840FA79B53D57770236DA6E734C06577EE Description: null
; ===============================================================
; Dec 2013
; ===============================================================
;
; char *strchrnul(const char *s, int c)
;
; Return ptr to first occurrence of c in string s or ptr to
; terminating 0 in s if c is not found.
;
; ===============================================================

SECTION code_clib
SECTION code_string

PUBLIC asm_strchrnul

asm_strchrnul:

   ; enter : c = char c
   ;         hl = char *s
   ;
   ; exit  : c = char c
   ;
   ;         found
   ;
   ;            carry reset
   ;            hl = ptr to c
   ;
   ;         not found
   ;
   ;            carry set
   ;            hl = ptr to terminating 0
   ;
   ; uses  : af, hl

loop:

   ld a,(hl)                ; a = current character
   cp c                     ; match? (when equal: Z set, carry reset)
   ret z                    ; found: hl points at c, carry already reset

   inc hl
   or a                     ; was that the terminating 0?
   jr nz, loop              ; no -> keep scanning

   dec hl                   ; step back onto the terminating 0
   scf                      ; carry set signals "not found"
   ret
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <script/uniteconsensus.h>

#include <primitives/transaction.h>
#include <pubkey.h>
#include <script/interpreter.h>
#include <version.h>

namespace {

/** A class that deserializes a single CTransaction one time. */
class TxInputStream
{
public:
    TxInputStream(int nTypeIn, int nVersionIn, const unsigned char *txTo, size_t txToLen) :
    m_type(nTypeIn),
    m_version(nVersionIn),
    m_data(txTo),
    m_remaining(txToLen)
    {}

    // Copies nSize bytes into pch, advancing the stream. Throws
    // std::ios_base::failure on a short read or null buffers, matching the
    // serializer's error contract.
    void read(char* pch, size_t nSize)
    {
        if (nSize > m_remaining)
            throw std::ios_base::failure(std::string(__func__) + ": end of data");

        if (pch == nullptr)
            throw std::ios_base::failure(std::string(__func__) + ": bad destination buffer");

        if (m_data == nullptr)
            throw std::ios_base::failure(std::string(__func__) + ": bad source buffer");

        memcpy(pch, m_data, nSize);
        m_remaining -= nSize;
        m_data += nSize;
    }

    template<typename T>
    TxInputStream& operator>>(T& obj)
    {
        ::Unserialize(*this, obj);
        return *this;
    }

    int GetVersion() const { return m_version; }
    int GetType() const { return m_type; }
private:
    const int m_type;
    const int m_version;
    const unsigned char* m_data;
    size_t m_remaining; // bytes left in the input buffer
};

// Stores serror through the optional out-parameter; always returns 0 so
// callers can "return set_error(...)" on failure paths.
inline int set_error(uniteconsensus_error* ret, uniteconsensus_error serror)
{
    if (ret)
        *ret = serror;
    return 0;
}

struct ECCryptoClosure
{
    ECCVerifyHandle handle;
};

// Keeps the ECC verification context alive for the lifetime of the library.
ECCryptoClosure instance_of_eccryptoclosure;
} // namespace

/** Check that all specified flags are part of the libconsensus interface. */
static bool verify_flags(unsigned int flags)
{
    return (flags & ~(uniteconsensus_SCRIPT_FLAGS_VERIFY_ALL)) == 0;
}

// Deserializes the transaction and runs script verification on input nIn.
// Returns the interpreter's result (non-zero on success); on any error sets
// *err and returns 0.
static int verify_script(const unsigned char *scriptPubKey, unsigned int scriptPubKeyLen, CAmount amount,
                                    const unsigned char *txTo        , unsigned int txToLen,
                                    unsigned int nIn, unsigned int flags, uniteconsensus_error* err)
{
    if (!verify_flags(flags)) {
        return set_error(err, uniteconsensus_ERR_INVALID_FLAGS);
    }
    try {
        TxInputStream stream(SER_NETWORK, PROTOCOL_VERSION, txTo, txToLen);
        CTransaction tx(deserialize, stream);
        if (nIn >= tx.vin.size())
            return set_error(err, uniteconsensus_ERR_TX_INDEX);
        // Reject trailing bytes: the serialized size must match exactly.
        if (GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) != txToLen)
            return set_error(err, uniteconsensus_ERR_TX_SIZE_MISMATCH);

        // Regardless of the verification result, the tx did not error.
        set_error(err, uniteconsensus_ERR_OK);

        PrecomputedTransactionData txdata(tx);
        return VerifyScript(tx.vin[nIn].scriptSig, CScript(scriptPubKey, scriptPubKey + scriptPubKeyLen), &tx.vin[nIn].scriptWitness, flags, TransactionSignatureChecker(&tx, nIn, amount, txdata), nullptr);
    } catch (const std::exception&) {
        return set_error(err, uniteconsensus_ERR_TX_DESERIALIZE); // Error deserializing
    }
}

// Public entry point with an explicit amount (required for witness checks).
int uniteconsensus_verify_script_with_amount(const unsigned char *scriptPubKey, unsigned int scriptPubKeyLen, int64_t amount,
                                    const unsigned char *txTo        , unsigned int txToLen,
                                    unsigned int nIn, unsigned int flags, uniteconsensus_error* err)
{
    CAmount am(amount);
    return ::verify_script(scriptPubKey, scriptPubKeyLen, am, txTo, txToLen, nIn, flags, err);
}

// Public entry point without an amount; rejects witness verification since
// that requires the spent amount.
int uniteconsensus_verify_script(const unsigned char *scriptPubKey, unsigned int scriptPubKeyLen,
                                   const unsigned char *txTo        , unsigned int txToLen,
                                   unsigned int nIn, unsigned int flags, uniteconsensus_error* err)
{
    if (flags & uniteconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) {
        return set_error(err, uniteconsensus_ERR_AMOUNT_REQUIRED);
    }

    CAmount am(0);
    return ::verify_script(scriptPubKey, scriptPubKeyLen, am, txTo, txToLen, nIn, flags, err);
}

unsigned int uniteconsensus_version()
{
    // Just use the API version for now
    return UNITECONSENSUS_API_VER;
}
; NASM routines to install the GDT and TSS prepared by the kernel's C code.
; Selector values below encode index*8 (+ RPL bits) into a GDT laid out
; elsewhere -- verify each against the actual __gdt_ptr table.
[global gdt_flush]
[extern __gdt_ptr]

; gdt_flush: load the GDT from __gdt_ptr, reload the data-segment registers,
; point fs/gs at their dedicated descriptors, and far-jump to reload cs.
gdt_flush:
    lgdt [__gdt_ptr]          ; load GDTR from the pointer built in C
    mov ax, 0x10              ; 0x10: presumably the kernel data segment
    mov ds, ax
    mov es, ax
    mov ss, ax
    ;mov fs, ax
    ;mov gs, ax
    mov ax, 0x30              ; separate descriptor for fs (purpose per GDT layout; TODO confirm)
    ;or ax, 3
    mov fs, ax ;gs
    mov ax, 0x38              ; separate descriptor for gs (TODO confirm)
    ;or ax, 3
    mov gs, ax
    jmp 0x08:__gdt_flush_exit ; far jump reloads cs with selector 0x08
__gdt_flush_exit:
    ret

[global tss_flush]

; tss_flush: load the task register with the TSS selector.
; 0x2B = index 5 with RPL=3 (per the original comment below).
tss_flush:
    mov ax, 0x2B    ; TSS is sixth entry in GDT
    ;mov ax, 0x28   ; TSS is sixth entry in GDT
    ltr ax          ; load task register
    ret
/*
 * Copyright 2021 MusicScience37 (Kenta Kabashima)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*!
 * \file
 * \brief Test of iteration_logger class.
 */
// NOTE(review): the whitespace INSIDE the expected-string literals below
// appears to have been collapsed by text extraction (items are configured
// with width 7, so e.g. "12345" should carry two leading spaces). The
// literals are reproduced here exactly as found -- restore the original
// padding from version control before relying on these expectations.
#include "num_collect/logging/iteration_logger.h"

#include <iterator>

#include <catch2/catch_test_macros.hpp>
#include <fmt/format.h>

#include "mock_log_sink.h"
#include "num_collect/base/index_type.h"
#include "num_collect/logging/log_config.h"
#include "num_collect/logging/log_tag_config.h"
#include "num_collect/logging/log_tag_view.h"
#include "num_collect/logging/logger.h"

// Unit tests of a single iteration_logger_item: value, label, and summary
// formatting for integer, string, and floating-point value types.
TEST_CASE("num_collect::logging::iteration_logger_item") {
    using num_collect::logging::iteration_logger_item;

    SECTION("format integers") {
        using value_type = num_collect::index_type;
        value_type val = 0;
        // Items read their value lazily through this getter.
        const auto get_val = [&val] { return val; };
        iteration_logger_item<value_type, decltype(get_val)> item{
            "abc", get_val};
        constexpr num_collect::index_type width = 7;
        item.width(width);
        CHECK(item.width() == width);

        val = 12345;  // NOLINT
        fmt::memory_buffer buffer;
        item.format_value_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == " 12345");

        buffer.clear();
        item.format_label_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == " abc");

        buffer.clear();
        item.format_summary_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == "abc=12345");
    }

    SECTION("format string") {
        using value_type = std::string;
        value_type val;
        const auto get_val = [&val] { return val; };
        iteration_logger_item<value_type, decltype(get_val)> item{
            "abc", get_val};
        constexpr num_collect::index_type width = 7;
        item.width(width);
        CHECK(item.width() == width);

        val = "def";
        fmt::memory_buffer buffer;
        item.format_value_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == " def");

        buffer.clear();
        item.format_label_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == " abc");

        buffer.clear();
        item.format_summary_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == "abc=def");
    }

    SECTION("format floating-point value") {
        using value_type = double;
        value_type val = 0.0;
        const auto get_val = [&val] { return val; };
        iteration_logger_item<value_type, decltype(get_val)> item{
            "abc", get_val};
        constexpr num_collect::index_type width = 7;
        constexpr num_collect::index_type precision = 3;
        item.width(width);
        item.precision(precision);
        CHECK(item.width() == width);
        CHECK(item.precision() == precision);

        val = 3.14;  // NOLINT
        fmt::memory_buffer buffer;
        item.format_value_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == " 3.14");

        buffer.clear();
        item.format_label_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == " abc");

        buffer.clear();
        item.format_summary_to(buffer);
        CHECK(std::string(buffer.data(), buffer.size()) == "abc=3.14");
    }
}

// Integration tests of iteration_logger against a mock sink: item setup,
// output/label periods, count reset, and summary output.
TEST_CASE("num_collect::logging::iteration_logger") {
    using trompeloeil::_;

    // Log every 2nd iteration; repeat the label row every 3rd written row.
    constexpr num_collect::index_type iteration_output_period = 2;
    constexpr num_collect::index_type iteration_label_period = 3;

    constexpr auto tag = num_collect::logging::log_tag_view(
        "num_collect::logging::iteration_logger_test");
    const auto sink =
        std::make_shared<num_collect_test::logging::mock_log_sink>();
    const auto config = num_collect::logging::log_tag_config()
                            .write_traces(true)
                            .write_iterations(true)
                            .write_summary(true)
                            .iteration_output_period(iteration_output_period)
                            .iteration_label_period(iteration_label_period)
                            .sink(sink);
    const auto logger = num_collect::logging::logger(tag, config);

    auto iteration_logger = num_collect::logging::iteration_logger();

    SECTION("set items") {
        constexpr num_collect::index_type width = 7;

        int val1 = 0;
        const auto get_val1 = [&val1] { return val1; };
        iteration_logger.append<int>("val1", get_val1)->width(width);

        double val2 = 0.0;
        iteration_logger.append("val2", val2)->width(width)->precision(3);

        std::string val3;
        iteration_logger.append("val3", val3)->width(width);

        std::vector<std::string> logs;
        // Expect exactly two writes: the label row and the value row.
        REQUIRE_CALL(*sink, write(_, _, _, _, _))
            .TIMES(2)
            // NOLINTNEXTLINE
            .LR_SIDE_EFFECT(logs.emplace_back(_5));

        val1 = 12345;  // NOLINT
        val2 = 3.14;   // NOLINT
        val3 = "abc";
        iteration_logger.write_iteration_to(logger);

        REQUIRE(logs.size() == 2);
        CHECK(logs.at(0) == " val1 val2 val3");
        CHECK(logs.at(1) == " 12345 3.14 abc");
    }

    SECTION("take period configurations into account") {
        constexpr num_collect::index_type width = 7;

        int val1 = 0;
        iteration_logger.append("val1", val1)->width(width);

        double val2 = 0.0;
        iteration_logger.append("val2", val2)->width(width)->precision(3);

        std::string val3;
        iteration_logger.append("val3", val3)->width(width);

        std::vector<std::string> logs;
        ALLOW_CALL(*sink, write(_, _, _, _, _))
            // NOLINTNEXTLINE
            .LR_SIDE_EFFECT(logs.emplace_back(_5));

        val2 = 3.14;  // NOLINT
        val3 = "abc";
        constexpr int repetition = 10;
        for (int i = 0; i < repetition; ++i) {
            val1 = i;
            iteration_logger.write_iteration_to(logger);
        }

        // Rows at iterations 0,2,4,6,8 plus label rows before the 1st and
        // 4th written rows (label period = 3).
        CHECK(logs.size() == 7);  // NOLINT
        CHECK(logs.at(0) == " val1 val2 val3");  // 0th time.
        CHECK(logs.at(1) == " 0 3.14 abc");      // 0th time.
        CHECK(logs.at(2) == " 2 3.14 abc");      // 2nd time.
        CHECK(logs.at(3) == " 4 3.14 abc");      // 4th time.
        CHECK(logs.at(4) == " val1 val2 val3");  // 6th time.
        CHECK(logs.at(5) == " 6 3.14 abc");      // 6th time.
        CHECK(logs.at(6) == " 8 3.14 abc");      // 8th time.
    }

    SECTION("reset iteration count") {
        constexpr num_collect::index_type width = 7;

        int val1 = 0;
        iteration_logger.append("val1", val1)->width(width);

        double val2 = 0.0;
        iteration_logger.append("val2", val2)->width(width)->precision(3);

        std::string val3;
        iteration_logger.append("val3", val3)->width(width);

        std::vector<std::string> logs;
        ALLOW_CALL(*sink, write(_, _, _, _, _))
            // NOLINTNEXTLINE
            .LR_SIDE_EFFECT(logs.emplace_back(_5));

        val2 = 3.14;  // NOLINT
        val3 = "abc";
        constexpr int repetition = 3;
        for (int i = 0; i < repetition; ++i) {
            // After a reset every iteration writes both label and value row.
            iteration_logger.reset_count();
            val1 = i;
            iteration_logger.write_iteration_to(logger);
        }

        CHECK(logs.size() == 6);  // NOLINT
        CHECK(logs.at(0) == " val1 val2 val3");  // 0th time.
        CHECK(logs.at(1) == " 0 3.14 abc");      // 0th time.
        CHECK(logs.at(2) == " val1 val2 val3");  // 1st time.
        CHECK(logs.at(3) == " 1 3.14 abc");      // 1st time.
        CHECK(logs.at(4) == " val1 val2 val3");  // 2nd time.
        CHECK(logs.at(5) == " 2 3.14 abc");      // 2nd time.
    }

    SECTION("write summary") {
        constexpr num_collect::index_type width = 7;

        int val1 = 0;
        iteration_logger.append("val1", val1)->width(width);

        double val2 = 0.0;
        iteration_logger.append("val2", val2)->width(width)->precision(3);

        std::string val3;
        iteration_logger.append("val3", val3)->width(width);

        std::vector<std::string> logs;
        ALLOW_CALL(*sink, write(_, _, _, _, _))
            // NOLINTNEXTLINE
            .LR_SIDE_EFFECT(logs.emplace_back(_5));

        val1 = 12345;  // NOLINT
        val2 = 3.14;   // NOLINT
        val3 = "abc";
        iteration_logger.write_summary_to(logger);

        CHECK(logs.size() == 1);  // NOLINT
        CHECK(logs.at(0) == "Last state: val1=12345, val2=3.14, val3=abc, ");
    }
}
/*=============================================================================
  PHAS0100ASSIGNMENT1: PHAS0100 Assignment 1 Game of Life Simulation.
  Copyright (c) University College London (UCL). All rights reserved.
  This software is distributed WITHOUT ANY WARRANTY; without even
  the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  See LICENSE.txt in the top level directory for details.
=============================================================================*/
#include <nbsimParticle.h>
#include <nbsimMassiveParticle.h>
#include <nbsimRandomParticle.h>
#include <chrono>
#include <ctime>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <Eigen/Dense>  // Example, header-only library, included in project for simplicity's sake.
#ifdef _OPENMP
#include <omp.h>  // omp_set_num_threads (the original used it without this include)
#endif

/**
 * \brief Print a usage message for the random-particle benchmark.
 * \param name Program name, typically argv[0].
 */
static void show_usage(const std::string& name)
{
    // The original streamed `name` directly against "Options:" with no
    // separator; a newline is inserted so the two do not run together.
    std::cerr << "Usage: " << name << "\n"
              << "Options:\n"
              << "\tOption: (Random Particle Simulator):\tUsers should input .1) step-size(unit: year) .2) length of time(unit: year)\n"
              << "Options:\n"
              << "\t-h,--help\t\t\tShow this help message.\n"
              << std::endl;
}

/**
 * \brief Random-particle N-body benchmark.
 *
 * Usage: prog <step-size-years> <total-time-years>.  Any other argument
 * count prints usage and exits 0.
 *
 * \return 0 on success (the original returned 1 from the success path,
 *         which reads as failure to shells and CI).
 */
int main(int argc, char** argv)
{
    // Single argument (e.g. -h/--help): print usage.  The original compared
    // `argv[1] == "-h"`, which compares pointers, not characters, and both
    // branches were identical anyway — so usage is printed unconditionally.
    if (argc == 2) {
        show_usage(argv[0]);
        return 0;
    }

    if (argc == 3) {
        const double step_size = std::stod(argv[1]);   // integration step (years)
        const double total_time = std::stod(argv[2]);  // simulated span (years)

        // Own the generator so it is released at scope exit
        // (the original `new`-ed it and never deleted it).
        auto generator = std::make_unique<nbsim::RandomParticle>();
        std::vector<std::shared_ptr<nbsim::MassiveParticle>> particles =
            generator->generateParticle();
        // Derive the count from the returned list instead of hard-coding
        // 2000, so a differently sized list cannot index out of range.
        const int n_particles = static_cast<int>(particles.size());

        // Fully connected gravity: every particle attracts every other one.
        for (int i = 0; i < n_particles; ++i) {
            for (int j = 0; j < n_particles; ++j) {
                if (i != j) {
                    particles[i]->addAttractor(particles[j]);
                }
            }
        }

        // Benchmark: CPU time and wall-clock time around the whole loop.
        const std::clock_t c_start = std::clock();
        const auto t_start = std::chrono::high_resolution_clock::now();

#ifdef _OPENMP
        omp_set_num_threads(16);
#endif
#pragma omp parallel
        {
            // Each thread executes the (sequential) time loop; the two
            // `omp for` loops split the particles across threads, and their
            // implicit barriers keep the acceleration and integration
            // phases of a timestep from overlapping.
            for (double t = 0; t < total_time; t += step_size) {
#pragma omp for
                for (int i = 0; i < n_particles; ++i) {
                    particles[i]->calculateAcceleration();
                }
#pragma omp for
                for (int i = 0; i < n_particles; ++i) {
                    particles[i]->integrateTimestep(step_size);
                }
            }
        }

        const std::clock_t c_end = std::clock();
        const auto t_end = std::chrono::high_resolution_clock::now();

        std::cout << "\nThe run time is: "
                  << 1000.0 * (c_end - c_start) / CLOCKS_PER_SEC << " ms\n"
                  << "Wall clock time passed: "
                  << std::chrono::duration<double, std::milli>(t_end - t_start).count()
                  << " ms\n";
        return 0;  // success
    }

    // Wrong argument count: print usage.
    show_usage(argv[0]);
    return 0;
}
.global s_prepare_buffers s_prepare_buffers: push %r13 push %r9 push %rbp push %rcx push %rdi push %rsi lea addresses_WC_ht+0x14f6c, %r9 nop nop cmp %r13, %r13 mov $0x6162636465666768, %rdi movq %rdi, %xmm6 movups %xmm6, (%r9) nop nop nop add $17875, %rbp lea addresses_UC_ht+0x7fc6, %rsi lea addresses_normal_ht+0x1a876, %rdi nop nop nop nop nop xor $46476, %r9 mov $97, %rcx rep movsl sub $6608, %r13 pop %rsi pop %rdi pop %rcx pop %rbp pop %r9 pop %r13 ret .global s_faulty_load s_faulty_load: push %r10 push %r8 push %rax push %rbp push %rcx push %rdi push %rsi // REPMOV lea addresses_A+0x35b6, %rsi mov $0x321dc50000000656, %rdi nop and $15548, %r8 mov $4, %rcx rep movsq nop nop nop nop nop xor $41105, %rcx // Store lea addresses_D+0x181d6, %rbp nop cmp %rsi, %rsi movl $0x51525354, (%rbp) nop nop cmp %rsi, %rsi // Faulty Load mov $0x321dc50000000656, %rcx nop nop nop nop nop sub %rbp, %rbp movups (%rcx), %xmm4 vpextrq $1, %xmm4, %r8 lea oracles, %r10 and $0xff, %r8 shlq $12, %r8 mov (%r10,%r8,1), %r8 pop %rsi pop %rdi pop %rcx pop %rbp pop %rax pop %r8 pop %r10 ret /* <gen_faulty_load> [REF] {'src': {'NT': False, 'same': False, 'congruent': 0, 'type': 'addresses_NC', 'AVXalign': False, 'size': 8}, 'OP': 'LOAD'} {'src': {'same': False, 'congruent': 4, 'type': 'addresses_A'}, 'OP': 'REPM', 'dst': {'same': True, 'congruent': 0, 'type': 'addresses_NC'}} {'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 6, 'type': 'addresses_D', 'AVXalign': False, 'size': 4}} [Faulty Load] {'src': {'NT': False, 'same': True, 'congruent': 0, 'type': 'addresses_NC', 'AVXalign': False, 'size': 16}, 'OP': 'LOAD'} <gen_prepare_buffer> {'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 0, 'type': 'addresses_WC_ht', 'AVXalign': False, 'size': 16}} {'src': {'same': False, 'congruent': 1, 'type': 'addresses_UC_ht'}, 'OP': 'REPM', 'dst': {'same': False, 'congruent': 5, 'type': 'addresses_normal_ht'}} {'ff': 1, '00': 9} 00 00 ff 00 00 00 00 00 00 00 */
; A210622: Decimal expansion of 377/120. ; 3,1,4,1,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6 mov $7,$0 mov $9,2 lpb $9,1 mov $0,$7 sub $9,1 add $0,$9 sub $0,1 mov $2,2 mov $3,$0 mov $5,$0 lpb $0,1 add $4,$2 add $2,1 sub $5,$4 mov $0,$5 trn $2,$3 gcd $2,$5 mul $2,5 lpe trn $4,$3 mov $6,$9 mov $8,$2 lpb $6,1 mov $1,$8 sub $6,1 lpe lpe lpb $7,1 sub $1,$8 mov $7,0 lpe add $1,1
; x86-64 interrupt / syscall entry stubs (NASM).  C-side dispatchers:
; irqDispatcher, sysCallDispatcher, exceptionDispatcher; scheduler performs
; the context switch for the timer tick (IRQ0).
GLOBAL _cli
GLOBAL _sti
GLOBAL picMasterMask
GLOBAL picSlaveMask
GLOBAL haltcpu
GLOBAL _hlt
GLOBAL _irq00Handler
EXTERN scheduler
GLOBAL _irq01Handler
GLOBAL _irq02Handler
GLOBAL _irq03Handler
GLOBAL _irq04Handler
GLOBAL _irq05Handler
EXTERN irqDispatcher
GLOBAL _syscallHandler
EXTERN sysCallDispatcher
GLOBAL _exception0Handler
GLOBAL _exception6Handler
EXTERN exceptionDispatcher

SECTION .text

; Save all general-purpose registers (r15 ends up on top of the stack).
%macro pushState 0
	push rax
	push rbx
	push rcx
	push rdx
	push rbp
	push rdi
	push rsi
	push r8
	push r9
	push r10
	push r11
	push r12
	push r13
	push r14
	push r15
%endmacro

; Exact inverse of pushState.
%macro popState 0
	pop r15
	pop r14
	pop r13
	pop r12
	pop r11
	pop r10
	pop r9
	pop r8
	pop rsi
	pop rdi
	pop rbp
	pop rdx
	pop rcx
	pop rbx
	pop rax
%endmacro

; Extended save used by the scheduler path: GPRs plus fs/gs.
; NOTE(review): r12 is pushed BEFORE r11 here, but popaq below pops r12
; before r11 — so on restore the two registers come back swapped unless
; the scheduler's saved-context layout deliberately compensates.  Confirm
; against the struct scheduler() reads through rsp before changing.
%macro pushaq 0
	push rax
	push rbx
	push rcx
	push rdx
	push rbp
	push rdi
	push rsi
	push r8
	push r9
	push r10
	push r12
	push r11
	push r13
	push r14
	push r15
	push fs
	push gs
%endmacro

%macro popaq 0
	pop gs
	pop fs
	pop r15
	pop r14
	pop r13
	pop r12
	pop r11
	pop r10
	pop r9
	pop r8
	pop rsi
	pop rdi
	pop rbp
	pop rdx
	pop rcx
	pop rbx
	pop rax
%endmacro

; Generic master-PIC IRQ stub: %1 = IRQ number passed to irqDispatcher.
%macro irqHandlerMaster 1
	pushState
	mov rdi, %1 ; parameter passing (IRQ number)
	mov rsi, rsp ; parameter passing (saved-register frame)
	call irqDispatcher
	; signal pic EOI (End of Interrupt)
	mov al, 20h
	out 20h, al
	popState
	iretq
%endmacro

; Generic exception stub: %1 = exception number.
%macro exceptionHandler 1
	pushState
	mov rdi, %1 ; parameter passing (exception number)
	mov rsi, rsp
	call exceptionDispatcher
	popState
	iretq
%endmacro

; int/syscall entry: snapshot the syscall argument registers into the
; sysRegisters block and hand its address to the C dispatcher.
_syscallHandler:
	push rdi
	mov [sysRegisters+8*0],rax
	mov [sysRegisters+8*1],rdi
	mov [sysRegisters+8*2],rsi
	mov [sysRegisters+8*3],rdx
	mov [sysRegisters+8*4],r10
	mov [sysRegisters+8*5],r8
	mov [sysRegisters+8*6],r9
	mov rdi,sysRegisters
	call sysCallDispatcher
	pop rdi
	iretq

; Enable interrupts, then halt until the next one arrives.
_hlt:
	sti
	hlt
	ret

_cli:
	cli
	ret

_sti:
	sti
	ret

; di = mask byte for the master PIC (port 0x21).
picMasterMask:
	push rbp
	mov rbp, rsp
	mov ax, di
	out 21h,al
	pop rbp
	retn

; di = mask byte for the slave PIC (port 0xA1).
picSlaveMask:
	push rbp
	mov rbp, rsp
	mov ax, di ; ax = 16-bit mask
	out 0A1h,al
	pop rbp
	retn

;8254 Timer (Timer Tick)
; Timer IRQ is the scheduling point: after dispatching, scheduler() is
; given the current rsp and returns the rsp of the next task to run.
_irq00Handler:
	pushaq
	mov rdi, 0 ; parameter passing
	mov rsi, rsp ; parameter passing
	call irqDispatcher
	mov rdi,rsp
	call scheduler
	mov rsp,rax ; switch to the stack returned by the scheduler
	; signal pic EOI (End of Interrupt)
	mov al, 20h
	out 20h, al
	popaq
	iretq

;Keyboard
_irq01Handler:
	irqHandlerMaster 1

;Cascade pic never called
_irq02Handler:
	irqHandlerMaster 2

;Serial Port 2 and 4
_irq03Handler:
	irqHandlerMaster 3

;Serial Port 1 and 3
_irq04Handler:
	irqHandlerMaster 4

;USB
_irq05Handler:
	irqHandlerMaster 5

;Zero Division Exception
_exception0Handler:
	exceptionHandler 0

;Invalid Opcode Exception
_exception6Handler:
	exceptionHandler 6

; Stop the CPU for good: interrupts off, then halt.
haltcpu:
	cli
	hlt
	ret

SECTION .bss
aux resq 1			; scratch qword (not referenced in this file — presumably used elsewhere)
; Register snapshot filled by _syscallHandler, in dispatcher order.
sysRegisters:
rsrax resq 1
rsrdi resq 1
rsrsi resq 1
rsrdx resq 1
rsr10 resq 1
rsr8 resq 1
rsr9 resq 1
; A123958: Expansion of x^3 / ( 1+2*x^2+2*x^3 ). ; Submitted by Christian Krause ; 0,0,1,0,-2,-2,4,8,-4,-24,-8,56,64,-96,-240,64,672,352,-1472,-2048,2240,7040,-384,-18560,-13312,37888,63744,-49152,-203264,-29184,504832,464896,-951296,-1939456,972800,5781504,1933312,-13508608,-15429632,23150592,57876480,-15441920,-162054144,-84869120,354992128,493846528,-540246016,-1697677312,92798976,4475846656,3209756672,-9137291264,-15371206656,11855069184,49016995840,7032274944,-121744130048,-112098541568,229423710208,467685343232,-234650337280,-1394218106880,-466070011904,3257736888320 mov $2,1 lpb $0 sub $0,1 mul $3,2 sub $3,$1 add $1,$3 dif $2,-1 add $1,$2 sub $2,$1 add $3,$2 lpe mov $0,$3
#include <iostream> #include <vector> using namespace std; vector<int> lookup_table[1000001]; int main() { int n, m, k, v; while (cin >> n >> m) { for (int i = 0; i < n; i++) { int elem; cin >> elem; lookup_table[elem].push_back(i+1); } for (int i = 0; i < m; i++) { cin >> k >> v; if (k >= lookup_table[v].size()) cout << 0 << endl; else cout << lookup_table[v][k-1] << endl; } } return 0; }
# ORG 8000 MVI H,34 // load by question MVI D,12 // load by question MVI A,99 // for 10s complement SUB D // sub for complement INR A // 10s complement obtained ADD H // adds 10s complement of D and H DAA // convert Hex to BCD MOV H,A // moved final answer to H ,as question HLT
db "CAT FERRET@" ; species name db "Its fur will stand" next "on end if it" next "smells a SEVIPER" page "nearby. It uses" next "its sharp claws to" next "tear up its foes.@"
; A044790: Numbers n such that string 7,7 occurs in the base 10 representation of n but not of n+1. ; 77,177,277,377,477,577,677,779,877,977,1077,1177,1277,1377,1477,1577,1677,1779,1877,1977,2077,2177,2277,2377,2477,2577,2677,2779,2877,2977,3077,3177,3277,3377,3477,3577,3677,3779,3877 mov $1,2 mov $4,$0 add $0,3 mod $0,10 lpb $0 trn $0,9 mul $1,$3 lpe add $1,77 mov $2,$4 mul $2,100 add $1,$2
; A157716: One-eighth of triangular numbers (integers only). ; 0,15,17,62,66,141,147,252,260,395,405,570,582,777,791,1016,1032,1287,1305,1590,1610,1925,1947,2292,2316,2691,2717,3122,3150,3585,3615,4080,4112,4607,4641,5166,5202,5757,5795,6380,6420,7035,7077,7722,7766,8441,8487,9192,9240,9975,10025,10790,10842,11637,11691,12516,12572,13427,13485,14370,14430,15345,15407,16352,16416,17391,17457,18462,18530,19565,19635,20700,20772,21867,21941,23066,23142,24297,24375,25560,25640,26855,26937,28182,28266,29541,29627,30932,31020,32355,32445,33810,33902,35297,35391,36816,36912,38367,38465,39950 mov $3,$0 lpb $0 trn $0,2 add $3,6 add $2,$3 add $1,$2 add $1,$2 add $1,1 mov $3,6 lpe mov $0,$1
;
; Constants
;
; AVR bitsliced implementation of a 64-bit block cipher in CBC mode over 16
; blocks, plus its key schedule.  The round structure (5 forward rounds, a
; middle S/iS layer, 5 inverted rounds, round constants derived from pi)
; looks like a PRINCE-style alpha-reflection cipher — TODO confirm.
; State is bitsliced: registers s2x0/s3x1 (x = bit plane 0..3) each hold one
; bit plane of all 16 nibbles, as drawn in the layout comments below.
.EQU INITV_NUM_BYTE = 8
.EQU PTEXT_NUM_BYTE = (8*16)
.EQU KEY0_NUM_BYTE = 8
.EQU KEY1_NUM_BYTE = 8
.EQU KEYR_NUM_BYTE = (8*12)

; Feature switches: key schedule / encryption and their inverses.
#define KEYSCHEDULE
#define ENCRYPT
#define iKEYSCHEDULE
#define DECRYPT

; Original State:
;303, 302, 301, 300 : 203, 202, 201, 200 : 103, 102, 101, 100 : 003, 002, 001, 000
;313, 312, 311, 310 : 213, 212, 211, 210 : 113, 112, 111, 110 : 013, 012, 011, 010
;323, 322, 321, 320 : 223, 222, 221, 220 : 123, 122, 121, 120 : 023, 022, 021, 020
;333, 332, 331, 330 : 233, 232, 231, 230 : 133, 132, 131, 130 : 033, 032, 031, 030
;the 0-bit in the nibbles
;s200: 320 220 120 020 : 300 200 100 000
;s310: 330 230 130 030 : 310 210 110 010
;the 1-bit in the nibbles
;s201: 321 221 121 021 : 301 201 101 001
;s311: 331 231 131 031 : 311 211 111 011
;the 2-bit in the nibbles
;s202: 322 222 122 022 : 302 202 102 002
;s312: 332 232 132 032 : 312 212 112 012
;the 3-bit in the nibbles
;s203: 323 223 123 023 : 303 203 103 003
;s313: 333 233 133 033 : 313 213 113 013
; --- register allocation (k0..k7 and t0..t7 alias r8..r15) ---
.def s200 =r0
.def s310 =r1
.def s201 =r2
.def s311 =r3
.def s202 =r4
.def s312 =r5
.def s203 =r6
.def s313 =r7
.def t0 =r8
.def t1 =r9
.def t2 =r10
.def t3 =r11
.def t4 =r12
.def t5 =r13
.def t6 =r14
.def t7 =r15
.def m66 =r16 ; ldi m66, 0b01100110
.def m99 =r17 ; ldi m99, 0b10011001
.def mf0 =r18 ; ldi mf0, 0b11110000
.def m0f =r19 ; ldi m0f, 0b00001111
.def p0 =r20
.def p1 =r21
.def k0 =r8
.def k1 =r9
.def k2 =r10
.def k3 =r11
.def k4 =r12
.def k5 =r13
.def k6 =r14
.def k7 =r15
.def kt0 =r20
.def kt1 =r21
.def bn =r22  ; number of CBC blocks
.def bcnt =r23 ; block counter
.def rrn =r24  ; number of rounds for the current phase
.def rcnt =r25 ; round counter
.def XL =r26
.def XH =r27
.def YL =r28
.def YH =r29
.def ZL =r30
.def ZH =r31

;;;****************************************************************************
;;;
;;; load_init
;;;
; Load the 8-byte IV (or any 8-byte block) from X into t0..t7.
.MACRO loadInitv
ld t0, X+
ld t1, X+
ld t2, X+
ld t3, X+
ld t4, X+
ld t5, X+
ld t6, X+
ld t7, X+
.ENDMACRO

;;;****************************************************************************
;;;
; XOR the next 8 plaintext bytes from X into t0..t7 (CBC chaining: t regs
; hold the IV / previous ciphertext at this point).
.MACRO loadPlain
ld kt0, X+
eor t0, kt0
ld kt0, X+
eor t1, kt0
ld kt0, X+
eor t2, kt0
ld kt0, X+
eor t3, kt0
ld kt0, X+
eor t4, kt0
ld kt0, X+
eor t5, kt0
ld kt0, X+
eor t6, kt0
ld kt0, X+
eor t7, kt0
.ENDMACRO

; Write the 8 ciphertext bytes in t0..t7 to Y.
.MACRO storeCipher
st Y+, t0
st Y+, t1
st Y+, t2
st Y+, t3
st Y+, t4
st Y+, t5
st Y+, t6
st Y+, t7
.ENDMACRO

; Read the next 8 ciphertext bytes from Y into t0..t7.
.MACRO loadCipher
ld t0, Y+
ld t1, Y+
ld t2, Y+
ld t3, Y+
ld t4, Y+
ld t5, Y+
ld t6, Y+
ld t7, Y+
.ENDMACRO

; CBC decrypt output: XOR decrypted state with bytes at X (IV / previous
; chaining value held in the X stream) and store the plaintext back at X.
.MACRO storePlain
ld kt0, X
eor t0, kt0
st X+, t0
ld kt0, X
eor t1, kt0
st X+, t1
ld kt0, X
eor t2, kt0
st X+, t2
ld kt0, X
eor t3, kt0
st X+, t3
ld kt0, X
eor t4, kt0
st X+, t4
ld kt0, X
eor t5, kt0
st X+, t5
ld kt0, X
eor t6, kt0
st X+, t6
ld kt0, X
eor t7, kt0
st X+, t7
.ENDMACRO

; Rotate one byte (@0) bit-by-bit into the four bit-plane registers
; @1..@4 via carry — converts byte order into bitsliced order.
.MACRO Reorder1Byte
ror @0
ror @1
ror @0
ror @2
ror @0
ror @3
ror @0
ror @4
.ENDMACRO

; Inverse of Reorder1Byte: gather one bit from each plane back into @0.
.MACRO Reorder1ByteOutput
ror @1
ror @0
ror @2
ror @0
ror @3
ror @0
ror @4
ror @0
.ENDMACRO

#if defined(ENCRYPT) || defined(DECRYPT)
; Pack the byte-ordered block in t0..t7 into the bitsliced state
; s200..s313 (the t-byte order below fixes the nibble layout).
ReorderInput:
Reorder1Byte t1, s310, s311, s312, s313
Reorder1Byte t1, s200, s201, s202, s203
Reorder1Byte t3, s310, s311, s312, s313
Reorder1Byte t3, s200, s201, s202, s203
Reorder1Byte t5, s310, s311, s312, s313
Reorder1Byte t5, s200, s201, s202, s203
Reorder1Byte t7, s310, s311, s312, s313
Reorder1Byte t7, s200, s201, s202, s203
Reorder1Byte t0, s310, s311, s312, s313
Reorder1Byte t0, s200, s201, s202, s203
Reorder1Byte t2, s310, s311, s312, s313
Reorder1Byte t2, s200, s201, s202, s203
Reorder1Byte t4, s310, s311, s312, s313
Reorder1Byte t4, s200, s201, s202, s203
Reorder1Byte t6, s310, s311, s312, s313
Reorder1Byte t6, s200, s201, s202, s203
ret
#endif

; Unpack the bitsliced state back into bytes t0..t7 (inverse of ReorderInput).
.MACRO ReorderOutput
Reorder1ByteOutput t1, s310, s311, s312, s313
Reorder1ByteOutput t1, s200, s201, s202, s203
Reorder1ByteOutput t3, s310, s311, s312, s313
Reorder1ByteOutput t3, s200, s201, s202, s203
Reorder1ByteOutput t5, s310, s311, s312, s313
Reorder1ByteOutput t5, s200, s201, s202, s203
Reorder1ByteOutput t7, s310, s311, s312, s313
Reorder1ByteOutput t7, s200, s201, s202, s203
Reorder1ByteOutput t0, s310, s311, s312, s313
Reorder1ByteOutput t0, s200, s201, s202, s203
Reorder1ByteOutput t2, s310, s311, s312, s313
Reorder1ByteOutput t2, s200, s201, s202, s203
Reorder1ByteOutput t4, s310, s311, s312, s313
Reorder1ByteOutput t4, s200, s201, s202, s203
Reorder1ByteOutput t6, s310, s311, s312, s313
Reorder1ByteOutput t6, s200, s201, s202, s203
.ENDMACRO

#if defined(KEYSCHEDULE) || defined(iKEYSCHEDULE)
; Load 8 key bytes from Y into k0..k7.
loadYTok:
ld k0,Y+
ld k1,Y+
ld k2,Y+
ld k3,Y+
ld k4,Y+
ld k5,Y+
ld k6,Y+
ld k7,Y+
ret

; Store the bitsliced state to X in plane order.
storesToX:
st X+, s200
st X+, s310
st X+, s201
st X+, s311
st X+, s202
st X+, s312
st X+, s203
st X+, s313
ret
#endif

; Store to X without advancing it (rewind 8 first).
.MACRO istoresToX
sbiw X, 8
rcall storesToX
.ENDMACRO

; Load key k1 (SRAM_KTEXT1), bitslice it, and store it as round-key 0 at X.
.MACRO key_pre
ldi YH, high(SRAM_KTEXT1)
ldi YL, low(SRAM_KTEXT1)
rcall loadYTok
rcall ReorderInput
rcall storesToX
.ENDMACRO

; XOR the next round-constant byte from flash (Z) into @0 and store at X.
.MACRO key_rc
lpm kt0, Z+ ; 1 ins, 3 clocks
eor @0, kt0
st X+, @0
.ENDMACRO

#if defined(KEYSCHEDULE) || defined(iKEYSCHEDULE)
; Produce one round key: previous round key (SRAM_KTEXTR) XOR next 8 RC bytes.
key_rc_oneRound:
ldi YH, high(SRAM_KTEXTR)
ldi YL, low(SRAM_KTEXTR)
rcall loadYTok
key_rc k0
key_rc k1
key_rc k2
key_rc k3
key_rc k4
key_rc k5
key_rc k6
key_rc k7
ret
#endif

; XOR one byte from Y into @0.
.MACRO key_01
ld kt0, Y+
eor @0, kt0
.ENDMACRO

; XOR one byte from Y and one RC byte from flash into @0.
.MACRO key_rc_01
ld kt0, Y+
eor @0, kt0
lpm kt0, Z+
eor @0, kt0
.ENDMACRO

; Final key-schedule step: mixes the rotated-by-1 k0 key (SRAM_KTEXT0)
; with the last round key and RC, producing the whitening keys.
.MACRO key_rc_01_Post
ldi YH, high(SRAM_KTEXT0)
ldi YL, low(SRAM_KTEXT0)
rcall loadYTok
; rotate the 64-bit key right by 1 through T, then fold bit 6 of the top
; byte back into bit 0 (k0' = (k0 >>> 1) ^ (k0 >> 63) style derivation)
bst k0, 0
ror k7
ror k6
ror k5
ror k4
ror k3
ror k2
ror k1
ror k0
bld k7, 7
eor kt0, kt0
bst k7, 6
bld kt0, 0
eor k0, kt0
rcall ReorderInput
ldi YH, high(SRAM_KTEXTR)
ldi YL, low(SRAM_KTEXTR)
key_rc_01 s200
key_rc_01 s310
key_rc_01 s201
key_rc_01 s311
key_rc_01 s202
key_rc_01 s312
key_rc_01 s203
key_rc_01 s313
rcall storesToX
ldi YH, high(SRAM_KTEXT0)
ldi YL, low(SRAM_KTEXT0)
rcall loadYTok
rcall ReorderInput
ldi YH, high(SRAM_KTEXTR)
ldi YL, low(SRAM_KTEXTR)
key_01 s200
key_01 s310
key_01 s201
key_01 s311
key_01 s202
key_01 s312
key_01 s203
key_01 s313
ldi XH, high(SRAM_KTEXTR)
ldi XL, low(SRAM_KTEXTR)
rcall storesToX
.ENDMACRO

#ifdef KEYSCHEDULE
; Expand the two 8-byte keys into the 12 round keys at SRAM_KTEXTR.
keySchedule:
ldi XH, high(SRAM_KTEXTR)
ldi XL, low(SRAM_KTEXTR)
key_pre
ldi ZH, high(RC<<1)
ldi ZL, low(RC<<1)
rcall key_rc_oneRound ; 1
rcall key_rc_oneRound ; 2
rcall key_rc_oneRound ; 3
rcall key_rc_oneRound ; 4
rcall key_rc_oneRound ; 5
rcall key_rc_oneRound ; 6
rcall key_rc_oneRound ; 7
rcall key_rc_oneRound ; 8
rcall key_rc_oneRound ; 9
rcall key_rc_oneRound ; 10
key_rc_01_Post ; 11
ret
#endif

; Inverse schedule writes round keys back-to-front: rewind two slots first.
.MACRO ikey_rc_oneRound
sbiw X, 16
rcall key_rc_oneRound
.ENDMACRO

; Inverse-schedule final step (mirror of key_rc_01_Post with the two
; whitening keys written to the opposite ends of the round-key table).
.MACRO ikey_rc_01_Post
ldi YH, high(SRAM_KTEXT0)
ldi YL, low(SRAM_KTEXT0)
rcall loadYTok
rcall ReorderInput
ldi YH, high(SRAM_KTEXTR)
ldi YL, low(SRAM_KTEXTR)
key_01 s200
key_01 s310
key_01 s201
key_01 s311
key_01 s202
key_01 s312
key_01 s203
key_01 s313
ldi XH, high(SRAM_KTEXTR+KEYR_NUM_BYTE-8)
ldi XL, low(SRAM_KTEXTR+KEYR_NUM_BYTE-8)
rcall storesToX
ldi YH, high(SRAM_KTEXT0)
ldi YL, low(SRAM_KTEXT0)
rcall loadYTok
bst k0, 0
ror k7
ror k6
ror k5
ror k4
ror k3
ror k2
ror k1
ror k0
bld k7, 7
eor kt0, kt0
bst k7, 6
bld kt0, 0
eor k0, kt0
rcall ReorderInput
ldi YH, high(SRAM_KTEXTR)
ldi YL, low(SRAM_KTEXTR)
key_rc_01 s200
key_rc_01 s310
key_rc_01 s201
key_rc_01 s311
key_rc_01 s202
key_rc_01 s312
key_rc_01 s203
key_rc_01 s313
ldi XH, high(SRAM_KTEXTR)
ldi XL, low(SRAM_KTEXTR)
rcall storesToX
.ENDMACRO

#ifdef iKEYSCHEDULE
; Expand keys for decryption (round keys laid out in reverse order).
ikeySchedule:
ldi XH, high(SRAM_KTEXTR)
ldi XL, low(SRAM_KTEXTR)
key_pre
ldi XH, high(SRAM_KTEXTR+KEYR_NUM_BYTE)
ldi XL, low(SRAM_KTEXTR+KEYR_NUM_BYTE)
ldi ZH, high(RC<<1)
ldi ZL, low(RC<<1)
ikey_rc_oneRound ; 1
ikey_rc_oneRound ; 2
ikey_rc_oneRound ; 3
ikey_rc_oneRound ; 4
ikey_rc_oneRound ; 5
ikey_rc_oneRound ; 6
ikey_rc_oneRound ; 7
ikey_rc_oneRound ; 8
ikey_rc_oneRound ; 9
ikey_rc_oneRound ; 10
ikey_rc_01_Post
ret
#endif

; XOR the next 8 round-key bytes (at Z, in SRAM) into the bitsliced state.
.MACRO KeyXor
ld t0, Z+
ld t1, Z+
eor s200, t0
eor s310, t1
ld t0, Z+
ld t1, Z+
eor s201, t0
eor s311, t1
ld t0, Z+
ld t1, Z+
eor s202, t0
eor s312, t1
ld t0, Z+
ld t1, Z+
eor s203, t2
eor s313, t3
.ENDMACRO
; ;******** R5 ; k1+RC4 ; | ; S ; | ; M ; | ; SR ; ; ;******** Rmiddle ; k1+RC5 ; | ; S ; | ; M ; | ; iS ; | ; k1+RC6 ; ; ; ;******** iR1 ; iSR ; | ; M ; | ; iS ; | ; k1+RC7 ; ; ; ;******** iR2 ; iSR ; | ; M ; | ; iS ; | ; k1+RC8 ; ; ;******** iR3 ; iSR ; | ; M ; | ; iS ; | ; k1+RC9 ; ; ;******** iR4 ; iSR ; | ; M ; | ; iS ; | ; k1+RC10 ; ; ;******** iR5 ; iSR ; | ; M ; | ; iS ; | ; k1+RC11+k0'
.global s_prepare_buffers s_prepare_buffers: push %r12 push %r15 push %r8 push %rax push %rbx push %rcx push %rdi push %rsi lea addresses_D_ht+0x13751, %r8 and %rcx, %rcx vmovups (%r8), %ymm1 vextracti128 $1, %ymm1, %xmm1 vpextrq $0, %xmm1, %rax nop sub %r12, %r12 lea addresses_WC_ht+0x1e87b, %rsi lea addresses_UC_ht+0x1adb, %rdi clflush (%rdi) nop nop nop nop add %rax, %rax mov $109, %rcx rep movsb nop nop nop dec %rsi lea addresses_normal_ht+0x96db, %rsi lea addresses_A_ht+0x1dc3b, %rdi nop nop nop nop sub $36957, %rax mov $111, %rcx rep movsl nop nop xor %r8, %r8 lea addresses_D_ht+0x11b, %r12 nop nop nop and $3823, %rbx mov $0x6162636465666768, %rcx movq %rcx, %xmm2 and $0xffffffffffffffc0, %r12 vmovntdq %ymm2, (%r12) nop nop nop nop nop xor %rcx, %rcx lea addresses_UC_ht+0xeedb, %rsi lea addresses_WC_ht+0x8ddb, %rdi nop nop nop nop dec %r15 mov $118, %rcx rep movsl nop nop add %r8, %r8 lea addresses_WC_ht+0x14e2e, %rsi lea addresses_WT_ht+0xcfcd, %rdi nop nop nop dec %r8 mov $20, %rcx rep movsq nop nop nop nop nop sub %rbx, %rbx lea addresses_WT_ht+0x14163, %r12 clflush (%r12) nop nop inc %rax mov $0x6162636465666768, %rcx movq %rcx, %xmm0 vmovups %ymm0, (%r12) nop nop nop nop sub $63304, %rax lea addresses_WT_ht+0x1bf5b, %rsi lea addresses_D_ht+0x13f7b, %rdi clflush (%rdi) nop nop nop sub $30843, %r12 mov $93, %rcx rep movsb nop dec %r8 pop %rsi pop %rdi pop %rcx pop %rbx pop %rax pop %r8 pop %r15 pop %r12 ret .global s_faulty_load s_faulty_load: push %r12 push %r13 push %rcx push %rsi // Faulty Load lea addresses_normal+0x112db, %rsi nop nop and $22142, %rcx mov (%rsi), %r13d lea oracles, %r12 and $0xff, %r13 shlq $12, %r13 mov (%r12,%r13,1), %r13 pop %rsi pop %rcx pop %r13 pop %r12 ret /* <gen_faulty_load> [REF] {'src': {'type': 'addresses_normal', 'AVXalign': False, 'size': 8, 'NT': False, 'same': False, 'congruent': 0}, 'OP': 'LOAD'} [Faulty Load] {'src': {'type': 'addresses_normal', 'AVXalign': False, 'size': 4, 'NT': False, 'same': True, 'congruent': 
0}, 'OP': 'LOAD'} <gen_prepare_buffer> {'src': {'type': 'addresses_D_ht', 'AVXalign': False, 'size': 32, 'NT': False, 'same': False, 'congruent': 0}, 'OP': 'LOAD'} {'src': {'type': 'addresses_WC_ht', 'congruent': 2, 'same': False}, 'OP': 'REPM', 'dst': {'type': 'addresses_UC_ht', 'congruent': 11, 'same': False}} {'src': {'type': 'addresses_normal_ht', 'congruent': 10, 'same': False}, 'OP': 'REPM', 'dst': {'type': 'addresses_A_ht', 'congruent': 4, 'same': False}} {'OP': 'STOR', 'dst': {'type': 'addresses_D_ht', 'AVXalign': False, 'size': 32, 'NT': True, 'same': False, 'congruent': 6}} {'src': {'type': 'addresses_UC_ht', 'congruent': 10, 'same': False}, 'OP': 'REPM', 'dst': {'type': 'addresses_WC_ht', 'congruent': 7, 'same': False}} {'src': {'type': 'addresses_WC_ht', 'congruent': 0, 'same': False}, 'OP': 'REPM', 'dst': {'type': 'addresses_WT_ht', 'congruent': 1, 'same': False}} {'OP': 'STOR', 'dst': {'type': 'addresses_WT_ht', 'AVXalign': False, 'size': 32, 'NT': False, 'same': False, 'congruent': 3}} {'src': {'type': 'addresses_WT_ht', 'congruent': 5, 'same': False}, 'OP': 'REPM', 'dst': {'type': 'addresses_D_ht', 'congruent': 4, 'same': False}} {'34': 21829} 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 
34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 
34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 */
; A285354: Positions of 1 in A285351; complement of A285353.
; Submitted by Jamie Morken(s2)
; 2,4,5,6,8,10,11,12,14,15,17,18,20,22,23,24,26,28,29,30,32,33,35,36,38,40,41,42,44,45,47,49,50,51,53,54,56,58,59,60,62,64,65,66,68,69,71,72,74,76,77,78,80,82,83,84,86,87,89,90,92,94,95,96,98,99,101,103,104,105,107,108,110,112,113,114,116,118,119,120,122,123,125,126,128,130,131,132,134,135,137,139,140,141,143,145,146,147,149,150

; LODA assembly. On entry $0 holds the input n; on exit $0 holds a(n).
; Register roles (as used below): $2 = countdown loop counter, $3 = running
; accumulator that starts at n, $1 = per-iteration term fetched from A284817.

mov $2,$0          ; $2 <- n (number of loop iterations)
mov $3,$0          ; $3 <- n (accumulator seed)
lpb $2             ; loop while $2 > 0
  mov $0,$3        ; $0 <- current accumulator value
  sub $2,1         ; decrement the loop counter
  sub $0,$2        ; $0 <- $3 - $2
  mov $1,727       ; constant offset used to index into the referenced sequence
  sub $1,$0        ; $1 <- 727 - $0
  seq $1,284817 ; a(n) = 2n - 1 - A284776(n).
  add $3,$1        ; fold the fetched term into the accumulator
lpe
mov $0,$3          ; result <- accumulator
add $0,2           ; final offset: a(n) = accumulator + 2
;------------------------------------------------------------------------------
;
; Copyright (c) 2006 - 2008, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution.  The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
;   ScanMem16.Asm
;
; Abstract:
;
;   ScanMem16 function
;
; Notes:
;
;   The following BaseMemoryLib instances contain the same copy of this file:
;
;       BaseMemoryLibRepStr
;       BaseMemoryLibMmx
;       BaseMemoryLibSse2
;       BaseMemoryLibOptDxe
;       BaseMemoryLibOptPei
;
;------------------------------------------------------------------------------

    DEFAULT REL
    SECTION .text

;------------------------------------------------------------------------------
; CONST VOID *
; EFIAPI
; InternalMemScanMem16 (
;   IN      CONST VOID                *Buffer,
;   IN      UINTN                     Length,
;   IN      UINT16                    Value
;   );
;
; Scans Length 16-bit words starting at Buffer for the first word equal to
; Value.  Returns a pointer to the matching word, or NULL when no word
; matches.  Microsoft x64 calling convention: rcx = Buffer, rdx = Length,
; r8 = Value.  NOTE(review): assumes Length != 0 — with rcx = 0 the repne
; loop body never runs and ZF is stale; presumably the caller ASSERTs this.
;------------------------------------------------------------------------------
global ASM_PFX(InternalMemScanMem16)
ASM_PFX(InternalMemScanMem16):
    push    rdi                         ; rdi is callee-saved in MS x64 ABI
    mov     rdi, rcx                    ; rdi <- Buffer (scasw scans at [rdi])
    mov     rax, r8                     ; ax  <- Value to compare against
    mov     rcx, rdx                    ; rcx <- Length, the word count for repne
    repne   scasw                       ; scan until ax matches (ZF=1) or rcx hits 0
    lea     rax, [rdi - 2]              ; rdi overshot the match by one word; back up
    cmovnz  rax, rcx                    ; no match => ZF=0 and rcx==0, so return NULL
    pop     rdi
    ret
.global s_prepare_buffers s_prepare_buffers: ret .global s_faulty_load s_faulty_load: push %r11 push %r12 push %r13 push %r15 push %r8 push %r9 push %rcx // Store lea addresses_UC+0x11694, %r15 inc %r8 movl $0x51525354, (%r15) nop nop cmp $30477, %r15 // Faulty Load lea addresses_normal+0x8994, %r12 nop nop nop nop nop sub %rcx, %rcx movups (%r12), %xmm4 vpextrq $1, %xmm4, %r15 lea oracles, %r12 and $0xff, %r15 shlq $12, %r15 mov (%r12,%r15,1), %r15 pop %rcx pop %r9 pop %r8 pop %r15 pop %r13 pop %r12 pop %r11 ret /* <gen_faulty_load> [REF] {'OP': 'LOAD', 'src': {'same': False, 'type': 'addresses_normal', 'NT': False, 'AVXalign': False, 'size': 32, 'congruent': 0}} {'OP': 'STOR', 'dst': {'same': False, 'type': 'addresses_UC', 'NT': False, 'AVXalign': False, 'size': 4, 'congruent': 6}} [Faulty Load] {'OP': 'LOAD', 'src': {'same': True, 'type': 'addresses_normal', 'NT': False, 'AVXalign': False, 'size': 16, 'congruent': 0}} <gen_prepare_buffer> {'34': 21829} 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 
34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 */
; Copyright (c) 2020, The rav1e contributors. All rights reserved ; ; This source code is subject to the terms of the BSD 2 Clause License and ; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License ; was not distributed with this source code in the LICENSE file, you can ; obtain it at www.aomedia.org/license/software. If the Alliance for Open ; Media Patent License 1.0 was not distributed with this source code in the ; PATENTS file, you can obtain it at www.aomedia.org/license/patent. %include "config.asm" %include "ext/x86/x86inc.asm" SECTION_RODATA 32 addsub: times 16 db 1, -1 rounding: times 4 dq 0x800 SECTION .text %define m(x) mangle(private_prefix %+ _ %+ x %+ SUFFIX) ; Consolidate scaling and rounding to one place so that it is easier to change. %macro SSE_SCALE_4X4 0 ; Multiply and shift using scalar code mov scaled, [scaleq] imul rax, scaleq add rax, 0x800 shr rax, 12 %endmacro ; 1 is the input and output register. ; 2-3 are tmp registers. %macro SSE_SCALE 2-3 ; Reduce 32-bit sums to 64-bits sums. pshufd m%2, m%1, q3311 paddd m%1, m%2 LOAD_SCALES %2, %3 ; Multiply and shift with rounding. pmuludq m%1, m%2 ; TODO: Alter rust source so that rounding can always done at the end (i.e. ; only do it once) mova m%2, [rounding] paddq m%1, m%2 psrlq m%1, 12 %endmacro %macro LOAD_SCALES_4X8 2 ; Load 1 scale from each of the 2 rows. movd m%1, [scaleq] movd m%2, [scaleq+scale_strideq] ; 64-bit unpack since our loads have only one value each. punpcklqdq m%1, m%2 %endmacro ; 2 is unused %macro LOAD_SCALES_8X4 2 ; Convert to 64-bits. ; It doesn't matter that the upper halves are full of garbage. movq m%1, [scaleq] pshufd m%1, m%1, q1100 %endmacro ; 2 is unused %macro LOAD_SCALES_16X4 2 pmovzxdq m%1, [scaleq] %endmacro ; Separate from other scale macros, since it uses 2 inputs. ; 1-2 are inputs regs and 1 is the output reg. 
; 3-4 are tmp registers %macro SSE_SCALE_32X4 4 pshufd m%3, m%1, q3311 paddd m%1, m%3 pshufd m%3, m%2, q3311 paddd m%2, m%3 ; Load scale for 4x4 blocks and convert to 64-bits. ; It doesn't matter if the upper halves are full of garbage. ; raw load: 0, 1, 2, 3 | 4, 5, 6, 7 ; unpack low: 0, 1 | 4, 5 ; unpack high: 2, 3, | 6, 7 mova m%4, [scaleq] punpckldq m%3, m%4, m%4 punpckhdq m%4, m%4 pmuludq m%1, m%3 pmuludq m%2, m%4 mova m%3, [rounding] paddq m%1, m%3 paddq m%2, m%3 psrlq m%1, 12 psrlq m%2, 12 paddq m%1, m%2 %endmacro INIT_XMM ssse3 ; Use scale_stride's register to store src_stride3 cglobal weighted_sse_4x4, 6, 7, 5, \ src, src_stride, dst, dst_stride, scale, \ src_stride3, dst_stride3 lea src_stride3q, [src_strideq*3] lea dst_stride3q, [dst_strideq*3] movq m0, [addsub] movd m1, [srcq] movd m2, [dstq] punpcklbw m1, m2 movd m2, [srcq+src_strideq] movd m3, [dstq+dst_strideq] punpcklbw m2, m3 pmaddubsw m1, m0 pmaddubsw m2, m0 pmaddwd m1, m1 pmaddwd m2, m2 paddd m1, m2 movd m2, [srcq+src_strideq*2] movd m3, [dstq+dst_strideq*2] punpcklbw m2, m3 movd m3, [srcq+src_stride3q] movd m4, [dstq+dst_stride3q] punpcklbw m3, m4 pmaddubsw m2, m0 pmaddubsw m3, m0 pmaddwd m2, m2 pmaddwd m3, m3 paddd m2, m3 paddd m1, m2 pshuflw m0, m1, q3232 paddd m0, m1 movd eax, m0 ; Multiply and shift using scalar code. 
SSE_SCALE_4X4 RET %macro WEIGHTED_SSE_4X8_KERNEL 0 movd m1, [srcq] movd m2, [srcq+src_strideq*4] punpckldq m1, m2 movd m2, [dstq] movd m3, [dstq+dst_strideq*4] add srcq, src_strideq add dstq, dst_strideq punpckldq m2, m3 punpcklbw m1, m2 movd m2, [srcq] movd m3, [srcq+src_strideq*4] punpckldq m2, m3 movd m3, [dstq] movd m4, [dstq+dst_strideq*4] add srcq, src_strideq add dstq, dst_strideq punpckldq m3, m4 punpcklbw m2, m3 pmaddubsw m1, m0 pmaddubsw m2, m0 pmaddwd m1, m1 pmaddwd m2, m2 paddd m1, m2 movd m2, [srcq] movd m3, [srcq+src_strideq*4] punpckldq m2, m3 movd m3, [dstq] movd m4, [dstq+dst_strideq*4] add srcq, src_strideq add dstq, dst_strideq punpckldq m3, m4 punpcklbw m2, m3 movd m3, [srcq] movd m4, [srcq+src_strideq*4] punpckldq m3, m4 movd m4, [dstq] movd m5, [dstq+dst_strideq*4] punpckldq m4, m5 punpcklbw m3, m4 pmaddubsw m2, m0 pmaddubsw m3, m0 pmaddwd m2, m2 pmaddwd m3, m3 paddd m2, m3 paddd m1, m2 %define LOAD_SCALES LOAD_SCALES_4X8 SSE_SCALE 1, 2, 3 %endmacro INIT_XMM ssse3 cglobal weighted_sse_4x8, 6, 6, 6, \ src, src_stride, dst, dst_stride, scale, scale_stride mova m0, [addsub] WEIGHTED_SSE_4X8_KERNEL pshufd m0, m1, q3232 paddq m1, m0 movq rax, m1 RET INIT_XMM ssse3 cglobal weighted_sse_4x16, 6, 6, 7, \ src, src_stride, dst, dst_stride, scale, scale_stride mova m0, [addsub] WEIGHTED_SSE_4X8_KERNEL ; Swap so the use of this macro will use m6 as the result SWAP 1, 6 lea scaleq, [scaleq+scale_strideq*2] ; Already incremented by stride 3 times, but must go up 5 more to get to 8 add srcq, src_strideq add dstq, dst_strideq lea srcq, [srcq+src_strideq*4] lea dstq, [dstq+dst_strideq*4] WEIGHTED_SSE_4X8_KERNEL paddq m6, m1 pshufd m0, m6, q3232 paddq m6, m0 movq rax, m6 RET %macro WEIGHTED_SSE_8X4_KERNEL 0 movq m1, [srcq] movq m2, [dstq] punpcklbw m1, m2 movq m2, [srcq+src_strideq] movq m3, [dstq+dst_strideq] punpcklbw m2, m3 pmaddubsw m1, m0 pmaddubsw m2, m0 pmaddwd m1, m1 pmaddwd m2, m2 paddd m1, m2 movq m2, [srcq+src_strideq*2] movq m3, [dstq+dst_strideq*2] 
punpcklbw m2, m3 movq m3, [srcq+src_stride3q] movq m4, [dstq+dst_stride3q] punpcklbw m3, m4 pmaddubsw m2, m0 pmaddubsw m3, m0 pmaddwd m2, m2 pmaddwd m3, m3 paddd m2, m3 paddd m1, m2 %define LOAD_SCALES LOAD_SCALES_8X4 SSE_SCALE 1, 2 %endmacro %macro WEIGHTED_SSE_16X4_KERNEL 0 pmovzxbw m0, [srcq] pmovzxbw m1, [dstq] psubw m0, m1 pmaddwd m0, m0 pmovzxbw m1, [srcq+src_strideq] pmovzxbw m2, [dstq+dst_strideq] psubw m1, m2 pmaddwd m1, m1 paddd m0, m1 pmovzxbw m1, [srcq+src_strideq*2] pmovzxbw m2, [dstq+dst_strideq*2] psubw m1, m2 pmaddwd m1, m1 pmovzxbw m2, [srcq+src_stride3q] pmovzxbw m3, [dstq+dst_stride3q] psubw m2, m3 pmaddwd m2, m2 paddd m1, m2 paddd m1, m0 %define LOAD_SCALES LOAD_SCALES_16X4 SSE_SCALE 1, 2 %endmacro %macro WEIGHTED_SSE_32X4_KERNEL 0 ; Unpacking high and low results in sums that are 8 samples apart. To ; correctly apply weights, two separate registers are needed to accumulate. mova m2, [srcq] mova m3, [dstq] punpcklbw m1, m2, m3 punpckhbw m2, m3 mova m4, [srcq+src_strideq] mova m5, [dstq+dst_strideq] punpcklbw m3, m4, m5 punpckhbw m4, m5 pmaddubsw m1, m0 pmaddubsw m2, m0 pmaddubsw m3, m0 pmaddubsw m4, m0 pmaddwd m1, m1 pmaddwd m2, m2 pmaddwd m3, m3 pmaddwd m4, m4 ; Accumulate paddd m1, m3 paddd m2, m4 mova m4, [srcq+src_strideq*2] mova m5, [dstq+dst_strideq*2] punpcklbw m3, m4, m5 punpckhbw m4, m5 mova m6, [srcq+src_stride3q] mova m7, [dstq+dst_stride3q] punpcklbw m5, m6, m7 punpckhbw m6, m7 pmaddubsw m3, m0 pmaddubsw m4, m0 pmaddubsw m5, m0 pmaddubsw m6, m0 pmaddwd m3, m3 pmaddwd m4, m4 pmaddwd m5, m5 pmaddwd m6, m6 paddd m3, m5 paddd m4, m6 paddd m1, m3 paddd m2, m4 SSE_SCALE_32X4 1, 2, 3, 4 %endmacro %macro WEIGHTED_SSE 2 ; w, h %if %1 == 8 %if %2 == 4 ; Use scale_stride's register to store src_stride3 cglobal weighted_sse_%1x%2, 6, 7, 5, \ src, src_stride, dst, dst_stride, scale, \ src_stride3, dst_stride3 %else cglobal weighted_sse_%1x%2, 6, 9, 6, \ src, src_stride, dst, dst_stride, scale, scale_stride, \ src_stride3, dst_stride3, h %endif 
%elif %1 == 16 %if %2 == 4 ; Use scale_stride's register to store src_stride3 cglobal weighted_sse_%1x%2, 6, 7, 4, \ src, src_stride, dst, dst_stride, scale, \ src_stride3, dst_stride3 %else cglobal weighted_sse_%1x%2, 6, 9, 5, \ src, src_stride, dst, dst_stride, scale, scale_stride, \ src_stride3, dst_stride3, h %endif %elif %1 == 32 cglobal weighted_sse_%1x%2, 6, 9, 9, \ src, src_stride, dst, dst_stride, scale, scale_stride, \ src_stride3, dst_stride3, h %else ; > 32 cglobal weighted_sse_%1x%2, 6, 10, 9, \ src, src_stride, dst, dst_stride, scale, scale_stride, \ src_stride3, dst_stride3, h, w %endif ; === Setup === ; kernel_width/kernel_height: number of elements that the kernel processes. ; m0: except for when w == 16, m0 is used to hold a constant 1, -1... vector ; register for diffing the two sources. ; sum: The kernel stores it's results on m1. The last vector register is used ; unless only one iteration is done. ; Default the kernel width to the width of this function. %define kernel_width %1 %define kernel_height 4 %if %1 == 8 mova m0, [addsub] %endif %if %1 >= 32 mova m0, [addsub] ; Iterate multiple times when w > 32. %define kernel_width 32 %endif %if %1 > kernel_width || %2 > kernel_height ; Add onto the last used vector register. %assign sum xmm_regs_used-1 %else ; Use the result from the kernel %define sum 1 %endif lea src_stride3q, [src_strideq*3] lea dst_stride3q, [dst_strideq*3] %if %1 > kernel_width || %2 > kernel_height pxor m%[sum], m%[sum] %endif %if %2 > kernel_height mov hd, %2/kernel_height-1 .loop: %endif %if %1 > kernel_width mov wd, %1/kernel_width-1 .loop_horiz: %endif WEIGHTED_SSE_%[kernel_width]X%[kernel_height]_KERNEL %if %2 > kernel_height || %1 > kernel_width paddq m%[sum], m1 %endif %if %1 > kernel_width add scaleq, kernel_width*4/4 add srcq, kernel_width add dstq, kernel_width dec wq jge .loop_horiz %endif %if %2 > kernel_height ; Move down 4 rows. 
%if %1 > kernel_width ; src/dst is incremented by width when processing multi iteration rows. ; Reduce the offset by the width of the row. lea srcq, [srcq+src_strideq*4 - %1] lea dstq, [dstq+dst_strideq*4 - %1] ; The behaviour for scale is similar lea scaleq, [scaleq+scale_strideq - %1*4/4] %else lea srcq, [srcq+src_strideq*4] lea dstq, [dstq+dst_strideq*4] add scaleq, scale_strideq %endif dec hq jge .loop %endif %if mmsize == 16 pshufd m2, m%[sum], q3232 paddq m%[sum], m2 movq rax, m%[sum] %elif mmsize == 32 vextracti128 xm2, m%[sum], 1 paddq xm%[sum], xm2 pshufd xm2, xm%[sum], q3232 paddq xm%[sum], xm2 movq rax, xm%[sum] %endif RET %undef sum, kernel_width, res %endmacro INIT_XMM ssse3 WEIGHTED_SSE 8, 4 %if ARCH_X86_64 WEIGHTED_SSE 8, 8 WEIGHTED_SSE 8, 16 WEIGHTED_SSE 8, 32 %endif ; ARCH_X86_64 INIT_YMM avx2 WEIGHTED_SSE 16, 4 %if ARCH_X86_64 WEIGHTED_SSE 16, 8 WEIGHTED_SSE 16, 16 WEIGHTED_SSE 16, 32 WEIGHTED_SSE 16, 64 WEIGHTED_SSE 32, 8 WEIGHTED_SSE 32, 16 WEIGHTED_SSE 32, 32 WEIGHTED_SSE 32, 64 WEIGHTED_SSE 64, 16 WEIGHTED_SSE 64, 32 WEIGHTED_SSE 64, 64 WEIGHTED_SSE 64, 128 WEIGHTED_SSE 128, 64 WEIGHTED_SSE 128, 128 %endif ; ARCH_X86_64 INIT_XMM sse2 cglobal weighted_sse_4x4_hbd, 6, 8, 4, \ src, src_stride, dst, dst_stride, scale, scale_stride, \ src_stride3, dst_stride3 lea src_stride3q, [src_strideq*3] lea dst_stride3q, [dst_strideq*3] movq m0, [srcq] movq m1, [dstq] psubw m0, m1 pmaddwd m0, m0 movq m1, [srcq+src_strideq] movq m2, [dstq+dst_strideq] psubw m1, m2 pmaddwd m1, m1 paddd m0, m1 movq m1, [srcq+src_strideq*2] movq m2, [dstq+dst_strideq*2] psubw m1, m2 pmaddwd m1, m1 movq m2, [srcq+src_stride3q] movq m3, [dstq+dst_stride3q] psubw m2, m3 pmaddwd m2, m2 paddd m1, m2 paddd m0, m1 pshuflw m1, m0, q3232 paddd m0, m1 movd eax, m0 ; Multiply and shift using scalar code. SSE_SCALE_4X4 RET
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include "jitpch.h"

#ifdef _MSC_VER
#pragma hdrstop
#endif

// Flowgraph Construction and Maintenance

//------------------------------------------------------------------------
// fgInit: Reset all of the Compiler's flowgraph-related state to its
//   "nothing computed yet" defaults, prior to importing a method.
//
// Notes:
//   Every fg* field set here is cleared unconditionally; the DEBUG-only
//   JitNoStructPromotion config probe at the end may then flip the struct
//   promotion switches for non-inlinee compilations.
//
void Compiler::fgInit()
{
    impInit();

    /* Initialization for fgWalkTreePre() and fgWalkTreePost() */

    fgFirstBBScratch = nullptr;

#ifdef DEBUG
    fgPrintInlinedMethods = false;
#endif // DEBUG

    /* We haven't yet computed the bbPreds lists */
    fgComputePredsDone = false;

    /* We haven't yet computed the bbCheapPreds lists */
    fgCheapPredsValid = false;

    /* We haven't yet computed the edge weight */
    fgEdgeWeightsComputed    = false;
    fgHaveValidEdgeWeights   = false;
    fgSlopUsedInEdgeWeights  = false;
    fgRangeUsedInEdgeWeights = true;
    fgNeedsUpdateFlowGraph   = false;
    fgCalledCount            = BB_ZERO_WEIGHT;

    /* We haven't yet computed the dominator sets */
    fgDomsComputed = false;

#ifdef DEBUG
    fgReachabilitySetsValid = false;
#endif // DEBUG

    /* We don't know yet which loops will always execute calls */
    fgLoopCallMarked = false;

    /* Initialize the basic block list */

    fgFirstBB        = nullptr;
    fgLastBB         = nullptr;
    fgFirstColdBlock = nullptr;
    fgEntryBB        = nullptr;

#if defined(FEATURE_EH_FUNCLETS)
    fgFirstFuncletBB  = nullptr;
    fgFuncletsCreated = false;
#endif // FEATURE_EH_FUNCLETS

    fgBBcount = 0;

#ifdef DEBUG
    fgBBcountAtCodegen = 0;
#endif // DEBUG

    fgBBNumMax        = 0;
    fgEdgeCount       = 0;
    fgDomBBcount      = 0;
    fgBBVarSetsInited = false;
    fgReturnCount     = 0;

    // Initialize BlockSet data.
    fgCurBBEpoch             = 0;
    fgCurBBEpochSize         = 0;
    fgBBSetCountInSizeTUnits = 0;

    genReturnBB    = nullptr;
    genReturnLocal = BAD_VAR_NUM;

    /* We haven't reached the global morphing phase */
    fgGlobalMorph = false;
    fgModified    = false;

#ifdef DEBUG
    fgSafeBasicBlockCreation = true;
#endif // DEBUG

    fgLocalVarLivenessDone = false;

    /* Statement list is not threaded yet */
    fgStmtListThreaded = false;

    // Initialize the logic for adding code. This is used to insert code such
    // as the code that raises an exception when an array range check fails.

    fgAddCodeList = nullptr;
    fgAddCodeModf = false;

    for (int i = 0; i < SCK_COUNT; i++)
    {
        fgExcptnTargetCache[i] = nullptr;
    }

    /* Keep track of the max count of pointer arguments */
    fgPtrArgCntMax = 0;

    /* This global flag is set whenever we remove a statement */
    fgStmtRemoved = false;

    /* This global flag is set whenever we add a throw block for a RngChk */
    fgRngChkThrowAdded = false; /* reset flag for fgIsCodeAdded() */

    /* Keep track of whether or not EH statements have been optimized */
    fgOptimizedFinally = false;

    /* We will record a list of all BBJ_RETURN blocks here */
    fgReturnBlocks = nullptr;

    /* This is set by fgComputeReachability */
    fgEnterBlks = BlockSetOps::UninitVal();

#ifdef DEBUG
    fgEnterBlksSetValid = false;
#endif // DEBUG

#if !defined(FEATURE_EH_FUNCLETS)
    ehMaxHndNestingCount = 0;
#endif // !FEATURE_EH_FUNCLETS

    /* Init the fgBigOffsetMorphingTemps to be BAD_VAR_NUM. */
    for (int i = 0; i < TYP_COUNT; i++)
    {
        fgBigOffsetMorphingTemps[i] = BAD_VAR_NUM;
    }

    fgNoStructPromotion      = false;
    fgNoStructParamPromotion = false;

    optValnumCSE_phase = false; // referenced in fgMorphSmpOp()

#ifdef DEBUG
    fgNormalizeEHDone = false;
#endif // DEBUG

#ifdef DEBUG
    if (!compIsForInlining())
    {
        // JitNoStructPromotion: 1 disables all struct promotion, 2 disables
        // only struct parameter promotion.
        const int noStructPromotionValue = JitConfig.JitNoStructPromotion();
        assert(0 <= noStructPromotionValue && noStructPromotionValue <= 2);
        if (noStructPromotionValue == 1)
        {
            fgNoStructPromotion = true;
        }
        if (noStructPromotionValue == 2)
        {
            fgNoStructParamPromotion = true;
        }
    }
#endif // DEBUG

    if (!compIsForInlining())
    {
        m_promotedStructDeathVars = nullptr;
    }
#ifdef FEATURE_SIMD
    fgPreviousCandidateSIMDFieldAsgStmt = nullptr;
#endif

    // Profile-guided-optimization (PGO) state starts out empty/disabled.
    fgHasSwitch                  = false;
    fgPgoDisabled                = false;
    fgPgoSchema                  = nullptr;
    fgPgoData                    = nullptr;
    fgPgoSchemaCount             = 0;
    fgNumProfileRuns             = 0;
    fgPgoBlockCounts             = 0;
    fgPgoEdgeCounts              = 0;
    fgPgoClassProfiles           = 0;
    fgPgoInlineePgo              = 0;
    fgPgoInlineeNoPgo            = 0;
    fgPgoInlineeNoPgoSingleBlock = 0;
    fgCountInstrumentor          = nullptr;
    fgClassInstrumentor          = nullptr;
    fgPredListSortVector         = nullptr;
}

/*****************************************************************************
 *
 *  Create a basic block and append it to the current BB list.
 */

BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind)
{
    // This method must not be called after the exception table has been
    // constructed, because it does not provide support for patching
    // the exception table.
    noway_assert(compHndBBtabCount == 0);

    BasicBlock* block;

    /* Allocate the block descriptor */

    block = bbNewBasicBlock(jumpKind);
    noway_assert(block->bbJumpKind == jumpKind);

    /* Append the block to the end of the global basic block list */

    if (fgFirstBB)
    {
        fgLastBB->setNext(block);
    }
    else
    {
        // Empty list: the new block becomes both first and last.
        fgFirstBB     = block;
        block->bbPrev = nullptr;
    }

    fgLastBB = block;

    return block;
}

//------------------------------------------------------------------------
// fgEnsureFirstBBisScratch: Ensure that fgFirstBB is a scratch BasicBlock
//
// Returns:
//   Nothing. May allocate a new block and alter the value of fgFirstBB.
//
// Notes:
//   This should be called before adding on-entry initialization code to
//   the method, to ensure that fgFirstBB is not part of a loop.
//
//   Does nothing, if fgFirstBB is already a scratch BB. After calling this,
//   fgFirstBB may already contain code. Callers have to be careful
//   that they do not mess up the order of things added to this block and
//   inadvertently change semantics.
//
//   We maintain the invariant that a scratch BB ends with BBJ_NONE or
//   BBJ_ALWAYS, so that when adding independent bits of initialization,
//   callers can generally append to the fgFirstBB block without worrying
//   about what code is there already.
//
//   Can be called at any time, and can be called multiple times.
//
void Compiler::fgEnsureFirstBBisScratch()
{
    // Have we already allocated a scratch block?
    if (fgFirstBBisScratch())
    {
        return;
    }

    assert(fgFirstBBScratch == nullptr);

    BasicBlock* block = bbNewBasicBlock(BBJ_NONE);

    if (fgFirstBB != nullptr)
    {
        // If we have profile data the new block will inherit fgFirstBlock's weight
        if (fgFirstBB->hasProfileWeight())
        {
            block->inheritWeight(fgFirstBB);
        }

        // The first block has an implicit ref count which we must
        // remove. Note the ref count could be greater than one, if
        // the first block is not scratch and is targeted by a
        // branch.
        assert(fgFirstBB->bbRefs >= 1);
        fgFirstBB->bbRefs--;

        // The new scratch bb will fall through to the old first bb
        fgAddRefPred(fgFirstBB, block);
        fgInsertBBbefore(fgFirstBB, block);
    }
    else
    {
        // The block list was empty: the scratch block is the whole list.
        noway_assert(fgLastBB == nullptr);
        fgFirstBB = block;
        fgLastBB  = block;
    }

    noway_assert(fgLastBB != nullptr);

    // Set the expected flags
    block->bbFlags |= (BBF_INTERNAL | BBF_IMPORTED);

    // This new first BB has an implicit ref, and no others.
    block->bbRefs = 1;

    fgFirstBBScratch = fgFirstBB;

#ifdef DEBUG
    if (verbose)
    {
        printf("New scratch " FMT_BB "\n", block->bbNum);
    }
#endif
}

//------------------------------------------------------------------------
// fgFirstBBisScratch: Check if fgFirstBB is a scratch block
//
// Returns:
//   true if fgFirstBB is a scratch block.
//
bool Compiler::fgFirstBBisScratch()
{
    if (fgFirstBBScratch != nullptr)
    {
        // Sanity-check the scratch-block invariants before answering.
        assert(fgFirstBBScratch == fgFirstBB);
        assert(fgFirstBBScratch->bbFlags & BBF_INTERNAL);
        assert(fgFirstBBScratch->countOfInEdges() == 1);

        // Normally, the first scratch block is a fall-through block. However, if the block after it was an empty
        // BBJ_ALWAYS block, it might get removed, and the code that removes it will make the first scratch block
        // a BBJ_ALWAYS block.
        assert((fgFirstBBScratch->bbJumpKind == BBJ_NONE) || (fgFirstBBScratch->bbJumpKind == BBJ_ALWAYS));

        return true;
    }
    else
    {
        return false;
    }
}

//------------------------------------------------------------------------
// fgBBisScratch: Check if a given block is a scratch block.
//
// Arguments:
//    block - block in question
//
// Returns:
//    true if this block is the first block and is a scratch block.
//
bool Compiler::fgBBisScratch(BasicBlock* block)
{
    return fgFirstBBisScratch() && (block == fgFirstBB);
}

/* Removes a block from the return block list.
   If 'block' is not on fgReturnBlocks, this is a quiet no-op. */
void Compiler::fgRemoveReturnBlock(BasicBlock* block)
{
    if (fgReturnBlocks == nullptr)
    {
        return;
    }

    if (fgReturnBlocks->block == block)
    {
        // It's the 1st entry, assign new head of list.
        fgReturnBlocks = fgReturnBlocks->next;
        return;
    }

    // Scan the singly-linked list looking one node ahead so we can splice.
    for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks->next != nullptr; retBlocks = retBlocks->next)
    {
        if (retBlocks->next->block == block)
        {
            // Found it; splice it out.
            retBlocks->next = retBlocks->next->next;
            return;
        }
    }
}

/*****************************************************************************
 * fgChangeSwitchBlock:
 *
 * We have a BBJ_SWITCH jump at 'oldSwitchBlock' and we want to move this
 * switch jump over to 'newSwitchBlock'. All of the blocks that are jumped
 * to from jumpTab[] need to have their predecessor lists updated by removing
 * the 'oldSwitchBlock' and adding 'newSwitchBlock'.
 */

void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock)
{
    noway_assert(oldSwitchBlock != nullptr);
    noway_assert(newSwitchBlock != nullptr);
    noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH);

    // Walk the switch's jump table, updating the predecessor for each branch.
    for (BasicBlock* const bJump : oldSwitchBlock->SwitchTargets())
    {
        noway_assert(bJump != nullptr);

        // Note that if there are duplicate branch targets in the switch jump table,
        // fgRemoveRefPred()/fgAddRefPred() will do the right thing: the second and
        // subsequent duplicates will simply subtract from and add to the duplicate
        // count (respectively).

        if (bJump->countOfInEdges() > 0)
        {
            //
            // Remove the old edge [oldSwitchBlock => bJump]
            //
            fgRemoveRefPred(bJump, oldSwitchBlock);
        }
        else
        {
            // bJump->countOfInEdges() must not be zero after preds are calculated.
            assert(!fgComputePredsDone);
        }

        //
        // Create the new edge [newSwitchBlock => bJump]
        //
        fgAddRefPred(bJump, newSwitchBlock);
    }

    if (m_switchDescMap != nullptr)
    {
        SwitchUniqueSuccSet uniqueSuccSet;

        // If already computed and cached the unique descriptors for the old block, let's
        // update those for the new block.
        if (m_switchDescMap->Lookup(oldSwitchBlock, &uniqueSuccSet))
        {
            m_switchDescMap->Set(newSwitchBlock, uniqueSuccSet, BlockToSwitchDescMap::Overwrite);
        }
        else
        {
            fgInvalidateSwitchDescMapEntry(newSwitchBlock);
        }
        // The old block no longer owns a switch, so its cached entry is stale.
        fgInvalidateSwitchDescMapEntry(oldSwitchBlock);
    }
}

//------------------------------------------------------------------------
// fgReplaceSwitchJumpTarget: update BBJ_SWITCH block so that all control
//   that previously flowed to oldTarget now flows to newTarget.
//
// Arguments:
//    blockSwitch - block ending in a switch
//    newTarget   - new branch target
//    oldTarget   - old branch target
//
// Notes:
//    Updates the jump table and the cached unique target set (if any).
//    Can be called before or after pred lists are built.
//    If pred lists are built, updates pred lists.
// void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget) { noway_assert(blockSwitch != nullptr); noway_assert(newTarget != nullptr); noway_assert(oldTarget != nullptr); noway_assert(blockSwitch->bbJumpKind == BBJ_SWITCH); // For the jump targets values that match oldTarget of our BBJ_SWITCH // replace predecessor 'blockSwitch' with 'newTarget' // unsigned jumpCnt = blockSwitch->bbJumpSwt->bbsCount; BasicBlock** jumpTab = blockSwitch->bbJumpSwt->bbsDstTab; unsigned i = 0; // Walk the switch's jump table looking for blocks to update the preds for while (i < jumpCnt) { if (jumpTab[i] == oldTarget) // We will update when jumpTab[i] matches { // Remove the old edge [oldTarget from blockSwitch] // if (fgComputePredsDone) { fgRemoveAllRefPreds(oldTarget, blockSwitch); } // // Change the jumpTab entry to branch to the new location // jumpTab[i] = newTarget; // // Create the new edge [newTarget from blockSwitch] // flowList* newEdge = nullptr; if (fgComputePredsDone) { newEdge = fgAddRefPred(newTarget, blockSwitch); } // Now set the correct value of newEdge->flDupCount // and replace any other jumps in jumpTab[] that go to oldTarget. // i++; while (i < jumpCnt) { if (jumpTab[i] == oldTarget) { // // We also must update this entry in the jumpTab // jumpTab[i] = newTarget; newTarget->bbRefs++; // // Increment the flDupCount // if (fgComputePredsDone) { newEdge->flDupCount++; } } i++; // Check the next entry in jumpTab[] } // Maintain, if necessary, the set of unique targets of "block." UpdateSwitchTableTarget(blockSwitch, oldTarget, newTarget); return; // We have replaced the jumps to oldTarget with newTarget } i++; // Check the next entry in jumpTab[] for a match } noway_assert(!"Did not find oldTarget in jumpTab[]"); } //------------------------------------------------------------------------ // Compiler::fgReplaceJumpTarget: For a given block, replace the target 'oldTarget' with 'newTarget'. 
// // Arguments: // block - the block in which a jump target will be replaced. // newTarget - the new branch target of the block. // oldTarget - the old branch target of the block. // // Notes: // 1. Only branches are changed: BBJ_ALWAYS, the non-fallthrough path of BBJ_COND, BBJ_SWITCH, etc. // We ignore other block types. // 2. Only the first target found is updated. If there are multiple ways for a block // to reach 'oldTarget' (e.g., multiple arms of a switch), only the first one found is changed. // 3. The predecessor lists are not changed. // 4. The switch table "unique successor" cache is invalidated. // // This function is most useful early, before the full predecessor lists have been computed. // void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget) { assert(block != nullptr); switch (block->bbJumpKind) { case BBJ_CALLFINALLY: case BBJ_COND: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: // This function will be called before import, so we still have BBJ_LEAVE if (block->bbJumpDest == oldTarget) { block->bbJumpDest = newTarget; } break; case BBJ_NONE: case BBJ_EHFINALLYRET: case BBJ_THROW: case BBJ_RETURN: break; case BBJ_SWITCH: unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab; for (unsigned i = 0; i < jumpCnt; i++) { if (jumpTab[i] == oldTarget) { jumpTab[i] = newTarget; break; } } break; default: assert(!"Block doesn't have a valid bbJumpKind!!!!"); unreached(); break; } } //------------------------------------------------------------------------ // fgReplacePred: update the predecessor list, swapping one pred for another // // Arguments: // block - block with the pred list we want to update // oldPred - pred currently appearing in block's pred list // newPred - pred that will take oldPred's place. 
// // Notes: // // A block can only appear once in the preds list (for normal preds, not // cheap preds): if a predecessor has multiple ways to get to this block, then // flDupCount will be >1, but the block will still appear exactly once. Thus, this // function assumes that all branches from the predecessor (practically, that all // switch cases that target this block) are changed to branch from the new predecessor, // with the same dup count. // // Note that the block bbRefs is not changed, since 'block' has the same number of // references as before, just from a different predecessor block. // // Also note this may cause sorting of the pred list. // void Compiler::fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred) { noway_assert(block != nullptr); noway_assert(oldPred != nullptr); noway_assert(newPred != nullptr); assert(!fgCheapPredsValid); bool modified = false; for (flowList* const pred : block->PredEdges()) { if (oldPred == pred->getBlock()) { pred->setBlock(newPred); modified = true; break; } } // We may now need to reorder the pred list. // if (modified) { block->ensurePredListOrder(this); } } /***************************************************************************** * For a block that is in a handler region, find the first block of the most-nested * handler containing the block. */ BasicBlock* Compiler::fgFirstBlockOfHandler(BasicBlock* block) { assert(block->hasHndIndex()); return ehGetDsc(block->getHndIndex())->ebdHndBeg; } /***************************************************************************** * * The following helps find a basic block given its PC offset. 
 */

// fgInitBBLookup: allocate and fill fgBBs, a dense table of all basic blocks
// in list (bbNext) order, used by fgLookupBB for binary search by IL offset.
void Compiler::fgInitBBLookup()
{
    BasicBlock** dscBBptr;

    /* Allocate the basic block table */

    dscBBptr = fgBBs = new (this, CMK_BasicBlock) BasicBlock*[fgBBcount];

    /* Walk all the basic blocks, filling in the table */

    for (BasicBlock* const block : Blocks())
    {
        *dscBBptr++ = block;
    }

    // Every slot must have been filled exactly once.
    noway_assert(dscBBptr == fgBBs + fgBBcount);
}

// fgLookupBB: find the basic block whose IL code offset (bbCodeOffs) equals
// 'addr', via binary search over fgBBs. Returns nullptr when 'addr' is the
// end-of-method offset; asserts (NO_WAY) if no block matches at all.
BasicBlock* Compiler::fgLookupBB(unsigned addr)
{
    unsigned lo;
    unsigned hi;

    /* Do a binary search */

    for (lo = 0, hi = fgBBcount - 1;;)
    {
    AGAIN:;

        if (lo > hi)
        {
            break;
        }

        unsigned mid = (lo + hi) / 2;

        BasicBlock* dsc = fgBBs[mid];

        // We introduce internal blocks for BBJ_CALLFINALLY. Skip over these.

        // Internal blocks have no IL offset of their own, so walk forward to
        // the next non-internal block before comparing offsets.
        while (dsc->bbFlags & BBF_INTERNAL)
        {
            dsc = dsc->bbNext;
            mid++;

            // We skipped over too many, Set hi back to the original mid - 1

            if (mid > hi)
            {
                mid = (lo + hi) / 2;
                hi  = mid - 1;
                goto AGAIN;
            }
        }

        unsigned pos = dsc->bbCodeOffs;

        if (pos < addr)
        {
            // Special case: searching past the last block means 'addr' is the
            // offset just past the method's IL; callers get nullptr for that.
            if ((lo == hi) && (lo == (fgBBcount - 1)))
            {
                noway_assert(addr == dsc->bbCodeOffsEnd);
                return nullptr; // NULL means the end of method
            }
            lo = mid + 1;
            continue;
        }

        if (pos > addr)
        {
            hi = mid - 1;
            continue;
        }

        // Exact match on the block's starting IL offset.
        return dsc;
    }
#ifdef DEBUG
    printf("ERROR: Couldn't find basic block at offset %04X\n", addr);
#endif // DEBUG

    NO_WAY("fgLookupBB failed.");
}

//------------------------------------------------------------------------
// FgStack: simple stack model for the inlinee's evaluation stack.
//
// Model the inputs available to various operations in the inline body.
// Tracks constants, arguments, array lengths.
class FgStack { public: FgStack() : slot0(SLOT_INVALID), slot1(SLOT_INVALID), depth(0) { // Empty } void Clear() { depth = 0; } void PushUnknown() { Push(SLOT_UNKNOWN); } void PushConstant() { Push(SLOT_CONSTANT); } void PushArrayLen() { Push(SLOT_ARRAYLEN); } void PushArgument(unsigned arg) { Push(static_cast<FgSlot>(SLOT_ARGUMENT + arg)); } unsigned GetSlot0() const { assert(depth >= 1); return slot0; } unsigned GetSlot1() const { assert(depth >= 2); return slot1; } static bool IsConstant(unsigned value) { return value == SLOT_CONSTANT; } static bool IsArrayLen(unsigned value) { return value == SLOT_ARRAYLEN; } static bool IsArgument(unsigned value) { return value >= SLOT_ARGUMENT; } static unsigned SlotTypeToArgNum(unsigned value) { assert(IsArgument(value)); return value - SLOT_ARGUMENT; } bool IsStackTwoDeep() const { return depth == 2; } bool IsStackOneDeep() const { return depth == 1; } bool IsStackAtLeastOneDeep() const { return depth >= 1; } private: enum FgSlot { SLOT_INVALID = UINT_MAX, SLOT_UNKNOWN = 0, SLOT_CONSTANT = 1, SLOT_ARRAYLEN = 2, SLOT_ARGUMENT = 3 }; void Push(FgSlot type) { switch (depth) { case 0: ++depth; slot0 = type; break; case 1: ++depth; FALLTHROUGH; case 2: slot1 = slot0; slot0 = type; } } FgSlot slot0; FgSlot slot1; unsigned depth; }; void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget) { const BYTE* codeBegp = codeAddr; const BYTE* codeEndp = codeAddr + codeSize; unsigned varNum; bool seenJump = false; var_types varType = DUMMY_INIT(TYP_UNDEF); // TYP_ type typeInfo ti; // Verifier type. 
bool typeIsNormed = false; FgStack pushedStack; const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0; const bool makeInlineObservations = (compInlineResult != nullptr); const bool isInlining = compIsForInlining(); const bool isTier1 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1); const bool isPreJit = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT); const bool resolveTokens = makeInlineObservations && (isTier1 || isPreJit); unsigned retBlocks = 0; unsigned intrinsicCalls = 0; int prefixFlags = 0; int value = 0; if (makeInlineObservations) { // Observe force inline state and code size. compInlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, isForceInline); compInlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); // Determine if call site is within a try. if (isInlining && impInlineInfo->iciBlock->hasTryIndex()) { compInlineResult->Note(InlineObservation::CALLSITE_IN_TRY_REGION); } // Determine if the call site is in a no-return block if (isInlining && (impInlineInfo->iciBlock->bbJumpKind == BBJ_THROW)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION); } // Determine if the call site is in a loop. if (isInlining && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_LOOP); } #ifdef DEBUG // If inlining, this method should still be a candidate. if (isInlining) { assert(compInlineResult->IsCandidate()); } #endif // DEBUG // note that we're starting to look at the opcodes. 
compInlineResult->Note(InlineObservation::CALLEE_BEGIN_OPCODE_SCAN); } CORINFO_RESOLVED_TOKEN resolvedToken; while (codeAddr < codeEndp) { OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); DECODE_OPCODE: if ((unsigned)opcode >= CEE_COUNT) { BADCODE3("Illegal opcode", ": %02X", (int)opcode); } if ((opcode >= CEE_LDARG_0 && opcode <= CEE_STLOC_S) || (opcode >= CEE_LDARG && opcode <= CEE_STLOC)) { opts.lvRefCount++; } if (makeInlineObservations && (opcode >= CEE_LDNULL) && (opcode <= CEE_LDC_R8)) { pushedStack.PushConstant(); } unsigned sz = opcodeSizes[opcode]; switch (opcode) { case CEE_PREFIX1: { if (codeAddr >= codeEndp) { goto TOO_FAR; } opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr)); codeAddr += sizeof(__int8); goto DECODE_OPCODE; } case CEE_PREFIX2: case CEE_PREFIX3: case CEE_PREFIX4: case CEE_PREFIX5: case CEE_PREFIX6: case CEE_PREFIX7: case CEE_PREFIXREF: { BADCODE3("Illegal opcode", ": %02X", (int)opcode); } case CEE_THROW: { if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_THROW_BLOCK); } break; } case CEE_BOX: { if (makeInlineObservations) { int toSkip = impBoxPatternMatch(nullptr, codeAddr + sz, codeEndp, true); if (toSkip > 0) { // toSkip > 0 means we most likely will hit a pattern (e.g. box+isinst+brtrue) that // will be folded into a const // TODO: uncomment later // codeAddr += toSkip; } } break; } case CEE_CALL: case CEE_CALLVIRT: { // There has to be code after the call, otherwise the inlinee is unverifiable. 
if (isInlining) { noway_assert(codeAddr < codeEndp - sz); } if (!makeInlineObservations) { break; } CORINFO_METHOD_HANDLE methodHnd = nullptr; bool isJitIntrinsic = false; bool mustExpand = false; NamedIntrinsic ni = NI_Illegal; if (resolveTokens) { impResolveToken(codeAddr, &resolvedToken, CORINFO_TOKENKIND_Method); methodHnd = resolvedToken.hMethod; isJitIntrinsic = eeIsJitIntrinsic(methodHnd); } if (isJitIntrinsic) { intrinsicCalls++; ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_IsSupported_True: case NI_IsSupported_False: { pushedStack.PushConstant(); break; } default: { break; } } } if ((OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET) { // If the method has a call followed by a ret, assume that // it is a wrapper method. compInlineResult->Note(InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER); } } break; case CEE_LEAVE: case CEE_LEAVE_S: case CEE_BR: case CEE_BR_S: case CEE_BRFALSE: case CEE_BRFALSE_S: case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BEQ: case CEE_BEQ_S: case CEE_BGE: case CEE_BGE_S: case CEE_BGE_UN: case CEE_BGE_UN_S: case CEE_BGT: case CEE_BGT_S: case CEE_BGT_UN: case CEE_BGT_UN_S: case CEE_BLE: case CEE_BLE_S: case CEE_BLE_UN: case CEE_BLE_UN_S: case CEE_BLT: case CEE_BLT_S: case CEE_BLT_UN: case CEE_BLT_UN_S: case CEE_BNE_UN: case CEE_BNE_UN_S: { seenJump = true; if (codeAddr > codeEndp - sz) { goto TOO_FAR; } // Compute jump target address signed jmpDist = (sz == 1) ? 
getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S)) { break; /* NOP */ } unsigned jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist; // Make sure target is reasonable if (jmpAddr >= codeSize) { BADCODE3("code jumps to outer space", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } // Mark the jump target jumpTarget->bitVectSet(jmpAddr); // See if jump might be sensitive to inlining if (makeInlineObservations && (opcode != CEE_BR_S) && (opcode != CEE_BR)) { fgObserveInlineConstants(opcode, pushedStack, isInlining); } } break; case CEE_SWITCH: { seenJump = true; if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_HAS_SWITCH); // Fail fast, if we're inlining and can't handle this. if (isInlining && compInlineResult->IsFailure()) { return; } } // Make sure we don't go past the end reading the number of cases if (codeAddr > codeEndp - sizeof(DWORD)) { goto TOO_FAR; } // Read the number of cases unsigned jmpCnt = getU4LittleEndian(codeAddr); codeAddr += sizeof(DWORD); if (jmpCnt > codeSize / sizeof(DWORD)) { goto TOO_FAR; } // Find the end of the switch table unsigned jmpBase = (unsigned)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD)); // Make sure there is more code after the switch if (jmpBase >= codeSize) { goto TOO_FAR; } // jmpBase is also the target of the default case, so mark it jumpTarget->bitVectSet(jmpBase); // Process table entries while (jmpCnt > 0) { unsigned jmpAddr = jmpBase + getI4LittleEndian(codeAddr); codeAddr += 4; if (jmpAddr >= codeSize) { BADCODE3("jump target out of range", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } jumpTarget->bitVectSet(jmpAddr); jmpCnt--; } // We've advanced past all the bytes in this instruction sz = 0; } break; case CEE_UNALIGNED: { noway_assert(sz == sizeof(__int8)); prefixFlags |= PREFIX_UNALIGNED; value = 
getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); goto OBSERVE_OPCODE; } case CEE_CONSTRAINED: { noway_assert(sz == sizeof(unsigned)); prefixFlags |= PREFIX_CONSTRAINED; codeAddr += sizeof(unsigned); { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto OBSERVE_OPCODE; } case CEE_READONLY: { noway_assert(sz == 0); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if ((actualOpcode != CEE_LDELEMA) && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } goto OBSERVE_OPCODE; } case CEE_VOLATILE: { noway_assert(sz == 0); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); goto OBSERVE_OPCODE; } case CEE_TAILCALL: { noway_assert(sz == 0); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } goto OBSERVE_OPCODE; } case CEE_STARG: case CEE_STARG_S: { noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (isInlining) { if (varNum < impInlineInfo->argCnt) { impInlineInfo->inlArgInfo[varNum].argHasStargOp = true; } } else { // account for possible hidden param varNum = compMapILargNum(varNum); // This check is only intended to prevent an AV. Bad varNum values will later // be handled properly by the verifier. if (varNum < lvaTableCnt) { // In non-inline cases, note written-to arguments. 
lvaTable[varNum].lvHasILStoreOp = 1; } } } break; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: varNum = (opcode - CEE_STLOC_0); goto STLOC; case CEE_STLOC: case CEE_STLOC_S: { noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); STLOC: if (isInlining) { InlLclVarInfo& lclInfo = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt]; if (lclInfo.lclHasStlocOp) { lclInfo.lclHasMultipleStlocOp = 1; } else { lclInfo.lclHasStlocOp = 1; } } else { varNum += info.compArgsCount; // This check is only intended to prevent an AV. Bad varNum values will later // be handled properly by the verifier. if (varNum < lvaTableCnt) { // In non-inline cases, note written-to locals. if (lvaTable[varNum].lvHasILStoreOp) { lvaTable[varNum].lvHasMultipleILStoreOp = 1; } else { lvaTable[varNum].lvHasILStoreOp = 1; } } } } break; case CEE_LDARGA: case CEE_LDARGA_S: case CEE_LDLOCA: case CEE_LDLOCA_S: { // Handle address-taken args or locals noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? 
getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (isInlining) { if (opcode == CEE_LDLOCA || opcode == CEE_LDLOCA_S) { varType = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclTypeInfo; ti = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclVerTypeInfo; impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclHasLdlocaOp = true; } else { noway_assert(opcode == CEE_LDARGA || opcode == CEE_LDARGA_S); varType = impInlineInfo->lclVarInfo[varNum].lclTypeInfo; ti = impInlineInfo->lclVarInfo[varNum].lclVerTypeInfo; impInlineInfo->inlArgInfo[varNum].argHasLdargaOp = true; pushedStack.PushArgument(varNum); } } else { if (opcode == CEE_LDLOCA || opcode == CEE_LDLOCA_S) { if (varNum >= info.compMethodInfo->locals.numArgs) { BADCODE("bad local number"); } varNum += info.compArgsCount; } else { noway_assert(opcode == CEE_LDARGA || opcode == CEE_LDARGA_S); if (varNum >= info.compILargsCount) { BADCODE("bad argument number"); } varNum = compMapILargNum(varNum); // account for possible hidden param } varType = (var_types)lvaTable[varNum].lvType; ti = lvaTable[varNum].lvVerTypeInfo; // Determine if the next instruction will consume // the address. If so we won't mark this var as // address taken. // // We will put structs on the stack and changing // the addrTaken of a local requires an extra pass // in the morpher so we won't apply this // optimization to structs. // // Debug code spills for every IL instruction, and // therefore it will split statements, so we will // need the address. 
Note that this optimization // is based in that we know what trees we will // generate for this ldfld, and we require that we // won't need the address of this local at all noway_assert(varNum < lvaTableCnt); const bool notStruct = !varTypeIsStruct(&lvaTable[varNum]); const bool notLastInstr = (codeAddr < codeEndp - sz); const bool notDebugCode = !opts.compDbgCode; if (notStruct && notLastInstr && notDebugCode && impILConsumesAddr(codeAddr + sz)) { // We can skip the addrtaken, as next IL instruction consumes // the address. } else { lvaTable[varNum].lvHasLdAddrOp = 1; if (!info.compIsStatic && (varNum == 0)) { // Addr taken on "this" pointer is significant, // go ahead to mark it as permanently addr-exposed here. lvaSetVarAddrExposed(0); // This may be conservative, but probably not very. } } } // isInlining typeIsNormed = ti.IsValueClass() && !varTypeIsStruct(varType); } break; case CEE_JMP: retBlocks++; #if !defined(TARGET_X86) && !defined(TARGET_ARM) if (!isInlining) { // We transform this into a set of ldarg's + tail call and // thus may push more onto the stack than originally thought. // This doesn't interfere with verification because CEE_JMP // is never verifiable, and there's nothing unsafe you can // do with a an IL stack overflow if the JIT is expecting it. info.compMaxStack = max(info.compMaxStack, info.compILargsCount); break; } #endif // !TARGET_X86 && !TARGET_ARM // If we are inlining, we need to fail for a CEE_JMP opcode, just like // the list of other opcodes (for all platforms). FALLTHROUGH; case CEE_MKREFANY: case CEE_RETHROW: if (makeInlineObservations) { // Arguably this should be NoteFatal, but the legacy behavior is // to ignore this for the prejit root. compInlineResult->Note(InlineObservation::CALLEE_UNSUPPORTED_OPCODE); // Fail fast if we're inlining... if (isInlining) { assert(compInlineResult->IsFailure()); return; } } break; case CEE_LOCALLOC: // We now allow localloc callees to become candidates in some cases. 
if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_HAS_LOCALLOC); if (isInlining && compInlineResult->IsFailure()) { return; } } break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: if (makeInlineObservations) { pushedStack.PushArgument(opcode - CEE_LDARG_0); } break; case CEE_LDARG_S: case CEE_LDARG: { if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (makeInlineObservations) { pushedStack.PushArgument(varNum); } } break; case CEE_LDLEN: if (makeInlineObservations) { pushedStack.PushArrayLen(); } break; case CEE_CEQ: case CEE_CGT: case CEE_CGT_UN: case CEE_CLT: case CEE_CLT_UN: if (makeInlineObservations) { fgObserveInlineConstants(opcode, pushedStack, isInlining); } break; case CEE_RET: retBlocks++; break; default: break; } // Skip any remaining operands this opcode may have codeAddr += sz; // Clear any prefix flags that may have been set prefixFlags = 0; // Increment the number of observed instructions opts.instrCount++; OBSERVE_OPCODE: // Note the opcode we just saw if (makeInlineObservations) { InlineObservation obs = typeIsNormed ? InlineObservation::CALLEE_OPCODE_NORMED : InlineObservation::CALLEE_OPCODE; compInlineResult->NoteInt(obs, opcode); } typeIsNormed = false; } if (codeAddr != codeEndp) { TOO_FAR: BADCODE3("Code ends in the middle of an opcode, or there is a branch past the end of the method", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_END_OPCODE_SCAN); // If there are no return blocks we know it does not return, however if there // return blocks we don't know it returns as it may be counting unreachable code. // However we will still make the CALLEE_DOES_NOT_RETURN observation. 
compInlineResult->NoteBool(InlineObservation::CALLEE_DOES_NOT_RETURN, retBlocks == 0); if (retBlocks == 0 && isInlining) { // Mark the call node as "no return" as it can impact caller's code quality. impInlineInfo->iciCall->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; // Mark root method as containing a noreturn call. impInlineRoot()->setMethodHasNoReturnCalls(); } // If the inline is viable and discretionary, do the // profitability screening. if (compInlineResult->IsDiscretionaryCandidate()) { // Make some callsite specific observations that will feed // into the profitability model. impMakeDiscretionaryInlineObservations(impInlineInfo, compInlineResult); // None of those observations should have changed the // inline's viability. assert(compInlineResult->IsCandidate()); if (isInlining) { // Assess profitability... CORINFO_METHOD_INFO* methodInfo = &impInlineInfo->inlineCandidateInfo->methInfo; compInlineResult->DetermineProfitability(methodInfo); if (compInlineResult->IsFailure()) { impInlineRoot()->m_inlineStrategy->NoteUnprofitable(); JITDUMP("\n\nInline expansion aborted, inline not profitable\n"); return; } else { // The inline is still viable. assert(compInlineResult->IsCandidate()); } } else { // Prejit root case. Profitability assessment for this // is done over in compCompileHelper. } } } // None of the local vars in the inlinee should have address taken or been written to. // Therefore we should NOT need to enter this "if" statement. if (!isInlining && !info.compIsStatic) { fgAdjustForAddressExposedOrWrittenThis(); } // Now that we've seen the IL, set lvSingleDef for root method // locals. // // We could also do this for root method arguments but single-def // arguments are set by the caller and so we don't know anything // about the possible values or types. // // For inlinees we do this over in impInlineFetchLocal and // impInlineFetchArg (here args are included as we somtimes get // new information about the types of inlinee args). 
if (!isInlining) { const unsigned firstLcl = info.compArgsCount; const unsigned lastLcl = firstLcl + info.compMethodInfo->locals.numArgs; for (unsigned lclNum = firstLcl; lclNum < lastLcl; lclNum++) { LclVarDsc* lclDsc = lvaGetDesc(lclNum); assert(lclDsc->lvSingleDef == 0); // could restrict this to TYP_REF lclDsc->lvSingleDef = !lclDsc->lvHasMultipleILStoreOp && !lclDsc->lvHasLdAddrOp; if (lclDsc->lvSingleDef) { JITDUMP("Marked V%02u as a single def local\n", lclNum); } } } } #ifdef _PREFAST_ #pragma warning(pop) #endif //------------------------------------------------------------------------ // fgAdjustForAddressExposedOrWrittenThis: update var table for cases // where the this pointer value can change. // // Notes: // Modifies lvaArg0Var to refer to a temp if the value of 'this' can // change. The original this (info.compThisArg) then remains // unmodified in the method. fgAddInternal is reponsible for // adding the code to copy the initial this into the temp. void Compiler::fgAdjustForAddressExposedOrWrittenThis() { // Optionally enable adjustment during stress. if (!tiVerificationNeeded && compStressCompile(STRESS_GENERIC_VARN, 15)) { lvaTable[info.compThisArg].lvHasILStoreOp = true; } // If this is exposed or written to, create a temp for the modifiable this if (lvaTable[info.compThisArg].lvAddrExposed || lvaTable[info.compThisArg].lvHasILStoreOp) { // If there is a "ldarga 0" or "starg 0", grab and use the temp. 
lvaArg0Var = lvaGrabTemp(false DEBUGARG("Address-exposed, or written this pointer")); noway_assert(lvaArg0Var > (unsigned)info.compThisArg); lvaTable[lvaArg0Var].lvType = lvaTable[info.compThisArg].TypeGet(); lvaTable[lvaArg0Var].lvAddrExposed = lvaTable[info.compThisArg].lvAddrExposed; lvaTable[lvaArg0Var].lvDoNotEnregister = lvaTable[info.compThisArg].lvDoNotEnregister; #ifdef DEBUG lvaTable[lvaArg0Var].lvVMNeedsStackAddr = lvaTable[info.compThisArg].lvVMNeedsStackAddr; lvaTable[lvaArg0Var].lvLiveInOutOfHndlr = lvaTable[info.compThisArg].lvLiveInOutOfHndlr; lvaTable[lvaArg0Var].lvLclFieldExpr = lvaTable[info.compThisArg].lvLclFieldExpr; lvaTable[lvaArg0Var].lvLiveAcrossUCall = lvaTable[info.compThisArg].lvLiveAcrossUCall; #endif lvaTable[lvaArg0Var].lvHasILStoreOp = lvaTable[info.compThisArg].lvHasILStoreOp; lvaTable[lvaArg0Var].lvVerTypeInfo = lvaTable[info.compThisArg].lvVerTypeInfo; // Clear the TI_FLAG_THIS_PTR in the original 'this' pointer. noway_assert(lvaTable[lvaArg0Var].lvVerTypeInfo.IsThisPtr()); lvaTable[info.compThisArg].lvVerTypeInfo.ClearThisPtr(); lvaTable[info.compThisArg].lvAddrExposed = false; lvaTable[info.compThisArg].lvHasILStoreOp = false; } } //------------------------------------------------------------------------ // fgObserveInlineConstants: look for operations that might get optimized // if this method were to be inlined, and report these to the inliner. // // Arguments: // opcode -- MSIL opcode under consideration // stack -- abstract stack model at this point in the IL // isInlining -- true if we're inlining (vs compiling a prejit root) // // Notes: // Currently only invoked on compare and branch opcodes. // // If we're inlining we also look at the argument values supplied by // the caller at this call site. // // The crude stack model may overestimate stack depth. void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining) { // We should be able to record inline observations. 
assert(compInlineResult != nullptr); // The stack only has to be 1 deep for BRTRUE/FALSE bool lookForBranchCases = stack.IsStackAtLeastOneDeep(); if (lookForBranchCases) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S || opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) { unsigned slot0 = stack.GetSlot0(); if (FgStack::IsArgument(slot0)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); if (isInlining) { // Check for the double whammy of an incoming constant argument // feeding a constant test. unsigned varNum = FgStack::SlotTypeToArgNum(slot0); if (impInlineInfo->inlArgInfo[varNum].argIsInvariant) { compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); } } } return; } } // Remaining cases require at least two things on the stack. if (!stack.IsStackTwoDeep()) { return; } unsigned slot0 = stack.GetSlot0(); unsigned slot1 = stack.GetSlot1(); // Arg feeds constant test if ((FgStack::IsConstant(slot0) && FgStack::IsArgument(slot1)) || (FgStack::IsConstant(slot1) && FgStack::IsArgument(slot0))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); } // Arg feeds range check if ((FgStack::IsArrayLen(slot0) && FgStack::IsArgument(slot1)) || (FgStack::IsArrayLen(slot1) && FgStack::IsArgument(slot0))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); } // Check for an incoming arg that's a constant if (isInlining) { if (FgStack::IsArgument(slot0)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST); unsigned varNum = FgStack::SlotTypeToArgNum(slot0); if (impInlineInfo->inlArgInfo[varNum].argIsInvariant) { compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); } } if (FgStack::IsArgument(slot1)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST); unsigned varNum = FgStack::SlotTypeToArgNum(slot1); if (impInlineInfo->inlArgInfo[varNum].argIsInvariant) { 
compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); } } } } //------------------------------------------------------------------------ // fgMarkBackwardJump: mark blocks indicating there is a jump backwards in // IL, from a higher to lower IL offset. // // Arguments: // targetBlock -- target of the jump // sourceBlock -- source of the jump void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlock) { noway_assert(targetBlock->bbNum <= sourceBlock->bbNum); for (BasicBlock* const block : Blocks(targetBlock, sourceBlock)) { if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->bbJumpKind != BBJ_RETURN)) { block->bbFlags |= BBF_BACKWARD_JUMP; compHasBackwardJump = true; } } targetBlock->bbFlags |= BBF_BACKWARD_JUMP_TARGET; } /***************************************************************************** * * Finally link up the bbJumpDest of the blocks together */ void Compiler::fgLinkBasicBlocks() { /* Create the basic block lookup tables */ fgInitBBLookup(); /* First block is always reachable */ fgFirstBB->bbRefs = 1; /* Walk all the basic blocks, filling in the target addresses */ for (BasicBlock* const curBBdesc : Blocks()) { switch (curBBdesc->bbJumpKind) { case BBJ_COND: case BBJ_ALWAYS: case BBJ_LEAVE: curBBdesc->bbJumpDest = fgLookupBB(curBBdesc->bbJumpOffs); curBBdesc->bbJumpDest->bbRefs++; if (curBBdesc->bbJumpDest->bbNum <= curBBdesc->bbNum) { fgMarkBackwardJump(curBBdesc->bbJumpDest, curBBdesc); } /* Is the next block reachable? 
*/ if (curBBdesc->bbJumpKind == BBJ_ALWAYS || curBBdesc->bbJumpKind == BBJ_LEAVE) { break; } if (!curBBdesc->bbNext) { BADCODE("Fall thru the end of a method"); } // Fall through, the next block is also reachable FALLTHROUGH; case BBJ_NONE: curBBdesc->bbNext->bbRefs++; break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: case BBJ_RETURN: break; case BBJ_SWITCH: unsigned jumpCnt; jumpCnt = curBBdesc->bbJumpSwt->bbsCount; BasicBlock** jumpPtr; jumpPtr = curBBdesc->bbJumpSwt->bbsDstTab; do { *jumpPtr = fgLookupBB((unsigned)*(size_t*)jumpPtr); (*jumpPtr)->bbRefs++; if ((*jumpPtr)->bbNum <= curBBdesc->bbNum) { fgMarkBackwardJump(*jumpPtr, curBBdesc); } } while (++jumpPtr, --jumpCnt); /* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */ noway_assert(*(jumpPtr - 1) == curBBdesc->bbNext); break; case BBJ_CALLFINALLY: // BBJ_CALLFINALLY and BBJ_EHCATCHRET don't appear until later case BBJ_EHCATCHRET: default: noway_assert(!"Unexpected bbJumpKind"); break; } } } //------------------------------------------------------------------------ // fgMakeBasicBlocks: walk the IL creating basic blocks, and look for // operations that might get optimized if this method were to be inlined. // // Arguments: // codeAddr -- starting address of the method's IL stream // codeSize -- length of the IL stream // jumpTarget -- [in] bit vector of jump targets found by fgFindJumpTargets // // Returns: // number of return blocks (BBJ_RETURN) in the method (may be zero) // // Notes: // Invoked for prejited and jitted methods, and for all inlinees unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget) { unsigned retBlocks = 0; const BYTE* codeBegp = codeAddr; const BYTE* codeEndp = codeAddr + codeSize; bool tailCall = false; unsigned curBBoffs = 0; BasicBlock* curBBdesc; // Keep track of where we are in the scope lists, as we will also // create blocks at scope boundaries. 
if (opts.compDbgCode && (info.compVarScopesCount > 0)) { compResetScopeLists(); // Ignore scopes beginning at offset 0 while (compGetNextEnterScope(0)) { /* do nothing */ } while (compGetNextExitScope(0)) { /* do nothing */ } } do { unsigned jmpAddr = DUMMY_INIT(BAD_IL_OFFSET); BasicBlockFlags bbFlags = BBF_EMPTY; BBswtDesc* swtDsc = nullptr; unsigned nxtBBoffs; OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); BBjumpKinds jmpKind = BBJ_NONE; DECODE_OPCODE: /* Get the size of additional parameters */ noway_assert((unsigned)opcode < CEE_COUNT); unsigned sz = opcodeSizes[opcode]; switch (opcode) { signed jmpDist; case CEE_PREFIX1: if (jumpTarget->bitVectTest((UINT)(codeAddr - codeBegp))) { BADCODE3("jump target between prefix 0xFE and opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr)); codeAddr += sizeof(__int8); goto DECODE_OPCODE; /* Check to see if we have a jump/return opcode */ case CEE_BRFALSE: case CEE_BRFALSE_S: case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BEQ: case CEE_BEQ_S: case CEE_BGE: case CEE_BGE_S: case CEE_BGE_UN: case CEE_BGE_UN_S: case CEE_BGT: case CEE_BGT_S: case CEE_BGT_UN: case CEE_BGT_UN_S: case CEE_BLE: case CEE_BLE_S: case CEE_BLE_UN: case CEE_BLE_UN_S: case CEE_BLT: case CEE_BLT_S: case CEE_BLT_UN: case CEE_BLT_UN_S: case CEE_BNE_UN: case CEE_BNE_UN_S: jmpKind = BBJ_COND; goto JMP; case CEE_LEAVE: case CEE_LEAVE_S: // We need to check if we are jumping out of a finally-protected try. jmpKind = BBJ_LEAVE; goto JMP; case CEE_BR: case CEE_BR_S: jmpKind = BBJ_ALWAYS; goto JMP; JMP: /* Compute the target address of the jump */ jmpDist = (sz == 1) ? 
getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_BR || opcode == CEE_BR_S)) { continue; /* NOP */ } jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist; break; case CEE_SWITCH: { unsigned jmpBase; unsigned jmpCnt; // # of switch cases (excluding default) BasicBlock** jmpTab; BasicBlock** jmpPtr; /* Allocate the switch descriptor */ swtDsc = new (this, CMK_BasicBlock) BBswtDesc; /* Read the number of entries in the table */ jmpCnt = getU4LittleEndian(codeAddr); codeAddr += 4; /* Compute the base offset for the opcode */ jmpBase = (IL_OFFSET)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD)); /* Allocate the jump table */ jmpPtr = jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jmpCnt + 1]; /* Fill in the jump table */ for (unsigned count = jmpCnt; count; count--) { jmpDist = getI4LittleEndian(codeAddr); codeAddr += 4; // store the offset in the pointer. We change these in fgLinkBasicBlocks(). *jmpPtr++ = (BasicBlock*)(size_t)(jmpBase + jmpDist); } /* Append the default label to the target table */ *jmpPtr++ = (BasicBlock*)(size_t)jmpBase; /* Make sure we found the right number of labels */ noway_assert(jmpPtr == jmpTab + jmpCnt + 1); /* Compute the size of the switch opcode operands */ sz = sizeof(DWORD) + jmpCnt * sizeof(DWORD); /* Fill in the remaining fields of the switch descriptor */ swtDsc->bbsCount = jmpCnt + 1; swtDsc->bbsDstTab = jmpTab; /* This is definitely a jump */ jmpKind = BBJ_SWITCH; fgHasSwitch = true; if (opts.compProcedureSplitting) { // TODO-CQ: We might need to create a switch table; we won't know for sure until much later. // However, switch tables don't work with hot/cold splitting, currently. The switch table data needs // a relocation such that if the base (the first block after the prolog) and target of the switch // branch are put in different sections, the difference stored in the table is updated. 
However, our // relocation implementation doesn't support three different pointers (relocation address, base, and // target). So, we need to change our switch table implementation to be more like // JIT64: put the table in the code section, in the same hot/cold section as the switch jump itself // (maybe immediately after the switch jump), and make the "base" address be also in that section, // probably the address after the switch jump. opts.compProcedureSplitting = false; JITDUMP("Turning off procedure splitting for this method, as it might need switch tables; " "implementation limitation.\n"); } } goto GOT_ENDP; case CEE_ENDFILTER: bbFlags |= BBF_DONT_REMOVE; jmpKind = BBJ_EHFILTERRET; break; case CEE_ENDFINALLY: jmpKind = BBJ_EHFINALLYRET; break; case CEE_TAILCALL: if (compIsForInlining()) { // TODO-CQ: We can inline some callees with explicit tail calls if we can guarantee that the calls // can be dispatched as tail calls from the caller. compInlineResult->NoteFatal(InlineObservation::CALLEE_EXPLICIT_TAIL_PREFIX); retBlocks++; return retBlocks; } FALLTHROUGH; case CEE_READONLY: case CEE_CONSTRAINED: case CEE_VOLATILE: case CEE_UNALIGNED: // fgFindJumpTargets should have ruled out this possibility // (i.e. a prefix opcodes as last intruction in a block) noway_assert(codeAddr < codeEndp); if (jumpTarget->bitVectTest((UINT)(codeAddr - codeBegp))) { BADCODE3("jump target between prefix and an opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } break; case CEE_CALL: case CEE_CALLVIRT: case CEE_CALLI: { if (compIsForInlining() || // Ignore tail call in the inlinee. Period. (!tailCall && !compTailCallStress()) // A new BB with BBJ_RETURN would have been created // after a tailcall statement. // We need to keep this invariant if we want to stress the tailcall. // That way, the potential (tail)call statement is always the last // statement in the block. 
// Otherwise, we will assert at the following line in fgMorphCall() // noway_assert(fgMorphStmt->GetNextStmt() == NULL); ) { // Neither .tailcall prefix, no tailcall stress. So move on. break; } // Make sure the code sequence is legal for the tail call. // If so, mark this BB as having a BBJ_RETURN. if (codeAddr >= codeEndp - sz) { BADCODE3("No code found after the call instruction", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (tailCall) { // impIsTailCallILPattern uses isRecursive flag to determine whether ret in a fallthrough block is // allowed. We don't know at this point whether the call is recursive so we conservatively pass // false. This will only affect explicit tail calls when IL verification is not needed for the // method. bool isRecursive = false; if (!impIsTailCallILPattern(tailCall, opcode, codeAddr + sz, codeEndp, isRecursive)) { BADCODE3("tail call not followed by ret", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (fgCanSwitchToOptimized() && fgMayExplicitTailCall()) { // Method has an explicit tail call that may run like a loop or may not be generated as a tail // call in tier 0, switch to optimized to avoid spending too much time running slower code and // to avoid stack overflow from recursion fgSwitchToOptimized(); } } else { OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr + sz); if (nextOpcode != CEE_RET) { noway_assert(compTailCallStress()); // Next OPCODE is not a CEE_RET, bail the attempt to stress the tailcall. // (I.e. We will not make a new BB after the "call" statement.) break; } } } /* For tail call, we just call CORINFO_HELP_TAILCALL, and it jumps to the target. So we don't need an epilog - just like CORINFO_HELP_THROW. Make the block BBJ_RETURN, but we will change it to BBJ_THROW if the tailness of the call is satisfied. NOTE : The next instruction is guaranteed to be a CEE_RET and it will create another BasicBlock. But there may be an jump directly to that CEE_RET. 
If we want to avoid creating an unnecessary block, we need to check if the CEE_RETURN is the target of a jump. */ FALLTHROUGH; case CEE_JMP: /* These are equivalent to a return from the current method But instead of directly returning to the caller we jump and execute something else in between */ case CEE_RET: retBlocks++; jmpKind = BBJ_RETURN; break; case CEE_THROW: case CEE_RETHROW: jmpKind = BBJ_THROW; break; #ifdef DEBUG // make certain we did not forget any flow of control instructions // by checking the 'ctrl' field in opcode.def. First filter out all // non-ctrl instructions #define BREAK(name) \ case name: \ break; #define NEXT(name) \ case name: \ break; #define CALL(name) #define THROW(name) #undef RETURN // undef contract RETURN macro #define RETURN(name) #define META(name) #define BRANCH(name) #define COND_BRANCH(name) #define PHI(name) #define OPDEF(name, string, pop, push, oprType, opcType, l, s1, s2, ctrl) ctrl(name) #include "opcode.def" #undef OPDEF #undef PHI #undef BREAK #undef CALL #undef NEXT #undef THROW #undef RETURN #undef META #undef BRANCH #undef COND_BRANCH // These ctrl-flow opcodes don't need any special handling case CEE_NEWOBJ: // CTRL_CALL break; // what's left are forgotten instructions default: BADCODE("Unrecognized control Opcode"); break; #else // !DEBUG default: break; #endif // !DEBUG } /* Jump over the operand */ codeAddr += sz; GOT_ENDP: tailCall = (opcode == CEE_TAILCALL); /* Make sure a jump target isn't in the middle of our opcode */ if (sz) { IL_OFFSET offs = (IL_OFFSET)(codeAddr - codeBegp) - sz; // offset of the operand for (unsigned i = 0; i < sz; i++, offs++) { if (jumpTarget->bitVectTest(offs)) { BADCODE3("jump into the middle of an opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } } } /* Compute the offset of the next opcode */ nxtBBoffs = (IL_OFFSET)(codeAddr - codeBegp); bool foundScope = false; if (opts.compDbgCode && (info.compVarScopesCount > 0)) { while (compGetNextEnterScope(nxtBBoffs)) { 
foundScope = true; } while (compGetNextExitScope(nxtBBoffs)) { foundScope = true; } } /* Do we have a jump? */ if (jmpKind == BBJ_NONE) { /* No jump; make sure we don't fall off the end of the function */ if (codeAddr == codeEndp) { BADCODE3("missing return opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } /* If a label follows this opcode, we'll have to make a new BB */ bool makeBlock = jumpTarget->bitVectTest(nxtBBoffs); if (!makeBlock && foundScope) { makeBlock = true; #ifdef DEBUG if (verbose) { printf("Splitting at BBoffs = %04u\n", nxtBBoffs); } #endif // DEBUG } if (!makeBlock) { continue; } } /* We need to create a new basic block */ curBBdesc = fgNewBasicBlock(jmpKind); curBBdesc->bbFlags |= bbFlags; curBBdesc->bbRefs = 0; curBBdesc->bbCodeOffs = curBBoffs; curBBdesc->bbCodeOffsEnd = nxtBBoffs; switch (jmpKind) { case BBJ_SWITCH: curBBdesc->bbJumpSwt = swtDsc; break; case BBJ_COND: case BBJ_ALWAYS: case BBJ_LEAVE: noway_assert(jmpAddr != DUMMY_INIT(BAD_IL_OFFSET)); curBBdesc->bbJumpOffs = jmpAddr; break; default: break; } DBEXEC(verbose, curBBdesc->dspBlockHeader(this, false, false, false)); /* Remember where the next BB will start */ curBBoffs = nxtBBoffs; } while (codeAddr < codeEndp); noway_assert(codeAddr == codeEndp); /* Finally link up the bbJumpDest of the blocks together */ fgLinkBasicBlocks(); return retBlocks; } /***************************************************************************** * * Main entry point to discover the basic blocks for the current function. */ void Compiler::fgFindBasicBlocks() { #ifdef DEBUG if (verbose) { printf("*************** In fgFindBasicBlocks() for %s\n", info.compFullName); } // Call this here so any dump printing it inspires doesn't appear in the bb table. 
// fgStressBBProf(); #endif // Allocate the 'jump target' bit vector FixedBitVect* jumpTarget = FixedBitVect::bitVectInit(info.compILCodeSize + 1, this); // Walk the instrs to find all jump targets fgFindJumpTargets(info.compCode, info.compILCodeSize, jumpTarget); if (compDonotInline()) { return; } unsigned XTnum; /* Are there any exception handlers? */ if (info.compXcptnsCount > 0) { noway_assert(!compIsForInlining()); /* Check and mark all the exception handlers */ for (XTnum = 0; XTnum < info.compXcptnsCount; XTnum++) { CORINFO_EH_CLAUSE clause; info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); noway_assert(clause.HandlerLength != (unsigned)-1); if (clause.TryLength <= 0) { BADCODE("try block length <=0"); } /* Mark the 'try' block extent and the handler itself */ if (clause.TryOffset > info.compILCodeSize) { BADCODE("try offset is > codesize"); } jumpTarget->bitVectSet(clause.TryOffset); if (clause.TryOffset + clause.TryLength > info.compILCodeSize) { BADCODE("try end is > codesize"); } jumpTarget->bitVectSet(clause.TryOffset + clause.TryLength); if (clause.HandlerOffset > info.compILCodeSize) { BADCODE("handler offset > codesize"); } jumpTarget->bitVectSet(clause.HandlerOffset); if (clause.HandlerOffset + clause.HandlerLength > info.compILCodeSize) { BADCODE("handler end > codesize"); } jumpTarget->bitVectSet(clause.HandlerOffset + clause.HandlerLength); if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { if (clause.FilterOffset > info.compILCodeSize) { BADCODE("filter offset > codesize"); } jumpTarget->bitVectSet(clause.FilterOffset); } } } #ifdef DEBUG if (verbose) { bool anyJumpTargets = false; printf("Jump targets:\n"); for (unsigned i = 0; i < info.compILCodeSize + 1; i++) { if (jumpTarget->bitVectTest(i)) { anyJumpTargets = true; printf(" IL_%04x\n", i); } } if (!anyJumpTargets) { printf(" none\n"); } } #endif // DEBUG /* Now create the basic blocks */ unsigned retBlocks = fgMakeBasicBlocks(info.compCode, info.compILCodeSize, jumpTarget); if 
(compIsForInlining()) { #ifdef DEBUG // If fgFindJumpTargets marked the call as "no return" there // really should be no BBJ_RETURN blocks in the method. bool markedNoReturn = (impInlineInfo->iciCall->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0; assert((markedNoReturn && (retBlocks == 0)) || (!markedNoReturn && (retBlocks >= 1))); #endif // DEBUG if (compInlineResult->IsFailure()) { return; } noway_assert(info.compXcptnsCount == 0); compHndBBtab = impInlineInfo->InlinerCompiler->compHndBBtab; compHndBBtabAllocCount = impInlineInfo->InlinerCompiler->compHndBBtabAllocCount; // we probably only use the table, not add to it. compHndBBtabCount = impInlineInfo->InlinerCompiler->compHndBBtabCount; info.compXcptnsCount = impInlineInfo->InlinerCompiler->info.compXcptnsCount; // Use a spill temp for the return value if there are multiple return blocks, // or if the inlinee has GC ref locals. if ((info.compRetNativeType != TYP_VOID) && ((retBlocks > 1) || impInlineInfo->HasGcRefLocals())) { // If we've spilled the ret expr to a temp we can reuse the temp // as the inlinee return spill temp. // // Todo: see if it is even better to always use this existing temp // for return values, even if we otherwise wouldn't need a return spill temp... lvaInlineeReturnSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // This temp should already have the type of the return value. JITDUMP("\nInliner: re-using pre-existing spill temp V%02u\n", lvaInlineeReturnSpillTemp); if (info.compRetType == TYP_REF) { // We may have co-opted an existing temp for the return spill. // We likely assumed it was single-def at the time, but now // we can see it has multiple definitions. if ((retBlocks > 1) && (lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef == 1)) { // Make sure it is no longer marked single def. This is only safe // to do if we haven't ever updated the type. 
assert(!lvaTable[lvaInlineeReturnSpillTemp].lvClassInfoUpdated); JITDUMP("Marked return spill temp V%02u as NOT single def temp\n", lvaInlineeReturnSpillTemp); lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef = 0; } } } else { // The lifetime of this var might expand multiple BBs. So it is a long lifetime compiler temp. lvaInlineeReturnSpillTemp = lvaGrabTemp(false DEBUGARG("Inline return value spill temp")); lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetType; // If the method returns a ref class, set the class of the spill temp // to the method's return value. We may update this later if it turns // out we can prove the method returns a more specific type. if (info.compRetType == TYP_REF) { // The return spill temp is single def only if the method has a single return block. if (retBlocks == 1) { lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef = 1; JITDUMP("Marked return spill temp V%02u as a single def temp\n", lvaInlineeReturnSpillTemp); } CORINFO_CLASS_HANDLE retClassHnd = impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass; if (retClassHnd != nullptr) { lvaSetClass(lvaInlineeReturnSpillTemp, retClassHnd); } } } } return; } // If we are doing OSR, add an entry block that simply branches to the right IL offset. if (opts.IsOSR()) { // Remember the original entry block in case this method is tail recursive. fgEntryBB = fgLookupBB(0); // Find the OSR entry block. assert(info.compILEntry >= 0); BasicBlock* bbTarget = fgLookupBB(info.compILEntry); fgEnsureFirstBBisScratch(); fgFirstBB->bbJumpKind = BBJ_ALWAYS; fgFirstBB->bbJumpDest = bbTarget; fgAddRefPred(bbTarget, fgFirstBB); JITDUMP("OSR: redirecting flow at entry via " FMT_BB " to " FMT_BB " (il offset 0x%x)\n", fgFirstBB->bbNum, bbTarget->bbNum, info.compILEntry); // rebuild lookup table... should be able to avoid this by leaving room up front. 
fgInitBBLookup(); } /* Mark all blocks within 'try' blocks as such */ if (info.compXcptnsCount == 0) { return; } if (info.compXcptnsCount > MAX_XCPTN_INDEX) { IMPL_LIMITATION("too many exception clauses"); } /* Allocate the exception handler table */ fgAllocEHTable(); /* Assume we don't need to sort the EH table (such that nested try/catch * appear before their try or handler parent). The EH verifier will notice * when we do need to sort it. */ fgNeedToSortEHTable = false; verInitEHTree(info.compXcptnsCount); EHNodeDsc* initRoot = ehnNext; // remember the original root since // it may get modified during insertion // Annotate BBs with exception handling information required for generating correct eh code // as well as checking for correct IL EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { CORINFO_EH_CLAUSE clause; info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); noway_assert(clause.HandlerLength != (unsigned)-1); // @DEPRECATED #ifdef DEBUG if (verbose) { dispIncomingEHClause(XTnum, clause); } #endif // DEBUG IL_OFFSET tryBegOff = clause.TryOffset; IL_OFFSET tryEndOff = tryBegOff + clause.TryLength; IL_OFFSET filterBegOff = 0; IL_OFFSET hndBegOff = clause.HandlerOffset; IL_OFFSET hndEndOff = hndBegOff + clause.HandlerLength; if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filterBegOff = clause.FilterOffset; } if (tryEndOff > info.compILCodeSize) { BADCODE3("end of try block beyond end of method for try", " at offset %04X", tryBegOff); } if (hndEndOff > info.compILCodeSize) { BADCODE3("end of hnd block beyond end of method for try", " at offset %04X", tryBegOff); } HBtab->ebdTryBegOffset = tryBegOff; HBtab->ebdTryEndOffset = tryEndOff; HBtab->ebdFilterBegOffset = filterBegOff; HBtab->ebdHndBegOffset = hndBegOff; HBtab->ebdHndEndOffset = hndEndOff; /* Convert the various addresses to basic blocks */ BasicBlock* tryBegBB = fgLookupBB(tryBegOff); BasicBlock* tryEndBB = fgLookupBB(tryEndOff); // note: 
this can be NULL if the try region is at the end of the function BasicBlock* hndBegBB = fgLookupBB(hndBegOff); BasicBlock* hndEndBB = nullptr; BasicBlock* filtBB = nullptr; BasicBlock* block; // // Assert that the try/hnd beginning blocks are set up correctly // if (tryBegBB == nullptr) { BADCODE("Try Clause is invalid"); } if (hndBegBB == nullptr) { BADCODE("Handler Clause is invalid"); } #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION // This will change the block weight from 0 to 1 // and clear the rarely run flag hndBegBB->makeBlockHot(); #else hndBegBB->bbSetRunRarely(); // handler entry points are rarely executed #endif if (hndEndOff < info.compILCodeSize) { hndEndBB = fgLookupBB(hndEndOff); } if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filtBB = HBtab->ebdFilter = fgLookupBB(clause.FilterOffset); filtBB->bbCatchTyp = BBCT_FILTER; hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER; #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION // This will change the block weight from 0 to 1 // and clear the rarely run flag filtBB->makeBlockHot(); #else filtBB->bbSetRunRarely(); // filter entry points are rarely executed #endif // Mark all BBs that belong to the filter with the XTnum of the corresponding handler for (block = filtBB; /**/; block = block->bbNext) { if (block == nullptr) { BADCODE3("Missing endfilter for filter", " at offset %04X", filtBB->bbCodeOffs); return; } // Still inside the filter block->setHndIndex(XTnum); if (block->bbJumpKind == BBJ_EHFILTERRET) { // Mark catch handler as successor. 
block->bbJumpDest = hndBegBB; assert(block->bbJumpDest->bbCatchTyp == BBCT_FILTER_HANDLER); break; } } if (!block->bbNext || block->bbNext != hndBegBB) { BADCODE3("Filter does not immediately precede handler for filter", " at offset %04X", filtBB->bbCodeOffs); } } else { HBtab->ebdTyp = clause.ClassToken; /* Set bbCatchTyp as appropriate */ if (clause.Flags & CORINFO_EH_CLAUSE_FINALLY) { hndBegBB->bbCatchTyp = BBCT_FINALLY; } else { if (clause.Flags & CORINFO_EH_CLAUSE_FAULT) { hndBegBB->bbCatchTyp = BBCT_FAULT; } else { hndBegBB->bbCatchTyp = clause.ClassToken; // These values should be non-zero value that will // not collide with real tokens for bbCatchTyp if (clause.ClassToken == 0) { BADCODE("Exception catch type is Null"); } noway_assert(clause.ClassToken != BBCT_FAULT); noway_assert(clause.ClassToken != BBCT_FINALLY); noway_assert(clause.ClassToken != BBCT_FILTER); noway_assert(clause.ClassToken != BBCT_FILTER_HANDLER); } } } /* Mark the initial block and last blocks in the 'try' region */ tryBegBB->bbFlags |= BBF_TRY_BEG; /* Prevent future optimizations of removing the first block */ /* of a TRY block and the first block of an exception handler */ tryBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbRefs++; // The first block of a handler gets an extra, "artificial" reference count. if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filtBB->bbFlags |= BBF_DONT_REMOVE; filtBB->bbRefs++; // The first block of a filter gets an extra, "artificial" reference count. } tryBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbFlags |= BBF_DONT_REMOVE; // // Store the info to the table of EH block handlers // HBtab->ebdHandlerType = ToEHHandlerType(clause.Flags); HBtab->ebdTryBeg = tryBegBB; HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->bbPrev; HBtab->ebdHndBeg = hndBegBB; HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->bbPrev; // // Assert that all of our try/hnd blocks are setup correctly. 
// if (HBtab->ebdTryLast == nullptr) { BADCODE("Try Clause is invalid"); } if (HBtab->ebdHndLast == nullptr) { BADCODE("Handler Clause is invalid"); } // // Verify that it's legal // verInsertEhNode(&clause, HBtab); } // end foreach handler table entry fgSortEHTable(); // Next, set things related to nesting that depend on the sorting being complete. for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { /* Mark all blocks in the finally/fault or catch clause */ BasicBlock* tryBegBB = HBtab->ebdTryBeg; BasicBlock* hndBegBB = HBtab->ebdHndBeg; IL_OFFSET tryBegOff = HBtab->ebdTryBegOffset; IL_OFFSET tryEndOff = HBtab->ebdTryEndOffset; IL_OFFSET hndBegOff = HBtab->ebdHndBegOffset; IL_OFFSET hndEndOff = HBtab->ebdHndEndOffset; BasicBlock* block; for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->bbNext) { if (!block->hasHndIndex()) { block->setHndIndex(XTnum); } // All blocks in a catch handler or filter are rarely run, except the entry if ((block != hndBegBB) && (hndBegBB->bbCatchTyp != BBCT_FINALLY)) { block->bbSetRunRarely(); } } /* Mark all blocks within the covered range of the try */ for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->bbNext) { /* Mark this BB as belonging to a 'try' block */ if (!block->hasTryIndex()) { block->setTryIndex(XTnum); } #ifdef DEBUG /* Note: the BB can't span the 'try' block */ if (!(block->bbFlags & BBF_INTERNAL)) { noway_assert(tryBegOff <= block->bbCodeOffs); noway_assert(tryEndOff >= block->bbCodeOffsEnd || tryEndOff == tryBegOff); } #endif } /* Init ebdHandlerNestingLevel of current clause, and bump up value for all * enclosed clauses (which have to be before it in the table). * Innermost try-finally blocks must precede outermost * try-finally blocks. 
*/ #if !defined(FEATURE_EH_FUNCLETS) HBtab->ebdHandlerNestingLevel = 0; #endif // !FEATURE_EH_FUNCLETS HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; noway_assert(XTnum < compHndBBtabCount); noway_assert(XTnum == ehGetIndex(HBtab)); for (EHblkDsc* xtab = compHndBBtab; xtab < HBtab; xtab++) { #if !defined(FEATURE_EH_FUNCLETS) if (jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) { xtab->ebdHandlerNestingLevel++; } #endif // !FEATURE_EH_FUNCLETS /* If we haven't recorded an enclosing try index for xtab then see * if this EH region should be recorded. We check if the * first offset in the xtab lies within our region. If so, * the last offset also must lie within the region, due to * nesting rules. verInsertEhNode(), below, will check for proper nesting. */ if (xtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { bool begBetween = jitIsBetween(xtab->ebdTryBegOffs(), tryBegOff, tryEndOff); if (begBetween) { // Record the enclosing scope link xtab->ebdEnclosingTryIndex = (unsigned short)XTnum; } } /* Do the same for the enclosing handler index. */ if (xtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) { bool begBetween = jitIsBetween(xtab->ebdTryBegOffs(), hndBegOff, hndEndOff); if (begBetween) { // Record the enclosing scope link xtab->ebdEnclosingHndIndex = (unsigned short)XTnum; } } } } // end foreach handler table entry #if !defined(FEATURE_EH_FUNCLETS) for (EHblkDsc* const HBtab : EHClauses(this)) { if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; } #endif // !FEATURE_EH_FUNCLETS #ifndef DEBUG if (tiVerificationNeeded) #endif { // always run these checks for a debug build verCheckNestingLevel(initRoot); } #ifndef DEBUG // fgNormalizeEH assumes that this test has been passed. And Ssa assumes that fgNormalizeEHTable // has been run. So do this unless we're in minOpts mode (and always in debug). 
    if (tiVerificationNeeded || !opts.MinOpts())
#endif
    {
        fgCheckBasicBlockControlFlow();
    }

#ifdef DEBUG
    if (verbose)
    {
        JITDUMP("*************** After fgFindBasicBlocks() has created the EH table\n");
        fgDispHandlerTab();
    }

    // We can't verify the handler table until all the IL legality checks have been done (above), since bad IL
    // (such as illegal nesting of regions) will trigger asserts here.
    fgVerifyHandlerTab();
#endif

    fgNormalizeEH();
}

/*****************************************************************************
 * Check control flow constraints for well formed IL. Bail if any of the constraints
 * are violated.
 *
 * Walks every non-internal basic block and, based on its bbJumpKind, delegates
 * legality checks for each control-flow edge to fgControlFlowPermitted. A
 * violation does not return an error; it raises BADCODE/BADCODE3, which aborts
 * compilation of the method.
 *
 * Must run before EH normalization (see the assert below): the rules here are
 * stated in terms of the original, un-normalized block layout.
 */

void Compiler::fgCheckBasicBlockControlFlow()
{
    assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks

    EHblkDsc* HBtab;

    for (BasicBlock* const blk : Blocks())
    {
        // Compiler-created (internal) blocks are not subject to IL-level flow rules.
        if (blk->bbFlags & BBF_INTERNAL)
        {
            continue;
        }

        switch (blk->bbJumpKind)
        {
            case BBJ_NONE: // block flows into the next one (no jump)
                fgControlFlowPermitted(blk, blk->bbNext);
                break;

            case BBJ_ALWAYS: // block does unconditional jump to target
                fgControlFlowPermitted(blk, blk->bbJumpDest);
                break;

            case BBJ_COND: // block conditionally jumps to the target
                // Both the fall-through edge and the taken edge must be legal.
                fgControlFlowPermitted(blk, blk->bbNext);
                fgControlFlowPermitted(blk, blk->bbJumpDest);
                break;

            case BBJ_RETURN: // block ends with 'ret'
                // 'ret' may not appear inside any try or handler region.
                if (blk->hasTryIndex() || blk->hasHndIndex())
                {
                    BADCODE3("Return from a protected block", ". Before offset %04X", blk->bbCodeOffsEnd);
                }
                break;

            case BBJ_EHFINALLYRET:
            case BBJ_EHFILTERRET:
                if (!blk->hasHndIndex()) // must be part of a handler
                {
                    BADCODE3("Missing handler", ". Before offset %04X", blk->bbCodeOffsEnd);
                }

                HBtab = ehGetDsc(blk->getHndIndex());

                // Endfilter allowed only in a filter block
                if (blk->bbJumpKind == BBJ_EHFILTERRET)
                {
                    if (!HBtab->HasFilter())
                    {
                        BADCODE("Unexpected endfilter");
                    }
                }
                // endfinally allowed only in a finally/fault block
                else if (!HBtab->HasFinallyOrFaultHandler())
                {
                    BADCODE("Unexpected endfinally");
                }

                // The handler block should be the innermost block
                // Exception blocks are listed, innermost first.
                // A try index numerically smaller than the handler index means a
                // nested try encloses this point, which is illegal here.
                if (blk->hasTryIndex() && (blk->getTryIndex() < blk->getHndIndex()))
                {
                    BADCODE("endfinally / endfilter in nested try block");
                }
                break;

            case BBJ_THROW: // block ends with 'throw'
                /* throw is permitted from every BB, so nothing to check */
                /* importer makes sure that rethrow is done from a catch */
                break;

            case BBJ_LEAVE: // block always jumps to the target, maybe out of guarded
                            // region. Used temporarily until importing
                fgControlFlowPermitted(blk, blk->bbJumpDest, true);
                break;

            case BBJ_SWITCH: // block ends with a switch statement
                // Every switch target edge must be individually legal.
                for (BasicBlock* const bTarget : blk->SwitchTargets())
                {
                    fgControlFlowPermitted(blk, bTarget);
                }
                break;

            case BBJ_EHCATCHRET:  // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS))
            case BBJ_CALLFINALLY: // block always calls the target finally
            default:
                noway_assert(!"Unexpected bbJumpKind"); // these blocks don't get created until importing
                break;
        }
    }
}

/****************************************************************************
 * Check that the leave from the block is legal.
* Consider removing this check here if we can do it cheaply during importing */ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool isLeave) { assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks unsigned srcHndBeg, destHndBeg; unsigned srcHndEnd, destHndEnd; bool srcInFilter, destInFilter; bool srcInCatch = false; EHblkDsc* srcHndTab; srcHndTab = ehInitHndRange(blkSrc, &srcHndBeg, &srcHndEnd, &srcInFilter); ehInitHndRange(blkDest, &destHndBeg, &destHndEnd, &destInFilter); /* Impose the rules for leaving or jumping from handler blocks */ if (blkSrc->hasHndIndex()) { srcInCatch = srcHndTab->HasCatchHandler() && srcHndTab->InHndRegionILRange(blkSrc); /* Are we jumping within the same handler index? */ if (BasicBlock::sameHndRegion(blkSrc, blkDest)) { /* Do we have a filter clause? */ if (srcHndTab->HasFilter()) { /* filters and catch handlers share same eh index */ /* we need to check for control flow between them. */ if (srcInFilter != destInFilter) { if (!jitIsBetween(blkDest->bbCodeOffs, srcHndBeg, srcHndEnd)) { BADCODE3("Illegal control flow between filter and handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } } else { /* The handler indexes of blkSrc and blkDest are different */ if (isLeave) { /* Any leave instructions must not enter the dest handler from outside*/ if (!jitIsBetween(srcHndBeg, destHndBeg, destHndEnd)) { BADCODE3("Illegal use of leave to enter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else { /* We must use a leave to exit a handler */ BADCODE3("Illegal control flow out of a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* Do we have a filter clause? */ if (srcHndTab->HasFilter()) { /* It is ok to leave from the handler block of a filter, */ /* but not from the filter block of a filter */ if (srcInFilter != destInFilter) { BADCODE3("Illegal to leave a filter handler", ". 
Before offset %04X", blkSrc->bbCodeOffsEnd); } } /* We should never leave a finally handler */ if (srcHndTab->HasFinallyHandler()) { BADCODE3("Illegal to leave a finally handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* We should never leave a fault handler */ if (srcHndTab->HasFaultHandler()) { BADCODE3("Illegal to leave a fault handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } else if (blkDest->hasHndIndex()) { /* blkSrc was not inside a handler, but blkDst is inside a handler */ BADCODE3("Illegal control flow into a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* Are we jumping from a catch handler into the corresponding try? */ /* VB uses this for "on error goto " */ if (isLeave && srcInCatch) { // inspect all handlers containing the jump source bool bValidJumpToTry = false; // are we jumping in a valid way from a catch to the corresponding try? bool bCatchHandlerOnly = true; // false if we are jumping out of a non-catch handler EHblkDsc* ehTableEnd; EHblkDsc* ehDsc; for (ehDsc = compHndBBtab, ehTableEnd = compHndBBtab + compHndBBtabCount; bCatchHandlerOnly && ehDsc < ehTableEnd; ehDsc++) { if (ehDsc->InHndRegionILRange(blkSrc)) { if (ehDsc->HasCatchHandler()) { if (ehDsc->InTryRegionILRange(blkDest)) { // If we already considered the jump for a different try/catch, // we would have two overlapping try regions with two overlapping catch // regions, which is illegal. noway_assert(!bValidJumpToTry); // Allowed if it is the first instruction of an inner try // (and all trys in between) // // try { // .. // _tryAgain: // .. // try { // _tryNestedInner: // .. // try { // _tryNestedIllegal: // .. // } catch { // .. // } // .. // } catch { // .. // } // .. // } catch { // .. // leave _tryAgain // Allowed // .. // leave _tryNestedInner // Allowed // .. // leave _tryNestedIllegal // Not Allowed // .. // } // // Note: The leave is allowed also from catches nested inside the catch shown above. 
/* The common case where leave is to the corresponding try */ if (ehDsc->ebdIsSameTry(this, blkDest->getTryIndex()) || /* Also allowed is a leave to the start of a try which starts in the handler's try */ fgFlowToFirstBlockOfInnerTry(ehDsc->ebdTryBeg, blkDest, false)) { bValidJumpToTry = true; } } } else { // We are jumping from a handler which is not a catch handler. // If it's a handler, but not a catch handler, it must be either a finally or fault if (!ehDsc->HasFinallyOrFaultHandler()) { BADCODE3("Handlers must be catch, finally, or fault", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } // Are we jumping out of this handler? if (!ehDsc->InHndRegionILRange(blkDest)) { bCatchHandlerOnly = false; } } } else if (ehDsc->InFilterRegionILRange(blkSrc)) { // Are we jumping out of a filter? if (!ehDsc->InFilterRegionILRange(blkDest)) { bCatchHandlerOnly = false; } } } if (bCatchHandlerOnly) { if (bValidJumpToTry) { return; } else { // FALL THROUGH // This is either the case of a leave to outside the try/catch, // or a leave to a try not nested in this try/catch. // The first case is allowed, the second one will be checked // later when we check the try block rules (it is illegal if we // jump to the middle of the destination try). } } else { BADCODE3("illegal leave to exit a finally, fault or filter", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } /* Check all the try block rules */ IL_OFFSET srcTryBeg; IL_OFFSET srcTryEnd; IL_OFFSET destTryBeg; IL_OFFSET destTryEnd; ehInitTryRange(blkSrc, &srcTryBeg, &srcTryEnd); ehInitTryRange(blkDest, &destTryBeg, &destTryEnd); /* Are we jumping between try indexes? */ if (!BasicBlock::sameTryRegion(blkSrc, blkDest)) { // Are we exiting from an inner to outer try? if (jitIsBetween(srcTryBeg, destTryBeg, destTryEnd) && jitIsBetween(srcTryEnd - 1, destTryBeg, destTryEnd)) { if (!isLeave) { BADCODE3("exit from try block without a leave", ". 
Before offset %04X", blkSrc->bbCodeOffsEnd); } } else if (jitIsBetween(destTryBeg, srcTryBeg, srcTryEnd)) { // check that the dest Try is first instruction of an inner try if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, false)) { BADCODE3("control flow into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else // there is no nesting relationship between src and dest { if (isLeave) { // check that the dest Try is first instruction of an inner try sibling if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, true)) { BADCODE3("illegal leave into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else { BADCODE3("illegal control flow in to/out of try block", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } } /***************************************************************************** * Check that blkDest is the first block of an inner try or a sibling * with no intervening trys in between */ bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling) { assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks noway_assert(blkDest->hasTryIndex()); unsigned XTnum = blkDest->getTryIndex(); unsigned lastXTnum = blkSrc->hasTryIndex() ? blkSrc->getTryIndex() : compHndBBtabCount; noway_assert(XTnum < compHndBBtabCount); noway_assert(lastXTnum <= compHndBBtabCount); EHblkDsc* HBtab = ehGetDsc(XTnum); // check that we are not jumping into middle of try if (HBtab->ebdTryBeg != blkDest) { return false; } if (sibling) { noway_assert(!BasicBlock::sameTryRegion(blkSrc, blkDest)); // find the l.u.b of the two try ranges // Set lastXTnum to the l.u.b. 
HBtab = ehGetDsc(lastXTnum); for (lastXTnum++, HBtab++; lastXTnum < compHndBBtabCount; lastXTnum++, HBtab++) { if (jitIsBetweenInclusive(blkDest->bbNum, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum)) { break; } } } // now check there are no intervening trys between dest and l.u.b // (it is ok to have intervening trys as long as they all start at // the same code offset) HBtab = ehGetDsc(XTnum); for (XTnum++, HBtab++; XTnum < lastXTnum; XTnum++, HBtab++) { if (HBtab->ebdTryBeg->bbNum < blkDest->bbNum && blkDest->bbNum <= HBtab->ebdTryLast->bbNum) { return false; } } return true; } /***************************************************************************** * Returns the handler nesting level of the block. * *pFinallyNesting is set to the nesting level of the inner-most * finally-protected try the block is in. */ unsigned Compiler::fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting) { unsigned curNesting = 0; // How many handlers is the block in unsigned tryFin = (unsigned)-1; // curNesting when we see innermost finally-protected try unsigned XTnum; EHblkDsc* HBtab; /* We find the block's handler nesting level by walking over the complete exception table and find enclosing clauses. */ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { noway_assert(HBtab->ebdTryBeg && HBtab->ebdHndBeg); if (HBtab->HasFinallyHandler() && (tryFin == (unsigned)-1) && bbInTryRegions(XTnum, block)) { tryFin = curNesting; } else if (bbInHandlerRegions(XTnum, block)) { curNesting++; } } if (tryFin == (unsigned)-1) { tryFin = curNesting; } if (pFinallyNesting) { *pFinallyNesting = curNesting - tryFin; } return curNesting; } //------------------------------------------------------------------------ // fgFindBlockILOffset: Given a block, find the IL offset corresponding to the first statement // in the block with a legal IL offset. Skip any leading statements that have BAD_IL_OFFSET. 
// If no statement has an initialized statement offset (including the case where there are // no statements in the block), then return BAD_IL_OFFSET. This function is used when // blocks are split or modified, and we want to maintain the IL offset as much as possible // to preserve good debugging behavior. // // Arguments: // block - The block to check. // // Return Value: // The first good IL offset of a statement in the block, or BAD_IL_OFFSET if such an IL offset // cannot be found. // IL_OFFSET Compiler::fgFindBlockILOffset(BasicBlock* block) { // This function searches for IL offsets in statement nodes, so it can't be used in LIR. We // could have a similar function for LIR that searches for GT_IL_OFFSET nodes. assert(!block->IsLIR()); for (Statement* const stmt : block->Statements()) { if (stmt->GetILOffsetX() != BAD_IL_OFFSET) { return jitGetILoffs(stmt->GetILOffsetX()); } } return BAD_IL_OFFSET; } //------------------------------------------------------------------------------ // fgSplitBlockAtEnd - split the given block into two blocks. // All code in the block stays in the original block. // Control falls through from original to new block, and // the new block is returned. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) { // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block. // (We need the successors of 'curr' to be correct when we do this.) BasicBlock* newBlock = bbNewBasicBlock(curr->bbJumpKind); // Start the new block with no refs. When we set the preds below, this will get updated correctly. newBlock->bbRefs = 0; // For each successor of the original block, set the new block as their predecessor. // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs. 
// Without these arcs, a block 'b' may not be a member of succs(preds(b)) if (curr->bbJumpKind != BBJ_SWITCH) { for (BasicBlock* const succ : curr->Succs(this)) { if (succ != newBlock) { JITDUMP(FMT_BB " previous predecessor was " FMT_BB ", now is " FMT_BB "\n", succ->bbNum, curr->bbNum, newBlock->bbNum); fgReplacePred(succ, curr, newBlock); } } newBlock->bbJumpDest = curr->bbJumpDest; curr->bbJumpDest = nullptr; } else { // In the case of a switch statement there's more complicated logic in order to wire up the predecessor lists // but fortunately there's an existing method that implements this functionality. newBlock->bbJumpSwt = curr->bbJumpSwt; fgChangeSwitchBlock(curr, newBlock); curr->bbJumpSwt = nullptr; } newBlock->inheritWeight(curr); // Set the new block's flags. Note that the new block isn't BBF_INTERNAL unless the old block is. newBlock->bbFlags = curr->bbFlags; // Remove flags that the new block can't have. newBlock->bbFlags &= ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS | BBF_PATCHPOINT | BBF_BACKWARD_JUMP_TARGET | BBF_LOOP_ALIGN); // Remove the GC safe bit on the new block. It seems clear that if we split 'curr' at the end, // such that all the code is left in 'curr', and 'newBlock' just gets the control flow, then // both 'curr' and 'newBlock' could accurately retain an existing GC safe bit. However, callers // use this function to split blocks in the middle, or at the beginning, and they don't seem to // be careful about updating this flag appropriately. So, removing the GC safe bit is simply // conservative: some functions might end up being fully interruptible that could be partially // interruptible if we exercised more care here. 
newBlock->bbFlags &= ~BBF_GC_SAFE_POINT; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) newBlock->bbFlags &= ~(BBF_FINALLY_TARGET); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // The new block has no code, so we leave bbCodeOffs/bbCodeOffsEnd set to BAD_IL_OFFSET. If a caller // puts code in the block, then it needs to update these. // Insert the new block in the block list after the 'curr' block. fgInsertBBafter(curr, newBlock); fgExtendEHRegionAfter(curr); // The new block is in the same EH region as the old block. // Remove flags from the old block that are no longer possible. curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); // Default to fallthru, and add the arc for that. curr->bbJumpKind = BBJ_NONE; fgAddRefPred(newBlock, curr); return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAfterStatement - Split the given block, with all code after // the given statement going into the second block. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt) { assert(!curr->IsLIR()); // No statements in LIR, so you can't use this function. BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (stmt != nullptr) { newBlock->bbStmtList = stmt->GetNextStmt(); if (newBlock->bbStmtList != nullptr) { newBlock->bbStmtList->SetPrevStmt(curr->bbStmtList->GetPrevStmt()); } curr->bbStmtList->SetPrevStmt(stmt); stmt->SetNextStmt(nullptr); // Update the IL offsets of the blocks to match the split. 
assert(newBlock->bbCodeOffs == BAD_IL_OFFSET); assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET); // curr->bbCodeOffs remains the same newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; IL_OFFSET splitPointILOffset = fgFindBlockILOffset(newBlock); curr->bbCodeOffsEnd = splitPointILOffset; newBlock->bbCodeOffs = splitPointILOffset; } else { assert(curr->bbStmtList == nullptr); // if no tree was given then it better be an empty block } return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAfterNode - Split the given block, with all code after // the given node going into the second block. // This function is only used in LIR. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node) { assert(curr->IsLIR()); BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (node != nullptr) { LIR::Range& currBBRange = LIR::AsRange(curr); if (node != currBBRange.LastNode()) { LIR::Range nodesToMove = currBBRange.Remove(node->gtNext, currBBRange.LastNode()); LIR::AsRange(newBlock).InsertAtBeginning(std::move(nodesToMove)); } // Update the IL offsets of the blocks to match the split. assert(newBlock->bbCodeOffs == BAD_IL_OFFSET); assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET); // curr->bbCodeOffs remains the same newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; // Search backwards from the end of the current block looking for the IL offset to use // for the end IL offset for the original block. 
IL_OFFSET splitPointILOffset = BAD_IL_OFFSET; LIR::Range::ReverseIterator riter; LIR::Range::ReverseIterator riterEnd; for (riter = currBBRange.rbegin(), riterEnd = currBBRange.rend(); riter != riterEnd; ++riter) { if ((*riter)->gtOper == GT_IL_OFFSET) { GenTreeILOffset* ilOffset = (*riter)->AsILOffset(); if (ilOffset->gtStmtILoffsx != BAD_IL_OFFSET) { splitPointILOffset = jitGetILoffs(ilOffset->gtStmtILoffsx); break; } } } curr->bbCodeOffsEnd = splitPointILOffset; // Also use this as the beginning offset of the next block. Presumably we could/should // look to see if the first node is a GT_IL_OFFSET node, and use that instead. newBlock->bbCodeOffs = splitPointILOffset; } else { assert(curr->bbStmtList == nullptr); // if no node was given then it better be an empty block } return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAtBeginning - Split the given block into two blocks. // Control falls through from original to new block, // and the new block is returned. // All code in the original block goes into the new block //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAtBeginning(BasicBlock* curr) { BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (curr->IsLIR()) { newBlock->SetFirstLIRNode(curr->GetFirstLIRNode()); curr->SetFirstLIRNode(nullptr); } else { newBlock->bbStmtList = curr->bbStmtList; curr->bbStmtList = nullptr; } // The new block now has all the code, and the old block has none. Update the // IL offsets for the block to reflect this. 
newBlock->bbCodeOffs = curr->bbCodeOffs; newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; curr->bbCodeOffs = BAD_IL_OFFSET; curr->bbCodeOffsEnd = BAD_IL_OFFSET; return newBlock; } //------------------------------------------------------------------------ // fgSplitEdge: Splits the edge between a block 'curr' and its successor 'succ' by creating a new block // that replaces 'succ' as a successor of 'curr', and which branches unconditionally // to (or falls through to) 'succ'. Note that for a BBJ_COND block 'curr', // 'succ' might be the fall-through path or the branch path from 'curr'. // // Arguments: // curr - A block which branches to 'succ' // succ - The target block // // Return Value: // Returns a new block, that is a successor of 'curr' and which branches unconditionally to 'succ' // // Assumptions: // 'curr' must have a bbJumpKind of BBJ_COND, BBJ_ALWAYS, or BBJ_SWITCH // // Notes: // The returned block is empty. // Can be invoked before pred lists are built. BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) { assert(curr->bbJumpKind == BBJ_COND || curr->bbJumpKind == BBJ_SWITCH || curr->bbJumpKind == BBJ_ALWAYS); if (fgComputePredsDone) { assert(fgGetPredForBlock(succ, curr) != nullptr); } BasicBlock* newBlock; if (succ == curr->bbNext) { // The successor is the fall-through path of a BBJ_COND, or // an immediately following block of a BBJ_SWITCH (which has // no fall-through path). For this case, simply insert a new // fall-through block after 'curr'. 
newBlock = fgNewBBafter(BBJ_NONE, curr, true /*extendRegion*/); } else { newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, curr->isRunRarely()); // The new block always jumps to 'succ' newBlock->bbJumpDest = succ; } newBlock->bbFlags |= (curr->bbFlags & succ->bbFlags & (BBF_BACKWARD_JUMP)); JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n", curr->bbNum, succ->bbNum, newBlock->bbNum); if (curr->bbJumpKind == BBJ_COND) { fgReplacePred(succ, curr, newBlock); if (curr->bbJumpDest == succ) { // Now 'curr' jumps to newBlock curr->bbJumpDest = newBlock; } fgAddRefPred(newBlock, curr); } else if (curr->bbJumpKind == BBJ_SWITCH) { // newBlock replaces 'succ' in the switch. fgReplaceSwitchJumpTarget(curr, newBlock, succ); // And 'succ' has 'newBlock' as a new predecessor. fgAddRefPred(succ, newBlock); } else { assert(curr->bbJumpKind == BBJ_ALWAYS); fgReplacePred(succ, curr, newBlock); curr->bbJumpDest = newBlock; fgAddRefPred(newBlock, curr); } // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time. 
// if (curr->bbJumpKind != BBJ_ALWAYS) { newBlock->inheritWeightPercentage(curr, 50); } // The bbLiveIn and bbLiveOut are both equal to the bbLiveIn of 'succ' if (fgLocalVarLivenessDone) { VarSetOps::Assign(this, newBlock->bbLiveIn, succ->bbLiveIn); VarSetOps::Assign(this, newBlock->bbLiveOut, succ->bbLiveIn); } return newBlock; } // Removes the block from the bbPrev/bbNext chain // Updates fgFirstBB and fgLastBB if necessary // Does not update fgFirstFuncletBB or fgFirstColdBlock (fgUnlinkRange does) void Compiler::fgUnlinkBlock(BasicBlock* block) { if (block->bbPrev) { block->bbPrev->bbNext = block->bbNext; if (block->bbNext) { block->bbNext->bbPrev = block->bbPrev; } else { fgLastBB = block->bbPrev; } } else { assert(block == fgFirstBB); assert(block != fgLastBB); assert((fgFirstBBScratch == nullptr) || (fgFirstBBScratch == fgFirstBB)); fgFirstBB = block->bbNext; fgFirstBB->bbPrev = nullptr; if (fgFirstBBScratch != nullptr) { #ifdef DEBUG // We had created an initial scratch BB, but now we're deleting it. if (verbose) { printf("Unlinking scratch " FMT_BB "\n", block->bbNum); } #endif // DEBUG fgFirstBBScratch = nullptr; } } } /***************************************************************************************************** * * Function called to unlink basic block range [bBeg .. bEnd] from the basic block list. * * 'bBeg' can't be the first block. 
*/ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) { assert(bBeg != nullptr); assert(bEnd != nullptr); BasicBlock* bPrev = bBeg->bbPrev; assert(bPrev != nullptr); // Can't unlink a range starting with the first block bPrev->setNext(bEnd->bbNext); /* If we removed the last block in the method then update fgLastBB */ if (fgLastBB == bEnd) { fgLastBB = bPrev; noway_assert(fgLastBB->bbNext == nullptr); } // If bEnd was the first Cold basic block update fgFirstColdBlock if (fgFirstColdBlock == bEnd) { fgFirstColdBlock = bPrev->bbNext; } #if defined(FEATURE_EH_FUNCLETS) #ifdef DEBUG // You can't unlink a range that includes the first funclet block. A range certainly // can't cross the non-funclet/funclet region. And you can't unlink the first block // of the first funclet with this, either. (If that's necessary, it could be allowed // by updating fgFirstFuncletBB to bEnd->bbNext.) for (BasicBlock* tempBB = bBeg; tempBB != bEnd->bbNext; tempBB = tempBB->bbNext) { assert(tempBB != fgFirstFuncletBB); } #endif // DEBUG #endif // FEATURE_EH_FUNCLETS } /***************************************************************************************************** * * Function called to remove a basic block */ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) { /* The block has to be either unreachable or empty */ PREFIX_ASSUME(block != nullptr); BasicBlock* bPrev = block->bbPrev; JITDUMP("fgRemoveBlock " FMT_BB "\n", block->bbNum); // If we've cached any mappings from switch blocks to SwitchDesc's (which contain only the // *unique* successors of the switch block), invalidate that cache, since an entry in one of // the SwitchDescs might be removed. InvalidateUniqueSwitchSuccMap(); noway_assert((block == fgFirstBB) || (bPrev && (bPrev->bbNext == block))); noway_assert(!(block->bbFlags & BBF_DONT_REMOVE)); // Should never remove a genReturnBB, as we might have special hookups there. 
noway_assert(block != genReturnBB); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove a finally target assert(!(block->bbFlags & BBF_FINALLY_TARGET)); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (unreachable) { PREFIX_ASSUME(bPrev != nullptr); fgUnreachableBlock(block); /* If this is the last basic block update fgLastBB */ if (block == fgLastBB) { fgLastBB = bPrev; } #if defined(FEATURE_EH_FUNCLETS) // If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->bbNext; } #endif // FEATURE_EH_FUNCLETS if (bPrev->bbJumpKind == BBJ_CALLFINALLY) { // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && (block->bbNext != fgFirstColdBlock)) { // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by // BBJ_ALWAYS blocks. bPrev->bbJumpKind = BBJ_NONE; } // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { fgFirstColdBlock = block->bbNext; } /* Unlink this block from the bbNext chain */ fgUnlinkBlock(block); /* At this point the bbPreds and bbRefs had better be zero */ noway_assert((block->bbRefs == 0) && (block->bbPreds == nullptr)); /* A BBJ_CALLFINALLY is usually paired with a BBJ_ALWAYS. 
* If we delete such a BBJ_CALLFINALLY we also delete the BBJ_ALWAYS */ if (block->isBBCallAlwaysPair()) { BasicBlock* leaveBlk = block->bbNext; noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; leaveBlk->bbPreds = nullptr; fgRemoveBlock(leaveBlk, true); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (block->bbJumpKind == BBJ_RETURN) { fgRemoveReturnBlock(block); } } else // block is empty { noway_assert(block->isEmpty()); // The block cannot follow a non-retless BBJ_CALLFINALLY (because we don't know who may jump to it). noway_assert(!block->isBBCallAlwaysPairTail()); /* This cannot be the last basic block */ noway_assert(block != fgLastBB); #ifdef DEBUG if (verbose) { printf("Removing empty " FMT_BB "\n", block->bbNum); } #endif // DEBUG #ifdef DEBUG /* Some extra checks for the empty case */ switch (block->bbJumpKind) { case BBJ_NONE: break; case BBJ_ALWAYS: /* Do not remove a block that jumps to itself - used for while (true){} */ noway_assert(block->bbJumpDest != block); /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE); break; default: noway_assert(!"Empty block of this type cannot be removed!"); break; } #endif // DEBUG noway_assert(block->bbJumpKind == BBJ_NONE || block->bbJumpKind == BBJ_ALWAYS); /* Who is the "real" successor of this block? */ BasicBlock* succBlock; if (block->bbJumpKind == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } else { succBlock = block->bbNext; } bool skipUnmarkLoop = false; // If block is the backedge for a loop and succBlock precedes block // then the succBlock becomes the new LOOP HEAD // NOTE: there's an assumption here that the blocks are numbered in increasing bbNext order. // NOTE 2: if fgDomsComputed is false, then we can't check reachability. 
However, if this is // the case, then the loop structures probably are also invalid, and shouldn't be used. This // can be the case late in compilation (such as Lower), where remnants of earlier created // structures exist, but haven't been maintained. if (block->isLoopHead() && (succBlock->bbNum <= block->bbNum)) { succBlock->bbFlags |= BBF_LOOP_HEAD; if (block->isLoopAlign()) { succBlock->bbFlags |= BBF_LOOP_ALIGN; JITDUMP("Propagating LOOP_ALIGN flag from " FMT_BB " to " FMT_BB " for loop# %d.", block->bbNum, succBlock->bbNum, block->bbNatLoopNum); } if (fgDomsComputed && fgReachable(succBlock, block)) { /* Mark all the reachable blocks between 'succBlock' and 'block', excluding 'block' */ optMarkLoopBlocks(succBlock, block, true); } } else if (succBlock->isLoopHead() && bPrev && (succBlock->bbNum <= bPrev->bbNum)) { skipUnmarkLoop = true; } noway_assert(succBlock); // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { fgFirstColdBlock = block->bbNext; } #if defined(FEATURE_EH_FUNCLETS) // Update fgFirstFuncletBB if necessary if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->bbNext; } #endif // FEATURE_EH_FUNCLETS /* First update the loop table and bbWeights */ optUpdateLoopsBeforeRemoveBlock(block, skipUnmarkLoop); // Update successor block start IL offset, if empty predecessor // covers the immediately preceding range. 
if ((block->bbCodeOffsEnd == succBlock->bbCodeOffs) && (block->bbCodeOffs != BAD_IL_OFFSET)) { assert(block->bbCodeOffs <= succBlock->bbCodeOffs); succBlock->bbCodeOffs = block->bbCodeOffs; } /* Remove the block */ if (bPrev == nullptr) { /* special case if this is the first BB */ noway_assert(block == fgFirstBB); /* Must be a fall through to next block */ noway_assert(block->bbJumpKind == BBJ_NONE); /* old block no longer gets the extra ref count for being the first block */ block->bbRefs--; succBlock->bbRefs++; } fgUnlinkBlock(block); /* mark the block as removed and set the change flag */ block->bbFlags |= BBF_REMOVED; /* Update bbRefs and bbPreds. * All blocks jumping to 'block' now jump to 'succBlock'. * First, remove 'block' from the predecessor list of succBlock. */ fgRemoveRefPred(succBlock, block); for (flowList* const pred : block->PredEdges()) { BasicBlock* predBlock = pred->getBlock(); /* Are we changing a loop backedge into a forward jump? */ if (block->isLoopHead() && (predBlock->bbNum >= block->bbNum) && (predBlock->bbNum <= succBlock->bbNum)) { /* First update the loop table and bbWeights */ optUpdateLoopsBeforeRemoveBlock(predBlock); } /* If predBlock is a new predecessor, then add it to succBlock's predecessor's list. 
*/ if (predBlock->bbJumpKind != BBJ_SWITCH) { // Even if the pred is not a switch, we could have a conditional branch // to the fallthrough, so duplicate there could be preds for (unsigned i = 0; i < pred->flDupCount; i++) { fgAddRefPred(succBlock, predBlock); } } /* change all jumps to the removed block */ switch (predBlock->bbJumpKind) { default: noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()"); break; case BBJ_NONE: noway_assert(predBlock == bPrev); PREFIX_ASSUME(bPrev != nullptr); /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */ if (block->bbJumpKind == BBJ_ALWAYS) { /* bPrev now becomes a BBJ_ALWAYS */ bPrev->bbJumpKind = BBJ_ALWAYS; bPrev->bbJumpDest = succBlock; } break; case BBJ_COND: /* The links for the direct predecessor case have already been updated above */ if (predBlock->bbJumpDest != block) { break; } /* Check if both side of the BBJ_COND now jump to the same block */ if (predBlock->bbNext == succBlock) { // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest. noway_assert(predBlock->bbJumpDest == block); predBlock->bbJumpDest = succBlock; fgRemoveConditionalJump(predBlock); break; } /* Fall through for the jump case */ FALLTHROUGH; case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: noway_assert(predBlock->bbJumpDest == block); predBlock->bbJumpDest = succBlock; break; case BBJ_SWITCH: // Change any jumps from 'predBlock' (a BBJ_SWITCH) to 'block' to jump to 'succBlock' // // For the jump targets of 'predBlock' (a BBJ_SWITCH) that jump to 'block' // remove the old predecessor at 'block' from 'predBlock' and // add the new predecessor at 'succBlock' from 'predBlock' // fgReplaceSwitchJumpTarget(predBlock, succBlock, block); break; } } } if (bPrev != nullptr) { switch (bPrev->bbJumpKind) { case BBJ_CALLFINALLY: // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS noway_assert(bPrev->bbFlags & BBF_RETLESS_CALL); break; case BBJ_ALWAYS: // Check for branch to next block. 
Just make sure the BBJ_ALWAYS block is not // part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. We do this here and don't rely on fgUpdateFlowGraph // because we can be called by ComputeDominators and it expects it to remove this jump to // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS // pairing. if ((bPrev->bbJumpDest == bPrev->bbNext) && !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold { if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) { // It's safe to change the jump type bPrev->bbJumpKind = BBJ_NONE; } } break; case BBJ_COND: /* Check for branch to next block */ if (bPrev->bbJumpDest == bPrev->bbNext) { fgRemoveConditionalJump(bPrev); } break; default: break; } ehUpdateForDeletedBlock(block); } } /***************************************************************************** * * Function called to connect to block that previously had a fall through */ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) { BasicBlock* jmpBlk = nullptr; /* If bSrc is non-NULL */ if (bSrc != nullptr) { /* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */ if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) { switch (bSrc->bbJumpKind) { case BBJ_NONE: bSrc->bbJumpKind = BBJ_ALWAYS; bSrc->bbJumpDest = bDst; #ifdef DEBUG if (verbose) { printf("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n", bSrc->bbNum, bSrc->bbJumpDest->bbNum); } #endif break; case BBJ_CALLFINALLY: case BBJ_COND: // Add a new block after bSrc which jumps to 'bDst' jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); if (fgComputePredsDone) { fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc)); } // Record the loop number in the new block jmpBlk->bbNatLoopNum = bSrc->bbNatLoopNum; // When adding a new jmpBlk we will set the bbWeight and bbFlags // if (fgHaveValidEdgeWeights && fgHaveProfileData()) { 
noway_assert(fgComputePredsDone); flowList* newEdge = fgGetPredForBlock(jmpBlk, bSrc); jmpBlk->bbWeight = (newEdge->edgeWeightMin() + newEdge->edgeWeightMax()) / 2; if (bSrc->bbWeight == BB_ZERO_WEIGHT) { jmpBlk->bbWeight = BB_ZERO_WEIGHT; } if (jmpBlk->bbWeight == BB_ZERO_WEIGHT) { jmpBlk->bbFlags |= BBF_RUN_RARELY; } BasicBlock::weight_t weightDiff = (newEdge->edgeWeightMax() - newEdge->edgeWeightMin()); BasicBlock::weight_t slop = BasicBlock::GetSlopFraction(bSrc, bDst); // // If the [min/max] values for our edge weight is within the slop factor // then we will set the BBF_PROF_WEIGHT flag for the block // if (weightDiff <= slop) { jmpBlk->bbFlags |= BBF_PROF_WEIGHT; } } else { // We set the bbWeight to the smaller of bSrc->bbWeight or bDst->bbWeight if (bSrc->bbWeight < bDst->bbWeight) { jmpBlk->bbWeight = bSrc->bbWeight; jmpBlk->bbFlags |= (bSrc->bbFlags & BBF_RUN_RARELY); } else { jmpBlk->bbWeight = bDst->bbWeight; jmpBlk->bbFlags |= (bDst->bbFlags & BBF_RUN_RARELY); } } jmpBlk->bbJumpDest = bDst; if (fgComputePredsDone) { fgReplacePred(bDst, bSrc, jmpBlk); } else { jmpBlk->bbFlags |= BBF_IMPORTED; } #ifdef DEBUG if (verbose) { printf("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n", jmpBlk->bbJumpDest->bbNum, bSrc->bbNum); } #endif // DEBUG break; default: noway_assert(!"Unexpected bbJumpKind"); break; } } else { // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // if ((bSrc->bbJumpKind == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { bSrc->bbJumpKind = BBJ_NONE; #ifdef DEBUG if (verbose) { printf("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", bSrc->bbNum, bSrc->bbNext->bbNum); } #endif // DEBUG } } } return jmpBlk; } //------------------------------------------------------------------------ // fgRenumberBlocks: update block bbNums to reflect bbNext order // // Returns: // true if blocks were 
//    renumbered or maxBBNum was updated.
//
// Notes:
//    Walk the flow graph, reassign block numbers to keep them in ascending order.
//    Return 'true' if any renumbering was actually done, OR if we change the
//    maximum number of assigned basic blocks (this can happen if we do inlining,
//    create a new, high-numbered block, then that block goes away. We go to
//    renumber the blocks, none of them actually change number, but we shrink the
//    maximum assigned block number. This affects the block set epoch).
//
//    As a consequence of renumbering, block pred lists may need to be reordered.
//
bool Compiler::fgRenumberBlocks()
{
    // If we renumber the blocks the dominator information will be out-of-date
    if (fgDomsComputed)
    {
        noway_assert(!"Can't call Compiler::fgRenumberBlocks() when fgDomsComputed==true");
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** Before renumbering the basic blocks\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG

    bool        renumbered  = false;
    bool        newMaxBBNum = false;
    BasicBlock* block;

    // When inlining, block numbers continue from the inliner's current maximum so that
    // inlinee blocks don't collide with the inliner's numbering.
    unsigned numStart = 1 + (compIsForInlining() ? impInlineInfo->InlinerCompiler->fgBBNumMax : 0);
    unsigned num;

    // Single pass over the bbNext chain: assign sequential numbers and, on the last
    // block, refresh fgLastBB / fgBBcount / the max block number.
    for (block = fgFirstBB, num = numStart; block != nullptr; block = block->bbNext, num++)
    {
        noway_assert((block->bbFlags & BBF_REMOVED) == 0);

        if (block->bbNum != num)
        {
            renumbered = true;
#ifdef DEBUG
            if (verbose)
            {
                printf("Renumber " FMT_BB " to " FMT_BB "\n", block->bbNum, num);
            }
#endif // DEBUG
            block->bbNum = num;
        }

        if (block->bbNext == nullptr)
        {
            fgLastBB  = block;
            fgBBcount = num - numStart + 1;
            if (compIsForInlining())
            {
                if (impInlineInfo->InlinerCompiler->fgBBNumMax != num)
                {
                    impInlineInfo->InlinerCompiler->fgBBNumMax = num;
                    newMaxBBNum                                = true;
                }
            }
            else
            {
                if (fgBBNumMax != num)
                {
                    fgBBNumMax  = num;
                    newMaxBBNum = true;
                }
            }
        }
    }

    // If we renumbered, then we may need to reorder some pred lists.
    //
    if (renumbered && fgComputePredsDone)
    {
        for (BasicBlock* const block : Blocks())
        {
            block->ensurePredListOrder(this);
        }
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** After renumbering the basic blocks\n");
        if (renumbered)
        {
            fgDispBasicBlocks();
            fgDispHandlerTab();
        }
        else
        {
            printf("=============== No blocks renumbered!\n");
        }
    }
#endif // DEBUG

    // Now update the BlockSet epoch, which depends on the block numbers.
    // If any blocks have been renumbered then create a new BlockSet epoch.
    // Even if we have not renumbered any blocks, we might still need to force
    // a new BlockSet epoch, for one of several reasons. If there are any new
    // blocks with higher numbers than the former maximum numbered block, then we
    // need a new epoch with a new size matching the new largest numbered block.
    // Also, if the number of blocks is different from the last time we set the
    // BlockSet epoch, then we need a new epoch. This wouldn't happen if we
    // renumbered blocks after every block addition/deletion, but it might be
    // the case that we can change the number of blocks, then set the BlockSet
    // epoch without renumbering, then change the number of blocks again, then
    // renumber.
    if (renumbered || newMaxBBNum)
    {
        NewBasicBlockEpoch();

        // The key in the unique switch successor map is dependent on the block number, so invalidate that cache.
        InvalidateUniqueSwitchSuccMap();
    }
    else
    {
        EnsureBasicBlockEpoch();
    }

    // Tell our caller if any blocks actually were renumbered.
    return renumbered || newMaxBBNum;
}

/*****************************************************************************
 *
 *  Is the BasicBlock bJump a forward branch?
 *   Optionally bSrc can be supplied to indicate that
 *   bJump must be forward with respect to bSrc
 */

bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL */)
{
    bool result = false;

    // Only conditional and unconditional jumps have a single jump target to test.
    if ((bJump->bbJumpKind == BBJ_COND) || (bJump->bbJumpKind == BBJ_ALWAYS))
    {
        BasicBlock* bDest = bJump->bbJumpDest;
        BasicBlock* bTemp = (bSrc == nullptr) ? bJump : bSrc;

        // Scan forward along the bbNext chain; the branch is "forward" if we
        // encounter the jump target before running off the end of the list.
        while (true)
        {
            bTemp = bTemp->bbNext;

            if (bTemp == nullptr)
            {
                break;
            }

            if (bTemp == bDest)
            {
                result = true;
                break;
            }
        }
    }

    return result;
}

/*****************************************************************************
 *
 *  Returns true if it is allowable (based upon the EH regions)
 *  to place block bAfter immediately after bBefore. It is allowable
 *  if the 'bBefore' and 'bAfter' blocks are in the exact same EH region.
 */

bool Compiler::fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter)
{
    return BasicBlock::sameEHRegion(bBefore, bAfter);
}

/*****************************************************************************
 *
 *  Function called to move the range of blocks [bStart .. bEnd].
 *  The blocks are placed immediately after the insertAfterBlk.
 *  fgFirstFuncletBB is not updated; that is the responsibility of the caller, if necessary.
 */

void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk)
{
    /* We have decided to insert the block(s) after 'insertAfterBlk' */
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
    if (verbose)
    {
        printf("Relocated block%s [" FMT_BB ".." FMT_BB "] inserted after " FMT_BB "%s\n", (bStart == bEnd) ? "" : "s",
               bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum,
               (insertAfterBlk->bbNext == nullptr) ? " at the end of method" : "");
    }
#endif // DEBUG

    /* relink [bStart .. bEnd] into the flow graph */

    // NOTE: this assumes [bStart .. bEnd] has already been unlinked from its old
    // position (the caller is responsible for that, e.g. via fgUnlinkRange).
    bEnd->bbNext = insertAfterBlk->bbNext;
    if (insertAfterBlk->bbNext)
    {
        insertAfterBlk->bbNext->bbPrev = bEnd;
    }
    insertAfterBlk->setNext(bStart);

    /* If insertAfterBlk was fgLastBB then update fgLastBB */
    if (insertAfterBlk == fgLastBB)
    {
        fgLastBB = bEnd;
        noway_assert(fgLastBB->bbNext == nullptr);
    }
}

/*****************************************************************************
 *
 *  Function called to relocate a single range to the end of the method.
 *  Only an entire consecutive region can be moved and it will be kept together.
 *  Except for the first block, the range cannot have any blocks that jump into or out of the region.
 *  When successful we return the bLast block which is the last block that we relocated.
 *  When unsuccessful we return NULL.

    =============================================================
    NOTE: This function can invalidate all pointers into the EH table, as well as change the size of the EH table!
    =============================================================
 */

BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType)
{
    INDEBUG(const char* reason = "None";)

    // Figure out the range of blocks we're going to move

    unsigned    XTnum;
    EHblkDsc*   HBtab;
    BasicBlock* bStart  = nullptr;
    BasicBlock* bMiddle = nullptr;
    BasicBlock* bLast   = nullptr;
    BasicBlock* bPrev   = nullptr;

#if defined(FEATURE_EH_FUNCLETS)
    // We don't support moving try regions... yet?
    noway_assert(relocateType == FG_RELOCATE_HANDLER);
#endif // FEATURE_EH_FUNCLETS

    HBtab = ehGetDsc(regionIndex);

    if (relocateType == FG_RELOCATE_TRY)
    {
        bStart = HBtab->ebdTryBeg;
        bLast  = HBtab->ebdTryLast;
    }
    else if (relocateType == FG_RELOCATE_HANDLER)
    {
        if (HBtab->HasFilter())
        {
            // The filter and handler funclets must be moved together, and remain contiguous.
            bStart  = HBtab->ebdFilter;
            bMiddle = HBtab->ebdHndBeg;
            bLast   = HBtab->ebdHndLast;
        }
        else
        {
            bStart = HBtab->ebdHndBeg;
            bLast  = HBtab->ebdHndLast;
        }
    }

    // Our range must contain either all rarely run blocks or all non-rarely run blocks
    bool inTheRange = false;
    bool validRange = false;

    BasicBlock* block;

    noway_assert(bStart != nullptr && bLast != nullptr);
    if (bStart == fgFirstBB)
    {
        INDEBUG(reason = "can not relocate first block";)
        goto FAILURE;
    }

#if !defined(FEATURE_EH_FUNCLETS)
    // In the funclets case, we still need to set some information on the handler blocks
    if (bLast->bbNext == NULL)
    {
        INDEBUG(reason = "region is already at the end of the method";)
        goto FAILURE;
    }
#endif // !FEATURE_EH_FUNCLETS

    // Walk the block list for this purpose:
    // 1. Verify that all the blocks in the range are either all rarely run or not rarely run.
    // When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks
    // in the range.
    CLANG_FORMAT_COMMENT_ANCHOR;

#if !defined(FEATURE_EH_FUNCLETS)
    bool isRare;
    isRare = bStart->isRunRarely();
#endif // !FEATURE_EH_FUNCLETS
    block = fgFirstBB;
    while (true)
    {
        if (block == bStart)
        {
            noway_assert(inTheRange == false);
            inTheRange = true;
        }
        else if (block == bLast->bbNext)
        {
            noway_assert(inTheRange == true);
            inTheRange = false;
            break; // we found the end, so we're done
        }

        if (inTheRange)
        {
#if !defined(FEATURE_EH_FUNCLETS)
            // Unless all blocks are (not) run rarely we must return false.
            if (isRare != block->isRunRarely())
            {
                INDEBUG(reason = "this region contains both rarely run and non-rarely run blocks";)
                goto FAILURE;
            }
#endif // !FEATURE_EH_FUNCLETS

            validRange = true;
        }

        if (block == nullptr)
        {
            break;
        }

        block = block->bbNext;
    }

    // Ensure that bStart .. bLast defined a valid range
    noway_assert((validRange == true) && (inTheRange == false));

    bPrev = bStart->bbPrev;
    noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function.

    JITDUMP("Relocating %s range " FMT_BB ".." FMT_BB " (EH#%u) to end of BBlist\n",
            (relocateType == FG_RELOCATE_TRY) ? "try" : "handler", bStart->bbNum, bLast->bbNum, regionIndex);

#ifdef DEBUG
    if (verbose)
    {
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }

#if !defined(FEATURE_EH_FUNCLETS)
    // This is really expensive, and quickly becomes O(n^n) with funclets
    // so only do it once after we've created them (see fgCreateFunclets)
    if (expensiveDebugCheckLevel >= 2)
    {
        fgDebugCheckBBlist();
    }
#endif

#endif // DEBUG

#if defined(FEATURE_EH_FUNCLETS)

    bStart->bbFlags |= BBF_FUNCLET_BEG; // Mark the start block of the funclet

    if (bMiddle != nullptr)
    {
        bMiddle->bbFlags |= BBF_FUNCLET_BEG; // Also mark the start block of a filter handler as a funclet
    }

#endif // FEATURE_EH_FUNCLETS

    BasicBlock* bNext;
    bNext = bLast->bbNext;

    /* Temporarily unlink [bStart .. bLast] from the flow graph */
    fgUnlinkRange(bStart, bLast);

    BasicBlock* insertAfterBlk;
    insertAfterBlk = fgLastBB;

#if defined(FEATURE_EH_FUNCLETS)

    // There are several cases we need to consider when moving an EH range.
    // If moving a range X, we must consider its relationship to every other EH
    // range A in the table. Note that each entry in the table represents both
    // a protected region and a handler region (possibly including a filter region
    // that must live before and adjacent to the handler region), so we must
    // consider try and handler regions independently. These are the cases:
    // 1. A is completely contained within X (where "completely contained" means that the 'begin' and 'last' parts of A
    //    are strictly between the 'begin' and 'end' parts of X, and aren't equal to either, for example, they don't
    //    share 'last' blocks). In this case, when we move X, A moves with it, and the EH table doesn't need to change.
    // 2. X is completely contained within A. In this case, X gets extracted from A, and the range of A shrinks, but
    //    because A is strictly within X, the EH table doesn't need to change.
    // 3. A and X have exactly the same range. In this case, A is moving with X and the EH table doesn't need to change.
    // 4. A and X share the 'last' block. There are two sub-cases:
    //    (a) A is a larger range than X (such that the beginning of A precedes the beginning of X): in this case, we
    //        are moving the tail of A. We set the 'last' block of A to the block preceding the beginning block of X.
    //    (b) A is a smaller range than X. Thus, we are moving the entirety of A along with X. In this case, nothing in
    //        the EH record for A needs to change.
    // 5. A and X share the 'beginning' block (but aren't the same range, as in #3). This can never happen here, because
    //    we are only moving handler ranges (we don't move try ranges), and handler regions cannot start at the
    //    beginning of a try range or handler range and be a subset.
    //
    // Note that A and X must properly nest for the table to be well-formed. For example, the beginning of A can't be
    // strictly within the range of X (that is, the beginning of A isn't shared with the beginning of X) and the end of
    // A outside the range.

    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        if (XTnum != regionIndex) // we don't need to update our 'last' pointer
        {
            if (HBtab->ebdTryLast == bLast)
            {
                // If we moved a set of blocks that were at the end of
                // a different try region then we may need to update ebdTryLast
                for (block = HBtab->ebdTryBeg; block != nullptr; block = block->bbNext)
                {
                    if (block == bPrev)
                    {
                        // We were contained within it, so shrink its region by
                        // setting its 'last'
                        fgSetTryEnd(HBtab, bPrev);
                        break;
                    }
                    else if (block == HBtab->ebdTryLast->bbNext)
                    {
                        // bPrev does not come after the TryBeg, thus we are larger, and
                        // it is moving with us.
                        break;
                    }
                }
            }
            if (HBtab->ebdHndLast == bLast)
            {
                // If we moved a set of blocks that were at the end of
                // a different handler region then we must update ebdHndLast
                for (block = HBtab->ebdHndBeg; block != nullptr; block = block->bbNext)
                {
                    if (block == bPrev)
                    {
                        fgSetHndEnd(HBtab, bPrev);
                        break;
                    }
                    else if (block == HBtab->ebdHndLast->bbNext)
                    {
                        // bPrev does not come after the HndBeg
                        break;
                    }
                }
            }
        }
    } // end exception table iteration

    // Insert the block(s) we are moving after fgLastBlock
    fgMoveBlocksAfter(bStart, bLast, insertAfterBlk);

    if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet
    {
        fgFirstFuncletBB = bStart;
    }
    else
    {
        assert(fgFirstFuncletBB !=
               insertAfterBlk->bbNext); // We insert at the end, not at the beginning, of the funclet region.
    }

    // These asserts assume we aren't moving try regions (which we might need to do). Only
    // try regions can have fall through into or out of the region.

    noway_assert(!bPrev->bbFallsThrough()); // There can be no fall through into a filter or handler region
    noway_assert(!bLast->bbFallsThrough()); // There can be no fall through out of a handler region

#ifdef DEBUG
    if (verbose)
    {
        printf("Create funclets: moved region\n");
        fgDispHandlerTab();
    }

    // We have to wait to do this until we've created all the additional regions
    // Because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex
#endif // DEBUG

#else // !FEATURE_EH_FUNCLETS

    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        if (XTnum == regionIndex)
        {
            // Don't update our handler's Last info
            continue;
        }

        if (HBtab->ebdTryLast == bLast)
        {
            // If we moved a set of blocks that were at the end of
            // a different try region then we may need to update ebdTryLast
            for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext)
            {
                if (block == bPrev)
                {
                    fgSetTryEnd(HBtab, bPrev);
                    break;
                }
                else if (block == HBtab->ebdTryLast->bbNext)
                {
                    // bPrev does not come after the TryBeg
                    break;
                }
            }
        }
        if (HBtab->ebdHndLast == bLast)
        {
            // If we moved a set of blocks that were at the end of
            // a different handler region then we must update ebdHndLast
            for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext)
            {
                if (block == bPrev)
                {
                    fgSetHndEnd(HBtab, bPrev);
                    break;
                }
                else if (block == HBtab->ebdHndLast->bbNext)
                {
                    // bPrev does not come after the HndBeg
                    break;
                }
            }
        }
    } // end exception table iteration

    // We have decided to insert the block(s) after fgLastBlock
    fgMoveBlocksAfter(bStart, bLast, insertAfterBlk);

    // If bPrev falls through, we will insert a jump to block
    fgConnectFallThrough(bPrev, bStart);

    // If bLast falls through, we will insert a jump to bNext
    fgConnectFallThrough(bLast, bNext);

#endif // !FEATURE_EH_FUNCLETS

    goto DONE;

FAILURE:

#ifdef DEBUG
    if (verbose)
    {
        printf("*************** Failed fgRelocateEHRange(" FMT_BB ".." FMT_BB ") because %s\n", bStart->bbNum,
               bLast->bbNum, reason);
    }
#endif // DEBUG

    bLast = nullptr;

DONE:

    return bLast;
}

// return true if there is a possibility that the method has a loop (a backedge is present)
bool Compiler::fgMightHaveLoop()
{
    // Don't use a BlockSet for this temporary bitset of blocks: we don't want to have to call EnsureBasicBlockEpoch()
    // and potentially change the block epoch.

    BitVecTraits blockVecTraits(fgBBNumMax + 1, this);
    BitVec       blocksSeen(BitVecOps::MakeEmpty(&blockVecTraits));

    // Walk blocks in list order; a successor already seen means a backedge exists.
    for (BasicBlock* const block : Blocks())
    {
        BitVecOps::AddElemD(&blockVecTraits, blocksSeen, block->bbNum);

        for (BasicBlock* succ : block->GetAllSuccs(this))
        {
            if (BitVecOps::IsMember(&blockVecTraits, blocksSeen, succ->bbNum))
            {
                return true;
            }
        }
    }
    return false;
}

/*****************************************************************************
 *
 * Insert a BasicBlock before the given block.
 */

BasicBlock* Compiler::fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion)
{
    // Create a new BasicBlock and chain it in

    BasicBlock* newBlk = bbNewBasicBlock(jumpKind);
    newBlk->bbFlags |= BBF_INTERNAL;

    fgInsertBBbefore(block, newBlk);

    newBlk->bbRefs = 0;

    // If the new block falls through into a rarely-run block, it inherits that property.
    if (newBlk->bbFallsThrough() && block->isRunRarely())
    {
        newBlk->bbSetRunRarely();
    }

    if (extendRegion)
    {
        fgExtendEHRegionBefore(block);
    }
    else
    {
        // When extendRegion is false the caller is responsible for setting these two values
        newBlk->setTryIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely
        newBlk->setHndIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely
    }

    // We assume that if the block we are inserting before is in the cold region, then this new
    // block will also be in the cold region.
    newBlk->bbFlags |= (block->bbFlags & BBF_COLD);

    return newBlk;
}

/*****************************************************************************
 *
 * Insert a BasicBlock after the given block.
 */

BasicBlock* Compiler::fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion)
{
    // Create a new BasicBlock and chain it in

    BasicBlock* newBlk = bbNewBasicBlock(jumpKind);
    newBlk->bbFlags |= BBF_INTERNAL;

    fgInsertBBafter(block, newBlk);

    newBlk->bbRefs = 0;

    // If 'block' falls through into the new block and is rarely run, so is the new block.
    if (block->bbFallsThrough() && block->isRunRarely())
    {
        newBlk->bbSetRunRarely();
    }

    if (extendRegion)
    {
        fgExtendEHRegionAfter(block);
    }
    else
    {
        // When extendRegion is false the caller is responsible for setting these two values
        newBlk->setTryIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely
        newBlk->setHndIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely
    }

    // If the new block is in the cold region (because the block we are inserting after
    // is in the cold region), mark it as such.
    newBlk->bbFlags |= (block->bbFlags & BBF_COLD);

    return newBlk;
}

/*****************************************************************************
 *  Inserts basic block before existing basic block.
 *
 *  If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region.
 *  (If insertBeforeBlk is the first block of the funclet region, then 'newBlk' will be the
 *  new first block of the funclet region.)
 */

void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk)
{
    if (insertBeforeBlk->bbPrev)
    {
        // Not the first block: inserting before X is inserting after X->bbPrev.
        fgInsertBBafter(insertBeforeBlk->bbPrev, newBlk);
    }
    else
    {
        // Inserting before the first block: newBlk becomes the new fgFirstBB.
        newBlk->setNext(fgFirstBB);

        fgFirstBB      = newBlk;
        newBlk->bbPrev = nullptr;
    }

#if defined(FEATURE_EH_FUNCLETS)

    /* Update fgFirstFuncletBB if insertBeforeBlk is the first block of the funclet region. */

    if (fgFirstFuncletBB == insertBeforeBlk)
    {
        fgFirstFuncletBB = newBlk;
    }

#endif // FEATURE_EH_FUNCLETS
}

/*****************************************************************************
 *  Inserts basic block after existing basic block.
 *
 *  If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region.
 *  (It can't be used to insert a block as the first block of the funclet region).
 */

void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk)
{
    // Splice newBlk into the doubly-linked bbNext/bbPrev chain.
    newBlk->bbNext = insertAfterBlk->bbNext;

    if (insertAfterBlk->bbNext)
    {
        insertAfterBlk->bbNext->bbPrev = newBlk;
    }

    insertAfterBlk->bbNext = newBlk;
    newBlk->bbPrev         = insertAfterBlk;

    if (fgLastBB == insertAfterBlk)
    {
        fgLastBB = newBlk;
        assert(fgLastBB->bbNext == nullptr);
    }
}

// We have two edges (bAlt => bCur) and (bCur => bNext).
//
// Returns true if the weight of (bAlt => bCur)
//  is greater than the weight of (bCur => bNext).
// We compare the edge weights if we have valid edge weights
//  otherwise we compare blocks weights.
// bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) { // bCur can't be NULL and must be a fall through bbJumpKind noway_assert(bCur != nullptr); noway_assert(bCur->bbFallsThrough()); noway_assert(bAlt != nullptr); // We only handle the cases when bAlt is a BBJ_ALWAYS or a BBJ_COND if ((bAlt->bbJumpKind != BBJ_ALWAYS) && (bAlt->bbJumpKind != BBJ_COND)) { return false; } // if bAlt doesn't jump to bCur it can't be a better fall through than bCur if (bAlt->bbJumpDest != bCur) { return false; } // Currently bNext is the fall through for bCur BasicBlock* bNext = bCur->bbNext; noway_assert(bNext != nullptr); // We will set result to true if bAlt is a better fall through than bCur bool result; if (fgHaveValidEdgeWeights) { // We will compare the edge weight for our two choices flowList* edgeFromAlt = fgGetPredForBlock(bCur, bAlt); flowList* edgeFromCur = fgGetPredForBlock(bNext, bCur); noway_assert(edgeFromCur != nullptr); noway_assert(edgeFromAlt != nullptr); result = (edgeFromAlt->edgeWeightMin() > edgeFromCur->edgeWeightMax()); } else { if (bAlt->bbJumpKind == BBJ_ALWAYS) { // Our result is true if bAlt's weight is more than bCur's weight result = (bAlt->bbWeight > bCur->bbWeight); } else { noway_assert(bAlt->bbJumpKind == BBJ_COND); // Our result is true if bAlt's weight is more than twice bCur's weight result = (bAlt->bbWeight > (2 * bCur->bbWeight)); } } return result; } //------------------------------------------------------------------------ // Finds the block closest to endBlk in the range [startBlk..endBlk) after which a block can be // inserted easily. Note that endBlk cannot be returned; its predecessor is the last block that can // be returned. 
// The new block will be put in an EH region described by the arguments regionIndex,
// putInTryRegion, startBlk, and endBlk (explained below), so it must be legal to put the
// new block after the insertion location block, give it the specified EH region index, and not break
// EH nesting rules. This function is careful to choose a block in the correct EH region. However,
// it assumes that the new block can ALWAYS be placed at the end (just before endBlk). That means
// that the caller must ensure that is true.
//
// Below are the possible cases for the arguments to this method:
//
//      1) putInTryRegion == true and regionIndex > 0:
//         Search in the try region indicated by regionIndex.
//      2) putInTryRegion == false and regionIndex > 0:
//         a) If startBlk is the first block of a filter and endBlk is the block after the end of the
//            filter (that is, the startBlk and endBlk match a filter bounds exactly), then choose a
//            location within this filter region. (Note that, due to IL rules, filters do not have any
//            EH nested within them.) Otherwise, filters are skipped.
//         b) Else, search in the handler region indicated by regionIndex.
//      3) regionIndex = 0:
//         Search in the entire main method, excluding all EH regions. In this case, putInTryRegion must be true.
//
// This method makes sure to find an insertion point which would not cause the inserted block to
// be put inside any inner try/filter/handler regions.
//
// The actual insertion occurs after the returned block. Note that the returned insertion point might
// be the last block of a more nested EH region, because the new block will be inserted after the insertion
// point, and will not extend the more nested EH region. For example:
//
//      try3   try2   try1
//      |---   |      |      BB01
//      |      |---   |      BB02
//      |      |      |---   BB03
//      |      |      |      BB04
//      |      |---   |---   BB05
//      |                    BB06
//      |-----------------   BB07
//
// for regionIndex==try3, putInTryRegion==true, we might return BB05, even though BB05 will have a try index
// for try1 (the most nested 'try' region the block is in). That's because when we insert after BB05, the new
// block will be in the correct, desired EH region, since try1 and try2 regions will not be extended to include
// the inserted block. Furthermore, for regionIndex==try2, putInTryRegion==true, we can also return BB05. In this
// case, when the new block is inserted, the try1 region remains the same, but we need extend region 'try2' to
// include the inserted block. (We also need to check all parent regions as well, just in case any parent regions
// also end on the same block, in which case we would also need to extend the parent regions. This is standard
// procedure when inserting a block at the end of an EH region.)
//
// If nearBlk is non-nullptr then we return the closest block after nearBlk that will work best.
//
// We try to find a block in the appropriate region that is not a fallthrough block, so we can insert after it
// without the need to insert a jump around the inserted block.
//
// Note that regionIndex is numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is, "0" is
// "main method" and otherwise is +1 from normal, so we can call, e.g., ehGetDsc(tryIndex - 1).
//
// Arguments:
//    regionIndex - the region index where the new block will be inserted. Zero means entire method;
//          non-zero means either a "try" or a "handler" region, depending on what putInTryRegion says.
//    putInTryRegion - 'true' to put the block in the 'try' region corresponding to 'regionIndex', 'false'
//          to put the block in the handler region. Should be 'true' if regionIndex==0.
//    startBlk - start block of range to search.
//    endBlk - end block of range to search (don't include this block in the range). Can be nullptr to indicate
//          the end of the function.
//    nearBlk - If non-nullptr, try to find an insertion location closely after this block. If nullptr, we insert
//          at the best location found towards the end of the acceptable block range.
//    jumpBlk - When nearBlk is set, this can be set to the block which jumps to bNext->bbNext (TODO: need to review
//          this?)
//    runRarely - true if the block being inserted is expected to be rarely run. This helps determine
//          the best place to put the new block, by putting in a place that has the same 'rarely run' characteristic.
//
// Return Value:
//    A block with the desired characteristics, so the new block will be inserted after this one.
//    If there is no suitable location, return nullptr. This should basically never happen.
//
BasicBlock* Compiler::fgFindInsertPoint(unsigned    regionIndex,
                                        bool        putInTryRegion,
                                        BasicBlock* startBlk,
                                        BasicBlock* endBlk,
                                        BasicBlock* nearBlk,
                                        BasicBlock* jumpBlk,
                                        bool        runRarely)
{
    noway_assert(startBlk != nullptr);
    noway_assert(startBlk != endBlk);
    noway_assert((regionIndex == 0 && putInTryRegion) ||                                       // Search in the main method
                 (putInTryRegion && regionIndex > 0 && startBlk->bbTryIndex == regionIndex) || // Search in the specified try region
                 (!putInTryRegion && regionIndex > 0 && startBlk->bbHndIndex == regionIndex)); // Search in the specified handler region

#ifdef DEBUG
    // Assert that startBlk precedes endBlk in the block list.
    // We don't want to use bbNum to assert this condition, as we cannot depend on the block numbers being
    // sequential at all times.
    for (BasicBlock* b = startBlk; b != endBlk; b = b->bbNext)
    {
        assert(b != nullptr); // We reached the end of the block list, but never found endBlk.
    }
#endif // DEBUG

    JITDUMP("fgFindInsertPoint(regionIndex=%u, putInTryRegion=%s, startBlk=" FMT_BB ", endBlk=" FMT_BB
            ", nearBlk=" FMT_BB ", "
            "jumpBlk=" FMT_BB ", runRarely=%s)\n",
            regionIndex, dspBool(putInTryRegion), startBlk->bbNum, (endBlk == nullptr) ? 0 : endBlk->bbNum,
            (nearBlk == nullptr) ? 0 : nearBlk->bbNum, (jumpBlk == nullptr) ? 0 : jumpBlk->bbNum, dspBool(runRarely));

    // Are we trying to insert into a filter region whose bounds exactly match [startBlk, endBlk)?
    bool insertingIntoFilter = false;
    if (!putInTryRegion)
    {
        EHblkDsc* const dsc = ehGetDsc(regionIndex - 1);
        insertingIntoFilter = dsc->HasFilter() && (startBlk == dsc->ebdFilter) && (endBlk == dsc->ebdHndBeg);
    }

    bool        reachedNear = false; // Have we reached 'nearBlk' in our search? If not, we'll keep searching.
    bool        inFilter    = false; // Are we in a filter region that we need to skip?
    BasicBlock* bestBlk =
        nullptr; // Set to the best insertion point we've found so far that meets all the EH requirements.
    BasicBlock* goodBlk =
        nullptr; // Set to an acceptable insertion point that we'll use if we don't find a 'best' option.
    BasicBlock* blk;

    if (nearBlk != nullptr)
    {
        // Does the nearBlk precede the startBlk?
        for (blk = nearBlk; blk != nullptr; blk = blk->bbNext)
        {
            if (blk == startBlk)
            {
                reachedNear = true;
                break;
            }
            else if (blk == endBlk)
            {
                break;
            }
        }
    }

    for (blk = startBlk; blk != endBlk; blk = blk->bbNext)
    {
        // The only way (blk == nullptr) could be true is if the caller passed an endBlk that preceded startBlk in the
        // block list, or if endBlk isn't in the block list at all. In DEBUG, we'll instead hit the similar
        // well-formedness assert earlier in this function.
        noway_assert(blk != nullptr);

        if (blk == nearBlk)
        {
            reachedNear = true;
        }

        if (blk->bbCatchTyp == BBCT_FILTER)
        {
            // Record the fact that we entered a filter region, so we don't insert into filters...
            // Unless the caller actually wanted the block inserted in this exact filter region.
            if (!insertingIntoFilter || (blk != startBlk))
            {
                inFilter = true;
            }
        }
        else if (blk->bbCatchTyp == BBCT_FILTER_HANDLER)
        {
            // Record the fact that we exited a filter region.
            inFilter = false;
        }

        // Don't insert a block inside this filter region.
        if (inFilter)
        {
            continue;
        }

        // Note that the new block will be inserted AFTER "blk". We check to make sure that doing so
        // would put the block in the correct EH region. We make an assumption here that you can
        // ALWAYS insert the new block before "endBlk" (that is, at the end of the search range)
        // and be in the correct EH region. This is must be guaranteed by the caller (as it is by
        // fgNewBBinRegion(), which passes the search range as an exact EH region block range).
        // Because of this assumption, we only check the EH information for blocks before the last block.
        if (blk->bbNext != endBlk)
        {
            // We are in the middle of the search range. We can't insert the new block in
            // an inner try or handler region. We can, however, set the insertion
            // point to the last block of an EH try/handler region, if the enclosing
            // region is the region we wish to insert in. (Since multiple regions can
            // end at the same block, we need to search outwards, checking that the
            // block is the last block of every EH region out to the region we want
            // to insert in.) This is especially useful for putting a call-to-finally
            // block on AMD64 immediately after its corresponding 'try' block, so in the
            // common case, we'll just fall through to it. For example:
            //
            //      BB01
            //      BB02 -- first block of try
            //      BB03
            //      BB04 -- last block of try
            //      BB05 -- first block of finally
            //      BB06
            //      BB07 -- last block of handler
            //      BB08
            //
            // Assume there is only one try/finally, so BB01 and BB08 are in the "main function".
            // For AMD64 call-to-finally, we'll want to insert the BBJ_CALLFINALLY in
            // the main function, immediately after BB04. This allows us to do that.
            if (!fgCheckEHCanInsertAfterBlock(blk, regionIndex, putInTryRegion))
            {
                // Can't insert here.
                continue;
            }
        }

        // Look for an insert location:
        // 1. We want blocks that don't end with a fall through,
        // 2. Also, when blk equals nearBlk we may want to insert here.
        if (!blk->bbFallsThrough() || (blk == nearBlk))
        {
            bool updateBestBlk = true; // We will probably update the bestBlk

            // If blk falls through then we must decide whether to use the nearBlk
            // hint
            if (blk->bbFallsThrough())
            {
                noway_assert(blk == nearBlk);
                if (jumpBlk != nullptr)
                {
                    updateBestBlk = fgIsBetterFallThrough(blk, jumpBlk);
                }
                else
                {
                    updateBestBlk = false;
                }
            }

            // If we already have a best block, see if the 'runRarely' flags influences
            // our choice. If we want a runRarely insertion point, and the existing best
            // block is run rarely but the current block isn't run rarely, then don't
            // update the best block.
            // TODO-CQ: We should also handle the reverse case, where runRarely is false (we
            // want a non-rarely-run block), but bestBlock->isRunRarely() is true. In that
            // case, we should update the block, also. Probably what we want is:
            //    (bestBlk->isRunRarely() != runRarely) && (blk->isRunRarely() == runRarely)
            if (updateBestBlk && (bestBlk != nullptr) && runRarely && bestBlk->isRunRarely() && !blk->isRunRarely())
            {
                updateBestBlk = false;
            }

            if (updateBestBlk)
            {
                // We found a 'best' insertion location, so save it away.
                bestBlk = blk;

                // If we've reached nearBlk, we've satisfied all the criteria,
                // so we're done.
                if (reachedNear)
                {
                    goto DONE;
                }

                // If we haven't reached nearBlk, keep looking for a 'best' location, just
                // in case we'll find one at or after nearBlk. If no nearBlk was specified,
                // we prefer inserting towards the end of the given range, so keep looking
                // for more acceptable insertion locations.
            }
        }

        // No need to update goodBlk after we have set bestBlk, but we could still find a better
        // bestBlk, so keep looking.
        if (bestBlk != nullptr)
        {
            continue;
        }

        // Set the current block as a "good enough" insertion point, if it meets certain criteria.
        // We'll return this block if we don't find a "best" block in the search range. The block
        // can't be a BBJ_CALLFINALLY of a BBJ_CALLFINALLY/BBJ_ALWAYS pair (since we don't want
        // to insert anything between these two blocks). Otherwise, we can use it. However,
        // if we'd previously chosen a BBJ_COND block, then we'd prefer the "good" block to be
        // something else. We keep updating it until we've reached the 'nearBlk', to push it as
        // close to endBlk as possible.
        if (!blk->isBBCallAlwaysPair())
        {
            if (goodBlk == nullptr)
            {
                goodBlk = blk;
            }
            else if ((goodBlk->bbJumpKind == BBJ_COND) || (blk->bbJumpKind != BBJ_COND))
            {
                if ((blk == nearBlk) || !reachedNear)
                {
                    goodBlk = blk;
                }
            }
        }
    }

    // If we didn't find a non-fall_through block, then insert at the last good block.

    if (bestBlk == nullptr)
    {
        bestBlk = goodBlk;
    }

DONE:

#if defined(JIT32_GCENCODER)
    // If we are inserting into a filter and the best block is the end of the filter region, we need to
    // insert after its predecessor instead: the JIT32 GC encoding used by the x86 CLR ABI states that the
    // terminal block of a filter region is its exit block. If the filter region consists of a single block,
    // a new block cannot be inserted without either splitting the single block before inserting a new block
    // or inserting the new block before the single block and updating the filter description such that the
    // inserted block is marked as the entry block for the filter. Because this sort of split can be complex
    // (especially given that it must ensure that the liveness of the exception object is properly tracked),
    // we avoid this situation by never generating single-block filters on x86 (see impPushCatchArgOnStack).
    if (insertingIntoFilter && (bestBlk == endBlk->bbPrev))
    {
        assert(bestBlk != startBlk);
        bestBlk = bestBlk->bbPrev;
    }
#endif // defined(JIT32_GCENCODER)

    return bestBlk;
}

//------------------------------------------------------------------------
// Creates a new BasicBlock and inserts it in a specific EH region, given by 'tryIndex', 'hndIndex', and 'putInFilter'.
//
// If 'putInFilter' it true, then the block is inserted in the filter region given by 'hndIndex'. In this case, tryIndex
// must be a less nested EH region (that is, tryIndex > hndIndex).
//
// Otherwise, the block is inserted in either the try region or the handler region, depending on which one is the inner
// region. In other words, if the try region indicated by tryIndex is nested in the handler region indicated by
// hndIndex,
// then the new BB will be created in the try region. Vice versa.
//
// Note that tryIndex and hndIndex are numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is,
// "0" is "main method" and otherwise is +1 from normal, so we can call, e.g., ehGetDsc(tryIndex - 1).
//
// To be more specific, this function will create a new BB in one of the following 5 regions (if putInFilter is false):
// 1. When tryIndex = 0 and hndIndex = 0:
//    The new BB will be created in the method region.
// 2. When tryIndex != 0 and hndIndex = 0:
//    The new BB will be created in the try region indicated by tryIndex.
// 3. When tryIndex == 0 and hndIndex != 0:
//    The new BB will be created in the handler region indicated by hndIndex.
// 4. When tryIndex != 0 and hndIndex != 0 and tryIndex < hndIndex:
//    In this case, the try region is nested inside the handler region. Therefore, the new BB will be created
//    in the try region indicated by tryIndex.
// 5. When tryIndex != 0 and hndIndex != 0 and tryIndex > hndIndex:
//    In this case, the handler region is nested inside the try region. Therefore, the new BB will be created
//    in the handler region indicated by hndIndex.
// // Note that if tryIndex != 0 and hndIndex != 0 then tryIndex must not be equal to hndIndex (this makes sense because // if they are equal, you are asking to put the new block in both the try and handler, which is impossible). // // The BasicBlock will not be inserted inside an EH region that is more nested than the requested tryIndex/hndIndex // region (so the function is careful to skip more nested EH regions when searching for a place to put the new block). // // This function cannot be used to insert a block as the first block of any region. It always inserts a block after // an existing block in the given region. // // If nearBlk is nullptr, or the block is run rarely, then the new block is assumed to be run rarely. // // Arguments: // jumpKind - the jump kind of the new block to create. // tryIndex - the try region to insert the new block in, described above. This must be a number in the range // [0..compHndBBtabCount]. // hndIndex - the handler region to insert the new block in, described above. This must be a number in the range // [0..compHndBBtabCount]. // nearBlk - insert the new block closely after this block, if possible. If nullptr, put the new block anywhere // in the requested region. // putInFilter - put the new block in the filter region given by hndIndex, as described above. // runRarely - 'true' if the new block is run rarely. // insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only // implemented when inserting into the main function (not into any EH region). // // Return Value: // The new block. 
BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
                                      unsigned    tryIndex,
                                      unsigned    hndIndex,
                                      BasicBlock* nearBlk,
                                      bool        putInFilter /* = false */,
                                      bool        runRarely /* = false */,
                                      bool        insertAtEnd /* = false */)
{
    assert(tryIndex <= compHndBBtabCount);
    assert(hndIndex <= compHndBBtabCount);

    /* afterBlk is the block which will precede the newBB */
    BasicBlock* afterBlk;

    // start and end limit for inserting the block
    BasicBlock* startBlk = nullptr;
    BasicBlock* endBlk   = nullptr;

    // regionIndex stays 0 for the main method region; otherwise it is the
    // 1-based try or handler index (same numbering as bbTryIndex/bbHndIndex).
    bool     putInTryRegion = true;
    unsigned regionIndex    = 0;

    // First, figure out which region (the "try" region or the "handler" region) to put the newBB in.
    if ((tryIndex == 0) && (hndIndex == 0))
    {
        assert(!putInFilter);

        endBlk = fgEndBBAfterMainFunction(); // don't put new BB in funclet region

        if (insertAtEnd || (nearBlk == nullptr))
        {
            /* We'll just insert the block at the end of the method, before the funclets */
            afterBlk = fgLastBBInMainFunction();
            goto _FoundAfterBlk;
        }
        else
        {
            // We'll search through the entire method
            startBlk = fgFirstBB;
        }
        noway_assert(regionIndex == 0);
    }
    else
    {
        noway_assert(tryIndex > 0 || hndIndex > 0);
        PREFIX_ASSUME(tryIndex <= compHndBBtabCount);
        PREFIX_ASSUME(hndIndex <= compHndBBtabCount);

        // Decide which region to put in, the "try" region or the "handler" region.
        // The more nested of the two indices wins (smaller index == more nested).
        if (tryIndex == 0)
        {
            noway_assert(hndIndex > 0);
            putInTryRegion = false;
        }
        else if (hndIndex == 0)
        {
            noway_assert(tryIndex > 0);
            noway_assert(putInTryRegion);
            assert(!putInFilter);
        }
        else
        {
            noway_assert(tryIndex > 0 && hndIndex > 0 && tryIndex != hndIndex);
            putInTryRegion = (tryIndex < hndIndex);
        }

        if (putInTryRegion)
        {
            // Try region is the inner region.
            // In other words, try region must be nested inside the handler region.
            noway_assert(hndIndex == 0 || bbInHandlerRegions(hndIndex - 1, ehGetDsc(tryIndex - 1)->ebdTryBeg));
            assert(!putInFilter);
        }
        else
        {
            // Handler region is the inner region.
            // In other words, handler region must be nested inside the try region.
            noway_assert(tryIndex == 0 || bbInTryRegions(tryIndex - 1, ehGetDsc(hndIndex - 1)->ebdHndBeg));
        }

        // Figure out the start and end block range to search for an insertion location. Pick the beginning and
        // ending blocks of the target EH region (the 'endBlk' is one past the last block of the EH region, to make
        // loop iteration easier). Note that, after funclets have been created (for FEATURE_EH_FUNCLETS),
        // this linear block range will not include blocks of handlers for try/handler clauses nested within
        // this EH region, as those blocks have been extracted as funclets. That is ok, though, because we don't
        // want to insert a block in any nested EH region.

        if (putInTryRegion)
        {
            // We will put the newBB in the try region.
            EHblkDsc* ehDsc = ehGetDsc(tryIndex - 1);
            startBlk        = ehDsc->ebdTryBeg;
            endBlk          = ehDsc->ebdTryLast->bbNext;
            regionIndex     = tryIndex;
        }
        else if (putInFilter)
        {
            // We will put the newBB in the filter region.
            EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1);
            startBlk        = ehDsc->ebdFilter;
            endBlk          = ehDsc->ebdHndBeg; // filter range ends where the handler proper begins
            regionIndex     = hndIndex;
        }
        else
        {
            // We will put the newBB in the handler region.
            EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1);
            startBlk        = ehDsc->ebdHndBeg;
            endBlk          = ehDsc->ebdHndLast->bbNext;
            regionIndex     = hndIndex;
        }

        noway_assert(regionIndex > 0);
    }

    // Now find the insertion point.
    afterBlk = fgFindInsertPoint(regionIndex, putInTryRegion, startBlk, endBlk, nearBlk, nullptr, runRarely);

_FoundAfterBlk:;

    /* We have decided to insert the block after 'afterBlk'. */
    noway_assert(afterBlk != nullptr);

    JITDUMP("fgNewBBinRegion(jumpKind=%u, tryIndex=%u, hndIndex=%u, putInFilter=%s, runRarely=%s, insertAtEnd=%s): "
            "inserting after " FMT_BB "\n",
            jumpKind, tryIndex, hndIndex, dspBool(putInFilter), dspBool(runRarely), dspBool(insertAtEnd),
            afterBlk->bbNum);

    return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion);
}

//------------------------------------------------------------------------
// Creates a new BasicBlock and inserts it in the same EH region as 'srcBlk'.
//
// See the implementation of fgNewBBinRegion() used by this one for more notes.
//
// Arguments:
//    jumpKind - the jump kind of the new block to create.
//    srcBlk   - insert the new block in the same EH region as this block, and closely after it if possible.
//
// Return Value:
//    The new block.

BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind,
                                      BasicBlock* srcBlk,
                                      bool        runRarely /* = false */,
                                      bool        insertAtEnd /* = false */)
{
    assert(srcBlk != nullptr);

    const unsigned tryIndex    = srcBlk->bbTryIndex;
    const unsigned hndIndex    = srcBlk->bbHndIndex;
    bool           putInFilter = false;

    // Check to see if we need to put the new block in a filter. We do if srcBlk is in a filter.
    // This can only be true if there is a handler index, and the handler region is more nested than the
    // try region (if any). This is because no EH regions can be nested within a filter.
    if (BasicBlock::ehIndexMaybeMoreNested(hndIndex, tryIndex))
    {
        assert(hndIndex != 0); // If hndIndex is more nested, we must be in some handler!
        putInFilter = ehGetDsc(hndIndex - 1)->InFilterRegionBBRange(srcBlk);
    }

    return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, putInFilter, runRarely, insertAtEnd);
}

//------------------------------------------------------------------------
// Creates a new BasicBlock and inserts it at the end of the function.
//
// See the implementation of fgNewBBinRegion() used by this one for more notes.
// // Arguments: // jumpKind - the jump kind of the new block to create. // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind) { return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, /* insertAtEnd */ true); } //------------------------------------------------------------------------ // Creates a new BasicBlock, and inserts it after 'afterBlk'. // // The block cannot be inserted into a more nested try/handler region than that specified by 'regionIndex'. // (It is given exactly 'regionIndex'.) Thus, the parameters must be passed to ensure proper EH nesting // rules are followed. // // Arguments: // jumpKind - the jump kind of the new block to create. // afterBlk - insert the new block after this one. // regionIndex - the block will be put in this EH region. // putInTryRegion - If true, put the new block in the 'try' region corresponding to 'regionIndex', and // set its handler index to the most nested handler region enclosing that 'try' region. // Otherwise, put the block in the handler region specified by 'regionIndex', and set its 'try' // index to the most nested 'try' region enclosing that handler region. // // Return Value: // The new block. 
BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind,
                                            BasicBlock* afterBlk,
                                            unsigned    regionIndex,
                                            bool        putInTryRegion)
{
    /* Insert the new block */
    BasicBlock* afterBlkNext = afterBlk->bbNext;
    (void)afterBlkNext; // prevent "unused variable" error from GCC
    BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false);

    // Stamp the new block's EH indices: one of them is 'regionIndex' itself, the
    // other is the innermost enclosing region of the opposite kind.
    if (putInTryRegion)
    {
        noway_assert(regionIndex <= MAX_XCPTN_INDEX);
        newBlk->bbTryIndex = (unsigned short)regionIndex;
        newBlk->bbHndIndex = bbFindInnermostHandlerRegionContainingTryRegion(regionIndex);
    }
    else
    {
        newBlk->bbTryIndex = bbFindInnermostTryRegionContainingHandlerRegion(regionIndex);
        noway_assert(regionIndex <= MAX_XCPTN_INDEX);
        newBlk->bbHndIndex = (unsigned short)regionIndex;
    }

    // We're going to compare for equal try regions (to handle the case of 'mutually protect'
    // regions). We need to save off the current try region, otherwise we might change it
    // before it gets compared later, thereby making future comparisons fail.

    BasicBlock* newTryBeg;
    BasicBlock* newTryLast;
    (void)ehInitTryBlockRange(newBlk, &newTryBeg, &newTryLast);

    // Walk the whole EH table: any region whose last block was 'afterBlk' may need
    // its "last block" pointer advanced to 'newBlk'.
    unsigned  XTnum;
    EHblkDsc* HBtab;

    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        // Is afterBlk at the end of a try region?
        if (HBtab->ebdTryLast == afterBlk)
        {
            noway_assert(afterBlkNext == newBlk->bbNext);

            bool extendTryRegion = false;
            if (newBlk->hasTryIndex())
            {
                // We're adding a block after the last block of some try region. Do
                // we extend the try region to include the block, or not?
                // If the try region is exactly the same as the try region
                // associated with the new block (based on the block's try index,
                // which represents the innermost try the block is a part of), then
                // we extend it.
                // If the try region is a "parent" try region -- an enclosing try region
                // that has the same last block as the new block's try region -- then
                // we also extend. For example:
                //      try { // 1
                //          ...
                //          try { // 2
                //          ...
                //      } /* 2 */ } /* 1 */
                // This example is meant to indicate that both try regions 1 and 2 end at
                // the same block, and we're extending 2. Thus, we must also extend 1. If we
                // only extended 2, we would break proper nesting. (Dev11 bug 137967)

                extendTryRegion = HBtab->ebdIsSameTry(newTryBeg, newTryLast) || bbInTryRegions(XTnum, newBlk);
            }

            // Does newBlk extend this try region?
            if (extendTryRegion)
            {
                // Yes, newBlk extends this try region.
                // newBlk is now this try region's last block.
                fgSetTryEnd(HBtab, newBlk);
            }
        }

        // Is afterBlk at the end of a handler region?
        if (HBtab->ebdHndLast == afterBlk)
        {
            noway_assert(afterBlkNext == newBlk->bbNext);

            // Does newBlk extend this handler region?
            bool extendHndRegion = false;
            if (newBlk->hasHndIndex())
            {
                // We're adding a block after the last block of some handler region. Do
                // we extend the handler region to include the block, or not?
                // If the handler region is exactly the same as the handler region
                // associated with the new block (based on the block's handler index,
                // which represents the innermost handler the block is a part of), then
                // we extend it.
                // If the handler region is a "parent" handler region -- an enclosing
                // handler region that has the same last block as the new block's handler
                // region -- then we also extend. For example:
                //      catch { // 1
                //          ...
                //          catch { // 2
                //          ...
                //      } /* 2 */ } /* 1 */
                // This example is meant to indicate that both handler regions 1 and 2 end at
                // the same block, and we're extending 2. Thus, we must also extend 1. If we
                // only extended 2, we would break proper nesting. (Dev11 bug 372051)

                extendHndRegion = bbInHandlerRegions(XTnum, newBlk);
            }

            if (extendHndRegion)
            {
                // Yes, newBlk extends this handler region.
                // newBlk is now the last block of the handler.
                fgSetHndEnd(HBtab, newBlk);
            }
        }
    }

    /* If afterBlk falls through, we insert a jump around newBlk */
    fgConnectFallThrough(afterBlk, newBlk->bbNext);

#ifdef DEBUG
    fgVerifyHandlerTab();
#endif

    return newBlk;
}

//------------------------------------------------------------------------
// fgUseThrowHelperBlocks: Determine whether the compiler should use shared throw helper blocks.
//
// Note:
//   For debuggable code, codegen will generate the 'throw' code inline.
// Return Value:
//    true if 'throw' helper blocks should be created.
bool Compiler::fgUseThrowHelperBlocks()
{
    return !opts.compDbgCode;
}
<% from pwnlib.util import lists, packing, fiddling %>
<% from pwnlib import shellcraft %>
<% import six %>
<%page args="string, append_null = True, register1='x14', register2='x15', pretty=None"/>
<%docstring>
Pushes a string onto the stack.

Words are stored in pairs where possible so that a single STP instruction
can push 16 bytes at a time; the stack pointer stays 16-byte aligned.

Args:
    string (str): The string to push.
    append_null (bool): Whether to append a single NULL-byte before pushing.
    register1 (str): First scratch register to use (default: 'x14').
    register2 (str): Second scratch register, used as the second half of an
        STP pair (default: 'x15').
    pretty: Optional pretty-printed representation used in the generated
        comment; defaults to ``shellcraft.pretty(string)``.

Examples:

    >>> print(shellcraft.pushstr("Hello!").rstrip())
        /* push b'Hello!\x00' */
        /* Set x14 = 36762444129608 = 0x216f6c6c6548 */
        mov x14, #25928
        movk x14, #27756, lsl #16
        movk x14, #8559, lsl #0x20
        str x14, [sp, #-16]!
    >>> print(shellcraft.pushstr("Hello, world!").rstrip())
        /* push b'Hello, world!\x00' */
        /* Set x14 = 8583909746840200520 = 0x77202c6f6c6c6548 */
        mov x14, #25928
        movk x14, #27756, lsl #16
        movk x14, #11375, lsl #0x20
        movk x14, #30496, lsl #0x30
        /* Set x15 = 143418749551 = 0x21646c726f */
        mov x15, #29295
        movk x15, #25708, lsl #16
        movk x15, #33, lsl #0x20
        stp x14, x15, [sp, #-16]!
    >>> print(shellcraft.pushstr("Hello, world, bienvenue").rstrip())
        /* push b'Hello, world, bienvenue\x00' */
        /* Set x14 = 8583909746840200520 = 0x77202c6f6c6c6548 */
        mov x14, #25928
        movk x14, #27756, lsl #16
        movk x14, #11375, lsl #0x20
        movk x14, #30496, lsl #0x30
        /* Set x15 = 7593667296735556207 = 0x6962202c646c726f */
        mov x15, #29295
        movk x15, #25708, lsl #16
        movk x15, #8236, lsl #0x20
        movk x15, #26978, lsl #0x30
        stp x14, x15, [sp, #-16]!
        /* Set x14 = 28558089656888933 = 0x65756e65766e65 */
        mov x14, #28261
        movk x14, #25974, lsl #16
        movk x14, #30062, lsl #0x20
        movk x14, #101, lsl #0x30
        str x14, [sp, #-16]!
    >>> print(shellcraft.pushstr("Hello, world, bienvenue!").rstrip())
        /* push b'Hello, world, bienvenue!\x00' */
        /* Set x14 = 8583909746840200520 = 0x77202c6f6c6c6548 */
        mov x14, #25928
        movk x14, #27756, lsl #16
        movk x14, #11375, lsl #0x20
        movk x14, #30496, lsl #0x30
        /* Set x15 = 7593667296735556207 = 0x6962202c646c726f */
        mov x15, #29295
        movk x15, #25708, lsl #16
        movk x15, #8236, lsl #0x20
        movk x15, #26978, lsl #0x30
        stp x14, x15, [sp, #-16]!
        /* Set x14 = 2406458692908510821 = 0x2165756e65766e65 */
        mov x14, #28261
        movk x14, #25974, lsl #16
        movk x14, #30062, lsl #0x20
        movk x14, #8549, lsl #0x30
        mov x15, xzr
        stp x14, x15, [sp, #-16]!
</%docstring>
<%
    if isinstance(string, six.text_type):
        string = string.encode('utf-8')

    if append_null and not string.endswith(b'\x00'):
        string += b'\x00'

    pretty_string = pretty or shellcraft.pretty(string)

    # Pad to a whole number of 8-byte words.
    while len(string) % 8:
        string += b'\x00'

    # Unpack everything into integers, and group them by twos
    # so we may use STP to store multiple in a single instruction
    words = packing.unpack_many(string)
    pairs = lists.group(2, words)

    # The stack must be 16-byte aligned
    total  = len(pairs) * 16
    offset = 0
%>\
    /* push ${pretty_string} */
%for i,pair in enumerate(pairs):
    ${shellcraft.mov(register1, pair[0])}
%if len(pair) == 1:
    str ${register1}, [sp, #-16]!
%else:
    ${shellcraft.mov(register2, pair[1])}
    stp ${register1}, ${register2}, [sp, #-16]!
%endif
%endfor
.global s_prepare_buffers s_prepare_buffers: push %r10 push %r13 push %r15 push %r9 push %rcx push %rdi push %rdx push %rsi lea addresses_WC_ht+0xbd57, %r10 nop nop nop cmp $53755, %r13 mov $0x6162636465666768, %r9 movq %r9, %xmm2 vmovups %ymm2, (%r10) nop inc %rdx lea addresses_A_ht+0xc1f7, %r13 clflush (%r13) nop and %r9, %r9 movb $0x61, (%r13) nop dec %r13 lea addresses_A_ht+0x15657, %rsi lea addresses_normal_ht+0x1de57, %rdi nop nop cmp %r10, %r10 mov $59, %rcx rep movsl nop nop nop nop inc %r9 lea addresses_A_ht+0x16a67, %rdi nop nop nop nop inc %r10 movl $0x61626364, (%rdi) nop xor $8147, %r15 lea addresses_UC_ht+0x12357, %r10 clflush (%r10) nop nop nop nop and $49876, %rsi mov (%r10), %r15 nop nop nop xor $17422, %rsi lea addresses_WT_ht+0x15627, %rcx nop nop nop nop nop sub $20092, %rdi movb $0x61, (%rcx) nop nop nop nop nop add $19888, %r15 lea addresses_normal_ht+0x129d7, %r10 nop nop nop nop nop inc %rcx movb $0x61, (%r10) nop nop nop sub $63587, %r10 lea addresses_D_ht+0x19057, %r15 nop nop nop nop sub %rsi, %rsi mov (%r15), %r9d nop nop cmp %r15, %r15 pop %rsi pop %rdx pop %rdi pop %rcx pop %r9 pop %r15 pop %r13 pop %r10 ret .global s_faulty_load s_faulty_load: push %r10 push %r12 push %r9 push %rbp push %rdi push %rdx push %rsi // Store lea addresses_A+0x18d8b, %rbp nop nop mfence mov $0x5152535455565758, %rdx movq %rdx, (%rbp) nop cmp %r9, %r9 // Load lea addresses_WC+0xb177, %rbp nop nop inc %rsi vmovups (%rbp), %ymm0 vextracti128 $0, %ymm0, %xmm0 vpextrq $1, %xmm0, %r9 nop nop nop nop dec %r10 // Store lea addresses_RW+0x4057, %rbp nop nop nop nop nop add $2155, %r10 movl $0x51525354, (%rbp) nop nop nop nop nop sub %rsi, %rsi // Store lea addresses_WT+0xc397, %r9 nop nop nop nop nop dec %r12 mov $0x5152535455565758, %rbp movq %rbp, %xmm0 movups %xmm0, (%r9) nop nop nop xor %rbp, %rbp // Load lea addresses_UC+0x17829, %r9 sub $53428, %rdx mov (%r9), %r10d nop nop nop sub %r10, %r10 // Faulty Load lea addresses_normal+0x11857, %r9 nop nop nop nop and 
%rbp, %rbp mov (%r9), %rsi lea oracles, %r10 and $0xff, %rsi shlq $12, %rsi mov (%r10,%rsi,1), %rsi pop %rsi pop %rdx pop %rdi pop %rbp pop %r9 pop %r12 pop %r10 ret /* <gen_faulty_load> [REF] {'src': {'type': 'addresses_normal', 'same': False, 'size': 16, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'dst': {'type': 'addresses_A', 'same': False, 'size': 8, 'congruent': 1, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'src': {'type': 'addresses_WC', 'same': False, 'size': 32, 'congruent': 1, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'dst': {'type': 'addresses_RW', 'same': False, 'size': 4, 'congruent': 11, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_WT', 'same': False, 'size': 16, 'congruent': 6, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'src': {'type': 'addresses_UC', 'same': False, 'size': 4, 'congruent': 1, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} [Faulty Load] {'src': {'type': 'addresses_normal', 'same': True, 'size': 8, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} <gen_prepare_buffer> {'dst': {'type': 'addresses_WC_ht', 'same': False, 'size': 32, 'congruent': 8, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_A_ht', 'same': False, 'size': 1, 'congruent': 0, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'src': {'type': 'addresses_A_ht', 'congruent': 7, 'same': False}, 'dst': {'type': 'addresses_normal_ht', 'congruent': 0, 'same': False}, 'OP': 'REPM'} {'dst': {'type': 'addresses_A_ht', 'same': False, 'size': 4, 'congruent': 2, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'src': {'type': 'addresses_UC_ht', 'same': False, 'size': 8, 'congruent': 8, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'dst': {'type': 'addresses_WT_ht', 'same': False, 'size': 1, 'congruent': 2, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} {'dst': {'type': 'addresses_normal_ht', 'same': False, 'size': 1, 'congruent': 5, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'} 
{'src': {'type': 'addresses_D_ht', 'same': False, 'size': 4, 'congruent': 11, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'} {'34': 21829} 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 
34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 34 */
;------------------------------------------------------------------------------
;
; Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
;
; Module Name:
;
;  FspTempRamInit.nasm
;
; Abstract:
;
;  This is the code that will call into FSP TempRamInit API
;
;------------------------------------------------------------------------------

SECTION .text

; FSP API offset
%define FSP_HEADER_TEMPRAMINIT_OFFSET   0x30

extern  ASM_PFX(PcdGet32(PcdFSPTBase))
extern  ASM_PFX(TempRamInitParams)

global ASM_PFX(FspTempRamInit)
ASM_PFX(FspTempRamInit):
        ;
        ; This hook is called to initialize temporary RAM.
        ; ESI, EDI need to be preserved.
        ; ESP contains the return address (no stack exists yet).
        ; ECX, EDX return the temporary RAM start and end.
        ;

        ;
        ; Get FSP-T base in EAX
        ;
        mov     ebp, esp                ; save the register-held return address in EBP
        mov     eax, dword [ASM_PFX(PcdGet32(PcdFSPTBase))]

        ;
        ; Find the fsp info header
        ; Jump to TempRamInit API
        ;
        ; NOTE(review): 0x94 is assumed to be the offset of the FSP info header
        ; within the FSP-T image -- confirm against the FSP specification.
        add     eax, dword [eax + 094h + FSP_HEADER_TEMPRAMINIT_OFFSET]
        mov     esp, TempRamInitStack   ; fake stack: "return address" + params pointer
        jmp     eax

TempRamInitDone:
        mov     esp, ebp                ; restore the caller's return address into ESP
        jmp     esp                     ; and jump back to it (stackless return)

align 16
TempRamInitStack:
        DD      TempRamInitDone
        DD      ASM_PFX(TempRamInitParams)
; A131924: Row sums of triangle A131923.
; 1,4,10,20,36,62,106,184,328,602,1134,2180,4252,8374,16594,33008,65808,131378,262486,524668,1048996,2097614,4194810,8389160,16777816,33555082,67109566,134218484,268436268,536871782,1073742754,2147484640,4294968352,8589935714,17179870374,34359739628,68719478068,137438954878,274877908426,549755815448,1099511629416,2199023257274,4398046512910,8796093024100,17592186046396,35184372090902,70368744179826,140737488357584,281474976713008,562949953423762,1125899906845174,2251799813687900,4503599627373252,9007199254743854,18014398509484954,36028797018967048,72057594037931128,144115188075859178,288230376151715166,576460752303427028,1152921504606850636,2305843009213697734,4611686018427391810,9223372036854779840,18446744073709555776,36893488147419107522,73786976294838210886,147573952589676417484,295147905179352830548,590295810358705656542,1180591620717411308394,2361183241434822611960,4722366482869645218952,9444732965739290432794,18889465931478580860334,37778931862957161715268,75557863725914323424988,151115727451828646844278,302231454903657293682706,604462909807314587359408,1208925819614629174712656,2417851639229258349418994,4835703278458516698831510,9671406556917033397656380,19342813113834066795305956,38685626227668133590604942,77371252455336267181202746,154742504910672534362398184,309485009821345068724788888,618970019642690137449570122,1237940039285380274899132414,2475880078570760549798256820,4951760157141521099596505452,9903520314283042199193002534,19807040628566084398385996514,39614081257132168796771984288,79228162514264337593543959648,158456325028528675187087910178,316912650057057350374175811046,633825300114114700748351612588
; Closed form implemented below: a(n) = 2*(n + C(n,2)) + (2^n - 1) + 1 = n^2 + n + 2^n
; (checked: n=0 -> 1, n=1 -> 4, n=2 -> 10, n=3 -> 20).
mov $1,$0   ; $1 = n (saved for the power step)
mov $2,$0   ; $2 = n
bin $2,2    ; $2 = C(n,2)
add $0,$2   ; $0 = n + C(n,2)
mul $0,2    ; $0 = 2n + n*(n-1) = n^2 + n
mov $2,2    ; $2 = 2
pow $2,$1   ; $2 = 2^n
sub $2,1    ; $2 = 2^n - 1
add $0,$2   ; $0 = n^2 + n + 2^n - 1
add $0,1    ; $0 = n^2 + n + 2^n = a(n)
db 0 ; 312 DEX NO db 60, 40, 50, 95, 75, 85 ; hp atk def spd sat sdf db ELECTRIC, ELECTRIC ; type db 200 ; catch rate db 120 ; base exp db NO_ITEM, NO_ITEM ; items db GENDER_F50 ; gender ratio db 100 ; unknown 1 db 20 ; step cycles to hatch db 5 ; unknown 2 INCBIN "gfx/pokemon/hoenn/minun/front.dimensions" db 0, 0, 0, 0 ; padding db GROWTH_MEDIUM_FAST ; growth rate dn EGG_FAIRY, EGG_FAIRY ; egg groups ; tm/hm learnset tmhm ; end
;
;       Small C+ Long Library Functions
;
;       Divide 2 32 bit numbers
;
;       Hopefully this routine does work!
;
;       I think the use of ix is unavoidable in this case..unless you know
;       otherwise!
;
;       This is for unsigned quantities..separate routine for signed..
;
;       Replaced use of ix with bcbc'


	XLIB	l_long_div_u
	XDEF	L_LONG_DIVIDE0, L_LONG_DIVENTRY

; 32 bit division
; enter:
;	dehl = arg2
;	stack = arg1, ret
; exit:
;	dehl = arg1/arg2
;	de'hl'= arg1%arg2

.l_long_div_u
	ld	a,d			; arg2 == 0?  bail out rather than divide by zero
	or	e
	or	h
	or	l
	jr	z, divide0
	pop	af			; af = return address
	push	hl			; move arg2 (dehl) into dede'
	exx
	pop	de			; de' = former hl (low half of arg2)
	pop	bc			; bc'/bc = the two halves of arg1 popped from the stack
	ld	hl,0			; res = 0 (held in hlhl')
	exx
	pop	bc
	ld	hl,0
	push	af			; put the return address back
.entry
	ld	a,32			; 32 iterations -- one per quotient bit
	or	a			; clear carry before the first shift

; bcbc' = arg1
; hlhl' = res
; dede' = arg2
.l_long_div1
	exx
; arg1 <<= 1 (the previous quotient bit is shifted in via carry)
	rl	c
	rl	b
	exx
	rl	c
	rl	b
	exx
; res <<= 1
	adc	hl,hl
	exx
	adc	hl,hl
	exx
; res -= arg2
	sbc	hl,de
	exx
	sbc	hl,de
	jr	nc, l_long_div2
	exx
; res += arg2 (restore: the trial subtraction underflowed)
	add	hl,de
	exx
	adc	hl,de
.l_long_div2
	ccf				; carry = quotient bit for this iteration
	dec	a
	jp	nz, l_long_div1
	exx
; arg1 <<= 1 (shift in the final quotient bit)
	rl	c
	rl	b
	exx
	rl	c
	rl	b
; looking to return:
; dehl = quotient = arg1
; de'hl' = remainder = res
	push	hl
	exx
	pop	de
	push	bc
	exx
	pop	hl
	ld	e,c
	ld	d,b
	ret
.divide0
; divide-by-zero path: dehl (arg2) is already zero, so the quotient is 0;
; pop arg1 off the stack into the alternate set as the "remainder".
	exx
	pop	bc			; bc' = return address
	pop	hl
	pop	de
	push	bc			; restore the return address
	exx
	ret

DEFC L_LONG_DIVIDE0 = divide0 - l_long_div_u
DEFC L_LONG_DIVENTRY = entry - l_long_div_u
; Interrupt vector table fragment: only the timer2 vectors are wired up.
; NOTE(review): vector numbering assumes a device where timer2 compare is
; vector 0x03 -- confirm against the target MCU's datasheet.
.cseg
.org 0x03
rjmp timer2_comp_handler ; timer 2 compare
rjmp timer2_ovf_handler ; timer 2 overflow
;.org 0x05
;rjmp timer1_capt_handler ; timer 1 capture
;rjmp timer1_compa_handler ; timer 1 compare A
;rjmp timer1_compb_handler ; timer 1 compare B
;rjmp timer1_ovf_handler ; timer 1 overflow
;.org 0x09
;rjmp timer0_ovf_handler ; timer 0 overflow
; char *strncat(char * restrict s1, const char * restrict s2, size_t n)

SECTION code_clib
SECTION code_string

PUBLIC strncat

EXTERN asm_strncat

; C entry shim: load the three stacked arguments into registers for
; asm_strncat while leaving the caller's stack exactly as it was.
strncat:

   pop af                    ; af = return address
   pop bc                    ; first stacked argument
   pop hl                    ; second stacked argument
   pop de                    ; third stacked argument
                             ; (exact argument-to-register mapping follows the
                             ; z88dk convention expected by asm_strncat -- see
                             ; that routine for the register contract)

   push de                   ; push everything back in the same order,
   push hl                   ; restoring the stack for the caller
   push bc
   push af

   jp asm_strncat            ; tail-call: asm_strncat returns to our caller

; SDCC bridge for Classic
IF __CLASSIC

PUBLIC _strncat
defc _strncat = strncat

ENDIF
Map_58780: dc.w word_58786-Map_58780 dc.w word_58786-Map_58780 dc.w word_5879A-Map_58780 word_58786: dc.w 3 dc.b $F0, $F, 0, 0, $FF, $D0 dc.b $F0, $F, 0, $10, $FF, $F0 dc.b $F0, $F, $18, 0, 0, $10 word_5879A: dc.w 2 dc.b $F4, $E, 0, $20, $FF, $E0 dc.b $F4, $E, 0, $2C, 0, 0
db 204,136,0,204,136,204,238,238,0,136,0,238,238,0,0,136 db 238,238,238,238,204,0,0,0,0,238,0,238,204,238,136,0 db 204,204,204,204,238,204,238,0,0,0,0,0,238,204,238,238 db 238,238,238,204,136,0,136,204,0,204,204,136,238,0,238,204 db 0,136,238,0,0,238,238,238,0,136,0,0,204,0,0,204 db 136,0,238,136,238,136,0,0,204,136,204,238,136,136,204,204 db 136,204,204,238,136,238,238,204,0,0,136,136,238,204,204,0 db 136,204,238,136,238,0,204,204,204,238,136,0,0,204,204,238 db 238,0,204,0,0,204,204,238,204,0,136,204,204,0,204,0 db 136,136,204,136,238,0,0,204,136,204,136,238,238,238,238,238 db 136,204,0,204,0,136,204,204,238,0,238,136,238,136,204,204 db 136,136,0,0,204,238,0,204,238,136,238,238,204,238,204,0 db 204,0,0,0,204,204,136,136,136,204,136,0,238,0,204,136 db 204,136,136,136,136,136,204,204,0,238,204,204,238,238,0,204 db 0,204,136,136,238,0,238,204,204,136,0,136,136,204,238,0 db 0,136,204,204,238,238,238,0,238,204,0,204,136,204,136,136
; double copysign(double x, double y)

SECTION code_clib
SECTION code_fp_math48

PUBLIC am48_copysign

am48_copysign:

   ; Make sign of AC' the same as sign of AC
   ;
   ; enter  : AC' = double x
   ;          AC  = double y
   ;
   ; exit   : AC' = abs(x) * sgn(y)
   ;
   ; uses   : af, b'

   ld a,l                      ; l = exponent byte of AC (y); zero exponent
   or a                        ; means y == 0 (assumes math48 AC register
   jr z, zero                  ; layout -- confirm); sgn(0) is positive

   ld a,b                      ; b = sign/mantissa-top byte of y
   and $80                     ; a = sgn(y) (isolate the sign bit)

zero:                          ; here a = $80 if y < 0, else $00

   exx                         ; switch to AC' (x)
   res 7,b                     ; clear x's sign bit -> abs(x)
   or b                        ; merge y's sign bit into x's top byte
   ld b,a
   exx
   ret
// comment test (a) @R0 @R1 @R2 @R3 @R4 @R5 // comment test @R6 @R7 @R8 @R9 @R10 @R11 @R12 @R13 @R14 @R15 @SP @LCL // あああああ @THIS @THAT @SCREEN @KBD @a // comment test@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_.$: @ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890_.$: @zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz // てすと @0 (ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890_.$:) @24576 // テスト
;
; Created by Mateusz Stompór on 25/06/2019.
;

%include "source/iterator.asm"

global LLI_DEREFERENCE

section .text

; LLI_DEREFERENCE: rax = **rdi
; Double-dereference of the pointer passed in rdi (System V AMD64:
; first argument in rdi, result in rax).
LLI_DEREFERENCE:
	mov rax, [rdi]     ; rax = *rdi
	mov rax, [rax]     ; rax = **rdi
	ret
;; Licensed to the .NET Foundation under one or more agreements.
;; The .NET Foundation licenses this file to you under the MIT license.
;; See the LICENSE file in the project root for more information.

        .586
        .model  flat
        option  casemap:none
        .code

include AsmMacros.inc

ifdef FEATURE_CACHED_INTERFACE_DISPATCH

EXTERN @RhpResolveInterfaceMethodCacheMiss@12 : PROC

;; Macro that generates code to check a single cache entry.
;; On entry: ebx = instance's EEType, eax = cache block.
;; On a hit it restores ebx and tail-jumps to the cached target.
CHECK_CACHE_ENTRY macro entry
NextLabel textequ @CatStr( Attempt, %entry+1 )
        cmp     ebx, [eax + (OFFSETOF__InterfaceDispatchCache__m_rgEntries + (entry * 8))]
        jne     @F
        pop     ebx
        jmp     dword ptr [eax + (OFFSETOF__InterfaceDispatchCache__m_rgEntries + (entry * 8) + 4)]
@@:
endm

;; Macro that generates a stub consuming a cache with the given number of entries.
DEFINE_INTERFACE_DISPATCH_STUB macro entries

StubName textequ @CatStr( _RhpInterfaceDispatch, entries )

    StubName proc public

        ;; Check the instance here to catch null references. We're going to touch it again below (to cache
        ;; the EEType pointer), but that's after we've pushed ebx below, and taking an A/V there will
        ;; mess up the stack trace for debugging. We also don't have a spare scratch register (eax holds
        ;; the cache pointer and the push of ebx below is precisely so we can access a second register
        ;; to hold the EEType pointer).
        test    ecx, ecx
        je      RhpInterfaceDispatchNullReference

        ;; eax currently contains the indirection cell address. We need to update it to point to the cache
        ;; block instead.
        mov     eax, [eax + OFFSETOF__InterfaceDispatchCell__m_pCache]

        ;; Cache pointer is already loaded in the only scratch register we have so far, eax. We need
        ;; another scratch register to hold the instance type so save the value of ebx and use that.
        push    ebx

        ;; Load the EEType from the object instance in ebx.
        mov     ebx, [ecx]

        ;; Unroll one CHECK_CACHE_ENTRY per cache slot at assembly time.
CurrentEntry = 0
    while CurrentEntry lt entries
        CHECK_CACHE_ENTRY %CurrentEntry
CurrentEntry = CurrentEntry + 1
    endm

        ;; eax currently contains the cache block. We need to point it back to the
        ;; indirection cell using the back pointer in the cache block
        mov     eax, [eax + OFFSETOF__InterfaceDispatchCache__m_pCell]
        jmp     InterfaceDispatchCacheMiss

    StubName endp

    endm ;; DEFINE_INTERFACE_DISPATCH_STUB

;; Define all the stub routines we currently need.
DEFINE_INTERFACE_DISPATCH_STUB 1
DEFINE_INTERFACE_DISPATCH_STUB 2
DEFINE_INTERFACE_DISPATCH_STUB 4
DEFINE_INTERFACE_DISPATCH_STUB 8
DEFINE_INTERFACE_DISPATCH_STUB 16
DEFINE_INTERFACE_DISPATCH_STUB 32
DEFINE_INTERFACE_DISPATCH_STUB 64

;; Shared out of line helper used on cache misses.
;; Expects: ecx = instance, eax = indirection cell, and the caller's ebx
;; already pushed on the stack (done by the dispatch stubs above).
InterfaceDispatchCacheMiss proc

        ;; Push an ebp frame since it makes some of our later calculations easier.
        push    ebp
        mov     ebp, esp

        ;; Save argument registers while we call out to the C++ helper. Note that we depend on these registers
        ;; (which may contain GC references) being spilled before we build the PInvokeTransitionFrame below
        ;; due to the way we build a stack range to report to the GC conservatively during a collection.
        push    ecx
        push    edx

        ;; Build PInvokeTransitionFrame. This is only required if we end up resolving the interface method via
        ;; a callout to a managed ICastable method. In that instance we need to be able to cope with garbage
        ;; collections which in turn need to be able to walk the stack from the ICastable method, skip the
        ;; unmanaged runtime portions and resume walking at our caller. This frame provides both the means to
        ;; unwind to that caller and a place to spill callee saved registers in case they contain GC
        ;; references from the caller.

        ;; Calculate caller's esp: relative to ebp's current value we've pushed the old ebp, ebx and a return
        ;; address.
        lea     edx, [ebp + (3 * 4)]
        push    edx

        ;; Push callee saved registers. Note we've already pushed ebx but we need to do it here again so that
        ;; it is reported to the GC correctly if necessary. As such it's necessary to push the saved version
        ;; of ebx and make sure when we restore it we use this copy and discard the version that was initially
        ;; pushed (since its value may now be stale).
        push    edi
        push    esi
        mov     edx, [ebp + 04h]    ; Old EBX value (pushed by the dispatch stub before the jump here)
        push    edx

        ;; Push flags.
        push    PTFF_SAVE_ALL_PRESERVED + PTFF_SAVE_RSP

        ;; Leave space for the Thread* (stackwalker does not use this).
        push    0

        ;; The caller's ebp.
        push    [ebp]

        ;; The caller's eip.
        push    [ebp + 08h]

        ;; First argument is the instance we're dispatching on which is already in ecx.

        ;; Second argument is the dispatch data cell. We still have this in eax
        mov     edx, eax

        ;; The third argument is the address of the transition frame we build above. Currently it's at the top
        ;; of the stack so esp points to it.
        push    esp

        call    @RhpResolveInterfaceMethodCacheMiss@12

        ;; Recover callee-saved values from the transition frame in case a GC updated them.
        mov     ebx, [esp + 010h]
        mov     esi, [esp + 014h]
        mov     edi, [esp + 018h]

        ;; Restore real argument registers.
        mov     edx, [ebp - 08h]
        mov     ecx, [ebp - 04h]

        ;; Remove the transition and ebp frames from the stack.
        mov     esp, ebp
        pop     ebp

        ;; Discard the space where ebx was pushed on entry, its value is now potentially stale.
        add     esp, 4

        ;; Final target address is in eax.
        jmp     eax

InterfaceDispatchCacheMiss endp

;; Out of line helper used when we try to interface dispatch on a null pointer. Sets up the stack so the
;; debugger gives a reasonable stack trace.
RhpInterfaceDispatchNullReference proc public
        push    ebp
        mov     ebp, esp
        mov     ebx, [ecx]  ;; This should A/V
        int     3
RhpInterfaceDispatchNullReference endp

;; Initial dispatch on an interface when we don't have a cache yet.
RhpInitialInterfaceDispatch proc public
    ALTERNATE_ENTRY RhpInitialDynamicInterfaceDispatch

        ;; Mainly we just tail call to the cache miss helper. But this helper expects that ebx has been pushed
        ;; on the stack.
        push    ebx
        jmp     InterfaceDispatchCacheMiss

RhpInitialInterfaceDispatch endp

endif ;; FEATURE_CACHED_INTERFACE_DISPATCH

        end
#include <btrc/builtin/film_filter/box.h>

BTRC_BUILTIN_BEGIN

// Box filter: every film sample lands uniformly inside the unit pixel
// footprint centered on the origin.
CVec2f BoxFilter::sample(Sampler &sampler) const
{
    // Two independent 1D samples, each shifted from [0, 1) to [-0.5, 0.5).
    var offset_x = sampler.get1d() - 0.5f;
    var offset_y = sampler.get1d() - 0.5f;
    return CVec2f(offset_x, offset_y);
}

// The box filter has no parameters, so the node/context are unused.
RC<FilmFilter> BoxFilterCreator::create(RC<const factory::Node> node, factory::Context &context)
{
    return newRC<BoxFilter>();
}

BTRC_BUILTIN_END
; Sound-effect data for the two pulse channels; square_note arguments
; are (length, volume, fade, frequency) -- TODO confirm against the
; audio macro definitions.
SFX_Swap_3_Ch5:
	duty_cycle 2
	square_note 8, 14, 1, 1856
	sound_ret

SFX_Swap_3_Ch6:
	duty_cycle 2
	square_note 2, 0, 8, 0
	square_note 8, 11, 1, 1857
	sound_ret
;------------------------------------------------------------------------------
;
; Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
; Abstract:
;
;   Switch the stack from temporary memory to permanent memory.
;
;------------------------------------------------------------------------------

SECTION .text

;------------------------------------------------------------------------------
; VOID
; EFIAPI
; SecSwitchStack (
;   UINT32   TemporaryMemoryBase,
;   UINT32   PermanentMemoryBase
;   );
;
; Rebases esp and ebp by (PermanentMemoryBase - TemporaryMemoryBase),
; copying the saved registers and this function's return address to the
; permanent-memory copy of the stack first.
;------------------------------------------------------------------------------
global ASM_PFX(SecSwitchStack)
ASM_PFX(SecSwitchStack):
    ;
    ; Save four registers: eax, ebx, ecx, edx
    ;
    push  eax
    push  ebx
    push  ecx
    push  edx

    ;
    ; !!CAUTION!! this function address's is pushed into stack after
    ; migration of whole temporary memory, so need save it to permanent
    ; memory at first!
    ;

    mov   ebx, [esp + 20]          ; Save the first parameter (TemporaryMemoryBase)
    mov   ecx, [esp + 24]          ; Save the second parameter (PermanentMemoryBase)

    ;
    ; Save this function's return address into permanent memory at first.
    ; Then, Fixup the esp point to permanent memory
    ;
    mov   eax, esp
    sub   eax, ebx
    add   eax, ecx                 ; eax = esp rebased into permanent memory
    mov   edx, dword [esp]         ; copy pushed register's value to permanent memory
    mov   dword [eax], edx
    mov   edx, dword [esp + 4]
    mov   dword [eax + 4], edx
    mov   edx, dword [esp + 8]
    mov   dword [eax + 8], edx
    mov   edx, dword [esp + 12]
    mov   dword [eax + 12], edx
    mov   edx, dword [esp + 16]    ; Update this function's return address into permanent memory
    mov   dword [eax + 16], edx
    mov   esp, eax                 ; From now, esp is pointed to permanent memory

    ;
    ; Fixup the ebp point to permanent memory
    ;
    mov   eax, ebp
    sub   eax, ebx
    add   eax, ecx
    mov   ebp, eax                 ; From now, ebp is pointed to permanent memory

    pop   edx
    pop   ecx
    pop   ebx
    pop   eax
    ret
; A302058: Numbers that are not square pyramidal numbers.
; 2,3,4,6,7,8,9,10,11,12,13,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82
; LODA program: appears to advance the candidate value past every square
; pyramidal number at or below its position -- TODO confirm loop invariant.
add $0,1     ; switch to 1-based position
mov $1,$0
mov $3,1
lpb $1
  add $0,1   ; skip one extra value per pyramidal number passed
  add $3,1
  add $2,$3
  sub $1,$2
  trn $1,1   ; truncated subtraction: clamps at 0, which ends the loop
  add $2,$3
lpe
; A079861: a(n) is the number of occurrences of 7's in the palindromic compositions of 2*n-1, or also, the number of occurrences of 8's in the palindromic compositions of 2*n.
; 10,22,48,104,224,480,1024,2176,4608,9728,20480,43008,90112,188416,393216,819200,1703936,3538944,7340032,15204352,31457280,65011712,134217728,276824064,570425344,1174405120,2415919104,4966055936,10200547328,20937965568,42949672960,88046829568,180388626432,369367187456,755914244096,1546188226560,3161095929856,6459630813184,13194139533312,26938034880512,54975581388800,112150186033152,228698418577408,466192930177024,949978046398464,1935140464885760,3940649673949184,8022036836253696
; Closed form implemented below: a(n) = 2^n * (n + 10).
mov $1,2
pow $1,$0   ; $1 = 2^n
add $0,10
mul $1,$0   ; $1 = 2^n * (n + 10)
; SYNTAX TEST "Packages/User/x86_64 Assembly.tmbundle/Syntaxes/Nasm Assembly.sublime-syntax" ideal jumps p386 p486 p586 end ;^^^^^ invalid.deprecated ; ^^^^^ invalid.deprecated ; ^^^^ invalid.deprecated ; ^^^^ invalid.deprecated ; ^^^^ invalid.deprecated ; ^^^ invalid.deprecated bits 16 ;^^^^ support.function.directive use16 use32 use64 ;^^^^^ support.function.directive ; ^^^^^ support.function.directive ; ^^^^^ support.function.directive bits16 ;^^^^^^ - support.function.directive use8 ;^^^^ - support.function.directive default ;^^^^^^^ support.function.directive default rel ;^^^^^^^ support.function.directive ; ^^^ support.constant.directive default nonsense ;^^^^^^^ support.function.directive ; ^^^^^^^^ - support.constant.directive nobnd ;^^^^^ - support.constant.directive section .text ;^^^^^^^ support.function.directive ; ^^^^^ entity.name.section string.unquoted support.constant.section segment .arm ;^^^^^^^ support.function.directive ; ^^^^ entity.name.section string.unquoted %macro writefile 2+ [section custom] ; ^^^^^^ string.unquoted %%str: db %2 %%endstr: __SECT__ ; ^^^^^^^^ support.constant.directive mov dx,%%str mov cx,%%endstr-%%str mov bx,%1 mov ah,0x40 int 0x21 %endmacro absolute 0x1A ;^^^^^^^^ support.function.directive kbuf_chr resw 1 kbuf_free resw 1 kbuf resw 16 org 100h ; it's a .COM program jmp setup ; setup code comes last ; the resident part of the TSR goes here setup: ; now write the code that installs the TSR here absolute setup runtimevar1 resw 1 runtimevar2 resd 20 tsr_end: extern _printf ;^^^^^^ support.function.directive extern _sscanf,_fscanf extern _variable:wrt dgroup global _main ;^^^^^^ support.function.directive _main: ; some code global hashlookup:function, hashtable:data common intvar 4 ;^^^^^^ support.function.directive global intvar section .bss intvar resd 1 common commvar 4:near ; works in OBJ common intarray 100:4 ; works in ELF: 4 byte aligned CPU 8086 8086 ;^^^ support.function.directive ; ^^^^ support.constant.directive 
; ^^^^ - support.constant.directive CPU 186 ; ^^^ support.constant.directive CPU 286 ; ^^^ support.constant.directive CPU 386 ; ^^^ support.constant.directive CPU 486 ; ^^^ support.constant.directive CPU 586 ; ^^^ support.constant.directive CPU PENTIUM ; ^^^^^^^ support.constant.directive CPU 686 ; ^^^ support.constant.directive CPU PPRO ; ^^^^ support.constant.directive CPU P2 ; ^^ support.constant.directive CPU P3 ; ^^ support.constant.directive CPU KATMAI ; ^^^^^^ support.constant.directive CPU P4 ; ^^ support.constant.directive CPU WILLAMETTE ; ^^^^^^^^^^ support.constant.directive CPU PRESCOTT ; ^^^^^^^^ support.constant.directive CPU X64 ; ^^^ support.constant.directive CPU IA64 ; ^^^^ support.constant.directive 8086 186 286 386 486 586 PENTIUM 686 PPRO P2 P3 KATMAI P4 WILLAMETTE PRESCOTT X64 IA64 ;^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - support.constant.directive FLOAT DAZ DAZ ; Flush denormals to zero ; ^^^^^ support.function.directive ; ^^^ support.constant.directive ; ^^^ - support.constant.directive FLOAT NODAZ ; Do not flush denormals to zero (default) ; ^^^^^ support.constant.directive FLOAT NEAR ; Round to nearest (default) ; ^^^^ support.constant.directive FLOAT UP ; Round up (toward +Infinity) ; ^^ support.constant.directive FLOAT DOWN ; Round down (toward –Infinity) ; ^^^^ support.constant.directive FLOAT ZERO ; Round toward zero ; ^^^^ support.constant.directive FLOAT DEFAULT ; Restore default settings ; ^^^^^^^ support.constant.directive DAZ NODAZ NEAR UP DOWN ZERO DEFAULT ;^^^^^^^^^^^^^^^^^^^^^^^^^^^ - support.constant.directive ; ^^^^^^^ support.function.directive [warning +macro-params] ; enables warnings for warning-class. [warning -macro-selfref] ; disables warnings for warning-class. [warning *macro-defaults] ; restores warning-class to the original value, either the default value or as specified on the command line. 
; ^^^^^^^ support.function.directive ; ^ keyword.control.warning ; ^^^^^^^^^^^^^^ support.constant.directive.warning ; ^ - support.constant.directive.warning [ warning *macro-defaults ] ; restores warning-class to the original value, either the default value or as specified on the command line. ; ^^^^^^^ support.function.directive ; ^ keyword.control.warning ; ^^^^^^^^^^^^^^ support.constant.directive.warning ; ^ - support.constant.directive.warning [warning *invalid-warning] ; restores warning-class to the original value, either the default value or as specified on the command line. ; ^^^^^^^^^^^^^^^ invalid.illegal ; ^ - invalid.illegal [ warning *invalid-warning ] ; restores warning-class to the original value, either the default value or as specified on the command line. ; ^^^^^^^^^^^^^^^ invalid.illegal ; ^ - invalid.illegal [warning all] ; ^^^ support.constant.directive.warning [warning error] ; ^^^^^ support.constant.directive.warning [warning error=unknown-warning] ; ^^^^^ support.constant.directive.warning ; ^ keyword.operator ; ^^^^^^^^^^^^^^^ support.constant.directive.warning ; ^ - support.constant.directive.warning [ warning error=unknown-warning ] ; ^^^^^^^^^^^^^^^ support.constant.directive.warning ; ^ - support.constant.directive.warning [warning error=unknown-warning-as-error] ; ^^^^^^^^^^^^^^^^^^^^^^^^ invalid.illegal ; ^^^^^^^^^^^^^^^ - support.constant.directive.warning ; ^ - invalid.illegal [ warning error=unknown-warning-as-error ] ; ^^^^^^^^^^^^^^^^^^^^^^^^ invalid.illegal ; ^^^^^^^^^^^^^^^ - support.constant.directive.warning ; ^ - invalid.illegal warning all unknown-warning ;^^^^^^^^^^^^^^^^^^^^^^^^^^^ - support.function.directive - support.constant.directive
; A233904: a(2n) = a(n) - n, a(2n+1) = a(n) + n, with a(0)=0.
; 0,0,-1,1,-3,1,-2,4,-7,1,-4,6,-8,4,-3,11,-15,1,-8,10,-14,6,-5,17,-20,4,-9,17,-17,11,-4,26,-31,1,-16,18,-26,10,-9,29,-34,6,-15,27,-27,17,-6,40,-44,4,-21,29,-35,17,-10,44,-45,11,-18,40,-34,26,-5,57,-63,1,-32,34,-50,18,-17,53,-62,10,-27,47,-47,29,-10,68,-74,6,-35,47,-57,27,-16,70,-71,17,-28,62,-52,40,-7,87,-92,4,-45,53
; Unwinds the recurrence iteratively: $1 accumulates the +/- n terms
; while $0 is repeatedly halved; result is copied back to $0 at the end.
sub $1,$0
lpb $0
  lpb $0
    add $1,$0
    div $0,2
    mul $0,2   ; round $0 down to even so the inner loop body runs once per level -- TODO confirm
  lpe
  div $0,2
lpe
mov $0,$1
; A192967: Constant term of the reduction by x^2 -> x+1 of the polynomial p(n,x) defined at Comments.
; 1,0,2,4,9,17,31,54,92,154,255,419,685,1116,1814,2944,4773,7733,12523,20274,32816,53110,85947,139079,225049,364152,589226,953404,1542657,2496089,4038775,6534894,10573700,17108626,27682359,44791019,72473413,117264468,189737918,307002424,496740381,803742845,1300483267,2104226154,3404709464,5508935662,8913645171,14422580879,23336226097,37758807024,61095033170,98853840244,159948873465,258802713761,418751587279,677554301094,1096305888428,1773860189578,2870166078063,4644026267699,7514192345821,12158218613580,19672410959462,31830629573104,51503040532629,83333670105797,134836710638491,218170380744354,353007091382912,571177472127334,924184563510315,1495362035637719,2419546599148105,3914908634785896,6334455233934074
; Computes a(n) = A022086(n+1) - n - 2 via a call to the helper sequence.
add $0,1
mov $1,4
sub $1,$0                ; $1 = 3 - n
cal $0,22086 ; Fibonacci sequence beginning 0, 3.
add $1,$0                ; $1 = A022086(n+1) + 3 - n
sub $1,5                 ; $1 = A022086(n+1) - n - 2
; The constants for the Multiboot 2 header
MAGIC         equ 0xE85250D6   ; Multiboot 2 magic number
ARCHITECTURE  equ 0x0          ; 0 = i386 protected mode
CHECKSUM      equ -(MAGIC + ARCHITECTURE + HEADER_LENGTH)  ; fields must sum to 0 (mod 2^32)
HEADER_LENGTH equ multiboot_end - multiboot_start

; Declare a multiboot header that marks the program as a kernel. These are magic
; values that are documented in the multiboot standard. The bootloader will
; search for this signature in the first 8 KiB of the kernel file, aligned at a
; 32-bit boundary. The signature is in its own section so the header can be
; forced to be within the first 8 KiB of the kernel file.
section .multiboot
multiboot_start:
align 8
	dd MAGIC
	dd ARCHITECTURE
	dd HEADER_LENGTH
	dd CHECKSUM

	; MB2 tags are of the following format:
	; u16 type
	; u16 flags
	; u32 size
	;
	; They signal to the bootloader information to provide to the OS
	; or specific elements of the environment to set up.

	; Terminator tag (type 0, flags 0, size 8: marks end of tag list)
	dw 0x0
	dw 0x0
	dd 0x8
multiboot_end:

; The multiboot standard does not define the value of the stack pointer register
; (esp) and it is up to the kernel to provide a stack. This allocates room for a
; small stack by creating a symbol at the bottom of it, then allocating 16384
; bytes for it, and finally creating a symbol at the top. The stack grows
; downwards on x86. The stack is in its own section so it can be marked nobits,
; which means the kernel file is smaller because it does not contain an
; uninitialized stack. The stack on x86 must be 16-byte aligned according to the
; System V ABI standard and de-facto extensions. The compiler will assume the
; stack is properly aligned and failure to align the stack will result in
; undefined behavior.
section .bss
align 16
stack_bottom:
resb 16384 ; 16 KiB
stack_top:

; The linker script specifies _start as the entry point to the kernel and the
; bootloader will jump to this position once the kernel has been loaded. It
; doesn't make sense to return from this function as the bootloader is gone.
; Declare _start as a function symbol with the given symbol size.
section .text
global start:function (start.end - start)

; Kernel entry point: install the stack, call into the C kernel, and
; halt forever if it ever returns.
start:
	mov rsp, stack_top      ; stack grows downward from stack_top (reserved in .bss)

	; Pass the parameters to kernel_main by pushing them on the stack,
	; first parameter last.
	;push rbx
	;push rax

	extern kernel_main
	call kernel_main

	cli                     ; mask interrupts so hlt is not woken by them
.hang:	hlt                 ; halt the CPU; loop in case an NMI resumes execution
	jmp .hang
.end:
/*
 * INY.asm
 *
 * Created: 5/13/2018 4:12:18 PM
 *  Author: ROTP
 */

; Emulated 6502 INY instruction (increment Y register), implied
; addressing mode, running on an AVR host.
INY_implied: ;UNTESTED but highly likely to work.
	swapPCwithTEMPPC            ; macro from the emulator core -- TODO confirm semantics
	INC YR                      ; increment the emulated Y register
	updateNZfromREGISTER YR     ; refresh emulated N/Z flags from YR
	ADIW ZH:ZL, 1               ; advance the Z pointer by one -- presumably the fetch pointer; verify
	RET
; A036573: Size of maximal triangulation of an n-antiprism with regular polygonal base.
; 4,8,12,17,22,28,34,41,48,56,64,73,82,92,102,113,124,136,148,161,174,188,202,217,232,248,264,281,298,316,334,353,372,392,412,433,454,476,498,521,544,568,592,617,642,668,694,721,748,776,804,833,862,892,922,953,984,1016,1048,1081,1114,1148,1182,1217,1252,1288,1324,1361,1398,1436,1474,1513,1552,1592,1632,1673,1714,1756,1798,1841,1884,1928,1972,2017,2062,2108,2154,2201,2248,2296,2344,2393,2442,2492,2542,2593,2644,2696,2748,2801,2854,2908,2962,3017,3072,3128,3184,3241,3298,3356,3414,3473,3532,3592,3652,3713,3774,3836,3898,3961,4024,4088,4152,4217,4282,4348,4414,4481,4548,4616,4684,4753,4822,4892,4962,5033,5104,5176,5248,5321,5394,5468,5542,5617,5692,5768,5844,5921,5998,6076,6154,6233,6312,6392,6472,6553,6634,6716,6798,6881,6964,7048,7132,7217,7302,7388,7474,7561,7648,7736,7824,7913,8002,8092,8182,8273,8364,8456,8548,8641,8734,8828,8922,9017,9112,9208,9304,9401,9498,9596,9694,9793,9892,9992,10092,10193,10294,10396,10498,10601,10704,10808,10912,11017,11122,11228,11334,11441,11548,11656,11764,11873,11982,12092,12202,12313,12424,12536,12648,12761,12874,12988,13102,13217,13332,13448,13564,13681,13798,13916,14034,14153,14272,14392,14512,14633,14754,14876,14998,15121,15244,15368,15492,15617,15742,15868,15994,16121,16248,16376
; Closed form implemented below: a(n) = floor((n+7)^2 / 4) - 8.
add $0,7
mov $1,$0
pow $1,2    ; $1 = (n+7)^2
div $1,4    ; integer division = floor
sub $1,8
//alternating-characters.cpp
//Alternating Characters
//Weekly Challenges - Week 10
//Author: derekhh

#include<iostream>
#include<string>

using namespace std;

// Minimum number of deletions so that no two adjacent characters of s
// are equal: every maximal run of k identical characters must lose
// k - 1 of its members. Returns 0 for the empty string.
static int minDeletions(const string& s)
{
	int deletions = 0;
	int runLength = 0; // length of the current run of identical chars
	int len = (int)s.size();
	for (int i = 0; i < len; i++)
	{
		if (i == 0 || s[i] == s[i - 1])
			runLength++;
		else
		{
			// Run ended: keep one character, delete the rest.
			deletions += runLength - 1;
			runLength = 1;
		}
	}
	// Account for the final run (runLength - 1 is 0 for a run of one).
	if (runLength > 1)
		deletions += runLength - 1;
	return deletions;
}

// Reads t test cases, one string each, and prints the deletion count
// for every string.
int main()
{
	int t;
	cin >> t;
	while (t--)
	{
		string str;
		cin >> str;
		cout << minDeletions(str) << endl;
	}
	return 0;
}
; A129955: Third differences of A129952.
; 2,3,8,18,40,88,192,416,896,1920,4096,8704,18432,38912,81920,172032,360448,753664,1572864,3276800,6815744,14155776,29360128,60817408,125829120,260046848,536870912,1107296256,2281701376,4697620480,9663676416,19864223744,40802189312,83751862272,171798691840,352187318272,721554505728,1477468749824,3023656976384,6184752906240,12644383719424,25838523252736,52776558133248,107752139522048,219902325555200,448600744132608,914793674309632,1864771720708096,3799912185593856,7740561859543040
; Iterative evaluation: n loop passes update the pair ($1,$2) with $3 as
; scratch; the result is left in $1.
mov $1,2
mov $2,$0   ; $2 starts as the input n
mov $3,2
lpb $0,1
  sub $0,1
  add $3,$1
  add $1,$2
  mov $2,$3
lpe
; Place the included tile data at address $8000.
	org $8000

tilestest:
	INCBIN "tiles_nivel06.SR5.plet1"   ; raw binary tile data for level 06 -- format implied by extension; TODO confirm
; Animation script: "frame <id>, <arg>" entries (second argument is
; presumably the frame duration -- TODO confirm against the frame
; macro), terminated by endanim.
	frame 1, 14
	frame 2, 14
	frame 1, 17
	frame 2, 10
	frame 3, 04
	frame 4, 06
	frame 2, 08
	frame 3, 04
	frame 4, 08
	endanim
#pragma once #include <Register/Utility.hpp> namespace Kvasir { // Base Timer I/O Select namespace Btiosel47Btsel4567{ ///<I/O Select Register using Addr = Register::Address<0x40025300,0xffff00ff,0x00000000,unsigned>; ///I/O select bits for Ch.6/Ch.7 constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,12),Register::ReadWriteAccess,unsigned> sel67{}; ///I/O select bits for Ch.4/Ch.5 constexpr Register::FieldLocation<Addr,Register::maskFromRange(11,8),Register::ReadWriteAccess,unsigned> sel45{}; } }
; Menu data for the player-name selection screens: one header plus name
; list for the male player, one for the female player. Strings are
; '@'-terminated.
ChrisNameMenuHeader:
	db MENU_BACKUP_TILES ; flags
	menu_coords 0, 0, 10, TEXTBOX_Y - 1
	dw .MaleNames
	db 1 ; ????
	db 0 ; default option

.MaleNames:
	db STATICMENU_CURSOR | STATICMENU_PLACE_TITLE | STATICMENU_DISABLE_B ; flags
	db 5 ; items
	db "NEW NAME@"
MalePlayerNameArray:
	db "Yellow@"
	db "Ash@"
	db "Daniel@"
	db "Pascal@"
	db 2 ; displacement
	db " NAME @" ; title

KrisNameMenuHeader:
	db MENU_BACKUP_TILES ; flags
	menu_coords 0, 0, 10, TEXTBOX_Y - 1
	dw .FemaleNames
	db 1 ; ????
	db 0 ; default option

.FemaleNames:
	db STATICMENU_CURSOR | STATICMENU_PLACE_TITLE | STATICMENU_DISABLE_B ; flags
	db 5 ; items
	db "NEW NAME@"
FemalePlayerNameArray:
	db "Green@"
	db "Daisy@"
	db "Andrea@"
	db "Nadine@"
	db 2 ; displacement
	db " NAME @" ; title
; 16 bit dos assembly
; Prints a '$'-terminated greeting via DOS and exits.
.model small
.stack
.data
message db "Happy New Year 1396!", "$"   ; '$' terminates the string for DOS function 09h
.code
main proc
	mov ax,seg message    ; load the data segment into ds
	mov ds,ax
	mov ah,09             ; DOS int 21h/09h: print '$'-terminated string at ds:dx
	lea dx,message
	int 21h
	mov ax,4c00h          ; DOS int 21h/4Ch: terminate with exit code 0
	int 21h
main endp
end main
; Prints the uppercase alphabet, a CR/LF pair, then the lowercase
; alphabet, one character at a time via DOS int 21h function 02h.
stack segment
stack ends

data segment
data ends

code segment
	assume cs:code, ds:data, ss:stack

	; 26 characters starting at 'A' (41h)
	mov ah,02h
	mov cx,26
	mov dl,41h
lop:
	int 21h
	add dl,1h
	loop lop

	; line break: LF then CR
	mov dl,0ah
	mov ah,02h
	int 21h
	mov dl,0dh
	mov ah,02h
	int 21h

	; 26 characters starting at 'a' (61h)
	mov ah,02h
	mov cx,26
	mov dl,61h
lop1:
	int 21h
	add dl,1h
	loop lop1

	mov ah,4ch   ; DOS: terminate process
	int 21h
code ends
END
; A059018: Write 10*n in base 4; a(n) = sum of digits mod 4.
; 0,0,2,2,0,1,2,0,2,2,0,0,2,0,1,2,0,0,2,2,1,2,3,0,2,2,2,2,0,1,2,3,2,2,0,0,2,3,0,2,0,0,2,2,0,2,3,0,2,2,0,0,0,1,2,3,1,1,0,0,2,3,0,1,0,0,2,2,0,1,2,0,2,2,0,0,2,1,2,3,1,1,3,3,2,3,0,1,3,3,2,2,0,1,2,3,2,2,0,0
mul $0,10    ; work on m = 10*n
lpb $0
  add $1,$0  ; accumulate successive right-shifted copies of m
  div $0,4   ; drop one base-4 digit
lpe
lpb $1
  mod $1,4   ; final reduction mod 4 (equal to the digit sum mod 4)
lpe
; A002203: Companion Pell numbers: a(n) = 2*a(n-1) + a(n-2), a(0) = a(1) = 2.
; 2,2,6,14,34,82,198,478,1154,2786,6726,16238,39202,94642,228486,551614,1331714,3215042,7761798,18738638,45239074,109216786,263672646,636562078,1536796802,3710155682,8957108166,21624372014,52205852194,126036076402,304278004998,734592086398,1773462177794,4281516441986,10336495061766,24954506565518,60245508192802,145445522951122,351136554095046,847718631141214,2046573816377474,4940866263896162
; Iterates the recurrence n times; the current term lives in $1 and a
; running helper value in $2.
mov $1,2     ; a(0) = 2
lpb $0
  sub $0,1
  mov $3,$1  ; remember the old term
  add $1,$2  ; next term = old term + helper
  mul $3,2
  add $2,$3  ; helper += 2 * old term
lpe
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "futf8strchrfieldsearcher.h"
#include "fold.h"

using vespalib::Optimized;
using search::byte;
using search::QueryTerm;
using search::v16qi;

namespace vsm {

IMPLEMENT_DUPLICATE(FUTF8StrChrFieldSearcher);

FUTF8StrChrFieldSearcher::FUTF8StrChrFieldSearcher() :
    UTF8StrChrFieldSearcher(),
    _folded(4096)
{ }
FUTF8StrChrFieldSearcher::FUTF8StrChrFieldSearcher(FieldIdT fId) :
    UTF8StrChrFieldSearcher(fId),
    _folded(4096)
{ }
FUTF8StrChrFieldSearcher::~FUTF8StrChrFieldSearcher() {}

// Lowercase-fold sz bytes via the _foldLowCase table. Returns false as
// soon as a non-ASCII byte (>= 128) is seen, in which case the caller
// must fall back to the slower UTF-8 path.
bool FUTF8StrChrFieldSearcher::ansiFold(const char * toFold, size_t sz, char * folded)
{
  bool retval(true);
  for(size_t i=0; i < sz; i++) {
    byte c = toFold[i];
    if (c>=128) { retval = false; break; }  // non-ASCII: cannot ansi-fold
    folded[i] = FieldSearcher::_foldLowCase[c];
  }
  return retval;
}

// Fold with 16-byte-aligned SSE2 loads: handles the unaligned prefix and
// suffix with ansiFold and the aligned middle with sse2_foldaa.
// unalignedStart (out) is the offset into 'folded' where the result begins.
bool FUTF8StrChrFieldSearcher::lfoldaa(const char * toFold, size_t sz, char * folded, size_t & unalignedStart)
{
  bool retval(true);
  unalignedStart = (size_t(toFold) & 0xF);
  size_t unalignedsz = std::min(sz, (16 - unalignedStart) & 0xF);
  size_t foldedUnaligned = (size_t(folded) & 0xF);
  // Align the destination offset so source and destination share alignment.
  unalignedStart = (foldedUnaligned < unalignedStart) ? (unalignedStart-foldedUnaligned) : unalignedStart + 16 - foldedUnaligned;
  size_t alignedStart = unalignedStart+unalignedsz;
  size_t alignedsz = sz - unalignedsz;
  size_t alignsz16 = alignedsz & 0xFFFFFFF0;  // whole 16-byte chunks
  size_t rest = alignedsz - alignsz16;
  if (unalignedStart) {
    retval = ansiFold(toFold, unalignedsz, folded + unalignedStart);
  }
  if (alignsz16 && retval) {
    const byte * end = sse2_foldaa(reinterpret_cast<const byte *>(toFold+unalignedsz), alignsz16, reinterpret_cast<byte *>(folded+alignedStart));
    // sse2_foldaa bails early on non-ASCII; success means it consumed everything.
    retval = (end == reinterpret_cast<const byte *>(toFold+unalignedsz+alignsz16));
  }
  if(rest && retval) {
    retval = ansiFold(toFold + unalignedsz + alignsz16, rest, folded+alignedStart+alignsz16);
  }
  return retval;
}

// Fold with unaligned SSE2 loads (sse2_foldua); only the destination
// offset is adjusted. alignedStart (out) is the result offset in 'folded'.
bool FUTF8StrChrFieldSearcher::lfoldua(const char * toFold, size_t sz, char * folded, size_t & alignedStart)
{
  bool retval(true);
  alignedStart = 0xF - (size_t(folded + 0xF) % 0x10);
  size_t alignsz16 = sz & 0xFFFFFFF0;
  size_t rest = sz - alignsz16;
  if (alignsz16) {
    const byte * end = sse2_foldua(reinterpret_cast<const byte *>(toFold), alignsz16, reinterpret_cast<byte *>(folded+alignedStart));
    retval = (end == reinterpret_cast<const byte *>(toFold+alignsz16));
  }
  if(rest && retval) {
    retval = ansiFold(toFold + alignsz16, rest, folded+alignedStart+alignsz16);
  }
  return retval;
}

namespace {

// Advance past the current word to the start of the next word in a
// buffer where words are separated by '\0' bytes. Relies on the caller
// having appended the 00 01 00 sentinel pattern after the data so the
// scans below always terminate.
inline const char * advance(const char * n, const v16qi zero)
{
  uint32_t charMap = 0;
  unsigned zeroCountSum = 0;
  do { // find first '\0' character (the end of the word)
#ifndef __INTEL_COMPILER
    v16qi tmpCurrent = __builtin_ia32_loaddqu(n+zeroCountSum);
    v16qi tmp0 = __builtin_ia32_pcmpeqb128(tmpCurrent, reinterpret_cast<v16qi>(zero));
    charMap = __builtin_ia32_pmovmskb128(tmp0); // 1 in charMap equals to '\0' in input buffer
#else
#   warning "Intel's icc compiler does not like __builtin_ia32_xxxxx"
    abort();
#endif
    zeroCountSum += 16;
  } while (!charMap);
  int charCount = Optimized::lsbIdx(charMap); // number of word characters in last 16 bytes
  uint32_t zeroMap = ((~charMap) & 0xffff) >> charCount;

  int zeroCounter = Optimized::lsbIdx(zeroMap); // number of non-characters ('\0') in last 16 bytes
  int sum = zeroCountSum - 16 + charCount + zeroCounter;
  if (!zeroMap) { // only '\0' in last 16 bytes (no new word found)
    do { // find first word character (the next word)
#ifndef __INTEL_COMPILER
      v16qi tmpCurrent = __builtin_ia32_loaddqu(n+zeroCountSum);
      tmpCurrent = __builtin_ia32_pcmpgtb128(tmpCurrent, reinterpret_cast<v16qi>(zero));
      zeroMap = __builtin_ia32_pmovmskb128(tmpCurrent); // 1 in zeroMap equals to word character in input buffer
#else
#   warning "Intel's icc compiler does not like __builtin_ia32_xxxxx"
      abort();
#endif
      zeroCountSum += 16;
    } while(!zeroMap);
    zeroCounter = Optimized::lsbIdx(zeroMap);
    sum = zeroCountSum - 16 + zeroCounter;
  }
  return n + sum;
}

}

// Match a single query term against every '\0'-separated word in the
// folded buffer. Returns the number of words scanned; hits are recorded
// via addHit.
size_t FUTF8StrChrFieldSearcher::match(const char *folded, size_t sz, QueryTerm & qt)
{
  const v16qi _G_zero = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  termcount_t words(0);
  const char * term;
  termsize_t tsz = qt.term(term);
  const char *et=term+tsz;
  const char * n = folded;
  const char *e = n + sz;
  while (!*n) n++;  // skip leading separators; terminates thanks to the 01 sentinel
  while (true) {
    if (n>=e) break;
#if 0
    // Disabled SSE variant kept for reference: compares 16 bytes of the
    // word against a precomputed _qtlFast vector before falling back to
    // a byte loop for longer terms.
    v16qi current = __builtin_ia32_loaddqu(n);
    current = __builtin_ia32_pcmpeqb128(current, _qtlFast[0]);
    unsigned eqMap = __builtin_ia32_pmovmskb128(current);
    unsigned neqMap = ~eqMap;
    unsigned numEq = Optimized::lsbIdx(neqMap);
    /* if (eqMap)*/ {
      if (numEq >= 16) {
        const char *tt = term+16;
        const char *p = n+16;
        while ( (*tt == *p) && (tt < et)) { tt++; p++; numEq++; }
      }
      if ((numEq >= tsz) && (prefix() || qt.isPrefix() || !n[tsz])) {
        addHit(qt, words);
      }
    }
#else
    // Byte-wise compare of the term against the current word; a hit
    // requires the whole term to match and (unless prefix search) the
    // word to end exactly there.
    const char *tt = term;
    while ((tt < et) && (*tt == *n)) { tt++; n++; }
    if ((tt == et) && (prefix() || qt.isPrefix() || !*n)) {
      addHit(qt, words);
    }
#endif
    words++;
    n = advance(n, _G_zero);
  }
  return words;
}

// Same as above but for a list of query terms; every term is tested
// against every word. mintsz is unused in this implementation.
size_t FUTF8StrChrFieldSearcher::match(const char *folded, size_t sz, size_t mintsz, QueryTerm ** qtl, size_t qtlSize)
{
  (void) mintsz;
  const v16qi _G_zero = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  termcount_t words(0);
  const char * n = folded;
  const char *e = n + sz;
  while (!*n) n++;
  for( ; ; ) {
    if (n>=e) break;
#if 0
    // Disabled SSE variant, see single-term match() above.
    v16qi current = __builtin_ia32_loaddqu(n);
    for(size_t i=0; i < qtlSize; i++) {
      v16qi tmpEq = __builtin_ia32_pcmpeqb128(current, _qtlFast[i]);
      unsigned eqMap = __builtin_ia32_pmovmskb128(tmpEq);
      /* if (eqMap) */ {
        QueryTerm & qt = *qtl[i];
        unsigned neqMap = ~eqMap;
        unsigned numEq = Optimized::lsbIdx(neqMap);
        termsize_t tsz = qt.termLen();
        if (numEq >= 16) {
          const char *tt = qt.term() + 16;
          const char *et=tt+tsz;
          const char *p = n+16;
          while ( (*tt == *p) && (tt < et)) { tt++; p++; numEq++; }
        }
        if ((numEq >= tsz) && (prefix() || qt.isPrefix() || !n[tsz])) {
          addHit(qt, words);
        }
      }
    }
#else
    for(QueryTerm ** it=qtl, ** mt=qtl+qtlSize; it != mt; it++) {
      QueryTerm & qt = **it;
      const char * term;
      termsize_t tsz = qt.term(term);
      const char *et=term+tsz;
      const char *fnt;
      for (fnt = n; (term < et) && (*term == *fnt); term++, fnt++);
      if ((term == et) && (prefix() || qt.isPrefix() || !*fnt)) {
        addHit(qt, words);
      }
    }
#endif
    words++;
    n = advance(n, _G_zero);
  }
  return words;
}

// Fold the field into _folded and run the fast ASCII match; falls back
// to the generic UTF-8 searcher if the field contains non-ASCII bytes.
size_t FUTF8StrChrFieldSearcher::matchTerm(const FieldRef & f, QueryTerm & qt)
{
  // NOTE(review): reserve() does not set the vector's size; the writes
  // through &_folded[0] below target reserved-but-unconstructed storage.
  _folded.reserve(f.size()+16*3);  //Enable fulle xmm0 store
  size_t unalignedStart(0);
  bool ascii7Bit = lfoldua(f.c_str(), f.size(), &_folded[0], unalignedStart);
  if (ascii7Bit) {
    char * folded = &_folded[unalignedStart];
    /// Add the pattern 00 01 00 to avoid multiple eof tests of falling off the edge.
    folded[f.size()] = 0;
    folded[f.size()+1] = 0x01;
    memset(folded + f.size() + 2, 0, 16); // initialize padding data to avoid valgrind complaining about uninitialized values
    return match(folded, f.size(), qt);
    // NOTE(review): unreachable -- placed after the return statement.
    NEED_CHAR_STAT(addPureUsAsciiField(f.size()))
  } else {
    return UTF8StrChrFieldSearcher::matchTerm(f, qt);
  }
}

// Multi-term variant of matchTerm; same folding and fallback strategy.
size_t FUTF8StrChrFieldSearcher::matchTerms(const FieldRef & f, const size_t mintsz)
{
  _folded.reserve(f.size()+16*3);  //Enable fulle xmm0 store
  size_t unalignedStart(0);
  bool ascii7Bit = lfoldua(f.c_str(), f.size(), &_folded[0], unalignedStart);
  if (ascii7Bit) {
    char * folded = &_folded[unalignedStart];
    /// Add the pattern 00 01 00 to avoid multiple eof tests of falling off the edge.
    folded[f.size()] = 0;
    folded[f.size()+1] = 0x01;
    memset(folded + f.size() + 2, 0, 16); // initialize padding data to avoid valgrind complaining about uninitialized values
    return match(folded, f.size(), mintsz, &_qtl[0], _qtl.size());
    // NOTE(review): unreachable -- placed after the return statement.
    NEED_CHAR_STAT(addPureUsAsciiField(f.size()))
  } else {
    return UTF8StrChrFieldSearcher::matchTerms(f, mintsz);
  }
}

}
//////////////////////////////////////////////////////////////////////////////// // // VirtualJetProducer // ------------------ // // 04/21/2009 Philipp Schieferdecker <philipp.schieferdecker@cern.ch> //////////////////////////////////////////////////////////////////////////////// #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/ParameterSet/interface/ParameterSetDescription.h" #include "RecoJets/JetProducers/plugins/VirtualJetProducer.h" #include "RecoJets/JetProducers/interface/BackgroundEstimator.h" #include "RecoJets/JetProducers/interface/VirtualJetProducerHelper.h" #include "DataFormats/Common/interface/RefProd.h" #include "DataFormats/Common/interface/Ref.h" #include "DataFormats/Common/interface/RefVector.h" #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/EventSetup.h" #include "FWCore/Framework/interface/ESHandle.h" #include "FWCore/Utilities/interface/Exception.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/Utilities/interface/isFinite.h" #include "FWCore/Framework/interface/ConsumesCollector.h" #include "DataFormats/Common/interface/View.h" #include "DataFormats/Common/interface/Handle.h" #include "DataFormats/VertexReco/interface/Vertex.h" #include "DataFormats/VertexReco/interface/VertexFwd.h" #include "DataFormats/JetReco/interface/CaloJetCollection.h" #include "DataFormats/JetReco/interface/GenJetCollection.h" #include "DataFormats/JetReco/interface/PFJetCollection.h" #include "DataFormats/JetReco/interface/BasicJetCollection.h" #include "DataFormats/JetReco/interface/TrackJetCollection.h" #include "DataFormats/JetReco/interface/PFClusterJetCollection.h" #include "DataFormats/Candidate/interface/CandidateFwd.h" #include "DataFormats/Candidate/interface/LeafCandidate.h" #include "DataFormats/Math/interface/deltaR.h" #include "DataFormats/ParticleFlowCandidate/interface/PFCandidate.h" #include 
"fastjet/SISConePlugin.hh" #include "fastjet/CMSIterativeConePlugin.hh" #include "fastjet/ATLASConePlugin.hh" #include "fastjet/CDFMidPointPlugin.hh" #include <iostream> #include <memory> #include <algorithm> #include <limits> #include <cmath> #include <vdt/vdtMath.h> using namespace std; using namespace edm; namespace reco { namespace helper { struct GreaterByPtPseudoJet { bool operator()( const fastjet::PseudoJet & t1, const fastjet::PseudoJet & t2 ) const { return t1.perp2() > t2.perp2(); } }; } } //______________________________________________________________________________ const char *const VirtualJetProducer::JetType::names[] = { "BasicJet","GenJet","CaloJet","PFJet","TrackJet","PFClusterJet" }; //______________________________________________________________________________ VirtualJetProducer::JetType::Type VirtualJetProducer::JetType::byName(const string &name) { const char *const *pos = std::find(names, names + LastJetType, name); if (pos == names + LastJetType) { std::string errorMessage="Requested jetType not supported: "+name+"\n"; throw cms::Exception("Configuration",errorMessage); } return (Type)(pos-names); } void VirtualJetProducer::makeProduces( std::string alias, std::string tag ) { if ( writeCompound_ ) { produces<reco::BasicJetCollection>(); } if ( writeJetsWithConst_ ) { produces<reco::PFCandidateCollection>(tag).setBranchAlias(alias); produces<reco::PFJetCollection>(); } else { if (makeCaloJet(jetTypeE)) { produces<reco::CaloJetCollection>(tag).setBranchAlias(alias); } else if (makePFJet(jetTypeE)) { produces<reco::PFJetCollection>(tag).setBranchAlias(alias); } else if (makeGenJet(jetTypeE)) { produces<reco::GenJetCollection>(tag).setBranchAlias(alias); } else if (makeTrackJet(jetTypeE)) { produces<reco::TrackJetCollection>(tag).setBranchAlias(alias); } else if (makePFClusterJet(jetTypeE)) { produces<reco::PFClusterJetCollection>(tag).setBranchAlias(alias); } else if (makeBasicJet(jetTypeE)) { 
produces<reco::BasicJetCollection>(tag).setBranchAlias(alias);
    }
  }
}

////////////////////////////////////////////////////////////////////////////////
// construction / destruction
////////////////////////////////////////////////////////////////////////////////

//______________________________________________________________________________
// Reads every configuration parameter, builds the fastjet jet definition and
// (optionally) the area/rho machinery, and registers the output products.
VirtualJetProducer::VirtualJetProducer(const edm::ParameterSet& iConfig)
{
  // --- generic module / input configuration ---
  moduleLabel_   = iConfig.getParameter<string>       ("@module_label");
  src_           = iConfig.getParameter<edm::InputTag>("src");
  srcPVs_        = iConfig.getParameter<edm::InputTag>("srcPVs");
  jetType_       = iConfig.getParameter<string>       ("jetType");
  jetAlgorithm_  = iConfig.getParameter<string>       ("jetAlgorithm");
  rParam_        = iConfig.getParameter<double>       ("rParam");
  inputEtMin_    = iConfig.getParameter<double>       ("inputEtMin");
  inputEMin_     = iConfig.getParameter<double>       ("inputEMin");
  jetPtMin_      = iConfig.getParameter<double>       ("jetPtMin");
  doPVCorrection_= iConfig.getParameter<bool>         ("doPVCorrection");
  doAreaFastjet_ = iConfig.getParameter<bool>         ("doAreaFastjet");
  doRhoFastjet_  = iConfig.getParameter<bool>         ("doRhoFastjet");
  jetCollInstanceName_ = iConfig.getParameter<string> ("jetCollInstanceName");
  doPUOffsetCorr_ = iConfig.getParameter<bool>        ("doPUOffsetCorr");
  puSubtractorName_ = iConfig.getParameter<string>    ("subtractorName");
  useExplicitGhosts_ = iConfig.getParameter<bool>     ("useExplicitGhosts"); // use explicit ghosts in the fastjet clustering sequence?
  doAreaDiskApprox_ = iConfig.getParameter<bool>      ("doAreaDiskApprox");
  voronoiRfact_     = iConfig.getParameter<double>    ("voronoiRfact");      // Voronoi-based area calculation allows for an empirical scale factor
  rhoEtaMax_ = iConfig.getParameter<double>           ("Rho_EtaMax");
  // do fastjet area / rho calculation? => accept corresponding parameters
  ghostEtaMax_ = iConfig.getParameter<double>         ("Ghost_EtaMax");
  activeAreaRepeats_ = iConfig.getParameter<int>      ("Active_Area_Repeats");
  ghostArea_ = iConfig.getParameter<double>           ("GhostArea");
  restrictInputs_ = iConfig.getParameter<bool>        ("restrictInputs");    // restrict inputs to first "maxInputs" towers?
  maxInputs_      = iConfig.getParameter<unsigned int>("maxInputs");
  writeCompound_ = iConfig.getParameter<bool>         ("writeCompound");     // Check to see if we are writing compound jets for substructure and jet grooming
  writeJetsWithConst_ = iConfig.getParameter<bool>("writeJetsWithConst");    // write subtracted jet constituents
  doFastJetNonUniform_ = iConfig.getParameter<bool>   ("doFastJetNonUniform");
  puCenters_ = iConfig.getParameter<vector<double> >("puCenters");
  puWidth_ = iConfig.getParameter<double>("puWidth");
  nExclude_ = iConfig.getParameter<unsigned int>("nExclude");
  useDeterministicSeed_ = iConfig.getParameter<bool>  ("useDeterministicSeed");
  minSeed_              = iConfig.getParameter<unsigned int>("minSeed");
  verbosity_ = iConfig.getParameter<int>("verbosity");
  // NOTE(review): std::auto_ptr is deprecated/removed in modern C++; the member
  // type is declared elsewhere, so it cannot be migrated from this block alone.
  anomalousTowerDef_ = auto_ptr<AnomalousTower>(new AnomalousTower(iConfig));

  // --- consumes declarations; the three candidate tokens all read the same
  // "src" tag and are tried in turn in produce() ---
  input_vertex_token_ = consumes<reco::VertexCollection>(srcPVs_);
  input_candidateview_token_ = consumes<reco::CandidateView>(src_);
  input_candidatefwdptr_token_ = consumes<vector<edm::FwdPtr<reco::PFCandidate> > >(iConfig.getParameter<edm::InputTag>("src"));
  input_packedcandidatefwdptr_token_ = consumes<vector<edm::FwdPtr<pat::PackedCandidate> > >(iConfig.getParameter<edm::InputTag>("src"));

  //
  // additional parameters to think about:
  // - overlap threshold (set to 0.75 for the time being)
  // - p parameter for generalized kT (set to -2 for the time being)
  // - fastjet PU subtraction parameters (not yet considered)
  //
  if (jetAlgorithm_=="Kt")
    fjJetDefinition_= JetDefPtr(new fastjet::JetDefinition(fastjet::kt_algorithm,rParam_));
  else if (jetAlgorithm_=="CambridgeAachen")
    fjJetDefinition_= JetDefPtr(new
fastjet::JetDefinition(fastjet::cambridge_algorithm,rParam_) );
  else if (jetAlgorithm_=="AntiKt")
    fjJetDefinition_= JetDefPtr( new fastjet::JetDefinition(fastjet::antikt_algorithm,rParam_) );
  else if (jetAlgorithm_=="GeneralizedKt")
    // p = -2 hard-coded for generalized kT (see the parameter note above).
    fjJetDefinition_= JetDefPtr( new fastjet::JetDefinition(fastjet::genkt_algorithm,rParam_,-2) );
  else if (jetAlgorithm_=="SISCone") {
    // overlap threshold 0.75 hard-coded (see the parameter note above).
    fjPlugin_ = PluginPtr( new fastjet::SISConePlugin(rParam_,0.75,0,0.0,false,fastjet::SISConePlugin::SM_pttilde) );
    fjJetDefinition_= JetDefPtr( new fastjet::JetDefinition(&*fjPlugin_) );
  }
  else if (jetAlgorithm_=="IterativeCone") {
    fjPlugin_ = PluginPtr(new fastjet::CMSIterativeConePlugin(rParam_,1.0));
    fjJetDefinition_= JetDefPtr(new fastjet::JetDefinition(&*fjPlugin_));
  }
  else if (jetAlgorithm_=="CDFMidPoint") {
    fjPlugin_ = PluginPtr(new fastjet::CDFMidPointPlugin(rParam_,0.75));
    fjJetDefinition_= JetDefPtr(new fastjet::JetDefinition(&*fjPlugin_));
  }
  else if (jetAlgorithm_=="ATLASCone") {
    fjPlugin_ = PluginPtr(new fastjet::ATLASConePlugin(rParam_));
    fjJetDefinition_= JetDefPtr(new fastjet::JetDefinition(&*fjPlugin_));
  }
  else {
    throw cms::Exception("Invalid jetAlgorithm") <<"Jet algorithm for VirtualJetProducer is invalid, Abort!\n";
  }

  jetTypeE=JetType::byName(jetType_);

  // Pile-up offset correction: instantiate the configured subtractor, or the
  // default PileUpSubtractor when no name is given.
  if ( doPUOffsetCorr_ ) {
    if(puSubtractorName_.empty()){
      LogWarning("VirtualJetProducer") << "Pile Up correction on; however, pile up type is not specified. Using default... \n";
      subtractor_ = boost::shared_ptr<PileUpSubtractor>(new PileUpSubtractor(iConfig, consumesCollector()));
    }
    else
      subtractor_ = boost::shared_ptr<PileUpSubtractor>( PileUpSubtractorFactory::get()->create( puSubtractorName_, iConfig, consumesCollector()));
  }

  // do approximate disk-based area calculation => warn if conflicting request
  if (doAreaDiskApprox_ && doAreaFastjet_)
    throw cms::Exception("Conflicting area calculations") << "Both the calculation of jet area via fastjet and via an analytical disk approximation have been requested. Please decide on one.\n";

  // Area/rho machinery: voronoiRfact_ <= 0 selects ghosted active areas;
  // otherwise the Voronoi-based area (configured elsewhere) is used.
  if ( doAreaFastjet_ || doRhoFastjet_ ) {
    if (voronoiRfact_ <= 0) {
      fjActiveArea_ = ActiveAreaSpecPtr(new fastjet::GhostedAreaSpec(ghostEtaMax_,activeAreaRepeats_,ghostArea_));
      fjActiveArea_->set_fj2_placement(true);
      if ( !useExplicitGhosts_ ) {
        fjAreaDefinition_ = AreaDefinitionPtr( new fastjet::AreaDefinition(fastjet::active_area, *fjActiveArea_ ) );
      } else {
        fjAreaDefinition_ = AreaDefinitionPtr( new fastjet::AreaDefinition(fastjet::active_area_explicit_ghosts, *fjActiveArea_ ) );
      }
    }
    fjRangeDef_ = RangeDefPtr( new fastjet::RangeDefinition(rhoEtaMax_) );
  }

  if( ( doFastJetNonUniform_ ) && ( puCenters_.size() == 0 ) )
    throw cms::Exception("doFastJetNonUniform") << "Parameter puCenters for doFastJetNonUniform is not defined." << std::endl;

  // make the "produces" statements
  makeProduces( moduleLabel_, jetCollInstanceName_ );
  produces<vector<double> >("rhos");
  produces<vector<double> >("sigmas");
  produces<double>("rho");
  produces<double>("sigma");
}

//______________________________________________________________________________
VirtualJetProducer::~VirtualJetProducer()
{
}

////////////////////////////////////////////////////////////////////////////////
// implementation of member functions
////////////////////////////////////////////////////////////////////////////////

//______________________________________________________________________________
// Event entry point: gathers inputs, runs the clustering algorithm and writes
// all configured output products.
void VirtualJetProducer::produce(edm::Event& iEvent,const edm::EventSetup& iSetup)
{
  // If requested, set the fastjet random seed to a deterministic function
  // of the run/lumi/event.
  // NOTE!!! The fastjet random number sequence is a global singleton.
  // Thus, we have to create an object and get access to the global singleton
  // in order to change it.
if ( useDeterministicSeed_ ) {
    fastjet::GhostedAreaSpec gas;
    std::vector<int> seeds(2);
    // Seeds derived from run and event numbers so results are reproducible;
    // minSeed_ provides a configurable floor.
    unsigned int runNum_uint = static_cast <unsigned int> (iEvent.id().run());
    unsigned int evNum_uint = static_cast <unsigned int> (iEvent.id().event());
    seeds[0] = std::max(runNum_uint,minSeed_ + 3) + 3 * evNum_uint;
    seeds[1] = std::max(runNum_uint,minSeed_ + 5) + 5 * evNum_uint;
    gas.set_random_status(seeds);
  }

  LogDebug("VirtualJetProducer") << "Entered produce\n";
  //determine signal vertex
  vertex_=reco::Jet::Point(0,0,0);
  // PV correction is only meaningful for calo and PF jets; take the first
  // vertex of the collection as the signal vertex.
  if ( (makeCaloJet(jetTypeE) || makePFJet(jetTypeE)) &&doPVCorrection_) {
    LogDebug("VirtualJetProducer") << "Adding PV info\n";
    edm::Handle<reco::VertexCollection> pvCollection;
    iEvent.getByToken(input_vertex_token_ , pvCollection);
    if (pvCollection->size()>0) vertex_=pvCollection->begin()->position();
  }

  // For Pileup subtraction using offset correction:
  // set up geometry map
  if ( doPUOffsetCorr_ ) {
    subtractor_->setupGeometryMap(iEvent, iSetup);
  }

  // clear data
  LogDebug("VirtualJetProducer") << "Clear data\n";
  fjInputs_.clear();
  fjJets_.clear();
  inputs_.clear();

  // get inputs and convert them to the fastjet format (fastjet::PseudoJet).
  // Three input shapes are tried in order: a CandidateView, a vector of
  // FwdPtr<PFCandidate>, and a vector of FwdPtr<pat::PackedCandidate>.
  edm::Handle<reco::CandidateView> inputsHandle;
  edm::Handle< std::vector<edm::FwdPtr<reco::PFCandidate> > > pfinputsHandleAsFwdPtr;
  edm::Handle< std::vector<edm::FwdPtr<pat::PackedCandidate> > > packedinputsHandleAsFwdPtr;

  bool isView = iEvent.getByToken(input_candidateview_token_, inputsHandle);
  if ( isView ) {
    // Empty input: still write (empty) outputs so downstream modules find products.
    if ( inputsHandle->size() == 0) {
      output( iEvent, iSetup );
      return;
    }
    for (size_t i = 0; i < inputsHandle->size(); ++i) {
      inputs_.push_back(inputsHandle->ptrAt(i));
    }
  } else {
    bool isPF = iEvent.getByToken(input_candidatefwdptr_token_, pfinputsHandleAsFwdPtr);
    if ( isPF ) {
      if ( pfinputsHandleAsFwdPtr->size() == 0) {
        output( iEvent, iSetup );
        return;
      }
      for (size_t i = 0; i < pfinputsHandleAsFwdPtr->size(); ++i) {
        // Prefer the forward ptr; fall back to the back ptr when unavailable.
        if ( (*pfinputsHandleAsFwdPtr)[i].ptr().isAvailable() ) {
          inputs_.push_back( (*pfinputsHandleAsFwdPtr)[i].ptr() );
        }
        else if ( (*pfinputsHandleAsFwdPtr)[i].backPtr().isAvailable() ) {
          inputs_.push_back( (*pfinputsHandleAsFwdPtr)[i].backPtr() );
        }
      }
    } else {
      iEvent.getByToken(input_packedcandidatefwdptr_token_, packedinputsHandleAsFwdPtr);
      if ( packedinputsHandleAsFwdPtr->size() == 0) {
        output( iEvent, iSetup );
        return;
      }
      for (size_t i = 0; i < packedinputsHandleAsFwdPtr->size(); ++i) {
        if ( (*packedinputsHandleAsFwdPtr)[i].ptr().isAvailable() ) {
          inputs_.push_back( (*packedinputsHandleAsFwdPtr)[i].ptr() );
        }
        else if ( (*packedinputsHandleAsFwdPtr)[i].backPtr().isAvailable() ) {
          inputs_.push_back( (*packedinputsHandleAsFwdPtr)[i].backPtr() );
        }
      }
    }
  }
  LogDebug("VirtualJetProducer") << "Got inputs\n";

  // Convert candidates to fastjet::PseudoJets.
  // Also correct to Primary Vertex. Will modify fjInputs_
  // and use inputs_
  fjInputs_.reserve(inputs_.size());
  inputTowers();
  LogDebug("VirtualJetProducer") << "Inputted towers\n";

  // For Pileup subtraction using offset correction:
  // Subtract pedestal.
  if ( doPUOffsetCorr_ ) {
    subtractor_->setDefinition(fjJetDefinition_);
    subtractor_->reset(inputs_,fjInputs_,fjJets_);
    subtractor_->calculatePedestal(fjInputs_);
    subtractor_->subtractPedestal(fjInputs_);
    LogDebug("VirtualJetProducer") << "Subtracted pedestal\n";
  }
  // Run algorithm. Will modify fjJets_ and allocate fjClusterSeq_.
// This will use fjInputs_ runAlgorithm( iEvent, iSetup ); // if ( doPUOffsetCorr_ ) { // subtractor_->setAlgorithm(fjClusterSeq_); // } LogDebug("VirtualJetProducer") << "Ran algorithm\n"; // For Pileup subtraction using offset correction: // Now we find jets and need to recalculate their energy, // mark towers participated in jet, // remove occupied towers from the list and recalculate mean and sigma // put the initial towers collection to the jet, // and subtract from initial towers in jet recalculated mean and sigma of towers if ( doPUOffsetCorr_ ) { LogDebug("VirtualJetProducer") << "Do PUOffsetCorr\n"; vector<fastjet::PseudoJet> orphanInput; subtractor_->calculateOrphanInput(orphanInput); subtractor_->calculatePedestal(orphanInput); subtractor_->offsetCorrectJets(); } // Write the output jets. // This will (by default) call the member function template // "writeJets", but can be overridden. // this will use inputs_ output( iEvent, iSetup ); LogDebug("VirtualJetProducer") << "Wrote jets\n"; // Clear the work vectors so that memory is free for other modules. // Use the trick of swapping with an empty vector so that the memory // is actually given back rather than silently kept. decltype(fjInputs_)().swap(fjInputs_); decltype(fjJets_)().swap(fjJets_); decltype(inputs_)().swap(inputs_); return; } //______________________________________________________________________________ void VirtualJetProducer::inputTowers( ) { auto inBegin = inputs_.begin(), inEnd = inputs_.end(), i = inBegin; for (; i != inEnd; ++i ) { auto const & input = **i; // std::cout << "CaloTowerVI jets " << input->pt() << " " << input->et() << ' '<< input->energy() << ' ' << (isAnomalousTower(input) ? " bad" : " ok") << std::endl; if (edm::isNotFinite(input.pt())) continue; if (input.et() <inputEtMin_) continue; if (input.energy()<inputEMin_) continue; if (isAnomalousTower(*i)) continue; // Change by SRR : this is no longer an error nor warning, this can happen with PU mitigation algos. 
// Also switch to something more numerically safe. (VI: 10^-42GeV????) if (input.pt() < 100 * std::numeric_limits<double>::epsilon() ) { continue; } if (makeCaloJet(jetTypeE)&&doPVCorrection_) { const CaloTower & tower = dynamic_cast<const CaloTower &>(input); auto const & ct = tower.p4(vertex_); // very expensive as computed in eta/phi fjInputs_.emplace_back(ct.px(),ct.py(),ct.pz(),ct.energy()); //std::cout << "tower:" << *tower << '\n'; } else { /* if(makePFJet(jetTypeE)) { reco::PFCandidate& pfc = (reco::PFCandidate&)input; std::cout << "PF cand:" << pfc << '\n'; } */ fjInputs_.emplace_back(input.px(),input.py(),input.pz(), input.energy()); } fjInputs_.back().set_user_index(i - inBegin); } if ( restrictInputs_ && fjInputs_.size() > maxInputs_ ) { reco::helper::GreaterByPtPseudoJet pTComparator; std::sort(fjInputs_.begin(), fjInputs_.end(), pTComparator); fjInputs_.resize(maxInputs_); edm::LogWarning("JetRecoTooManyEntries") << "Too many inputs in the event, limiting to first " << maxInputs_ << ". Output is suspect."; } } //______________________________________________________________________________ bool VirtualJetProducer::isAnomalousTower(reco::CandidatePtr input) { if (!makeCaloJet(jetTypeE)) return false; else return (*anomalousTowerDef_)(*input); } //------------------------------------------------------------------------------ // This is pure virtual. 
//______________________________________________________________________________
// void VirtualJetProducer::runAlgorithm( edm::Event & iEvent, edm::EventSetup const& iSetup,
//                                        std::vector<edm::Ptr<reco::Candidate> > const & inputs_);

//______________________________________________________________________________
// Attaches the original input candidates (looked up via PseudoJet user_index)
// as daughters of the given jet. Indices outside inputs_ (e.g. ghosts) are skipped.
void VirtualJetProducer::copyConstituents(const vector<fastjet::PseudoJet>& fjConstituents, reco::Jet* jet)
{
  for (unsigned int i=0;i<fjConstituents.size();++i) {
    int index = fjConstituents[i].user_index();
    if ( index >= 0 && static_cast<unsigned int>(index) < inputs_.size() )
      jet->addDaughter(inputs_[index]);
  }
}

//______________________________________________________________________________
// Maps PseudoJet constituents back to CandidatePtrs via user_index; entries
// with out-of-range indices (e.g. ghosts) are dropped.
vector<reco::CandidatePtr>
VirtualJetProducer::getConstituents(const vector<fastjet::PseudoJet>&fjConstituents)
{
  vector<reco::CandidatePtr> result;
  result.reserve(fjConstituents.size()/2);
  for (unsigned int i=0;i<fjConstituents.size();i++) {
    auto index = fjConstituents[i].user_index();
    if ( index >= 0 && static_cast<unsigned int>(index) < inputs_.size() ) {
      result.emplace_back(inputs_[index]);
    }
  }
  return result;
}

//_____________________________________________________________________________
// Dispatches to the appropriate templated writer according to the configured
// output mode and jet type.
void VirtualJetProducer::output(edm::Event & iEvent, edm::EventSetup const& iSetup)
{
  // Write jets and constituents. Will use fjJets_, inputs_
  // and fjClusterSeq_

  if ( writeCompound_ ) {
    // Write jets and subjets
    switch( jetTypeE ) {
    case JetType::CaloJet :
      writeCompoundJets<reco::CaloJet>( iEvent, iSetup );
      break;
    case JetType::PFJet :
      writeCompoundJets<reco::PFJet>( iEvent, iSetup );
      break;
    case JetType::GenJet :
      writeCompoundJets<reco::GenJet>( iEvent, iSetup );
      break;
    case JetType::BasicJet :
      writeCompoundJets<reco::BasicJet>( iEvent, iSetup );
      break;
    default:
      throw cms::Exception("InvalidInput") << "invalid jet type in CompoundJetProducer\n";
      break;
    };
  } else if ( writeJetsWithConst_ ) {
    // Write jets and new constituents.
    writeJetsWithConstituents<reco::PFJet>( iEvent, iSetup );
  } else {
    switch( jetTypeE ) {
    case JetType::CaloJet :
      writeJets<reco::CaloJet>( iEvent, iSetup);
      break;
    case JetType::PFJet :
      writeJets<reco::PFJet>( iEvent, iSetup);
      break;
    case JetType::GenJet :
      writeJets<reco::GenJet>( iEvent, iSetup);
      break;
    case JetType::TrackJet :
      writeJets<reco::TrackJet>( iEvent, iSetup);
      break;
    case JetType::PFClusterJet :
      writeJets<reco::PFClusterJet>( iEvent, iSetup);
      break;
    case JetType::BasicJet :
      writeJets<reco::BasicJet>( iEvent, iSetup);
      break;
    default:
      throw cms::Exception("InvalidInput") << "invalid jet type in VirtualJetProducer\n";
      break;
    };
  }
}

namespace {
// Debug helper: extracts the stored tower area for calo jets; 0 for all other types.
template< typename T >
struct Area { static float get(T const &) {return 0;}};
template<>
struct Area<reco::CaloJet>{
  static float get(reco::CaloJet const & jet) {
    return jet.getSpecific().mTowersArea;
  }
};
}

// Default jet writer: computes rho/sigma (optionally), fills the jet objects,
// computes per-jet areas, and puts everything into the event.
template< typename T >
void VirtualJetProducer::writeJets( edm::Event & iEvent, edm::EventSetup const& iSetup )
{
  // std::cout << "writeJets " << typeid(T).name()
  //           << (doRhoFastjet_ ? " doRhoFastjet " : "")
  //           << (doAreaFastjet_ ? " doAreaFastjet " : "")
  //           << (doAreaDiskApprox_ ? " doAreaDiskApprox " : "")
  //           << std::endl;

  if (doRhoFastjet_) {
    // declare jet collection without the two jets,
    // for unbiased background estimation.
std::vector<fastjet::PseudoJet> fjexcluded_jets; fjexcluded_jets=fjJets_; if(fjexcluded_jets.size()>2) fjexcluded_jets.resize(nExclude_); if(doFastJetNonUniform_){ auto rhos = std::make_unique<std::vector<double>>(); auto sigmas = std::make_unique<std::vector<double>>(); int nEta = puCenters_.size(); rhos->reserve(nEta); sigmas->reserve(nEta); fastjet::ClusterSequenceAreaBase const* clusterSequenceWithArea = dynamic_cast<fastjet::ClusterSequenceAreaBase const *> ( &*fjClusterSeq_ ); if (clusterSequenceWithArea ==nullptr ){ if (fjJets_.size() > 0) { throw cms::Exception("LogicError")<<"fjClusterSeq is not initialized while inputs are present\n "; } } else { for(int ie = 0; ie < nEta; ++ie){ double eta = puCenters_[ie]; double etamin=eta-puWidth_; double etamax=eta+puWidth_; fastjet::RangeDefinition range_rho(etamin,etamax); fastjet::BackgroundEstimator bkgestim(*clusterSequenceWithArea,range_rho); bkgestim.set_excluded_jets(fjexcluded_jets); rhos->push_back(bkgestim.rho()); sigmas->push_back(bkgestim.sigma()); } } iEvent.put(std::move(rhos),"rhos"); iEvent.put(std::move(sigmas),"sigmas"); }else{ auto rho = std::make_unique<double>(0.0); auto sigma = std::make_unique<double>(0.0); double mean_area = 0; fastjet::ClusterSequenceAreaBase const* clusterSequenceWithArea = dynamic_cast<fastjet::ClusterSequenceAreaBase const *> ( &*fjClusterSeq_ ); /* const double nemptyjets = clusterSequenceWithArea->n_empty_jets(*fjRangeDef_); if(( nemptyjets < -15 ) || ( nemptyjets > fjRangeDef_->area()+ 15)) { edm::LogWarning("StrangeNEmtpyJets") << "n_empty_jets is : " << clusterSequenceWithArea->n_empty_jets(*fjRangeDef_) << " with range " << fjRangeDef_->description() << "."; } */ if (clusterSequenceWithArea ==nullptr ){ if (fjJets_.size() > 0) { throw cms::Exception("LogicError")<<"fjClusterSeq is not initialized while inputs are present\n "; } } else { clusterSequenceWithArea->get_median_rho_and_sigma(*fjRangeDef_,false,*rho,*sigma,mean_area); if((*rho < 0)|| 
(edm::isNotFinite(*rho))) { edm::LogError("BadRho") << "rho value is " << *rho << " area:" << mean_area << " and n_empty_jets: " << clusterSequenceWithArea->n_empty_jets(*fjRangeDef_) << " with range " << fjRangeDef_->description() <<". Setting rho to rezo."; *rho = 0; } } iEvent.put(std::move(rho),"rho"); iEvent.put(std::move(sigma),"sigma"); } } // doRhoFastjet_ // produce output jet collection using namespace reco; // allocate fjJets_.size() Ts in vector auto jets = std::make_unique<std::vector<T>>(fjJets_.size()); // Distance between jet centers and overlap area -- for disk-based area calculation using RIJ = std::pair<double,double>; std::vector<RIJ> rijStorage(fjJets_.size()*(fjJets_.size()/2)); RIJ * rij[fjJets_.size()]; unsigned int k=0; for (unsigned int ijet=0;ijet<fjJets_.size();++ijet) { rij[ijet] = &rijStorage[k]; k+=ijet; } float etaJ[fjJets_.size()], phiJ[fjJets_.size()]; auto orParam_ = 1./rParam_; // fill jets for (unsigned int ijet=0;ijet<fjJets_.size();++ijet) { auto & jet = (*jets)[ijet]; // get the fastjet jet const fastjet::PseudoJet& fjJet = fjJets_[ijet]; // get the constituents from fastjet std::vector<fastjet::PseudoJet> const & fjConstituents = fastjet::sorted_by_pt(fjJet.constituents()); // convert them to CandidatePtr vector std::vector<CandidatePtr> const & constituents = getConstituents(fjConstituents); // write the specifics to the jet (simultaneously sets 4-vector, vertex). // These are overridden functions that will call the appropriate // specific allocator. 
writeSpecific(jet, Particle::LorentzVector(fjJet.px(), fjJet.py(), fjJet.pz(), fjJet.E()), vertex_, constituents, iSetup); phiJ[ijet] = jet.phi(); etaJ[ijet] = jet.eta(); } // calcuate the jet area for (unsigned int ijet=0;ijet<fjJets_.size();++ijet) { // calcuate the jet area double jetArea=0.0; // get the fastjet jet const auto & fjJet = fjJets_[ijet]; if ( doAreaFastjet_ && fjJet.has_area() ) { jetArea = fjJet.area(); } else if ( doAreaDiskApprox_ ) { // Here it is assumed that fjJets_ is in decreasing order of pT, // which should happen in FastjetJetProducer::runAlgorithm() jetArea = M_PI; RIJ * distance = rij[ijet]; for (unsigned jJet = 0; jJet < ijet; ++jJet) { distance[jJet].first = std::sqrt(reco::deltaR2(etaJ[ijet],phiJ[ijet], etaJ[jJet],phiJ[jJet]))*orParam_; distance[jJet].second = reco::helper::VirtualJetProducerHelper::intersection(distance[jJet].first); jetArea -=distance[jJet].second; for (unsigned kJet = 0; kJet < jJet; ++kJet) { jetArea += reco::helper::VirtualJetProducerHelper::intersection(distance[jJet].first, distance[kJet].first, rij[jJet][kJet].first, distance[jJet].second, distance[kJet].second, rij[jJet][kJet].second); } // end loop over harder jets } // end loop over harder jets jetArea *= (rParam_*rParam_); } auto & jet = (*jets)[ijet]; jet.setJetArea (jetArea); if(doPUOffsetCorr_){ jet.setPileup(subtractor_->getPileUpEnergy(ijet)); }else{ jet.setPileup (0.0); } // std::cout << "area " << ijet << " " << jetArea << " " << Area<T>::get(jet) << std::endl; // std::cout << "JetVI " << ijet << ' '<< jet.pt() << " " << jet.et() << ' '<< jet.energy() << ' '<< jet.mass() << std::endl; } // put the jets in the collection iEvent.put(std::move(jets),jetCollInstanceName_); } /// function template to write out the outputs template< class T> void VirtualJetProducer::writeCompoundJets( edm::Event & iEvent, edm::EventSetup const& iSetup) { if ( verbosity_ >= 1 ) { std::cout << "<VirtualJetProducer::writeCompoundJets (moduleLabel = " << moduleLabel_ << 
")>:" << std::endl; } // get a list of output jets auto jetCollection = std::make_unique<reco::BasicJetCollection>(); // get a list of output subjets auto subjetCollection = std::make_unique<std::vector<T>>(); // This will store the handle for the subjets after we write them edm::OrphanHandle< std::vector<T> > subjetHandleAfterPut; // this is the mapping of subjet to hard jet std::vector< std::vector<int> > indices; // this is the list of hardjet 4-momenta std::vector<math::XYZTLorentzVector> p4_hardJets; // this is the hardjet areas std::vector<double> area_hardJets; // Loop over the hard jets std::vector<fastjet::PseudoJet>::const_iterator it = fjJets_.begin(), iEnd = fjJets_.end(), iBegin = fjJets_.begin(); indices.resize( fjJets_.size() ); for ( ; it != iEnd; ++it ) { fastjet::PseudoJet const & localJet = *it; unsigned int jetIndex = it - iBegin; // Get the 4-vector for the hard jet p4_hardJets.push_back( math::XYZTLorentzVector(localJet.px(), localJet.py(), localJet.pz(), localJet.e() )); double localJetArea = 0.0; if ( doAreaFastjet_ && localJet.has_area() ) { localJetArea = localJet.area(); } area_hardJets.push_back( localJetArea ); // create the subjet list std::vector<fastjet::PseudoJet> constituents; if ( it->has_pieces() ) { constituents = it->pieces(); } else if ( it->has_constituents() ) { constituents = it->constituents(); } std::vector<fastjet::PseudoJet>::const_iterator itSubJetBegin = constituents.begin(), itSubJet = itSubJetBegin, itSubJetEnd = constituents.end(); for (; itSubJet != itSubJetEnd; ++itSubJet ){ fastjet::PseudoJet const & subjet = *itSubJet; if ( verbosity_ >= 1 ) { std::cout << "subjet #" << (itSubJet - itSubJetBegin) << ": Pt = " << subjet.pt() << ", eta = " << subjet.eta() << ", phi = " << subjet.phi() << ", mass = " << subjet.m() << " (#constituents = " << subjet.constituents().size() << ")" << std::endl; std::vector<fastjet::PseudoJet> subjet_constituents = subjet.constituents(); int idx_constituent = 0; for ( 
std::vector<fastjet::PseudoJet>::const_iterator constituent = subjet_constituents.begin(); constituent != subjet_constituents.end(); ++constituent ) { if ( constituent->pt() < 1.e-3 ) continue; // CV: skip ghosts std::cout << " constituent #" << idx_constituent << ": Pt = " << constituent->pt() << ", eta = " << constituent->eta() << ", phi = " << constituent->phi() << "," << " mass = " << constituent->m() << std::endl; ++idx_constituent; } } if ( verbosity_ >= 1 ) { std::cout << "subjet #" << (itSubJet - itSubJetBegin) << ": Pt = " << subjet.pt() << ", eta = " << subjet.eta() << ", phi = " << subjet.phi() << ", mass = " << subjet.m() << " (#constituents = " << subjet.constituents().size() << ")" << std::endl; std::vector<fastjet::PseudoJet> subjet_constituents = subjet.constituents(); int idx_constituent = 0; for ( std::vector<fastjet::PseudoJet>::const_iterator constituent = subjet_constituents.begin(); constituent != subjet_constituents.end(); ++constituent ) { if ( constituent->pt() < 1.e-3 ) continue; // CV: skip ghosts std::cout << " constituent #" << idx_constituent << ": Pt = " << constituent->pt() << ", eta = " << constituent->eta() << ", phi = " << constituent->phi() << "," << " mass = " << constituent->m() << std::endl; ++idx_constituent; } } math::XYZTLorentzVector p4Subjet(subjet.px(), subjet.py(), subjet.pz(), subjet.e() ); reco::Particle::Point point(0,0,0); // This will hold ptr's to the subjets std::vector<reco::CandidatePtr> subjetConstituents; // Get the transient subjet constituents from fastjet std::vector<fastjet::PseudoJet> subjetFastjetConstituents = subjet.constituents(); std::vector<reco::CandidatePtr> constituents = getConstituents(subjetFastjetConstituents ); indices[jetIndex].push_back( subjetCollection->size() ); // Add the concrete subjet type to the subjet list to write to event record T jet; reco::writeSpecific( jet, p4Subjet, point, constituents, iSetup); double subjetArea = 0.0; if ( doAreaFastjet_ && itSubJet->has_area() ){ 
subjetArea = itSubJet->area(); } jet.setJetArea( subjetArea ); subjetCollection->push_back( jet ); } } // put subjets into event record subjetHandleAfterPut = iEvent.put(std::move(subjetCollection), jetCollInstanceName_); // Now create the hard jets with ptr's to the subjets as constituents std::vector<math::XYZTLorentzVector>::const_iterator ip4 = p4_hardJets.begin(), ip4Begin = p4_hardJets.begin(), ip4End = p4_hardJets.end(); for ( ; ip4 != ip4End; ++ip4 ) { int p4_index = ip4 - ip4Begin; std::vector<int> & ind = indices[p4_index]; std::vector<reco::CandidatePtr> i_hardJetConstituents; // Add the subjets to the hard jet for( std::vector<int>::const_iterator isub = ind.begin(); isub != ind.end(); ++isub ) { reco::CandidatePtr candPtr( subjetHandleAfterPut, *isub, false ); i_hardJetConstituents.push_back( candPtr ); } reco::Particle::Point point(0,0,0); reco::BasicJet toput( *ip4, point, i_hardJetConstituents); toput.setJetArea( area_hardJets[ip4 - ip4Begin] ); jetCollection->push_back( toput ); } // put hard jets into event record // Store the Orphan handle for adding HTT information edm::OrphanHandle<reco::BasicJetCollection> oh = iEvent.put(std::move(jetCollection)); if (fromHTTTopJetProducer_){ addHTTTopJetTagInfoCollection( iEvent, iSetup, oh); } } /// function template to write out the outputs template< class T> void VirtualJetProducer::writeJetsWithConstituents( edm::Event & iEvent, edm::EventSetup const& iSetup) { if ( verbosity_ >= 1 ) { std::cout << "<VirtualJetProducer::writeJetsWithConstituents (moduleLabel = " << moduleLabel_ << ")>:" << std::endl; } // get a list of output jets MV: make this compatible with template auto jetCollection = std::make_unique<reco::PFJetCollection>(); // this is the mapping of jet to constituents std::vector< std::vector<int> > indices; // this is the list of jet 4-momenta std::vector<math::XYZTLorentzVector> p4_Jets; // this is the jet areas std::vector<double> area_Jets; // get a list of output constituents auto 
constituentCollection = std::make_unique<reco::PFCandidateCollection>();

  // This will store the handle for the constituents after we write them
  edm::OrphanHandle<reco::PFCandidateCollection> constituentHandleAfterPut;

  // Loop over the jets and extract constituents
  std::vector<fastjet::PseudoJet> constituentsSub;
  std::vector<fastjet::PseudoJet>::const_iterator it = fjJets_.begin(),
    iEnd = fjJets_.end(),
    iBegin = fjJets_.begin();
  indices.resize( fjJets_.size() );
  for ( ; it != iEnd; ++it ) {
    fastjet::PseudoJet const & localJet = *it;
    unsigned int jetIndex = it - iBegin;
    // Get the 4-vector for the hard jet
    p4_Jets.push_back( math::XYZTLorentzVector(localJet.px(), localJet.py(), localJet.pz(), localJet.e() ));
    double localJetArea = 0.0;
    if ( doAreaFastjet_ && localJet.has_area() ) {
      localJetArea = localJet.area();
    }
    area_Jets.push_back( localJetArea );

    // create the constituent list
    std::vector<fastjet::PseudoJet> constituents,ghosts;
    if ( it->has_pieces() )
      constituents = it->pieces();
    else if ( it->has_constituents() )
      fastjet::SelectorIsPureGhost().sift(it->constituents(), ghosts, constituents); //filter out ghosts

    //loop over constituents of jet (can be subjets or normal constituents)
    // indices[jetIndex] records, per jet, the positions its constituents will
    // occupy in the flat constituentsSub list (and hence in the collection
    // written to the event below).
    indices[jetIndex].reserve(constituents.size());
    constituentsSub.reserve(constituentsSub.size()+constituents.size());
    for (fastjet::PseudoJet const& constit : constituents) {
      indices[jetIndex].push_back( constituentsSub.size() );
      constituentsSub.push_back(constit);
    }
  }

  //Loop over constituents and store in the event
  // A dummy instance is needed because translatePdgIdToType is a non-static
  // member function of reco::PFCandidate (as the variable name says).
  static const reco::PFCandidate dummySinceTranslateIsNotStatic;
  for (fastjet::PseudoJet const& constit : constituentsSub) {
    // user_index() points back into the original input candidate list.
    auto orig = inputs_[constit.user_index()];
    auto id = dummySinceTranslateIsNotStatic.translatePdgIdToType(orig->pdgId());
    reco::PFCandidate pCand( reco::PFCandidate(orig->charge(), orig->p4(), id) );
    // Overwrite the four-momentum with the (possibly subtracted) fastjet one.
    math::XYZTLorentzVector pVec;
    pVec.SetPxPyPzE(constit.px(),constit.py(),constit.pz(),constit.e());
    pCand.setP4(pVec);
    pCand.setSourceCandidatePtr(
orig->sourceCandidatePtr(0) );
    constituentCollection->push_back(pCand);
  }
  // put constituents into event record
  constituentHandleAfterPut = iEvent.put(std::move(constituentCollection), jetCollInstanceName_ );

  // Now create the jets with ptr's to the constituents
  std::vector<math::XYZTLorentzVector>::const_iterator ip4 = p4_Jets.begin(),
    ip4Begin = p4_Jets.begin(),
    ip4End = p4_Jets.end();
  for ( ; ip4 != ip4End; ++ip4 ) {
    int p4_index = ip4 - ip4Begin;
    std::vector<int> & ind = indices[p4_index];
    std::vector<reco::CandidatePtr> i_jetConstituents;
    // Add the constituents to the jet
    for( std::vector<int>::const_iterator iconst = ind.begin();
         iconst != ind.end(); ++iconst ) {
      reco::CandidatePtr candPtr( constituentHandleAfterPut, *iconst, false );
      i_jetConstituents.push_back( candPtr );
    }
    if(i_jetConstituents.size()>0) { //only keep jets which have constituents after subtraction
      reco::Particle::Point point(0,0,0);
      reco::PFJet jet;
      reco::writeSpecific(jet,*ip4,point,i_jetConstituents,iSetup);
      jet.setJetArea( area_Jets[ip4 - ip4Begin] );
      jetCollection->emplace_back( jet );
    }
  }

  // put jets into event record
  iEvent.put(std::move(jetCollection));
}

// ------------ method fills 'descriptions' with the allowed parameters for the module  ------------
// Top-level fillDescriptions: delegates the shared parameters to
// fillDescriptionsFromVirtualJetProducer and adds only the parameter that
// subclasses are expected to override.
void VirtualJetProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions)
{
  edm::ParameterSetDescription desc;
  fillDescriptionsFromVirtualJetProducer(desc);
  // NOTE(review): unqualified "string" assumes a using-declaration (or
  // typedef) brought in above this chunk -- confirm at the top of the file.
  desc.add<string>("jetCollInstanceName", "" );
  // addDefault must be used here instead of add unless
  // all the classes that inherit from this class redefine
  // the fillDescriptions function. Otherwise, the autogenerated
  // cfi filenames are the same and conflict.
descriptions.addDefault(desc);
}

/// Registers the configuration parameters (with their defaults) shared by
/// VirtualJetProducer and all producers deriving from it.
void VirtualJetProducer::fillDescriptionsFromVirtualJetProducer(edm::ParameterSetDescription& desc)
{
  // input collections and jet definition
  desc.add<edm::InputTag>("src", edm::InputTag("particleFlow") );
  desc.add<edm::InputTag>("srcPVs", edm::InputTag("") );
  desc.add<string>("jetType", "PFJet" );
  desc.add<string>("jetAlgorithm", "AntiKt" );
  desc.add<double>("rParam", 0.4 );
  // input/output kinematic selection
  desc.add<double>("inputEtMin", 0.0 );
  desc.add<double>("inputEMin", 0.0 );
  desc.add<double>("jetPtMin", 5. );
  // corrections and pileup handling
  desc.add<bool> ("doPVCorrection", false );
  desc.add<bool> ("doAreaFastjet", false );
  desc.add<bool> ("doRhoFastjet", false );
  desc.add<bool> ("doPUOffsetCorr", false );
  desc.add<double>("puPtMin", 10.);
  desc.add<double>("nSigmaPU", 1.0 );
  desc.add<double>("radiusPU", 0.5 );
  desc.add<string>("subtractorName", "" );
  // FastJet area computation settings
  desc.add<bool> ("useExplicitGhosts", false );
  desc.add<bool> ("doAreaDiskApprox", false );
  desc.add<double>("voronoiRfact", -0.9 );
  desc.add<double>("Rho_EtaMax", 4.4 );
  desc.add<double>("Ghost_EtaMax", 5. );
  desc.add<int> ("Active_Area_Repeats", 1 );
  desc.add<double>("GhostArea", 0.01 );
  // misc steering flags
  desc.add<bool> ("restrictInputs", false );
  desc.add<unsigned int> ("maxInputs", 1 );
  desc.add<bool> ("writeCompound", false );
  desc.add<bool> ("writeJetsWithConst", false );
  desc.add<bool> ("doFastJetNonUniform", false );
  desc.add<bool> ("useDeterministicSeed",false );
  desc.add<unsigned int> ("minSeed", 14327 );
  desc.add<int> ("verbosity", 0 );
  desc.add<double>("puWidth", 0. );
  desc.add<unsigned int>("nExclude", 0 );
  // calo-cell quality thresholds (effectively "no limit" defaults)
  desc.add<unsigned int>("maxBadEcalCells", 9999999 );
  desc.add<unsigned int>("maxBadHcalCells", 9999999 );
  desc.add<unsigned int>("maxProblematicEcalCells", 9999999 );
  desc.add<unsigned int>("maxProblematicHcalCells", 9999999 );
  desc.add<unsigned int>("maxRecoveredEcalCells", 9999999 );
  desc.add<unsigned int>("maxRecoveredHcalCells", 9999999 );
  vector<double> puCentersDefault;
  desc.add<vector<double>>("puCenters", puCentersDefault);
}
; DO NOT MODIFY THIS FILE DIRECTLY!
; author: @TinySecEx
; shadowssdt asm stub for 6.2.9200-sp0-windows-8 amd64
;
; NOTE(review): every stub in this file follows one pattern: the first
; argument is mirrored into R10 (x64 system-call convention), the service
; index for this specific OS build is loaded into EAX, and the SYSCALL
; instruction is emitted as its raw opcode bytes "db 0Fh , 05h" with the
; mnemonic left commented out -- presumably for assemblers that do not
; accept the mnemonic; confirm with the stub generator.
option casemap:none
option prologue:none
option epilogue:none
.code
; ULONG64 __stdcall NtUserYieldTask( );
NtUserYieldTask PROC STDCALL
    mov r10 , rcx
    mov eax , 4096
    ;syscall
    db 0Fh , 05h
    ret
NtUserYieldTask ENDP
; ULONG64 __stdcall NtUserGetThreadState( ULONG64 arg_01 );
NtUserGetThreadState PROC STDCALL
    mov r10 , rcx
    mov eax , 4097
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetThreadState ENDP
; ULONG64 __stdcall NtUserPeekMessage( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtUserPeekMessage PROC STDCALL
    mov r10 , rcx
    mov eax , 4098
    ;syscall
    db 0Fh , 05h
    ret
NtUserPeekMessage ENDP
; ULONG64 __stdcall NtUserCallOneParam( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserCallOneParam PROC STDCALL
    mov r10 , rcx
    mov eax , 4099
    ;syscall
    db 0Fh , 05h
    ret
NtUserCallOneParam ENDP
; ULONG64 __stdcall NtUserGetKeyState( ULONG64 arg_01 );
NtUserGetKeyState PROC STDCALL
    mov r10 , rcx
    mov eax , 4100
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetKeyState ENDP
; ULONG64 __stdcall NtUserInvalidateRect( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserInvalidateRect PROC STDCALL
    mov r10 , rcx
    mov eax , 4101
    ;syscall
    db 0Fh , 05h
    ret
NtUserInvalidateRect ENDP
; ULONG64 __stdcall NtUserCallNoParam( ULONG64 arg_01 );
NtUserCallNoParam PROC STDCALL
    mov r10 , rcx
    mov eax , 4102
    ;syscall
    db 0Fh , 05h
    ret
NtUserCallNoParam ENDP
; ULONG64 __stdcall NtUserGetMessage( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserGetMessage PROC STDCALL
    mov r10 , rcx
    mov eax , 4103
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetMessage ENDP
; ULONG64 __stdcall NtUserMessageCall( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 );
NtUserMessageCall PROC STDCALL
    mov r10 , rcx
    mov eax , 4104
    ;syscall
    db 0Fh , 05h
    ret
NtUserMessageCall ENDP
; ULONG64 __stdcall NtGdiBitBlt( ULONG64 arg_01 ,
; ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 );
NtGdiBitBlt PROC STDCALL
    mov r10 , rcx
    mov eax , 4105
    ;syscall
    db 0Fh , 05h
    ret
NtGdiBitBlt ENDP
; ULONG64 __stdcall NtGdiGetCharSet( ULONG64 arg_01 );
NtGdiGetCharSet PROC STDCALL
    mov r10 , rcx
    mov eax , 4106
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetCharSet ENDP
; ULONG64 __stdcall NtUserGetDC( ULONG64 arg_01 );
NtUserGetDC PROC STDCALL
    mov r10 , rcx
    mov eax , 4107
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetDC ENDP
; ULONG64 __stdcall NtGdiSelectBitmap( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiSelectBitmap PROC STDCALL
    mov r10 , rcx
    mov eax , 4108
    ;syscall
    db 0Fh , 05h
    ret
NtGdiSelectBitmap ENDP
; ULONG64 __stdcall NtUserWaitMessage( );
NtUserWaitMessage PROC STDCALL
    mov r10 , rcx
    mov eax , 4109
    ;syscall
    db 0Fh , 05h
    ret
NtUserWaitMessage ENDP
; ULONG64 __stdcall NtUserTranslateMessage( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserTranslateMessage PROC STDCALL
    mov r10 , rcx
    mov eax , 4110
    ;syscall
    db 0Fh , 05h
    ret
NtUserTranslateMessage ENDP
; ULONG64 __stdcall NtUserGetProp( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserGetProp PROC STDCALL
    mov r10 , rcx
    mov eax , 4111
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetProp ENDP
; ULONG64 __stdcall NtUserPostMessage( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserPostMessage PROC STDCALL
    mov r10 , rcx
    mov eax , 4112
    ;syscall
    db 0Fh , 05h
    ret
NtUserPostMessage ENDP
; ULONG64 __stdcall NtUserQueryWindow( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserQueryWindow PROC STDCALL
    mov r10 , rcx
    mov eax , 4113
    ;syscall
    db 0Fh , 05h
    ret
NtUserQueryWindow ENDP
; ULONG64 __stdcall NtUserTranslateAccelerator( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserTranslateAccelerator PROC STDCALL
    mov r10 , rcx
    mov eax , 4114
    ;syscall
    db 0Fh , 05h
    ret
NtUserTranslateAccelerator ENDP
; ULONG64 __stdcall NtGdiFlush( );
NtGdiFlush PROC STDCALL
    mov r10 , rcx
    mov eax , 4115
    ;syscall
    db 0Fh , 05h
    ret
NtGdiFlush ENDP
; ULONG64 __stdcall NtUserRedrawWindow( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserRedrawWindow PROC STDCALL
    mov r10 , rcx
    mov eax , 4116
    ;syscall
    db 0Fh , 05h
    ret
NtUserRedrawWindow ENDP
; ULONG64 __stdcall NtUserWindowFromPoint( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserWindowFromPoint PROC STDCALL
    mov r10 , rcx
    mov eax , 4117
    ;syscall
    db 0Fh , 05h
    ret
NtUserWindowFromPoint ENDP
; ULONG64 __stdcall NtUserCallMsgFilter( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserCallMsgFilter PROC STDCALL
    mov r10 , rcx
    mov eax , 4118
    ;syscall
    db 0Fh , 05h
    ret
NtUserCallMsgFilter ENDP
; ULONG64 __stdcall NtUserValidateTimerCallback( ULONG64 arg_01 );
NtUserValidateTimerCallback PROC STDCALL
    mov r10 , rcx
    mov eax , 4119
    ;syscall
    db 0Fh , 05h
    ret
NtUserValidateTimerCallback ENDP
; ULONG64 __stdcall NtUserBeginPaint( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserBeginPaint PROC STDCALL
    mov r10 , rcx
    mov eax , 4120
    ;syscall
    db 0Fh , 05h
    ret
NtUserBeginPaint ENDP
; ULONG64 __stdcall NtUserSetTimer( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtUserSetTimer PROC STDCALL
    mov r10 , rcx
    mov eax , 4121
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetTimer ENDP
; ULONG64 __stdcall NtUserEndPaint( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserEndPaint PROC STDCALL
    mov r10 , rcx
    mov eax , 4122
    ;syscall
    db 0Fh , 05h
    ret
NtUserEndPaint ENDP
; ULONG64 __stdcall NtUserSetCursor( ULONG64 arg_01 );
NtUserSetCursor PROC STDCALL
    mov r10 , rcx
    mov eax , 4123
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetCursor ENDP
; ULONG64 __stdcall NtUserKillTimer( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserKillTimer PROC STDCALL
    mov r10 , rcx
    mov eax , 4124
    ;syscall
    db 0Fh , 05h
    ret
NtUserKillTimer ENDP
; ULONG64 __stdcall NtUserBuildHwndList( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 );
NtUserBuildHwndList PROC STDCALL
    mov r10 , rcx
    mov eax , 4125
    ;syscall
    db 0Fh , 05h
    ret
NtUserBuildHwndList ENDP
; ULONG64 __stdcall NtUserSelectPalette( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserSelectPalette PROC STDCALL
    mov r10 , rcx
    mov eax , 4126
    ;syscall
    db 0Fh , 05h
    ret
NtUserSelectPalette ENDP
; ULONG64 __stdcall NtUserCallNextHookEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserCallNextHookEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4127
    ;syscall
    db 0Fh , 05h
    ret
NtUserCallNextHookEx ENDP
; ULONG64 __stdcall NtUserHideCaret( ULONG64 arg_01 );
NtUserHideCaret PROC STDCALL
    mov r10 , rcx
    mov eax , 4128
    ;syscall
    db 0Fh , 05h
    ret
NtUserHideCaret ENDP
; ULONG64 __stdcall NtGdiIntersectClipRect( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtGdiIntersectClipRect PROC STDCALL
    mov r10 , rcx
    mov eax , 4129
    ;syscall
    db 0Fh , 05h
    ret
NtGdiIntersectClipRect ENDP
; ULONG64 __stdcall NtUserCallHwndLock( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserCallHwndLock PROC STDCALL
    mov r10 , rcx
    mov eax , 4130
    ;syscall
    db 0Fh , 05h
    ret
NtUserCallHwndLock ENDP
; ULONG64 __stdcall NtUserGetProcessWindowStation( );
NtUserGetProcessWindowStation PROC STDCALL
    mov r10 , rcx
    mov eax , 4131
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetProcessWindowStation ENDP
; ULONG64 __stdcall NtGdiDeleteObjectApp( ULONG64 arg_01 );
NtGdiDeleteObjectApp PROC STDCALL
    mov r10 , rcx
    mov eax , 4132
    ;syscall
    db 0Fh , 05h
    ret
NtGdiDeleteObjectApp ENDP
; ULONG64 __stdcall NtUserSetWindowPos( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 );
NtUserSetWindowPos PROC STDCALL
    mov r10 , rcx
    mov eax , 4133
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetWindowPos ENDP
; ULONG64 __stdcall NtUserShowCaret( ULONG64 arg_01 );
NtUserShowCaret PROC STDCALL
    mov r10 , rcx
    mov eax , 4134
    ;syscall
    db 0Fh , 05h
    ret
NtUserShowCaret ENDP
; ULONG64 __stdcall NtUserEndDeferWindowPosEx( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserEndDeferWindowPosEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4135
    ;syscall
    db 0Fh , 05h
    ret
NtUserEndDeferWindowPosEx ENDP
; ULONG64 __stdcall NtUserCallHwndParamLock( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserCallHwndParamLock PROC STDCALL
    mov r10 , rcx
    mov eax , 4136
    ;syscall
    db 0Fh , 05h
    ret
NtUserCallHwndParamLock ENDP
; ULONG64 __stdcall NtUserVkKeyScanEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserVkKeyScanEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4137
    ;syscall
    db 0Fh , 05h
    ret
NtUserVkKeyScanEx ENDP
; ULONG64 __stdcall NtGdiSetDIBitsToDeviceInternal( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 , ULONG64 arg_12 , ULONG64 arg_13 , ULONG64 arg_14 , ULONG64 arg_15 , ULONG64 arg_16 );
NtGdiSetDIBitsToDeviceInternal PROC STDCALL
    mov r10 , rcx
    mov eax , 4138
    ;syscall
    db 0Fh , 05h
    ret
NtGdiSetDIBitsToDeviceInternal ENDP
; ULONG64 __stdcall NtUserCallTwoParam( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserCallTwoParam PROC STDCALL
    mov r10 , rcx
    mov eax , 4139
    ;syscall
    db 0Fh , 05h
    ret
NtUserCallTwoParam ENDP
; ULONG64 __stdcall NtGdiGetRandomRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiGetRandomRgn PROC STDCALL
    mov r10 , rcx
    mov eax , 4140
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetRandomRgn ENDP
; ULONG64 __stdcall NtUserCopyAcceleratorTable( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserCopyAcceleratorTable PROC STDCALL
    mov r10 , rcx
    mov eax , 4141
    ;syscall
    db 0Fh , 05h
    ret
NtUserCopyAcceleratorTable ENDP
; ULONG64 __stdcall NtUserNotifyWinEvent( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserNotifyWinEvent PROC STDCALL
    mov r10 , rcx
    mov eax , 4142
    ;syscall
    db 0Fh , 05h
    ret
NtUserNotifyWinEvent ENDP
; ULONG64 __stdcall NtGdiExtSelectClipRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiExtSelectClipRgn PROC STDCALL
    mov r10 , rcx
    mov eax , 4143
    ;syscall
    db 0Fh , 05h
    ret
NtGdiExtSelectClipRgn ENDP
; ULONG64 __stdcall NtUserIsClipboardFormatAvailable( ULONG64 arg_01 );
NtUserIsClipboardFormatAvailable PROC STDCALL
    mov r10 , rcx
    mov eax , 4144
    ;syscall
    db 0Fh , 05h
    ret
NtUserIsClipboardFormatAvailable ENDP
; ULONG64 __stdcall NtUserSetScrollInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserSetScrollInfo PROC STDCALL
    mov r10 , rcx
    mov eax , 4145
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetScrollInfo ENDP
; ULONG64 __stdcall GreStretchBlt( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 , ULONG64 arg_12 );
GreStretchBlt PROC STDCALL
    mov r10 , rcx
    mov eax , 4146
    ;syscall
    db 0Fh , 05h
    ret
GreStretchBlt ENDP
; ULONG64 __stdcall NtUserCreateCaret( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserCreateCaret PROC STDCALL
    mov r10 , rcx
    mov eax , 4147
    ;syscall
    db 0Fh , 05h
    ret
NtUserCreateCaret ENDP
; ULONG64 __stdcall NtGdiRectVisible( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiRectVisible PROC STDCALL
    mov r10 , rcx
    mov eax , 4148
    ;syscall
    db 0Fh , 05h
    ret
NtGdiRectVisible ENDP
; ULONG64 __stdcall NtGdiCombineRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtGdiCombineRgn PROC STDCALL
    mov r10 , rcx
    mov eax , 4149
    ;syscall
    db 0Fh , 05h
    ret
NtGdiCombineRgn ENDP
; ULONG64 __stdcall NtGdiGetDCObject( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiGetDCObject PROC STDCALL
    mov r10 , rcx
    mov eax , 4150
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetDCObject ENDP
; ULONG64 __stdcall NtUserDispatchMessage( ULONG64 arg_01 );
NtUserDispatchMessage PROC STDCALL
    mov r10 , rcx
    mov eax , 4151
    ;syscall
    db 0Fh , 05h
    ret
NtUserDispatchMessage ENDP
; ULONG64 __stdcall NtUserRegisterWindowMessage( ULONG64 arg_01 );
NtUserRegisterWindowMessage PROC STDCALL
    mov r10 , rcx
    mov eax , 4152
    ;syscall
    db 0Fh , 05h
    ret
NtUserRegisterWindowMessage ENDP
; ULONG64 __stdcall
; NtGdiExtTextOutW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 );
NtGdiExtTextOutW PROC STDCALL
    mov r10 , rcx
    mov eax , 4153
    ;syscall
    db 0Fh , 05h
    ret
NtGdiExtTextOutW ENDP
; ULONG64 __stdcall NtGdiSelectFont( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiSelectFont PROC STDCALL
    mov r10 , rcx
    mov eax , 4154
    ;syscall
    db 0Fh , 05h
    ret
NtGdiSelectFont ENDP
; ULONG64 __stdcall NtGdiRestoreDC( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiRestoreDC PROC STDCALL
    mov r10 , rcx
    mov eax , 4155
    ;syscall
    db 0Fh , 05h
    ret
NtGdiRestoreDC ENDP
; ULONG64 __stdcall NtGdiSaveDC( ULONG64 arg_01 );
NtGdiSaveDC PROC STDCALL
    mov r10 , rcx
    mov eax , 4156
    ;syscall
    db 0Fh , 05h
    ret
NtGdiSaveDC ENDP
; ULONG64 __stdcall NtUserGetForegroundWindow( );
NtUserGetForegroundWindow PROC STDCALL
    mov r10 , rcx
    mov eax , 4157
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetForegroundWindow ENDP
; ULONG64 __stdcall NtUserShowScrollBar( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserShowScrollBar PROC STDCALL
    mov r10 , rcx
    mov eax , 4158
    ;syscall
    db 0Fh , 05h
    ret
NtUserShowScrollBar ENDP
; ULONG64 __stdcall NtUserFindExistingCursorIcon( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserFindExistingCursorIcon PROC STDCALL
    mov r10 , rcx
    mov eax , 4159
    ;syscall
    db 0Fh , 05h
    ret
NtUserFindExistingCursorIcon ENDP
; ULONG64 __stdcall NtGdiGetDCDword( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiGetDCDword PROC STDCALL
    mov r10 , rcx
    mov eax , 4160
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetDCDword ENDP
; ULONG64 __stdcall NtGdiGetRegionData( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiGetRegionData PROC STDCALL
    mov r10 , rcx
    mov eax , 4161
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetRegionData ENDP
; ULONG64 __stdcall NtGdiLineTo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiLineTo PROC STDCALL
    mov r10 , rcx
    mov eax , 4162
    ;syscall
    db 0Fh , 05h
    ret
NtGdiLineTo ENDP
; ULONG64 __stdcall
; NtUserSystemParametersInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserSystemParametersInfo PROC STDCALL
    mov r10 , rcx
    mov eax , 4163
    ;syscall
    db 0Fh , 05h
    ret
NtUserSystemParametersInfo ENDP
; ULONG64 __stdcall NtGdiGetAppClipBox( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiGetAppClipBox PROC STDCALL
    mov r10 , rcx
    mov eax , 4164
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetAppClipBox ENDP
; ULONG64 __stdcall NtUserGetAsyncKeyState( ULONG64 arg_01 );
NtUserGetAsyncKeyState PROC STDCALL
    mov r10 , rcx
    mov eax , 4165
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetAsyncKeyState ENDP
; ULONG64 __stdcall NtUserGetCPD( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserGetCPD PROC STDCALL
    mov r10 , rcx
    mov eax , 4166
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetCPD ENDP
; ULONG64 __stdcall NtUserRemoveProp( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserRemoveProp PROC STDCALL
    mov r10 , rcx
    mov eax , 4167
    ;syscall
    db 0Fh , 05h
    ret
NtUserRemoveProp ENDP
; ULONG64 __stdcall NtGdiDoPalette( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 );
NtGdiDoPalette PROC STDCALL
    mov r10 , rcx
    mov eax , 4168
    ;syscall
    db 0Fh , 05h
    ret
NtGdiDoPalette ENDP
; ULONG64 __stdcall NtGdiPolyPolyDraw( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtGdiPolyPolyDraw PROC STDCALL
    mov r10 , rcx
    mov eax , 4169
    ;syscall
    db 0Fh , 05h
    ret
NtGdiPolyPolyDraw ENDP
; ULONG64 __stdcall NtUserSetCapture( ULONG64 arg_01 );
NtUserSetCapture PROC STDCALL
    mov r10 , rcx
    mov eax , 4170
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetCapture ENDP
; ULONG64 __stdcall NtUserEnumDisplayMonitors( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserEnumDisplayMonitors PROC STDCALL
    mov r10 , rcx
    mov eax , 4171
    ;syscall
    db 0Fh , 05h
    ret
NtUserEnumDisplayMonitors ENDP
; ULONG64 __stdcall NtGdiCreateCompatibleBitmap( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiCreateCompatibleBitmap PROC STDCALL
    mov r10 , rcx
    mov eax , 4172
    ;syscall
    db 0Fh , 05h
    ret
NtGdiCreateCompatibleBitmap ENDP
; ULONG64 __stdcall NtUserSetProp( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserSetProp PROC STDCALL
    mov r10 , rcx
    mov eax , 4173
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetProp ENDP
; ULONG64 __stdcall NtGdiGetTextCharsetInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiGetTextCharsetInfo PROC STDCALL
    mov r10 , rcx
    mov eax , 4174
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetTextCharsetInfo ENDP
; ULONG64 __stdcall NtUserSBGetParms( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserSBGetParms PROC STDCALL
    mov r10 , rcx
    mov eax , 4175
    ;syscall
    db 0Fh , 05h
    ret
NtUserSBGetParms ENDP
; ULONG64 __stdcall NtUserGetIconInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 );
NtUserGetIconInfo PROC STDCALL
    mov r10 , rcx
    mov eax , 4176
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetIconInfo ENDP
; ULONG64 __stdcall NtUserExcludeUpdateRgn( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserExcludeUpdateRgn PROC STDCALL
    mov r10 , rcx
    mov eax , 4177
    ;syscall
    db 0Fh , 05h
    ret
NtUserExcludeUpdateRgn ENDP
; ULONG64 __stdcall NtUserSetFocus( ULONG64 arg_01 );
NtUserSetFocus PROC STDCALL
    mov r10 , rcx
    mov eax , 4178
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetFocus ENDP
; ULONG64 __stdcall NtGdiExtGetObjectW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiExtGetObjectW PROC STDCALL
    mov r10 , rcx
    mov eax , 4179
    ;syscall
    db 0Fh , 05h
    ret
NtGdiExtGetObjectW ENDP
; ULONG64 __stdcall NtUserGetUpdateRect( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserGetUpdateRect PROC STDCALL
    mov r10 , rcx
    mov eax , 4180
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetUpdateRect ENDP
; ULONG64 __stdcall NtGdiCreateCompatibleDC( ULONG64 arg_01 );
NtGdiCreateCompatibleDC PROC STDCALL
    mov r10 , rcx
    mov eax , 4181
    ;syscall
    db 0Fh , 05h
    ret
NtGdiCreateCompatibleDC ENDP
; ULONG64 __stdcall NtUserGetClipboardSequenceNumber( );
NtUserGetClipboardSequenceNumber PROC STDCALL
    mov r10 , rcx
    mov eax , 4182
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetClipboardSequenceNumber ENDP
; ULONG64 __stdcall NtGdiCreatePen( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtGdiCreatePen PROC STDCALL
    mov r10 , rcx
    mov eax , 4183
    ;syscall
    db 0Fh , 05h
    ret
NtGdiCreatePen ENDP
; ULONG64 __stdcall NtUserShowWindow( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserShowWindow PROC STDCALL
    mov r10 , rcx
    mov eax , 4184
    ;syscall
    db 0Fh , 05h
    ret
NtUserShowWindow ENDP
; ULONG64 __stdcall NtUserGetKeyboardLayoutList( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserGetKeyboardLayoutList PROC STDCALL
    mov r10 , rcx
    mov eax , 4185
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetKeyboardLayoutList ENDP
; ULONG64 __stdcall NtGdiPatBlt( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 );
NtGdiPatBlt PROC STDCALL
    mov r10 , rcx
    mov eax , 4186
    ;syscall
    db 0Fh , 05h
    ret
NtGdiPatBlt ENDP
; ULONG64 __stdcall NtUserMapVirtualKeyEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserMapVirtualKeyEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4187
    ;syscall
    db 0Fh , 05h
    ret
NtUserMapVirtualKeyEx ENDP
; ULONG64 __stdcall NtUserSetWindowLong( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserSetWindowLong PROC STDCALL
    mov r10 , rcx
    mov eax , 4188
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetWindowLong ENDP
; ULONG64 __stdcall NtGdiHfontCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtGdiHfontCreate PROC STDCALL
    mov r10 , rcx
    mov eax , 4189
    ;syscall
    db 0Fh , 05h
    ret
NtGdiHfontCreate ENDP
; ULONG64 __stdcall NtUserMoveWindow( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 );
NtUserMoveWindow PROC STDCALL
    mov r10 , rcx
    mov eax , 4190
    ;syscall
    db 0Fh , 05h
    ret
NtUserMoveWindow ENDP
; ULONG64 __stdcall NtUserPostThreadMessage( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtUserPostThreadMessage PROC STDCALL
    mov r10 , rcx
    mov eax , 4191
    ;syscall
    db 0Fh , 05h
    ret
NtUserPostThreadMessage ENDP
; ULONG64 __stdcall NtUserDrawIconEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 );
NtUserDrawIconEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4192
    ;syscall
    db 0Fh , 05h
    ret
NtUserDrawIconEx ENDP
; ULONG64 __stdcall NtUserGetSystemMenu( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserGetSystemMenu PROC STDCALL
    mov r10 , rcx
    mov eax , 4193
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetSystemMenu ENDP
; ULONG64 __stdcall NtGdiDrawStream( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiDrawStream PROC STDCALL
    mov r10 , rcx
    mov eax , 4194
    ;syscall
    db 0Fh , 05h
    ret
NtGdiDrawStream ENDP
; ULONG64 __stdcall NtUserInternalGetWindowText( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserInternalGetWindowText PROC STDCALL
    mov r10 , rcx
    mov eax , 4195
    ;syscall
    db 0Fh , 05h
    ret
NtUserInternalGetWindowText ENDP
; ULONG64 __stdcall NtUserGetWindowDC( ULONG64 arg_01 );
NtUserGetWindowDC PROC STDCALL
    mov r10 , rcx
    mov eax , 4196
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetWindowDC ENDP
; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 );
; NOTE(review): NtGdiD3dContextCreate is defined a SECOND time further down
; in this file (service 4221, 4 args). Duplicate PROC names will fail to
; assemble -- this looks like a generator bug; one of the two entries is
; presumably a different API that was mis-named. Needs to be resolved before
; this file can build.
NtGdiD3dContextCreate PROC STDCALL
    mov r10 , rcx
    mov eax , 4197
    ;syscall
    db 0Fh , 05h
    ret
NtGdiD3dContextCreate ENDP
; ULONG64 __stdcall NtGdiInvertRgn( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiInvertRgn PROC STDCALL
    mov r10 , rcx
    mov eax , 4198
    ;syscall
    db 0Fh , 05h
    ret
NtGdiInvertRgn ENDP
; ULONG64 __stdcall NtGdiGetRgnBox( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiGetRgnBox PROC STDCALL
    mov r10 , rcx
    mov eax , 4199
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetRgnBox ENDP
; ULONG64 __stdcall NtGdiGetAndSetDCDword( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtGdiGetAndSetDCDword PROC STDCALL
    mov r10 , rcx
    mov eax , 4200
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetAndSetDCDword ENDP
; ULONG64 __stdcall NtGdiMaskBlt( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 , ULONG64 arg_12 , ULONG64 arg_13 );
NtGdiMaskBlt PROC STDCALL
    mov r10 , rcx
    mov eax , 4201
    ;syscall
    db 0Fh , 05h
    ret
NtGdiMaskBlt ENDP
; ULONG64 __stdcall NtGdiGetWidthTable( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 );
NtGdiGetWidthTable PROC STDCALL
    mov r10 , rcx
    mov eax , 4202
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetWidthTable ENDP
; ULONG64 __stdcall NtUserScrollDC( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 );
NtUserScrollDC PROC STDCALL
    mov r10 , rcx
    mov eax , 4203
    ;syscall
    db 0Fh , 05h
    ret
NtUserScrollDC ENDP
; ULONG64 __stdcall NtUserGetObjectInformation( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtUserGetObjectInformation PROC STDCALL
    mov r10 , rcx
    mov eax , 4204
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetObjectInformation ENDP
; ULONG64 __stdcall NtGdiCreateBitmap( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtGdiCreateBitmap PROC STDCALL
    mov r10 , rcx
    mov eax , 4205
    ;syscall
    db 0Fh , 05h
    ret
NtGdiCreateBitmap ENDP
; ULONG64 __stdcall NtUserFindWindowEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtUserFindWindowEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4206
    ;syscall
    db 0Fh , 05h
    ret
NtUserFindWindowEx ENDP
; ULONG64 __stdcall NtGdiPolyPatBlt( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtGdiPolyPatBlt PROC STDCALL
    mov r10 , rcx
    mov eax , 4207
    ;syscall
    db 0Fh , 05h
    ret
NtGdiPolyPatBlt ENDP
; ULONG64 __stdcall NtUserUnhookWindowsHookEx( ULONG64 arg_01 );
NtUserUnhookWindowsHookEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4208
    ;syscall
    db 0Fh , 05h
    ret
NtUserUnhookWindowsHookEx ENDP
; ULONG64 __stdcall NtGdiGetNearestColor( ULONG64 arg_01 , ULONG64 arg_02 );
NtGdiGetNearestColor PROC STDCALL
    mov r10 , rcx
    mov eax , 4209
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetNearestColor ENDP
; ULONG64 __stdcall NtGdiTransformPoints( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 );
NtGdiTransformPoints PROC STDCALL
    mov r10 , rcx
    mov eax , 4210
    ;syscall
    db 0Fh , 05h
    ret
NtGdiTransformPoints ENDP
; ULONG64 __stdcall NtGdiGetDCPoint( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiGetDCPoint PROC STDCALL
    mov r10 , rcx
    mov eax , 4211
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetDCPoint ENDP
; ULONG64 __stdcall NtGdiCreateDIBBrush( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 );
NtGdiCreateDIBBrush PROC STDCALL
    mov r10 , rcx
    mov eax , 4212
    ;syscall
    db 0Fh , 05h
    ret
NtGdiCreateDIBBrush ENDP
; ULONG64 __stdcall NtGdiGetTextMetricsW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiGetTextMetricsW PROC STDCALL
    mov r10 , rcx
    mov eax , 4213
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetTextMetricsW ENDP
; ULONG64 __stdcall NtUserCreateWindowEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 , ULONG64 arg_12 , ULONG64 arg_13 , ULONG64 arg_14 , ULONG64 arg_15 , ULONG64 arg_16 );
NtUserCreateWindowEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4214
    ;syscall
    db 0Fh , 05h
    ret
NtUserCreateWindowEx ENDP
; ULONG64 __stdcall NtUserSetParent( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserSetParent PROC STDCALL
    mov r10 , rcx
    mov eax , 4215
    ;syscall
    db 0Fh , 05h
    ret
NtUserSetParent ENDP
; ULONG64 __stdcall NtUserGetKeyboardState( ULONG64 arg_01 );
NtUserGetKeyboardState PROC STDCALL
    mov r10 , rcx
    mov eax , 4216
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetKeyboardState ENDP
; ULONG64 __stdcall
; NtUserToUnicodeEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 );
NtUserToUnicodeEx PROC STDCALL
    mov r10 , rcx
    mov eax , 4217
    ;syscall
    db 0Fh , 05h
    ret
NtUserToUnicodeEx ENDP
; ULONG64 __stdcall NtUserGetControlBrush( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserGetControlBrush PROC STDCALL
    mov r10 , rcx
    mov eax , 4218
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetControlBrush ENDP
; ULONG64 __stdcall NtUserGetClassName( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtUserGetClassName PROC STDCALL
    mov r10 , rcx
    mov eax , 4219
    ;syscall
    db 0Fh , 05h
    ret
NtUserGetClassName ENDP
; ULONG64 __stdcall NtGdiAlphaBlend( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 , ULONG64 arg_12 );
NtGdiAlphaBlend PROC STDCALL
    mov r10 , rcx
    mov eax , 4220
    ;syscall
    db 0Fh , 05h
    ret
NtGdiAlphaBlend ENDP
; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
; NOTE(review): duplicate definition -- NtGdiD3dContextCreate already appears
; earlier in this file (service 4197, 7 args). Duplicate PROC names will fail
; to assemble; one of the two entries is presumably a mis-named API from the
; stub generator and must be renamed/removed before this file can build.
NtGdiD3dContextCreate PROC STDCALL
    mov r10 , rcx
    mov eax , 4221
    ;syscall
    db 0Fh , 05h
    ret
NtGdiD3dContextCreate ENDP
; ULONG64 __stdcall NtGdiOffsetRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 );
NtGdiOffsetRgn PROC STDCALL
    mov r10 , rcx
    mov eax , 4222
    ;syscall
    db 0Fh , 05h
    ret
NtGdiOffsetRgn ENDP
; ULONG64 __stdcall NtUserDefSetText( ULONG64 arg_01 , ULONG64 arg_02 );
NtUserDefSetText PROC STDCALL
    mov r10 , rcx
    mov eax , 4223
    ;syscall
    db 0Fh , 05h
    ret
NtUserDefSetText ENDP
; ULONG64 __stdcall NtGdiGetTextFaceW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 );
NtGdiGetTextFaceW PROC STDCALL
    mov r10 , rcx
    mov eax , 4224
    ;syscall
    db 0Fh , 05h
    ret
NtGdiGetTextFaceW ENDP
; ULONG64 __stdcall NtGdiStretchDIBitsInternal( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 ,
ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 , ULONG64 arg_12 , ULONG64 arg_13 , ULONG64 arg_14 , ULONG64 arg_15 , ULONG64 arg_16 ); NtGdiStretchDIBitsInternal PROC STDCALL mov r10 , rcx mov eax , 4225 ;syscall db 0Fh , 05h ret NtGdiStretchDIBitsInternal ENDP ; ULONG64 __stdcall NtUserSendInput( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSendInput PROC STDCALL mov r10 , rcx mov eax , 4226 ;syscall db 0Fh , 05h ret NtUserSendInput ENDP ; ULONG64 __stdcall NtUserGetThreadDesktop( ULONG64 arg_01 ); NtUserGetThreadDesktop PROC STDCALL mov r10 , rcx mov eax , 4227 ;syscall db 0Fh , 05h ret NtUserGetThreadDesktop ENDP ; ULONG64 __stdcall NtGdiCreateRectRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiCreateRectRgn PROC STDCALL mov r10 , rcx mov eax , 4228 ;syscall db 0Fh , 05h ret NtGdiCreateRectRgn ENDP ; ULONG64 __stdcall NtGdiGetDIBitsInternal( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 ); NtGdiGetDIBitsInternal PROC STDCALL mov r10 , rcx mov eax , 4229 ;syscall db 0Fh , 05h ret NtGdiGetDIBitsInternal ENDP ; ULONG64 __stdcall NtUserGetUpdateRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserGetUpdateRgn PROC STDCALL mov r10 , rcx mov eax , 4230 ;syscall db 0Fh , 05h ret NtUserGetUpdateRgn ENDP ; ULONG64 __stdcall NtGdiDeleteClientObj( ULONG64 arg_01 ); NtGdiDeleteClientObj PROC STDCALL mov r10 , rcx mov eax , 4231 ;syscall db 0Fh , 05h ret NtGdiDeleteClientObj ENDP ; ULONG64 __stdcall NtUserGetIconSize( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserGetIconSize PROC STDCALL mov r10 , rcx mov eax , 4232 ;syscall db 0Fh , 05h ret NtUserGetIconSize ENDP ; ULONG64 __stdcall NtUserFillWindow( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserFillWindow PROC STDCALL mov r10 , rcx mov eax , 4233 ;syscall db 0Fh , 05h ret NtUserFillWindow 
ENDP ; ULONG64 __stdcall NtGdiExtCreateRegion( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiExtCreateRegion PROC STDCALL mov r10 , rcx mov eax , 4234 ;syscall db 0Fh , 05h ret NtGdiExtCreateRegion ENDP ; ULONG64 __stdcall NtGdiComputeXformCoefficients( ULONG64 arg_01 ); NtGdiComputeXformCoefficients PROC STDCALL mov r10 , rcx mov eax , 4235 ;syscall db 0Fh , 05h ret NtGdiComputeXformCoefficients ENDP ; ULONG64 __stdcall NtUserSetWindowsHookEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtUserSetWindowsHookEx PROC STDCALL mov r10 , rcx mov eax , 4236 ;syscall db 0Fh , 05h ret NtUserSetWindowsHookEx ENDP ; ULONG64 __stdcall NtUserNotifyProcessCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserNotifyProcessCreate PROC STDCALL mov r10 , rcx mov eax , 4237 ;syscall db 0Fh , 05h ret NtUserNotifyProcessCreate ENDP ; ULONG64 __stdcall NtGdiUnrealizeObject( ULONG64 arg_01 ); NtGdiUnrealizeObject PROC STDCALL mov r10 , rcx mov eax , 4238 ;syscall db 0Fh , 05h ret NtGdiUnrealizeObject ENDP ; ULONG64 __stdcall NtUserGetTitleBarInfo( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserGetTitleBarInfo PROC STDCALL mov r10 , rcx mov eax , 4239 ;syscall db 0Fh , 05h ret NtUserGetTitleBarInfo ENDP ; ULONG64 __stdcall NtGdiRectangle( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiRectangle PROC STDCALL mov r10 , rcx mov eax , 4240 ;syscall db 0Fh , 05h ret NtGdiRectangle ENDP ; ULONG64 __stdcall NtUserSetThreadDesktop( ULONG64 arg_01 ); NtUserSetThreadDesktop PROC STDCALL mov r10 , rcx mov eax , 4241 ;syscall db 0Fh , 05h ret NtUserSetThreadDesktop ENDP ; ULONG64 __stdcall NtUserGetDCEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserGetDCEx PROC STDCALL mov r10 , rcx mov eax , 4242 ;syscall db 0Fh , 05h ret NtUserGetDCEx ENDP ; ULONG64 __stdcall NtUserGetScrollBarInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); 
NtUserGetScrollBarInfo PROC STDCALL mov r10 , rcx mov eax , 4243 ;syscall db 0Fh , 05h ret NtUserGetScrollBarInfo ENDP ; ULONG64 __stdcall NtGdiGetTextExtent( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiGetTextExtent PROC STDCALL mov r10 , rcx mov eax , 4244 ;syscall db 0Fh , 05h ret NtGdiGetTextExtent ENDP ; ULONG64 __stdcall NtUserSetWindowFNID( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetWindowFNID PROC STDCALL mov r10 , rcx mov eax , 4245 ;syscall db 0Fh , 05h ret NtUserSetWindowFNID ENDP ; ULONG64 __stdcall NtGdiSetLayout( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetLayout PROC STDCALL mov r10 , rcx mov eax , 4246 ;syscall db 0Fh , 05h ret NtGdiSetLayout ENDP ; ULONG64 __stdcall NtUserCalcMenuBar( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtUserCalcMenuBar PROC STDCALL mov r10 , rcx mov eax , 4247 ;syscall db 0Fh , 05h ret NtUserCalcMenuBar ENDP ; ULONG64 __stdcall NtUserThunkedMenuItemInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtUserThunkedMenuItemInfo PROC STDCALL mov r10 , rcx mov eax , 4248 ;syscall db 0Fh , 05h ret NtUserThunkedMenuItemInfo ENDP ; ULONG64 __stdcall NtGdiExcludeClipRect( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiExcludeClipRect PROC STDCALL mov r10 , rcx mov eax , 4249 ;syscall db 0Fh , 05h ret NtGdiExcludeClipRect ENDP ; ULONG64 __stdcall NtGdiCreateDIBSection( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 ); NtGdiCreateDIBSection PROC STDCALL mov r10 , rcx mov eax , 4250 ;syscall db 0Fh , 05h ret NtGdiCreateDIBSection ENDP ; ULONG64 __stdcall NtGdiGetDCforBitmap( ULONG64 arg_01 ); NtGdiGetDCforBitmap PROC STDCALL mov r10 , rcx mov eax , 4251 ;syscall db 0Fh , 05h ret NtGdiGetDCforBitmap ENDP ; ULONG64 __stdcall 
NtUserDestroyCursor( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserDestroyCursor PROC STDCALL mov r10 , rcx mov eax , 4252 ;syscall db 0Fh , 05h ret NtUserDestroyCursor ENDP ; ULONG64 __stdcall NtUserDestroyWindow( ULONG64 arg_01 ); NtUserDestroyWindow PROC STDCALL mov r10 , rcx mov eax , 4253 ;syscall db 0Fh , 05h ret NtUserDestroyWindow ENDP ; ULONG64 __stdcall NtUserCallHwndParam( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserCallHwndParam PROC STDCALL mov r10 , rcx mov eax , 4254 ;syscall db 0Fh , 05h ret NtUserCallHwndParam ENDP ; ULONG64 __stdcall NtGdiCreateDIBitmapInternal( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 ); NtGdiCreateDIBitmapInternal PROC STDCALL mov r10 , rcx mov eax , 4255 ;syscall db 0Fh , 05h ret NtGdiCreateDIBitmapInternal ENDP ; ULONG64 __stdcall NtUserOpenWindowStation( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserOpenWindowStation PROC STDCALL mov r10 , rcx mov eax , 4256 ;syscall db 0Fh , 05h ret NtUserOpenWindowStation ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4257 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4258 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4259 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtUserSetCursorIconData( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); 
NtUserSetCursorIconData PROC STDCALL mov r10 , rcx mov eax , 4260 ;syscall db 0Fh , 05h ret NtUserSetCursorIconData ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4261 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtUserCloseDesktop( ULONG64 arg_01 ); NtUserCloseDesktop PROC STDCALL mov r10 , rcx mov eax , 4262 ;syscall db 0Fh , 05h ret NtUserCloseDesktop ENDP ; ULONG64 __stdcall NtUserOpenDesktop( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserOpenDesktop PROC STDCALL mov r10 , rcx mov eax , 4263 ;syscall db 0Fh , 05h ret NtUserOpenDesktop ENDP ; ULONG64 __stdcall NtUserSetProcessWindowStation( ULONG64 arg_01 ); NtUserSetProcessWindowStation PROC STDCALL mov r10 , rcx mov eax , 4264 ;syscall db 0Fh , 05h ret NtUserSetProcessWindowStation ENDP ; ULONG64 __stdcall NtUserGetAtomName( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserGetAtomName PROC STDCALL mov r10 , rcx mov eax , 4265 ;syscall db 0Fh , 05h ret NtUserGetAtomName ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4266 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiExtCreatePen( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 ); NtGdiExtCreatePen PROC STDCALL mov r10 , rcx mov eax , 4267 ;syscall db 0Fh , 05h ret NtGdiExtCreatePen ENDP ; ULONG64 __stdcall NtGdiCreatePaletteInternal( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiCreatePaletteInternal PROC STDCALL mov r10 , rcx mov eax , 4268 ;syscall db 0Fh , 05h ret NtGdiCreatePaletteInternal ENDP ; ULONG64 __stdcall NtGdiSetBrushOrg( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); 
NtGdiSetBrushOrg PROC STDCALL mov r10 , rcx mov eax , 4269 ;syscall db 0Fh , 05h ret NtGdiSetBrushOrg ENDP ; ULONG64 __stdcall NtUserBuildNameList( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserBuildNameList PROC STDCALL mov r10 , rcx mov eax , 4270 ;syscall db 0Fh , 05h ret NtUserBuildNameList ENDP ; ULONG64 __stdcall NtGdiSetPixel( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiSetPixel PROC STDCALL mov r10 , rcx mov eax , 4271 ;syscall db 0Fh , 05h ret NtGdiSetPixel ENDP ; ULONG64 __stdcall NtUserRegisterClassExWOW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 ); NtUserRegisterClassExWOW PROC STDCALL mov r10 , rcx mov eax , 4272 ;syscall db 0Fh , 05h ret NtUserRegisterClassExWOW ENDP ; ULONG64 __stdcall NtGdiCreatePatternBrushInternal( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiCreatePatternBrushInternal PROC STDCALL mov r10 , rcx mov eax , 4273 ;syscall db 0Fh , 05h ret NtGdiCreatePatternBrushInternal ENDP ; ULONG64 __stdcall NtUserGetAncestor( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserGetAncestor PROC STDCALL mov r10 , rcx mov eax , 4274 ;syscall db 0Fh , 05h ret NtUserGetAncestor ENDP ; ULONG64 __stdcall NtGdiGetOutlineTextMetricsInternalW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiGetOutlineTextMetricsInternalW PROC STDCALL mov r10 , rcx mov eax , 4275 ;syscall db 0Fh , 05h ret NtGdiGetOutlineTextMetricsInternalW ENDP ; ULONG64 __stdcall NtGdiSetBitmapBits( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetBitmapBits PROC STDCALL mov r10 , rcx mov eax , 4276 ;syscall db 0Fh , 05h ret NtGdiSetBitmapBits ENDP ; ULONG64 __stdcall NtUserCloseWindowStation( ULONG64 arg_01 ); NtUserCloseWindowStation PROC STDCALL mov r10 , rcx mov eax , 4277 ;syscall db 0Fh , 05h ret NtUserCloseWindowStation ENDP ; ULONG64 __stdcall NtUserGetDoubleClickTime( ); NtUserGetDoubleClickTime PROC STDCALL 
mov r10 , rcx mov eax , 4278 ;syscall db 0Fh , 05h ret NtUserGetDoubleClickTime ENDP ; ULONG64 __stdcall NtUserEnableScrollBar( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserEnableScrollBar PROC STDCALL mov r10 , rcx mov eax , 4279 ;syscall db 0Fh , 05h ret NtUserEnableScrollBar ENDP ; ULONG64 __stdcall NtGdiCreateSolidBrush( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiCreateSolidBrush PROC STDCALL mov r10 , rcx mov eax , 4280 ;syscall db 0Fh , 05h ret NtGdiCreateSolidBrush ENDP ; ULONG64 __stdcall NtUserGetClassInfoEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtUserGetClassInfoEx PROC STDCALL mov r10 , rcx mov eax , 4281 ;syscall db 0Fh , 05h ret NtUserGetClassInfoEx ENDP ; ULONG64 __stdcall NtGdiCreateClientObj( ULONG64 arg_01 ); NtGdiCreateClientObj PROC STDCALL mov r10 , rcx mov eax , 4282 ;syscall db 0Fh , 05h ret NtGdiCreateClientObj ENDP ; ULONG64 __stdcall NtUserUnregisterClass( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserUnregisterClass PROC STDCALL mov r10 , rcx mov eax , 4283 ;syscall db 0Fh , 05h ret NtUserUnregisterClass ENDP ; ULONG64 __stdcall NtUserDeleteMenu( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserDeleteMenu PROC STDCALL mov r10 , rcx mov eax , 4284 ;syscall db 0Fh , 05h ret NtUserDeleteMenu ENDP ; ULONG64 __stdcall NtGdiRectInRegion( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiRectInRegion PROC STDCALL mov r10 , rcx mov eax , 4285 ;syscall db 0Fh , 05h ret NtGdiRectInRegion ENDP ; ULONG64 __stdcall NtUserScrollWindowEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtUserScrollWindowEx PROC STDCALL mov r10 , rcx mov eax , 4286 ;syscall db 0Fh , 05h ret NtUserScrollWindowEx ENDP ; ULONG64 __stdcall NtGdiGetPixel( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetPixel PROC STDCALL mov r10 , rcx mov eax , 4287 ;syscall db 0Fh , 05h ret NtGdiGetPixel ENDP ; ULONG64 
__stdcall NtUserSetClassLong( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetClassLong PROC STDCALL mov r10 , rcx mov eax , 4288 ;syscall db 0Fh , 05h ret NtUserSetClassLong ENDP ; ULONG64 __stdcall NtUserGetMenuBarInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserGetMenuBarInfo PROC STDCALL mov r10 , rcx mov eax , 4289 ;syscall db 0Fh , 05h ret NtUserGetMenuBarInfo ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4290 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4291 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiGetNearestPaletteIndex( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetNearestPaletteIndex PROC STDCALL mov r10 , rcx mov eax , 4292 ;syscall db 0Fh , 05h ret NtGdiGetNearestPaletteIndex ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4293 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4294 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiGetCharWidthW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiGetCharWidthW PROC STDCALL mov r10 , rcx mov eax , 4295 ;syscall db 0Fh , 05h ret NtGdiGetCharWidthW ENDP ; ULONG64 __stdcall NtUserInvalidateRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserInvalidateRgn PROC STDCALL mov r10 , rcx mov 
eax , 4296 ;syscall db 0Fh , 05h ret NtUserInvalidateRgn ENDP ; ULONG64 __stdcall NtUserGetClipboardOwner( ); NtUserGetClipboardOwner PROC STDCALL mov r10 , rcx mov eax , 4297 ;syscall db 0Fh , 05h ret NtUserGetClipboardOwner ENDP ; ULONG64 __stdcall NtUserSetWindowRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetWindowRgn PROC STDCALL mov r10 , rcx mov eax , 4298 ;syscall db 0Fh , 05h ret NtUserSetWindowRgn ENDP ; ULONG64 __stdcall NtUserBitBltSysBmp( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtUserBitBltSysBmp PROC STDCALL mov r10 , rcx mov eax , 4299 ;syscall db 0Fh , 05h ret NtUserBitBltSysBmp ENDP ; ULONG64 __stdcall NtGdiGetCharWidthInfo( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetCharWidthInfo PROC STDCALL mov r10 , rcx mov eax , 4300 ;syscall db 0Fh , 05h ret NtGdiGetCharWidthInfo ENDP ; ULONG64 __stdcall NtUserValidateRect( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserValidateRect PROC STDCALL mov r10 , rcx mov eax , 4301 ;syscall db 0Fh , 05h ret NtUserValidateRect ENDP ; ULONG64 __stdcall NtUserCloseClipboard( ); NtUserCloseClipboard PROC STDCALL mov r10 , rcx mov eax , 4302 ;syscall db 0Fh , 05h ret NtUserCloseClipboard ENDP ; ULONG64 __stdcall NtUserOpenClipboard( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserOpenClipboard PROC STDCALL mov r10 , rcx mov eax , 4303 ;syscall db 0Fh , 05h ret NtUserOpenClipboard ENDP ; ULONG64 __stdcall NtGdiGetStockObject( ULONG64 arg_01 ); NtGdiGetStockObject PROC STDCALL mov r10 , rcx mov eax , 4304 ;syscall db 0Fh , 05h ret NtGdiGetStockObject ENDP ; ULONG64 __stdcall NtUserSetClipboardData( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetClipboardData PROC STDCALL mov r10 , rcx mov eax , 4305 ;syscall db 0Fh , 05h ret NtUserSetClipboardData ENDP ; ULONG64 __stdcall NtUserEnableMenuItem( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserEnableMenuItem PROC STDCALL mov r10 , rcx mov eax , 4306 
;syscall db 0Fh , 05h ret NtUserEnableMenuItem ENDP ; ULONG64 __stdcall NtUserAlterWindowStyle( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserAlterWindowStyle PROC STDCALL mov r10 , rcx mov eax , 4307 ;syscall db 0Fh , 05h ret NtUserAlterWindowStyle ENDP ; ULONG64 __stdcall NtGdiFillRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiFillRgn PROC STDCALL mov r10 , rcx mov eax , 4308 ;syscall db 0Fh , 05h ret NtGdiFillRgn ENDP ; ULONG64 __stdcall NtUserGetWindowPlacement( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserGetWindowPlacement PROC STDCALL mov r10 , rcx mov eax , 4309 ;syscall db 0Fh , 05h ret NtUserGetWindowPlacement ENDP ; ULONG64 __stdcall NtGdiModifyWorldTransform( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiModifyWorldTransform PROC STDCALL mov r10 , rcx mov eax , 4310 ;syscall db 0Fh , 05h ret NtGdiModifyWorldTransform ENDP ; ULONG64 __stdcall NtGdiGetFontData( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiGetFontData PROC STDCALL mov r10 , rcx mov eax , 4311 ;syscall db 0Fh , 05h ret NtGdiGetFontData ENDP ; ULONG64 __stdcall NtUserGetOpenClipboardWindow( ); NtUserGetOpenClipboardWindow PROC STDCALL mov r10 , rcx mov eax , 4312 ;syscall db 0Fh , 05h ret NtUserGetOpenClipboardWindow ENDP ; ULONG64 __stdcall NtUserSetThreadState( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetThreadState PROC STDCALL mov r10 , rcx mov eax , 4313 ;syscall db 0Fh , 05h ret NtUserSetThreadState ENDP ; ULONG64 __stdcall NtGdiOpenDCW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 ); NtGdiOpenDCW PROC STDCALL mov r10 , rcx mov eax , 4314 ;syscall db 0Fh , 05h ret NtGdiOpenDCW ENDP ; ULONG64 __stdcall NtUserTrackMouseEvent( ULONG64 arg_01 ); NtUserTrackMouseEvent PROC STDCALL mov r10 , rcx mov eax , 4315 ;syscall db 0Fh , 05h ret NtUserTrackMouseEvent ENDP ; ULONG64 __stdcall NtGdiGetTransform( ULONG64 
arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetTransform PROC STDCALL mov r10 , rcx mov eax , 4316 ;syscall db 0Fh , 05h ret NtGdiGetTransform ENDP ; ULONG64 __stdcall NtUserDestroyMenu( ULONG64 arg_01 ); NtUserDestroyMenu PROC STDCALL mov r10 , rcx mov eax , 4317 ;syscall db 0Fh , 05h ret NtUserDestroyMenu ENDP ; ULONG64 __stdcall NtGdiGetBitmapBits( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetBitmapBits PROC STDCALL mov r10 , rcx mov eax , 4318 ;syscall db 0Fh , 05h ret NtGdiGetBitmapBits ENDP ; ULONG64 __stdcall NtUserConsoleControl( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserConsoleControl PROC STDCALL mov r10 , rcx mov eax , 4319 ;syscall db 0Fh , 05h ret NtUserConsoleControl ENDP ; ULONG64 __stdcall NtUserSetActiveWindow( ULONG64 arg_01 ); NtUserSetActiveWindow PROC STDCALL mov r10 , rcx mov eax , 4320 ;syscall db 0Fh , 05h ret NtUserSetActiveWindow ENDP ; ULONG64 __stdcall NtUserSetInformationThread( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetInformationThread PROC STDCALL mov r10 , rcx mov eax , 4321 ;syscall db 0Fh , 05h ret NtUserSetInformationThread ENDP ; ULONG64 __stdcall NtUserSetWindowPlacement( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetWindowPlacement PROC STDCALL mov r10 , rcx mov eax , 4322 ;syscall db 0Fh , 05h ret NtUserSetWindowPlacement ENDP ; ULONG64 __stdcall NtUserGetControlColor( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserGetControlColor PROC STDCALL mov r10 , rcx mov eax , 4323 ;syscall db 0Fh , 05h ret NtUserGetControlColor ENDP ; ULONG64 __stdcall NtGdiSetMetaRgn( ULONG64 arg_01 ); NtGdiSetMetaRgn PROC STDCALL mov r10 , rcx mov eax , 4324 ;syscall db 0Fh , 05h ret NtGdiSetMetaRgn ENDP ; ULONG64 __stdcall NtGdiSetMiterLimit( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetMiterLimit PROC STDCALL mov r10 , rcx mov eax , 4325 ;syscall db 0Fh , 05h ret NtGdiSetMiterLimit ENDP ; ULONG64 __stdcall NtGdiSetVirtualResolution( 
ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiSetVirtualResolution PROC STDCALL mov r10 , rcx mov eax , 4326 ;syscall db 0Fh , 05h ret NtGdiSetVirtualResolution ENDP ; ULONG64 __stdcall NtGdiGetRasterizerCaps( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetRasterizerCaps PROC STDCALL mov r10 , rcx mov eax , 4327 ;syscall db 0Fh , 05h ret NtGdiGetRasterizerCaps ENDP ; ULONG64 __stdcall NtUserSetWindowWord( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetWindowWord PROC STDCALL mov r10 , rcx mov eax , 4328 ;syscall db 0Fh , 05h ret NtUserSetWindowWord ENDP ; ULONG64 __stdcall NtUserGetClipboardFormatName( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserGetClipboardFormatName PROC STDCALL mov r10 , rcx mov eax , 4329 ;syscall db 0Fh , 05h ret NtUserGetClipboardFormatName ENDP ; ULONG64 __stdcall NtUserRealInternalGetMessage( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtUserRealInternalGetMessage PROC STDCALL mov r10 , rcx mov eax , 4330 ;syscall db 0Fh , 05h ret NtUserRealInternalGetMessage ENDP ; ULONG64 __stdcall NtUserCreateLocalMemHandle( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserCreateLocalMemHandle PROC STDCALL mov r10 , rcx mov eax , 4331 ;syscall db 0Fh , 05h ret NtUserCreateLocalMemHandle ENDP ; ULONG64 __stdcall NtUserAttachThreadInput( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserAttachThreadInput PROC STDCALL mov r10 , rcx mov eax , 4332 ;syscall db 0Fh , 05h ret NtUserAttachThreadInput ENDP ; ULONG64 __stdcall NtGdiCreateHalftonePalette( ULONG64 arg_01 ); NtGdiCreateHalftonePalette PROC STDCALL mov r10 , rcx mov eax , 4333 ;syscall db 0Fh , 05h ret NtGdiCreateHalftonePalette ENDP ; ULONG64 __stdcall NtUserPaintMenuBar( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtUserPaintMenuBar PROC STDCALL mov r10 , rcx mov eax , 4334 ;syscall 
db 0Fh , 05h ret NtUserPaintMenuBar ENDP ; ULONG64 __stdcall NtUserSetKeyboardState( ULONG64 arg_01 ); NtUserSetKeyboardState PROC STDCALL mov r10 , rcx mov eax , 4335 ;syscall db 0Fh , 05h ret NtUserSetKeyboardState ENDP ; ULONG64 __stdcall NtGdiCombineTransform( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiCombineTransform PROC STDCALL mov r10 , rcx mov eax , 4336 ;syscall db 0Fh , 05h ret NtGdiCombineTransform ENDP ; ULONG64 __stdcall NtUserCreateAcceleratorTable( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserCreateAcceleratorTable PROC STDCALL mov r10 , rcx mov eax , 4337 ;syscall db 0Fh , 05h ret NtUserCreateAcceleratorTable ENDP ; ULONG64 __stdcall NtUserGetCursorFrameInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserGetCursorFrameInfo PROC STDCALL mov r10 , rcx mov eax , 4338 ;syscall db 0Fh , 05h ret NtUserGetCursorFrameInfo ENDP ; ULONG64 __stdcall NtUserGetAltTabInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtUserGetAltTabInfo PROC STDCALL mov r10 , rcx mov eax , 4339 ;syscall db 0Fh , 05h ret NtUserGetAltTabInfo ENDP ; ULONG64 __stdcall NtUserGetCaretBlinkTime( ); NtUserGetCaretBlinkTime PROC STDCALL mov r10 , rcx mov eax , 4340 ;syscall db 0Fh , 05h ret NtUserGetCaretBlinkTime ENDP ; ULONG64 __stdcall NtGdiQueryFontAssocInfo( ULONG64 arg_01 ); NtGdiQueryFontAssocInfo PROC STDCALL mov r10 , rcx mov eax , 4341 ;syscall db 0Fh , 05h ret NtGdiQueryFontAssocInfo ENDP ; ULONG64 __stdcall NtUserProcessConnect( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserProcessConnect PROC STDCALL mov r10 , rcx mov eax , 4342 ;syscall db 0Fh , 05h ret NtUserProcessConnect ENDP ; ULONG64 __stdcall NtUserEnumDisplayDevices( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserEnumDisplayDevices PROC STDCALL mov r10 , rcx mov eax , 4343 ;syscall db 0Fh , 05h ret NtUserEnumDisplayDevices ENDP ; ULONG64 __stdcall NtUserEmptyClipboard( ); NtUserEmptyClipboard PROC 
STDCALL mov r10 , rcx mov eax , 4344 ;syscall db 0Fh , 05h ret NtUserEmptyClipboard ENDP ; ULONG64 __stdcall NtUserGetClipboardData( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserGetClipboardData PROC STDCALL mov r10 , rcx mov eax , 4345 ;syscall db 0Fh , 05h ret NtUserGetClipboardData ENDP ; ULONG64 __stdcall NtUserRemoveMenu( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserRemoveMenu PROC STDCALL mov r10 , rcx mov eax , 4346 ;syscall db 0Fh , 05h ret NtUserRemoveMenu ENDP ; ULONG64 __stdcall NtGdiSetBoundsRect( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetBoundsRect PROC STDCALL mov r10 , rcx mov eax , 4347 ;syscall db 0Fh , 05h ret NtGdiSetBoundsRect ENDP ; ULONG64 __stdcall NtGdiGetBitmapDimension( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetBitmapDimension PROC STDCALL mov r10 , rcx mov eax , 4348 ;syscall db 0Fh , 05h ret NtGdiGetBitmapDimension ENDP ; ULONG64 __stdcall NtUserConvertMemHandle( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserConvertMemHandle PROC STDCALL mov r10 , rcx mov eax , 4349 ;syscall db 0Fh , 05h ret NtUserConvertMemHandle ENDP ; ULONG64 __stdcall NtUserDestroyAcceleratorTable( ULONG64 arg_01 ); NtUserDestroyAcceleratorTable PROC STDCALL mov r10 , rcx mov eax , 4350 ;syscall db 0Fh , 05h ret NtUserDestroyAcceleratorTable ENDP ; ULONG64 __stdcall NtUserGetGUIThreadInfo( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserGetGUIThreadInfo PROC STDCALL mov r10 , rcx mov eax , 4351 ;syscall db 0Fh , 05h ret NtUserGetGUIThreadInfo ENDP ; ULONG64 __stdcall NtGdiCloseFigure( ULONG64 arg_01 ); NtGdiCloseFigure PROC STDCALL mov r10 , rcx mov eax , 4352 ;syscall db 0Fh , 05h ret NtGdiCloseFigure ENDP ; ULONG64 __stdcall NtUserSetWindowsHookAW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetWindowsHookAW PROC STDCALL mov r10 , rcx mov eax , 4353 ;syscall db 0Fh , 05h ret NtUserSetWindowsHookAW ENDP ; ULONG64 __stdcall NtUserSetMenuDefaultItem( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetMenuDefaultItem PROC STDCALL 
mov r10 , rcx mov eax , 4354 ;syscall db 0Fh , 05h ret NtUserSetMenuDefaultItem ENDP ; ULONG64 __stdcall NtUserCheckMenuItem( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserCheckMenuItem PROC STDCALL mov r10 , rcx mov eax , 4355 ;syscall db 0Fh , 05h ret NtUserCheckMenuItem ENDP ; ULONG64 __stdcall NtUserSetWinEventHook( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtUserSetWinEventHook PROC STDCALL mov r10 , rcx mov eax , 4356 ;syscall db 0Fh , 05h ret NtUserSetWinEventHook ENDP ; ULONG64 __stdcall NtUserUnhookWinEvent( ULONG64 arg_01 ); NtUserUnhookWinEvent PROC STDCALL mov r10 , rcx mov eax , 4357 ;syscall db 0Fh , 05h ret NtUserUnhookWinEvent ENDP ; ULONG64 __stdcall NtUserLockWindowUpdate( ULONG64 arg_01 ); NtUserLockWindowUpdate PROC STDCALL mov r10 , rcx mov eax , 4358 ;syscall db 0Fh , 05h ret NtUserLockWindowUpdate ENDP ; ULONG64 __stdcall NtUserSetSystemMenu( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetSystemMenu PROC STDCALL mov r10 , rcx mov eax , 4359 ;syscall db 0Fh , 05h ret NtUserSetSystemMenu ENDP ; ULONG64 __stdcall NtUserThunkedMenuInfo( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserThunkedMenuInfo PROC STDCALL mov r10 , rcx mov eax , 4360 ;syscall db 0Fh , 05h ret NtUserThunkedMenuInfo ENDP ; ULONG64 __stdcall NtGdiBeginPath( ULONG64 arg_01 ); NtGdiBeginPath PROC STDCALL mov r10 , rcx mov eax , 4361 ;syscall db 0Fh , 05h ret NtGdiBeginPath ENDP ; ULONG64 __stdcall NtGdiEndPath( ULONG64 arg_01 ); NtGdiEndPath PROC STDCALL mov r10 , rcx mov eax , 4362 ;syscall db 0Fh , 05h ret NtGdiEndPath ENDP ; ULONG64 __stdcall NtGdiFillPath( ULONG64 arg_01 ); NtGdiFillPath PROC STDCALL mov r10 , rcx mov eax , 4363 ;syscall db 0Fh , 05h ret NtGdiFillPath ENDP ; ULONG64 __stdcall NtUserCallHwnd( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserCallHwnd PROC STDCALL mov r10 , rcx mov eax , 4364 ;syscall db 0Fh , 05h ret NtUserCallHwnd ENDP ; ULONG64 __stdcall 
NtUserDdeInitialize( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtUserDdeInitialize PROC STDCALL mov r10 , rcx mov eax , 4365 ;syscall db 0Fh , 05h ret NtUserDdeInitialize ENDP ; ULONG64 __stdcall NtUserModifyUserStartupInfoFlags( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserModifyUserStartupInfoFlags PROC STDCALL mov r10 , rcx mov eax , 4366 ;syscall db 0Fh , 05h ret NtUserModifyUserStartupInfoFlags ENDP ; ULONG64 __stdcall NtUserCountClipboardFormats( ); NtUserCountClipboardFormats PROC STDCALL mov r10 , rcx mov eax , 4367 ;syscall db 0Fh , 05h ret NtUserCountClipboardFormats ENDP ; ULONG64 __stdcall NtGdiAddFontMemResourceEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiAddFontMemResourceEx PROC STDCALL mov r10 , rcx mov eax , 4368 ;syscall db 0Fh , 05h ret NtGdiAddFontMemResourceEx ENDP ; ULONG64 __stdcall NtGdiEqualRgn( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiEqualRgn PROC STDCALL mov r10 , rcx mov eax , 4369 ;syscall db 0Fh , 05h ret NtGdiEqualRgn ENDP ; ULONG64 __stdcall NtGdiGetSystemPaletteUse( ULONG64 arg_01 ); NtGdiGetSystemPaletteUse PROC STDCALL mov r10 , rcx mov eax , 4370 ;syscall db 0Fh , 05h ret NtGdiGetSystemPaletteUse ENDP ; ULONG64 __stdcall NtGdiRemoveFontMemResourceEx( ULONG64 arg_01 ); NtGdiRemoveFontMemResourceEx PROC STDCALL mov r10 , rcx mov eax , 4371 ;syscall db 0Fh , 05h ret NtGdiRemoveFontMemResourceEx ENDP ; ULONG64 __stdcall NtUserEnumDisplaySettings( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserEnumDisplaySettings PROC STDCALL mov r10 , rcx mov eax , 4372 ;syscall db 0Fh , 05h ret NtUserEnumDisplaySettings ENDP ; ULONG64 __stdcall NtUserPaintDesktop( ULONG64 arg_01 ); NtUserPaintDesktop PROC STDCALL mov r10 , rcx mov eax , 4373 ;syscall db 0Fh , 05h ret NtUserPaintDesktop ENDP ; ULONG64 __stdcall NtGdiExtEscape( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 
arg_07 , ULONG64 arg_08 ); NtGdiExtEscape PROC STDCALL mov r10 , rcx mov eax , 4374 ;syscall db 0Fh , 05h ret NtGdiExtEscape ENDP ; ULONG64 __stdcall NtGdiSetBitmapDimension( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiSetBitmapDimension PROC STDCALL mov r10 , rcx mov eax , 4375 ;syscall db 0Fh , 05h ret NtGdiSetBitmapDimension ENDP ; ULONG64 __stdcall NtGdiSetFontEnumeration( ULONG64 arg_01 ); NtGdiSetFontEnumeration PROC STDCALL mov r10 , rcx mov eax , 4376 ;syscall db 0Fh , 05h ret NtGdiSetFontEnumeration ENDP ; ULONG64 __stdcall NtUserChangeClipboardChain( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserChangeClipboardChain PROC STDCALL mov r10 , rcx mov eax , 4377 ;syscall db 0Fh , 05h ret NtUserChangeClipboardChain ENDP ; ULONG64 __stdcall NtUserSetClipboardViewer( ULONG64 arg_01 ); NtUserSetClipboardViewer PROC STDCALL mov r10 , rcx mov eax , 4378 ;syscall db 0Fh , 05h ret NtUserSetClipboardViewer ENDP ; ULONG64 __stdcall NtUserShowWindowAsync( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserShowWindowAsync PROC STDCALL mov r10 , rcx mov eax , 4379 ;syscall db 0Fh , 05h ret NtUserShowWindowAsync ENDP ; ULONG64 __stdcall NtGdiCreateColorSpace( ULONG64 arg_01 ); NtGdiCreateColorSpace PROC STDCALL mov r10 , rcx mov eax , 4380 ;syscall db 0Fh , 05h ret NtGdiCreateColorSpace ENDP ; ULONG64 __stdcall NtGdiDeleteColorSpace( ULONG64 arg_01 ); NtGdiDeleteColorSpace PROC STDCALL mov r10 , rcx mov eax , 4381 ;syscall db 0Fh , 05h ret NtGdiDeleteColorSpace ENDP ; ULONG64 __stdcall NtUserActivateKeyboardLayout( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserActivateKeyboardLayout PROC STDCALL mov r10 , rcx mov eax , 4382 ;syscall db 0Fh , 05h ret NtUserActivateKeyboardLayout ENDP ; ULONG64 __stdcall NtBindCompositionSurface( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtBindCompositionSurface PROC STDCALL mov r10 , rcx mov eax , 4383 ;syscall db 0Fh , 05h ret NtBindCompositionSurface ENDP ; ULONG64 __stdcall 
NtCreateCompositionSurfaceHandle( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtCreateCompositionSurfaceHandle PROC STDCALL mov r10 , rcx mov eax , 4384 ;syscall db 0Fh , 05h ret NtCreateCompositionSurfaceHandle ENDP ; ULONG64 __stdcall NtDCompositionAddCrossDeviceVisualChild( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 ); NtDCompositionAddCrossDeviceVisualChild PROC STDCALL mov r10 , rcx mov eax , 4385 ;syscall db 0Fh , 05h ret NtDCompositionAddCrossDeviceVisualChild ENDP ; ULONG64 __stdcall NtDCompositionAddVisualChild( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtDCompositionAddVisualChild PROC STDCALL mov r10 , rcx mov eax , 4386 ;syscall db 0Fh , 05h ret NtDCompositionAddVisualChild ENDP ; ULONG64 __stdcall NtDCompositionBeginFrame( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtDCompositionBeginFrame PROC STDCALL mov r10 , rcx mov eax , 4387 ;syscall db 0Fh , 05h ret NtDCompositionBeginFrame ENDP ; ULONG64 __stdcall NtDCompositionCommitChannel( ULONG64 arg_01 , ULONG64 arg_02 ); NtDCompositionCommitChannel PROC STDCALL mov r10 , rcx mov eax , 4388 ;syscall db 0Fh , 05h ret NtDCompositionCommitChannel ENDP ; ULONG64 __stdcall NtDCompositionConfirmFrame( ULONG64 arg_01 , ULONG64 arg_02 ); NtDCompositionConfirmFrame PROC STDCALL mov r10 , rcx mov eax , 4389 ;syscall db 0Fh , 05h ret NtDCompositionConfirmFrame ENDP ; ULONG64 __stdcall NtDCompositionGetChannels( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionGetChannels PROC STDCALL mov r10 , rcx mov eax , 4390 ;syscall db 0Fh , 05h ret NtDCompositionGetChannels ENDP ; ULONG64 __stdcall NtDCompositionCreateChannel( ULONG64 arg_01 ); NtDCompositionCreateChannel PROC STDCALL mov r10 , rcx mov eax , 4391 ;syscall db 0Fh , 05h ret NtDCompositionCreateChannel ENDP ; ULONG64 __stdcall NtDCompositionCreateConnectionContext( ULONG64 arg_01 , ULONG64 arg_02 
); NtDCompositionCreateConnectionContext PROC STDCALL mov r10 , rcx mov eax , 4392 ;syscall db 0Fh , 05h ret NtDCompositionCreateConnectionContext ENDP ; ULONG64 __stdcall NtDCompositionCreateDwmChannel( ULONG64 arg_01 ); NtDCompositionCreateDwmChannel PROC STDCALL mov r10 , rcx mov eax , 4393 ;syscall db 0Fh , 05h ret NtDCompositionCreateDwmChannel ENDP ; ULONG64 __stdcall NtDCompositionCreateResource( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtDCompositionCreateResource PROC STDCALL mov r10 , rcx mov eax , 4394 ;syscall db 0Fh , 05h ret NtDCompositionCreateResource ENDP ; ULONG64 __stdcall NtDCompositionCurrentBatchId( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtDCompositionCurrentBatchId PROC STDCALL mov r10 , rcx mov eax , 4395 ;syscall db 0Fh , 05h ret NtDCompositionCurrentBatchId ENDP ; ULONG64 __stdcall NtDCompositionDestroyChannel( ULONG64 arg_01 ); NtDCompositionDestroyChannel PROC STDCALL mov r10 , rcx mov eax , 4396 ;syscall db 0Fh , 05h ret NtDCompositionDestroyChannel ENDP ; ULONG64 __stdcall NtDCompositionDestroyConnectionContext( ULONG64 arg_01 ); NtDCompositionDestroyConnectionContext PROC STDCALL mov r10 , rcx mov eax , 4397 ;syscall db 0Fh , 05h ret NtDCompositionDestroyConnectionContext ENDP ; ULONG64 __stdcall NtDCompositionDiscardFrame( ULONG64 arg_01 , ULONG64 arg_02 ); NtDCompositionDiscardFrame PROC STDCALL mov r10 , rcx mov eax , 4398 ;syscall db 0Fh , 05h ret NtDCompositionDiscardFrame ENDP ; ULONG64 __stdcall NtDCompositionDwmSyncFlush( ); NtDCompositionDwmSyncFlush PROC STDCALL mov r10 , rcx mov eax , 4399 ;syscall db 0Fh , 05h ret NtDCompositionDwmSyncFlush ENDP ; ULONG64 __stdcall NtDCompositionGetChannels( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionGetChannels PROC STDCALL mov r10 , rcx mov eax , 4400 ;syscall db 0Fh , 05h ret NtDCompositionGetChannels ENDP ; ULONG64 __stdcall NtDCompositionGetConnectionContextBatch( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 
arg_04 , ULONG64 arg_05 ); NtDCompositionGetConnectionContextBatch PROC STDCALL mov r10 , rcx mov eax , 4401 ;syscall db 0Fh , 05h ret NtDCompositionGetConnectionContextBatch ENDP ; ULONG64 __stdcall NtDCompositionGetDeletedResources( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionGetDeletedResources PROC STDCALL mov r10 , rcx mov eax , 4402 ;syscall db 0Fh , 05h ret NtDCompositionGetDeletedResources ENDP ; ULONG64 __stdcall NtDCompositionGetFrameLegacyTokens( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtDCompositionGetFrameLegacyTokens PROC STDCALL mov r10 , rcx mov eax , 4403 ;syscall db 0Fh , 05h ret NtDCompositionGetFrameLegacyTokens ENDP ; ULONG64 __stdcall NtDCompositionGetFrameStatistics( ULONG64 arg_01 ); NtDCompositionGetFrameStatistics PROC STDCALL mov r10 , rcx mov eax , 4404 ;syscall db 0Fh , 05h ret NtDCompositionGetFrameStatistics ENDP ; ULONG64 __stdcall NtDCompositionGetFrameSurfaceUpdates( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtDCompositionGetFrameSurfaceUpdates PROC STDCALL mov r10 , rcx mov eax , 4405 ;syscall db 0Fh , 05h ret NtDCompositionGetFrameSurfaceUpdates ENDP ; ULONG64 __stdcall NtDCompositionReleaseAllResources( ULONG64 arg_01 , ULONG64 arg_02 ); NtDCompositionReleaseAllResources PROC STDCALL mov r10 , rcx mov eax , 4406 ;syscall db 0Fh , 05h ret NtDCompositionReleaseAllResources ENDP ; ULONG64 __stdcall NtDCompositionReleaseResource( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtDCompositionReleaseResource PROC STDCALL mov r10 , rcx mov eax , 4407 ;syscall db 0Fh , 05h ret NtDCompositionReleaseResource ENDP ; ULONG64 __stdcall NtDCompositionRemoveCrossDeviceVisualChild( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionRemoveCrossDeviceVisualChild PROC STDCALL mov r10 , rcx mov eax , 4408 ;syscall db 0Fh , 05h ret NtDCompositionRemoveCrossDeviceVisualChild ENDP ; 
ULONG64 __stdcall NtDCompositionRemoveVisualChild( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtDCompositionRemoveVisualChild PROC STDCALL mov r10 , rcx mov eax , 4409 ;syscall db 0Fh , 05h ret NtDCompositionRemoveVisualChild ENDP ; ULONG64 __stdcall NtDCompositionReplaceVisualChildren( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtDCompositionReplaceVisualChildren PROC STDCALL mov r10 , rcx mov eax , 4410 ;syscall db 0Fh , 05h ret NtDCompositionReplaceVisualChildren ENDP ; ULONG64 __stdcall NtDCompositionRetireFrame( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtDCompositionRetireFrame PROC STDCALL mov r10 , rcx mov eax , 4411 ;syscall db 0Fh , 05h ret NtDCompositionRetireFrame ENDP ; ULONG64 __stdcall NtDCompositionSetChannelCommitCompletionEvent( ULONG64 arg_01 , ULONG64 arg_02 ); NtDCompositionSetChannelCommitCompletionEvent PROC STDCALL mov r10 , rcx mov eax , 4412 ;syscall db 0Fh , 05h ret NtDCompositionSetChannelCommitCompletionEvent ENDP ; ULONG64 __stdcall NtDCompositionSetResourceAnimationProperty( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionSetResourceAnimationProperty PROC STDCALL mov r10 , rcx mov eax , 4413 ;syscall db 0Fh , 05h ret NtDCompositionSetResourceAnimationProperty ENDP ; ULONG64 __stdcall NtDCompositionSetResourceBufferProperty( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtDCompositionSetResourceBufferProperty PROC STDCALL mov r10 , rcx mov eax , 4414 ;syscall db 0Fh , 05h ret NtDCompositionSetResourceBufferProperty ENDP ; ULONG64 __stdcall NtDCompositionSetResourceDeletedNotificationTag( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtDCompositionSetResourceDeletedNotificationTag PROC STDCALL mov r10 , rcx mov eax , 4415 ;syscall db 0Fh , 05h ret NtDCompositionSetResourceDeletedNotificationTag ENDP ; ULONG64 __stdcall NtDCompositionSetResourceFloatProperty( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , 
ULONG64 arg_04 ); NtDCompositionSetResourceFloatProperty PROC STDCALL mov r10 , rcx mov eax , 4416 ;syscall db 0Fh , 05h ret NtDCompositionSetResourceFloatProperty ENDP ; ULONG64 __stdcall NtDCompositionSetResourceIntegerProperty( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionSetResourceIntegerProperty PROC STDCALL mov r10 , rcx mov eax , 4417 ;syscall db 0Fh , 05h ret NtDCompositionSetResourceIntegerProperty ENDP ; ULONG64 __stdcall NtDCompositionSetResourceReferenceArrayProperty( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtDCompositionSetResourceReferenceArrayProperty PROC STDCALL mov r10 , rcx mov eax , 4418 ;syscall db 0Fh , 05h ret NtDCompositionSetResourceReferenceArrayProperty ENDP ; ULONG64 __stdcall NtDCompositionSetResourceReferenceProperty( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtDCompositionSetResourceReferenceProperty PROC STDCALL mov r10 , rcx mov eax , 4419 ;syscall db 0Fh , 05h ret NtDCompositionSetResourceReferenceProperty ENDP ; ULONG64 __stdcall NtDCompositionSignalGpuFence( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionSignalGpuFence PROC STDCALL mov r10 , rcx mov eax , 4420 ;syscall db 0Fh , 05h ret NtDCompositionSignalGpuFence ENDP ; ULONG64 __stdcall NtDCompositionSubmitDWMBatch( ULONG64 arg_01 , ULONG64 arg_02 ); NtDCompositionSubmitDWMBatch PROC STDCALL mov r10 , rcx mov eax , 4421 ;syscall db 0Fh , 05h ret NtDCompositionSubmitDWMBatch ENDP ; ULONG64 __stdcall NtDCompositionSynchronize( ULONG64 arg_01 , ULONG64 arg_02 ); NtDCompositionSynchronize PROC STDCALL mov r10 , rcx mov eax , 4422 ;syscall db 0Fh , 05h ret NtDCompositionSynchronize ENDP ; ULONG64 __stdcall NtDCompositionTelemetryTouchInteractionBegin( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtDCompositionTelemetryTouchInteractionBegin PROC 
STDCALL mov r10 , rcx mov eax , 4423 ;syscall db 0Fh , 05h ret NtDCompositionTelemetryTouchInteractionBegin ENDP ; ULONG64 __stdcall NtDCompositionTelemetryTouchInteractionEnd( ULONG64 arg_01 , ULONG64 arg_02 ); NtDCompositionTelemetryTouchInteractionEnd PROC STDCALL mov r10 , rcx mov eax , 4424 ;syscall db 0Fh , 05h ret NtDCompositionTelemetryTouchInteractionEnd ENDP ; ULONG64 __stdcall NtDCompositionTelemetryTouchInteractionUpdate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionTelemetryTouchInteractionUpdate PROC STDCALL mov r10 , rcx mov eax , 4425 ;syscall db 0Fh , 05h ret NtDCompositionTelemetryTouchInteractionUpdate ENDP ; ULONG64 __stdcall NtDCompositionValidateAndReferenceSystemVisualForHwndTarget( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtDCompositionValidateAndReferenceSystemVisualForHwndTarget PROC STDCALL mov r10 , rcx mov eax , 4426 ;syscall db 0Fh , 05h ret NtDCompositionValidateAndReferenceSystemVisualForHwndTarget ENDP ; ULONG64 __stdcall NtDCompositionWaitForChannel( ULONG64 arg_01 ); NtDCompositionWaitForChannel PROC STDCALL mov r10 , rcx mov eax , 4427 ;syscall db 0Fh , 05h ret NtDCompositionWaitForChannel ENDP ; ULONG64 __stdcall NtGdiAbortDoc( ULONG64 arg_01 ); NtGdiAbortDoc PROC STDCALL mov r10 , rcx mov eax , 4428 ;syscall db 0Fh , 05h ret NtGdiAbortDoc ENDP ; ULONG64 __stdcall NtGdiAbortPath( ULONG64 arg_01 ); NtGdiAbortPath PROC STDCALL mov r10 , rcx mov eax , 4429 ;syscall db 0Fh , 05h ret NtGdiAbortPath ENDP ; ULONG64 __stdcall NtGdiAddEmbFontToDC( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiAddEmbFontToDC PROC STDCALL mov r10 , rcx mov eax , 4430 ;syscall db 0Fh , 05h ret NtGdiAddEmbFontToDC ENDP ; ULONG64 __stdcall NtGdiAddFontResourceW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiAddFontResourceW PROC STDCALL mov r10 , rcx mov eax , 4431 ;syscall db 0Fh , 05h ret NtGdiAddFontResourceW ENDP ; ULONG64 __stdcall 
NtGdiAddRemoteFontToDC( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiAddRemoteFontToDC PROC STDCALL mov r10 , rcx mov eax , 4432 ;syscall db 0Fh , 05h ret NtGdiAddRemoteFontToDC ENDP ; ULONG64 __stdcall NtGdiAddRemoteMMInstanceToDC( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiAddRemoteMMInstanceToDC PROC STDCALL mov r10 , rcx mov eax , 4433 ;syscall db 0Fh , 05h ret NtGdiAddRemoteMMInstanceToDC ENDP ; ULONG64 __stdcall NtGdiAngleArc( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiAngleArc PROC STDCALL mov r10 , rcx mov eax , 4434 ;syscall db 0Fh , 05h ret NtGdiAngleArc ENDP ; ULONG64 __stdcall NtGdiAnyLinkedFonts( ); NtGdiAnyLinkedFonts PROC STDCALL mov r10 , rcx mov eax , 4435 ;syscall db 0Fh , 05h ret NtGdiAnyLinkedFonts ENDP ; ULONG64 __stdcall NtGdiArcInternal( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 ); NtGdiArcInternal PROC STDCALL mov r10 , rcx mov eax , 4436 ;syscall db 0Fh , 05h ret NtGdiArcInternal ENDP ; ULONG64 __stdcall NtGdiBRUSHOBJ_DeleteRbrush( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiBRUSHOBJ_DeleteRbrush PROC STDCALL mov r10 , rcx mov eax , 4437 ;syscall db 0Fh , 05h ret NtGdiBRUSHOBJ_DeleteRbrush ENDP ; ULONG64 __stdcall NtGdiBRUSHOBJ_hGetColorTransform( ULONG64 arg_01 ); NtGdiBRUSHOBJ_hGetColorTransform PROC STDCALL mov r10 , rcx mov eax , 4438 ;syscall db 0Fh , 05h ret NtGdiBRUSHOBJ_hGetColorTransform ENDP ; ULONG64 __stdcall NtGdiBRUSHOBJ_pvAllocRbrush( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiBRUSHOBJ_pvAllocRbrush PROC STDCALL mov r10 , rcx mov eax , 4439 ;syscall db 0Fh , 05h ret NtGdiBRUSHOBJ_pvAllocRbrush ENDP ; ULONG64 __stdcall NtGdiBRUSHOBJ_pvGetRbrush( ULONG64 arg_01 ); NtGdiBRUSHOBJ_pvGetRbrush PROC STDCALL mov r10 , rcx mov eax , 4440 ;syscall db 0Fh , 05h ret NtGdiBRUSHOBJ_pvGetRbrush ENDP ; ULONG64 
__stdcall NtGdiBRUSHOBJ_ulGetBrushColor( ULONG64 arg_01 ); NtGdiBRUSHOBJ_ulGetBrushColor PROC STDCALL mov r10 , rcx mov eax , 4441 ;syscall db 0Fh , 05h ret NtGdiBRUSHOBJ_ulGetBrushColor ENDP ; ULONG64 __stdcall NtGdiBeginGdiRendering( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiBeginGdiRendering PROC STDCALL mov r10 , rcx mov eax , 4442 ;syscall db 0Fh , 05h ret NtGdiBeginGdiRendering ENDP ; ULONG64 __stdcall NtGdiCLIPOBJ_bEnum( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiCLIPOBJ_bEnum PROC STDCALL mov r10 , rcx mov eax , 4443 ;syscall db 0Fh , 05h ret NtGdiCLIPOBJ_bEnum ENDP ; ULONG64 __stdcall NtGdiCLIPOBJ_cEnumStart( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiCLIPOBJ_cEnumStart PROC STDCALL mov r10 , rcx mov eax , 4444 ;syscall db 0Fh , 05h ret NtGdiCLIPOBJ_cEnumStart ENDP ; ULONG64 __stdcall NtGdiCLIPOBJ_ppoGetPath( ULONG64 arg_01 ); NtGdiCLIPOBJ_ppoGetPath PROC STDCALL mov r10 , rcx mov eax , 4445 ;syscall db 0Fh , 05h ret NtGdiCLIPOBJ_ppoGetPath ENDP ; ULONG64 __stdcall NtGdiCancelDC( ULONG64 arg_01 ); NtGdiCancelDC PROC STDCALL mov r10 , rcx mov eax , 4446 ;syscall db 0Fh , 05h ret NtGdiCancelDC ENDP ; ULONG64 __stdcall NtGdiChangeGhostFont( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiChangeGhostFont PROC STDCALL mov r10 , rcx mov eax , 4447 ;syscall db 0Fh , 05h ret NtGdiChangeGhostFont ENDP ; ULONG64 __stdcall NtGdiCheckBitmapBits( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtGdiCheckBitmapBits PROC STDCALL mov r10 , rcx mov eax , 4448 ;syscall db 0Fh , 05h ret NtGdiCheckBitmapBits ENDP ; ULONG64 __stdcall NtGdiClearBitmapAttributes( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiClearBitmapAttributes PROC STDCALL mov r10 , rcx mov eax , 4449 ;syscall db 0Fh , 05h ret NtGdiClearBitmapAttributes ENDP ; ULONG64 __stdcall NtGdiClearBrushAttributes( ULONG64 arg_01 , ULONG64 arg_02 ); 
NtGdiClearBrushAttributes PROC STDCALL mov r10 , rcx mov eax , 4450 ;syscall db 0Fh , 05h ret NtGdiClearBrushAttributes ENDP ; ULONG64 __stdcall NtGdiColorCorrectPalette( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiColorCorrectPalette PROC STDCALL mov r10 , rcx mov eax , 4451 ;syscall db 0Fh , 05h ret NtGdiColorCorrectPalette ENDP ; ULONG64 __stdcall NtGdiConfigureOPMProtectedOutput( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiConfigureOPMProtectedOutput PROC STDCALL mov r10 , rcx mov eax , 4452 ;syscall db 0Fh , 05h ret NtGdiConfigureOPMProtectedOutput ENDP ; ULONG64 __stdcall NtGdiConvertMetafileRect( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiConvertMetafileRect PROC STDCALL mov r10 , rcx mov eax , 4453 ;syscall db 0Fh , 05h ret NtGdiConvertMetafileRect ENDP ; ULONG64 __stdcall NtGdiCreateBitmapFromDxSurface( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiCreateBitmapFromDxSurface PROC STDCALL mov r10 , rcx mov eax , 4454 ;syscall db 0Fh , 05h ret NtGdiCreateBitmapFromDxSurface ENDP ; ULONG64 __stdcall NtGdiCreateBitmapFromDxSurface2( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 ); NtGdiCreateBitmapFromDxSurface2 PROC STDCALL mov r10 , rcx mov eax , 4455 ;syscall db 0Fh , 05h ret NtGdiCreateBitmapFromDxSurface2 ENDP ; ULONG64 __stdcall NtGdiCreateColorTransform( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtGdiCreateColorTransform PROC STDCALL mov r10 , rcx mov eax , 4456 ;syscall db 0Fh , 05h ret NtGdiCreateColorTransform ENDP ; ULONG64 __stdcall NtGdiCreateEllipticRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiCreateEllipticRgn PROC STDCALL mov r10 , rcx mov eax , 4457 ;syscall db 0Fh , 05h ret NtGdiCreateEllipticRgn ENDP ; ULONG64 
__stdcall NtGdiCreateHatchBrushInternal( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiCreateHatchBrushInternal PROC STDCALL mov r10 , rcx mov eax , 4458 ;syscall db 0Fh , 05h ret NtGdiCreateHatchBrushInternal ENDP ; ULONG64 __stdcall NtGdiCreateMetafileDC( ULONG64 arg_01 ); NtGdiCreateMetafileDC PROC STDCALL mov r10 , rcx mov eax , 4459 ;syscall db 0Fh , 05h ret NtGdiCreateMetafileDC ENDP ; ULONG64 __stdcall NtGdiCreateOPMProtectedOutputs( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiCreateOPMProtectedOutputs PROC STDCALL mov r10 , rcx mov eax , 4460 ;syscall db 0Fh , 05h ret NtGdiCreateOPMProtectedOutputs ENDP ; ULONG64 __stdcall NtGdiCreateRoundRectRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiCreateRoundRectRgn PROC STDCALL mov r10 , rcx mov eax , 4461 ;syscall db 0Fh , 05h ret NtGdiCreateRoundRectRgn ENDP ; ULONG64 __stdcall NtGdiCreateServerMetaFile( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiCreateServerMetaFile PROC STDCALL mov r10 , rcx mov eax , 4462 ;syscall db 0Fh , 05h ret NtGdiCreateServerMetaFile ENDP ; ULONG64 __stdcall NtGdiCreateSessionMappedDIBSection( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtGdiCreateSessionMappedDIBSection PROC STDCALL mov r10 , rcx mov eax , 4463 ;syscall db 0Fh , 05h ret NtGdiCreateSessionMappedDIBSection ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4464 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4465 ;syscall db 0Fh , 05h ret 
NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4466 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4467 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiDDCCIGetCapabilitiesString( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiDDCCIGetCapabilitiesString PROC STDCALL mov r10 , rcx mov eax , 4468 ;syscall db 0Fh , 05h ret NtGdiDDCCIGetCapabilitiesString ENDP ; ULONG64 __stdcall NtGdiDDCCIGetCapabilitiesStringLength( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiDDCCIGetCapabilitiesStringLength PROC STDCALL mov r10 , rcx mov eax , 4469 ;syscall db 0Fh , 05h ret NtGdiDDCCIGetCapabilitiesStringLength ENDP ; ULONG64 __stdcall NtGdiDDCCIGetTimingReport( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiDDCCIGetTimingReport PROC STDCALL mov r10 , rcx mov eax , 4470 ;syscall db 0Fh , 05h ret NtGdiDDCCIGetTimingReport ENDP ; ULONG64 __stdcall NtGdiDDCCIGetVCPFeature( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiDDCCIGetVCPFeature PROC STDCALL mov r10 , rcx mov eax , 4471 ;syscall db 0Fh , 05h ret NtGdiDDCCIGetVCPFeature ENDP ; ULONG64 __stdcall NtGdiDDCCISaveCurrentSettings( ULONG64 arg_01 ); NtGdiDDCCISaveCurrentSettings PROC STDCALL mov r10 , rcx mov eax , 4472 ;syscall db 0Fh , 05h ret NtGdiDDCCISaveCurrentSettings ENDP ; ULONG64 __stdcall NtGdiDDCCISetVCPFeature( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiDDCCISetVCPFeature PROC STDCALL mov r10 , rcx mov eax , 4473 ;syscall db 0Fh , 05h ret NtGdiDDCCISetVCPFeature ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov 
r10 , rcx mov eax , 4474 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4475 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4476 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4477 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4478 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4479 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4480 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4481 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiDdCreateFullscreenSprite( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiDdCreateFullscreenSprite PROC STDCALL mov r10 , rcx mov eax , 4482 ;syscall db 0Fh , 05h ret NtGdiDdCreateFullscreenSprite 
ENDP ; ULONG64 __stdcall NtGdiD3dContextCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiD3dContextCreate PROC STDCALL mov r10 , rcx mov eax , 4483 ;syscall db 0Fh , 05h ret NtGdiD3dContextCreate ENDP ; ULONG64 __stdcall NtGdiDdDDIAcquireKeyedMutex( ULONG64 arg_01 ); NtGdiDdDDIAcquireKeyedMutex PROC STDCALL mov r10 , rcx mov eax , 4484 ;syscall db 0Fh , 05h ret NtGdiDdDDIAcquireKeyedMutex ENDP ; ULONG64 __stdcall NtGdiDdDDIAcquireKeyedMutex2( ULONG64 arg_01 ); NtGdiDdDDIAcquireKeyedMutex2 PROC STDCALL mov r10 , rcx mov eax , 4485 ;syscall db 0Fh , 05h ret NtGdiDdDDIAcquireKeyedMutex2 ENDP ; ULONG64 __stdcall NtGdiDdDDICheckExclusiveOwnership( ); NtGdiDdDDICheckExclusiveOwnership PROC STDCALL mov r10 , rcx mov eax , 4486 ;syscall db 0Fh , 05h ret NtGdiDdDDICheckExclusiveOwnership ENDP ; ULONG64 __stdcall NtGdiDdDDICheckMonitorPowerState( ULONG64 arg_01 ); NtGdiDdDDICheckMonitorPowerState PROC STDCALL mov r10 , rcx mov eax , 4487 ;syscall db 0Fh , 05h ret NtGdiDdDDICheckMonitorPowerState ENDP ; ULONG64 __stdcall NtGdiDdDDICheckOcclusion( ULONG64 arg_01 ); NtGdiDdDDICheckOcclusion PROC STDCALL mov r10 , rcx mov eax , 4488 ;syscall db 0Fh , 05h ret NtGdiDdDDICheckOcclusion ENDP ; ULONG64 __stdcall NtGdiDdDDICheckSharedResourceAccess( ULONG64 arg_01 ); NtGdiDdDDICheckSharedResourceAccess PROC STDCALL mov r10 , rcx mov eax , 4489 ;syscall db 0Fh , 05h ret NtGdiDdDDICheckSharedResourceAccess ENDP ; ULONG64 __stdcall NtGdiDdDDICheckVidPnExclusiveOwnership( ULONG64 arg_01 ); NtGdiDdDDICheckVidPnExclusiveOwnership PROC STDCALL mov r10 , rcx mov eax , 4490 ;syscall db 0Fh , 05h ret NtGdiDdDDICheckVidPnExclusiveOwnership ENDP ; ULONG64 __stdcall NtGdiDdDDICloseAdapter( ULONG64 arg_01 ); NtGdiDdDDICloseAdapter PROC STDCALL mov r10 , rcx mov eax , 4491 ;syscall db 0Fh , 05h ret NtGdiDdDDICloseAdapter ENDP ; ULONG64 __stdcall NtGdiDdDDIConfigureSharedResource( ULONG64 arg_01 ); NtGdiDdDDIConfigureSharedResource PROC STDCALL mov r10 , rcx mov eax , 
4492 ;syscall db 0Fh , 05h ret NtGdiDdDDIConfigureSharedResource ENDP ; ULONG64 __stdcall NtGdiDdDDICreateAllocation( ULONG64 arg_01 ); NtGdiDdDDICreateAllocation PROC STDCALL mov r10 , rcx mov eax , 4493 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateAllocation ENDP ; ULONG64 __stdcall NtGdiDdDDICreateContext( ULONG64 arg_01 ); NtGdiDdDDICreateContext PROC STDCALL mov r10 , rcx mov eax , 4494 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateContext ENDP ; ULONG64 __stdcall NtGdiDdDDICreateDCFromMemory( ULONG64 arg_01 ); NtGdiDdDDICreateDCFromMemory PROC STDCALL mov r10 , rcx mov eax , 4495 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateDCFromMemory ENDP ; ULONG64 __stdcall NtGdiDdDDICreateDevice( ULONG64 arg_01 ); NtGdiDdDDICreateDevice PROC STDCALL mov r10 , rcx mov eax , 4496 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateDevice ENDP ; ULONG64 __stdcall NtGdiDdDDICreateKeyedMutex( ULONG64 arg_01 ); NtGdiDdDDICreateKeyedMutex PROC STDCALL mov r10 , rcx mov eax , 4497 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateKeyedMutex ENDP ; ULONG64 __stdcall NtGdiDdDDICreateKeyedMutex2( ULONG64 arg_01 ); NtGdiDdDDICreateKeyedMutex2 PROC STDCALL mov r10 , rcx mov eax , 4498 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateKeyedMutex2 ENDP ; ULONG64 __stdcall NtGdiDdDDICreateOutputDupl( ULONG64 arg_01 ); NtGdiDdDDICreateOutputDupl PROC STDCALL mov r10 , rcx mov eax , 4499 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateOutputDupl ENDP ; ULONG64 __stdcall NtGdiDdDDICreateOverlay( ULONG64 arg_01 ); NtGdiDdDDICreateOverlay PROC STDCALL mov r10 , rcx mov eax , 4500 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateOverlay ENDP ; ULONG64 __stdcall NtGdiDdDDICreateSynchronizationObject( ULONG64 arg_01 ); NtGdiDdDDICreateSynchronizationObject PROC STDCALL mov r10 , rcx mov eax , 4501 ;syscall db 0Fh , 05h ret NtGdiDdDDICreateSynchronizationObject ENDP ; ULONG64 __stdcall NtGdiDdDDIDestroyAllocation( ULONG64 arg_01 ); NtGdiDdDDIDestroyAllocation PROC STDCALL mov r10 , rcx mov eax , 4502 ;syscall db 0Fh , 05h ret 
NtGdiDdDDIDestroyAllocation ENDP ; ULONG64 __stdcall NtGdiDdDDIDestroyContext( ULONG64 arg_01 ); NtGdiDdDDIDestroyContext PROC STDCALL mov r10 , rcx mov eax , 4503 ;syscall db 0Fh , 05h ret NtGdiDdDDIDestroyContext ENDP ; ULONG64 __stdcall NtGdiDdDDIDestroyDCFromMemory( ULONG64 arg_01 ); NtGdiDdDDIDestroyDCFromMemory PROC STDCALL mov r10 , rcx mov eax , 4504 ;syscall db 0Fh , 05h ret NtGdiDdDDIDestroyDCFromMemory ENDP ; ULONG64 __stdcall NtGdiDdDDIDestroyDevice( ULONG64 arg_01 ); NtGdiDdDDIDestroyDevice PROC STDCALL mov r10 , rcx mov eax , 4505 ;syscall db 0Fh , 05h ret NtGdiDdDDIDestroyDevice ENDP ; ULONG64 __stdcall NtGdiDdDDIDestroyKeyedMutex( ULONG64 arg_01 ); NtGdiDdDDIDestroyKeyedMutex PROC STDCALL mov r10 , rcx mov eax , 4506 ;syscall db 0Fh , 05h ret NtGdiDdDDIDestroyKeyedMutex ENDP ; ULONG64 __stdcall NtGdiDdDDIDestroyOutputDupl( ULONG64 arg_01 ); NtGdiDdDDIDestroyOutputDupl PROC STDCALL mov r10 , rcx mov eax , 4507 ;syscall db 0Fh , 05h ret NtGdiDdDDIDestroyOutputDupl ENDP ; ULONG64 __stdcall NtGdiDdDDIDestroyOverlay( ULONG64 arg_01 ); NtGdiDdDDIDestroyOverlay PROC STDCALL mov r10 , rcx mov eax , 4508 ;syscall db 0Fh , 05h ret NtGdiDdDDIDestroyOverlay ENDP ; ULONG64 __stdcall NtGdiDdDDIDestroySynchronizationObject( ULONG64 arg_01 ); NtGdiDdDDIDestroySynchronizationObject PROC STDCALL mov r10 , rcx mov eax , 4509 ;syscall db 0Fh , 05h ret NtGdiDdDDIDestroySynchronizationObject ENDP ; ULONG64 __stdcall NtGdiDdDDIEnumAdapters( ULONG64 arg_01 ); NtGdiDdDDIEnumAdapters PROC STDCALL mov r10 , rcx mov eax , 4510 ;syscall db 0Fh , 05h ret NtGdiDdDDIEnumAdapters ENDP ; ULONG64 __stdcall NtGdiDdDDIEscape( ULONG64 arg_01 ); NtGdiDdDDIEscape PROC STDCALL mov r10 , rcx mov eax , 4511 ;syscall db 0Fh , 05h ret NtGdiDdDDIEscape ENDP ; ULONG64 __stdcall NtGdiDdDDIFlipOverlay( ULONG64 arg_01 ); NtGdiDdDDIFlipOverlay PROC STDCALL mov r10 , rcx mov eax , 4512 ;syscall db 0Fh , 05h ret NtGdiDdDDIFlipOverlay ENDP ; ULONG64 __stdcall 
NtGdiDdDDIGetContextInProcessSchedulingPriority( ULONG64 arg_01 ); NtGdiDdDDIGetContextInProcessSchedulingPriority PROC STDCALL mov r10 , rcx mov eax , 4513 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetContextInProcessSchedulingPriority ENDP ; ULONG64 __stdcall NtGdiDdDDIGetContextSchedulingPriority( ULONG64 arg_01 ); NtGdiDdDDIGetContextSchedulingPriority PROC STDCALL mov r10 , rcx mov eax , 4514 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetContextSchedulingPriority ENDP ; ULONG64 __stdcall NtGdiDdDDIGetDeviceState( ULONG64 arg_01 ); NtGdiDdDDIGetDeviceState PROC STDCALL mov r10 , rcx mov eax , 4515 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetDeviceState ENDP ; ULONG64 __stdcall NtGdiDdDDIGetDisplayModeList( ULONG64 arg_01 ); NtGdiDdDDIGetDisplayModeList PROC STDCALL mov r10 , rcx mov eax , 4516 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetDisplayModeList ENDP ; ULONG64 __stdcall NtGdiDdDDIGetMultisampleMethodList( ULONG64 arg_01 ); NtGdiDdDDIGetMultisampleMethodList PROC STDCALL mov r10 , rcx mov eax , 4517 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetMultisampleMethodList ENDP ; ULONG64 __stdcall NtGdiDdDDIGetOverlayState( ULONG64 arg_01 ); NtGdiDdDDIGetOverlayState PROC STDCALL mov r10 , rcx mov eax , 4518 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetOverlayState ENDP ; ULONG64 __stdcall NtGdiDdDDIGetPresentHistory( ULONG64 arg_01 ); NtGdiDdDDIGetPresentHistory PROC STDCALL mov r10 , rcx mov eax , 4519 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetPresentHistory ENDP ; ULONG64 __stdcall NtGdiDdDDIGetPresentQueueEvent( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiDdDDIGetPresentQueueEvent PROC STDCALL mov r10 , rcx mov eax , 4520 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetPresentQueueEvent ENDP ; ULONG64 __stdcall NtGdiDdDDIGetProcessSchedulingPriorityClass( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiDdDDIGetProcessSchedulingPriorityClass PROC STDCALL mov r10 , rcx mov eax , 4521 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetProcessSchedulingPriorityClass ENDP ; ULONG64 __stdcall NtGdiDdDDIGetRuntimeData( ULONG64 arg_01 
); NtGdiDdDDIGetRuntimeData PROC STDCALL mov r10 , rcx mov eax , 4522 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetRuntimeData ENDP ; ULONG64 __stdcall NtGdiDdDDIGetScanLine( ULONG64 arg_01 ); NtGdiDdDDIGetScanLine PROC STDCALL mov r10 , rcx mov eax , 4523 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetScanLine ENDP ; ULONG64 __stdcall NtGdiDdDDIGetSharedPrimaryHandle( ULONG64 arg_01 ); NtGdiDdDDIGetSharedPrimaryHandle PROC STDCALL mov r10 , rcx mov eax , 4524 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetSharedPrimaryHandle ENDP ; ULONG64 __stdcall NtGdiDdDDIGetSharedResourceAdapterLuid( ULONG64 arg_01 ); NtGdiDdDDIGetSharedResourceAdapterLuid PROC STDCALL mov r10 , rcx mov eax , 4525 ;syscall db 0Fh , 05h ret NtGdiDdDDIGetSharedResourceAdapterLuid ENDP ; ULONG64 __stdcall NtGdiDdDDIInvalidateActiveVidPn( ULONG64 arg_01 ); NtGdiDdDDIInvalidateActiveVidPn PROC STDCALL mov r10 , rcx mov eax , 4526 ;syscall db 0Fh , 05h ret NtGdiDdDDIInvalidateActiveVidPn ENDP ; ULONG64 __stdcall NtGdiDdDDILock( ULONG64 arg_01 ); NtGdiDdDDILock PROC STDCALL mov r10 , rcx mov eax , 4527 ;syscall db 0Fh , 05h ret NtGdiDdDDILock ENDP ; ULONG64 __stdcall NtGdiDdDDIOfferAllocations( ULONG64 arg_01 ); NtGdiDdDDIOfferAllocations PROC STDCALL mov r10 , rcx mov eax , 4528 ;syscall db 0Fh , 05h ret NtGdiDdDDIOfferAllocations ENDP ; ULONG64 __stdcall NtGdiDdDDIOpenAdapterFromDeviceName( ULONG64 arg_01 ); NtGdiDdDDIOpenAdapterFromDeviceName PROC STDCALL mov r10 , rcx mov eax , 4529 ;syscall db 0Fh , 05h ret NtGdiDdDDIOpenAdapterFromDeviceName ENDP ; ULONG64 __stdcall NtGdiDdDDIOpenAdapterFromHdc( ULONG64 arg_01 ); NtGdiDdDDIOpenAdapterFromHdc PROC STDCALL mov r10 , rcx mov eax , 4530 ;syscall db 0Fh , 05h ret NtGdiDdDDIOpenAdapterFromHdc ENDP ; ULONG64 __stdcall NtGdiDdDDIOpenAdapterFromLuid( ULONG64 arg_01 ); NtGdiDdDDIOpenAdapterFromLuid PROC STDCALL mov r10 , rcx mov eax , 4531 ;syscall db 0Fh , 05h ret NtGdiDdDDIOpenAdapterFromLuid ENDP ; ULONG64 __stdcall NtGdiDdDDIOpenKeyedMutex( ULONG64 arg_01 ); 
;-----------------------------------------------------------------------
; win32k (NtGdi / DdDDI) system-service stubs, service indices 4532-4550.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;-----------------------------------------------------------------------
GDI_STUB_B MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_B NtGdiDdDDIOpenKeyedMutex , 4532                          ; args: 1
GDI_STUB_B NtGdiDdDDIOpenKeyedMutex2 , 4533                         ; args: 1
GDI_STUB_B NtGdiDdDDIOpenNtHandleFromName , 4534                    ; args: 1
GDI_STUB_B NtGdiDdDDIOpenResource , 4535                            ; args: 1
GDI_STUB_B NtGdiDdDDIOpenResourceFromNtHandle , 4536                ; args: 1
GDI_STUB_B NtGdiDdDDIOpenSyncObjectFromNtHandle , 4537              ; args: 1
GDI_STUB_B NtGdiDdDDIOpenSynchronizationObject , 4538               ; args: 1
GDI_STUB_B NtGdiDdDDIOutputDuplGetFrameInfo , 4539                  ; args: 1
GDI_STUB_B NtGdiDdDDIOutputDuplGetMetaData , 4540                   ; args: 1
GDI_STUB_B NtGdiDdDDIOutputDuplGetPointerShapeData , 4541           ; args: 1
GDI_STUB_B NtGdiDdDDIOutputDuplPresent , 4542                       ; args: 1
GDI_STUB_B NtGdiDdDDIOutputDuplReleaseFrame , 4543                  ; args: 1
GDI_STUB_B NtGdiDdDDIPinDirectFlipResources , 4544                  ; args: 1
GDI_STUB_B NtGdiDdDDIPollDisplayChildren , 4545                     ; args: 1
GDI_STUB_B NtGdiDdDDIPresent , 4546                                 ; args: 1
GDI_STUB_B NtGdiDdDDIQueryAdapterInfo , 4547                        ; args: 1
GDI_STUB_B NtGdiDdDDIQueryAllocationResidency , 4548                ; args: 1
GDI_STUB_B NtGdiDdDDIQueryRemoteVidPnSourceFromGdiDisplayName , 4549 ; args: 1
GDI_STUB_B NtGdiDdDDIQueryResourceInfo , 4550                       ; args: 1
;-----------------------------------------------------------------------
; win32k (NtGdi / DdDDI) system-service stubs, service indices 4551-4569.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;-----------------------------------------------------------------------
GDI_STUB_C MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_C NtGdiDdDDIQueryResourceInfoFromNtHandle , 4551           ; args: 1
GDI_STUB_C NtGdiDdDDIQueryStatistics , 4552                         ; args: 1
GDI_STUB_C NtGdiDdDDIReclaimAllocations , 4553                      ; args: 1
GDI_STUB_C NtGdiDdDDIReleaseKeyedMutex , 4554                       ; args: 1
GDI_STUB_C NtGdiDdDDIReleaseKeyedMutex2 , 4555                      ; args: 1
GDI_STUB_C NtGdiDdDDIReleaseProcessVidPnSourceOwners , 4556         ; args: 1
GDI_STUB_C NtGdiDdDDIRender , 4557                                  ; args: 1
GDI_STUB_C NtGdiDdDDISetAllocationPriority , 4558                   ; args: 1
GDI_STUB_C NtGdiDdDDISetContextInProcessSchedulingPriority , 4559   ; args: 1
GDI_STUB_C NtGdiDdDDISetContextSchedulingPriority , 4560            ; args: 1
GDI_STUB_C NtGdiDdDDISetDisplayMode , 4561                          ; args: 1
GDI_STUB_C NtGdiDdDDISetDisplayPrivateDriverFormat , 4562           ; args: 1
GDI_STUB_C NtGdiDdDDISetGammaRamp , 4563                            ; args: 1
GDI_STUB_C NtGdiDdDDISetProcessSchedulingPriorityClass , 4564       ; args: 2
GDI_STUB_C NtGdiDdDDISetQueuedLimit , 4565                          ; args: 1
GDI_STUB_C NtGdiDdDDISetStereoEnabled , 4566                        ; args: 1
GDI_STUB_C NtGdiDdDDISetVidPnSourceOwner , 4567                     ; args: 1
GDI_STUB_C NtGdiDdDDISetVidPnSourceOwner1 , 4568                    ; args: 1
GDI_STUB_C NtGdiDdDDIShareObjects , 4569                            ; args: 5
;-----------------------------------------------------------------------
; win32k (NtGdi / DdDDI) system-service stubs, service indices 4570-4587.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;
; NOTE(review): "NtGdiD3dContextCreate" repeats below (and in the
; following sections) with DIFFERENT service indices.  That looks like
; a generator placeholder used for services whose real export name was
; unknown; duplicate PROC names cannot all assemble/link.  Confirm the
; true names against the win32k syscall table before building.
;-----------------------------------------------------------------------
GDI_STUB_D MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_D NtGdiDdDDISharedPrimaryLockNotification , 4570           ; args: 1
GDI_STUB_D NtGdiDdDDISharedPrimaryUnLockNotification , 4571         ; args: 1
GDI_STUB_D NtGdiDdDDISignalSynchronizationObject , 4572             ; args: 1
GDI_STUB_D NtGdiDdDDIUnlock , 4573                                  ; args: 1
GDI_STUB_D NtGdiDdDDIUnpinDirectFlipResources , 4574                ; args: 1
GDI_STUB_D NtGdiDdDDIUpdateOverlay , 4575                           ; args: 1
GDI_STUB_D NtGdiDdDDIWaitForIdle , 4576                             ; args: 1
GDI_STUB_D NtGdiDdDDIWaitForSynchronizationObject , 4577            ; args: 1
GDI_STUB_D NtGdiDdDDIWaitForVerticalBlankEvent , 4578               ; args: 1
GDI_STUB_D NtGdiDdDDIWaitForVerticalBlankEvent2 , 4579              ; args: 1
GDI_STUB_D NtGdiD3dContextCreate , 4580                             ; args: 4
GDI_STUB_D NtGdiD3dContextCreate , 4581                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_D NtGdiDdDestroyFullscreenSprite , 4582                    ; args: 2
GDI_STUB_D NtGdiD3dContextCreate , 4583                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_D NtGdiD3dContextCreate , 4584                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_D NtGdiD3dContextCreate , 4585                             ; args: 5 (duplicate name, see NOTE)
GDI_STUB_D NtGdiD3dContextCreate , 4586                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_D NtGdiD3dContextCreate , 4587                             ; args: 4 (duplicate name, see NOTE)
;-----------------------------------------------------------------------
; win32k (NtGdi) system-service stubs, service indices 4588-4604.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;
; NOTE(review): "NtGdiD3dContextCreate" repeats with different service
; indices -- apparently a generator placeholder for unresolved export
; names; duplicate PROC names cannot all assemble/link.  Confirm the
; real names against the win32k syscall table.
;-----------------------------------------------------------------------
GDI_STUB_E MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_E NtGdiD3dContextCreate , 4588                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4589                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4590                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4591                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4592                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4593                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4594                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4595                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4596                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4597                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4598                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4599                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiDdNotifyFullscreenSpriteUpdate , 4600               ; args: 2
GDI_STUB_E NtGdiD3dContextCreate , 4601                             ; args: 11 (duplicate name, see NOTE)
GDI_STUB_E NtGdiD3dContextCreate , 4602                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_E NtGdiDdQueryVisRgnUniqueness , 4603                      ; args: 0
GDI_STUB_E NtGdiD3dContextCreate , 4604                             ; args: 4 (duplicate name, see NOTE)
;-----------------------------------------------------------------------
; win32k (NtGdi) system-service stubs, service indices 4605-4622.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;
; NOTE(review): "NtGdiD3dContextCreate" repeats with different service
; indices -- apparently a generator placeholder for unresolved export
; names; duplicate PROC names cannot all assemble/link.  Confirm the
; real names against the win32k syscall table.
;-----------------------------------------------------------------------
GDI_STUB_F MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_F NtGdiD3dContextCreate , 4605                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiD3dContextCreate , 4606                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiD3dContextCreate , 4607                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiD3dContextCreate , 4608                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiD3dContextCreate , 4609                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiD3dContextCreate , 4610                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiDdUnattachSurface , 4611                            ; args: 2
GDI_STUB_F NtGdiD3dContextCreate , 4612                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiD3dContextCreate , 4613                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiD3dContextCreate , 4614                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiDeleteColorTransform , 4615                         ; args: 2
GDI_STUB_F NtGdiDescribePixelFormat , 4616                          ; args: 4
GDI_STUB_F NtGdiDestroyOPMProtectedOutput , 4617                    ; args: 1
GDI_STUB_F _DestroyPhysicalMonitor_CMonitorAPI__QEAAJPEAX_Z , 4618  ; args: 2 (mangled C++ export name)
GDI_STUB_F NtGdiDoBanding , 4619                                    ; args: 4
GDI_STUB_F NtGdiDrawEscape , 4620                                   ; args: 4
GDI_STUB_F NtGdiD3dContextCreate , 4621                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_F NtGdiD3dContextCreate , 4622                             ; args: 4 (duplicate name, see NOTE)
;-----------------------------------------------------------------------
; win32k (NtGdi) system-service stubs, service indices 4623-4639.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;
; NOTE(review): "NtGdiD3dContextCreate" repeats with different service
; indices -- apparently a generator placeholder for unresolved export
; names; duplicate PROC names cannot all assemble/link.  Confirm the
; real names against the win32k syscall table.
;-----------------------------------------------------------------------
GDI_STUB_G MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_G NtGdiD3dContextCreate , 4623                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4624                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4625                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4626                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4627                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4628                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4629                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4630                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4631                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4632                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4633                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4634                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4635                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4636                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiD3dContextCreate , 4637                             ; args: 4 (duplicate name, see NOTE)
GDI_STUB_G NtGdiDwmCreatedBitmapRemotingOutput , 4638               ; args: 0
GDI_STUB_G NtGdiD3dContextCreate , 4639                             ; args: 6 (duplicate name, see NOTE)
;-----------------------------------------------------------------------
; win32k (NtGdi / Eng) system-service stubs, service indices 4640-4658.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;-----------------------------------------------------------------------
GDI_STUB_H MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_H NtGdiEllipse , 4640                                      ; args: 5
GDI_STUB_H NtGdiEnableEudc , 4641                                   ; args: 1
GDI_STUB_H NtGdiEndDoc , 4642                                       ; args: 1
GDI_STUB_H NtGdiEndGdiRendering , 4643                              ; args: 4
GDI_STUB_H NtGdiEndPage , 4644                                      ; args: 1
GDI_STUB_H NtGdiEngAlphaBlend , 4645                                ; args: 7
GDI_STUB_H NtGdiEngAssociateSurface , 4646                          ; args: 3
GDI_STUB_H NtGdiEngBitBlt , 4647                                    ; args: 11
GDI_STUB_H NtGdiEngCheckAbort , 4648                                ; args: 1
GDI_STUB_H NtGdiEngComputeGlyphSet , 4649                           ; args: 3
GDI_STUB_H NtGdiEngCopyBits , 4650                                  ; args: 6
GDI_STUB_H NtGdiEngCreateBitmap , 4651                              ; args: 6
GDI_STUB_H NtGdiEngCreateClip , 4652                                ; args: 0
GDI_STUB_H NtGdiEngCreateDeviceBitmap , 4653                        ; args: 4
GDI_STUB_H NtGdiEngCreateDeviceSurface , 4654                       ; args: 4
GDI_STUB_H NtGdiEngCreatePalette , 4655                             ; args: 6
GDI_STUB_H NtGdiEngDeleteClip , 4656                                ; args: 1
GDI_STUB_H NtGdiEngDeletePalette , 4657                             ; args: 1
GDI_STUB_H NtGdiEngDeletePath , 4658                                ; args: 1
;-----------------------------------------------------------------------
; win32k (NtGdi / Eng) system-service stubs, service indices 4659-4672.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;-----------------------------------------------------------------------
GDI_STUB_I MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_I NtGdiEngDeleteSurface , 4659                             ; args: 1
GDI_STUB_I NtGdiEngEraseSurface , 4660                              ; args: 3
GDI_STUB_I NtGdiEngFillPath , 4661                                  ; args: 7
GDI_STUB_I NtGdiEngGradientFill , 4662                              ; args: 10
GDI_STUB_I NtGdiEngLineTo , 4663                                    ; args: 9
GDI_STUB_I NtGdiEngLockSurface , 4664                               ; args: 1
GDI_STUB_I NtGdiEngMarkBandingSurface , 4665                        ; args: 1
GDI_STUB_I NtGdiEngPaint , 4666                                     ; args: 5
GDI_STUB_I NtGdiEngPlgBlt , 4667                                    ; args: 11
GDI_STUB_I NtGdiEngStretchBlt , 4668                                ; args: 11
GDI_STUB_I NtGdiEngStretchBltROP , 4669                             ; args: 13
GDI_STUB_I NtGdiEngStrokeAndFillPath , 4670                         ; args: 10
GDI_STUB_I NtGdiEngStrokePath , 4671                                ; args: 8
GDI_STUB_I NtGdiEngTextOut , 4672                                   ; args: 10
;-----------------------------------------------------------------------
; win32k (NtGdi) system-service stubs, service indices 4673-4691.
; Shared stub shape: RCX -> R10 (x64 syscall convention), service index
; in EAX, SYSCALL emitted as raw bytes 0Fh 05h, then RET.  "args:" is
; the ULONG64 argument count from the original prototype comment.
;-----------------------------------------------------------------------
GDI_STUB_J MACRO FnName:REQ, SvcId:REQ
FnName PROC STDCALL
    mov r10 , rcx
    mov eax , SvcId
    ;syscall
    db 0Fh , 05h
    ret
FnName ENDP
ENDM

GDI_STUB_J NtGdiEngTransparentBlt , 4673                            ; args: 8
GDI_STUB_J NtGdiEngUnlockSurface , 4674                             ; args: 1
GDI_STUB_J NtGdiEnumFonts , 4675                                    ; args: 8
GDI_STUB_J NtGdiEnumObjects , 4676                                  ; args: 4
GDI_STUB_J NtGdiEudcLoadUnloadLink , 4677                           ; args: 7
GDI_STUB_J NtGdiExtFloodFill , 4678                                 ; args: 5
GDI_STUB_J NtGdiFONTOBJ_cGetAllGlyphHandles , 4679                  ; args: 2
GDI_STUB_J NtGdiFONTOBJ_cGetGlyphs , 4680                           ; args: 5
GDI_STUB_J NtGdiFONTOBJ_pQueryGlyphAttrs , 4681                     ; args: 2
GDI_STUB_J NtGdiFONTOBJ_pfdg , 4682                                 ; args: 1
GDI_STUB_J NtGdiFONTOBJ_pifi , 4683                                 ; args: 1
GDI_STUB_J NtGdiFONTOBJ_pvTrueTypeFontFile , 4684                   ; args: 2
GDI_STUB_J NtGdiFONTOBJ_pxoGetXform , 4685                          ; args: 1
GDI_STUB_J NtGdiFONTOBJ_vGetInfo , 4686                             ; args: 3
GDI_STUB_J NtGdiFlattenPath , 4687                                  ; args: 1
GDI_STUB_J NtGdiFontIsLinked , 4688                                 ; args: 1
GDI_STUB_J NtGdiForceUFIMapping , 4689                              ; args: 2
GDI_STUB_J NtGdiFrameRgn , 4690                                     ; args: 5
GDI_STUB_J NtGdiFullscreenControl , 4691                            ; args: 5
__stdcall NtGdiGetBoundsRect( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetBoundsRect PROC STDCALL mov r10 , rcx mov eax , 4692 ;syscall db 0Fh , 05h ret NtGdiGetBoundsRect ENDP ; ULONG64 __stdcall NtGdiGetCOPPCompatibleOPMInformation( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetCOPPCompatibleOPMInformation PROC STDCALL mov r10 , rcx mov eax , 4693 ;syscall db 0Fh , 05h ret NtGdiGetCOPPCompatibleOPMInformation ENDP ; ULONG64 __stdcall NtGdiGetCertificate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiGetCertificate PROC STDCALL mov r10 , rcx mov eax , 4694 ;syscall db 0Fh , 05h ret NtGdiGetCertificate ENDP ; ULONG64 __stdcall NtGdiGetCertificateSize( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetCertificateSize PROC STDCALL mov r10 , rcx mov eax , 4695 ;syscall db 0Fh , 05h ret NtGdiGetCertificateSize ENDP ; ULONG64 __stdcall NtGdiGetCharABCWidthsW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiGetCharABCWidthsW PROC STDCALL mov r10 , rcx mov eax , 4696 ;syscall db 0Fh , 05h ret NtGdiGetCharABCWidthsW ENDP ; ULONG64 __stdcall NtGdiGetCharacterPlacementW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiGetCharacterPlacementW PROC STDCALL mov r10 , rcx mov eax , 4697 ;syscall db 0Fh , 05h ret NtGdiGetCharacterPlacementW ENDP ; ULONG64 __stdcall NtGdiGetColorAdjustment( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetColorAdjustment PROC STDCALL mov r10 , rcx mov eax , 4698 ;syscall db 0Fh , 05h ret NtGdiGetColorAdjustment ENDP ; ULONG64 __stdcall NtGdiGetColorSpaceforBitmap( ULONG64 arg_01 ); NtGdiGetColorSpaceforBitmap PROC STDCALL mov r10 , rcx mov eax , 4699 ;syscall db 0Fh , 05h ret NtGdiGetColorSpaceforBitmap ENDP ; ULONG64 __stdcall NtGdiGetDeviceCaps( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetDeviceCaps PROC STDCALL mov r10 , rcx mov eax , 4700 ;syscall db 0Fh , 05h ret 
NtGdiGetDeviceCaps ENDP ; ULONG64 __stdcall NtGdiGetDeviceCapsAll( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetDeviceCapsAll PROC STDCALL mov r10 , rcx mov eax , 4701 ;syscall db 0Fh , 05h ret NtGdiGetDeviceCapsAll ENDP ; ULONG64 __stdcall NtGdiGetDeviceGammaRamp( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetDeviceGammaRamp PROC STDCALL mov r10 , rcx mov eax , 4702 ;syscall db 0Fh , 05h ret NtGdiGetDeviceGammaRamp ENDP ; ULONG64 __stdcall NtGdiGetDeviceWidth( ULONG64 arg_01 ); NtGdiGetDeviceWidth PROC STDCALL mov r10 , rcx mov eax , 4703 ;syscall db 0Fh , 05h ret NtGdiGetDeviceWidth ENDP ; ULONG64 __stdcall NtGdiGetDhpdev( ULONG64 arg_01 ); NtGdiGetDhpdev PROC STDCALL mov r10 , rcx mov eax , 4704 ;syscall db 0Fh , 05h ret NtGdiGetDhpdev ENDP ; ULONG64 __stdcall NtGdiGetETM( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetETM PROC STDCALL mov r10 , rcx mov eax , 4705 ;syscall db 0Fh , 05h ret NtGdiGetETM ENDP ; ULONG64 __stdcall NtGdiGetEmbUFI( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 ); NtGdiGetEmbUFI PROC STDCALL mov r10 , rcx mov eax , 4706 ;syscall db 0Fh , 05h ret NtGdiGetEmbUFI ENDP ; ULONG64 __stdcall NtGdiGetEmbedFonts( ); NtGdiGetEmbedFonts PROC STDCALL mov r10 , rcx mov eax , 4707 ;syscall db 0Fh , 05h ret NtGdiGetEmbedFonts ENDP ; ULONG64 __stdcall NtGdiGetEudcTimeStampEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetEudcTimeStampEx PROC STDCALL mov r10 , rcx mov eax , 4708 ;syscall db 0Fh , 05h ret NtGdiGetEudcTimeStampEx ENDP ; ULONG64 __stdcall NtGdiGetFontFileData( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiGetFontFileData PROC STDCALL mov r10 , rcx mov eax , 4709 ;syscall db 0Fh , 05h ret NtGdiGetFontFileData ENDP ; ULONG64 __stdcall NtGdiGetFontFileInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiGetFontFileInfo PROC STDCALL mov r10 , rcx mov eax , 4710 ;syscall db 0Fh , 
05h ret NtGdiGetFontFileInfo ENDP ; ULONG64 __stdcall NtGdiGetFontResourceInfoInternalW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 ); NtGdiGetFontResourceInfoInternalW PROC STDCALL mov r10 , rcx mov eax , 4711 ;syscall db 0Fh , 05h ret NtGdiGetFontResourceInfoInternalW ENDP ; ULONG64 __stdcall NtGdiGetFontUnicodeRanges( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetFontUnicodeRanges PROC STDCALL mov r10 , rcx mov eax , 4712 ;syscall db 0Fh , 05h ret NtGdiGetFontUnicodeRanges ENDP ; ULONG64 __stdcall NtGdiGetGlyphIndicesW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiGetGlyphIndicesW PROC STDCALL mov r10 , rcx mov eax , 4713 ;syscall db 0Fh , 05h ret NtGdiGetGlyphIndicesW ENDP ; ULONG64 __stdcall NtGdiGetGlyphIndicesWInternal( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiGetGlyphIndicesWInternal PROC STDCALL mov r10 , rcx mov eax , 4714 ;syscall db 0Fh , 05h ret NtGdiGetGlyphIndicesWInternal ENDP ; ULONG64 __stdcall NtGdiGetGlyphOutline( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtGdiGetGlyphOutline PROC STDCALL mov r10 , rcx mov eax , 4715 ;syscall db 0Fh , 05h ret NtGdiGetGlyphOutline ENDP ; ULONG64 __stdcall NtGdiGetKerningPairs( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetKerningPairs PROC STDCALL mov r10 , rcx mov eax , 4716 ;syscall db 0Fh , 05h ret NtGdiGetKerningPairs ENDP ; ULONG64 __stdcall NtGdiGetLinkedUFIs( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetLinkedUFIs PROC STDCALL mov r10 , rcx mov eax , 4717 ;syscall db 0Fh , 05h ret NtGdiGetLinkedUFIs ENDP ; ULONG64 __stdcall NtGdiGetMiterLimit( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetMiterLimit PROC STDCALL mov r10 , rcx mov eax , 4718 ;syscall db 0Fh , 05h ret NtGdiGetMiterLimit ENDP ; ULONG64 
__stdcall NtGdiGetMonitorID( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetMonitorID PROC STDCALL mov r10 , rcx mov eax , 4719 ;syscall db 0Fh , 05h ret NtGdiGetMonitorID ENDP ; ULONG64 __stdcall NtGdiGetNumberOfPhysicalMonitors( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetNumberOfPhysicalMonitors PROC STDCALL mov r10 , rcx mov eax , 4720 ;syscall db 0Fh , 05h ret NtGdiGetNumberOfPhysicalMonitors ENDP ; ULONG64 __stdcall NtGdiGetOPMInformation( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetOPMInformation PROC STDCALL mov r10 , rcx mov eax , 4721 ;syscall db 0Fh , 05h ret NtGdiGetOPMInformation ENDP ; ULONG64 __stdcall NtGdiGetOPMRandomNumber( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetOPMRandomNumber PROC STDCALL mov r10 , rcx mov eax , 4722 ;syscall db 0Fh , 05h ret NtGdiGetOPMRandomNumber ENDP ; ULONG64 __stdcall NtGdiGetObjectBitmapHandle( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetObjectBitmapHandle PROC STDCALL mov r10 , rcx mov eax , 4723 ;syscall db 0Fh , 05h ret NtGdiGetObjectBitmapHandle ENDP ; ULONG64 __stdcall NtGdiGetPath( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiGetPath PROC STDCALL mov r10 , rcx mov eax , 4724 ;syscall db 0Fh , 05h ret NtGdiGetPath ENDP ; ULONG64 __stdcall NtGdiGetPerBandInfo( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetPerBandInfo PROC STDCALL mov r10 , rcx mov eax , 4725 ;syscall db 0Fh , 05h ret NtGdiGetPerBandInfo ENDP ; ULONG64 __stdcall NtGdiGetPhysicalMonitorDescription( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiGetPhysicalMonitorDescription PROC STDCALL mov r10 , rcx mov eax , 4726 ;syscall db 0Fh , 05h ret NtGdiGetPhysicalMonitorDescription ENDP ; ULONG64 __stdcall NtGdiGetPhysicalMonitors( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiGetPhysicalMonitors PROC STDCALL mov r10 , rcx mov eax , 4727 ;syscall db 0Fh , 05h ret NtGdiGetPhysicalMonitors ENDP ; ULONG64 __stdcall NtGdiGetRealizationInfo( ULONG64 arg_01 , ULONG64 
arg_02 ); NtGdiGetRealizationInfo PROC STDCALL mov r10 , rcx mov eax , 4728 ;syscall db 0Fh , 05h ret NtGdiGetRealizationInfo ENDP ; ULONG64 __stdcall NtGdiGetServerMetaFileBits( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 ); NtGdiGetServerMetaFileBits PROC STDCALL mov r10 , rcx mov eax , 4729 ;syscall db 0Fh , 05h ret NtGdiGetServerMetaFileBits ENDP ; ULONG64 __stdcall UMPDDrvQuerySpoolType( ); UMPDDrvQuerySpoolType PROC STDCALL mov r10 , rcx mov eax , 4730 ;syscall db 0Fh , 05h ret UMPDDrvQuerySpoolType ENDP ; ULONG64 __stdcall NtGdiGetStats( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiGetStats PROC STDCALL mov r10 , rcx mov eax , 4731 ;syscall db 0Fh , 05h ret NtGdiGetStats ENDP ; ULONG64 __stdcall NtGdiGetStringBitmapW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiGetStringBitmapW PROC STDCALL mov r10 , rcx mov eax , 4732 ;syscall db 0Fh , 05h ret NtGdiGetStringBitmapW ENDP ; ULONG64 __stdcall NtGdiGetSuggestedOPMProtectedOutputArraySize( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiGetSuggestedOPMProtectedOutputArraySize PROC STDCALL mov r10 , rcx mov eax , 4733 ;syscall db 0Fh , 05h ret NtGdiGetSuggestedOPMProtectedOutputArraySize ENDP ; ULONG64 __stdcall NtGdiGetTextExtentExW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtGdiGetTextExtentExW PROC STDCALL mov r10 , rcx mov eax , 4734 ;syscall db 0Fh , 05h ret NtGdiGetTextExtentExW ENDP ; ULONG64 __stdcall NtGdiGetUFI( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiGetUFI PROC STDCALL mov r10 , rcx mov eax , 4735 ;syscall db 0Fh , 05h ret NtGdiGetUFI ENDP ; ULONG64 __stdcall NtGdiGetUFIPathname( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , 
ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 ); NtGdiGetUFIPathname PROC STDCALL mov r10 , rcx mov eax , 4736 ;syscall db 0Fh , 05h ret NtGdiGetUFIPathname ENDP ; ULONG64 __stdcall NtGdiGradientFill( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiGradientFill PROC STDCALL mov r10 , rcx mov eax , 4737 ;syscall db 0Fh , 05h ret NtGdiGradientFill ENDP ; ULONG64 __stdcall NtGdiHLSurfGetInformation( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiHLSurfGetInformation PROC STDCALL mov r10 , rcx mov eax , 4738 ;syscall db 0Fh , 05h ret NtGdiHLSurfGetInformation ENDP ; ULONG64 __stdcall NtGdiHLSurfSetInformation( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiHLSurfSetInformation PROC STDCALL mov r10 , rcx mov eax , 4739 ;syscall db 0Fh , 05h ret NtGdiHLSurfSetInformation ENDP ; ULONG64 __stdcall NtGdiHT_Get8BPPFormatPalette( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiHT_Get8BPPFormatPalette PROC STDCALL mov r10 , rcx mov eax , 4740 ;syscall db 0Fh , 05h ret NtGdiHT_Get8BPPFormatPalette ENDP ; ULONG64 __stdcall NtGdiHT_Get8BPPMaskPalette( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiHT_Get8BPPMaskPalette PROC STDCALL mov r10 , rcx mov eax , 4741 ;syscall db 0Fh , 05h ret NtGdiHT_Get8BPPMaskPalette ENDP ; ULONG64 __stdcall NtGdiIcmBrushInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 ); NtGdiIcmBrushInfo PROC STDCALL mov r10 , rcx mov eax , 4742 ;syscall db 0Fh , 05h ret NtGdiIcmBrushInfo ENDP ; for CDiscardInputQueue::GetType ; ULONG64 __stdcall CDiscardInputQueue__GetType( ); CDiscardInputQueue__GetType PROC STDCALL mov r10 , rcx mov eax , 4743 ;syscall db 0Fh , 05h ret CDiscardInputQueue__GetType ENDP ; ULONG64 __stdcall NtGdiInitSpool( ); NtGdiInitSpool 
PROC STDCALL mov r10 , rcx mov eax , 4744 ;syscall db 0Fh , 05h ret NtGdiInitSpool ENDP ; ULONG64 __stdcall NtGdiMakeFontDir( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiMakeFontDir PROC STDCALL mov r10 , rcx mov eax , 4745 ;syscall db 0Fh , 05h ret NtGdiMakeFontDir ENDP ; ULONG64 __stdcall NtGdiMakeInfoDC( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiMakeInfoDC PROC STDCALL mov r10 , rcx mov eax , 4746 ;syscall db 0Fh , 05h ret NtGdiMakeInfoDC ENDP ; ULONG64 __stdcall NtGdiMakeObjectUnXferable( ULONG64 arg_01 ); NtGdiMakeObjectUnXferable PROC STDCALL mov r10 , rcx mov eax , 4747 ;syscall db 0Fh , 05h ret NtGdiMakeObjectUnXferable ENDP ; ULONG64 __stdcall NtGdiMakeObjectXferable( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiMakeObjectXferable PROC STDCALL mov r10 , rcx mov eax , 4748 ;syscall db 0Fh , 05h ret NtGdiMakeObjectXferable ENDP ; ULONG64 __stdcall NtGdiMirrorWindowOrg( ULONG64 arg_01 ); NtGdiMirrorWindowOrg PROC STDCALL mov r10 , rcx mov eax , 4749 ;syscall db 0Fh , 05h ret NtGdiMirrorWindowOrg ENDP ; ULONG64 __stdcall NtGdiMonoBitmap( ULONG64 arg_01 ); NtGdiMonoBitmap PROC STDCALL mov r10 , rcx mov eax , 4750 ;syscall db 0Fh , 05h ret NtGdiMonoBitmap ENDP ; ULONG64 __stdcall NtGdiMoveTo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiMoveTo PROC STDCALL mov r10 , rcx mov eax , 4751 ;syscall db 0Fh , 05h ret NtGdiMoveTo ENDP ; ULONG64 __stdcall NtGdiOffsetClipRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiOffsetClipRgn PROC STDCALL mov r10 , rcx mov eax , 4752 ;syscall db 0Fh , 05h ret NtGdiOffsetClipRgn ENDP ; ULONG64 __stdcall NtGdiPATHOBJ_bEnum( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiPATHOBJ_bEnum PROC STDCALL mov r10 , rcx mov eax , 4753 ;syscall db 0Fh , 05h ret NtGdiPATHOBJ_bEnum ENDP ; ULONG64 __stdcall NtGdiPATHOBJ_bEnumClipLines( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiPATHOBJ_bEnumClipLines PROC STDCALL mov r10 , rcx mov eax , 4754 ;syscall db 0Fh , 
05h ret NtGdiPATHOBJ_bEnumClipLines ENDP ; ULONG64 __stdcall NtGdiPATHOBJ_vEnumStart( ULONG64 arg_01 ); NtGdiPATHOBJ_vEnumStart PROC STDCALL mov r10 , rcx mov eax , 4755 ;syscall db 0Fh , 05h ret NtGdiPATHOBJ_vEnumStart ENDP ; ULONG64 __stdcall NtGdiPATHOBJ_vEnumStartClipLines( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiPATHOBJ_vEnumStartClipLines PROC STDCALL mov r10 , rcx mov eax , 4756 ;syscall db 0Fh , 05h ret NtGdiPATHOBJ_vEnumStartClipLines ENDP ; ULONG64 __stdcall NtGdiPATHOBJ_vGetBounds( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiPATHOBJ_vGetBounds PROC STDCALL mov r10 , rcx mov eax , 4757 ;syscall db 0Fh , 05h ret NtGdiPATHOBJ_vGetBounds ENDP ; ULONG64 __stdcall NtGdiPathToRegion( ULONG64 arg_01 ); NtGdiPathToRegion PROC STDCALL mov r10 , rcx mov eax , 4758 ;syscall db 0Fh , 05h ret NtGdiPathToRegion ENDP ; ULONG64 __stdcall NtGdiPlgBlt( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 , ULONG64 arg_11 ); NtGdiPlgBlt PROC STDCALL mov r10 , rcx mov eax , 4759 ;syscall db 0Fh , 05h ret NtGdiPlgBlt ENDP ; ULONG64 __stdcall NtGdiPolyDraw( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiPolyDraw PROC STDCALL mov r10 , rcx mov eax , 4760 ;syscall db 0Fh , 05h ret NtGdiPolyDraw ENDP ; ULONG64 __stdcall NtGdiPolyTextOutW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiPolyTextOutW PROC STDCALL mov r10 , rcx mov eax , 4761 ;syscall db 0Fh , 05h ret NtGdiPolyTextOutW ENDP ; ULONG64 __stdcall NtGdiPtInRegion( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiPtInRegion PROC STDCALL mov r10 , rcx mov eax , 4762 ;syscall db 0Fh , 05h ret NtGdiPtInRegion ENDP ; ULONG64 __stdcall NtGdiPtVisible( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiPtVisible PROC STDCALL mov r10 , rcx mov eax , 4763 ;syscall db 0Fh , 05h ret NtGdiPtVisible ENDP ; ULONG64 __stdcall 
NtGdiQueryFonts( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiQueryFonts PROC STDCALL mov r10 , rcx mov eax , 4764 ;syscall db 0Fh , 05h ret NtGdiQueryFonts ENDP ; ULONG64 __stdcall NtGdiRemoveFontResourceW( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiRemoveFontResourceW PROC STDCALL mov r10 , rcx mov eax , 4765 ;syscall db 0Fh , 05h ret NtGdiRemoveFontResourceW ENDP ; ULONG64 __stdcall NtGdiRemoveMergeFont( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiRemoveMergeFont PROC STDCALL mov r10 , rcx mov eax , 4766 ;syscall db 0Fh , 05h ret NtGdiRemoveMergeFont ENDP ; ULONG64 __stdcall NtGdiResetDC( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiResetDC PROC STDCALL mov r10 , rcx mov eax , 4767 ;syscall db 0Fh , 05h ret NtGdiResetDC ENDP ; ULONG64 __stdcall NtGdiResizePalette( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiResizePalette PROC STDCALL mov r10 , rcx mov eax , 4768 ;syscall db 0Fh , 05h ret NtGdiResizePalette ENDP ; ULONG64 __stdcall NtGdiRoundRect( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 ); NtGdiRoundRect PROC STDCALL mov r10 , rcx mov eax , 4769 ;syscall db 0Fh , 05h ret NtGdiRoundRect ENDP ; ULONG64 __stdcall NtGdiSTROBJ_bEnum( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSTROBJ_bEnum PROC STDCALL mov r10 , rcx mov eax , 4770 ;syscall db 0Fh , 05h ret NtGdiSTROBJ_bEnum ENDP ; ULONG64 __stdcall NtGdiSTROBJ_bEnumPositionsOnly( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSTROBJ_bEnumPositionsOnly PROC STDCALL mov r10 , rcx mov eax , 4771 ;syscall db 0Fh , 05h ret NtGdiSTROBJ_bEnumPositionsOnly ENDP ; ULONG64 __stdcall NtGdiSTROBJ_bGetAdvanceWidths( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiSTROBJ_bGetAdvanceWidths PROC STDCALL mov r10 , rcx mov eax , 4772 ;syscall db 0Fh , 05h ret NtGdiSTROBJ_bGetAdvanceWidths ENDP ; 
ULONG64 __stdcall NtGdiSTROBJ_dwGetCodePage( ULONG64 arg_01 ); NtGdiSTROBJ_dwGetCodePage PROC STDCALL mov r10 , rcx mov eax , 4773 ;syscall db 0Fh , 05h ret NtGdiSTROBJ_dwGetCodePage ENDP ; ULONG64 __stdcall NtGdiSTROBJ_vEnumStart( ULONG64 arg_01 ); NtGdiSTROBJ_vEnumStart PROC STDCALL mov r10 , rcx mov eax , 4774 ;syscall db 0Fh , 05h ret NtGdiSTROBJ_vEnumStart ENDP ; ULONG64 __stdcall NtGdiScaleViewportExtEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiScaleViewportExtEx PROC STDCALL mov r10 , rcx mov eax , 4775 ;syscall db 0Fh , 05h ret NtGdiScaleViewportExtEx ENDP ; ULONG64 __stdcall NtGdiScaleWindowExtEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtGdiScaleWindowExtEx PROC STDCALL mov r10 , rcx mov eax , 4776 ;syscall db 0Fh , 05h ret NtGdiScaleWindowExtEx ENDP ; ULONG64 __stdcall NtGdiSelectBrush( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSelectBrush PROC STDCALL mov r10 , rcx mov eax , 4777 ;syscall db 0Fh , 05h ret NtGdiSelectBrush ENDP ; ULONG64 __stdcall NtGdiSelectClipPath( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSelectClipPath PROC STDCALL mov r10 , rcx mov eax , 4778 ;syscall db 0Fh , 05h ret NtGdiSelectClipPath ENDP ; ULONG64 __stdcall NtGdiSelectPen( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSelectPen PROC STDCALL mov r10 , rcx mov eax , 4779 ;syscall db 0Fh , 05h ret NtGdiSelectPen ENDP ; ULONG64 __stdcall NtGdiSetBitmapAttributes( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSetBitmapAttributes PROC STDCALL mov r10 , rcx mov eax , 4780 ;syscall db 0Fh , 05h ret NtGdiSetBitmapAttributes ENDP ; ULONG64 __stdcall NtGdiSetBrushAttributes( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSetBrushAttributes PROC STDCALL mov r10 , rcx mov eax , 4781 ;syscall db 0Fh , 05h ret NtGdiSetBrushAttributes ENDP ; ULONG64 __stdcall NtGdiSetColorAdjustment( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSetColorAdjustment PROC STDCALL mov r10 , rcx mov eax , 4782 
;syscall db 0Fh , 05h ret NtGdiSetColorAdjustment ENDP ; ULONG64 __stdcall NtGdiSetColorSpace( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSetColorSpace PROC STDCALL mov r10 , rcx mov eax , 4783 ;syscall db 0Fh , 05h ret NtGdiSetColorSpace ENDP ; ULONG64 __stdcall NtGdiSetDeviceGammaRamp( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSetDeviceGammaRamp PROC STDCALL mov r10 , rcx mov eax , 4784 ;syscall db 0Fh , 05h ret NtGdiSetDeviceGammaRamp ENDP ; ULONG64 __stdcall NtGdiSetFontXform( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetFontXform PROC STDCALL mov r10 , rcx mov eax , 4785 ;syscall db 0Fh , 05h ret NtGdiSetFontXform ENDP ; ULONG64 __stdcall NtGdiSetIcmMode( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetIcmMode PROC STDCALL mov r10 , rcx mov eax , 4786 ;syscall db 0Fh , 05h ret NtGdiSetIcmMode ENDP ; ULONG64 __stdcall NtGdiSetLinkedUFIs( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetLinkedUFIs PROC STDCALL mov r10 , rcx mov eax , 4787 ;syscall db 0Fh , 05h ret NtGdiSetLinkedUFIs ENDP ; ULONG64 __stdcall NtGdiSetMagicColors( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetMagicColors PROC STDCALL mov r10 , rcx mov eax , 4788 ;syscall db 0Fh , 05h ret NtGdiSetMagicColors ENDP ; ULONG64 __stdcall NtGdiSetOPMSigningKeyAndSequenceNumbers( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSetOPMSigningKeyAndSequenceNumbers PROC STDCALL mov r10 , rcx mov eax , 4789 ;syscall db 0Fh , 05h ret NtGdiSetOPMSigningKeyAndSequenceNumbers ENDP ; ULONG64 __stdcall NtGdiSetPUMPDOBJ( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiSetPUMPDOBJ PROC STDCALL mov r10 , rcx mov eax , 4790 ;syscall db 0Fh , 05h ret NtGdiSetPUMPDOBJ ENDP ; ULONG64 __stdcall NtGdiSetPixelFormat( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSetPixelFormat PROC STDCALL mov r10 , rcx mov eax , 4791 ;syscall db 0Fh , 05h ret NtGdiSetPixelFormat ENDP ; ULONG64 __stdcall NtGdiSetRectRgn( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 
, ULONG64 arg_05 ); NtGdiSetRectRgn PROC STDCALL mov r10 , rcx mov eax , 4792 ;syscall db 0Fh , 05h ret NtGdiSetRectRgn ENDP ; ULONG64 __stdcall NtGdiSetSizeDevice( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetSizeDevice PROC STDCALL mov r10 , rcx mov eax , 4793 ;syscall db 0Fh , 05h ret NtGdiSetSizeDevice ENDP ; ULONG64 __stdcall NtGdiSetSystemPaletteUse( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiSetSystemPaletteUse PROC STDCALL mov r10 , rcx mov eax , 4794 ;syscall db 0Fh , 05h ret NtGdiSetSystemPaletteUse ENDP ; ULONG64 __stdcall NtGdiSetTextJustification( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtGdiSetTextJustification PROC STDCALL mov r10 , rcx mov eax , 4795 ;syscall db 0Fh , 05h ret NtGdiSetTextJustification ENDP ; ULONG64 __stdcall NtGdiSetUMPDSandboxState( ULONG64 arg_01 ); NtGdiSetUMPDSandboxState PROC STDCALL mov r10 , rcx mov eax , 4796 ;syscall db 0Fh , 05h ret NtGdiSetUMPDSandboxState ENDP ; ULONG64 __stdcall NtGdiStartDoc( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiStartDoc PROC STDCALL mov r10 , rcx mov eax , 4797 ;syscall db 0Fh , 05h ret NtGdiStartDoc ENDP ; ULONG64 __stdcall NtGdiStartPage( ULONG64 arg_01 ); NtGdiStartPage PROC STDCALL mov r10 , rcx mov eax , 4798 ;syscall db 0Fh , 05h ret NtGdiStartPage ENDP ; ULONG64 __stdcall NtGdiStrokeAndFillPath( ULONG64 arg_01 ); NtGdiStrokeAndFillPath PROC STDCALL mov r10 , rcx mov eax , 4799 ;syscall db 0Fh , 05h ret NtGdiStrokeAndFillPath ENDP ; ULONG64 __stdcall NtGdiStrokePath( ULONG64 arg_01 ); NtGdiStrokePath PROC STDCALL mov r10 , rcx mov eax , 4800 ;syscall db 0Fh , 05h ret NtGdiStrokePath ENDP ; ULONG64 __stdcall NtGdiSwapBuffers( ULONG64 arg_01 ); NtGdiSwapBuffers PROC STDCALL mov r10 , rcx mov eax , 4801 ;syscall db 0Fh , 05h ret NtGdiSwapBuffers ENDP ; ULONG64 __stdcall NtGdiTransparentBlt( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 
arg_09 , ULONG64 arg_10 , ULONG64 arg_11 ); NtGdiTransparentBlt PROC STDCALL mov r10 , rcx mov eax , 4802 ;syscall db 0Fh , 05h ret NtGdiTransparentBlt ENDP ; ULONG64 __stdcall NtGdiUMPDEngFreeUserMem( ULONG64 arg_01 ); NtGdiUMPDEngFreeUserMem PROC STDCALL mov r10 , rcx mov eax , 4803 ;syscall db 0Fh , 05h ret NtGdiUMPDEngFreeUserMem ENDP ; for CCompositionBuffer::GetIndependentFlipState ; ULONG64 __stdcall CCompositionBuffer__GetIndependentFlipState( ); CCompositionBuffer__GetIndependentFlipState PROC STDCALL mov r10 , rcx mov eax , 4804 ;syscall db 0Fh , 05h ret CCompositionBuffer__GetIndependentFlipState ENDP ; for CDiscardInputQueue::GetType ; ULONG64 __stdcall CDiscardInputQueue__GetType( ); CDiscardInputQueue__GetType PROC STDCALL mov r10 , rcx mov eax , 4805 ;syscall db 0Fh , 05h ret CDiscardInputQueue__GetType ENDP ; ULONG64 __stdcall NtGdiUpdateColors( ULONG64 arg_01 ); NtGdiUpdateColors PROC STDCALL mov r10 , rcx mov eax , 4806 ;syscall db 0Fh , 05h ret NtGdiUpdateColors ENDP ; ULONG64 __stdcall NtGdiUpdateTransform( ULONG64 arg_01 ); NtGdiUpdateTransform PROC STDCALL mov r10 , rcx mov eax , 4807 ;syscall db 0Fh , 05h ret NtGdiUpdateTransform ENDP ; ULONG64 __stdcall NtGdiWidenPath( ULONG64 arg_01 ); NtGdiWidenPath PROC STDCALL mov r10 , rcx mov eax , 4808 ;syscall db 0Fh , 05h ret NtGdiWidenPath ENDP ; ULONG64 __stdcall NtGdiXFORMOBJ_bApplyXform( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtGdiXFORMOBJ_bApplyXform PROC STDCALL mov r10 , rcx mov eax , 4809 ;syscall db 0Fh , 05h ret NtGdiXFORMOBJ_bApplyXform ENDP ; ULONG64 __stdcall NtGdiXFORMOBJ_iGetXform( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiXFORMOBJ_iGetXform PROC STDCALL mov r10 , rcx mov eax , 4810 ;syscall db 0Fh , 05h ret NtGdiXFORMOBJ_iGetXform ENDP ; ULONG64 __stdcall NtGdiXLATEOBJ_cGetPalette( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtGdiXLATEOBJ_cGetPalette PROC STDCALL mov r10 , rcx mov eax , 4811 ;syscall db 0Fh , 05h 
ret NtGdiXLATEOBJ_cGetPalette ENDP ; ULONG64 __stdcall NtGdiXLATEOBJ_hGetColorTransform( ULONG64 arg_01 ); NtGdiXLATEOBJ_hGetColorTransform PROC STDCALL mov r10 , rcx mov eax , 4812 ;syscall db 0Fh , 05h ret NtGdiXLATEOBJ_hGetColorTransform ENDP ; ULONG64 __stdcall NtGdiXLATEOBJ_iXlate( ULONG64 arg_01 , ULONG64 arg_02 ); NtGdiXLATEOBJ_iXlate PROC STDCALL mov r10 , rcx mov eax , 4813 ;syscall db 0Fh , 05h ret NtGdiXLATEOBJ_iXlate ENDP ; ULONG64 __stdcall NtNotifyPresentToCompositionSurface( ); NtNotifyPresentToCompositionSurface PROC STDCALL mov r10 , rcx mov eax , 4814 ;syscall db 0Fh , 05h ret NtNotifyPresentToCompositionSurface ENDP ; ULONG64 __stdcall NtOpenCompositionSurfaceDirtyRegion( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtOpenCompositionSurfaceDirtyRegion PROC STDCALL mov r10 , rcx mov eax , 4815 ;syscall db 0Fh , 05h ret NtOpenCompositionSurfaceDirtyRegion ENDP ; ULONG64 __stdcall NtOpenCompositionSurfaceSectionInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtOpenCompositionSurfaceSectionInfo PROC STDCALL mov r10 , rcx mov eax , 4816 ;syscall db 0Fh , 05h ret NtOpenCompositionSurfaceSectionInfo ENDP ; ULONG64 __stdcall NtOpenCompositionSurfaceSwapChainHandleInfo( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtOpenCompositionSurfaceSwapChainHandleInfo PROC STDCALL mov r10 , rcx mov eax , 4817 ;syscall db 0Fh , 05h ret NtOpenCompositionSurfaceSwapChainHandleInfo ENDP ; ULONG64 __stdcall NtQueryCompositionSurfaceBinding( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtQueryCompositionSurfaceBinding PROC STDCALL mov r10 , rcx mov eax , 4818 ;syscall db 0Fh , 05h ret NtQueryCompositionSurfaceBinding ENDP ; ULONG64 __stdcall NtQueryCompositionSurfaceRenderingRealization( ULONG64 arg_01 , ULONG64 arg_02 ); NtQueryCompositionSurfaceRenderingRealization PROC STDCALL mov r10 , rcx mov eax , 4819 ;syscall db 0Fh , 05h ret NtQueryCompositionSurfaceRenderingRealization ENDP ; ULONG64 __stdcall 
NtQueryCompositionSurfaceStatistics( ULONG64 arg_01 , ULONG64 arg_02 );

;;-----------------------------------------------------------------------------
;; SYSCALL_STUB name, num, nargs
;;
;; Generates one win32k system-service stub.  Each stub expands to exactly the
;; code the hand-written versions contained:
;;
;;     name PROC STDCALL
;;         mov r10 , rcx      ; x64 syscall convention: 1st arg goes in r10
;;         mov eax , num      ; system service number
;;         ;syscall
;;         db 0Fh , 05h       ; 'syscall' emitted as raw bytes (0F 05) so the
;;                            ; stub assembles even without the mnemonic
;;         ret
;;     name ENDP
;;
;; 'nargs' is documentation only -- the number of ULONG64 arguments from the
;; original C prototype comments.  It does not affect the generated code; the
;; kernel reads any further arguments per the standard x64 convention.
;; NOTE(review): service numbers are build-specific -- presumably this table
;; targets one particular Windows build; verify before reuse on another build.
;;-----------------------------------------------------------------------------
SYSCALL_STUB MACRO name:REQ, num:REQ, nargs
name PROC STDCALL
    mov r10 , rcx
    mov eax , num
    ;syscall
    db 0Fh , 05h
    ret
name ENDP
ENDM

;; ULONG64 __stdcall stubs, service numbers 4820-5012 (last operand = arg count)
SYSCALL_STUB NtQueryCompositionSurfaceStatistics, 4820, 2
SYSCALL_STUB NtSetCompositionSurfaceOutOfFrameDirectFlipNotification, 4821, 3
SYSCALL_STUB NtSetCompositionSurfaceStatistics, 4822, 3
SYSCALL_STUB NtTokenManagerGetOutOfFrameDirectFlipSurfaceUpdates, 4823, 6
SYSCALL_STUB NtTokenManagerOpenEvent, 4824, 1
SYSCALL_STUB NtTokenManagerThread, 4825, 1
SYSCALL_STUB NtUnBindCompositionSurface, 4826, 2
SYSCALL_STUB NtUserAcquireIAMKey, 4827, 1
SYSCALL_STUB NtUserAddClipboardFormatListener, 4828, 1
SYSCALL_STUB NtUserAssociateInputContext, 4829, 3
SYSCALL_STUB NtUserAutoPromoteMouseInPointer, 4830, 1
SYSCALL_STUB NtUserAutoRotateScreen, 4831, 2
SYSCALL_STUB NtUserBlockInput, 4832, 1
SYSCALL_STUB NtUserBuildHimcList, 4833, 4
SYSCALL_STUB NtUserBuildPropList, 4834, 4
SYSCALL_STUB NtUserCalculatePopupWindowPosition, 4835, 5
SYSCALL_STUB NtUserCallHwndOpt, 4836, 2
SYSCALL_STUB NtUserCanBrokerForceForeground, 4837, 1
SYSCALL_STUB NtUserChangeDisplaySettings, 4838, 4
SYSCALL_STUB NtUserChangeWindowMessageFilterEx, 4839, 4
SYSCALL_STUB NtUserCheckAccessForIntegrityLevel, 4840, 3
SYSCALL_STUB NtUserCheckProcessForClipboardAccess, 4841, 2
SYSCALL_STUB NtUserCheckProcessSession, 4842, 1
SYSCALL_STUB NtUserCheckWindowThreadDesktop, 4843, 2
SYSCALL_STUB NtUserChildWindowFromPointEx, 4844, 4
SYSCALL_STUB NtUserClipCursor, 4845, 1
SYSCALL_STUB NtUserCreateDCompositionHwndTarget, 4846, 4
SYSCALL_STUB NtUserCreateDesktopEx, 4847, 6
SYSCALL_STUB NtUserCreateInputContext, 4848, 1
SYSCALL_STUB NtUserCreateWindowStation, 4849, 8
SYSCALL_STUB NtUserCtxDisplayIOCtl, 4850, 3
SYSCALL_STUB NtUserDeferWindowPosAndBand, 4851, 10
SYSCALL_STUB NtUserDelegateCapturePointers, 4852, 3
SYSCALL_STUB NtUserDelegateInput, 4853, 6
SYSCALL_STUB NtUserDestroyDCompositionHwndTarget, 4854, 2
SYSCALL_STUB NtUserDestroyInputContext, 4855, 1
SYSCALL_STUB NtUserDisableImmersiveOwner, 4856, 1
SYSCALL_STUB NtUserDisableProcessWindowFiltering, 4857, 0
SYSCALL_STUB NtUserDisableThreadIme, 4858, 1
SYSCALL_STUB NtUserDiscardPointerFrameMessages, 4859, 1
SYSCALL_STUB NtUserDisplayConfigGetDeviceInfo, 4860, 1
SYSCALL_STUB NtUserDisplayConfigSetDeviceInfo, 4861, 1
SYSCALL_STUB NtUserDoSoundConnect, 4862, 0
SYSCALL_STUB NtUserDoSoundDisconnect, 4863, 0
SYSCALL_STUB NtUserDragDetect, 4864, 3
SYSCALL_STUB NtUserDragObject, 4865, 5
SYSCALL_STUB NtUserDrawAnimatedRects, 4866, 4
SYSCALL_STUB NtUserDrawCaption, 4867, 4
SYSCALL_STUB NtUserDrawCaptionTemp, 4868, 7
SYSCALL_STUB NtUserDrawMenuBarTemp, 4869, 5
SYSCALL_STUB NtUserDwmGetRemoteSessionOcclusionEvent, 4870, 0
SYSCALL_STUB NtUserDwmGetRemoteSessionOcclusionState, 4871, 0
SYSCALL_STUB NtUserDwmStartRedirection, 4872, 2
SYSCALL_STUB NtUserDwmStopRedirection, 4873, 0
SYSCALL_STUB NtUserDwmValidateWindow, 4874, 2
SYSCALL_STUB NtUserEnableIAMAccess, 4875, 3
SYSCALL_STUB NtUserEnableMouseInPointer, 4876, 1
SYSCALL_STUB NtUserEnableMouseInputForCursorSuppression, 4877, 1
SYSCALL_STUB NtUserEndMenu, 4878, 0
SYSCALL_STUB NtUserEvent, 4879, 1
SYSCALL_STUB NtUserFlashWindowEx, 4880, 1
SYSCALL_STUB NtUserFrostCrashedWindow, 4881, 2
SYSCALL_STUB NtUserGetAppImeLevel, 4882, 1
SYSCALL_STUB NtUserGetAutoRotationState, 4883, 1
SYSCALL_STUB NtUserGetCIMSSM, 4884, 1
SYSCALL_STUB NtUserGetCaretPos, 4885, 1
SYSCALL_STUB NtUserGetClipCursor, 4886, 1
SYSCALL_STUB NtUserGetClipboardAccessToken, 4887, 2
SYSCALL_STUB NtUserGetClipboardViewer, 4888, 0
SYSCALL_STUB NtUserGetComboBoxInfo, 4889, 2
SYSCALL_STUB NtUserGetCurrentInputMessageSource, 4890, 1
SYSCALL_STUB NtUserGetCursorInfo, 4891, 1
SYSCALL_STUB NtUserGetDesktopID, 4892, 2
SYSCALL_STUB NtUserGetDisplayAutoRotationPreferences, 4893, 1
SYSCALL_STUB NtUserGetDisplayAutoRotationPreferencesByProcessId, 4894, 3
SYSCALL_STUB NtUserGetDisplayConfigBufferSizes, 4895, 3
SYSCALL_STUB NtUserGetGestureConfig, 4896, 6
SYSCALL_STUB NtUserGetGestureExtArgs, 4897, 3
SYSCALL_STUB NtUserGetGestureInfo, 4898, 2
SYSCALL_STUB NtUserGetGlobalIMEStatus, 4899, 2
SYSCALL_STUB NtUserGetGuiResources, 4900, 2
SYSCALL_STUB NtUserGetImeHotKey, 4901, 4
SYSCALL_STUB NtUserGetImeInfoEx, 4902, 2
SYSCALL_STUB NtUserGetInputLocaleInfo, 4903, 2
SYSCALL_STUB NtUserGetInternalWindowPos, 4904, 3
SYSCALL_STUB NtUserGetKeyNameText, 4905, 3
SYSCALL_STUB NtUserGetKeyboardLayoutName, 4906, 1
SYSCALL_STUB NtUserGetLayeredWindowAttributes, 4907, 4
SYSCALL_STUB NtUserGetListBoxInfo, 4908, 1
SYSCALL_STUB NtUserGetMenuIndex, 4909, 2
SYSCALL_STUB NtUserGetMenuItemRect, 4910, 4
SYSCALL_STUB NtUserGetMouseMovePointsEx, 4911, 5
SYSCALL_STUB NtUserGetPointerCursorId, 4912, 2
SYSCALL_STUB NtUserGetPointerDevice, 4913, 2
SYSCALL_STUB NtUserGetPointerDeviceCursors, 4914, 3
SYSCALL_STUB NtUserGetPointerDeviceProperties, 4915, 3
SYSCALL_STUB NtUserGetPointerDeviceRects, 4916, 3
SYSCALL_STUB NtUserGetPointerDevices, 4917, 2
SYSCALL_STUB NtUserGetPointerInfoList, 4918, 8
SYSCALL_STUB NtUserGetPointerType, 4919, 2
SYSCALL_STUB NtUserGetPriorityClipboardFormat, 4920, 2
SYSCALL_STUB NtUserGetProcessUIContextInformation, 4921, 2
SYSCALL_STUB NtUserGetQueueEventStatus, 4922, 0
SYSCALL_STUB NtUserGetRawInputBuffer, 4923, 3
SYSCALL_STUB NtUserGetRawInputData, 4924, 5
SYSCALL_STUB NtUserGetRawInputDeviceInfo, 4925, 4
SYSCALL_STUB NtUserGetRawInputDeviceList, 4926, 3
SYSCALL_STUB NtUserGetRawPointerDeviceData, 4927, 5
SYSCALL_STUB NtUserGetRegisteredRawInputDevices, 4928, 3
SYSCALL_STUB NtUserGetTopLevelWindow, 4929, 1
SYSCALL_STUB NtUserGetTouchInputInfo, 4930, 4
SYSCALL_STUB NtUserGetTouchValidationStatus, 4931, 1
SYSCALL_STUB NtUserGetUpdatedClipboardFormats, 4932, 3
SYSCALL_STUB NtUserGetWOWClass, 4933, 2
SYSCALL_STUB NtUserGetWindowBand, 4934, 2
SYSCALL_STUB NtUserGetWindowCompositionAttribute, 4935, 2
SYSCALL_STUB NtUserGetWindowCompositionInfo, 4936, 2
SYSCALL_STUB NtUserGetWindowDisplayAffinity, 4937, 2
SYSCALL_STUB NtUserGetWindowFeedbackSetting, 4938, 5
SYSCALL_STUB NtUserGetWindowMinimizeRect, 4939, 2
SYSCALL_STUB NtUserGetWindowRgnEx, 4940, 3
SYSCALL_STUB NtUserGhostWindowFromHungWindow, 4941, 1
SYSCALL_STUB NtUserHandleDelegatedInput, 4942, 2
SYSCALL_STUB NtUserHardErrorControl, 4943, 3
SYSCALL_STUB NtUserHidePointerContactVisualization, 4944, 1
SYSCALL_STUB NtUserHiliteMenuItem, 4945, 4
SYSCALL_STUB NtUserHungWindowFromGhostWindow, 4946, 1
SYSCALL_STUB NtUserHwndQueryRedirectionInfo, 4947, 4
SYSCALL_STUB NtUserHwndSetRedirectionInfo, 4948, 4
SYSCALL_STUB NtUserImpersonateDdeClientWindow, 4949, 2
SYSCALL_STUB NtUserInitTask, 4950, 12
SYSCALL_STUB NtUserInitialize, 4951, 2
SYSCALL_STUB NtUserInitializeClientPfnArrays, 4952, 4
SYSCALL_STUB NtUserInitializeTouchInjection, 4953, 2
SYSCALL_STUB NtUserInjectGesture, 4954, 5
SYSCALL_STUB NtUserInjectTouchInput, 4955, 2
SYSCALL_STUB NtUserInternalClipCursor, 4956, 2
SYSCALL_STUB NtUserInternalGetWindowIcon, 4957, 2
SYSCALL_STUB NtUserIsMouseInPointerEnabled, 4958, 0
SYSCALL_STUB NtUserIsMouseInputEnabled, 4959, 0
SYSCALL_STUB NtUserIsTopLevelWindow, 4960, 1
SYSCALL_STUB NtUserIsTouchWindow, 4961, 2
SYSCALL_STUB NtUserLayoutCompleted, 4962, 3
SYSCALL_STUB NtUserLoadKeyboardLayoutEx, 4963, 8
SYSCALL_STUB NtUserLockWindowStation, 4964, 1
SYSCALL_STUB NtUserLockWorkStation, 4965, 0
SYSCALL_STUB NtUserLogicalToPhysicalPoint, 4966, 2
SYSCALL_STUB NtUserMNDragLeave, 4967, 0
SYSCALL_STUB NtUserMNDragOver, 4968, 2
SYSCALL_STUB NtUserMagControl, 4969, 2
SYSCALL_STUB NtUserMagGetContextInformation, 4970, 4
SYSCALL_STUB NtUserMagSetContextInformation, 4971, 4
SYSCALL_STUB NtUserMenuItemFromPoint, 4972, 4
SYSCALL_STUB NtUserMinMaximize, 4973, 3
SYSCALL_STUB NtUserModifyWindowTouchCapability, 4974, 3
SYSCALL_STUB NtUserNotifyIMEStatus, 4975, 3
SYSCALL_STUB NtUserOpenInputDesktop, 4976, 3
SYSCALL_STUB NtUserOpenThreadDesktop, 4977, 4
SYSCALL_STUB NtUserPaintMonitor, 4978, 3
SYSCALL_STUB NtUserPhysicalToLogicalPoint, 4979, 2
SYSCALL_STUB NtUserPrintWindow, 4980, 3
SYSCALL_STUB NtUserPromoteMouseInPointer, 4981, 1
SYSCALL_STUB NtUserPromotePointer, 4982, 2
SYSCALL_STUB NtUserQueryBSDRWindow, 4983, 0
SYSCALL_STUB NtUserQueryDisplayConfig, 4984, 6
SYSCALL_STUB NtUserQueryInformationThread, 4985, 4
SYSCALL_STUB NtUserQueryInputContext, 4986, 2
SYSCALL_STUB NtUserQuerySendMessage, 4987, 1
SYSCALL_STUB NtUserRealChildWindowFromPoint, 4988, 3
SYSCALL_STUB NtUserRealWaitMessageEx, 4989, 2
SYSCALL_STUB NtUserRegisterBSDRWindow, 4990, 2
SYSCALL_STUB NtUserRegisterEdgy, 4991, 3
SYSCALL_STUB NtUserRegisterErrorReportingDialog, 4992, 2
SYSCALL_STUB NtUserRegisterHotKey, 4993, 4
SYSCALL_STUB NtUserRegisterPointerDeviceNotifications, 4994, 2
SYSCALL_STUB NtUserRegisterPointerInputTarget, 4995, 3
SYSCALL_STUB NtUserRegisterRawInputDevices, 4996, 3
SYSCALL_STUB NtUserRegisterServicesProcess, 4997, 1
SYSCALL_STUB NtUserRegisterSessionPort, 4998, 1
SYSCALL_STUB NtUserRegisterTasklist, 4999, 1
SYSCALL_STUB NtUserRegisterTouchHitTestingWindow, 5000, 2
SYSCALL_STUB NtUserRegisterUserApiHook, 5001, 4
SYSCALL_STUB NtUserRemoteConnect, 5002, 3
SYSCALL_STUB NtUserRemoteRedrawRectangle, 5003, 4
SYSCALL_STUB NtUserRemoteRedrawScreen, 5004, 0
SYSCALL_STUB NtUserRemoteStopScreenUpdates, 5005, 0
SYSCALL_STUB NtUserRemoveClipboardFormatListener, 5006, 1
SYSCALL_STUB NtUserResolveDesktopForWOW, 5007, 1
SYSCALL_STUB NtUserSendEventMessage, 5008, 4
SYSCALL_STUB NtUserSetActiveProcess, 5009, 1
SYSCALL_STUB NtUserSetAppImeLevel, 5010, 2
SYSCALL_STUB NtUserSetAutoRotation, 5011, 1
SYSCALL_STUB NtUserSetBrokeredForeground, 5012, 1
; ULONG64 __stdcall NtUserSetCalibrationData( ULONG64 arg_01 , ULONG64 arg_02 ,
ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetCalibrationData PROC STDCALL mov r10 , rcx mov eax , 5013 ;syscall db 0Fh , 05h ret NtUserSetCalibrationData ENDP ; ULONG64 __stdcall NtUserSetChildWindowNoActivate( ULONG64 arg_01 ); NtUserSetChildWindowNoActivate PROC STDCALL mov r10 , rcx mov eax , 5014 ;syscall db 0Fh , 05h ret NtUserSetChildWindowNoActivate ENDP ; ULONG64 __stdcall NtUserSetClassWord( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetClassWord PROC STDCALL mov r10 , rcx mov eax , 5015 ;syscall db 0Fh , 05h ret NtUserSetClassWord ENDP ; ULONG64 __stdcall NtUserSetCursorContents( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetCursorContents PROC STDCALL mov r10 , rcx mov eax , 5016 ;syscall db 0Fh , 05h ret NtUserSetCursorContents ENDP ; ULONG64 __stdcall NtUserSetDisplayAutoRotationPreferences( ULONG64 arg_01 ); NtUserSetDisplayAutoRotationPreferences PROC STDCALL mov r10 , rcx mov eax , 5017 ;syscall db 0Fh , 05h ret NtUserSetDisplayAutoRotationPreferences ENDP ; ULONG64 __stdcall NtUserSetDisplayConfig( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtUserSetDisplayConfig PROC STDCALL mov r10 , rcx mov eax , 5018 ;syscall db 0Fh , 05h ret NtUserSetDisplayConfig ENDP ; ULONG64 __stdcall NtUserSetDisplayMapping( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetDisplayMapping PROC STDCALL mov r10 , rcx mov eax , 5019 ;syscall db 0Fh , 05h ret NtUserSetDisplayMapping ENDP ; ULONG64 __stdcall NtUserSetFallbackForeground( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetFallbackForeground PROC STDCALL mov r10 , rcx mov eax , 5020 ;syscall db 0Fh , 05h ret NtUserSetFallbackForeground ENDP ; ULONG64 __stdcall NtUserSetGestureConfig( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtUserSetGestureConfig PROC STDCALL mov r10 , rcx mov eax , 5021 ;syscall db 0Fh , 05h ret NtUserSetGestureConfig ENDP ; ULONG64 __stdcall NtUserSetImeHotKey( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 
, ULONG64 arg_04 , ULONG64 arg_05 ); NtUserSetImeHotKey PROC STDCALL mov r10 , rcx mov eax , 5022 ;syscall db 0Fh , 05h ret NtUserSetImeHotKey ENDP ; ULONG64 __stdcall NtUserSetImeInfoEx( ULONG64 arg_01 ); NtUserSetImeInfoEx PROC STDCALL mov r10 , rcx mov eax , 5023 ;syscall db 0Fh , 05h ret NtUserSetImeInfoEx ENDP ; ULONG64 __stdcall NtUserSetImeOwnerWindow( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetImeOwnerWindow PROC STDCALL mov r10 , rcx mov eax , 5024 ;syscall db 0Fh , 05h ret NtUserSetImeOwnerWindow ENDP ; ULONG64 __stdcall NtUserSetImmersiveBackgroundWindow( ULONG64 arg_01 ); NtUserSetImmersiveBackgroundWindow PROC STDCALL mov r10 , rcx mov eax , 5025 ;syscall db 0Fh , 05h ret NtUserSetImmersiveBackgroundWindow ENDP ; ULONG64 __stdcall NtUserSetInternalWindowPos( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetInternalWindowPos PROC STDCALL mov r10 , rcx mov eax , 5026 ;syscall db 0Fh , 05h ret NtUserSetInternalWindowPos ENDP ; ULONG64 __stdcall NtUserSetLayeredWindowAttributes( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetLayeredWindowAttributes PROC STDCALL mov r10 , rcx mov eax , 5027 ;syscall db 0Fh , 05h ret NtUserSetLayeredWindowAttributes ENDP ; ULONG64 __stdcall NtUserSetMenu( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetMenu PROC STDCALL mov r10 , rcx mov eax , 5028 ;syscall db 0Fh , 05h ret NtUserSetMenu ENDP ; ULONG64 __stdcall NtUserSetMenuContextHelpId( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetMenuContextHelpId PROC STDCALL mov r10 , rcx mov eax , 5029 ;syscall db 0Fh , 05h ret NtUserSetMenuContextHelpId ENDP ; ULONG64 __stdcall NtUserSetMenuFlagRtoL( ULONG64 arg_01 ); NtUserSetMenuFlagRtoL PROC STDCALL mov r10 , rcx mov eax , 5030 ;syscall db 0Fh , 05h ret NtUserSetMenuFlagRtoL ENDP ; ULONG64 __stdcall NtUserSetMirrorRendering( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetMirrorRendering PROC STDCALL mov r10 , rcx mov eax , 5031 ;syscall db 0Fh , 05h ret 
NtUserSetMirrorRendering ENDP ; ULONG64 __stdcall NtUserSetObjectInformation( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetObjectInformation PROC STDCALL mov r10 , rcx mov eax , 5032 ;syscall db 0Fh , 05h ret NtUserSetObjectInformation ENDP ; ULONG64 __stdcall NtUserSetProcessDPIAware( ); NtUserSetProcessDPIAware PROC STDCALL mov r10 , rcx mov eax , 5033 ;syscall db 0Fh , 05h ret NtUserSetProcessDPIAware ENDP ; ULONG64 __stdcall NtUserSetProcessRestrictionExemption( ULONG64 arg_01 ); NtUserSetProcessRestrictionExemption PROC STDCALL mov r10 , rcx mov eax , 5034 ;syscall db 0Fh , 05h ret NtUserSetProcessRestrictionExemption ENDP ; ULONG64 __stdcall NtUserSetProcessUIAccessZorder( ); NtUserSetProcessUIAccessZorder PROC STDCALL mov r10 , rcx mov eax , 5035 ;syscall db 0Fh , 05h ret NtUserSetProcessUIAccessZorder ENDP ; ULONG64 __stdcall NtUserSetSensorPresence( ULONG64 arg_01 ); NtUserSetSensorPresence PROC STDCALL mov r10 , rcx mov eax , 5036 ;syscall db 0Fh , 05h ret NtUserSetSensorPresence ENDP ; ULONG64 __stdcall NtUserSetShellWindowEx( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetShellWindowEx PROC STDCALL mov r10 , rcx mov eax , 5037 ;syscall db 0Fh , 05h ret NtUserSetShellWindowEx ENDP ; ULONG64 __stdcall NtUserSetSysColors( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetSysColors PROC STDCALL mov r10 , rcx mov eax , 5038 ;syscall db 0Fh , 05h ret NtUserSetSysColors ENDP ; ULONG64 __stdcall NtUserSetSystemCursor( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetSystemCursor PROC STDCALL mov r10 , rcx mov eax , 5039 ;syscall db 0Fh , 05h ret NtUserSetSystemCursor ENDP ; ULONG64 __stdcall NtUserSetSystemTimer( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetSystemTimer PROC STDCALL mov r10 , rcx mov eax , 5040 ;syscall db 0Fh , 05h ret NtUserSetSystemTimer ENDP ; ULONG64 __stdcall NtUserSetThreadInputBlocked( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetThreadInputBlocked PROC STDCALL mov r10 , 
rcx mov eax , 5041 ;syscall db 0Fh , 05h ret NtUserSetThreadInputBlocked ENDP ; ULONG64 __stdcall NtUserSetThreadLayoutHandles( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetThreadLayoutHandles PROC STDCALL mov r10 , rcx mov eax , 5042 ;syscall db 0Fh , 05h ret NtUserSetThreadLayoutHandles ENDP ; ULONG64 __stdcall NtUserSetWindowBand( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetWindowBand PROC STDCALL mov r10 , rcx mov eax , 5043 ;syscall db 0Fh , 05h ret NtUserSetWindowBand ENDP ; ULONG64 __stdcall NtUserSetWindowCompositionAttribute( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetWindowCompositionAttribute PROC STDCALL mov r10 , rcx mov eax , 5044 ;syscall db 0Fh , 05h ret NtUserSetWindowCompositionAttribute ENDP ; ULONG64 __stdcall NtUserSetWindowCompositionTransition( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); NtUserSetWindowCompositionTransition PROC STDCALL mov r10 , rcx mov eax , 5045 ;syscall db 0Fh , 05h ret NtUserSetWindowCompositionTransition ENDP ; ULONG64 __stdcall NtUserSetWindowDisplayAffinity( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserSetWindowDisplayAffinity PROC STDCALL mov r10 , rcx mov eax , 5046 ;syscall db 0Fh , 05h ret NtUserSetWindowDisplayAffinity ENDP ; ULONG64 __stdcall NtUserSetWindowFeedbackSetting( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtUserSetWindowFeedbackSetting PROC STDCALL mov r10 , rcx mov eax , 5047 ;syscall db 0Fh , 05h ret NtUserSetWindowFeedbackSetting ENDP ; ULONG64 __stdcall NtUserSetWindowRgnEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSetWindowRgnEx PROC STDCALL mov r10 , rcx mov eax , 5048 ;syscall db 0Fh , 05h ret NtUserSetWindowRgnEx ENDP ; ULONG64 __stdcall NtUserSetWindowStationUser( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetWindowStationUser PROC STDCALL mov r10 , rcx mov eax , 5049 ;syscall db 0Fh , 05h ret NtUserSetWindowStationUser ENDP ; 
ULONG64 __stdcall NtUserShowSystemCursor( ULONG64 arg_01 ); NtUserShowSystemCursor PROC STDCALL mov r10 , rcx mov eax , 5050 ;syscall db 0Fh , 05h ret NtUserShowSystemCursor ENDP ; ULONG64 __stdcall NtUserShutdownBlockReasonCreate( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserShutdownBlockReasonCreate PROC STDCALL mov r10 , rcx mov eax , 5051 ;syscall db 0Fh , 05h ret NtUserShutdownBlockReasonCreate ENDP ; ULONG64 __stdcall NtUserShutdownBlockReasonQuery( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserShutdownBlockReasonQuery PROC STDCALL mov r10 , rcx mov eax , 5052 ;syscall db 0Fh , 05h ret NtUserShutdownBlockReasonQuery ENDP ; ULONG64 __stdcall NtUserShutdownReasonDestroy( ULONG64 arg_01 ); NtUserShutdownReasonDestroy PROC STDCALL mov r10 , rcx mov eax , 5053 ;syscall db 0Fh , 05h ret NtUserShutdownReasonDestroy ENDP ; ULONG64 __stdcall NtUserSignalRedirectionStartComplete( ); NtUserSignalRedirectionStartComplete PROC STDCALL mov r10 , rcx mov eax , 5054 ;syscall db 0Fh , 05h ret NtUserSignalRedirectionStartComplete ENDP ; ULONG64 __stdcall NtUserSlicerControl( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSlicerControl PROC STDCALL mov r10 , rcx mov eax , 5055 ;syscall db 0Fh , 05h ret NtUserSlicerControl ENDP ; ULONG64 __stdcall NtUserSoundSentry( ); NtUserSoundSentry PROC STDCALL mov r10 , rcx mov eax , 5056 ;syscall db 0Fh , 05h ret NtUserSoundSentry ENDP ; ULONG64 __stdcall NtUserSwitchDesktop( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserSwitchDesktop PROC STDCALL mov r10 , rcx mov eax , 5057 ;syscall db 0Fh , 05h ret NtUserSwitchDesktop ENDP ; ULONG64 __stdcall NtUserTestForInteractiveUser( ULONG64 arg_01 ); NtUserTestForInteractiveUser PROC STDCALL mov r10 , rcx mov eax , 5058 ;syscall db 0Fh , 05h ret NtUserTestForInteractiveUser ENDP ; ULONG64 __stdcall NtUserTrackPopupMenuEx( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 ); 
NtUserTrackPopupMenuEx PROC STDCALL mov r10 , rcx mov eax , 5059 ;syscall db 0Fh , 05h ret NtUserTrackPopupMenuEx ENDP ; ULONG64 __stdcall NtUserUndelegateInput( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserUndelegateInput PROC STDCALL mov r10 , rcx mov eax , 5060 ;syscall db 0Fh , 05h ret NtUserUndelegateInput ENDP ; ULONG64 __stdcall NtUserUnloadKeyboardLayout( ULONG64 arg_01 ); NtUserUnloadKeyboardLayout PROC STDCALL mov r10 , rcx mov eax , 5061 ;syscall db 0Fh , 05h ret NtUserUnloadKeyboardLayout ENDP ; ULONG64 __stdcall NtUserUnlockWindowStation( ULONG64 arg_01 ); NtUserUnlockWindowStation PROC STDCALL mov r10 , rcx mov eax , 5062 ;syscall db 0Fh , 05h ret NtUserUnlockWindowStation ENDP ; ULONG64 __stdcall NtUserUnregisterHotKey( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserUnregisterHotKey PROC STDCALL mov r10 , rcx mov eax , 5063 ;syscall db 0Fh , 05h ret NtUserUnregisterHotKey ENDP ; ULONG64 __stdcall NtUserUnregisterSessionPort( ); NtUserUnregisterSessionPort PROC STDCALL mov r10 , rcx mov eax , 5064 ;syscall db 0Fh , 05h ret NtUserUnregisterSessionPort ENDP ; ULONG64 __stdcall NtUserUnregisterUserApiHook( ); NtUserUnregisterUserApiHook PROC STDCALL mov r10 , rcx mov eax , 5065 ;syscall db 0Fh , 05h ret NtUserUnregisterUserApiHook ENDP ; ULONG64 __stdcall NtUserUpdateDefaultDesktopThumbnail( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 ); NtUserUpdateDefaultDesktopThumbnail PROC STDCALL mov r10 , rcx mov eax , 5066 ;syscall db 0Fh , 05h ret NtUserUpdateDefaultDesktopThumbnail ENDP ; ULONG64 __stdcall NtUserUpdateInputContext( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserUpdateInputContext PROC STDCALL mov r10 , rcx mov eax , 5067 ;syscall db 0Fh , 05h ret NtUserUpdateInputContext ENDP ; ULONG64 __stdcall NtUserUpdateInstance( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserUpdateInstance PROC STDCALL mov r10 , rcx mov eax , 5068 ;syscall db 0Fh , 05h ret NtUserUpdateInstance ENDP ; ULONG64 __stdcall 
NtUserUpdateLayeredWindow( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 , ULONG64 arg_05 , ULONG64 arg_06 , ULONG64 arg_07 , ULONG64 arg_08 , ULONG64 arg_09 , ULONG64 arg_10 ); NtUserUpdateLayeredWindow PROC STDCALL mov r10 , rcx mov eax , 5069 ;syscall db 0Fh , 05h ret NtUserUpdateLayeredWindow ENDP ; ULONG64 __stdcall NtUserUpdatePerUserSystemParameters( ULONG64 arg_01 ); NtUserUpdatePerUserSystemParameters PROC STDCALL mov r10 , rcx mov eax , 5070 ;syscall db 0Fh , 05h ret NtUserUpdatePerUserSystemParameters ENDP ; ULONG64 __stdcall NtUserUpdateWindowTransform( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserUpdateWindowTransform PROC STDCALL mov r10 , rcx mov eax , 5071 ;syscall db 0Fh , 05h ret NtUserUpdateWindowTransform ENDP ; ULONG64 __stdcall NtUserUserHandleGrantAccess( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserUserHandleGrantAccess PROC STDCALL mov r10 , rcx mov eax , 5072 ;syscall db 0Fh , 05h ret NtUserUserHandleGrantAccess ENDP ; ULONG64 __stdcall NtUserValidateHandleSecure( ULONG64 arg_01 ); NtUserValidateHandleSecure PROC STDCALL mov r10 , rcx mov eax , 5073 ;syscall db 0Fh , 05h ret NtUserValidateHandleSecure ENDP ; ULONG64 __stdcall NtUserWaitAvailableMessageEx( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserWaitAvailableMessageEx PROC STDCALL mov r10 , rcx mov eax , 5074 ;syscall db 0Fh , 05h ret NtUserWaitAvailableMessageEx ENDP ; ULONG64 __stdcall NtUserWaitForInputIdle( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 ); NtUserWaitForInputIdle PROC STDCALL mov r10 , rcx mov eax , 5075 ;syscall db 0Fh , 05h ret NtUserWaitForInputIdle ENDP ; ULONG64 __stdcall NtUserWaitForMsgAndEvent( ULONG64 arg_01 ); NtUserWaitForMsgAndEvent PROC STDCALL mov r10 , rcx mov eax , 5076 ;syscall db 0Fh , 05h ret NtUserWaitForMsgAndEvent ENDP ; ULONG64 __stdcall NtUserWaitForRedirectionStartComplete( ); NtUserWaitForRedirectionStartComplete PROC STDCALL mov r10 , rcx mov eax , 5077 ;syscall db 0Fh , 05h ret 
NtUserWaitForRedirectionStartComplete ENDP ; ULONG64 __stdcall NtUserWindowFromPhysicalPoint( ULONG64 arg_01 , ULONG64 arg_02 ); NtUserWindowFromPhysicalPoint PROC STDCALL mov r10 , rcx mov eax , 5078 ;syscall db 0Fh , 05h ret NtUserWindowFromPhysicalPoint ENDP ; ULONG64 __stdcall NtValidateCompositionSurfaceHandle( ULONG64 arg_01 , ULONG64 arg_02 ); NtValidateCompositionSurfaceHandle PROC STDCALL mov r10 , rcx mov eax , 5079 ;syscall db 0Fh , 05h ret NtValidateCompositionSurfaceHandle ENDP ; ULONG64 __stdcall NtUserSetClassLongPtr( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetClassLongPtr PROC STDCALL mov r10 , rcx mov eax , 5080 ;syscall db 0Fh , 05h ret NtUserSetClassLongPtr ENDP ; ULONG64 __stdcall NtUserSetWindowLongPtr( ULONG64 arg_01 , ULONG64 arg_02 , ULONG64 arg_03 , ULONG64 arg_04 ); NtUserSetWindowLongPtr PROC STDCALL mov r10 , rcx mov eax , 5081 ;syscall db 0Fh , 05h ret NtUserSetWindowLongPtr ENDP
BITS 32
;TEST_FILE_META_BEGIN
;TEST_TYPE=TEST_F
;TEST_IGNOREFLAGS=FLAG_AF|FLAG_OF
;TEST_FILE_META_END
; Sar32RI1
; Records a 32-bit arithmetic right shift of a register by an immediate.
; The TEST_* marker comments above/below are read by the test harness and
; must be preserved verbatim; AF and OF are excluded from flag comparison
; per TEST_IGNOREFLAGS.
; Seed the register with a known value before the recorded instruction.
mov ebx, 0x56
;TEST_BEGIN_RECORDING
sar ebx, 0x3
;TEST_END_RECORDING
/***************************************************************************** * degrib1.c * * DESCRIPTION * This file contains the main driver routines to unpack GRIB 1 files. * * HISTORY * 4/2003 Arthur Taylor (MDL / RSIS): Created. * * NOTES * GRIB 1 files are assumed to be big endian. ***************************************************************************** */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include "degrib2.h" #include "myerror.h" #include "myassert.h" #include "memendian.h" #include "scan.h" #include "degrib1.h" #include "metaname.h" #include "clock.h" #include "cpl_error.h" /* default missing data value (see: bitmap GRIB1: sect3 and sect4) */ /* UNDEFINED is default, UNDEFINED_PRIM is desired choice. */ #define UNDEFINED 9.999e20 #define UNDEFINED_PRIM 9999 #define GRIB_UNSIGN_INT3(a,b,c) ((a<<16)+(b<<8)+c) #define GRIB_UNSIGN_INT2(a,b) ((a<<8)+b) #define GRIB_SIGN_INT3(a,b,c) ((1-(int) ((unsigned) (a & 0x80) >> 6)) * (int) (((a & 127) << 16)+(b<<8)+c)) #define GRIB_SIGN_INT2(a,b) ((1-(int) ((unsigned) (a & 0x80) >> 6)) * (int) (((a & 0x7f) << 8) + b)) /* various centers */ #define NMC 7 #define US_OTHER 9 #define CPTEC 46 /* Canada Center */ #define CMC 54 #define AFWA 57 #define DWD 78 #define ECMWF 98 #define ATHENS 96 /* various subcenters */ #define SUBCENTER_MDL 14 #define SUBCENTER_TDL 11 /* The idea of rean or opn is to give a warning about default choice of which table to use. */ #define DEF_NCEP_TABLE rean_nowarn enum Def_NCEP_Table { rean, opn, rean_nowarn, opn_nowarn }; /***************************************************************************** * Choose_ParmTable() -- * * Arthur Taylor / MDL * * PURPOSE * Chooses the correct Parameter table depending on what is in the GRIB1 * message's "Product Definition Section". * * ARGUMENTS * pdsMeta = The filled out pdsMeta data structure to base choice on. 
(Input)
 *    center = The Center that created the data (Input)
 * subcenter = The Sub Center that created the data (Input)
 *
 * FILES/DATABASES: None
 *
 * RETURNS: ParmTable (appropriate parameter table.)
 *
 * HISTORY
 *   <unknown> : wgrib library : cnames.c
 *   4/2003 Arthur Taylor (MDL/RSIS): Modified
 *  10/2005 AAT: Adjusted to take center, subcenter
 *
 * NOTES
 *****************************************************************************
 */
static const GRIB1ParmTable *Choose_ParmTable (pdsG1Type *pdsMeta,
                                               unsigned short int center,
                                               unsigned short int subcenter)
{
   int process;         /* The process ID from the GRIB1 message. */

   switch (center) {
      case NMC:
         /* Early master-table versions: a few NCEP subcenters have their
          * own dedicated tables. */
         if (pdsMeta->mstrVersion <= 3) {
            switch (subcenter) {
               case 1:
                  return &parm_table_ncep_reanal[0];
               case SUBCENTER_TDL:
                  return &parm_table_ncep_tdl[0];
               case SUBCENTER_MDL:
                  return &parm_table_ncep_mdl[0];
            }
         }
         /* figure out if NCEP opn or reanalysis */
         switch (pdsMeta->mstrVersion) {
            case 0:
               return &parm_table_ncep_opn[0];
            case 1:
            case 2:
               process = pdsMeta->genProcess;
               /* Only subcenter 0 with generating process 80 or 180 is
                * ambiguous between operational and reanalysis tables. */
               if ((subcenter != 0) || ((process != 80) && (process != 180))) {
                  return &parm_table_ncep_opn[0];
               }
               /* At this point could be either the opn or reanalysis table;
                * DEF_NCEP_TABLE picks the compiled-in default. */
               switch (DEF_NCEP_TABLE) {
                  case opn_nowarn:
                     return &parm_table_ncep_opn[0];
                  case rean_nowarn:
                     return &parm_table_ncep_reanal[0];
               }
               break;
            case 3:
               return &parm_table_ncep_opn[0];
            case 128:
               return &parm_table_omb[0];
            case 129:
               return &parm_table_nceptab_129[0];
            case 130:
               return &parm_table_nceptab_130[0];
            case 131:
               return &parm_table_nceptab_131[0];
         }
         break;
      case AFWA:
         switch (subcenter) {
            case 0:
               return &parm_table_afwa_000[0];
            case 1:
            case 4:
               return &parm_table_afwa_001[0];
            case 2:
               return &parm_table_afwa_002[0];
            case 3:
               return &parm_table_afwa_003[0];
            case 10:
               return &parm_table_afwa_010[0];
            case 11:
               return &parm_table_afwa_011[0];
/*          case 5: */
               /* Didn't have a table 5. */
         }
         break;
      case ECMWF:
         switch (pdsMeta->mstrVersion) {
            case 128:
               return &parm_table_ecmwf_128[0];
            case 129:
               return &parm_table_ecmwf_129[0];
            case 130:
               return &parm_table_ecmwf_130[0];
            case 131:
               return &parm_table_ecmwf_131[0];
            case 140:
               return &parm_table_ecmwf_140[0];
            case 150:
               return &parm_table_ecmwf_150[0];
            case 160:
               return &parm_table_ecmwf_160[0];
            case 170:
               return &parm_table_ecmwf_170[0];
            case 180:
               return &parm_table_ecmwf_180[0];
         }
         break;
      case DWD:
         switch (pdsMeta->mstrVersion) {
            case 2:
               return &parm_table_dwd_002[0];
            case 201:
               return &parm_table_dwd_201[0];
            case 202:
               return &parm_table_dwd_202[0];
            case 203:
               return &parm_table_dwd_203[0];
         }
         break;
      case CPTEC:
         switch (pdsMeta->mstrVersion) {
            case 254:
               return &parm_table_cptec_254[0];
         }
         break;
      case US_OTHER:
         switch (subcenter) {
            case 163:
               return &parm_table_nohrsc[0];
         }
         break;
      case ATHENS:
         return &parm_table_athens[0];
         break;
      case CMC:
         return &parm_table_cmc[0];
         break;
   }
   /* No match: fall back to the "undefined" table.  Only log when the
    * request is clearly outside the standard range (newer master table
    * version or a local-use parameter number). */
   if ((pdsMeta->mstrVersion > 3) || (pdsMeta->cat > 127)) {
      CPLDebug ( "GRIB",
                 "Undefined parameter table (center %d-%d table %d).",
                 center, subcenter, pdsMeta->mstrVersion);
   }
   return &parm_table_undefined[0];
}

/*****************************************************************************
 * GRIB1_Table2LookUp() --
 *
 * Arthur Taylor / MDL
 *
 * PURPOSE
 *   Returns the variable name (type of data) and comment (longer form of the
 * name) for the data that is in the GRIB1 message.
 *
 * ARGUMENTS
 *      name = A pointer to the resulting short name. (Output)
 *   comment = A pointer to the resulting long name. (Output)
 *   pdsMeta = The filled out pdsMeta data structure to base choice on.
(Input)
 *    center = The Center that created the data (Input)
 * subcenter = The Sub Center that created the data (Input)
 *
 * FILES/DATABASES: None
 *
 * RETURNS: void
 *
 * HISTORY
 *   4/2003 Arthur Taylor (MDL/RSIS): Created
 *  10/2005 AAT: Adjusted to take center, subcenter
 *
 * NOTES
 *****************************************************************************
 */
static void GRIB1_Table2LookUp (pdsG1Type *pdsMeta, const char **name,
                                const char **comment, const char **unit,
                                int *convert,
                                unsigned short int center,
                                unsigned short int subcenter)
{
   const GRIB1ParmTable *table; /* The parameter table chosen by the pdsMeta
                                 * data */

   table = Choose_ParmTable (pdsMeta, center, subcenter);
   /* Special case: NCEP table 129 parameter 180 with an averaged time
    * range gets a dedicated name rather than the table entry. */
   if ((center == NMC) && (pdsMeta->mstrVersion == 129)
       && (pdsMeta->cat == 180)) {
      if (pdsMeta->timeRange == 3) {
         *name = "AVGOZCON";
         *comment = "Average Ozone Concentration";
         *unit = "PPB";
         *convert = UC_NONE;
         return;
      }
   }
   /* pdsMeta->cat indexes directly into the chosen parameter table. */
   *name = table[pdsMeta->cat].name;
   *comment = table[pdsMeta->cat].comment;
   *unit = table[pdsMeta->cat].unit;
   *convert = table[pdsMeta->cat].convert;
/* printf ("%s %s %s\n", *name, *comment, *unit);*/
}

/* Similar to metaname.c :: ParseLevelName() */
/* Builds (allocating via reallocSprintf) the short and long names for the
 * GRIB1 level (table 3), freeing any strings the caller passed in. */
static void GRIB1_Table3LookUp (pdsG1Type *pdsMeta, char **shortLevelName,
                                char **longLevelName)
{
   uChar type = pdsMeta->levelType;
   uChar level1, level2;

   /* Release any previously allocated names before rebuilding them. */
   free (*shortLevelName);
   *shortLevelName = NULL;
   free (*longLevelName);
   *longLevelName = NULL;
   /* Find out if val is a 2 part value or not */
   if (GRIB1Surface[type].f_twoPart) {
      /* Two-part level: high byte is the first level, low byte the second. */
      level1 = (pdsMeta->levelVal >> 8);
      level2 = (pdsMeta->levelVal & 0xff);
      reallocSprintf (shortLevelName, "%d-%d-%s", level1, level2,
                      GRIB1Surface[type].name);
      reallocSprintf (longLevelName, "%d-%d[%s] %s (%s)", level1, level2,
                      GRIB1Surface[type].unit, GRIB1Surface[type].name,
                      GRIB1Surface[type].comment);
   } else {
      reallocSprintf (shortLevelName, "%d-%s", pdsMeta->levelVal,
                      GRIB1Surface[type].name);
      reallocSprintf (longLevelName, "%d[%s] %s (%s)", pdsMeta->levelVal,
                      GRIB1Surface[type].unit,
                      GRIB1Surface[type].name, GRIB1Surface[type].comment);
   }
}

/*****************************************************************************
 * fval_360() --
 *
 * Albion Taylor / ARL
 *
 * PURPOSE
 *   Converts an IBM360 floating point number to an IEEE floating point
 * number.  The IBM floating point spec represents the fraction as the last
 * 3 bytes of the number, with 0xffffff being just shy of 1.0.  The first byte
 * leads with a sign bit, and the last seven bits represent the powers of 16
 * (not 2), with a bias of 0x40 giving 16^0.
 *
 * ARGUMENTS
 *   aval = A sInt4 containing the original IBM 360 number. (Input)
 *
 * FILES/DATABASES: None
 *
 * RETURNS: double = the value that aval represents.
 *
 * HISTORY
 *   <unknown> Albion Taylor (ARL): Created
 *   4/2003 Arthur Taylor (MDL/RSIS): Cleaned up.
 *   5/2003 AAT: some kind of Bug due to optimizations...
 *          -1055916032 => 0 instead of -1
 *
 * NOTES
 *****************************************************************************
 */
static double fval_360 (uInt4 aval)
{
   short int ptr[4];
   /* Build the IEEE-754 bit pattern for the power-of-16 scale factor in
    * the exponent word of a double; the other three 16-bit words are zero.
    * Which of the four words holds the exponent depends on host byte
    * order, hence the LITTLE_ENDIAN split. */
#ifdef LITTLE_ENDIAN
   ptr[3] = ((((aval >> 24) & 0x7f) << 2) + (0x3ff - 0x100)) << 4;
   ptr[2] = 0;
   ptr[1] = 0;
   ptr[0] = 0;
#else
   ptr[0] = ((((aval >> 24) & 0x7f) << 2) + (0x3ff - 0x100)) << 4;
   ptr[1] = 0;
   ptr[2] = 0;
   ptr[3] = 0;
#endif
   double pow16;
   memcpy(&pow16, ptr, 8);
   /* Apply the IBM sign bit and scale the 24-bit mantissa into [0,1). */
   return ((aval & 0x80000000) ? -pow16 : pow16) *
         (aval & 0xffffff) / ((double) 0x1000000);
}

/*****************************************************************************
 * ReadGrib1Sect1() --
 *
 * Arthur Taylor / MDL
 *
 * PURPOSE
 *   Parses the GRIB1 "Product Definition Section" or section 1, filling out
 * the pdsMeta data structure.
 *
 * ARGUMENTS
 *       pds = The compressed part of the message dealing with "PDS". (Input)
 *   gribLen = The total length of the GRIB1 message. (Input)
 *    curLoc = Current location in the GRIB1 message. (Output)
 *   pdsMeta = The filled out pdsMeta data structure. (Output)
 *     f_gds = boolean if there is a Grid Definition Section. (Output)
 *    gridID = The Grid ID.
(Output)
 *     f_bms = boolean if there is a Bitmap Section. (Output)
 *       DSF = Decimal Scale Factor for unpacking the data. (Output)
 *    center = The Center that created the data (Output)
 * subcenter = The Sub Center that created the data (Output)
 *
 * FILES/DATABASES: None
 *
 * RETURNS: int (could use errSprintf())
 *    0 = OK
 *   -1 = gribLen is too small.
 *
 * HISTORY
 *   4/2003 Arthur Taylor (MDL/RSIS): Created.
 *   5/2004 AAT: Paid attention to table 5 (Time range indicator) and which
 *          of P1 and P2 are the valid times.
 *  10/2005 AAT: Adjusted to take center, subcenter
 *
 * NOTES
 *****************************************************************************
 */
static int ReadGrib1Sect1 (uChar *pds, uInt4 gribLen, uInt4 *curLoc,
                           pdsG1Type *pdsMeta, char *f_gds, uChar *gridID,
                           char *f_bms, short int *DSF,
                           unsigned short int *center,
                           unsigned short int *subcenter)
{
   sInt4 sectLen;       /* Length in bytes of the current section. */
   int year;            /* The year of the GRIB1 Message. */
   double P1_DeltaTime; /* Used to parse the time for P1 */
   double P2_DeltaTime; /* Used to parse the time for P2 */
   uInt4 uli_temp;
#ifdef DEBUG
/*   int i; */
#endif

   sectLen = GRIB_UNSIGN_INT3 (*pds, pds[1], pds[2]);
#ifdef DEBUG
/*   printf ("Section 1 length = %ld\n", sectLen);
   for (i = 0; i < sectLen; i++) {
      printf ("Sect1: item %d = %d\n", i + 1, pds[i]);
   }
   printf ("Century is item 25\n"); */
#endif
   /* Bounds check: the whole section must fit inside the message. */
   *curLoc += sectLen;
   if (*curLoc > gribLen) {
      errSprintf ("Ran out of data in PDS (GRIB 1 Section 1)\n");
      return -1;
   }
   /* Walk the section octet by octet (pds is advanced as fields are read;
    * octet numbering below follows the GRIB1 spec, 1-based). */
   pds += 3;
   pdsMeta->mstrVersion = *(pds++); /* octet 4: parameter table version */
   *center = *(pds++);              /* octet 5 */
   pdsMeta->genProcess = *(pds++);  /* octet 6 */
   *gridID = *(pds++);              /* octet 7 */
   *f_gds = GRIB2BIT_1 & *pds;      /* octet 8: GDS/BMS presence flags */
   *f_bms = GRIB2BIT_2 & *pds;
   pds++;
   pdsMeta->cat = *(pds++);         /* octet 9: parameter number */
   pdsMeta->levelType = *(pds++);   /* octet 10 */
   pdsMeta->levelVal = GRIB_UNSIGN_INT2 (*pds, pds[1]); /* octets 11-12 */
   pds += 2;
   /* Octet 13 is the 2-digit year; octet 25 holds the century. */
   if (*pds == 0) {
      /* The 12 is because we have increased pds by 12.  (but 25 is in
       * reference of 1..25, so we need another -1) */
      year = (pds[25 - 13] * 100);
   } else {
      /* The 12 is because we have increased pds by 12.  (but 25 is in
       * reference of 1..25, so we need another -1) */
      year = *pds + ((pds[25 - 13] - 1) * 100);
      /* It seems like some old files (such as spring/I000176.grb) do not
         have a century byte, and assume 19xx. */
/*
      if( (year < 1900 || year > 2100) && *pds >= 0 && *pds < 100 )
          year = *pds + 1900;
*/
   }
   if (ParseTime (&(pdsMeta->refTime), year, pds[1], pds[2], pds[3], pds[4],
                  0) != 0) {
      preErrSprintf ("Error In call to ParseTime\n");
      errSprintf ("(Probably a corrupt file)\n");
      return -1;
   }
   pds += 5;
   /* Octet 21 (pds[3] here) is the time range indicator (table 5). */
   pdsMeta->timeRange = pds[3];
   /* P1/P2 (octets 19-20) are offsets from refTime in the unit of octet 18
    * (*pds here). */
   if (ParseSect4Time2secV1 (pds[1], *pds, &P1_DeltaTime) == 0) {
      pdsMeta->P1 = pdsMeta->refTime + P1_DeltaTime;
   } else {
      pdsMeta->P1 = pdsMeta->refTime;
      printf ("Warning! : Can't figure out time unit of %d\n", *pds);
   }
   if (ParseSect4Time2secV1 (pds[2], *pds, &P2_DeltaTime) == 0) {
      pdsMeta->P2 = pdsMeta->refTime + P2_DeltaTime;
   } else {
      pdsMeta->P2 = pdsMeta->refTime;
      printf ("Warning! : Can't figure out time unit of %d\n", *pds);
   }
   /* The following is based on Table 5. */
   /* Note: For ensemble forecasts, 119 has meaning. */
   switch (pdsMeta->timeRange) {
      case 0:
      case 1:
      case 113:
      case 114:
      case 115:
      case 116:
      case 117:
      case 118:
      case 123:
      case 124:
         pdsMeta->validTime = pdsMeta->P1;
         break;
      case 2:
         /* Puzzling case. */
         pdsMeta->validTime = pdsMeta->P2;
         break;
      case 3:
      case 4:
      case 5:
      case 51:
         pdsMeta->validTime = pdsMeta->P2;
         break;
      case 10:
         /* Indicator 10: P1 and P2 together form one 2-octet offset. */
         if (ParseSect4Time2secV1 (GRIB_UNSIGN_INT2 (pds[1], pds[2]), *pds,
                                   &P1_DeltaTime) == 0) {
            pdsMeta->P2 = pdsMeta->P1 = pdsMeta->refTime + P1_DeltaTime;
         } else {
            pdsMeta->P2 = pdsMeta->P1 = pdsMeta->refTime;
            printf ("Warning! : Can't figure out time unit of %d\n", *pds);
         }
         pdsMeta->validTime = pdsMeta->P1;
         break;
      default:
         pdsMeta->validTime = pdsMeta->P1;
   }
   pds += 4;
   pdsMeta->Average = GRIB_UNSIGN_INT2 (*pds, pds[1]); /* octets 22-23 */
   pds += 2;
   pdsMeta->numberMissing = *(pds++); /* octet 24 */
   /* Skip over century of reference time (octet 25, already consumed for
    * the year above). */
   pds++;
   *subcenter = *(pds++);             /* octet 26 */
   *DSF = GRIB_SIGN_INT2 (*pds, pds[1]); /* octets 27-28 */
   pds += 2;
   pdsMeta->f_hasEns = 0;
   pdsMeta->f_hasProb = 0;
   pdsMeta->f_hasCluster = 0;
   /* Sections shorter than 41 octets carry no ensemble extension. */
   if (sectLen < 41) {
      return 0;
   }
   /* Following is based on:
    * http://www.emc.ncep.noaa.gov/gmb/ens/info/ens_grib.html */
   if ((*center == NMC) && (*subcenter == 2)) {
      if (sectLen < 45) {
         printf ("Warning! Problems with Ensemble section\n");
         return 0;
      }
      pdsMeta->f_hasEns = 1;
      pdsMeta->ens.BitFlag = *(pds++);
      /* octet21 = pdsMeta->timeRange; = 119 has meaning now */
      pds += 11;
      pdsMeta->ens.Application = *(pds++);
      pdsMeta->ens.Type = *(pds++);
      pdsMeta->ens.Number = *(pds++);
      pdsMeta->ens.ProdID = *(pds++);
      pdsMeta->ens.Smooth = *(pds++);
      /* Parameters 191-193 carry probability information. */
      if ((pdsMeta->cat == 191) || (pdsMeta->cat == 192) ||
          (pdsMeta->cat == 193)) {
         if (sectLen < 60) {
            printf ("Warning! Problems with Ensemble Probability section\n");
            return 0;
         }
         pdsMeta->f_hasProb = 1;
         pdsMeta->prob.Cat = pdsMeta->cat;
         pdsMeta->cat = *(pds++);
         pdsMeta->prob.Type = *(pds++);
         /* Probability bounds are stored as IBM360 floats. */
         MEMCPY_BIG (&uli_temp, pds, sizeof (sInt4));
         pdsMeta->prob.lower = fval_360 (uli_temp);
         pds += 4;
         MEMCPY_BIG (&uli_temp, pds, sizeof (sInt4));
         pdsMeta->prob.upper = fval_360 (uli_temp);
         pds += 4;
         pds += 4;
      }
      /* Ensemble types 4 and 5 carry cluster information. */
      if ((pdsMeta->ens.Type == 4) || (pdsMeta->ens.Type == 5)) {
         /* 87 ... 100 was reserved, but may not be encoded */
         if ((sectLen < 100) && (sectLen != 86)) {
            printf ("Warning! Problems with Ensemble Clustering section\n");
            printf ("Section length == %d\n", sectLen);
            return 0;
         }
         if (pdsMeta->f_hasProb == 0) {
            pds += 14;
         }
         pdsMeta->f_hasCluster = 1;
         pdsMeta->cluster.ensSize = *(pds++);
         pdsMeta->cluster.clusterSize = *(pds++);
         pdsMeta->cluster.Num = *(pds++);
         pdsMeta->cluster.Method = *(pds++);
         /* Bounding box is stored in millidegrees. */
         pdsMeta->cluster.NorLat = GRIB_UNSIGN_INT3 (*pds, pds[1], pds[2]);
         pdsMeta->cluster.NorLat = pdsMeta->cluster.NorLat / 1000.;
         pds += 3;
         pdsMeta->cluster.SouLat = GRIB_UNSIGN_INT3 (*pds, pds[1], pds[2]);
         pdsMeta->cluster.SouLat = pdsMeta->cluster.SouLat / 1000.;
         pds += 3;
         pdsMeta->cluster.EasLon = GRIB_UNSIGN_INT3 (*pds, pds[1], pds[2]);
         pdsMeta->cluster.EasLon = pdsMeta->cluster.EasLon / 1000.;
         pds += 3;
         pdsMeta->cluster.WesLon = GRIB_UNSIGN_INT3 (*pds, pds[1], pds[2]);
         pdsMeta->cluster.WesLon = pdsMeta->cluster.WesLon / 1000.;
         pds += 3;
         memcpy (pdsMeta->cluster.Member, pds, 10);
         pdsMeta->cluster.Member[10] = '\0';
      }
      /* Following based on:
       * http://www.ecmwf.int/publications/manuals/libraries/gribex/
       * localGRIBUsage.html */
   } else if (*center == ECMWF) {
      if (sectLen < 45) {
         printf ("Warning! Problems with ECMWF PDS extension\n");
         return 0;
      }
/*
      sInt4 i_temp;
      pds += 12;
      i_temp = GRIB_SIGN_INT2 (pds[3], pds[4]);
      printf ("ID %d Class %d Type %d Stream %d", pds[0], pds[1],
              pds[2], i_temp);
      pds += 5;
      printf (" Ver %c%c%c%c, ", pds[0], pds[1], pds[2], pds[3]);
      pds += 4;
      printf ("Octet-50 %d, Octet-51 %d SectLen %d\n", pds[0], pds[1],
              sectLen);
*/
   } else {
      printf ("Un-handled possible ensemble section center %d "
              "subcenter %d\n", *center, *subcenter);
   }
   return 0;
}

/*****************************************************************************
 * Grib1_Inventory() --
 *
 * Arthur Taylor / MDL
 *
 * PURPOSE
 *   Parses the GRIB1 "Product Definition Section" for enough information to
 * fill out the inventory data structure so we can do a simple inventory on
 * the file in a similar way to how we did it for GRIB2.
* * ARGUMENTS * fp = An opened GRIB2 file already at the correct message. (Input) * gribLen = The total length of the GRIB1 message. (Input) * inv = The inventory data structure that we need to fill. (Output) * * FILES/DATABASES: None * * RETURNS: int (could use errSprintf()) * 0 = OK * -1 = gribLen is too small. * * HISTORY * 4/2003 Arthur Taylor (MDL/RSIS): Created. * * NOTES ***************************************************************************** */ int GRIB1_Inventory (DataSource &fp, uInt4 gribLen, inventoryType *inv) { char temp[3]; /* Used to determine the section length. */ uInt4 sectLen; /* Length in bytes of the current section. */ uChar *pds; /* The part of the message dealing with the PDS. */ pdsG1Type pdsMeta; /* The pds parsed into a usable data structure. */ char f_gds; /* flag if there is a gds section. */ char f_bms; /* flag if there is a bms section. */ short int DSF; /* Decimal Scale Factor for unpacking the data. */ uChar gridID; /* Which GDS specs to use. */ const char *varName; /* The name of the data stored in the grid. */ const char *varComment; /* Extra comments about the data stored in grid. */ const char *varUnit; /* Holds the name of the unit [K] [%] .. etc */ int convert; /* Conversion method for this variable's unit. */ uInt4 curLoc; /* Where we are in the current GRIB message. 
*/ unsigned short int center; /* The Center that created the data */ unsigned short int subcenter; /* The Sub Center that created the data */ curLoc = 8; if (fp.DataSourceFread(temp, sizeof (char), 3) != 3) { errSprintf ("Ran out of file.\n"); return -1; } sectLen = GRIB_UNSIGN_INT3 (*temp, temp[1], temp[2]); if (curLoc + sectLen > gribLen) { errSprintf ("Ran out of data in PDS (GRIB1_Inventory)\n"); return -1; } if( sectLen < 3 ) { errSprintf ("Invalid sectLen.\n"); return -1; } pds = (uChar *) malloc (sectLen * sizeof (uChar)); if( pds == NULL ) { errSprintf ("Ran out of memory.\n"); return -1; } *pds = *temp; pds[1] = temp[1]; pds[2] = temp[2]; if (fp.DataSourceFread(pds + 3, sizeof (char), sectLen - 3) + 3 != sectLen) { errSprintf ("Ran out of file.\n"); free (pds); return -1; } if (ReadGrib1Sect1 (pds, gribLen, &curLoc, &pdsMeta, &f_gds, &gridID, &f_bms, &DSF, &center, &subcenter) != 0) { preErrSprintf ("Inside GRIB1_Inventory\n"); free (pds); return -1; } free (pds); inv->refTime = pdsMeta.refTime; inv->validTime = pdsMeta.validTime; inv->foreSec = inv->validTime - inv->refTime; GRIB1_Table2LookUp (&(pdsMeta), &varName, &varComment, &varUnit, &convert, center, subcenter); inv->element = (char *) malloc ((1 + strlen (varName)) * sizeof (char)); strcpy (inv->element, varName); inv->unitName = (char *) malloc ((1 + 2 + strlen (varUnit)) * sizeof (char)); snprintf (inv->unitName, (1 + 2 + strlen (varUnit)) * sizeof (char), "[%s]", varUnit); inv->comment = (char *) malloc ((1 + strlen (varComment) + strlen (varUnit) + 2 + 1) * sizeof (char)); snprintf (inv->comment, (1 + strlen (varComment) + strlen (varUnit) + 2 + 1) * sizeof (char), "%s [%s]", varComment, varUnit); GRIB1_Table3LookUp (&(pdsMeta), &(inv->shortFstLevel), &(inv->longFstLevel)); /* Get to the end of the GRIB1 message. */ /* (inventory.c : GRIB2Inventory), is responsible for this. 
*/ /* fseek (fp, gribLen - sectLen, SEEK_CUR); */ return 0; } int GRIB1_RefTime (DataSource &fp, uInt4 gribLen, double *refTime) { char temp[3]; /* Used to determine the section length. */ uInt4 sectLen; /* Length in bytes of the current section. */ uChar *pds; /* The part of the message dealing with the PDS. */ pdsG1Type pdsMeta; /* The pds parsed into a usable data structure. */ char f_gds; /* flag if there is a gds section. */ char f_bms; /* flag if there is a bms section. */ short int DSF; /* Decimal Scale Factor for unpacking the data. */ uChar gridID; /* Which GDS specs to use. */ uInt4 curLoc; /* Where we are in the current GRIB message. */ unsigned short int center; /* The Center that created the data */ unsigned short int subcenter; /* The Sub Center that created the data */ curLoc = 8; if (fp.DataSourceFread (temp, sizeof (char), 3) != 3) { errSprintf ("Ran out of file.\n"); return -1; } sectLen = GRIB_UNSIGN_INT3 (*temp, temp[1], temp[2]); if (curLoc + sectLen > gribLen) { errSprintf ("Ran out of data in PDS (GRIB1_Inventory)\n"); return -1; } pds = (uChar *) malloc (sectLen * sizeof (uChar)); *pds = *temp; pds[1] = temp[1]; pds[2] = temp[2]; if (fp.DataSourceFread (pds + 3, sizeof (char), sectLen - 3) + 3 != sectLen) { errSprintf ("Ran out of file.\n"); free (pds); return -1; } if (ReadGrib1Sect1 (pds, gribLen, &curLoc, &pdsMeta, &f_gds, &gridID, &f_bms, &DSF, &center, &subcenter) != 0) { preErrSprintf ("Inside GRIB1_Inventory\n"); free (pds); return -1; } free (pds); *refTime = pdsMeta.refTime; /* Get to the end of the GRIB1 message. */ /* (inventory.c : GRIB2Inventory), is responsible for this. */ /* fseek (fp, gribLen - sectLen, SEEK_CUR); */ return 0; } /***************************************************************************** * ReadGrib1Sect2() -- * * Arthur Taylor / MDL * * PURPOSE * Parses the GRIB1 "Grid Definition Section" or section 2, filling out * the gdsMeta data structure. 
 *
 * ARGUMENTS
 *     gds = The compressed part of the message dealing with "GDS". (Input)
 * gribLen = The total length of the GRIB1 message. (Input)
 *  curLoc = Current location in the GRIB1 message. (Output)
 * gdsMeta = The filled out gdsMeta data structure. (Output)
 *
 * FILES/DATABASES: None
 *
 * RETURNS: int (could use errSprintf())
 *  0 = OK
 * -1 = gribLen is too small.
 * -2 = unexpected values in gds.
 *
 * HISTORY
 *   4/2003 Arthur Taylor (MDL/RSIS): Created.
 *  12/2003 AAT: adas data encoder seems to have # of vertical data = 1, but
 *          parameters of vertical data = 255, which doesn't make sense.
 *          Changed the error from "fatal" to a warning in debug mode.
 *   6/2004 AAT: Modified to allow "extended" lat/lon grids (i.e. stretched or
 *          stretched and rotated).
 *
 * NOTES
 *****************************************************************************
 */
static int ReadGrib1Sect2 (uChar *gds, uInt4 gribLen, uInt4 *curLoc,
                           gdsType *gdsMeta)
{
   uInt4 sectLen;       /* Length in bytes of the current section. */
   int gridType;        /* Which type of grid. (see enumerated types). */
   double unit = 1e-3;  /* Used for converting to the correct unit
                         * (millidegrees -> degrees). */
   uInt4 uli_temp;      /* Used for reading a GRIB1 float. */
   int i;
   int f_allZero;       /* Used to find out if the "lat/lon" extension part
                         * is all 0 hence missing. */
   int f_allOne;        /* Used to find out if the "lat/lon" extension part
                         * is all 1 hence missing. */

   sectLen = GRIB_UNSIGN_INT3 (*gds, gds[1], gds[2]);
#ifdef DEBUG
/*
   printf ("Section 2 length = %ld\n", sectLen);
*/
#endif
   *curLoc += sectLen;
   if (*curLoc > gribLen) {
      errSprintf ("Ran out of data in GDS (GRIB 1 Section 2)\n");
      return -1;
   }
   /* Skip the 3 length octets; gds now points at GDS octet 4 (NV). */
   gds += 3;
/*
#ifdef DEBUG
   if ((*gds != 0) || (gds[1] != 255)) {
      printf ("GRIB1 GDS: Expect (NV = 0) != %d, (PV = 255) != %d\n",
              *gds, gds[1]);
      errSprintf ("SectLen == %ld\n", sectLen);
      errSprintf ("GridType == %d\n", gds[2]);
   }
#endif
*/
#ifdef DEBUG
   if (gds[1] != 255) {
      printf ("\n\tCaution: GRIB1 GDS: FOR ALL NWS products, PV should be "
              "255 rather than %d\n", gds[1]);
   }
#endif
   /* PV (octet 5): 255 means "no vertical coordinate parameters". */
   if ((gds[1] != 255) && (gds[1] > 6)) {
      errSprintf ("GRIB1 GDS: Expect PV = 255 != %d\n", gds[1]);
      return -2;
   }
   gds += 2;
   /* Octet 6: data representation (projection) type. */
   gridType = *(gds++);
   switch (gridType) {
      case GB1S2_LATLON: // Latitude/Longitude Grid
      case GB1S2_GAUSSIAN_LATLON: // Gaussian Latitude/Longitude
      case GB1S2_ROTATED_LATLON: // Rotated Latitude/Longitude
         /* 32 = plain grid, 42 = stretched, 52 = stretched and rotated. */
         if ((sectLen != 32) && (sectLen != 42) && (sectLen != 52)) {
            errSprintf ("For LatLon GDS, should have 32 or 42 or 52 bytes "
                        "of data\n");
            return -1;
         }
         switch(gridType) {
            case GB1S2_GAUSSIAN_LATLON:
               gdsMeta->projType = GS3_GAUSSIAN_LATLON;
               break;
            case GB1S2_ROTATED_LATLON:
               gdsMeta->projType = GS3_ROTATED_LATLON;
               break;
            default:
               gdsMeta->projType = GS3_LATLON;
               break;
         }
         /* Fields not used by lat/lon projections are zeroed. */
         gdsMeta->orientLon = 0;
         gdsMeta->meshLat = 0;
         gdsMeta->scaleLat1 = 0;
         gdsMeta->scaleLat2 = 0;
         gdsMeta->southLat = 0;
         gdsMeta->southLon = 0;
         gdsMeta->center = 0;

         gdsMeta->Nx = GRIB_UNSIGN_INT2 (*gds, gds[1]);
         gds += 2;
         gdsMeta->Ny = GRIB_UNSIGN_INT2 (*gds, gds[1]);
         gds += 2;
         gdsMeta->lat1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->lon1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         /* Resolution/component flag; bit 0x40 selects the oblate
          * spheroid earth instead of the default sphere. */
         gdsMeta->resFlag = *(gds++);
         if (gdsMeta->resFlag & 0x40) {
            gdsMeta->f_sphere = 0;
            gdsMeta->majEarth = 6378.160;
            gdsMeta->minEarth = 6356.775;
         } else {
            gdsMeta->f_sphere = 1;
            gdsMeta->majEarth = 6367.47;
            gdsMeta->minEarth = 6367.47;
         }
         gdsMeta->lat2 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->lon2 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->Dx = GRIB_UNSIGN_INT2 (*gds, gds[1]) * unit;
         gds += 2;
         if (gridType == GB1S2_GAUSSIAN_LATLON) {
            /* For Gaussian grids this field is the number of parallels
             * between a pole and the equator, not a grid length.
             * NOTE(review): np == 0 would divide by zero here -- the
             * message would be malformed; confirm upstream validation. */
            int np = GRIB_UNSIGN_INT2 (*gds, gds[1]);
            /* parallels between a pole and the equator */
            gdsMeta->Dy = 90.0 / np;
         } else
            gdsMeta->Dy = GRIB_UNSIGN_INT2 (*gds, gds[1]) * unit;
         gds += 2;
         gdsMeta->scan = *gds;
         gdsMeta->f_typeLatLon = 0;
#ifdef DEBUG
/*
         printf ("sectLen %ld\n", sectLen);
*/
#endif
         if (sectLen == 42) {
            /* Stretched lat/lon.  Check if all 0's or all 1's, which means
             * f_typeLatLon == 0 */
            f_allZero = 1;
            f_allOne = 1;
            for (i = 0; i < 10; i++) {
               if (gds[i] != 0)
                  f_allZero = 0;
               if (gds[i] != 255)
                  f_allOne = 0;
            }
            if (!f_allZero && !f_allOne) {
               gdsMeta->f_typeLatLon = 1;
               gds += 5;
               gdsMeta->poleLat = (GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) *
                                   unit);
               gds += 3;
               gdsMeta->poleLon = (GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) *
                                   unit);
               gds += 3;
               MEMCPY_BIG (&uli_temp, gds, sizeof (sInt4));
               gdsMeta->stretchFactor = fval_360 (uli_temp);
            }
         } else if (sectLen == 52) {
            /* Stretched and rotated lat/lon. */
            gds += 5;
            /* Check if all 0's or all 1's, which means f_typeLatLon == 0 */
            f_allZero = 1;
            f_allOne = 1;
            for (i = 0; i < 20; i++) {
               if (gds[i] != 0)
                  f_allZero = 0;
               if (gds[i] != 255)
                  f_allOne = 0;
            }
            if (!f_allZero && !f_allOne) {
               gdsMeta->f_typeLatLon = 2;
               gdsMeta->southLat = (GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) *
                                    unit);
               gds += 3;
               gdsMeta->southLon = (GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) *
                                    unit);
               gds += 3;
               MEMCPY_BIG (&uli_temp, gds, sizeof (sInt4));
               gdsMeta->angleRotate = fval_360 (uli_temp);
               gds += 4;
               gdsMeta->poleLat = (GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) *
                                   unit);
               gds += 3;
               gdsMeta->poleLon = (GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) *
                                   unit);
               gds += 3;
               MEMCPY_BIG (&uli_temp, gds, sizeof (sInt4));
               gdsMeta->stretchFactor = fval_360 (uli_temp);
            }
#ifdef DEBUG
/*
            if (gdsMeta->lon2 == 360.25) gdsMeta->lon2 = 359.75;
*/
/*
            printf ("south %f %f rotate %f pole %f %f stretch %f\n",
                    gdsMeta->southLat, gdsMeta->southLon,
                    gdsMeta->angleRotate, gdsMeta->poleLat,
                    gdsMeta->poleLon, gdsMeta->stretchFactor);
            printf ("lat/lon type %d \n", gdsMeta->f_typeLatLon);
*/
#endif
         }
         break;
      case GB1S2_POLAR:
         if (sectLen != 32) {
            errSprintf ("For Polar GDS, should have 32 bytes of data\n");
            return -1;
         }
         gdsMeta->projType = GS3_POLAR;
         /* Fields not used by polar stereographic are zeroed. */
         gdsMeta->lat2 = 0;
         gdsMeta->lon2 = 0;
         gdsMeta->southLat = 0;
         gdsMeta->southLon = 0;

         gdsMeta->Nx = GRIB_UNSIGN_INT2 (*gds, gds[1]);
         gds += 2;
         gdsMeta->Ny = GRIB_UNSIGN_INT2 (*gds, gds[1]);
         gds += 2;
         gdsMeta->lat1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->lon1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->resFlag = *(gds++);
         if (gdsMeta->resFlag & 0x40) {
            gdsMeta->f_sphere = 0;
            gdsMeta->majEarth = 6378.160;
            gdsMeta->minEarth = 6356.775;
         } else {
            gdsMeta->f_sphere = 1;
            gdsMeta->majEarth = 6367.47;
            gdsMeta->minEarth = 6367.47;
         }
         gdsMeta->orientLon = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         /* Dx/Dy here stay in encoded units (no * unit applied). */
         gdsMeta->Dx = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]);
         gds += 3;
         gdsMeta->Dy = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]);
         gds += 3;
         gdsMeta->meshLat = 60; /* Depends on hemisphere. */
         gdsMeta->center = *(gds++);
         if (gdsMeta->center & GRIB2BIT_1) {
            /* South polar stereographic. */
            gdsMeta->scaleLat1 = gdsMeta->scaleLat2 = -90;
         } else {
            /* North polar stereographic. */
            gdsMeta->scaleLat1 = gdsMeta->scaleLat2 = 90;
         }
         gdsMeta->scan = *gds;
         break;
      case GB1S2_LAMBERT:
         if (sectLen != 42) {
            errSprintf ("For Lambert GDS, should have 42 bytes of data\n");
            return -1;
         }
         gdsMeta->projType = GS3_LAMBERT;
         gdsMeta->lat2 = 0;
         gdsMeta->lon2 = 0;

         gdsMeta->Nx = GRIB_UNSIGN_INT2 (*gds, gds[1]);
         gds += 2;
         gdsMeta->Ny = GRIB_UNSIGN_INT2 (*gds, gds[1]);
         gds += 2;
         gdsMeta->lat1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->lon1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->resFlag = *(gds++);
         if (gdsMeta->resFlag & 0x40) {
            gdsMeta->f_sphere = 0;
            gdsMeta->majEarth = 6378.160;
            gdsMeta->minEarth = 6356.775;
         } else {
            gdsMeta->f_sphere = 1;
            gdsMeta->majEarth = 6367.47;
            gdsMeta->minEarth = 6367.47;
         }
         gdsMeta->orientLon = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->Dx = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]);
         gds += 3;
         gdsMeta->Dy = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]);
         gds += 3;
         gdsMeta->center = *(gds++);
         gdsMeta->scan = *(gds++);
         gdsMeta->scaleLat1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->scaleLat2 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->meshLat = gdsMeta->scaleLat1;
         gdsMeta->southLat = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->southLon = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         break;
      case GB1S2_MERCATOR:
         if (sectLen != 42) {
            errSprintf ("For Mercator GDS, should have 42 bytes of data\n");
            return -1;
         }
         gdsMeta->projType = GS3_MERCATOR;
         gdsMeta->southLat = 0;
         gdsMeta->southLon = 0;
         gdsMeta->orientLon = 0;
         gdsMeta->center = 0;

         gdsMeta->Nx = GRIB_UNSIGN_INT2 (*gds, gds[1]);
         gds += 2;
         gdsMeta->Ny = GRIB_UNSIGN_INT2 (*gds, gds[1]);
         gds += 2;
         gdsMeta->lat1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->lon1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->resFlag = *(gds++);
         if (gdsMeta->resFlag & 0x40) {
            gdsMeta->f_sphere = 0;
            gdsMeta->majEarth = 6378.160;
            gdsMeta->minEarth = 6356.775;
         } else {
            gdsMeta->f_sphere = 1;
            gdsMeta->majEarth = 6367.47;
            gdsMeta->minEarth = 6367.47;
         }
         gdsMeta->lat2 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->lon2 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->scaleLat1 = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]) * unit;
         gds += 3;
         gdsMeta->scaleLat2 = gdsMeta->scaleLat1;
         gdsMeta->meshLat = gdsMeta->scaleLat1;
         /* Reserved set to 0. */
         gds++;
         gdsMeta->scan = *(gds++);
         gdsMeta->Dx = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]);
         gds += 3;
         gdsMeta->Dy = GRIB_SIGN_INT3 (*gds, gds[1], gds[2]);
         break;
      default:
         errSprintf ("Grid projection number is %d\n", gridType);
         errSprintf ("Don't know how to handle this grid projection.\n");
         return -2;
   }
   gdsMeta->numPts = gdsMeta->Nx * gdsMeta->Ny;
#ifdef DEBUG
/*
   printf ("NumPts = %ld\n", gdsMeta->numPts);
   printf ("Nx = %ld, Ny = %ld\n", gdsMeta->Nx, gdsMeta->Ny);
*/
#endif
   return 0;
}

/*****************************************************************************
 * ReadGrib1Sect3() --
 *
 * Arthur Taylor / MDL
 *
 * PURPOSE
 *   Parses the GRIB1 "Bit Map Section" or section 3, filling out the bitmap
 * as needed.
 *
 * ARGUMENTS
 *     bms = The compressed part of the message dealing with "BMS". (Input)
 * gribLen = The total length of the GRIB1 message. (Input)
 *  curLoc = Current location in the GRIB1 message. (Output)
 *  bitmap = The extracted bitmap. (Output)
 *    NxNy = The total size of the grid. (Input)
 *
 * FILES/DATABASES: None
 *
 * RETURNS: int (could use errSprintf())
 *  0 = OK
 * -1 = gribLen is too small.
 * -2 = unexpected values in bms.
 *
 * HISTORY
 *   4/2003 Arthur Taylor (MDL/RSIS): Created.
 *
 * NOTES
 *****************************************************************************
 */
static int ReadGrib1Sect3 (uChar *bms, uInt4 gribLen, uInt4 *curLoc,
                           uChar *bitmap, uInt4 NxNy)
{
   uInt4 sectLen;       /* Length in bytes of the current section. */
   short int numeric;   /* Determine if this is a predefined bitmap */
   uChar bits;          /* Used to locate which bit we are currently using. */
   uInt4 i;             /* Helps traverse the bitmap. */

   sectLen = GRIB_UNSIGN_INT3 (*bms, bms[1], bms[2]);
#ifdef DEBUG
/*
   printf ("Section 3 length = %ld\n", sectLen);
*/
#endif
   *curLoc += sectLen;
   if (*curLoc > gribLen) {
      errSprintf ("Ran out of data in BMS (GRIB 1 Section 3)\n");
      return -1;
   }
   bms += 3;
   /* Assert: *bms currently points to number of unused bits at end of BMS. */
   /* Consistency check: grid points + pad bits must fill the section's
    * payload exactly (6 header octets precede the packed bits). */
   if (NxNy + *bms + 6 * 8 != sectLen * 8) {
      errSprintf ("NxNy + # of unused bits %ld != # of available bits %ld\n",
                  (sInt4) (NxNy + *bms), (sInt4) ((sectLen - 6) * 8));
      return -2;
   }
   bms++;
   /* Assert: Non-zero "numeric" means predefined bitmap. */
   numeric = GRIB_UNSIGN_INT2 (*bms, bms[1]);
   bms += 2;
   if (numeric != 0) {
      errSprintf ("Don't handle predefined bitmaps yet.\n");
      return -2;
   }
   /* Expand the packed bits, MSB first, into one byte per grid point. */
   bits = 0x80;
   for (i = 0; i < NxNy; i++) {
      *(bitmap++) = (*bms) & bits;
      bits = bits >> 1;
      if (bits == 0) {
         bms++;
         bits = 0x80;
      }
   }
   return 0;
}

#ifdef DEBUG
/* Diagnostic-only attempt to decode GRIB1 "complex" (second order) packing.
 * Prints the observed layout and always fails with -2; only compiled in
 * DEBUG builds and only reached from ReadGrib1Sect4. */
static int UnpackCmplx (uChar *bds, CPL_UNUSED uInt4 gribLen,
                        CPL_UNUSED uInt4 *curLoc,
                        CPL_UNUSED short int DSF,
                        CPL_UNUSED double *data,
                        CPL_UNUSED grib_MetaData *meta,
                        CPL_UNUSED char f_bms,
                        CPL_UNUSED uChar *bitmap,
                        CPL_UNUSED double unitM,
                        CPL_UNUSED double unitB,
                        CPL_UNUSED short int ESF,
                        CPL_UNUSED double refVal,
                        uChar numBits, uChar f_octet14)
{
   uInt4 secLen;
   int N1;
   int N2;
   int P1;
   int P2;
   uChar octet14;
   uChar f_maxtrixValues;
   uChar f_secBitmap = 0;
   uChar f_secValDiffWid;
   int i;
   uInt4 uli_temp;      /* Used to store sInt4s (temporarily) */
   uChar bufLoc;        /* Keeps track of where to start getting more data
                         * out of the packed data stream. */
   size_t numUsed;      /* How many bytes were used in a given call to
                         * memBitRead. */
   uChar *width;

   secLen = 11;
   N1 = GRIB_UNSIGN_INT2 (bds[0], bds[1]);
   octet14 = bds[2];
   printf ("octet14, %d\n", octet14);
   if (f_octet14) {
      f_maxtrixValues = octet14 & GRIB2BIT_2;
      f_secBitmap = octet14 & GRIB2BIT_3;
      f_secValDiffWid = octet14 & GRIB2BIT_4;
      printf ("f_matrixValues, f_secBitmap, f_secValeDiffWid %d %d %d\n",
              f_maxtrixValues, f_secBitmap, f_secValDiffWid);
   }
   N2 = GRIB_UNSIGN_INT2 (bds[3], bds[4]);
   P1 = GRIB_UNSIGN_INT2 (bds[5], bds[6]);
   P2 = GRIB_UNSIGN_INT2 (bds[7], bds[8]);
   printf ("N1 N2 P1 P2 : %d %d %d %d\n", N1, N2, P1, P2);
   printf ("Reserved %d\n", bds[9]);
   bds += 10;
   secLen += 10;
   /* One width octet per first-order group. */
   width = (uChar *) malloc (P1 * sizeof (uChar));
   for (i = 0; i < P1; i++) {
      width[i] = *bds;
      printf ("(Width %d %d)\n", i, width[i]);
      bds++;
      secLen++;
   }
   if (f_secBitmap) {
      bufLoc = 8;
      for (i = 0; i < P2; i++) {
         memBitRead (&uli_temp, sizeof (sInt4), bds, 1, &bufLoc, &numUsed);
         printf ("(%d %d) ", i, uli_temp);
         if (numUsed != 0) {
            printf ("\n");
            bds += numUsed;
            secLen++;
         }
      }
      if (bufLoc != 8) {
         bds++;
         secLen++;
      }
      printf ("Observed Sec Len %d\n", secLen);
   } else {
      /* Jump over widths and secondary bitmap */
      bds += (N1 - 21);
      secLen += (N1 - 21);
   }
   bufLoc = 8;
   for (i = 0; i < P1; i++) {
      memBitRead (&uli_temp, sizeof (sInt4), bds, numBits, &bufLoc,
                  &numUsed);
      printf ("(%d %d) (numUsed %ld numBits %d)", i, uli_temp,
              (long) numUsed, numBits);
      if (numUsed != 0) {
         printf ("\n");
         bds += numUsed;
         secLen++;
      }
   }
   if (bufLoc != 8) {
      bds++;
      secLen++;
   }
   printf ("Observed Sec Len %d\n", secLen);
   printf ("N2 = %d\n", N2);
   errSprintf ("Don't know how to handle Complex GRIB1 packing yet.\n");
   free (width);
   return -2;
}
#endif /* DEBUG */

/*****************************************************************************
 * ReadGrib1Sect4() --
 *
 * Arthur Taylor / MDL
 *
 * PURPOSE
 *   Unpacks the "Binary Data Section" or section 4.
 *
 * ARGUMENTS
 *     bds = The compressed part of the message dealing with "BDS". (Input)
 * gribLen = The total length of the GRIB1 message.
(Input)
 *  curLoc = Current location in the GRIB1 message. (Output)
 *     DSF = Decimal Scale Factor for unpacking the data. (Input)
 *    data = The extracted grid. (Output)
 *    meta = The meta data associated with the grid (Input/Output)
 *   f_bms = True if bitmap is to be used. (Input)
 *  bitmap = 0 if missing data, 1 if valid data. (Input)
 *   unitM = The M unit conversion value in equation y = Mx + B. (Input)
 *   unitB = The B unit conversion value in equation y = Mx + B. (Input)
 *
 * FILES/DATABASES: None
 *
 * RETURNS: int (could use errSprintf())
 *  0 = OK
 * -1 = gribLen is too small.
 * -2 = unexpected values in bds.
 *
 * HISTORY
 *   4/2003 Arthur Taylor (MDL/RSIS): Created
 *   3/2004 AAT: Switched {# Pts * (# Bits in a Group) +
 *          # of unused bits != # of available bits} to a warning from an
 *          error.
 *
 * NOTES
 * 1) See metaparse.c : ParseGrid()
 * 2) Currently, only handles "Simple pack".
 *****************************************************************************
 */
static int ReadGrib1Sect4 (uChar *bds, uInt4 gribLen, uInt4 *curLoc,
                           short int DSF, double *data, grib_MetaData *meta,
                           char f_bms, uChar *bitmap, double unitM,
                           double unitB)
{
   uInt4 sectLen;       /* Length in bytes of the current section. */
   short int ESF;       /* Power of 2 scaling factor. */
   uInt4 uli_temp;      /* Used to store sInt4s (temporarily) */
   double refVal;       /* The reference value for the grid, also the minimum
                         * value. */
   uChar numBits;       /* # of bits for a single element of data. */
   uChar numUnusedBit;  /* # of extra bits at end of record. */
   uChar f_spherHarm;   /* Flag if data contains Spherical Harmonics. */
   uChar f_cmplxPack;   /* Flag if complex packing was used. */
#ifdef DEBUG
   uChar f_octet14;     /* Flag if octet 14 was used. */
#endif
   uChar bufLoc;        /* Keeps track of where to start getting more data
                         * out of the packed data stream. */
   uChar f_convert;     /* Determine if scan mode implies that we have to do
                         * manipulation as we read the grid to get desired
                         * internal scan mode. */
   uInt4 i;             /* Used to traverse the grid. */
   size_t numUsed;      /* How many bytes were used in a given call to
                         * memBitRead. */
   double d_temp;       /* Holds the extracted data until we put it in data */
   sInt4 newIndex;      /* Where to put the answer (primarily if f_convert) */
   sInt4 x;             /* Used to help compute newIndex , if f_convert. */
   sInt4 y;             /* Used to help compute newIndex , if f_convert. */
   double resetPrim;    /* If possible, used to reset the primary missing
                         * value from 9.999e20 to a reasonable # (9999) */

   if (meta->gds.Nx * meta->gds.Ny != meta->gds.numPts) {
      errSprintf ("(Nx * Ny != numPts) ?? in BDS (GRIB 1 Section 4)\n");
      return -2;
   }
   sectLen = GRIB_UNSIGN_INT3 (*bds, bds[1], bds[2]);
#ifdef DEBUG
/*
   printf ("Section 4 length = %ld\n", sectLen);
*/
#endif
   *curLoc += sectLen;
   if (*curLoc > gribLen) {
      errSprintf ("Ran out of data in BDS (GRIB 1 Section 4)\n");
      return -1;
   }
   bds += 3;
   /* Assert: bds now points to the main pack flag. */
   f_spherHarm = (*bds) & GRIB2BIT_1;
   f_cmplxPack = (*bds) & GRIB2BIT_2;
   meta->gridAttrib.fieldType = (*bds) & GRIB2BIT_3;
#ifdef DEBUG
   f_octet14 = (*bds) & GRIB2BIT_4;
#endif
   /* Low nibble of the flag octet: unused bits at end of the section. */
   numUnusedBit = (*bds) & 0x0f;
#ifdef DEBUG
/*
   printf ("bds byte flag = %d\n", *bds);
   printf ("Number of unused bits = %d\n", numUnusedBit);
*/
#endif
   if (f_spherHarm) {
      errSprintf ("Don't know how to handle Spherical Harmonics yet.\n");
      return -2;
   }
/*
   if (f_octet14) {
      errSprintf ("Don't know how to handle Octet 14 data yet.\n");
      errSprintf ("bds byte flag = %d\n", *bds);
      errSprintf ("bds byte: %d %d %d %d\n", f_spherHarm, f_cmplxPack,
                  meta->gridAttrib.fieldType, f_octet14);
      return -2;
   }
*/
   if (f_cmplxPack) {
      meta->gridAttrib.packType = 2;
   } else {
      meta->gridAttrib.packType = 0;
   }
   bds++;
   /* Assert: bds now points to E (power of 2 scaling factor). */
   ESF = GRIB_SIGN_INT2 (*bds, bds[1]);
   bds += 2;
   /* Reference value: 4-octet big-endian GRIB1 float. */
   MEMCPY_BIG (&uli_temp, bds, sizeof (sInt4));
   refVal = fval_360 (uli_temp);
   bds += 4;
   /* Assert: bds is now the number of bits in a group. */
   numBits = *bds;
/*
#ifdef DEBUG
   printf ("refValue %f numBits %d\n", refVal, numBits);
   printf ("ESF %d DSF %d\n", ESF, DSF);
#endif
*/
   if (f_cmplxPack) {
      bds++;
#ifdef DEBUG
      return UnpackCmplx (bds, gribLen, curLoc, DSF, data, meta, f_bms,
                          bitmap, unitM, unitB, ESF, refVal, numBits,
                          f_octet14);
#else
      errSprintf ("Don't know how to handle Complex GRIB1 packing yet.\n");
      return -2;
#endif
   }
   /* Sanity check the bit budget (warning only, see 3/2004 HISTORY note);
    * the 11 accounts for the BDS header octets before the packed bits. */
   if (!f_bms &&
       (meta->gds.numPts * numBits + numUnusedBit) != (sectLen - 11) * 8) {
      printf ("numPts * (numBits in a Group) + # of unused bits %d != "
              "# of available bits %d\n",
              (sInt4) (meta->gds.numPts * numBits + numUnusedBit),
              (sInt4) ((sectLen - 11) * 8));
/*
      errSprintf ("numPts * (numBits in a Group) + # of unused bits %ld != "
                  "# of available bits %ld\n",
                  (sInt4) (meta->gds.numPts * numBits + numUnusedBit),
                  (sInt4) ((sectLen - 11) * 8));
      return -2;
*/
   }
   if (numBits > 32) {
      errSprintf ("The number of bits per number is larger than 32?\n");
      return -2;
   }
   bds++;
   /* Convert Units.  unitM == -10 is a sentinel meaning "apply 10^x"
    * instead of the linear y = Mx + B. */
   if (unitM == -10) {
      meta->gridAttrib.min = pow (10.0, (refVal * pow (2.0, ESF) /
                                         pow (10.0, DSF)));
   } else {
/*      meta->gridAttrib.min = unitM * (refVal / pow (10.0, DSF)) + unitB; */
      meta->gridAttrib.min = unitM * (refVal * pow (2.0, ESF) /
                                      pow (10.0, DSF)) + unitB;
   }
   meta->gridAttrib.max = meta->gridAttrib.min;
   meta->gridAttrib.f_maxmin = 1;
   meta->gridAttrib.numMiss = 0;
   meta->gridAttrib.refVal = (float)refVal;
   meta->gridAttrib.ESF = ESF;
   meta->gridAttrib.DSF = DSF;
   bufLoc = 8;
   /* Internally we use scan = 0100.  Scan is usually 0100 but if need be, we
    * can convert it. */
   f_convert = ((meta->gds.scan & 0xe0) != 0x40);

   if (f_bms) {
/*
#ifdef DEBUG
      printf ("There is a bitmap?\n");
#endif
*/
      /* Start unpacking the data, assuming there is a bitmap. */
      meta->gridAttrib.f_miss = 1;
      meta->gridAttrib.missPri = UNDEFINED;
      for (i = 0; i < meta->gds.numPts; i++) {
         /* Find the destination index. */
         if (f_convert) {
            /* ScanIndex2XY returns value as if scan was 0100 */
            ScanIndex2XY (i, &x, &y, meta->gds.scan, meta->gds.Nx,
                          meta->gds.Ny);
            newIndex = (x - 1) + (y - 1) * meta->gds.Nx;
         } else {
            newIndex = i;
         }
         /* A 0 in bitmap means no data. A 1 in bitmap means data. */
         if (!bitmap[i]) {
            meta->gridAttrib.numMiss++;
            data[newIndex] = UNDEFINED;
         } else {
            if (numBits != 0) {
               memBitRead (&uli_temp, sizeof (sInt4), bds, numBits,
                           &bufLoc, &numUsed);
               bds += numUsed;
               d_temp = (refVal + (uli_temp * pow (2.0, ESF))) /
                        pow (10.0, DSF);
               /* Convert Units. */
               if (unitM == -10) {
                  d_temp = pow (10.0, d_temp);
               } else {
                  d_temp = unitM * d_temp + unitB;
               }
               if (meta->gridAttrib.max < d_temp) {
                  meta->gridAttrib.max = d_temp;
               }
               data[newIndex] = d_temp;
            } else {
               /* Assert: d_temp = unitM * refVal / pow (10.0,DSF) + unitB. */
               /* Assert: min = unitM * refVal / pow (10.0, DSF) + unitB. */
               data[newIndex] = meta->gridAttrib.min;
            }
         }
      }
      /* Reset the missing value to UNDEFINED_PRIM if possible.  If not
       * possible, make sure UNDEFINED is outside the range.  If UNDEFINED
       * is_ in the range, choose max + 1 for missing. */
      resetPrim = 0;
      if ((meta->gridAttrib.max < UNDEFINED_PRIM) ||
          (meta->gridAttrib.min > UNDEFINED_PRIM)) {
         resetPrim = UNDEFINED_PRIM;
      } else if ((meta->gridAttrib.max >= UNDEFINED) &&
                 (meta->gridAttrib.min <= UNDEFINED)) {
         resetPrim = meta->gridAttrib.max + 1;
      }
      if (resetPrim != 0) {
         meta->gridAttrib.missPri = resetPrim;
         /* Second pass: rewrite the missing-value sentinel in place. */
         for (i = 0; i < meta->gds.numPts; i++) {
            /* Find the destination index. */
            if (f_convert) {
               /* ScanIndex2XY returns value as if scan was 0100 */
               ScanIndex2XY (i, &x, &y, meta->gds.scan, meta->gds.Nx,
                             meta->gds.Ny);
               newIndex = (x - 1) + (y - 1) * meta->gds.Nx;
            } else {
               newIndex = i;
            }
            if (!bitmap[i]) {
               data[newIndex] = resetPrim;
            }
         }
      }
   } else {
#ifdef DEBUG
/*
      printf ("There is no bitmap?\n");
*/
#endif
      /* Start unpacking the data, assuming there is NO bitmap. */
      meta->gridAttrib.f_miss = 0;
      for (i = 0; i < meta->gds.numPts; i++) {
         if (numBits != 0) {
            /* Find the destination index. */
            if (f_convert) {
               /* ScanIndex2XY returns value as if scan was 0100 */
               ScanIndex2XY (i, &x, &y, meta->gds.scan, meta->gds.Nx,
                             meta->gds.Ny);
               newIndex = (x - 1) + (y - 1) * meta->gds.Nx;
            } else {
               newIndex = i;
            }
            memBitRead (&uli_temp, sizeof (sInt4), bds, numBits, &bufLoc,
                        &numUsed);
            bds += numUsed;
            d_temp = (refVal + (uli_temp * pow (2.0, ESF))) /
                     pow (10.0, DSF);
#ifdef DEBUG
/*
            if (i == 1) {
               printf ("refVal %f, uli_temp %ld, ans %f\n", refVal,
                       uli_temp, d_temp);
               printf ("numBits %d, bufLoc %d, numUsed %d\n", numBits,
                       bufLoc, numUsed);
            }
*/
#endif
            /* Convert Units. */
            if (unitM == -10) {
               d_temp = pow (10.0, d_temp);
            } else {
               d_temp = unitM * d_temp + unitB;
            }
            if (meta->gridAttrib.max < d_temp) {
               meta->gridAttrib.max = d_temp;
            }
            data[newIndex] = d_temp;
         } else {
            /* Assert: whole array = unitM * refVal + unitB. */
            /* Assert: *min = unitM * refVal + unitB. */
            data[i] = meta->gridAttrib.min;
         }
      }
   }
   return 0;
}

/*****************************************************************************
 * ReadGrib1Record() --
 *
 * Arthur Taylor / MDL
 *
 * PURPOSE
 *   Reads in a GRIB1 message, and parses the data into various data
 * structures, for use with other code.
 *
 * ARGUMENTS
 *           fp = An opened GRIB2 file already at the correct message. (Input)
 *       f_unit = 0 use GRIB2 units, 1 use English, 2 use metric. (Input)
 *    Grib_Data = The read in GRIB2 grid. (Output)
 * grib_DataLen = Size of Grib_Data. (Output)
 *         meta = A filled in meta structure (Output)
 *           IS = The structure containing all the arrays that the
 *                unpacker uses (Output)
 *        sect0 = Already read in section 0 data. (Input)
 *      gribLen = Length of the GRIB1 message. (Input)
 *     majEarth = Used to override the GRIB major axis of earth. (Input)
 *     minEarth = Used to override the GRIB minor axis of earth. (Input)
 *
 * FILES/DATABASES:
 *   An already opened file pointing to the desired GRIB1 message.
 *
 * RETURNS: int (could use errSprintf())
 *    0 = OK
 *   -1 = Problems reading in the PDS.
 *   -2 = Problems reading in the GDS.
 *   -3 = Problems reading in the BMS.
 *   -4 = Problems reading in the BDS.
 *   -5 = Problems reading the closing section.
 *
 * HISTORY
 *   4/2003 Arthur Taylor (MDL/RSIS): Created
 *   5/2003 AAT: Was not updating offset.  It should be updated by
 *          calling routine anyways, so I got rid of the parameter.
 *   7/2003 AAT: Allowed user to override the radius of earth.
 *   8/2003 AAT: Found a memory Leak (Had been setting unitName to NULL).
 *   2/2004 AAT: Added maj/min earth override.
 *   3/2004 AAT: Added ability to change units.
 *
 * NOTES
 *   1) Could also compare GDS with the one specified by gridID
 *   2) Could add gridID support.
 *   3) Should add unitM / unitB support.
 *****************************************************************************
 */
int ReadGrib1Record (DataSource &fp, sChar f_unit, double **Grib_Data,
                     uInt4 *grib_DataLen, grib_MetaData *meta,
                     IS_dataType *IS, sInt4 sect0[SECT0LEN_WORD],
                     uInt4 gribLen, double majEarth, double minEarth)
{
   sInt4 nd5;           /* Size of grib message rounded up to the nearest
                         * sInt4. */
   uChar *c_ipack;      /* A char ptr to the message stored in IS->ipack */
   uInt4 curLoc;        /* Current location in the GRIB message. */
   char f_gds;          /* flag if there is a gds section. */
   char f_bms;          /* flag if there is a bms section. */
   double *grib_Data;   /* A pointer to Grib_Data for ease of manipulation. */
   uChar *bitmap = NULL; /* A char field (0=noData, 1=data) set up in BMS. */
   short int DSF;       /* Decimal Scale Factor for unpacking the data. */
   double unitM = 1;    /* M in y = Mx + B, for unit conversion. */
   double unitB = 0;    /* B in y = Mx + B, for unit conversion. */
   uChar gridID;        /* Which GDS specs to use. */
   const char *varName; /* The name of the data stored in the grid. */
   const char *varComment; /* Extra comments about the data stored in grid. */
   const char *varUnit; /* Holds the name of the unit [K] [%] .. etc */
   sInt4 li_temp;       /* Used to make sure section 5 is 7777. */
   char unitName[15];   /* Holds the string name of the current unit. */
   int unitLen;         /* String length of string name of current unit. */

   /* Make room for entire message, and read it in. */
   /* nd5 needs to be gribLen in (sInt4) units rounded up. */
   nd5 = (gribLen + 3) / 4;
   if (nd5 > IS->ipackLen) {
      IS->ipackLen = nd5;
      IS->ipack = (sInt4 *) realloc ((void *) (IS->ipack),
                                     (IS->ipackLen) * sizeof (sInt4));
   }
   c_ipack = (uChar *) IS->ipack;
   /* Init last sInt4 to 0, to make sure that the padded bytes are 0. */
   IS->ipack[nd5 - 1] = 0;
   /* Init first 2 sInt4 to sect0 (8 bytes already consumed by caller). */
   memcpy (c_ipack, sect0, SECT0LEN_WORD * 2);
   /* Read in the rest of the message. */
   if (fp.DataSourceFread (c_ipack + SECT0LEN_WORD * 2, sizeof (char),
                           (gribLen - SECT0LEN_WORD * 2)) +
       SECT0LEN_WORD * 2 != gribLen) {
      errSprintf ("Ran out of file\n");
      return -1;
   }

   /* Preceding was in degrib2, next part is specific to GRIB1. */
   /* curLoc = 8 skips the 8-byte Indicator Section (section 0). */
   curLoc = 8;
   if (ReadGrib1Sect1 (c_ipack + curLoc, gribLen, &curLoc, &(meta->pds1),
                       &f_gds, &gridID, &f_bms, &DSF, &(meta->center),
                       &(meta->subcenter)) != 0) {
      preErrSprintf ("Inside ReadGrib1Record\n");
      return -1;
   }

   /* Get the Grid Definition Section. */
   if (f_gds) {
      if (ReadGrib1Sect2 (c_ipack + curLoc, gribLen, &curLoc,
                          &(meta->gds)) != 0) {
         preErrSprintf ("Inside ReadGrib1Record\n");
         return -2;
      }
      /* Could also compare GDS with the one specified by gridID? */
   } else {
      errSprintf ("Don't know how to handle a gridID lookup yet.\n");
      return -2;
   }
   meta->pds1.gridID = gridID;

   /* Allow data originating from NCEP to be 6371.2 by default: NCEP
    * historically encodes the 6367.47 km sphere but means 6371.2 km. */
   if (meta->center == NMC) {
      if (meta->gds.majEarth == 6367.47) {
         meta->gds.f_sphere = 1;
         meta->gds.majEarth = 6371.2;
         meta->gds.minEarth = 6371.2;
      }
   }
   /* Caller-supplied earth radius override; only values in (6300, 6400) km
    * are considered plausible.  Equal axes collapse to a sphere. */
   if ((majEarth > 6300) && (majEarth < 6400)) {
      if ((minEarth > 6300) && (minEarth < 6400)) {
         meta->gds.f_sphere = 0;
         meta->gds.majEarth = majEarth;
         meta->gds.minEarth = minEarth;
         if (majEarth == minEarth) {
            meta->gds.f_sphere = 1;
         }
      } else {
         meta->gds.f_sphere = 1;
         meta->gds.majEarth = majEarth;
         meta->gds.minEarth = majEarth;
      }
   }

   /* Allocate memory for the grid. */
   if (meta->gds.numPts > *grib_DataLen) {
      *grib_DataLen = meta->gds.numPts;
      *Grib_Data = (double *) realloc ((void *) (*Grib_Data),
                                       (*grib_DataLen) * sizeof (double));
      if (!(*Grib_Data)) {
         *grib_DataLen = 0;
         return -1;
      }
   }
   grib_Data = *Grib_Data;

   /* Get the Bit Map Section. */
   if (f_bms) {
      /* NOTE(review): malloc result is not checked before use; a failed
       * allocation would be dereferenced inside ReadGrib1Sect3. */
      bitmap = (uChar *) malloc (meta->gds.numPts * sizeof (char));
      if (ReadGrib1Sect3 (c_ipack + curLoc, gribLen, &curLoc, bitmap,
                          meta->gds.numPts) != 0) {
         free (bitmap);
         preErrSprintf ("Inside ReadGrib1Record\n");
         return -3;
      }
   }

   /* Figure out some basic stuff about the grid. */
   /* Following is similar to metaparse.c : ParseElemName */
   GRIB1_Table2LookUp (&(meta->pds1), &varName, &varComment, &varUnit,
                       &(meta->convert), meta->center, meta->subcenter);
   meta->element = (char *) realloc ((void *) (meta->element),
                                     (1 + strlen (varName)) * sizeof (char));
   strcpy (meta->element, varName);
   meta->unitName = (char *) realloc ((void *) (meta->unitName),
                                      (1 + 2 + strlen (varUnit)) *
                                      sizeof (char));
   snprintf (meta->unitName, (1 + 2 + strlen (varUnit)) * sizeof (char),
             "[%s]", varUnit);
   meta->comment = (char *) realloc ((void *) (meta->comment),
                                     (1 + strlen (varComment) +
                                      strlen (varUnit) + 2 + 1) *
                                     sizeof (char));
   snprintf (meta->comment, (1 + strlen (varComment) +
                             strlen (varUnit) + 2 + 1) * sizeof (char),
             "%s [%s]", varComment, varUnit);

   /* Re-label the unit if the user asked for English/metric conversion. */
   if (ComputeUnit (meta->convert, meta->unitName, f_unit, &unitM, &unitB,
                    unitName) == 0) {
      unitLen = static_cast<int>(strlen (unitName));
      /* NOTE(review): `1 + unitLen * sizeof (char)` parses as
       * 1 + (unitLen * sizeof(char)); benign only because sizeof(char)==1. */
      meta->unitName = (char *) realloc ((void *) (meta->unitName),
                                         1 + unitLen * sizeof (char));
      strncpy (meta->unitName, unitName, unitLen);
      meta->unitName[unitLen] = '\0';
   }

   /* Read the GRID (Binary Data Section); unitM/unitB apply y = Mx + B. */
   if (ReadGrib1Sect4 (c_ipack + curLoc, gribLen, &curLoc, DSF, grib_Data,
                       meta, f_bms, bitmap, unitM, unitB) != 0) {
      /* free(NULL) is safe when there was no bitmap. */
      free (bitmap);
      preErrSprintf ("Inside ReadGrib1Record\n");
      return -4;
   }
   if (f_bms) {
      free (bitmap);
   }

   GRIB1_Table3LookUp (&(meta->pds1), &(meta->shortFstLevel),
                       &(meta->longFstLevel));
/*   printf ("%s .. %s\n", meta->shortFstLevel, meta->longFstLevel);*/

/*
   strftime (meta->refTime, 20, "%Y%m%d%H%M",
             gmtime (&(meta->pds1.refTime)));
*/
   Clock_Print (meta->refTime, 20, meta->pds1.refTime, "%Y%m%d%H%M", 0);
/*
   strftime (meta->validTime, 20, "%Y%m%d%H%M",
             gmtime (&(meta->pds1.validTime)));
*/
   Clock_Print (meta->validTime, 20, meta->pds1.validTime, "%Y%m%d%H%M", 0);
   meta->deltTime = (sInt4) (meta->pds1.validTime - meta->pds1.refTime);

   /* Read section 5.  If it is "7777" == 926365495 we are done. */
   if (curLoc == gribLen) {
      printf ("Warning: either gribLen did not account for section 5, or "
              "section 5 is missing\n");
      return 0;
   }
   if (curLoc + 4 != gribLen) {
      errSprintf ("Invalid number of bytes for the end of the message.\n");
      return -5;
   }
   memcpy (&li_temp, c_ipack + curLoc, 4);
   if (li_temp != 926365495L) {
      errSprintf ("Did not find the end of the message.\n");
      return -5;
   }
   return 0;
}

/*****************************************************************************
 * main() --
 *
 * Arthur Taylor / MDL
 *
 * PURPOSE
 *    To test the capabilities of this module, and give an example as to how
 * ReadGrib1Record expects to be called.
 *
 * ARGUMENTS
 * argc = The number of arguments on the command line. (Input)
 * argv = The arguments on the command line. (Input)
 *
 * FILES/DATABASES:
 *    A GRIB1 file.
 *
 * RETURNS: int
 *    0 = OK
 *
 * HISTORY
 *   4/2003 Arthur Taylor (MDL/RSIS): Created
 *   6/2003 Matthew T. Kallio (matt@wunderground.com):
 *          "wmo" dimension increased to WMO_HEADER_LEN + 1 (for '\0' char)
 *
 * NOTES
 *****************************************************************************
 */
#ifdef DEBUG_DEGRIB1
int main (int argc, char **argv)
{
   DataSource grib_fp;  /* The opened grib2 file for input. */
   sInt4 offset;        /* Where we currently are in grib_fp. */
   sInt4 sect0[SECT0LEN_WORD]; /* Holds the current Section 0. */
   char wmo[WMO_HEADER_LEN + 1]; /* Holds the current wmo message. */
   sInt4 gribLen;       /* Length of the current GRIB message. */
   sInt4 wmoLen;        /* Length of current wmo Message. */
   char *msg;
   int version;
   sChar f_unit = 0;
   double *grib_Data;
   sInt4 grib_DataLen;
   grib_MetaData meta;
   IS_dataType is;      /* Un-parsed meta data for this GRIB2 message. As
                         * well as some memory used by the unpacker. */

   //if ((grib_fp = fopen (argv[1], "rb")) == NULL) {
   //   printf ("Problems opening %s for read\n", argv[1]);
   //   return 1;
   //}
   grib_fp = FileDataSource(argv[1]);
   IS_Init (&is);
   MetaInit (&meta);

   offset = 0;
   if (ReadSECT0 (grib_fp, offset, WMO_HEADER_LEN, WMO_SECOND_LEN, wmo,
                  sect0, &gribLen, &wmoLen, &version) < 0) {
      msg = errSprintf (NULL);
      printf ("%s\n", msg);
      return -1;
   }
   grib_DataLen = 0;
   grib_Data = NULL;
   if (version == 1) {
      meta.GribVersion = version;
      /* NOTE(review): this call passes 8 arguments but ReadGrib1Record is
       * declared above with 10 (majEarth, minEarth missing).  This debug-only
       * block would not compile with DEBUG_DEGRIB1 defined unless defaults
       * exist in a prototype elsewhere -- confirm. */
      ReadGrib1Record (grib_fp, f_unit, &grib_Data, &grib_DataLen, &meta,
                       &is, sect0, gribLen);
      offset = offset + gribLen + wmoLen;
   }

   MetaFree (&meta);
   IS_Free (&is);
   free (grib_Data);
   //fclose (grib_fp);
   return 0;
}
#endif
/*
 * DRace, a dynamic data race detector
 *
 * Copyright 2020 Siemens AG
 *
 * Authors:
 *   Felix Moessbauer <felix.moessbauer@siemens.com>
 *
 * SPDX-License-Identifier: MIT
 */

// Deliberately racy demo program for the DRace detector: one thread
// increments and one decrements the same int NUM_INCREMENTS times without
// synchronization, so the final value is usually != 0 and the detector
// should report the conflicting accesses.  Do NOT "fix" the race.

#include <cstdint>  // uintptr_t (previously relied on a transitive include)
#include <iostream>
#include <mutex>    // kept for the commented-out lock-based variant below
#include <thread>

#define NUM_INCREMENTS 10000

// #define USE_HEAP    // race on the heap
// #define USE_STATIC  // race on a static variable

// Racy read-modify-write increment; the yield between load and store
// deliberately widens the race window.
void inc(int* v) {
  for (int i = 0; i < NUM_INCREMENTS; ++i) {
    int var = *v;
    ++var;
    std::this_thread::yield();
    *v = var;
  }
}

// Racy read-modify-write decrement (mirror of inc()).
void dec(int* v) {
  for (int i = 0; i < NUM_INCREMENTS; ++i) {
    int var = *v;
    --var;
    std::this_thread::yield();
    *v = var;
  }
}

int main() {
#ifdef USE_HEAP
  int* mem = new int[1];
  *mem = 0;
#elif defined(USE_STATIC)
  // BUGFIX: was `#elif USE_STATIC`. With `#define USE_STATIC` (empty value,
  // as the commented line above would define it) that expands to an empty
  // controlling expression and fails to compile; `defined(...)` matches the
  // `#ifdef USE_HEAP` style above and works for any definition of the macro.
  static int var = 0;
  int* mem = &var;
#else
  int var = 0;
  int* mem = &var;
#endif

  auto ta = std::thread(&inc, mem);
  auto tb = std::thread(&dec, mem);
  ta.join();
  tb.join();

  // mx.lock();
  std::cout << "EXPECTED: " << 0 << ", "
            << "ACTUAL: " << *mem << ", "
            << "LOCATION: " << std::hex << (uintptr_t)(mem) << std::endl;
  // mx.unlock();

#ifdef USE_HEAP
  delete[] mem;  // only the heap variant owns its memory
#endif
  return 0;
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Importer                                        XX
XX                                                                           XX
XX   Imports the given method and converts it to semantic trees              XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#include "jitpch.h"

#ifdef _MSC_VER
#pragma hdrstop
#endif

#include "corexcep.h"

// Raise a verification exception (if needed) when `cond` is false, then
// continue.
#define Verify(cond, msg)                                                                       \
    do                                                                                          \
    {                                                                                           \
        if (!(cond))                                                                            \
        {                                                                                       \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
        }                                                                                       \
    } while (0)

// Same as Verify, but returns from the enclosing (void) function on failure.
#define VerifyOrReturn(cond, msg)                                                               \
    do                                                                                          \
    {                                                                                           \
        if (!(cond))                                                                            \
        {                                                                                       \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
            return;                                                                             \
        }                                                                                       \
    } while (0)

// Speculative flavor: when `speculative` is true, quietly return false
// instead of raising.
#define VerifyOrReturnSpeculative(cond, msg, speculative)                                       \
    do                                                                                          \
    {                                                                                           \
        if (speculative)                                                                        \
        {                                                                                       \
            if (!(cond))                                                                        \
            {                                                                                   \
                return false;                                                                   \
            }                                                                                   \
        }                                                                                       \
        else                                                                                    \
        {                                                                                       \
            if (!(cond))                                                                        \
            {                                                                                   \
                verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
                return false;                                                                   \
            }                                                                                   \
        }                                                                                       \
    } while (0)

/*****************************************************************************/

// Reset the importer's per-method statement list state.
void Compiler::impInit()
{
    impStmtList = impLastStmt = nullptr;
#ifdef DEBUG
    impInlinedCodeSize = 0;
#endif // DEBUG
}

/*****************************************************************************
 *
 *  Pushes the given tree on the stack.
 */

void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
{
    /* Check for overflow. If inlining, we may be using a bigger stack */

    if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
        (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
    {
        BADCODE("stack overflow");
    }

#ifdef DEBUG
    // If we are pushing a struct, make certain we know the precise type!
    if (tree->TypeGet() == TYP_STRUCT)
    {
        assert(ti.IsType(TI_STRUCT));
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
        assert(clsHnd != NO_CLASS_HANDLE);
    }
#endif // DEBUG

    verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
    verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;

    // Track whether the method uses long / floating point so later phases
    // can skip work when it does not.
    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    {
        compLongUsed = true;
    }
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
    {
        compFloatingPointUsed = true;
    }
}

// Push a null object reference (constant 0 of type TYP_REF) on the stack.
inline void Compiler::impPushNullObjRefOnStack()
{
    impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}

// This method gets called when we run into unverifiable code
// (and we are verifying the method)

inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                          DEBUGARG(unsigned line))
{
#ifdef DEBUG
    // Strip the directory from the file name for logging.
    const char* tail = strrchr(file, '\\');
    if (tail)
    {
        file = tail + 1;
    }

    if (JitConfig.JitBreakOnUnsafeCode())
    {
        assert(!"Unsafe code detected");
    }
#endif

    JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

    // Only escalate to an actual exception when compiling for import only.
    if (compIsForImportOnly())
    {
        JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
                msg, info.compFullName, impCurOpcName, impCurOpcOffs));
        verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
    }
}

// Unconditionally raise a verification exception (does not return).
inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                                    DEBUGARG(unsigned line))
{
    JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg,
            info.compFullName, impCurOpcName, impCurOpcOffs));

#ifdef DEBUG
    //    BreakIfDebuggerPresent();
    if (getBreakOnBadCode())
    {
        assert(!"Typechecking error");
    }
#endif

    RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
    UNREACHABLE();
}

// helper function that will tell us if the IL instruction at the addr passed
// by param consumes an address at the top of the stack. We use it to save
// us lvAddrTaken
bool Compiler::impILConsumesAddr(const BYTE* codeAddr)
{
    assert(!compIsForInlining());

    OPCODE opcode;

    opcode = (OPCODE)getU1LittleEndian(codeAddr);

    switch (opcode)
    {
        // case CEE_LDFLDA: We're taking this one out as if you have a sequence
        // like
        //
        //          ldloca.0
        //          ldflda whatever
        //
        // of a primitivelike struct, you end up after morphing with addr of a local
        // that's not marked as addrtaken, which is wrong. Also ldflda is usually used
        // for structs that contain other structs, which isnt a case we handle very
        // well now for other reasons.

        case CEE_LDFLD:
        {
            // We won't collapse small fields. This is probably not the right place to have this
            // check, but we're only using the function for this purpose, and is easy to factor
            // out if we need to do so.

            CORINFO_RESOLVED_TOKEN resolvedToken;
            impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);

            var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField));

            // Preserve 'small' int types
            if (!varTypeIsSmall(lclTyp))
            {
                lclTyp = genActualType(lclTyp);
            }

            if (varTypeIsSmall(lclTyp))
            {
                return false;
            }

            return true;
        }
        default:
            break;
    }

    return false;
}

// Fill in a CORINFO_RESOLVED_TOKEN for the metadata token at `addr` and ask
// the EE to resolve it.
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
    pResolvedToken->tokenContext = impTokenLookupContextHandle;
    pResolvedToken->tokenScope   = info.compScopeHnd;
    pResolvedToken->token        = getU4LittleEndian(addr);
    pResolvedToken->tokenType    = kind;

    info.compCompHnd->resolveToken(pResolvedToken);
}

/*****************************************************************************
 *
 *  Pop one tree from the stack.
 */

StackEntry Compiler::impPopStack()
{
    if (verCurrentState.esStackDepth == 0)
    {
        BADCODE("stack underflow");
    }

    return verCurrentState.esStack[--verCurrentState.esStackDepth];
}

/*****************************************************************************
 *
 *  Peep at n'th (0-based) tree on the top of the stack.
 */

StackEntry& Compiler::impStackTop(unsigned n)
{
    if (verCurrentState.esStackDepth <= n)
    {
        BADCODE("stack underflow");
    }

    return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
}

// Current evaluation-stack depth.
unsigned Compiler::impStackHeight()
{
    return verCurrentState.esStackDepth;
}

/*****************************************************************************
 *  Some of the trees are spilled specially. While unspilling them, or
 *  making a copy, these need to be handled specially. The function
 *  enumerates the operators possible after spilling.
 */

#ifdef DEBUG // only used in asserts
// True when `tree` is one of the node kinds a spilled stack entry may be
// (a local variable or a constant); everything else is a spilling bug.
static bool impValidSpilledStackEntry(GenTree* tree)
{
    if (tree->gtOper == GT_LCL_VAR)
    {
        return true;
    }

    if (tree->OperIsConst())
    {
        return true;
    }

    return false;
}
#endif

/*****************************************************************************
 *
 *  The following logic is used to save/restore stack contents.
 *  If 'copy' is true, then we make a copy of the trees on the stack. These
 *  have to all be cloneable/spilled values.
 */

void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
    savePtr->ssDepth = verCurrentState.esStackDepth;

    if (verCurrentState.esStackDepth)
    {
        savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
        size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);

        if (copy)
        {
            StackEntry* table = savePtr->ssTrees;

            /* Make a fresh copy of all the stack entries */

            for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
            {
                table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
                GenTree* tree     = verCurrentState.esStack[level].val;

                // Only locals and constants are cloneable here; see
                // impValidSpilledStackEntry.
                assert(impValidSpilledStackEntry(tree));

                switch (tree->gtOper)
                {
                    case GT_CNS_INT:
                    case GT_CNS_LNG:
                    case GT_CNS_DBL:
                    case GT_CNS_STR:
                    case GT_LCL_VAR:
                        table->val = gtCloneExpr(tree);
                        break;

                    default:
                        assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
                        break;
                }
            }
        }
        else
        {
            // Shallow save: share the tree pointers with the live stack.
            memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
        }
    }
}

// Restore a stack state previously captured by impSaveStackState.
void Compiler::impRestoreStackState(SavedStack* savePtr)
{
    verCurrentState.esStackDepth = savePtr->ssDepth;

    if (verCurrentState.esStackDepth)
    {
        memcpy(verCurrentState.esStack, savePtr->ssTrees,
               verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
    }
}

//------------------------------------------------------------------------
// impBeginTreeList: Get the tree list started for a new basic block.
//
inline void Compiler::impBeginTreeList()
{
    assert(impStmtList == nullptr && impLastStmt == nullptr);
}

/*****************************************************************************
 *
 *  Store the given start and end stmt in the given basic block. This is
 *  mostly called by impEndTreeList(BasicBlock *block). It is called
 *  directly only for handling CEE_LEAVEs out of finally-protected try's.
 */

inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt)
{
    /* Make the list circular, so that we can easily walk it backwards */

    firstStmt->SetPrevStmt(lastStmt);

    /* Store the tree list in the basic block */

    block->bbStmtList = firstStmt;

    /* The block should not already be marked as imported */
    assert((block->bbFlags & BBF_IMPORTED) == 0);

    block->bbFlags |= BBF_IMPORTED;
}

//------------------------------------------------------------------------
// impEndTreeList: Store the current tree list in the given basic block.
//
// Arguments:
//    block - the basic block to store into.
//
inline void Compiler::impEndTreeList(BasicBlock* block)
{
    if (impStmtList == nullptr)
    {
        // The block should not already be marked as imported.
        assert((block->bbFlags & BBF_IMPORTED) == 0);

        // Empty block. Just mark it as imported.
        block->bbFlags |= BBF_IMPORTED;
    }
    else
    {
        impEndTreeList(block, impStmtList, impLastStmt);
    }

#ifdef DEBUG
    // Close out IL-offset tracking for the statements just handed off.
    if (impLastILoffsStmt != nullptr)
    {
        impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
        impLastILoffsStmt = nullptr;
    }
#endif

    impStmtList = impLastStmt = nullptr;
}

/*****************************************************************************
 *
 *  Check that storing the given tree doesnt mess up the semantic order. Note
 *  that this has only limited value as we can only check [0..chkLevel).
 */

inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel)
{
#ifndef DEBUG
    return;
#else

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
    {
        return;
    }

    GenTree* tree = stmt->GetRootNode();

    // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack

    if (tree->gtFlags & GTF_CALL)
    {
        for (unsigned level = 0; level < chkLevel; level++)
        {
            assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
        }
    }

    if (tree->gtOper == GT_ASG)
    {
        // For an assignment to a local variable, all references of that
        // variable have to be spilled. If it is aliased, all calls and
        // indirect accesses have to be spilled

        if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
        {
            unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum));
                assert(!lvaTable[lclNum].IsAddressExposed() ||
                       (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
            }
        }
        // If the access may be to global memory, all side effects have to be spilled.
        else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF)
        {
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
            }
        }
    }
#endif
}

//------------------------------------------------------------------------
// impAppendStmt: Append the given statement to the current block's tree list.
//
//
// Arguments:
//    stmt                   - The statement to add.
//    chkLevel               - [0..chkLevel) is the portion of the stack which we will check
//                             for interference with stmt and spill if needed.
//    checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI
//                             marks the debug info of the current boundary and is set when we
//                             start importing IL at that boundary. If this parameter is true,
//                             then the function checks if 'stmt' has been associated with the
//                             current boundary, and if so, clears it so that we do not attach
//                             it to more upcoming statements.
//
void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo)
{
    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE))
    {
        assert(chkLevel <= verCurrentState.esStackDepth);

        /* If the statement being appended has any side-effects, check the stack
           to see if anything needs to be spilled to preserve correct ordering. */

        GenTree*     expr  = stmt->GetRootNode();
        GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT;

        // Assignment to (unaliased) locals don't count as a side-effect as
        // we handle them specially using impSpillLclRefs(). Temp locals should
        // be fine too.

        if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
            ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2))
        {
            GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT;
            assert(flags == (op2Flags | GTF_ASG));
            flags = op2Flags;
        }

        if (flags != 0)
        {
            bool spillGlobEffects = false;

            if ((flags & GTF_CALL) != 0)
            {
                // If there is a call, we have to spill global refs
                spillGlobEffects = true;
            }
            else if (!expr->OperIs(GT_ASG))
            {
                if ((flags & GTF_ASG) != 0)
                {
                    // The expression is not an assignment node but it has an assignment side effect, it
                    // must be an atomic op, HW intrinsic or some other kind of node that stores to memory.
                    // Since we don't know what it assigns to, we need to spill global refs.
                    spillGlobEffects = true;
                }
            }
            else
            {
                GenTree* lhs = expr->gtGetOp1();
                GenTree* rhs = expr->gtGetOp2();

                if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0)
                {
                    // Either side of the assignment node has an assignment side effect.
                    // Since we don't know what it assigns to, we need to spill global refs.
                    spillGlobEffects = true;
                }
                else if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
                {
                    spillGlobEffects = true;
                }
            }

            impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
        }
        else
        {
            impSpillSpecialSideEff();
        }
    }

    impAppendStmtCheck(stmt, chkLevel);

    impAppendStmt(stmt);

#ifdef FEATURE_SIMD
    impMarkContiguousSIMDFieldAssignments(stmt);
#endif

    // Once we set the current offset as debug info in an appended tree, we are
    // ready to report the following offsets. Note that we need to compare
    // offsets here instead of debug info, since we do not set the "is call"
    // bit in impCurStmtDI.

    if (checkConsumedDebugInfo &&
        (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset()))
    {
        impCurStmtOffsSet(BAD_IL_OFFSET);
    }

#ifdef DEBUG
    if (impLastILoffsStmt == nullptr)
    {
        impLastILoffsStmt = stmt;
    }

    if (verbose)
    {
        printf("\n\n");
        gtDispStmt(stmt);
    }
#endif
}

//------------------------------------------------------------------------
// impAppendStmt: Add the statement to the current stmts list.
//
// Arguments:
//    stmt - the statement to add.
//
inline void Compiler::impAppendStmt(Statement* stmt)
{
    if (impStmtList == nullptr)
    {
        // The stmt is the first in the list.
        impStmtList = stmt;
    }
    else
    {
        // Append the expression statement to the existing list.
        impLastStmt->SetNextStmt(stmt);
        stmt->SetPrevStmt(impLastStmt);
    }
    impLastStmt = stmt;
}

//------------------------------------------------------------------------
// impExtractLastStmt: Extract the last statement from the current stmts list.
//
// Return Value:
//    The extracted statement.
//
// Notes:
//    It assumes that the stmt will be reinserted later.
// Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. // inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. 
// Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. 
*/ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. 
        GenTree* dst = gtNewLclvNode(tmpNum, varType);
        asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block);
    }
    else
    {
        asg = gtNewTempAssign(tmpNum, val);
    }

    if (!asg->IsNothingNode())
    {
        if (pAfterStmt)
        {
            Statement* asgStmt = gtNewStmt(asg, di);
            fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
            *pAfterStmt = asgStmt;
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtDI);
        }
    }
}

/*****************************************************************************
 *
 *  Pop the given number of values from the stack and return a list node with
 *  their values.
 *  The 'prefixTree' argument may optionally contain an argument
 *  list that is prepended to the list returned from this function.
 *
 *  The notion of prepended is a bit misleading in that the list is backwards
 *  from the way I would expect: The first element popped is at the end of
 *  the returned list, and prefixTree is 'before' that, meaning closer to
 *  the end of the list.  To get to prefixTree, you have to walk to the
 *  end of the list.
 *
 *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
 *  such we reverse its meaning such that returnValue has a reversed
 *  prefixTree at the head of the list.
 */

GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs)
{
    assert(sig == nullptr || count == sig->numArgs);

    CORINFO_CLASS_HANDLE structType;
    GenTreeCall::Use*    argList;

    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        argList = nullptr;
    }
    else
    { // ARG_ORDER_L2R
        argList = prefixArgs;
    }

    // Pop 'count' values off the importer stack; each popped value is prepended
    // to 'argList', so the first value popped ends up at the end of the list.
    while (count--)
    {
        StackEntry se   = impPopStack();
        typeInfo   ti   = se.seTypeInfo;
        GenTree*   temp = se.val;

        if (varTypeIsStruct(temp))
        {
            // Morph trees that aren't already OBJs or MKREFANY to be OBJs
            assert(ti.IsType(TI_STRUCT));
            structType = ti.GetClassHandleForValueClass();

            bool forceNormalization = false;
            if (varTypeIsSIMD(temp))
            {
                // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper
                // ABI handling of this argument.
                // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type
                // with a different baseType than we've seen.
                // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD
                // or a plain GT_IND.
                // TODO-Cleanup: Consider whether we can eliminate all of these cases.
                if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD))
                {
                    forceNormalization = true;
                }
            }
#ifdef DEBUG
            if (verbose)
            {
                printf("Calling impNormStructVal on:\n");
                gtDispTree(temp);
            }
#endif
            temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization);
#ifdef DEBUG
            if (verbose)
            {
                printf("resulting tree:\n");
                gtDispTree(temp);
            }
#endif
        }

        /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
        argList = gtPrependNewCallArg(temp, argList);
    }

    // When a signature is available, walk the signature's argument list in
    // parallel with the popped args to validate/insert implicit coercions.
    if (sig != nullptr)
    {
        if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
            sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
        {
            // Make sure that all valuetypes (including enums) that we push are loaded.
            // This is to guarantee that if a GC is triggered from the prestub of this method,
            // all valuetypes in the method signature are already loaded.
            // We need to be able to find the size of the valuetypes, but we cannot
            // do a class-load from within GC.
            info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
        }

        CORINFO_ARG_LIST_HANDLE sigArgs = sig->args;
        GenTreeCall::Use*       arg;

        for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--)
        {
            PREFIX_ASSUME(arg != nullptr);

            CORINFO_CLASS_HANDLE classHnd;
            CorInfoType          corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd));

            var_types jitSigType = JITtype2varType(corType);

            if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet()))
            {
                BADCODE("the call argument has a type that can't be implicitly converted to the signature type");
            }

            // insert implied casts (from float to double or double to float)
            if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT))
            {
                arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE));
            }
            else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE))
            {
                arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT));
            }

            // insert any widening or narrowing casts for backwards compatibility
            arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType));

            if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
                corType != CORINFO_TYPE_VAR)
            {
                CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs);
                if (argRealClass != nullptr)
                {
                    // Make sure that all valuetypes (including enums) that we push are loaded.
                    // This is to guarantee that if a GC is triggered from the prestub of this method,
                    // all valuetypes in the method signature are already loaded.
                    // We need to be able to find the size of the valuetypes, but we cannot
                    // do a class-load from within GC.
                    info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
                }
            }

            const var_types nodeArgType = arg->GetNode()->TypeGet();
            if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType))
            {
                assert(!varTypeIsStruct(nodeArgType));
                // Some ABI require precise size information for call arguments less than target pointer size,
                // for example arm64 OSX. Create a special node to keep this information until morph
                // consumes it into `fgArgInfo`.
                GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode());
                arg->SetNode(putArgType);
            }

            sigArgs = info.compCompHnd->getArgNext(sigArgs);
        }
    }

    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        // Prepend the prefixTree
        // Simple in-place reversal to place treeList
        // at the end of a reversed prefixTree
        while (prefixArgs != nullptr)
        {
            GenTreeCall::Use* next = prefixArgs->GetNext();
            prefixArgs->SetNext(argList);
            argList    = prefixArgs;
            prefixArgs = next;
        }
    }
    return argList;
}

// TypeIs: exact equality check on two var_types values (base case of the
// variadic overload below).
static bool TypeIs(var_types type1, var_types type2)
{
    return type1 == type2;
}

// Check if type1 matches any type from the list.
template <typename... T>
static bool TypeIs(var_types type1, var_types type2, T... rest)
{
    return TypeIs(type1, type2) || TypeIs(type1, rest...);
}

//------------------------------------------------------------------------
// impCheckImplicitArgumentCoercion: check that the node's type is compatible with
//   the signature's type using ECMA implicit argument coercion table.
//
// Arguments:
//    sigType - the type in the call signature;
//    nodeType - the node type.
//
// Return Value:
//    true if they are compatible, false otherwise.
//
// Notes:
//   - it is currently allowing byref->long passing, should be fixed in VM;
//   - it can't check long -> native int case on 64-bit platforms,
//      so the behavior is different depending on the target bitness.
// bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
 */

GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
{
    assert(skipReverseCount <= count);

    GenTreeCall::Use* list = impPopCallArgs(count, sig);

    // reverse the list
    if (list == nullptr || skipReverseCount == count)
    {
        return list;
    }

    GenTreeCall::Use* ptr          = nullptr; // Initialized to the first node that needs to be reversed
    GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed

    if (skipReverseCount == 0)
    {
        ptr = list;
    }
    else
    {
        lastSkipNode = list;
        // Get to the first node that needs to be reversed
        for (unsigned i = 0; i < skipReverseCount - 1; i++)
        {
            lastSkipNode = lastSkipNode->GetNext();
        }

        PREFIX_ASSUME(lastSkipNode != nullptr);
        ptr = lastSkipNode->GetNext();
    }

    // Standard in-place singly-linked-list reversal of the suffix starting at 'ptr'.
    GenTreeCall::Use* reversedList = nullptr;

    do
    {
        GenTreeCall::Use* tmp = ptr->GetNext();
        ptr->SetNext(reversedList);
        reversedList = ptr;
        ptr          = tmp;
    } while (ptr != nullptr);

    if (skipReverseCount)
    {
        // Re-attach the reversed suffix after the unreversed prefix.
        lastSkipNode->SetNext(reversedList);
        return list;
    }
    else
    {
        return reversedList;
    }
}

//------------------------------------------------------------------------
// impAssignStruct: Create a struct assignment
//
// Arguments:
//    dest         - the destination of the assignment
//    src          - the value to be assigned
//    structHnd    - handle representing the struct type
//    curLevel     - stack level for which a spill may be being done
//    pAfterStmt   - statement to insert any additional statements after
//    ilOffset     - il offset for new statements
//    block        - block to insert any additional statements in
//
// Return Value:
//    The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
//    Temp assignments may be appended to impStmtList if spilling is necessary.
GenTree* Compiler::impAssignStruct(GenTree*             dest,
                                   GenTree*             src,
                                   CORINFO_CLASS_HANDLE structHnd,
                                   unsigned             curLevel,
                                   Statement**          pAfterStmt, /* = nullptr */
                                   const DebugInfo&     di,         /* = DebugInfo() */
                                   BasicBlock*          block       /* = nullptr */
                                   )
{
    assert(varTypeIsStruct(dest));

    DebugInfo usedDI = di;
    if (!usedDI.IsValid())
    {
        usedDI = impCurStmtDI;
    }

    // Peel any GT_COMMA wrappers off the destination, appending each comma's
    // op1 (side effect) as its own statement so evaluation order is preserved.
    while (dest->gtOper == GT_COMMA)
    {
        // Second thing is the struct.
        assert(varTypeIsStruct(dest->AsOp()->gtOp2));

        // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
        if (pAfterStmt)
        {
            Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI);
            fgInsertStmtAfter(block, *pAfterStmt, newStmt);
            *pAfterStmt = newStmt;
        }
        else
        {
            impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect
        }

        // set dest to the second thing
        dest = dest->AsOp()->gtOp2;
    }

    assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
           dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);

    // Return a NOP if this is a self-assignment.
    if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
        src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum())
    {
        return gtNewNothingNode();
    }

    // TODO-1stClassStructs: Avoid creating an address if it is not needed,
    // or re-creating a Blk node if it is.
    GenTree* destAddr;

    if (dest->gtOper == GT_IND || dest->OperIsBlk())
    {
        // Reuse the existing address operand rather than wrapping in ADDR(IND(..)).
        destAddr = dest->AsOp()->gtOp1;
    }
    else
    {
        destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
    }

    return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block));
}

//------------------------------------------------------------------------
// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
//
// Arguments:
//    destAddr     - address of the destination of the assignment
//    src          - source of the assignment
//    structHnd    - handle representing the struct type
//    curLevel     - stack level for which a spill may be being done
//    pAfterStmt   - statement to insert any additional statements after
//    di           - debug info for new statements
//    block        - block to insert any additional statements in
//
// Return Value:
//    The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
//    Temp assignments may be appended to impStmtList if spilling is necessary.
GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
                                      GenTree*             src,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      unsigned             curLevel,
                                      Statement**          pAfterStmt, /* = NULL */
                                      const DebugInfo&     di,         /* = DebugInfo() */
                                      BasicBlock*          block       /* = NULL */
                                      )
{
    GenTree*     dest      = nullptr;
    GenTreeFlags destFlags = GTF_EMPTY;

    DebugInfo usedDI = di;
    if (!usedDI.IsValid())
    {
        usedDI = impCurStmtDI;
    }

    assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR,
                       GT_COMMA) ||
           (src->TypeGet() != TYP_STRUCT && src->OperIsSimdOrHWintrinsic()));

    var_types asgType = src->TypeGet();

    if (src->gtOper == GT_CALL)
    {
        GenTreeCall* srcCall = src->AsCall();
        if (srcCall->TreatAsHasRetBufArg(this))
        {
            // Case of call returning a struct via hidden retbuf arg
            CLANG_FORMAT_COMMENT_ANCHOR;

#if !defined(TARGET_ARM)
            // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter
            if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged())
            {
                if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv()))
                {
#ifdef TARGET_X86
                    // The argument list has already been reversed.
                    // Insert the return buffer as the second-to-last node
                    // so it will be pushed on to the stack after the user args but before the native this arg
                    // as required by the native ABI.
                    GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
                    if (lastArg == nullptr)
                    {
                        srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
                    }
                    else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall)
                    {
                        // For thiscall, the "this" parameter is not included in the argument list reversal,
                        // so we need to put the return buffer as the last parameter.
                        for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
                            ;
                        gtInsertNewCallArgAfter(destAddr, lastArg);
                    }
                    else if (lastArg->GetNext() == nullptr)
                    {
                        // Single-argument list: retbuf becomes the first (second-to-last overall).
                        srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg);
                    }
                    else
                    {
                        // Two or more args: walk to find the second-to-last node and insert after it.
                        assert(lastArg != nullptr && lastArg->GetNext() != nullptr);
                        GenTreeCall::Use* secondLastArg = lastArg;
                        lastArg                         = lastArg->GetNext();
                        for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext())
                            ;
                        assert(secondLastArg->GetNext() != nullptr);
                        gtInsertNewCallArgAfter(destAddr, secondLastArg);
                    }
#else
                    GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs);
#endif
                }
                else
                {
#ifdef TARGET_X86
                    // The argument list has already been reversed.
                    // Insert the return buffer as the last node so it will be pushed on to the stack last
                    // as required by the native ABI.
                    GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
                    if (lastArg == nullptr)
                    {
                        srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
                    }
                    else
                    {
                        for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
                            ;
                        gtInsertNewCallArgAfter(destAddr, lastArg);
                    }
#else
                    // insert the return value buffer into the argument list as first byref parameter
                    srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
#endif
                }
            }
            else
#endif // !defined(TARGET_ARM)
            {
                // insert the return value buffer into the argument list as first byref parameter
                srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
            }

            // now returns void, not a struct
            src->gtType = TYP_VOID;

            // return the morphed call node
            return src;
        }
        else
        {
            // Case of call returning a struct in one or more registers.

            var_types returnType = (var_types)srcCall->gtReturnType;

            // First we try to change this to "LclVar/LclFld = call"
            //
            if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR))
            {
                // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
                // That is, the IR will be of the form lclVar = call for multi-reg return
                //
                GenTreeLclVar* lcl    = destAddr->AsOp()->gtOp1->AsLclVar();
                unsigned       lclNum = lcl->GetLclNum();
                LclVarDsc*     varDsc = lvaGetDesc(lclNum);
                if (src->AsCall()->HasMultiRegRetVal())
                {
                    // Mark the struct LclVar as used in a MultiReg return context
                    //  which currently makes it non promotable.
                    // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                    // handle multireg returns.
                    lcl->gtFlags |= GTF_DONT_CSE;
                    varDsc->lvIsMultiRegRet = true;
                }

                dest = lcl;

#if defined(TARGET_ARM)
                // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
                // but that method has not been updated to include ARM.
                impMarkLclDstNotPromotable(lclNum, src, structHnd);
                lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(UNIX_AMD64_ABI)
                // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
                assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs.");

                // Make the struct non promotable. The eightbytes could contain multiple fields.
                // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                // handle multireg returns.
                // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
                // non-multireg returns.
                lcl->gtFlags |= GTF_DONT_CSE;
                varDsc->lvIsMultiRegRet = true;
#endif
            }
            else // we don't have a GT_ADDR of a GT_LCL_VAR
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                asgType   = returnType;
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->gtOper == GT_RET_EXPR)
    {
        GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall();
        noway_assert(call->gtOper == GT_CALL);

        if (call->HasRetBufArg())
        {
            // insert the return value buffer into the argument list as first byref parameter
            call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs);

            // now returns void, not a struct
            src->gtType  = TYP_VOID;
            call->gtType = TYP_VOID;

            // We already have appended the write to 'dest' GT_CALL's args
            // So now we just return an empty node (pruning the GT_RET_EXPR)
            return src;
        }
        else
        {
            // Case of inline method returning a struct in one or more registers.
            // We won't need a return buffer
            asgType = src->gtType;

            if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR))
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->OperIsBlk())
    {
        asgType = impNormStructType(structHnd);
        if (src->gtOper == GT_OBJ)
        {
            assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd);
        }
    }
    else if (src->gtOper == GT_INDEX)
    {
        asgType = impNormStructType(structHnd);
        assert(src->AsIndex()->gtStructElemClass == structHnd);
    }
    else if (src->gtOper == GT_MKREFANY)
    {
        // Since we are assigning the result of a GT_MKREFANY,
        // "destAddr" must point to a refany.
        // The single MKREFANY assignment is expanded into two field stores:
        // the data pointer (op1) here, and the type (op2) via the returned node.

        GenTree* destAddrClone;
        destAddr =
            impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));

        assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
        assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
        fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));

        GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
        GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
        typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
        GenTree* typeSlot =
            gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));

        // append the assign of the pointer value
        GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1);
        if (pAfterStmt)
        {
            Statement* newStmt = gtNewStmt(asg, usedDI);
            fgInsertStmtAfter(block, *pAfterStmt, newStmt);
            *pAfterStmt = newStmt;
        }
        else
        {
            impAppendTree(asg, curLevel, usedDI);
        }

        // return the assign of the type value, to be appended
        return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2);
    }
    else if (src->gtOper == GT_COMMA)
    {
        // The second thing is the struct or its address.
        assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF);
        if (pAfterStmt)
        {
            // Insert op1 after '*pAfterStmt'
            Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI);
            fgInsertStmtAfter(block, *pAfterStmt, newStmt);
            *pAfterStmt = newStmt;
        }
        else if (impLastStmt != nullptr)
        {
            // Do the side-effect as a separate statement.
            impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI);
        }
        else
        {
            // In this case we have neither been given a statement to insert after, nor are we
            // in the importer where we can append the side effect.
            // Instead, we're going to sink the assignment below the COMMA.
            src->AsOp()->gtOp2 =
                impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block);
            return src;
        }

        // Evaluate the second thing using recursion.
        return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block);
    }
    else if (src->IsLocal())
    {
        asgType = src->TypeGet();
    }
    else if (asgType == TYP_STRUCT)
    {
        // It should already have the appropriate type.
        assert(asgType == impNormStructType(structHnd));
    }

    if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR))
    {
        GenTree* destNode = destAddr->gtGetOp1();
        // If the actual destination is a local, a GT_INDEX or a block node, or is a node that
        // will be morphed, don't insert an OBJ(ADDR) if it already has the right type.
        if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk())
        {
            var_types destType = destNode->TypeGet();
            // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible
            // iff their handles are the same.
            // Otherwise, they are compatible if their types are the same.
            bool typesAreCompatible =
                ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT))
                    ? ((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType))
                    : (destType == asgType);
            if (typesAreCompatible)
            {
                dest = destNode;
                if (destType != TYP_STRUCT)
                {
                    // Use a normalized type if available.
                    // We know from above that they're equivalent.
                    asgType = destType;
                }
            }
        }
    }

    if (dest == nullptr)
    {
        if (asgType == TYP_STRUCT)
        {
            dest = gtNewObjNode(structHnd, destAddr);
            gtSetObjGcInfo(dest->AsObj());
            // Although an obj as a call argument was always assumed to be a globRef
            // (which is itself overly conservative), that is not true of the operands
            // of a block assignment.
            dest->gtFlags &= ~GTF_GLOB_REF;
            dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
        }
        else
        {
            dest = gtNewOperNode(GT_IND, asgType, destAddr);
        }
    }

    if (dest->OperIs(GT_LCL_VAR) &&
        (src->IsMultiRegNode() ||
         (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal())))
    {
        if (lvaEnregMultiRegVars && varTypeIsStruct(dest))
        {
            dest->AsLclVar()->SetMultiReg();
        }
        if (src->OperIs(GT_CALL))
        {
            lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true;
        }
    }

    dest->gtFlags |= destFlags;
    destFlags = dest->gtFlags;

    // return an assignment node, to be appended
    GenTree* asgNode = gtNewAssignNode(dest, src);
    gtBlockOpInit(asgNode, dest, src, false);

    // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
    // of assignments.
    if ((destFlags & GTF_DONT_CSE) == 0)
    {
        dest->gtFlags &= ~(GTF_DONT_CSE);
    }
    return asgNode;
}

/*****************************************************************************
   Given a struct value, and the class handle for that structure, return
   the expression for the address for that structure value.

   willDeref - does the caller guarantee to dereference the pointer.
*/

GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 willDeref)
{
    assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));

    var_types type = structVal->TypeGet();

    genTreeOps oper = structVal->gtOper;

    if (oper == GT_OBJ && willDeref)
    {
        // The OBJ's own address operand can be returned directly since the
        // caller guarantees it will be dereferenced.
        assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd);
        return (structVal->AsObj()->Addr());
    }
    else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
             structVal->OperIsSimdOrHWintrinsic())
    {
        // These values have no usable address; spill to a temp and return
        // the temp's address instead.
        unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

        impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

        // The 'return value' is now the temp itself

        type          = genActualType(lvaTable[tmpNum].TypeGet());
        GenTree* temp = gtNewLclvNode(tmpNum, type);
        temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
        return temp;
    }
    else if (oper == GT_COMMA)
    {
        assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct

        Statement* oldLastStmt   = impLastStmt;
        structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref);
        structVal->gtType        = TYP_BYREF;

        if (oldLastStmt != impLastStmt)
        {
            // Some temp assignment statement was placed on the statement list
            // for Op2, but that would be out of order with op1, so we need to
            // spill op1 onto the statement list after whatever was last
            // before we recursed on Op2 (i.e. before whatever Op2 appended).
            Statement* beforeStmt;
            if (oldLastStmt == nullptr)
            {
                // The op1 stmt should be the first in the list.
                beforeStmt = impStmtList;
            }
            else
            {
                // Insert after the oldLastStmt before the first inserted for op2.
                beforeStmt = oldLastStmt->GetNextStmt();
            }

            impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt);
            structVal->AsOp()->gtOp1 = gtNewNothingNode();
        }

        return (structVal);
    }

    // Anything else is addressable in place.
    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}

//------------------------------------------------------------------------
// impNormStructType: Normalize the type of a (known to be) struct class handle.
//
// Arguments:
//    structHnd        - The class handle for the struct type of interest.
//    pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                       type, set to the SIMD base JIT type
//
// Return Value:
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Notes:
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known
//    call structSizeMightRepresentSIMDType to determine if this api needs to be called.

var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType)
{
    assert(structHnd != NO_CLASS_HANDLE);

    var_types structType = TYP_STRUCT;

#ifdef FEATURE_SIMD
    if (supportSIMDTypes())
    {
        const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);

        // Don't bother if the struct contains GC references or byrefs, it can't be a SIMD type.
        if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0)
        {
            unsigned originalSize = info.compCompHnd->getClassSize(structHnd);

            if (structSizeMightRepresentSIMDType(originalSize))
            {
                unsigned int sizeBytes;
                CorInfoType  simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
                if (simdBaseJitType != CORINFO_TYPE_UNDEF)
                {
                    assert(sizeBytes == originalSize);
                    structType = getSIMDTypeForSize(sizeBytes);
                    if (pSimdBaseJitType != nullptr)
                    {
                        *pSimdBaseJitType = simdBaseJitType;
                    }
                    // Also indicate that we use floating point registers.
                    compFloatingPointUsed = true;
                }
            }
        }
    }
#endif // FEATURE_SIMD

    return structType;
}

//------------------------------------------------------------------------
// Compiler::impNormStructVal: Normalize a struct value
//
// Arguments:
//    structVal          - the node we are going to normalize
//    structHnd          - the class handle for the node
//    curLevel           - the current stack level
//    forceNormalization - Force the creation of an OBJ node (default is false).
//
// Notes:
//    Given struct value 'structVal', make sure it is 'canonical', that is
//    it is either:
//    - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8)
//    - an OBJ or a MKREFANY node, or
//    - a node (e.g. GT_INDEX) that will be morphed.
//    If the node is a CALL or RET_EXPR, a copy will be made to a new temp.
//
GenTree* Compiler::impNormStructVal(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 forceNormalization /*=false*/)
{
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);
    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;
    if (structType == TYP_STRUCT)
    {
        structType = impNormStructType(structHnd);
    }
    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();
    switch (oper)
    {
        // GT_RETURN and GT_MKREFANY don't capture the handle.
        case GT_RETURN:
            break;
        case GT_MKREFANY:
            alreadyNormalized = true;
            break;

        case GT_CALL:
            // Record the handle on the call and force a copy to a temp below.
            structVal->AsCall()->gtRetClsHnd = structHnd;
            makeTemp                         = true;
            break;

        case GT_RET_EXPR:
            structVal->AsRetExpr()->gtRetClsHnd = structHnd;
            makeTemp                            = true;
            break;

        case GT_ARGPLACE:
            structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd;
            break;

        case GT_INDEX:
            // This will be transformed to an OBJ later.
            alreadyNormalized                       = true;
            structVal->AsIndex()->gtStructElemClass = structHnd;
            structVal->AsIndex()->gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
            break;

        case GT_FIELD:
            // Wrap it in a GT_OBJ, if needed.
            structVal->gtType = structType;
            if ((structType == TYP_STRUCT) || forceNormalization)
            {
                structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            }
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
            structLcl = structVal->AsLclVarCommon();
            // Wrap it in a GT_OBJ.
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            FALLTHROUGH;

        case GT_OBJ:
        case GT_BLK:
        case GT_ASG:
            // These should already have the appropriate type.
            assert(structVal->gtType == structType);
            alreadyNormalized = true;
            break;

        case GT_IND:
            assert(structVal->gtType == structType);
            structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
            alreadyNormalized = true;
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
        case GT_HWINTRINSIC:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif

        case GT_COMMA:
        {
            // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
            GenTree* blockNode = structVal->AsOp()->gtOp2;
            assert(blockNode->gtType == structType);

            // Is this GT_COMMA(op1, GT_COMMA())?
            GenTree* parent = structVal;
            if (blockNode->OperGet() == GT_COMMA)
            {
                // Find the last node in the comma chain.
                do
                {
                    assert(blockNode->gtType == structType);
                    parent    = blockNode;
                    blockNode = blockNode->AsOp()->gtOp2;
                } while (blockNode->OperGet() == GT_COMMA);
            }

            if (blockNode->OperGet() == GT_FIELD)
            {
                // If we have a GT_FIELD then wrap it in a GT_OBJ.
                blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
            }

#ifdef FEATURE_SIMD
            if (blockNode->OperIsSimdOrHWintrinsic())
            {
                parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
                alreadyNormalized     = true;
            }
            else
#endif
            {
                noway_assert(blockNode->OperIsBlk());

                // Sink the GT_COMMA below the blockNode addr.
                // That is GT_COMMA(op1, op2=blockNode) is transformed into
                // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
                //
                // In case of a chained GT_COMMA case, we sink the last
                // GT_COMMA below the blockNode addr.
                GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1;
                assert(blockNodeAddr->gtType == TYP_BYREF);
                GenTree* commaNode       = parent;
                commaNode->gtType        = TYP_BYREF;
                commaNode->AsOp()->gtOp2 = blockNodeAddr;
                blockNode->AsOp()->gtOp1 = commaNode;
                if (parent == structVal)
                {
                    structVal = blockNode;
                }
                alreadyNormalized = true;
            }
        }
        break;

        default:
            noway_assert(!"Unexpected node in impNormStructVal()");
            break;
    }
    structVal->gtType = structType;

    if (!alreadyNormalized || forceNormalization)
    {
        if (makeTemp)
        {
            // CALL/RET_EXPR values must be spilled to a temp before an address
            // can be formed over them.
            unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

            impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

            // The structVal is now the temp itself

            structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
            structVal = structLcl;
        }
        if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk())
        {
            // Wrap it in a GT_OBJ
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
        }
    }

    if (structLcl != nullptr)
    {
        // A OBJ on a ADDR(LCL_VAR) can never raise an exception
        // so we don't set GTF_EXCEPT here.
        if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum()))
        {
            structVal->gtFlags &= ~GTF_GLOB_REF;
        }
    }
    else if (structVal->OperIsBlk())
    {
        // In general a OBJ is an indirection and could raise an exception.
        structVal->gtFlags |= GTF_EXCEPT;
    }
    return structVal;
}

/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    bool*                   pRuntimeLookup /* = NULL */,
                                    bool                    mustRestoreHandle /* = false */,
                                    bool                    importParent /* = false */)
{
    assert(!fgGlobalMorph);

    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);

    if (pRuntimeLookup)
    {
        *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
    }

    if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        // The caller requires that the entity be restored (loaded) before the
        // generated code runs; notify the EE per handle kind.
        switch (embedInfo.handleType)
        {
            case CORINFO_HANDLETYPE_CLASS:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_METHOD:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_FIELD:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
                break;

            default:
                break;
        }
    }

    // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG size_t handleToTrack; if (handleFlags == GTF_ICON_TOKEN_HDL) { handleToTrack = 0; } else { handleToTrack = (size_t)compileTimeHandle; } if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = handleToTrack; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack; } #endif return addr; } if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case assert(compIsForInlining()); compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP); return nullptr; } // Need to use dictionary-based access which depends on the typeContext // which is only available at runtime, not at compile-time. 
return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle); } #ifdef FEATURE_READYTORUN GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE); if (pLookup->accessType == IAT_VALUE) { handle = pLookup->handle; } else if (pLookup->accessType == IAT_PVALUE) { pIndirection = pLookup->addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL)); if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } #endif // DEBUG return addr; } GenTreeCall* Compiler::impReadyToRunHelperToTree( CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args /* = nullptr */, CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. 
Only used with generics */) { CORINFO_CONST_LOOKUP lookup; if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup)) { return nullptr; } GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args); op1->setEntryPoint(lookup); return op1; } #endif GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* op1 = nullptr; switch (pCallInfo->kind) { case CORINFO_CALL: op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod); #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup; } #endif break; case CORINFO_CALL_CODE_POINTER: op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod); break; default: noway_assert(!"unknown call kind"); break; } return op1; } //------------------------------------------------------------------------ // getRuntimeContextTree: find pointer to context for runtime lookup. // // Arguments: // kind - lookup kind. // // Return Value: // Return GenTree pointer to generic shared context. // // Notes: // Reports about generic context using. GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind) { GenTree* ctxTree = nullptr; // Collectible types requires that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) 
lvaGenericsContextInUse = true; Compiler* pRoot = impInlineRoot(); if (kind == CORINFO_LOOKUP_THISOBJ) { // this Object ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF); ctxTree->gtFlags |= GTF_VAR_CONTEXT; // context is the method table pointer of the this object ctxTree = gtNewMethodTableLookup(ctxTree); } else { assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM); // Exact method descriptor as passed in ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL); ctxTree->gtFlags |= GTF_VAR_CONTEXT; } return ctxTree; } /*****************************************************************************/ /* Import a dictionary lookup to access a handle in code shared between generic instantiations. The lookup depends on the typeContext which is only available at runtime, and not at compile-time. pLookup->token1 and pLookup->token2 specify the handle that is needed. The cases are: 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the instantiation-specific handle, and the tokens to lookup the handle. 2. pLookup->indirections != CORINFO_USEHELPER : 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle. 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle. If it is non-NULL, it is the handle required. Else, call a helper to lookup the handle. 
*/
// impRuntimeLookupToTree: materialize a dictionary-based (runtime) generic
// handle lookup as a tree, per the lookup shapes described in the comment
// above.
//
// Arguments:
//    pResolvedToken    - token for the handle being looked up
//    pLookup           - lookup descriptor (kind + runtime lookup shape)
//    compileTimeHandle - the compile-time handle, forwarded to helper nodes
//
// Return Value:
//    Tree that evaluates to the requested handle at run time.
//
GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                          CORINFO_LOOKUP*         pLookup,
                                          void*                   compileTimeHandle)
{
    GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);

    CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;

    // It's available only via the run-time helper function
    if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
    {
#ifdef FEATURE_READYTORUN
        if (opts.IsReadyToRun())
        {
            return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
                                             gtNewCallArgs(ctxTree), &pLookup->lookupKind);
        }
#endif
        return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle);
    }

    // Slot pointer
    GenTree* slotPtrTree = ctxTree;

    if (pRuntimeLookup->testForNull)
    {
        // The context is consumed both by the indirection chain and by the
        // fallback helper call, so clone it.
        slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                   nullptr DEBUGARG("impRuntimeLookup slot"));
    }

    GenTree* indOffTree    = nullptr;
    GenTree* lastIndOfTree = nullptr;

    // Apply repeated indirections
    for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
    {
        if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
        {
            indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                      nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
        }

        // The last indirection could be subject to a size check (dynamic dictionary expansion)
        bool isLastIndirectionWithSizeCheck =
            ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK));

        if (i != 0)
        {
            slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
            slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
            if (!isLastIndirectionWithSizeCheck)
            {
                // Only non-size-checked dictionary loads are invariant.
                slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
            }
        }

        if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
        {
            slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
        }

        if (pRuntimeLookup->offsets[i] != 0)
        {
            if (isLastIndirectionWithSizeCheck)
            {
                // Remember the un-offset pointer for the size check below.
                lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                             nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
            }
            slotPtrTree =
                gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
        }
    }

    // No null test required
    if (!pRuntimeLookup->testForNull)
    {
        if (pRuntimeLookup->indirections == 0)
        {
            return slotPtrTree;
        }

        slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
        slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;

        if (!pRuntimeLookup->testForFixup)
        {
            return slotPtrTree;
        }

        impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));

        unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
        impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI);

        GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        // downcast the pointer to a TYP_INT on 64-bit targets
        slot = impImplicitIorI4Cast(slot, TYP_INT);
        // Use a GT_AND to check for the lowest bit and indirect if it is set
        GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
        GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));

        // slot = GT_IND(slot - 1)
        slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
        GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
        indir->gtFlags |= GTF_IND_NONFAULTING;
        indir->gtFlags |= GTF_IND_INVARIANT;

        slot                = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        GenTree*      asg   = gtNewAssignNode(slot, indir);
        GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
        GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
        impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

        return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
    }

    assert(pRuntimeLookup->indirections != 0);

    impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));

    // Extract the handle
    GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
    handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING;

    // Call the helper
    // - Setup argNode with the pointer to the signature returned by the lookup
    GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle);
    GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode);
    GenTreeCall*      helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);

    // Check for null and possibly call helper
    GenTree* nullCheck       = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL));
    GenTree* handleForResult = gtCloneExpr(handleForNullCheck);

    GenTree* result = nullptr;

    if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
    {
        // Dynamic dictionary expansion support
        assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0));

        // sizeValue = dictionary[pRuntimeLookup->sizeOffset]
        GenTreeIntCon* sizeOffset      = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL);
        GenTree*       sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset);
        GenTree*       sizeValue       = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset);
        sizeValue->gtFlags |= GTF_IND_NONFAULTING;

        // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i]
        GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL);
        GenTree* sizeCheck   = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue);

        // Invert the null check condition.
        nullCheck->ChangeOperUnchecked(GT_EQ);

        // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle).
        // Add checks and the handle as call arguments, indirect call transformer will handle this.
        helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs);
        helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs);
        helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs);
        result                 = helperCall;
        addExpRuntimeLookupCandidate(helperCall);
    }
    else
    {
        GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall);
        result                       = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck);
    }

    // Spill the result to a temp so the (possibly side-effecting) qmark/helper
    // is evaluated exactly once.
    unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree"));

    impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE);
    return gtNewLclvNode(tmp, TYP_I_IMPL);
}

/******************************************************************************
 *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
 *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
 *  else, grab a new temp.
 *  For structs (which can be pushed on the stack using obj, etc),
 *  special handling is needed.
 *  Returns false only when a specific temp was requested (tnum) but is out of
 *  range; returns true on success.
 */

// DEBUG-only RAII guard: flags (and clears on scope exit) that a spill is in
// progress so recursive spilling can be asserted against.
struct RecursiveGuard
{
public:
    RecursiveGuard()
    {
        m_pAddress = nullptr;
    }

    ~RecursiveGuard()
    {
        if (m_pAddress)
        {
            *m_pAddress = false;
        }
    }

    void Init(bool* pAddress, bool bInitialize)
    {
        assert(pAddress && *pAddress == false && "Recursive guard violation");
        m_pAddress = pAddress;

        if (bInitialize)
        {
            *m_pAddress = true;
        }
    }

protected:
    bool* m_pAddress;
};

bool Compiler::impSpillStackEntry(unsigned level,
                                  unsigned tnum
#ifdef DEBUG
                                  ,
                                  bool        bAssertOnRecursion,
                                  const char* reason
#endif
                                  )
{

#ifdef DEBUG
    RecursiveGuard guard;
    guard.Init(&impNestedStackSpill, bAssertOnRecursion);
#endif

    GenTree* tree = verCurrentState.esStack[level].val;

    /* Allocate a temp if we haven't been asked to use a particular one */

    if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
    {
        return false;
    }

    bool isNewTemp = false;

    if (tnum == BAD_VAR_NUM)
    {
        tnum      = lvaGrabTemp(true DEBUGARG(reason));
        isNewTemp = true;
    }

    /* Assign the spilled entry to the temp */
    impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);

    // If temp is newly introduced and a ref type, grab what type info we can.
    if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
    {
        assert(lvaTable[tnum].lvSingleDef == 0);
        lvaTable[tnum].lvSingleDef = 1;
        JITDUMP("Marked V%02u as a single def temp\n", tnum);
        CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
        lvaSetClass(tnum, tree, stkHnd);

        // If we're assigning a GT_RET_EXPR, note the temp over on the call,
        // so the inliner can use it in case it needs a return spill temp.
        if (tree->OperGet() == GT_RET_EXPR)
        {
            JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum);
            GenTree*             call = tree->AsRetExpr()->gtInlineCandidate;
            InlineCandidateInfo* ici  = call->AsCall()->gtInlineCandidateInfo;
            ici->preexistingSpillTemp = tnum;
        }
    }

    // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
    var_types type                     = genActualType(lvaTable[tnum].TypeGet());
    GenTree*  temp                     = gtNewLclvNode(tnum, type);
    verCurrentState.esStack[level].val = temp;

    return true;
}

/*****************************************************************************
 *
 *  Ensure that the stack has only spilled values
 */

void Compiler::impSpillStackEnsure(bool spillLeaves)
{
    assert(!spillLeaves || opts.compDbgCode);

    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        GenTree* tree = verCurrentState.esStack[level].val;

        if (!spillLeaves && tree->OperIsLeaf())
        {
            continue;
        }

        // Temps introduced by the importer itself don't need to be spilled

        bool isTempLcl =
            (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount);

        if (isTempLcl)
        {
            continue;
        }

        impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
    }
}

// Spill every entry of the evaluation stack to a temp.
void Compiler::impSpillEvalStack()
{
    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
    }
}

/*****************************************************************************
 *
 *  If the stack contains any trees with side effects in them, assign those
 *  trees to temps and append the assignments to the statement list.
 *  On return the stack is guaranteed to be empty.
 */

inline void Compiler::impEvalSideEffects()
{
    impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
    verCurrentState.esStackDepth = 0;
}

/*****************************************************************************
 *
 *  If the stack contains any trees with side effects in them, assign those
 *  trees to temps and replace them on the stack with refs to their temps.
 *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
 */

inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
{
    assert(chkLevel != (unsigned)CHECK_SPILL_NONE);

    /* Before we make any appends to the tree list we must spill the
     * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */

    impSpillSpecialSideEff();

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    assert(chkLevel <= verCurrentState.esStackDepth);

    GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;

    for (unsigned i = 0; i < chkLevel; i++)
    {
        GenTree* tree = verCurrentState.esStack[i].val;

        if ((tree->gtFlags & spillFlags) != 0 ||
            (spillGlobEffects &&           // Only consider the following when spillGlobEffects == true
             !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local.
             gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
                                           // lvAddrTaken flag.
        {
            impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
        }
    }
}

/*****************************************************************************
 *
 *  If the stack contains any trees with special side effects in them, assign
 *  those trees to temps and replace them on the stack with refs to their temps.
 */

inline void Compiler::impSpillSpecialSideEff()
{
    // Only exception objects need to be carefully handled

    if (!compCurBB->bbCatchTyp)
    {
        return;
    }

    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        GenTree* tree = verCurrentState.esStack[level].val;
        // Make sure if we have an exception object in the sub tree we spill ourselves.
        if (gtHasCatchArg(tree))
        {
            impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
        }
    }
}

/*****************************************************************************
 *
 *  Spill all stack references to value classes (TYP_STRUCT nodes)
 */

void Compiler::impSpillValueClasses()
{
    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        GenTree* tree = verCurrentState.esStack[level].val;

        if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
        {
            // Tree walk was aborted, which means that we found a
            // value class on the stack. Need to spill that
            // stack entry.
            impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
        }
    }
}

/*****************************************************************************
 *
 *  Callback (for fgWalkTreePre) that checks if a tree node is TYP_STRUCT
 */

Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
{
    fgWalkResult walkResult = WALK_CONTINUE;

    if ((*pTree)->gtType == TYP_STRUCT)
    {
        // Abort the walk and indicate that we found a value class
        walkResult = WALK_ABORT;
    }

    return walkResult;
}

/*****************************************************************************
 *
 *  If the stack contains any trees with references to local #lclNum, assign
 *  those trees to temps and replace their place on the stack with refs to
 *  their temps.
 */

void Compiler::impSpillLclRefs(ssize_t lclNum)
{
    /* Before we make any appends to the tree list we must spill the
     * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */

    impSpillSpecialSideEff();

    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        GenTree* tree = verCurrentState.esStack[level].val;

        /* If the tree may throw an exception, and the block has a handler,
           then we need to spill assignments to the local if the local is
           live on entry to the handler.
           Just spill 'em all without considering the liveness */

        bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));

        /* Skip the tree if it doesn't have an affected reference,
           unless xcptnCaught */

        if (xcptnCaught || gtHasRef(tree, lclNum))
        {
            impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
        }
    }
}

/*****************************************************************************
 *
 *  Push catch arg onto the stack.
 *  If there are jumps to the beginning of the handler, insert basic block
 *  and spill catch arg to a temp. Update the handler block if necessary.
 *
 *  Returns the basic block of the actual handler.
*/
// impPushCatchArgOnStack: push the incoming exception object (GT_CATCH_ARG)
// onto the evaluation stack at the entry of a catch/filter handler, spilling
// it to a temp in a newly injected block when the handler has multiple preds
// (or under stress / JIT32 GC-encoder single-block filters).
//
// Arguments:
//    hndBlk              - the handler's entry basic block
//    clsHnd              - class handle of the caught exception type
//    isSingleBlockFilter - whether this is a single-block filter (forces the
//                          spill block when JIT32_GCENCODER is defined)
//
// Return Value:
//    The basic block of the actual handler: hndBlk, or hndBlk->bbNext when a
//    previously injected spill block is being reused on reimport.
//
BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
{
    // Do not inject the basic block twice on reimport. This should be
    // hit only under JIT stress. See if the block is the one we injected.
    // Note that EH canonicalization can inject internal blocks here. We might
    // be able to re-use such a block (but we don't, right now).
    if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) ==
        (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE))
    {
        Statement* stmt = hndBlk->firstStmt();

        if (stmt != nullptr)
        {
            GenTree* tree = stmt->GetRootNode();
            assert(tree != nullptr);

            // Recognize the "temp = GT_CATCH_ARG" assignment we injected last time.
            if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
                (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG))
            {
                tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF);

                impPushOnStack(tree, typeInfo(TI_REF, clsHnd));

                return hndBlk->bbNext;
            }
        }

        // If we get here, it must have been some other kind of internal block. It's possible that
        // someone prepended something to our injected block, but that's unlikely.
    }

    /* Push the exception address value on the stack */
    GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);

    /* Mark the node as having a side-effect - i.e. cannot be
     * moved around since it is tied to a fixed location (EAX) */
    arg->gtFlags |= GTF_ORDER_SIDEEFF;

#if defined(JIT32_GCENCODER)
    const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
#else
    const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
#endif // defined(JIT32_GCENCODER)

    /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
    if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
    {
        if (hndBlk->bbRefs == 1)
        {
            hndBlk->bbRefs++;
        }

        /* Create extra basic block for the spill */
        BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
        newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE;
        newBlk->inheritWeight(hndBlk);
        newBlk->bbCodeOffs = hndBlk->bbCodeOffs;

        /* Account for the new link we are about to create */
        hndBlk->bbRefs++;

        // Spill into a temp.
        unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
        lvaTable[tempNum].lvType = TYP_REF;
        GenTree* argAsg          = gtNewTempAssign(tempNum, arg);
        arg                      = gtNewLclvNode(tempNum, TYP_REF);

        hndBlk->bbStkTempsIn = tempNum;

        Statement* argStmt;

        if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
        {
            // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus
            // won't do it for us.
            // TODO-DEBUGINFO: Previous code always set stack as non-empty
            // here. Can we not just use impCurStmtOffsSet? Are we out of sync
            // here with the stack?
            impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false));
            argStmt      = gtNewStmt(argAsg, impCurStmtDI);
        }
        else
        {
            argStmt = gtNewStmt(argAsg);
        }

        fgInsertStmtAtEnd(newBlk, argStmt);
    }

    impPushOnStack(arg, typeInfo(TI_REF, clsHnd));

    return hndBlk;
}

/*****************************************************************************
 *
 *  Given a tree, clone it. *pClone is set to the cloned tree.
 *  Returns the original tree if the cloning was easy,
 *  else returns the temp to which the tree had to be spilled to.
* If the tree has side-effects, it will be spilled to a temp.
*/

GenTree* Compiler::impCloneExpr(GenTree*             tree,
                                GenTree**            pClone,
                                CORINFO_CLASS_HANDLE structHnd,
                                unsigned             curLevel,
                                Statement** pAfterStmt DEBUGARG(const char* reason))
{
    if (!(tree->gtFlags & GTF_GLOB_EFFECT))
    {
        GenTree* clone = gtClone(tree, true);

        if (clone)
        {
            *pClone = clone;
            return tree;
        }
    }

    /* Store the operand in a temp and return the temp */

    unsigned temp = lvaGrabTemp(true DEBUGARG(reason));

    // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
    // return a struct type. It also may modify the struct type to a more
    // specialized type (e.g. a SIMD type). So we will get the type from
    // the lclVar AFTER calling impAssignTempGen().
    impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI);

    var_types type = genActualType(lvaTable[temp].TypeGet());

    // Both the returned tree and *pClone are uses of the same temp.
    *pClone = gtNewLclvNode(temp, type);
    return gtNewLclvNode(temp, type);
}

//------------------------------------------------------------------------
// impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the
// specified IL offset and 'is call' bit, using the current stack to determine
// whether to set the 'stack empty' bit.
//
// Arguments:
//    offs   - the IL offset for the DebugInfo
//    isCall - whether the created DebugInfo should have the IsCall bit set
//
// Return Value:
//    The DebugInfo instance.
//
DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall)
{
    assert(offs != BAD_IL_OFFSET);

    bool isStackEmpty = verCurrentState.esStackDepth <= 0;
    return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall));
}

//------------------------------------------------------------------------
// impCurStmtOffsSet: Set the "current debug info" to attach to statements that
// we are generating next.
//
// Arguments:
//    offs - the IL offset
//
// Remarks:
//    This function will be called in the main IL processing loop when it is
//    determined that we have reached a location in the IL stream for which we
//    want to report debug information. This is the main way we determine which
//    statements to report debug info for to the EE: for other statements, they
//    will have no debug information attached.
//
inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
{
    if (offs == BAD_IL_OFFSET)
    {
        impCurStmtDI = DebugInfo(compInlineContext, ILLocation());
    }
    else
    {
        impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false);
    }
}

//------------------------------------------------------------------------
// impCanSpillNow: check is it possible to spill all values from eeStack to local variables.
//
// Arguments:
//    prevOpcode - last importer opcode
//
// Return Value:
//    true if it is legal, false if it could be a sequence that we do not want to divide.
bool Compiler::impCanSpillNow(OPCODE prevOpcode)
{
    // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
    // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed.
    return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
}

/*****************************************************************************
 *
 *  Remember the instr offset for the statements
 *
 *  When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs),
 *  if the append was done because of a partial stack spill,
 *  as some of the trees corresponding to code up to impCurOpcOffs might
 *  still be sitting on the stack.
 *  So we delay calling of SetLastILOffset() until impNoteLastILoffs().
 *  This should be called when an opcode finally/explicitly causes
 *  impAppendTree(tree) to be called (as opposed to being called because of
 *  a spill caused by the opcode)
 */

#ifdef DEBUG

void Compiler::impNoteLastILoffs()
{
    if (impLastILoffsStmt == nullptr)
    {
        // We should have added a statement for the current basic block
        // Is this assert correct ?

        assert(impLastStmt);

        impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
    }
    else
    {
        impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
        impLastILoffsStmt = nullptr;
    }
}

#endif // DEBUG

/*****************************************************************************
 * We don't create any GenTree (excluding spills) for a branch.
 * For debugging info, we need a placeholder so that we can note
 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
 */

void Compiler::impNoteBranchOffs()
{
    if (opts.compDbgCode)
    {
        impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
    }
}

/*****************************************************************************
 * Locate the next stmt boundary for which we need to record info.
 * We will have to spill the stack at such boundaries if it is not
 * already empty.
 * Returns the next stmt boundary (after the start of the block)
 */

unsigned Compiler::impInitBlockLineInfo()
{
    /* Assume the block does not correspond with any IL offset. This prevents
       us from reporting extra offsets. Extra mappings can cause confusing
       stepping, especially if the extra mapping is a jump-target, and the
       debugger does not ignore extra mappings, but instead rewinds to the
       nearest known offset */

    impCurStmtOffsSet(BAD_IL_OFFSET);

    IL_OFFSET blockOffs = compCurBB->bbCodeOffs;

    if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
    {
        impCurStmtOffsSet(blockOffs);
    }

    /* Always report IL offset 0 or some tests get confused.
       Probably a good idea anyways */

    if (blockOffs == 0)
    {
        impCurStmtOffsSet(blockOffs);
    }

    if (!info.compStmtOffsetsCount)
    {
        return ~0;
    }

    /* Find the lowest explicit stmt boundary within the block */

    /* Start looking at an entry that is based on our instr offset */

    unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;

    if (index >= info.compStmtOffsetsCount)
    {
        index = info.compStmtOffsetsCount - 1;
    }

    /* If we've guessed too far, back up */

    while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
    {
        index--;
    }

    /* If we guessed short, advance ahead */

    while (info.compStmtOffsets[index] < blockOffs)
    {
        index++;

        if (index == info.compStmtOffsetsCount)
        {
            return info.compStmtOffsetsCount;
        }
    }

    assert(index < info.compStmtOffsetsCount);

    if (info.compStmtOffsets[index] == blockOffs)
    {
        /* There is an explicit boundary for the start of this basic block.
           So we will start with bbCodeOffs. Else we will wait until we
           get to the next explicit boundary */

        impCurStmtOffsSet(blockOffs);

        index++;
    }

    return index;
}

/*****************************************************************************/

bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode)
{
    switch (opcode)
    {
        case CEE_CALL:
        case CEE_CALLI:
        case CEE_CALLVIRT:
            return true;

        default:
            return false;
    }
}

/*****************************************************************************/

static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
{
    switch (opcode)
    {
        case CEE_CALL:
        case CEE_CALLI:
        case CEE_CALLVIRT:
        case CEE_JMP:
        case CEE_NEWOBJ:
        case CEE_NEWARR:
            return true;

        default:
            return false;
    }
}

/*****************************************************************************/

// One might think it is worth caching these values, but results indicate
// that it isn't.
// In addition, caching them causes SuperPMI to be unable to completely
// encapsulate an individual method context.
CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. We know if it can be * changed to TYP_I_IMPL only at the point where we use it */ /* static */ void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2) { if (tree1->IsLocalAddrExpr() != nullptr) { tree1->gtType = TYP_I_IMPL; } if (tree2 && (tree2->IsLocalAddrExpr() != nullptr)) { tree2->gtType = TYP_I_IMPL; } } /***************************************************************************** * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want * to make that an explicit cast in our trees, so any implicit casts that * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are * turned into explicit casts here. 
* We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0) */ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) { var_types currType = genActualType(tree->gtType); var_types wantedType = genActualType(dstTyp); if (wantedType != currType) { // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp)) { if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0))) { tree->gtType = TYP_I_IMPL; } } #ifdef TARGET_64BIT else if (varTypeIsI(wantedType) && (currType == TYP_INT)) { // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } else if ((wantedType == TYP_INT) && varTypeIsI(currType)) { // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT); } #endif // TARGET_64BIT } return tree; } /***************************************************************************** * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases, * but we want to make that an explicit cast in our trees, so any implicit casts * that exist in the IL are turned into explicit casts here. */ GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp) { if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType)) { tree = gtNewCastNode(dstTyp, tree, false, dstTyp); } return tree; } //------------------------------------------------------------------------ // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray // with a GT_COPYBLK node. // // Arguments: // sig - The InitializeArray signature. // // Return Value: // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or // nullptr otherwise. 
//
// Notes:
//    The function recognizes the following IL pattern:
//      ldc <length> or a list of ldc <lower bound>/<length>
//      newarr or newobj
//      dup
//      ldtoken <field handle>
//      call InitializeArray
//    The lower bounds need not be constant except when the array rank is 1.
//    The function recognizes all kinds of arrays thus enabling a small runtime
//    such as CoreRT to skip providing an implementation for InitializeArray.

GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
    assert(sig->numArgs == 2);

    // Stack layout on entry: [1] = the array, [0] = the RuntimeFieldHandle token.
    GenTree* fieldTokenNode = impStackTop(0).val;
    GenTree* arrayLocalNode = impStackTop(1).val;

    //
    // Verify that the field token is known and valid. Note that It's also
    // possible for the token to come from reflection, in which case we cannot do
    // the optimization and must therefore revert to calling the helper. You can
    // see an example of this in bvt\DynIL\initarray2.exe (in Main).
    //

    // Check to see if the ldtoken helper call is what we see here.
    if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
        (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
    {
        return nullptr;
    }

    // Strip helper call away
    fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();

    if (fieldTokenNode->gtOper == GT_IND)
    {
        fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
    }

    // Check for constant
    if (fieldTokenNode->gtOper != GT_CNS_INT)
    {
        return nullptr;
    }

    // The constant must carry a compile-time field handle; otherwise bail out.
    CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
    if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
    {
        return nullptr;
    }

    //
    // We need to get the number of elements in the array and the size of each element.
    // We verify that the newarr statement is exactly what we expect it to be.
    // If it's not then we just return NULL and we don't optimize this call
    //

    // It is possible the we don't have any statements in the block yet.
    if (impLastStmt == nullptr)
    {
        return nullptr;
    }

    //
    // We start by looking at the last statement, making sure it's an assignment, and
    // that the target of the assignment is the array passed to InitializeArray.
    //
    GenTree* arrayAssignment = impLastStmt->GetRootNode();
    if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
        (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() !=
                                                   arrayLocalNode->AsLclVarCommon()->GetLclNum()))
    {
        return nullptr;
    }

    //
    // Make sure that the object being assigned is a helper call.
    //

    GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2;
    if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER))
    {
        return nullptr;
    }

    //
    // Verify that it is one of the new array helpers.
    //

    bool isMDArray = false;

    if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
#ifdef FEATURE_READYTORUN
        && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
#endif
            )
    {
        // Not a single-dimensional array helper; the only other accepted shape
        // is the multi-dimensional array (newobj) helper.
        if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR))
        {
            return nullptr;
        }

        isMDArray = true;
    }

    CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle;

    //
    // Make sure we found a compile time handle to the array
    //
    if (!arrayClsHnd)
    {
        return nullptr;
    }

    unsigned rank = 0;
    S_UINT32 numElements;

    if (isMDArray)
    {
        rank = info.compCompHnd->getArrayRank(arrayClsHnd);

        if (rank == 0)
        {
            return nullptr;
        }

        // MD array helper call args: [0] = type token, [1] = arg count, [2] = address of the args array.
        GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs;
        assert(tokenArg != nullptr);
        GenTreeCall::Use* numArgsArg = tokenArg->GetNext();
        assert(numArgsArg != nullptr);
        GenTreeCall::Use* argsArg = numArgsArg->GetNext();
        assert(argsArg != nullptr);

        //
        // The number of arguments should be a constant between 1 and 64. The rank can't be 0
        // so at least one length must be present and the rank can't exceed 32 so there can
        // be at most 64 arguments - 32 lengths and 32 lower bounds.
        //

        if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) ||
            (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64))
        {
            return nullptr;
        }

        unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue());
        bool     lowerBoundsSpecified;

        if (numArgs == rank * 2)
        {
            lowerBoundsSpecified = true;
        }
        else if (numArgs == rank)
        {
            lowerBoundsSpecified = false;

            //
            // If the rank is 1 and a lower bound isn't specified then the runtime creates
            // a SDArray. Note that even if a lower bound is specified it can be 0 and then
            // we get a SDArray as well, see the for loop below.
            //

            if (rank == 1)
            {
                isMDArray = false;
            }
        }
        else
        {
            return nullptr;
        }

        //
        // The rank is known to be at least 1 so we can start with numElements being 1
        // to avoid the need to special case the first dimension.
        //

        numElements = S_UINT32(1);

        // Pattern matchers for the IR shape produced when the lower bounds/lengths
        // are stored into the lvaNewObjArrayArgs temp: each store looks like
        // ASG(IND(ADD(ADDR(lclVar), 4 * index)), value).
        struct Match
        {
            static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
            {
                return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
                       IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
            }

            static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
            {
                return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
                       (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
                       IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
            }

            static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
            {
                return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
                       (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
            }

            static bool IsComma(GenTree* tree)
            {
                return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
            }
        };

        unsigned argIndex = 0;
        GenTree* comma;

        // Walk the comma chain of stores; each iteration consumes one (lower bound,)
        // length pair when lower bounds are present, otherwise just one length.
        for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2())
        {
            if (lowerBoundsSpecified)
            {
                //
                // In general lower bounds can be ignored because they're not needed to
                // calculate the total number of elements. But for single dimensional arrays
                // we need to know if the lower bound is 0 because in this case the runtime
                // creates a SDArray and this affects the way the array data offset is calculated.
                //

                if (rank == 1)
                {
                    GenTree* lowerBoundAssign = comma->gtGetOp1();
                    assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
                    GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();

                    if (lowerBoundNode->IsIntegralConst(0))
                    {
                        isMDArray = false;
                    }
                }

                comma = comma->gtGetOp2();
                argIndex++;
            }

            GenTree* lengthNodeAssign = comma->gtGetOp1();
            assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
            GenTree* lengthNode = lengthNodeAssign->gtGetOp2();

            // Each length must be a compile-time constant for the total size to be known.
            if (!lengthNode->IsCnsIntOrI())
            {
                return nullptr;
            }

            numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
            argIndex++;
        }

        assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));

        if (argIndex != numArgs)
        {
            return nullptr;
        }
    }
    else
    {
        //
        // Make sure there are exactly two arguments: the array class and
        // the number of elements.
        //

        GenTree* arrayLengthNode;

        GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs;
#ifdef FEATURE_READYTORUN
        if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
        {
            // Array length is 1st argument for readytorun helper
            arrayLengthNode = args->GetNode();
        }
        else
#endif
        {
            // Array length is 2nd argument for regular helper
            arrayLengthNode = args->GetNext()->GetNode();
        }

        //
        // This optimization is only valid for a constant array size.
        //
        if (arrayLengthNode->gtOper != GT_CNS_INT)
        {
            return nullptr;
        }

        numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal);

        if (!info.compCompHnd->isSDArray(arrayClsHnd))
        {
            return nullptr;
        }
    }

    CORINFO_CLASS_HANDLE elemClsHnd;
    var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));

    //
    // Note that genTypeSize will return zero for non primitive types, which is exactly
    // what we want (size will then be 0, and we will catch this in the conditional below).
    // Note that we don't expect this to fail for valid binaries, so we assert in the
    // non-verification case (the verification case should not assert but rather correctly
    // handle bad binaries). This assert is not guarding any specific invariant, but rather
    // saying that we don't expect this to happen, and if it is hit, we need to investigate
    // why.
    //

    S_UINT32 elemSize(genTypeSize(elementType));
    S_UINT32 size = elemSize * S_UINT32(numElements);

    if (size.IsOverflow())
    {
        return nullptr;
    }

    // Zero-sized (non-primitive element) arrays and GC-ref element arrays cannot
    // be initialized with a raw block copy.
    if ((size.Value() == 0) || (varTypeIsGC(elementType)))
    {
        return nullptr;
    }

    void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
    if (!initData)
    {
        return nullptr;
    }

    //
    // At this point we are ready to commit to implementing the InitializeArray
    // intrinsic using a struct assignment. Pop the arguments from the stack and
    // return the struct assignment node.
    //

    impPopStack();
    impPopStack();

    const unsigned blkSize = size.Value();
    unsigned       dataOffset;

    if (isMDArray)
    {
        dataOffset = eeGetMDArrayDataOffset(rank);
    }
    else
    {
        dataOffset = eeGetArrayDataOffset();
    }

    // dst = first element of the array; src = an indirection off the static init data.
    GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
    GenTree* dst     = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize));
    GenTree* src     = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true);

#ifdef DEBUG
    src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics;
#endif

    return gtNewBlkOpNode(dst,   // dst
                          src,   // src
                          false, // volatile
                          true); // copyBlock
}

//------------------------------------------------------------------------
// impCreateSpanIntrinsic: Attempts to expand RuntimeHelpers.CreateSpan<T>
//    by building a ReadOnlySpan<T> over the static initialization data
//    referenced by the field token on top of the stack.
//
// Arguments:
//    sig - The CreateSpan<T> signature (one argument, one method type parameter).
//
// Return Value:
//    A tree producing the span local if the expansion succeeds, nullptr otherwise
//    (in which case the call is imported normally).
//
GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig)
{
    assert(sig->numArgs == 1);
    assert(sig->sigInst.methInstCount == 1);

    GenTree* fieldTokenNode = impStackTop(0).val;

    //
    // Verify that the field token is known and valid. Note that it's also
    // possible for the token to come from reflection, in which case we cannot do
    // the optimization and must therefore revert to calling the helper. You can
    // see an example of this in bvt\DynIL\initarray2.exe (in Main).
    //

    // Check to see if the ldtoken helper call is what we see here.
    if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
        (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
    {
        return nullptr;
    }

    // Strip helper call away
    fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();
    if (fieldTokenNode->gtOper == GT_IND)
    {
        fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
    }

    // Check for constant
    if (fieldTokenNode->gtOper != GT_CNS_INT)
    {
        return nullptr;
    }

    // The constant must carry a compile-time field handle; otherwise bail out.
    CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
    if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
    {
        return nullptr;
    }

    CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken);

    CORINFO_CLASS_HANDLE fieldClsHnd;
    var_types fieldElementType =
        JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd));
    unsigned totalFieldSize;

    // Most static initialization data fields are of some structure, but it is possible for them to be of various
    // primitive types as well
    if (fieldElementType == var_types::TYP_STRUCT)
    {
        totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd);
    }
    else
    {
        totalFieldSize = genTypeSize(fieldElementType);
    }

    // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom()
    CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0];
    if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF)
    {
        return nullptr;
    }

    const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd);
    assert(targetElemSize != 0);

    // The span length is the number of whole T elements covered by the data field.
    const unsigned count = totalFieldSize / targetElemSize;
    if (count == 0)
    {
        return nullptr;
    }

    void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize);
    if (!data)
    {
        return nullptr;
    }

    //
    // Ready to commit to the work
    //

    impPopStack();

    // Turn count and pointer value into constants.
    GenTree* lengthValue  = gtNewIconNode(count, TYP_INT);
    GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR);

    // Construct ReadOnlySpan<T> to return.
    CORINFO_CLASS_HANDLE spanHnd     = sig->retTypeClass;
    unsigned             spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>"));
    lvaSetStruct(spanTempNum, spanHnd, false);

    // NOTE(review): field 0 is taken to be the byref pointer and field 1 the int
    // length, with the length stored at TARGET_POINTER_SIZE -- this relies on the
    // span layout matching that assumption.
    CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0);
    CORINFO_FIELD_HANDLE lengthFieldHnd  = info.compCompHnd->getFieldInClass(spanHnd, 1);

    GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0);
    pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd));
    GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue);

    GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE);
    lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd));
    GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue);

    // Now append a few statements the initialize the span
    impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
    impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

    // And finally create a tree that points at the span.
    return impCreateLocalNode(spanTempNum DEBUGARG(0));
}

//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
//    newobjThis - for constructor calls, the tree for the newly allocated object
//    clsHnd - handle for the intrinsic method's class
//    method - handle for the intrinsic method
//    sig - signature of the intrinsic method
//    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
//    memberRef - the token for the intrinsic method
//    readonlyCall - true if call has a readonly prefix
//    tailCall - true if call is in tail position
//    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
//       if call is not constrained
//    constraintCallThisTransform -- this transform to apply for a constrained call
//    pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h)
//       for "traditional" jit intrinsics
//    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
//       that is amenable to special downstream optimization opportunities
//
// Returns:
//    IR tree to use in place of the call, or nullptr if the jit should treat
//    the intrinsic call like a normal call.
//
//    pIntrinsicName set to non-illegal value if the call is recognized as a
//    traditional jit intrinsic, even if the intrinsic is not expanded.
//
//    isSpecial set true if the expansion is subject to special
//    optimizations later in the jit processing
//
// Notes:
//    On success the IR tree may be a call to a different method or an inline
//    sequence. If it is a call, then the intrinsic processing here is responsible
//    for handling all the special cases, as upon return to impImportCall
//    expanded intrinsics bypass most of the normal call processing.
//
//    Intrinsics are generally not recognized in minopts and debug codegen.
// // However, certain traditional intrinsics are identifed as "must expand" // if there is no fallback implmentation to invoke; these must be handled // in all codegen modes. // // New style intrinsics (where the fallback implementation is in IL) are // identified as "must expand" if they are invoked from within their // own method bodies. // GenTree* Compiler::impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic) { assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0); bool mustExpand = false; bool isSpecial = false; NamedIntrinsic ni = NI_Illegal; if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0) { // The recursive non-virtual calls to Jit intrinsics are must-expand by convention. mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL)); ni = lookupNamedIntrinsic(method); // We specially support the following on all platforms to allow for dead // code optimization and to more generally support recursive intrinsics. 
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. 
info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. 
"Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. 
Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. 
op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. 
impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). 
case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? 
GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier) { op1->gtFlags |= GTF_MEMORYBARRIER_LOAD; } retNode = op1; break; } #ifdef FEATURE_HW_INTRINSICS case NI_System_Math_FusedMultiplyAdd: { #ifdef TARGET_XARCH if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes()) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return FMA.MultiplyAddScalar( // Vector128.CreateScalarUnsafe(x), // Vector128.CreateScalarUnsafe(y), // Vector128.CreateScalarUnsafe(z) // ).ToScalar(); GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* res = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16); retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16); break; } #elif defined(TARGET_ARM64) if (compExactlyDependsOn(InstructionSet_AdvSimd)) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return AdvSimd.FusedMultiplyAddScalar( // Vector64.Create{ScalarUnsafe}(z), // Vector64.Create{ScalarUnsafe}(y), // 
Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); NamedIntrinsic createVector64 = (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe; constexpr unsigned int simdSize = 8; GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, callJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize); break; } #endif // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently // requires more extensive changes to valuenum to support methods with 3 operands // We want to generate a GT_INTRINSIC node in the case the call can't be treated as // a target intrinsic so that we can still benefit from CSE and constant folding. 
break; } #endif // FEATURE_HW_INTRINSICS case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: { retNode = impMathIntrinsic(method, sig, callType, ni, tailCall); break; } case NI_System_Array_Clone: case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Object_MemberwiseClone: case NI_System_Threading_Thread_get_CurrentThread: { // Flag for later handling. isSpecial = true; break; } case NI_System_Object_GetType: { JITDUMP("\n impIntrinsic: call to Object.GetType\n"); GenTree* op1 = impStackTop(0).val; // If we're calling GetType on a boxed value, just get the type directly. if (op1->IsBoxedValue()) { JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n"); // Try and clean up the box. Obtain the handle we // were going to pass to the newobj. GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE); if (boxTypeHandle != nullptr) { // Note we don't need to play the TYP_STRUCT games here like // do for LDTOKEN since the return value of this operator is Type, // not RuntimeTypeHandle. 
impPopStack(); GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } // If we have a constrained callvirt with a "box this" transform // we know we have a value class and hence an exact type. // // If so, instead of boxing and then extracting the type, just // construct the type directly. if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) && (constraintCallThisTransform == CORINFO_BOX_THIS)) { // Ensure this is one of the is simple box cases (in particular, rule out nullables). const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass); const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX); if (isSafeToOptimize) { JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n"); impPopStack(); GenTree* typeHandleOp = impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */); if (typeHandleOp == nullptr) { assert(compDonotInline()); return nullptr; } GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } #ifdef DEBUG if (retNode != nullptr) { JITDUMP("Optimized result for call to GetType is\n"); if (verbose) { gtDispTree(retNode); } } #endif // Else expand as an intrinsic, unless the call is constrained, // in which case we defer expansion to allow impImportCall do the // special constraint processing. if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr)) { JITDUMP("Expanding as special intrinsic\n"); impPopStack(); op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method); // Set the CALL flag to indicate that the operator is implemented by a call. 
// Set also the EXCEPTION flag because the native implementation of // NI_System_Object_GetType intrinsic can throw NullReferenceException. op1->gtFlags |= (GTF_CALL | GTF_EXCEPT); retNode = op1; // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } if (retNode == nullptr) { JITDUMP("Leaving as normal call\n"); // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } break; } case NI_System_Array_GetLength: case NI_System_Array_GetLowerBound: case NI_System_Array_GetUpperBound: { // System.Array.GetLength(Int32) method: // public int GetLength(int dimension) // System.Array.GetLowerBound(Int32) method: // public int GetLowerBound(int dimension) // System.Array.GetUpperBound(Int32) method: // public int GetUpperBound(int dimension) // // Only implement these as intrinsics for multi-dimensional arrays. // Only handle constant dimension arguments. GenTree* gtDim = impStackTop().val; GenTree* gtArr = impStackTop(1).val; if (gtDim->IsIntegralConst()) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull); if (arrCls != NO_CLASS_HANDLE) { unsigned rank = info.compCompHnd->getArrayRank(arrCls); if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls)) { // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument // is `int` sized. INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue(); assert((unsigned int)dimValue == dimValue); unsigned dim = (unsigned int)dimValue; if (dim < rank) { // This is now known to be a multi-dimension array with a constant dimension // that is in range; we can expand it as an intrinsic. impPopStack().val; // Pop the dim and array object; we already have a pointer to them. impPopStack().val; // Make sure there are no global effects in the array (such as it being a function // call), so we can mark the generated indirection with GTF_IND_INVARIANT. 
In the // GetUpperBound case we need the cloned object, since we refer to the array // object twice. In the other cases, we don't need to clone. GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, 
TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
                        // (64-bit ReverseEndianness on a 32-bit target falls through to here
                        // and is left as an ordinary call; see the comment above.)
                        break;
                }
                break;
            }

            // Fold PopCount for constant input
            case NI_System_Numerics_BitOperations_PopCount:
            {
                assert(sig->numArgs == 1);
                if (impStackTop().val->IsIntegralConst())
                {
                    typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
                    INT64    cns     = impPopStack().val->AsIntConCommon()->IntegralValue();
                    if (argType.IsType(TI_LONG))
                    {
                        retNode = gtNewIconNode(genCountBits(cns), callType);
                    }
                    else
                    {
                        assert(argType.IsType(TI_INT));
                        // Narrow to 32 bits first so upper-half bits of the constant don't count.
                        retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType);
                    }
                }
                break;
            }

            case NI_System_GC_KeepAlive:
            {
                retNode = impKeepAliveIntrinsic(impPopStack().val);
                break;
            }

            default:
                break;
        }
    }

    if (mustExpand && (retNode == nullptr))
    {
        assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException");
        return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand);
    }

    // Optionally report if this intrinsic is special
    // (that is, potentially re-optimizable during morph).
    if (isSpecialIntrinsic != nullptr)
    {
        *isSpecialIntrinsic = isSpecial;
    }

    return retNode;
}

//------------------------------------------------------------------------
// impTypeIsAssignable: try to fold Type.IsAssignableFrom/IsAssignableTo to a constant
//
// Arguments:
//    typeTo   -- tree for the destination System.Type
//    typeFrom -- tree for the source System.Type
//
// Return Value:
//    A 0/1 integer constant node when both operands are CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE
//    calls with jit-time-known class handles and the compare result is definite;
//    nullptr otherwise (including TypeCompareState::May, which needs a runtime check).
//
GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom)
{
    // Optimize patterns like:
    //
    //   typeof(TTo).IsAssignableFrom(typeof(TTFrom))
    //   valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom))
    //   typeof(TTFrom).IsAssignableTo(typeof(TTo))
    //   typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType())
    //
    // to true/false
    if (typeTo->IsCall() && typeFrom->IsCall())
    {
        // make sure both arguments are `typeof()`
        CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
        if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof))
        {
            CORINFO_CLASS_HANDLE hClassTo   = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode());
            CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode());

            if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE)
            {
                return nullptr;
            }

            TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo);
            if (castResult == TypeCompareState::May)
            {
                // requires runtime check
                // e.g. __Canon, COMObjects, Nullable
                return nullptr;
            }

            GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0);
            impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls
            impPopStack();

            return retNode;
        }
    }
    return nullptr;
}

//------------------------------------------------------------------------
// impMathIntrinsic: import a System.Math / System.MathF intrinsic call as a GT_INTRINSIC node
//
// Arguments:
//    method        -- handle of the intrinsic method
//    sig           -- signature of the call (1 or 2 arguments)
//    callType      -- return type of the call
//    intrinsicName -- named intrinsic id (must satisfy IsMathIntrinsic)
//    tailCall      -- true if the call site carries an explicit tail prefix
//
// Return Value:
//    The GT_INTRINSIC node, or nullptr when the intrinsic is left as a call.
//
GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
                                    CORINFO_SIG_INFO*     sig,
                                    var_types             callType,
                                    NamedIntrinsic        intrinsicName,
                                    bool                  tailCall)
{
    GenTree* op1;
    GenTree* op2;

    assert(callType != TYP_STRUCT);
    assert(IsMathIntrinsic(intrinsicName));

    op1 = nullptr;

#if !defined(TARGET_X86)
    // Intrinsics that are not implemented directly by target instructions will
    // be re-materialized as users calls in rationalizer. For prefixed tail calls,
    // don't do this optimization, because
    //  a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1
    //  b) It will be non-trivial task or too late to re-materialize a surviving
    //     tail prefixed GT_INTRINSIC as tail call in rationalizer.
    if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall)
#else
    // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
    // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
    // code generation for certain EH constructs.
    if (!IsIntrinsicImplementedByUserCall(intrinsicName))
#endif
    {
        CORINFO_CLASS_HANDLE    tmpClass;
        CORINFO_ARG_LIST_HANDLE arg;
        var_types               op1Type;
        var_types               op2Type;

        switch (sig->numArgs)
        {
            case 1:
                op1 = impPopStack().val;

                arg     = sig->args;
                op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));

                // Insert a cast if the popped operand's stack type doesn't match the
                // declared argument type (only float<->double mismatches are expected).
                if (op1->TypeGet() != genActualType(op1Type))
                {
                    assert(varTypeIsFloating(op1));
                    op1 = gtNewCastNode(callType, op1, false, callType);
                }

                op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method);
                break;

            case 2:
                op2 = impPopStack().val;
                op1 = impPopStack().val;

                arg     = sig->args;
                op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));

                if (op1->TypeGet() != genActualType(op1Type))
                {
                    assert(varTypeIsFloating(op1));
                    op1 = gtNewCastNode(callType, op1, false, callType);
                }

                arg     = info.compCompHnd->getArgNext(arg);
                op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass)));

                if (op2->TypeGet() != genActualType(op2Type))
                {
                    assert(varTypeIsFloating(op2));
                    op2 = gtNewCastNode(callType, op2, false, callType);
                }

                op1 =
                    new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method);
                break;

            default:
                NO_WAY("Unsupported number of args for Math Intrinsic");
        }

        // Intrinsics expanded as user calls still transfer control; flag accordingly.
        if (IsIntrinsicImplementedByUserCall(intrinsicName))
        {
            op1->gtFlags |= GTF_CALL;
        }
    }

    return op1;
}

//------------------------------------------------------------------------
// lookupNamedIntrinsic: map method to jit named intrinsic value
//
// Arguments:
//    method -- method handle for method
//
// Return Value:
//    Id for the named intrinsic, or Illegal if none.
//
// Notes:
//    method should have CORINFO_FLG_INTRINSIC set in its attributes,
//    otherwise it is not a named jit intrinsic.
//
NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
{
    const char* className          = nullptr;
    const char* namespaceName      = nullptr;
    const char* enclosingClassName = nullptr;
    const char* methodName =
        info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName);

    JITDUMP("Named Intrinsic ");

    if (namespaceName != nullptr)
    {
        JITDUMP("%s.", namespaceName);
    }
    if (enclosingClassName != nullptr)
    {
        JITDUMP("%s.", enclosingClassName);
    }
    if (className != nullptr)
    {
        JITDUMP("%s.", className);
    }
    if (methodName != nullptr)
    {
        JITDUMP("%s", methodName);
    }

    if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
    {
        // Check if we are dealing with an MD array's known runtime method
        CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method);
        switch (arrayFuncIndex)
        {
            case CorInfoArrayIntrinsic::GET:
                JITDUMP("ARRAY_FUNC_GET: Recognized\n");
                return NI_Array_Get;
            case CorInfoArrayIntrinsic::SET:
                JITDUMP("ARRAY_FUNC_SET: Recognized\n");
                return NI_Array_Set;
            case CorInfoArrayIntrinsic::ADDRESS:
                JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n");
                return NI_Array_Address;
            default:
                break;
        }

        JITDUMP(": Not recognized, not enough metadata\n");
        return NI_Illegal;
    }

    JITDUMP(": ");

    NamedIntrinsic result = NI_Illegal;

    // Dispatch on namespace, then class, then method name.
    if (strcmp(namespaceName, "System") == 0)
    {
        if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
        {
            result = NI_System_Enum_HasFlag;
        }
        else if (strcmp(className, "Activator") == 0)
        {
            if (strcmp(methodName, "AllocatorOf") == 0)
            {
                result = NI_System_Activator_AllocatorOf;
            }
            else if (strcmp(methodName, "DefaultConstructorOf") == 0)
            {
                result = NI_System_Activator_DefaultConstructorOf;
            }
        }
        else if (strcmp(className, "ByReference`1") == 0)
        {
            if (strcmp(methodName, ".ctor") == 0)
            {
                result = NI_System_ByReference_ctor;
            }
            else if (strcmp(methodName, "get_Value") == 0)
            {
                result = NI_System_ByReference_get_Value;
            }
        }
        // Math and MathF share one set of named intrinsic ids.
        else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0)
        {
            if (strcmp(methodName, "Abs") == 0)
            {
                result = NI_System_Math_Abs;
            }
            else if (strcmp(methodName, "Acos") == 0)
            {
                result = NI_System_Math_Acos;
            }
            else if (strcmp(methodName, "Acosh") == 0)
            {
                result = NI_System_Math_Acosh;
            }
            else if (strcmp(methodName, "Asin") == 0)
            {
                result = NI_System_Math_Asin;
            }
            else if (strcmp(methodName, "Asinh") == 0)
            {
                result = NI_System_Math_Asinh;
            }
            else if (strcmp(methodName, "Atan") == 0)
            {
                result = NI_System_Math_Atan;
            }
            else if (strcmp(methodName, "Atanh") == 0)
            {
                result = NI_System_Math_Atanh;
            }
            else if (strcmp(methodName, "Atan2") == 0)
            {
                result = NI_System_Math_Atan2;
            }
            else if (strcmp(methodName, "Cbrt") == 0)
            {
                result = NI_System_Math_Cbrt;
            }
            else if (strcmp(methodName, "Ceiling") == 0)
            {
                result = NI_System_Math_Ceiling;
            }
            else if (strcmp(methodName, "Cos") == 0)
            {
                result = NI_System_Math_Cos;
            }
            else if (strcmp(methodName, "Cosh") == 0)
            {
                result = NI_System_Math_Cosh;
            }
            else if (strcmp(methodName, "Exp") == 0)
            {
                result = NI_System_Math_Exp;
            }
            else if (strcmp(methodName, "Floor") == 0)
            {
                result = NI_System_Math_Floor;
            }
            else if (strcmp(methodName, "FMod") == 0)
            {
                result = NI_System_Math_FMod;
            }
            else if (strcmp(methodName, "FusedMultiplyAdd") == 0)
            {
                result = NI_System_Math_FusedMultiplyAdd;
            }
            else if (strcmp(methodName, "ILogB") == 0)
            {
                result = NI_System_Math_ILogB;
            }
            else if (strcmp(methodName, "Log") == 0)
            {
                result = NI_System_Math_Log;
            }
            else if (strcmp(methodName, "Log2") == 0)
            {
                result = NI_System_Math_Log2;
            }
            else if (strcmp(methodName, "Log10") == 0)
            {
                result = NI_System_Math_Log10;
            }
            else if (strcmp(methodName, "Pow") == 0)
            {
                result = NI_System_Math_Pow;
            }
            else if (strcmp(methodName, "Round") == 0)
            {
                result = NI_System_Math_Round;
            }
            else if (strcmp(methodName, "Sin") == 0)
            {
                result = NI_System_Math_Sin;
            }
            else if (strcmp(methodName, "Sinh") == 0)
            {
                result = NI_System_Math_Sinh;
            }
            else if (strcmp(methodName, "Sqrt") == 0)
            {
                result = NI_System_Math_Sqrt;
            }
            else if (strcmp(methodName, "Tan") == 0)
            {
                result = NI_System_Math_Tan;
            }
            else if (strcmp(methodName, "Tanh") == 0)
            {
                result = NI_System_Math_Tanh;
            }
        }
        else if (strcmp(className, "GC") == 0)
        {
            if (strcmp(methodName, "KeepAlive") == 0)
            {
                result = NI_System_GC_KeepAlive;
            }
        }
        else if (strcmp(className, "Array") == 0)
        {
            if (strcmp(methodName, "Clone") == 0)
            {
                result = NI_System_Array_Clone;
            }
            else if (strcmp(methodName, "GetLength") == 0)
            {
                result = NI_System_Array_GetLength;
            }
            else if (strcmp(methodName, "GetLowerBound") == 0)
            {
                result = NI_System_Array_GetLowerBound;
            }
            else if (strcmp(methodName, "GetUpperBound") == 0)
            {
                result = NI_System_Array_GetUpperBound;
            }
        }
        else if (strcmp(className, "Object") == 0)
        {
            if (strcmp(methodName, "MemberwiseClone") == 0)
            {
                result = NI_System_Object_MemberwiseClone;
            }
            else if (strcmp(methodName, "GetType") == 0)
            {
                result = NI_System_Object_GetType;
            }
            else if (strcmp(methodName, "MethodTableOf") == 0)
            {
                result = NI_System_Object_MethodTableOf;
            }
        }
        else if (strcmp(className, "RuntimeTypeHandle") == 0)
        {
            if (strcmp(methodName, "GetValueInternal") == 0)
            {
                result = NI_System_RuntimeTypeHandle_GetValueInternal;
            }
        }
        else if (strcmp(className, "Type") == 0)
        {
            if (strcmp(methodName, "get_IsValueType") == 0)
            {
                result = NI_System_Type_get_IsValueType;
            }
            else if (strcmp(methodName, "IsAssignableFrom") == 0)
            {
                result = NI_System_Type_IsAssignableFrom;
            }
            else if (strcmp(methodName, "IsAssignableTo") == 0)
            {
                result = NI_System_Type_IsAssignableTo;
            }
            else if (strcmp(methodName, "op_Equality") == 0)
            {
                result = NI_System_Type_op_Equality;
            }
            else if (strcmp(methodName, "op_Inequality") == 0)
            {
                result = NI_System_Type_op_Inequality;
            }
            else if (strcmp(methodName, "GetTypeFromHandle") == 0)
            {
                result = NI_System_Type_GetTypeFromHandle;
            }
        }
        else if (strcmp(className, "String") == 0)
        {
            if (strcmp(methodName, "get_Chars") == 0)
            {
                result = NI_System_String_get_Chars;
            }
            else if (strcmp(methodName, "get_Length") == 0)
            {
                result = NI_System_String_get_Length;
            }
        }
        else if (strcmp(className,
"Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_ReadOnlySpan_get_Item; } } else if (strcmp(className, "EETypePtr") == 0) { if (strcmp(methodName, "EETypePtrOf") == 0) { result = NI_System_EETypePtr_EETypePtrOf; } } } else if (strcmp(namespaceName, "System.Threading") == 0) { if (strcmp(className, "Thread") == 0) { if (strcmp(methodName, "get_CurrentThread") == 0) { result = NI_System_Threading_Thread_get_CurrentThread; } else if (strcmp(methodName, "get_ManagedThreadId") == 0) { result = NI_System_Threading_Thread_get_ManagedThreadId; } } else if (strcmp(className, "Interlocked") == 0) { #ifndef TARGET_ARM64 // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). if (strcmp(methodName, "And") == 0) { result = NI_System_Threading_Interlocked_And; } else if (strcmp(methodName, "Or") == 0) { result = NI_System_Threading_Interlocked_Or; } #endif if (strcmp(methodName, "CompareExchange") == 0) { result = NI_System_Threading_Interlocked_CompareExchange; } else if (strcmp(methodName, "Exchange") == 0) { result = NI_System_Threading_Interlocked_Exchange; } else if (strcmp(methodName, "ExchangeAdd") == 0) { result = NI_System_Threading_Interlocked_ExchangeAdd; } else if (strcmp(methodName, "MemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_MemoryBarrier; } else if (strcmp(methodName, "ReadMemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_ReadMemoryBarrier; } } } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) { result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, 
"System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_EqualityComparer_get_Default; } else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_Comparer_get_Default; } } else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0)) { if (strcmp(methodName, "PopCount") == 0) { result = NI_System_Numerics_BitOperations_PopCount; } } #ifdef FEATURE_HW_INTRINSICS else if (strcmp(namespaceName, "System.Numerics") == 0) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); int sizeOfVectorT = getSIMDVectorRegisterByteLength(); result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT); } #endif // FEATURE_HW_INTRINSICS else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) && (strcmp(className, "RuntimeHelpers") == 0)) { if (strcmp(methodName, "CreateSpan") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; } else if (strcmp(methodName, "InitializeArray") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; } else if (strcmp(methodName, "IsKnownConstant") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; } } else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0) { // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled // so we can specially handle IsSupported and recursive calls. // This is required to appropriately handle the intrinsics on platforms // which don't support them. On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. 
When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. 
        result = NI_Throw_PlatformNotSupportedException;
                }
            }
        }
        else if (strcmp(namespaceName, "System.StubHelpers") == 0)
        {
            if (strcmp(className, "StubHelpers") == 0)
            {
                if (strcmp(methodName, "GetStubContext") == 0)
                {
                    result = NI_System_StubHelpers_GetStubContext;
                }
                else if (strcmp(methodName, "NextCallReturnAddress") == 0)
                {
                    result = NI_System_StubHelpers_NextCallReturnAddress;
                }
            }
        }

    // Summarize the lookup outcome in the JIT dump before returning.
    if (result == NI_Illegal)
    {
        JITDUMP("Not recognized\n");
    }
    else if (result == NI_IsSupported_False)
    {
        JITDUMP("Unsupported - return false");
    }
    else if (result == NI_Throw_PlatformNotSupportedException)
    {
        JITDUMP("Unsupported - throw PlatformNotSupportedException");
    }
    else
    {
        JITDUMP("Recognized\n");
    }
    return result;
}

//------------------------------------------------------------------------
// impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic
//
// Arguments:
//    helper     - JIT helper ID for the exception to be thrown
//    method     - method handle of the intrinsic function.
//    sig        - signature of the intrinsic call
//    mustExpand - true if the intrinsic must return a GenTree*; otherwise, false
//
// Return Value:
//    a gtNewMustThrowException if mustExpand is true; otherwise, nullptr
//
GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned              helper,
                                                CORINFO_METHOD_HANDLE method,
                                                CORINFO_SIG_INFO*     sig,
                                                bool                  mustExpand)
{
    // We've hit some error case and may need to return a node for the given error.
    //
    // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. In this
    // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to
    // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the
    // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't
    // match that).
    //
    // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally
    // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning
    // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node.

    if (mustExpand)
    {
        // Pop (and discard) all of the call's arguments; the throw helper replaces the call entirely.
        for (unsigned i = 0; i < sig->numArgs; i++)
        {
            impPopStack();
        }

        return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass);
    }
    else
    {
        return nullptr;
    }
}

/*****************************************************************************/

GenTree* Compiler::impArrayAccessIntrinsic(
    CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName)
{
    /* If we are generating SMALL_CODE, we don't want to use intrinsics for
       the following, as it generates fatter code.
    */

    if (compCodeOpt() == SMALL_CODE)
    {
        return nullptr;
    }

    /* These intrinsics generate fatter (but faster) code and are only
       done if we don't need SMALL_CODE */

    // For NI_Array_Set the final argument is the value being stored, so it is not an index.
    unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;

    // The rank 1 case is special because it has to handle two array formats
    // we will simply not do that case
    if (rank > GT_ARR_MAX_RANK || rank <= 1)
    {
        return nullptr;
    }

    CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
    var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));

    // For the ref case, we will only be able to inline if the types match
    // (verifier checks for this, we don't care for the nonverified case and the
    // type is final (so we don't need to do the cast)
    if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
    {
        // Get the call site signature
        CORINFO_SIG_INFO LocalSig;
        eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
        assert(LocalSig.hasThis());

        CORINFO_CLASS_HANDLE actualElemClsHnd;

        if (intrinsicName == NI_Array_Set)
        {
            // Fetch the last argument, the one that indicates the type we are setting.
            CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
            for (unsigned r = 0; r < rank; r++)
            {
                argType = info.compCompHnd->getArgNext(argType);
            }

            typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
            actualElemClsHnd = argInfo.GetClassHandle();
        }
        else
        {
            assert(intrinsicName == NI_Array_Address);

            // Fetch the return type
            typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
            assert(retInfo.IsByRef());
            actualElemClsHnd = retInfo.GetClassHandle();
        }

        // if it's not final, we can't do the optimization
        if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
        {
            return nullptr;
        }
    }

    unsigned arrayElemSize;
    if (elemType == TYP_STRUCT)
    {
        assert(arrElemClsHnd);
        arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
    }
    else
    {
        arrayElemSize = genTypeSize(elemType);
    }

    if ((unsigned char)arrayElemSize != arrayElemSize)
    {
        // arrayElemSize would be truncated as an unsigned char.
        // This means the array element is too large. Don't do the optimization.
        return nullptr;
    }

    GenTree* val = nullptr;

    if (intrinsicName == NI_Array_Set)
    {
        // Assignment of a struct is more work, and there are more gets than sets.
        if (elemType == TYP_STRUCT)
        {
            return nullptr;
        }

        val = impPopStack().val;
        assert(genActualType(elemType) == genActualType(val->gtType) ||
               (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
               (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
               (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
    }

    noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);

    // Indices are popped in reverse order: the last-pushed index is the highest dimension.
    GenTree* inds[GT_ARR_MAX_RANK];
    for (unsigned k = rank; k > 0; k--)
    {
        inds[k - 1] = impPopStack().val;
    }

    GenTree* arr = impPopStack().val;
    assert(arr->gtType == TYP_REF);

    GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
                                                              static_cast<unsigned char>(arrayElemSize), elemType,
                                                              &inds[0]);

    // Get/Set produce or consume the element value itself, so wrap the byref in an
    // indirection; Address returns the byref unmodified.
    if (intrinsicName != NI_Array_Address)
    {
        if (varTypeIsStruct(elemType))
        {
            arrElem = gtNewObjNode(sig->retTypeClass, arrElem);
        }
        else
        {
            arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
        }
    }

    if (intrinsicName == NI_Array_Set)
    {
        assert(val != nullptr);
        return gtNewAssignNode(arrElem, val);
    }
    else
    {
        return arrElem;
    }
}

//------------------------------------------------------------------------
// impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call
//
// Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization,
// if the object to keep alive is a GT_BOX, removes its side effects and
// uses the address of a local (copied from the box's source if needed)
// as the operand for GT_KEEPALIVE. For the BOX optimization, if the class
// of the box has no GC fields, a GT_NOP is returned.
//
// Arguments:
//    objToKeepAlive - the intrinsic call's argument
//
// Return Value:
//    The imported GT_KEEPALIVE or GT_NOP - see description.
//
GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive)
{
    assert(objToKeepAlive->TypeIs(TYP_REF));

    if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue())
    {
        CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd;
        ClassLayout*         layout     = typGetObjLayout(boxedClass);

        if (!layout->HasGCPtr())
        {
            // Nothing in the box is of interest to the GC: drop the box entirely.
            gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW);
            JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP");

            return gtNewNothingNode();
        }

        GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW);
        if (boxSrc != nullptr)
        {
            unsigned boxTempNum;
            if (boxSrc->OperIs(GT_LCL_VAR))
            {
                // The box source is already a local; keep that local alive directly.
                boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum();
            }
            else
            {
                // Copy the box source into a fresh temp and splice the assignment into
                // the statement that used to initialize the box payload.
                boxTempNum            = lvaGrabTemp(true DEBUGARG("Temp for the box source"));
                GenTree*   boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc);
                Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue;
                boxAsgStmt->SetRootNode(boxTempAsg);
            }

            JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum);

            // Taking the local's address forces it to stay live for the GC.
            GenTree* boxTemp     = gtNewLclvNode(boxTempNum, boxSrc->TypeGet());
            GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp);

            return gtNewKeepAliveNode(boxTempAddr);
        }
    }

    return gtNewKeepAliveNode(objToKeepAlive);
}

// verMergeEntryStates: merge the current importer/verifier state into the entry state
// recorded for 'block'. Returns false if the states are irreconcilable (stack depth
// mismatch or un-mergeable stack entry types); sets *changed when the block's recorded
// entry state was widened by the merge.
bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
{
    unsigned i;

    // do some basic checks first
    if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
    {
        return false;
    }

    if (verCurrentState.esStackDepth > 0)
    {
        // merge stack types
        StackEntry* parentStack = block->bbStackOnEntry();
        StackEntry* childStack  = verCurrentState.esStack;

        for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
        {
            if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false)
            {
                return false;
            }
        }
    }

    // merge initialization status of this ptr

    if (verTrackObjCtorInitState)
    {
        // If we're tracking the CtorInitState, then it must not be unknown in the current state.
        assert(verCurrentState.thisInitialized != TIS_Bottom);

        // If the successor block's thisInit state is unknown, copy it from the current state.
        if (block->bbThisOnEntry() == TIS_Bottom)
        {
            *changed = true;
            verSetThisInit(block, verCurrentState.thisInitialized);
        }
        else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
        {
            if (block->bbThisOnEntry() != TIS_Top)
            {
                *changed = true;
                verSetThisInit(block, TIS_Top);

                if (block->bbFlags & BBF_FAILED_VERIFICATION)
                {
                    // The block is bad. Control can flow through the block to any handler that catches the
                    // verification exception, but the importer ignores bad blocks and therefore won't model
                    // this flow in the normal way. To complete the merge into the bad block, the new state
                    // needs to be manually pushed to the handlers that may be reached after the verification
                    // exception occurs.
                    //
                    // Usually, the new state was already propagated to the relevant handlers while processing
                    // the predecessors of the bad block. The exception is when the bad block is at the start
                    // of a try region, meaning it is protected by additional handlers that do not protect its
                    // predecessors.
                    //
                    if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
                    {
                        // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
                        // recursive calls back into this code path (if successors of the current bad block are
                        // also bad blocks).
                        //
                        ThisInitState origTIS           = verCurrentState.thisInitialized;
                        verCurrentState.thisInitialized = TIS_Top;
                        impVerifyEHBlock(block, true);
                        verCurrentState.thisInitialized = origTIS;
                    }
                }
            }
        }
    }
    else
    {
        assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
    }

    return true;
}

/*****************************************************************************
 * 'logMsg' is true if a log message needs to be logged. false if the caller has
 * already logged it (presumably in a more detailed fashion than done here)
 */

void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
    // Rewrite the block so its only effect is to throw the verification exception.
    block->bbJumpKind = BBJ_THROW;
    block->bbFlags |= BBF_FAILED_VERIFICATION;
    block->bbFlags &= ~BBF_IMPORTED;

    impCurStmtOffsSet(block->bbCodeOffs);

    // Clear the statement list as it exists so far; we're only going to have a verification exception.
    impStmtList = impLastStmt = nullptr;

#ifdef DEBUG
    if (logMsg)
    {
        JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
                block->bbCodeOffs, block->bbCodeOffsEnd));
        if (verbose)
        {
            printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
        }
    }

    if (JitConfig.DebugBreakOnVerificationFailure())
    {
        DebugBreak();
    }
#endif

    impBeginTreeList();

    // if the stack is non-empty evaluate all the side-effects
    if (verCurrentState.esStackDepth > 0)
    {
        impEvalSideEffects();
    }
    assert(verCurrentState.esStackDepth == 0);

    GenTree* op1 =
        gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs)));
    // verCurrentState.esStackDepth = 0;
    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

    // The inliner is not able to handle methods that require throw block, so
    // make sure this method never gets inlined.
    info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
}

/*****************************************************************************
 *
 */
void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
{
    // Reset the importer state to the block's entry state, then convert the block
    // into a throw of the verification exception.
    verResetCurrentState(block, &verCurrentState);
    verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));

#ifdef DEBUG
    impNoteLastILoffs(); // Remember at which BC offset the tree was finished
#endif                   // DEBUG
}

/******************************************************************************/
// verMakeTypeInfo: build a verifier typeInfo from a CorInfoType plus (optional)
// class handle. Returns the error typeInfo (default-constructed) when the inputs
// are inconsistent or unsupported (e.g. pointers, void).
typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
{
    assert(ciType < CORINFO_TYPE_COUNT);

    typeInfo tiResult;
    switch (ciType)
    {
        case CORINFO_TYPE_STRING:
        case CORINFO_TYPE_CLASS:
            tiResult = verMakeTypeInfo(clsHnd);
            if (!tiResult.IsType(TI_REF))
            { // type must be consistent with element type
                return typeInfo();
            }
            break;

#ifdef TARGET_64BIT
        case CORINFO_TYPE_NATIVEINT:
        case CORINFO_TYPE_NATIVEUINT:
            if (clsHnd)
            {
                // If we have more precise information, use it
                return verMakeTypeInfo(clsHnd);
            }
            else
            {
                return typeInfo::nativeInt();
            }
            break;
#endif // TARGET_64BIT

        case CORINFO_TYPE_VALUECLASS:
        case CORINFO_TYPE_REFANY:
            tiResult = verMakeTypeInfo(clsHnd);
            // type must be consistent with element type
            if (!tiResult.IsValueClass())
            {
                return typeInfo();
            }
            break;

        case CORINFO_TYPE_VAR:
            return verMakeTypeInfo(clsHnd);

        case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
        case CORINFO_TYPE_VOID:
            return typeInfo();
            break;

        case CORINFO_TYPE_BYREF:
        {
            CORINFO_CLASS_HANDLE childClassHandle;
            CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
            return ByRef(verMakeTypeInfo(childType, childClassHandle));
        }
        break;

        default:
            if (clsHnd)
            {
                // If we have more precise information, use it
                return typeInfo(TI_STRUCT, clsHnd);
            }
            else
            {
                return typeInfo(JITtype2tiType(ciType));
            }
    }
    return tiResult;
}

/******************************************************************************/
typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */) { if (clsHnd == nullptr) { return typeInfo(); } // Byrefs should only occur in method and local signatures, which are accessed // using ICorClassInfo and ICorClassInfo.getChildType. // So findClass() and getClassAttribs() should not be called for byrefs if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF) { assert(!"Did findClass() return a Byref?"); return typeInfo(); } unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd); if (attribs & CORINFO_FLG_VALUECLASS) { CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd); // Meta-data validation should ensure that CORINF_TYPE_BYREF should // not occur here, so we may want to change this to an assert instead. if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR) { return typeInfo(); } #ifdef TARGET_64BIT if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT) { return typeInfo::nativeInt(); } #endif // TARGET_64BIT if (t != CORINFO_TYPE_UNDEF) { return (typeInfo(JITtype2tiType(t))); } else if (bashStructToRef) { return (typeInfo(TI_REF, clsHnd)); } else { return (typeInfo(TI_STRUCT, clsHnd)); } } else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE) { // See comment in _typeInfo.h for why we do it this way. return (typeInfo(TI_REF, clsHnd, true)); } else { return (typeInfo(TI_REF, clsHnd)); } } /******************************************************************************/ bool Compiler::verIsSDArray(const typeInfo& ti) { if (ti.IsNullObjRef()) { // nulls are SD arrays return true; } if (!ti.IsType(TI_REF)) { return false; } if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef())) { return false; } return true; } /******************************************************************************/ /* Given 'arrayObjectType' which is an array type, fetch the element type. 
*/ /* Returns an error type if anything goes wrong */ typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType) { assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case if (!verIsSDArray(arrayObjectType)) { return typeInfo(); } CORINFO_CLASS_HANDLE childClassHandle = nullptr; CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle); return verMakeTypeInfo(ciType, childClassHandle); } /***************************************************************************** */ typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args) { CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle)); var_types type = JITtype2varType(ciType); if (varTypeIsGC(type)) { // For efficiency, getArgType only returns something in classHandle for // value types. For other types that have addition type info, you // have to call back explicitly classHandle = info.compCompHnd->getArgClass(sig, args); if (!classHandle) { NO_WAY("Could not figure out Class specified in argument or local signature"); } } return verMakeTypeInfo(ciType, classHandle); } bool Compiler::verIsByRefLike(const typeInfo& ti) { if (ti.IsByRef()) { return true; } if (!ti.IsType(TI_STRUCT)) { return false; } return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE; } bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti) { if (ti.IsPermanentHomeByRef()) { return true; } else { return false; } } bool Compiler::verIsBoxable(const typeInfo& ti) { return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables || ti.IsUnboxedGenericTypeVar() || (ti.IsType(TI_STRUCT) && // exclude byreflike structs !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE))); } // Is it a boxed value type? 
bool Compiler::verIsBoxedValueType(const typeInfo& ti)
{
    // A boxed value type appears on the verification stack as an object ref
    // whose class handle is a value class.
    if (ti.GetType() == TI_REF)
    {
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
        return !!eeIsValueClass(clsHnd);
    }
    else
    {
        return false;
    }
}

/*****************************************************************************
 *
 *  Check if a TailCall is legal.
 */

bool Compiler::verCheckTailCallConstraint(
    OPCODE                  opcode,
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
    bool speculative                                   // If true, won't throw if verification fails. Instead it will
                                                       // return false to the caller.
                                                       // If false, it will throw.
    )
{
    DWORD            mflags;
    CORINFO_SIG_INFO sig;
    unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
                                   // this counter is used to keep track of how many items have been
                                   // virtually popped

    CORINFO_METHOD_HANDLE methodHnd       = nullptr;
    CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
    unsigned              methodClassFlgs = 0;

    assert(impOpcodeIsCallOpcode(opcode));

    if (compIsForInlining())
    {
        return false;
    }

    // for calli, VerifyOrReturn that this is not a virtual method
    if (opcode == CEE_CALLI)
    {
        /* Get the call sig */
        eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);

        // We don't know the target method, so we have to infer the flags, or
        // assume the worst-case.
        mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
    }
    else
    {
        methodHnd = pResolvedToken->hMethod;

        mflags = info.compCompHnd->getMethodAttribs(methodHnd);

        // When verifying generic code we pair the method handle with its
        // owning class to get the exact method signature.
        methodClassHnd = pResolvedToken->hClass;
        assert(methodClassHnd);

        eeGetMethodSig(methodHnd, &sig, methodClassHnd);

        // opcode specific check
        methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
    }

    // We must have got the methodClassHnd if opcode is not CEE_CALLI
    assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);

    if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
    }

    // check compatibility of the arguments
    unsigned int argCount;
    argCount = sig.numArgs;
    CORINFO_ARG_LIST_HANDLE args;
    args = sig.args;
    while (argCount--)
    {
        typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();

        // check that the argument is not a byref for tailcalls
        VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);

        // For unsafe code, we might have parameters containing pointer to the stack location.
        // Disallow the tailcall for this kind.
        CORINFO_CLASS_HANDLE classHandle;
        CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
        VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);

        args = info.compCompHnd->getArgNext(args);
    }

    // update popCount
    popCount += sig.numArgs;

    // check for 'this' which is on non-static methods, not called via NEWOBJ
    if (!(mflags & CORINFO_FLG_STATIC))
    {
        // Always update the popCount.
        // This is crucial for the stack calculation to be correct.
        typeInfo tiThis = impStackTop(popCount).seTypeInfo;
        popCount++;

        if (opcode == CEE_CALLI)
        {
            // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
            // on the stack.
            if (tiThis.IsValueClass())
            {
                tiThis.MakeByRef();
            }
            VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
        }
        else
        {
            // Check type compatibility of the this argument
            typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
            if (tiDeclaredThis.IsValueClass())
            {
                tiDeclaredThis.MakeByRef();
            }

            VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
        }
    }

    // Tail calls on constrained calls should be illegal too:
    // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
    VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);

    // Get the exact view of the signature for an array method
    if (sig.retType != CORINFO_TYPE_VOID)
    {
        if (methodClassFlgs & CORINFO_FLG_ARRAY)
        {
            assert(opcode != CEE_CALLI);
            eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
        }
    }

    typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
    typeInfo tiCallerRetType =
        verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);

    // void return type gets morphed into the error type, so we have to treat them specially here
    if (sig.retType == CORINFO_TYPE_VOID)
    {
        VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
                                  speculative);
    }
    else
    {
        VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
                                                   NormaliseForStack(tiCallerRetType), true),
                                  "tailcall return mismatch", speculative);
    }

    // for tailcall, stack must be empty
    VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);

    return true; // Yes, tailcall is legal
}

/*****************************************************************************
 *
 *  Checks the IL verification rules for the call
 */

void Compiler::verVerifyCall(OPCODE                  opcode,
                             CORINFO_RESOLVED_TOKEN* pResolvedToken,
                             CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                             bool                    tailCall,
                             bool                    readonlyCall,
                             const BYTE*             delegateCreateStart,
                             const BYTE*             codeAddr,
                             CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
{
    DWORD             mflags;
    CORINFO_SIG_INFO* sig      = nullptr;
    unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
                                    // this counter is used to keep track of how many items have been
                                    // virtually popped

    // for calli, VerifyOrReturn that this is not a virtual method
    if (opcode == CEE_CALLI)
    {
        Verify(false, "Calli not verifiable");
        return;
    }

    //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
    mflags = callInfo->verMethodFlags;

    sig = &callInfo->verSig;

    if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
    }

    // opcode specific check
    unsigned methodClassFlgs = callInfo->classFlags;
    switch (opcode)
    {
        case CEE_CALLVIRT:
            // cannot do callvirt on valuetypes
            VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
            VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
            break;

        case CEE_NEWOBJ:
        {
            assert(!tailCall); // Importer should not allow this
            VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
                           "newobj must be on instance");

            if (methodClassFlgs & CORINFO_FLG_DELEGATE)
            {
                VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
                typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
                typeInfo tiDeclaredFtn =
                    verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();

                VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");

                assert(popCount == 0);
                typeInfo tiActualObj = impStackTop(1).seTypeInfo;
                typeInfo tiActualFtn = impStackTop(0).seTypeInfo;

                VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
                VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
                VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
                               "delegate object type mismatch");

                CORINFO_CLASS_HANDLE objTypeHandle =
                    tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();

                // the method signature must be compatible with the delegate's invoke method

                // check that for virtual functions, the type of the object used to get the
                // ftn ptr is the same as the type of the object passed to the delegate ctor.
                // since this is a bit of work to determine in general, we pattern match stylized
                // code sequences

                // the delegate creation code check, which used to be done later, is now done here
                // so we can read delegateMethodRef directly from
                // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
                // we then use it in our call to isCompatibleDelegate().

                mdMemberRef delegateMethodRef = mdMemberRefNil;
                VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
                               "must create delegates with certain IL");

                CORINFO_RESOLVED_TOKEN delegateResolvedToken;
                delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
                delegateResolvedToken.tokenScope   = info.compScopeHnd;
                delegateResolvedToken.token        = delegateMethodRef;
                delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
                info.compCompHnd->resolveToken(&delegateResolvedToken);

                CORINFO_CALL_INFO delegateCallInfo;
                eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS,
                              &delegateCallInfo);

                bool isOpenDelegate = false;
                VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
                                                                      tiActualFtn.GetMethod(), pResolvedToken->hClass,
                                                                      &isOpenDelegate),
                               "function incompatible with delegate");

                // check the constraints on the target method
                VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
                               "delegate target has unsatisfied class constraints");
                VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
                                                                            tiActualFtn.GetMethod()),
                               "delegate target has unsatisfied method constraints");

                // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
                // for additional verification rules for delegates
                CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
                DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
                if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
                {
                    if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0))
                    {
                        VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) ||
                                           verIsBoxedValueType(tiActualObj),
                                       "The 'this' parameter to the call must be either the calling method's "
                                       "'this' parameter or "
                                       "a boxed value type.");
                    }
                }

                if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
                {
                    bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;

                    Verify(targetIsStatic || !isOpenDelegate,
                           "Unverifiable creation of an open instance delegate for a protected member.");

                    CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
                                                                ? info.compClassHnd
                                                                : tiActualObj.GetClassHandleForObjRef();

                    // In the case of protected methods, it is a requirement that the 'this'
                    // pointer be a subclass of the current context.  Perform this check.
                    Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                           "Accessing protected method through wrong type.");
                }
                goto DONE_ARGS;
            }
        }
            // fall thru to default checks
            FALLTHROUGH;
        default:
            VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
    }
    VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
                   "can only newobj a delegate constructor");

    // check compatibility of the arguments
    unsigned int argCount;
    argCount = sig->numArgs;
    CORINFO_ARG_LIST_HANDLE args;
    args = sig->args;
    while (argCount--)
    {
        typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;

        typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
        VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");

        args = info.compCompHnd->getArgNext(args);
    }

DONE_ARGS:

    // update popCount
    popCount += sig->numArgs;

    // check for 'this' which is on non-static methods, not called via NEWOBJ
    CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
    if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
    {
        typeInfo tiThis = impStackTop(popCount).seTypeInfo;
        popCount++;

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis.IsType(TI_REF))
        {
            instanceClassHnd = tiThis.GetClassHandleForObjRef();
        }

        // Check type compatibility of the this argument
        typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();
        }

        // If this is a call to the base class .ctor, set thisPtr Init for
        // this block.
        if (mflags & CORINFO_FLG_CONSTRUCTOR)
        {
            if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
                verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
            {
                assert(verCurrentState.thisInitialized != TIS_Bottom); // This should never be the case just from the
                                                                       // logic of the verifier.
                VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
                               "Call to base class constructor when 'this' is possibly initialized");
                // Otherwise, 'this' is now initialized.
                verCurrentState.thisInitialized = TIS_Init;
                tiThis.SetInitialisedObjRef();
            }
            else
            {
                // We allow direct calls to value type constructors
                // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
                // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
                VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
                               "Bad call to a constructor");
            }
        }

        if (pConstrainedResolvedToken != nullptr)
        {
            VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");

            typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);

            // We just dereference this and test for equality
            tiThis.DereferenceByRef();
            VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
                           "this type mismatch with constrained type operand");

            // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
            tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
        }

        // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
        if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
        {
            tiDeclaredThis.SetIsReadonlyByRef();
        }

        VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");

        if (tiThis.IsByRef())
        {
            // Find the actual type where the method exists (as opposed to what is declared
            // in the metadata). This is to prevent passing a byref as the "this" argument
            // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.

            CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
            VerifyOrReturn(eeIsValueClass(actualClassHnd),
                           "Call to base type of valuetype (which is never a valuetype)");
        }

        // Rules for non-virtual call to a non-final virtual method:

        // Define:
        // The "this" pointer is considered to be "possibly written" if
        //   1. Its address have been taken (LDARGA 0) anywhere in the method.
        //   (or)
        //   2. It has been stored to (STARG.0) anywhere in the method.

        // A non-virtual call to a non-final virtual method is only allowed if
        //   1. The this pointer passed to the callee is an instance of a boxed value type.
        //   (or)
        //   2. The this pointer passed to the callee is the current method's this pointer.
        //      (and) The current method's this pointer is not "possibly written".

        // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
        // virtual methods.  (Luckily this does affect .ctors, since they are not virtual).
        // This is stronger that is strictly needed, but implementing a laxer rule is significantly
        // hard and more error prone.

        if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0))
        {
            VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis),
                           "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
                           "a boxed value type.");
        }
    }

    // check any constraints on the callee's class and type parameters
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
                   "method has unsatisfied class constraints");
    VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
                   "method has unsatisfied method constraints");

    if (mflags & CORINFO_FLG_PROTECTED)
    {
        VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                       "Can't access protected method");
    }

    // Get the exact view of the signature for an array method
    if (sig->retType != CORINFO_TYPE_VOID)
    {
        eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
    }

    // "readonly." prefixed calls only allowed for the Address operation on arrays.
    // The methods supported by array types are under the control of the EE
    // so we can trust that only the Address operation returns a byref.
    if (readonlyCall)
    {
        typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
        VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
                       "unexpected use of readonly prefix");
    }

    // Verify the tailcall
    if (tailCall)
    {
        verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
    }
}

/*****************************************************************************
 *  Checks that a delegate creation is done using the following pattern:
 *     dup
 *     ldvirtftn targetMemberRef
 *  OR
 *     ldftn targetMemberRef
 *
 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
 *  not in this basic block)
 *
 *  targetMemberRef is read from the code sequence.
 *  targetMemberRef is validated iff verificationNeeded.
 */
bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
                                        const BYTE* codeAddr,
                                        mdMemberRef& targetMemberRef)
{
    if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
    {
        // ldftn is a 2-byte opcode; the token follows it.
        targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
        return true;
    }
    else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
    {
        // dup (1 byte) + ldvirtftn (2 bytes) precede the token.
        targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
        return true;
    }

    return false;
}

// verVerifySTIND: verify an indirect store through 'tiTo' of 'value' for the
// given st*ind instruction type; returns the dereferenced pointer type.
typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
{
    Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
    typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
    typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
    if (!tiCompatibleWith(value, normPtrVal, true))
    {
        // NOTE(review): tiCompatibleWith is evaluated a second time here (it is known
        // to be false) so that Verify reports the failure; left as-is deliberately.
        Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
        compUnsafeCastUsed = true;
    }
    return ptrVal;
}

// verVerifyLDIND: verify an indirect load through 'ptr' for the given ld*ind
// instruction type; returns the dereferenced pointer type. Records unverifiable
// patterns in compUnsafeCastUsed rather than failing outright.
typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
{
    assert(!instrType.IsStruct());

    typeInfo ptrVal;
    if (ptr.IsByRef())
    {
        ptrVal = DereferenceByRef(ptr);
        if (instrType.IsObjRef() && !ptrVal.IsObjRef())
        {
            Verify(false, "bad pointer");
            compUnsafeCastUsed = true;
        }
        else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
        {
            Verify(false, "pointer not consistent with instr");
            compUnsafeCastUsed = true;
        }
    }
    else
    {
        Verify(false, "pointer not byref");
        compUnsafeCastUsed = true;
    }

    return ptrVal;
}

// Verify that the field is used properly.  'tiThis' is NULL for statics,
// 'fieldFlags' is the fields attributes, and mutator is true if it is a
// ld*flda or a st*fld.
// 'enclosingClass' is given if we are accessing a field in some specific type.
void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
                              const CORINFO_FIELD_INFO& fieldInfo,
                              const typeInfo*           tiThis,
                              bool                      mutator,
                              bool                      allowPlainStructAsThis)
{
    CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
    unsigned             fieldFlags     = fieldInfo.fieldFlags;
    CORINFO_CLASS_HANDLE instanceClass =
        info.compClassHnd; // for statics, we imagine the instance is the current class.

    bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);

    if (mutator)
    {
        // RVA-based statics live in read-only metadata sections; writes are disallowed.
        Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA bases static");
        if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
        {
            // initonly fields may only be set from the matching constructor
            // (instance ctor for instance fields, cctor for static fields).
            Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
                       info.compIsStatic == isStaticField,
                   "bad use of initonly field (set or address taken)");
        }
    }

    if (tiThis == nullptr)
    {
        Verify(isStaticField, "used static opcode with non-static field");
    }
    else
    {
        typeInfo tThis = *tiThis;

        if (allowPlainStructAsThis && tThis.IsValueClass())
        {
            tThis.MakeByRef();
        }

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis->IsType(TI_REF))
        {
            instanceClass = tiThis->GetClassHandleForObjRef();
        }

        // Note that even if the field is static, we require that the this pointer
        // satisfy the same constraints as a non-static field.  This happens to
        // be simpler and seems reasonable
        typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();

            // we allow read-only tThis, on any field access (even stores!), because if the
            // class implementor wants to prohibit stores he should make the field private.
            // we do this by setting the read-only bit on the type we compare tThis to.
            tiDeclaredThis.SetIsReadonlyByRef();
        }
        else if (verTrackObjCtorInitState && tThis.IsThisPtr())
        {
            // Any field access is legal on "uninitialized" this pointers.
            // The easiest way to implement this is to simply set the
            // initialized bit for the duration of the type check on the
            // field access only. It does not change the state of the "this"
            // for the function as a whole. Note that the "tThis" is a copy
            // of the original "this" type (*tiThis) passed in.
            tThis.SetInitialisedObjRef();
        }

        Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
    }

    // Presently the JIT does not check that we don't store or take the address of init-only fields
    // since we cannot guarantee their immutability and it is not a security issue.

    // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
                   "field has unsatisfied class constraints");
    if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
    {
        Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
               "Accessing protected method through wrong type.");
    }
}

// Verify the operand types of a comparison/branch opcode: both operands must
// come from the same verification category (numeric, object ref, byref, or
// method pointer), and only (in)equality comparisons are legal on object refs.
void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
{
    if (tiOp1.IsNumberType())
    {
#ifdef TARGET_64BIT
        Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
#else  // TARGET_64BIT
        // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
        // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
        // but compatible, since we can coalesce native int with int32 (see section III.1.5).
        Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
#endif // !TARGET_64BIT
    }
    else if (tiOp1.IsObjRef())
    {
        // Object references only support (in)equality-style comparisons.
        switch (opcode)
        {
            case CEE_BEQ_S:
            case CEE_BEQ:
            case CEE_BNE_UN_S:
            case CEE_BNE_UN:
            case CEE_CEQ:
            case CEE_CGT_UN:
                break;
            default:
                Verify(false, "Cond not allowed on object types");
        }
        Verify(tiOp2.IsObjRef(), "Cond type mismatch");
    }
    else if (tiOp1.IsByRef())
    {
        Verify(tiOp2.IsByRef(), "Cond type mismatch");
    }
    else
    {
        Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
    }
}

// When tracking constructor init state, verify that the 'this' pointer of the
// current method has been initialized before it is used.
void Compiler::verVerifyThisPtrInitialised()
{
    if (verTrackObjCtorInitState)
    {
        Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
    }
}

// Returns true if a constructor call from 'context' to 'target' counts as
// initializing the 'this' pointer (a chained or base-class constructor call).
bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
{
    // Either target == context, in this case calling an alternate .ctor
    // Or target is the immediate parent of context
    return ((target == context) || (target == info.compCompHnd->getParentType(context)));
}

// Import a 'ldvirtftn' as a tree computing the target address of the virtual
// method, using the most specific helper available (generic virtual method
// lookup, ReadyToRun helpers, or the general virtual-function-pointer helper).
// Returns nullptr only when backing out of an inline (compDonotInline()).
GenTree* Compiler::impImportLdvirtftn(GenTree*                thisPtr,
                                      CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                      CORINFO_CALL_INFO*      pCallInfo)
{
    if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
    {
        NO_WAY("Virtual call to a function added via EnC is not supported");
    }

    // CoreRT generic virtual method
    if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
    {
        GenTree* runtimeMethodHandle =
            impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod);
        return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
                                   gtNewCallArgs(thisPtr, runtimeMethodHandle));
    }

#ifdef FEATURE_READYTORUN
    if (opts.IsReadyToRun())
    {
        if (!pCallInfo->exactContextNeedsRuntimeLookup)
        {
            GenTreeCall* call =
                gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr));

            call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);

            return call;
        }

        // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
        if (IsTargetAbi(CORINFO_CORERT_ABI))
        {
            GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);

            return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
                                             gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
        }
    }
#endif

    // Get the exact descriptor for the static callsite
    GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
    if (exactTypeDesc == nullptr)
    { // compDonotInline()
        return nullptr;
    }

    GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
    if (exactMethodDesc == nullptr)
    { // compDonotInline()
        return nullptr;
    }

    GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc);

    helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs);

    helpArgs = gtPrependNewCallArg(thisPtr, helpArgs);

    // Call helper function.  This gets the target address of the final destination callsite.

    return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}

//------------------------------------------------------------------------
// impBoxPatternMatch: match and import common box idioms
//
// Arguments:
//   pResolvedToken - resolved token from the box operation
//   codeAddr - position in IL stream after the box instruction
//   codeEndp - end of IL stream
//
// Return Value:
//   Number of IL bytes matched and imported, -1 otherwise
//
// Notes:
//   pResolvedToken is known to be a value type; ref type boxing
//   is handled in the CEE_BOX clause.
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                 const BYTE*             codeAddr,
                                 const BYTE*             codeEndp,
                                 bool                    makeInlineObservation)
{
    if (codeAddr >= codeEndp)
    {
        return -1;
    }

    switch (codeAddr[0])
    {
        case CEE_UNBOX_ANY:
            // box + unbox.any
            if (codeAddr + 1 + sizeof(mdToken) <= codeEndp)
            {
                if (makeInlineObservation)
                {
                    compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
                    return 1 + sizeof(mdToken);
                }

                CORINFO_RESOLVED_TOKEN unboxResolvedToken;

                impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);

                // See if the resolved tokens describe types that are equal.
                const TypeCompareState compare =
                    info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass);

                // If so, box/unbox.any is a nop.
                if (compare == TypeCompareState::Must)
                {
                    JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
                    // Skip the next unbox.any instruction
                    return 1 + sizeof(mdToken);
                }
            }
            break;

        case CEE_BRTRUE:
        case CEE_BRTRUE_S:
        case CEE_BRFALSE:
        case CEE_BRFALSE_S:
            // box + br_true/false: the boxed value-type reference is never null,
            // so the branch condition folds to a constant (plus any needed null check).
            if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
            {
                if (makeInlineObservation)
                {
                    compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
                    return 0;
                }

                GenTree* const treeToBox        = impStackTop().val;
                bool           canOptimize      = true;
                GenTree*       treeToNullcheck  = nullptr;

                // Can the thing being boxed cause a side effect?
                if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0)
                {
                    // Is this a side effect we can replicate cheaply?
                    if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) &&
                        treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND))
                    {
                        // Yes, we just need to perform a null check if needed.
                        GenTree* const addr = treeToBox->AsOp()->gtGetOp1();
                        if (fgAddrCouldBeNull(addr))
                        {
                            treeToNullcheck = addr;
                        }
                    }
                    else
                    {
                        canOptimize = false;
                    }
                }

                if (canOptimize)
                {
                    CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
                    if (boxHelper == CORINFO_HELP_BOX)
                    {
                        JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n",
                                treeToNullcheck == nullptr ? "" : "nullcheck+");

                        impPopStack();

                        GenTree* result = gtNewIconNode(1);

                        if (treeToNullcheck != nullptr)
                        {
                            GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB);
                            result             = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result);
                        }

                        impPushOnStack(result, typeInfo(TI_INT));
                        return 0;
                    }
                }
            }
            break;

        case CEE_ISINST:
            if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp)
            {
                const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken);

                switch (nextCodeAddr[0])
                {
                    // box + isinst + br_true/false: fold the type test at jit time
                    // when the cast outcome is statically known.
                    case CEE_BRTRUE:
                    case CEE_BRTRUE_S:
                    case CEE_BRFALSE:
                    case CEE_BRFALSE_S:
                        if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp)
                        {
                            if (makeInlineObservation)
                            {
                                compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
                                return 1 + sizeof(mdToken);
                            }

                            if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT))
                            {
                                CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
                                if (boxHelper == CORINFO_HELP_BOX)
                                {
                                    CORINFO_RESOLVED_TOKEN isInstResolvedToken;

                                    impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);

                                    TypeCompareState castResult =
                                        info.compCompHnd->compareTypesForCast(pResolvedToken->hClass,
                                                                              isInstResolvedToken.hClass);
                                    if (castResult != TypeCompareState::May)
                                    {
                                        JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n");
                                        impPopStack();

                                        impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0),
                                                       typeInfo(TI_INT));

                                        // Skip the next isinst instruction
                                        return 1 + sizeof(mdToken);
                                    }
                                }
                                else if (boxHelper == CORINFO_HELP_BOX_NULLABLE)
                                {
                                    // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or
                                    // "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to
                                    // the target type.
                                    CORINFO_RESOLVED_TOKEN isInstResolvedToken;
                                    impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);

                                    CORINFO_CLASS_HANDLE nullableCls   = pResolvedToken->hClass;
                                    CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls);

                                    TypeCompareState castResult =
                                        info.compCompHnd->compareTypesForCast(underlyingCls,
                                                                              isInstResolvedToken.hClass);

                                    if (castResult == TypeCompareState::Must)
                                    {
                                        // hasValue is the first field of Nullable<T>; read it
                                        // directly instead of boxing.
                                        const CORINFO_FIELD_HANDLE hasValueFldHnd =
                                            info.compCompHnd->getFieldInClass(nullableCls, 0);

                                        assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0);
                                        assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr),
                                                       "hasValue"));

                                        GenTree* objToBox = impPopStack().val;

                                        // Spill struct to get its address (to access hasValue field)
                                        objToBox =
                                            impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true);

                                        impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0),
                                                       typeInfo(TI_INT));

                                        JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n");
                                        return 1 + sizeof(mdToken);
                                    }
                                    else if (castResult == TypeCompareState::MustNot)
                                    {
                                        impPopStack();
                                        impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT));
                                        JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n");
                                        return 1 + sizeof(mdToken);
                                    }
                                }
                            }
                        }
                        break;

                    // box + isinst + unbox.any
                    case CEE_UNBOX_ANY:
                        if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp)
                        {
                            if (makeInlineObservation)
                            {
                                compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
                                return 2 + sizeof(mdToken) * 2;
                            }

                            // See if the resolved tokens in box, isinst and unbox.any describe types that are equal.
                            CORINFO_RESOLVED_TOKEN isinstResolvedToken = {};
                            impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class);

                            if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass,
                                                                          pResolvedToken->hClass) ==
                                TypeCompareState::Must)
                            {
                                CORINFO_RESOLVED_TOKEN unboxResolvedToken = {};
                                impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);

                                // If so, box + isinst + unbox.any is a nop.
                                if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass,
                                                                              pResolvedToken->hClass) ==
                                    TypeCompareState::Must)
                                {
                                    JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n");
                                    return 2 + sizeof(mdToken) * 2;
                                }
                            }
                        }
                        break;
                }
            }
            break;

        default:
            break;
    }

    return -1;
}

//------------------------------------------------------------------------
// impImportAndPushBox: build and import a value-type box
//
// Arguments:
//   pResolvedToken - resolved token from the box operation
//
// Return Value:
//   None.
//
// Side Effects:
//   The value to be boxed is popped from the stack, and a tree for
//   the boxed value is pushed. This method may create upstream
//   statements, spill side effecting trees, and create new temps.
//
//   If importing an inlinee, we may also discover the inline must
//   fail. If so there is no new value pushed on the stack. Callers
//   should use CompDoNotInline after calling this method to see if
//   ongoing importation should be aborted.
//
// Notes:
//   Boxing of ref classes results in the same value as the value on
//   the top of the stack, so is handled inline in impImportBlockCode
//   for the CEE_BOX case. Only value or primitive type boxes make it
//   here.
//
//   Boxing for nullable types is done via a helper call; boxing
//   of other value types is expanded inline or handled via helper
//   call, depending on the jit's codegen mode.
//
//   When the jit is operating in size and time constrained modes,
//   using a helper call here can save jit time and code size. But it
//   also may inhibit cleanup optimizations that could have also had a
//   even greater benefit effect on code size and jit time. An optimal
//   strategy may need to peek ahead and see if it is easy to tell how
//   the box is being used. For now, we defer.

void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
    // Spill any special side effects
    impSpillSpecialSideEff();

    // Get the expression to box from the stack.
    GenTree*             op1       = nullptr;
    GenTree*             op2       = nullptr;
    StackEntry           se        = impPopStack();
    CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
    GenTree*             exprToBox = se.val;

    // Look at what helper we should use.
    CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);

    // Determine what expansion to prefer.
    //
    // In size/time/debuggable constrained modes, the helper call
    // expansion for box is generally smaller and is preferred, unless
    // the value to box is a struct that comes from a call. In that
    // case the call can construct its return value directly into the
    // box payload, saving possibly some up-front zeroing.
    //
    // Currently primitive type boxes always get inline expanded. We may
    // want to do the same for small structs if they don't come from
    // calls and don't have GC pointers, since explicitly copying such
    // structs is cheap.
    JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
    bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
    bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled();
    bool expandInline    = canExpandInline && !optForSize;

    if (expandInline)
    {
        JITDUMP(" inline allocate/copy sequence\n");

        // we are doing 'normal' boxing.  This means that we can inline the box operation
        // Box(expr) gets morphed into
        // temp = new(clsHnd)
        // cpobj(temp+4, expr, clsHnd)
        // push temp
        // The code paths differ slightly below for structs and primitives because
        // "cpobj" differs in these cases.  In one case you get
        //    impAssignStructPtr(temp+4, expr, clsHnd)
        // and the other you get
        //    *(temp+4) = expr

        if (opts.OptimizationDisabled())
        {
            // For minopts/debug code, try and minimize the total number
            // of box temps by reusing an existing temp when possible.
            if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
            {
                impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
            }
        }
        else
        {
            // When optimizing, use a new temp for each box operation
            // since we then know the exact class of the box temp.
            impBoxTemp                       = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
            lvaTable[impBoxTemp].lvType      = TYP_REF;
            lvaTable[impBoxTemp].lvSingleDef = 1;
            JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
            const bool isExact = true;
            lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
        }

        // needs to stay in use until this box expression is appended
        // some other node.  We approximate this by keeping it alive until
        // the opcode stack becomes empty
        impBoxTempInUse = true;

        // Remember the current last statement in case we need to move
        // a range of statements to ensure the box temp is initialized
        // before it's used.
        //
        Statement* const cursor = impLastStmt;

        const bool useParent = false;
        op1                  = gtNewAllocObjNode(pResolvedToken, useParent);
        if (op1 == nullptr)
        {
            // If we fail to create the newobj node, we must be inlining
            // and have run across a type we can't describe.
            //
            assert(compDonotInline());
            return;
        }

        // Remember that this basic block contains 'new' of an object,
        // and so does this method
        //
        compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
        optMethodFlags |= OMF_HAS_NEWOBJ;

        // Assign the boxed object to the box temp.
        //
        GenTree*   asg     = gtNewTempAssign(impBoxTemp, op1);
        Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

        // If the exprToBox is a call that returns its value via a ret buf arg,
        // move the assignment statement(s) before the call (which must be a top level tree).
        //
        // We do this because impAssignStructPtr (invoked below) will
        // back-substitute into a call when it sees a GT_RET_EXPR and the call
        // has a hidden buffer pointer, So we need to reorder things to avoid
        // creating out-of-sequence IR.
        //
        if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR))
        {
            GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall();

            if (call->HasRetBufArg())
            {
                JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call));

                // Walk back through the statements in this block, looking for the one
                // that has this call as the root node.
                //
                // Because gtNewTempAssign (above) may have added statements that
                // feed into the actual assignment we need to move this set of added
                // statements as a group.
                //
                // Note boxed allocations are side-effect free (no com or finalizer) so
                // our only worries here are (correctness) not overlapping the box temp
                // lifetime and (perf) stretching the temp lifetime across the inlinee
                // body.
                //
                // Since this is an inline candidate, we must be optimizing, and so we have
                // a unique box temp per call. So no worries about overlap.
                //
                assert(!opts.OptimizationDisabled());

                // Lifetime stretching could be addressed with some extra cleverness--sinking
                // the allocation back down to just before the copy, once we figure out
                // where the copy is. We defer for now.
                //
                Statement* insertBeforeStmt = cursor;
                noway_assert(insertBeforeStmt != nullptr);
                while (true)
                {
                    if (insertBeforeStmt->GetRootNode() == call)
                    {
                        break;
                    }

                    // If we've searched all the statements in the block and failed to
                    // find the call, then something's wrong.
                    //
                    noway_assert(insertBeforeStmt != impStmtList);

                    insertBeforeStmt = insertBeforeStmt->GetPrevStmt();
                }

                // Found the call. Move the statements comprising the assignment.
                //
                JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(),
                        asgStmt->GetID(), insertBeforeStmt->GetID());
                assert(asgStmt == impLastStmt);
                do
                {
                    Statement* movingStmt = impExtractLastStmt();
                    impInsertStmtBefore(movingStmt, insertBeforeStmt);
                    insertBeforeStmt = movingStmt;
                } while (impLastStmt != cursor);
            }
        }

        // Create a pointer to the box payload in op1.
        //
        op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
        op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
        op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);

        // Copy from the exprToBox to the box payload.
        //
        if (varTypeIsStruct(exprToBox))
        {
            assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
            op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
        }
        else
        {
            var_types lclTyp = exprToBox->TypeGet();
            if (lclTyp == TYP_BYREF)
            {
                lclTyp = TYP_I_IMPL;
            }
            CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
            if (impIsPrimitive(jitType))
            {
                lclTyp = JITtype2varType(jitType);
            }

            var_types srcTyp = exprToBox->TypeGet();
            var_types dstTyp = lclTyp;

            // We allow float <-> double mismatches and implicit truncation for small types.
            assert((genActualType(srcTyp) == genActualType(dstTyp)) ||
                   (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp)));

            // Note regarding small types.
            // We are going to store to the box here via an indirection, so the cast added below is
            // redundant, since the store has an implicit truncation semantic. The reason we still
            // add this cast is so that the code which deals with GT_BOX optimizations does not have
            // to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is
            // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities).
            if (srcTyp != dstTyp)
            {
                exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp);
            }

            op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox);
        }

        // Spill eval stack to flush out any pending side effects.
        impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));

        // Set up this copy as a second assignment.
        Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

        op1 = gtNewLclvNode(impBoxTemp, TYP_REF);

        // Record that this is a "box" node and keep track of the matching parts.
        op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);

        // If it is a value class, mark the "box" node.  We can use this information
        // to optimise several cases:
        //    "box(x) == null" --> false
        //    "(box(x)).CallAnInterfaceMethod(...)" -->  "(&x).CallAValueTypeMethod"
        //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"

        op1->gtFlags |= GTF_BOX_VALUE;
        assert(op1->IsBoxedValue());
        assert(asg->gtOper == GT_ASG);
    }
    else
    {
        // Don't optimize, just call the helper and be done with it.
        JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
        assert(operCls != nullptr);

        // Ensure that the value class is restored
        op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */);
        if (op2 == nullptr)
        {
            // We must be backing out of an inline.
            assert(compDonotInline());
            return;
        }

        GenTreeCall::Use* args =
            gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
        op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
    }

    /* Push the result back on the stack, */
    /* even if clsHnd is a value class we want the TI_REF */
    typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
    impPushOnStack(op1, tiRetVal);
}

//------------------------------------------------------------------------
// impImportNewObjArray: Build and import `new` of multi-dimensional array
//
// Arguments:
//    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
//                     by a call to CEEInfo::resolveToken().
//    pCallInfo - The CORINFO_CALL_INFO that has been initialized
//                by a call to CEEInfo::getCallInfo().
//
// Assumptions:
//    The multi-dimensional array constructor arguments (array dimensions) are
//    pushed on the IL stack on entry to this method.
//
// Notes:
//    Multi-dimensional array constructors are imported as calls to a JIT
//    helper, not as regular calls.

void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
    if (classHandle == nullptr)
    { // compDonotInline()
        return;
    }

    assert(pCallInfo->sig.numArgs);

    GenTree* node;

    // Reuse the temp used to pass the array dimensions to avoid bloating
    // the stack frame in case there are multiple calls to multi-dim array
    // constructors within a single method.
    if (lvaNewObjArrayArgs == BAD_VAR_NUM)
    {
        lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
        lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
        lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
    }

    // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
    // for our call to CORINFO_HELP_NEW_MDARR.
    lvaTable[lvaNewObjArrayArgs].lvExactSize =
        max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));

    // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
    // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
    // to one allocation at a time.
    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));

    //
    // The arguments of the CORINFO_HELP_NEW_MDARR helper are:
    //  - Array class handle
    //  - Number of dimension arguments
    //  - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
    //

    node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
    node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);

    // Pop dimension arguments from the stack one at a time and store it
    // into lvaNewObjArrayArgs temp.
    for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
    {
        GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);

        GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
        dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
        dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
        dest = gtNewOperNode(GT_IND, TYP_INT, dest);

        node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
    }

    GenTreeCall::Use* args = gtNewCallArgs(node);

    // pass number of arguments to the helper
    args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args);

    args = gtPrependNewCallArg(classHandle, args);

    node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);

    for (GenTreeCall::Use& use : node->AsCall()->Args())
    {
        node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
    }

    node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;

    // Remember that this basic block contains 'new' of a md array
    compCurBB->bbFlags |= BBF_HAS_NEWARRAY;

    impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
}

GenTree*
Compiler::impTransformThis(GenTree*                thisPtr,
                           CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                           CORINFO_THIS_TRANSFORM  transform)
{
    switch (transform)
    {
        case CORINFO_DEREF_THIS:
        {
            GenTree* obj = thisPtr;

            // This does a LDIND on the obj, which should be a byref. pointing to a ref
            impBashVarAddrsToI(obj);
            assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
            CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);

            obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);

            // ldind could point anywhere, example a boxed class static int
            obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);

            return obj;
        }

        case CORINFO_BOX_THIS:
        {
            // Constraint calls where there might be no
            // unboxed entry point require us to implement the call via helper.
            // These only occur when a possible target of the call
            // may have inherited an implementation of an interface
            // method from System.Object or System.ValueType.  The EE does not provide us with
            // "unboxed" versions of these methods.

            GenTree* obj = thisPtr;

            assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
            obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
            obj->gtFlags |= GTF_EXCEPT;

            CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
            if (impIsPrimitive(jitTyp))
            {
                if (obj->OperIsBlk())
                {
                    obj->ChangeOperUnchecked(GT_IND);

                    // Obj could point anywhere, example a boxed class static int
                    obj->gtFlags |= GTF_IND_TGTANYWHERE;
                    obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers
                }

                obj->gtType = JITtype2varType(jitTyp);
                assert(varTypeIsArithmetic(obj->gtType));
            }

            // This pushes on the dereferenced byref
            // This is then used immediately to box.
            impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());

            // This pops off the byref-to-a-value-type remaining on the stack and
            // replaces it with a boxed object.
            // This is then used as the object to the virtual call immediately below.
            impImportAndPushBox(pConstrainedResolvedToken);
            if (compDonotInline())
            {
                return nullptr;
            }

            obj = impPopStack().val;
            return obj;
        }

        case CORINFO_NO_THIS_TRANSFORM:
        default:
            return thisPtr;
    }
}

//------------------------------------------------------------------------
// impCanPInvokeInline: check whether PInvoke inlining should enabled in current method.
//
// Return Value:
//    true if PInvoke inlining should be enabled in current method, false otherwise
//
// Notes:
//    Checks a number of ambient conditions where we could pinvoke but choose not to

bool Compiler::impCanPInvokeInline()
{
    return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
           (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
        ;
}

//------------------------------------------------------------------------
// impCanPInvokeInlineCallSite: basic legality checks using information
// from a call to see if the call qualifies as an inline pinvoke.
//
// Arguments:
//    block      - block containing the call, or for inlinees, block
//                 containing the call being inlined
//
// Return Value:
//    true if this call can legally qualify as an inline pinvoke, false otherwise
//
// Notes:
//    For runtimes that support exception handling interop there are
//    restrictions on using inline pinvoke in handler regions.
//
//    * We have to disable pinvoke inlining inside of filters because
//    in case the main execution (i.e. in the try block) is inside
//    unmanaged code, we cannot reuse the inlined stub (we still need
//    the original state until we are in the catch handler)
//
//    * We disable pinvoke inlining inside handlers since the GSCookie
//    is in the inlined Frame (see
//    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
//    this would not protect framelets/return-address of handlers.
//
// These restrictions are currently also in place for CoreCLR but
// can be relaxed when coreclr/#8459 is addressed.
bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
{
    // Inline pinvoke is never legal inside an EH handler region (see notes above).
    if (block->hasHndIndex())
    {
        return false;
    }

    // The remaining limitations do not apply to CoreRT
    if (IsTargetAbi(CORINFO_CORERT_ABI))
    {
        return true;
    }

#ifdef TARGET_64BIT
    // On 64-bit platforms, we disable pinvoke inlining inside of try regions.
    // Note that this could be needed on other architectures too, but we
    // haven't done enough investigation to know for sure at this point.
    //
    // Here is the comment from JIT64 explaining why:
    //   [VSWhidbey: 611015] - because the jitted code links in the
    //   Frame (instead of the stub) we rely on the Frame not being
    //   'active' until inside the stub. This normally happens by the
    //   stub setting the return address pointer in the Frame object
    //   inside the stub. On a normal return, the return address
    //   pointer is zeroed out so the Frame can be safely re-used, but
    //   if an exception occurs, nobody zeros out the return address
    //   pointer. Thus if we re-used the Frame object, it would go
    //   'active' as soon as we link it into the Frame chain.
    //
    //   Technically we only need to disable PInvoke inlining if we're
    //   in a handler or if we're in a try body with a catch or
    //   filter/except where other non-handler code in this method
    //   might run and try to re-use the dirty Frame object.
    //
    //   A desktop test case where this seems to matter is
    //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
    if (block->hasTryIndex())
    {
        // This does not apply to the raw pinvoke call that is inside the pinvoke
        // ILStub. In this case, we have to inline the raw pinvoke call into the stub,
        // otherwise we would end up with a stub that recursively calls itself, and end
        // up with a stack overflow.
        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
        {
            return true;
        }

        return false;
    }
#endif // TARGET_64BIT

    return true;
}

//------------------------------------------------------------------------
// impCheckForPInvokeCall: examine call to see if it is a pinvoke and if so
// if it can be expressed as an inline pinvoke.
//
// Arguments:
//    call - tree for the call
//    methHnd - handle for the method being called (may be null)
//    sig - signature of the method being called
//    mflags - method flags for the method being called
//    block - block containing the call, or for inlinees, block
//            containing the call being inlined
//
// Notes:
//   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
//
//   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
//   call passes a combination of legality and profitability checks.
//
//   If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition

void Compiler::impCheckForPInvokeCall(
    GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
{
    CorInfoCallConvExtension unmanagedCallConv;

    // If VM flagged it as Pinvoke, flag the call node accordingly
    if ((mflags & CORINFO_FLG_PINVOKE) != 0)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
    }

    bool suppressGCTransition = false;
    if (methHnd)
    {
        // Direct call: only pinvoke methods proceed.
        if ((mflags & CORINFO_FLG_PINVOKE) == 0)
        {
            return;
        }

        unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition);
    }
    else
    {
        // Indirect call (CALLI): managed calling conventions are not pinvokes.
        if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG)
        {
            return;
        }

        unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition);

        assert(!call->gtCallCookie);
    }

    if (suppressGCTransition)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION;
    }

    // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT,
    // return here without inlining the native call.
    if (unmanagedCallConv == CorInfoCallConvExtension::Managed ||
        unmanagedCallConv == CorInfoCallConvExtension::Fastcall ||
        unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction)
    {
        return;
    }
    optNativeCallCount++;

    if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
    {
        // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been
        // converted to regular method calls earlier using convertPInvokeCalliToCall.

        // PInvoke CALLI in IL stubs must be inlined
    }
    else
    {
        // Check legality
        if (!impCanPInvokeInlineCallSite(block))
        {
            return;
        }

        // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
        // inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
        if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
        {
            if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
            {
                // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite
                // recursive calls to the stub.
            }
            else
            {
                if (!impCanPInvokeInline())
                {
                    return;
                }

                // Size-speed tradeoff: don't use inline pinvoke at rarely
                // executed call sites. The non-inline version is more
                // compact.
                if (block->isRunRarely())
                {
                    return;
                }
            }
        }

        // The expensive check should be last
        if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
        {
            return;
        }
    }

    JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName));

    call->gtFlags |= GTF_CALL_UNMANAGED;
    call->unmgdCallConv = unmanagedCallConv;
    if (!call->IsSuppressGCTransition())
    {
        info.compUnmanagedCallCountWithGCTransition++;
    }

    // AMD64 convention is same for native and managed
    if (unmanagedCallConv == CorInfoCallConvExtension::C ||
        unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction)
    {
        call->gtFlags |= GTF_CALL_POP_ARGS;
    }

    if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
    }
}

//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for a CALLI (indirect call),
// popping the function pointer off the importer stack.
//
// Arguments:
//    sig - signature of the method being called
//    di  - debug info for the call
//
// Return Value:
//    The new indirect-call node.
//
GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di)
{
    var_types callRetTyp = JITtype2varType(sig->retType);

    /* The function pointer is on top of the stack - It may be a
     * complex expression. As it is evaluated after the args,
     * it may cause registered args to be spilled. Simply spill it.
     */

    // Ignore this trivial case.
    if (impStackTop().val->gtOper != GT_LCL_VAR)
    {
        impSpillStackEntry(verCurrentState.esStackDepth - 1,
                           BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
    }

    /* Get the function pointer */

    GenTree* fptr = impPopStack().val;

    // The function pointer is typically a sized to match the target pointer size
    // However, stubgen IL optimization can change LDC.I8 to LDC.I4
    // See ILCodeStream::LowerOpcode
    assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);

#ifdef DEBUG
    // This temporary must never be converted to a double in stress mode,
    // because that can introduce a call to the cast helper after the
    // arguments have already been evaluated.
    if (fptr->OperGet() == GT_LCL_VAR)
    {
        lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1;
    }
#endif

    /* Create the call node */

    GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di);

    call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
#ifdef UNIX_X86_ABI
    call->gtFlags &= ~GTF_CALL_POP_ARGS;
#endif

    return call;
}

/*****************************************************************************/

// impPopArgsForUnmanagedCall: pop the arguments for an unmanaged (pinvoke)
// call off the importer stack and attach them to the call, spilling
// out-of-order side effects and retyping GC byrefs to native int.
void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
{
    assert(call->gtFlags & GTF_CALL_UNMANAGED);

    /* Since we push the arguments in reverse order (i.e. right -> left)
     * spill any side effects from the stack
     *
     * OBS: If there is only one side effect we do not need to spill it
     *      thus we have to spill all side-effects except last one
     */

    unsigned lastLevelWithSideEffects = UINT_MAX;

    unsigned argsToReverse = sig->numArgs;

    // For "thiscall", the first argument goes in a register. Since its
    // order does not need to be changed, we do not need to spill it

    if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
    {
        assert(argsToReverse);
        argsToReverse--;
    }

#ifndef TARGET_X86
    // Don't reverse args on ARM or x64 - first four args always placed in regs in order
    argsToReverse = 0;
#endif

    for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
    {
        if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
        {
            assert(lastLevelWithSideEffects == UINT_MAX);

            impSpillStackEntry(level,
                               BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
        }
        else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
        {
            if (lastLevelWithSideEffects != UINT_MAX)
            {
                /* We had a previous side effect - must spill it */
                impSpillStackEntry(lastLevelWithSideEffects,
                                   BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));

                /* Record the level for the current side effect in case we will spill it */
                lastLevelWithSideEffects = level;
            }
            else
            {
                /* This is the first side effect encountered - record its level */

                lastLevelWithSideEffects = level;
            }
        }
    }

    /* The argument list is now "clean" - no out-of-order side effects
     * Pop the argument list in reverse order */

    GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse);
    call->AsCall()->gtCallArgs = args;

    if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
    {
        GenTree* thisPtr = args->GetNode();
        impBashVarAddrsToI(thisPtr);
        assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
    }

    for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args))
    {
        GenTree* arg = argUse.GetNode();
        call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT;

        // We should not be passing gc typed args to an unmanaged call.
        if (varTypeIsGC(arg->TypeGet()))
        {
            // Tolerate byrefs by retyping to native int.
            //
            // This is needed or we'll generate inconsistent GC info
            // for this arg at the call site (gc info says byref,
            // pinvoke sig says native int).
            //
            if (arg->TypeGet() == TYP_BYREF)
            {
                arg->ChangeType(TYP_I_IMPL);
            }
            else
            {
                assert(!"*** invalid IL: gc ref passed to unmanaged call");
            }
        }
    }
}

//------------------------------------------------------------------------
// impInitClass: Build a node to initialize the class before accessing the
//               field if necessary
//
// Arguments:
//    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
//                     by a call to CEEInfo::resolveToken().
//
// Return Value: If needed, a pointer to the node that will perform the class
//               initialization. Otherwise, nullptr.
//
GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
    CorInfoInitClassResult initClassResult =
        info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);

    if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
    {
        return nullptr;
    }
    bool runtimeLookup;

    GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);

    if (node == nullptr)
    {
        assert(compDonotInline());
        return nullptr;
    }

    if (runtimeLookup)
    {
        node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node));
    }
    else
    {
        // Call the shared non gc static helper, as its the fastest
        node = fgGetSharedCCtor(pResolvedToken->hClass);
    }

    return node;
}

//------------------------------------------------------------------------
// impImportStaticReadOnlyField: fold a static readonly field at its known
// address into a constant node of the requested type.
//
// Arguments:
//    fldAddr - address of the field's data
//    lclTyp  - type of the field
//
// Return Value:
//    A constant node holding the field's current value, or nullptr for an
//    unexpected type (asserts in DEBUG).
//
GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
{
    GenTree* op1 = nullptr;

#if defined(DEBUG)
    // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it
    // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of
    // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32),
    // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change.
    // Instead, simply fix up the data here for future use.

    // This variable should be the largest size element, with the largest alignment requirement,
    // and the native C++ compiler should guarantee sufficient alignment.
    double aligned_data   = 0.0;
    void*  p_aligned_data = &aligned_data;
    if (info.compMethodSuperPMIIndex != -1)
    {
        switch (lclTyp)
        {
            case TYP_BOOL:
            case TYP_BYTE:
            case TYP_UBYTE:
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool));
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char));
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char));
                // No alignment necessary for byte.
                break;

            case TYP_SHORT:
            case TYP_USHORT:
                static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short));
                static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short));
                if ((size_t)fldAddr % sizeof(unsigned __int16) != 0)
                {
                    *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr);
                    fldAddr                            = p_aligned_data;
                }
                break;

            case TYP_INT:
            case TYP_UINT:
            case TYP_FLOAT:
                static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int));
                static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int));
                static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float));
                if ((size_t)fldAddr % sizeof(unsigned __int32) != 0)
                {
                    *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr);
                    fldAddr                            = p_aligned_data;
                }
                break;

            case TYP_LONG:
            case TYP_ULONG:
            case TYP_DOUBLE:
                static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64));
                static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double));
                if ((size_t)fldAddr % sizeof(unsigned __int64) != 0)
                {
                    *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr);
                    fldAddr                            = p_aligned_data;
                }
                break;

            default:
                assert(!"Unexpected lclTyp");
                break;
        }
    }
#endif // DEBUG

    switch (lclTyp)
    {
        int     ival;
        __int64 lval;
        double  dval;

        case TYP_BOOL:
            ival = *((bool*)fldAddr);
            goto IVAL_COMMON;

        case TYP_BYTE:
            ival = *((signed char*)fldAddr);
            goto IVAL_COMMON;

        case TYP_UBYTE:
            ival = *((unsigned char*)fldAddr);
            goto IVAL_COMMON;

        case TYP_SHORT:
            ival = *((short*)fldAddr);
            goto IVAL_COMMON;

        case TYP_USHORT:
            ival = *((unsigned short*)fldAddr);
            goto IVAL_COMMON;

        case TYP_UINT:
        case TYP_INT:
            ival = *((int*)fldAddr);
        IVAL_COMMON:
            op1 = gtNewIconNode(ival);
            break;

        case TYP_LONG:
        case TYP_ULONG:
            lval = *((__int64*)fldAddr);
            op1  = gtNewLconNode(lval);
            break;

        case TYP_FLOAT:
            dval        = *((float*)fldAddr);
            op1         = gtNewDconNode(dval);
            op1->gtType = TYP_FLOAT;
            break;

        case TYP_DOUBLE:
            dval = *((double*)fldAddr);
            op1  = gtNewDconNode(dval);
            break;

        default:
            assert(!"Unexpected lclTyp");
            break;
    }

    return op1;
}

// impImportStaticFieldAccess: build the tree for a static field address or value.
GenTree*
Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                     CORINFO_ACCESS_FLAGS    access,
                                     CORINFO_FIELD_INFO*     pFieldInfo,
                                     var_types               lclTyp)
{
    // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're
    // mapped to the same ".data" declaration). That said, such mappings only appear to be
    // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always
    // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can
    // be mutable, but the only current producer of such images, the C++/CLI compiler, does
    // not appear to support mapping different fields to the same address. So we will say
    // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in
    // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and
    // "NotAField FldSeq".

    // For statics that are not "boxed", the initial address tree will contain the field sequence.
    // For those that are, we will attach it later, when adding the indirection for the box, since
    // that tree will represent the true address.
    bool          isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0;
    FieldSeqNode* innerFldSeq =
        !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField();

    GenTree* op1;

    switch (pFieldInfo->fieldAccessor)
    {
        case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
        {
            assert(!compIsForInlining());

            // We first call a special helper to get the statics base pointer
            op1 = impParentClassTokenToHandle(pResolvedToken);

            // compIsForInlining() is false so we should not get NULL here
            assert(op1 != nullptr);

            var_types type = TYP_BYREF;

            switch (pFieldInfo->helper)
            {
                case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
                    type = TYP_I_IMPL;
                    break;
                case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
                case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
                case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
                    break;
                default:
                    assert(!"unknown generic statics helper");
                    break;
            }

            op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1));
            op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
        }
        break;

        case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
        {
#ifdef FEATURE_READYTORUN
            if (opts.IsReadyToRun())
            {
                GenTreeFlags callFlags = GTF_EMPTY;

                if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
                {
                    callFlags |= GTF_CALL_HOISTABLE;
                }

                op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
                op1->gtFlags |= callFlags;

                op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup);
            }
            else
#endif
            {
                op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
            }

            op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
            break;
        }

        case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
        {
#ifdef FEATURE_READYTORUN
            assert(opts.IsReadyToRun());
            assert(!compIsForInlining());
            CORINFO_LOOKUP_KIND kind;
            info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);
            assert(kind.needsRuntimeLookup);

            GenTree*          ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
            GenTreeCall::Use* args    = gtNewCallArgs(ctxTree);

            GenTreeFlags callFlags = GTF_EMPTY;

            if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
            {
                callFlags |= GTF_CALL_HOISTABLE;
            }
            var_types type = TYP_BYREF;
            op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
            op1->gtFlags |= callFlags;

            op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup);
            op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq));
#else
            unreached();
#endif // FEATURE_READYTORUN
        }
        break;

        default:
        {
            // Do we need the address of a static field?
            //
            if (access & CORINFO_ACCESS_ADDRESS)
            {
                void** pFldAddr = nullptr;
                void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);

                // We should always be able to access this static's address directly.
                assert(pFldAddr == nullptr);

                // Create the address node.
                GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL;
                op1                     = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq);
#ifdef DEBUG
                op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal;
#endif

                if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
                {
                    op1->gtFlags |= GTF_ICON_INITCLASS;
                }
            }
            else // We need the value of a static field
            {
                // In future, it may be better to just create the right tree here instead of folding it later.
                op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);

                if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
                {
                    op1->gtFlags |= GTF_FLD_INITCLASS;
                }

                if (isBoxedStatic)
                {
                    FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);

                    op1->ChangeType(TYP_REF); // points at boxed object
                    op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq));

                    if (varTypeIsStruct(lclTyp))
                    {
                        // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
                        op1 = gtNewObjNode(pFieldInfo->structType, op1);
                    }
                    else
                    {
                        op1 = gtNewOperNode(GT_IND, lclTyp, op1);
                        op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
                    }
                }

                return op1;
            }
            break;
        }
    }

    if (isBoxedStatic)
    {
        FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);

        op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
        op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
        op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq));
    }

    if (!(access & CORINFO_ACCESS_ADDRESS))
    {
        if (varTypeIsStruct(lclTyp))
        {
            // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
            op1 = gtNewObjNode(pFieldInfo->structType, op1);
        }
        else
        {
            op1 = gtNewOperNode(GT_IND, lclTyp, op1);
            op1->gtFlags |= GTF_GLOB_REF;
        }
    }

    return op1;
}

// In general try to call this before most of the verification work. Most people expect the access
// exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns
// out if you can't access something we also think that you're unverifiable for other reasons.
void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
    if (result != CORINFO_ACCESS_ALLOWED)
    {
        impHandleAccessAllowedInternal(result, helperCall);
    }
}

// impHandleAccessAllowedInternal: handle a non-allowed access result, either
// by throwing now (import-only) or by inserting a runtime helper call.
void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
{
    switch (result)
    {
        case CORINFO_ACCESS_ALLOWED:
            break;
        case CORINFO_ACCESS_ILLEGAL:
            // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
            // method is verifiable. Otherwise, delay the exception to runtime.
            if (compIsForImportOnly())
            {
                info.compCompHnd->ThrowExceptionForHelper(helperCall);
            }
            else
            {
                impInsertHelperCall(helperCall);
            }
            break;
    }
}

// impInsertHelperCall: append a call to the EE-described helper (with its
// described arguments) to the current statement list.
void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
{
    // Construct the argument list
    GenTreeCall::Use* args = nullptr;
    assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
    for (unsigned i = helperInfo->numArgs; i > 0; --i)
    {
        const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
        GenTree*                  currentArg = nullptr;
        switch (helperArg.argType)
        {
            case CORINFO_HELPER_ARG_TYPE_Field:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass(helperArg.fieldHandle));
                currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Method:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
                currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Class:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
                currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Module:
                currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Const:
                currentArg = gtNewIconNode(helperArg.constant);
                break;
            default:
                NO_WAY("Illegal helper arg type");
        }
        args = gtPrependNewCallArg(currentArg, args);
    }

    /* TODO-Review:
     * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
     * Also, consider sticking this in the first basic block.
     */
    GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
    impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}

//------------------------------------------------------------------------
// impTailCallRetTypeCompatible: Checks whether the return types of caller
//    and callee are compatible so that the callee can be tail called.
//
// Arguments:
//    allowWidening -- whether to allow implicit widening by the callee.
//                     For instance, allowing int32 -> int16 tailcalls.
//                     The managed calling convention allows this, but
//                     we don't want explicit tailcalls to depend on this
//                     detail of the managed calling convention.
//    callerRetType -- the caller's return type
//    callerRetTypeClass - the caller's return struct type
//    callerCallConv -- calling convention of the caller
//    calleeRetType -- the callee's return type
//    calleeRetTypeClass - the callee return struct type
//    calleeCallConv -- calling convention of the callee
//
// Returns:
//    True if the tailcall types are compatible.
//
// Remarks:
//    Note that here we don't check compatibility in IL Verifier sense, but on the
//    lines of return types getting returned in the same return register.
bool Compiler::impTailCallRetTypeCompatible(bool                     allowWidening,
                                            var_types                callerRetType,
                                            CORINFO_CLASS_HANDLE     callerRetTypeClass,
                                            CorInfoCallConvExtension callerCallConv,
                                            var_types                calleeRetType,
                                            CORINFO_CLASS_HANDLE     calleeRetTypeClass,
                                            CorInfoCallConvExtension calleeCallConv)
{
    // Early out if the types are the same.
    if (callerRetType == calleeRetType)
    {
        return true;
    }

    // For integral types the managed calling convention dictates that callee
    // will widen the return value to 4 bytes, so we can allow implicit widening
    // in managed to managed tailcalls when dealing with <= 4 bytes.
    bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) &&
                     (calleeCallConv == CorInfoCallConvExtension::Managed);

    if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) &&
        (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType)))
    {
        return true;
    }

    // If the class handles are the same and not null, the return types are compatible.
    if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
    {
        return true;
    }

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    // Jit64 compat:
    if (callerRetType == TYP_VOID)
    {
        // This needs to be allowed to support the following IL pattern that Jit64 allows:
        //     tail.call
        //     pop
        //     ret
        //
        // Note that the above IL pattern is not valid as per IL verification rules.
        // Therefore, only full trust code can take advantage of this pattern.
        return true;
    }

    // These checks return true if the return value type sizes are the same and
    // get returned in the same return register i.e. caller doesn't need to normalize
    // return value. Some of the tail calls permitted by below checks would have
    // been rejected by IL Verifier before we reached here. Therefore, only full
    // trust code can make those tail calls.
    unsigned callerRetTypeSize     = 0;
    unsigned calleeRetTypeSize     = 0;
    bool     isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass,
                                                               &callerRetTypeSize, true, info.compIsVarArgs,
                                                               callerCallConv);
    bool     isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass,
                                                               &calleeRetTypeSize, true, info.compIsVarArgs,
                                                               calleeCallConv);

    if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
    {
        return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
    }
#endif // TARGET_AMD64 || TARGET_ARM64

    return false;
}

/********************************************************************************
 *
 * Returns true if the current opcode and the opcodes following it correspond
 * to a supported tail call IL pattern.
 *
 */
bool Compiler::impIsTailCallILPattern(
    bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive)
{
    // Bail out if the current opcode is not a call.
    if (!impOpcodeIsCallOpcode(curOpcode))
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // If shared ret tail opt is not enabled, we will enable
    // it for recursive methods.
    if (isRecursive)
#endif
    {
        // we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the
        // sequence. Make sure we don't go past the end of the IL however.
        codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
    }

    // Bail out if there is no next opcode after call
    if (codeAddrOfNextOpcode >= codeEnd)
    {
        return false;
    }

    OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);

    return (nextOpcode == CEE_RET);
}

/*****************************************************************************
 *
 * Determine whether the call could be converted to an implicit tail call
 *
 */
bool Compiler::impIsImplicitTailCallCandidate(
    OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
{
#if FEATURE_TAILCALL_OPT
    if (!opts.compTailCallOpt)
    {
        return false;
    }

    if (opts.OptimizationDisabled())
    {
        return false;
    }

    // must not be tail prefixed
    if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // the block containing call is marked as BBJ_RETURN
    // We allow shared ret tail call optimization on recursive calls even under
    // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
    if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
        return false;
#endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN

    // must be call+ret or call+pop+ret
    if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
    {
        return false;
    }

    return true;
#else
    return false;
#endif // FEATURE_TAILCALL_OPT
}

//------------------------------------------------------------------------
// impImportCall: import a call-inspiring opcode
//
// Arguments:
//    opcode                    - opcode that inspires the call
//    pResolvedToken            - resolved token for the call target
//    pConstrainedResolvedToken - resolved constraint token (or nullptr)
//    newObjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
//    prefixFlags               - IL prefix flags for the call
//    callInfo                  - EE supplied info for the call
//    rawILOffset               - IL offset of the opcode, used for guarded devirtualization.
//
// Returns:
//    Type of the call's return value.
//    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
//    However we can't assert for this here yet because there are cases we miss. See issue #13272.
//
//
// Notes:
//    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
//
//    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
//    uninitialized object.
#ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif var_types Compiler::impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset) { assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI); // The current statement DI may not refer to the exact call, but for calls // we wish to be able to attach the exact IL instruction to get "return // value" support in the debugger, so create one with the exact IL offset. DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. 
if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? 
impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
const char* modName; const char* className; const char* methodName; if ((className = eeGetClassName(clsHnd)) != nullptr && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 && (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0) { return impImportJitTestLabelMark(sig->numArgs); } #endif // DEBUG // <NICE> Factor this into getCallInfo </NICE> bool isSpecialIntrinsic = false; if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_INTRINSIC)) != 0) { const bool isTailCall = canTailCall && (tailCallFlags != 0); call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall, isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic); if (compDonotInline()) { return TYP_UNDEF; } if (call != nullptr) { #ifdef FEATURE_READYTORUN if (call->OperGet() == GT_INTRINSIC) { if (opts.IsReadyToRun()) { noway_assert(callInfo->kind == CORINFO_CALL); call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup; } else { call->AsIntrinsic()->gtEntryPoint.addr = nullptr; call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE; } } #endif bIntrinsicImported = true; goto DONE_CALL; } } #ifdef FEATURE_SIMD if (featureSIMD) { call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token); if (call != nullptr) { bIntrinsicImported = true; goto DONE_CALL; } } #endif // FEATURE_SIMD if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG) { BADCODE("Bad calling convention"); } //------------------------------------------------------------------------- // Construct the call node // // Work out 
what sort of call we're making. // Dispense with virtual calls implemented via LDVIRTFTN immediately. constraintCallThisTransform = callInfo->thisTransform; exactContextHnd = callInfo->contextHandle; exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup; switch (callInfo->kind) { case CORINFO_VIRTUALCALL_STUB: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); if (callInfo->stubLookup.lookupKind.needsRuntimeLookup) { if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE); return TYP_UNDEF; } GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd); assert(!compDonotInline()); // This is the rough code to set up an indirect stub call assert(stubAddr != nullptr); // The stubAddr may be a // complex expression. As it is evaluated after the args, // it may cause registered args to be spilled. Simply spill it. unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr); call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; #ifdef TARGET_X86 // No tailcalls allowed for these yet... 
canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; #endif } else { // The stub address is known at compile time call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr; call->gtFlags |= GTF_CALL_VIRT_STUB; assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE && callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE); if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT; } } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is sometimes needed for ready to run to handle // non-virtual <-> virtual changes between versions if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } } #endif break; } case CORINFO_VIRTUALCALL_VTABLE: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->gtFlags |= GTF_CALL_VIRT_VTABLE; // Should we expand virtual call targets early for this method? // if (opts.compExpandCallsEarly) { // Mark this method to expand the virtual call target early in fgMorpgCall call->AsCall()->SetExpandedEarly(); } break; } case CORINFO_VIRTUALCALL_LDVIRTFTN: { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN); return TYP_UNDEF; } assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); // OK, We've been told to call via LDVIRTFTN, so just // take the call now.... 
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig); GenTree* thisPtr = impPopStack().val; thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform); assert(thisPtr != nullptr); // Clone the (possibly transformed) "this" pointer GenTree* thisPtrCopy; thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("LDVIRTFTN this pointer")); GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo); assert(fptr != nullptr); thisPtr = nullptr; // can't reuse it // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node call = gtNewIndCallNode(fptr, callRetTyp, args, di); call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { // CoreRT generic virtual method: need to handle potential fat function pointers addFatPointerCandidate(call->AsCall()); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is needed for ready to run to handle // non-virtual <-> virtual changes between versions call->gtFlags |= GTF_CALL_NULLCHECK; } #endif // Sine we are jumping over some code, check that its OK to skip that code assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); goto DONE; } case CORINFO_CALL: { // This is for a non-virtual, non-interface etc. call call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); // We remove the nullcheck for the GetType call intrinsic. // TODO-CQ: JIT64 does not introduce the null check for many more helper calls // and intrinsics. 
if (callInfo->nullInstanceCheck && !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType))) { call->gtFlags |= GTF_CALL_NULLCHECK; } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup); } #endif break; } case CORINFO_CALL_CODE_POINTER: { // The EE has asked us to call by computing a code pointer and then doing an // indirect call. This is because a runtime lookup is required to get the code entry point. // These calls always follow a uniform calling convention, i.e. no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. 
/* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. 
There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is varargs"; } #endif /* Get the total number of arguments - this is already correct * for CALLI - for methods we have to get it from the call site */ if (opcode != CEE_CALLI) { #ifdef DEBUG unsigned numArgsDef = sig->numArgs; #endif eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); // For vararg calls we must be sure to load the return type of the // method actually being called, as well as the return types of the // specified in the vararg signature. With type equivalency, these types // may not be the same. if (sig->retTypeSigClass != actualMethodRetTypeSigClass) { if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass); } } assert(numArgsDef <= sig->numArgs); } /* We will have "cookie" as the last argument but we cannot push * it on the operand stack because we may overflow, so we append it * to the arg list next after we pop them */ } //--------------------------- Inline NDirect ------------------------------ // For inline cases we technically should look at both the current // block and the call site block (or just the latter if we've // fused the EH trees). 
However the block-related checks pertain to // EH and we currently won't inline a method with EH. So for // inlinees, just checking the call site block is sufficient. { // New lexical block here to avoid compilation errors because of GOTOs. BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block); } #ifdef UNIX_X86_ABI // On Unix x86 we use caller-cleaned convention. if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0) call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI if (call->gtFlags & GTF_CALL_UNMANAGED) { // We set up the unmanaged call by linking the frame, disabling GC, etc // This needs to be cleaned up on return. // In addition, native calls have different normalization rules than managed code // (managed calling convention always widens return values in the callee) if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is native"; } checkForSmallType = true; impPopArgsForUnmanagedCall(call, sig); goto DONE; } else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG)) { if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig)) { // Normally this only happens with inlining. // However, a generic method (or type) being NGENd into another module // can run into this issue as well. There's not an easy fall-back for NGEN // so instead we fallback to JIT. 
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // 
(b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { 
compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
if (impStackHeight() > 0) { typeInfo delegateTypeInfo = impStackTop().seTypeInfo; if (delegateTypeInfo.IsToken()) { ldftnToken = delegateTypeInfo.GetToken(); } } } //------------------------------------------------------------------------- // The main group of arguments args = impPopCallArgs(sig->numArgs, sig, extraArg); call->AsCall()->gtCallArgs = args; for (GenTreeCall::Use& use : call->AsCall()->Args()) { call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } //------------------------------------------------------------------------- // The "this" pointer if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr))) { GenTree* obj; if (opcode == CEE_NEWOBJ) { obj = newobjThis; } else { obj = impPopStack().val; obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform); if (compDonotInline()) { return TYP_UNDEF; } } // Store the "this" value in the call call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT; call->AsCall()->gtCallThisArg = gtNewCallArgs(obj); // Is this a virtual or interface call? if (call->AsCall()->IsVirtual()) { // only true object pointers can be virtual assert(obj->gtType == TYP_REF); // See if we can devirtualize. const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isLateDevirtualization = false; impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall, // Take care to pass raw IL offset here as the 'debug info' might be different for // inlinees. rawILOffset); // Devirtualization may change which method gets invoked. Update our local cache. 
// methHnd = callInfo->hMethod; } if (impIsThis(obj)) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS; } } //------------------------------------------------------------------------- // The "this" pointer for "newobj" if (opcode == CEE_NEWOBJ) { if (clsFlags & CORINFO_FLG_VAROBJSIZE) { assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately // This is a 'new' of a variable sized object, wher // the constructor is to return the object. In this case // the constructor claims to return VOID but we know it // actually returns the new object assert(callRetTyp == TYP_VOID); callRetTyp = TYP_REF; call->gtType = TYP_REF; impSpillSpecialSideEff(); impPushOnStack(call, typeInfo(TI_REF, clsHnd)); } else { if (clsFlags & CORINFO_FLG_DELEGATE) { // New inliner morph it in impImportCall. // This will allow us to inline the call to the delegate constructor. call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken); } if (!bIntrinsicImported) { #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // append the call node. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new onto the stack // This is a 'new' of a non-variable sized object. // Append the new node (op1) to the statement list, // and then push the local holding the value of this // new instruction on the stack. 
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). 
// // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall)) { if (isExplicitTailCall) { // In case of explicit tail calls, mark it so that it is not considered // for in-lining. 
call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); if (isStressTailCall) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL; JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call)); } } else { #if FEATURE_TAILCALL_OPT // Must be an implicit tail call. assert(isImplicitTailCall); // It is possible that a call node is both an inline candidate and marked // for opportunistic tail calling. In-lining happens before morhphing of // trees. If in-lining of an in-line candidate gets aborted for whatever // reason, it will survive to the morphing stage at which point it will be // transformed into a tail call after performing additional checks. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); #else //! FEATURE_TAILCALL_OPT NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls"); #endif // FEATURE_TAILCALL_OPT } // This might or might not turn into a tailcall. We do more // checks in morph. For explicit tailcalls we need more // information in morph in case it turns out to be a // helper-based tailcall. if (isExplicitTailCall) { assert(call->AsCall()->tailCallInfo == nullptr); call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo; switch (opcode) { case CEE_CALLI: call->AsCall()->tailCallInfo->SetCalli(sig); break; case CEE_CALLVIRT: call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken); break; default: call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken); break; } } } else { // canTailCall reported its reasons already canTailCall = false; JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call)); } } else { // If this assert fires it means that canTailCall was set to false without setting a reason! 
assert(szCanTailCallFailReason != nullptr); JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im", dspTreeID(call), szCanTailCallFailReason); info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL, szCanTailCallFailReason); } } // Note: we assume that small return types are already normalized by the managed callee // or by the pinvoke stub for calls to unmanaged code. if (!bIntrinsicImported) { // // Things needed to be checked when bIntrinsicImported is false. // assert(call->gtOper == GT_CALL); assert(callInfo != nullptr); if (compIsForInlining() && opcode == CEE_CALLVIRT) { GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode(); if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // Extra checks for tail calls and tail recursion. // // A tail recursive call is a potential loop from the current block to the start of the root method. // If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially // being in a loop. // // Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too // late. Currently this doesn't lead to problems. See GitHub issue 33529. // // OSR also needs to handle tail calls specially: // * block profiling in OSR methods needs to ensure probes happen before tail calls, not after. 
// * the root method entry must be imported if there's a recursive tail call or a potentially // inlineable tail call. // if ((tailCallFlags != 0) && canTailCall) { if (gtIsRecursiveCall(methHnd)) { assert(verCurrentState.esStackDepth == 0); BasicBlock* loopHead = nullptr; if (!compIsForInlining() && opts.IsOSR()) { // For root method OSR we may branch back to the actual method entry, // which is not fgFirstBB, and which we will need to import. assert(fgEntryBB != nullptr); loopHead = fgEntryBB; } else { // For normal jitting we may branch back to the firstBB; this // should already be imported. loopHead = fgFirstBB; } JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB " as having a backward branch.\n", dspTreeID(call), loopHead->bbNum, compCurBB->bbNum); fgMarkBackwardJump(loopHead, compCurBB); } // We only do these OSR checks in the root method because: // * If we fail to import the root method entry when importing the root method, we can't go back // and import it during inlining. So instead of checking jsut for recursive tail calls we also // have to check for anything that might introduce a recursive tail call. // * We only instrument root method blocks in OSR methods, // if (opts.IsOSR() && !compIsForInlining()) { // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // if (compCurBB->bbJumpKind != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); assert(successor->bbJumpKind == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } // If this call might eventually turn into a loop back to method entry, make sure we // import the method entry. 
// assert(call->IsCall()); GenTreeCall* const actualCall = call->AsCall(); const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() || actualCall->IsGuardedDevirtualizationCandidate(); // Only schedule importation if we're not currently importing. // if (mustImportEntryBlock && (compCurBB != fgEntryBB)) { JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB " for importation\n", dspTreeID(call), fgEntryBB->bbNum); impImportBlockPending(fgEntryBB); } } } if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0) { assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER); addFatPointerCandidate(call->AsCall()); } DONE_CALL: // Push or append the result of the call if (callRetTyp == TYP_VOID) { if (opcode == CEE_NEWOBJ) { // we actually did push something, so don't spill the thing we just pushed. assert(verCurrentState.esStackDepth > 0); impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); } else { impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else { impSpillSpecialSideEff(); if (clsFlags & CORINFO_FLG_ARRAY) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass); tiRetVal.NormaliseForStack(); // The CEE_READONLY prefix modifies the verification semantics of an Address // operation on an array type. 
if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef()) { tiRetVal.SetIsReadonlyByRef(); } if (call->IsCall()) { // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call) GenTreeCall* origCall = call->AsCall(); const bool isFatPointerCandidate = origCall->IsFatPointerCandidate(); const bool isInlineCandidate = origCall->IsInlineCandidate(); const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate(); if (varTypeIsStruct(callRetTyp)) { // Need to treat all "split tree" cases here, not just inline candidates call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass); } // TODO: consider handling fatcalli cases this way too...? if (isInlineCandidate || isGuardedDevirtualizationCandidate) { // We should not have made any adjustments in impFixupCallStructReturn // as we defer those until we know the fate of the call. assert(call == origCall); assert(opts.OptEnabled(CLFLG_INLINING)); assert(!isFatPointerCandidate); // We should not try to inline calli. // Make the call its own tree (spill the stack if needed). // Do not consume the debug info here. This is particularly // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); // Link the retExpr to the call so if necessary we can manipulate it later. origCall->gtInlineCandidateInfo->retExpr = retExpr; // Propagate retExpr as the placeholder for the call. call = retExpr; } else { // If the call is virtual, and has a generics context, and is not going to have a class probe, // record the context for possible use during late devirt. 
// // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose // important devirtualizations, we'll want to allow both a class probe and a captured context. // if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) && (origCall->gtClassProfileCandidateInfo == nullptr)) { JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall)); origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT; LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo; info->exactContextHnd = exactContextHnd; origCall->gtLateDevirtualizationInfo = info; } if (isFatPointerCandidate) { // fatPointer candidates should be in statements of the form call() or var = call(). // Such form allows to find statements with fat calls without walking through whole trees // and removes problems with cutting trees. assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. 
bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
    // (Intentionally disabled; kept for reference.)
    // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
    //     callInfoCache.uncacheCallInfo();

    return callRetTyp;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

//------------------------------------------------------------------------
// impMethodInfo_hasRetBuffArg: Does the given method use a hidden return
//    buffer argument for its (struct-like) return value?
//
// Arguments:
//    methInfo - method info; the return type is read from methInfo->args
//    callConv - calling convention extension used for the call
//
// Return Value:
//    true if the return type is a VALUECLASS or REFANY that, per
//    getReturnTypeForStruct, is returned by reference (SPK_ByReference);
//    false otherwise.
//
bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv)
{
    CorInfoType corType = methInfo->args.retType;

    if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
    {
        // We have some kind of STRUCT being returned. Ask the target-specific
        // classifier how it is passed back to the caller.
        structPassingKind howToReturnStruct = SPK_Unknown;

        var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct);

        if (howToReturnStruct == SPK_ByReference)
        {
            return true;
        }
    }

    return false;
}

#ifdef DEBUG
//------------------------------------------------------------------------
// impImportJitTestLabelMark: Import a JIT test-label marker "call".
//    Pops 1 or 2 constant integer operands (building a TestLabelAndNum),
//    then pops the expression to annotate, records the annotation in the
//    node test data, and pushes the expression back on the stack.
//
// Arguments:
//    numArgs - number of IL arguments to the marker (must be 2 or 3;
//              the last popped value is always the annotated expression)
//
// Return Value:
//    The type of the annotated tree.
//
var_types Compiler::impImportJitTestLabelMark(int numArgs)
{
    TestLabelAndNum tlAndN;
    if (numArgs == 2)
    {
        // Single constant operand: it is the test label; the number is 0.
        tlAndN.m_num  = 0;
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else if (numArgs == 3)
    {
        // Two constant operands: first pop the number, then the label.
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_num = val->AsIntConCommon()->IconValue();
        se           = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else
    {
        assert(false);
    }

    StackEntry expSe = impPopStack();
    GenTree*   node  = expSe.val;

    // There are a small number of special cases, where we actually put the annotation on a subnode.
    if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
    {
        // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
        // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
        // offset within the static field block whose address is returned by the helper call.
        // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
        assert(node->OperGet() == GT_IND);
        tlAndN.m_num -= 100;
        GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN);
        GetNodeTestData()->Remove(node);
    }
    else
    {
        GetNodeTestData()->Set(node, tlAndN);
    }

    // Leave the annotated expression on the stack for the rest of importation.
    impPushOnStack(node, expSe.seTypeInfo);
    return node->TypeGet();
}
#endif // DEBUG

//-----------------------------------------------------------------------------------
// impFixupCallStructReturn: For a call node that returns a struct do one of the following:
// - set the flag to indicate struct return via retbuf arg;
// - adjust the return type to a SIMD type if it is returned in 1 reg;
// - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate.
//
// Arguments:
//     call - GT_CALL GenTree node
//     retClsHnd - Class handle of return type of the call
//
// Return Value:
//     Returns new GenTree node after fixing struct return of call node
//
GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
{
    if (!varTypeIsStruct(call))
    {
        return call;
    }

    call->gtRetClsHnd = retClsHnd;

#if FEATURE_MULTIREG_RET
    call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv());
    const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
    const unsigned        retRegCount = retTypeDesc->GetReturnRegCount();
#else // !FEATURE_MULTIREG_RET
    const unsigned retRegCount = 1;
#endif // !FEATURE_MULTIREG_RET

    structPassingKind howToReturnStruct;
    var_types         returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);

    if (howToReturnStruct == SPK_ByReference)
    {
        // Returned via a hidden return buffer; mark the call and leave its type alone.
        assert(returnType == TYP_UNKNOWN);
        call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
        return call;
    }

    // Recognize SIMD types as we do for LCL_VARs,
    // note it could be not the ABI specific type, for example, on x64 we can set 'TYP_SIMD8`
    // for `System.Numerics.Vector2` here but lower will change it to long as ABI dictates.
    var_types simdReturnType = impNormStructType(call->gtRetClsHnd);
    if (simdReturnType != call->TypeGet())
    {
        assert(varTypeIsSIMD(simdReturnType));
        JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()),
                varTypeName(simdReturnType));
        call->ChangeType(simdReturnType);
    }

    if (retRegCount == 1)
    {
        // Single-register return needs no further fixup.
        return call;
    }

#if FEATURE_MULTIREG_RET
    assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs.
    assert(returnType == TYP_STRUCT);
    assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue));

#ifdef UNIX_AMD64_ABI
    // must be a struct returned in two registers
    assert(retRegCount == 2);
#else  // not UNIX_AMD64_ABI
    assert(retRegCount >= 2);
#endif // not UNIX_AMD64_ABI

    if (!call->CanTailCall() && !call->IsInlineCandidate())
    {
        // Force a call returning multi-reg struct to be always of the IR form
        //   tmp = call
        //
        // No need to assign a multi-reg struct to a local var if:
        //  - It is a tail call or
        //  - The call is marked for in-lining later
        return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
    }
    return call;
#endif // FEATURE_MULTIREG_RET
}

/*****************************************************************************
   For struct return values, re-type the operand in the case where the ABI
   does not use a struct return buffer
 */

//------------------------------------------------------------------------
// impFixupStructReturnType: For struct return values it sets appropriate flags in MULTIREG returns case;
//    in non-multiref case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`.
//
// Arguments:
//    op - the return value;
//    retClsHnd - the struct handle;
//    unmgdCallConv - the calling convention of the function that returns this struct.
//
// Return Value:
//    the result tree that does the return.
//
GenTree* Compiler::impFixupStructReturnType(GenTree*                 op,
                                            CORINFO_CLASS_HANDLE     retClsHnd,
                                            CorInfoCallConvExtension unmgdCallConv)
{
    assert(varTypeIsStruct(info.compRetType));
    assert(info.compRetBuffArg == BAD_VAR_NUM);

    JITDUMP("\nimpFixupStructReturnType: retyping\n");
    DISPTREE(op);

#if defined(TARGET_XARCH)

#if FEATURE_MULTIREG_RET
    // No VarArgs for CoreCLR on x64 Unix
    UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs));

    // Is method returning a multi-reg struct?
    if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
    {
        // In case of multi-reg struct return, we force IR to be one of the following:
        // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
        // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).

        if (op->gtOper == GT_LCL_VAR)
        {
            // Note that this is a multi-reg return.
            unsigned lclNum                  = op->AsLclVarCommon()->GetLclNum();
            lvaTable[lclNum].lvIsMultiRegRet = true;

            // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
            op->gtFlags |= GTF_DONT_CSE;

            return op;
        }

        if (op->gtOper == GT_CALL)
        {
            return op;
        }

        // Neither a lclvar nor a call: copy into a temp and return that.
        return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
    }
#else
    assert(info.compRetNativeType != TYP_STRUCT);
#endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86)

#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM)

    // On ARM, HFA (homogeneous floating-point aggregate) returns use multiple registers.
    if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
    {
        if (op->gtOper == GT_LCL_VAR)
        {
            // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
            unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
            // Make sure this struct type stays as struct so that we can return it as an HFA
            lvaTable[lclNum].lvIsMultiRegRet = true;

            // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
            op->gtFlags |= GTF_DONT_CSE;

            return op;
        }

        if (op->gtOper == GT_CALL)
        {
            if (op->AsCall()->IsVarargs())
            {
                // We cannot tail call because control needs to return to fixup the calling
                // convention for result return.
                op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
                op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
            }
            else
            {
                return op;
            }
        }
        return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
    }

#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64)

    // Is method returning a multi-reg struct?
    if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
    {
        if (op->gtOper == GT_LCL_VAR)
        {
            // This LCL_VAR stays as a TYP_STRUCT
            unsigned lclNum = op->AsLclVarCommon()->GetLclNum();

            if (!lvaIsImplicitByRefLocal(lclNum))
            {
                // Make sure this struct type is not struct promoted
                lvaTable[lclNum].lvIsMultiRegRet = true;

                // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
                op->gtFlags |= GTF_DONT_CSE;

                return op;
            }
        }

        if (op->gtOper == GT_CALL)
        {
            if (op->AsCall()->IsVarargs())
            {
                // We cannot tail call because control needs to return to fixup the calling
                // convention for result return.
                op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
                op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
            }
            else
            {
                return op;
            }
        }
        return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
    }

#endif // FEATURE_MULTIREG_RET && TARGET_ARM64

    if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this))
    {
        // Don't retype `struct` as a primitive type in `ret` instruction.
        return op;
    }

    // This must be one of those 'special' helpers that don't
    // really have a return buffer, but instead use it as a way
    // to keep the trees cleaner with fewer address-taken temps.
    //
    // Well now we have to materialize the return buffer as
    // an address-taken temp. Then we can return the temp.
    //
    // NOTE: this code assumes that since the call directly
    // feeds the return, then the call must be returning the
    // same structure/class/type.
    //
    unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));

    // No need to spill anything as we're about to return.
    impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);

    op = gtNewLclvNode(tmpNum, info.compRetType);
    JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n");
    DISPTREE(op);
    return op;
}

/*****************************************************************************
   CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
   finally-protected try. We find the finally blocks protecting the current
   offset (in order) by walking over the complete exception table and
   finding enclosing clauses. This assumes that the table is sorted.
   This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.

   If we are leaving a catch handler, we need to attach the
   CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.

   After this function, the BBJ_LEAVE block has been converted to a different type.
 */

#if !defined(FEATURE_EH_FUNCLETS)

void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nBefore import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG

    bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
    unsigned    blkAddr         = block->bbCodeOffs;
    BasicBlock* leaveTarget     = block->bbJumpDest;
    unsigned    jmpAddr         = leaveTarget->bbCodeOffs;

    // LEAVE clears the stack, spill side effects, and set stack to 0

    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
    verCurrentState.esStackDepth = 0;

    assert(block->bbJumpKind == BBJ_LEAVE);
    assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary

    BasicBlock* step         = DUMMY_INIT(NULL);
    unsigned    encFinallies = 0; // Number of enclosing finallies.
    GenTree*   endCatches  = NULL;
    Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally.

    unsigned  XTnum;
    EHblkDsc* HBtab;

    // Walk the complete EH table looking for clauses that enclose the LEAVE source
    // but not its target (i.e. clauses we are leaving).
    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        // Grab the handler offsets

        IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
        IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
        IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
        IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();

        /* Is this a catch-handler we are CEE_LEAVEing out of?
         * If so, we need to call CORINFO_HELP_ENDCATCH.
         */

        if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
        {
            // Can't CEE_LEAVE out of a finally/fault handler
            if (HBtab->HasFinallyOrFaultHandler())
                BADCODE("leave out of fault/finally block");

            // Create the call to CORINFO_HELP_ENDCATCH
            GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);

            // Make a list of all the currently pending endCatches
            if (endCatches)
                endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
            else
                endCatches = endCatch;

#ifdef DEBUG
            if (verbose)
            {
                printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
                       "CORINFO_HELP_ENDCATCH\n",
                       block->bbNum, XTnum);
            }
#endif
        }
        else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
                 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
        {
            /* This is a finally-protected try we are jumping out of */

            /* If there are any pending endCatches, and we have already
               jumped out of a finally-protected try, then the endCatches
               have to be put in a block in an outer try for async
               exceptions to work correctly.
               Else, just use append to the original block */

            BasicBlock* callBlock;

            assert(!encFinallies ==
                   !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa

            if (encFinallies == 0)
            {
                // First finally being called: reuse the LEAVE block itself as the call block.
                assert(step == DUMMY_INIT(NULL));
                callBlock             = block;
                callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY

                if (endCatches)
                    impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
                           "block %s\n",
                           callBlock->dspToString());
                }
#endif
            }
            else
            {
                assert(step != DUMMY_INIT(NULL));

                /* Calling the finally block */
                callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
                assert(step->bbJumpKind == BBJ_ALWAYS);
                step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
                                              // finally in the chain)
                step->bbJumpDest->bbRefs++;

                /* The new block will inherit this block's weight */
                callBlock->inheritWeight(block);

#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
                           callBlock->dspToString());
                }
#endif

                // Append any pending endCatches after the end-of-local-finally statement.
                Statement* lastStmt;

                if (endCatches)
                {
                    lastStmt = gtNewStmt(endCatches);
                    endLFinStmt->SetNextStmt(lastStmt);
                    lastStmt->SetPrevStmt(endLFinStmt);
                }
                else
                {
                    lastStmt = endLFinStmt;
                }

                // note that this sets BBF_IMPORTED on the block
                impEndTreeList(callBlock, endLFinStmt, lastStmt);
            }

            // Create the step block the finally will return to; the next iteration (or the
            // code after the loop) decides where it jumps.
            step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
            /* The new block will inherit this block's weight */
            step->inheritWeight(block);
            step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;

#ifdef DEBUG
            if (verbose)
            {
                printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
                       step->dspToString());
            }
#endif

            unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
            assert(finallyNesting <= compHndBBtabCount);

            callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.

            GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
            endLFinStmt      = gtNewStmt(endLFin);

            endCatches = NULL;

            encFinallies++;

            invalidatePreds = true;
        }
    }

    /* Append any remaining endCatches, if any */

    assert(!encFinallies == !endLFinStmt);

    if (encFinallies == 0)
    {
        // No enclosing finallies: the LEAVE degenerates to a simple jump.
        assert(step == DUMMY_INIT(NULL));
        block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS

        if (endCatches)
            impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

#ifdef DEBUG
        if (verbose)
        {
            printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
                   "block %s\n",
                   block->dspToString());
        }
#endif
    }
    else
    {
        // If leaveTarget is the start of another try block, we want to make sure that
        // we do not insert finalStep into that try block. Hence, we find the enclosing
        // try block.
        unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);

        // Insert a new BB either in the try region indicated by tryIndex or
        // the handler region indicated by leaveTarget->bbHndIndex,
        // depending on which is the inner region.
        BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
        finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
        step->bbJumpDest = finalStep;

        /* The new block will inherit this block's weight */
        finalStep->inheritWeight(block);

#ifdef DEBUG
        if (verbose)
        {
            printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
                   finalStep->dspToString());
        }
#endif

        // The final step block carries the last end-of-local-finally statement plus any
        // endCatches still pending.
        Statement* lastStmt;

        if (endCatches)
        {
            lastStmt = gtNewStmt(endCatches);
            endLFinStmt->SetNextStmt(lastStmt);
            lastStmt->SetPrevStmt(endLFinStmt);
        }
        else
        {
            lastStmt = endLFinStmt;
        }

        impEndTreeList(finalStep, endLFinStmt, lastStmt);

        finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE

        // Queue up the jump target for importing

        impImportBlockPending(leaveTarget);

        invalidatePreds = true;
    }

    if (invalidatePreds && fgComputePredsDone)
    {
        JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
        fgRemovePreds();
    }

#ifdef DEBUG
    fgVerifyHandlerTab();

    if (verbose)
    {
        printf("\nAfter import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG
}

#else // FEATURE_EH_FUNCLETS

void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum,
               block->bbJumpDest->bbNum);
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG

    bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
    unsigned    blkAddr         = block->bbCodeOffs;
    BasicBlock* leaveTarget     = block->bbJumpDest;
    unsigned    jmpAddr         = leaveTarget->bbCodeOffs;

    // LEAVE clears the stack, spill side effects, and set stack to 0

    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
    verCurrentState.esStackDepth = 0;

    assert(block->bbJumpKind == BBJ_LEAVE);
    assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary

    BasicBlock* step = nullptr;
enum StepType { // No step type; step == NULL. ST_None, // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair? // That is, is step->bbJumpDest where a finally will return to? ST_FinallyReturn, // The step block is a catch return. ST_Catch, // The step block is in a "try", created as the target for a finally return or the target for a catch return. ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) 
/* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. 
(Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. 
This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if 
defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the // only predecessor are also considered orphans and attempted to be deleted. // // try { // .... // try // { // .... // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1 // } finally { } // } finally { } // OUTSIDE: // // In the above nested try-finally example, we create a step block (call it Bstep) which in branches to a block // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block. 
// Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To // work around this we will duplicate B0 (call it B0Dup) before reseting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. if (block->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; // Mark this block as // a) not referenced by any other block to make sure that it gets deleted // b) weight zero // c) prevent from being imported // d) as internal // e) as rarely run dupBlock->bbRefs = 0; dupBlock->bbWeight = BB_ZERO_WEIGHT; dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY; // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS // will be next to each other. fgInsertBBafter(block, dupBlock); #ifdef DEBUG if (verbose) { printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum); } #endif } #endif // FEATURE_EH_FUNCLETS block->bbJumpKind = BBJ_LEAVE; fgInitBBLookup(); block->bbJumpDest = fgLookupBB(jmpAddr); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The // reason we don't want to remove the block at this point is that if we call // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be // added and the linked list length will be different than fgBBcount. } /*****************************************************************************/ // Get the first non-prefix opcode. Used for verification of valid combinations // of prefixes and actual opcodes. 
OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
{
    while (codeAddr < codeEndp)
    {
        // Read one opcode byte; CEE_PREFIX1 (0xFE) introduces a two-byte opcode,
        // decoded below as (second byte + 256).
        OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
        codeAddr += sizeof(__int8);

        if (opcode == CEE_PREFIX1)
        {
            if (codeAddr >= codeEndp)
            {
                // Truncated two-byte opcode at the end of the IL stream.
                break;
            }
            opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
            codeAddr += sizeof(__int8);
        }

        switch (opcode)
        {
            // These are prefix opcodes: skip them (advancing past their operands via
            // opcodeSizes below) and keep scanning for the opcode they decorate.
            case CEE_UNALIGNED:
            case CEE_VOLATILE:
            case CEE_TAILCALL:
            case CEE_CONSTRAINED:
            case CEE_READONLY:
                break;
            default:
                return opcode;
        }

        codeAddr += opcodeSizes[opcode];
    }

    // Ran off the end of the IL stream without finding a non-prefix opcode.
    return CEE_ILLEGAL;
}

/*****************************************************************************/
// Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes.
// 'volatilePrefix' is true when validating a volatile. prefix, which additionally
// permits ldsfld/stsfld. Invalid combinations are reported via BADCODE.

void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
{
    OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);

    if (!(
            // The opcode values of all ldind and stind variants happen to be contiguous, except stind.i.
            ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
            (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
            (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
            // volatile. prefix is allowed with the ldsfld and stsfld
            (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
    {
        BADCODE("Invalid opcode for unaligned. or volatile. prefix");
    }
}

/*****************************************************************************/

#ifdef DEBUG

#undef RETURN // undef contracts RETURN macro

// Control-flow kind of each IL opcode (DEBUG only). The 'controlFlow' table is
// generated from opcode.def, one entry per opcode, in opcode order.
enum controlFlow_t
{
    NEXT,
    CALL,
    RETURN,
    THROW,
    BRANCH,
    COND_BRANCH,
    BREAK,
    PHI,
    META,
};

const static controlFlow_t controlFlow[] = {
#define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
#include "opcode.def"
#undef OPDEF
};

#endif // DEBUG

/*****************************************************************************
 *  Determine the result type of an arithmetic operation
 *  On 64-bit inserts upcasts when native int is mixed with int32
 */

var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
{
    var_types type = TYP_UNDEF;
    GenTree*  op1  = *pOp1;
    GenTree*  op2  = *pOp2;

    // Arithmetic operations are generally only allowed with
    // primitive types, but certain operations are allowed
    // with byrefs

    if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
    {
        if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
        {
            // byref1-byref2 => gives a native int
            type = TYP_I_IMPL;
        }
        else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
        {
            // [native] int - byref => gives a native int
            //
            // The reason is that it is possible, in managed C++,
            // to have a tree like this:
            //
            //              -
            //             / \.
            //            /   \.
            //           /     \.
            //          /       \.
            // const(h) int     addr byref
            //
            // <BUGNUM> VSW 318822 </BUGNUM>
            //
            // So here we decide to make the resulting type to be a native int.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
            if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
            {
                // insert an explicit upcast
                op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ?
                                                                                     TYP_U_IMPL : TYP_I_IMPL);
            }
#endif // TARGET_64BIT

            type = TYP_I_IMPL;
        }
        else
        {
            // byref - [native] int => gives a byref
            assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));

#ifdef TARGET_64BIT
            if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
            {
                // insert an explicit upcast
                op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
#endif // TARGET_64BIT

            type = TYP_BYREF;
        }
    }
    else if ((oper == GT_ADD) &&
             (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
    {
        // byref + [native] int => gives a byref
        // (or)
        // [native] int + byref => gives a byref

        // only one can be a byref : byref op byref not allowed
        assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
        assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));

#ifdef TARGET_64BIT
        if (genActualType(op2->TypeGet()) == TYP_BYREF)
        {
            // op2 is the byref, so widen op1 (the [native] int) to native int width if needed.
            if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
            {
                // insert an explicit upcast
                op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
        }
        else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
        {
            // insert an explicit upcast
            op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
        }
#endif // TARGET_64BIT

        type = TYP_BYREF;
    }
#ifdef TARGET_64BIT
    else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
    {
        assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));

        // int + long => gives long
        // long + int => gives long
        // we get this because in the IL the long isn't Int64, it's just IntPtr

        if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
        {
            // insert an explicit upcast
            op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
        }
        else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
        {
            // insert an explicit upcast
            op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
        }

        type = TYP_I_IMPL;
    }
#else  // 32-bit TARGET
    else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
    {
        assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));

        // int + long => gives long
        // long + int => gives long

        type = TYP_LONG;
    }
#endif // TARGET_64BIT
    else
    {
        // int + int => gives an int
        assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
        assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
               (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));

        type = genActualType(op1->gtType);

        // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
        // Otherwise, turn floats into doubles
        if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
        {
            assert(genActualType(op2->gtType) == TYP_DOUBLE);
            type = TYP_DOUBLE;
        }
    }

    assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
    return type;
}

//------------------------------------------------------------------------
// impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
//
// Arguments:
//   op1 - value to cast
//   pResolvedToken - resolved token for type to cast to
//   isCastClass - true if this is a castclass, false if isinst
//
// Return Value:
//   tree representing optimized cast, or null if no optimization possible
//
GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
{
    assert(op1->TypeGet() == TYP_REF);

    // Don't optimize for minopts or debug codegen.
    if (opts.OptimizationDisabled())
    {
        return nullptr;
    }

    // See what we know about the type of the object being cast.
    // Note: isNonNull is filled in by gtGetClassHandle but is not consumed here.
    bool                 isExact   = false;
    bool                 isNonNull = false;
    CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);

    if (fromClass != nullptr)
    {
        CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
        JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
                isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
                info.compCompHnd->getClassName(toClass));

        // Perhaps we know if the cast will succeed or fail.
        TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);

        if (castResult == TypeCompareState::Must)
        {
            // Cast will succeed, result is simply op1.
            JITDUMP("Cast will succeed, optimizing to simply return input\n");
            return op1;
        }
        else if (castResult == TypeCompareState::MustNot)
        {
            // See if we can sharpen exactness by looking for final classes
            if (!isExact)
            {
                isExact = impIsClassExact(fromClass);
            }

            // Cast to exact type will fail. Handle case where we have
            // an exact type (that is, fromClass is not a subtype)
            // and we're not going to throw on failure.
            if (isExact && !isCastClass)
            {
                JITDUMP("Cast will fail, optimizing to return null\n");
                GenTree* result = gtNewIconNode(0, TYP_REF);

                // If the cast was fed by a box, we can remove that too.
if (op1->IsBoxedValue()) { JITDUMP("Also removing upstream box\n"); gtTryRemoveBoxUpstreamEffects(op1); } return result; } else if (isExact) { JITDUMP("Not optimizing failing castclass (yet)\n"); } else { JITDUMP("Can't optimize since fromClass is inexact\n"); } } else { JITDUMP("Result of cast unknown, must generate runtime test\n"); } } else { JITDUMP("\nCan't optimize since fromClass is unknown\n"); } return nullptr; } //------------------------------------------------------------------------ // impCastClassOrIsInstToTree: build and import castclass/isinst // // Arguments: // op1 - value to cast // op2 - type handle for type to cast to // pResolvedToken - resolved token from the cast operation // isCastClass - true if this is castclass, false means isinst // // Return Value: // Tree representing the cast // // Notes: // May expand into a series of runtime checks or a helper call. GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Optimistically assume the jit should expand this as an inline test bool shouldExpandInline = true; // Profitability check. // // Don't bother with inline expansion when jit is trying to // generate code quickly, or the cast is in code that won't run very // often, or the method already is pretty big. if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { // not worth the code expansion if jitting fast or in a rarely run block shouldExpandInline = false; } else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals()) { // not worth creating an untracked local variable shouldExpandInline = false; } // Pessimistically assume the jit cannot expand this as an inline test bool canExpandInline = false; const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass); // Legality check. // // Not all classclass/isinst operations can be inline expanded. 
// Check legality only if an inline expansion is desirable. if (shouldExpandInline) { if (isCastClass) { // Jit can only inline expand the normal CHKCASTCLASS helper. canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS); } else { if (helper == CORINFO_HELP_ISINSTANCEOFCLASS) { // If the class is exact, the jit can expand the IsInst check inline. canExpandInline = impIsClassExact(pResolvedToken->hClass); } } } const bool expandInline = canExpandInline && shouldExpandInline; if (!expandInline) { JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; return gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. 
// op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
    // The QMark tree was spilled to temp 'tmp' above; mark the temp as single-def
    // and record the cast's target class on it for downstream type reasoning.
    LclVarDsc* lclDsc = lvaGetDesc(tmp);
    assert(lclDsc->lvSingleDef == 0);
    lclDsc->lvSingleDef = 1;
    JITDUMP("Marked V%02u as a single def temp\n", tmp);
    lvaSetClass(tmp, pResolvedToken->hClass);
    return gtNewLclvNode(tmp, TYP_REF);
}

// assertImp: DEBUG-only importer assert. On failure it formats the failing
// condition together with the current IL opcode/offset, the types of the op1/op2
// trees in scope at the call site, and the eval-stack depth, then aborts via
// assertAbort. In non-DEBUG builds it expands to nothing.
#ifndef DEBUG

#define assertImp(cond) ((void)0)

#else

#define assertImp(cond)                                                                                                \
    do                                                                                                                 \
    {                                                                                                                  \
        if (!(cond))                                                                                                   \
        {                                                                                                              \
            const int cchAssertImpBuf = 600;                                                                           \
            char*     assertImpBuf    = (char*)_alloca(cchAssertImpBuf);                                               \
            _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
                        "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
                        impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                       \
                        op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                      \
            assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
        }                                                                                                              \
    } while (0)

#endif // DEBUG

//------------------------------------------------------------------------
// impBlockIsInALoop: check if a block might be in a loop
//
// Arguments:
//   block - block to check
//
// Returns:
//   true if the block might be in a loop.
//
// Notes:
//   Conservatively correct; may return true for some blocks that are
//   not actually in loops.
//
bool Compiler::impBlockIsInALoop(BasicBlock* block)
{
    // When inlining, also treat the block as possibly-in-a-loop if the inline
    // call site's block in the root method carries the backward-jump flag.
    return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) ||
           ((block->bbFlags & BBF_BACKWARD_JUMP) != 0);
}

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif

/*****************************************************************************
 *  Import the instr for the given basic block
 */
void Compiler::impImportBlockCode(BasicBlock* block)
{
#define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)

#ifdef DEBUG
    if (verbose)
    {
        printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
    }
#endif

    unsigned                     nxtStmtIndex = impInitBlockLineInfo();
    IL_OFFSET                    nxtStmtOffs;
    CorInfoHelpFunc              helper;
    CorInfoIsAccessAllowedResult accessAllowedResult;
    CORINFO_HELPER_DESC          calloutHelper;
    const BYTE*                  lastLoadToken = nullptr;

    /* Get the tree list started */

    impBeginTreeList();

#ifdef FEATURE_ON_STACK_REPLACEMENT

    // Patchpoints are only considered at Tier0, and only when the OSR config switch is on.
    bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0);

#ifdef DEBUG

    // Optionally suppress patchpoints by method hash
    //
    static ConfigMethodRange JitEnablePatchpointRange;
    JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange());
    const unsigned hash    = impInlineRoot()->info.compMethodHash();
    const bool     inRange = JitEnablePatchpointRange.Contains(hash);
    enablePatchpoints &= inRange;

#endif // DEBUG

    if (enablePatchpoints)
    {
        // We don't inline at Tier0, if we do, we may need rethink our approach.
        // Could probably support inlines that don't introduce flow.
        //
        assert(!compIsForInlining());

        // OSR is not yet supported for methods with explicit tail calls.
        //
        // But we also do not have to switch these methods to be optimized as we should be
        // able to avoid getting trapped in Tier0 code by normal call counting.
        // So instead, just suppress adding patchpoints.
// if (!compTailPrefixSeen) { // The normaly policy is only to add patchpoints to the targets of lexically // backwards branches. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // Is the start of this block a suitable patchpoint? // if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0)) { // We should have noted this earlier and bailed out of OSR. // assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. // // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? 
"" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. 
*/ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. 
if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... 
ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case 
CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? 
*/ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. 
If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. 
*/ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. 
op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. */ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); // &aliasedVar doesnt need GTF_GLOB_REF, though alisasedVar does assert((op1->gtFlags & GTF_GLOB_REF) == 0); tiRetVal = lvaTable[lclNum].lvVerTypeInfo; impPushOnStack(op1, tiRetVal); break; case CEE_ARGLIST: if (!info.compIsVarArgs) { BADCODE("arglist in non-vararg method"); } assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG); /* The ARGLIST cookie is a hidden 'last' parameter, we have already adjusted the arg count cos this is like fetching the last param */ assertImp(0 < numArgs); lclNum = lvaVarargsHandleArg; op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1)); op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); impPushOnStack(op1, tiRetVal); break; case CEE_ENDFINALLY: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY); return; } if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } if (info.compXcptnsCount == 0) { BADCODE("endfinally outside finally"); } 
assert(verCurrentState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; case CEE_ENDFILTER: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER); return; } block->bbSetRunRarely(); // filters are rare if (info.compXcptnsCount == 0) { BADCODE("endfilter outside filter"); } op1 = impPopStack().val; assertImp(op1->gtType == TYP_INT); if (!bbInFilterILRange(block)) { BADCODE("EndFilter outside a filter handler"); } /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); if (verCurrentState.esStackDepth != 0) { verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__)); } goto APPEND; case CEE_RET: prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it RET: if (!impReturnInstruction(prefixFlags, opcode)) { return; // abort } else { break; } case CEE_JMP: assert(!compIsForInlining()); if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex()) { /* CEE_JMP does not make sense in some "protected" regions. */ BADCODE("Jmp not allowed in protected region"); } if (opts.IsReversePInvoke()) { BADCODE("Jmp not allowed in reverse P/Invoke"); } if (verCurrentState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); /* The signature of the target has to be identical to ours. 
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // 
If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != 
DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluation the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); 
} else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter. 
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, 
but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); 
LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. 
// See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. op1 = gtFoldExpr(op1); impPushOnStack(op1, tiRetVal); break; case CEE_BEQ_S: case CEE_BEQ: oper = GT_EQ; goto CMP_2_OPs_AND_BR; case CEE_BGE_S: case CEE_BGE: oper = GT_GE; goto CMP_2_OPs_AND_BR; case CEE_BGE_UN_S: case CEE_BGE_UN: oper = GT_GE; goto CMP_2_OPs_AND_BR_UN; case CEE_BGT_S: case CEE_BGT: oper = GT_GT; goto CMP_2_OPs_AND_BR; case CEE_BGT_UN_S: case CEE_BGT_UN: oper = GT_GT; goto CMP_2_OPs_AND_BR_UN; case CEE_BLE_S: case CEE_BLE: oper = GT_LE; goto CMP_2_OPs_AND_BR; case CEE_BLE_UN_S: case CEE_BLE_UN: oper = GT_LE; goto CMP_2_OPs_AND_BR_UN; case CEE_BLT_S: case CEE_BLT: oper = GT_LT; goto CMP_2_OPs_AND_BR; case CEE_BLT_UN_S: case CEE_BLT_UN: oper = GT_LT; goto CMP_2_OPs_AND_BR_UN; case CEE_BNE_UN_S: case CEE_BNE_UN: oper = GT_NE; goto CMP_2_OPs_AND_BR_UN; CMP_2_OPs_AND_BR_UN: uns = true; unordered = true; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR: uns = false; unordered = false; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR_ALL: /* Pull two values */ op2 = impPopStack().val; op1 = impPopStack().val; #ifdef TARGET_64BIT if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? 
TYP_U_IMPL : TYP_I_IMPL); } else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op1 side effect")); impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op2 side effect")); impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT) { impNoteLastILoffs(); } #endif break; } // We can generate an compare of different sized floating point op1 and op2 // We insert a cast // if (varTypeIsFloating(op1->TypeGet())) { if (op1->TypeGet() != op2->TypeGet()) { assert(varTypeIsFloating(op2->TypeGet())); // say op1=double, op2=float. To avoid loss of precision // while comparing, op2 is converted to double and double // comparison is done. 
if (op1->TypeGet() == TYP_DOUBLE) { // We insert a cast of op2 to TYP_DOUBLE op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_DOUBLE) { // We insert a cast of op1 to TYP_DOUBLE op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } } /* Create and append the operator */ op1 = gtNewOperNode(oper, TYP_INT, op1, op2); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } if (unordered) { op1->gtFlags |= GTF_RELOP_NAN_UN; } goto COND_JUMP; case CEE_SWITCH: /* Pop the switch value off the stack */ op1 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op1->TypeGet())); /* We can create a switch node */ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1); val = (int)getU4LittleEndian(codeAddr); codeAddr += 4 + val * 4; // skip over the switch-table goto SPILL_APPEND; /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF; case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF; case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF; case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF; case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF; case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF; case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF; case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF; case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF; case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF; case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN; case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN; case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto 
CONV_OVF_UN; case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN; CONV_OVF_UN: uns = true; goto CONV_OVF_COMMON; CONV_OVF: uns = false; goto CONV_OVF_COMMON; CONV_OVF_COMMON: ovfl = true; goto _CONV; case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV; case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV; case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV; case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV; case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV; case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV; case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV; #if (REGSIZE_BYTES == 8) case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN; #else case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV; #endif case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV; case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN; case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV; case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV; case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN; CONV_UN: uns = true; ovfl = false; goto _CONV; CONV: uns = false; ovfl = false; goto _CONV; _CONV: // only converts from FLOAT or DOUBLE to an integer type // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. // TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. 
if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? 
*/ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. // Another reason for throwing away the useless cast is in the context of // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)). // The cast gets added as part of importing GT_CALL, which gets in the way // of fgMorphCall() on the forms of tail call nodes that we assert. if ((op1->gtOper == GT_CAST) && !op1->gtOverflow()) { op1 = op1->AsOp()->gtOp1; } if (op1->gtOper != GT_CALL) { if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0) { op1 = gtUnusedValNode(op1); } else { // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`, // if we ever need to reimport we need a valid LCL_VAR on it. 
op1 = gtNewNothingNode(); } } /* Append the value to the tree list */ goto SPILL_APPEND; } /* No side effects - just throw the <BEEP> thing away */ } break; case CEE_DUP: { StackEntry se = impPopStack(); GenTree* tree = se.val; tiRetVal = se.seTypeInfo; op1 = tree; // If the expression to dup is simple, just clone it. // Otherwise spill it to a temp, and reload the temp twice. bool cloneExpr = false; if (!opts.compDbgCode) { // Duplicate 0 and +0.0 if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero()) { cloneExpr = true; } // Duplicate locals and addresses of them else if (op1->IsLocal()) { cloneExpr = true; } else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() && (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ) { cloneExpr = true; } } else { // Always clone for debug mode cloneExpr = true; } if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); // Propagate type info to the temp from the stack and the original tree if (type == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", tmpNum); lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle()); } } op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); impPushOnStack(op1, tiRetVal); impPushOnStack(op2, tiRetVal); } break; case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND; case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND; case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND; case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND; case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND; case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND; case 
CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND; case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND; STIND: compUnsafeCastUsed = true; // Have to go conservative STIND_POST_VERIFY: op2 = impPopStack().val; // value to store op1 = impPopStack().val; // address to store to // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); impBashVarAddrsToI(op1, op2); op2 = impImplicitR4orR8Cast(op2, lclTyp); #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. 
#ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: compUnsafeCastUsed = true; // Have to go conservative LDIND_POST_VERIFY: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity 
// if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? 
&constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, 
impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained); codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually JITDUMP(" (%08X) ", constrainedResolvedToken.token); Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes"); prefixFlags |= PREFIX_CONSTRAINED; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto PREFIX; case CEE_READONLY: JITDUMP(" readonly."); Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes"); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } assert(sz == 0); goto PREFIX; case CEE_TAILCALL: JITDUMP(" tail."); Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. 
prefixes"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } assert(sz == 0); goto PREFIX; case CEE_NEWOBJ: /* Since we will implicitly insert newObjThisPtr at the start of the argument list, spill any GTF_ORDER_SIDEEFF */ impSpillSpecialSideEff(); /* NEWOBJ does not respond to TAIL */ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT; /* NEWOBJ does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; _impResolveToken(CORINFO_TOKENKIND_NewObj); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo); mflags = callInfo.methodFlags; if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0) { BADCODE("newobj on static or abstract method"); } // Insert the security callout before any actual code is generated impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); // There are three different cases for new // Object size is variable (depends on arguments) // 1) Object is an array (arrays treated specially by the EE) // 2) Object is some other variable sized object (e.g. String) // 3) Class Size can be determined beforehand (normal case) // In the first case, we need to call a NEWOBJ helper (multinewarray) // in the second case we call the constructor with a '0' this pointer // In the third case we alloc the memory, then call the constuctor clsFlags = callInfo.classFlags; if (clsFlags & CORINFO_FLG_ARRAY) { // Arrays need to call the NEWOBJ helper. 
assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE); impImportNewObjArray(&resolvedToken, &callInfo); if (compDonotInline()) { return; } callTyp = TYP_REF; break; } // At present this can only be String else if (clsFlags & CORINFO_FLG_VAROBJSIZE) { // Skip this thisPtr argument newObjThisPtr = nullptr; /* Remember that this basic block contains 'new' of an object */ block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; } else { // This is the normal case where the size of the object is // fixed. Allocate the memory and call the constructor. // Note: We cannot add a peep to avoid use of temp here // becase we don't have enough interference info to detect when // sources and destination interfere, example: s = new S(ref); // TODO: We find the correct place to introduce a general // reverse copy prop for struct return values from newobj or // any function returning structs. /* get a temporary for the new object */ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp")); if (compDonotInline()) { // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS. assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } // In the value class case we only need clsHnd for size calcs. // // The lookup of the code pointer will be handled by CALL in this case if (clsFlags & CORINFO_FLG_VALUECLASS) { if (compIsForInlining()) { // If value class has GC fields, inform the inliner. It may choose to // bail out on the inline. DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; // some policies do not track the relative hotness of call sites for // "always" inline cases. 
if (impInlineInfo->iciBlock->isRunRarely()) { compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } } } } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lvaTable[lclNum].lvType = JITtype2varType(jitTyp); } else { // The local variable itself is the allocated space. // Here we need unsafe value cls check, since the address of struct is taken for further use // and potentially exploitable. lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } bool bbInALoop = impBlockIsInALoop(block); bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { // Append a tree to zero-out the temp newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet()); newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum); lclDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; } // Obtain the address of the temp newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet())); } else { // If we're newing up a finalizable object, spill anything that can cause exceptions. 
// bool hasSideEffects = false; CorInfoHelpFunc newHelper = info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects); if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; op1 = gtNewAllocObjNode(&resolvedToken, useParent); if (op1 == nullptr) { return; } // Remember that this basic block contains 'new' of an object block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Append the assignment to the temp/local. Dont need to spill // at all as we are just calling an EE-Jit helper which can only // cause an (async) OutOfMemoryException. // We assign the newly allocated object (by a GT_ALLOCOBJ node) // to a temp. Note that the pattern "temp = allocObj" is required // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", lclNum); lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */); newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF); } } goto CALL; case CEE_CALLI: /* CALLI does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; FALLTHROUGH; case CEE_CALLVIRT: case CEE_CALL: // We can't call getCallInfo on the token from a CALLI, but we need it in // many other places. We unfortunately embed that knowledge here. if (opcode != CEE_CALLI) { _impResolveToken(CORINFO_TOKENKIND_Method); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, // this is how impImportCall invokes getCallInfo combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), (opcode == CEE_CALLVIRT) ? 
CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE), &callInfo); } else { // Suppress uninitialized use warning. memset(&resolvedToken, 0, sizeof(resolvedToken)); memset(&callInfo, 0, sizeof(callInfo)); resolvedToken.token = getU4LittleEndian(codeAddr); resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; } CALL: // memberRef should be set. // newObjThisPtr should be set for CEE_NEWOBJ JITDUMP(" %08X", resolvedToken.token); constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0; bool newBBcreatedForTailcallStress; bool passedStressModeValidation; newBBcreatedForTailcallStress = false; passedStressModeValidation = true; if (compIsForInlining()) { if (compDonotInline()) { return; } // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks. assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0); } else { if (compTailCallStress()) { // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()? // Tail call stress only recognizes call+ret patterns and forces them to be // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress // doesn't import 'ret' opcode following the call into the basic block containing // the call instead imports it to a new basic block. Note that fgMakeBasicBlocks() // is already checking that there is an opcode following call and hence it is // safe here to read next opcode without bounds check. newBBcreatedForTailcallStress = impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't // make it jump to RET. (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT); if (newBBcreatedForTailcallStress && !hasTailPrefix) { // Do a more detailed evaluation of legality const bool returnFalseIfInvalid = true; const bool passedConstraintCheck = verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ? 
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. 
These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a 
JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? 
TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
// op1->gtType = genActualType(lclTyp); goto FIELD_DONE; } } FALLTHROUGH; case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; case CORINFO_FIELD_INTRINSIC_ZERO: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); op1 = gtNewIconNode(0, lclTyp); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: { assert(aflags & CORINFO_ACCESS_GET); LPVOID pValue; InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue); op1 = gtNewStringLiteralNode(iat, pValue); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } break; default: assert(!"Unexpected fieldAccessor"); } if (!isLoadAddress) { if (prefixFlags & PREFIX_VOLATILE) { op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_VOLATILE; } } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_UNALIGNED; } } } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } FIELD_DONE: impPushOnStack(op1, tiRetVal); } break; case CEE_STFLD: case CEE_STSFLD: { bool 
isStoreStatic = (opcode == CEE_STSFLD); CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type) /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = CORINFO_ACCESS_SET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; typeInfo tiVal; /* Pull the value from the stack */ StackEntry se = impPopStack(); op2 = se.val; tiVal = se.seTypeInfo; clsHnd = tiVal.GetClassHandle(); if (opcode == CEE_STFLD) { tiObj = &impStackTop().seTypeInfo; obj = impPopStack().val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; fieldClsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or * per-inst static? */ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. 
// We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, 
(CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. bool deferStructAssign = (lclTyp == TYP_STRUCT); if (!deferStructAssign) { if (prefixFlags & PREFIX_VOLATILE) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_IND_UNALIGNED; } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly always. Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the following IR will be accepted: // STMTx (IL 0x... 
???) // * ASG long // +--* CLS_VAR long // \--* CNS_INT int 2 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && varTypeIsLong(op1->TypeGet())) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } #endif #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op1' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } op1 = gtNewAssignNode(op1, op2); /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } /* stfld can interfere with value classes (consider the sequence ldloc, ldloca, ..., stfld, stloc). We will be conservative and spill all value class references from the stack. */ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL))) { assert(tiObj); // If we can resolve the field to be within some local, // then just spill that local. 
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. 
if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); if 
(verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? 
InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); 
#ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be consumed both as `TYP_STRUCT` and `TYP_REF`. 
op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv()); #endif op1->AsCall()->gtRetClsHnd = tokenType; tiRetVal = verMakeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; case CEE_UNBOX: case CEE_UNBOX_ANY: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); bool runtimeLookup; op2 = impTokenToHandle(&resolvedToken, &runtimeLookup); if (op2 == nullptr) { assert(compDonotInline()); return; } // Run this always so we can get access exceptions even with SkipVerification. accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n"); op1 = impPopStack().val; goto CASTCLASS; } /* Pop the object and create the unbox helper call */ /* You might think that for UNBOX_ANY we need to push a different */ /* (non-byref) type, but here we're making the tiRetVal that is used */ /* for the intermediate pointer which we then transfer onto the OBJ */ /* instruction. OBJ then creates the appropriate tiRetVal. */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass); assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE); // Check legality and profitability of inline expansion for unboxing. const bool canExpandInline = (helper == CORINFO_HELP_UNBOX); const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled(); if (canExpandInline && shouldExpandInline) { // See if we know anything about the type of op1, the object being unboxed. 
bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull); // We can skip the "exact" bit here as we are comparing to a value class. // compareTypesForEquality should bail on comparisions for shared value classes. if (clsHnd != NO_CLASS_HANDLE) { const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd); if (compare == TypeCompareState::Must) { JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd)); // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack. if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? 
"UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? 
TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. ); /* ---------------------------------------------------------------------- | \ helper | | | | \ | | | | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE | | \ | (which returns a BYREF) | (which returns a STRUCT) | | | opcode \ | | | |--------------------------------------------------------------------- | UNBOX | push the BYREF | spill the STRUCT to a local, | | | | push the BYREF to this local | |--------------------------------------------------------------------- | UNBOX_ANY | push a GT_OBJ of | push the STRUCT | | | the BYREF | For Linux when the | | | | struct is returned in two | | | | registers create a temp | | | | which address is passed to | | | | the unbox_nullable helper. | |--------------------------------------------------------------------- */ if (opcode == CEE_UNBOX) { if (helper == CORINFO_HELP_UNBOX_NULLABLE) { // Unbox nullable helper returns a struct type. // We need to spill it to a temp so than can take the address of it. // Here we need unsafe value cls check, since the address of struct is taken to be used // further along and potetially be exploitable. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable")); lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. 
op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); } assert(op1->gtType == TYP_BYREF); } else { assert(opcode == CEE_UNBOX_ANY); if (helper == CORINFO_HELP_UNBOX) { // Normal unbox helper returns a TYP_BYREF. impPushOnStack(op1, tiRetVal); oper = GT_OBJ; goto OBJ; } assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!"); #if FEATURE_MULTIREG_RET if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed)) { // Unbox nullable helper returns a TYP_STRUCT. // For the multi-reg case we need to spill it to a temp so that // we can pass the address to the unbox_nullable jit helper. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable")); lvaTable[tmp].lvIsMultiRegArg = true; lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); // In this case the return value of the unbox helper is TYP_BYREF. // Make sure the right type is placed on the operand type stack. impPushOnStack(op1, tiRetVal); // Load the struct. oper = GT_OBJ; assert(op1->gtType == TYP_BYREF); goto OBJ; } else #endif // !FEATURE_MULTIREG_RET { // If non register passable struct we have it materialized in the RetBuf. 
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect 
typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. 
EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = 
gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND_POST_VERIFY; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } compUnsafeCastUsed = true; if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND_POST_VERIFY; } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND_POST_VERIFY; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. 
JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); compUnsafeCastUsed = true; if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND_POST_VERIFY; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } 
impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); return; } BADCODE3("unknown opcode", ": %02X", (int)opcode); } codeAddr += sz; prevOpcode = opcode; prefixFlags = 0; } return; #undef _impResolveToken } #ifdef _PREFAST_ #pragma warning(pop) #endif // Push a local/argument treeon the operand stack void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal) { tiRetVal.NormaliseForStack(); if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr()) { tiRetVal.SetUninitialisedObjRef(); } impPushOnStack(op, tiRetVal); } //------------------------------------------------------------------------ // impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load // // Arguments: // lclNum -- The index into lvaTable // offset -- The offset to associate with the node // // Returns: // The node // GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET 
offset))
{
    // Pick the type to load the local with: normalize-on-load locals must be
    // read with their declared (real, possibly small) type so the load itself
    // performs normalization; all others use the widened "actual" type.
    var_types lclTyp;
    if (lvaTable[lclNum].lvNormalizeOnLoad())
    {
        lclTyp = lvaGetRealType(lclNum);
    }
    else
    {
        lclTyp = lvaGetActualType(lclNum);
    }
    return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset));
}

// Load a local/argument on the operand stack
// lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal)
{
    impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal);
}

// Load an argument on the operand stack
// Shared by the various CEE_LDARG opcodes
// ilArgNum is the argument index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
{
    Verify(ilArgNum < info.compILargsCount, "bad arg num");

    if (compIsForInlining())
    {
        if (ilArgNum >= info.compArgsCount)
        {
            // Argument index out of range for the inlinee; abort the inline.
            compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
            return;
        }

        // For inlinees, the argument tree comes from the inline arg info
        // rather than from a local var node for this method's frame.
        impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
                   impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
    }
    else
    {
        if (ilArgNum >= info.compArgsCount)
        {
            BADCODE("Bad IL");
        }

        unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param

        // "this" is remapped to the arg-0 copy variable when one exists.
        if (lclNum == info.compThisArg)
        {
            lclNum = lvaArg0Var;
        }

        impLoadVar(lclNum, offset);
    }
}

// Load a local on the operand stack
// Shared by the various CEE_LDLOC opcodes
// ilLclNum is the local index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
{
    if (compIsForInlining())
    {
        if (ilLclNum >= info.compMethodInfo->locals.numArgs)
        {
            // Local index out of range for the inlinee; abort the inline.
            compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
            return;
        }

        // Get the local type
        var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;

        typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;

        /* Have we allocated a temp for this local? */
        unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));

        // All vars of inlined methods should be !lvNormalizeOnLoad()
        assert(!lvaTable[lclNum].lvNormalizeOnLoad());
        lclTyp = genActualType(lclTyp);

        impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
    }
    else
    {
        if (ilLclNum >= info.compMethodInfo->locals.numArgs)
        {
            BADCODE("Bad IL");
        }

        // Locals are laid out after the arguments in lvaTable.
        unsigned lclNum = info.compArgsCount + ilLclNum;

        impLoadVar(lclNum, offset);
    }
}

#ifdef TARGET_ARM
/**************************************************************************************
 *
 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
 * dst struct, because struct promotion will turn it into a float/double variable while
 * the rhs will be an int/long variable. We don't code generate assignment of int into
 * a float, but there is nothing that might prevent us from doing so. The tree however
 * would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
 *
 * tmpNum - the lcl dst variable num that is a struct.
 * src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
 * hClass - the type handle for the struct variable.
 *
 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
 *              however, we could do a codegen of transferring from int to float registers
 *              (transfer, not a cast.)
 *
 */
void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
{
    // Only varargs calls whose destination is an HFA need this protection.
    if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass))
    {
        int       hfaSlots = GetHfaCount(hClass);
        var_types hfaType  = GetHfaType(hClass);

        // If we have varargs we morph the method's return type to be "int" irrespective of its original
        // type: struct/float at importer because the ABI calls out return in integer registers.
        // We don't want struct promotion to replace an expression like this:
        //     lclFld_int = callvar_int()   into   lclFld_float = callvar_int();
        // This means an int is getting assigned to a float without a cast. Prevent the promotion.
        if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
            (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
        {
            // Make sure this struct type stays as struct so we can receive the call in a struct.
            lvaTable[tmpNum].lvIsMultiRegRet = true;
        }
    }
}
#endif // TARGET_ARM

#if FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
//    registers return values to suitable temps.
//
// Arguments:
//    op -- call returning a struct in registers
//    hClass -- class handle for struct
//
// Returns:
//    Tree with reference to struct local to use as call return value.
//
GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree*             op,
                                              CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv))
{
    // Spill the multi-reg value into a new temp and hand callers a reference
    // to that temp instead of the raw call.
    unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return"));
    impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
    GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);

    // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
    ret->gtFlags |= GTF_DONT_CSE;

    assert(IsMultiRegReturnedType(hClass, callConv));

    // Mark the var so that fields are not promoted and stay together.
lvaTable[tmpNum].lvIsMultiRegRet = true; return ret; } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impReturnInstruction: import a return or an explicit tail call // // Arguments: // prefixFlags -- active IL prefixes // opcode -- [in, out] IL opcode // // Returns: // True if import was successful (may fail for some inlinees) // bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0; #ifdef DEBUG // If we are importing an inlinee and have GC ref locals we always // need to have a spill temp for the return value. This temp // should have been set up in advance, over in fgFindBasicBlocks. if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID)) { assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM); } #endif // DEBUG GenTree* op2 = nullptr; GenTree* op1 = nullptr; CORINFO_CLASS_HANDLE retClsHnd = nullptr; if (info.compRetType != TYP_VOID) { StackEntry se = impPopStack(); retClsHnd = se.seTypeInfo.GetClassHandle(); op2 = se.val; if (!compIsForInlining()) { impBashVarAddrsToI(op2); op2 = impImplicitIorI4Cast(op2, info.compRetType); op2 = impImplicitR4orR8Cast(op2, info.compRetType); // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF. 
assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) || ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) || (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) || (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType))); #ifdef DEBUG if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF)) { // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with // one-return BB. assert(op2->gtType == TYP_REF); // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTreeCall::Use* args = gtNewCallArgs(op2); op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op2); } } #endif } else { if (verCurrentState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); return false; } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (before normalization) =>\n"); gtDispTree(op2); } #endif // Make sure the type matches the original call. var_types returnType = genActualType(op2->gtType); var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType; if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT)) { originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass); } if (returnType != originalCallType) { // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa. // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice verse. 
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? 
var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. 
CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. 
// If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
        noway_assert(info.compRetNativeType != TYP_STRUCT);
#endif

        // Normalize the struct return (e.g. retype a small struct that comes back
        // in a register) before wrapping it in the GT_RETURN node.
        op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
        // return op2
        var_types returnType = info.compRetType;
        op1                  = gtNewOperNode(GT_RETURN, genActualType(returnType), op2);
    }
    else
    {
        // return op2
        op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
    }

    // We must have imported a tailcall and jumped to RET
    if (isTailCall)
    {
        assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));

        opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES

        // impImportCall() would have already appended TYP_VOID calls
        if (info.compRetType == TYP_VOID)
        {
            return true;
        }
    }

    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
    // Remember at which BC offset the tree was finished
    impNoteLastILoffs();
#endif
    return true;
}

/*****************************************************************************
 *  Mark the block as unimported.
 *  Note that the caller is responsible for calling impImportBlockPending(),
 *  with the appropriate stack-state
 */

inline void Compiler::impReimportMarkBlock(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose && (block->bbFlags & BBF_IMPORTED))
    {
        printf("\n" FMT_BB " will be reimported\n", block->bbNum);
    }
#endif

    // Clearing BBF_IMPORTED is what makes the importer willing to process
    // this block again.
    block->bbFlags &= ~BBF_IMPORTED;
}

/*****************************************************************************
 *  Mark the successors of the given block as unimported.
 *  Note that the caller is responsible for calling impImportBlockPending()
 *  for all the successors, with the appropriate stack-state.
 */

void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
    for (BasicBlock* const succBlock : block->Succs())
    {
        impReimportMarkBlock(succBlock);
    }
}

/*****************************************************************************
 *
 *  Filter wrapper to handle only passed in exception code
 *  from it).
 */

// SEH filter: handle only the JIT's own verification exception; let every
// other exception code continue the search for an outer handler.
LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
    if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
    {
        return EXCEPTION_EXECUTE_HANDLER;
    }

    return EXCEPTION_CONTINUE_SEARCH;
}

// Check EH-related invariants for a block that lies inside a 'try' region and
// queue the associated handler/filter blocks for importation. Walks from the
// innermost enclosing 'try' region outwards via ebdEnclosingTryIndex.
// 'isTryStart' is true when 'block' is the first block of the 'try' region.
void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
{
    assert(block->hasTryIndex());
    assert(!compIsForInlining());

    unsigned  tryIndex = block->getTryIndex();
    EHblkDsc* HBtab    = ehGetDsc(tryIndex);

    if (isTryStart)
    {
        assert(block->bbFlags & BBF_TRY_BEG);

        // The Stack must be empty
        //
        if (block->bbStkDepth != 0)
        {
            BADCODE("Evaluation stack must be empty on entry into a try block");
        }
    }

    // Save the stack contents, we'll need to restore it later
    //
    SavedStack blockState;
    impSaveStackState(&blockState, false);

    while (HBtab != nullptr)
    {
        if (isTryStart)
        {
            // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
            // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
            //
            if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
            {
                // We trigger an invalid program exception here unless we have a try/fault region.
                //
                if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
                {
                    BADCODE(
                        "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
                }
                else
                {
                    // Allow a try/fault region to proceed.
                    assert(HBtab->HasFaultHandler());
                }
            }
        }

        // Recursively process the handler block, if we haven't already done so.
        BasicBlock* hndBegBB = HBtab->ebdHndBeg;

        if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0))
        {
            // Construct the proper verification stack state
            // either empty or one that contains just
            // the Exception Object that we are dealing with
            //
            verCurrentState.esStackDepth = 0;

            if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
            {
                CORINFO_CLASS_HANDLE clsHnd;

                if (HBtab->HasFilter())
                {
                    // A filter-handler always sees the exception typed as Object.
                    clsHnd = impGetObjectClass();
                }
                else
                {
                    // Resolve the catch clause's class token to get the exact
                    // type of the exception object on the handler's entry stack.
                    CORINFO_RESOLVED_TOKEN resolvedToken;

                    resolvedToken.tokenContext = impTokenLookupContextHandle;
                    resolvedToken.tokenScope   = info.compScopeHnd;
                    resolvedToken.token        = HBtab->ebdTyp;
                    resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
                    info.compCompHnd->resolveToken(&resolvedToken);

                    clsHnd = resolvedToken.hClass;
                }

                // push catch arg on the stack, spill to a temp if necessary
                // Note: can update HBtab->ebdHndBeg!
                hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
            }

            // Queue up the handler for importing
            //
            impImportBlockPending(hndBegBB);
        }

        // Process the filter block, if we haven't already done so.
        if (HBtab->HasFilter())
        {
            /* @VERIFICATION : Ideally the end of filter state should get
               propagated to the catch handler, this is an incompleteness,
               but is not a security/compliance issue, since the only
               interesting state is the 'thisInit' state.
            */
            BasicBlock* filterBB = HBtab->ebdFilter;

            if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0))
            {
                verCurrentState.esStackDepth = 0;

                // push catch arg on the stack, spill to a temp if necessary
                // Note: can update HBtab->ebdFilter!
                const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
                filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);

                impImportBlockPending(filterBB);
            }
        }

        // This seems redundant ....??
        if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
        {
            /* Recursively process the handler block */

            verCurrentState.esStackDepth = 0;

            // Queue up the fault handler for importing
            //
            impImportBlockPending(HBtab->ebdHndBeg);
        }

        // Now process our enclosing try index (if any)
        //
        tryIndex = HBtab->ebdEnclosingTryIndex;
        if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
        {
            HBtab = nullptr;
        }
        else
        {
            HBtab = ehGetDsc(tryIndex);
        }
    }

    // Restore the stack contents
    impRestoreStackState(&blockState);
}

//***************************************************************
// Import the instructions for the given basic block.  Perform
// verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
// time, or whose verification pre-state is changed.

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void Compiler::impImportBlock(BasicBlock* block)
{
    // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
    // handle them specially. In particular, there is no IL to import for them, but we do need
    // to mark them as imported and put their successors on the pending import list.
    if (block->bbFlags & BBF_INTERNAL)
    {
        JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
        block->bbFlags |= BBF_IMPORTED;

        for (BasicBlock* const succBlock : block->Succs())
        {
            impImportBlockPending(succBlock);
        }

        return;
    }

    bool markImport;

    assert(block);

    /* Make the block globally available */

    compCurBB = block;

#ifdef DEBUG
    /* Initialize the debug variables */
    impCurOpcName = "unknown";
    impCurOpcOffs = block->bbCodeOffs;
#endif

    /* Set the current stack state to the merged result */
    verResetCurrentState(block, &verCurrentState);

    /* Now walk the code and import the IL into GenTrees */

    // Parameter block passed through PAL_TRY so the filtered body can reach
    // 'this' and 'block' without capturing locals.
    struct FilterVerificationExceptionsParam
    {
        Compiler*   pThis;
        BasicBlock* block;
    };
    FilterVerificationExceptionsParam param;

    param.pThis = this;
    param.block = block;

    PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
    {
        /* @VERIFICATION : For now, the only state propagation from try
           to its handler is "thisInit" state (stack is empty at start of try).
           In general, for state that we track in verification, we need to
           model the possibility that an exception might happen at any IL
           instruction, so we really need to merge all states that obtain
           between IL instructions in a try block into the start states of
           all handlers.

           However we do not allow the 'this' pointer to be uninitialized when
           entering most kinds try regions (only try/fault are allowed to have
           an uninitialized this pointer on entry to the try)

           Fortunately, the stack is thrown away when an exception
           leads to a handler, so we don't have to worry about that.
           We DO, however, have to worry about the "thisInit" state.
           But only for the try/fault case.

           The only allowed transition is from TIS_Uninit to TIS_Init.

           So for a try/fault region for the fault handler block
           we will merge the start state of the try begin
           and the post-state of each block that is part of this try region
        */

        // merge the start state of the try begin
        //
        if (pParam->block->bbFlags & BBF_TRY_BEG)
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, true);
        }

        pParam->pThis->impImportBlockCode(pParam->block);

        // As discussed above:
        // merge the post-state of each block that is part of this try region
        //
        if (pParam->block->hasTryIndex())
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, false);
        }
    }
    PAL_EXCEPT_FILTER(FilterVerificationExceptions)
    {
        verHandleVerificationFailure(block DEBUGARG(false));
    }
    PAL_ENDTRY

    if (compDonotInline())
    {
        return;
    }

    assert(!compDonotInline());

    markImport = false;

SPILLSTACK:

    unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
    bool        reimportSpillClique = false;
    BasicBlock* tgtBlock            = nullptr;

    /* If the stack is non-empty, we might have to spill its contents */

    if (verCurrentState.esStackDepth != 0)
    {
        impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
                                  // on the stack, its lifetime is hard to determine, simply
                                  // don't reuse such temps.

        Statement* addStmt = nullptr;

        /* Do the successors of 'block' have any other predecessors ?
           We do not want to do some of the optimizations related to multiRef
           if we can reimport blocks */

        unsigned multRef = impCanReimport ? unsigned(~0) : 0;

        switch (block->bbJumpKind)
        {
            case BBJ_COND:

                // The JTRUE at the end of the block must be removed before the
                // stack is spilled; it is re-appended after the spills below.
                addStmt = impExtractLastStmt();

                assert(addStmt->GetRootNode()->gtOper == GT_JTRUE);

                /* Note if the next block has more than one ancestor */

                multRef |= block->bbNext->bbRefs;

                /* Does the next block have temps assigned? */
                baseTmp  = block->bbNext->bbStkTempsIn;
                tgtBlock = block->bbNext;

                if (baseTmp != NO_BASE_TMP)
                {
                    break;
                }

                /* Try the target of the jump then */

                multRef |= block->bbJumpDest->bbRefs;
                baseTmp  = block->bbJumpDest->bbStkTempsIn;
                tgtBlock = block->bbJumpDest;
                break;

            case BBJ_ALWAYS:
                multRef |= block->bbJumpDest->bbRefs;
                baseTmp  = block->bbJumpDest->bbStkTempsIn;
                tgtBlock = block->bbJumpDest;
                break;

            case BBJ_NONE:
                multRef |= block->bbNext->bbRefs;
                baseTmp  = block->bbNext->bbStkTempsIn;
                tgtBlock = block->bbNext;
                break;

            case BBJ_SWITCH:
                // Same as BBJ_COND: pull off the trailing SWITCH while spilling.
                addStmt = impExtractLastStmt();
                assert(addStmt->GetRootNode()->gtOper == GT_SWITCH);

                for (BasicBlock* const tgtBlock : block->SwitchTargets())
                {
                    multRef |= tgtBlock->bbRefs;

                    // Thanks to spill cliques, we should have assigned all or none
                    assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
                    baseTmp = tgtBlock->bbStkTempsIn;
                    if (multRef > 1)
                    {
                        break;
                    }
                }
                break;

            case BBJ_CALLFINALLY:
            case BBJ_EHCATCHRET:
            case BBJ_RETURN:
            case BBJ_EHFINALLYRET:
            case BBJ_EHFILTERRET:
            case BBJ_THROW:
                NO_WAY("can't have 'unreached' end of BB with non-empty stack");
                break;

            default:
                noway_assert(!"Unexpected bbJumpKind");
                break;
        }

        assert(multRef >= 1);

        /* Do we have a base temp number? */

        bool newTemps = (baseTmp == NO_BASE_TMP);

        if (newTemps)
        {
            /* Grab enough temps for the whole stack */
            baseTmp = impGetSpillTmpBase(block);
        }

        /* Spill all stack entries into temps */
        unsigned level, tempNum;

        JITDUMP("\nSpilling stack entries into temps\n");
        for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
        {
            GenTree* tree = verCurrentState.esStack[level].val;

            /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
               the other. This should merge to a byref in unverifiable code.
               However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
               successor would be imported assuming there was a TYP_I_IMPL on
               the stack. Thus the value would not get GC-tracked. Hence,
               change the temp to TYP_BYREF and reimport the successors.
               Note: We should only allow this in unverifiable code.
            */
            if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL)
            {
                lvaTable[tempNum].lvType = TYP_BYREF;
                impReimportMarkSuccessors(block);
                markImport = true;
            }

#ifdef TARGET_64BIT
            if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
            {
                // Some other block in the spill clique set this to "int", but now we have "native int".
                // Change the type and go back to re-import any blocks that used the wrong type.
                lvaTable[tempNum].lvType = TYP_I_IMPL;
                reimportSpillClique      = true;
            }
            else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
            {
                // Spill clique has decided this should be "native int", but this block only pushes an "int".
                // Insert a sign-extension to "native int" so we match the clique.
                verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
            }

            // Consider the case where one branch left a 'byref' on the stack and the other leaves
            // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
            // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
            // behavior instead of asserting and then generating bad code (where we save/restore the
            // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
            // imported already, we need to change the type of the local and reimport the spill clique.
            // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
            // the 'byref' size.
            if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
            {
                // Some other block in the spill clique set this to "int", but now we have "byref".
                // Change the type and go back to re-import any blocks that used the wrong type.
                lvaTable[tempNum].lvType = TYP_BYREF;
                reimportSpillClique      = true;
            }
            else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
            {
                // Spill clique has decided this should be "byref", but this block only pushes an "int".
                // Insert a sign-extension to "native int" so we match the clique size.
                verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
            }
#endif // TARGET_64BIT

            if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
            {
                // Some other block in the spill clique set this to "float", but now we have "double".
                // Change the type and go back to re-import any blocks that used the wrong type.
                lvaTable[tempNum].lvType = TYP_DOUBLE;
                reimportSpillClique      = true;
            }
            else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
            {
                // Spill clique has decided this should be "double", but this block only pushes a "float".
                // Insert a cast to "double" so we match the clique.
                verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
            }

            /* If addStmt has a reference to tempNum (can only happen if we
               are spilling to the temps already used by a previous block),
               we need to spill addStmt */

            if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum))
            {
                GenTree* addTree = addStmt->GetRootNode();

                if (addTree->gtOper == GT_JTRUE)
                {
                    GenTree* relOp = addTree->AsOp()->gtOp1;
                    assert(relOp->OperIsCompare());

                    var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet());

                    if (gtHasRef(relOp->AsOp()->gtOp1, tempNum))
                    {
                        unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
                        impAssignTempGen(temp, relOp->AsOp()->gtOp1, level);
                        type                 = genActualType(lvaTable[temp].TypeGet());
                        relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type);
                    }

                    if (gtHasRef(relOp->AsOp()->gtOp2, tempNum))
                    {
                        unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
                        impAssignTempGen(temp, relOp->AsOp()->gtOp2, level);
                        type                 = genActualType(lvaTable[temp].TypeGet());
                        relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type);
                    }
                }
                else
                {
                    assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet()));

                    unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
                    impAssignTempGen(temp, addTree->AsOp()->gtOp1, level);
                    addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet()));
                }
            }

            /* Spill the stack entry, and replace with the temp */

            if (!impSpillStackEntry(level, tempNum
#ifdef DEBUG
                                    ,
                                    true, "Spill Stack Entry"
#endif
                                    ))
            {
                if (markImport)
                {
                    BADCODE("bad stack state");
                }

                // Oops. Something went wrong when spilling. Bad code.
                verHandleVerificationFailure(block DEBUGARG(true));

                goto SPILLSTACK;
            }
        }

        /* Put back the 'jtrue'/'switch' if we removed it earlier */

        if (addStmt != nullptr)
        {
            impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
        }
    }

    // Some of the append/spill logic works on compCurBB

    assert(compCurBB == block);

    /* Save the tree list in the block */
    impEndTreeList(block);

    // impEndTreeList sets BBF_IMPORTED on the block
    // We do *NOT* want to set it later than this because
    // impReimportSpillClique might clear it if this block is both a
    // predecessor and successor in the current spill clique
    assert(block->bbFlags & BBF_IMPORTED);

    // If we had a int/native int, or float/double collision, we need to re-import
    if (reimportSpillClique)
    {
        // This will re-import all the successors of block (as well as each of their predecessors)
        impReimportSpillClique(block);

        // For blocks that haven't been imported yet, we still need to mark them as pending import.
        for (BasicBlock* const succ : block->Succs())
        {
            if ((succ->bbFlags & BBF_IMPORTED) == 0)
            {
                impImportBlockPending(succ);
            }
        }
    }
    else // the normal case
    {
        // otherwise just import the successors of block

        /* Does this block jump to any other blocks?
        */
        for (BasicBlock* const succ : block->Succs())
        {
            impImportBlockPending(succ);
        }
    }
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
// (its "pre-state").

void Compiler::impImportBlockPending(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum);
    }
#endif

    // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
    // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
    // (When we're doing verification, we always attempt the merge to detect verification errors.)

    // If the block has not been imported, add to pending set.
    bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);

    // Initialize bbEntryState just the first time we try to add this block to the pending list
    // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set
    // We use NULL to indicate the 'common' state to avoid memory allocation
    if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
        (impGetPendingBlockMember(block) == 0))
    {
        verInitBBEntryState(block, &verCurrentState);
        assert(block->bbStkDepth == 0);
        block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
        assert(addToPending);
        assert(impGetPendingBlockMember(block) == 0);
    }
    else
    {
        // The stack should have the same height on entry to the block from all its predecessors.

        if (block->bbStkDepth != verCurrentState.esStackDepth)
        {
#ifdef DEBUG
            char buffer[400];
            sprintf_s(buffer, sizeof(buffer),
                      "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n"
                      "Previous depth was %d, current depth is %d",
                      block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
                      verCurrentState.esStackDepth);
            buffer[400 - 1] = 0;
            NO_WAY(buffer);
#else
            NO_WAY("Block entered with different stack depths");
#endif
        }

        if (!addToPending)
        {
            return;
        }

        if (block->bbStkDepth > 0)
        {
            // We need to fix the types of any spill temps that might have changed:
            //   int->native int, float->double, int->byref, etc.
            impRetypeEntryStateTemps(block);
        }

        // OK, we must add to the pending list, if it's not already in it.
        if (impGetPendingBlockMember(block) != 0)
        {
            return;
        }
    }

    // Get an entry to add to the pending list

    PendingDsc* dsc;

    if (impPendingFree)
    {
        // We can reuse one of the freed up dscs.
        dsc            = impPendingFree;
        impPendingFree = dsc->pdNext;
    }
    else
    {
        // We have to create a new dsc
        dsc = new (this, CMK_Unknown) PendingDsc;
    }

    dsc->pdBB                 = block;
    dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
    dsc->pdThisPtrInit        = verCurrentState.thisInitialized;

    // Save the stack trees for later

    if (verCurrentState.esStackDepth)
    {
        impSaveStackState(&dsc->pdSavedStack, false);
    }

    // Add the entry to the pending list

    dsc->pdNext    = impPendingList;
    impPendingList = dsc;
    impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.

    // Various assertions require us now to consider the block as not imported (at least for
    // the final time...)
    block->bbFlags &= ~BBF_IMPORTED;

#ifdef DEBUG
    if (verbose && 0)
    {
        printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
    }
#endif
}

// Free-list backed allocator for BlockListNode; nodes released via
// FreeBlockListNode are recycled before new memory is requested.
void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
    if (comp->impBlockListNodeFreeList == nullptr)
    {
        return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
    }
    else
    {
        BlockListNode* res             = comp->impBlockListNodeFreeList;
        comp->impBlockListNodeFreeList = res->m_next;
        return res;
    }
}

// Return a BlockListNode to the free list for reuse by operator new above.
void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
{
    node->m_next             = impBlockListNodeFreeList;
    impBlockListNodeFreeList = node;
}

// Walk the spill clique reachable from 'block' (treated as a predecessor),
// invoking 'callback' once per member. The clique is the closure of the
// alternating successor-of-predecessor / predecessor-of-successor relation,
// computed with two worklists until a fixed point is reached.
void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
{
    bool toDo = true;

    noway_assert(!fgComputePredsDone);
    if (!fgCheapPredsValid)
    {
        fgComputeCheapPreds();
    }

    BlockListNode* succCliqueToDo = nullptr;
    BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
    while (toDo)
    {
        toDo = false;
        // Look at the successors of every member of the predecessor to-do list.
        while (predCliqueToDo != nullptr)
        {
            BlockListNode* node = predCliqueToDo;
            predCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            for (BasicBlock* const succ : blk->Succs())
            {
                // If it's not already in the clique, add it, and also add it
                // as a member of the successor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
                {
                    callback->Visit(SpillCliqueSucc, succ);
                    impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
                    succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
                    toDo           = true;
                }
            }
        }
        // Look at the predecessors of every member of the successor to-do list.
        while (succCliqueToDo != nullptr)
        {
            BlockListNode* node = succCliqueToDo;
            succCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
            {
                BasicBlock* predBlock = pred->block;

                // If it's not already in the clique, add it, and also add it
                // as a member of the predecessor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
                {
                    callback->Visit(SpillCliquePred, predBlock);
                    impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
                    predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
                    toDo           = true;
                }
            }
        }
    }

    // If this fails, it means we didn't walk the spill clique properly and somehow managed
    // to miss walking back to include the predecessor we started from.
    // This most likely cause: missing or out of date bbPreds
    assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
}

// Spill-clique visitor: record the base spill temp number on each member --
// incoming temps for successors, outgoing temps for predecessors.
void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    if (predOrSucc == SpillCliqueSucc)
    {
        assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
        blk->bbStkTempsIn = m_baseTmp;
    }
    else
    {
        assert(predOrSucc == SpillCliquePred);
        assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
        blk->bbStkTempsOut = m_baseTmp;
    }
}

// Spill-clique visitor: queue members for re-importation after a spill temp's
// type was widened (int->native int, float->double, int->byref).
void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    // For Preds we could be a little smarter and just find the existing store
    // and re-type it/add a cast, but that is complicated and hopefully very rare, so
    // just re-import the whole block (just like we do for successors)

    if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
    {
        // If we haven't imported this block and we're not going to (because it isn't on
        // the pending list) then just ignore it for now.

        // This block has either never been imported (EntryState == NULL) or it failed
        // verification. Neither state requires us to force it to be imported now.
        assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
        return;
    }

    // For successors we have a valid verCurrentState, so just mark them for reimport
    // the 'normal' way
    // Unlike predecessors, we *DO* need to reimport the current block because the
    // initial import had the wrong entry state types.
    // Similarly, blocks that are currently on the pending list, still need to call
    // impImportBlockPending to fixup their entry state.
    if (predOrSucc == SpillCliqueSucc)
    {
        m_pComp->impReimportMarkBlock(blk);

        // Set the current stack state to that of the blk->bbEntryState
        m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
        assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());

        m_pComp->impImportBlockPending(blk);
    }
    else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
    {
        // As described above, we are only visiting predecessors so they can
        // add the appropriate casts, since we have already done that for the current
        // block, it does not need to be reimported.
        // Nor do we need to reimport blocks that are still pending, but not yet
        // imported.
        //
        // For predecessors, we have no state to seed the EntryState, so we just have
        // to assume the existing one is correct.
        // If the block is also a successor, it will get the EntryState properly
        // updated when it is visited as a successor in the above "if" block.
        assert(predOrSucc == SpillCliquePred);
        m_pComp->impReimportBlockPending(blk);
    }
}

// Re-type the incoming lclVar nodes to match the varDsc.
void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
{
    if (blk->bbEntryState != nullptr)
    {
        EntryState* es = blk->bbEntryState;
        for (unsigned level = 0; level < es->esStackDepth; level++)
        {
            GenTree* tree = es->esStack[level].val;
            if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
            {
                // Re-read the type from the local's descriptor, which may have changed.
                es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet();
            }
        }
    }
}

//------------------------------------------------------------------------
// impGetSpillTmpBase: get (or choose and record) the base local number used
// to spill the IL evaluation stack for 'block' and its whole spill clique.
//
// Returns the previously recorded bbStkTempsOut if one exists; otherwise
// grabs verCurrentState.esStackDepth temps and propagates the base to every
// clique member via the SetSpillTempsBase walker.
//
unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
{
    if (block->bbStkTempsOut != NO_BASE_TMP)
    {
        return block->bbStkTempsOut;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum);
    }
#endif // DEBUG

    // Otherwise, choose one, and propagate to all members of the spill clique.
    // Grab enough temps for the whole stack.
    unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
    SetSpillTempsBase callback(baseTmp);

    // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
    // to one spill clique, and similarly can only be the successor to one spill clique.
    impWalkSpillCliqueFromPred(block, &callback);

    return baseTmp;
}

//------------------------------------------------------------------------
// impReimportSpillClique: schedule every member of 'block's spill clique
// for re-import (used when stack slot types turned out to be inconsistent).
//
void Compiler::impReimportSpillClique(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum);
    }
#endif // DEBUG

    // If we get here, it is because this block is already part of a spill clique
    // and one predecessor had an outgoing live stack slot of type int, and this
    // block has an outgoing live stack slot of type native int.
    // We need to reset these before traversal because they have already been set
    // by the previous walk to determine all the members of the spill clique.
    impInlineRoot()->impSpillCliquePredMembers.Reset();
    impInlineRoot()->impSpillCliqueSuccMembers.Reset();

    ReimportSpillClique callback(this);

    impWalkSpillCliqueFromPred(block, &callback);
}

// Set the pre-state of "block" (which should not have a pre-state allocated) to
// a copy of "srcState", cloning tree pointers as required.
void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
{
    // An empty stack with bottom 'this'-init state needs no allocation at all.
    if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
    {
        block->bbEntryState = nullptr;
        return;
    }

    block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);

    // block->bbEntryState.esRefcount = 1;
    block->bbEntryState->esStackDepth    = srcState->esStackDepth;
    block->bbEntryState->thisInitialized = TIS_Bottom;

    if (srcState->esStackDepth > 0)
    {
        block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
        unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);

        // Shallow-copy the entries, then deep-clone each tree so the block's
        // entry state does not alias trees owned by the current state.
        memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
        for (unsigned level = 0; level < srcState->esStackDepth; level++)
        {
            GenTree* tree                           = srcState->esStack[level].val;
            block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
        }
    }

    if (verTrackObjCtorInitState)
    {
        verSetThisInit(block, srcState->thisInitialized);
    }

    return;
}

//------------------------------------------------------------------------
// verSetThisInit: record the 'this'-initialization state on entry to
// 'block', allocating an entry state if the block has none yet.
//
void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
{
    assert(tis != TIS_Bottom); // Precondition.
    if (block->bbEntryState == nullptr)
    {
        block->bbEntryState = new (this, CMK_Unknown) EntryState();
    }

    block->bbEntryState->thisInitialized = tis;
}

/*
 * Resets the current state to the state at the start of the basic block.
 */
void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
{
    // A null entry state means "empty stack, bottom this-init".
    if (block->bbEntryState == nullptr)
    {
        destState->esStackDepth    = 0;
        destState->thisInitialized = TIS_Bottom;
        return;
    }

    destState->esStackDepth = block->bbEntryState->esStackDepth;

    if (destState->esStackDepth > 0)
    {
        unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);

        memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
    }

    destState->thisInitialized = block->bbThisOnEntry();

    return;
}

// 'this'-init state on entry to this block; TIS_Bottom when no entry state exists.
ThisInitState BasicBlock::bbThisOnEntry() const
{
    return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
}

// IL stack depth on entry to this block; zero when no entry state exists.
unsigned BasicBlock::bbStackDepthOnEntry() const
{
    return (bbEntryState ? bbEntryState->esStackDepth : 0);
}

// Install 'stackBuffer' as this block's entry-state stack storage.
void BasicBlock::bbSetStack(void* stackBuffer)
{
    assert(bbEntryState);
    assert(stackBuffer);
    bbEntryState->esStack = (StackEntry*)stackBuffer;
}

// Entry-state stack storage; entry state must already exist.
StackEntry* BasicBlock::bbStackOnEntry() const
{
    assert(bbEntryState);
    return bbEntryState->esStack;
}

//------------------------------------------------------------------------
// verInitCurrentState: reset verCurrentState for the start of the method
// and seed the first basic block's entry state from it.
//
void Compiler::verInitCurrentState()
{
    verTrackObjCtorInitState        = false;
    verCurrentState.thisInitialized = TIS_Bottom;

    // initialize stack info
    verCurrentState.esStackDepth = 0;
    assert(verCurrentState.esStack != nullptr);

    // copy current state to entry state of first BB
    verInitBBEntryState(fgFirstBB, &verCurrentState);
}

//------------------------------------------------------------------------
// impInlineRoot: the root compiler of the inlining tree ('this' when not
// compiling an inlinee).
//
Compiler* Compiler::impInlineRoot()
{
    if (impInlineInfo == nullptr)
    {
        return this;
    }
    else
    {
        return impInlineInfo->InlineRoot;
    }
}

//------------------------------------------------------------------------
// impSpillCliqueGetMember: read this block's membership mark for the given
// spill-clique direction (stored on the inline root).
//
BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    if (predOrSucc == SpillCliquePred)
    {
        return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
    }
    else
    {
        assert(predOrSucc == SpillCliqueSucc);
        return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
    }
}

void
Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
{
    // Membership is tracked on the inline root; see impSpillCliqueGetMember.
    if (predOrSucc == SpillCliquePred)
    {
        impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
    }
    else
    {
        assert(predOrSucc == SpillCliqueSucc);
        impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
    }
}

/*****************************************************************************
 *
 *  Convert the instrs ("import") into our internal format (trees). The
 *  basic flowgraph has already been constructed and is passed in.
 */

void Compiler::impImport()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In impImport() for %s\n", info.compFullName);
    }
#endif

    Compiler* inlineRoot = impInlineRoot();

    // Size the IL evaluation stack from compMaxStack, with a small floor.
    if (info.compMaxStack <= SMALL_STACK_SIZE)
    {
        impStkSize = SMALL_STACK_SIZE;
    }
    else
    {
        impStkSize = info.compMaxStack;
    }

    if (this == inlineRoot)
    {
        // Allocate the stack contents
        verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
    }
    else
    {
        // This is the inlinee compiler, steal the stack from the inliner compiler
        // (after ensuring that it is large enough).
        if (inlineRoot->impStkSize < impStkSize)
        {
            inlineRoot->impStkSize              = impStkSize;
            inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
        }

        verCurrentState.esStack = inlineRoot->verCurrentState.esStack;
    }

    // initialize the entry state at start of method
    verInitCurrentState();

    // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
    if (this == inlineRoot) // These are only used on the root of the inlining tree.
    {
        // We have initialized these previously, but to size 0. Make them larger.
        impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
        impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
        impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
    }
    inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
    inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
    inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
    impBlockListNodeFreeList = nullptr;

#ifdef DEBUG
    impLastILoffsStmt   = nullptr;
    impNestedStackSpill = false;
#endif
    impBoxTemp = BAD_VAR_NUM;

    impPendingList = impPendingFree = nullptr;

    // Skip leading internal blocks.
    // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects.
    //
    BasicBlock* entryBlock = fgFirstBB;

    while (entryBlock->bbFlags & BBF_INTERNAL)
    {
        JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum);
        entryBlock->bbFlags |= BBF_IMPORTED;

        if (entryBlock->bbJumpKind == BBJ_NONE)
        {
            entryBlock = entryBlock->bbNext;
        }
        else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS))
        {
            entryBlock = entryBlock->bbJumpDest;
        }
        else
        {
            assert(!"unexpected bbJumpKind in entry sequence");
        }
    }

    // Note for OSR we'd like to be able to verify this block must be
    // stack empty, but won't know that until we've imported...so instead
    // we'll BADCODE out if we mess up.
    //
    // (the concern here is that the runtime asks us to OSR a
    // different IL version than the one that matched the method that
    // triggered OSR).  This should not happen but I might have the
    // IL versioning stuff wrong.
    //
    // TODO: we also currently expect this block to be a join point,
    // which we should verify over when we find jump targets.
    impImportBlockPending(entryBlock);

    /* Import blocks in the worker-list until there are no more */

    while (impPendingList)
    {
        /* Remove the entry at the front of the list */

        PendingDsc* dsc = impPendingList;
        impPendingList  = impPendingList->pdNext;
        impSetPendingBlockMember(dsc->pdBB, 0);

        /* Restore the stack state */

        verCurrentState.thisInitialized = dsc->pdThisPtrInit;
        verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
        if (verCurrentState.esStackDepth)
        {
            impRestoreStackState(&dsc->pdSavedStack);
        }

        /* Add the entry to the free list for reuse */

        dsc->pdNext    = impPendingFree;
        impPendingFree = dsc;

        /* Now import the block */

        if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
        {
            verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
            impEndTreeList(dsc->pdBB);
        }
        else
        {
            impImportBlock(dsc->pdBB);

            if (compDonotInline())
            {
                return;
            }
            if (compIsForImportOnly())
            {
                return;
            }
        }
    }

#ifdef DEBUG
    if (verbose && info.compXcptnsCount)
    {
        printf("\nAfter impImport() added block for try,catch,finally");
        fgDispBasicBlocks();
        printf("\n");
    }

    // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
    for (BasicBlock* const block : Blocks())
    {
        block->bbFlags &= ~BBF_VISITED;
    }
#endif
}

// Checks if a typeinfo (usually stored in the type stack) is a struct.
// The invariant here is that if it's not a ref or a method and has a class handle
// it's a valuetype.
bool Compiler::impIsValueType(typeInfo* pTypeInfo)
{
    if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
    {
        return true;
    }
    else
    {
        return false;
    }
}

/*****************************************************************************
 *  Check to see if the tree is the address of a local or
    the address of a field in a local.

    *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true.
*/

bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut)
{
    if (tree->gtOper != GT_ADDR)
    {
        return false;
    }

    // Walk down through any chain of field accesses to find the object whose
    // address is ultimately being taken.
    GenTree* op = tree->AsOp()->gtOp1;
    while (op->gtOper == GT_FIELD)
    {
        op = op->AsField()->GetFldObj();
        if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
        {
            op = op->AsOp()->gtOp1;
        }
        else
        {
            return false;
        }
    }

    if (op->gtOper == GT_LCL_VAR)
    {
        if (lclVarTreeOut != nullptr)
        {
            *lclVarTreeOut = op;
        }
        return true;
    }
    else
    {
        return false;
    }
}

//------------------------------------------------------------------------
// impMakeDiscretionaryInlineObservations: make observations that help
// determine the profitability of a discretionary inline
//
// Arguments:
//    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
//    inlineResult -- InlineResult accumulating information about this inline
//
// Notes:
//    If inlining or prejitting the root, this method also makes
//    various observations about the method that factor into inline
//    decisions. It sets `compNativeSizeEstimate` as a side effect.

void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
{
    assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining.
           (pInlineInfo == nullptr && !compIsForInlining())   // Calculate the static inlining hint for ngen.
           );

    // If we're really inlining, we should just have one result in play.
    assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));

    // If this is a "forceinline" method, the JIT probably shouldn't have gone
    // to the trouble of estimating the native code size. Even if it did, it
    // shouldn't be relying on the result of this method.
    assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);

    // Note if the caller contains NEWOBJ or NEWARR.
    Compiler* rootCompiler = impInlineRoot();

    if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
    {
        inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
    }

    if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
    {
        inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
    }

    bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
    bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;

    if (isSpecialMethod)
    {
        if (calleeIsStatic)
        {
            inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
        }
        else
        {
            inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
        }
    }
    else if (!calleeIsStatic)
    {
        // Callee is an instance method.
        //
        // Check if the callee has the same 'this' as the root.
        if (pInlineInfo != nullptr)
        {
            GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode();
            assert(thisArg);
            bool isSameThis = impIsThis(thisArg);
            inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
        }
    }

    // Note when a non-generic callsite invokes a generic callee.
    bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) ||
                             (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0);

    bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) ||
                           (info.compMethodInfo->args.sigInst.classInstCount != 0);

    if (!callsiteIsGeneric && calleeIsGeneric)
    {
        inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC);
    }

    // Inspect callee's arguments (and the actual values at the callsite for them)
    CORINFO_SIG_INFO        sig    = info.compMethodInfo->args;
    CORINFO_ARG_LIST_HANDLE sigArg = sig.args;

    GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs;

    for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++)
    {
        CORINFO_CLASS_HANDLE sigClass;
        CorInfoType          corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass));

        GenTree* argNode = argUse == nullptr ? nullptr : argUse->GetNode()->gtSkipPutArgType();

        if (corType == CORINFO_TYPE_CLASS)
        {
            sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
        }
        else if (corType == CORINFO_TYPE_VALUECLASS)
        {
            inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT);
        }
        else if (corType == CORINFO_TYPE_BYREF)
        {
            // For byrefs, observe the pointed-to type instead.
            sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
            corType  = info.compCompHnd->getChildType(sigClass, &sigClass);
        }

        if (argNode != nullptr)
        {
            bool                 isExact   = false;
            bool                 isNonNull = false;
            CORINFO_CLASS_HANDLE argCls    = gtGetClassHandle(argNode, &isExact, &isNonNull);

            if (argCls != nullptr)
            {
                const bool isArgValueType = eeIsValueClass(argCls);
                // Exact class of the arg is known
                if (isExact && !isArgValueType)
                {
                    inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS);
                    if ((argCls != sigClass) && (sigClass != nullptr))
                    {
                        // .. but the signature accepts a less concrete type.
                        inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT);
                    }
                }
                // Arg is a reference type in the signature and a boxed value type was passed.
                else if (isArgValueType && (corType == CORINFO_TYPE_CLASS))
                {
                    inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED);
                }
            }

            if (argNode->OperIsConst())
            {
                inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST);
            }

            argUse = argUse->GetNext();
        }

        sigArg = info.compCompHnd->getArgNext(sigArg);
    }

    // Note if the callee's return type is a value type
    if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS)
    {
        inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT);
    }

    // Note if the callee's class is a promotable struct
    if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
    {
        assert(structPromotionHelper != nullptr);
        if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
        {
            inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
        }
        inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE);
    }

#ifdef FEATURE_SIMD

    // Note if this method has SIMD args or return value
    if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
    {
        inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
    }

#endif // FEATURE_SIMD

    // Roughly classify callsite frequency.
    InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;

    // If this is a prejit root, or a maximally hot block...
    if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight()))
    {
        frequency = InlineCallsiteFrequency::HOT;
    }
    // No training data.  Look for loop-like things.
    // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
    // However, give it to things nearby.
    else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
             (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
    {
        frequency = InlineCallsiteFrequency::LOOP;
    }
    else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
    {
        frequency = InlineCallsiteFrequency::WARM;
    }
    // Now modify the multiplier based on where we're called from.
    else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
    {
        frequency = InlineCallsiteFrequency::RARE;
    }
    else
    {
        frequency = InlineCallsiteFrequency::BORING;
    }

    // Also capture the block weight of the call site.
    //
    // In the prejit root case, assume at runtime there might be a hot call site
    // for this method, so we won't prematurely conclude this method should never
    // be inlined.
    //
    weight_t weight = 0;

    if (pInlineInfo != nullptr)
    {
        weight = pInlineInfo->iciBlock->bbWeight;
    }
    else
    {
        const weight_t prejitHotCallerWeight = 1000000.0;
        weight                               = prejitHotCallerWeight;
    }

    inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
    inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight));

    bool   hasProfile  = false;
    double profileFreq = 0.0;

    // If the call site has profile data, report the relative frequency of the site.
    //
    if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData())
    {
        const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight;
        const weight_t entryWeight    = rootCompiler->fgFirstBB->bbWeight;
        profileFreq                   = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight;
        hasProfile                    = true;

        assert(callSiteWeight >= 0);
        assert(entryWeight >= 0);
    }
    else if (pInlineInfo == nullptr)
    {
        // Simulate a hot callsite for PrejitRoot mode.
        hasProfile  = true;
        profileFreq = 1.0;
    }

    inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile);
    inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq);
}

/*****************************************************************************
 This method makes STATIC inlining decision based on the IL code.
 It should not make any inlining decision based on the context.
 If forceInline is true, then the inlining decision should not depend on
 performance heuristics (code size, etc.).
*/

void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
                              CORINFO_METHOD_INFO*  methInfo,
                              bool                  forceInline,
                              InlineResult*         inlineResult)
{
    unsigned codeSize = methInfo->ILCodeSize;

    // We shouldn't have made up our minds yet...
    assert(!inlineResult->IsDecided());

    if (methInfo->EHcount)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
        return;
    }

    if ((methInfo->ILCode == nullptr) || (codeSize == 0))
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
        return;
    }

    // For now we don't inline varargs (import code can't handle it)

    if (methInfo->args.isVarArg())
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
        return;
    }

    // Reject if it has too many locals.
    // This is currently an implementation limit due to fixed-size arrays in the
    // inline info, rather than a performance heuristic.

    inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);

    if (methInfo->locals.numArgs > MAX_INL_LCLS)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
        return;
    }

    // Make sure there aren't too many arguments.
    // This is currently an implementation limit due to fixed-size arrays in the
    // inline info, rather than a performance heuristic.

    inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);

    if (methInfo->args.numArgs > MAX_INL_ARGS)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
        return;
    }

    // Note force inline state

    inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);

    // Note IL code size

    inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);

    if (inlineResult->IsFailure())
    {
        return;
    }

    // Make sure maxstack is not too big

    inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);

    if (inlineResult->IsFailure())
    {
        return;
    }
}

/*****************************************************************************
 *  impCheckCanInline: run the JIT-side and EE-side checks that decide
 *  whether 'call' to 'fncHandle' may be inlined, and on success build the
 *  InlineCandidateInfo returned through 'ppInlineCandidateInfo'.
 *
 *  All EE queries run under an error trap; any exception is converted into
 *  a CALLSITE_COMPILATION_ERROR on 'inlineResult' rather than propagated.
 */

void Compiler::impCheckCanInline(GenTreeCall*           call,
                                 CORINFO_METHOD_HANDLE  fncHandle,
                                 unsigned               methAttr,
                                 CORINFO_CONTEXT_HANDLE exactContextHnd,
                                 InlineCandidateInfo**  ppInlineCandidateInfo,
                                 InlineResult*          inlineResult)
{
    // Either EE or JIT might throw exceptions below.
    // If that happens, just don't inline the method.

    struct Param
    {
        Compiler*              pThis;
        GenTreeCall*           call;
        CORINFO_METHOD_HANDLE  fncHandle;
        unsigned               methAttr;
        CORINFO_CONTEXT_HANDLE exactContextHnd;
        InlineResult*          result;
        InlineCandidateInfo**  ppInlineCandidateInfo;
    } param;
    memset(&param, 0, sizeof(param));

    param.pThis     = this;
    param.call      = call;
    param.fncHandle = fncHandle;
    param.methAttr  = methAttr;
    // Default to a method context built from the callee when no exact context was supplied.
    param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);

    param.result                = inlineResult;
    param.ppInlineCandidateInfo = ppInlineCandidateInfo;

    bool success = eeRunWithErrorTrap<Param>(
        [](Param* pParam) {
            CorInfoInitClassResult initClassResult;

#ifdef DEBUG
            const char* methodName;
            const char* className;
            methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);

            if (JitConfig.JitNoInline())
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
                goto _exit;
            }
#endif

            /* Try to get the code address/size for the method */

            CORINFO_METHOD_INFO methInfo;
            if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
                goto _exit;
            }

            // Profile data allows us to avoid early "too many IL bytes" outs.
            pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE,
                                     pParam->pThis->fgHaveSufficientProfileData());

            bool forceInline;
            forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);

            pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);

            if (pParam->result->IsFailure())
            {
                assert(pParam->result->IsNever());
                goto _exit;
            }

            // Speculatively check if initClass() can be done.
            // If it can be done, we will try to inline the method.
            initClassResult =
                pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
                                                           pParam->exactContextHnd /* context */);

            if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
            {
                pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT);
                goto _exit;
            }

            // Give the EE the final say in whether to inline or not.
            // This should be last since for verifiable code, this can be expensive

            /* VM Inline check also ensures that the method is verifiable if needed */
            CorInfoInline vmResult;
            vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle);

            if (vmResult == INLINE_FAIL)
            {
                pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
            }
            else if (vmResult == INLINE_NEVER)
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
            }

            if (pParam->result->IsFailure())
            {
                // Make sure not to report this one. It was already reported by the VM.
                pParam->result->SetReported();
                goto _exit;
            }

            /* Get the method properties */

            CORINFO_CLASS_HANDLE clsHandle;
            clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
            unsigned clsAttr;
            clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);

            /* Get the return type */

            var_types fncRetType;
            fncRetType = pParam->call->TypeGet();

#ifdef DEBUG
            var_types fncRealRetType;
            fncRealRetType = JITtype2varType(methInfo.args.retType);

            assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
                   // <BUGNUM> VSW 288602 </BUGNUM>
                   // In case of IJW, we allow to assign a native pointer to a BYREF.
                   (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
                   (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
#endif

            // Allocate an InlineCandidateInfo structure,
            //
            // Or, reuse the existing GuardedDevirtualizationCandidateInfo,
            // which was pre-allocated to have extra room.
            //
            InlineCandidateInfo* pInfo;

            if (pParam->call->IsGuardedDevirtualizationCandidate())
            {
                pInfo = pParam->call->gtInlineCandidateInfo;
            }
            else
            {
                pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;

                // Null out bits we don't use when we're just inlining
                pInfo->guardedClassHandle              = nullptr;
                pInfo->guardedMethodHandle             = nullptr;
                pInfo->guardedMethodUnboxedEntryHandle = nullptr;
                pInfo->likelihood                      = 0;
                pInfo->requiresInstMethodTableArg      = false;
            }

            pInfo->methInfo                       = methInfo;
            pInfo->ilCallerHandle                 = pParam->pThis->info.compMethodHnd;
            pInfo->clsHandle                      = clsHandle;
            pInfo->exactContextHnd                = pParam->exactContextHnd;
            pInfo->retExpr                        = nullptr;
            pInfo->preexistingSpillTemp           = BAD_VAR_NUM;
            pInfo->clsAttr                        = clsAttr;
            pInfo->methAttr                       = pParam->methAttr;
            pInfo->initClassResult                = initClassResult;
            pInfo->fncRetType                     = fncRetType;
            pInfo->exactContextNeedsRuntimeLookup = false;
            pInfo->inlinersContext                = pParam->pThis->compInlineContext;

            // Note exactContextNeedsRuntimeLookup is reset later on,
            // over in impMarkInlineCandidate.

            *(pParam->ppInlineCandidateInfo) = pInfo;

        _exit:;
        },
        &param);

    if (!success)
    {
        param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
    }
}

//------------------------------------------------------------------------
// impInlineRecordArgInfo: record information about an inline candidate argument
//
// Arguments:
//    pInlineInfo - inline info for the inline candidate
//    curArgVal - tree for the caller actual argument value
//    argNum - logical index of this argument
//    inlineResult - result of ongoing inline evaluation
//
// Notes:
//
//    Checks for various inline blocking conditions and makes notes in
//    the inline info arg table about the properties of the actual. These
//    properties are used later by impInlineFetchArg to determine how best to
//    pass the argument into the inlinee.
void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
                                      GenTree*      curArgVal,
                                      unsigned      argNum,
                                      InlineResult* inlineResult)
{
    InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];

    inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR.

    // The checks below look through PUT_ARG wrappers and RET_EXPR placeholders.
    curArgVal = curArgVal->gtSkipPutArgType();
    curArgVal = curArgVal->gtRetExprVal();

    if (curArgVal->gtOper == GT_MKREFANY)
    {
        inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
        return;
    }

    GenTree* lclVarTree;

    const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree);
    if (isAddressInLocal && varTypeIsStruct(lclVarTree))
    {
        inlCurArgInfo->argIsByRefToStructLocal = true;
#ifdef FEATURE_SIMD
        if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType)
        {
            pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
        }
#endif // FEATURE_SIMD
    }

    if (curArgVal->gtFlags & GTF_ALL_EFFECT)
    {
        inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
        inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
    }

    if (curArgVal->gtOper == GT_LCL_VAR)
    {
        inlCurArgInfo->argIsLclVar = true;

        /* Remember the "original" argument number */
        INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;)
    }

    if (curArgVal->IsInvariant())
    {
        inlCurArgInfo->argIsInvariant = true;
        // A constant-null 'this' means the inlinee body would always fault.
        if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0))
        {
            // Abort inlining at this call site
            inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
            return;
        }
    }

    bool isExact              = false;
    bool isNonNull            = false;
    inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact;

    // If the arg is a local that is address-taken, we can't safely
    // directly substitute it into the inlinee.
    //
    // Previously we'd accomplish this by setting "argHasLdargaOp" but
    // that has a stronger meaning: that the arg value can change in
    // the method body. Using that flag prevents type propagation,
    // which is safe in this case.
    //
    // Instead mark the arg as having a caller local ref.
    if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
    {
        inlCurArgInfo->argHasCallerLocalRef = true;
    }

#ifdef DEBUG
    if (verbose)
    {
        if (inlCurArgInfo->argIsThis)
        {
            printf("thisArg:");
        }
        else
        {
            printf("\nArgument #%u:", argNum);
        }
        if (inlCurArgInfo->argIsLclVar)
        {
            printf(" is a local var");
        }
        if (inlCurArgInfo->argIsInvariant)
        {
            printf(" is a constant");
        }
        if (inlCurArgInfo->argHasGlobRef)
        {
            printf(" has global refs");
        }
        if (inlCurArgInfo->argHasCallerLocalRef)
        {
            printf(" has caller local ref");
        }
        if (inlCurArgInfo->argHasSideEff)
        {
            printf(" has side effects");
        }
        if (inlCurArgInfo->argHasLdargaOp)
        {
            printf(" has ldarga effect");
        }
        if (inlCurArgInfo->argHasStargOp)
        {
            printf(" has starg effect");
        }
        if (inlCurArgInfo->argIsByRefToStructLocal)
        {
            printf(" is byref to a struct local");
        }

        printf("\n");
        gtDispTree(curArgVal);
        printf("\n");
    }
#endif
}

//------------------------------------------------------------------------
// impInlineInitVars: setup inline information for inlinee args and locals
//
// Arguments:
//    pInlineInfo - inline info for the inline candidate
//
// Notes:
//    This method primarily adds caller-supplied info to the inlArgInfo
//    and sets up the lclVarInfo table.
//
//    For args, the inlArgInfo records properties of the actual argument
//    including the tree node that produces the arg value. This node is
//    usually the tree node present at the call, but may also differ in
//    various ways:
//    - when the call arg is a GT_RET_EXPR, we search back through the ret
//      expr chain for the actual node. Note this will either be the original
//      call (which will be a failed inline by this point), or the return
//      expression from some set of inlines.
//    - when argument type casting is needed the necessary casts are added
//      around the argument node.
//    - if an argument can be simplified by folding then the node here is the
//      folded value.
//
//    The method may make observations that lead to marking this candidate as
//    a failed inline. If this happens the initialization is abandoned immediately
//    to try and reduce the jit time cost for a failed inline.
void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
{
    assert(!compIsForInlining());

    GenTreeCall*         call         = pInlineInfo->iciCall;
    CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
    unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
    InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
    InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
    InlineResult*        inlineResult = pInlineInfo->inlineResult;

    // Inlined methods always use the managed calling convention
    const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed);

    /* init the argument struct */
    memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));

    GenTreeCall::Use* thisArg = call->gtCallThisArg;
    unsigned          argCnt  = 0; // Count of the arguments

    assert((methInfo->args.hasThis()) == (thisArg != nullptr));

    if (thisArg != nullptr)
    {
        inlArgInfo[0].argIsThis = true;
        impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult);

        if (inlineResult->IsFailure())
        {
            return;
        }

        /* Increment the argument count */
        argCnt++;
    }

    /* Record some information about each of the arguments */
    bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;

#if USER_ARGS_COME_LAST
    unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0;
#else  // USER_ARGS_COME_LAST
    unsigned typeCtxtArg = methInfo->args.totalILArgs();
#endif // USER_ARGS_COME_LAST

    for (GenTreeCall::Use& use : call->Args())
    {
        // The return-buffer arg is not an IL-level argument; skip it.
        if (hasRetBuffArg && (&use == call->gtCallArgs))
        {
            continue;
        }

        // Ignore the type context argument
        if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
        {
            pInlineInfo->typeContextArg = typeCtxtArg;
            // Sentinel so no later position can match the context arg again.
            typeCtxtArg = 0xFFFFFFFF;
            continue;
        }

        GenTree* actualArg = gtFoldExpr(use.GetNode());
        impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);

        if (inlineResult->IsFailure())
        {
            return;
        }

        /* Increment the argument count */
        argCnt++;
    }

    /* Make sure we got the arg number right */
    assert(argCnt == methInfo->args.totalILArgs());

#ifdef FEATURE_SIMD
    bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
#endif // FEATURE_SIMD

    /* We have typeless opcodes, get type information from the signature */

    if (thisArg != nullptr)
    {
        lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
        lclVarInfo[0].lclHasLdlocaOp = false;

#ifdef FEATURE_SIMD
        // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
        // the inlining multiplier) for anything in that assembly.
        // But we only need to normalize it if it is a TYP_STRUCT
        // (which we need to do even if we have already set foundSIMDType).
        if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
        {
            foundSIMDType = true;
        }
#endif // FEATURE_SIMD

        // For a value class, "this" is a byref to the (unboxed) instance.
        var_types sigType         = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF;
        lclVarInfo[0].lclTypeInfo = sigType;

        GenTree* thisArgNode = thisArg->GetNode();

        assert(varTypeIsGC(thisArgNode->TypeGet()) ||     // "this" is managed
               ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesn't care
                (clsAttr & CORINFO_FLG_VALUECLASS)));

        if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType))
        {
            if (sigType == TYP_REF)
            {
                /* The argument cannot be bashed into a ref (see bug 750871) */
                inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
                return;
            }

            /* This can only happen with byrefs <-> ints/shorts */

            assert(sigType == TYP_BYREF);
            assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF));

            lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
        }
    }

    /* Init the types of the arguments and make sure the types
     * from the trees match the types in the signature */

    CORINFO_ARG_LIST_HANDLE argLst;
    argLst = methInfo->args.args;

    unsigned i;
    for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
    {
        var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);

        lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);

#ifdef FEATURE_SIMD
        if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
        {
            // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
            // found a SIMD type, even if this may not be a type we recognize (the assumption is that
            // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
            foundSIMDType = true;
            if (sigType == TYP_STRUCT)
            {
                var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
                sigType              = structType;
            }
        }
#endif // FEATURE_SIMD

        lclVarInfo[i].lclTypeInfo    = sigType;
        lclVarInfo[i].lclHasLdlocaOp = false;

        /* Does the tree type match the signature type? */

        GenTree* inlArgNode = inlArgInfo[i].argNode;

        if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE))
        {
            assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType));
            assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType));

            /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
               but in bad IL cases with caller-callee signature mismatches we can see other types.
               Intentionally reject cases with mismatches so the jit is more flexible when
               encountering bad IL. */

            bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
                                        (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
                                        (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));

            if (!isPlausibleTypeMatch)
            {
                inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
                return;
            }

            // pInlArgNode points at the tree slot we may rewrite below.
            GenTree** pInlArgNode;
            if (inlArgNode->OperIs(GT_PUTARG_TYPE))
            {
                // There was a widening or narrowing cast.
                GenTreeUnOp* putArgType = inlArgNode->AsUnOp();
                pInlArgNode             = &putArgType->gtOp1;
                inlArgNode              = putArgType->gtOp1;
            }
            else
            {
                // The same size but different type of the arguments.
                pInlArgNode = &inlArgInfo[i].argNode;
            }

            /* Is it a narrowing or widening cast?
             * Widening casts are ok since the value computed is already
             * normalized to an int (on the IL stack) */
            if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
            {
                if (sigType == TYP_BYREF)
                {
                    lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
                }
                else if (inlArgNode->gtType == TYP_BYREF)
                {
                    assert(varTypeIsIntOrI(sigType));

                    /* If possible bash the BYREF to an int */
                    if (inlArgNode->IsLocalAddrExpr() != nullptr)
                    {
                        inlArgNode->gtType           = TYP_I_IMPL;
                        lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
                    }
                    else
                    {
                        /* Arguments 'int <- byref' cannot be changed */
                        inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
                        return;
                    }
                }
                else if (genTypeSize(sigType) < TARGET_POINTER_SIZE)
                {
                    // Narrowing cast.
                    if (inlArgNode->OperIs(GT_LCL_VAR))
                    {
                        const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum();
                        if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum))
                        {
                            // We don't need to insert a cast here as the variable
                            // was assigned a normalized value of the right type.
                            continue;
                        }
                    }

                    inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);

                    inlArgInfo[i].argIsLclVar = false;

                    // Try to fold the node in case we have constant arguments.
                    if (inlArgInfo[i].argIsInvariant)
                    {
                        inlArgNode = gtFoldExprConst(inlArgNode);
                        assert(inlArgNode->OperIsConst());
                    }
                    *pInlArgNode = inlArgNode;
                }
#ifdef TARGET_64BIT
                else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
                {
                    // This should only happen for int -> native int widening
                    inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);

                    inlArgInfo[i].argIsLclVar = false;

                    /* Try to fold the node in case we have constant arguments */
                    if (inlArgInfo[i].argIsInvariant)
                    {
                        inlArgNode = gtFoldExprConst(inlArgNode);
                        assert(inlArgNode->OperIsConst());
                    }
                    *pInlArgNode = inlArgNode;
                }
#endif // TARGET_64BIT
            }
        }
    }

    /* Init the types of the local variables */

    CORINFO_ARG_LIST_HANDLE localsSig;
    localsSig = methInfo->locals.args;

    for (i = 0; i < methInfo->locals.numArgs; i++)
    {
        bool      isPinned;
        var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);

        // Inlinee locals are appended after the args in lclVarInfo.
        lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
        lclVarInfo[i + argCnt].lclTypeInfo    = type;

        if (varTypeIsGC(type))
        {
            if (isPinned)
            {
                JITDUMP("Inlinee local #%02u is pinned\n", i);
                lclVarInfo[i + argCnt].lclIsPinned = true;

                // Pinned locals may cause inlines to fail.
                inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
                if (inlineResult->IsFailure())
                {
                    return;
                }
            }

            pInlineInfo->numberOfGcRefLocals++;
        }
        else if (isPinned)
        {
            JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i);
        }

        lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);

        // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
        // out on the inline.
        if (type == TYP_STRUCT)
        {
            CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
            DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
            if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
            {
                inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
                if (inlineResult->IsFailure())
                {
                    return;
                }

                // Do further notification in the case where the call site is rare; some policies do
                // not track the relative hotness of call sites for "always" inline cases.
                if (pInlineInfo->iciBlock->isRunRarely())
                {
                    inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
                    if (inlineResult->IsFailure())
                    {
                        return;
                    }
                }
            }
        }

        localsSig = info.compCompHnd->getArgNext(localsSig);

#ifdef FEATURE_SIMD
        if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
        {
            foundSIMDType = true;
            if (supportSIMDTypes() && type == TYP_STRUCT)
            {
                var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
                lclVarInfo[i + argCnt].lclTypeInfo = structType;
            }
        }
#endif // FEATURE_SIMD
    }

#ifdef FEATURE_SIMD
    // A SIMD return type also counts toward the inlining multiplier bump.
    if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
    {
        foundSIMDType = true;
    }
    pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
#endif // FEATURE_SIMD
}

//------------------------------------------------------------------------
// impInlineFetchLocal: get a local var that represents an inlinee local
//
// Arguments:
//    lclNum -- number of the inlinee local
//    reason -- debug string describing purpose of the local var
//
// Returns:
//    Number of the local to use
//
// Notes:
//    This method is invoked only for locals actually used in the
//    inlinee body.
//
//    Allocates a new temp if necessary, and copies key properties
//    over from the inlinee local var info.
unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
{
    assert(compIsForInlining());

    // lclTmpNum caches the caller-side temp chosen for each inlinee local.
    unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];

    if (tmpNum == BAD_VAR_NUM)
    {
        // First use of this inlinee local: allocate a caller temp for it.
        const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
        const var_types      lclTyp       = inlineeLocal.lclTypeInfo;

        // The lifetime of this local might span multiple BBs.
        // So it is a long lifetime local.
        impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));

        // Copy over key info
        lvaTable[tmpNum].lvType                 = lclTyp;
        lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
        lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
        lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
        lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;

        // Copy over class handle for ref types. Note this may be a
        // shared type -- someday perhaps we can get the exact
        // signature and pass in a more precise type.
        if (lclTyp == TYP_REF)
        {
            assert(lvaTable[tmpNum].lvSingleDef == 0);

            // Single-def when the inlinee stores it once and never takes its address.
            lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp;
            if (lvaTable[tmpNum].lvSingleDef)
            {
                JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
            }

            lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
        }

        if (inlineeLocal.lclVerTypeInfo.IsStruct())
        {
            if (varTypeIsStruct(lclTyp))
            {
                lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
            }
            else
            {
                // This is a wrapped primitive. Make sure the verstate knows that
                lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
            }
        }

#ifdef DEBUG
        // Sanity check that we're properly prepared for gc ref locals.
        if (varTypeIsGC(lclTyp))
        {
            // Since there are gc locals we should have seen them earlier
            // and if there was a return value, set up the spill temp.
            assert(impInlineInfo->HasGcRefLocals());
            assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
        }
        else
        {
            // Make sure all pinned locals count as gc refs.
            assert(!inlineeLocal.lclIsPinned);
        }
#endif // DEBUG
    }

    return tmpNum;
}

//------------------------------------------------------------------------
// impInlineFetchArg: return tree node for argument value in an inlinee
//
// Arguments:
//    lclNum -- argument number in inlinee IL
//    inlArgInfo -- argument info for inlinee
//    lclVarInfo -- var info for inlinee
//
// Returns:
//    Tree for the argument's value. Often an inlinee-scoped temp
//    GT_LCL_VAR but can be other tree kinds, if the argument
//    expression from the caller can be directly substituted into the
//    inlinee body.
//
// Notes:
//    Must be used only for arguments -- use impInlineFetchLocal for
//    inlinee locals.
//
//    Direct substitution is performed when the formal argument cannot
//    change value in the inlinee body (no starg or ldarga), and the
//    actual argument expression's value cannot be changed if it is
//    substituted it into the inlinee body.
//
//    Even if an inlinee-scoped temp is returned here, it may later be
//    "bashed" to a caller-supplied tree when arguments are actually
//    passed (see fgInlinePrependStatements). Bashing can happen if
//    the argument ends up being single use and other conditions are
//    met. So the contents of the tree returned here may not end up
//    being the ones ultimately used for the argument.
//
//    This method will side effect inlArgInfo. It should only be called
//    for actual uses of the argument in the inlinee.
GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
{
    // Cache the relevant arg and lcl info for this argument.
    // We will modify argInfo but not lclVarInfo.
    InlArgInfo&          argInfo          = inlArgInfo[lclNum];
    const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
    const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
    const var_types      lclTyp           = lclInfo.lclTypeInfo;
    GenTree*             op1              = nullptr;

    GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal();

    if (argInfo.argIsInvariant && !argCanBeModified)
    {
        // Directly substitute constants or addresses of locals
        //
        // Clone the constant. Note that we cannot directly use
        // argNode in the trees even if !argInfo.argIsUsed as this
        // would introduce aliasing between inlArgInfo[].argNode and
        // impInlineExpr. Then gtFoldExpr() could change it, causing
        // further references to the argument working off of the
        // bashed copy.
        op1 = gtCloneExpr(argNode);
        PREFIX_ASSUME(op1 != nullptr);
        argInfo.argTmpNum = BAD_VAR_NUM;

        // We may need to retype to ensure we match the callee's view of the type.
        // Otherwise callee-pass throughs of arguments can create return type
        // mismatches that block inlining.
        //
        // Note argument type mismatches that prevent inlining should
        // have been caught in impInlineInitVars.
        if (op1->TypeGet() != lclTyp)
        {
            op1->gtType = genActualType(lclTyp);
        }
    }
    else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
    {
        // Directly substitute unaliased caller locals for args that cannot be modified
        //
        // Use the caller-supplied node if this is the first use.
        op1                = argNode;
        unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum();
        argInfo.argTmpNum  = argLclNum;

        // Use an equivalent copy if this is the second or subsequent
        // use.
        //
        // Note argument type mismatches that prevent inlining should
        // have been caught in impInlineInitVars. If inlining is not prevented
        // but a cast is necessary, we similarly expect it to have been inserted then.
        // So here we may have argument type mismatches that are benign, for instance
        // passing a TYP_SHORT local (eg. normalized-on-load) as a TYP_INT arg.
        // The exception is when the inlining means we should start tracking the argument.
        if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF)))
        {
            assert(op1->gtOper == GT_LCL_VAR);
            assert(lclNum == op1->AsLclVar()->gtLclILoffs);

            // Create a new lcl var node - remember the argument lclNum
            op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs));
            // Start tracking things as a byref if the parameter is a byref.
            if (lclTyp == TYP_BYREF)
            {
                op1->gtType = TYP_BYREF;
            }
        }
    }
    else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
    {
        /* Argument is a by-ref address to a struct, a normed struct, or its field.
           In these cases, don't spill the byref to a local, simply clone the tree and use it.
           This way we will increase the chance for this byref to be optimized away by
           a subsequent "dereference" operation.

           From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
           (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
           For example, if the caller is:
                 ldloca.s   V_1  // V_1 is a local struct
                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
           and the callee being inlined has:
                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
                     ldarga.s   ptrToInts
                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
           then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
           soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
        */
        assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL);
        op1 = gtCloneExpr(argNode);
    }
    else
    {
        /* Argument is a complex expression - it must be evaluated into a temp */

        if (argInfo.argHasTmp)
        {
            assert(argInfo.argIsUsed);
            assert(argInfo.argTmpNum < lvaCount);

            /* Create a new lcl var node - remember the argument lclNum */
            op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));

            /* This is the second or later use of the this argument,
            so we have to use the temp (instead of the actual arg) */
            argInfo.argBashTmpNode = nullptr;
        }
        else
        {
            /* First time use */
            assert(!argInfo.argIsUsed);

            /* Reserve a temp for the expression.
             * Use a large size node as we may change it later */

            const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));

            lvaTable[tmpNum].lvType = lclTyp;

            // For ref types, determine the type of the temp.
            if (lclTyp == TYP_REF)
            {
                if (!argCanBeModified)
                {
                    // If the arg can't be modified in the method
                    // body, use the type of the value, if
                    // known. Otherwise, use the declared type.
                    assert(lvaTable[tmpNum].lvSingleDef == 0);
                    lvaTable[tmpNum].lvSingleDef = 1;
                    JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
                    lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
                }
                else
                {
                    // Arg might be modified, use the declared type of
                    // the argument.
                    lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
                }
            }

            assert(!lvaTable[tmpNum].IsAddressExposed());
            if (argInfo.argHasLdargaOp)
            {
                lvaTable[tmpNum].lvHasLdAddrOp = 1;
            }

            if (lclInfo.lclVerTypeInfo.IsStruct())
            {
                if (varTypeIsStruct(lclTyp))
                {
                    lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
                    if (info.compIsVarArgs)
                    {
                        lvaSetStructUsedAsVarArg(tmpNum);
                    }
                }
                else
                {
                    // This is a wrapped primitive. Make sure the verstate knows that
                    lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
                }
            }

            argInfo.argHasTmp = true;
            argInfo.argTmpNum = tmpNum;

            // If we require strict exception order, then arguments must
            // be evaluated in sequence before the body of the inlined method.
            // So we need to evaluate them to a temp.
            // Also, if arguments have global or local references, we need to
            // evaluate them to a temp before the inlined body as the
            // inlined body may be modifying the global ref.
            // TODO-1stClassStructs: We currently do not reuse an existing lclVar
            // if it is a struct, because it requires some additional handling.

            if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
                 !argInfo.argHasCallerLocalRef))
            {
                /* Get a *LARGE* LCL_VAR node */
                op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum));

                /* Record op1 as the very first use of this argument.
                If there are no further uses of the arg, we may be
                able to use the actual arg node instead of the temp.
                If we do see any further uses, we will clear this. */
                argInfo.argBashTmpNode = op1;
            }
            else
            {
                /* Get a small LCL_VAR node */
                op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));

                /* No bashing of this argument */
                argInfo.argBashTmpNode = nullptr;
            }
        }
    }

    // Mark this argument as used.
    argInfo.argIsUsed = true;

    return op1;
}

/******************************************************************************
 Is this the original "this" argument to the call being inlined?

 Note that we do not inline methods with "starg 0", and so we do not need to
 worry about it.
*/

bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
{
    assert(compIsForInlining());

    // Only a plain local-var use can be the "this" argument; anything else is
    // immediately disqualified.
    if (tree->gtOper != GT_LCL_VAR)
    {
        return false;
    }

    // The "this" argument always occupies slot 0 of the inline arg table;
    // compare against the temp number recorded there.
    return tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum;
}

//-----------------------------------------------------------------------------
// impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in
// the inlinee can guarantee that the "this" pointer is non-NULL.
//
// Arguments:
//    additionalTree - a tree to check for side effects
//    additionalCallArgs - a list of call args to check for side effects
//    dereferencedAddress - address expression being dereferenced
//    inlArgInfo - inlinee argument information
//
// Notes:
//    If we haven't hit a branch or a side effect, and we are dereferencing
//    from 'this' to access a field or make GTF_CALL_NULLCHECK call,
//    then we can avoid a separate null pointer check.
//
//    The importer stack and current statement list are searched for side effects.
//    Trees that have been popped off the stack but haven't been appended to the
//    statement list and have to be checked for side effects may be provided via
//    additionalTree and additionalCallArgs.
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. 
void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
void Compiler::impMarkInlineCandidateHelper(GenTreeCall*           call,
                                            CORINFO_CONTEXT_HANDLE exactContextHnd,
                                            bool                   exactContextNeedsRuntimeLookup,
                                            CORINFO_CALL_INFO*     callInfo)
{
    // Let the strategy know there's another call
    impInlineRoot()->m_inlineStrategy->NoteCall();

    if (!opts.OptEnabled(CLFLG_INLINING))
    {
        /* XXX Mon 8/18/2008
         * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
         * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
         * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
         * figure out why we did not set MAXOPT for this compile.
         */
        assert(!compIsForInlining());
        return;
    }

    if (compIsForImportOnly())
    {
        // Don't bother creating the inline candidate during verification.
        // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
        // that leads to the creation of multiple instances of Compiler.
        return;
    }

    InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");

    // Screening checks: each rejects the candidate via NoteFatal and returns.

    // Don't inline if not optimizing root method
    if (opts.compDbgCode)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
        return;
    }

    // Don't inline if inlining into this method is disabled.
    if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled())
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
        return;
    }

    // Don't inline into callers that use the NextCallReturnAddress intrinsic.
    if (info.compHasNextCallRetAddr)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR);
        return;
    }

    // Inlining candidate determination needs to honor only IL tail prefix.
    // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
    if (call->IsTailPrefixedCall())
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
        return;
    }

    // Delegate Invoke method doesn't have a body and gets special cased instead.
    // Don't even bother trying to inline it.
    if (call->IsDelegateInvoke())
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
        return;
    }

    // Tail recursion elimination takes precedence over inlining.
    // TODO: We may want to do some of the additional checks from fgMorphCall
    // here to reduce the chance we don't inline a call that won't be optimized
    // as a fast tail call or turned into a loop.
    if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
        return;
    }

    if (call->IsVirtual())
    {
        // Allow guarded devirt calls to be treated as inline candidates,
        // but reject all other virtual calls.
        if (!call->IsGuardedDevirtualizationCandidate())
        {
            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
            return;
        }
    }

    /* Ignore helper calls */
    if (call->gtCallType == CT_HELPER)
    {
        assert(!call->IsGuardedDevirtualizationCandidate());
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
        return;
    }

    /* Ignore indirect calls */
    if (call->gtCallType == CT_INDIRECT)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
        return;
    }

    /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
     * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
     * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */

    // Resolve the method handle and attributes to evaluate against.
    CORINFO_METHOD_HANDLE fncHandle;
    unsigned              methAttr;

    if (call->IsGuardedDevirtualizationCandidate())
    {
        // For guarded devirt, evaluate the method we'd devirtualize to
        // (preferring the unboxed entry when one exists).
        if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr)
        {
            fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle;
        }
        else
        {
            fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle;
        }
        methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
    }
    else
    {
        fncHandle = call->gtCallMethHnd;

        // Reuse method flags from the original callInfo if possible
        if (fncHandle == callInfo->hMethod)
        {
            methAttr = callInfo->methodFlags;
        }
        else
        {
            methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
        }
    }

#ifdef DEBUG
    if (compStressCompile(STRESS_FORCE_INLINE, 0))
    {
        methAttr |= CORINFO_FLG_FORCEINLINE;
    }
#endif

    // Check for COMPlus_AggressiveInlining
    if (compDoAggressiveInlining)
    {
        methAttr |= CORINFO_FLG_FORCEINLINE;
    }

    if (!(methAttr & CORINFO_FLG_FORCEINLINE))
    {
        /* Don't bother inline blocks that are in the filter region */
        if (bbInCatchHandlerILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the catch handler region\n");
            }

#endif
            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
            return;
        }

        if (bbInFilterILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the filter region\n");
            }
#endif
            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
            return;
        }
    }

    /* Check if we tried to inline this method before */
    if (methAttr & CORINFO_FLG_DONT_INLINE)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
        return;
    }

    /* Cannot inline synchronized methods */
    if (methAttr & CORINFO_FLG_SYNCH)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
        return;
    }

    /* Check legality of PInvoke callsite (for inlining of marshalling code) */
    if (methAttr & CORINFO_FLG_PINVOKE)
    {
        // See comment in impCheckForPInvokeCall
        BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
        if (!impCanPInvokeInlineCallSite(block))
        {
            inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH);
            return;
        }
    }

    InlineCandidateInfo* inlineCandidateInfo = nullptr;
    impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);

    if (inlineResult.IsFailure())
    {
        return;
    }

    // The old value should be null OR this call should be a guarded devirtualization candidate.
    assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate());

    // The new value should not be null.
    assert(inlineCandidateInfo != nullptr);
    inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
    call->gtInlineCandidateInfo                         = inlineCandidateInfo;

    // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate
    // is also a tail call candidate, it can use the same return spill temp.
    //
    if (compIsForInlining() && call->CanTailCall() &&
        (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM))
    {
        inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp;
        JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call),
                inlineCandidateInfo->preexistingSpillTemp);
    }

    // Mark the call node as inline candidate.
    call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;

    // Let the strategy know there's another candidate.
    impInlineRoot()->m_inlineStrategy->NoteCandidate();

    // Since we're not actually inlining yet, and this call site is
    // still just an inline candidate, there's nothing to report.
    inlineResult.SetReported();
}

/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions

bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName)
{
#if defined(TARGET_XARCH)
    switch (intrinsicName)
    {
        // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
        // instructions to directly compute round/ceiling/floor.

        case NI_System_Math_Abs:
        case NI_System_Math_Sqrt:
            return true;

        case NI_System_Math_Ceiling:
        case NI_System_Math_Floor:
        case NI_System_Math_Round:
            return compOpportunisticallyDependsOn(InstructionSet_SSE41);

        case NI_System_Math_FusedMultiplyAdd:
            return compOpportunisticallyDependsOn(InstructionSet_FMA);

        default:
            return false;
    }
#elif defined(TARGET_ARM64)
    switch (intrinsicName)
    {
        case NI_System_Math_Abs:
        case NI_System_Math_Ceiling:
        case NI_System_Math_Floor:
        case NI_System_Math_Round:
        case NI_System_Math_Sqrt:
            return true;

        case NI_System_Math_FusedMultiplyAdd:
            return compOpportunisticallyDependsOn(InstructionSet_AdvSimd);

        default:
            return false;
    }
#elif defined(TARGET_ARM)
    switch (intrinsicName)
    {
        case NI_System_Math_Abs:
        case NI_System_Math_Round:
        case NI_System_Math_Sqrt:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of logic is not implemented for other arch.
    // The reason for returning true is that on all other arch the only intrinsic
    // enabled are target intrinsics.
    return true;
#endif
}

/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.

bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we turn to implementing some of them with helper calls,
    // this predicate needs to be revisited.
return !IsTargetIntrinsic(intrinsicName); } bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName) { switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_FusedMultiplyAdd: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: { assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END)); return true; } default: { assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END)); return false; } } } bool Compiler::IsMathIntrinsic(GenTree* tree) { return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName); } //------------------------------------------------------------------------ // impDevirtualizeCall: Attempt to change a virtual vtable call into a // normal call // // Arguments: // call -- the call node to examine/modify // pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R. // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized. // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized. // pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized. 
// pExactContextHandle -- [OUT] updated context handle iff call devirtualized
// isLateDevirtualization -- if devirtualization is happening after importation
// isExplicitTailCall -- [IN] true if we plan on using an explicit tail call
// ilOffset -- IL offset of the call
//
// Notes:
//     Virtual calls in IL will always "invoke" the base class method.
//
//     This transformation looks for evidence that the type of 'this'
//     in the call is exactly known, is a final class or would invoke
//     a final method, and if that and other safety checks pan out,
//     modifies the call and the call info to create a direct call.
//
//     This transformation is initially done in the importer and not
//     in some subsequent optimization pass because we want it to be
//     upstream of inline candidate identification.
//
//     However, later phases may supply improved type information that
//     can enable further devirtualization. We currently reinvoke this
//     code after inlining, if the return value of the inlined call is
//     the 'this obj' of a subsequent virtual call.
//
//     If devirtualization succeeds and the call's this object is a
//     (boxed) value type, the jit will ask the EE for the unboxed entry
//     point. If this exists, the jit will invoke the unboxed entry
//     on the box payload. In addition if the boxing operation is
//     visible to the jit and the call is the only consumer of the box,
//     the jit will try to analyze the box to see if the call can instead
//     be made on a local copy. If that is doable, the call is
//     updated to invoke the unboxed entry on the local copy and the
//     boxing operation is removed.
//
//     When guarded devirtualization is enabled, this method will mark
//     calls as guarded devirtualization candidates, if the type of `this`
//     is not exactly known, and there is a plausible guess for the type.
void Compiler::impDevirtualizeCall(GenTreeCall*            call,
                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                   CORINFO_METHOD_HANDLE*  method,
                                   unsigned*               methodFlags,
                                   CORINFO_CONTEXT_HANDLE* pContextHandle,
                                   CORINFO_CONTEXT_HANDLE* pExactContextHandle,
                                   bool                    isLateDevirtualization,
                                   bool                    isExplicitTailCall,
                                   IL_OFFSET               ilOffset)
{
    assert(call != nullptr);
    assert(method != nullptr);
    assert(methodFlags != nullptr);
    assert(pContextHandle != nullptr);

    // This should be a virtual vtable or virtual stub call.
    //
    assert(call->IsVirtual());

    // Possibly instrument. Note for OSR+PGO we will instrument when
    // optimizing and (currently) won't devirtualize. We may want
    // to revisit -- if we can devirtualize we should be able to
    // suppress the probe.
    //
    // We strip BBINSTR from inlinees currently, so we'll only
    // do this for the root method calls.
    //
    if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR))
    {
        assert(opts.OptimizationDisabled() || opts.IsOSR());
        assert(!compIsForInlining());

        // During importation, optionally flag this block as one that
        // contains calls requiring class profiling. Ideally perhaps
        // we'd just keep track of the calls themselves, so we don't
        // have to search for them later.
        //
        if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) &&
            !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) &&
            !isLateDevirtualization)
        {
            JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum);
            ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo;

            // Record some info needed for the class profiling probe.
            //
            pInfo->ilOffset   = ilOffset;
            pInfo->probeIndex = info.compClassProbeCount++;
            call->gtClassProfileCandidateInfo = pInfo;

            // Flag block as needing scrutiny
            //
            compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE;
        }
        return;
    }

    // Bail if optimizations are disabled.
    if (opts.OptimizationDisabled())
    {
        return;
    }

#if defined(DEBUG)
    // Bail if devirt is disabled.
    if (JitConfig.JitEnableDevirtualization() == 0)
    {
        return;
    }

    // Optionally, print info on devirtualization
    Compiler* const rootCompiler = impInlineRoot();
    const bool      doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args);
#endif // DEBUG

    // Fetch information about the virtual method we're calling.
    CORINFO_METHOD_HANDLE baseMethod        = *method;
    unsigned              baseMethodAttribs = *methodFlags;

    if (baseMethodAttribs == 0)
    {
        // For late devirt we may not have method attributes, so fetch them.
        baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
    }
    else
    {
#if defined(DEBUG)
        // Validate that callInfo has up to date method flags
        const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);

        // All the base method attributes should agree, save that
        // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
        // because of concurrent jitting activity.
        //
        // Note we don't look at this particular flag bit below, and
        // later on (if we do try and inline) we will rediscover why
        // the method can't be inlined, so there's no danger here in
        // seeing this particular flag bit in different states between
        // the cached and fresh values.
        if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
        {
            assert(!"mismatched method attributes");
        }
#endif // DEBUG
    }

    // In R2R mode, we might see virtual stub calls to
    // non-virtuals. For instance cases where the non-virtual method
    // is in a different assembly but is called via CALLVIRT. For
    // version resilience we must allow for the fact that the method
    // might become virtual in some update.
    //
    // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
    // regular call+nullcheck upstream, so we won't reach this
    // point.
    if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
    {
        assert(call->IsVirtualStub());
        assert(opts.IsReadyToRun());
        JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
        return;
    }

    // Fetch information about the class that introduced the virtual method.
    CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
    const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);

    // Is the call an interface call?
    const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;

    // See what we know about the type of 'this' in the call.
    GenTree*             thisObj      = call->gtCallThisArg->GetNode()->gtEffectiveVal(false);
    bool                 isExact      = false;
    bool                 objIsNonNull = false;
    CORINFO_CLASS_HANDLE objClass     = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);

    // Bail if we know nothing.
    if (objClass == NO_CLASS_HANDLE)
    {
        JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));

        // Don't try guarded devirtualization when we're doing late devirtualization.
        //
        if (isLateDevirtualization)
        {
            JITDUMP("No guarded devirt during late devirtualization\n");
            return;
        }

        considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG("unknown"));
        return;
    }

    // If the objClass is sealed (final), then we may be able to devirtualize.
    const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
    const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;

#if defined(DEBUG)
    const char* callKind       = isInterface ? "interface" : "virtual";
    const char* objClassNote   = "[?]";
    const char* objClassName   = "?objClass";
    const char* baseClassName  = "?baseClass";
    const char* baseMethodName = "?baseMethod";

    if (verbose || doPrint)
    {
        objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
        objClassName   = info.compCompHnd->getClassName(objClass);
        baseClassName  = info.compCompHnd->getClassName(baseClass);
        baseMethodName = eeGetMethodName(baseMethod, nullptr);

        if (verbose)
        {
            printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
                   " class for 'this' is %s%s (attrib %08x)\n"
                   " base method is %s::%s\n",
                   callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
        }
    }
#endif // defined(DEBUG)

    // See if the jit's best type for `obj` is an interface.
    // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
    //   IL_021d:  ldloc.0
    //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
    //
    // If so, we can't devirtualize, but we may be able to do guarded devirtualization.
    //
    if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
    {
        // Don't try guarded devirtualization when we're doing late devirtualization.
        //
        if (isLateDevirtualization)
        {
            JITDUMP("No guarded devirt during late devirtualization\n");
            return;
        }

        considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName));
        return;
    }

    // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch.
    // It may or may not know enough to devirtualize...
    if (isInterface)
    {
        assert(call->IsVirtualStub());
        JITDUMP("--- base class is interface\n");
    }

    // Fetch the method that would be called based on the declared type of 'this',
    // and prepare to fetch the method attributes.
    //
    CORINFO_DEVIRTUALIZATION_INFO dvInfo;
    dvInfo.virtualMethod               = baseMethod;
    dvInfo.objClass                    = objClass;
    dvInfo.context                     = *pContextHandle;
    dvInfo.detail                      = CORINFO_DEVIRTUALIZATION_UNKNOWN;
    dvInfo.pResolvedTokenVirtualMethod = pResolvedToken;

    // Ask the VM to resolve the virtual method against the known class of 'this'.
    info.compCompHnd->resolveVirtualMethod(&dvInfo);

    CORINFO_METHOD_HANDLE   derivedMethod         = dvInfo.devirtualizedMethod;
    CORINFO_CONTEXT_HANDLE  exactContext          = dvInfo.exactContext;
    CORINFO_CLASS_HANDLE    derivedClass          = NO_CLASS_HANDLE;
    CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod;

    if (derivedMethod != nullptr)
    {
        assert(exactContext != nullptr);
        assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
        derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK);
    }

    DWORD derivedMethodAttribs = 0;
    bool  derivedMethodIsFinal = false;
    bool  canDevirtualize      = false;

#if defined(DEBUG)
    const char* derivedClassName  = "?derivedClass";
    const char* derivedMethodName = "?derivedMethod";
    const char* note              = "inexact or not final";
#endif

    // If we failed to get a method handle, we can't directly devirtualize.
    //
    // This can happen when prejitting, if the devirtualization crosses
    // servicing bubble boundaries, or if objClass is a shared class.
    //
    if (derivedMethod == nullptr)
    {
        JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail));
    }
    else
    {
        // Fetch method attributes to see if method is marked final.
        derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
        derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);

#if defined(DEBUG)
        if (isExact)
        {
            note = "exact";
        }
        else if (objClassIsFinal)
        {
            note = "final class";
        }
        else if (derivedMethodIsFinal)
        {
            note = "final method";
        }

        if (verbose || doPrint)
        {
            derivedMethodName = eeGetMethodName(derivedMethod, nullptr);
            derivedClassName  = eeGetClassName(derivedClass);
            if (verbose)
            {
                printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
                gtDispTree(call);
            }
        }
#endif // defined(DEBUG)

        // Devirtualization is safe when the type is exact, the class is final,
        // or (for non-interface calls) the resolved method is final.
        canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal);
    }

    // We still might be able to do a guarded devirtualization.
    // Note the call might be an interface call or a virtual call.
    //
    if (!canDevirtualize)
    {
        JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final");

#if defined(DEBUG)
        // If we know the object type exactly, we generally expect we can devirtualize.
        // (don't when doing late devirt as we won't have an owner type (yet))
        //
        if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization())
        {
            printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail));
        }
#endif

        // Don't try guarded devirtualization if we're doing late devirtualization.
        //
        if (isLateDevirtualization)
        {
            JITDUMP("No guarded devirt during late devirtualization\n");
            return;
        }

        considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName));
        return;
    }

    // All checks done. Time to transform the call.
    //
    // We should always have an exact class context.
    //
    // Note that wouldn't be true if the runtime side supported array interface devirt,
    // the resulting method would be a generic method of the non-generic SZArrayHelper class.
    //
    assert(canDevirtualize);

    JITDUMP(" %s; can devirtualize\n", note);

    // Make the updates.
    call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
    call->gtFlags &= ~GTF_CALL_VIRT_STUB;
    call->gtCallMethHnd = derivedMethod;
    call->gtCallType    = CT_USER_FUNC;
    call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED;

    // Virtual calls include an implicit null check, which we may
    // now need to make explicit.
    if (!objIsNonNull)
    {
        call->gtFlags |= GTF_CALL_NULLCHECK;
    }

    // Clear the inline candidate info (may be non-null since
    // it's a union field used for other things by virtual
    // stubs)
    call->gtInlineCandidateInfo = nullptr;

#if defined(DEBUG)
    if (verbose)
    {
        printf("... after devirt...\n");
        gtDispTree(call);
    }

    if (doPrint)
    {
        printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note);
    }

    // If we successfully devirtualized based on an exact or final class,
    // and we have dynamic PGO data describing the likely class, make sure they agree.
    //
    // If pgo source is not dynamic we may see likely classes from other versions of this code
    // where types had different properties.
    //
    // If method is an inlinee we may be specializing to a class that wasn't seen at runtime.
    //
    const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining();
    if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck)
    {
        // We only can handle a single likely class for now
        const int         maxLikelyClasses = 1;
        LikelyClassRecord likelyClasses[maxLikelyClasses];

        UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset);
        UINT32 likelihood = likelyClasses[0].likelihood;

        CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle;

        if (numberOfClasses > 0)
        {
            // PGO had better agree the class we devirtualized to is plausible.
            //
            if (likelyClass != derivedClass)
            {
                // Managed type system may report different addresses for a class handle
                // at different times....?
                //
                // Also, AOT may have a more nuanced notion of class equality.
                if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
                {
                    bool mismatch = true;

                    // derivedClass will be the introducer of derived method, so it's possible
                    // likelyClass is a non-overriding subclass. Check up the hierarchy.
                    //
                    CORINFO_CLASS_HANDLE parentClass = likelyClass;
                    while (parentClass != NO_CLASS_HANDLE)
                    {
                        if (parentClass == derivedClass)
                        {
                            mismatch = false;
                            break;
                        }

                        parentClass = info.compCompHnd->getParentType(parentClass);
                    }

                    if (mismatch || (numberOfClasses != 1) || (likelihood != 100))
                    {
                        printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName);
                    }

                    assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100)));
                }
            }
        }
    }
#endif // defined(DEBUG)

    // If the 'this' object is a value class, see if we can rework the call to invoke the
    // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub
    // and exposes the potentially inlinable unboxed entry method.
    //
    // We won't optimize explicit tail calls, as ensuring we get the right tail call info
    // is tricky (we'd need to pass an updated sig and resolved token back to some callers).
    //
    // Note we may not have a derived class in some cases (eg interface call on an array)
    //
    if (info.compCompHnd->isValueClass(derivedClass))
    {
        if (isExplicitTailCall)
        {
            JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n");
        }
        else
        {
            JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n");

            // Note for some shared methods the unboxed entry point requires an extra parameter.
            bool                  requiresInstMethodTableArg = false;
            CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);

            if (unboxedEntryMethod != nullptr)
            {
                bool optimizedTheBox = false;

                // If the 'this' object is a local box, see if we can revise things
                // to not require boxing.
                //
                if (thisObj->IsBoxedValue() && !isExplicitTailCall)
                {
                    // Since the call is the only consumer of the box, we know the box can't escape
                    // since it is being passed an interior pointer.
                    //
                    // So, revise the box to simply create a local copy, use the address of that copy
                    // as the this pointer, and update the entry point to the unboxed entry.
                    //
                    // Ideally, we then inline the boxed method and if it turns out not to modify
                    // the copy, we can undo the copy too.
                    if (requiresInstMethodTableArg)
                    {
                        // Perform a trial box removal and ask for the type handle tree that fed the box.
                        //
                        JITDUMP("Unboxed entry needs method table arg...\n");
                        GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);

                        if (methodTableArg != nullptr)
                        {
                            // If that worked, turn the box into a copy to a local var
                            //
                            JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
                            GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);

                            if (localCopyThis != nullptr)
                            {
                                // Pass the local var as this and the type handle as a new arg
                                //
                                JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table "
                                        "arg\n");
                                call->gtCallThisArg = gtNewCallArgs(localCopyThis);
                                call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;

                                // Prepend for R2L arg passing or empty L2R passing
                                // Append for non-empty L2R
                                //
                                if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
                                {
                                    // If there's a ret buf, the method table is the second arg.
                                    //
                                    if (call->HasRetBufArg())
                                    {
                                        gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs);
                                    }
                                    else
                                    {
                                        call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs);
                                    }
                                }
                                else
                                {
                                    GenTreeCall::Use* beforeArg = call->gtCallArgs;
                                    while (beforeArg->GetNext() != nullptr)
                                    {
                                        beforeArg = beforeArg->GetNext();
                                    }

                                    beforeArg->SetNext(gtNewCallArgs(methodTableArg));
                                }

                                call->gtCallMethHnd   = unboxedEntryMethod;
                                derivedMethod         = unboxedEntryMethod;
                                pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;

                                // Method attributes will differ because unboxed entry point is shared
                                //
                                const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
                                JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs);
                                derivedMethodAttribs = unboxedMethodAttribs;
                                optimizedTheBox      = true;
                            }
                            else
                            {
                                JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
                            }
                        }
                        else
                        {
                            JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
                        }
                    }
                    else
                    {
                        JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
                        GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);

                        if (localCopyThis != nullptr)
                        {
                            JITDUMP("Success! invoking unboxed entry point on local copy\n");
                            call->gtCallThisArg = gtNewCallArgs(localCopyThis);
                            call->gtCallMethHnd = unboxedEntryMethod;
                            call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
                            derivedMethod         = unboxedEntryMethod;
                            pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;

                            optimizedTheBox = true;
                        }
                        else
                        {
                            JITDUMP("Sorry, failed to undo the box\n");
                        }
                    }

                    if (optimizedTheBox)
                    {
#if FEATURE_TAILCALL_OPT
                        if (call->IsImplicitTailCall())
                        {
                            JITDUMP("Clearing the implicit tail call flag\n");

                            // If set, we clear the implicit tail call flag
                            // as we just introduced a new address taken local variable
                            //
                            call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
                        }
#endif // FEATURE_TAILCALL_OPT
                    }
                }

                if (!optimizedTheBox)
                {
                    // If we get here, we have a boxed value class that either wasn't boxed
                    // locally, or was boxed locally but we were unable to remove the box for
                    // various reasons.
                    //
                    // We can still update the call to invoke the unboxed entry, if the
                    // boxed value is simple.
                    //
                    if (requiresInstMethodTableArg)
                    {
                        // Get the method table from the boxed object.
                        //
                        GenTree* const thisArg       = call->gtCallThisArg->GetNode();
                        GenTree* const clonedThisArg = gtClone(thisArg);

                        if (clonedThisArg == nullptr)
                        {
                            JITDUMP("unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n");
                        }
                        else
                        {
                            JITDUMP("revising call to invoke unboxed entry with additional method table arg\n");

                            GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg);

                            // Update the 'this' pointer to refer to the box payload
                            //
                            GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
                            GenTree* const boxPayload    = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset);

                            call->gtCallThisArg = gtNewCallArgs(boxPayload);
                            call->gtCallMethHnd = unboxedEntryMethod;
                            call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;

                            // Method attributes will differ because unboxed entry point is shared
                            //
                            const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
                            JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs);
                            derivedMethod         = unboxedEntryMethod;
                            pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
                            derivedMethodAttribs  = unboxedMethodAttribs;

                            // Add the method table argument.
                            //
                            // Prepend for R2L arg passing or empty L2R passing
                            // Append for non-empty L2R
                            //
                            if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
                            {
                                // If there's a ret buf, the method table is the second arg.
                                //
                                if (call->HasRetBufArg())
                                {
                                    gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs);
                                }
                                else
                                {
                                    call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs);
                                }
                            }
                            else
                            {
                                GenTreeCall::Use* beforeArg = call->gtCallArgs;
                                while (beforeArg->GetNext() != nullptr)
                                {
                                    beforeArg = beforeArg->GetNext();
                                }

                                beforeArg->SetNext(gtNewCallArgs(methodTableArg));
                            }
                        }
                    }
                    else
                    {
                        JITDUMP("revising call to invoke unboxed entry\n");

                        GenTree* const thisArg       = call->gtCallThisArg->GetNode();
                        GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
                        GenTree* const boxPayload    = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset);

                        call->gtCallThisArg = gtNewCallArgs(boxPayload);
                        call->gtCallMethHnd = unboxedEntryMethod;
                        call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
                        derivedMethod         = unboxedEntryMethod;
                        pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod;
                    }
                }
            }
            else
            {
                // Many of the low-level methods on value classes won't have unboxed entries,
                // as they need access to the type of the object.
                //
                // Note this may be a cue for us to stack allocate the boxed object, since
                // we probably know that these objects don't escape.
                JITDUMP("Sorry, failed to find unboxed entry point\n");
            }
        }
    }

    // Need to update call info too.
    //
    *method      = derivedMethod;
    *methodFlags = derivedMethodAttribs;

    // Update context handle
    //
    *pContextHandle = MAKE_METHODCONTEXT(derivedMethod);

    // Update exact context handle.
    //
    if (pExactContextHandle != nullptr)
    {
        *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass);
    }

#ifdef FEATURE_READYTORUN
    if (opts.IsReadyToRun())
    {
        // For R2R, getCallInfo triggers bookkeeping on the zap
        // side and acquires the actual symbol to call so we need to call it here.

        // Look up the new call info.
        CORINFO_CALL_INFO derivedCallInfo;
        eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo);

        // Update the call.
        call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
        call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
        call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
    }
#endif // FEATURE_READYTORUN
}

//------------------------------------------------------------------------
// impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
//   to an intrinsic returns an exact type
//
// Arguments:
//     methodHnd -- handle for the special intrinsic method
//
// Returns:
//     Exact class handle returned by the intrinsic call, if known.
//     Nullptr if not known, or not likely to lead to beneficial optimization.
CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
{
    JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));

    CORINFO_CLASS_HANDLE result = nullptr;

    // See what intrinsic we have...
    const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
    switch (ni)
    {
        case NI_System_Collections_Generic_Comparer_get_Default:
        case NI_System_Collections_Generic_EqualityComparer_get_Default:
        {
            // Expect one class generic parameter; figure out which it is.
            CORINFO_SIG_INFO sig;
            info.compCompHnd->getMethodSig(methodHnd, &sig);
            assert(sig.sigInst.classInstCount == 1);
            CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
            assert(typeHnd != nullptr);

            // Lookup can be incorrect when we have __Canon as it won't appear
            // to implement any interface types.
            //
            // And if we do not have a final type, devirt & inlining is
            // unlikely to result in much simplification.
            //
            // We can use CORINFO_FLG_FINAL to screen out both of these cases.
            const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
            const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);

            if (isFinalType)
            {
                if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default)
                {
                    result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
                }
                else
                {
                    assert(ni == NI_System_Collections_Generic_Comparer_get_Default);
                    result = info.compCompHnd->getDefaultComparerClass(typeHnd);
                }
                JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd), result != nullptr ? eeGetClassName(result) : "unknown");
            }
            else
            {
                JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
            }

            break;
        }

        default:
        {
            JITDUMP("This special intrinsic not handled, sorry...\n");
            break;
        }
    }

    return result;
}

//------------------------------------------------------------------------
// impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
//
// Arguments:
//    token - init value for the allocated token.
//
// Return Value:
//    pointer to token into jit-allocated memory.
CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token)
{
    // Copy the caller's token into jit-lifetime memory so the pointer stays valid.
    CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
    *memory = token;
    return memory;
}

//------------------------------------------------------------------------
// SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables.
// class SpillRetExprHelper { public: SpillRetExprHelper(Compiler* comp) : comp(comp) { } void StoreRetExprResultsInArgs(GenTreeCall* call) { for (GenTreeCall::Use& use : call->Args()) { comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this); } if (call->gtCallThisArg != nullptr) { comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this); } } private: static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) { assert((pTree != nullptr) && (*pTree != nullptr)); GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { // Trees with ret_expr are marked as GTF_CALL. return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. 
// // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 
25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. // doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. 
// if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because and we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned. 
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. 
if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... 
class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. 
// // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try and get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid. 
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals to value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }